summaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/base/firmware_class.c1
-rw-r--r--drivers/base/platform.c20
-rw-r--r--drivers/base/power/main.c4
-rw-r--r--drivers/char/diag/diag_masks.c9
-rw-r--r--drivers/char/diag/diag_usb.c6
-rw-r--r--drivers/char/diag/diagchar.h3
-rw-r--r--drivers/char/diag/diagchar_core.c45
-rw-r--r--drivers/char/diag/diagfwd.c17
-rw-r--r--drivers/char/diag/diagfwd_bridge.c2
-rw-r--r--drivers/char/diag/diagfwd_glink.c18
-rw-r--r--drivers/char/diag/diagfwd_glink.h1
-rw-r--r--drivers/char/diag/diagfwd_mhi.c5
-rw-r--r--drivers/char/random.c22
-rw-r--r--drivers/clk/msm/Kconfig1
-rw-r--r--drivers/clk/msm/clock-gcc-cobalt.c44
-rw-r--r--drivers/clk/msm/clock-mmss-cobalt.c40
-rw-r--r--drivers/clk/msm/clock-osm.c231
-rw-r--r--drivers/clk/msm/vdd-level-cobalt.h18
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c560
-rw-r--r--drivers/crypto/msm/qce50.c38
-rw-r--r--drivers/gpu/msm/adreno-gpulist.h2
-rw-r--r--drivers/gpu/msm/adreno.c15
-rw-r--r--drivers/gpu/msm/adreno_a3xx.c2
-rw-r--r--drivers/gpu/msm/adreno_a4xx.c2
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c11
-rw-r--r--drivers/gpu/msm/adreno_a5xx_preempt.c11
-rw-r--r--drivers/gpu/msm/adreno_a5xx_snapshot.c4
-rw-r--r--drivers/gpu/msm/adreno_profile.c3
-rw-r--r--drivers/gpu/msm/adreno_ringbuffer.c6
-rw-r--r--drivers/gpu/msm/kgsl.c9
-rw-r--r--drivers/gpu/msm/kgsl_debugfs.c26
-rw-r--r--drivers/gpu/msm/kgsl_iommu.c123
-rw-r--r--drivers/gpu/msm/kgsl_mmu.c6
-rw-r--r--drivers/gpu/msm/kgsl_mmu.h5
-rw-r--r--drivers/gpu/msm/kgsl_pool.c14
-rw-r--r--drivers/gpu/msm/kgsl_sharedmem.h4
-rw-r--r--drivers/gpu/msm/kgsl_trace.h35
-rw-r--r--drivers/hid/hid-sensor-hub.c3
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c23
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c12
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c6
-rw-r--r--drivers/iio/adc/Kconfig12
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/qcom-tadc.c742
-rw-r--r--drivers/input/touchscreen/Kconfig42
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/ft5x06_ts.c1025
-rw-r--r--drivers/input/touchscreen/gt9xx/Kconfig51
-rw-r--r--drivers/input/touchscreen/gt9xx/Makefile8
-rw-r--r--drivers/input/touchscreen/gt9xx/goodix_tool.c166
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx.c963
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx.h122
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx_update.c254
-rw-r--r--drivers/input/touchscreen/it7258_ts_i2c.c4
-rw-r--r--drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c185
-rw-r--r--drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.h7
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/md/Kconfig12
-rw-r--r--drivers/md/Makefile5
-rw-r--r--drivers/md/dm-cache-target.c3
-rw-r--r--drivers/md/dm-crypt.c14
-rw-r--r--drivers/md/dm-snap.c6
-rw-r--r--drivers/md/dm-verity-fec.c861
-rw-r--r--drivers/md/dm-verity-fec.h155
-rw-r--r--drivers/md/dm-verity-target.c (renamed from drivers/md/dm-verity.c)602
-rw-r--r--drivers/md/dm-verity.h129
-rw-r--r--drivers/media/platform/msm/camera_v2/Kconfig48
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c163
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h16
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.c5
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.h79
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp32.c71
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c198
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp44.c203
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp46.c196
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.c193
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.h6
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c3088
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h101
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c1153
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h54
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c85
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h2
-rw-r--r--drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c76
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h57
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c237
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h6
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c153
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h9
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/Makefile3
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h56
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c1093
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c46
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h3
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c12
-rw-r--r--drivers/media/platform/msm/sde/Kconfig11
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_base.h29
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_core.c24
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c671
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h26
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c2
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c63
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c10
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_util.c2
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.c121
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.h1
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_res_parse.c52
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_res_parse.h1
-rw-r--r--drivers/media/platform/msm/vidc/venus_hfi.c27
-rw-r--r--drivers/mfd/wcd9xxx-irq.c33
-rw-r--r--drivers/misc/qcom/Kconfig2
-rw-r--r--drivers/misc/qcom/qdsp6v2/aac_in.c4
-rw-r--r--drivers/misc/qcom/qdsp6v2/amrnb_in.c5
-rw-r--r--drivers/misc/qcom/qdsp6v2/amrwb_in.c5
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_alac.c4
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c6
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_ape.c4
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c2
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_multi_aac.c4
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_utils.c15
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_utils_aio.c2
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_wmapro.c4
-rw-r--r--drivers/misc/qcom/qdsp6v2/evrc_in.c4
-rw-r--r--drivers/misc/qcom/qdsp6v2/qcelp_in.c4
-rw-r--r--drivers/misc/qseecom.c196
-rw-r--r--drivers/misc/uid_stat.c153
-rw-r--r--drivers/mmc/card/Kconfig12
-rw-r--r--drivers/mmc/card/block.c300
-rw-r--r--drivers/mmc/card/queue.h8
-rw-r--r--drivers/mmc/core/core.c6
-rw-r--r--drivers/mmc/core/sdio.c1
-rw-r--r--drivers/mmc/host/sdhci-msm.c14
-rw-r--r--drivers/mmc/host/sdhci.c7
-rw-r--r--drivers/net/ppp/Kconfig4
-rw-r--r--drivers/net/ppp/pppolac.c9
-rw-r--r--drivers/net/ppp/pppopns.c9
-rw-r--r--drivers/net/wireless/cnss/cnss_pci.c45
-rw-r--r--drivers/net/wireless/cnss/cnss_sdio.c212
-rw-r--r--drivers/pci/host/pci-msm.c121
-rw-r--r--drivers/phy/phy-qcom-ufs-i.h5
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-v3.c17
-rw-r--r--drivers/phy/phy-qcom-ufs-qmp-v3.h16
-rw-r--r--drivers/phy/phy-qcom-ufs.c18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c333
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c67
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c21
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c21
-rw-r--r--drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c20
-rw-r--r--drivers/platform/msm/ipa/ipa_clients/ipa_usb.c4
-rw-r--r--drivers/platform/msm/ipa/ipa_rm.c7
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa.c226
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_dp.c4
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_intf.c23
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c48
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa.c257
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dp.c31
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_intf.c21
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c48
-rw-r--r--drivers/platform/msm/ipa/test/Makefile2
-rw-r--r--drivers/platform/msm/ipa/test/ipa_test_dma.c931
-rw-r--r--drivers/platform/msm/ipa/test/ipa_ut_suite_list.h2
-rw-r--r--drivers/platform/msm/sps/sps_bam.c3
-rw-r--r--drivers/power/power_supply_sysfs.c2
-rw-r--r--drivers/power/qcom-charger/qpnp-smb2.c260
-rw-r--r--drivers/power/qcom-charger/smb-lib.c347
-rw-r--r--drivers/power/qcom-charger/smb-lib.h35
-rw-r--r--drivers/power/qcom-charger/smb-reg.h30
-rw-r--r--drivers/power/qcom-charger/smb138x-charger.c20
-rw-r--r--drivers/power/qcom/debug_core.c63
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/cpr3-mmss-regulator.c4
-rw-r--r--drivers/regulator/cpr3-regulator.c30
-rw-r--r--drivers/regulator/cpr3-regulator.h11
-rw-r--r--drivers/regulator/cpr3-util.c18
-rw-r--r--drivers/regulator/cprh-kbss-regulator.c28
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c151
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h1
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c23
-rw-r--r--drivers/scsi/ufs/ufshcd.c34
-rw-r--r--drivers/scsi/ufs/ufshcd.h11
-rw-r--r--drivers/slimbus/slim-msm-ngd.c34
-rw-r--r--drivers/slimbus/slim-msm.h1
-rw-r--r--drivers/soc/qcom/Kconfig20
-rw-r--r--drivers/soc/qcom/Makefile3
-rw-r--r--drivers/soc/qcom/glink.c289
-rw-r--r--drivers/soc/qcom/icnss.c1175
-rw-r--r--drivers/soc/qcom/icnss_utils.c132
-rw-r--r--drivers/soc/qcom/irq-helper.c4
-rw-r--r--drivers/soc/qcom/memshare/msm_memshare.c2
-rw-r--r--drivers/soc/qcom/remoteqdss.c36
-rw-r--r--drivers/soc/qcom/rpm-smd.c34
-rw-r--r--drivers/soc/qcom/socinfo.c7
-rw-r--r--drivers/soc/qcom/spss_utils.c294
-rw-r--r--drivers/soc/qcom/subsystem_restart.c22
-rw-r--r--drivers/tty/serial/msm_serial.c187
-rw-r--r--drivers/usb/core/devio.c9
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c43
-rw-r--r--drivers/usb/dwc3/gadget.c7
-rw-r--r--drivers/usb/gadget/composite.c23
-rw-r--r--drivers/usb/gadget/function/f_accessory.c22
-rw-r--r--drivers/usb/gadget/function/f_audio_source.c6
-rw-r--r--drivers/usb/gadget/function/f_gsi.c76
-rw-r--r--drivers/usb/gadget/function/f_mtp.c88
-rw-r--r--drivers/usb/gadget/function/u_ether.c1
-rw-r--r--drivers/usb/pd/policy_engine.c5
-rw-r--r--drivers/usb/phy/phy-msm-qusb-v2.c201
-rw-r--r--drivers/usb/phy/phy-msm-qusb.c20
-rw-r--r--drivers/usb/phy/phy-msm-ssusb-qmp.c189
-rw-r--r--drivers/video/adf/Kconfig2
-rw-r--r--drivers/video/adf/Makefile6
-rw-r--r--drivers/video/adf/adf_client.c4
-rw-r--r--drivers/video/fbdev/msm/mdss.h1
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.c44
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.h1
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_panel.c7
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_ctl.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_hwio.h1
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_video.c32
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c8
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp.c4
-rw-r--r--drivers/video/fbdev/msm/mdss_panel.c1
-rw-r--r--drivers/video/fbdev/msm/mdss_panel.h3
225 files changed, 17319 insertions, 5364 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 1761f8f2cda7..eb67aadf2ee0 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
+obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SCSI) += scsi/
obj-y += nvme/
obj-$(CONFIG_ATA) += ata/
@@ -126,7 +127,6 @@ obj-$(CONFIG_SWITCH) += switch/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
-obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
obj-$(CONFIG_ARCH_SHMOBILE) += sh/
ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 5645850cb4d3..152c81ca50ea 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -1555,6 +1555,7 @@ _request_firmware_nowait(
get_device(desc->device);
INIT_WORK(&desc->work, request_firmware_work_func);
+ schedule_work(&desc->work);
return 0;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 176b59f5bc47..bd70f8db469b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -117,6 +117,26 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
EXPORT_SYMBOL_GPL(platform_get_irq);
/**
+ * platform_irq_count - Count the number of IRQs a platform device uses
+ * @dev: platform device
+ *
+ * Return: Number of IRQs a platform device uses or EPROBE_DEFER
+ */
+int platform_irq_count(struct platform_device *dev)
+{
+ int ret, nr = 0;
+
+ while ((ret = platform_get_irq(dev, nr)) >= 0)
+ nr++;
+
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(platform_irq_count);
+
+/**
* platform_get_resource_byname - get a resource for a device by name
* @dev: platform device
* @type: resource type
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a54d810f2966..6ed8b9326629 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -35,6 +35,8 @@
#include <linux/timer.h>
#include <linux/wakeup_reason.h>
+#include <asm/current.h>
+
#include "../base.h"
#include "power.h"
@@ -1412,7 +1414,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
goto Complete;
data.dev = dev;
- data.tsk = get_current();
+ data.tsk = current;
init_timer_on_stack(&timer);
timer.expires = jiffies + HZ * 12;
timer.function = dpm_drv_timeout;
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 7cb64e012f1f..a1721a3b80cc 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -1780,6 +1780,15 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
if (!mask_info)
return -EIO;
+ mutex_lock(&driver->diag_maskclear_mutex);
+ if (driver->mask_clear) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:%s: count = %zu\n", __func__, count);
+ mutex_unlock(&driver->diag_maskclear_mutex);
+ return -EIO;
+ }
+ mutex_unlock(&driver->diag_maskclear_mutex);
+
mutex_lock(&mask_info->lock);
mask = (struct diag_msg_mask_t *)(mask_info->ptr);
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index eb715cc8501c..ca54b24ec604 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -215,6 +215,12 @@ static void usb_connect_work_fn(struct work_struct *work)
*/
static void usb_disconnect(struct diag_usb_info *ch)
{
+ if (!ch)
+ return;
+
+ if (!atomic_read(&ch->connected) && driver->usb_connected)
+ diag_clear_masks(NULL);
+
if (ch && ch->ops && ch->ops->close)
ch->ops->close(ch->ctxt, DIAG_USB_MODE);
}
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index dccaa6a0d9c4..2aef98f4fe04 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -467,6 +467,8 @@ struct diagchar_dev {
struct class *diagchar_class;
struct device *diag_dev;
int ref_count;
+ int mask_clear;
+ struct mutex diag_maskclear_mutex;
struct mutex diagchar_mutex;
struct mutex diag_file_mutex;
wait_queue_head_t wait_q;
@@ -625,6 +627,7 @@ void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc);
void diag_cmd_remove_reg_by_pid(int pid);
void diag_cmd_remove_reg_by_proc(int proc);
int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry);
+void diag_clear_masks(struct diag_md_session_t *info);
void diag_record_stats(int type, int flag);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index a39e4929d999..39be6ef3735e 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -389,6 +389,27 @@ static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
return ret;
}
+void diag_clear_masks(struct diag_md_session_t *info)
+{
+ int ret;
+ char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
+ char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
+ char cmd_disable_event_mask[] = { 0x60, 0};
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: masks clear request upon %s\n", __func__,
+ ((info) ? "ODL exit" : "USB Disconnection"));
+
+ ret = diag_process_apps_masks(cmd_disable_log_mask,
+ sizeof(cmd_disable_log_mask), info);
+ ret = diag_process_apps_masks(cmd_disable_msg_mask,
+ sizeof(cmd_disable_msg_mask), info);
+ ret = diag_process_apps_masks(cmd_disable_event_mask,
+ sizeof(cmd_disable_event_mask), info);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:%s: masks cleared successfully\n", __func__);
+}
+
static void diag_close_logging_process(const int pid)
{
int i;
@@ -400,6 +421,12 @@ static void diag_close_logging_process(const int pid)
if (!session_info)
return;
+ diag_clear_masks(session_info);
+
+ mutex_lock(&driver->diag_maskclear_mutex);
+ driver->mask_clear = 1;
+ mutex_unlock(&driver->diag_maskclear_mutex);
+
session_peripheral_mask = session_info->peripheral_mask;
diag_md_session_close(session_info);
for (i = 0; i < NUM_MD_SESSIONS; i++)
@@ -475,9 +502,14 @@ static int diag_remove_client_entry(struct file *file)
}
static int diagchar_close(struct inode *inode, struct file *file)
{
+ int ret;
DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
current->comm);
- return diag_remove_client_entry(file);
+ ret = diag_remove_client_entry(file);
+ mutex_lock(&driver->diag_maskclear_mutex);
+ driver->mask_clear = 0;
+ mutex_unlock(&driver->diag_maskclear_mutex);
+ return ret;
}
void diag_record_stats(int type, int flag)
@@ -967,7 +999,7 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len,
(void *)driver->hdlc_encode_buf);
send_data:
- err = diagfwd_bridge_write(proc, driver->hdlc_encode_buf,
+ err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
driver->hdlc_encode_buf_len);
if (err) {
pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
@@ -2538,7 +2570,7 @@ static int diag_user_process_raw_data(const char __user *buf, int len)
}
}
if (remote_proc) {
- ret = diag_send_raw_data_remote(remote_proc - 1,
+ ret = diag_send_raw_data_remote(remote_proc,
(void *)(user_space_data + token_offset),
len, USER_SPACE_RAW_DATA);
if (ret) {
@@ -3358,6 +3390,7 @@ static int __init diagchar_init(void)
non_hdlc_data.len = 0;
mutex_init(&driver->hdlc_disable_mutex);
mutex_init(&driver->diagchar_mutex);
+ mutex_init(&driver->diag_maskclear_mutex);
mutex_init(&driver->diag_file_mutex);
mutex_init(&driver->delayed_rsp_mutex);
mutex_init(&apps_data_mutex);
@@ -3391,13 +3424,13 @@ static int __init diagchar_init(void)
ret = diag_masks_init();
if (ret)
goto fail;
- ret = diag_mux_init();
+ ret = diag_remote_init();
if (ret)
goto fail;
- ret = diagfwd_init();
+ ret = diag_mux_init();
if (ret)
goto fail;
- ret = diag_remote_init();
+ ret = diagfwd_init();
if (ret)
goto fail;
ret = diagfwd_bridge_init();
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 8205e5b05d85..0111b02634c8 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1232,8 +1232,6 @@ static int diagfwd_mux_open(int id, int mode)
static int diagfwd_mux_close(int id, int mode)
{
- uint8_t i;
-
switch (mode) {
case DIAG_USB_MODE:
driver->usb_connected = 0;
@@ -1248,15 +1246,16 @@ static int diagfwd_mux_close(int id, int mode)
driver->md_session_mode == DIAG_MD_NONE) ||
(driver->md_session_mode == DIAG_MD_PERIPHERAL)) {
/*
- * In this case the channel must not be closed. This case
- * indicates that the USB is removed but there is a client
- * running in background with Memory Device mode
+ * This case indicates that the USB is removed
+ * but there is a client running in background
+ * with Memory Device mode.
*/
} else {
- for (i = 0; i < NUM_PERIPHERALS; i++) {
- diagfwd_close(i, TYPE_DATA);
- diagfwd_close(i, TYPE_CMD);
- }
+ /*
+ * With clearing of masks on ODL exit and
+ * USB disconnection, closing of the channel is
+ * not needed.This enables read and drop of stale packets.
+ */
/* Re enable HDLC encoding */
pr_debug("diag: In %s, re-enabling HDLC encoding\n",
__func__);
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
index 701677b30a71..0462a64614f3 100644
--- a/drivers/char/diag/diagfwd_bridge.c
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -107,7 +107,7 @@ static int diagfwd_bridge_mux_write_done(unsigned char *buf, int len,
if (id < 0 || id >= NUM_REMOTE_DEV)
return -EINVAL;
- ch = &bridge_info[id];
+ ch = &bridge_info[buf_ctx];
if (ch->dev_ops && ch->dev_ops->fwd_complete)
ch->dev_ops->fwd_complete(ch->ctxt, buf, len, 0);
return 0;
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index 0a6d8bb7b21f..fea1b74aacae 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -475,7 +475,7 @@ static void diag_glink_open_work_fn(struct work_struct *work)
struct glink_open_config open_cfg;
void *handle = NULL;
- if (!glink_info)
+ if (!glink_info || glink_info->hdl)
return;
memset(&open_cfg, 0, sizeof(struct glink_open_config));
@@ -499,11 +499,12 @@ static void diag_glink_close_work_fn(struct work_struct *work)
struct diag_glink_info *glink_info = container_of(work,
struct diag_glink_info,
close_work);
- if (!glink_info->inited)
+ if (!glink_info || !glink_info->inited || !glink_info->hdl)
return;
glink_close(glink_info->hdl);
atomic_set(&glink_info->opened, 0);
+ glink_info->hdl = NULL;
diagfwd_channel_close(glink_info->fwd_ctxt);
}
@@ -586,6 +587,7 @@ static void __diag_glink_init(struct diag_glink_info *glink_info)
{
char wq_name[DIAG_GLINK_NAME_SZ + 12];
struct glink_link_info link_info;
+ void *link_state_handle = NULL;
if (!glink_info)
return;
@@ -606,23 +608,25 @@ static void __diag_glink_init(struct diag_glink_info *glink_info)
INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
link_info.transport = NULL;
- strlcpy((char *)link_info.edge, glink_info->edge,
- sizeof(link_info.edge));
- glink_info->hdl = glink_register_link_state_cb(&link_info,
+ link_info.edge = glink_info->edge;
+ glink_info->link_state_handle = NULL;
+ link_state_handle = glink_register_link_state_cb(&link_info,
(void *)glink_info);
- if (IS_ERR_OR_NULL(glink_info->hdl)) {
+ if (IS_ERR_OR_NULL(link_state_handle)) {
pr_err("diag: In %s, unable to register for glink channel %s\n",
__func__, glink_info->name);
destroy_workqueue(glink_info->wq);
return;
}
+ glink_info->link_state_handle = link_state_handle;
glink_info->fwd_ctxt = NULL;
atomic_set(&glink_info->tx_intent_ready, 0);
atomic_set(&glink_info->opened, 0);
atomic_set(&glink_info->diag_state, 0);
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s initialized fwd_ctxt: %pK hdl: %pK\n",
- glink_info->name, glink_info->fwd_ctxt, glink_info->hdl);
+ glink_info->name, glink_info->fwd_ctxt,
+ glink_info->link_state_handle);
}
void diag_glink_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
index 3f00a7ed60a8..bad4629b5ab8 100644
--- a/drivers/char/diag/diagfwd_glink.h
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -25,6 +25,7 @@ struct diag_glink_info {
uint32_t fifo_size;
atomic_t tx_intent_ready;
void *hdl;
+ void *link_state_handle;
char edge[DIAG_GLINK_NAME_SZ];
char name[DIAG_GLINK_NAME_SZ];
struct mutex lock;
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index b8ed216faaf6..f7b1e98f22b0 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -404,8 +404,11 @@ static void mhi_read_done_work_fn(struct work_struct *work)
* buffers here and do not forward them to the mux layer.
*/
if ((atomic_read(&(mhi_info->read_ch.opened)))) {
- diag_remote_dev_read_done(mhi_info->dev_id, buf,
+ err = diag_remote_dev_read_done(mhi_info->dev_id, buf,
result.bytes_xferd);
+ if (err)
+ mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH,
+ buf, result.bytes_xferd);
} else {
mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf,
result.bytes_xferd);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d0da5d852d41..b583e5336630 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1819,6 +1819,28 @@ unsigned int get_random_int(void)
EXPORT_SYMBOL(get_random_int);
/*
+ * Same as get_random_int(), but returns unsigned long.
+ */
+unsigned long get_random_long(void)
+{
+ __u32 *hash;
+ unsigned long ret;
+
+ if (arch_get_random_long(&ret))
+ return ret;
+
+ hash = get_cpu_var(get_random_int_hash);
+
+ hash[0] += current->pid + jiffies + random_get_entropy();
+ md5_transform(hash, random_int_secret);
+ ret = *(unsigned long *)hash;
+ put_cpu_var(get_random_int_hash);
+
+ return ret;
+}
+EXPORT_SYMBOL(get_random_long);
+
+/*
* randomize_range() returns a start address such that
*
* [...... <range> .....]
diff --git a/drivers/clk/msm/Kconfig b/drivers/clk/msm/Kconfig
index d94678e9e4fc..650a23ea1ead 100644
--- a/drivers/clk/msm/Kconfig
+++ b/drivers/clk/msm/Kconfig
@@ -2,6 +2,7 @@ config COMMON_CLK_MSM
tristate "Support for MSM clock controllers"
depends on OF
depends on ARCH_QCOM
+ select RATIONAL
help
This support clock controller used by MSM devices which support
global, mmss and gpu clock controller.
diff --git a/drivers/clk/msm/clock-gcc-cobalt.c b/drivers/clk/msm/clock-gcc-cobalt.c
index 2049a0bf7dd2..05272118af16 100644
--- a/drivers/clk/msm/clock-gcc-cobalt.c
+++ b/drivers/clk/msm/clock-gcc-cobalt.c
@@ -32,6 +32,7 @@
#include <dt-bindings/clock/msm-clocks-hwio-cobalt.h>
#include "vdd-level-cobalt.h"
+#include "reset.h"
static void __iomem *virt_base;
static void __iomem *virt_dbgbase;
@@ -240,8 +241,8 @@ DEFINE_EXT_CLK(gpll4_out_main, &gpll4.c);
static struct clk_freq_tbl ftbl_hmss_ahb_clk_src[] = {
F( 19200000, cxo_clk_src_ao, 1, 0, 0),
- F( 37500000, gpll0_out_main, 16, 0, 0),
- F( 75000000, gpll0_out_main, 8, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F( 100000000, gpll0_out_main, 6, 0, 0),
F_END
};
@@ -1199,6 +1200,17 @@ static struct branch_clk gcc_aggre1_ufs_axi_clk = {
},
};
+static struct hw_ctl_clk gcc_aggre1_ufs_axi_hw_ctl_clk = {
+ .cbcr_reg = GCC_AGGRE1_UFS_AXI_CBCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_aggre1_ufs_axi_hw_ctl_clk",
+ .parent = &gcc_aggre1_ufs_axi_clk.c,
+ .ops = &clk_ops_branch_hw_ctl,
+ CLK_INIT(gcc_aggre1_ufs_axi_hw_ctl_clk.c),
+ },
+};
+
static struct branch_clk gcc_aggre1_usb3_axi_clk = {
.cbcr_reg = GCC_AGGRE1_USB3_AXI_CBCR,
.has_sibling = 1,
@@ -2353,11 +2365,13 @@ static struct mux_clk gcc_debug_mux = {
&gpu_gcc_debug_clk.c,
&gfx_gcc_debug_clk.c,
&debug_mmss_clk.c,
+ &debug_cpu_clk.c,
),
MUX_SRC_LIST(
{ &gpu_gcc_debug_clk.c, 0x013d },
{ &gfx_gcc_debug_clk.c, 0x013d },
{ &debug_mmss_clk.c, 0x0022 },
+ { &debug_cpu_clk.c, 0x00c0 },
{ &snoc_clk.c, 0x0000 },
{ &cnoc_clk.c, 0x000e },
{ &bimc_clk.c, 0x00a9 },
@@ -2594,6 +2608,7 @@ static struct clk_lookup msm_clocks_gcc_cobalt[] = {
CLK_LIST(gcc_qusb2phy_sec_reset),
CLK_LIST(gpll0_out_msscc),
CLK_LIST(gcc_aggre1_ufs_axi_clk),
+ CLK_LIST(gcc_aggre1_ufs_axi_hw_ctl_clk),
CLK_LIST(gcc_aggre1_usb3_axi_clk),
CLK_LIST(gcc_bimc_mss_q6_axi_clk),
CLK_LIST(gcc_blsp1_ahb_clk),
@@ -2695,6 +2710,23 @@ static struct clk_lookup msm_clocks_gcc_cobalt[] = {
CLK_LIST(gcc_qspi_ref_clk),
};
+static const struct msm_reset_map gcc_cobalt_resets[] = {
+ [QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [BLSP1_BCR] = { 0x17000 },
+ [BLSP2_BCR] = { 0x25000 },
+ [BOOT_ROM_BCR] = { 0x38000 },
+ [PRNG_BCR] = { 0x34000 },
+ [UFS_BCR] = { 0x75000 },
+ [USB_30_BCR] = { 0x0f000 },
+ [USB3_PHY_BCR] = { 0x50020 },
+ [USB3PHY_PHY_BCR] = { 0x50024 },
+ [PCIE_0_PHY_BCR] = { 0x6c01c },
+ [PCIE_PHY_BCR] = { 0x6f000 },
+ [PCIE_PHY_NOCSR_COM_PHY_BCR] = { 0x6f00C },
+ [PCIE_PHY_COM_BCR] = { 0x6f014 },
+};
+
static void msm_gcc_cobalt_v1_fixup(void)
{
gcc_ufs_rx_symbol_1_clk.c.ops = &clk_ops_dummy;
@@ -2800,6 +2832,10 @@ static int msm_gcc_cobalt_probe(struct platform_device *pdev)
clk_set_flags(&gcc_gpu_bimc_gfx_clk.c, CLKFLAG_RETAIN_MEM);
+ /* Register block resets */
+ msm_reset_controller_register(pdev, gcc_cobalt_resets,
+ ARRAY_SIZE(gcc_cobalt_resets), virt_base);
+
dev_info(&pdev->dev, "Registered GCC clocks\n");
return 0;
}
@@ -2831,6 +2867,7 @@ static struct clk_lookup msm_clocks_measure_cobalt[] = {
CLK_LIST(gpu_gcc_debug_clk),
CLK_LIST(gfx_gcc_debug_clk),
CLK_LIST(debug_mmss_clk),
+ CLK_LIST(debug_cpu_clk),
CLK_LOOKUP_OF("measure", gcc_debug_mux, "debug"),
};
@@ -2867,6 +2904,9 @@ static int msm_clock_debug_cobalt_probe(struct platform_device *pdev)
debug_mmss_clk.dev = &pdev->dev;
debug_mmss_clk.clk_id = "debug_mmss_clk";
+ debug_cpu_clk.dev = &pdev->dev;
+ debug_cpu_clk.clk_id = "debug_cpu_clk";
+
ret = of_msm_clock_register(pdev->dev.of_node,
msm_clocks_measure_cobalt,
ARRAY_SIZE(msm_clocks_measure_cobalt));
diff --git a/drivers/clk/msm/clock-mmss-cobalt.c b/drivers/clk/msm/clock-mmss-cobalt.c
index bbb9af961235..220a8ba377ba 100644
--- a/drivers/clk/msm/clock-mmss-cobalt.c
+++ b/drivers/clk/msm/clock-mmss-cobalt.c
@@ -30,6 +30,7 @@
#include <dt-bindings/clock/msm-clocks-hwio-cobalt.h>
#include "vdd-level-cobalt.h"
+#include "reset.h"
static void __iomem *virt_base;
@@ -82,6 +83,7 @@ DEFINE_EXT_CLK(ext_dp_phy_pll_vco, NULL);
DEFINE_EXT_CLK(ext_dp_phy_pll_link, NULL);
static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+static DEFINE_VDD_REGULATORS(vdd_mmsscc_mx, VDD_DIG_NUM, 1, vdd_corner, NULL);
static struct alpha_pll_masks pll_masks_p = {
.lock_mask = BIT(31),
@@ -101,7 +103,7 @@ static struct pll_vote_clk mmpll0_pll = {
.parent = &mmsscc_xo.c,
.dbg_name = "mmpll0",
.ops = &clk_ops_pll_vote,
- VDD_DIG_FMAX_MAP2(LOWER, 404000000, NOMINAL, 808000000),
+ VDD_MM_PLL_FMAX_MAP2(LOWER, 404000000, NOMINAL, 808000000),
CLK_INIT(mmpll0_pll.c),
},
};
@@ -118,7 +120,7 @@ static struct pll_vote_clk mmpll1_pll = {
.parent = &mmsscc_xo.c,
.dbg_name = "mmpll1_pll",
.ops = &clk_ops_pll_vote,
- VDD_DIG_FMAX_MAP2(LOWER, 406000000, NOMINAL, 812000000),
+ VDD_MM_PLL_FMAX_MAP2(LOWER, 406000000, NOMINAL, 812000000),
CLK_INIT(mmpll1_pll.c),
},
};
@@ -135,7 +137,7 @@ static struct alpha_pll_clk mmpll3_pll = {
.parent = &mmsscc_xo.c,
.dbg_name = "mmpll3_pll",
.ops = &clk_ops_fixed_fabia_alpha_pll,
- VDD_DIG_FMAX_MAP2(LOWER, 465000000, LOW, 930000000),
+ VDD_MM_PLL_FMAX_MAP2(LOWER, 465000000, LOW, 930000000),
CLK_INIT(mmpll3_pll.c),
},
};
@@ -152,7 +154,7 @@ static struct alpha_pll_clk mmpll4_pll = {
.parent = &mmsscc_xo.c,
.dbg_name = "mmpll4_pll",
.ops = &clk_ops_fixed_fabia_alpha_pll,
- VDD_DIG_FMAX_MAP2(LOWER, 384000000, LOW, 768000000),
+ VDD_MM_PLL_FMAX_MAP2(LOWER, 384000000, LOW, 768000000),
CLK_INIT(mmpll4_pll.c),
},
};
@@ -169,7 +171,7 @@ static struct alpha_pll_clk mmpll5_pll = {
.parent = &mmsscc_xo.c,
.dbg_name = "mmpll5_pll",
.ops = &clk_ops_fixed_fabia_alpha_pll,
- VDD_DIG_FMAX_MAP2(LOWER, 412500000, LOW, 825000000),
+ VDD_MM_PLL_FMAX_MAP2(LOWER, 412500000, LOW, 825000000),
CLK_INIT(mmpll5_pll.c),
},
};
@@ -186,7 +188,7 @@ static struct alpha_pll_clk mmpll6_pll = {
.parent = &mmsscc_xo.c,
.dbg_name = "mmpll6_pll",
.ops = &clk_ops_fixed_fabia_alpha_pll,
- VDD_DIG_FMAX_MAP2(LOWER, 360000000, NOMINAL, 720000000),
+ VDD_MM_PLL_FMAX_MAP2(LOWER, 360000000, NOMINAL, 720000000),
CLK_INIT(mmpll6_pll.c),
},
};
@@ -203,7 +205,7 @@ static struct alpha_pll_clk mmpll7_pll = {
.parent = &mmsscc_xo.c,
.dbg_name = "mmpll7_pll",
.ops = &clk_ops_fixed_fabia_alpha_pll,
- VDD_DIG_FMAX_MAP2(LOWER, 480000000, NOMINAL, 960000000),
+ VDD_MM_PLL_FMAX_MAP1(LOW, 960000000),
CLK_INIT(mmpll7_pll.c),
},
};
@@ -220,7 +222,7 @@ static struct alpha_pll_clk mmpll10_pll = {
.parent = &mmsscc_xo.c,
.dbg_name = "mmpll10_pll",
.ops = &clk_ops_fixed_fabia_alpha_pll,
- VDD_DIG_FMAX_MAP2(LOWER, 288000000, NOMINAL, 576000000),
+ VDD_MM_PLL_FMAX_MAP2(LOWER, 288000000, NOMINAL, 576000000),
CLK_INIT(mmpll10_pll.c),
},
};
@@ -2625,7 +2627,6 @@ static struct clk_lookup msm_clocks_mmss_cobalt[] = {
CLK_LIST(mmss_misc_ahb_clk),
CLK_LIST(mmss_misc_cxo_clk),
CLK_LIST(mmss_mnoc_ahb_clk),
- CLK_LIST(mmss_mnoc_maxi_clk),
CLK_LIST(mmss_video_subcore0_clk),
CLK_LIST(mmss_video_subcore1_clk),
CLK_LIST(mmss_video_ahb_clk),
@@ -2634,9 +2635,14 @@ static struct clk_lookup msm_clocks_mmss_cobalt[] = {
CLK_LIST(mmss_video_maxi_clk),
CLK_LIST(mmss_vmem_ahb_clk),
CLK_LIST(mmss_vmem_maxi_clk),
+ CLK_LIST(mmss_mnoc_maxi_clk),
CLK_LIST(mmss_debug_mux),
};
+static const struct msm_reset_map mmss_cobalt_resets[] = {
+ [CAMSS_MICRO_BCR] = { 0x3490 },
+};
+
static void msm_mmsscc_hamster_fixup(void)
{
mmpll3_pll.c.rate = 1066000000;
@@ -2740,10 +2746,6 @@ static void msm_mmsscc_v2_fixup(void)
csiphy_clk_src.c.fmax[VDD_DIG_LOW] = 256000000;
dp_pixel_clk_src.c.fmax[VDD_DIG_LOWER] = 148380000;
-
- video_subcore0_clk_src.c.fmax[VDD_DIG_LOW_L1] = 355200000;
- video_subcore1_clk_src.c.fmax[VDD_DIG_LOW_L1] = 355200000;
- video_core_clk_src.c.fmax[VDD_DIG_LOW_L1] = 355200000;
}
int msm_mmsscc_cobalt_probe(struct platform_device *pdev)
@@ -2778,6 +2780,14 @@ int msm_mmsscc_cobalt_probe(struct platform_device *pdev)
return PTR_ERR(reg);
}
+ reg = vdd_mmsscc_mx.regulator[0] = devm_regulator_get(&pdev->dev,
+ "vdd_mmsscc_mx");
+ if (IS_ERR(reg)) {
+ if (PTR_ERR(reg) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get vdd_mmsscc_mx regulator!");
+ return PTR_ERR(reg);
+ }
+
tmp = mmsscc_xo.c.parent = devm_clk_get(&pdev->dev, "xo");
if (IS_ERR(tmp)) {
if (PTR_ERR(tmp) != -EPROBE_DEFER)
@@ -2838,6 +2848,10 @@ int msm_mmsscc_cobalt_probe(struct platform_device *pdev)
if (rc)
return rc;
+ /* Register block resets */
+ msm_reset_controller_register(pdev, mmss_cobalt_resets,
+ ARRAY_SIZE(mmss_cobalt_resets), virt_base);
+
dev_info(&pdev->dev, "Registered MMSS clocks.\n");
return 0;
}
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 9d605503520f..8ae6a4e994f0 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -48,6 +48,7 @@
enum clk_osm_bases {
OSM_BASE,
PLL_BASE,
+ EFUSE_BASE,
NUM_BASES,
};
@@ -76,6 +77,7 @@ enum clk_osm_trace_packet_id {
#define MEM_ACC_SEQ_CONST(n) (n)
#define MEM_ACC_INSTR_COMP(n) (0x67 + ((n) * 0x40))
#define MEM_ACC_SEQ_REG_VAL_START(n) (SEQ_REG(60 + (n)))
+#define SEQ_REG1_MSMCOBALT_V2 0x1048
#define OSM_TABLE_SIZE 40
#define MAX_CLUSTER_CNT 2
@@ -115,7 +117,7 @@ enum clk_osm_trace_packet_id {
#define PLL_TEST_CTL_HI 0x1C
#define PLL_STATUS 0x2C
#define PLL_LOCK_DET_MASK BIT(16)
-#define PLL_WAIT_LOCK_TIME_US 5
+#define PLL_WAIT_LOCK_TIME_US 10
#define PLL_WAIT_LOCK_TIME_NS (PLL_WAIT_LOCK_TIME_US * 1000)
#define PLL_MIN_LVAL 43
@@ -164,7 +166,8 @@ enum clk_osm_trace_packet_id {
#define DCVS_DROOP_EN_MASK BIT(5)
#define LMH_PS_EN_MASK BIT(6)
#define IGNORE_PLL_LOCK_MASK BIT(15)
-#define SAFE_FREQ_WAIT_NS 1000
+#define SAFE_FREQ_WAIT_NS 5000
+#define DEXT_DECREMENT_WAIT_NS 1000
#define DCVS_BOOST_TIMER_REG0 0x1084
#define DCVS_BOOST_TIMER_REG1 0x1088
#define DCVS_BOOST_TIMER_REG2 0x108C
@@ -173,7 +176,8 @@ enum clk_osm_trace_packet_id {
#define PS_BOOST_TIMER_REG2 0x109C
#define BOOST_PROG_SYNC_DELAY_REG 0x10A0
#define DROOP_CTRL_REG 0x10A4
-#define DROOP_PROG_SYNC_DELAY_REG 0x10B8
+#define DROOP_RELEASE_TIMER_CTRL 0x10A8
+#define DROOP_PROG_SYNC_DELAY_REG 0x10BC
#define DROOP_UNSTALL_TIMER_CTRL_REG 0x10AC
#define DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG 0x10B0
#define DROOP_WAIT_TO_RELEASE_TIMER_CTRL1_REG 0x10B4
@@ -208,7 +212,13 @@ enum clk_osm_trace_packet_id {
#define PLL_DD_D0_USER_CTL_LO 0x17916208
#define PLL_DD_D1_USER_CTL_LO 0x17816208
+#define PWRCL_EFUSE_SHIFT 0
+#define PWRCL_EFUSE_MASK 0
+#define PERFCL_EFUSE_SHIFT 29
+#define PERFCL_EFUSE_MASK 0x7
+
static void __iomem *virt_base;
+static void __iomem *debug_base;
#define lmh_lite_clk_src_source_val 1
@@ -335,6 +345,9 @@ struct clk_osm {
bool trace_en;
};
+static bool msmcobalt_v1;
+static bool msmcobalt_v2;
+
static inline void clk_osm_masked_write_reg(struct clk_osm *c, u32 val,
u32 offset, u32 mask)
{
@@ -525,14 +538,45 @@ static struct clk_osm perfcl_clk = {
},
};
+static struct clk_ops clk_ops_cpu_dbg_mux;
+
+static struct mux_clk cpu_debug_mux = {
+ .offset = 0x0,
+ .mask = 0x3,
+ .shift = 8,
+ .ops = &mux_reg_ops,
+ MUX_SRC_LIST(
+ { &pwrcl_clk.c, 0x00 },
+ { &perfcl_clk.c, 0x01 },
+ ),
+ .base = &debug_base,
+ .c = {
+ .dbg_name = "cpu_debug_mux",
+ .ops = &clk_ops_cpu_dbg_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(cpu_debug_mux.c),
+ },
+};
+
static struct clk_lookup cpu_clocks_osm[] = {
CLK_LIST(pwrcl_clk),
CLK_LIST(perfcl_clk),
CLK_LIST(sys_apcsaux_clk_gcc),
CLK_LIST(xo_ao),
CLK_LIST(osm_clk_src),
+ CLK_LIST(cpu_debug_mux),
};
+static unsigned long cpu_dbg_mux_get_rate(struct clk *clk)
+{
+ /* Account for the divider between the clock and the debug mux */
+ if (!strcmp(clk->parent->dbg_name, "pwrcl_clk"))
+ return clk->rate/4;
+ else if (!strcmp(clk->parent->dbg_name, "perfcl_clk"))
+ return clk->rate/8;
+ return clk->rate;
+}
+
static void clk_osm_print_osm_table(struct clk_osm *c)
{
int i;
@@ -901,6 +945,22 @@ static int clk_osm_resources_init(struct platform_device *pdev)
}
}
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "debug");
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get debug mux base\n");
+ return -EINVAL;
+ }
+
+ debug_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!debug_base) {
+ dev_err(&pdev->dev, "Unable to map in debug mux base\n");
+ return -ENOMEM;
+ }
+
+ clk_ops_cpu_dbg_mux = clk_ops_gen_mux;
+ clk_ops_cpu_dbg_mux.get_rate = cpu_dbg_mux_get_rate;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_common");
if (!res) {
dev_err(&pdev->dev, "Failed to get apcs common base\n");
@@ -913,6 +973,35 @@ static int clk_osm_resources_init(struct platform_device *pdev)
return -ENOMEM;
}
+ /* efuse speed bin fuses are optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "pwrcl_efuse");
+ if (res) {
+ pbase = (unsigned long)res->start;
+ vbase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map in pwrcl_efuse base\n");
+ return -ENOMEM;
+ }
+ pwrcl_clk.pbases[EFUSE_BASE] = pbase;
+ pwrcl_clk.vbases[EFUSE_BASE] = vbase;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "perfcl_efuse");
+ if (res) {
+ pbase = (unsigned long)res->start;
+ vbase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map in perfcl_efuse base\n");
+ return -ENOMEM;
+ }
+ perfcl_clk.pbases[EFUSE_BASE] = pbase;
+ perfcl_clk.vbases[EFUSE_BASE] = vbase;
+ }
+
vdd_pwrcl = devm_regulator_get(&pdev->dev, "vdd-pwrcl");
if (IS_ERR(vdd_pwrcl)) {
rc = PTR_ERR(vdd_pwrcl);
@@ -1582,6 +1671,9 @@ static void clk_osm_setup_osm_was(struct clk_osm *c)
u32 cc_hyst;
u32 val;
+ if (msmcobalt_v2)
+ return;
+
val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
val |= IGNORE_PLL_LOCK_MASK;
cc_hyst = clk_osm_read_reg(c, SPM_CC_HYSTERESIS);
@@ -1673,19 +1765,19 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
if (c->boost_fsm_en) {
val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
clk_osm_write_reg(c, val | CC_BOOST_EN_MASK, PDN_FSM_CTRL_REG);
+
val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG0);
val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
- val |= BVAL(31, 16, clk_osm_count_ns(c,
- SAFE_FREQ_WAIT_NS));
+ val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG0);
val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG1);
val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
- val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+ val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG1);
val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG2);
- val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+ val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG2);
}
@@ -1696,12 +1788,19 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
PDN_FSM_CTRL_REG);
val = clk_osm_read_reg(c, DCVS_BOOST_TIMER_REG0);
+ val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
clk_osm_write_reg(c, val, DCVS_BOOST_TIMER_REG0);
val = clk_osm_read_reg(c, DCVS_BOOST_TIMER_REG1);
val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+ val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
clk_osm_write_reg(c, val, DCVS_BOOST_TIMER_REG1);
+
+ val = clk_osm_read_reg(c, DCVS_BOOST_TIMER_REG2);
+ val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+ clk_osm_write_reg(c, val, DCVS_BOOST_TIMER_REG2);
+
}
/* PS FSM */
@@ -1709,13 +1808,19 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
clk_osm_write_reg(c, val | PS_BOOST_EN_MASK, PDN_FSM_CTRL_REG);
- val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG0) |
- BVAL(31, 16, clk_osm_count_ns(c, 1000));
+ val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG0);
+ val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+ val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
clk_osm_write_reg(c, val, PS_BOOST_TIMER_REG0);
- val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG1) |
- clk_osm_count_ns(c, 1000);
+ val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG1);
+ val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+ val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
clk_osm_write_reg(c, val, PS_BOOST_TIMER_REG1);
+
+ val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG2);
+ val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+ clk_osm_write_reg(c, val, PS_BOOST_TIMER_REG2);
}
/* PLL signal timing control */
@@ -1728,15 +1833,15 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
clk_osm_write_reg(c, val | WFX_DROOP_EN_MASK, PDN_FSM_CTRL_REG);
- val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG) |
- BVAL(31, 16, clk_osm_count_ns(c, 1000));
+ val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG);
+ val |= BVAL(31, 16, clk_osm_count_ns(c, 500));
clk_osm_write_reg(c, val, DROOP_UNSTALL_TIMER_CTRL_REG);
val = clk_osm_read_reg(c,
- DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG) |
- BVAL(31, 16, clk_osm_count_ns(c, 1000));
+ DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
+ val |= BVAL(31, 16, clk_osm_count_ns(c, 500));
clk_osm_write_reg(c, val,
- DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
+ DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
}
/* PC/RET FSM */
@@ -1745,9 +1850,15 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
clk_osm_write_reg(c, val | PC_RET_EXIT_DROOP_EN_MASK,
PDN_FSM_CTRL_REG);
- val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG) |
- BVAL(15, 0, clk_osm_count_ns(c, 5000));
+ val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG);
+ val |= BVAL(15, 0, clk_osm_count_ns(c, 500));
clk_osm_write_reg(c, val, DROOP_UNSTALL_TIMER_CTRL_REG);
+
+ val = clk_osm_read_reg(c,
+ DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
+ val |= BVAL(15, 0, clk_osm_count_ns(c, 500));
+ clk_osm_write_reg(c, val,
+ DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
}
/* DCVS droop FSM - only if RCGwRC is not used for di/dt control */
@@ -1758,14 +1869,14 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
}
if (c->wfx_fsm_en || c->ps_fsm_en || c->droop_fsm_en) {
- val = clk_osm_read_reg(c,
- DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG) |
- BVAL(15, 0, clk_osm_count_ns(c, 1000));
- clk_osm_write_reg(c, val,
- DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG);
- val = clk_osm_read_reg(c, DROOP_CTRL_REG) |
- BVAL(22, 16, 0x2);
+ clk_osm_write_reg(c, clk_osm_count_ns(c, 250),
+ DROOP_RELEASE_TIMER_CTRL);
+ clk_osm_write_reg(c, clk_osm_count_ns(c, 500),
+ DCVS_DROOP_TIMER_CTRL);
+ val = clk_osm_read_reg(c, DROOP_CTRL_REG);
+ val |= BIT(31) | BVAL(22, 16, 0x2) |
+ BVAL(6, 0, 0x8);
clk_osm_write_reg(c, val, DROOP_CTRL_REG);
}
}
@@ -1803,6 +1914,9 @@ static void clk_osm_do_additional_setup(struct clk_osm *c,
clk_osm_write_reg(c, RCG_UPDATE_SUCCESS, SEQ_REG(84));
clk_osm_write_reg(c, RCG_UPDATE, SEQ_REG(85));
+ /* ITM to OSM handoff */
+ clk_osm_setup_itm_to_osm_handoff();
+
pr_debug("seq_size: %lu, seqbr_size: %lu\n", ARRAY_SIZE(seq_instr),
ARRAY_SIZE(seq_br_instr));
clk_osm_setup_sequencer(&pwrcl_clk);
@@ -1835,18 +1949,22 @@ static void clk_osm_apm_vc_setup(struct clk_osm *c)
/* Ensure writes complete before returning */
mb();
} else {
- scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(1),
- c->apm_threshold_vc);
+ if (msmcobalt_v1) {
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(1),
+ c->apm_threshold_vc);
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(73),
+ 0x3b | c->apm_threshold_vc << 6);
+ } else if (msmcobalt_v2) {
+ clk_osm_write_reg(c, c->apm_threshold_vc,
+ SEQ_REG1_MSMCOBALT_V2);
+ }
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(72),
c->apm_crossover_vc);
- /* SEQ_REG(8) = address of SEQ_REG(1) init by TZ */
clk_osm_write_reg(c, c->apm_threshold_vc,
SEQ_REG(15));
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(31),
c->apm_threshold_vc != 0 ?
c->apm_threshold_vc - 1 : 0xff);
- scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(73),
- 0x3b | c->apm_threshold_vc << 6);
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(76),
0x39 | c->apm_threshold_vc << 6);
}
@@ -2441,13 +2559,23 @@ static unsigned long osm_clk_init_rate = 200000000;
static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
{
- char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0";
- char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0";
int rc, cpu;
+ int speedbin = 0, pvs_ver = 0;
+ u32 pte_efuse;
+ char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0";
+ char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0";
struct cpu_cycle_counter_cb cb = {
.get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter,
};
+ if (of_find_compatible_node(NULL, NULL,
+ "qcom,cpu-clock-osm-msmcobalt-v1")) {
+ msmcobalt_v1 = true;
+ } else if (of_find_compatible_node(NULL, NULL,
+ "qcom,cpu-clock-osm-msmcobalt-v2")) {
+ msmcobalt_v2 = true;
+ }
+
rc = clk_osm_resources_init(pdev);
if (rc) {
if (rc != -EPROBE_DEFER)
@@ -2462,6 +2590,24 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
return rc;
}
+ if ((pwrcl_clk.secure_init || perfcl_clk.secure_init) &&
+ msmcobalt_v2) {
+ pr_err("unsupported configuration for msmcobalt v2\n");
+ return -EINVAL;
+ }
+
+ if (pwrcl_clk.vbases[EFUSE_BASE]) {
+ /* Multiple speed-bins are supported */
+ pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
+ speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
+ PWRCL_EFUSE_MASK);
+ snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
+ "qcom,pwrcl-speedbin%d-v%d", speedbin, pvs_ver);
+ }
+
+ dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
+ speedbin, pvs_ver);
+
rc = clk_osm_get_lut(pdev, &pwrcl_clk,
pwrclspeedbinstr);
if (rc) {
@@ -2470,6 +2616,18 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
return rc;
}
+ if (perfcl_clk.vbases[EFUSE_BASE]) {
+ /* Multiple speed-bins are supported */
+ pte_efuse = readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
+ speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT) &
+ PERFCL_EFUSE_MASK);
+ snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr),
+ "qcom,perfcl-speedbin%d-v%d", speedbin, pvs_ver);
+ }
+
+ dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
+ speedbin, pvs_ver);
+
rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
if (rc) {
dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
@@ -2509,10 +2667,6 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
clk_osm_print_osm_table(&pwrcl_clk);
clk_osm_print_osm_table(&perfcl_clk);
- /* Program the minimum PLL frequency */
- clk_osm_write_reg(&pwrcl_clk, PLL_MIN_LVAL, SEQ_REG(27));
- clk_osm_write_reg(&perfcl_clk, PLL_MIN_LVAL, SEQ_REG(27));
-
rc = clk_osm_setup_hw_table(&pwrcl_clk);
if (rc) {
dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
@@ -2531,8 +2685,6 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
goto exit;
}
- clk_osm_setup_itm_to_osm_handoff();
-
/* LLM Freq Policy Tuning */
rc = clk_osm_set_llm_freq_policy(pdev);
if (rc < 0) {
@@ -2666,7 +2818,8 @@ exit:
}
static struct of_device_id match_table[] = {
- { .compatible = "qcom,cpu-clock-osm" },
+ { .compatible = "qcom,cpu-clock-osm-msmcobalt-v1" },
+ { .compatible = "qcom,cpu-clock-osm-msmcobalt-v2" },
{}
};
diff --git a/drivers/clk/msm/vdd-level-cobalt.h b/drivers/clk/msm/vdd-level-cobalt.h
index f847a4104d4d..c1897b7da7f7 100644
--- a/drivers/clk/msm/vdd-level-cobalt.h
+++ b/drivers/clk/msm/vdd-level-cobalt.h
@@ -24,6 +24,7 @@
[VDD_DIG_##l1] = (f1), \
}, \
.num_fmax = VDD_DIG_NUM
+
#define VDD_DIG_FMAX_MAP2(l1, f1, l2, f2) \
.vdd_class = &vdd_dig, \
.fmax = (unsigned long[VDD_DIG_NUM]) { \
@@ -40,6 +41,7 @@
[VDD_DIG_##l3] = (f3), \
}, \
.num_fmax = VDD_DIG_NUM
+
#define VDD_DIG_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4) \
.vdd_class = &vdd_dig, \
.fmax = (unsigned long[VDD_DIG_NUM]) { \
@@ -66,6 +68,22 @@
}, \
.num_fmax = VDD_DIG_NUM
+#define VDD_MM_PLL_FMAX_MAP1(l1, f1) \
+ .vdd_class = &vdd_mmsscc_mx, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_MM_PLL_FMAX_MAP2(l1, f1, l2, f2) \
+ .vdd_class = &vdd_mmsscc_mx, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+
#define VDD_GPU_PLL_FMAX_MAP1(l1, f1) \
.vdd_class = &vdd_gpucc_mx, \
.fmax = (unsigned long[VDD_DIG_NUM]) { \
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index a63680290219..a7b8ac07e73a 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -21,6 +21,7 @@
#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
+#include <linux/clk.h>
#include <dt-bindings/clock/qcom,gcc-msm8996.h>
@@ -30,7 +31,6 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
-#include "gdsc.h"
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
@@ -48,14 +48,6 @@ enum {
P_GPLL1_EARLY_DIV
};
-static const struct parent_map gcc_sleep_clk_map[] = {
- { P_SLEEP_CLK, 5 }
-};
-
-static const char * const gcc_sleep_clk[] = {
- "sleep_clk"
-};
-
static const struct parent_map gcc_xo_gpll0_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 }
@@ -284,71 +276,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
},
};
-static const struct freq_tbl ftbl_system_noc_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
- F(100000000, P_GPLL0, 6, 0, 0),
- F(150000000, P_GPLL0, 4, 0, 0),
- F(200000000, P_GPLL0, 3, 0, 0),
- F(240000000, P_GPLL0, 2.5, 0, 0),
- { }
-};
-
-static struct clk_rcg2 system_noc_clk_src = {
- .cmd_rcgr = 0x0401c,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early_div_map,
- .freq_tbl = ftbl_system_noc_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "system_noc_clk_src",
- .parent_names = gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early_div,
- .num_parents = 7,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static const struct freq_tbl ftbl_config_noc_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(37500000, P_GPLL0, 16, 0, 0),
- F(75000000, P_GPLL0, 8, 0, 0),
- { }
-};
-
-static struct clk_rcg2 config_noc_clk_src = {
- .cmd_rcgr = 0x0500c,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_config_noc_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "config_noc_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
- .ops = &clk_rcg2_ops,
- },
-};
-
-static const struct freq_tbl ftbl_periph_noc_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(37500000, P_GPLL0, 16, 0, 0),
- F(50000000, P_GPLL0, 12, 0, 0),
- F(75000000, P_GPLL0, 8, 0, 0),
- F(100000000, P_GPLL0, 6, 0, 0),
- { }
-};
-
-static struct clk_rcg2 periph_noc_clk_src = {
- .cmd_rcgr = 0x06014,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_periph_noc_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "periph_noc_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
- .ops = &clk_rcg2_ops,
- },
-};
-
static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(120000000, P_GPLL0, 5, 0, 0),
@@ -464,10 +391,18 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
},
};
+static struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
static struct clk_rcg2 sdcc1_ice_core_clk_src = {
.cmd_rcgr = 0x13024,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_gpll0_early_div_map,
+ .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_ice_core_clk_src",
.parent_names = gcc_xo_gpll0_gpll4_gpll0_early_div,
@@ -1104,18 +1039,6 @@ static struct clk_rcg2 tsif_ref_clk_src = {
},
};
-static struct clk_rcg2 gcc_sleep_clk_src = {
- .cmd_rcgr = 0x43014,
- .hid_width = 5,
- .parent_map = gcc_sleep_clk_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_sleep_clk_src",
- .parent_names = gcc_sleep_clk,
- .num_parents = 1,
- .ops = &clk_rcg2_ops,
- },
-};
-
static struct clk_rcg2 hmss_rbcpr_clk_src = {
.cmd_rcgr = 0x48040,
.hid_width = 5,
@@ -1230,10 +1153,18 @@ static struct clk_rcg2 ufs_axi_clk_src = {
},
};
+static const struct freq_tbl ftbl_ufs_ice_core_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
static struct clk_rcg2 ufs_ice_core_clk_src = {
.cmd_rcgr = 0x76014,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_ufs_ice_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_ice_core_clk_src",
.parent_names = gcc_xo_gpll0,
@@ -1242,10 +1173,19 @@ static struct clk_rcg2 ufs_ice_core_clk_src = {
},
};
+static const struct freq_tbl ftbl_qspi_ser_clk_src[] = {
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(256000000, P_GPLL4, 1.5, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
static struct clk_rcg2 qspi_ser_clk_src = {
.cmd_rcgr = 0x8b00c,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map,
+ .freq_tbl = ftbl_qspi_ser_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "qspi_ser_clk_src",
.parent_names = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div,
@@ -1306,9 +1246,6 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mmss_noc_cfg_ahb_clk",
- .parent_names = (const char *[]){ "config_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
.ops = &clk_branch2_ops,
},
},
@@ -1316,12 +1253,12 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
static struct clk_branch gcc_mmss_bimc_gfx_clk = {
.halt_reg = 0x9010,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
.clkr = {
.enable_reg = 0x9010,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mmss_bimc_gfx_clk",
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1349,9 +1286,6 @@ static struct clk_branch gcc_usb30_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sleep_clk",
- .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1387,17 +1321,26 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
},
};
-static struct clk_branch gcc_usb3_phy_pipe_clk = {
- .halt_reg = 0x50004,
+static struct clk_gate2 gcc_usb3_phy_pipe_clk = {
+ .udelay = 50,
.clkr = {
.enable_reg = 0x50004,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_pipe_clk",
- .parent_names = (const char *[]){ "usb3_phy_pipe_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
+ .ops = &clk_gate2_ops,
+ },
+ },
+};
+
+static struct clk_gate2 gpll0_out_msscc = {
+ .udelay = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_msscc",
+ .ops = &clk_gate2_ops,
},
},
};
@@ -1424,9 +1367,6 @@ static struct clk_branch gcc_usb20_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb20_sleep_clk",
- .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1454,9 +1394,6 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_phy_cfg_ahb2phy_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1484,9 +1421,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1529,9 +1463,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1559,9 +1490,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc3_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1589,9 +1517,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1605,9 +1530,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.enable_mask = BIT(17),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1621,9 +1543,6 @@ static struct clk_branch gcc_blsp1_sleep_clk = {
.enable_mask = BIT(16),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_sleep_clk",
- .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1907,9 +1826,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1923,9 +1839,6 @@ static struct clk_branch gcc_blsp2_sleep_clk = {
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_sleep_clk",
- .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2208,9 +2121,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2239,9 +2149,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
- .parent_names = (const char *[]){ "config_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2254,9 +2161,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2277,21 +2181,6 @@ static struct clk_branch gcc_tsif_ref_clk = {
},
};
-static struct clk_branch gcc_tsif_inactivity_timers_clk = {
- .halt_reg = 0x3600c,
- .clkr = {
- .enable_reg = 0x3600c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_tsif_inactivity_timers_clk",
- .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_boot_rom_ahb_clk = {
.halt_reg = 0x38004,
.halt_check = BRANCH_HALT_VOTED,
@@ -2300,9 +2189,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_boot_rom_ahb_clk",
- .parent_names = (const char *[]){ "config_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2315,7 +2201,6 @@ static struct clk_branch gcc_bimc_gfx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gfx_clk",
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2388,9 +2273,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_slv_axi_clk",
- .parent_names = (const char *[]){ "system_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2403,9 +2285,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_mstr_axi_clk",
- .parent_names = (const char *[]){ "system_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2418,9 +2297,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_cfg_ahb_clk",
- .parent_names = (const char *[]){ "config_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2441,17 +2317,14 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
},
};
-static struct clk_branch gcc_pcie_0_pipe_clk = {
- .halt_reg = 0x6b018,
+static struct clk_gate2 gcc_pcie_0_pipe_clk = {
+ .udelay = 500,
.clkr = {
.enable_reg = 0x6b018,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk",
- .parent_names = (const char *[]){ "pcie_0_pipe_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
+ .ops = &clk_gate2_ops,
},
},
};
@@ -2463,9 +2336,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_slv_axi_clk",
- .parent_names = (const char *[]){ "system_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2478,9 +2348,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_mstr_axi_clk",
- .parent_names = (const char *[]){ "system_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2493,9 +2360,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_cfg_ahb_clk",
- .parent_names = (const char *[]){ "config_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2516,17 +2380,14 @@ static struct clk_branch gcc_pcie_1_aux_clk = {
},
};
-static struct clk_branch gcc_pcie_1_pipe_clk = {
- .halt_reg = 0x6d018,
+static struct clk_gate2 gcc_pcie_1_pipe_clk = {
+ .udelay = 500,
.clkr = {
.enable_reg = 0x6d018,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk",
- .parent_names = (const char *[]){ "pcie_1_pipe_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
+ .ops = &clk_gate2_ops,
},
},
};
@@ -2538,9 +2399,6 @@ static struct clk_branch gcc_pcie_2_slv_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_2_slv_axi_clk",
- .parent_names = (const char *[]){ "system_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2553,9 +2411,6 @@ static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_2_mstr_axi_clk",
- .parent_names = (const char *[]){ "system_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2568,9 +2423,6 @@ static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_2_cfg_ahb_clk",
- .parent_names = (const char *[]){ "config_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2591,17 +2443,14 @@ static struct clk_branch gcc_pcie_2_aux_clk = {
},
};
-static struct clk_branch gcc_pcie_2_pipe_clk = {
- .halt_reg = 0x6e108,
+static struct clk_gate2 gcc_pcie_2_pipe_clk = {
+ .udelay = 500,
.clkr = {
- .enable_reg = 0x6e108,
+ .enable_reg = 0x6e018,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_2_pipe_clk",
- .parent_names = (const char *[]){ "pcie_2_pipe_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
+ .ops = &clk_gate2_ops,
},
},
};
@@ -2613,9 +2462,6 @@ static struct clk_branch gcc_pcie_phy_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_phy_cfg_ahb_clk",
- .parent_names = (const char *[]){ "config_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2658,9 +2504,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_ahb_clk",
- .parent_names = (const char *[]){ "config_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2720,47 +2563,39 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
},
};
-static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
- .halt_reg = 0x75018,
+static struct clk_gate2 gcc_ufs_tx_symbol_0_clk = {
+ .udelay = 500,
.clkr = {
.enable_reg = 0x75018,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_0_clk",
- .parent_names = (const char *[]){ "ufs_tx_symbol_0_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
+ .ops = &clk_gate2_ops,
},
},
};
-static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
- .halt_reg = 0x7501c,
+static struct clk_gate2 gcc_ufs_rx_symbol_0_clk = {
+ .udelay = 500,
.clkr = {
.enable_reg = 0x7501c,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_0_clk",
- .parent_names = (const char *[]){ "ufs_rx_symbol_0_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
+ .ops = &clk_gate2_ops,
},
},
};
-static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
- .halt_reg = 0x75020,
+static struct clk_gate2 gcc_ufs_rx_symbol_1_clk = {
+ .udelay = 500,
.clkr = {
.enable_reg = 0x75020,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_1_clk",
- .parent_names = (const char *[]){ "ufs_rx_symbol_1_clk_src" },
- .num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
+ .ops = &clk_gate2_ops,
},
},
};
@@ -2807,26 +2642,26 @@ static struct clk_branch gcc_ufs_ice_core_clk = {
},
};
-static struct clk_branch gcc_ufs_sys_clk_core_clk = {
- .halt_check = BRANCH_HALT_DELAY,
+static struct clk_gate2 gcc_ufs_sys_clk_core_clk = {
+ .udelay = 500,
.clkr = {
.enable_reg = 0x76030,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_sys_clk_core_clk",
- .ops = &clk_branch2_ops,
+ .ops = &clk_gate2_ops,
},
},
};
-static struct clk_branch gcc_ufs_tx_symbol_clk_core_clk = {
- .halt_check = BRANCH_HALT_DELAY,
+static struct clk_gate2 gcc_ufs_tx_symbol_clk_core_clk = {
+ .udelay = 500,
.clkr = {
.enable_reg = 0x76034,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_clk_core_clk",
- .ops = &clk_branch2_ops,
+ .ops = &clk_gate2_ops,
},
},
};
@@ -2838,9 +2673,6 @@ static struct clk_branch gcc_aggre0_snoc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre0_snoc_axi_clk",
- .parent_names = (const char *[]){ "system_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2853,9 +2685,6 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre0_cnoc_ahb_clk",
- .parent_names = (const char *[]){ "config_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2863,29 +2692,37 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
static struct clk_branch gcc_smmu_aggre0_axi_clk = {
.halt_reg = 0x81014,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
.clkr = {
.enable_reg = 0x81014,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_smmu_aggre0_axi_clk",
- .parent_names = (const char *[]){ "system_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
+static struct clk_gate2 gcc_aggre0_noc_qosgen_extref_clk = {
+ .udelay = 500,
+ .clkr = {
+ .enable_reg = 0x8101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_noc_qosgen_extref_clk",
+ .ops = &clk_gate2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
.halt_reg = 0x81018,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
.clkr = {
.enable_reg = 0x81018,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_smmu_aggre0_ahb_clk",
- .parent_names = (const char *[]){ "config_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2898,9 +2735,6 @@ static struct clk_branch gcc_aggre1_pnoc_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre1_pnoc_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2943,9 +2777,6 @@ static struct clk_branch gcc_qspi_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_qspi_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2973,8 +2804,6 @@ static struct clk_branch gcc_usb3_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2987,8 +2816,6 @@ static struct clk_branch gcc_hdmi_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_hdmi_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -3001,8 +2828,6 @@ static struct clk_branch gcc_ufs_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -3015,8 +2840,6 @@ static struct clk_branch gcc_pcie_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -3029,8 +2852,6 @@ static struct clk_branch gcc_rx2_usb2_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_rx2_usb2_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -3043,96 +2864,142 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_rx1_usb2_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
};
-static struct clk_hw *gcc_msm8996_hws[] = {
- &xo.hw,
- &gpll0_early_div.hw,
- &ufs_tx_cfg_clk_src.hw,
- &ufs_rx_cfg_clk_src.hw,
- &ufs_ice_core_postdiv_clk_src.hw,
+static struct clk_branch hlos1_vote_lpass_core_smmu_clk = {
+ .halt_reg = 0x7d010,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "hlos1_vote_lpass_core_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
};
-static struct gdsc aggre0_noc_gdsc = {
- .gdscr = 0x81004,
- .gds_hw_ctrl = 0x81028,
- .pd = {
- .name = "aggre0_noc",
+static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
+ .halt_reg = 0x7d014,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "hlos1_vote_lpass_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
},
- .pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
};
-static struct gdsc hlos1_vote_aggre0_noc_gdsc = {
- .gdscr = 0x7d024,
- .pd = {
- .name = "hlos1_vote_aggre0_noc",
+static struct clk_branch gcc_edp_clkref_clk = {
+ .halt_reg = 0x88004,
+ .clkr = {
+ .enable_reg = 0x88004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_edp_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
},
- .pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
};
-static struct gdsc hlos1_vote_lpass_adsp_gdsc = {
- .gdscr = 0x7d034,
- .pd = {
- .name = "hlos1_vote_lpass_adsp",
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
},
- .pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
};
-static struct gdsc hlos1_vote_lpass_core_gdsc = {
- .gdscr = 0x7d038,
- .pd = {
- .name = "hlos1_vote_lpass_core",
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x8a028,
+ .clkr = {
+ .enable_reg = 0x8a028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
},
- .pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
};
-static struct gdsc usb30_gdsc = {
- .gdscr = 0xf004,
- .pd = {
- .name = "usb30",
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a024,
+ .clkr = {
+ .enable_reg = 0x8a024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
},
- .pwrsts = PWRSTS_OFF_ON,
};
-static struct gdsc pcie0_gdsc = {
- .gdscr = 0x6b004,
- .pd = {
- .name = "pcie0",
+static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ .halt_reg = 0x8a004,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mnoc_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
},
- .pwrsts = PWRSTS_OFF_ON,
};
-static struct gdsc pcie1_gdsc = {
- .gdscr = 0x6d004,
- .pd = {
- .name = "pcie1",
+static struct clk_branch gcc_dcc_ahb_clk = {
+ .halt_reg = 0x84004,
+ .clkr = {
+ .enable_reg = 0x84004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
},
- .pwrsts = PWRSTS_OFF_ON,
};
-static struct gdsc pcie2_gdsc = {
- .gdscr = 0x6e004,
- .pd = {
- .name = "pcie2",
+static struct clk_branch gcc_aggre0_noc_mpu_cfg_ahb_clk = {
+ .halt_reg = 0x85000,
+ .clkr = {
+ .enable_reg = 0x85000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_noc_mpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
},
- .pwrsts = PWRSTS_OFF_ON,
};
-static struct gdsc ufs_gdsc = {
- .gdscr = 0x75004,
- .pd = {
- .name = "ufs",
+static struct clk_branch gcc_mmss_gpll0_div_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_gpll0_div_clk",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
},
- .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_hw *gcc_msm8996_hws[] = {
+ &xo.hw,
+ &gpll0_early_div.hw,
+ &ufs_tx_cfg_clk_src.hw,
+ &ufs_rx_cfg_clk_src.hw,
+ &ufs_ice_core_postdiv_clk_src.hw,
};
static struct clk_regmap *gcc_msm8996_clocks[] = {
@@ -3140,9 +3007,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL4_EARLY] = &gpll4_early.clkr,
[GPLL4] = &gpll4.clkr,
- [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
- [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
- [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
[USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
@@ -3191,7 +3055,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
[BLSP2_UART6_APPS_CLK_SRC] = &blsp2_uart6_apps_clk_src.clkr,
[PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
[TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
- [GCC_SLEEP_CLK_SRC] = &gcc_sleep_clk_src.clkr,
[HMSS_RBCPR_CLK_SRC] = &hmss_rbcpr_clk_src.clkr,
[HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr,
[GP1_CLK_SRC] = &gp1_clk_src.clkr,
@@ -3269,7 +3132,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
[GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
[GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
- [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
[GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
[GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr,
@@ -3319,18 +3181,21 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
[GCC_PCIE_CLKREF_CLK] = &gcc_pcie_clkref_clk.clkr,
[GCC_RX2_USB2_CLKREF_CLK] = &gcc_rx2_usb2_clkref_clk.clkr,
[GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
-};
-
-static struct gdsc *gcc_msm8996_gdscs[] = {
- [AGGRE0_NOC_GDSC] = &aggre0_noc_gdsc,
- [HLOS1_VOTE_AGGRE0_NOC_GDSC] = &hlos1_vote_aggre0_noc_gdsc,
- [HLOS1_VOTE_LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp_gdsc,
- [HLOS1_VOTE_LPASS_CORE_GDSC] = &hlos1_vote_lpass_core_gdsc,
- [USB30_GDSC] = &usb30_gdsc,
- [PCIE0_GDSC] = &pcie0_gdsc,
- [PCIE1_GDSC] = &pcie1_gdsc,
- [PCIE2_GDSC] = &pcie2_gdsc,
- [UFS_GDSC] = &ufs_gdsc,
+ [GCC_AGGRE0_NOC_QOSGEN_EXTREF_CLK] =
+ &gcc_aggre0_noc_qosgen_extref_clk.clkr,
+ [GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK] =
+ &hlos1_vote_lpass_core_smmu_clk.clkr,
+ [GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] =
+ &hlos1_vote_lpass_adsp_smmu_clk.clkr,
+ [GCC_EDP_CLKREF_CLK] = &gcc_edp_clkref_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr,
+ [GCC_DCC_AHB_ALK] = &gcc_dcc_ahb_clk.clkr,
+ [GCC_AGGRE0_NOC_MPU_CFG_AHB_CLK] = &gcc_aggre0_noc_mpu_cfg_ahb_clk.clkr,
+ [GCC_MMSS_GPLL0_DIV_CLK] = &gcc_mmss_gpll0_div_clk.clkr,
+ [GPLL0_OUT_MSSCC] = &gpll0_out_msscc.clkr,
};
static const struct qcom_reset_map gcc_msm8996_resets[] = {
@@ -3455,8 +3320,6 @@ static const struct qcom_cc_desc gcc_msm8996_desc = {
.num_clks = ARRAY_SIZE(gcc_msm8996_clocks),
.resets = gcc_msm8996_resets,
.num_resets = ARRAY_SIZE(gcc_msm8996_resets),
- .gdscs = gcc_msm8996_gdscs,
- .num_gdscs = ARRAY_SIZE(gcc_msm8996_gdscs),
};
static const struct of_device_id gcc_msm8996_match_table[] = {
@@ -3469,18 +3332,15 @@ static int gcc_msm8996_probe(struct platform_device *pdev)
{
struct clk *clk;
struct device *dev = &pdev->dev;
- int i;
+ int i, ret = 0;
struct regmap *regmap;
regmap = qcom_cc_map(pdev, &gcc_msm8996_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- /*
- * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
- * turned off by hardware during certain apps low power modes.
- */
- regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
+ /* Set the HMSS_AHB_CLK_ENA bit to enable the hmss_ahb_clk */
+ regmap_update_bits(regmap, 0x52004, BIT(21), BIT(21));
for (i = 0; i < ARRAY_SIZE(gcc_msm8996_hws); i++) {
clk = devm_clk_register(dev, gcc_msm8996_hws[i]);
@@ -3488,7 +3348,19 @@ static int gcc_msm8996_probe(struct platform_device *pdev)
return PTR_ERR(clk);
}
- return qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GCC clocks\n");
+ return ret;
+ }
+
+ /* This clock is used for all MMSS register access */
+ clk_prepare_enable(gcc_mmss_noc_cfg_ahb_clk.clkr.hw.clk);
+
+ dev_info(&pdev->dev, "Registered GCC clocks\n");
+
+ return ret;
}
static struct platform_driver gcc_msm8996_driver = {
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 61f99370863d..9b42e5ae129a 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -84,6 +84,8 @@ static LIST_HEAD(qce50_bam_list);
/* Index to point the dummy request */
#define DUMMY_REQ_INDEX MAX_QCE_BAM_REQ
+#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))
+
enum qce_owner {
QCE_OWNER_NONE = 0,
QCE_OWNER_CLIENT = 1,
@@ -110,6 +112,8 @@ struct qce_device {
unsigned char *coh_vmem; /* Allocated coherent virtual memory */
dma_addr_t coh_pmem; /* Allocated coherent physical memory */
int memsize; /* Memory allocated */
+ unsigned char *iovec_vmem; /* Allocate iovec virtual memory */
+ int iovec_memsize; /* Memory allocated */
uint32_t bam_mem; /* bam physical address, from DT */
uint32_t bam_mem_size; /* bam io size, from DT */
int is_shared; /* CE HW is shared */
@@ -4299,24 +4303,30 @@ static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
{
unsigned char *vaddr;
int i;
+ unsigned char *iovec_vaddr;
+ int iovec_memsize;
vaddr = pce_dev->coh_vmem;
vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
pce_dev->ce_bam_info.ce_burst_size);
+ iovec_vaddr = pce_dev->iovec_vmem;
+ iovec_memsize = pce_dev->iovec_memsize;
for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
/* Allow for 256 descriptor (cmd and data) entries per pipe */
pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec =
- (struct sps_iovec *)vaddr;
+ (struct sps_iovec *)iovec_vaddr;
pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
- (uintptr_t)GET_PHYS_ADDR(vaddr);
- vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec);
-
+ virt_to_phys(pce_dev->ce_request_info[i].
+ ce_sps.in_transfer.iovec);
+ iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+ iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
- (struct sps_iovec *)vaddr;
+ (struct sps_iovec *)iovec_vaddr;
pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
- (uintptr_t)GET_PHYS_ADDR(vaddr);
- vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec);
-
+ virt_to_phys(pce_dev->ce_request_info[i].
+ ce_sps.out_transfer.iovec);
+ iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+ iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
if (pce_dev->support_cmd_dscr)
qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
@@ -4343,7 +4353,8 @@ static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
}
pce_dev->dummyreq.in_buf = (uint8_t *)vaddr;
vaddr += DUMMY_REQ_DATA_LEN;
- if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize)
+ if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize ||
+ iovec_memsize < 0)
panic("qce50: Not enough coherent memory. Allocate %x , need %lx\n",
pce_dev->memsize, (uintptr_t)vaddr -
(uintptr_t)pce_dev->coh_vmem);
@@ -5933,12 +5944,19 @@ void *qce_open(struct platform_device *pdev, int *rc)
pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
+
if (pce_dev->coh_vmem == NULL) {
*rc = -ENOMEM;
pr_err("Can not allocate coherent memory for sps data\n");
goto err_iobase;
}
+ pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE *
+ MAX_QCE_ALLOC_BAM_REQ * 2;
+ pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL);
+ if (pce_dev->iovec_vmem == NULL)
+ goto err_mem;
+
*rc = __qce_init_clk(pce_dev);
if (*rc)
goto err_mem;
@@ -5978,6 +5996,7 @@ err_enable_clk:
__qce_deinit_clk(pce_dev);
err_mem:
+ kfree(pce_dev->iovec_vmem);
if (pce_dev->coh_vmem)
dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
pce_dev->coh_vmem, pce_dev->coh_pmem);
@@ -6008,6 +6027,7 @@ int qce_close(void *handle)
if (pce_dev->coh_vmem)
dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
pce_dev->coh_vmem, pce_dev->coh_pmem);
+ kfree(pce_dev->iovec_vmem);
qce_disable_clk(pce_dev);
__qce_deinit_clk(pce_dev);
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 778c76f52d0b..a3b25b3d8dd1 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -250,7 +250,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
ADRENO_GPMU | ADRENO_SPTP_PC,
.pm4fw_name = "a530_pm4.fw",
.pfpfw_name = "a530_pfp.fw",
- .zap_name = "a530_zap",
+ .zap_name = "a540_zap",
.gpudev = &adreno_a5xx_gpudev,
.gmem_size = SZ_1M,
.num_protected_regs = 0x20,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 18fdd400ac7a..362493118670 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1193,7 +1193,8 @@ static int adreno_init(struct kgsl_device *device)
if (!adreno_is_a3xx(adreno_dev)) {
int r = kgsl_allocate_global(device,
- &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE, 0, 0);
+ &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE,
+ 0, 0, "alwayson");
adreno_dev->cmdbatch_profile_index = 0;
@@ -1279,7 +1280,7 @@ static void _setup_throttling_counters(struct adreno_device *adreno_dev)
static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev)
{
- int i;
+ int i, adj;
uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS];
struct adreno_busy_data *busy = &adreno_dev->busy_data;
@@ -1300,8 +1301,14 @@ static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev)
adreno_dev->gpmu_throttle_counters[i],
&busy->throttle_cycles[i]);
}
- i = th[CRC_MORE50PCT] - th[IDLE_10PCT];
- return th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (i < 0 ? 0 : i) * 3;
+ adj = th[CRC_MORE50PCT] - th[IDLE_10PCT];
+ adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3;
+
+ trace_kgsl_clock_throttling(
+ th[IDLE_10PCT], th[CRC_50PCT],
+ th[CRC_MORE50PCT], th[CRC_LESS50PCT],
+ adj);
+ return adj;
}
static void _update_threshold_count(struct adreno_device *adreno_dev,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 2accbe5c5764..97e71464c2df 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -174,7 +174,7 @@ static int _a3xx_pwron_fixup(struct adreno_device *adreno_dev)
ret = kgsl_allocate_global(KGSL_DEVICE(adreno_dev),
&adreno_dev->pwron_fixup, PAGE_SIZE,
- KGSL_MEMFLAGS_GPUREADONLY, 0);
+ KGSL_MEMFLAGS_GPUREADONLY, 0, "pwron_fixup");
if (ret)
return ret;
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index b15d23cfbe0a..7a691667e59f 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -1360,7 +1360,7 @@ static int _a4xx_pwron_fixup(struct adreno_device *adreno_dev)
ret = kgsl_allocate_global(KGSL_DEVICE(adreno_dev),
&adreno_dev->pwron_fixup, PAGE_SIZE,
- KGSL_MEMFLAGS_GPUREADONLY, 0);
+ KGSL_MEMFLAGS_GPUREADONLY, 0, "pwron_fixup");
if (ret)
return ret;
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 3252bfb764f2..583de85678fc 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -244,7 +244,8 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev)
ret = kgsl_allocate_global(&adreno_dev->dev,
&crit_pkts, PAGE_SIZE,
- KGSL_MEMFLAGS_GPUREADONLY, 0);
+ KGSL_MEMFLAGS_GPUREADONLY,
+ 0, "crit_pkts");
if (ret)
return ret;
@@ -258,19 +259,19 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev)
ret = kgsl_allocate_global(&adreno_dev->dev,
&crit_pkts_refbuf1,
- PAGE_SIZE, 0, 0);
+ PAGE_SIZE, 0, 0, "crit_pkts_refbuf1");
if (ret)
return ret;
ret = kgsl_allocate_global(&adreno_dev->dev,
&crit_pkts_refbuf2,
- PAGE_SIZE, 0, 0);
+ PAGE_SIZE, 0, 0, "crit_pkts_refbuf2");
if (ret)
return ret;
ret = kgsl_allocate_global(&adreno_dev->dev,
&crit_pkts_refbuf3,
- PAGE_SIZE, 0, 0);
+ PAGE_SIZE, 0, 0, "crit_pkts_refbuf3");
if (ret)
return ret;
@@ -2366,7 +2367,7 @@ static int _load_firmware(struct kgsl_device *device, const char *fwfile,
}
ret = kgsl_allocate_global(device, ucode, fw->size - 4,
- KGSL_MEMFLAGS_GPUREADONLY, 0);
+ KGSL_MEMFLAGS_GPUREADONLY, 0, "ucode");
if (ret)
goto done;
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index c1463b824c67..4baee4a5c0b1 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -311,6 +311,8 @@ void a5xx_preempt_callback(struct adreno_device *adreno_dev, int bit)
adreno_dev->cur_rb->dispatch_q.expires);
adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+
+ a5xx_preemption_trigger(adreno_dev);
}
void a5xx_preemption_schedule(struct adreno_device *adreno_dev)
@@ -490,7 +492,8 @@ static int a5xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
int ret;
ret = kgsl_allocate_global(device, &rb->preemption_desc,
- A5XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED);
+ A5XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED,
+ "preemption_desc");
if (ret)
return ret;
@@ -525,7 +528,8 @@ static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
/* Allocate mem for storing preemption smmu record */
return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE,
- KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED);
+ KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
+ "smmu_info");
}
#else
static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
@@ -555,7 +559,8 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev)
/* Allocate mem for storing preemption counters */
ret = kgsl_allocate_global(device, &preempt->counters,
adreno_dev->num_ringbuffers *
- A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0);
+ A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0,
+ "preemption_counters");
if (ret)
return ret;
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 04d82844a5e9..aeffeab2f6dc 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -1033,11 +1033,11 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev)
/* The script buffers needs 2 extra qwords on the end */
if (kgsl_allocate_global(device, &capturescript,
script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
- KGSL_MEMDESC_PRIVILEGED))
+ KGSL_MEMDESC_PRIVILEGED, "capturescript"))
return;
if (kgsl_allocate_global(device, &registers, data_size, 0,
- KGSL_MEMDESC_PRIVILEGED)) {
+ KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
kgsl_free_global(KGSL_DEVICE(adreno_dev), &capturescript);
return;
}
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
index c4fab8a5528a..d8af520b2fe6 100644
--- a/drivers/gpu/msm/adreno_profile.c
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -1071,7 +1071,8 @@ void adreno_profile_init(struct adreno_device *adreno_dev)
/* allocate shared_buffer, which includes pre_ib and post_ib */
profile->shared_size = ADRENO_PROFILE_SHARED_BUF_SIZE_DWORDS;
ret = kgsl_allocate_global(device, &profile->shared_buffer,
- profile->shared_size * sizeof(unsigned int), 0, 0);
+ profile->shared_size * sizeof(unsigned int),
+ 0, 0, "profile");
if (ret) {
profile->shared_size = 0;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 5ffb0b2513f3..07ef09034d7c 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -250,12 +250,12 @@ static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev,
* switch pagetable
*/
ret = kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->pagetable_desc,
- PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED);
+ PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED, "pagetable_desc");
if (ret)
return ret;
-
return kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->buffer_desc,
- KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY, 0);
+ KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY,
+ 0, "ringbuffer");
}
int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt)
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 691f687cd839..6e9abc99bcc4 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -4474,13 +4474,13 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
goto error_close_mmu;
status = kgsl_allocate_global(device, &device->memstore,
- KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG);
+ KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG, "memstore");
if (status != 0)
goto error_close_mmu;
status = kgsl_allocate_global(device, &device->scratch,
- PAGE_SIZE, 0, 0);
+ PAGE_SIZE, 0, 0, "scratch");
if (status != 0)
goto error_free_memstore;
@@ -4519,7 +4519,7 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
}
device->events_wq = alloc_workqueue("kgsl-events",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
/* Initalize the snapshot engine */
kgsl_device_snapshot_init(device);
@@ -4662,7 +4662,8 @@ static int __init kgsl_core_init(void)
INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
- kgsl_driver.workqueue = create_singlethread_workqueue("kgsl-workqueue");
+ kgsl_driver.workqueue = alloc_workqueue("kgsl-workqueue",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index df9eb9ebd779..2f293e4da398 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -280,6 +280,29 @@ static const struct file_operations process_sparse_mem_fops = {
.release = process_mem_release,
};
+static int globals_print(struct seq_file *s, void *unused)
+{
+ kgsl_print_global_pt_entries(s);
+ return 0;
+}
+
+static int globals_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, globals_print, NULL);
+}
+
+static int globals_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations global_fops = {
+ .open = globals_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = globals_release,
+};
+
/**
* kgsl_process_init_debugfs() - Initialize debugfs for a process
* @private: Pointer to process private structure created for the process
@@ -336,6 +359,9 @@ void kgsl_core_debugfs_init(void)
kgsl_debugfs_dir = debugfs_create_dir("kgsl", NULL);
+ debugfs_create_file("globals", 0444, kgsl_debugfs_dir, NULL,
+ &global_fops);
+
debug_dir = debugfs_create_dir("debug", kgsl_debugfs_dir);
debugfs_create_file("strict_memory", 0644, debug_dir, NULL,
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 166bb68e64a1..71b6086423d6 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -38,6 +38,10 @@
#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
+#define ADDR_IN_GLOBAL(_a) \
+ (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \
+ ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE)))
+
static struct kgsl_mmu_pt_ops iommu_pt_ops;
static bool need_iommu_sync;
@@ -92,19 +96,41 @@ static struct kmem_cache *addr_entry_cache;
#define GLOBAL_PT_ENTRIES 32
-static struct kgsl_memdesc *global_pt_entries[GLOBAL_PT_ENTRIES];
+struct global_pt_entry {
+ struct kgsl_memdesc *memdesc;
+ char name[32];
+};
+
+static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
static struct kgsl_memdesc *kgsl_global_secure_pt_entry;
static int global_pt_count;
uint64_t global_pt_alloc;
static struct kgsl_memdesc gpu_qdss_desc;
+void kgsl_print_global_pt_entries(struct seq_file *s)
+{
+ int i;
+
+ for (i = 0; i < global_pt_count; i++) {
+ struct kgsl_memdesc *memdesc = global_pt_entries[i].memdesc;
+
+ if (memdesc == NULL)
+ continue;
+
+ seq_printf(s, "0x%16.16llX-0x%16.16llX %16llu %s\n",
+ memdesc->gpuaddr, memdesc->gpuaddr + memdesc->size - 1,
+ memdesc->size, global_pt_entries[i].name);
+ }
+}
+
static void kgsl_iommu_unmap_globals(struct kgsl_pagetable *pagetable)
{
unsigned int i;
for (i = 0; i < global_pt_count; i++) {
- if (global_pt_entries[i] != NULL)
- kgsl_mmu_unmap(pagetable, global_pt_entries[i]);
+ if (global_pt_entries[i].memdesc != NULL)
+ kgsl_mmu_unmap(pagetable,
+ global_pt_entries[i].memdesc);
}
}
@@ -113,8 +139,9 @@ static void kgsl_iommu_map_globals(struct kgsl_pagetable *pagetable)
unsigned int i;
for (i = 0; i < global_pt_count; i++) {
- if (global_pt_entries[i] != NULL) {
- int ret = kgsl_mmu_map(pagetable, global_pt_entries[i]);
+ if (global_pt_entries[i].memdesc != NULL) {
+ int ret = kgsl_mmu_map(pagetable,
+ global_pt_entries[i].memdesc);
BUG_ON(ret);
}
@@ -152,17 +179,17 @@ static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
return;
for (i = 0; i < global_pt_count; i++) {
- if (global_pt_entries[i] == memdesc) {
+ if (global_pt_entries[i].memdesc == memdesc) {
memdesc->gpuaddr = 0;
memdesc->priv &= ~KGSL_MEMDESC_GLOBAL;
- global_pt_entries[i] = NULL;
+ global_pt_entries[i].memdesc = NULL;
return;
}
}
}
static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
- struct kgsl_memdesc *memdesc)
+ struct kgsl_memdesc *memdesc, const char *name)
{
if (memdesc->gpuaddr != 0)
return;
@@ -174,7 +201,10 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
memdesc->priv |= KGSL_MEMDESC_GLOBAL;
global_pt_alloc += memdesc->size;
- global_pt_entries[global_pt_count++] = memdesc;
+ global_pt_entries[global_pt_count].memdesc = memdesc;
+ strlcpy(global_pt_entries[global_pt_count].name, name,
+ sizeof(global_pt_entries[global_pt_count].name));
+ global_pt_count++;
}
void kgsl_add_global_secure_entry(struct kgsl_device *device,
@@ -220,7 +250,7 @@ static void kgsl_setup_qdss_desc(struct kgsl_device *device)
return;
}
- kgsl_mmu_add_global(device, &gpu_qdss_desc);
+ kgsl_mmu_add_global(device, &gpu_qdss_desc, "gpu-qdss");
}
static inline void kgsl_cleanup_qdss_desc(struct kgsl_mmu *mmu)
@@ -493,8 +523,62 @@ struct _mem_entry {
unsigned int priv;
int pending_free;
pid_t pid;
+ char name[32];
};
+static void _get_global_entries(uint64_t faultaddr,
+ struct _mem_entry *prev,
+ struct _mem_entry *next)
+{
+ int i;
+ uint64_t prevaddr = 0;
+ struct global_pt_entry *p = NULL;
+
+ uint64_t nextaddr = (uint64_t) -1;
+ struct global_pt_entry *n = NULL;
+
+ for (i = 0; i < global_pt_count; i++) {
+ uint64_t addr;
+
+ if (global_pt_entries[i].memdesc == NULL)
+ continue;
+
+ addr = global_pt_entries[i].memdesc->gpuaddr;
+ if ((addr < faultaddr) && (addr > prevaddr)) {
+ prevaddr = addr;
+ p = &global_pt_entries[i];
+ }
+
+ if ((addr > faultaddr) && (addr < nextaddr)) {
+ nextaddr = addr;
+ n = &global_pt_entries[i];
+ }
+ }
+
+ if (p != NULL) {
+ prev->gpuaddr = p->memdesc->gpuaddr;
+ prev->size = p->memdesc->size;
+ prev->flags = p->memdesc->flags;
+ prev->priv = p->memdesc->priv;
+ prev->pid = 0;
+ strlcpy(prev->name, p->name, sizeof(prev->name));
+ }
+
+ if (n != NULL) {
+ next->gpuaddr = n->memdesc->gpuaddr;
+ next->size = n->memdesc->size;
+ next->flags = n->memdesc->flags;
+ next->priv = n->memdesc->priv;
+ next->pid = 0;
+ strlcpy(next->name, n->name, sizeof(next->name));
+ }
+}
+
+void __kgsl_get_memory_usage(struct _mem_entry *entry)
+{
+ kgsl_get_memory_usage(entry->name, sizeof(entry->name), entry->flags);
+}
+
static void _get_entries(struct kgsl_process_private *private,
uint64_t faultaddr, struct _mem_entry *prev,
struct _mem_entry *next)
@@ -529,6 +613,7 @@ static void _get_entries(struct kgsl_process_private *private,
prev->priv = p->memdesc.priv;
prev->pending_free = p->pending_free;
prev->pid = private->pid;
+ __kgsl_get_memory_usage(prev);
}
if (n != NULL) {
@@ -538,6 +623,7 @@ static void _get_entries(struct kgsl_process_private *private,
next->priv = n->memdesc.priv;
next->pending_free = n->pending_free;
next->pid = private->pid;
+ __kgsl_get_memory_usage(next);
}
}
@@ -553,7 +639,9 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
/* Set the maximum possible size as an initial value */
nextentry->gpuaddr = (uint64_t) -1;
- if (context) {
+ if (ADDR_IN_GLOBAL(faultaddr)) {
+ _get_global_entries(faultaddr, preventry, nextentry);
+ } else if (context) {
private = context->proc_priv;
spin_lock(&private->mem_lock);
_get_entries(private, faultaddr, preventry, nextentry);
@@ -563,18 +651,13 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
static void _print_entry(struct kgsl_device *device, struct _mem_entry *entry)
{
- char name[32];
- memset(name, 0, sizeof(name));
-
- kgsl_get_memory_usage(name, sizeof(name) - 1, entry->flags);
-
KGSL_LOG_DUMP(device,
"[%016llX - %016llX] %s %s (pid = %d) (%s)\n",
entry->gpuaddr,
entry->gpuaddr + entry->size,
entry->priv & KGSL_MEMDESC_GUARD_PAGE ? "(+guard)" : "",
entry->pending_free ? "(pending free)" : "",
- entry->pid, name);
+ entry->pid, entry->name);
}
static void _check_if_freed(struct kgsl_iommu_context *ctx,
@@ -1395,7 +1478,7 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
}
}
- kgsl_iommu_add_global(mmu, &iommu->setstate);
+ kgsl_iommu_add_global(mmu, &iommu->setstate, "setstate");
kgsl_setup_qdss_desc(device);
done:
@@ -2220,10 +2303,6 @@ static uint64_t kgsl_iommu_find_svm_region(struct kgsl_pagetable *pagetable,
return addr;
}
-#define ADDR_IN_GLOBAL(_a) \
- (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \
- ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE)))
-
static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
uint64_t gpuaddr, uint64_t size)
{
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 10f6b8049d36..4371c9a1b87e 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -543,12 +543,12 @@ void kgsl_mmu_remove_global(struct kgsl_device *device,
EXPORT_SYMBOL(kgsl_mmu_remove_global);
void kgsl_mmu_add_global(struct kgsl_device *device,
- struct kgsl_memdesc *memdesc)
+ struct kgsl_memdesc *memdesc, const char *name)
{
struct kgsl_mmu *mmu = &device->mmu;
if (MMU_OP_VALID(mmu, mmu_add_global))
- mmu->mmu_ops->mmu_add_global(mmu, memdesc);
+ mmu->mmu_ops->mmu_add_global(mmu, memdesc, name);
}
EXPORT_SYMBOL(kgsl_mmu_add_global);
@@ -620,7 +620,7 @@ static struct kgsl_mmu_pt_ops nommu_pt_ops = {
};
static void nommu_add_global(struct kgsl_mmu *mmu,
- struct kgsl_memdesc *memdesc)
+ struct kgsl_memdesc *memdesc, const char *name)
{
memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl);
}
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 53645cc1741c..acbc0e784cf2 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -75,7 +75,7 @@ struct kgsl_mmu_ops {
(struct kgsl_mmu *mmu);
int (*mmu_init_pt)(struct kgsl_mmu *mmu, struct kgsl_pagetable *);
void (*mmu_add_global)(struct kgsl_mmu *mmu,
- struct kgsl_memdesc *memdesc);
+ struct kgsl_memdesc *memdesc, const char *name);
void (*mmu_remove_global)(struct kgsl_mmu *mmu,
struct kgsl_memdesc *memdesc);
struct kgsl_pagetable * (*mmu_getpagetable)(struct kgsl_mmu *mmu,
@@ -174,6 +174,7 @@ struct kgsl_pagetable *kgsl_mmu_getpagetable_ptbase(struct kgsl_mmu *,
void kgsl_add_global_secure_entry(struct kgsl_device *device,
struct kgsl_memdesc *memdesc);
+void kgsl_print_global_pt_entries(struct seq_file *s);
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
int kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
@@ -198,7 +199,7 @@ int kgsl_mmu_find_region(struct kgsl_pagetable *pagetable,
uint64_t *gpuaddr, uint64_t size, unsigned int align);
void kgsl_mmu_add_global(struct kgsl_device *device,
- struct kgsl_memdesc *memdesc);
+ struct kgsl_memdesc *memdesc, const char *name);
void kgsl_mmu_remove_global(struct kgsl_device *device,
struct kgsl_memdesc *memdesc);
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index 7967b19779db..f5402fdc7e57 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -197,7 +197,7 @@ _kgsl_pool_shrink(struct kgsl_page_pool *pool, int num_pages)
* starting from higher order pool.
*/
static unsigned long
-kgsl_pool_reduce(unsigned int target_pages)
+kgsl_pool_reduce(unsigned int target_pages, bool exit)
{
int total_pages = 0;
int i;
@@ -210,6 +210,14 @@ kgsl_pool_reduce(unsigned int target_pages)
for (i = (KGSL_NUM_POOLS - 1); i >= 0; i--) {
pool = &kgsl_pools[i];
+ /*
+ * Only reduce the pool sizes for pools which are allowed to
+ * allocate memory unless we are at close, in which case the
+ * reserved memory for all pools needs to be freed
+ */
+ if (!pool->allocation_allowed && !exit)
+ continue;
+
total_pages -= pcount;
nr_removed = total_pages - target_pages;
@@ -418,7 +426,7 @@ kgsl_pool_shrink_scan_objects(struct shrinker *shrinker,
int target_pages = (nr > total_pages) ? 0 : (total_pages - nr);
/* Reduce pool size to target_pages */
- return kgsl_pool_reduce(target_pages);
+ return kgsl_pool_reduce(target_pages, false);
}
static unsigned long
@@ -449,7 +457,7 @@ void kgsl_init_page_pools(void)
void kgsl_exit_page_pools(void)
{
/* Release all pages in pools, if any.*/
- kgsl_pool_reduce(0);
+ kgsl_pool_reduce(0, true);
/* Unregister shrinker */
unregister_shrinker(&kgsl_pool_shrinker);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 565ae4c39fdd..9e6817c76df8 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -279,7 +279,7 @@ kgsl_memdesc_footprint(const struct kgsl_memdesc *memdesc)
*/
static inline int kgsl_allocate_global(struct kgsl_device *device,
struct kgsl_memdesc *memdesc, uint64_t size, uint64_t flags,
- unsigned int priv)
+ unsigned int priv, const char *name)
{
int ret;
@@ -297,7 +297,7 @@ static inline int kgsl_allocate_global(struct kgsl_device *device,
}
if (ret == 0)
- kgsl_mmu_add_global(device, memdesc);
+ kgsl_mmu_add_global(device, memdesc, name);
return ret;
}
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 1b51eb591036..4ef9f80177d6 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -1192,6 +1192,41 @@ TRACE_EVENT(sparse_unbind,
);
+TRACE_EVENT(kgsl_clock_throttling,
+ TP_PROTO(
+ int idle_10pct,
+ int crc_50pct,
+ int crc_more50pct,
+ int crc_less50pct,
+ int adj
+ ),
+ TP_ARGS(
+ idle_10pct,
+ crc_50pct,
+ crc_more50pct,
+ crc_less50pct,
+ adj
+ ),
+ TP_STRUCT__entry(
+ __field(int, idle_10pct)
+ __field(int, crc_50pct)
+ __field(int, crc_more50pct)
+ __field(int, crc_less50pct)
+ __field(int, adj)
+ ),
+ TP_fast_assign(
+ __entry->idle_10pct = idle_10pct;
+ __entry->crc_50pct = crc_50pct;
+ __entry->crc_more50pct = crc_more50pct;
+ __entry->crc_less50pct = crc_less50pct;
+ __entry->adj = adj;
+ ),
+ TP_printk("idle_10=%d crc_50=%d crc_more50=%d crc_less50=%d adj=%d",
+ __entry->idle_10pct, __entry->crc_50pct, __entry->crc_more50pct,
+ __entry->crc_less50pct, __entry->adj
+ )
+);
+
#endif /* _KGSL_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 92870cdb52d9..8efaa88329aa 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -218,7 +218,8 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
goto done_proc;
}
- remaining_bytes = do_div(buffer_size, sizeof(__s32));
+ remaining_bytes = buffer_size % sizeof(__s32);
+ buffer_size = buffer_size / sizeof(__s32);
if (buffer_size) {
for (i = 0; i < buffer_size; ++i) {
hid_set_field(report->field[field_index], i,
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 26fbfba23c94..a234d61802ce 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -22,6 +22,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/bitmap.h>
#include <linux/of.h>
@@ -135,7 +136,6 @@ struct stm_drvdata {
struct device *dev;
struct coresight_device *csdev;
struct miscdevice miscdev;
- struct clk *clk;
spinlock_t spinlock;
struct channel_space chs;
bool enable;
@@ -270,8 +270,8 @@ static int stm_enable(struct coresight_device *csdev)
int ret;
unsigned long flags;
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
+ ret = pm_runtime_get_sync(drvdata->dev);
+ if (ret < 0)
return ret;
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -349,7 +349,7 @@ static void stm_disable(struct coresight_device *csdev)
/* Wait for 100ms so that pending data has been written to HW */
msleep(100);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "STM tracing disabled\n");
}
@@ -360,7 +360,7 @@ static int stm_trace_id(struct coresight_device *csdev)
unsigned long flags;
int trace_id = -1;
- if (clk_prepare_enable(drvdata->clk))
+ if (pm_runtime_get_sync(drvdata->dev) < 0)
goto out;
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -370,7 +370,7 @@ static int stm_trace_id(struct coresight_device *csdev)
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
out:
return trace_id;
}
@@ -806,19 +806,14 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
- drvdata->clk = adev->pclk;
- ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(drvdata->clk);
+ ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
if (ret)
return ret;
if (!coresight_authstatus_enabled(drvdata->base))
goto err1;
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
bitmap_fill(drvdata->entities, OST_ENTITY_MAX);
@@ -856,7 +851,7 @@ err:
coresight_unregister(drvdata->csdev);
return ret;
err1:
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
return -EPERM;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index d48d8485f979..306465ededf9 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -95,6 +95,7 @@
#define TMC_ETR_BAM_PIPE_INDEX 0
#define TMC_ETR_BAM_NR_PIPES 2
+#define TMC_ETFETB_DUMP_MAGIC_V2 (0x42445953)
#define TMC_REG_DUMP_MAGIC_V2 (0x42445953)
#define TMC_REG_DUMP_VER (1)
@@ -178,6 +179,7 @@ struct tmc_drvdata {
spinlock_t spinlock;
int read_count;
bool reading;
+ bool aborting;
char *buf;
dma_addr_t paddr;
void __iomem *vaddr;
@@ -883,11 +885,15 @@ static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
for (i = 0; i < memwords; i++) {
read_data = readl_relaxed(drvdata->base + TMC_RRD);
if (read_data == 0xFFFFFFFF)
- return;
+ goto out;
memcpy(bufp, &read_data, 4);
bufp += 4;
}
}
+
+out:
+ if (drvdata->aborting)
+ drvdata->buf_data.magic = TMC_ETFETB_DUMP_MAGIC_V2;
}
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
@@ -1070,6 +1076,8 @@ static void tmc_abort(struct coresight_device *csdev)
unsigned long flags;
enum tmc_mode mode;
+ drvdata->aborting = true;
+
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading)
goto out0;
@@ -1728,7 +1736,7 @@ static void __tmc_reg_dump(struct tmc_drvdata *drvdata)
if (!drvdata->reg_buf)
return;
- else if (!drvdata->dump_reg)
+ else if (!drvdata->aborting && !drvdata->dump_reg)
return;
drvdata->reg_data.version = TMC_REG_DUMP_VER;
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index e4e188fc67fc..c43d8596a203 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -94,6 +94,10 @@ static void __tpda_enable_pre_port(struct tpda_drvdata *drvdata)
val = val | BIT(2);
else
val = val & ~BIT(2);
+
+ /* Force ASYNC-VERSION-FREQTS sequence */
+ val = val | BIT(21);
+
tpda_writel(drvdata, val, TPDA_CR);
/*
@@ -154,8 +158,6 @@ static void __tpda_enable_post_port(struct tpda_drvdata *drvdata)
if (drvdata->freq_req_val)
tpda_writel(drvdata, drvdata->freq_req_val, TPDA_FREQREQ_VAL);
- else
- tpda_writel(drvdata, 0x0, TPDA_FREQREQ_VAL);
val = tpda_readl(drvdata, TPDA_CR);
if (drvdata->freq_req)
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 8a0e2c809fb2..90135f496aaf 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -318,6 +318,18 @@ config QCOM_RRADC
To compile this driver as a module, choose M here: the module will
be called qcom-rradc.
+config QCOM_TADC
+ tristate "Qualcomm Technologies Inc. TADC driver"
+ depends on MFD_I2C_PMIC
+ help
+ Say yes here to support the Qualcomm Technologies Inc. telemetry ADC.
+ The TADC provides battery temperature, skin temperature,
+ die temperature, battery voltage, battery current, input voltage,
+ input current, and OTG current.
+
+ The driver can also be built as a module. If so, the module will be
+ called qcom-tadc.
+
config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 59cac58d769e..9124e49a5f65 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_QCOM_RRADC) += qcom-rradc.o
+obj-$(CONFIG_QCOM_TADC) += qcom-tadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
diff --git a/drivers/iio/adc/qcom-tadc.c b/drivers/iio/adc/qcom-tadc.c
new file mode 100644
index 000000000000..3cc2694f9a03
--- /dev/null
+++ b/drivers/iio/adc/qcom-tadc.c
@@ -0,0 +1,742 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define TADC_REVISION1_REG 0x00
+#define TADC_REVISION2_REG 0x01
+#define TADC_REVISION3_REG 0x02
+#define TADC_REVISION4_REG 0x03
+#define TADC_PERPH_TYPE_REG 0x04
+#define TADC_PERPH_SUBTYPE_REG 0x05
+
+/* TADC register definitions */
+#define TADC_SW_CH_CONV_REG(chip) (chip->tadc_base + 0x06)
+#define TADC_MBG_ERR_REG(chip) (chip->tadc_base + 0x07)
+#define TADC_EN_CTL_REG(chip) (chip->tadc_base + 0x46)
+#define TADC_CONV_REQ_REG(chip) (chip->tadc_base + 0x51)
+#define TADC_HWTRIG_CONV_CH_EN_REG(chip) (chip->tadc_base + 0x52)
+#define TADC_HW_SETTLE_DELAY_REG(chip) (chip->tadc_base + 0x53)
+#define TADC_LONG_HW_SETTLE_DLY_EN_REG(chip) (chip->tadc_base + 0x54)
+#define TADC_LONG_HW_SETTLE_DLY_REG(chip) (chip->tadc_base + 0x55)
+#define TADC_ADC_BUF_CH_REG(chip) (chip->tadc_base + 0x56)
+#define TADC_ADC_AAF_CH_REG(chip) (chip->tadc_base + 0x57)
+#define TADC_ADC_DATA_RDBK_REG(chip) (chip->tadc_base + 0x58)
+#define TADC_CH1_ADC_LO_REG(chip) (chip->tadc_base + 0x60)
+#define TADC_CH1_ADC_HI_REG(chip) (chip->tadc_base + 0x61)
+#define TADC_CH2_ADC_LO_REG(chip) (chip->tadc_base + 0x62)
+#define TADC_CH2_ADC_HI_REG(chip) (chip->tadc_base + 0x63)
+#define TADC_CH3_ADC_LO_REG(chip) (chip->tadc_base + 0x64)
+#define TADC_CH3_ADC_HI_REG(chip) (chip->tadc_base + 0x65)
+#define TADC_CH4_ADC_LO_REG(chip) (chip->tadc_base + 0x66)
+#define TADC_CH4_ADC_HI_REG(chip) (chip->tadc_base + 0x67)
+#define TADC_CH5_ADC_LO_REG(chip) (chip->tadc_base + 0x68)
+#define TADC_CH5_ADC_HI_REG(chip) (chip->tadc_base + 0x69)
+#define TADC_CH6_ADC_LO_REG(chip) (chip->tadc_base + 0x70)
+#define TADC_CH6_ADC_HI_REG(chip) (chip->tadc_base + 0x71)
+#define TADC_CH7_ADC_LO_REG(chip) (chip->tadc_base + 0x72)
+#define TADC_CH7_ADC_HI_REG(chip) (chip->tadc_base + 0x73)
+#define TADC_CH8_ADC_LO_REG(chip) (chip->tadc_base + 0x74)
+#define TADC_CH8_ADC_HI_REG(chip) (chip->tadc_base + 0x75)
+
+/* TADC_CMP register definitions */
+#define TADC_CMP_THR1_CMP_REG(chip) (chip->tadc_cmp_base + 0x51)
+#define TADC_CMP_THR1_CH1_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x52)
+#define TADC_CMP_THR1_CH1_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x53)
+#define TADC_CMP_THR1_CH2_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x54)
+#define TADC_CMP_THR1_CH2_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x55)
+#define TADC_CMP_THR1_CH3_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x56)
+#define TADC_CMP_THR1_CH3_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x57)
+#define TADC_CMP_THR2_CMP_REG(chip) (chip->tadc_cmp_base + 0x67)
+#define TADC_CMP_THR2_CH1_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x68)
+#define TADC_CMP_THR2_CH1_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x69)
+#define TADC_CMP_THR2_CH2_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x6A)
+#define TADC_CMP_THR2_CH2_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x6B)
+#define TADC_CMP_THR2_CH3_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x6C)
+#define TADC_CMP_THR2_CH3_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x6D)
+#define TADC_CMP_THR3_CMP_REG(chip) (chip->tadc_cmp_base + 0x7D)
+#define TADC_CMP_THR3_CH1_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x7E)
+#define TADC_CMP_THR3_CH1_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x7F)
+#define TADC_CMP_THR3_CH2_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x80)
+#define TADC_CMP_THR3_CH2_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x81)
+#define TADC_CMP_THR3_CH3_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x82)
+#define TADC_CMP_THR3_CH3_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x83)
+#define TADC_CMP_THR4_CMP_REG(chip) (chip->tadc_cmp_base + 0x93)
+#define TADC_CMP_THR4_CH1_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x94)
+#define TADC_CMP_THR4_CH1_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x95)
+#define TADC_CMP_THR1_CH1_HYST_REG(chip) (chip->tadc_cmp_base + 0xB0)
+#define TADC_CMP_THR2_CH1_HYST_REG(chip) (chip->tadc_cmp_base + 0xB1)
+#define TADC_CMP_THR3_CH1_HYST_REG(chip) (chip->tadc_cmp_base + 0xB2)
+#define TADC_CMP_THR4_CH1_HYST_REG(chip) (chip->tadc_cmp_base + 0xB3)
+
+/* 10 bits of resolution */
+#define TADC_RESOLUTION 1024
+/* number of hardware channels */
+#define TADC_NUM_CH 8
+
+enum tadc_chan_id {
+ TADC_THERM1 = 0,
+ TADC_THERM2,
+ TADC_DIE_TEMP,
+ TADC_BATT_I,
+ TADC_BATT_V,
+ TADC_INPUT_I,
+ TADC_INPUT_V,
+ TADC_OTG_I,
+ /* virtual channels */
+ TADC_BATT_P,
+ TADC_INPUT_P,
+ TADC_THERM1_THR1,
+ TADC_THERM2_THR1,
+ TADC_DIE_TEMP_THR1,
+};
+
+#define TADC_CHAN(_name, _type, _channel, _info_mask) \
+{ \
+ .type = _type, \
+ .channel = _channel, \
+ .info_mask_separate = _info_mask, \
+ .extend_name = _name, \
+}
+
+#define TADC_THERM_CHAN(_name, _channel) \
+TADC_CHAN(_name, IIO_TEMP, _channel, \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED))
+
+#define TADC_TEMP_CHAN(_name, _channel) \
+TADC_CHAN(_name, IIO_TEMP, _channel, \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET))
+
+#define TADC_CURRENT_CHAN(_name, _channel) \
+TADC_CHAN(_name, IIO_CURRENT, _channel, \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED) | \
+ BIT(IIO_CHAN_INFO_SCALE))
+
+
+#define TADC_VOLTAGE_CHAN(_name, _channel) \
+TADC_CHAN(_name, IIO_VOLTAGE, _channel, \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED) | \
+ BIT(IIO_CHAN_INFO_SCALE))
+
+#define TADC_POWER_CHAN(_name, _channel) \
+TADC_CHAN(_name, IIO_POWER, _channel, \
+ BIT(IIO_CHAN_INFO_PROCESSED))
+
+static const struct iio_chan_spec tadc_iio_chans[] = {
+ [TADC_THERM1] = TADC_THERM_CHAN(
+ "batt", TADC_THERM1),
+ [TADC_THERM2] = TADC_THERM_CHAN(
+ "skin", TADC_THERM2),
+ [TADC_DIE_TEMP] = TADC_TEMP_CHAN(
+ "die", TADC_DIE_TEMP),
+ [TADC_BATT_I] = TADC_CURRENT_CHAN(
+ "batt", TADC_BATT_I),
+ [TADC_BATT_V] = TADC_VOLTAGE_CHAN(
+ "batt", TADC_BATT_V),
+ [TADC_INPUT_I] = TADC_CURRENT_CHAN(
+ "input", TADC_INPUT_I),
+ [TADC_INPUT_V] = TADC_VOLTAGE_CHAN(
+ "input", TADC_INPUT_V),
+ [TADC_OTG_I] = TADC_CURRENT_CHAN(
+ "otg", TADC_OTG_I),
+ [TADC_BATT_P] = TADC_POWER_CHAN(
+ "batt", TADC_BATT_P),
+ [TADC_INPUT_P] = TADC_POWER_CHAN(
+ "input", TADC_INPUT_P),
+ [TADC_THERM1_THR1] = TADC_THERM_CHAN(
+ "batt_hot", TADC_THERM1_THR1),
+ [TADC_THERM2_THR1] = TADC_THERM_CHAN(
+ "skin_hot", TADC_THERM2_THR1),
+ [TADC_DIE_TEMP_THR1] = TADC_THERM_CHAN(
+ "die_hot", TADC_DIE_TEMP_THR1),
+};
+
+struct tadc_chan_data {
+ s32 scale;
+ s32 offset;
+ u32 rbias;
+ const struct tadc_pt *table;
+ size_t tablesize;
+};
+
+struct tadc_chip {
+ struct device *dev;
+ struct regmap *regmap;
+ u32 tadc_base;
+ u32 tadc_cmp_base;
+ struct tadc_chan_data chans[TADC_NUM_CH];
+ struct completion eoc_complete;
+};
+
+struct tadc_pt {
+ s32 x;
+ s32 y;
+};
+
+/*
+ * Thermistor tables are generated by the B-parameter equation which is a
+ * simplified version of the Steinhart-Hart equation.
+ *
+ * (1 / T) = (1 / T0) + (1 / B) * ln(R / R0)
+ *
+ * Where R0 is the resistance at temperature T0, and T0 is typically room
+ * temperature (25C).
+ */
+static const struct tadc_pt tadc_therm_3450b_68k[] = {
+ { 4151, 120000 },
+ { 4648, 115000 },
+ { 5220, 110000 },
+ { 5880, 105000 },
+ { 6644, 100000 },
+ { 7533, 95000 },
+ { 8571, 90000 },
+ { 9786, 85000 },
+ { 11216, 80000 },
+ { 12906, 75000 },
+ { 14910, 70000 },
+ { 17300, 65000 },
+ { 20163, 60000 },
+ { 23609, 55000 },
+ { 27780, 50000 },
+ { 32855, 45000 },
+ { 39065, 40000 },
+ { 46712, 35000 },
+ { 56185, 30000 },
+ { 68000, 25000 },
+ { 82837, 20000 },
+ { 101604, 15000 },
+ { 125525, 10000 },
+ { 156261, 5000 },
+ { 196090, 0 },
+ { 248163, -5000 },
+ { 316887, -10000 },
+ { 408493, -15000 },
+ { 531889, -20000 },
+ { 699966, -25000 },
+ { 931618, -30000 },
+ { 1254910, -35000 },
+ { 1712127, -40000 },
+};
+
+static int tadc_read(struct tadc_chip *chip, u16 reg, u8 *val,
+ size_t val_count)
+{
+ int rc = 0;
+
+ rc = regmap_bulk_read(chip->regmap, reg, val, val_count);
+ if (rc < 0)
+ pr_err("Couldn't read %04x rc=%d\n", reg, rc);
+
+ return rc;
+}
+
+static int tadc_write(struct tadc_chip *chip, u16 reg, u8 data)
+{
+ int rc = 0;
+
+ rc = regmap_write(chip->regmap, reg, data);
+ if (rc < 0)
+ pr_err("Couldn't write %02x to %04x rc=%d\n",
+ data, reg, rc);
+
+ return rc;
+}
+
+static int tadc_lerp(const struct tadc_pt *pts, size_t tablesize, s32 input,
+ s32 *output)
+{
+ int i;
+ s64 temp;
+
+ if (pts == NULL) {
+ pr_err("Table is NULL\n");
+ return -EINVAL;
+ }
+
+ if (tablesize < 1) {
+ pr_err("Table has no entries\n");
+ return -ENOENT;
+ }
+
+ if (tablesize == 1) {
+ *output = pts[0].y;
+ return 0;
+ }
+
+ if (pts[0].x > pts[1].x) {
+ pr_err("Table is not in acending order\n");
+ return -EINVAL;
+ }
+
+ if (input <= pts[0].x) {
+ *output = pts[0].y;
+ return 0;
+ }
+
+ if (input >= pts[tablesize - 1].x) {
+ *output = pts[tablesize - 1].y;
+ return 0;
+ }
+
+ for (i = 1; i < tablesize; i++)
+ if (input <= pts[i].x)
+ break;
+
+ temp = (s64)(pts[i].y - pts[i - 1].y) * (s64)(input - pts[i - 1].x);
+ temp = div_s64(temp, pts[i].x - pts[i - 1].x);
+ *output = temp + pts[i - 1].y;
+ return 0;
+}
+
+/*
+ * Process the result of a thermistor reading.
+ *
+ * The voltage input to the ADC is a result of a voltage divider circuit.
+ * Vout = (Rtherm / (Rbias + Rtherm)) * Vbias
+ *
+ * The ADC value is based on the output voltage of the voltage divider, and the
+ * bias voltage.
+ * ADC = (Vin * 1024) / Vbias
+ *
+ * Combine these equations and solve for Rtherm
+ * Rtherm = (ADC * Rbias) / (1024 - ADC)
+ */
+static int tadc_process_therm(const struct tadc_chan_data *chan_data,
+ s16 adc, s32 *result)
+{
+ s64 rtherm;
+
+ rtherm = (s64)adc * (s64)chan_data->rbias;
+ rtherm = div_s64(rtherm, TADC_RESOLUTION - adc);
+ return tadc_lerp(chan_data->table, chan_data->tablesize, rtherm,
+ result);
+}
+
+static int tadc_read_channel(struct tadc_chip *chip, u16 address, int *adc)
+{
+ u8 val[2];
+ int rc;
+
+ rc = tadc_read(chip, address, val, ARRAY_SIZE(val));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read channel rc=%d\n", rc);
+ return rc;
+ }
+
+ *adc = (s16)(val[0] | val[1] << BITS_PER_BYTE);
+ return 0;
+}
+
+#define CONVERSION_TIMEOUT_MS 100
+static int tadc_do_conversion(struct tadc_chip *chip, u8 channels, s16 *adc)
+{
+ unsigned long timeout, timeleft;
+ u8 val[TADC_NUM_CH * 2];
+ int rc, i;
+
+ rc = tadc_read(chip, TADC_MBG_ERR_REG(chip), val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read mbg error status rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (val[0] != 0) {
+ tadc_write(chip, TADC_EN_CTL_REG(chip), 0);
+ tadc_write(chip, TADC_EN_CTL_REG(chip), 0x80);
+ }
+
+ rc = tadc_write(chip, TADC_CONV_REQ_REG(chip), channels);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't write conversion request rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ timeout = msecs_to_jiffies(CONVERSION_TIMEOUT_MS);
+ timeleft = wait_for_completion_timeout(&chip->eoc_complete, timeout);
+
+ if (timeleft == 0) {
+ rc = tadc_read(chip, TADC_SW_CH_CONV_REG(chip), val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read conversion status rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (val[0] != channels) {
+ dev_err(chip->dev, "Conversion timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ rc = tadc_read(chip, TADC_CH1_ADC_LO_REG(chip), val, ARRAY_SIZE(val));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read adc channels rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < TADC_NUM_CH; i++)
+ adc[i] = val[i * 2] | val[i * 2 + 1] << BITS_PER_BYTE;
+
+ return jiffies_to_msecs(timeout - timeleft);
+}
+
+static int tadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask)
+{
+ struct tadc_chip *chip = iio_priv(indio_dev);
+ const struct tadc_chan_data *chan_data = &chip->chans[chan->channel];
+ int rc = 0, offset = 0, scale, scale2, scale_type;
+ s16 adc[TADC_NUM_CH];
+
+ switch (chan->channel) {
+ case TADC_THERM1_THR1:
+ chan_data = &chip->chans[TADC_THERM1];
+ break;
+ case TADC_THERM2_THR1:
+ chan_data = &chip->chans[TADC_THERM2];
+ break;
+ case TADC_DIE_TEMP_THR1:
+ chan_data = &chip->chans[TADC_DIE_TEMP];
+ break;
+ default:
+ break;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->channel) {
+ case TADC_THERM1_THR1:
+ rc = tadc_read_channel(chip,
+ TADC_CMP_THR1_CH1_CMP_LO_REG(chip), val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read THERM1 threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+ case TADC_THERM2_THR1:
+ rc = tadc_read_channel(chip,
+ TADC_CMP_THR1_CH2_CMP_LO_REG(chip), val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read THERM2 threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+ case TADC_DIE_TEMP_THR1:
+ rc = tadc_read_channel(chip,
+ TADC_CMP_THR1_CH3_CMP_LO_REG(chip), val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read DIE_TEMP threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+ default:
+ rc = tadc_do_conversion(chip, BIT(chan->channel), adc);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read channel %d\n",
+ chan->channel);
+ return rc;
+ }
+ *val = adc[chan->channel];
+ break;
+ }
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->channel) {
+ case TADC_THERM1:
+ case TADC_THERM2:
+ case TADC_THERM1_THR1:
+ case TADC_THERM2_THR1:
+ rc = tadc_read_raw(indio_dev, chan, val, NULL,
+ IIO_CHAN_INFO_RAW);
+ if (rc < 0)
+ return rc;
+
+ rc = tadc_process_therm(chan_data, *val, val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't process 0x%04x from channel %d rc=%d\n",
+ *val, chan->channel, rc);
+ return rc;
+ }
+ break;
+ case TADC_BATT_P:
+ rc = tadc_do_conversion(chip,
+ BIT(TADC_BATT_I) | BIT(TADC_BATT_V), adc);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read battery current and voltage channels\n");
+ return rc;
+ }
+
+ *val = adc[TADC_BATT_I] * adc[TADC_BATT_V];
+ break;
+ case TADC_INPUT_P:
+ rc = tadc_do_conversion(chip,
+ BIT(TADC_INPUT_I) | BIT(TADC_INPUT_V), adc);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read input current and voltage channels\n");
+ return rc;
+ }
+
+ *val = adc[TADC_INPUT_I] * adc[TADC_INPUT_V];
+ break;
+ default:
+ rc = tadc_read_raw(indio_dev, chan, val, NULL,
+ IIO_CHAN_INFO_RAW);
+ if (rc < 0)
+ return rc;
+
+ /* offset is optional */
+ rc = tadc_read_raw(indio_dev, chan, &offset, NULL,
+ IIO_CHAN_INFO_OFFSET);
+ if (rc < 0)
+ return rc;
+
+ scale_type = tadc_read_raw(indio_dev, chan,
+ &scale, &scale2, IIO_CHAN_INFO_SCALE);
+ switch (scale_type) {
+ case IIO_VAL_INT:
+ *val = *val * scale + offset;
+ break;
+ case IIO_VAL_FRACTIONAL:
+ *val = div_s64((s64)*val * scale + offset,
+ scale2);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ }
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->channel) {
+ case TADC_DIE_TEMP:
+ case TADC_DIE_TEMP_THR1:
+ *val = chan_data->scale;
+ return IIO_VAL_INT;
+ case TADC_BATT_I:
+ case TADC_BATT_V:
+ case TADC_INPUT_I:
+ case TADC_INPUT_V:
+ case TADC_OTG_I:
+ *val = chan_data->scale;
+ *val2 = TADC_RESOLUTION;
+ return IIO_VAL_FRACTIONAL;
+ }
+ return -EINVAL;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = chan_data->offset;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+static irqreturn_t handle_eoc(int irq, void *dev_id)
+{
+ struct tadc_chip *chip = dev_id;
+
+ complete(&chip->eoc_complete);
+ return IRQ_HANDLED;
+}
+
+static int tadc_set_therm_table(struct tadc_chan_data *chan_data, u32 beta,
+ u32 rtherm)
+{
+ if (beta == 3450 && rtherm == 68000) {
+ chan_data->table = tadc_therm_3450b_68k;
+ chan_data->tablesize = ARRAY_SIZE(tadc_therm_3450b_68k);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int tadc_parse_dt(struct tadc_chip *chip)
+{
+ struct device_node *child, *node;
+ struct tadc_chan_data *chan_data;
+ u32 chan_id, rtherm, beta;
+ int rc = 0;
+
+ node = chip->dev->of_node;
+ for_each_available_child_of_node(node, child) {
+ rc = of_property_read_u32(child, "reg", &chan_id);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't find channel for %s rc=%d",
+ child->name, rc);
+ return rc;
+ }
+
+ if (chan_id > TADC_NUM_CH - 1) {
+ dev_err(chip->dev, "Channel %d is out of range [0, %d]\n",
+ chan_id, TADC_NUM_CH - 1);
+ return -EINVAL;
+ }
+
+ chan_data = &chip->chans[chan_id];
+ switch (chan_id) {
+ case TADC_THERM1:
+ case TADC_THERM2:
+ rc = of_property_read_u32(child,
+ "qcom,rbias", &chan_data->rbias);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read qcom,rbias rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(child,
+ "qcom,beta-coefficient", &beta);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read qcom,beta-coefficient rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(child,
+ "qcom,rtherm-at-25degc", &rtherm);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read qcom,rtherm-at-25degc rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = tadc_set_therm_table(chan_data, beta, rtherm);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set therm table rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+ default:
+ rc = of_property_read_s32(child, "qcom,scale",
+ &chan_data->scale);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read scale rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ of_property_read_s32(child, "qcom,offset",
+ &chan_data->offset);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static const struct iio_info tadc_info = {
+ .read_raw = &tadc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int tadc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct iio_dev *indio_dev;
+ struct tadc_chip *chip;
+ int rc = 0, irq;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+ chip->dev = &pdev->dev;
+ init_completion(&chip->eoc_complete);
+
+ rc = of_property_read_u32(node, "reg", &chip->tadc_base);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read base address rc=%d\n", rc);
+ return rc;
+ }
+ chip->tadc_cmp_base = chip->tadc_base + 0x100;
+
+ chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+ if (!chip->regmap) {
+ pr_err("Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ rc = tadc_parse_dt(chip);
+ if (rc < 0) {
+ pr_err("Couldn't parse device tree rc=%d\n", rc);
+ return rc;
+ }
+
+ irq = of_irq_get_byname(node, "eoc");
+ if (irq < 0) {
+ pr_err("Couldn't get eoc irq rc=%d\n", irq);
+ return irq;
+ }
+
+ rc = devm_request_threaded_irq(chip->dev, irq, NULL, handle_eoc,
+ IRQF_ONESHOT, "eoc", chip);
+ if (rc < 0) {
+ pr_err("Couldn't request irq %d rc=%d\n", irq, rc);
+ return rc;
+ }
+
+ indio_dev->dev.parent = chip->dev;
+ indio_dev->name = pdev->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &tadc_info;
+ indio_dev->channels = tadc_iio_chans;
+ indio_dev->num_channels = ARRAY_SIZE(tadc_iio_chans);
+
+ rc = devm_iio_device_register(chip->dev, indio_dev);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't register IIO device rc=%d\n", rc);
+
+ return rc;
+}
+
+static int tadc_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id tadc_match_table[] = {
+ { .compatible = "qcom,tadc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tadc_match_table);
+
+static struct platform_driver tadc_driver = {
+ .driver = {
+ .name = "qcom-tadc",
+ .of_match_table = tadc_match_table,
+ },
+ .probe = tadc_probe,
+ .remove = tadc_remove,
+};
+module_platform_driver(tadc_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. TADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 7c4a7c7c16e7..49df5e0afbfb 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1119,6 +1119,15 @@ config TOUCHSCREEN_FT5X06_PSENSOR
If unsure, say N.
+config TOUCHSCREEN_FT5X06_GESTURE
+ tristate "FocalTech gesture feature support"
+ depends on TOUCHSCREEN_FT5X06
+ help
+ Say Y here if you want to support ft5x06's gesture
+ feature.
+
+ If unsure, say N.
+
config TOUCHSCREEN_MSTAR21XX
tristate "Mstar touchscreens"
depends on I2C
@@ -1186,4 +1195,37 @@ config TOUCHSCREEN_FT5X06
To compile this driver as a module, choose M here: the
module will be called ft5x06_ts.
+config FT_SECURE_TOUCH
+ bool "Secure Touch support for Focaltech Touchscreen"
+ depends on TOUCHSCREEN_FT5X06
+ help
+ Say Y here if:
+ -the Focaltech touch driver is connected
+ -you want to enable secure touch for the Focaltech touch driver
+
+ If unsure, say N.
+
+config TOUCHSCREEN_IT7260_I2C
+ tristate "IT7260 Touchscreen Driver"
+ depends on I2C
+ help
+ Say Y here if you have an IT7260 Touchscreen Driver
+ connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called it7258_ts_i2c.
+
+config TOUCHSCREEN_GT9XX
+ bool "Goodix touchpanel GT9xx series"
+ depends on I2C
+ help
+ Say Y here if you have a Goodix GT9xx touchscreen.
+ Gt9xx controllers are multi touch controllers which can
+ report 5 touches at a time.
+
+ If unsure, say N.
+
+source "drivers/input/touchscreen/gt9xx/Kconfig"
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index e1777f11d77b..06953a69123f 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC) += imx6ul_tsc.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
+obj-$(CONFIG_TOUCHSCREEN_IT7260_I2C) += it7258_ts_i2c.o
obj-$(CONFIG_TOUCHSCREEN_IPROC) += bcm_iproc_tsc.o
obj-$(CONFIG_TOUCHSCREEN_LPC32XX) += lpc32xx_ts.o
obj-$(CONFIG_TOUCHSCREEN_MAX11801) += max11801_ts.o
@@ -98,3 +99,4 @@ obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o
obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o
obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023) += rohm_bu21023.o
obj-$(CONFIG_TOUCHSCREEN_MSTAR21XX) += msg21xx_ts.o
+obj-$(CONFIG_TOUCHSCREEN_GT9XX) += gt9xx/
diff --git a/drivers/input/touchscreen/ft5x06_ts.c b/drivers/input/touchscreen/ft5x06_ts.c
index beb86c9dea11..499ba692d53c 100644
--- a/drivers/input/touchscreen/ft5x06_ts.c
+++ b/drivers/input/touchscreen/ft5x06_ts.c
@@ -29,8 +29,13 @@
#include <linux/regulator/consumer.h>
#include <linux/firmware.h>
#include <linux/debugfs.h>
-#include <linux/sensors.h>
#include <linux/input/ft5x06_ts.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
#if defined(CONFIG_FB)
#include <linux/notifier.h>
@@ -42,6 +47,14 @@
#define FT_SUSPEND_LEVEL 1
#endif
+#if defined(CONFIG_FT_SECURE_TOUCH)
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+static irqreturn_t ft5x06_ts_interrupt(int irq, void *data);
+#endif
+
#define FT_DRIVER_VERSION 0x02
#define FT_META_REGS 3
@@ -74,18 +87,26 @@
#define FT_REG_FW_MIN_VER 0xB2
#define FT_REG_FW_SUB_MIN_VER 0xB3
-/* psensor register address*/
-#define FT_REG_PSENSOR_ENABLE 0xB0
-#define FT_REG_PSENSOR_STATUS 0x01
-
-/* psensor register bits*/
-#define FT_PSENSOR_ENABLE_MASK 0x01
-#define FT_PSENSOR_STATUS_NEAR 0xC0
-#define FT_PSENSOR_STATUS_FAR 0xE0
-#define FT_PSENSOR_FAR_TO_NEAR 0
-#define FT_PSENSOR_NEAR_TO_FAR 1
-#define FT_PSENSOR_ORIGINAL_STATE_FAR 1
-#define FT_PSENSOR_WAKEUP_TIMEOUT 100
+/* gesture register address*/
+#define FT_REG_GESTURE_ENABLE 0xD0
+#define FT_REG_GESTURE_OUTPUT 0xD3
+
+/* gesture register bits*/
+#define FT_GESTURE_DOUBLECLICK_COORD_X 100
+#define FT_GESTURE_DOUBLECLICK_COORD_Y 100
+#define FT_GESTURE_WAKEUP_TIMEOUT 500
+#define FT_GESTURE_DEFAULT_TRACKING_ID 0x0A
+#define FT_GESTURE_DOUBLECLICK_ID 0x24
+#define FT_GESTURE_POINTER_NUM_MAX 128
+#define FT_GESTURE_POINTER_SIZEOF 4
+#define FT_GESTURE_ID_FLAG_SIZE 1
+#define FT_GESTURE_POINTER_NUM_FLAG_SIZE 1
+/* 6 bytes are taken to mark which gesture is supported in firmware */
+#define FT_GESTURE_SET_FLAG_SIZE 6
+#define I2C_TRANSFER_MAX_BYTE 255
+#define FT_GESTURE_DATA_HEADER (FT_GESTURE_ID_FLAG_SIZE + \
+ FT_GESTURE_POINTER_NUM_FLAG_SIZE + \
+ FT_GESTURE_SET_FLAG_SIZE)
/* power register bits*/
#define FT_PMODE_ACTIVE 0x00
@@ -186,6 +207,8 @@
#define PINCTRL_STATE_SUSPEND "pmx_ts_suspend"
#define PINCTRL_STATE_RELEASE "pmx_ts_release"
+static irqreturn_t ft5x06_ts_interrupt(int irq, void *data);
+
enum {
FT_BLOADER_VERSION_LZ4 = 0,
FT_BLOADER_VERSION_Z7 = 1,
@@ -199,8 +222,17 @@ enum {
FT_FT5336_FAMILY_ID_0x14 = 0x14,
};
-#define FT_STORE_TS_INFO(buf, id, name, max_tch, group_id, fw_vkey_support, \
- fw_name, fw_maj, fw_min, fw_sub_min) \
+#define FT_STORE_TS_INFO(buf, id, fw_maj, fw_min, fw_sub_min) \
+ snprintf(buf, FT_INFO_MAX_LEN, \
+ "vendor name = Focaltech\n" \
+ "model = 0x%x\n" \
+ "fw_version = %d.%d.%d\n", \
+ id, fw_maj, fw_min, fw_sub_min)
+#define FT_TS_INFO_SYSFS_DIR_NAME "ts_info"
+static char *ts_info_buff;
+
+#define FT_STORE_TS_DBG_INFO(buf, id, name, max_tch, group_id, \
+ fw_vkey_support, fw_name, fw_maj, fw_min, fw_sub_min) \
snprintf(buf, FT_INFO_MAX_LEN, \
"controller\t= focaltech\n" \
"model\t\t= 0x%x\n" \
@@ -221,9 +253,10 @@ struct ft5x06_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
const struct ft5x06_ts_platform_data *pdata;
- struct ft5x06_psensor_platform_data *psensor_pdata;
+ struct ft5x06_gesture_platform_data *gesture_pdata;
struct regulator *vdd;
struct regulator *vcc_i2c;
+ struct mutex ft_clk_io_ctrl_mutex;
char fw_name[FT_FW_NAME_MAX_LEN];
bool loading_fw;
u8 family_id;
@@ -235,7 +268,9 @@ struct ft5x06_ts_data {
u32 tch_data_len;
u8 fw_ver[3];
u8 fw_vendor_id;
+ struct kobject *ts_info_kobj;
#if defined(CONFIG_FB)
+ struct work_struct fb_notify_work;
struct notifier_block fb_notif;
#elif defined(CONFIG_HAS_EARLYSUSPEND)
struct early_suspend early_suspend;
@@ -244,29 +279,258 @@ struct ft5x06_ts_data {
struct pinctrl_state *pinctrl_state_active;
struct pinctrl_state *pinctrl_state_suspend;
struct pinctrl_state *pinctrl_state_release;
+#if defined(CONFIG_FT_SECURE_TOUCH)
+ atomic_t st_enabled;
+ atomic_t st_pending_irqs;
+ struct completion st_powerdown;
+ struct completion st_irq_processed;
+ bool st_initialized;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+#endif
};
-static struct sensors_classdev __maybe_unused sensors_proximity_cdev = {
- .name = "ft5x06-proximity",
- .vendor = "FocalTech",
- .version = 1,
- .handle = SENSORS_PROXIMITY_HANDLE,
- .type = SENSOR_TYPE_PROXIMITY,
- .max_range = "5.0",
- .resolution = "5.0",
- .sensor_power = "0.1",
- .min_delay = 0,
- .fifo_reserved_event_count = 0,
- .fifo_max_event_count = 0,
- .enabled = 0,
- .delay_msec = 200,
- .sensors_enable = NULL,
- .sensors_poll_delay = NULL,
+static int ft5x06_ts_start(struct device *dev);
+static int ft5x06_ts_stop(struct device *dev);
+
+#if defined(CONFIG_FT_SECURE_TOUCH)
+static void ft5x06_secure_touch_init(struct ft5x06_ts_data *data)
+{
+ data->st_initialized = 0;
+
+ init_completion(&data->st_powerdown);
+ init_completion(&data->st_irq_processed);
+
+ /* Get clocks */
+ data->core_clk = devm_clk_get(&data->client->dev, "core_clk");
+ if (IS_ERR(data->core_clk)) {
+ data->core_clk = NULL;
+ dev_warn(&data->client->dev,
+ "%s: core_clk is not defined\n", __func__);
+ }
+
+ data->iface_clk = devm_clk_get(&data->client->dev, "iface_clk");
+ if (IS_ERR(data->iface_clk)) {
+ data->iface_clk = NULL;
+ dev_warn(&data->client->dev,
+ "%s: iface_clk is not defined", __func__);
+ }
+ data->st_initialized = 1;
+}
+
+static void ft5x06_secure_touch_notify(struct ft5x06_ts_data *data)
+{
+ sysfs_notify(&data->input_dev->dev.kobj, NULL, "secure_touch");
+}
+
+static irqreturn_t ft5x06_filter_interrupt(struct ft5x06_ts_data *data)
+{
+ if (atomic_read(&data->st_enabled)) {
+ if (atomic_cmpxchg(&data->st_pending_irqs, 0, 1) == 0) {
+ reinit_completion(&data->st_irq_processed);
+ ft5x06_secure_touch_notify(data);
+ wait_for_completion_interruptible(
+ &data->st_irq_processed);
+ }
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/*
+ * 'blocking' variable will have value 'true' when we want to prevent the driver
+ * from accessing the xPU/SMMU protected HW resources while the session is
+ * active.
+ */
+static void ft5x06_secure_touch_stop(struct ft5x06_ts_data *data, bool blocking)
+{
+ if (atomic_read(&data->st_enabled)) {
+ atomic_set(&data->st_pending_irqs, -1);
+ ft5x06_secure_touch_notify(data);
+ if (blocking)
+ wait_for_completion_interruptible(
+ &data->st_powerdown);
+ }
+}
+
+static int ft5x06_clk_prepare_enable(struct ft5x06_ts_data *data)
+{
+ int ret;
+
+ ret = clk_prepare_enable(data->iface_clk);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "error on clk_prepare_enable(iface_clk):%d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(data->core_clk);
+ if (ret) {
+ clk_disable_unprepare(data->iface_clk);
+ dev_err(&data->client->dev,
+ "error clk_prepare_enable(core_clk):%d\n", ret);
+ }
+ return ret;
+}
+
+static void ft5x06_clk_disable_unprepare(struct ft5x06_ts_data *data)
+{
+ clk_disable_unprepare(data->core_clk);
+ clk_disable_unprepare(data->iface_clk);
+}
+
+static int ft5x06_bus_get(struct ft5x06_ts_data *data)
+{
+ int retval;
+
+ mutex_lock(&data->ft_clk_io_ctrl_mutex);
+ retval = pm_runtime_get_sync(data->client->adapter->dev.parent);
+ if (retval >= 0 && data->core_clk != NULL && data->iface_clk != NULL) {
+ retval = ft5x06_clk_prepare_enable(data);
+ if (retval)
+ pm_runtime_put_sync(data->client->adapter->dev.parent);
+ }
+ mutex_unlock(&data->ft_clk_io_ctrl_mutex);
+ return retval;
+}
+
+static void ft5x06_bus_put(struct ft5x06_ts_data *data)
+{
+ mutex_lock(&data->ft_clk_io_ctrl_mutex);
+ if (data->core_clk != NULL && data->iface_clk != NULL)
+ ft5x06_clk_disable_unprepare(data);
+ pm_runtime_put_sync(data->client->adapter->dev.parent);
+ mutex_unlock(&data->ft_clk_io_ctrl_mutex);
+}
+
+static ssize_t ft5x06_secure_touch_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d", atomic_read(&data->st_enabled));
+}
+
+/*
+ * Accept only "0" and "1" valid values.
+ * "0" will reset the st_enabled flag, then wake up the reading process and
+ * the interrupt handler.
+ * The bus driver is notified via pm_runtime that it is not required to stay
+ * awake anymore.
+ * It will also make sure the queue of events is emptied in the controller,
+ * in case a touch happened in between the secure touch being disabled and
+ * the local ISR being ungated.
+ * "1" will set the st_enabled flag and clear the st_pending_irqs flag.
+ * The bus driver is requested via pm_runtime to stay awake.
+ */
+static ssize_t ft5x06_secure_touch_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+ unsigned long value;
+ int err = 0;
+
+ if (count > 2)
+ return -EINVAL;
+ err = kstrtoul(buf, 10, &value);
+ if (err != 0)
+ return err;
+
+ if (!data->st_initialized)
+ return -EIO;
+
+ err = count;
+ switch (value) {
+ case 0:
+ if (atomic_read(&data->st_enabled) == 0)
+ break;
+ ft5x06_bus_put(data);
+ atomic_set(&data->st_enabled, 0);
+ ft5x06_secure_touch_notify(data);
+ complete(&data->st_irq_processed);
+ ft5x06_ts_interrupt(data->client->irq, data);
+ complete(&data->st_powerdown);
+ break;
+
+ case 1:
+ if (atomic_read(&data->st_enabled)) {
+ err = -EBUSY;
+ break;
+ }
+ synchronize_irq(data->client->irq);
+ if (ft5x06_bus_get(data) < 0) {
+ dev_err(&data->client->dev, "ft5x06_bus_get failed\n");
+ err = -EIO;
+ break;
+ }
+ reinit_completion(&data->st_powerdown);
+ reinit_completion(&data->st_irq_processed);
+ atomic_set(&data->st_enabled, 1);
+ atomic_set(&data->st_pending_irqs, 0);
+ break;
+
+ default:
+ dev_err(&data->client->dev, "unsupported value: %lu\n", value);
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+/*
+ * This function returns whether there are pending interrupts, or
+ * other error conditions that need to be signaled to the userspace library,
+ * according to the following logic:
+ * - st_enabled is 0 if secure touch is not enabled, returning -EBADF
+ * - st_pending_irqs is -1 to signal that secure touch is being stopped,
+ * returning -EINVAL
+ * - st_pending_irqs is 1 to signal that there is a pending irq, returning
+ * the value "1" to the sysfs read operation
+ * - st_pending_irqs is 0 (only remaining case left) if the pending interrupt
+ * has been processed, so the interrupt handler can be allowed to continue.
+ */
+static ssize_t ft5x06_secure_touch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+ int val = 0;
+
+ if (atomic_read(&data->st_enabled) == 0)
+ return -EBADF;
+ if (atomic_cmpxchg(&data->st_pending_irqs, -1, 0) == -1)
+ return -EINVAL;
+ if (atomic_cmpxchg(&data->st_pending_irqs, 1, 0) == 1)
+ val = 1;
+ else
+ complete(&data->st_irq_processed);
+ return scnprintf(buf, PAGE_SIZE, "%u", val);
+}
+#else
+static void ft5x06_secure_touch_init(struct ft5x06_ts_data *data)
+{
+}
+static irqreturn_t ft5x06_filter_interrupt(struct ft5x06_ts_data *data)
+{
+ return IRQ_NONE;
+}
+static void ft5x06_secure_touch_stop(struct ft5x06_ts_data *data, bool blocking)
+{
+}
+#endif
+
+static struct device_attribute attrs[] = {
+#if defined(CONFIG_FT_SECURE_TOUCH)
+ __ATTR(secure_touch_enable, (S_IRUGO | S_IWUSR | S_IWGRP),
+ ft5x06_secure_touch_enable_show,
+ ft5x06_secure_touch_enable_store),
+ __ATTR(secure_touch, S_IRUGO,
+ ft5x06_secure_touch_show, NULL),
+#endif
};
-static inline bool ft5x06_psensor_support_enabled(void)
+static inline bool ft5x06_gesture_support_enabled(void)
{
- return config_enabled(CONFIG_TOUCHSCREEN_FT5X06_PSENSOR);
+ return config_enabled(CONFIG_TOUCHSCREEN_FT5X06_GESTURE);
}
static int ft5x06_i2c_read(struct i2c_client *client, char *writebuf,
@@ -344,79 +608,196 @@ static int ft5x0x_read_reg(struct i2c_client *client, u8 addr, u8 *val)
return ft5x06_i2c_read(client, &addr, 1, val, 1);
}
-#ifdef CONFIG_TOUCHSCREEN_FT5X06_PSENSOR
-static void ft5x06_psensor_enable(struct ft5x06_ts_data *data, int enable)
+#ifdef CONFIG_TOUCHSCREEN_FT5X06_GESTURE
+static ssize_t ft5x06_gesture_enable_to_set_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- u8 state;
- int ret = -1;
+ struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ data->gesture_pdata->gesture_enable_to_set);
+}
+
+static ssize_t ft5x06_gesture_enable_to_set_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
+
+ if (data->suspended)
+ return -EINVAL;
- if (data->client == NULL)
- return;
+ ret = kstrtoul(buf, 16, &value);
+ if (ret < 0) {
+ dev_err(dev, "%s:kstrtoul failed, ret=0x%x\n",
+ __func__, ret);
+ return ret;
+ }
- ft5x0x_read_reg(data->client, FT_REG_PSENSOR_ENABLE, &state);
- if (enable)
- state |= FT_PSENSOR_ENABLE_MASK;
+ if (value == 1)
+ data->gesture_pdata->gesture_enable_to_set = 1;
else
- state &= ~FT_PSENSOR_ENABLE_MASK;
+ data->gesture_pdata->gesture_enable_to_set = 0;
+ return size;
+}
- ret = ft5x0x_write_reg(data->client, FT_REG_PSENSOR_ENABLE, state);
- if (ret < 0)
+static DEVICE_ATTR(enable, 0664,
+ ft5x06_gesture_enable_to_set_show,
+ ft5x06_gesture_enable_to_set_store);
+
+static int ft5x06_entry_pocket(struct device *dev)
+{
+ return ft5x06_ts_stop(dev);
+}
+
+static int ft5x06_leave_pocket(struct device *dev)
+{
+ struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+ int err;
+
+ ft5x06_ts_start(dev);
+ ft5x0x_write_reg(data->client, FT_REG_GESTURE_ENABLE, 1);
+ err = enable_irq_wake(data->client->irq);
+ if (err)
dev_err(&data->client->dev,
- "write psensor switch command failed\n");
+ "%s: set_irq_wake failed\n", __func__);
+ data->suspended = true;
+
+ return err;
}
-static int ft5x06_psensor_enable_set(struct sensors_classdev *sensors_cdev,
- unsigned int enable)
+static ssize_t gesture_in_pocket_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct ft5x06_psensor_platform_data *psensor_pdata =
- container_of(sensors_cdev,
- struct ft5x06_psensor_platform_data, ps_cdev);
- struct ft5x06_ts_data *data = psensor_pdata->data;
- struct input_dev *input_dev = data->psensor_pdata->input_psensor_dev;
+ struct ft5x06_ts_data *data = dev_get_drvdata(dev);
- mutex_lock(&input_dev->mutex);
- ft5x06_psensor_enable(data, enable);
- psensor_pdata->tp_psensor_data = FT_PSENSOR_ORIGINAL_STATE_FAR;
- if (enable)
- psensor_pdata->tp_psensor_opened = 1;
- else
- psensor_pdata->tp_psensor_opened = 0;
- mutex_unlock(&input_dev->mutex);
- return enable;
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ data->gesture_pdata->in_pocket);
}
-static int ft5x06_read_tp_psensor_data(struct ft5x06_ts_data *data)
+static ssize_t gesture_in_pocket_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
- u8 psensor_status;
- char tmp;
- int ret = 0;
+ struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+ unsigned long value = 0;
+ int ret;
- ft5x0x_read_reg(data->client,
- FT_REG_PSENSOR_STATUS, &psensor_status);
+ ret = kstrtoul(buf, 16, &value);
+ if (ret < 0) {
+ dev_err(dev, "%s:kstrtoul failed, ret=0x%x\n",
+ __func__, ret);
+ return ret;
+ }
- tmp = data->psensor_pdata->tp_psensor_data;
- if (psensor_status == FT_PSENSOR_STATUS_NEAR)
- data->psensor_pdata->tp_psensor_data =
- FT_PSENSOR_FAR_TO_NEAR;
- else if (psensor_status == FT_PSENSOR_STATUS_FAR)
- data->psensor_pdata->tp_psensor_data =
- FT_PSENSOR_NEAR_TO_FAR;
+ if (value == 1 && data->gesture_pdata->in_pocket == 0) {
+ data->gesture_pdata->in_pocket = 1;
+ ft5x06_entry_pocket(dev);
+ } else if (value == 0 && data->gesture_pdata->in_pocket == 1) {
+ ft5x06_leave_pocket(dev);
+ data->gesture_pdata->in_pocket = 0;
+ }
+ return size;
+}
- if (tmp != data->psensor_pdata->tp_psensor_data) {
- dev_info(&data->client->dev,
- "%s sensor data changed\n", __func__);
- ret = 1;
+static DEVICE_ATTR(pocket, 0664,
+ gesture_in_pocket_mode_show,
+ gesture_in_pocket_mode_store);
+
+static int ft5x06_report_gesture_doubleclick(struct input_dev *ip_dev)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ input_mt_slot(ip_dev, FT_GESTURE_DEFAULT_TRACKING_ID);
+ input_mt_report_slot_state(ip_dev, MT_TOOL_FINGER, 1);
+ input_report_abs(ip_dev, ABS_MT_POSITION_X,
+ FT_GESTURE_DOUBLECLICK_COORD_X);
+ input_report_abs(ip_dev, ABS_MT_POSITION_Y,
+ FT_GESTURE_DOUBLECLICK_COORD_Y);
+ input_mt_report_pointer_emulation(ip_dev, false);
+ input_sync(ip_dev);
+ input_mt_slot(ip_dev, FT_GESTURE_DEFAULT_TRACKING_ID);
+ input_mt_report_slot_state(ip_dev, MT_TOOL_FINGER, 0);
+ input_mt_report_pointer_emulation(ip_dev, false);
+ input_sync(ip_dev);
}
- return ret;
+ return 0;
}
-#else
-static int ft5x06_psensor_enable_set(struct sensors_classdev *sensors_cdev,
- unsigned int enable)
+
+static int ft5x06_report_gesture(struct i2c_client *i2c_client,
+ struct input_dev *ip_dev)
{
- return enable;
+ int i, temp, gesture_data_size;
+ int gesture_coord_x, gesture_coord_y;
+ int ret = -1;
+ short pointnum = 0;
+ unsigned char buf[FT_GESTURE_POINTER_NUM_MAX *
+ FT_GESTURE_POINTER_SIZEOF + FT_GESTURE_DATA_HEADER];
+
+ buf[0] = FT_REG_GESTURE_OUTPUT;
+ ret = ft5x06_i2c_read(i2c_client, buf, 1,
+ buf, FT_GESTURE_DATA_HEADER);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "%s read touchdata failed.\n",
+ __func__);
+ return ret;
+ }
+
+ /* FW support doubleclick */
+ if (buf[0] == FT_GESTURE_DOUBLECLICK_ID) {
+ ft5x06_report_gesture_doubleclick(ip_dev);
+ return 0;
+ }
+
+ pointnum = (short)(buf[1]) & 0xff;
+ gesture_data_size = pointnum * FT_GESTURE_POINTER_SIZEOF +
+ FT_GESTURE_DATA_HEADER;
+ buf[0] = FT_REG_GESTURE_OUTPUT;
+ temp = gesture_data_size / I2C_TRANSFER_MAX_BYTE;
+ for (i = 0; i < temp; i++)
+ ret = ft5x06_i2c_read(i2c_client, buf, ((i == 0) ? 1 : 0),
+ buf + I2C_TRANSFER_MAX_BYTE * i, I2C_TRANSFER_MAX_BYTE);
+ ret = ft5x06_i2c_read(i2c_client, buf, ((temp == 0) ? 1 : 0),
+ buf + I2C_TRANSFER_MAX_BYTE * temp,
+ gesture_data_size - I2C_TRANSFER_MAX_BYTE * temp);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "%s read touchdata failed.\n",
+ __func__);
+ return ret;
+ }
+
+ for (i = 0; i < pointnum; i++) {
+ gesture_coord_x = (((s16) buf[FT_GESTURE_DATA_HEADER +
+ (FT_GESTURE_POINTER_SIZEOF * i)]) & 0x0F) << 8 |
+ (((s16) buf[FT_GESTURE_DATA_HEADER + 1 +
+ (FT_GESTURE_POINTER_SIZEOF * i)]) & 0xFF);
+ gesture_coord_y = (((s16) buf[FT_GESTURE_DATA_HEADER + 2 +
+ (FT_GESTURE_POINTER_SIZEOF * i)]) & 0x0F) << 8 |
+ (((s16) buf[FT_GESTURE_DATA_HEADER + 3 +
+ (FT_GESTURE_POINTER_SIZEOF * i)]) & 0xFF);
+ input_mt_slot(ip_dev, FT_GESTURE_DEFAULT_TRACKING_ID);
+ input_mt_report_slot_state(ip_dev, MT_TOOL_FINGER, 1);
+ input_report_abs(ip_dev, ABS_MT_POSITION_X, gesture_coord_x);
+ input_report_abs(ip_dev, ABS_MT_POSITION_Y, gesture_coord_y);
+ input_mt_report_pointer_emulation(ip_dev, false);
+ input_sync(ip_dev);
+ }
+ input_mt_slot(ip_dev, FT_GESTURE_DEFAULT_TRACKING_ID);
+ input_mt_report_slot_state(ip_dev, MT_TOOL_FINGER, 0);
+ input_mt_report_pointer_emulation(ip_dev, false);
+ input_sync(ip_dev);
+
+ return 0;
}
+#else
+static DEVICE_ATTR(pocket, 0664, NULL, NULL);
+static DEVICE_ATTR(enable, 0664, NULL, NULL);
-static int ft5x06_read_tp_psensor_data(struct ft5x06_ts_data *data)
+static int ft5x06_report_gesture(struct i2c_client *i2c_client,
+ struct input_dev *ip_dev)
{
return 0;
}
@@ -465,7 +846,7 @@ static irqreturn_t ft5x06_ts_interrupt(int irq, void *dev_id)
struct input_dev *ip_dev;
int rc, i;
u32 id, x, y, status, num_touches;
- u8 reg, *buf;
+ u8 reg, *buf, gesture_is_active;
bool update_input = false;
if (!data) {
@@ -473,28 +854,24 @@ static irqreturn_t ft5x06_ts_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+ if (ft5x06_filter_interrupt(data) == IRQ_HANDLED)
+ return IRQ_HANDLED;
+
ip_dev = data->input_dev;
buf = data->tch_data;
- if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support &&
- data->psensor_pdata->tp_psensor_opened) {
- rc = ft5x06_read_tp_psensor_data(data);
- if (rc) {
- if (data->suspended)
- pm_wakeup_event(&data->client->dev,
- FT_PSENSOR_WAKEUP_TIMEOUT);
- input_report_abs(data->psensor_pdata->input_psensor_dev,
- ABS_DISTANCE,
- data->psensor_pdata->tp_psensor_data);
- input_sync(data->psensor_pdata->input_psensor_dev);
- if (data->suspended)
- return IRQ_HANDLED;
- }
- if (data->suspended)
+ if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support) {
+ ft5x0x_read_reg(data->client, FT_REG_GESTURE_ENABLE,
+ &gesture_is_active);
+ if (gesture_is_active) {
+ pm_wakeup_event(&(data->client->dev),
+ FT_GESTURE_WAKEUP_TIMEOUT);
+ ft5x06_report_gesture(data->client, ip_dev);
return IRQ_HANDLED;
+ }
}
- /**
+ /*
* Read touch data start from register FT_REG_DEV_MODE.
* The touch x/y value start from FT_TOUCH_X_H/L_POS and
* FT_TOUCH_Y_H/L_POS in buf.
@@ -507,6 +884,10 @@ static irqreturn_t ft5x06_ts_interrupt(int irq, void *dev_id)
}
for (i = 0; i < data->pdata->num_max_touches; i++) {
+ /*
+ * Getting the finger ID of the touch event in case of
+ * multiple touch events
+ */
id = (buf[FT_TOUCH_ID_POS + FT_ONE_TCH_LEN * i]) >> 4;
if (id >= FT_MAX_ID)
break;
@@ -784,32 +1165,77 @@ err_pinctrl_get:
}
#ifdef CONFIG_PM
-static int ft5x06_ts_suspend(struct device *dev)
+static int ft5x06_ts_start(struct device *dev)
{
struct ft5x06_ts_data *data = dev_get_drvdata(dev);
- char txbuf[2], i;
int err;
- if (data->loading_fw) {
- dev_info(dev, "Firmware loading in process...\n");
- return 0;
+ if (data->pdata->power_on) {
+ err = data->pdata->power_on(true);
+ if (err) {
+ dev_err(dev, "power on failed");
+ return err;
+ }
+ } else {
+ err = ft5x06_power_on(data, true);
+ if (err) {
+ dev_err(dev, "power on failed");
+ return err;
+ }
}
- if (data->suspended) {
- dev_info(dev, "Already in suspend state\n");
- return 0;
+ if (data->ts_pinctrl) {
+ err = pinctrl_select_state(data->ts_pinctrl,
+ data->pinctrl_state_active);
+ if (err < 0)
+ dev_err(dev, "Cannot get active pinctrl state\n");
}
- if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support &&
- device_may_wakeup(dev) &&
- data->psensor_pdata->tp_psensor_opened) {
- err = enable_irq_wake(data->client->irq);
+ err = ft5x06_gpio_configure(data, true);
+ if (err < 0) {
+ dev_err(&data->client->dev,
+ "failed to put gpios in resue state\n");
+ goto err_gpio_configuration;
+ }
+
+ if (gpio_is_valid(data->pdata->reset_gpio)) {
+ gpio_set_value_cansleep(data->pdata->reset_gpio, 0);
+ msleep(data->pdata->hard_rst_dly);
+ gpio_set_value_cansleep(data->pdata->reset_gpio, 1);
+ }
+
+ msleep(data->pdata->soft_rst_dly);
+
+ enable_irq(data->client->irq);
+ data->suspended = false;
+
+ return 0;
+
+err_gpio_configuration:
+ if (data->ts_pinctrl) {
+ err = pinctrl_select_state(data->ts_pinctrl,
+ data->pinctrl_state_suspend);
+ if (err < 0)
+ dev_err(dev, "Cannot get suspend pinctrl state\n");
+ }
+ if (data->pdata->power_on) {
+ err = data->pdata->power_on(false);
if (err)
- dev_err(&data->client->dev,
- "%s: set_irq_wake failed\n", __func__);
- data->suspended = true;
- return err;
+ dev_err(dev, "power off failed");
+ } else {
+ err = ft5x06_power_on(data, false);
+ if (err)
+ dev_err(dev, "power off failed");
}
+ return err;
+}
+
+static int ft5x06_ts_stop(struct device *dev)
+{
+ struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+ char txbuf[2];
+ int i, err;
+
disable_irq(data->client->irq);
/* release all touches */
@@ -884,87 +1310,83 @@ pwr_off_fail:
return err;
}
-static int ft5x06_ts_resume(struct device *dev)
+static int ft5x06_ts_suspend(struct device *dev)
{
struct ft5x06_ts_data *data = dev_get_drvdata(dev);
int err;
- if (!data->suspended) {
- dev_dbg(dev, "Already in awake state\n");
+ if (data->loading_fw) {
+ dev_info(dev, "Firmware loading in process...\n");
+ return 0;
+ }
+
+ if (data->suspended) {
+ dev_info(dev, "Already in suspend state\n");
return 0;
}
- if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support &&
+ ft5x06_secure_touch_stop(data, true);
+
+ if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support &&
device_may_wakeup(dev) &&
- data->psensor_pdata->tp_psensor_opened) {
- err = disable_irq_wake(data->client->irq);
+ data->gesture_pdata->gesture_enable_to_set) {
+
+ ft5x0x_write_reg(data->client, FT_REG_GESTURE_ENABLE, 1);
+ err = enable_irq_wake(data->client->irq);
if (err)
dev_err(&data->client->dev,
- "%s: disable_irq_wake failed\n",
- __func__);
- data->suspended = false;
+ "%s: set_irq_wake failed\n", __func__);
+ data->suspended = true;
return err;
}
- if (data->pdata->power_on) {
- err = data->pdata->power_on(true);
- if (err) {
- dev_err(dev, "power on failed");
- return err;
- }
- } else {
- err = ft5x06_power_on(data, true);
- if (err) {
- dev_err(dev, "power on failed");
- return err;
- }
- }
+ return ft5x06_ts_stop(dev);
+}
- if (data->ts_pinctrl) {
- err = pinctrl_select_state(data->ts_pinctrl,
- data->pinctrl_state_active);
- if (err < 0)
- dev_err(dev, "Cannot get active pinctrl state\n");
- }
+static int ft5x06_ts_resume(struct device *dev)
+{
+ struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+ int err;
- err = ft5x06_gpio_configure(data, true);
- if (err < 0) {
- dev_err(&data->client->dev,
- "failed to put gpios in resue state\n");
- goto err_gpio_configuration;
+ if (!data->suspended) {
+ dev_dbg(dev, "Already in awake state\n");
+ return 0;
}
- if (gpio_is_valid(data->pdata->reset_gpio)) {
- gpio_set_value_cansleep(data->pdata->reset_gpio, 0);
- msleep(data->pdata->hard_rst_dly);
- gpio_set_value_cansleep(data->pdata->reset_gpio, 1);
- }
+ ft5x06_secure_touch_stop(data, true);
- msleep(data->pdata->soft_rst_dly);
+ if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support &&
+ device_may_wakeup(dev) &&
+ !(data->gesture_pdata->in_pocket) &&
+ data->gesture_pdata->gesture_enable_to_set) {
- enable_irq(data->client->irq);
+ ft5x0x_write_reg(data->client, FT_REG_GESTURE_ENABLE, 0);
+ err = disable_irq_wake(data->client->irq);
+ if (err)
+ dev_err(dev, "%s: disable_irq_wake failed\n",
+ __func__);
+ data->suspended = false;
+ return err;
+ }
- data->suspended = false;
+ err = ft5x06_ts_start(dev);
+ if (err < 0)
+ return err;
- return 0;
+ if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support &&
+ device_may_wakeup(dev) &&
+ data->gesture_pdata->in_pocket &&
+ data->gesture_pdata->gesture_enable_to_set) {
-err_gpio_configuration:
- if (data->ts_pinctrl) {
- err = pinctrl_select_state(data->ts_pinctrl,
- data->pinctrl_state_suspend);
- if (err < 0)
- dev_err(dev, "Cannot get suspend pinctrl state\n");
- }
- if (data->pdata->power_on) {
- err = data->pdata->power_on(false);
- if (err)
- dev_err(dev, "power off failed");
- } else {
- err = ft5x06_power_on(data, false);
+ ft5x0x_write_reg(data->client, FT_REG_GESTURE_ENABLE, 0);
+ err = disable_irq_wake(data->client->irq);
if (err)
- dev_err(dev, "power off failed");
+ dev_err(dev, "%s: disable_irq_wake failed\n",
+ __func__);
+ data->suspended = false;
+ data->gesture_pdata->in_pocket = 0;
}
- return err;
+ return 0;
}
static const struct dev_pm_ops ft5x06_ts_pm_ops = {
@@ -988,6 +1410,13 @@ static int ft5x06_ts_resume(struct device *dev)
#endif
#if defined(CONFIG_FB)
+static void fb_notify_resume_work(struct work_struct *work)
+{
+ struct ft5x06_ts_data *ft5x06_data =
+ container_of(work, struct ft5x06_ts_data, fb_notify_work);
+ ft5x06_ts_resume(&ft5x06_data->client->dev);
+}
+
static int fb_notifier_callback(struct notifier_block *self,
unsigned long event, void *data)
{
@@ -996,13 +1425,27 @@ static int fb_notifier_callback(struct notifier_block *self,
struct ft5x06_ts_data *ft5x06_data =
container_of(self, struct ft5x06_ts_data, fb_notif);
- if (evdata && evdata->data && event == FB_EVENT_BLANK &&
- ft5x06_data && ft5x06_data->client) {
+ if (evdata && evdata->data && ft5x06_data && ft5x06_data->client) {
blank = evdata->data;
- if (*blank == FB_BLANK_UNBLANK)
- ft5x06_ts_resume(&ft5x06_data->client->dev);
- else if (*blank == FB_BLANK_POWERDOWN)
- ft5x06_ts_suspend(&ft5x06_data->client->dev);
+ if (ft5x06_data->pdata->resume_in_workqueue) {
+ if (event == FB_EARLY_EVENT_BLANK &&
+ *blank == FB_BLANK_UNBLANK)
+ schedule_work(&ft5x06_data->fb_notify_work);
+ else if (event == FB_EVENT_BLANK &&
+ *blank == FB_BLANK_POWERDOWN) {
+ flush_work(&ft5x06_data->fb_notify_work);
+ ft5x06_ts_suspend(&ft5x06_data->client->dev);
+ }
+ } else {
+ if (event == FB_EVENT_BLANK) {
+ if (*blank == FB_BLANK_UNBLANK)
+ ft5x06_ts_resume(
+ &ft5x06_data->client->dev);
+ else if (*blank == FB_BLANK_POWERDOWN)
+ ft5x06_ts_suspend(
+ &ft5x06_data->client->dev);
+ }
+ }
}
return 0;
@@ -1014,6 +1457,13 @@ static void ft5x06_ts_early_suspend(struct early_suspend *handler)
struct ft5x06_ts_data,
early_suspend);
+ /*
+ * During early suspend/late resume, the driver doesn't access xPU/SMMU
+ * protected HW resources. So, there is no compelling need to block,
+ * but notifying the userspace that a power event has occurred is
+ * enough. Hence 'blocking' variable can be set to false.
+ */
+ ft5x06_secure_touch_stop(data, false);
ft5x06_ts_suspend(&data->client->dev);
}
@@ -1023,6 +1473,7 @@ static void ft5x06_ts_late_resume(struct early_suspend *handler)
struct ft5x06_ts_data,
early_suspend);
+ ft5x06_secure_touch_stop(data, false);
ft5x06_ts_resume(&data->client->dev);
}
#endif
@@ -1339,11 +1790,13 @@ static int ft5x06_fw_upgrade(struct device *dev, bool force)
ft5x06_update_fw_ver(data);
- FT_STORE_TS_INFO(data->ts_info, data->family_id, data->pdata->name,
+ FT_STORE_TS_DBG_INFO(data->ts_info, data->family_id, data->pdata->name,
data->pdata->num_max_touches, data->pdata->group_id,
data->pdata->fw_vkey_support ? "yes" : "no",
data->pdata->fw_name, data->fw_ver[0],
data->fw_ver[1], data->fw_ver[2]);
+ FT_STORE_TS_INFO(ts_info_buff, data->family_id, data->fw_ver[0],
+ data->fw_ver[1], data->fw_ver[2]);
rel_fw:
release_firmware(fw);
return rc;
@@ -1446,6 +1899,14 @@ static ssize_t ft5x06_fw_name_store(struct device *dev,
static DEVICE_ATTR(fw_name, 0664, ft5x06_fw_name_show, ft5x06_fw_name_store);
+static ssize_t ts_info_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ strlcpy(buf, ts_info_buff, FT_INFO_MAX_LEN);
+ return strnlen(buf, FT_INFO_MAX_LEN);
+}
+static struct kobj_attribute ts_info_attr = __ATTR_RO(ts_info);
+
static bool ft5x06_debug_addr_is_valid(int addr)
{
if (addr < 0 || addr > 0xFF) {
@@ -1749,8 +2210,12 @@ static int ft5x06_parse_dt(struct device *dev,
pdata->ignore_id_check = of_property_read_bool(np,
"focaltech,ignore-id-check");
- pdata->psensor_support = of_property_read_bool(np,
- "focaltech,psensor-support");
+ pdata->gesture_support = of_property_read_bool(np,
+ "focaltech,gesture-support");
+
+ pdata->resume_in_workqueue = of_property_read_bool(np,
+ "focaltech,resume-in-workqueue");
+
rc = of_property_read_u32(np, "focaltech,family-id", &temp_val);
if (!rc)
pdata->family_id = temp_val;
@@ -1786,14 +2251,13 @@ static int ft5x06_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ft5x06_ts_platform_data *pdata;
- struct ft5x06_psensor_platform_data *psensor_pdata;
+ struct ft5x06_gesture_platform_data *gesture_pdata;
struct ft5x06_ts_data *data;
struct input_dev *input_dev;
- struct input_dev *psensor_input_dev;
struct dentry *temp;
u8 reg_value;
u8 reg_addr;
- int err, len;
+ int err, len, retval, attr_count;
if (client->dev.of_node) {
pdata = devm_kzalloc(&client->dev,
@@ -1871,7 +2335,8 @@ static int ft5x06_ts_probe(struct i2c_client *client,
err = input_register_device(input_dev);
if (err) {
dev_err(&client->dev, "Input device registration failed\n");
- goto free_inputdev;
+ input_free_device(input_dev);
+ return err;
}
if (pdata->power_init) {
@@ -1946,6 +2411,10 @@ static int ft5x06_ts_probe(struct i2c_client *client,
err = request_threaded_irq(client->irq, NULL,
ft5x06_ts_interrupt,
+ /*
+ * the interrupt trigger mode will be set in Device Tree with property
+ * "interrupts", so here we just need to set the flag IRQF_ONESHOT
+ */
IRQF_ONESHOT,
client->dev.driver->name, data);
if (err) {
@@ -1953,54 +2422,55 @@ static int ft5x06_ts_probe(struct i2c_client *client,
goto free_gpio;
}
- if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support) {
+ if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support) {
device_init_wakeup(&client->dev, 1);
- psensor_pdata = devm_kzalloc(&client->dev,
- sizeof(struct ft5x06_psensor_platform_data),
+ gesture_pdata = devm_kzalloc(&client->dev,
+ sizeof(struct ft5x06_gesture_platform_data),
GFP_KERNEL);
- if (!psensor_pdata) {
+ if (!gesture_pdata) {
dev_err(&client->dev, "Failed to allocate memory\n");
- goto irq_free;
+ goto free_gesture_dev;
}
- data->psensor_pdata = psensor_pdata;
-
- psensor_input_dev = input_allocate_device();
- if (!psensor_input_dev) {
- dev_err(&data->client->dev,
- "Failed to allocate device\n");
- goto free_psensor_pdata;
+ data->gesture_pdata = gesture_pdata;
+ gesture_pdata->data = data;
+
+ gesture_pdata->gesture_class =
+ class_create(THIS_MODULE, "gesture");
+ if (IS_ERR(gesture_pdata->gesture_class)) {
+ err = PTR_ERR(gesture_pdata->gesture_class);
+ dev_err(&client->dev, "Failed to create class.\n");
+ goto free_gesture_pdata;
}
- __set_bit(EV_ABS, psensor_input_dev->evbit);
- input_set_abs_params(psensor_input_dev,
- ABS_DISTANCE, 0, 1, 0, 0);
- psensor_input_dev->name = "proximity";
- psensor_input_dev->id.bustype = BUS_I2C;
- psensor_input_dev->dev.parent = &data->client->dev;
- data->psensor_pdata->input_psensor_dev = psensor_input_dev;
+ gesture_pdata->dev = device_create(gesture_pdata->gesture_class,
+ NULL, 0, NULL, "gesture_ft5x06");
+ if (IS_ERR(gesture_pdata->dev)) {
+ err = PTR_ERR(gesture_pdata->dev);
+ dev_err(&client->dev, "Failed to create device.\n");
+ goto free_gesture_class;
+ }
- err = input_register_device(psensor_input_dev);
+ dev_set_drvdata(gesture_pdata->dev, data);
+ err = device_create_file(gesture_pdata->dev,
+ &dev_attr_enable);
if (err) {
- dev_err(&data->client->dev,
- "Unable to register device, err=%d\n", err);
- goto free_psensor_input_dev;
+ dev_err(gesture_pdata->dev,
+ "sys file creation failed\n");
+ goto free_gesture_dev;
+ }
+ err = device_create_file(gesture_pdata->dev,
+ &dev_attr_pocket);
+ if (err) {
+ dev_err(gesture_pdata->dev,
+ "sys file creation failed\n");
+ goto free_enable_sys;
}
-
- psensor_pdata->ps_cdev = sensors_proximity_cdev;
- psensor_pdata->ps_cdev.sensors_enable =
- ft5x06_psensor_enable_set;
- psensor_pdata->data = data;
-
- err = sensors_classdev_register(&client->dev,
- &psensor_pdata->ps_cdev);
- if (err)
- goto unregister_psensor_input_device;
}
err = device_create_file(&client->dev, &dev_attr_fw_name);
if (err) {
dev_err(&client->dev, "sys file creation failed\n");
- goto free_psensor_class_sysfs;
+ goto free_pocket_sys;
}
err = device_create_file(&client->dev, &dev_attr_update_fw);
@@ -2074,16 +2544,53 @@ static int ft5x06_ts_probe(struct i2c_client *client,
dev_dbg(&client->dev, "touch threshold = %d\n", reg_value * 4);
+	/* Create the touch-panel info kobject */
+ data->ts_info_kobj = kobject_create_and_add(FT_TS_INFO_SYSFS_DIR_NAME,
+ kernel_kobj);
+ if (!data->ts_info_kobj) {
+ dev_err(&client->dev, "kobject creation failed.\n");
+ } else {
+ err = sysfs_create_file(data->ts_info_kobj, &ts_info_attr.attr);
+ if (err) {
+ kobject_put(data->ts_info_kobj);
+ dev_err(&client->dev, "sysfs creation failed.\n");
+ } else {
+ ts_info_buff = devm_kzalloc(&client->dev,
+ FT_INFO_MAX_LEN, GFP_KERNEL);
+ if (!ts_info_buff)
+ goto free_debug_dir;
+ }
+ }
+
+ /*Initialize secure touch */
+ ft5x06_secure_touch_init(data);
+ ft5x06_secure_touch_stop(data, true);
+ mutex_init(&(data->ft_clk_io_ctrl_mutex));
+
+ /* Creation of secure touch sysfs files */
+ for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+ retval = sysfs_create_file(&data->input_dev->dev.kobj,
+ &attrs[attr_count].attr);
+ if (retval < 0) {
+ dev_err(&client->dev,
+ "%s: Failed to create sysfs attributes\n",
+ __func__);
+ goto free_secure_touch_sysfs;
+ }
+ }
+
ft5x06_update_fw_ver(data);
ft5x06_update_fw_vendor_id(data);
- FT_STORE_TS_INFO(data->ts_info, data->family_id, data->pdata->name,
+ FT_STORE_TS_DBG_INFO(data->ts_info, data->family_id, data->pdata->name,
data->pdata->num_max_touches, data->pdata->group_id,
data->pdata->fw_vkey_support ? "yes" : "no",
data->pdata->fw_name, data->fw_ver[0],
data->fw_ver[1], data->fw_ver[2]);
-
+ FT_STORE_TS_INFO(ts_info_buff, data->family_id, data->fw_ver[0],
+ data->fw_ver[1], data->fw_ver[2]);
#if defined(CONFIG_FB)
+ INIT_WORK(&data->fb_notify_work, fb_notify_resume_work);
data->fb_notif.notifier_call = fb_notifier_callback;
err = fb_register_client(&data->fb_notif);
@@ -2098,9 +2605,13 @@ static int ft5x06_ts_probe(struct i2c_client *client,
data->early_suspend.resume = ft5x06_ts_late_resume;
register_early_suspend(&data->early_suspend);
#endif
-
return 0;
+free_secure_touch_sysfs:
+ for (attr_count--; attr_count >= 0; attr_count--) {
+ sysfs_remove_file(&data->input_dev->dev.kobj,
+ &attrs[attr_count].attr);
+ }
free_debug_dir:
debugfs_remove_recursive(data->dir);
free_force_update_fw_sys:
@@ -2109,24 +2620,24 @@ free_update_fw_sys:
device_remove_file(&client->dev, &dev_attr_update_fw);
free_fw_name_sys:
device_remove_file(&client->dev, &dev_attr_fw_name);
-free_psensor_class_sysfs:
- if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support)
- sensors_classdev_unregister(&psensor_pdata->ps_cdev);
-unregister_psensor_input_device:
- if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support)
- input_unregister_device(data->psensor_pdata->input_psensor_dev);
-free_psensor_input_dev:
- if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support)
- input_free_device(data->psensor_pdata->input_psensor_dev);
-free_psensor_pdata:
- if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support) {
- devm_kfree(&client->dev, psensor_pdata);
- data->psensor_pdata = NULL;
- }
-irq_free:
- if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support)
- device_init_wakeup(&client->dev, 0);
- free_irq(client->irq, data);
+free_pocket_sys:
+ if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support)
+ device_remove_file(&client->dev, &dev_attr_pocket);
+free_enable_sys:
+ if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support)
+ device_remove_file(&client->dev, &dev_attr_enable);
+free_gesture_dev:
+ if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support)
+ device_destroy(gesture_pdata->gesture_class, 0);
+free_gesture_class:
+ if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support)
+ class_destroy(gesture_pdata->gesture_class);
+free_gesture_pdata:
+ if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support) {
+ devm_kfree(&client->dev, gesture_pdata);
+ data->gesture_pdata = NULL;
+ }
+
free_gpio:
if (gpio_is_valid(pdata->reset_gpio))
gpio_free(pdata->reset_gpio);
@@ -2155,25 +2666,22 @@ pwr_deinit:
ft5x06_power_init(data, false);
unreg_inputdev:
input_unregister_device(input_dev);
-free_inputdev:
- input_free_device(input_dev);
- input_dev = NULL;
return err;
}
static int ft5x06_ts_remove(struct i2c_client *client)
{
struct ft5x06_ts_data *data = i2c_get_clientdata(client);
- int retval;
-
- if (ft5x06_psensor_support_enabled() && data->pdata->psensor_support) {
+ int retval, attr_count;
+ if (ft5x06_gesture_support_enabled() && data->pdata->gesture_support) {
device_init_wakeup(&client->dev, 0);
- sensors_classdev_unregister(&data->psensor_pdata->ps_cdev);
- input_unregister_device(data->psensor_pdata->input_psensor_dev);
- input_free_device(data->psensor_pdata->input_psensor_dev);
- devm_kfree(&client->dev, data->psensor_pdata);
- data->psensor_pdata = NULL;
+ device_remove_file(&client->dev, &dev_attr_pocket);
+ device_remove_file(&client->dev, &dev_attr_enable);
+ device_destroy(data->gesture_pdata->gesture_class, 0);
+ class_destroy(data->gesture_pdata->gesture_class);
+ devm_kfree(&client->dev, data->gesture_pdata);
+ data->gesture_pdata = NULL;
}
debugfs_remove_recursive(data->dir);
@@ -2207,6 +2715,11 @@ static int ft5x06_ts_remove(struct i2c_client *client)
}
}
+ for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+ sysfs_remove_file(&data->input_dev->dev.kobj,
+ &attrs[attr_count].attr);
+ }
+
if (data->pdata->power_on)
data->pdata->power_on(false);
else
@@ -2218,7 +2731,7 @@ static int ft5x06_ts_remove(struct i2c_client *client)
ft5x06_power_init(data, false);
input_unregister_device(data->input_dev);
-
+ kobject_put(data->ts_info_kobj);
return 0;
}
diff --git a/drivers/input/touchscreen/gt9xx/Kconfig b/drivers/input/touchscreen/gt9xx/Kconfig
new file mode 100644
index 000000000000..2e1b5ba567a0
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/Kconfig
@@ -0,0 +1,51 @@
+#
+# Goodix GT9xx Touchscreen driver
+#
+
+config GT9XX_TOUCHPANEL_DRIVER
+ tristate "Goodix GT9xx touchpanel driver"
+ depends on TOUCHSCREEN_GT9XX
+ default n
+ help
+ This is the main file for touchpanel driver for Goodix GT9xx
+ touchscreens.
+
+ Say Y here if you have a Goodix GT9xx touchscreen connected
+ to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gt9xx.
+
+config GT9XX_TOUCHPANEL_UPDATE
+ tristate "Goodix GT9xx touchpanel auto update support"
+ depends on GT9XX_TOUCHPANEL_DRIVER
+ default n
+ help
+ This enables support for firmware update for Goodix GT9xx
+ touchscreens.
+
+ Say Y here if you have a Goodix GT9xx touchscreen connected
+ to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gt9xx_update.
+
+config GT9XX_TOUCHPANEL_DEBUG
+	tristate "Goodix GT9xx tools for debugging"
+ depends on GT9XX_TOUCHPANEL_DRIVER
+ default n
+ help
+ This is application debug interface support for Goodix GT9xx
+ touchscreens.
+
+	  Say Y here if you want to have an Android app debug interface
+ to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gt9xx_tool.
diff --git a/drivers/input/touchscreen/gt9xx/Makefile b/drivers/input/touchscreen/gt9xx/Makefile
new file mode 100644
index 000000000000..482d869a2d37
--- /dev/null
+++ b/drivers/input/touchscreen/gt9xx/Makefile
@@ -0,0 +1,8 @@
+#gt915 touchpanel driver
+
+
+obj-$(CONFIG_GT9XX_TOUCHPANEL_DRIVER) += gt9xx.o
+#gt915 update file
+obj-$(CONFIG_GT9XX_TOUCHPANEL_UPDATE) += gt9xx_update.o
+#debug tool
+obj-$(CONFIG_GT9XX_TOUCHPANEL_DEBUG) += goodix_tool.o
diff --git a/drivers/input/touchscreen/gt9xx/goodix_tool.c b/drivers/input/touchscreen/gt9xx/goodix_tool.c
index bef080671ab0..c67c4c8f1207 100644
--- a/drivers/input/touchscreen/gt9xx/goodix_tool.c
+++ b/drivers/input/touchscreen/gt9xx/goodix_tool.c
@@ -22,6 +22,7 @@
*/
#include "gt9xx.h"
+#include <linux/mutex.h>
#define DATA_LENGTH_UINT 512
#define CMD_HEAD_LENGTH (sizeof(st_cmd_head) - sizeof(u8 *))
@@ -53,6 +54,8 @@ static struct i2c_client *gt_client;
static struct proc_dir_entry *goodix_proc_entry;
+static struct mutex lock;
+
static s32 goodix_tool_write(struct file *filp, const char __user *buff,
unsigned long len, void *data);
static s32 goodix_tool_read(char *page, char **start, off_t off, int count,
@@ -88,18 +91,21 @@ static void tool_set_proc_name(char *procname)
static s32 tool_i2c_read_no_extra(u8 *buf, u16 len)
{
s32 ret = -1;
- s32 i = 0;
- struct i2c_msg msgs[2];
-
- msgs[0].flags = !I2C_M_RD;
- msgs[0].addr = gt_client->addr;
- msgs[0].len = cmd_head.addr_len;
- msgs[0].buf = &buf[0];
-
- msgs[1].flags = I2C_M_RD;
- msgs[1].addr = gt_client->addr;
- msgs[1].len = len;
- msgs[1].buf = &buf[GTP_ADDR_LENGTH];
+ u8 i = 0;
+ struct i2c_msg msgs[2] = {
+ {
+ .flags = !I2C_M_RD,
+ .addr = gt_client->addr,
+ .len = cmd_head.addr_len,
+ .buf = &buf[0],
+ },
+ {
+ .flags = I2C_M_RD,
+ .addr = gt_client->addr,
+ .len = len,
+ .buf = &buf[GTP_ADDR_LENGTH],
+ },
+ };
for (i = 0; i < cmd_head.retry; i++) {
ret = i2c_transfer(gt_client->adapter, msgs, 2);
@@ -107,25 +113,35 @@ static s32 tool_i2c_read_no_extra(u8 *buf, u16 len)
break;
}
+ if (i == cmd_head.retry) {
+ dev_err(&client->dev, "I2C read retry limit over.\n");
+ ret = -EIO;
+ }
+
return ret;
}
static s32 tool_i2c_write_no_extra(u8 *buf, u16 len)
{
s32 ret = -1;
- s32 i = 0;
- struct i2c_msg msg;
-
- msg.flags = !I2C_M_RD;
- msg.addr = gt_client->addr;
- msg.len = len;
- msg.buf = buf;
+ u8 i = 0;
+ struct i2c_msg msg = {
+ .flags = !I2C_M_RD,
+ .addr = gt_client->addr,
+ .len = len,
+ .buf = buf,
+ };
for (i = 0; i < cmd_head.retry; i++) {
ret = i2c_transfer(gt_client->adapter, &msg, 1);
if (ret > 0)
break;
- }
+ }
+
+ if (i == cmd_head.retry) {
+ dev_err(&client->dev, "I2C write retry limit over.\n");
+ ret = -EIO;
+ }
return ret;
}
@@ -181,7 +197,7 @@ static void unregister_i2c_func(void)
s32 init_wr_node(struct i2c_client *client)
{
- s32 i;
+ u8 i;
gt_client = client;
memset(&cmd_head, 0, sizeof(cmd_head));
@@ -189,14 +205,15 @@ s32 init_wr_node(struct i2c_client *client)
i = 5;
while ((!cmd_head.data) && i) {
- cmd_head.data = kzalloc(i * DATA_LENGTH_UINT, GFP_KERNEL);
- if (cmd_head.data != NULL)
+ cmd_head.data = devm_kzalloc(&client->dev,
+ i * DATA_LENGTH_UINT, GFP_KERNEL);
+ if (cmd_head.data)
break;
i--;
}
if (i) {
- DATA_LENGTH = i * DATA_LENGTH_UINT + GTP_ADDR_LENGTH;
- GTP_INFO("Applied memory size:%d.", DATA_LENGTH);
+ DATA_LENGTH = i * DATA_LENGTH_UINT;
+ dev_dbg(&client->dev, "Applied memory size:%d.", DATA_LENGTH);
} else {
GTP_ERROR("Apply for memory failed.");
return FAIL;
@@ -207,8 +224,9 @@ s32 init_wr_node(struct i2c_client *client)
register_i2c_func();
+ mutex_init(&lock);
tool_set_proc_name(procname);
- goodix_proc_entry = create_proc_entry(procname, 0666, NULL);
+ goodix_proc_entry = create_proc_entry(procname, 0660, NULL);
if (goodix_proc_entry == NULL) {
GTP_ERROR("Couldn't create proc entry!");
return FAIL;
@@ -222,7 +240,6 @@ s32 init_wr_node(struct i2c_client *client)
void uninit_wr_node(void)
{
- kfree(cmd_head.data);
cmd_head.data = NULL;
unregister_i2c_func();
remove_proc_entry(procname, NULL);
@@ -330,9 +347,13 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
GTP_DEBUG_FUNC();
GTP_DEBUG_ARRAY((u8 *)buff, len);
+ mutex_lock(&lock);
ret = copy_from_user(&cmd_head, buff, CMD_HEAD_LENGTH);
- if (ret)
+ if (ret) {
GTP_ERROR("copy_from_user failed.");
+ ret = -EACCES;
+ goto exit;
+ }
GTP_DEBUG("wr :0x%02x.", cmd_head.wr);
GTP_DEBUG("flag:0x%02x.", cmd_head.flag);
@@ -350,6 +371,19 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
GTP_DEBUG("len:%d.", (s32)len);
GTP_DEBUG("buf[20]:0x%02x.", buff[CMD_HEAD_LENGTH]);
+ if (cmd_head.data_len > (DATA_LENGTH - GTP_ADDR_LENGTH)) {
+ pr_err("data len %d > data buff %d, rejected!\n",
+ cmd_head.data_len, (DATA_LENGTH - GTP_ADDR_LENGTH));
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (cmd_head.addr_len > GTP_ADDR_LENGTH) {
+ pr_err(" addr len %d > data buff %d, rejected!\n",
+ cmd_head.addr_len, GTP_ADDR_LENGTH);
+ ret = -EINVAL;
+ goto exit;
+ }
+
if (cmd_head.wr == 1) {
/* copy_from_user(&cmd_head.data[cmd_head.addr_len],
* &buff[CMD_HEAD_LENGTH], cmd_head.data_len);
@@ -370,7 +404,8 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
if (cmd_head.flag == 1) {
if (comfirm() == FAIL) {
GTP_ERROR("[WRITE]Comfirm fail!");
- return FAIL;
+ ret = -EINVAL;
+ goto exit;
}
} else if (cmd_head.flag == 2) {
/* Need interrupt! */
@@ -379,7 +414,8 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
&cmd_head.data[GTP_ADDR_LENGTH - cmd_head.addr_len],
cmd_head.data_len + cmd_head.addr_len) <= 0) {
GTP_ERROR("[WRITE]Write data failed!");
- return FAIL;
+ ret = -EIO;
+ goto exit;
}
GTP_DEBUG_ARRAY(
@@ -388,7 +424,8 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
if (cmd_head.delay)
msleep(cmd_head.delay);
- return cmd_head.data_len + CMD_HEAD_LENGTH;
+ ret = cmd_head.data_len + CMD_HEAD_LENGTH;
+ goto exit;
} else if (cmd_head.wr == 3) { /* Write ic type */
ret = copy_from_user(&cmd_head.data[0], &buff[CMD_HEAD_LENGTH],
@@ -396,30 +433,40 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
if (ret)
GTP_ERROR("copy_from_user failed.");
+ if (cmd_head.data_len > sizeof(IC_TYPE)) {
+ pr_err("<<-GTP->> data len %d > data buff %d, rejected!\n",
+ cmd_head.data_len, sizeof(IC_TYPE));
+ ret = -EINVAL;
+ goto exit;
+ }
memcpy(IC_TYPE, cmd_head.data, cmd_head.data_len);
register_i2c_func();
- return cmd_head.data_len + CMD_HEAD_LENGTH;
- } else if (cmd_head.wr == 3) {
+ ret = cmd_head.data_len + CMD_HEAD_LENGTH;
+ goto exit;
+ } else if (cmd_head.wr == 5) {
/* memcpy(IC_TYPE, cmd_head.data, cmd_head.data_len); */
- return cmd_head.data_len + CMD_HEAD_LENGTH;
+ ret = cmd_head.data_len + CMD_HEAD_LENGTH;
+ goto exit;
} else if (cmd_head.wr == 7) { /* disable irq! */
gtp_irq_disable(i2c_get_clientdata(gt_client));
#if GTP_ESD_PROTECT
gtp_esd_switch(gt_client, SWITCH_OFF);
#endif
- return CMD_HEAD_LENGTH;
+ ret = CMD_HEAD_LENGTH;
+ goto exit;
} else if (cmd_head.wr == 9) { /* enable irq! */
gtp_irq_enable(i2c_get_clientdata(gt_client));
#if GTP_ESD_PROTECT
gtp_esd_switch(gt_client, SWITCH_ON);
#endif
- return CMD_HEAD_LENGTH;
+ ret = CMD_HEAD_LENGTH;
+ goto exit;
} else if (cmd_head.wr == 17) {
struct goodix_ts_data *ts = i2c_get_clientdata(gt_client);
@@ -434,27 +481,40 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
ts->gtp_rawdiff_mode = false;
GTP_DEBUG("gtp leave rawdiff.");
}
- return CMD_HEAD_LENGTH;
+ ret = CMD_HEAD_LENGTH;
+ goto exit;
}
#ifdef UPDATE_FUNCTIONS
else if (cmd_head.wr == 11) { /* Enter update mode! */
if (gup_enter_update_mode(gt_client) == FAIL)
- return FAIL;
+ ret = -EBUSY;
+ goto exit;
} else if (cmd_head.wr == 13) { /* Leave update mode! */
gup_leave_update_mode();
} else if (cmd_head.wr == 15) { /* Update firmware! */
show_len = 0;
total_len = 0;
+ if (cmd_head.data_len + 1 > DATA_LENGTH) {
+ pr_err("<<-GTP->> data len %d > data buff %d, rejected!\n",
+ cmd_head.data_len + 1, DATA_LENGTH);
+ ret = -EINVAL;
+ goto exit;
+ }
memset(cmd_head.data, 0, cmd_head.data_len + 1);
memcpy(cmd_head.data, &buff[CMD_HEAD_LENGTH],
cmd_head.data_len);
- if (gup_update_proc((void *)cmd_head.data) == FAIL)
- return FAIL;
+ if (gup_update_proc((void *)cmd_head.data) == FAIL) {
+ ret = -EBUSY;
+ goto exit;
+ }
}
#endif
+ ret = CMD_HEAD_LENGTH;
- return CMD_HEAD_LENGTH;
+exit:
+ mutex_unlock(&lock);
+ return ret;
}
/*******************************************************
@@ -469,10 +529,14 @@ static s32 goodix_tool_write(struct file *filp, const char __user *buff,
static s32 goodix_tool_read(char *page, char **start, off_t off, int count,
int *eof, void *data)
{
+ s32 ret;
GTP_DEBUG_FUNC();
+ mutex_lock(&lock);
if (cmd_head.wr % 2) {
- return FAIL;
+ pr_err("<< [READ]command head wrong\n");
+ ret = -EINVAL;
+ goto exit;
} else if (!cmd_head.wr) {
u16 len = 0;
s16 data_len = 0;
@@ -481,7 +545,8 @@ static s32 goodix_tool_read(char *page, char **start, off_t off, int count,
if (cmd_head.flag == 1) {
if (comfirm() == FAIL) {
GTP_ERROR("[READ]Comfirm fail!");
- return FAIL;
+ ret = -EINVAL;
+ goto exit;
}
} else if (cmd_head.flag == 2) {
/* Need interrupt! */
@@ -504,11 +569,12 @@ static s32 goodix_tool_read(char *page, char **start, off_t off, int count,
else
len = data_len;
- data_len -= DATA_LENGTH;
+ data_len -= len;
if (tool_i2c_read(cmd_head.data, len) <= 0) {
GTP_ERROR("[READ]Read data failed!");
- return FAIL;
+ ret = -EINVAL;
+ goto exit;
}
memcpy(&page[loc], &cmd_head.data[GTP_ADDR_LENGTH],
len);
@@ -525,15 +591,14 @@ static s32 goodix_tool_read(char *page, char **start, off_t off, int count,
GTP_DEBUG("Return ic type:%s len:%d.", page,
(s32)cmd_head.data_len);
- return cmd_head.data_len;
+ ret = cmd_head.data_len;
+ goto exit;
/* return sizeof(IC_TYPE_NAME); */
} else if (cmd_head.wr == 4) {
page[0] = show_len >> 8;
page[1] = show_len & 0xff;
page[2] = total_len >> 8;
page[3] = total_len & 0xff;
-
- return cmd_head.data_len;
} else if (cmd_head.wr == 6) {
/* Read error code! */
} else if (cmd_head.wr == 8) { /*Read driver version */
@@ -546,6 +611,9 @@ static s32 goodix_tool_read(char *page, char **start, off_t off, int count,
memcpy(page, GTP_DRIVER_VERSION, tmp_len);
page[tmp_len] = 0;
}
+ ret = cmd_head.data_len;
- return cmd_head.data_len;
+exit:
+ mutex_unlock(&lock);
+ return ret;
}
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.c b/drivers/input/touchscreen/gt9xx/gt9xx.c
index 6615c3a039a0..6c3747108d75 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.c
@@ -40,13 +40,14 @@
* By Meta, 2013/06/08
*/
+#include <linux/regulator/consumer.h>
#include "gt9xx.h"
-#if GTP_ICS_SLOT_REPORT
+#include <linux/of_gpio.h>
+
#include <linux/input/mt.h>
-#endif
-#define GOODIX_DEV_NAME "Goodix Capacitive TouchScreen"
+#define GOODIX_DEV_NAME "Goodix-CTP"
#define CFG_MAX_TOUCH_POINTS 5
#define GOODIX_COORDS_ARR_SIZE 4
#define MAX_BUTTONS 4
@@ -55,10 +56,20 @@
#define GTP_I2C_ADDRESS_HIGH 0x14
#define GTP_I2C_ADDRESS_LOW 0x5D
+#define GOODIX_VTG_MIN_UV 2600000
+#define GOODIX_VTG_MAX_UV 3300000
+#define GOODIX_I2C_VTG_MIN_UV 1800000
+#define GOODIX_I2C_VTG_MAX_UV 1800000
+#define GOODIX_VDD_LOAD_MIN_UA 0
+#define GOODIX_VDD_LOAD_MAX_UA 10000
+#define GOODIX_VIO_LOAD_MIN_UA 0
+#define GOODIX_VIO_LOAD_MAX_UA 10000
+
#define RESET_DELAY_T3_US 200 /* T3: > 100us */
#define RESET_DELAY_T4 20 /* T4: > 5ms */
-#define PHY_BUF_SIZE 32
+#define PHY_BUF_SIZE 32
+#define PROP_NAME_SIZE 24
#define GTP_MAX_TOUCH 5
#define GTP_ESD_CHECK_CIRCLE_MS 2000
@@ -80,7 +91,10 @@ static void gtp_reset_guitar(struct goodix_ts_data *ts, int ms);
static void gtp_int_sync(struct goodix_ts_data *ts, int ms);
static int gtp_i2c_test(struct i2c_client *client);
-#ifdef CONFIG_HAS_EARLYSUSPEND
+#if defined(CONFIG_FB)
+static int fb_notifier_callback(struct notifier_block *self,
+ unsigned long event, void *data);
+#elif defined(CONFIG_HAS_EARLYSUSPEND)
static void goodix_ts_early_suspend(struct early_suspend *h);
static void goodix_ts_late_resume(struct early_suspend *h);
#endif
@@ -121,40 +135,40 @@ Output:
int gtp_i2c_read(struct i2c_client *client, u8 *buf, int len)
{
struct goodix_ts_data *ts = i2c_get_clientdata(client);
- struct i2c_msg msgs[2];
int ret = -EIO;
- int retries = 0;
-
- GTP_DEBUG_FUNC();
-
- msgs[0].flags = !I2C_M_RD;
- msgs[0].addr = client->addr;
- msgs[0].len = GTP_ADDR_LENGTH;
- msgs[0].buf = &buf[0];
-
- msgs[1].flags = I2C_M_RD;
- msgs[1].addr = client->addr;
- msgs[1].len = len - GTP_ADDR_LENGTH;
- msgs[1].buf = &buf[GTP_ADDR_LENGTH];
-
- while (retries < 5) {
+ u8 retries;
+ struct i2c_msg msgs[2] = {
+ {
+ .flags = !I2C_M_RD,
+ .addr = client->addr,
+ .len = GTP_ADDR_LENGTH,
+ .buf = &buf[0],
+ },
+ {
+ .flags = I2C_M_RD,
+ .addr = client->addr,
+ .len = len - GTP_ADDR_LENGTH,
+ .buf = &buf[GTP_ADDR_LENGTH],
+ },
+ };
+
+ for (retries = 0; retries < GTP_I2C_RETRY_5; retries++) {
ret = i2c_transfer(client->adapter, msgs, 2);
if (ret == 2)
break;
- retries++;
+ dev_err(&client->dev, "I2C retry: %d\n", retries + 1);
}
- if (retries >= 5) {
+ if (retries == GTP_I2C_RETRY_5) {
#if GTP_SLIDE_WAKEUP
/* reset chip would quit doze mode */
if (doze_status == DOZE_ENABLED)
return ret;
#endif
- GTP_DEBUG("I2C communication timeout, resetting chip...");
if (init_done)
gtp_reset_guitar(ts, 10);
else
dev_warn(&client->dev,
- "<GTP> gtp_reset_guitar exit init_done=%d:\n",
+ "gtp_reset_guitar exit init_done=%d:\n",
init_done);
}
return ret;
@@ -175,38 +189,36 @@ Output:
int gtp_i2c_write(struct i2c_client *client, u8 *buf, int len)
{
struct goodix_ts_data *ts = i2c_get_clientdata(client);
- struct i2c_msg msg;
int ret = -EIO;
- int retries = 0;
-
- GTP_DEBUG_FUNC();
-
- msg.flags = !I2C_M_RD;
- msg.addr = client->addr;
- msg.len = len;
- msg.buf = buf;
-
- while (retries < 5) {
+ u8 retries;
+ struct i2c_msg msg = {
+ .flags = !I2C_M_RD,
+ .addr = client->addr,
+ .len = len,
+ .buf = buf,
+ };
+
+ for (retries = 0; retries < GTP_I2C_RETRY_5; retries++) {
ret = i2c_transfer(client->adapter, &msg, 1);
if (ret == 1)
break;
- retries++;
+ dev_err(&client->dev, "I2C retry: %d\n", retries + 1);
}
- if ((retries >= 5)) {
+ if (retries == GTP_I2C_RETRY_5) {
#if GTP_SLIDE_WAKEUP
if (doze_status == DOZE_ENABLED)
return ret;
#endif
- GTP_DEBUG("I2C communication timeout, resetting chip...");
if (init_done)
gtp_reset_guitar(ts, 10);
else
dev_warn(&client->dev,
- "<GTP> gtp_reset_guitar exit init_done=%d:\n",
+ "gtp_reset_guitar exit init_done=%d:\n",
init_done);
}
return ret;
}
+
/*******************************************************
Function:
i2c read twice, compare the results
@@ -226,7 +238,7 @@ int gtp_i2c_read_dbl_check(struct i2c_client *client,
u8 confirm_buf[16] = {0};
u8 retry = 0;
- while (retry++ < 3) {
+ while (retry++ < GTP_I2C_RETRY_3) {
memset(buf, 0xAA, 16);
buf[0] = (u8)(addr >> 8);
buf[1] = (u8)(addr & 0xFF);
@@ -240,7 +252,7 @@ int gtp_i2c_read_dbl_check(struct i2c_client *client,
if (!memcmp(buf, confirm_buf, len + 2))
break;
}
- if (retry < 3) {
+ if (retry < GTP_I2C_RETRY_3) {
memcpy(rxbuf, confirm_buf + 2, len);
return SUCCESS;
}
@@ -269,7 +281,7 @@ static int gtp_send_cfg(struct goodix_ts_data *ts)
"Ic fixed config, no config sent!");
ret = 2;
} else {
- for (retry = 0; retry < 5; retry++) {
+ for (retry = 0; retry < GTP_I2C_RETRY_5; retry++) {
ret = gtp_i2c_write(ts->client,
ts->config_data,
GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH);
@@ -294,8 +306,6 @@ void gtp_irq_disable(struct goodix_ts_data *ts)
{
unsigned long irqflags;
- GTP_DEBUG_FUNC();
-
spin_lock_irqsave(&ts->irq_lock, irqflags);
if (!ts->irq_is_disabled) {
ts->irq_is_disabled = true;
@@ -316,8 +326,6 @@ void gtp_irq_enable(struct goodix_ts_data *ts)
{
unsigned long irqflags = 0;
- GTP_DEBUG_FUNC();
-
spin_lock_irqsave(&ts->irq_lock, irqflags);
if (ts->irq_is_disabled) {
enable_irq(ts->client->irq);
@@ -342,26 +350,15 @@ static void gtp_touch_down(struct goodix_ts_data *ts, int id, int x, int y,
int w)
{
#if GTP_CHANGE_X2Y
- GTP_SWAP(x, y);
+ swap(x, y);
#endif
-#if GTP_ICS_SLOT_REPORT
input_mt_slot(ts->input_dev, id);
- input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, id);
+ input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x);
input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y);
input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, w);
input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
-#else
- input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y);
- input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, w);
- input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
- input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, id);
- input_mt_sync(ts->input_dev);
-#endif
-
- GTP_DEBUG("ID:%d, X:%d, Y:%d, W:%d", id, x, y, w);
}
/*******************************************************
@@ -374,15 +371,8 @@ Output:
*********************************************************/
static void gtp_touch_up(struct goodix_ts_data *ts, int id)
{
-#if GTP_ICS_SLOT_REPORT
input_mt_slot(ts->input_dev, id);
- input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
- GTP_DEBUG("Touch id[%2d] release!", id);
-#else
- input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
- input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
- input_mt_sync(ts->input_dev);
-#endif
+ input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, false);
}
@@ -423,8 +413,6 @@ static void goodix_ts_work_func(struct work_struct *work)
u8 doze_buf[3] = {0x81, 0x4B};
#endif
- GTP_DEBUG_FUNC();
-
ts = container_of(work, struct goodix_ts_data, work);
#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
if (ts->enter_update)
@@ -434,7 +422,6 @@ static void goodix_ts_work_func(struct work_struct *work)
#if GTP_SLIDE_WAKEUP
if (doze_status == DOZE_ENABLED) {
ret = gtp_i2c_read(ts->client, doze_buf, 3);
- GTP_DEBUG("0x814B = 0x%02X", doze_buf[2]);
if (ret > 0) {
if (doze_buf[2] == 0xAA) {
dev_dbg(&ts->client->dev,
@@ -532,12 +519,9 @@ static void goodix_ts_work_func(struct work_struct *work)
#endif
pre_key = key_value;
- GTP_DEBUG("pre_touch:%02x, finger:%02x.", pre_touch, finger);
-
-#if GTP_ICS_SLOT_REPORT
#if GTP_WITH_PEN
if (pre_pen && (touch_num == 0)) {
- GTP_DEBUG("Pen touch UP(Slot)!");
+ dev_dbg(&ts->client->dev, "Pen touch UP(Slot)!");
input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
input_mt_slot(ts->input_dev, 5);
input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
@@ -554,7 +538,8 @@ static void goodix_ts_work_func(struct work_struct *work)
#if GTP_WITH_PEN
id = coor_data[pos];
if (id == 128) {
- GTP_DEBUG("Pen touch DOWN(Slot)!");
+ dev_dbg(&ts->client->dev,
+ "Pen touch DOWN(Slot)!");
input_x = coor_data[pos + 1]
| (coor_data[pos + 2] << 8);
input_y = coor_data[pos + 3]
@@ -573,7 +558,8 @@ static void goodix_ts_work_func(struct work_struct *work)
ABS_MT_POSITION_Y, input_y);
input_report_abs(ts->input_dev,
ABS_MT_TOUCH_MAJOR, input_w);
- GTP_DEBUG("Pen/Stylus: (%d, %d)[%d]",
+ dev_dbg(&ts->client->dev,
+ "Pen/Stylus: (%d, %d)[%d]",
input_x, input_y, input_w);
pre_pen = 1;
pre_touch = 0;
@@ -583,8 +569,6 @@ static void goodix_ts_work_func(struct work_struct *work)
touch_index |= (0x01<<id);
}
- GTP_DEBUG("id = %d,touch_index = 0x%x, pre_touch = 0x%x\n",
- id, touch_index, pre_touch);
for (i = 0; i < GTP_MAX_TOUCH; i++) {
#if GTP_WITH_PEN
if (pre_pen == 1)
@@ -611,42 +595,6 @@ static void goodix_ts_work_func(struct work_struct *work)
}
}
}
-#else
- input_report_key(ts->input_dev, BTN_TOUCH, (touch_num || key_value));
- if (touch_num) {
- for (i = 0; i < touch_num; i++) {
- coor_data = &point_data[i * 8 + 3];
-
- id = coor_data[0];
- input_x = coor_data[1] | coor_data[2] << 8;
- input_y = coor_data[3] | coor_data[4] << 8;
- input_w = coor_data[5] | coor_data[6] << 8;
-#if GTP_WITH_PEN
- if (id == 128) {
- GTP_DEBUG("Pen touch DOWN!");
- input_report_key(ts->input_dev,
- BTN_TOOL_PEN, 1);
- pre_pen = 1;
- id = 0;
- }
-#endif
- gtp_touch_down(ts, id, input_x, input_y, input_w);
- }
- } else if (pre_touch) {
-#if GTP_WITH_PEN
- if (pre_pen == 1) {
- GTP_DEBUG("Pen touch UP!");
- input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
- pre_pen = 0;
- }
-#endif
- GTP_DEBUG("Touch Released!");
- gtp_touch_up(ts, 0);
- }
-
- pre_touch = touch_num;
-#endif
-
input_sync(ts->input_dev);
exit_work_func:
@@ -676,8 +624,6 @@ static enum hrtimer_restart goodix_ts_timer_handler(struct hrtimer *timer)
struct goodix_ts_data
*ts = container_of(timer, struct goodix_ts_data, timer);
- GTP_DEBUG_FUNC();
-
queue_work(ts->goodix_wq, &ts->work);
hrtimer_start(&ts->timer, ktime_set(0, (GTP_POLL_TIME + 6) * 1000000),
HRTIMER_MODE_REL);
@@ -698,8 +644,6 @@ static irqreturn_t goodix_ts_irq_handler(int irq, void *dev_id)
{
struct goodix_ts_data *ts = dev_id;
- GTP_DEBUG_FUNC();
-
gtp_irq_disable(ts);
queue_work(ts->goodix_wq, &ts->work);
@@ -731,8 +675,6 @@ Output:
*******************************************************/
static void gtp_reset_guitar(struct goodix_ts_data *ts, int ms)
{
- GTP_DEBUG_FUNC();
-
/* This reset sequence will selcet I2C slave address */
gpio_direction_output(ts->pdata->reset_gpio, 0);
msleep(ms);
@@ -755,7 +697,7 @@ static void gtp_reset_guitar(struct goodix_ts_data *ts, int ms)
#endif
}
-#ifdef CONFIG_HAS_EARLYSUSPEND
+#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_FB)
#if GTP_SLIDE_WAKEUP
/*******************************************************
Function:
@@ -773,20 +715,17 @@ static s8 gtp_enter_doze(struct goodix_ts_data *ts)
(u8)(GTP_REG_SLEEP >> 8),
(u8)GTP_REG_SLEEP, 8};
- GTP_DEBUG_FUNC();
-
#if GTP_DBL_CLK_WAKEUP
i2c_control_buf[2] = 0x09;
#endif
gtp_irq_disable(ts);
- GTP_DEBUG("entering doze mode...");
- while (retry++ < 5) {
+ while (retry++ < GTP_I2C_RETRY_5) {
i2c_control_buf[0] = 0x80;
i2c_control_buf[1] = 0x46;
ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
if (ret < 0) {
- GTP_DEBUG(
+ dev_err(&ts->client->dev,
"failed to set doze flag into 0x8046, %d",
retry);
continue;
@@ -825,11 +764,9 @@ static s8 gtp_enter_sleep(struct goodix_ts_data *ts)
(u8)(GTP_REG_SLEEP >> 8),
(u8)GTP_REG_SLEEP, 5};
- GTP_DEBUG_FUNC();
-
ret = gpio_direction_output(ts->pdata->irq_gpio, 0);
usleep(5000);
- while (retry++ < 5) {
+ while (retry++ < GTP_I2C_RETRY_5) {
ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
if (ret > 0) {
dev_dbg(&ts->client->dev,
@@ -857,23 +794,17 @@ static s8 gtp_wakeup_sleep(struct goodix_ts_data *ts)
u8 retry = 0;
s8 ret = -1;
- GTP_DEBUG_FUNC();
-
#if GTP_POWER_CTRL_SLEEP
- while (retry++ < 5) {
- gtp_reset_guitar(ts, 20);
+ gtp_reset_guitar(ts, 20);
- ret = gtp_send_cfg(ts);
- if (ret > 0) {
- dev_dbg(&ts->client->dev,
- "Wakeup sleep send config success.");
- continue;
- }
- dev_dbg(&ts->client->dev, "GTP Wakeup!");
+ ret = gtp_send_cfg(ts);
+ if (ret > 0) {
+ dev_dbg(&ts->client->dev,
+ "Wakeup sleep send config success.");
return 1;
}
#else
- while (retry++ < 10) {
+ while (retry++ < GTP_I2C_RETRY_10) {
#if GTP_SLIDE_WAKEUP
/* wakeup not by slide */
if (doze_status != DOZE_WAKEUP)
@@ -910,7 +841,7 @@ static s8 gtp_wakeup_sleep(struct goodix_ts_data *ts)
dev_err(&ts->client->dev, "GTP wakeup sleep failed.\n");
return ret;
}
-#endif /* !CONFIG_HAS_EARLYSUSPEND */
+#endif /* !CONFIG_HAS_EARLYSUSPEND && !CONFIG_FB*/
/*******************************************************
Function:
@@ -933,26 +864,9 @@ static int gtp_init_panel(struct goodix_ts_data *ts)
u8 opr_buf[16];
u8 sensor_id = 0;
- u8 cfg_info_group1[] = CTP_CFG_GROUP1;
- u8 cfg_info_group2[] = CTP_CFG_GROUP2;
- u8 cfg_info_group3[] = CTP_CFG_GROUP3;
- u8 cfg_info_group4[] = CTP_CFG_GROUP4;
- u8 cfg_info_group5[] = CTP_CFG_GROUP5;
- u8 cfg_info_group6[] = CTP_CFG_GROUP6;
- u8 *send_cfg_buf[] = {cfg_info_group1, cfg_info_group2,
- cfg_info_group3, cfg_info_group4,
- cfg_info_group5, cfg_info_group6};
-
- u8 cfg_info_len[] = {ARRAY_SIZE(cfg_info_group1),
- ARRAY_SIZE(cfg_info_group2),
- ARRAY_SIZE(cfg_info_group3),
- ARRAY_SIZE(cfg_info_group4),
- ARRAY_SIZE(cfg_info_group5),
- ARRAY_SIZE(cfg_info_group6)};
-
- GTP_DEBUG("Config Groups\' Lengths: %d, %d, %d, %d, %d, %d",
- cfg_info_len[0], cfg_info_len[1], cfg_info_len[2],
- cfg_info_len[3], cfg_info_len[4], cfg_info_len[5]);
+ for (i = 0; i < GOODIX_MAX_CFG_GROUP; i++)
+ dev_dbg(&client->dev, "Config Groups(%d) Lengths: %d",
+ i, ts->pdata->config_data_len[i]);
ret = gtp_i2c_read_dbl_check(ts->client, 0x41E4, opr_buf, 1);
if (ret == SUCCESS) {
@@ -963,14 +877,18 @@ static int gtp_init_panel(struct goodix_ts_data *ts)
return -EINVAL;
}
}
- if ((!cfg_info_len[1]) && (!cfg_info_len[2]) && (!cfg_info_len[3])
- && (!cfg_info_len[4]) && (!cfg_info_len[5])) {
+
+ for (i = 1; i < GOODIX_MAX_CFG_GROUP; i++) {
+ if (ts->pdata->config_data_len[i])
+ break;
+ }
+ if (i == GOODIX_MAX_CFG_GROUP) {
sensor_id = 0;
} else {
ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_SENSOR_ID,
&sensor_id, 1);
if (ret == SUCCESS) {
- if (sensor_id >= 0x06) {
+ if (sensor_id >= GOODIX_MAX_CFG_GROUP) {
dev_err(&client->dev,
"Invalid sensor_id(0x%02X), No Config Sent!",
sensor_id);
@@ -982,24 +900,26 @@ static int gtp_init_panel(struct goodix_ts_data *ts)
return -EINVAL;
}
}
- GTP_DEBUG("Sensor_ID: %d", sensor_id);
- ts->gtp_cfg_len = cfg_info_len[sensor_id];
+ dev_dbg(&client->dev, "Sensor ID selected: %d", sensor_id);
- if (ts->gtp_cfg_len < GTP_CONFIG_MIN_LENGTH) {
+ if (ts->pdata->config_data_len[sensor_id] < GTP_CONFIG_MIN_LENGTH ||
+ !ts->pdata->config_data[sensor_id]) {
dev_err(&client->dev,
- "Sensor_ID(%d) matches with NULL or INVALID CONFIG GROUP! NO Config Sent! You need to check you header file CFG_GROUP section!\n",
+ "Sensor_ID(%d) matches with NULL or invalid config group!\n",
sensor_id);
return -EINVAL;
}
+
ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_CONFIG_DATA,
&opr_buf[0], 1);
-
if (ret == SUCCESS) {
if (opr_buf[0] < 90) {
/* backup group config version */
- grp_cfg_version = send_cfg_buf[sensor_id][0];
- send_cfg_buf[sensor_id][0] = 0x00;
+ grp_cfg_version =
+ ts->pdata->config_data[sensor_id][GTP_ADDR_LENGTH];
+ ts->pdata->config_data[sensor_id][GTP_ADDR_LENGTH] =
+ 0x00;
ts->fixed_cfg = 0;
} else {
/* treated as fixed config, not send config */
@@ -1014,21 +934,9 @@ static int gtp_init_panel(struct goodix_ts_data *ts)
return -EINVAL;
}
- config_data = devm_kzalloc(&client->dev,
- GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH,
- GFP_KERNEL);
- if (!config_data) {
- dev_err(&client->dev,
- "Not enough memory for panel config data\n");
- return -ENOMEM;
- }
-
- ts->config_data = config_data;
- config_data[0] = GTP_REG_CONFIG_DATA >> 8;
- config_data[1] = GTP_REG_CONFIG_DATA & 0xff;
- memset(&config_data[GTP_ADDR_LENGTH], 0, GTP_CONFIG_MAX_LENGTH);
- memcpy(&config_data[GTP_ADDR_LENGTH], send_cfg_buf[sensor_id],
- ts->gtp_cfg_len);
+ config_data = ts->pdata->config_data[sensor_id];
+ ts->config_data = ts->pdata->config_data[sensor_id];
+ ts->gtp_cfg_len = ts->pdata->config_data_len[sensor_id];
#if GTP_CUSTOM_CFG
config_data[RESOLUTION_LOC] =
@@ -1065,7 +973,6 @@ static int gtp_init_panel(struct goodix_ts_data *ts)
}
#endif /* !DRIVER NOT SEND CONFIG */
- GTP_DEBUG_FUNC();
if ((ts->abs_x_max == 0) && (ts->abs_y_max == 0)) {
ts->abs_x_max = (config_data[RESOLUTION_LOC + 1] << 8)
+ config_data[RESOLUTION_LOC];
@@ -1077,49 +984,79 @@ static int gtp_init_panel(struct goodix_ts_data *ts)
if (ret < 0)
dev_err(&client->dev, "%s: Send config error.\n", __func__);
- GTP_DEBUG("X_MAX = %d, Y_MAX = %d, TRIGGER = 0x%02x",
- ts->abs_x_max, ts->abs_y_max,
- ts->int_trigger_type);
-
msleep(20);
return ret;
}
/*******************************************************
Function:
- Read chip version.
+ Read firmware version
Input:
client: i2c device
version: buffer to keep ic firmware version
Output:
read operation return.
- 2: succeed, otherwise: failed
+ 2: succeed, otherwise: failed
*******************************************************/
-int gtp_read_version(struct i2c_client *client, u16 *version)
+static int gtp_read_fw_version(struct i2c_client *client, u16 *version)
{
- int ret = -EIO;
- u8 buf[8] = { GTP_REG_VERSION >> 8, GTP_REG_VERSION & 0xff };
-
- GTP_DEBUG_FUNC();
+ int ret = 0;
+ u8 buf[GTP_FW_VERSION_BUFFER_MAXSIZE] = {
+ GTP_REG_FW_VERSION >> 8, GTP_REG_FW_VERSION & 0xff };
ret = gtp_i2c_read(client, buf, sizeof(buf));
if (ret < 0) {
dev_err(&client->dev, "GTP read version failed.\n");
- return ret;
+ return -EIO;
}
if (version)
- *version = (buf[7] << 8) | buf[6];
+ *version = (buf[3] << 8) | buf[2];
+
+ return ret;
+}
+/*
+ * Function:
+ * Read and check chip id.
+ * Input:
+ * client: i2c device
+ * Output:
+ * read operation return.
+ * 0: succeed, otherwise: failed
+ */
+static int gtp_check_product_id(struct i2c_client *client)
+{
+ int ret = 0;
+ char product_id[GTP_PRODUCT_ID_MAXSIZE];
+ struct goodix_ts_data *ts = i2c_get_clientdata(client);
+ /* 04 bytes are used for the Product-id in the register space.*/
+ u8 buf[GTP_PRODUCT_ID_BUFFER_MAXSIZE] = {
+ GTP_REG_PRODUCT_ID >> 8, GTP_REG_PRODUCT_ID & 0xff };
+
+ ret = gtp_i2c_read(client, buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(&client->dev, "GTP read product_id failed.\n");
+ return -EIO;
+ }
if (buf[5] == 0x00) {
- dev_dbg(&client->dev, "IC Version: %c%c%c_%02x%02x\n", buf[2],
- buf[3], buf[4], buf[7], buf[6]);
+ /* copy (GTP_PRODUCT_ID_MAXSIZE - 1) from buffer. Ex: 915 */
+ strlcpy(product_id, &buf[2], GTP_PRODUCT_ID_MAXSIZE - 1);
} else {
if (buf[5] == 'S' || buf[5] == 's')
chip_gt9xxs = 1;
- dev_dbg(&client->dev, "IC Version: %c%c%c%c_%02x%02x\n", buf[2],
- buf[3], buf[4], buf[5], buf[7], buf[6]);
+ /* copy GTP_PRODUCT_ID_MAXSIZE from buffer. Ex: 915s */
+ strlcpy(product_id, &buf[2], GTP_PRODUCT_ID_MAXSIZE);
}
+
+ dev_info(&client->dev, "Goodix Product ID = %s\n", product_id);
+
+ /* A successful gtp_i2c_read() leaves ret == 2; clear it so a
+ * missing DT product-id (no comparison requested) is not
+ * misreported as a mismatch. of_property_read_string() yields
+ * NULL or a valid string, never an ERR_PTR, so test for NULL.
+ */
+ ret = 0;
+ if (ts->pdata->product_id)
+ ret = strcmp(product_id, ts->pdata->product_id);
+
+ if (ret != 0)
+ return -EINVAL;
+
return ret;
}
@@ -1135,11 +1072,9 @@ Output:
static int gtp_i2c_test(struct i2c_client *client)
{
u8 buf[3] = { GTP_REG_CONFIG_DATA >> 8, GTP_REG_CONFIG_DATA & 0xff };
- int retry = 5;
+ int retry = GTP_I2C_RETRY_5;
int ret = -EIO;
- GTP_DEBUG_FUNC();
-
while (retry--) {
ret = gtp_i2c_read(client, buf, 3);
if (ret > 0)
@@ -1168,50 +1103,66 @@ static int gtp_request_io_port(struct goodix_ts_data *ts)
if (gpio_is_valid(pdata->irq_gpio)) {
ret = gpio_request(pdata->irq_gpio, "goodix_ts_irq_gpio");
if (ret) {
- dev_err(&client->dev, "irq gpio request failed\n");
- goto pwr_off;
+ dev_err(&client->dev, "Unable to request irq gpio [%d]\n",
+ pdata->irq_gpio);
+ goto err_pwr_off;
}
ret = gpio_direction_input(pdata->irq_gpio);
if (ret) {
- dev_err(&client->dev,
- "set_direction for irq gpio failed\n");
- goto free_irq_gpio;
+ dev_err(&client->dev, "Unable to set direction for irq gpio [%d]\n",
+ pdata->irq_gpio);
+ goto err_free_irq_gpio;
}
} else {
- dev_err(&client->dev, "irq gpio is invalid!\n");
+ dev_err(&client->dev, "Invalid irq gpio [%d]!\n",
+ pdata->irq_gpio);
ret = -EINVAL;
- goto free_irq_gpio;
+ goto err_pwr_off;
}
if (gpio_is_valid(pdata->reset_gpio)) {
- ret = gpio_request(pdata->reset_gpio, "goodix_ts__reset_gpio");
+ ret = gpio_request(pdata->reset_gpio, "goodix_ts_reset_gpio");
if (ret) {
- dev_err(&client->dev, "reset gpio request failed\n");
- goto free_irq_gpio;
+ dev_err(&client->dev, "Unable to request reset gpio [%d]\n",
+ pdata->reset_gpio);
+ goto err_free_irq_gpio;
}
ret = gpio_direction_output(pdata->reset_gpio, 0);
if (ret) {
- dev_err(&client->dev,
- "set_direction for reset gpio failed\n");
- goto free_reset_gpio;
+ dev_err(&client->dev, "Unable to set direction for reset gpio [%d]\n",
+ pdata->reset_gpio);
+ goto err_free_reset_gpio;
}
} else {
- dev_err(&client->dev, "reset gpio is invalid!\n");
+ dev_err(&client->dev, "Invalid reset gpio [%d]!\n",
+ pdata->reset_gpio);
ret = -EINVAL;
- goto free_reset_gpio;
+ goto err_free_irq_gpio;
}
- gpio_direction_input(pdata->reset_gpio);
+ /* IRQ GPIO is an input signal, but we are setting it to output
+ * direction and pulling it down, to comply with power up timing
+ * requirements, mentioned in power up timing section of device
+ * datasheet.
+ */
+ ret = gpio_direction_output(pdata->irq_gpio, 0);
+ if (ret)
+ dev_warn(&client->dev,
+ "pull down interrupt gpio failed\n");
+ ret = gpio_direction_output(pdata->reset_gpio, 0);
+ if (ret)
+ dev_warn(&client->dev,
+ "pull down reset gpio failed\n");
return ret;
-free_reset_gpio:
+err_free_reset_gpio:
if (gpio_is_valid(pdata->reset_gpio))
gpio_free(pdata->reset_gpio);
-free_irq_gpio:
+err_free_irq_gpio:
if (gpio_is_valid(pdata->irq_gpio))
gpio_free(pdata->irq_gpio);
-pwr_off:
+err_pwr_off:
return ret;
}
@@ -1229,9 +1180,6 @@ static int gtp_request_irq(struct goodix_ts_data *ts)
int ret;
const u8 irq_table[] = GTP_IRQ_TAB;
- GTP_DEBUG("INT trigger type:%x, irq=%d", ts->int_trigger_type,
- ts->client->irq);
-
ret = request_irq(ts->client->irq, goodix_ts_irq_handler,
irq_table[ts->int_trigger_type],
ts->client->name, ts);
@@ -1270,8 +1218,6 @@ static int gtp_request_input_dev(struct goodix_ts_data *ts)
int index = 0;
#endif
- GTP_DEBUG_FUNC();
-
ts->input_dev = input_allocate_device();
if (ts->input_dev == NULL) {
dev_err(&ts->client->dev,
@@ -1281,12 +1227,10 @@ static int gtp_request_input_dev(struct goodix_ts_data *ts)
ts->input_dev->evbit[0] =
BIT_MASK(EV_SYN) | BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-#if GTP_ICS_SLOT_REPORT
+ set_bit(BTN_TOOL_FINGER, ts->input_dev->keybit);
__set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
- input_mt_init_slots(ts->input_dev, 10);/* in case of "out of memory" */
-#else
- ts->input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
-#endif
+ /* in case of "out of memory" */
+ input_mt_init_slots(ts->input_dev, 10, 0);
#if GTP_HAVE_TOUCH_KEY
for (index = 0; index < ARRAY_SIZE(touch_key_array); index++) {
@@ -1307,7 +1251,7 @@ static int gtp_request_input_dev(struct goodix_ts_data *ts)
#endif
#if GTP_CHANGE_X2Y
- GTP_SWAP(ts->abs_x_max, ts->abs_y_max);
+ swap(ts->abs_x_max, ts->abs_y_max);
#endif
input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X,
@@ -1345,6 +1289,327 @@ exit_free_inputdev:
return ret;
}
+static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
+{
+ return (regulator_count_voltages(reg) > 0) ?
+ regulator_set_optimum_mode(reg, load_uA) : 0;
+}
+
+/**
+ * goodix_power_on - Turn device power ON
+ * @ts: driver private data
+ *
+ * Returns zero on success, else an error.
+ */
+static int goodix_power_on(struct goodix_ts_data *ts)
+{
+ int ret;
+
+ if (!IS_ERR(ts->avdd)) {
+ ret = reg_set_optimum_mode_check(ts->avdd,
+ GOODIX_VDD_LOAD_MAX_UA);
+ if (ret < 0) {
+ dev_err(&ts->client->dev,
+ "Regulator avdd set_opt failed rc=%d\n", ret);
+ goto err_set_opt_avdd;
+ }
+ ret = regulator_enable(ts->avdd);
+ if (ret) {
+ dev_err(&ts->client->dev,
+ "Regulator avdd enable failed ret=%d\n", ret);
+ goto err_enable_avdd;
+ }
+ }
+
+ if (!IS_ERR(ts->vdd)) {
+ ret = regulator_set_voltage(ts->vdd, GOODIX_VTG_MIN_UV,
+ GOODIX_VTG_MAX_UV);
+ if (ret) {
+ dev_err(&ts->client->dev,
+ "Regulator set_vtg failed vdd ret=%d\n", ret);
+ goto err_set_vtg_vdd;
+ }
+ ret = reg_set_optimum_mode_check(ts->vdd,
+ GOODIX_VDD_LOAD_MAX_UA);
+ if (ret < 0) {
+ dev_err(&ts->client->dev,
+ "Regulator vdd set_opt failed rc=%d\n", ret);
+ goto err_set_opt_vdd;
+ }
+ ret = regulator_enable(ts->vdd);
+ if (ret) {
+ dev_err(&ts->client->dev,
+ "Regulator vdd enable failed ret=%d\n", ret);
+ goto err_enable_vdd;
+ }
+ }
+
+ if (!IS_ERR(ts->vcc_i2c)) {
+ ret = regulator_set_voltage(ts->vcc_i2c, GOODIX_I2C_VTG_MIN_UV,
+ GOODIX_I2C_VTG_MAX_UV);
+ if (ret) {
+ dev_err(&ts->client->dev,
+ "Regulator set_vtg failed vcc_i2c ret=%d\n",
+ ret);
+ goto err_set_vtg_vcc_i2c;
+ }
+ ret = reg_set_optimum_mode_check(ts->vcc_i2c,
+ GOODIX_VIO_LOAD_MAX_UA);
+ if (ret < 0) {
+ dev_err(&ts->client->dev,
+ "Regulator vcc_i2c set_opt failed rc=%d\n",
+ ret);
+ goto err_set_opt_vcc_i2c;
+ }
+ ret = regulator_enable(ts->vcc_i2c);
+ if (ret) {
+ dev_err(&ts->client->dev,
+ "Regulator vcc_i2c enable failed ret=%d\n",
+ ret);
+ /* vdd is released by the unwind path below; an
+ * extra disable here would unbalance its refcount
+ * (and would dereference an ERR_PTR when vdd is
+ * absent).
+ */
+ goto err_enable_vcc_i2c;
+ }
+ }
+
+ return 0;
+
+err_enable_vcc_i2c:
+err_set_opt_vcc_i2c:
+ if (!IS_ERR(ts->vcc_i2c))
+ regulator_set_voltage(ts->vcc_i2c, 0, GOODIX_I2C_VTG_MAX_UV);
+err_set_vtg_vcc_i2c:
+ if (!IS_ERR(ts->vdd))
+ regulator_disable(ts->vdd);
+err_enable_vdd:
+err_set_opt_vdd:
+ if (!IS_ERR(ts->vdd))
+ regulator_set_voltage(ts->vdd, 0, GOODIX_VTG_MAX_UV);
+err_set_vtg_vdd:
+ if (!IS_ERR(ts->avdd))
+ regulator_disable(ts->avdd);
+err_enable_avdd:
+err_set_opt_avdd:
+ return ret;
+}
+
+/**
+ * goodix_power_off - Turn device power OFF
+ * @ts: driver private data
+ *
+ * Returns zero on success, else an error.
+ */
+static int goodix_power_off(struct goodix_ts_data *ts)
+{
+ int ret;
+
+ if (!IS_ERR(ts->vcc_i2c)) {
+ ret = regulator_set_voltage(ts->vcc_i2c, 0,
+ GOODIX_I2C_VTG_MAX_UV);
+ if (ret < 0)
+ dev_err(&ts->client->dev,
+ "Regulator vcc_i2c set_vtg failed ret=%d\n",
+ ret);
+ ret = regulator_disable(ts->vcc_i2c);
+ if (ret)
+ dev_err(&ts->client->dev,
+ "Regulator vcc_i2c disable failed ret=%d\n",
+ ret);
+ }
+
+ if (!IS_ERR(ts->vdd)) {
+ ret = regulator_set_voltage(ts->vdd, 0, GOODIX_VTG_MAX_UV);
+ if (ret < 0)
+ dev_err(&ts->client->dev,
+ "Regulator vdd set_vtg failed ret=%d\n", ret);
+ ret = regulator_disable(ts->vdd);
+ if (ret)
+ dev_err(&ts->client->dev,
+ "Regulator vdd disable failed ret=%d\n", ret);
+ }
+
+ if (!IS_ERR(ts->avdd)) {
+ ret = regulator_disable(ts->avdd);
+ if (ret)
+ dev_err(&ts->client->dev,
+ "Regulator avdd disable failed ret=%d\n", ret);
+ }
+
+ return 0;
+}
+
+/**
+ * goodix_power_init - Initialize device power
+ * @ts: driver private data
+ *
+ * Returns zero on success, else an error.
+ */
+static int goodix_power_init(struct goodix_ts_data *ts)
+{
+ int ret;
+
+ ts->avdd = regulator_get(&ts->client->dev, "avdd");
+ if (IS_ERR(ts->avdd)) {
+ ret = PTR_ERR(ts->avdd);
+ dev_info(&ts->client->dev,
+ "Regulator get failed avdd ret=%d\n", ret);
+ }
+
+ ts->vdd = regulator_get(&ts->client->dev, "vdd");
+ if (IS_ERR(ts->vdd)) {
+ ret = PTR_ERR(ts->vdd);
+ dev_info(&ts->client->dev,
+ "Regulator get failed vdd ret=%d\n", ret);
+ }
+
+ ts->vcc_i2c = regulator_get(&ts->client->dev, "vcc-i2c");
+ if (IS_ERR(ts->vcc_i2c)) {
+ ret = PTR_ERR(ts->vcc_i2c);
+ dev_info(&ts->client->dev,
+ "Regulator get failed vcc_i2c ret=%d\n", ret);
+ }
+
+ return 0;
+}
+
+/**
+ * goodix_power_deinit - Deinitialize device power
+ * @ts: driver private data
+ *
+ * Returns zero on success, else an error.
+ */
+static int goodix_power_deinit(struct goodix_ts_data *ts)
+{
+ regulator_put(ts->vdd);
+ regulator_put(ts->vcc_i2c);
+ regulator_put(ts->avdd);
+
+ return 0;
+}
+
+static int goodix_ts_get_dt_coords(struct device *dev, char *name,
+ struct goodix_ts_platform_data *pdata)
+{
+ struct property *prop;
+ struct device_node *np = dev->of_node;
+ int rc;
+ u32 coords[GOODIX_COORDS_ARR_SIZE];
+
+ prop = of_find_property(np, name, NULL);
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+
+ rc = of_property_read_u32_array(np, name, coords,
+ GOODIX_COORDS_ARR_SIZE);
+ if (rc && (rc != -EINVAL)) {
+ dev_err(dev, "Unable to read %s\n", name);
+ return rc;
+ }
+
+ if (!strcmp(name, "goodix,panel-coords")) {
+ pdata->panel_minx = coords[0];
+ pdata->panel_miny = coords[1];
+ pdata->panel_maxx = coords[2];
+ pdata->panel_maxy = coords[3];
+ } else if (!strcmp(name, "goodix,display-coords")) {
+ pdata->x_min = coords[0];
+ pdata->y_min = coords[1];
+ pdata->x_max = coords[2];
+ pdata->y_max = coords[3];
+ } else {
+ dev_err(dev, "unsupported property %s\n", name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int goodix_parse_dt(struct device *dev,
+ struct goodix_ts_platform_data *pdata)
+{
+ int rc;
+ struct device_node *np = dev->of_node;
+ struct property *prop;
+ u32 temp_val, num_buttons;
+ u32 button_map[MAX_BUTTONS];
+ char prop_name[PROP_NAME_SIZE];
+ int i, read_cfg_num;
+
+ rc = goodix_ts_get_dt_coords(dev, "goodix,panel-coords", pdata);
+ if (rc && (rc != -EINVAL))
+ return rc;
+
+ rc = goodix_ts_get_dt_coords(dev, "goodix,display-coords", pdata);
+ if (rc)
+ return rc;
+
+ pdata->i2c_pull_up = of_property_read_bool(np,
+ "goodix,i2c-pull-up");
+
+ pdata->no_force_update = of_property_read_bool(np,
+ "goodix,no-force-update");
+ /* reset, irq gpio info */
+ pdata->reset_gpio = of_get_named_gpio_flags(np, "reset-gpios",
+ 0, &pdata->reset_gpio_flags);
+ if (pdata->reset_gpio < 0)
+ return pdata->reset_gpio;
+
+ pdata->irq_gpio = of_get_named_gpio_flags(np, "interrupt-gpios",
+ 0, &pdata->irq_gpio_flags);
+ if (pdata->irq_gpio < 0)
+ return pdata->irq_gpio;
+
+ rc = of_property_read_string(np, "goodix,product-id",
+ &pdata->product_id);
+ if (rc < 0)
+ return rc;
+ /* The compare buffer in gtp_check_product_id() holds at most
+ * GTP_PRODUCT_ID_MAXSIZE bytes including the NUL; reject longer
+ * strings explicitly instead of returning rc == 0 (success).
+ */
+ if (strlen(pdata->product_id) >= GTP_PRODUCT_ID_MAXSIZE)
+ return -EINVAL;
+
+ prop = of_find_property(np, "goodix,button-map", NULL);
+ if (prop) {
+ num_buttons = prop->length / sizeof(temp_val);
+ if (num_buttons > MAX_BUTTONS)
+ return -EINVAL;
+
+ rc = of_property_read_u32_array(np,
+ "goodix,button-map", button_map,
+ num_buttons);
+ if (rc) {
+ dev_err(dev, "Unable to read key codes\n");
+ return rc;
+ }
+ }
+
+ read_cfg_num = 0;
+ for (i = 0; i < GOODIX_MAX_CFG_GROUP; i++) {
+ snprintf(prop_name, sizeof(prop_name), "goodix,cfg-data%d", i);
+ prop = of_find_property(np, prop_name,
+ &pdata->config_data_len[i]);
+ if (!prop || !prop->value) {
+ pdata->config_data_len[i] = 0;
+ pdata->config_data[i] = NULL;
+ continue;
+ }
+ /* The DT blob is copied into a fixed-size buffer below;
+ * bound-check the DT-supplied length to avoid overflowing
+ * the GTP_CONFIG_MAX_LENGTH allocation.
+ */
+ if (pdata->config_data_len[i] > GTP_CONFIG_MAX_LENGTH) {
+ dev_err(dev,
+ "Invalid config data length %d for group %d\n",
+ pdata->config_data_len[i], i);
+ return -EINVAL;
+ }
+ pdata->config_data[i] = devm_kzalloc(dev,
+ GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH,
+ GFP_KERNEL);
+ if (!pdata->config_data[i]) {
+ dev_err(dev,
+ "Not enough memory for panel config data %d\n",
+ i);
+ return -ENOMEM;
+ }
+ pdata->config_data[i][0] = GTP_REG_CONFIG_DATA >> 8;
+ pdata->config_data[i][1] = GTP_REG_CONFIG_DATA & 0xff;
+ memcpy(&pdata->config_data[i][GTP_ADDR_LENGTH],
+ prop->value, pdata->config_data_len[i]);
+ read_cfg_num++;
+ }
+ dev_dbg(dev, "%d config data read from device tree.\n", read_cfg_num);
+
+ return 0;
+}
+
/*******************************************************
Function:
I2c probe.
@@ -1359,38 +1624,69 @@ Output:
static int goodix_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct goodix_ts_platform_data *pdata;
struct goodix_ts_data *ts;
u16 version_info;
int ret;
dev_dbg(&client->dev, "GTP I2C Address: 0x%02x\n", client->addr);
+ if (client->dev.of_node) {
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct goodix_ts_platform_data), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = goodix_parse_dt(&client->dev, pdata);
+ if (ret)
+ return ret;
+ } else {
+ pdata = client->dev.platform_data;
+ }
+
+ if (!pdata) {
+ dev_err(&client->dev, "GTP invalid pdata\n");
+ return -EINVAL;
+ }
#if GTP_ESD_PROTECT
i2c_connect_client = client;
#endif
+
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "GTP I2C not supported\n");
return -ENODEV;
}
- ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
if (!ts)
return -ENOMEM;
memset(ts, 0, sizeof(*ts));
ts->client = client;
- /* For kernel 2.6.39 later we spin_lock_init(&ts->irq_lock)
+ ts->pdata = pdata;
+ /* For 2.6.39 & later use spin_lock_init(&ts->irq_lock)
* For 2.6.39 & before, use ts->irq_lock = SPIN_LOCK_UNLOCKED
*/
spin_lock_init(&ts->irq_lock);
i2c_set_clientdata(client, ts);
-
ts->gtp_rawdiff_mode = 0;
ret = gtp_request_io_port(ts);
if (ret) {
dev_err(&client->dev, "GTP request IO port failed.\n");
- goto exit_power_off;
+ goto exit_free_client_data;
+ }
+
+ ret = goodix_power_init(ts);
+ if (ret) {
+ dev_err(&client->dev, "GTP power init failed\n");
+ goto exit_free_io_port;
+ }
+
+ ret = goodix_power_on(ts);
+ if (ret) {
+ dev_err(&client->dev, "GTP power on failed\n");
+ goto exit_deinit_power;
}
gtp_reset_guitar(ts, 20);
@@ -1398,7 +1694,7 @@ static int goodix_ts_probe(struct i2c_client *client,
ret = gtp_i2c_test(client);
if (ret != 2) {
dev_err(&client->dev, "I2C communication ERROR!\n");
- goto exit_free_io_port;
+ goto exit_power_off;
}
#if GTP_AUTO_UPDATE
@@ -1424,6 +1720,20 @@ static int goodix_ts_probe(struct i2c_client *client,
goto exit_free_inputdev;
}
+#if defined(CONFIG_FB)
+ ts->fb_notif.notifier_call = fb_notifier_callback;
+ ret = fb_register_client(&ts->fb_notif);
+ if (ret)
+ dev_err(&ts->client->dev,
+ "Unable to register fb_notifier: %d\n",
+ ret);
+#elif defined(CONFIG_HAS_EARLYSUSPEND)
+ ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ts->early_suspend.suspend = goodix_ts_early_suspend;
+ ts->early_suspend.resume = goodix_ts_late_resume;
+ register_early_suspend(&ts->early_suspend);
+#endif
+
ts->goodix_wq = create_singlethread_workqueue("goodix_wq");
INIT_WORK(&ts->work, goodix_ts_work_func);
@@ -1433,9 +1743,13 @@ static int goodix_ts_probe(struct i2c_client *client,
else
dev_info(&client->dev, "GTP works in interrupt mode.\n");
- ret = gtp_read_version(client, &version_info);
- if (ret != 2) {
- dev_err(&client->dev, "Read version failed.\n");
+ ret = gtp_read_fw_version(client, &version_info);
+ if (ret != 2)
+ dev_err(&client->dev, "GTP firmware version read failed.\n");
+
+ ret = gtp_check_product_id(client);
+ if (ret != 0) {
+ dev_err(&client->dev, "GTP Product id doesn't match.\n");
goto exit_free_irq;
}
if (ts->use_irq)
@@ -1451,6 +1765,13 @@ static int goodix_ts_probe(struct i2c_client *client,
init_done = true;
return 0;
exit_free_irq:
+#if defined(CONFIG_FB)
+ if (fb_unregister_client(&ts->fb_notif))
+ dev_err(&client->dev,
+ "Error occurred while unregistering fb_notifier.\n");
+#elif defined(CONFIG_HAS_EARLYSUSPEND)
+ unregister_early_suspend(&ts->early_suspend);
+#endif
if (ts->use_irq)
free_irq(client->irq, ts);
else
@@ -1466,10 +1787,17 @@ exit_free_irq:
}
exit_free_inputdev:
kfree(ts->config_data);
-exit_free_io_port:
exit_power_off:
+ goodix_power_off(ts);
+exit_deinit_power:
+ goodix_power_deinit(ts);
+exit_free_io_port:
+ if (gpio_is_valid(pdata->reset_gpio))
+ gpio_free(pdata->reset_gpio);
+ if (gpio_is_valid(pdata->irq_gpio))
+ gpio_free(pdata->irq_gpio);
+exit_free_client_data:
i2c_set_clientdata(client, NULL);
- kfree(ts);
return ret;
}
@@ -1485,8 +1813,11 @@ static int goodix_ts_remove(struct i2c_client *client)
{
struct goodix_ts_data *ts = i2c_get_clientdata(client);
- GTP_DEBUG_FUNC();
-#ifdef CONFIG_HAS_EARLYSUSPEND
+#if defined(CONFIG_FB)
+ if (fb_unregister_client(&ts->fb_notif))
+ dev_err(&client->dev,
+ "Error occurred while unregistering fb_notifier.\n");
+#elif defined(CONFIG_HAS_EARLYSUSPEND)
unregister_early_suspend(&ts->early_suspend);
#endif
@@ -1515,21 +1846,21 @@ static int goodix_ts_remove(struct i2c_client *client)
input_free_device(ts->input_dev);
ts->input_dev = NULL;
}
- kfree(ts->config_data);
if (gpio_is_valid(ts->pdata->reset_gpio))
gpio_free(ts->pdata->reset_gpio);
if (gpio_is_valid(ts->pdata->irq_gpio))
gpio_free(ts->pdata->irq_gpio);
+ goodix_power_off(ts);
+ goodix_power_deinit(ts);
i2c_set_clientdata(client, NULL);
- kfree(ts);
}
return 0;
}
-#ifdef CONFIG_HAS_EARLYSUSPEND
+#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_FB)
/*******************************************************
Function:
Early suspend function.
@@ -1538,14 +1869,9 @@ Input:
Output:
None.
*******************************************************/
-static void goodix_ts_early_suspend(struct early_suspend *h)
+static void goodix_ts_suspend(struct goodix_ts_data *ts)
{
- struct goodix_ts_data *ts;
- s8 ret = -1;
-
- ts = container_of(h, struct goodix_ts_data, early_suspend);
-
- GTP_DEBUG_FUNC();
+ int ret = -1, i;
#if GTP_ESD_PROTECT
ts->gtp_is_suspend = 1;
@@ -1559,6 +1885,12 @@ static void goodix_ts_early_suspend(struct early_suspend *h)
gtp_irq_disable(ts);
else
hrtimer_cancel(&ts->timer);
+
+ for (i = 0; i < GTP_MAX_TOUCH; i++)
+ gtp_touch_up(ts, i);
+
+ input_sync(ts->input_dev);
+
ret = gtp_enter_sleep(ts);
#endif
if (ret < 0)
@@ -1577,14 +1909,9 @@ Input:
Output:
None.
*******************************************************/
-static void goodix_ts_late_resume(struct early_suspend *h)
+static void goodix_ts_resume(struct goodix_ts_data *ts)
{
- struct goodix_ts_data *ts;
- s8 ret = -1;
-
- ts = container_of(h, struct goodix_ts_data, early_suspend);
-
- GTP_DEBUG_FUNC();
+ int ret = -1;
ret = gtp_wakeup_sleep(ts);
@@ -1593,7 +1920,7 @@ static void goodix_ts_late_resume(struct early_suspend *h)
#endif
if (ret < 0)
- dev_err(&ts->client->dev, "GTP later resume failed.\n");
+ dev_err(&ts->client->dev, "GTP resume failed.\n");
if (ts->use_irq)
gtp_irq_enable(ts);
@@ -1606,18 +1933,72 @@ static void goodix_ts_late_resume(struct early_suspend *h)
gtp_esd_switch(ts->client, SWITCH_ON);
#endif
}
+
+#if defined(CONFIG_FB)
+static int fb_notifier_callback(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ struct fb_event *evdata = data;
+ int *blank;
+ struct goodix_ts_data *ts =
+ container_of(self, struct goodix_ts_data, fb_notif);
+
+ if (evdata && evdata->data && event == FB_EVENT_BLANK &&
+ ts && ts->client) {
+ blank = evdata->data;
+ if (*blank == FB_BLANK_UNBLANK)
+ goodix_ts_resume(ts);
+ else if (*blank == FB_BLANK_POWERDOWN)
+ goodix_ts_suspend(ts);
+ }
+
+ return 0;
+}
+#elif defined(CONFIG_HAS_EARLYSUSPEND)
+/*
+ * Function:
+ * Early suspend function.
+ * Input:
+ * h: early_suspend struct.
+ * Output:
+ * None.
+ */
+static void goodix_ts_early_suspend(struct early_suspend *h)
+{
+ struct goodix_ts_data *ts;
+
+ ts = container_of(h, struct goodix_ts_data, early_suspend);
+ goodix_ts_suspend(ts);
+}
+
+/*
+ * Function:
+ * Late resume function.
+ * Input:
+ * h: early_suspend struct.
+ * Output:
+ * None.
+ */
+static void goodix_ts_late_resume(struct early_suspend *h)
+{
+ struct goodix_ts_data *ts;
+
+ ts = container_of(h, struct goodix_ts_data, early_suspend);
+ goodix_ts_late_resume(ts);
+}
#endif
+#endif /* !CONFIG_HAS_EARLYSUSPEND && !CONFIG_FB*/
#if GTP_ESD_PROTECT
-/*******************************************************
-Function:
- switch on & off esd delayed work
-Input:
- client: i2c device
- on: SWITCH_ON / SWITCH_OFF
-Output:
- void
-*********************************************************/
+/*
+ * Function:
+ * switch on & off esd delayed work
+ * Input:
+ * client: i2c device
+ * on: SWITCH_ON / SWITCH_OFF
+ * Output:
+ * void
+ */
void gtp_esd_switch(struct i2c_client *client, int on)
{
struct goodix_ts_data *ts;
@@ -1658,21 +2039,18 @@ static int gtp_init_ext_watchdog(struct i2c_client *client)
int ret;
int retries = 0;
- GTP_DEBUG("Init external watchdog...");
- GTP_DEBUG_FUNC();
-
msg.flags = !I2C_M_RD;
msg.addr = client->addr;
msg.len = 4;
msg.buf = opr_buffer;
- while (retries < 5) {
+ while (retries < GTP_I2C_RETRY_5) {
ret = i2c_transfer(client->adapter, &msg, 1);
if (ret == 1)
return 1;
retries++;
}
- if (retries >= 5)
+ if (retries == GTP_I2C_RETRY_5)
dev_err(&client->dev, "init external watchdog failed!");
return 0;
}
@@ -1688,13 +2066,11 @@ Output:
*******************************************************/
static void gtp_esd_check_func(struct work_struct *work)
{
- s32 i;
+ s32 retry;
s32 ret = -1;
struct goodix_ts_data *ts = NULL;
u8 test[4] = {0x80, 0x40};
- GTP_DEBUG_FUNC();
-
ts = i2c_get_clientdata(i2c_connect_client);
if (ts->gtp_is_suspend) {
@@ -1707,17 +2083,16 @@ static void gtp_esd_check_func(struct work_struct *work)
return;
#endif
- for (i = 0; i < 3; i++) {
+ for (retry = 0; retry < GTP_I2C_RETRY_3; retry++) {
ret = gtp_i2c_read(ts->client, test, 4);
- GTP_DEBUG("0x8040 = 0x%02X, 0x8041 = 0x%02X", test[2], test[3]);
if ((ret < 0)) {
/* IC works abnormally..*/
continue;
} else {
if ((test[2] == 0xAA) || (test[3] != 0xAA)) {
/* IC works abnormally..*/
- i = 3;
+ retry = GTP_I2C_RETRY_3;
break;
}
/* IC works normally, Write 0x8040 0xAA*/
@@ -1726,7 +2101,7 @@ static void gtp_esd_check_func(struct work_struct *work)
break;
}
}
- if (i >= 3) {
+ if (retry == GTP_I2C_RETRY_3) {
dev_err(&ts->client->dev,
"IC Working ABNORMALLY, Resetting Guitar...\n");
gtp_reset_guitar(ts, 50);
@@ -1749,6 +2124,11 @@ static const struct i2c_device_id goodix_ts_id[] = {
{ }
};
+static const struct of_device_id goodix_match_table[] = {
+ { .compatible = "goodix,gt9xx", },
+ { },
+};
+
static struct i2c_driver goodix_ts_driver = {
.probe = goodix_ts_probe,
.remove = goodix_ts_remove,
@@ -1760,6 +2140,7 @@ static struct i2c_driver goodix_ts_driver = {
.driver = {
.name = GTP_I2C_NAME,
.owner = THIS_MODULE,
+ .of_match_table = goodix_match_table,
},
};
@@ -1775,7 +2156,6 @@ static int __init goodix_ts_init(void)
{
int ret;
- GTP_DEBUG_FUNC();
#if GTP_ESD_PROTECT
INIT_DELAYED_WORK(&gtp_esd_check_work, gtp_esd_check_func);
gtp_esd_check_workqueue = create_workqueue("gtp_esd_check");
@@ -1794,7 +2174,6 @@ Output:
********************************************************/
static void __exit goodix_ts_exit(void)
{
- GTP_DEBUG_FUNC();
i2c_del_driver(&goodix_ts_driver);
}
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.h b/drivers/input/touchscreen/gt9xx/gt9xx.h
index 48fa2ad2faca..843e3d6c05b2 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.h
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.h
@@ -33,19 +33,22 @@
#include <linux/regulator/consumer.h>
#include <linux/firmware.h>
#include <linux/debugfs.h>
-#if defined(CONFIG_HAS_EARLYSUSPEND)
+
+#if defined(CONFIG_FB)
+#include <linux/notifier.h>
+#include <linux/fb.h>
+#elif defined(CONFIG_HAS_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#define GOODIX_SUSPEND_LEVEL 1
#endif
+#define GOODIX_MAX_CFG_GROUP 6
struct goodix_ts_platform_data {
int irq_gpio;
u32 irq_gpio_flags;
int reset_gpio;
u32 reset_gpio_flags;
- int ldo_en_gpio;
- u32 ldo_en_gpio_flags;
- u32 family_id;
+ const char *product_id;
u32 x_max;
u32 y_max;
u32 x_min;
@@ -56,6 +59,8 @@ struct goodix_ts_platform_data {
u32 panel_maxy;
bool no_force_update;
bool i2c_pull_up;
+ size_t config_data_len[GOODIX_MAX_CFG_GROUP];
+ u8 *config_data[GOODIX_MAX_CFG_GROUP];
};
struct goodix_ts_data {
spinlock_t irq_lock;
@@ -65,9 +70,6 @@ struct goodix_ts_data {
struct hrtimer timer;
struct workqueue_struct *goodix_wq;
struct work_struct work;
-#if defined(CONFIG_HAS_EARLYSUSPEND)
- struct early_suspend early_suspend;
-#endif
s32 irq_is_disabled;
s32 use_irq;
u16 abs_x_max;
@@ -84,6 +86,14 @@ struct goodix_ts_data {
u8 fixed_cfg;
u8 esd_running;
u8 fw_error;
+ struct regulator *avdd;
+ struct regulator *vdd;
+ struct regulator *vcc_i2c;
+#if defined(CONFIG_FB)
+ struct notifier_block fb_notif;
+#elif defined(CONFIG_HAS_EARLYSUSPEND)
+ struct early_suspend early_suspend;
+#endif
};
extern u16 show_len;
@@ -94,8 +104,7 @@ extern u16 total_len;
#define GTP_CHANGE_X2Y 0
#define GTP_DRIVER_SEND_CFG 1
#define GTP_HAVE_TOUCH_KEY 1
-#define GTP_POWER_CTRL_SLEEP 1
-#define GTP_ICS_SLOT_REPORT 0
+#define GTP_POWER_CTRL_SLEEP 0
/* auto updated by .bin file as default */
#define GTP_AUTO_UPDATE 0
@@ -112,7 +121,7 @@ extern u16 total_len;
/* double-click wakeup, function together with GTP_SLIDE_WAKEUP */
#define GTP_DBL_CLK_WAKEUP 0
-#define GTP_DEBUG_ON 1
+#define GTP_DEBUG_ON 0
#define GTP_DEBUG_ARRAY_ON 0
#define GTP_DEBUG_FUNC_ON 0
@@ -126,56 +135,7 @@ extern u16 total_len;
* GND NC/300K 3
* VDDIO NC/300K 4
* NC NC/300K 5
-*/
-/* Define your own default or for Sensor_ID == 0 config here */
-/* The predefined one is just a sample config,
- * which is not suitable for your tp in most cases.
*/
-#define CTP_CFG_GROUP1 {\
- 0x41, 0x1C, 0x02, 0xC0, 0x03, 0x0A, 0x05, 0x01, 0x01, 0x0F,\
- 0x23, 0x0F, 0x5F, 0x41, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00,\
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0x00, 0x0A,\
- 0x28, 0x00, 0xB8, 0x0B, 0x00, 0x00, 0x00, 0x9A, 0x03, 0x25,\
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x64, 0x32, 0x00, 0x00,\
- 0x00, 0x32, 0x8C, 0x94, 0x05, 0x01, 0x05, 0x00, 0x00, 0x96,\
- 0x0C, 0x22, 0xD8, 0x0E, 0x23, 0x56, 0x11, 0x25, 0xFF, 0x13,\
- 0x28, 0xA7, 0x15, 0x2E, 0x00, 0x00, 0x10, 0x30, 0x48, 0x00,\
- 0x56, 0x4A, 0x3A, 0xFF, 0xFF, 0x16, 0x00, 0x00, 0x00, 0x00,\
- 0x00, 0x01, 0x1B, 0x14, 0x0D, 0x19, 0x00, 0x00, 0x01, 0x00,\
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
- 0x00, 0x00, 0x1A, 0x18, 0x16, 0x14, 0x12, 0x10, 0x0E, 0x0C,\
- 0x0A, 0x08, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
- 0xFF, 0xFF, 0x1D, 0x1E, 0x1F, 0x20, 0x22, 0x24, 0x28, 0x29,\
- 0x0C, 0x0A, 0x08, 0x00, 0x02, 0x04, 0x05, 0x06, 0x0E, 0xFF,\
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
- 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0x01\
- }
-
-/* Define your config for Sensor_ID == 1 here, if needed */
-#define CTP_CFG_GROUP2 {\
- }
-
-/* Define your config for Sensor_ID == 2 here, if needed */
-#define CTP_CFG_GROUP3 {\
- }
-
-/* Define your config for Sensor_ID == 3 here, if needed */
-#define CTP_CFG_GROUP4 {\
- }
-
-/* Define your config for Sensor_ID == 4 here, if needed */
-#define CTP_CFG_GROUP5 {\
- }
-
-/* Define your config for Sensor_ID == 5 here, if needed */
-#define CTP_CFG_GROUP6 {\
- }
#define GTP_IRQ_TAB {\
IRQ_TYPE_EDGE_RISING,\
@@ -197,30 +157,38 @@ extern u16 total_len;
#define GTP_INT_TRIGGER GTP_IRQ_TAB_FALLING
#endif
-#define GTP_MAX_TOUCH 5
-#define GTP_ESD_CHECK_CIRCLE 2000 /* jiffy: ms */
+#define GTP_PRODUCT_ID_MAXSIZE 5
+#define GTP_PRODUCT_ID_BUFFER_MAXSIZE 6
+#define GTP_FW_VERSION_BUFFER_MAXSIZE 4
+#define GTP_MAX_TOUCH 5
+#define GTP_ESD_CHECK_CIRCLE 2000 /* jiffy: ms */
/***************************PART3:OTHER define*********************************/
-#define GTP_DRIVER_VERSION "V1.8<2013/06/08>"
-#define GTP_I2C_NAME "Goodix-TS"
-#define GTP_POLL_TIME 10 /* jiffy: ms*/
-#define GTP_ADDR_LENGTH 2
+#define GTP_DRIVER_VERSION "V1.8.1<2013/09/01>"
+#define GTP_I2C_NAME "Goodix-TS"
+#define GTP_POLL_TIME 10 /* jiffy: ms*/
+#define GTP_ADDR_LENGTH 2
#define GTP_CONFIG_MIN_LENGTH 186
#define GTP_CONFIG_MAX_LENGTH 240
-#define FAIL 0
-#define SUCCESS 1
-#define SWITCH_OFF 0
-#define SWITCH_ON 1
+#define FAIL 0
+#define SUCCESS 1
+#define SWITCH_OFF 0
+#define SWITCH_ON 1
/* Registers define */
-#define GTP_READ_COOR_ADDR 0x814E
-#define GTP_REG_SLEEP 0x8040
-#define GTP_REG_SENSOR_ID 0x814A
-#define GTP_REG_CONFIG_DATA 0x8047
-#define GTP_REG_VERSION 0x8140
-
-#define RESOLUTION_LOC 3
-#define TRIGGER_LOC 8
+#define GTP_READ_COOR_ADDR 0x814E
+#define GTP_REG_SLEEP 0x8040
+#define GTP_REG_SENSOR_ID 0x814A
+#define GTP_REG_CONFIG_DATA 0x8047
+#define GTP_REG_FW_VERSION 0x8144
+#define GTP_REG_PRODUCT_ID 0x8140
+
+#define GTP_I2C_RETRY_3 3
+#define GTP_I2C_RETRY_5 5
+#define GTP_I2C_RETRY_10 10
+
+#define RESOLUTION_LOC 3
+#define TRIGGER_LOC 8
/* Log define */
#define GTP_DEBUG(fmt, arg...) do {\
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx_update.c b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
index 29459b6d9ad1..9fcf7f0bef86 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx_update.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
@@ -109,24 +109,25 @@ Output:
*********************************************************/
s32 gup_i2c_read(struct i2c_client *client, u8 *buf, s32 len)
{
- struct i2c_msg msgs[2];
s32 ret = -1;
- s32 retries = 0;
+ u8 retries = 0;
+ struct i2c_msg msgs[2] = {
+ {
+ .flags = !I2C_M_RD,
+ .addr = client->addr,
+ .len = GTP_ADDR_LENGTH,
+ .buf = &buf[0],
+ },
+ {
+ .flags = I2C_M_RD,
+ .addr = client->addr,
+ .len = len - GTP_ADDR_LENGTH,
+ .buf = &buf[GTP_ADDR_LENGTH],
+ },
+ };
GTP_DEBUG_FUNC();
- msgs[0].flags = !I2C_M_RD;
- msgs[0].addr = client->addr;
- msgs[0].len = GTP_ADDR_LENGTH;
- msgs[0].buf = &buf[0];
- /* msgs[0].scl_rate = 300 * 1000; (for Rockchip) */
-
- msgs[1].flags = I2C_M_RD;
- msgs[1].addr = client->addr;
- msgs[1].len = len - GTP_ADDR_LENGTH;
- msgs[1].buf = &buf[GTP_ADDR_LENGTH];
- /* msgs[1].scl_rate = 300 * 1000; */
-
while (retries < 5) {
ret = i2c_transfer(client->adapter, msgs, 2);
if (ret == 2)
@@ -134,6 +135,11 @@ s32 gup_i2c_read(struct i2c_client *client, u8 *buf, s32 len)
retries++;
}
+ if (retries == 5) {
+ dev_err(&client->dev, "I2C read retry limit over.\n");
+ ret = -EIO;
+ }
+
return ret;
}
@@ -151,18 +157,17 @@ Output:
*********************************************************/
s32 gup_i2c_write(struct i2c_client *client, u8 *buf, s32 len)
{
- struct i2c_msg msg;
s32 ret = -1;
- s32 retries = 0;
+ u8 retries = 0;
+ struct i2c_msg msg = {
+ .flags = !I2C_M_RD,
+ .addr = client->addr,
+ .len = len,
+ .buf = buf,
+ };
GTP_DEBUG_FUNC();
- msg.flags = !I2C_M_RD;
- msg.addr = client->addr;
- msg.len = len;
- msg.buf = buf;
- /* msg.scl_rate = 300 * 1000; (for Rockchip) */
-
while (retries < 5) {
ret = i2c_transfer(client->adapter, &msg, 1);
if (ret == 1)
@@ -170,6 +175,11 @@ s32 gup_i2c_write(struct i2c_client *client, u8 *buf, s32 len)
retries++;
}
+ if (retries == 5) {
+ dev_err(&client->dev, "I2C write retry limit over.\n");
+ ret = -EIO;
+ }
+
return ret;
}
@@ -288,7 +298,7 @@ static s32 gup_init_panel(struct goodix_ts_data *ts)
static u8 gup_get_ic_msg(struct i2c_client *client, u16 addr, u8 *msg, s32 len)
{
- s32 i = 0;
+ u8 i = 0;
msg[0] = (addr >> 8) & 0xff;
msg[1] = addr & 0xff;
@@ -307,12 +317,12 @@ static u8 gup_get_ic_msg(struct i2c_client *client, u16 addr, u8 *msg, s32 len)
static u8 gup_set_ic_msg(struct i2c_client *client, u16 addr, u8 val)
{
- s32 i = 0;
- u8 msg[3];
-
- msg[0] = (addr >> 8) & 0xff;
- msg[1] = addr & 0xff;
- msg[2] = val;
+ u8 i = 0;
+ u8 msg[3] = {
+ (addr >> 8) & 0xff,
+ addr & 0xff,
+ val,
+ };
for (i = 0; i < 5; i++)
if (gup_i2c_write(client, msg, GTP_ADDR_LENGTH + 1) > 0)
@@ -411,7 +421,7 @@ static u8 gup_get_ic_fw_msg(struct i2c_client *client)
s32 gup_enter_update_mode(struct i2c_client *client)
{
s32 ret = -1;
- s32 retry = 0;
+ u8 retry = 0;
u8 rd_buf[3];
/* step1:RST output low last at least 2ms */
@@ -575,11 +585,11 @@ static u8 ascii2hex(u8 a)
static s8 gup_update_config(struct i2c_client *client)
{
- s32 file_len = 0;
+ u32 file_len = 0;
s32 ret = 0;
s32 i = 0;
s32 file_cfg_len = 0;
- s32 chip_cfg_len = 0;
+ u32 chip_cfg_len = 0;
s32 count = 0;
u8 *buf;
u8 *pre_buf;
@@ -615,9 +625,19 @@ static s8 gup_update_config(struct i2c_client *client)
return -EINVAL;
}
- buf = kzalloc(file_len, GFP_KERNEL);
- pre_buf = kzalloc(file_len, GFP_KERNEL);
- file_config = kzalloc(chip_cfg_len + GTP_ADDR_LENGTH, GFP_KERNEL);
+ buf = devm_kzalloc(&client->dev, file_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pre_buf = devm_kzalloc(&client->dev, file_len, GFP_KERNEL);
+ if (!pre_buf)
+ return -ENOMEM;
+
+ file_config = devm_kzalloc(&client->dev, chip_cfg_len + GTP_ADDR_LENGTH,
+ GFP_KERNEL);
+ if (!file_config)
+ return -ENOMEM;
+
update_msg.cfg_file->f_op->llseek(update_msg.cfg_file, 0, SEEK_SET);
GTP_DEBUG("[update_cfg]Read config from file.");
@@ -625,7 +645,7 @@ static s8 gup_update_config(struct i2c_client *client)
(char *)pre_buf, file_len, &update_msg.cfg_file->f_pos);
if (ret < 0) {
GTP_ERROR("[update_cfg]Read config file failed.");
- goto update_cfg_file_failed;
+ return ret;
}
GTP_DEBUG("[update_cfg]Delete illegal character.");
@@ -650,13 +670,13 @@ static s8 gup_update_config(struct i2c_client *client)
if ((high == 0xFF) || (low == 0xFF)) {
ret = 0;
GTP_ERROR("[update_cfg]Illegal config file.");
- goto update_cfg_file_failed;
+ return ret;
}
file_config[file_cfg_len++] = (high<<4) + low;
} else {
ret = 0;
GTP_ERROR("[update_cfg]Illegal config file.");
- goto update_cfg_file_failed;
+ return ret;
}
}
@@ -680,10 +700,6 @@ static s8 gup_update_config(struct i2c_client *client)
GTP_ERROR("[update_cfg]Send config i2c error.");
}
-update_cfg_file_failed:
- kfree(pre_buf);
- kfree(buf);
- kfree(file_config);
return ret;
}
@@ -787,16 +803,22 @@ static u8 gup_check_update_file(struct i2c_client *client, st_fw_head *fw_head,
sizeof(UPDATE_FILE_PATH_2));
u8 cfp_len = max(sizeof(CONFIG_FILE_PATH_1),
sizeof(CONFIG_FILE_PATH_2));
- u8 *search_update_path = kzalloc(fp_len, GFP_KERNEL);
- u8 *search_cfg_path = kzalloc(cfp_len, GFP_KERNEL);
+
+ u8 *search_update_path = devm_kzalloc(&client->dev, fp_len,
+ GFP_KERNEL);
+ if (!search_update_path)
+ goto load_failed;
+
+ u8 *search_cfg_path = devm_kzalloc(&client->dev, cfp_len,
+ GFP_KERNEL);
+ if (!search_cfg_path)
+ goto load_failed;
/* Begin to search update file,the config file & firmware
* file must be in the same path,single or double.
*/
searching_file = 1;
for (i = 0; i < GUP_SEARCH_FILE_TIMES; i++) {
if (searching_file == 0) {
- kfree(search_update_path);
- kfree(search_cfg_path);
GTP_INFO(".bin/.cfg update file search ",
"forcely terminated!");
return FAIL;
@@ -843,8 +865,6 @@ static u8 gup_check_update_file(struct i2c_client *client, st_fw_head *fw_head,
}
searching_file = 0;
- kfree(search_update_path);
- kfree(search_cfg_path);
if (!got_file_flag) {
GTP_ERROR("Can't find update file.");
@@ -1168,7 +1188,8 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
/* step1:alloc memory */
GTP_DEBUG("[burn_dsp_isp]step1:alloc memory");
while (retry++ < 5) {
- fw_dsp_isp = kzalloc(FW_DSP_ISP_LENGTH, GFP_KERNEL);
+ fw_dsp_isp = devm_kzalloc(&client->dev, FW_DSP_ISP_LENGTH,
+ GFP_KERNEL);
if (fw_dsp_isp == NULL) {
continue;
} else {
@@ -1177,7 +1198,7 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
break;
}
}
- if (retry >= 5) {
+ if (retry == 5) {
GTP_ERROR("[burn_dsp_isp]Alloc memory fail,exit.");
return FAIL;
}
@@ -1188,7 +1209,7 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
FW_DSP_LENGTH + FW_BOOT_LENGTH), FW_DSP_ISP_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_dsp_isp]load firmware dsp_isp fail.");
- goto exit_burn_dsp_isp;
+ return FAIL;
}
/* step3:disable wdt,clear cache enable */
@@ -1196,14 +1217,12 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _bRW_MISCTL__TMR0_EN, 0x00);
if (ret <= 0) {
GTP_ERROR("[burn_dsp_isp]disable wdt fail.");
- ret = FAIL;
- goto exit_burn_dsp_isp;
+ return FAIL;
}
ret = gup_set_ic_msg(client, _bRW_MISCTL__CACHE_EN, 0x00);
if (ret <= 0) {
GTP_ERROR("[burn_dsp_isp]clear cache enable fail.");
- ret = FAIL;
- goto exit_burn_dsp_isp;
+ return FAIL;
}
/* step4:hold ss51 & dsp */
@@ -1211,8 +1230,7 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
if (ret <= 0) {
GTP_ERROR("[burn_dsp_isp]hold ss51 & dsp fail.");
- ret = FAIL;
- goto exit_burn_dsp_isp;
+ return FAIL;
}
/* step5:set boot from sram */
@@ -1220,8 +1238,7 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOTCTL_B0_, 0x02);
if (ret <= 0) {
GTP_ERROR("[burn_dsp_isp]set boot from sram fail.");
- ret = FAIL;
- goto exit_burn_dsp_isp;
+ return FAIL;
}
/* step6:software reboot */
@@ -1229,8 +1246,7 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _bWO_MISCTL__CPU_SWRST_PULSE, 0x01);
if (ret <= 0) {
GTP_ERROR("[burn_dsp_isp]software reboot fail.");
- ret = FAIL;
- goto exit_burn_dsp_isp;
+ return FAIL;
}
/* step7:select bank2 */
@@ -1238,8 +1254,7 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x02);
if (ret <= 0) {
GTP_ERROR("[burn_dsp_isp]select bank2 fail.");
- ret = FAIL;
- goto exit_burn_dsp_isp;
+ return FAIL;
}
/* step8:enable accessing code */
@@ -1247,8 +1262,7 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _bRW_MISCTL__MEM_CD_EN, 0x01);
if (ret <= 0) {
GTP_ERROR("[burn_dsp_isp]enable accessing code fail.");
- ret = FAIL;
- goto exit_burn_dsp_isp;
+ return FAIL;
}
/* step9:burn 4k dsp_isp */
@@ -1256,7 +1270,7 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
ret = gup_burn_proc(client, fw_dsp_isp, 0xC000, FW_DSP_ISP_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_dsp_isp]burn dsp_isp fail.");
- goto exit_burn_dsp_isp;
+ return FAIL;
}
/* step10:set scramble */
@@ -1264,14 +1278,10 @@ static u8 gup_burn_dsp_isp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
if (ret <= 0) {
GTP_ERROR("[burn_dsp_isp]set scramble fail.");
- ret = FAIL;
- goto exit_burn_dsp_isp;
+ return FAIL;
}
- ret = SUCCESS;
-exit_burn_dsp_isp:
- kfree(fw_dsp_isp);
- return ret;
+ return SUCCESS;
}
static u8 gup_burn_fw_ss51(struct i2c_client *client)
@@ -1285,7 +1295,8 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
/* step1:alloc memory */
GTP_DEBUG("[burn_fw_ss51]step1:alloc memory");
while (retry++ < 5) {
- fw_ss51 = kzalloc(FW_SECTION_LENGTH, GFP_KERNEL);
+ fw_ss51 = devm_kzalloc(&client->dev, FW_SECTION_LENGTH,
+ GFP_KERNEL);
if (fw_ss51 == NULL) {
continue;
} else {
@@ -1294,7 +1305,7 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
break;
}
}
- if (retry >= 5) {
+ if (retry == 5) {
GTP_ERROR("[burn_fw_ss51]Alloc memory fail,exit.");
return FAIL;
}
@@ -1304,7 +1315,7 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
ret = gup_load_section_file(fw_ss51, 0, FW_SECTION_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 1 fail.");
- goto exit_burn_fw_ss51;
+ return FAIL;
}
/* step3:clear control flag */
@@ -1312,8 +1323,7 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x00);
if (ret <= 0) {
GTP_ERROR("[burn_fw_ss51]clear control flag fail.");
- ret = FAIL;
- goto exit_burn_fw_ss51;
+ return FAIL;
}
/* step4:burn ss51 firmware section 1 */
@@ -1321,7 +1331,7 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
ret = gup_burn_fw_section(client, fw_ss51, 0xC000, 0x01);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 1 fail.");
- goto exit_burn_fw_ss51;
+ return FAIL;
}
/* step5:load ss51 firmware section 2 file data */
@@ -1330,7 +1340,7 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
FW_SECTION_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 2 fail.");
- goto exit_burn_fw_ss51;
+ return FAIL;
}
/* step6:burn ss51 firmware section 2 */
@@ -1338,7 +1348,7 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
ret = gup_burn_fw_section(client, fw_ss51, 0xE000, 0x02);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 2 fail.");
- goto exit_burn_fw_ss51;
+ return FAIL;
}
/* step7:load ss51 firmware section 3 file data */
@@ -1347,7 +1357,7 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
FW_SECTION_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 3 fail.");
- goto exit_burn_fw_ss51;
+ return FAIL;
}
/* step8:burn ss51 firmware section 3 */
@@ -1355,7 +1365,7 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
ret = gup_burn_fw_section(client, fw_ss51, 0xC000, 0x13);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 3 fail.");
- goto exit_burn_fw_ss51;
+ return FAIL;
}
/* step9:load ss51 firmware section 4 file data */
@@ -1364,7 +1374,7 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
FW_SECTION_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_ss51]load ss51 firmware section 4 fail.");
- goto exit_burn_fw_ss51;
+ return FAIL;
}
/* step10:burn ss51 firmware section 4 */
@@ -1372,14 +1382,10 @@ static u8 gup_burn_fw_ss51(struct i2c_client *client)
ret = gup_burn_fw_section(client, fw_ss51, 0xE000, 0x14);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_ss51]burn ss51 firmware section 4 fail.");
- goto exit_burn_fw_ss51;
+ return FAIL;
}
- ret = SUCCESS;
-
-exit_burn_fw_ss51:
- kfree(fw_ss51);
- return ret;
+ return SUCCESS;
}
static u8 gup_burn_fw_dsp(struct i2c_client *client)
@@ -1393,7 +1399,8 @@ static u8 gup_burn_fw_dsp(struct i2c_client *client)
/* step1:alloc memory */
GTP_DEBUG("[burn_fw_dsp]step1:alloc memory");
while (retry++ < 5) {
- fw_dsp = kzalloc(FW_DSP_LENGTH, GFP_KERNEL);
+ fw_dsp = devm_kzalloc(&client->dev, FW_DSP_LENGTH,
+ GFP_KERNEL);
if (fw_dsp == NULL) {
continue;
} else {
@@ -1402,7 +1409,7 @@ static u8 gup_burn_fw_dsp(struct i2c_client *client)
break;
}
}
- if (retry >= 5) {
+ if (retry == 5) {
GTP_ERROR("[burn_fw_dsp]Alloc memory fail,exit.");
return FAIL;
}
@@ -1412,7 +1419,7 @@ static u8 gup_burn_fw_dsp(struct i2c_client *client)
ret = gup_load_section_file(fw_dsp, 4*FW_SECTION_LENGTH, FW_DSP_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_dsp]load firmware dsp fail.");
- goto exit_burn_fw_dsp;
+ return ret;
}
/* step3:select bank3 */
@@ -1420,8 +1427,7 @@ static u8 gup_burn_fw_dsp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x03);
if (ret <= 0) {
GTP_ERROR("[burn_fw_dsp]select bank3 fail.");
- ret = FAIL;
- goto exit_burn_fw_dsp;
+ return FAIL;
}
/* Step4:hold ss51 & dsp */
@@ -1429,8 +1435,7 @@ static u8 gup_burn_fw_dsp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
if (ret <= 0) {
GTP_ERROR("[burn_fw_dsp]hold ss51 & dsp fail.");
- ret = FAIL;
- goto exit_burn_fw_dsp;
+ return FAIL;
}
/* step5:set scramble */
@@ -1438,8 +1443,7 @@ static u8 gup_burn_fw_dsp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
if (ret <= 0) {
GTP_ERROR("[burn_fw_dsp]set scramble fail.");
- ret = FAIL;
- goto exit_burn_fw_dsp;
+ return FAIL;
}
/* step6:release ss51 & dsp */
@@ -1447,8 +1451,7 @@ static u8 gup_burn_fw_dsp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x04);
if (ret <= 0) {
GTP_ERROR("[burn_fw_dsp]release ss51 & dsp fail.");
- ret = FAIL;
- goto exit_burn_fw_dsp;
+ return FAIL;
}
/* must delay */
msleep(20);
@@ -1458,7 +1461,7 @@ static u8 gup_burn_fw_dsp(struct i2c_client *client)
ret = gup_burn_proc(client, fw_dsp, 0x9000, FW_DSP_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_dsp]burn fw_section fail.");
- goto exit_burn_fw_dsp;
+ return ret;
}
/* step8:send burn cmd to move data to flash from sram */
@@ -1467,14 +1470,14 @@ static u8 gup_burn_fw_dsp(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x05);
if (ret <= 0) {
GTP_ERROR("[burn_fw_dsp]send burn cmd fail.");
- goto exit_burn_fw_dsp;
+ return ret;
}
GTP_DEBUG("[burn_fw_dsp]Wait for the burn is complete......");
do {
ret = gup_get_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, rd_buf, 1);
if (ret <= 0) {
GTP_ERROR("[burn_fw_dsp]Get burn state fail");
- goto exit_burn_fw_dsp;
+ return ret;
}
msleep(20);
/* GTP_DEBUG("[burn_fw_dsp]Get burn state:%d.",
@@ -1487,14 +1490,10 @@ static u8 gup_burn_fw_dsp(struct i2c_client *client)
ret = gup_recall_check(client, fw_dsp, 0x9000, FW_DSP_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_dsp]recall check 4k dsp firmware fail.");
- goto exit_burn_fw_dsp;
+ return ret;
}
ret = SUCCESS;
-
-exit_burn_fw_dsp:
- kfree(fw_dsp);
- return ret;
}
static u8 gup_burn_fw_boot(struct i2c_client *client)
@@ -1509,7 +1508,8 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
/* step1:Alloc memory */
GTP_DEBUG("[burn_fw_boot]step1:Alloc memory");
while (retry++ < 5) {
- fw_boot = kzalloc(FW_BOOT_LENGTH, GFP_KERNEL);
+ fw_boot = devm_kzalloc(&client->dev, FW_BOOT_LENGTH,
+ GFP_KERNEL);
if (fw_boot == NULL) {
continue;
} else {
@@ -1518,7 +1518,7 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
break;
}
}
- if (retry >= 5) {
+ if (retry == 5) {
GTP_ERROR("[burn_fw_boot]Alloc memory fail,exit.");
return FAIL;
}
@@ -1529,7 +1529,7 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
FW_DSP_LENGTH), FW_BOOT_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_boot]load firmware dsp fail.");
- goto exit_burn_fw_boot;
+ return ret;
}
/* step3:hold ss51 & dsp */
@@ -1537,8 +1537,7 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x0C);
if (ret <= 0) {
GTP_ERROR("[burn_fw_boot]hold ss51 & dsp fail.");
- ret = FAIL;
- goto exit_burn_fw_boot;
+ return FAIL;
}
/* step4:set scramble */
@@ -1546,8 +1545,7 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_OPT_B0_, 0x00);
if (ret <= 0) {
GTP_ERROR("[burn_fw_boot]set scramble fail.");
- ret = FAIL;
- goto exit_burn_fw_boot;
+ return FAIL;
}
/* step5:release ss51 & dsp */
@@ -1555,8 +1553,7 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x04);
if (ret <= 0) {
GTP_ERROR("[burn_fw_boot]release ss51 & dsp fail.");
- ret = FAIL;
- goto exit_burn_fw_boot;
+ return FAIL;
}
/* must delay */
msleep(20);
@@ -1566,8 +1563,7 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
ret = gup_set_ic_msg(client, _bRW_MISCTL__SRAM_BANK, 0x03);
if (ret <= 0) {
GTP_ERROR("[burn_fw_boot]select bank3 fail.");
- ret = FAIL;
- goto exit_burn_fw_boot;
+ return FAIL;
}
/* step7:burn 2k bootloader firmware */
@@ -1575,7 +1571,7 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
ret = gup_burn_proc(client, fw_boot, 0x9000, FW_BOOT_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_boot]burn fw_section fail.");
- goto exit_burn_fw_boot;
+ return ret;
}
/* step7:send burn cmd to move data to flash from sram */
@@ -1584,14 +1580,14 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x06);
if (ret <= 0) {
GTP_ERROR("[burn_fw_boot]send burn cmd fail.");
- goto exit_burn_fw_boot;
+ return ret;
}
GTP_DEBUG("[burn_fw_boot]Wait for the burn is complete......");
do {
ret = gup_get_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, rd_buf, 1);
if (ret <= 0) {
GTP_ERROR("[burn_fw_boot]Get burn state fail");
- goto exit_burn_fw_boot;
+ return ret;
}
msleep(20);
/* GTP_DEBUG("[burn_fw_boot]Get burn state:%d.",
@@ -1604,7 +1600,7 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
ret = gup_recall_check(client, fw_boot, 0x9000, FW_BOOT_LENGTH);
if (ret == FAIL) {
GTP_ERROR("[burn_fw_boot]recall check 4k dsp firmware fail.");
- goto exit_burn_fw_boot;
+ return ret;
}
/* step9:enable download DSP code */
@@ -1612,8 +1608,7 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__BOOT_CTL_, 0x99);
if (ret <= 0) {
GTP_ERROR("[burn_fw_boot]enable download DSP code fail.");
- ret = FAIL;
- goto exit_burn_fw_boot;
+ return FAIL;
}
/* step10:release ss51 & hold dsp */
@@ -1621,15 +1616,10 @@ static u8 gup_burn_fw_boot(struct i2c_client *client)
ret = gup_set_ic_msg(client, _rRW_MISCTL__SWRST_B0_, 0x08);
if (ret <= 0) {
GTP_ERROR("[burn_fw_boot]release ss51 & hold dsp fail.");
- ret = FAIL;
- goto exit_burn_fw_boot;
+ return FAIL;
}
- ret = SUCCESS;
-
-exit_burn_fw_boot:
- kfree(fw_boot);
- return ret;
+ return SUCCESS;
}
s32 gup_update_proc(void *dir)
diff --git a/drivers/input/touchscreen/it7258_ts_i2c.c b/drivers/input/touchscreen/it7258_ts_i2c.c
index 88b1d3c013c7..1a2afd1c1117 100644
--- a/drivers/input/touchscreen/it7258_ts_i2c.c
+++ b/drivers/input/touchscreen/it7258_ts_i2c.c
@@ -1383,7 +1383,7 @@ static int it7260_ts_chip_identify(struct it7260_ts_data *ts_data)
static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
{
return (regulator_count_voltages(reg) > 0) ?
- regulator_set_optimum_mode(reg, load_uA) : 0;
+ regulator_set_load(reg, load_uA) : 0;
}
static int it7260_regulator_configure(struct it7260_ts_data *ts_data, bool on)
@@ -1950,7 +1950,7 @@ static int it7260_ts_probe(struct i2c_client *client,
dev_err(&client->dev, "Unable to register fb_notifier %d\n",
ret);
#endif
-
+
it7260_i2c_write_no_ready_check(ts_data, BUF_COMMAND, cmd_start,
sizeof(cmd_start));
msleep(pdata->reset_delay);
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
index eb4947536230..d358f329e7a8 100644
--- a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.c
@@ -117,7 +117,6 @@
static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
bool *was_in_bl_mode);
static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data);
-static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data);
static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
bool rebuild);
@@ -652,8 +651,6 @@ static struct kobj_attribute virtual_key_map_attr = {
#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
static void synaptics_secure_touch_init(struct synaptics_rmi4_data *data)
{
- int ret = 0;
-
data->st_initialized = 0;
init_completion(&data->st_powerdown);
init_completion(&data->st_irq_processed);
@@ -661,24 +658,21 @@ static void synaptics_secure_touch_init(struct synaptics_rmi4_data *data)
/* Get clocks */
data->core_clk = devm_clk_get(data->pdev->dev.parent, "core_clk");
if (IS_ERR(data->core_clk)) {
- ret = PTR_ERR(data->core_clk);
- data->core_clk = NULL;
dev_warn(data->pdev->dev.parent,
- "%s: error on clk_get(core_clk): %d\n", __func__, ret);
- return;
+ "%s: error on clk_get(core_clk): %ld\n", __func__,
+ PTR_ERR(data->core_clk));
+ data->core_clk = NULL;
}
data->iface_clk = devm_clk_get(data->pdev->dev.parent, "iface_clk");
if (IS_ERR(data->iface_clk)) {
- ret = PTR_ERR(data->iface_clk);
- data->iface_clk = NULL;
dev_warn(data->pdev->dev.parent,
- "%s: error on clk_get(iface_clk): %d\n", __func__, ret);
- return;
+ "%s: error on clk_get(iface_clk): %ld\n", __func__,
+ PTR_ERR(data->iface_clk));
+ data->iface_clk = NULL;
}
data->st_initialized = 1;
- return;
}
static void synaptics_secure_touch_notify(struct synaptics_rmi4_data *rmi4_data)
@@ -1691,12 +1685,6 @@ static void synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
}
if (status.unconfigured && !status.flash_prog) {
pr_notice("%s: spontaneous reset detected\n", __func__);
- retval = synaptics_rmi4_reinit_device(rmi4_data);
- if (retval < 0) {
- dev_err(rmi4_data->pdev->dev.parent,
- "%s: Failed to reinit device\n",
- __func__);
- }
}
if (!report)
@@ -3665,49 +3653,6 @@ exit:
return;
}
-static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data)
-{
- int retval;
- struct synaptics_rmi4_fn *fhandler;
- struct synaptics_rmi4_exp_fhandler *exp_fhandler;
- struct synaptics_rmi4_device_info *rmi;
-
- rmi = &(rmi4_data->rmi4_mod_info);
-
- mutex_lock(&(rmi4_data->rmi4_reset_mutex));
-
- synaptics_rmi4_free_fingers(rmi4_data);
-
- if (!list_empty(&rmi->support_fn_list)) {
- list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
- if (fhandler->fn_number == SYNAPTICS_RMI4_F12) {
- synaptics_rmi4_f12_set_enables(rmi4_data, 0);
- break;
- }
- }
- }
-
- retval = synaptics_rmi4_int_enable(rmi4_data, true);
- if (retval < 0)
- goto exit;
-
- mutex_lock(&exp_data.mutex);
- if (!list_empty(&exp_data.list)) {
- list_for_each_entry(exp_fhandler, &exp_data.list, link)
- if (exp_fhandler->exp_fn->reinit != NULL)
- exp_fhandler->exp_fn->reinit(rmi4_data);
- }
- mutex_unlock(&exp_data.mutex);
-
- synaptics_rmi4_set_configured(rmi4_data);
-
- retval = 0;
-
-exit:
- mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
- return retval;
-}
-
static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
bool rebuild)
{
@@ -3919,6 +3864,57 @@ exit:
}
EXPORT_SYMBOL(synaptics_rmi4_new_function);
+static int synaptics_dsx_pinctrl_init(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+
+ /* Get pinctrl if target uses pinctrl */
+ rmi4_data->ts_pinctrl = devm_pinctrl_get((rmi4_data->pdev->dev.parent));
+ if (IS_ERR_OR_NULL(rmi4_data->ts_pinctrl)) {
+ retval = PTR_ERR(rmi4_data->ts_pinctrl);
+ dev_err(rmi4_data->pdev->dev.parent,
+ "Target does not use pinctrl %d\n", retval);
+ goto err_pinctrl_get;
+ }
+
+ rmi4_data->pinctrl_state_active
+ = pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_active");
+ if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_active)) {
+ retval = PTR_ERR(rmi4_data->pinctrl_state_active);
+ dev_err(rmi4_data->pdev->dev.parent,
+ "Can not lookup %s pinstate %d\n",
+ PINCTRL_STATE_ACTIVE, retval);
+ goto err_pinctrl_lookup;
+ }
+
+ rmi4_data->pinctrl_state_suspend
+ = pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_suspend");
+ if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_suspend)) {
+ retval = PTR_ERR(rmi4_data->pinctrl_state_suspend);
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "Can not lookup %s pinstate %d\n",
+ PINCTRL_STATE_SUSPEND, retval);
+ goto err_pinctrl_lookup;
+ }
+
+ rmi4_data->pinctrl_state_release
+ = pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_release");
+ if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+ retval = PTR_ERR(rmi4_data->pinctrl_state_release);
+ dev_dbg(rmi4_data->pdev->dev.parent,
+ "Can not lookup %s pinstate %d\n",
+ PINCTRL_STATE_RELEASE, retval);
+ }
+
+ return 0;
+
+err_pinctrl_lookup:
+ devm_pinctrl_put(rmi4_data->ts_pinctrl);
+err_pinctrl_get:
+ rmi4_data->ts_pinctrl = NULL;
+ return retval;
+}
+
static int synaptics_rmi4_probe(struct platform_device *pdev)
{
int retval;
@@ -3988,6 +3984,21 @@ static int synaptics_rmi4_probe(struct platform_device *pdev)
goto err_enable_reg;
}
+ retval = synaptics_dsx_pinctrl_init(rmi4_data);
+ if (!retval && rmi4_data->ts_pinctrl) {
+ /*
+ * Pinctrl handle is optional. If pinctrl handle is found
+ * let pins to be configured in active state. If not
+ * found continue further without error.
+ */
+ retval = pinctrl_select_state(rmi4_data->ts_pinctrl,
+ rmi4_data->pinctrl_state_active);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s: Failed to select %s pinstate %d\n",
+ __func__, PINCTRL_STATE_ACTIVE, retval);
+ }
+ }
retval = synaptics_rmi4_set_gpio(rmi4_data);
if (retval < 0) {
dev_err(&pdev->dev,
@@ -4173,6 +4184,21 @@ err_ui_hw_init:
err_set_gpio:
synaptics_rmi4_enable_reg(rmi4_data, false);
+ if (rmi4_data->ts_pinctrl) {
+ if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+ devm_pinctrl_put(rmi4_data->ts_pinctrl);
+ rmi4_data->ts_pinctrl = NULL;
+ } else {
+ retval = pinctrl_select_state(
+ rmi4_data->ts_pinctrl,
+ rmi4_data->pinctrl_state_release);
+ if (retval)
+ dev_err(&pdev->dev,
+ "%s: Failed to create sysfs attributes\n",
+ __func__);
+ }
+ }
+
err_enable_reg:
synaptics_rmi4_get_reg(rmi4_data, false);
@@ -4185,6 +4211,7 @@ err_get_reg:
static int synaptics_rmi4_remove(struct platform_device *pdev)
{
unsigned char attr_count;
+ int err;
struct synaptics_rmi4_data *rmi4_data = platform_get_drvdata(pdev);
const struct synaptics_dsx_board_data *bdata =
rmi4_data->hw_if->board_data;
@@ -4240,6 +4267,22 @@ static int synaptics_rmi4_remove(struct platform_device *pdev)
if (bdata->power_gpio >= 0)
synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+ if (rmi4_data->ts_pinctrl) {
+ if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+ devm_pinctrl_put(rmi4_data->ts_pinctrl);
+ rmi4_data->ts_pinctrl = NULL;
+ } else {
+ err = pinctrl_select_state(
+ rmi4_data->ts_pinctrl,
+ rmi4_data->pinctrl_state_release);
+ if (err)
+ dev_err(&pdev->dev,
+ "Failed to select release pinctrl state %d\n",
+ err);
+ }
+ }
+
synaptics_rmi4_enable_reg(rmi4_data, false);
synaptics_rmi4_get_reg(rmi4_data, false);
@@ -4512,6 +4555,7 @@ static int synaptics_rmi4_suspend(struct device *dev)
{
struct synaptics_rmi4_exp_fhandler *exp_fhandler;
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ int retval;
if (rmi4_data->stay_awake)
return 0;
@@ -4530,6 +4574,13 @@ static int synaptics_rmi4_suspend(struct device *dev)
synaptics_rmi4_free_fingers(rmi4_data);
}
+ if (rmi4_data->ts_pinctrl) {
+ retval = pinctrl_select_state(rmi4_data->ts_pinctrl,
+ rmi4_data->pinctrl_state_suspend);
+ if (retval < 0)
+ dev_err(dev, "Cannot get idle pinctrl state\n");
+ goto err_pinctrl;
+ }
exit:
mutex_lock(&exp_data.mutex);
if (!list_empty(&exp_data.list)) {
@@ -4546,6 +4597,12 @@ exit:
rmi4_data->suspend = true;
return 0;
+
+err_pinctrl:
+ synaptics_rmi4_sleep_enable(rmi4_data, false);
+ synaptics_rmi4_irq_enable(rmi4_data, true, false);
+ return retval;
+
}
static int synaptics_rmi4_resume(struct device *dev)
@@ -4576,6 +4633,12 @@ static int synaptics_rmi4_resume(struct device *dev)
synaptics_rmi4_sleep_enable(rmi4_data, false);
synaptics_rmi4_irq_enable(rmi4_data, true, false);
+ if (rmi4_data->ts_pinctrl) {
+ retval = pinctrl_select_state(rmi4_data->ts_pinctrl,
+ rmi4_data->pinctrl_state_active);
+ if (retval < 0)
+ dev_err(dev, "Cannot get default pinctrl state\n");
+ }
exit:
#ifdef FB_READY_RESET
diff --git a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.h b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.h
index 6fa392473885..7d92791afb25 100644
--- a/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.h
+++ b/drivers/input/touchscreen/synaptics_dsx_2.6/synaptics_dsx_core.h
@@ -122,6 +122,9 @@
#define MASK_2BIT 0x03
#define MASK_1BIT 0x01
+#define PINCTRL_STATE_ACTIVE "pmx_ts_active"
+#define PINCTRL_STATE_SUSPEND "pmx_ts_suspend"
+#define PINCTRL_STATE_RELEASE "pmx_ts_release"
enum exp_fn {
RMI_DEV = 0,
RMI_FW_UPDATER,
@@ -382,6 +385,10 @@ struct synaptics_rmi4_data {
bool enable);
void (*report_touch)(struct synaptics_rmi4_data *rmi4_data,
struct synaptics_rmi4_fn *fhandler);
+ struct pinctrl *ts_pinctrl;
+ struct pinctrl_state *pinctrl_state_active;
+ struct pinctrl_state *pinctrl_state_suspend;
+ struct pinctrl_state *pinctrl_state_release;
#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26)
atomic_t st_enabled;
atomic_t st_pending_irqs;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 190d294197a7..0608e08b4427 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -96,7 +96,7 @@ static void gic_do_wait_for_rwp(void __iomem *base)
{
u32 count = 1000000; /* 1s! */
- while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+ while (readl_relaxed_no_log(base + GICD_CTLR) & GICD_CTLR_RWP) {
count--;
if (!count) {
pr_err_ratelimited("RWP timeout, gone fishing\n");
@@ -182,7 +182,7 @@ static int gic_peek_irq(struct irq_data *d, u32 offset)
else
base = gic_data.dist_base;
- return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+ return !!(readl_relaxed_no_log(base + offset + (gic_irq(d) / 32) * 4) & mask);
}
static void gic_poke_irq(struct irq_data *d, u32 offset)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 8810c9fb981a..998bd1ec0415 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -475,6 +475,18 @@ config DM_VERITY
If unsure, say N.
+config DM_VERITY_FEC
+ bool "Verity forward error correction support"
+ depends on DM_VERITY
+ select REED_SOLOMON
+ select REED_SOLOMON_DEC8
+ ---help---
+ Add forward error correction support to dm-verity. This option
+ makes it possible to use pre-generated error correction data to
+ recover from corrupted blocks.
+
+ If unsure, say N.
+
config DM_SWITCH
tristate "Switch target support (EXPERIMENTAL)"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 02de7d19853e..d470143dcf40 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -16,6 +16,7 @@ dm-cache-mq-y += dm-cache-policy-mq.o
dm-cache-smq-y += dm-cache-policy-smq.o
dm-cache-cleaner-y += dm-cache-policy-cleaner.o
dm-era-y += dm-era-target.o
+dm-verity-y += dm-verity-target.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o raid5-cache.o
@@ -64,3 +65,7 @@ obj-$(CONFIG_DM_REQ_CRYPT) += dm-req-crypt.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
endif
+
+ifeq ($(CONFIG_DM_VERITY_FEC),y)
+dm-verity-objs += dm-verity-fec.o
+endif
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 515f83e7d9ab..bb9b92ebbf8e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -118,14 +118,12 @@ static void iot_io_end(struct io_tracker *iot, sector_t len)
*/
struct dm_hook_info {
bio_end_io_t *bi_end_io;
- void *bi_private;
};
static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
bio_end_io_t *bi_end_io, void *bi_private)
{
h->bi_end_io = bio->bi_end_io;
- h->bi_private = bio->bi_private;
bio->bi_end_io = bi_end_io;
bio->bi_private = bi_private;
@@ -134,7 +132,6 @@ static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
bio->bi_end_io = h->bi_end_io;
- bio->bi_private = h->bi_private;
}
/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3147c8d09ea8..e85bcae50f65 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1864,16 +1864,24 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
+ cc->io_queue = alloc_workqueue("kcryptd_io",
+ WQ_HIGHPRI |
+ WQ_MEM_RECLAIM,
+ 1);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->crypt_queue = alloc_workqueue("kcryptd",
+ WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
else
- cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ cc->crypt_queue = alloc_workqueue("kcryptd",
+ WQ_HIGHPRI |
+ WQ_MEM_RECLAIM |
+ WQ_UNBOUND,
num_online_cpus());
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index e108deebbaaa..e4d1bafe78c1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -207,7 +207,6 @@ struct dm_snap_pending_exception {
*/
struct bio *full_bio;
bio_end_io_t *full_bio_end_io;
- void *full_bio_private;
};
/*
@@ -1495,10 +1494,8 @@ out:
snapshot_bios = bio_list_get(&pe->snapshot_bios);
origin_bios = bio_list_get(&pe->origin_bios);
full_bio = pe->full_bio;
- if (full_bio) {
+ if (full_bio)
full_bio->bi_end_io = pe->full_bio_end_io;
- full_bio->bi_private = pe->full_bio_private;
- }
increment_pending_exceptions_done_count();
up_write(&s->lock);
@@ -1604,7 +1601,6 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
- pe->full_bio_private = bio->bi_private;
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
new file mode 100644
index 000000000000..ad10d6d8ed28
--- /dev/null
+++ b/drivers/md/dm-verity-fec.c
@@ -0,0 +1,861 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Author: Sami Tolvanen <samitolvanen@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include "dm-verity-fec.h"
+#include <linux/math64.h>
+#include <linux/sysfs.h>
+
+#define DM_MSG_PREFIX "verity-fec"
+
+/*
+ * If error correction has been configured, returns true.
+ */
+bool verity_fec_is_enabled(struct dm_verity *v)
+{
+ return v->fec && v->fec->dev;
+}
+
+/*
+ * Return a pointer to dm_verity_fec_io after dm_verity_io and its variable
+ * length fields.
+ */
+static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
+{
+ return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
+}
+
+/*
+ * Return an interleaved offset for a byte in RS block.
+ */
+static inline u64 fec_interleave(struct dm_verity *v, u64 offset)
+{
+ u32 mod;
+
+ mod = do_div(offset, v->fec->rsn);
+ return offset + mod * (v->fec->rounds << v->data_dev_block_bits);
+}
+
+/*
+ * Decode an RS block using Reed-Solomon.
+ */
+static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
+ u8 *data, u8 *fec, int neras)
+{
+ int i;
+ uint16_t par[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN];
+
+ for (i = 0; i < v->fec->roots; i++)
+ par[i] = fec[i];
+
+ return decode_rs8(fio->rs, data, par, v->fec->rsn, NULL, neras,
+ fio->erasures, 0, NULL);
+}
+
+/*
+ * Read error-correcting codes for the requested RS block. Returns a pointer
+ * to the data block. Caller is responsible for releasing buf.
+ */
+static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+ unsigned *offset, struct dm_buffer **buf)
+{
+ u64 position, block;
+ u8 *res;
+
+ position = (index + rsb) * v->fec->roots;
+ block = position >> v->data_dev_block_bits;
+ *offset = (unsigned)(position - (block << v->data_dev_block_bits));
+
+ res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf);
+ if (unlikely(IS_ERR(res))) {
+ DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
+ v->data_dev->name, (unsigned long long)rsb,
+ (unsigned long long)(v->fec->start + block),
+ PTR_ERR(res));
+ *buf = NULL;
+ }
+
+ return res;
+}
+
+/* Loop over each preallocated buffer slot. */
+#define fec_for_each_prealloc_buffer(__i) \
+ for (__i = 0; __i < DM_VERITY_FEC_BUF_PREALLOC; __i++)
+
+/* Loop over each extra buffer slot. */
+#define fec_for_each_extra_buffer(io, __i) \
+ for (__i = DM_VERITY_FEC_BUF_PREALLOC; __i < DM_VERITY_FEC_BUF_MAX; __i++)
+
+/* Loop over each allocated buffer. */
+#define fec_for_each_buffer(io, __i) \
+ for (__i = 0; __i < (io)->nbufs; __i++)
+
+/* Loop over each RS block in each allocated buffer. */
+#define fec_for_each_buffer_rs_block(io, __i, __j) \
+ fec_for_each_buffer(io, __i) \
+ for (__j = 0; __j < 1 << DM_VERITY_FEC_BUF_RS_BITS; __j++)
+
+/*
+ * Return a pointer to the current RS block when called inside
+ * fec_for_each_buffer_rs_block.
+ */
+static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
+ struct dm_verity_fec_io *fio,
+ unsigned i, unsigned j)
+{
+ return &fio->bufs[i][j * v->fec->rsn];
+}
+
+/*
+ * Return an index to the current RS block when called inside
+ * fec_for_each_buffer_rs_block.
+ */
+static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j)
+{
+ return (i << DM_VERITY_FEC_BUF_RS_BITS) + j;
+}
+
+/*
+ * Decode all RS blocks from buffers and copy corrected bytes into fio->output
+ * starting from block_offset.
+ */
+static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
+ u64 rsb, int byte_index, unsigned block_offset,
+ int neras)
+{
+ int r, corrected = 0, res;
+ struct dm_buffer *buf;
+ unsigned n, i, offset;
+ u8 *par, *block;
+
+ par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+ if (IS_ERR(par))
+ return PTR_ERR(par);
+
+ /*
+ * Decode the RS blocks we have in bufs. Each RS block results in
+ * one corrected target byte and consumes fec->roots parity bytes.
+ */
+ fec_for_each_buffer_rs_block(fio, n, i) {
+ block = fec_buffer_rs_block(v, fio, n, i);
+ res = fec_decode_rs8(v, fio, block, &par[offset], neras);
+ if (res < 0) {
+ dm_bufio_release(buf);
+
+ r = res;
+ goto error;
+ }
+
+ corrected += res;
+ fio->output[block_offset] = block[byte_index];
+
+ block_offset++;
+ if (block_offset >= 1 << v->data_dev_block_bits)
+ goto done;
+
+ /* read the next block when we run out of parity bytes */
+ offset += v->fec->roots;
+ if (offset >= 1 << v->data_dev_block_bits) {
+ dm_bufio_release(buf);
+
+ par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+ if (unlikely(IS_ERR(par)))
+ return PTR_ERR(par);
+ }
+ }
+done:
+ r = corrected;
+error:
+ if (r < 0 && neras)
+ DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
+ v->data_dev->name, (unsigned long long)rsb, r);
+ else if (r > 0) {
+ DMWARN_LIMIT("%s: FEC %llu: corrected %d errors",
+ v->data_dev->name, (unsigned long long)rsb, r);
+ atomic_add_unless(&v->fec->corrected, 1, INT_MAX);
+ }
+
+ return r;
+}
+
+/*
+ * Locate data block erasures using verity hashes.
+ */
+static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *want_digest, u8 *data)
+{
+ if (unlikely(verity_hash(v, verity_io_hash_desc(v, io),
+ data, 1 << v->data_dev_block_bits,
+ verity_io_real_digest(v, io))))
+ return 0;
+
+ return memcmp(verity_io_real_digest(v, io), want_digest,
+ v->digest_size) != 0;
+}
+
+/*
+ * Read data blocks that are part of the RS block and deinterleave as much as
+ * fits into buffers. Check for erasure locations if @neras is non-NULL.
+ */
+static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
+ u64 rsb, u64 target, unsigned block_offset,
+ int *neras)
+{
+ bool is_zero;
+ int i, j, target_index = -1;
+ struct dm_buffer *buf;
+ struct dm_bufio_client *bufio;
+ struct dm_verity_fec_io *fio = fec_io(io);
+ u64 block, ileaved;
+ u8 *bbuf, *rs_block;
+ u8 want_digest[v->digest_size];
+ unsigned n, k;
+
+ if (neras)
+ *neras = 0;
+
+ /*
+ * read each of the rsn data blocks that are part of the RS block, and
+ * interleave contents to available bufs
+ */
+ for (i = 0; i < v->fec->rsn; i++) {
+ ileaved = fec_interleave(v, rsb * v->fec->rsn + i);
+
+ /*
+ * target is the data block we want to correct, target_index is
+ * the index of this block within the rsn RS blocks
+ */
+ if (ileaved == target)
+ target_index = i;
+
+ block = ileaved >> v->data_dev_block_bits;
+ bufio = v->fec->data_bufio;
+
+ if (block >= v->data_blocks) {
+ block -= v->data_blocks;
+
+ /*
+ * blocks outside the area were assumed to contain
+ * zeros when encoding data was generated
+ */
+ if (unlikely(block >= v->fec->hash_blocks))
+ continue;
+
+ block += v->hash_start;
+ bufio = v->bufio;
+ }
+
+ bbuf = dm_bufio_read(bufio, block, &buf);
+ if (unlikely(IS_ERR(bbuf))) {
+ DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
+ v->data_dev->name,
+ (unsigned long long)rsb,
+ (unsigned long long)block, PTR_ERR(bbuf));
+
+ /* assume the block is corrupted */
+ if (neras && *neras <= v->fec->roots)
+ fio->erasures[(*neras)++] = i;
+
+ continue;
+ }
+
+ /* locate erasures if the block is on the data device */
+ if (bufio == v->fec->data_bufio &&
+ verity_hash_for_block(v, io, block, want_digest,
+ &is_zero) == 0) {
+ /* skip known zero blocks entirely */
+ if (is_zero)
+ continue;
+
+ /*
+ * skip if we have already found the theoretical
+ * maximum number (i.e. fec->roots) of erasures
+ */
+ if (neras && *neras <= v->fec->roots &&
+ fec_is_erasure(v, io, want_digest, bbuf))
+ fio->erasures[(*neras)++] = i;
+ }
+
+ /*
+ * deinterleave and copy the bytes that fit into bufs,
+ * starting from block_offset
+ */
+ fec_for_each_buffer_rs_block(fio, n, j) {
+ k = fec_buffer_rs_index(n, j) + block_offset;
+
+ if (k >= 1 << v->data_dev_block_bits)
+ goto done;
+
+ rs_block = fec_buffer_rs_block(v, fio, n, j);
+ rs_block[i] = bbuf[k];
+ }
+done:
+ dm_bufio_release(buf);
+ }
+
+ return target_index;
+}
+
+/*
+ * Allocate RS control structure and FEC buffers from preallocated mempools,
+ * and attempt to allocate as many extra buffers as available.
+ */
+static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+{
+ unsigned n;
+
+ if (!fio->rs) {
+ fio->rs = mempool_alloc(v->fec->rs_pool, 0);
+ if (unlikely(!fio->rs)) {
+ DMERR("failed to allocate RS");
+ return -ENOMEM;
+ }
+ }
+
+ fec_for_each_prealloc_buffer(n) {
+ if (fio->bufs[n])
+ continue;
+
+ fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO);
+ if (unlikely(!fio->bufs[n])) {
+ DMERR("failed to allocate FEC buffer");
+ return -ENOMEM;
+ }
+ }
+
+ /* try to allocate the maximum number of buffers */
+ fec_for_each_extra_buffer(fio, n) {
+ if (fio->bufs[n])
+ continue;
+
+ fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO);
+ /* we can manage with even one buffer if necessary */
+ if (unlikely(!fio->bufs[n]))
+ break;
+ }
+ fio->nbufs = n;
+
+ if (!fio->output) {
+ fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO);
+
+ if (!fio->output) {
+ DMERR("failed to allocate FEC page");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize buffers and clear erasures. fec_read_bufs() assumes buffers are
+ * zeroed before deinterleaving.
+ */
+static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+{
+ unsigned n;
+
+ fec_for_each_buffer(fio, n)
+ memset(fio->bufs[n], 0, v->fec->rsn << DM_VERITY_FEC_BUF_RS_BITS);
+
+ memset(fio->erasures, 0, sizeof(fio->erasures));
+}
+
+/*
+ * Decode all RS blocks in a single data block and return the target block
+ * (indicated by @offset) in fio->output. If @use_erasures is non-zero, uses
+ * hashes to locate erasures.
+ */
+static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
+ struct dm_verity_fec_io *fio, u64 rsb, u64 offset,
+ bool use_erasures)
+{
+ int r, neras = 0;
+ unsigned pos;
+
+ r = fec_alloc_bufs(v, fio);
+ if (unlikely(r < 0))
+ return r;
+
+ for (pos = 0; pos < 1 << v->data_dev_block_bits; ) {
+ fec_init_bufs(v, fio);
+
+ r = fec_read_bufs(v, io, rsb, offset, pos,
+ use_erasures ? &neras : NULL);
+ if (unlikely(r < 0))
+ return r;
+
+ r = fec_decode_bufs(v, fio, rsb, r, pos, neras);
+ if (r < 0)
+ return r;
+
+ pos += fio->nbufs << DM_VERITY_FEC_BUF_RS_BITS;
+ }
+
+ /* Always re-validate the corrected block against the expected hash */
+ r = verity_hash(v, verity_io_hash_desc(v, io), fio->output,
+ 1 << v->data_dev_block_bits,
+ verity_io_real_digest(v, io));
+ if (unlikely(r < 0))
+ return r;
+
+ if (memcmp(verity_io_real_digest(v, io), verity_io_want_digest(v, io),
+ v->digest_size)) {
+ DMERR_LIMIT("%s: FEC %llu: failed to correct (%d erasures)",
+ v->data_dev->name, (unsigned long long)rsb, neras);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int fec_bv_copy(struct dm_verity *v, struct dm_verity_io *io, u8 *data,
+ size_t len)
+{
+ struct dm_verity_fec_io *fio = fec_io(io);
+
+ memcpy(data, &fio->output[fio->output_pos], len);
+ fio->output_pos += len;
+
+ return 0;
+}
+
+/*
+ * Correct errors in a block. Copies corrected block to dest if non-NULL,
+ * otherwise to a bio_vec starting from iter.
+ */
+int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+ enum verity_block_type type, sector_t block, u8 *dest,
+ struct bvec_iter *iter)
+{
+ int r;
+ struct dm_verity_fec_io *fio = fec_io(io);
+ u64 offset, res, rsb;
+
+ if (!verity_fec_is_enabled(v))
+ return -EOPNOTSUPP;
+
+ if (type == DM_VERITY_BLOCK_TYPE_METADATA)
+ block += v->data_blocks;
+
+ /*
+ * For RS(M, N), the continuous FEC data is divided into blocks of N
+ * bytes. Since block size may not be divisible by N, the last block
+ * is zero padded when decoding.
+ *
+ * Each byte of the block is covered by a different RS(M, N) code,
+ * and each code is interleaved over N blocks to make it less likely
+ * that bursty corruption will leave us in unrecoverable state.
+ */
+
+ offset = block << v->data_dev_block_bits;
+
+ res = offset;
+ div64_u64(res, v->fec->rounds << v->data_dev_block_bits);
+
+ /*
+ * The base RS block we can feed to the interleaver to find out all
+ * blocks required for decoding.
+ */
+ rsb = offset - res * (v->fec->rounds << v->data_dev_block_bits);
+
+ /*
+ * Locating erasures is slow, so attempt to recover the block without
+ * them first. Do a second attempt with erasures if the corruption is
+ * bad enough.
+ */
+ r = fec_decode_rsb(v, io, fio, rsb, offset, false);
+ if (r < 0) {
+ r = fec_decode_rsb(v, io, fio, rsb, offset, true);
+ if (r < 0)
+ return r;
+ }
+
+ if (dest)
+ memcpy(dest, fio->output, 1 << v->data_dev_block_bits);
+ else if (iter) {
+ fio->output_pos = 0;
+ r = verity_for_bv_block(v, io, iter, fec_bv_copy);
+ }
+
+ return r;
+}
+
+/*
+ * Clean up per-bio data.
+ */
+void verity_fec_finish_io(struct dm_verity_io *io)
+{
+ unsigned n;
+ struct dm_verity_fec *f = io->v->fec;
+ struct dm_verity_fec_io *fio = fec_io(io);
+
+ if (!verity_fec_is_enabled(io->v))
+ return;
+
+ mempool_free(fio->rs, f->rs_pool);
+
+ fec_for_each_prealloc_buffer(n)
+ mempool_free(fio->bufs[n], f->prealloc_pool);
+
+ fec_for_each_extra_buffer(fio, n)
+ mempool_free(fio->bufs[n], f->extra_pool);
+
+ mempool_free(fio->output, f->output_pool);
+}
+
+/*
+ * Initialize per-bio data.
+ */
+void verity_fec_init_io(struct dm_verity_io *io)
+{
+ struct dm_verity_fec_io *fio = fec_io(io);
+
+ if (!verity_fec_is_enabled(io->v))
+ return;
+
+ fio->rs = NULL;
+ memset(fio->bufs, 0, sizeof(fio->bufs));
+ fio->nbufs = 0;
+ fio->output = NULL;
+}
+
+/*
+ * Append feature arguments and values to the status table.
+ */
+unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
+ char *result, unsigned maxlen)
+{
+ if (!verity_fec_is_enabled(v))
+ return sz;
+
+ DMEMIT(" " DM_VERITY_OPT_FEC_DEV " %s "
+ DM_VERITY_OPT_FEC_BLOCKS " %llu "
+ DM_VERITY_OPT_FEC_START " %llu "
+ DM_VERITY_OPT_FEC_ROOTS " %d",
+ v->fec->dev->name,
+ (unsigned long long)v->fec->blocks,
+ (unsigned long long)v->fec->start,
+ v->fec->roots);
+
+ return sz;
+}
+
+void verity_fec_dtr(struct dm_verity *v)
+{
+ struct dm_verity_fec *f = v->fec;
+ struct kobject *kobj = &f->kobj_holder.kobj;
+
+ if (!verity_fec_is_enabled(v))
+ goto out;
+
+ mempool_destroy(f->rs_pool);
+ mempool_destroy(f->prealloc_pool);
+ mempool_destroy(f->extra_pool);
+ kmem_cache_destroy(f->cache);
+
+ if (f->data_bufio)
+ dm_bufio_client_destroy(f->data_bufio);
+ if (f->bufio)
+ dm_bufio_client_destroy(f->bufio);
+
+ if (f->dev)
+ dm_put_device(v->ti, f->dev);
+
+ if (kobj->state_initialized) {
+ kobject_put(kobj);
+ wait_for_completion(dm_get_completion_from_kobject(kobj));
+ }
+
+out:
+ kfree(f);
+ v->fec = NULL;
+}
+
+static void *fec_rs_alloc(gfp_t gfp_mask, void *pool_data)
+{
+ struct dm_verity *v = (struct dm_verity *)pool_data;
+
+ return init_rs(8, 0x11d, 0, 1, v->fec->roots);
+}
+
+static void fec_rs_free(void *element, void *pool_data)
+{
+ struct rs_control *rs = (struct rs_control *)element;
+
+ if (rs)
+ free_rs(rs);
+}
+
+bool verity_is_fec_opt_arg(const char *arg_name)
+{
+ return (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV) ||
+ !strcasecmp(arg_name, DM_VERITY_OPT_FEC_BLOCKS) ||
+ !strcasecmp(arg_name, DM_VERITY_OPT_FEC_START) ||
+ !strcasecmp(arg_name, DM_VERITY_OPT_FEC_ROOTS));
+}
+
+/*
+ * Parse one FEC feature argument (arg_name) and its value from as,
+ * storing the result into v->fec. Consumes the value and decrements
+ * *argc accordingly. Returns 0 on success or a negative errno with
+ * ti->error set.
+ */
+int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
+			      unsigned *argc, const char *arg_name)
+{
+	int r;
+	struct dm_target *ti = v->ti;
+	const char *arg_value;
+	unsigned long long num_ll;
+	unsigned char num_c;
+	char dummy;
+
+	/* Every FEC argument name must be followed by a value. */
+	if (!*argc) {
+		ti->error = "FEC feature arguments require a value";
+		return -EINVAL;
+	}
+
+	arg_value = dm_shift_arg(as);
+	(*argc)--;
+
+	if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) {
+		r = dm_get_device(ti, arg_value, FMODE_READ, &v->fec->dev);
+		if (r) {
+			ti->error = "FEC device lookup failed";
+			return r;
+		}
+
+	} else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_BLOCKS)) {
+		/*
+		 * The shift/unshift round trip rejects block counts whose
+		 * sector equivalent would overflow sector_t.
+		 */
+		if (sscanf(arg_value, "%llu%c", &num_ll, &dummy) != 1 ||
+		    ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
+		     >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) {
+			ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS;
+			return -EINVAL;
+		}
+		v->fec->blocks = num_ll;
+
+	} else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_START)) {
+		/* Same overflow check as fec_blocks above. */
+		if (sscanf(arg_value, "%llu%c", &num_ll, &dummy) != 1 ||
+		    ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) >>
+		     (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) {
+			ti->error = "Invalid " DM_VERITY_OPT_FEC_START;
+			return -EINVAL;
+		}
+		v->fec->start = num_ll;
+
+	} else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_ROOTS)) {
+		/* Roots must yield RS(M, N) with N in the supported range. */
+		if (sscanf(arg_value, "%hhu%c", &num_c, &dummy) != 1 || !num_c ||
+		    num_c < (DM_VERITY_FEC_RSM - DM_VERITY_FEC_MAX_RSN) ||
+		    num_c > (DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN)) {
+			ti->error = "Invalid " DM_VERITY_OPT_FEC_ROOTS;
+			return -EINVAL;
+		}
+		v->fec->roots = num_c;
+
+	} else {
+		ti->error = "Unrecognized verity FEC feature request";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* sysfs show handler: number of blocks FEC has corrected so far. */
+static ssize_t corrected_show(struct kobject *kobj, struct kobj_attribute *attr,
+			      char *buf)
+{
+	struct dm_verity_fec *f = container_of(kobj, struct dm_verity_fec,
+					       kobj_holder.kobj);
+
+	return sprintf(buf, "%d\n", atomic_read(&f->corrected));
+}
+
+static struct kobj_attribute attr_corrected = __ATTR_RO(corrected);
+
+static struct attribute *fec_attrs[] = {
+	&attr_corrected.attr,
+	NULL
+};
+
+/* kobject type backing the per-target "fec" sysfs directory. */
+static struct kobj_type fec_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.default_attrs = fec_attrs
+};
+
+/*
+ * Allocate dm_verity_fec for v->fec. Must be called before verity_fec_ctr.
+ * The structure is freed by verity_fec_dtr (directly, or via the
+ * verity_fec_ctr early-exit path when FEC is not enabled).
+ */
+int verity_fec_ctr_alloc(struct dm_verity *v)
+{
+	struct dm_verity_fec *f;
+
+	f = kzalloc(sizeof(struct dm_verity_fec), GFP_KERNEL);
+	if (!f) {
+		v->ti->error = "Cannot allocate FEC structure";
+		return -ENOMEM;
+	}
+	v->fec = f;
+
+	return 0;
+}
+
+/*
+ * Validate arguments and preallocate memory. Must be called after arguments
+ * have been parsed using verity_fec_parse_opt_args. On failure, cleanup of
+ * anything already allocated is left to verity_fec_dtr via verity_dtr.
+ */
+int verity_fec_ctr(struct dm_verity *v)
+{
+	int r;
+	struct dm_verity_fec *f = v->fec;
+	struct dm_target *ti = v->ti;
+	struct mapped_device *md = dm_table_get_md(ti->table);
+	u64 hash_blocks;
+
+	/* No FEC arguments were given: free the unused stub and succeed. */
+	if (!verity_fec_is_enabled(v)) {
+		verity_fec_dtr(v);
+		return 0;
+	}
+
+	/* Create a kobject and sysfs attributes */
+	init_completion(&f->kobj_holder.completion);
+
+	r = kobject_init_and_add(&f->kobj_holder.kobj, &fec_ktype,
+				 &disk_to_dev(dm_disk(md))->kobj, "%s", "fec");
+	if (r) {
+		ti->error = "Cannot create kobject";
+		return r;
+	}
+
+	/*
+	 * FEC is computed over data blocks, possible metadata, and
+	 * hash blocks. In other words, FEC covers total of fec_blocks
+	 * blocks consisting of the following:
+	 *
+	 *  data blocks | hash blocks | metadata (optional)
+	 *
+	 * We allow metadata after hash blocks to support a use case
+	 * where all data is stored on the same device and FEC covers
+	 * the entire area.
+	 *
+	 * If metadata is included, we require it to be available on the
+	 * hash device after the hash blocks.
+	 */
+
+	hash_blocks = v->hash_blocks - v->hash_start;
+
+	/*
+	 * Require matching block sizes for data and hash devices for
+	 * simplicity.
+	 */
+	if (v->data_dev_block_bits != v->hash_dev_block_bits) {
+		ti->error = "Block sizes must match to use FEC";
+		return -EINVAL;
+	}
+
+	if (!f->roots) {
+		ti->error = "Missing " DM_VERITY_OPT_FEC_ROOTS;
+		return -EINVAL;
+	}
+	f->rsn = DM_VERITY_FEC_RSM - f->roots;
+
+	if (!f->blocks) {
+		ti->error = "Missing " DM_VERITY_OPT_FEC_BLOCKS;
+		return -EINVAL;
+	}
+
+	/* Interleaving rounds: ceil(blocks / rsn). */
+	f->rounds = f->blocks;
+	if (sector_div(f->rounds, f->rsn))
+		f->rounds++;
+
+	/*
+	 * Due to optional metadata, f->blocks can be larger than
+	 * data_blocks and hash_blocks combined.
+	 */
+	if (f->blocks < v->data_blocks + hash_blocks || !f->rounds) {
+		ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS;
+		return -EINVAL;
+	}
+
+	/*
+	 * Metadata is accessed through the hash device, so we require
+	 * it to be large enough.
+	 */
+	f->hash_blocks = f->blocks - v->data_blocks;
+	if (dm_bufio_get_device_size(v->bufio) < f->hash_blocks) {
+		ti->error = "Hash device is too small for "
+			DM_VERITY_OPT_FEC_BLOCKS;
+		return -E2BIG;
+	}
+
+	f->bufio = dm_bufio_client_create(f->dev->bdev,
+					  1 << v->data_dev_block_bits,
+					  1, 0, NULL, NULL);
+	if (IS_ERR(f->bufio)) {
+		r = PTR_ERR(f->bufio);
+		/*
+		 * Clear the ERR_PTR so verity_fec_dtr's "if (f->bufio)"
+		 * check doesn't pass it to dm_bufio_client_destroy.
+		 */
+		f->bufio = NULL;
+		ti->error = "Cannot initialize FEC bufio client";
+		return r;
+	}
+
+	if (dm_bufio_get_device_size(f->bufio) <
+	    ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) {
+		ti->error = "FEC device is too small";
+		return -E2BIG;
+	}
+
+	f->data_bufio = dm_bufio_client_create(v->data_dev->bdev,
+					       1 << v->data_dev_block_bits,
+					       1, 0, NULL, NULL);
+	if (IS_ERR(f->data_bufio)) {
+		r = PTR_ERR(f->data_bufio);
+		f->data_bufio = NULL;	/* see f->bufio above */
+		ti->error = "Cannot initialize FEC data bufio client";
+		return r;
+	}
+
+	if (dm_bufio_get_device_size(f->data_bufio) < v->data_blocks) {
+		ti->error = "Data device is too small";
+		return -E2BIG;
+	}
+
+	/* Preallocate an rs_control structure for each worker thread */
+	f->rs_pool = mempool_create(num_online_cpus(), fec_rs_alloc,
+				    fec_rs_free, (void *) v);
+	if (!f->rs_pool) {
+		ti->error = "Cannot allocate RS pool";
+		return -ENOMEM;
+	}
+
+	f->cache = kmem_cache_create("dm_verity_fec_buffers",
+				     f->rsn << DM_VERITY_FEC_BUF_RS_BITS,
+				     0, 0, NULL);
+	if (!f->cache) {
+		ti->error = "Cannot create FEC buffer cache";
+		return -ENOMEM;
+	}
+
+	/* Preallocate DM_VERITY_FEC_BUF_PREALLOC buffers for each thread */
+	f->prealloc_pool = mempool_create_slab_pool(num_online_cpus() *
+						    DM_VERITY_FEC_BUF_PREALLOC,
+						    f->cache);
+	if (!f->prealloc_pool) {
+		ti->error = "Cannot allocate FEC buffer prealloc pool";
+		return -ENOMEM;
+	}
+
+	f->extra_pool = mempool_create_slab_pool(0, f->cache);
+	if (!f->extra_pool) {
+		ti->error = "Cannot allocate FEC buffer extra pool";
+		return -ENOMEM;
+	}
+
+	/* Preallocate an output buffer for each thread */
+	f->output_pool = mempool_create_kmalloc_pool(num_online_cpus(),
+						     1 << v->data_dev_block_bits);
+	if (!f->output_pool) {
+		ti->error = "Cannot allocate FEC output pool";
+		return -ENOMEM;
+	}
+
+	/* Reserve space for our per-bio data */
+	ti->per_bio_data_size += sizeof(struct dm_verity_fec_io);
+
+	return 0;
+}
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
new file mode 100644
index 000000000000..8c4bee052a73
--- /dev/null
+++ b/drivers/md/dm-verity-fec.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Author: Sami Tolvanen <samitolvanen@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef DM_VERITY_FEC_H
+#define DM_VERITY_FEC_H
+
+#include "dm.h"
+#include "dm-verity.h"
+#include <linux/rslib.h>
+
+/* Reed-Solomon(M, N) parameters */
+#define DM_VERITY_FEC_RSM 255
+#define DM_VERITY_FEC_MAX_RSN 253
+#define DM_VERITY_FEC_MIN_RSN 231 /* ~10% space overhead */
+
+/* buffers for deinterleaving and decoding */
+#define DM_VERITY_FEC_BUF_PREALLOC 1 /* buffers to preallocate */
+#define DM_VERITY_FEC_BUF_RS_BITS 4 /* 1 << RS blocks per buffer */
+/* we need buffers for at most 1 << block size RS blocks */
+#define DM_VERITY_FEC_BUF_MAX \
+ (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
+
+#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device"
+#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks"
+#define DM_VERITY_OPT_FEC_START "fec_start"
+#define DM_VERITY_OPT_FEC_ROOTS "fec_roots"
+
+/* Per-target FEC configuration and preallocated decoding resources. */
+struct dm_verity_fec {
+	struct dm_dev *dev;	/* parity data device */
+	struct dm_bufio_client *data_bufio;	/* for data dev access */
+	struct dm_bufio_client *bufio;		/* for parity data access */
+	sector_t start;		/* parity data start in blocks */
+	sector_t blocks;	/* number of blocks covered */
+	sector_t rounds;	/* number of interleaving rounds */
+	sector_t hash_blocks;	/* blocks covered after v->hash_start */
+	unsigned char roots;	/* number of parity bytes, M-N of RS(M, N) */
+	unsigned char rsn;	/* N of RS(M, N) */
+	mempool_t *rs_pool;	/* mempool for fio->rs */
+	mempool_t *prealloc_pool;	/* mempool for preallocated buffers */
+	mempool_t *extra_pool;	/* mempool for extra buffers */
+	mempool_t *output_pool;	/* mempool for output */
+	struct kmem_cache *cache;	/* cache for buffers */
+	atomic_t corrected;	/* corrected errors */
+	struct dm_kobject_holder kobj_holder;	/* for sysfs attributes */
+};
+
+/* Per-bio FEC decoding state, carved out of the bio's per-bio data. */
+struct dm_verity_fec_io {
+	struct rs_control *rs;	/* Reed-Solomon state */
+	int erasures[DM_VERITY_FEC_MAX_RSN];	/* erasures for decode_rs8 */
+	u8 *bufs[DM_VERITY_FEC_BUF_MAX];	/* bufs for deinterleaving */
+	unsigned nbufs;		/* number of buffers allocated */
+	u8 *output;		/* buffer for corrected output */
+	size_t output_pos;	/* write offset into output */
+};
+
+#ifdef CONFIG_DM_VERITY_FEC
+
+/* each feature parameter requires a value */
+#define DM_VERITY_OPTS_FEC 8
+
+extern bool verity_fec_is_enabled(struct dm_verity *v);
+
+extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+ enum verity_block_type type, sector_t block,
+ u8 *dest, struct bvec_iter *iter);
+
+extern unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
+ char *result, unsigned maxlen);
+
+extern void verity_fec_finish_io(struct dm_verity_io *io);
+extern void verity_fec_init_io(struct dm_verity_io *io);
+
+extern bool verity_is_fec_opt_arg(const char *arg_name);
+extern int verity_fec_parse_opt_args(struct dm_arg_set *as,
+ struct dm_verity *v, unsigned *argc,
+ const char *arg_name);
+
+extern void verity_fec_dtr(struct dm_verity *v);
+
+extern int verity_fec_ctr_alloc(struct dm_verity *v);
+extern int verity_fec_ctr(struct dm_verity *v);
+
+#else /* !CONFIG_DM_VERITY_FEC */
+
+#define DM_VERITY_OPTS_FEC 0
+
+static inline bool verity_fec_is_enabled(struct dm_verity *v)
+{
+ return false;
+}
+
+static inline int verity_fec_decode(struct dm_verity *v,
+ struct dm_verity_io *io,
+ enum verity_block_type type,
+ sector_t block, u8 *dest,
+ struct bvec_iter *iter)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline unsigned verity_fec_status_table(struct dm_verity *v,
+ unsigned sz, char *result,
+ unsigned maxlen)
+{
+ return sz;
+}
+
+static inline void verity_fec_finish_io(struct dm_verity_io *io)
+{
+}
+
+static inline void verity_fec_init_io(struct dm_verity_io *io)
+{
+}
+
+static inline bool verity_is_fec_opt_arg(const char *arg_name)
+{
+ return false;
+}
+
+static inline int verity_fec_parse_opt_args(struct dm_arg_set *as,
+ struct dm_verity *v,
+ unsigned *argc,
+ const char *arg_name)
+{
+ return -EINVAL;
+}
+
+static inline void verity_fec_dtr(struct dm_verity *v)
+{
+}
+
+static inline int verity_fec_ctr_alloc(struct dm_verity *v)
+{
+ return 0;
+}
+
+static inline int verity_fec_ctr(struct dm_verity *v)
+{
+ return 0;
+}
+
+#endif /* CONFIG_DM_VERITY_FEC */
+
+#endif /* DM_VERITY_FEC_H */
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity-target.c
index ccf41886ebcf..5c5d30cb6ec5 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity-target.c
@@ -14,12 +14,11 @@
* access behavior.
*/
-#include "dm-bufio.h"
+#include "dm-verity.h"
+#include "dm-verity-fec.h"
#include <linux/module.h>
-#include <linux/device-mapper.h>
#include <linux/reboot.h>
-#include <crypto/hash.h>
#define DM_MSG_PREFIX "verity"
@@ -28,83 +27,18 @@
#define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
-#define DM_VERITY_MAX_LEVELS 63
#define DM_VERITY_MAX_CORRUPTED_ERRS 100
#define DM_VERITY_OPT_LOGGING "ignore_corruption"
#define DM_VERITY_OPT_RESTART "restart_on_corruption"
+#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
+
+#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC)
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
-enum verity_mode {
- DM_VERITY_MODE_EIO,
- DM_VERITY_MODE_LOGGING,
- DM_VERITY_MODE_RESTART
-};
-
-enum verity_block_type {
- DM_VERITY_BLOCK_TYPE_DATA,
- DM_VERITY_BLOCK_TYPE_METADATA
-};
-
-struct dm_verity {
- struct dm_dev *data_dev;
- struct dm_dev *hash_dev;
- struct dm_target *ti;
- struct dm_bufio_client *bufio;
- char *alg_name;
- struct crypto_shash *tfm;
- u8 *root_digest; /* digest of the root block */
- u8 *salt; /* salt: its size is salt_size */
- unsigned salt_size;
- sector_t data_start; /* data offset in 512-byte sectors */
- sector_t hash_start; /* hash start in blocks */
- sector_t data_blocks; /* the number of data blocks */
- sector_t hash_blocks; /* the number of hash blocks */
- unsigned char data_dev_block_bits; /* log2(data blocksize) */
- unsigned char hash_dev_block_bits; /* log2(hash blocksize) */
- unsigned char hash_per_block_bits; /* log2(hashes in hash block) */
- unsigned char levels; /* the number of tree levels */
- unsigned char version;
- unsigned digest_size; /* digest size for the current hash algorithm */
- unsigned shash_descsize;/* the size of temporary space for crypto */
- int hash_failed; /* set to 1 if hash of any block failed */
- enum verity_mode mode; /* mode for handling verification errors */
- unsigned corrupted_errs;/* Number of errors for corrupted blocks */
-
- struct workqueue_struct *verify_wq;
-
- /* starting blocks for each tree level. 0 is the lowest level. */
- sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
-};
-
-struct dm_verity_io {
- struct dm_verity *v;
-
- /* original values of bio->bi_end_io and bio->bi_private */
- bio_end_io_t *orig_bi_end_io;
- void *orig_bi_private;
-
- sector_t block;
- unsigned n_blocks;
-
- struct bvec_iter iter;
-
- struct work_struct work;
-
- /*
- * Three variably-size fields follow this struct:
- *
- * u8 hash_desc[v->shash_descsize];
- * u8 real_digest[v->digest_size];
- * u8 want_digest[v->digest_size];
- *
- * To access them use: io_hash_desc(), io_real_digest() and io_want_digest().
- */
-};
-
struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
@@ -112,21 +46,6 @@ struct dm_verity_prefetch_work {
unsigned n_blocks;
};
-static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io)
-{
- return (struct shash_desc *)(io + 1);
-}
-
-static u8 *io_real_digest(struct dm_verity *v, struct dm_verity_io *io)
-{
- return (u8 *)(io + 1) + v->shash_descsize;
-}
-
-static u8 *io_want_digest(struct dm_verity *v, struct dm_verity_io *io)
-{
- return (u8 *)(io + 1) + v->shash_descsize + v->digest_size;
-}
-
/*
* Auxiliary structure appended to each dm-bufio buffer. If the value
* hash_verified is nonzero, hash of the block has been verified.
@@ -173,6 +92,84 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
return block >> (level * v->hash_per_block_bits);
}
+/*
+ * Wrapper for crypto_shash_init, which handles verity salting.
+ */
+static int verity_hash_init(struct dm_verity *v, struct shash_desc *desc)
+{
+	int r;
+
+	desc->tfm = v->tfm;
+	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	r = crypto_shash_init(desc);
+
+	if (unlikely(r < 0)) {
+		DMERR("crypto_shash_init failed: %d", r);
+		return r;
+	}
+
+	/* Format version >= 1 prepends the salt (v0 appends it at final). */
+	if (likely(v->version >= 1)) {
+		r = crypto_shash_update(desc, v->salt, v->salt_size);
+
+		if (unlikely(r < 0)) {
+			DMERR("crypto_shash_update failed: %d", r);
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+/* Feed data into an in-progress verity hash, logging on failure. */
+static int verity_hash_update(struct dm_verity *v, struct shash_desc *desc,
+			      const u8 *data, size_t len)
+{
+	int r = crypto_shash_update(desc, data, len);
+
+	if (unlikely(r < 0))
+		DMERR("crypto_shash_update failed: %d", r);
+
+	return r;
+}
+
+/* Finalize a verity hash into digest (appending the salt for v0 format). */
+static int verity_hash_final(struct dm_verity *v, struct shash_desc *desc,
+			     u8 *digest)
+{
+	/* Version 0 appends the salt, mirroring the prepend in hash_init. */
+	int r;
+
+	if (unlikely(!v->version)) {
+		r = crypto_shash_update(desc, v->salt, v->salt_size);
+
+		if (r < 0) {
+			DMERR("crypto_shash_update failed: %d", r);
+			return r;
+		}
+	}
+
+	r = crypto_shash_final(desc, digest);
+
+	if (unlikely(r < 0))
+		DMERR("crypto_shash_final failed: %d", r);
+
+	return r;
+}
+
+/*
+ * Hash one contiguous buffer with the target's salted digest algorithm:
+ * init (salt prepended for v1+), update over data, final (salt appended
+ * for v0). Writes v->digest_size bytes to digest.
+ */
+int verity_hash(struct dm_verity *v, struct shash_desc *desc,
+		const u8 *data, size_t len, u8 *digest)
+{
+	int r;
+
+	r = verity_hash_init(v, desc);
+	if (unlikely(r < 0))
+		return r;
+
+	r = verity_hash_update(v, desc, data, len);
+	if (unlikely(r < 0))
+		return r;
+
+	return verity_hash_final(v, desc, digest);
+}
+
static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
sector_t *hash_block, unsigned *offset)
{
@@ -246,17 +243,17 @@ out:
* Verify hash of a metadata block pertaining to the specified data block
* ("block" argument) at a specified level ("level" argument).
*
- * On successful return, io_want_digest(v, io) contains the hash value for
- * a lower tree level or for the data block (if we're at the lowest leve).
+ * On successful return, verity_io_want_digest(v, io) contains the hash value
+ * for a lower tree level or for the data block (if we're at the lowest level).
*
* If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
* If "skip_unverified" is false, unverified buffer is hashed and verified
- * against current value of io_want_digest(v, io).
+ * against current value of verity_io_want_digest(v, io).
*/
-static int verity_verify_level(struct dm_verity_io *io, sector_t block,
- int level, bool skip_unverified)
+static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
+ sector_t block, int level, bool skip_unverified,
+ u8 *want_digest)
{
- struct dm_verity *v = io->v;
struct dm_buffer *buf;
struct buffer_aux *aux;
u8 *data;
@@ -273,72 +270,128 @@ static int verity_verify_level(struct dm_verity_io *io, sector_t block,
aux = dm_bufio_get_aux_data(buf);
if (!aux->hash_verified) {
- struct shash_desc *desc;
- u8 *result;
-
if (skip_unverified) {
r = 1;
goto release_ret_r;
}
- desc = io_hash_desc(v, io);
- desc->tfm = v->tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- r = crypto_shash_init(desc);
- if (r < 0) {
- DMERR("crypto_shash_init failed: %d", r);
+ r = verity_hash(v, verity_io_hash_desc(v, io),
+ data, 1 << v->hash_dev_block_bits,
+ verity_io_real_digest(v, io));
+ if (unlikely(r < 0))
goto release_ret_r;
- }
-
- if (likely(v->version >= 1)) {
- r = crypto_shash_update(desc, v->salt, v->salt_size);
- if (r < 0) {
- DMERR("crypto_shash_update failed: %d", r);
- goto release_ret_r;
- }
- }
- r = crypto_shash_update(desc, data, 1 << v->hash_dev_block_bits);
- if (r < 0) {
- DMERR("crypto_shash_update failed: %d", r);
+ if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
+ v->digest_size) == 0))
+ aux->hash_verified = 1;
+ else if (verity_fec_decode(v, io,
+ DM_VERITY_BLOCK_TYPE_METADATA,
+ hash_block, data, NULL) == 0)
+ aux->hash_verified = 1;
+ else if (verity_handle_err(v,
+ DM_VERITY_BLOCK_TYPE_METADATA,
+ hash_block)) {
+ r = -EIO;
goto release_ret_r;
}
+ }
- if (!v->version) {
- r = crypto_shash_update(desc, v->salt, v->salt_size);
- if (r < 0) {
- DMERR("crypto_shash_update failed: %d", r);
- goto release_ret_r;
- }
- }
+ data += offset;
+ memcpy(want_digest, data, v->digest_size);
+ r = 0;
- result = io_real_digest(v, io);
- r = crypto_shash_final(desc, result);
- if (r < 0) {
- DMERR("crypto_shash_final failed: %d", r);
- goto release_ret_r;
- }
- if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
- if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_METADATA,
- hash_block)) {
- r = -EIO;
- goto release_ret_r;
- }
- } else
- aux->hash_verified = 1;
+release_ret_r:
+ dm_bufio_release(buf);
+ return r;
+}
+
+/*
+ * Find a hash for a given block, write it to digest and verify the integrity
+ * of the hash tree if necessary. Sets *is_zero when the resulting digest
+ * matches the precomputed zero-block digest (only when v->zero_digest is
+ * allocated, i.e. ignore_zero_blocks was requested).
+ */
+int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
+			  sector_t block, u8 *digest, bool *is_zero)
+{
+	int r = 0, i;
+
+	if (likely(v->levels)) {
+		/*
+		 * First, we try to get the requested hash for
+		 * the current block. If the hash block itself is
+		 * verified, zero is returned. If it isn't, this
+		 * function returns 1 and we fall back to whole
+		 * chain verification.
+		 */
+		r = verity_verify_level(v, io, block, 0, true, digest);
+		if (likely(r <= 0))
+			goto out;
+	}
+
+	/* Fall back: verify the full chain from the root down to level 0. */
+	memcpy(digest, v->root_digest, v->digest_size);
+
+	for (i = v->levels - 1; i >= 0; i--) {
+		r = verity_verify_level(v, io, block, i, false, digest);
+		if (unlikely(r))
+			goto out;
+	}
+out:
+	if (!r && v->zero_digest)
+		*is_zero = !memcmp(v->zero_digest, digest, v->digest_size);
+	else
+		*is_zero = false;
+
+	return r;
+}
+
+/*
+ * Calls function process for 1 << v->data_dev_block_bits bytes in the bio_vec
+ * starting from iter.
+ */
+int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ struct bvec_iter *iter,
+ int (*process)(struct dm_verity *v,
+ struct dm_verity_io *io, u8 *data,
+ size_t len))
+{
+ unsigned todo = 1 << v->data_dev_block_bits;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+
+ do {
+ int r;
+ u8 *page;
+ unsigned len;
+ struct bio_vec bv = bio_iter_iovec(bio, *iter);
+
+ page = kmap_atomic(bv.bv_page);
+ len = bv.bv_len;
+
+ if (likely(len >= todo))
+ len = todo;
+
+ r = process(v, io, page + bv.bv_offset, len);
+ kunmap_atomic(page);
+
+ if (r < 0)
+ return r;
+
+ bio_advance_iter(bio, iter, len);
+ todo -= len;
+ } while (todo);
- dm_bufio_release(buf);
return 0;
+}
-release_ret_r:
- dm_bufio_release(buf);
+static int verity_bv_hash_update(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *data, size_t len)
+{
+ return verity_hash_update(v, verity_io_hash_desc(v, io), data, len);
+}
- return r;
+static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *data, size_t len)
+{
+ memset(data, 0, len);
+ return 0;
}
/*
@@ -346,99 +399,56 @@ release_ret_r:
*/
static int verity_verify_io(struct dm_verity_io *io)
{
+ bool is_zero;
struct dm_verity *v = io->v;
- struct bio *bio = dm_bio_from_per_bio_data(io,
- v->ti->per_bio_data_size);
+ struct bvec_iter start;
unsigned b;
- int i;
for (b = 0; b < io->n_blocks; b++) {
- struct shash_desc *desc;
- u8 *result;
int r;
- unsigned todo;
+ struct shash_desc *desc = verity_io_hash_desc(v, io);
+
+ r = verity_hash_for_block(v, io, io->block + b,
+ verity_io_want_digest(v, io),
+ &is_zero);
+ if (unlikely(r < 0))
+ return r;
- if (likely(v->levels)) {
+ if (is_zero) {
/*
- * First, we try to get the requested hash for
- * the current block. If the hash block itself is
- * verified, zero is returned. If it isn't, this
- * function returns 0 and we fall back to whole
- * chain verification.
+ * If we expect a zero block, don't validate, just
+ * return zeros.
*/
- int r = verity_verify_level(io, io->block + b, 0, true);
- if (likely(!r))
- goto test_block_hash;
- if (r < 0)
+ r = verity_for_bv_block(v, io, &io->iter,
+ verity_bv_zero);
+ if (unlikely(r < 0))
return r;
- }
- memcpy(io_want_digest(v, io), v->root_digest, v->digest_size);
-
- for (i = v->levels - 1; i >= 0; i--) {
- int r = verity_verify_level(io, io->block + b, i, false);
- if (unlikely(r))
- return r;
+ continue;
}
-test_block_hash:
- desc = io_hash_desc(v, io);
- desc->tfm = v->tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- r = crypto_shash_init(desc);
- if (r < 0) {
- DMERR("crypto_shash_init failed: %d", r);
+ r = verity_hash_init(v, desc);
+ if (unlikely(r < 0))
return r;
- }
-
- if (likely(v->version >= 1)) {
- r = crypto_shash_update(desc, v->salt, v->salt_size);
- if (r < 0) {
- DMERR("crypto_shash_update failed: %d", r);
- return r;
- }
- }
- todo = 1 << v->data_dev_block_bits;
- do {
- u8 *page;
- unsigned len;
- struct bio_vec bv = bio_iter_iovec(bio, io->iter);
-
- page = kmap_atomic(bv.bv_page);
- len = bv.bv_len;
- if (likely(len >= todo))
- len = todo;
- r = crypto_shash_update(desc, page + bv.bv_offset, len);
- kunmap_atomic(page);
-
- if (r < 0) {
- DMERR("crypto_shash_update failed: %d", r);
- return r;
- }
-
- bio_advance_iter(bio, &io->iter, len);
- todo -= len;
- } while (todo);
- if (!v->version) {
- r = crypto_shash_update(desc, v->salt, v->salt_size);
- if (r < 0) {
- DMERR("crypto_shash_update failed: %d", r);
- return r;
- }
- }
+ start = io->iter;
+ r = verity_for_bv_block(v, io, &io->iter, verity_bv_hash_update);
+ if (unlikely(r < 0))
+ return r;
- result = io_real_digest(v, io);
- r = crypto_shash_final(desc, result);
- if (r < 0) {
- DMERR("crypto_shash_final failed: %d", r);
+ r = verity_hash_final(v, desc, verity_io_real_digest(v, io));
+ if (unlikely(r < 0))
return r;
- }
- if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
- if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
- io->block + b))
- return -EIO;
- }
+
+ if (likely(memcmp(verity_io_real_digest(v, io),
+ verity_io_want_digest(v, io), v->digest_size) == 0))
+ continue;
+ else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
+ io->block + b, NULL, &start) == 0)
+ continue;
+ else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+ io->block + b))
+ return -EIO;
}
return 0;
@@ -453,9 +463,10 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
bio->bi_end_io = io->orig_bi_end_io;
- bio->bi_private = io->orig_bi_private;
bio->bi_error = error;
+ verity_fec_finish_io(io);
+
bio_endio(bio);
}
@@ -470,7 +481,7 @@ static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
- if (bio->bi_error) {
+ if (bio->bi_error && !verity_fec_is_enabled(io->v)) {
verity_finish_io(io, bio->bi_error);
return;
}
@@ -566,7 +577,6 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
io = dm_per_bio_data(bio, ti->per_bio_data_size);
io->v = v;
io->orig_bi_end_io = bio->bi_end_io;
- io->orig_bi_private = bio->bi_private;
io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
@@ -574,6 +584,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio->bi_private = io;
io->iter = bio->bi_iter;
+ verity_fec_init_io(io);
+
verity_submit_prefetch(v, io);
generic_make_request(bio);
@@ -588,6 +600,7 @@ static void verity_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
struct dm_verity *v = ti->private;
+ unsigned args = 0;
unsigned sz = 0;
unsigned x;
@@ -614,8 +627,17 @@ static void verity_status(struct dm_target *ti, status_type_t type,
else
for (x = 0; x < v->salt_size; x++)
DMEMIT("%02x", v->salt[x]);
+ if (v->mode != DM_VERITY_MODE_EIO)
+ args++;
+ if (verity_fec_is_enabled(v))
+ args += DM_VERITY_OPTS_FEC;
+ if (v->zero_digest)
+ args++;
+ if (!args)
+ return;
+ DMEMIT(" %u", args);
if (v->mode != DM_VERITY_MODE_EIO) {
- DMEMIT(" 1 ");
+ DMEMIT(" ");
switch (v->mode) {
case DM_VERITY_MODE_LOGGING:
DMEMIT(DM_VERITY_OPT_LOGGING);
@@ -627,6 +649,9 @@ static void verity_status(struct dm_target *ti, status_type_t type,
BUG();
}
}
+ if (v->zero_digest)
+ DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
+ sz = verity_fec_status_table(v, sz, result, maxlen);
break;
}
}
@@ -677,6 +702,7 @@ static void verity_dtr(struct dm_target *ti)
kfree(v->salt);
kfree(v->root_digest);
+ kfree(v->zero_digest);
if (v->tfm)
crypto_free_shash(v->tfm);
@@ -689,9 +715,94 @@ static void verity_dtr(struct dm_target *ti)
if (v->data_dev)
dm_put_device(ti, v->data_dev);
+ verity_fec_dtr(v);
+
kfree(v);
}
+/*
+ * Precompute the digest of an all-zero data block, used by the
+ * ignore_zero_blocks option. On failure v->zero_digest may remain
+ * allocated; verity_dtr frees it.
+ */
+static int verity_alloc_zero_digest(struct dm_verity *v)
+{
+	int r = -ENOMEM;
+	struct shash_desc *desc;
+	u8 *zero_data;
+
+	v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);
+
+	if (!v->zero_digest)
+		return r;
+
+	desc = kmalloc(v->shash_descsize, GFP_KERNEL);
+
+	if (!desc)
+		return r; /* verity_dtr will free zero_digest */
+
+	zero_data = kzalloc(1 << v->data_dev_block_bits, GFP_KERNEL);
+
+	if (!zero_data)
+		goto out;
+
+	r = verity_hash(v, desc, zero_data, 1 << v->data_dev_block_bits,
+			v->zero_digest);
+
+out:
+	kfree(desc);
+	kfree(zero_data);
+
+	return r;
+}
+
+/*
+ * Parse the optional feature argument group from the target line:
+ * <#opt_params> [ignore_corruption|restart_on_corruption]
+ *               [ignore_zero_blocks] [FEC arguments ...]
+ * Returns 0 on success or a negative errno with ti->error set.
+ */
+static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
+{
+	int r;
+	unsigned argc;
+	struct dm_target *ti = v->ti;
+	const char *arg_name;
+
+	static struct dm_arg _args[] = {
+		{0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
+	};
+
+	r = dm_read_arg_group(_args, as, &argc, &ti->error);
+	if (r)
+		return -EINVAL;
+
+	/* An empty feature group is valid. */
+	if (!argc)
+		return 0;
+
+	do {
+		arg_name = dm_shift_arg(as);
+		argc--;
+
+		if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) {
+			v->mode = DM_VERITY_MODE_LOGGING;
+			continue;
+
+		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) {
+			v->mode = DM_VERITY_MODE_RESTART;
+			continue;
+
+		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
+			r = verity_alloc_zero_digest(v);
+			if (r) {
+				ti->error = "Cannot allocate zero digest";
+				return r;
+			}
+			continue;
+
+		} else if (verity_is_fec_opt_arg(arg_name)) {
+			/* FEC arguments consume their value from as/argc. */
+			r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
+			if (r)
+				return r;
+			continue;
+		}
+
+		ti->error = "Unrecognized verity feature request";
+		return -EINVAL;
+	} while (argc && !r);
+
+	return r;
+}
+
/*
* Target parameters:
* <version> The current format is version 1.
@@ -710,18 +821,13 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
struct dm_verity *v;
struct dm_arg_set as;
- const char *opt_string;
- unsigned int num, opt_params;
+ unsigned int num;
unsigned long long num_ll;
int r;
int i;
sector_t hash_position;
char dummy;
- static struct dm_arg _args[] = {
- {0, 1, "Invalid number of feature args"},
- };
-
v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
if (!v) {
ti->error = "Cannot allocate verity structure";
@@ -730,6 +836,10 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->private = v;
v->ti = ti;
+ r = verity_fec_ctr_alloc(v);
+ if (r)
+ goto bad;
+
if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) {
ti->error = "Device must be readonly";
r = -EINVAL;
@@ -866,29 +976,9 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
as.argc = argc;
as.argv = argv;
- r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
- if (r)
+ r = verity_parse_opt_args(&as, v);
+ if (r < 0)
goto bad;
-
- while (opt_params) {
- opt_params--;
- opt_string = dm_shift_arg(&as);
- if (!opt_string) {
- ti->error = "Not enough feature arguments";
- r = -EINVAL;
- goto bad;
- }
-
- if (!strcasecmp(opt_string, DM_VERITY_OPT_LOGGING))
- v->mode = DM_VERITY_MODE_LOGGING;
- else if (!strcasecmp(opt_string, DM_VERITY_OPT_RESTART))
- v->mode = DM_VERITY_MODE_RESTART;
- else {
- ti->error = "Invalid feature arguments";
- r = -EINVAL;
- goto bad;
- }
- }
}
v->hash_per_block_bits =
@@ -938,8 +1028,6 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));
-
/* WQ_UNBOUND greatly improves performance when running on ramdisk */
v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
if (!v->verify_wq) {
@@ -948,6 +1036,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ ti->per_bio_data_size = sizeof(struct dm_verity_io) +
+ v->shash_descsize + v->digest_size * 2;
+
+ r = verity_fec_ctr(v);
+ if (r)
+ goto bad;
+
+ ti->per_bio_data_size = roundup(ti->per_bio_data_size,
+ __alignof__(struct dm_verity_io));
+
return 0;
bad:
@@ -958,7 +1056,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
new file mode 100644
index 000000000000..fb419f422d73
--- /dev/null
+++ b/drivers/md/dm-verity.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef DM_VERITY_H
+#define DM_VERITY_H
+
+#include "dm-bufio.h"
+#include <linux/device-mapper.h>
+#include <crypto/hash.h>
+
+#define DM_VERITY_MAX_LEVELS 63
+
+enum verity_mode {
+ DM_VERITY_MODE_EIO,
+ DM_VERITY_MODE_LOGGING,
+ DM_VERITY_MODE_RESTART
+};
+
+enum verity_block_type {
+ DM_VERITY_BLOCK_TYPE_DATA,
+ DM_VERITY_BLOCK_TYPE_METADATA
+};
+
+struct dm_verity_fec;
+
+struct dm_verity {
+ struct dm_dev *data_dev;
+ struct dm_dev *hash_dev;
+ struct dm_target *ti;
+ struct dm_bufio_client *bufio;
+ char *alg_name;
+ struct crypto_shash *tfm;
+ u8 *root_digest; /* digest of the root block */
+ u8 *salt; /* salt: its size is salt_size */
+ u8 *zero_digest; /* digest for a zero block */
+ unsigned salt_size;
+ sector_t data_start; /* data offset in 512-byte sectors */
+ sector_t hash_start; /* hash start in blocks */
+ sector_t data_blocks; /* the number of data blocks */
+ sector_t hash_blocks; /* the number of hash blocks */
+ unsigned char data_dev_block_bits; /* log2(data blocksize) */
+ unsigned char hash_dev_block_bits; /* log2(hash blocksize) */
+ unsigned char hash_per_block_bits; /* log2(hashes in hash block) */
+ unsigned char levels; /* the number of tree levels */
+ unsigned char version;
+ unsigned digest_size; /* digest size for the current hash algorithm */
+ unsigned shash_descsize;/* the size of temporary space for crypto */
+ int hash_failed; /* set to 1 if hash of any block failed */
+ enum verity_mode mode; /* mode for handling verification errors */
+ unsigned corrupted_errs;/* Number of errors for corrupted blocks */
+
+ struct workqueue_struct *verify_wq;
+
+ /* starting blocks for each tree level. 0 is the lowest level. */
+ sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
+
+ struct dm_verity_fec *fec; /* forward error correction */
+};
+
+struct dm_verity_io {
+ struct dm_verity *v;
+
+ /* original value of bio->bi_end_io */
+ bio_end_io_t *orig_bi_end_io;
+
+ sector_t block;
+ unsigned n_blocks;
+
+ struct bvec_iter iter;
+
+ struct work_struct work;
+
+ /*
+	 * Three variably-sized fields follow this struct:
+ *
+ * u8 hash_desc[v->shash_descsize];
+ * u8 real_digest[v->digest_size];
+ * u8 want_digest[v->digest_size];
+ *
+ * To access them use: verity_io_hash_desc(), verity_io_real_digest()
+ * and verity_io_want_digest().
+ */
+};
+
+static inline struct shash_desc *verity_io_hash_desc(struct dm_verity *v,
+ struct dm_verity_io *io)
+{
+ return (struct shash_desc *)(io + 1);
+}
+
+static inline u8 *verity_io_real_digest(struct dm_verity *v,
+ struct dm_verity_io *io)
+{
+ return (u8 *)(io + 1) + v->shash_descsize;
+}
+
+static inline u8 *verity_io_want_digest(struct dm_verity *v,
+ struct dm_verity_io *io)
+{
+ return (u8 *)(io + 1) + v->shash_descsize + v->digest_size;
+}
+
+static inline u8 *verity_io_digest_end(struct dm_verity *v,
+ struct dm_verity_io *io)
+{
+ return verity_io_want_digest(v, io) + v->digest_size;
+}
+
+extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ struct bvec_iter *iter,
+ int (*process)(struct dm_verity *v,
+ struct dm_verity_io *io,
+ u8 *data, size_t len));
+
+extern int verity_hash(struct dm_verity *v, struct shash_desc *desc,
+ const u8 *data, size_t len, u8 *digest);
+
+extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
+ sector_t block, u8 *digest, bool *is_zero);
+
+#endif /* DM_VERITY_H */
diff --git a/drivers/media/platform/msm/camera_v2/Kconfig b/drivers/media/platform/msm/camera_v2/Kconfig
index 7f7cc8ee9be3..568f817e8614 100644
--- a/drivers/media/platform/msm/camera_v2/Kconfig
+++ b/drivers/media/platform/msm/camera_v2/Kconfig
@@ -1,5 +1,5 @@
config MSM_CAMERA_SENSOR
- bool "Qualcomm MSM camera sensor support"
+ bool "QTI MSM camera sensor support"
depends on MSMB_CAMERA
select NEW_LEDS
select LEDS_CLASS
@@ -10,7 +10,7 @@ config MSM_CAMERA_SENSOR
subdev APIs.
config MSM_CPP
- bool "Qualcomm MSM Camera Post Processing Engine support"
+ bool "QTI MSM Camera Post Processing Engine support"
depends on MSMB_CAMERA
---help---
Enable support for Camera Post-processing Engine
@@ -19,7 +19,7 @@ config MSM_CPP
APIs.
config MSM_CCI
- bool "Qualcomm MSM Camera Control Interface support"
+ bool "QTI MSM Camera Control Interface support"
depends on MSMB_CAMERA
---help---
Enable support for Camera Control Interface driver only
@@ -29,7 +29,7 @@ config MSM_CCI
GPIO and data frames.
config MSM_CSI20_HEADER
- bool "Qualcomm MSM CSI 2.0 Header"
+ bool "QTI MSM CSI 2.0 Header"
depends on MSMB_CAMERA
---help---
Enable support for CSI drivers to include 2.0
@@ -39,7 +39,7 @@ config MSM_CSI20_HEADER
8930 and 8064 platforms.
config MSM_CSI22_HEADER
- bool "Qualcomm MSM CSI 2.2 Header"
+ bool "QTI MSM CSI 2.2 Header"
depends on MSMB_CAMERA
---help---
Enable support for CSI drivers to include 2.2
@@ -49,7 +49,7 @@ config MSM_CSI22_HEADER
platform.
config MSM_CSI30_HEADER
- bool "Qualcomm MSM CSI 3.0 Header"
+ bool "QTI MSM CSI 3.0 Header"
depends on MSMB_CAMERA
---help---
Enable support for CSI drivers to include 3.0
@@ -59,7 +59,7 @@ config MSM_CSI30_HEADER
8064 platforms.
config MSM_CSI31_HEADER
- bool "Qualcomm MSM CSI 3.1 Header"
+ bool "QTI MSM CSI 3.1 Header"
depends on MSMB_CAMERA
---help---
Enable support for CSI drivers to include 3.0
@@ -69,7 +69,7 @@ config MSM_CSI31_HEADER
APQ8084 platform.
config MSM_CSIPHY
- bool "Qualcomm MSM Camera Serial Interface Physical receiver support"
+ bool "QTI MSM Camera Serial Interface Physical receiver support"
depends on MSMB_CAMERA
---help---
Enable support for Camera Serial Interface
@@ -78,7 +78,7 @@ config MSM_CSIPHY
signalling.
config MSM_CSID
- bool "Qualcomm MSM Camera Serial Interface decoder support"
+ bool "QTI MSM Camera Serial Interface decoder support"
depends on MSMB_CAMERA
---help---
Enable support for Camera Serial Interface decoder.
@@ -87,7 +87,7 @@ config MSM_CSID
and datatype.
config MSM_EEPROM
- bool "Qualcomm MSM Camera ROM Interface for Calibration support"
+ bool "QTI MSM Camera ROM Interface for Calibration support"
depends on MSMB_CAMERA
---help---
Enable support for ROM Interface for Calibration
@@ -96,7 +96,7 @@ config MSM_EEPROM
Currently supports I2C, CCI and SPI protocol
config MSM_ISPIF
- bool "Qualcomm MSM Image Signal Processing interface support"
+ bool "QTI MSM Image Signal Processing interface support"
depends on MSMB_CAMERA
---help---
Enable support for Image Signal Processing interface module.
@@ -105,7 +105,7 @@ config MSM_ISPIF
data interface in VFE.
config MSM_ISPIF_V1
- bool "Qualcomm MSM Image Signal Processing interface support"
+ bool "QTI MSM Image Signal Processing interface support"
depends on MSMB_CAMERA
---help---
Enable support for Image Signal Processing interface module.
@@ -114,7 +114,7 @@ config MSM_ISPIF_V1
or raw data interface in VFE.
config MSM_ISPIF_V2
- bool "Qualcomm MSM Image Signal Processing interface support"
+ bool "QTI MSM Image Signal Processing interface support"
depends on MSMB_CAMERA
---help---
Enable support for Image Signal Processing interface module.
@@ -204,7 +204,7 @@ config OV12830
2 lanes max fps is 18, 4 lanes max fps is 24.
config MSM_V4L2_VIDEO_OVERLAY_DEVICE
- tristate "Qualcomm MSM V4l2 video overlay device"
+ tristate "QTI MSM V4l2 video overlay device"
---help---
Enables support for the MSM V4L2 video
overlay driver. This allows video rendering
@@ -212,7 +212,7 @@ config MSM_V4L2_VIDEO_OVERLAY_DEVICE
APIs, by using /dev/videoX device
config MSMB_JPEG
- tristate "Qualcomm MSM Jpeg Encoder Engine support"
+ tristate "QTI MSM Jpeg Encoder Engine support"
depends on MSMB_CAMERA && (ARCH_MSM8974 || ARCH_MSM8226 || ARCH_APQ8084 || ARCH_MSM8916 || ARCH_QCOM)
---help---
Enable support for Jpeg Encoder/Decoder
@@ -221,7 +221,7 @@ config MSMB_JPEG
for the JPEG 1.0 encoder and decoder.
config MSM_GEMINI
- tristate "Qualcomm MSM Gemini JPEG engine support"
+ tristate "QTI MSM Gemini JPEG engine support"
depends on MSMB_CAMERA && (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM8960)
---help---
Enables support for the Gemini JPEG encoder
@@ -230,14 +230,26 @@ config MSM_GEMINI
for JPEG encoding functionality.
config MSM_FD
- tristate "Qualcomm MSM FD face detection engine support"
+ tristate "QTI MSM FD face detection engine support"
depends on MSMB_CAMERA
---help---
Enables support for the MSM FD face detection engine.
config MSM_JPEGDMA
- tristate "Qualcomm Technologies Inc. MSM Jpeg dma"
+ tristate "QTI MSM Jpeg dma"
depends on MSMB_CAMERA
select V4L2_MEM2MEM_DEV
---help---
Enable support for Jpeg dma engine.
+
+config MSM_SEC_CCI_TA_NAME
+ string "Name of TA to handle Secure CCI transactions"
+ depends on MSM_CCI
+ default "seccamdemo64"
+
+config MSM_SEC_CCI_DEBUG
+ bool "QTI MSM Secure CCI Relay Debug"
+ depends on MSM_CCI
+ ---help---
+	  Enables simulation of secure camera for Secure CCI Relay
+ debugging.
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index d3f6fa3fa52d..4200215705d0 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -463,7 +463,8 @@ static int msm_isp_buf_unprepare(struct msm_isp_buf_mgr *buf_mgr,
static int msm_isp_get_buf(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
- uint32_t bufq_handle, struct msm_isp_buffer **buf_info)
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct msm_isp_buffer **buf_info)
{
int rc = -1;
unsigned long flags;
@@ -513,8 +514,12 @@ static int msm_isp_get_buf(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
}
break;
case MSM_ISP_BUFFER_SRC_HAL:
- vb2_v4l2_buf = buf_mgr->vb2_ops->get_buf(
- bufq->session_id, bufq->stream_id);
+ if (buf_index == MSM_ISP_INVALID_BUF_INDEX)
+ vb2_v4l2_buf = buf_mgr->vb2_ops->get_buf(
+ bufq->session_id, bufq->stream_id);
+ else
+ vb2_v4l2_buf = buf_mgr->vb2_ops->get_buf_by_idx(
+ bufq->session_id, bufq->stream_id, buf_index);
if (vb2_v4l2_buf) {
if (vb2_v4l2_buf->vb2_buf.index < bufq->num_bufs) {
*buf_info = &bufq->bufs[vb2_v4l2_buf
@@ -649,95 +654,35 @@ static int msm_isp_put_buf(struct msm_isp_buf_mgr *buf_mgr,
return rc;
}
-static int msm_isp_update_put_buf_cnt_unsafe(
- struct msm_isp_buf_mgr *buf_mgr,
- uint32_t id, uint32_t bufq_handle, int32_t buf_index,
- struct timeval *tv, uint32_t frame_id, uint32_t pingpong_bit)
+static int msm_isp_buf_divert(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id)
{
- int rc = -1;
+ unsigned long flags;
struct msm_isp_bufq *bufq = NULL;
struct msm_isp_buffer *buf_info = NULL;
- uint8_t *put_buf_mask = NULL;
bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
if (!bufq) {
pr_err("Invalid bufq\n");
- return rc;
- }
-
- put_buf_mask = &bufq->put_buf_mask[pingpong_bit];
-
- if (buf_index >= 0) {
- buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
- if (!buf_info) {
- pr_err("%s: buf not found\n", __func__);
- return -EFAULT;
- }
- if (buf_info->state != MSM_ISP_BUFFER_STATE_DEQUEUED) {
- pr_err(
- "%s: Invalid state, bufq_handle %x stream id %x, state %d\n",
- __func__, bufq_handle,
- bufq->stream_id, buf_info->state);
- return -EFAULT;
- }
- if (buf_info->pingpong_bit != pingpong_bit) {
- pr_err("%s: Pingpong bit mismatch\n", __func__);
- return -EFAULT;
- }
- }
-
- if (bufq->buf_type != ISP_SHARE_BUF ||
- (*put_buf_mask == 0)) {
- if (buf_info)
- buf_info->frame_id = frame_id;
- }
-
- if (bufq->buf_type == ISP_SHARE_BUF &&
- ((*put_buf_mask & (1 << id)) == 0)) {
- *put_buf_mask |= (1 << id);
- if (*put_buf_mask != ISP_SHARE_BUF_MASK) {
- rc = *put_buf_mask;
- return 1;
- }
- *put_buf_mask = 0;
- rc = 0;
- } else if (bufq->buf_type == ISP_SHARE_BUF &&
- (*put_buf_mask & (1 << id)) != 0) {
- return -ENOTEMPTY;
+ return -EINVAL;
}
- if (buf_info &&
- MSM_ISP_BUFFER_SRC_NATIVE == BUF_SRC(bufq->stream_id)) {
- buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
- buf_info->tv = tv;
- }
- return 0;
-}
-
-static int msm_isp_update_put_buf_cnt(struct msm_isp_buf_mgr *buf_mgr,
- uint32_t id, uint32_t bufq_handle, int32_t buf_index,
- struct timeval *tv, uint32_t frame_id, uint32_t pingpong_bit)
-{
- int rc = -1;
- struct msm_isp_bufq *bufq = NULL;
- unsigned long flags;
-
- bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
- if (!bufq) {
- pr_err("Invalid bufq\n");
- return rc;
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return -EINVAL;
}
spin_lock_irqsave(&bufq->bufq_lock, flags);
- rc = msm_isp_update_put_buf_cnt_unsafe(buf_mgr, id, bufq_handle,
- buf_index, tv, frame_id, pingpong_bit);
- if (-ENOTEMPTY == rc) {
- pr_err("%s: Error! Uncleared put_buf_mask for pingpong(%d) from vfe %d bufq 0x%x buf_idx %d\n",
- __func__, pingpong_bit, id, bufq_handle, buf_index);
- rc = -EFAULT;
+
+ buf_info->frame_id = frame_id;
+ if (BUF_SRC(bufq->stream_id) == MSM_ISP_BUFFER_SRC_NATIVE) {
+ buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
+ buf_info->tv = tv;
}
spin_unlock_irqrestore(&bufq->bufq_lock, flags);
- return rc;
+ return 0;
}
static int msm_isp_buf_done(struct msm_isp_buf_mgr *buf_mgr,
@@ -795,11 +740,11 @@ done:
return rc;
}
-static int msm_isp_flush_buf(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
+static int msm_isp_flush_buf(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle, enum msm_isp_buffer_flush_t flush_type,
struct timeval *tv, uint32_t frame_id)
{
- int rc = 0, i;
+ int i;
struct msm_isp_bufq *bufq = NULL;
struct msm_isp_buffer *buf_info = NULL;
unsigned long flags;
@@ -817,43 +762,27 @@ static int msm_isp_flush_buf(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
pr_err("%s: buf not found\n", __func__);
continue;
}
- if (flush_type == MSM_ISP_BUFFER_FLUSH_DIVERTED &&
- buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ switch (flush_type) {
+ case MSM_ISP_BUFFER_FLUSH_DIVERTED:
+ if (buf_info->state !=
+ MSM_ISP_BUFFER_STATE_DIVERTED)
+ continue;
buf_info->state = MSM_ISP_BUFFER_STATE_PREPARED;
msm_isp_put_buf_unsafe(buf_mgr,
- bufq_handle, buf_info->buf_idx);
- } else if (flush_type == MSM_ISP_BUFFER_FLUSH_ALL) {
- if (buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED) {
- CDBG("%s: no need to queue Diverted buffer\n",
- __func__);
- } else if (buf_info->state ==
- MSM_ISP_BUFFER_STATE_DEQUEUED) {
- rc = msm_isp_update_put_buf_cnt_unsafe(buf_mgr,
- id, bufq_handle, buf_info->buf_idx, tv,
- frame_id, buf_info->pingpong_bit);
- if (-ENOTEMPTY == rc) {
- rc = 0;
- continue;
- }
-
- if (rc == 0) {
- buf_info->buf_debug.put_state[
- buf_info->buf_debug.
- put_state_last]
- = MSM_ISP_BUFFER_STATE_FLUSH;
- buf_info->buf_debug.put_state_last ^= 1;
- buf_info->state =
- MSM_ISP_BUFFER_STATE_PREPARED;
- rc = msm_isp_put_buf_unsafe(buf_mgr,
- bufq_handle, buf_info->buf_idx);
- if (rc == -EFAULT) {
- spin_unlock_irqrestore(
- &bufq->bufq_lock,
- flags);
- return rc;
- }
- }
- }
+ bufq_handle, buf_info->buf_idx);
+ break;
+ case MSM_ISP_BUFFER_FLUSH_ALL:
+ if (buf_info->state ==
+ MSM_ISP_BUFFER_STATE_DIVERTED)
+ continue;
+ if (buf_info->state !=
+ MSM_ISP_BUFFER_STATE_DEQUEUED)
+ continue;
+ msm_isp_put_buf_unsafe(buf_mgr,
+ bufq_handle, buf_info->buf_idx);
+ break;
+ default:
+ WARN(1, "Invalid flush type %d\n", flush_type);
}
}
@@ -1031,8 +960,6 @@ static int msm_isp_request_bufq(struct msm_isp_buf_mgr *buf_mgr,
bufq->stream_id = buf_request->stream_id;
bufq->num_bufs = buf_request->num_buf;
bufq->buf_type = buf_request->buf_type;
- for (i = 0; i < ISP_NUM_BUF_MASK; i++)
- bufq->put_buf_mask[i] = 0;
INIT_LIST_HEAD(&bufq->head);
for (i = 0; i < buf_request->num_buf; i++) {
@@ -1448,7 +1375,7 @@ static struct msm_isp_buf_ops isp_buf_ops = {
.buf_mgr_deinit = msm_isp_deinit_isp_buf_mgr,
.buf_mgr_debug = msm_isp_buf_mgr_debug,
.get_bufq = msm_isp_get_bufq,
- .update_put_buf_cnt = msm_isp_update_put_buf_cnt,
+ .buf_divert = msm_isp_buf_divert,
};
int msm_isp_create_isp_buf_mgr(
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
index b22fb6a43145..43519ee74062 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,6 +32,8 @@
#define BUF_MGR_NUM_BUF_Q 28
#define MAX_IOMMU_CTX 2
+#define MSM_ISP_INVALID_BUF_INDEX 0xFFFFFFFF
+
struct msm_isp_buf_mgr;
enum msm_isp_buffer_src_t {
@@ -115,7 +117,6 @@ struct msm_isp_bufq {
enum msm_isp_buf_type buf_type;
struct msm_isp_buffer *bufs;
spinlock_t bufq_lock;
- uint8_t put_buf_mask[ISP_NUM_BUF_MASK];
/*Native buffer queue*/
struct list_head head;
};
@@ -140,7 +141,8 @@ struct msm_isp_buf_ops {
uint32_t bufq_handle, uint32_t *buf_src);
int (*get_buf)(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
- uint32_t bufq_handle, struct msm_isp_buffer **buf_info);
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct msm_isp_buffer **buf_info);
int (*get_buf_by_index)(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle, uint32_t buf_index,
@@ -154,7 +156,7 @@ struct msm_isp_buf_ops {
int (*put_buf)(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle, uint32_t buf_index);
- int (*flush_buf)(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
+ int (*flush_buf)(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle, enum msm_isp_buffer_flush_t flush_type,
struct timeval *tv, uint32_t frame_id);
@@ -171,9 +173,9 @@ struct msm_isp_buf_ops {
unsigned long fault_addr);
struct msm_isp_bufq * (*get_bufq)(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle);
- int (*update_put_buf_cnt)(struct msm_isp_buf_mgr *buf_mgr,
- uint32_t id, uint32_t bufq_handle, int32_t buf_index,
- struct timeval *tv, uint32_t frame_id, uint32_t pingpong_bit);
+ int (*buf_divert)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id);
};
struct msm_isp_buf_mgr {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index d3c2d77b0107..094996b2d60b 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -498,7 +498,12 @@ static int vfe_probe(struct platform_device *pdev)
vfe_parent_dev->common_sd->common_data = &vfe_common_data;
memset(&vfe_common_data, 0, sizeof(vfe_common_data));
+ mutex_init(&vfe_common_data.vfe_common_mutex);
spin_lock_init(&vfe_common_data.common_dev_data_lock);
+ for (i = 0; i < (VFE_AXI_SRC_MAX * MAX_VFE); i++)
+ spin_lock_init(&(vfe_common_data.streams[i].lock));
+ for (i = 0; i < (MSM_ISP_STATS_MAX * MAX_VFE); i++)
+ spin_lock_init(&(vfe_common_data.stats_streams[i].lock));
of_property_read_u32(pdev->dev.of_node,
"num_child", &vfe_parent_dev->num_hw_sd);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 763b6a575326..3b6a2eecb4b6 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -169,7 +169,7 @@ struct msm_vfe_axi_ops {
int32_t (*cfg_io_format)(struct vfe_device *vfe_dev,
enum msm_vfe_axi_stream_src stream_src,
uint32_t io_format);
- void (*cfg_framedrop)(void __iomem *vfe_base,
+ void (*cfg_framedrop)(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info,
uint32_t framedrop_pattern, uint32_t framedrop_period);
void (*clear_framedrop)(struct vfe_device *vfe_dev,
@@ -207,7 +207,7 @@ struct msm_vfe_axi_ops {
uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
int (*halt)(struct vfe_device *vfe_dev, uint32_t blocking);
- int (*restart)(struct vfe_device *vfe_dev, uint32_t blocking,
+ void (*restart)(struct vfe_device *vfe_dev, uint32_t blocking,
uint32_t enable_camif);
void (*update_cgc_override)(struct vfe_device *vfe_dev,
uint8_t wm_idx, uint8_t cgc_override);
@@ -270,7 +270,7 @@ struct msm_vfe_stats_ops {
void (*enable_module)(struct vfe_device *vfe_dev,
uint32_t stats_mask, uint8_t enable);
- void (*update_ping_pong_addr)(void __iomem *vfe_base,
+ void (*update_ping_pong_addr)(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr);
@@ -373,12 +373,6 @@ enum msm_vfe_axi_state {
UPDATING,
};
-enum msm_vfe_axi_cfg_update_state {
- NO_AXI_CFG_UPDATE,
- APPLYING_UPDATE_RESUME,
- UPDATE_REQUESTED,
-};
-
#define VFE_NO_DROP 0xFFFFFFFF
#define VFE_DROP_EVERY_2FRAME 0x55555555
#define VFE_DROP_EVERY_4FRAME 0x11111111
@@ -394,9 +388,18 @@ enum msm_vfe_axi_stream_type {
struct msm_vfe_frame_request_queue {
struct list_head list;
enum msm_vfe_buff_queue_id buff_queue_id;
+ uint32_t buf_index;
uint8_t cmd_used;
};
+enum msm_isp_comp_irq_types {
+ MSM_ISP_COMP_IRQ_REG_UPD = 0,
+ MSM_ISP_COMP_IRQ_EPOCH = 1,
+ MSM_ISP_COMP_IRQ_PING_BUFDONE = 2,
+ MSM_ISP_COMP_IRQ_PONG_BUFDONE = 3,
+ MSM_ISP_COMP_IRQ_MAX = 4
+};
+
#define MSM_VFE_REQUESTQ_SIZE 8
struct msm_vfe_axi_stream {
@@ -404,10 +407,10 @@ struct msm_vfe_axi_stream {
enum msm_vfe_axi_state state;
enum msm_vfe_axi_stream_src stream_src;
uint8_t num_planes;
- uint8_t wm[MAX_PLANES_PER_STREAM];
+ uint8_t wm[MAX_VFE][MAX_PLANES_PER_STREAM];
uint32_t output_format;/*Planar/RAW/Misc*/
- struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
- uint8_t comp_mask_index;
+ struct msm_vfe_axi_plane_cfg plane_cfg[MAX_VFE][MAX_PLANES_PER_STREAM];
+ uint8_t comp_mask_index[MAX_VFE];
struct msm_isp_buffer *buf[2];
uint32_t session_id;
uint32_t stream_id;
@@ -419,7 +422,7 @@ struct msm_vfe_axi_stream {
struct list_head request_q;
struct msm_vfe_frame_request_queue
request_queue_cmd[MSM_VFE_REQUESTQ_SIZE];
- uint32_t stream_handle;
+ uint32_t stream_handle[MAX_VFE];
uint8_t buf_divert;
enum msm_vfe_axi_stream_type stream_type;
uint32_t frame_based;
@@ -432,16 +435,28 @@ struct msm_vfe_axi_stream {
spinlock_t lock;
/*Bandwidth calculation info*/
- uint32_t max_width;
+ uint32_t max_width[MAX_VFE];
/*Based on format plane size in Q2. e.g NV12 = 1.5*/
uint32_t format_factor;
- uint32_t bandwidth;
+ uint32_t bandwidth[MAX_VFE];
uint32_t runtime_num_burst_capture;
uint32_t runtime_output_format;
enum msm_stream_memory_input_t memory_input;
struct msm_isp_sw_framskip sw_skip;
uint8_t sw_ping_pong_bit;
+
+ struct vfe_device *vfe_dev[MAX_VFE];
+ int num_isp;
+ struct completion active_comp;
+ struct completion inactive_comp;
+ uint32_t update_vfe_mask;
+ /*
+ * bits in this mask are set that correspond to vfe_id of
+ * the vfe on which this stream operates
+ */
+ uint32_t vfe_mask;
+ uint32_t composite_irq[MSM_ISP_COMP_IRQ_MAX];
};
struct msm_vfe_axi_composite_info {
@@ -450,17 +465,15 @@ struct msm_vfe_axi_composite_info {
};
enum msm_vfe_camif_state {
- CAMIF_STOPPED,
CAMIF_ENABLE,
CAMIF_DISABLE,
- CAMIF_STOPPING,
};
struct msm_vfe_src_info {
uint32_t frame_id;
uint32_t reg_update_frame_id;
uint8_t active;
- uint8_t pix_stream_count;
+ uint8_t stream_count;
uint8_t raw_stream_count;
enum msm_vfe_inputmux input_mux;
uint32_t width;
@@ -491,7 +504,6 @@ enum msm_wm_ub_cfg_type {
struct msm_vfe_axi_shared_data {
struct msm_vfe_axi_hardware_info *hw_info;
- struct msm_vfe_axi_stream stream_info[VFE_AXI_SRC_MAX];
uint32_t free_wm[MAX_NUM_WM];
uint32_t wm_image_size[MAX_NUM_WM];
enum msm_wm_ub_cfg_type wm_ub_cfg_policy;
@@ -503,14 +515,11 @@ struct msm_vfe_axi_shared_data {
struct msm_vfe_axi_composite_info
composite_info[MAX_NUM_COMPOSITE_MASK];
uint8_t num_used_composite_mask;
- uint32_t stream_update[VFE_SRC_MAX];
atomic_t axi_cfg_update[VFE_SRC_MAX];
- enum msm_isp_camif_update_state pipeline_update;
struct msm_vfe_src_info src_info[VFE_SRC_MAX];
uint16_t stream_handle_cnt;
uint32_t event_mask;
uint8_t enable_frameid_recovery;
- enum msm_vfe_camif_state camif_state;
};
struct msm_vfe_stats_hardware_info {
@@ -522,7 +531,7 @@ struct msm_vfe_stats_hardware_info {
};
enum msm_vfe_stats_state {
- STATS_AVALIABLE,
+ STATS_AVAILABLE,
STATS_INACTIVE,
STATS_ACTIVE,
STATS_START_PENDING,
@@ -534,7 +543,7 @@ enum msm_vfe_stats_state {
struct msm_vfe_stats_stream {
uint32_t session_id;
uint32_t stream_id;
- uint32_t stream_handle;
+ uint32_t stream_handle[MAX_VFE];
uint32_t composite_flag;
enum msm_isp_stats_type stats_type;
enum msm_vfe_stats_state state;
@@ -544,17 +553,27 @@ struct msm_vfe_stats_stream {
uint32_t init_stats_frame_drop;
struct msm_isp_sw_framskip sw_skip;
- uint32_t buffer_offset;
+ uint32_t buffer_offset[MAX_VFE];
struct msm_isp_buffer *buf[2];
uint32_t bufq_handle;
+
+ spinlock_t lock;
+ struct vfe_device *vfe_dev[MAX_VFE];
+ int num_isp;
+ struct completion active_comp;
+ struct completion inactive_comp;
+ /*
+ * bits in this mask are set that correspond to vfe_id of
+ * the vfe on which this stream operates
+ */
+ uint32_t vfe_mask;
+ uint32_t composite_irq[MSM_ISP_COMP_IRQ_MAX];
};
struct msm_vfe_stats_shared_data {
- struct msm_vfe_stats_stream stream_info[MSM_ISP_STATS_MAX];
uint8_t num_active_stream;
atomic_t stats_comp_mask[MAX_NUM_STATS_COMP_MASK];
uint16_t stream_handle_cnt;
- atomic_t stats_update;
};
struct msm_vfe_tasklet_queue_cmd {
@@ -653,7 +672,6 @@ struct dual_vfe_resource {
struct msm_vfe_stats_shared_data *stats_data[MAX_VFE];
struct msm_vfe_axi_shared_data *axi_data[MAX_VFE];
uint32_t wm_reload_mask[MAX_VFE];
- uint32_t epoch_sync_mask;
};
struct master_slave_resource_info {
@@ -671,6 +689,9 @@ struct msm_vfe_common_dev_data {
spinlock_t common_dev_data_lock;
struct dual_vfe_resource *dual_vfe_res;
struct master_slave_resource_info ms_resource;
+ struct msm_vfe_axi_stream streams[VFE_AXI_SRC_MAX * MAX_VFE];
+ struct msm_vfe_stats_stream stats_streams[MSM_ISP_STATS_MAX * MAX_VFE];
+ struct mutex vfe_common_mutex;
};
struct msm_vfe_common_subdev {
@@ -713,8 +734,6 @@ struct vfe_device {
/* Sync variables*/
struct completion reset_complete;
struct completion halt_complete;
- struct completion stream_config_complete;
- struct completion stats_config_complete;
struct mutex realtime_mutex;
struct mutex core_mutex;
spinlock_t shared_data_lock;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 9481bede6417..8b5a3d8d508d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -412,10 +412,9 @@ static void msm_vfe32_process_camif_irq(struct vfe_device *vfe_dev,
ISP_DBG("%s: SOF IRQ\n", __func__);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
+ stream_count == 0) {
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
- if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
- msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0, ts);
msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
}
}
@@ -608,15 +607,14 @@ static void msm_vfe32_process_reg_update(struct vfe_device *vfe_dev,
if ((rdi_status & BIT(7)) && (!(irq_status0 & 0x20)))
return;
}
- if (atomic_read(&vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_REG_UPD);
}
for (i = VFE_RAW_0; i <= VFE_RAW_2; i++) {
if (irq_status1 & BIT(26 + (i - VFE_RAW_0))) {
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
+ msm_isp_axi_stream_update(vfe_dev, i, ts);
msm_isp_update_framedrop_reg(vfe_dev, i);
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
@@ -693,8 +691,9 @@ static void msm_vfe32_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index;
+ stream_info->comp_mask_index[vfe_idx];
uint32_t irq_mask;
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
@@ -711,7 +710,9 @@ static void msm_vfe32_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe32_axi_clear_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index =
+ stream_info->comp_mask_index[vfe_idx];
uint32_t irq_mask;
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
@@ -727,8 +728,10 @@ static void msm_vfe32_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t irq_mask;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
- irq_mask |= BIT(stream_info->wm[0] + 6);
+ irq_mask |= BIT(stream_info->wm[vfe_idx][0] + 6);
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
}
@@ -736,15 +739,19 @@ static void msm_vfe32_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t irq_mask;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
- irq_mask &= ~BIT(stream_info->wm[0] + 6);
+ irq_mask &= ~BIT(stream_info->wm[vfe_idx][0] + 6);
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
}
-static void msm_vfe32_cfg_framedrop(void __iomem *vfe_base,
+static void msm_vfe32_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+
if (stream_info->stream_src == PIX_ENCODER) {
msm_camera_io_w(framedrop_period - 1, vfe_base + 0x504);
msm_camera_io_w(framedrop_period - 1, vfe_base + 0x508);
@@ -929,7 +936,7 @@ static void msm_vfe32_update_camif_state(
VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
vfe_en =
((vfe_dev->axi_data.src_info[
- VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ VFE_PIX_0].stream_count > 0) ? 1 : 0);
val &= 0xFFFFFF3F;
val = val | bus_en << 7 | vfe_en << 6;
msm_camera_io_w(val, vfe_dev->vfe_base + 0x1E4);
@@ -971,16 +978,17 @@ static void msm_vfe32_axi_cfg_wm_reg(
uint8_t plane_idx)
{
uint32_t val;
- uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
if (!stream_info->frame_based) {
/*WR_IMAGE_SIZE*/
val =
((msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width)+1)/2 - 1) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
@@ -988,9 +996,9 @@ static void msm_vfe32_axi_cfg_wm_reg(
val =
msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_stride) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1) << 4 | VFE32_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
} else {
@@ -998,9 +1006,9 @@ static void msm_vfe32_axi_cfg_wm_reg(
val =
msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1) << 4 | VFE32_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
}
@@ -1012,7 +1020,8 @@ static void msm_vfe32_axi_clear_wm_reg(
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
uint32_t val = 0;
- uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
/*WR_IMAGE_SIZE*/
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
/*WR_BUFFER_CFG*/
@@ -1024,9 +1033,10 @@ static void msm_vfe32_axi_cfg_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
struct msm_vfe_axi_plane_cfg *plane_cfg =
- &stream_info->plane_cfg[plane_idx];
- uint8_t wm = stream_info->wm[plane_idx];
+ &stream_info->plane_cfg[vfe_idx][plane_idx];
+ uint8_t wm = stream_info->wm[vfe_idx][plane_idx];
uint32_t xbar_cfg = 0;
uint32_t xbar_reg_cfg = 0;
@@ -1080,7 +1090,8 @@ static void msm_vfe32_axi_clear_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint8_t wm = stream_info->wm[vfe_idx][plane_idx];
uint32_t xbar_reg_cfg = 0;
xbar_reg_cfg = msm_camera_io_r(vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
@@ -1098,6 +1109,7 @@ static void msm_vfe32_cfg_axi_ub_equal_default(struct vfe_device *vfe_dev)
uint32_t prop_size = 0;
uint32_t wm_ub_size;
uint64_t delta;
+
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
if (axi_data->free_wm[i] > 0) {
num_used_wms++;
@@ -1243,9 +1255,11 @@ static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe32_stats_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t irq_mask;
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
- irq_mask |= BIT(STATS_IDX(stream_info->stream_handle) + 13);
+ irq_mask |= BIT(STATS_IDX(stream_info->stream_handle[vfe_idx]) + 13);
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
return;
}
@@ -1342,12 +1356,15 @@ static void msm_vfe32_stats_enable_module(struct vfe_device *vfe_dev,
msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x10);
}
-static void msm_vfe32_stats_update_ping_pong_addr(void __iomem *vfe_base,
+static void msm_vfe32_stats_update_ping_pong_addr(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
dma_addr_t paddr)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- int stats_idx = STATS_IDX(stream_info->stream_handle);
+ int stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
msm_camera_io_w(paddr32, vfe_base +
VFE32_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index d42ada769380..a2aa2983b056 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -599,7 +599,6 @@ static void msm_vfe40_process_reg_update(struct vfe_device *vfe_dev,
return;
/* Shift status bits so that PIX REG UPDATE is 1st bit */
shift_irq = ((irq_status0 & 0xF0) >> 4);
-
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
if (shift_irq & BIT(i)) {
reg_updated |= BIT(i);
@@ -607,15 +606,17 @@ static void msm_vfe40_process_reg_update(struct vfe_device *vfe_dev,
(uint32_t)BIT(i));
switch (i) {
case VFE_PIX_0:
- msm_isp_save_framedrop_values(vfe_dev,
- VFE_PIX_0);
msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
VFE_PIX_0, ts);
- if (atomic_read(
- &vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- if (vfe_dev->axi_data.camif_state ==
- CAMIF_STOPPING)
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_REG_UPD);
+ if (vfe_dev->axi_data.src_info[i].stream_count
+ == 0 &&
+ vfe_dev->axi_data.src_info[i].
+ raw_stream_count == 0 &&
+ vfe_dev->axi_data.src_info[i].active)
vfe_dev->hw_info->vfe_ops.core_ops.
reg_update(vfe_dev, i);
break;
@@ -624,29 +625,22 @@ static void msm_vfe40_process_reg_update(struct vfe_device *vfe_dev,
case VFE_RAW_2:
msm_isp_increment_frame_id(vfe_dev, i, ts);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
- msm_isp_update_framedrop_reg(vfe_dev, i);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
/*
* Reg Update is pseudo SOF for RDI,
* so request every frame
*/
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, i);
+ /* reg upd is also epoch for RDI */
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
break;
default:
pr_err("%s: Error case\n", __func__);
return;
}
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(
- &vfe_dev->axi_data.axi_cfg_update[i]) ==
- 0)
- msm_isp_notify(vfe_dev,
- ISP_EVENT_STREAM_UPDATE_DONE,
- i, ts);
- }
}
}
@@ -695,7 +689,9 @@ static void msm_vfe40_reg_update(struct vfe_device *vfe_dev,
vfe_dev->vfe_base + 0x378);
} else if (!vfe_dev->is_split ||
((frame_src == VFE_PIX_0) &&
- (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].stream_count == 0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count == 0)) ||
(frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
msm_camera_io_w_mb(update_mask,
vfe_dev->vfe_base + 0x378);
@@ -713,16 +709,18 @@ static void msm_vfe40_process_epoch_irq(struct vfe_device *vfe_dev,
if (irq_status0 & BIT(2)) {
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
- msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
- msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_EPOCH);
msm_isp_update_error_frame_count(vfe_dev);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
+ stream_count == 0) {
ISP_DBG("%s: SOF IRQ\n", __func__);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
- if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
- msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, VFE_PIX_0);
}
@@ -791,8 +789,10 @@ static void msm_vfe40_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -807,8 +807,11 @@ static void msm_vfe40_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe40_axi_clear_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
- vfe_dev->irq0_mask &= ~BIT(27);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
+ vfe_dev->irq0_mask &= ~BIT(27);
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -821,32 +824,38 @@ static void msm_vfe40_axi_clear_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe40_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe40_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe40_config_irq(vfe_dev, 1 << (stream_info->wm[vfe_idx][0] + 8), 0,
MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe40_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[0] + 8));
- msm_vfe40_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
- MSM_ISP_IRQ_DISABLE);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[vfe_idx][0] + 8));
+ msm_vfe40_config_irq(vfe_dev, (1 << (stream_info->wm[vfe_idx][0] + 8)),
+ 0, MSM_ISP_IRQ_DISABLE);
}
-static void msm_vfe40_cfg_framedrop(void __iomem *vfe_base,
+static void msm_vfe40_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
uint32_t i, temp;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
msm_camera_io_w(framedrop_pattern, vfe_base +
- VFE40_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
temp = msm_camera_io_r(vfe_base +
- VFE40_WM_BASE(stream_info->wm[i]) + 0xC);
+ VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
temp &= 0xFFFFFF83;
msm_camera_io_w(temp | (framedrop_period - 1) << 2,
- vfe_base + VFE40_WM_BASE(stream_info->wm[i]) + 0xC);
+ vfe_base + VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
}
msm_camera_io_w_mb(0x1, vfe_base + 0x378);
@@ -856,9 +865,11 @@ static void msm_vfe40_clear_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++)
msm_camera_io_w(0, vfe_dev->vfe_base +
- VFE40_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
}
static int32_t msm_vfe40_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
@@ -1374,7 +1385,7 @@ static void msm_vfe40_update_camif_state(struct vfe_device *vfe_dev,
src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
vfe_en =
((vfe_dev->axi_data.
- src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ src_info[VFE_PIX_0].stream_count > 0) ? 1 : 0);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
val &= 0xFFFFFF3F;
val = val | bus_en << 7 | vfe_en << 6;
@@ -1443,7 +1454,10 @@ static void msm_vfe40_axi_cfg_wm_reg(
{
uint32_t val;
uint32_t burst_len, wm_bit_shift = VFE40_WM_BIT_SHIFT_8976_VERSION;
- uint32_t wm_base = VFE40_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+
+ wm_base = VFE40_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION ||
vfe_dev->vfe_hw_version == VFE40_8939_VERSION) {
@@ -1468,18 +1482,18 @@ static void msm_vfe40_axi_cfg_wm_reg(
val =
((msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width)+1)/2 - 1) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
/*WR_BUFFER_CFG*/
val =
msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[
+ stream_info->plane_cfg[vfe_idx][
plane_idx].output_stride) << 16 |
- (stream_info->plane_cfg[
+ (stream_info->plane_cfg[vfe_idx][
plane_idx].output_height - 1) << wm_bit_shift |
burst_len;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
@@ -1487,9 +1501,9 @@ static void msm_vfe40_axi_cfg_wm_reg(
msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
val =
msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[
+ stream_info->plane_cfg[vfe_idx][
plane_idx].output_width) << 16 |
- (stream_info->plane_cfg[
+ (stream_info->plane_cfg[vfe_idx][
plane_idx].output_height - 1) << 4 |
burst_len;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
@@ -1507,7 +1521,10 @@ static void msm_vfe40_axi_clear_wm_reg(
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
uint32_t val = 0;
- uint32_t wm_base = VFE40_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+
+ wm_base = VFE40_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
/*WR_ADDR_CFG*/
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0xC);
/*WR_IMAGE_SIZE*/
@@ -1524,12 +1541,15 @@ static void msm_vfe40_axi_cfg_wm_xbar_reg(
struct msm_vfe_axi_stream *stream_info,
uint8_t plane_idx)
{
- struct msm_vfe_axi_plane_cfg *plane_cfg =
- &stream_info->plane_cfg[plane_idx];
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ struct msm_vfe_axi_plane_cfg *plane_cfg;
+ uint8_t wm;
uint32_t xbar_cfg = 0;
uint32_t xbar_reg_cfg = 0;
+ plane_cfg = &stream_info->plane_cfg[vfe_idx][plane_idx];
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
switch (stream_info->stream_src) {
case PIX_ENCODER:
case PIX_VIEWFINDER: {
@@ -1584,9 +1604,12 @@ static void msm_vfe40_axi_clear_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint8_t wm;
uint32_t xbar_reg_cfg = 0;
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
xbar_reg_cfg =
msm_camera_io_r(vfe_dev->vfe_base + VFE40_XBAR_BASE(wm));
xbar_reg_cfg &= ~(0xFFFF << VFE40_XBAR_SHIFT(wm));
@@ -1714,6 +1737,7 @@ static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev,
{
int rc = 0;
enum msm_vfe_input_src i;
+ struct msm_isp_timestamp ts;
/* Keep only halt and restart mask */
msm_vfe40_config_irq(vfe_dev, (1 << 31), (1 << 8),
@@ -1722,30 +1746,16 @@ static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30);
msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34);
msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
+
+ msm_isp_get_timestamp(&ts);
/* if any stream is waiting for update, signal complete */
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
- /* if any stream is waiting for update, signal complete */
- if (vfe_dev->axi_data.stream_update[i]) {
- ISP_DBG("%s: complete stream update\n", __func__);
- msm_isp_axi_stream_update(vfe_dev, i);
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- }
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- ISP_DBG("%s: complete on axi config update\n",
- __func__);
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
- msm_isp_axi_cfg_update(vfe_dev, i);
- }
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
}
- if (atomic_read(&vfe_dev->stats_data.stats_update)) {
- ISP_DBG("%s: complete on stats update\n", __func__);
- msm_isp_stats_stream_update(vfe_dev);
- if (atomic_read(&vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- }
+ msm_isp_stats_stream_update(vfe_dev);
+ msm_isp_stats_stream_update(vfe_dev);
if (blocking) {
init_completion(&vfe_dev->halt_complete);
@@ -1764,7 +1774,7 @@ static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev,
return rc;
}
-static int msm_vfe40_axi_restart(struct vfe_device *vfe_dev,
+static void msm_vfe40_axi_restart(struct vfe_device *vfe_dev,
uint32_t blocking, uint32_t enable_camif)
{
msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
@@ -1786,8 +1796,6 @@ static int msm_vfe40_axi_restart(struct vfe_device *vfe_dev,
vfe_dev->hw_info->vfe_ops.core_ops.
update_camif_state(vfe_dev, ENABLE_CAMIF);
}
-
- return 0;
}
static uint32_t msm_vfe40_get_wm_mask(
@@ -1903,27 +1911,37 @@ static void msm_vfe40_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe40_config_irq(vfe_dev,
- 1 << (STATS_IDX(stream_info->stream_handle) + 16), 0,
- MSM_ISP_IRQ_ENABLE);
+ 1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 16), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe40_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe40_config_irq(vfe_dev,
- (1 << (STATS_IDX(stream_info->stream_handle) + 16)), 0,
- MSM_ISP_IRQ_DISABLE);
+ (1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 16)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe40_stats_cfg_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE40_STATS_BASE(stats_idx);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+ int stats_idx;
+ uint32_t stats_base;
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE40_STATS_BASE(stats_idx);
/*WR_ADDR_CFG*/
msm_camera_io_w(stream_info->framedrop_period << 2,
vfe_dev->vfe_base + stats_base + 0x8);
@@ -1939,9 +1957,14 @@ static void msm_vfe40_stats_clear_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t val = 0;
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE40_STATS_BASE(stats_idx);
+ int stats_idx;
+ uint32_t stats_base;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE40_STATS_BASE(stats_idx);
/*WR_ADDR_CFG*/
msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x8);
@@ -2095,11 +2118,16 @@ static void msm_vfe40_stats_enable_module(struct vfe_device *vfe_dev,
}
static void msm_vfe40_stats_update_ping_pong_addr(
- void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- int stats_idx = STATS_IDX(stream_info->stream_handle);
+ int stats_idx;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
msm_camera_io_w(paddr32, vfe_base +
VFE40_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
index 388656b9ca30..c77eff66ccca 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -437,15 +437,17 @@ static void msm_vfe44_process_reg_update(struct vfe_device *vfe_dev,
(uint32_t)BIT(i));
switch (i) {
case VFE_PIX_0:
- msm_isp_save_framedrop_values(vfe_dev,
- VFE_PIX_0);
msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
VFE_PIX_0, ts);
- if (atomic_read(
- &vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- if (vfe_dev->axi_data.camif_state ==
- CAMIF_STOPPING)
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_REG_UPD);
+ if (vfe_dev->axi_data.src_info[i].stream_count
+ == 0 &&
+ vfe_dev->axi_data.src_info[i].
+ raw_stream_count == 0 &&
+ vfe_dev->axi_data.src_info[i].active)
vfe_dev->hw_info->vfe_ops.core_ops.
reg_update(vfe_dev, i);
break;
@@ -454,29 +456,22 @@ static void msm_vfe44_process_reg_update(struct vfe_device *vfe_dev,
case VFE_RAW_2:
msm_isp_increment_frame_id(vfe_dev, i, ts);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
- msm_isp_update_framedrop_reg(vfe_dev, i);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
/*
* Reg Update is pseudo SOF for RDI,
* so request every frame
*/
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, i);
+ /* reg upd is epoch for rdi */
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
break;
default:
pr_err("%s: Error case\n", __func__);
return;
}
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(
- &vfe_dev->axi_data.axi_cfg_update[i]) ==
- 0)
- msm_isp_notify(vfe_dev,
- ISP_EVENT_STREAM_UPDATE_DONE,
- i, ts);
- }
}
}
@@ -498,17 +493,19 @@ static void msm_vfe44_process_epoch_irq(struct vfe_device *vfe_dev,
if (irq_status0 & BIT(2)) {
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
- msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
- msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_EPOCH);
msm_isp_update_error_frame_count(vfe_dev);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
+ stream_count == 0) {
ISP_DBG("%s: SOF IRQ\n", __func__);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
- if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
- msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, VFE_PIX_0);
}
}
@@ -550,7 +547,9 @@ static void msm_vfe44_reg_update(struct vfe_device *vfe_dev,
vfe_dev->vfe_base + 0x378);
} else if (!vfe_dev->is_split ||
((frame_src == VFE_PIX_0) &&
- (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].stream_count == 0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count == 0)) ||
(frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
msm_camera_io_w_mb(update_mask,
vfe_dev->vfe_base + 0x378);
@@ -628,8 +627,10 @@ static void msm_vfe44_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -644,7 +645,10 @@ static void msm_vfe44_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe44_axi_clear_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -657,31 +661,38 @@ static void msm_vfe44_axi_clear_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe44_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe44_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe44_config_irq(vfe_dev, 1 << (stream_info->wm[vfe_idx][0] + 8), 0,
MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe44_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe44_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
- MSM_ISP_IRQ_DISABLE);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe44_config_irq(vfe_dev, (1 << (stream_info->wm[vfe_idx][0] + 8)),
+ 0, MSM_ISP_IRQ_DISABLE);
}
-static void msm_vfe44_cfg_framedrop(void __iomem *vfe_base,
+static void msm_vfe44_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
uint32_t i, temp;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
msm_camera_io_w(framedrop_pattern, vfe_base +
- VFE44_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE44_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
temp = msm_camera_io_r(vfe_base +
- VFE44_WM_BASE(stream_info->wm[i]) + 0xC);
+ VFE44_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
temp &= 0xFFFFFF83;
msm_camera_io_w(temp | (framedrop_period - 1) << 2,
- vfe_base + VFE44_WM_BASE(stream_info->wm[i]) + 0xC);
+ vfe_base +
+ VFE44_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
}
}
@@ -689,9 +700,11 @@ static void msm_vfe44_clear_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++)
msm_camera_io_w(0, vfe_dev->vfe_base +
- VFE44_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE44_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
}
static int32_t msm_vfe44_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
@@ -1039,7 +1052,7 @@ static void msm_vfe44_update_camif_state(struct vfe_device *vfe_dev,
src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
vfe_en =
((vfe_dev->axi_data.
- src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ src_info[VFE_PIX_0].stream_count > 0) ? 1 : 0);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
val &= 0xFFFFFF3F;
val = val | bus_en << 7 | vfe_en << 6;
@@ -1101,7 +1114,10 @@ static void msm_vfe44_axi_cfg_wm_reg(
uint8_t plane_idx)
{
uint32_t val;
- uint32_t wm_base = VFE44_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+
+ wm_base = VFE44_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
if (!stream_info->frame_based) {
msm_camera_io_w(0x0, vfe_dev->vfe_base + wm_base);
@@ -1109,28 +1125,30 @@ static void msm_vfe44_axi_cfg_wm_reg(
val =
((msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width)+1)/2 - 1) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
/*WR_BUFFER_CFG*/
- val = (stream_info->plane_cfg[plane_idx].output_height - 1);
+ val = (stream_info->plane_cfg[vfe_idx][plane_idx].
+ output_height - 1);
val = (((val & 0xfff) << 2) | ((val >> 12) & 0x3));
val = val << 2 |
msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[
+ stream_info->plane_cfg[vfe_idx][
plane_idx].output_stride) << 16 |
VFE44_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
} else {
msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
- val = (stream_info->plane_cfg[plane_idx].output_height - 1);
+ val = (stream_info->plane_cfg[vfe_idx][plane_idx].
+ output_height - 1);
val = (((val & 0xfff) << 2) | ((val >> 12) & 0x3));
val = val << 2 |
msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[
+ stream_info->plane_cfg[vfe_idx][
plane_idx].output_width) << 16 |
VFE44_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
@@ -1147,8 +1165,10 @@ static void msm_vfe44_axi_clear_wm_reg(
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
uint32_t val = 0;
- uint32_t wm_base = VFE44_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+ wm_base = VFE44_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
/*WR_ADDR_CFG*/
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0xC);
/*WR_IMAGE_SIZE*/
@@ -1164,12 +1184,15 @@ static void msm_vfe44_axi_cfg_wm_xbar_reg(
struct msm_vfe_axi_stream *stream_info,
uint8_t plane_idx)
{
- struct msm_vfe_axi_plane_cfg *plane_cfg =
- &stream_info->plane_cfg[plane_idx];
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ struct msm_vfe_axi_plane_cfg *plane_cfg;
+ uint8_t wm;
uint32_t xbar_cfg = 0;
uint32_t xbar_reg_cfg = 0;
+ plane_cfg = &stream_info->plane_cfg[vfe_idx][plane_idx];
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
switch (stream_info->stream_src) {
case PIX_ENCODER:
case PIX_VIEWFINDER: {
@@ -1223,9 +1246,12 @@ static void msm_vfe44_axi_clear_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint8_t wm;
uint32_t xbar_reg_cfg = 0;
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
xbar_reg_cfg =
msm_camera_io_r(vfe_dev->vfe_base + VFE44_XBAR_BASE(wm));
xbar_reg_cfg &= ~(0xFFFF << VFE44_XBAR_SHIFT(wm));
@@ -1245,6 +1271,7 @@ static void msm_vfe44_cfg_axi_ub_equal_default(
uint32_t prop_size = 0;
uint32_t wm_ub_size;
uint64_t delta;
+
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
if (axi_data->free_wm[i] > 0) {
num_used_wms++;
@@ -1316,6 +1343,7 @@ static int msm_vfe44_axi_halt(struct vfe_device *vfe_dev,
{
int rc = 0;
enum msm_vfe_input_src i;
+ struct msm_isp_timestamp ts;
/* Keep only halt and restart mask */
msm_vfe44_config_irq(vfe_dev, (1 << 31), (1 << 8),
@@ -1349,34 +1377,20 @@ static int msm_vfe44_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
}
+ msm_isp_get_timestamp(&ts);
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
/* if any stream is waiting for update, signal complete */
- if (vfe_dev->axi_data.stream_update[i]) {
- ISP_DBG("%s: complete stream update\n", __func__);
- msm_isp_axi_stream_update(vfe_dev, i);
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- }
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- ISP_DBG("%s: complete on axi config update\n",
- __func__);
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
- msm_isp_axi_cfg_update(vfe_dev, i);
- }
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
}
- if (atomic_read(&vfe_dev->stats_data.stats_update)) {
- ISP_DBG("%s: complete on stats update\n", __func__);
- msm_isp_stats_stream_update(vfe_dev);
- if (atomic_read(&vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- }
+ msm_isp_stats_stream_update(vfe_dev);
+ msm_isp_stats_stream_update(vfe_dev);
return rc;
}
-static int msm_vfe44_axi_restart(struct vfe_device *vfe_dev,
+static void msm_vfe44_axi_restart(struct vfe_device *vfe_dev,
uint32_t blocking, uint32_t enable_camif)
{
msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
@@ -1397,8 +1411,6 @@ static int msm_vfe44_axi_restart(struct vfe_device *vfe_dev,
vfe_dev->hw_info->vfe_ops.core_ops.
update_camif_state(vfe_dev, ENABLE_CAMIF);
}
-
- return 0;
}
static uint32_t msm_vfe44_get_wm_mask(
@@ -1450,15 +1462,15 @@ static int msm_vfe44_stats_check_streams(
struct msm_vfe_stats_stream *stream_info)
{
if (stream_info[STATS_IDX_BF].state ==
- STATS_AVALIABLE &&
+ STATS_AVAILABLE &&
stream_info[STATS_IDX_BF_SCALE].state !=
- STATS_AVALIABLE) {
+ STATS_AVAILABLE) {
pr_err("%s: does not support BF_SCALE while BF is disabled\n",
__func__);
return -EINVAL;
}
- if (stream_info[STATS_IDX_BF].state != STATS_AVALIABLE &&
- stream_info[STATS_IDX_BF_SCALE].state != STATS_AVALIABLE &&
+ if (stream_info[STATS_IDX_BF].state != STATS_AVAILABLE &&
+ stream_info[STATS_IDX_BF_SCALE].state != STATS_AVAILABLE &&
stream_info[STATS_IDX_BF].composite_flag !=
stream_info[STATS_IDX_BF_SCALE].composite_flag) {
pr_err("%s: Different composite flag for BF and BF_SCALE\n",
@@ -1541,27 +1553,37 @@ static void msm_vfe44_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe44_config_irq(vfe_dev,
- 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
- MSM_ISP_IRQ_ENABLE);
+ 1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 15), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe44_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe44_config_irq(vfe_dev,
- (1 << (STATS_IDX(stream_info->stream_handle) + 15)), 0,
- MSM_ISP_IRQ_DISABLE);
+ (1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 15)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe44_stats_cfg_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE44_STATS_BASE(stats_idx);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+ int stats_idx;
+ uint32_t stats_base;
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE44_STATS_BASE(stats_idx);
/* BF_SCALE does not have its own WR_ADDR_CFG,
* IRQ_FRAMEDROP_PATTERN and IRQ_SUBSAMPLE_PATTERN;
* it's using the same from BF */
@@ -1582,9 +1604,14 @@ static void msm_vfe44_stats_clear_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t val = 0;
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE44_STATS_BASE(stats_idx);
+ int stats_idx;
+ uint32_t stats_base;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE44_STATS_BASE(stats_idx);
/* BF_SCALE does not have its own WR_ADDR_CFG,
* IRQ_FRAMEDROP_PATTERN and IRQ_SUBSAMPLE_PATTERN;
* it's using the same from BF */
@@ -1742,12 +1769,16 @@ static void msm_vfe44_stats_update_cgc_override(struct vfe_device *vfe_dev,
}
static void msm_vfe44_stats_update_ping_pong_addr(
- void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- int stats_idx = STATS_IDX(stream_info->stream_handle);
+ int stats_idx;
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
msm_camera_io_w(paddr32, vfe_base +
VFE44_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
index 40bb044fde47..6336892b1b4e 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -376,46 +376,40 @@ static void msm_vfe46_process_reg_update(struct vfe_device *vfe_dev,
switch (i) {
case VFE_PIX_0:
- msm_isp_save_framedrop_values(vfe_dev,
- VFE_PIX_0);
msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
VFE_PIX_0, ts);
- if (atomic_read(
- &vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- if (vfe_dev->axi_data.camif_state ==
- CAMIF_STOPPING)
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_REG_UPD);
+ msm_isp_stats_stream_update(vfe_dev);
+ if (vfe_dev->axi_data.src_info[i].stream_count
+ == 0 &&
+ vfe_dev->axi_data.src_info[i].active)
vfe_dev->hw_info->vfe_ops.core_ops.
- reg_update(vfe_dev, i);
+ reg_update(vfe_dev, i);
break;
case VFE_RAW_0:
case VFE_RAW_1:
case VFE_RAW_2:
msm_isp_increment_frame_id(vfe_dev, i, ts);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
- msm_isp_update_framedrop_reg(vfe_dev, i);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
/*
* Reg Update is pseudo SOF for RDI,
* so request every frame
*/
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, i);
+ /* reg upd is also epoch for rdi */
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
break;
default:
pr_err("%s: Error case\n", __func__);
return;
}
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(
- &vfe_dev->axi_data.axi_cfg_update[i]) ==
- 0)
- msm_isp_notify(vfe_dev,
- ISP_EVENT_STREAM_UPDATE_DONE,
- i, ts);
- }
}
}
@@ -437,14 +431,16 @@ static void msm_vfe46_process_epoch_irq(struct vfe_device *vfe_dev,
if (irq_status0 & BIT(2)) {
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
- msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
- msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_EPOCH);
msm_isp_update_error_frame_count(vfe_dev);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
- if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
- msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ stream_count == 0) {
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, VFE_PIX_0);
}
@@ -488,7 +484,9 @@ static void msm_vfe46_reg_update(struct vfe_device *vfe_dev,
vfe_dev->vfe_base + 0x3D8);
} else if (!vfe_dev->is_split ||
((frame_src == VFE_PIX_0) &&
- (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].stream_count == 0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count == 0)) ||
(frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
msm_camera_io_w_mb(update_mask,
vfe_dev->vfe_base + 0x3D8);
@@ -567,8 +565,10 @@ static void msm_vfe46_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -583,7 +583,10 @@ static void msm_vfe46_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe46_axi_clear_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -596,31 +599,37 @@ static void msm_vfe46_axi_clear_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe46_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe46_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe46_config_irq(vfe_dev, 1 << (stream_info->wm[vfe_idx][0] + 8), 0,
MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe46_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe46_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
- MSM_ISP_IRQ_DISABLE);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe46_config_irq(vfe_dev, (1 << (stream_info->wm[vfe_idx][0] + 8)),
+ 0, MSM_ISP_IRQ_DISABLE);
}
-static void msm_vfe46_cfg_framedrop(void __iomem *vfe_base,
+static void msm_vfe46_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period)
{
uint32_t i, temp;
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
msm_camera_io_w(framedrop_pattern, vfe_base +
- VFE46_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE46_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
temp = msm_camera_io_r(vfe_base +
- VFE46_WM_BASE(stream_info->wm[i]) + 0xC);
+ VFE46_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
temp &= 0xFFFFFF83;
msm_camera_io_w(temp | (framedrop_period - 1) << 2,
- vfe_base + VFE46_WM_BASE(stream_info->wm[i]) + 0xC);
+ vfe_base + VFE46_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
}
}
@@ -628,10 +637,11 @@ static void msm_vfe46_clear_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++)
msm_camera_io_w(0, vfe_dev->vfe_base +
- VFE46_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE46_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
}
static int32_t msm_vfe46_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
@@ -1114,7 +1124,7 @@ static void msm_vfe46_update_camif_state(struct vfe_device *vfe_dev,
src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
vfe_en =
((vfe_dev->axi_data.
- src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ src_info[VFE_PIX_0].stream_count > 0) ? 1 : 0);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x3AC);
val &= 0xFFFFFF3F;
val = val | bus_en << 7 | vfe_en << 6;
@@ -1178,7 +1188,10 @@ static void msm_vfe46_axi_cfg_wm_reg(
uint8_t plane_idx)
{
uint32_t val;
- uint32_t wm_base = VFE46_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+
+ wm_base = VFE46_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
val = msm_camera_io_r(vfe_dev->vfe_base + wm_base + 0xC);
val &= ~0x2;
@@ -1190,17 +1203,18 @@ static void msm_vfe46_axi_cfg_wm_reg(
val =
((msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width)+3)/4 - 1) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
/* WR_BUFFER_CFG */
val = VFE46_BURST_LEN |
- (stream_info->plane_cfg[plane_idx].output_height - 1) <<
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
+ output_height - 1) <<
2 |
((msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_stride)+1)/2) << 16;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
}
@@ -1215,7 +1229,10 @@ static void msm_vfe46_axi_clear_wm_reg(
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
uint32_t val = 0;
- uint32_t wm_base = VFE46_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+
+ wm_base = VFE46_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
/* WR_ADDR_CFG */
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0xC);
@@ -1232,12 +1249,15 @@ static void msm_vfe46_axi_cfg_wm_xbar_reg(
struct msm_vfe_axi_stream *stream_info,
uint8_t plane_idx)
{
- struct msm_vfe_axi_plane_cfg *plane_cfg =
- &stream_info->plane_cfg[plane_idx];
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ struct msm_vfe_axi_plane_cfg *plane_cfg;
+ uint8_t wm;
uint32_t xbar_cfg = 0;
uint32_t xbar_reg_cfg = 0;
+ plane_cfg = &stream_info->plane_cfg[vfe_idx][plane_idx];
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
switch (stream_info->stream_src) {
case PIX_VIDEO:
case PIX_ENCODER:
@@ -1295,9 +1315,12 @@ static void msm_vfe46_axi_clear_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint8_t wm;
uint32_t xbar_reg_cfg = 0;
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
xbar_reg_cfg =
msm_camera_io_r(vfe_dev->vfe_base + VFE46_XBAR_BASE(wm));
xbar_reg_cfg &= ~(0xFFFF << VFE46_XBAR_SHIFT(wm));
@@ -1407,6 +1430,7 @@ static int msm_vfe46_axi_halt(struct vfe_device *vfe_dev,
{
int rc = 0;
enum msm_vfe_input_src i;
+ struct msm_isp_timestamp ts;
/* Keep only halt and restart mask */
msm_vfe46_config_irq(vfe_dev, (1 << 31), (1 << 8),
@@ -1440,34 +1464,19 @@ static int msm_vfe46_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x374);
}
+ msm_isp_get_timestamp(&ts);
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
- /* if any stream is waiting for update, signal complete */
- if (vfe_dev->axi_data.stream_update[i]) {
- ISP_DBG("%s: complete stream update\n", __func__);
- msm_isp_axi_stream_update(vfe_dev, i);
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- }
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- ISP_DBG("%s: complete on axi config update\n",
- __func__);
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
- msm_isp_axi_cfg_update(vfe_dev, i);
- }
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
}
- if (atomic_read(&vfe_dev->stats_data.stats_update)) {
- ISP_DBG("%s: complete on stats update\n", __func__);
- msm_isp_stats_stream_update(vfe_dev);
- if (atomic_read(&vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- }
+ msm_isp_stats_stream_update(vfe_dev);
+ msm_isp_stats_stream_update(vfe_dev);
return rc;
}
-static int msm_vfe46_axi_restart(struct vfe_device *vfe_dev,
+static void msm_vfe46_axi_restart(struct vfe_device *vfe_dev,
uint32_t blocking, uint32_t enable_camif)
{
msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
@@ -1488,8 +1497,6 @@ static int msm_vfe46_axi_restart(struct vfe_device *vfe_dev,
vfe_dev->hw_info->vfe_ops.core_ops.
update_camif_state(vfe_dev, ENABLE_CAMIF);
}
-
- return 0;
}
static uint32_t msm_vfe46_get_wm_mask(
@@ -1541,15 +1548,15 @@ static int msm_vfe46_stats_check_streams(
struct msm_vfe_stats_stream *stream_info)
{
if (stream_info[STATS_IDX_BF].state ==
- STATS_AVALIABLE &&
+ STATS_AVAILABLE &&
stream_info[STATS_IDX_BF_SCALE].state !=
- STATS_AVALIABLE) {
+ STATS_AVAILABLE) {
pr_err("%s: does not support BF_SCALE while BF is disabled\n",
__func__);
return -EINVAL;
}
- if (stream_info[STATS_IDX_BF].state != STATS_AVALIABLE &&
- stream_info[STATS_IDX_BF_SCALE].state != STATS_AVALIABLE &&
+ if (stream_info[STATS_IDX_BF].state != STATS_AVAILABLE &&
+ stream_info[STATS_IDX_BF_SCALE].state != STATS_AVAILABLE &&
stream_info[STATS_IDX_BF].composite_flag !=
stream_info[STATS_IDX_BF_SCALE].composite_flag) {
pr_err("%s: Different composite flag for BF and BF_SCALE\n",
@@ -1632,26 +1639,37 @@ static void msm_vfe46_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe46_config_irq(vfe_dev,
- 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
- MSM_ISP_IRQ_ENABLE);
+ 1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 15), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe46_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe46_config_irq(vfe_dev,
- 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
- MSM_ISP_IRQ_DISABLE);
+ 1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 15), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe46_stats_cfg_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE46_STATS_BASE(stats_idx);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+ int stats_idx;
+ uint32_t stats_base;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE46_STATS_BASE(stats_idx);
/*
* BF_SCALE does not have its own WR_ADDR_CFG,
@@ -1676,10 +1694,14 @@ static void msm_vfe46_stats_clear_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t val = 0;
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE46_STATS_BASE(stats_idx);
+ int stats_idx;
+ uint32_t stats_base;
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE46_STATS_BASE(stats_idx);
/*
* BF_SCALE does not have its own WR_ADDR_CFG,
* IRQ_FRAMEDROP_PATTERN and IRQ_SUBSAMPLE_PATTERN;
@@ -1845,12 +1867,16 @@ static void msm_vfe46_stats_enable_module(struct vfe_device *vfe_dev,
}
static void msm_vfe46_stats_update_ping_pong_addr(
- void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- int stats_idx = STATS_IDX(stream_info->stream_handle);
+ int stats_idx;
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
msm_camera_io_w(paddr32, vfe_base +
VFE46_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 290f100ffeba..b434161f5599 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -562,19 +562,20 @@ void msm_vfe47_process_reg_update(struct vfe_device *vfe_dev,
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
if (shift_irq & BIT(i)) {
reg_updated |= BIT(i);
- ISP_DBG("%s REG_UPDATE IRQ %x\n", __func__,
- (uint32_t)BIT(i));
+ ISP_DBG("%s REG_UPDATE IRQ %x vfe %d\n", __func__,
+ (uint32_t)BIT(i), vfe_dev->pdev->id);
switch (i) {
case VFE_PIX_0:
- msm_isp_save_framedrop_values(vfe_dev,
- VFE_PIX_0);
msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
VFE_PIX_0, ts);
- if (atomic_read(
- &vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- if (vfe_dev->axi_data.camif_state ==
- CAMIF_STOPPING)
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_REG_UPD);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
+ /* if 0 streams then force reg update */
+ if (vfe_dev->axi_data.src_info
+ [i].stream_count == 0 &&
+ vfe_dev->axi_data.src_info[i].active)
vfe_dev->hw_info->vfe_ops.core_ops.
reg_update(vfe_dev, i);
break;
@@ -582,31 +583,23 @@ void msm_vfe47_process_reg_update(struct vfe_device *vfe_dev,
case VFE_RAW_1:
case VFE_RAW_2:
msm_isp_increment_frame_id(vfe_dev, i, ts);
- msm_isp_save_framedrop_values(vfe_dev, i);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
- msm_isp_update_framedrop_reg(vfe_dev, i);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
/*
* Reg Update is pseudo SOF for RDI,
* so request every frame
*/
vfe_dev->hw_info->vfe_ops.core_ops.
reg_update(vfe_dev, i);
+ /* reg upd is also epoch for RDI */
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
break;
default:
pr_err("%s: Error case\n", __func__);
return;
}
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(
- &vfe_dev->axi_data.axi_cfg_update[i]) ==
- 0)
- msm_isp_notify(vfe_dev,
- ISP_EVENT_STREAM_UPDATE_DONE,
- i, ts);
- }
}
}
@@ -627,15 +620,17 @@ void msm_vfe47_process_epoch_irq(struct vfe_device *vfe_dev,
if (irq_status0 & BIT(2)) {
ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
- msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
- msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_EPOCH);
msm_isp_update_error_frame_count(vfe_dev);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
- if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
- msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ stream_count == 0) {
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, VFE_PIX_0);
}
@@ -679,7 +674,9 @@ void msm_vfe47_reg_update(struct vfe_device *vfe_dev,
vfe_dev->vfe_base + 0x4AC);
} else if (!vfe_dev->is_split ||
((frame_src == VFE_PIX_0) &&
- (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].stream_count == 0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count == 0)) ||
(frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
msm_camera_io_w_mb(update_mask,
vfe_dev->vfe_base + 0x4AC);
@@ -768,9 +765,10 @@ void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
comp_mask |= (axi_data->composite_info[comp_mask_index].
@@ -784,8 +782,10 @@ void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
@@ -797,31 +797,37 @@ void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe47_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe47_config_irq(vfe_dev, 1 << (stream_info->wm[vfe_idx][0] + 8), 0,
MSM_ISP_IRQ_ENABLE);
}
void msm_vfe47_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe47_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
- MSM_ISP_IRQ_DISABLE);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe47_config_irq(vfe_dev, (1 << (stream_info->wm[vfe_idx][0] + 8)),
+ 0, MSM_ISP_IRQ_DISABLE);
}
-void msm_vfe47_cfg_framedrop(void __iomem *vfe_base,
+void msm_vfe47_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
uint32_t i, temp;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
msm_camera_io_w(framedrop_pattern, vfe_base +
- VFE47_WM_BASE(stream_info->wm[i]) + 0x24);
+ VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x24);
temp = msm_camera_io_r(vfe_base +
- VFE47_WM_BASE(stream_info->wm[i]) + 0x14);
+ VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x14);
temp &= 0xFFFFFF83;
msm_camera_io_w(temp | (framedrop_period - 1) << 2,
- vfe_base + VFE47_WM_BASE(stream_info->wm[i]) + 0x14);
+ vfe_base + VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x14);
}
}
@@ -829,10 +835,11 @@ void msm_vfe47_clear_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++)
msm_camera_io_w(0, vfe_dev->vfe_base +
- VFE47_WM_BASE(stream_info->wm[i]) + 0x24);
+ VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x24);
}
static int32_t msm_vfe47_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
@@ -1395,7 +1402,7 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
vfe_en =
((vfe_dev->axi_data.
- src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ src_info[VFE_PIX_0].stream_count > 0) ? 1 : 0);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
val &= 0xFFFFFF3F;
val = val | bus_en << 7 | vfe_en << 6;
@@ -1404,7 +1411,6 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x478);
/* configure EPOCH0 for 20 lines */
msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x4A0);
- vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
/* testgen GO*/
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
msm_camera_io_w(1, vfe_dev->vfe_base + 0xC58);
@@ -1427,7 +1433,6 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
poll_val, poll_val & 0x80000000, 1000, 2000000))
pr_err("%s: camif disable failed %x\n",
__func__, poll_val);
- vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
/* testgen OFF*/
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0xC58);
@@ -1469,8 +1474,10 @@ void msm_vfe47_axi_cfg_wm_reg(
uint8_t plane_idx)
{
uint32_t val;
- uint32_t wm_base = VFE47_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+ wm_base = VFE47_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
val = msm_camera_io_r(vfe_dev->vfe_base + wm_base + 0x14);
val &= ~0x2;
if (stream_info->frame_based)
@@ -1480,17 +1487,18 @@ void msm_vfe47_axi_cfg_wm_reg(
/* WR_IMAGE_SIZE */
val = ((msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width)+3)/4 - 1) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x1C);
/* WR_BUFFER_CFG */
val = VFE47_BURST_LEN |
- (stream_info->plane_cfg[plane_idx].output_height - 1) <<
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
+ output_height - 1) <<
2 |
((msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_stride)+1)/2) << 16;
}
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
@@ -1504,8 +1512,10 @@ void msm_vfe47_axi_clear_wm_reg(
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
uint32_t val = 0;
- uint32_t wm_base = VFE47_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+ wm_base = VFE47_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
/* WR_ADDR_CFG */
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
/* WR_IMAGE_SIZE */
@@ -1521,12 +1531,14 @@ void msm_vfe47_axi_cfg_wm_xbar_reg(
struct msm_vfe_axi_stream *stream_info,
uint8_t plane_idx)
{
- struct msm_vfe_axi_plane_cfg *plane_cfg =
- &stream_info->plane_cfg[plane_idx];
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ struct msm_vfe_axi_plane_cfg *plane_cfg;
+ uint8_t wm;
uint32_t xbar_cfg = 0;
uint32_t xbar_reg_cfg = 0;
+ plane_cfg = &stream_info->plane_cfg[vfe_idx][plane_idx];
+ wm = stream_info->wm[vfe_idx][plane_idx];
switch (stream_info->stream_src) {
case PIX_VIDEO:
case PIX_ENCODER:
@@ -1585,9 +1597,11 @@ void msm_vfe47_axi_clear_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint8_t wm;
uint32_t xbar_reg_cfg = 0;
+ wm = stream_info->wm[vfe_idx][plane_idx];
xbar_reg_cfg =
msm_camera_io_r(vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
xbar_reg_cfg &= ~(0xFFFF << VFE47_XBAR_SHIFT(wm));
@@ -1707,6 +1721,7 @@ int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
int rc = 0;
enum msm_vfe_input_src i;
uint32_t val = 0;
+ struct msm_isp_timestamp ts;
val = msm_camera_io_r(vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
val |= 0x1;
@@ -1746,34 +1761,20 @@ int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x400);
}
+ msm_isp_get_timestamp(&ts);
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
- /* if any stream is waiting for update, signal complete */
- if (vfe_dev->axi_data.stream_update[i]) {
- ISP_DBG("%s: complete stream update\n", __func__);
- msm_isp_axi_stream_update(vfe_dev, i);
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- }
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- ISP_DBG("%s: complete on axi config update\n",
- __func__);
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
- msm_isp_axi_cfg_update(vfe_dev, i);
- }
+ /* if any stream is waiting for update, signal fake completes */
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
}
- if (atomic_read(&vfe_dev->stats_data.stats_update)) {
- ISP_DBG("%s: complete on stats update\n", __func__);
- msm_isp_stats_stream_update(vfe_dev);
- if (atomic_read(&vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- }
+ msm_isp_stats_stream_update(vfe_dev);
+ msm_isp_stats_stream_update(vfe_dev);
return rc;
}
-int msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
+void msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
uint32_t blocking, uint32_t enable_camif)
{
msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
@@ -1793,8 +1794,6 @@ int msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
vfe_dev->hw_info->vfe_ops.core_ops.
update_camif_state(vfe_dev, ENABLE_CAMIF);
}
-
- return 0;
}
uint32_t msm_vfe47_get_wm_mask(
@@ -1912,7 +1911,10 @@ void msm_vfe47_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- switch (STATS_IDX(stream_info->stream_handle)) {
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
+ switch (STATS_IDX(stream_info->stream_handle[vfe_idx])) {
case STATS_COMP_IDX_AEC_BG:
msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_ENABLE);
break;
@@ -1943,7 +1945,7 @@ void msm_vfe47_stats_cfg_wm_irq_mask(
break;
default:
pr_err("%s: Invalid stats idx %d\n", __func__,
- STATS_IDX(stream_info->stream_handle));
+ STATS_IDX(stream_info->stream_handle[vfe_idx]));
}
}
@@ -1951,12 +1953,10 @@ void msm_vfe47_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- uint32_t irq_mask, irq_mask_1;
-
- irq_mask = vfe_dev->irq0_mask;
- irq_mask_1 = vfe_dev->irq1_mask;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
- switch (STATS_IDX(stream_info->stream_handle)) {
+ switch (STATS_IDX(stream_info->stream_handle[vfe_idx])) {
case STATS_COMP_IDX_AEC_BG:
msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_DISABLE);
break;
@@ -1987,7 +1987,7 @@ void msm_vfe47_stats_clear_wm_irq_mask(
break;
default:
pr_err("%s: Invalid stats idx %d\n", __func__,
- STATS_IDX(stream_info->stream_handle));
+ STATS_IDX(stream_info->stream_handle[vfe_idx]));
}
}
@@ -1995,8 +1995,13 @@ void msm_vfe47_stats_cfg_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE47_STATS_BASE(stats_idx);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+ int stats_idx;
+ uint32_t stats_base;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE47_STATS_BASE(stats_idx);
/* WR_ADDR_CFG */
msm_camera_io_w(stream_info->framedrop_period << 2,
@@ -2013,9 +2018,14 @@ void msm_vfe47_stats_clear_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t val = 0;
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE47_STATS_BASE(stats_idx);
+ int stats_idx;
+ uint32_t stats_base;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE47_STATS_BASE(stats_idx);
/* WR_ADDR_CFG */
msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x10);
@@ -2171,11 +2181,16 @@ void msm_vfe47_stats_enable_module(struct vfe_device *vfe_dev,
}
void msm_vfe47_stats_update_ping_pong_addr(
- void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- int stats_idx = STATS_IDX(stream_info->stream_handle);
+ int stats_idx;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
msm_camera_io_w(paddr32, vfe_base +
VFE47_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
index 737f845c7272..8581373b3b71 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
@@ -56,7 +56,7 @@ void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info);
void msm_vfe47_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info);
-void msm_vfe47_cfg_framedrop(void __iomem *vfe_base,
+void msm_vfe47_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period);
void msm_vfe47_clear_framedrop(struct vfe_device *vfe_dev,
@@ -107,7 +107,7 @@ void msm_vfe47_update_ping_pong_addr(
int32_t buf_size);
int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
uint32_t blocking);
-int msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
+void msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
uint32_t blocking, uint32_t enable_camif);
uint32_t msm_vfe47_get_wm_mask(
uint32_t irq_status0, uint32_t irq_status1);
@@ -141,7 +141,7 @@ bool msm_vfe47_is_module_cfg_lock_needed(
void msm_vfe47_stats_enable_module(struct vfe_device *vfe_dev,
uint32_t stats_mask, uint8_t enable);
void msm_vfe47_stats_update_ping_pong_addr(
- void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr);
uint32_t msm_vfe47_stats_get_wm_mask(
uint32_t irq_status0, uint32_t irq_status1);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index ac2d508269a4..39a0845a886f 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -22,9 +22,12 @@ static int msm_isp_update_dual_HW_ms_info_at_start(
struct vfe_device *vfe_dev,
enum msm_vfe_input_src stream_src);
-static int msm_isp_update_dual_HW_axi(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info);
+static void msm_isp_reload_ping_pong_offset(
+ struct msm_vfe_axi_stream *stream_info);
+
+static void __msm_isp_axi_stream_update(
+ struct msm_vfe_axi_stream *stream_info,
+ struct msm_isp_timestamp *ts);
#define DUAL_VFE_AND_VFE1(s, v) ((s->stream_src < RDI_INTF_0) && \
v->is_split && vfe_dev->pdev->id == ISP_VFE1)
@@ -33,105 +36,151 @@ static int msm_isp_update_dual_HW_axi(
((s->stream_src >= RDI_INTF_0) && \
(stream_info->stream_src <= RDI_INTF_2)))
-static inline struct msm_vfe_axi_stream *msm_isp_vfe_get_stream(
- struct dual_vfe_resource *dual_vfe_res,
- int vfe_id, uint32_t index)
-{
- struct msm_vfe_axi_shared_data *axi_data =
- dual_vfe_res->axi_data[vfe_id];
- return &axi_data->stream_info[index];
-}
-
-static inline struct msm_vfe_axi_stream *msm_isp_get_controllable_stream(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info)
-{
- if (vfe_dev->is_split && stream_info->stream_src < RDI_INTF_0 &&
- stream_info->controllable_output)
- return msm_isp_vfe_get_stream(
- vfe_dev->common_data->dual_vfe_res,
- ISP_VFE1,
- HANDLE_TO_IDX(
- stream_info->stream_handle));
- return stream_info;
-}
-
-int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
+static int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd,
+ struct msm_vfe_axi_stream *stream_info)
{
- uint32_t i = stream_cfg_cmd->stream_src;
-
- if (i >= VFE_AXI_SRC_MAX) {
- pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
- stream_cfg_cmd->stream_src);
- return -EINVAL;
- }
+ uint32_t i;
+ int rc = 0;
- if (axi_data->stream_info[i].state != AVAILABLE) {
+ if (stream_info->state != AVAILABLE) {
pr_err("%s:%d invalid state %d expected %d for src %d\n",
- __func__, __LINE__, axi_data->stream_info[i].state,
+ __func__, __LINE__, stream_info->state,
AVAILABLE, i);
return -EINVAL;
}
+ if (stream_info->num_isp == 0) {
+ stream_info->session_id = stream_cfg_cmd->session_id;
+ stream_info->stream_id = stream_cfg_cmd->stream_id;
+ stream_info->buf_divert = stream_cfg_cmd->buf_divert;
+ stream_info->stream_src = stream_cfg_cmd->stream_src;
+ stream_info->controllable_output =
+ stream_cfg_cmd->controllable_output;
+ stream_info->activated_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ if (stream_cfg_cmd->controllable_output)
+ stream_cfg_cmd->frame_skip_pattern = SKIP_ALL;
+ INIT_LIST_HEAD(&stream_info->request_q);
+ } else {
+ /* check if the stream has been added for the vfe-device */
+ if (stream_info->vfe_mask & (1 << vfe_dev->pdev->id)) {
+ pr_err("%s: stream %p/%x is already added for vfe dev %d vfe_mask %x\n",
+ __func__, stream_info, stream_info->stream_id,
+ vfe_dev->pdev->id, stream_info->vfe_mask);
+ return -EINVAL;
+ }
+ if (stream_info->session_id != stream_cfg_cmd->session_id) {
+ pr_err("%s: dual stream session id mismatch %d/%d\n",
+ __func__, stream_info->session_id,
+ stream_cfg_cmd->session_id);
+ rc = -EINVAL;
+ }
+ if (stream_info->stream_id != stream_cfg_cmd->stream_id) {
+ pr_err("%s: dual stream stream id mismatch %d/%d\n",
+ __func__, stream_info->stream_id,
+ stream_cfg_cmd->stream_id);
+ rc = -EINVAL;
+ }
+ if (stream_info->controllable_output !=
+ stream_cfg_cmd->controllable_output) {
+ pr_err("%s: dual stream controllable_op mismatch %d/%d\n",
+ __func__, stream_info->controllable_output,
+ stream_cfg_cmd->controllable_output);
+ rc = -EINVAL;
+ }
+ if (stream_info->buf_divert != stream_cfg_cmd->buf_divert) {
+ pr_err("%s: dual stream buf_divert mismatch %d/%d\n",
+ __func__, stream_info->buf_divert,
+ stream_cfg_cmd->buf_divert);
+ rc = -EINVAL;
+ }
+ if (rc)
+ return rc;
+ }
+ stream_info->vfe_dev[stream_info->num_isp] = vfe_dev;
+ stream_info->num_isp++;
+
if ((axi_data->stream_handle_cnt << 8) == 0)
axi_data->stream_handle_cnt++;
stream_cfg_cmd->axi_stream_handle =
- (++axi_data->stream_handle_cnt) << 8 | i;
+ (++axi_data->stream_handle_cnt) << 8 | stream_info->stream_src;
ISP_DBG("%s: vfe %d handle %x\n", __func__, vfe_dev->pdev->id,
stream_cfg_cmd->axi_stream_handle);
- memset(&axi_data->stream_info[i], 0,
- sizeof(struct msm_vfe_axi_stream));
- spin_lock_init(&axi_data->stream_info[i].lock);
- axi_data->stream_info[i].session_id = stream_cfg_cmd->session_id;
- axi_data->stream_info[i].stream_id = stream_cfg_cmd->stream_id;
- axi_data->stream_info[i].buf_divert = stream_cfg_cmd->buf_divert;
- axi_data->stream_info[i].state = INACTIVE;
- axi_data->stream_info[i].stream_handle =
+ stream_info->stream_handle[stream_info->num_isp - 1] =
stream_cfg_cmd->axi_stream_handle;
- axi_data->stream_info[i].controllable_output =
- stream_cfg_cmd->controllable_output;
- axi_data->stream_info[i].activated_framedrop_period =
- MSM_VFE_STREAM_STOP_PERIOD;
- if (stream_cfg_cmd->controllable_output)
- stream_cfg_cmd->frame_skip_pattern = SKIP_ALL;
- INIT_LIST_HEAD(&axi_data->stream_info[i].request_q);
+ stream_info->vfe_mask |= (1 << vfe_dev->pdev->id);
+
+ if (!vfe_dev->is_split || stream_cfg_cmd->stream_src >= RDI_INTF_0 ||
+ stream_info->num_isp == MAX_VFE) {
+ stream_info->state = INACTIVE;
+
+ for (i = 0; i < MSM_ISP_COMP_IRQ_MAX; i++)
+ stream_info->composite_irq[i] = 0;
+ }
return 0;
}
-void msm_isp_axi_destroy_stream(
- struct msm_vfe_axi_shared_data *axi_data, int stream_idx)
+static void msm_isp_axi_destroy_stream(
+ struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
{
- if (axi_data->stream_info[stream_idx].state != AVAILABLE) {
- axi_data->stream_info[stream_idx].state = AVAILABLE;
- axi_data->stream_info[stream_idx].stream_handle = 0;
- } else {
- pr_err("%s: stream does not exist\n", __func__);
+ int k;
+ int j;
+ int i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ /*
+ * For the index being removed, shift everything to it's right by 1
+ * so that the index being removed becomes the last index
+ */
+ for (i = vfe_idx, k = vfe_idx + 1; k < stream_info->num_isp; k++, i++) {
+ stream_info->vfe_dev[i] = stream_info->vfe_dev[k];
+ stream_info->stream_handle[i] = stream_info->stream_handle[k];
+ stream_info->bandwidth[i] = stream_info->bandwidth[k];
+ stream_info->max_width[i] = stream_info->max_width[k];
+ stream_info->comp_mask_index[i] =
+ stream_info->comp_mask_index[k];
+ for (j = 0; j < stream_info->num_planes; j++) {
+ stream_info->plane_cfg[i][j] =
+ stream_info->plane_cfg[k][j];
+ stream_info->wm[i][j] = stream_info->wm[k][j];
+ }
+ }
+
+ stream_info->num_isp--;
+ stream_info->vfe_dev[stream_info->num_isp] = NULL;
+ stream_info->stream_handle[stream_info->num_isp] = 0;
+ stream_info->bandwidth[stream_info->num_isp] = 0;
+ stream_info->max_width[stream_info->num_isp] = 0;
+ stream_info->comp_mask_index[stream_info->num_isp] = -1;
+ stream_info->vfe_mask &= ~(1 << vfe_dev->pdev->id);
+ for (j = 0; j < stream_info->num_planes; j++) {
+ stream_info->wm[stream_info->num_isp][j] = -1;
+ memset(&stream_info->plane_cfg[stream_info->num_isp][j],
+ 0, sizeof(
+ stream_info->plane_cfg[stream_info->num_isp][j]));
+ }
+
+ if (stream_info->num_isp == 0) {
+ /* release the bufq */
+ for (k = 0; k < VFE_BUF_QUEUE_MAX; k++)
+ stream_info->bufq_handle[k] = 0;
+ stream_info->vfe_mask = 0;
+ stream_info->state = AVAILABLE;
}
}
-int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
+static int msm_isp_validate_axi_request(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
{
int rc = -1, i;
- struct msm_vfe_axi_stream *stream_info = NULL;
- if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)
- < VFE_AXI_SRC_MAX) {
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
- } else {
- pr_err("%s: Invalid axi_stream_handle\n", __func__);
- return rc;
- }
-
- if (!stream_info) {
- pr_err("%s: Stream info is NULL\n", __func__);
- return -EINVAL;
- }
+ int vfe_idx;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
switch (stream_cfg_cmd->output_format) {
case V4L2_PIX_FMT_YUYV:
@@ -236,9 +285,13 @@ int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
return rc;
}
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++) {
- stream_info->plane_cfg[i] = stream_cfg_cmd->plane_cfg[i];
- stream_info->max_width = max(stream_info->max_width,
+ stream_info->plane_cfg[vfe_idx][i] =
+ stream_cfg_cmd->plane_cfg[i];
+ stream_info->max_width[vfe_idx] =
+ max(stream_info->max_width[vfe_idx],
stream_cfg_cmd->plane_cfg[i].output_width);
}
@@ -250,10 +303,11 @@ int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
}
static uint32_t msm_isp_axi_get_plane_size(
- struct msm_vfe_axi_stream *stream_info, int plane_idx)
+ struct msm_vfe_axi_stream *stream_info, int vfe_idx, int plane_idx)
{
uint32_t size = 0;
- struct msm_vfe_axi_plane_cfg *plane_cfg = stream_info->plane_cfg;
+ struct msm_vfe_axi_plane_cfg *plane_cfg =
+ stream_info->plane_cfg[vfe_idx];
switch (stream_info->output_format) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
@@ -355,37 +409,41 @@ static uint32_t msm_isp_axi_get_plane_size(
return size;
}
-void msm_isp_axi_reserve_wm(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_shared_data *axi_data,
+static void msm_isp_axi_reserve_wm(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
int i, j;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++) {
for (j = 0; j < axi_data->hw_info->num_wm; j++) {
if (!axi_data->free_wm[j]) {
axi_data->free_wm[j] =
- stream_info->stream_handle;
+ stream_info->stream_handle[vfe_idx];
axi_data->wm_image_size[j] =
msm_isp_axi_get_plane_size(
- stream_info, i);
+ stream_info, vfe_idx, i);
axi_data->num_used_wm++;
break;
}
}
ISP_DBG("%s vfe %d stream_handle %x wm %d\n", __func__,
vfe_dev->pdev->id,
- stream_info->stream_handle, j);
- stream_info->wm[i] = j;
+ stream_info->stream_handle[vfe_idx], j);
+ stream_info->wm[vfe_idx][i] = j;
}
}
-void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
+void msm_isp_axi_free_wm(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
int i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
- axi_data->free_wm[stream_info->wm[i]] = 0;
+ axi_data->free_wm[stream_info->wm[vfe_idx][i]] = 0;
axi_data->num_used_wm--;
}
if (stream_info->stream_src <= IDEAL_RAW)
@@ -394,88 +452,47 @@ void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
axi_data->num_rdi_stream++;
}
-void msm_isp_axi_reserve_comp_mask(
- struct msm_vfe_axi_shared_data *axi_data,
+static void msm_isp_axi_reserve_comp_mask(
+ struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
int i;
uint8_t comp_mask = 0;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++)
- comp_mask |= 1 << stream_info->wm[i];
+ comp_mask |= 1 << stream_info->wm[vfe_idx][i];
for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
if (!axi_data->composite_info[i].stream_handle) {
axi_data->composite_info[i].stream_handle =
- stream_info->stream_handle;
+ stream_info->stream_handle[vfe_idx];
axi_data->composite_info[i].
stream_composite_mask = comp_mask;
axi_data->num_used_composite_mask++;
break;
}
}
- stream_info->comp_mask_index = i;
+ stream_info->comp_mask_index[vfe_idx] = i;
return;
}
-void msm_isp_axi_free_comp_mask(struct msm_vfe_axi_shared_data *axi_data,
+static void msm_isp_axi_free_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- axi_data->composite_info[stream_info->comp_mask_index].
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ axi_data->composite_info[stream_info->comp_mask_index[vfe_idx]].
stream_composite_mask = 0;
- axi_data->composite_info[stream_info->comp_mask_index].
+ axi_data->composite_info[stream_info->comp_mask_index[vfe_idx]].
stream_handle = 0;
axi_data->num_used_composite_mask--;
}
-int msm_isp_axi_check_stream_state(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
-{
- int rc = 0, i;
- unsigned long flags;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- struct msm_vfe_axi_stream *stream_info;
- enum msm_vfe_axi_state valid_state =
- (stream_cfg_cmd->cmd == START_STREAM) ? INACTIVE : ACTIVE;
-
- if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
- return -EINVAL;
-
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
- VFE_AXI_SRC_MAX) {
- return -EINVAL;
- }
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
- spin_lock_irqsave(&stream_info->lock, flags);
- if (stream_info->state != valid_state) {
- if ((stream_info->state == PAUSING ||
- stream_info->state == PAUSED ||
- stream_info->state == RESUME_PENDING ||
- stream_info->state == RESUMING ||
- stream_info->state == UPDATING) &&
- (stream_cfg_cmd->cmd == STOP_STREAM ||
- stream_cfg_cmd->cmd == STOP_IMMEDIATELY)) {
- stream_info->state = ACTIVE;
- } else {
- pr_err("%s: Invalid stream state: %d\n",
- __func__, stream_info->state);
- spin_unlock_irqrestore(
- &stream_info->lock, flags);
- if (stream_cfg_cmd->cmd == START_STREAM)
- rc = -EINVAL;
- break;
- }
- }
- spin_unlock_irqrestore(&stream_info->lock, flags);
- }
- return rc;
-}
-
/**
* msm_isp_cfg_framedrop_reg() - Program the period and pattern
- * @vfe_dev: The device for which the period and pattern is programmed
* @stream_info: The stream for which programming is done
*
* This function calculates the period and pattern to be configured
@@ -484,15 +501,15 @@ int msm_isp_axi_check_stream_state(
*
* Returns void.
*/
-static void msm_isp_cfg_framedrop_reg(struct vfe_device *vfe_dev,
+static void msm_isp_cfg_framedrop_reg(
struct msm_vfe_axi_stream *stream_info)
{
- struct msm_vfe_axi_stream *vfe0_stream_info = NULL;
+ struct vfe_device *vfe_dev = stream_info->vfe_dev[0];
uint32_t runtime_init_frame_drop;
-
uint32_t framedrop_pattern = 0;
uint32_t framedrop_period = MSM_VFE_STREAM_STOP_PERIOD;
enum msm_vfe_input_src frame_src = SRC_TO_INTF(stream_info->stream_src);
+ int i;
if (vfe_dev->axi_data.src_info[frame_src].frame_id >=
stream_info->init_frame_drop)
@@ -507,127 +524,45 @@ static void msm_isp_cfg_framedrop_reg(struct vfe_device *vfe_dev,
if (MSM_VFE_STREAM_STOP_PERIOD != framedrop_period)
framedrop_pattern = 0x1;
- ISP_DBG("%s: stream %x framedrop pattern %x period %u\n", __func__,
- stream_info->stream_handle, framedrop_pattern,
- framedrop_period);
-
BUG_ON(0 == framedrop_period);
- if (DUAL_VFE_AND_VFE1(stream_info, vfe_dev)) {
- vfe0_stream_info = msm_isp_vfe_get_stream(
- vfe_dev->common_data->dual_vfe_res,
- ISP_VFE0,
- HANDLE_TO_IDX(
- stream_info->stream_handle));
+ for (i = 0; i < stream_info->num_isp; i++) {
+ vfe_dev = stream_info->vfe_dev[i];
vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
- vfe_dev->common_data->dual_vfe_res->
- vfe_base[ISP_VFE0],
- vfe0_stream_info, framedrop_pattern,
- framedrop_period);
- vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
- vfe_dev->vfe_base, stream_info,
- framedrop_pattern,
- framedrop_period);
+ vfe_dev, stream_info, framedrop_pattern,
+ framedrop_period);
+ }
- stream_info->requested_framedrop_period =
- framedrop_period;
- vfe0_stream_info->requested_framedrop_period =
- framedrop_period;
+ ISP_DBG("%s: stream %x src %x framedrop pattern %x period %u\n",
+ __func__,
+ stream_info->stream_handle[0], stream_info->stream_src,
+ framedrop_pattern, framedrop_period);
- } else if (RDI_OR_NOT_DUAL_VFE(vfe_dev, stream_info)) {
- vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
- vfe_dev->vfe_base, stream_info, framedrop_pattern,
- framedrop_period);
- stream_info->requested_framedrop_period = framedrop_period;
- }
+ stream_info->requested_framedrop_period = framedrop_period;
}
-/**
- * msm_isp_check_epoch_status() - check the epock signal for framedrop
- *
- * @vfe_dev: The h/w on which the epoch signel is reveived
- * @frame_src: The source of the epoch signal for this frame
- *
- * For dual vfe case and pixel stream, if both vfe's epoch signal is
- * received, this function will return success.
- * It will also return the vfe1 for further process
- * For none dual VFE stream or none pixl source, this
- * funciton will just return success.
- *
- * Returns 1 - epoch received is complete.
- * 0 - epoch reveived is not complete.
- */
-static int msm_isp_check_epoch_status(struct vfe_device **vfe_dev,
- enum msm_vfe_input_src frame_src)
+static int msm_isp_composite_irq(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ enum msm_isp_comp_irq_types irq)
{
- struct vfe_device *vfe_dev_cur = *vfe_dev;
- struct vfe_device *vfe_dev_other = NULL;
- uint32_t vfe_id_other = 0;
- uint32_t vfe_id_cur = 0;
- uint32_t epoch_mask = 0;
- unsigned long flags;
- int completed = 0;
-
- spin_lock_irqsave(
- &vfe_dev_cur->common_data->common_dev_data_lock, flags);
-
- if (vfe_dev_cur->is_split &&
- frame_src == VFE_PIX_0) {
- if (vfe_dev_cur->pdev->id == ISP_VFE0) {
- vfe_id_cur = ISP_VFE0;
- vfe_id_other = ISP_VFE1;
- } else {
- vfe_id_cur = ISP_VFE1;
- vfe_id_other = ISP_VFE0;
- }
- vfe_dev_other = vfe_dev_cur->common_data->dual_vfe_res->
- vfe_dev[vfe_id_other];
-
- if (vfe_dev_cur->common_data->dual_vfe_res->
- epoch_sync_mask & (1 << vfe_id_cur)) {
- /* serious scheduling delay */
- pr_err("Missing epoch: vfe %d, epoch mask 0x%x\n",
- vfe_dev_cur->pdev->id,
- vfe_dev_cur->common_data->dual_vfe_res->
- epoch_sync_mask);
- goto fatal;
- }
-
- vfe_dev_cur->common_data->dual_vfe_res->
- epoch_sync_mask |= (1 << vfe_id_cur);
-
- epoch_mask = (1 << vfe_id_cur) | (1 << vfe_id_other);
- if ((vfe_dev_cur->common_data->dual_vfe_res->
- epoch_sync_mask & epoch_mask) == epoch_mask) {
-
- if (vfe_id_other == ISP_VFE0)
- *vfe_dev = vfe_dev_cur;
- else
- *vfe_dev = vfe_dev_other;
+ /* interrupt recv on same vfe w/o recv on other vfe */
+ if (stream_info->composite_irq[irq] & (1 << vfe_dev->pdev->id)) {
+ pr_err("%s: irq %d out of sync for dual vfe on vfe %d\n",
+ __func__, irq, vfe_dev->pdev->id);
+ return -EINVAL;
+ }
- vfe_dev_cur->common_data->dual_vfe_res->
- epoch_sync_mask &= ~epoch_mask;
- completed = 1;
- }
- } else
- completed = 1;
+ stream_info->composite_irq[irq] |= (1 << vfe_dev->pdev->id);
+ if (stream_info->composite_irq[irq] != stream_info->vfe_mask)
+ return 1;
- spin_unlock_irqrestore(
- &vfe_dev_cur->common_data->common_dev_data_lock, flags);
+ stream_info->composite_irq[irq] = 0;
- return completed;
-fatal:
- spin_unlock_irqrestore(
- &vfe_dev_cur->common_data->common_dev_data_lock, flags);
- /* new error event code will be added later */
- msm_isp_halt_send_error(vfe_dev_cur, ISP_EVENT_PING_PONG_MISMATCH);
return 0;
}
-
/**
* msm_isp_update_framedrop_reg() - Update frame period pattern on h/w
- * @vfe_dev: The h/w on which the perion pattern is updated.
- * @frame_src: Input source.
+ * @stream_info: Stream for which update is to be performed
*
* If the period and pattern needs to be updated for a stream then it is
* updated here. Updates happen if initial frame drop reaches 0 or burst
@@ -635,47 +570,75 @@ fatal:
*
* Returns void
*/
-void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src)
+static void msm_isp_update_framedrop_reg(struct msm_vfe_axi_stream *stream_info)
+{
+ if (stream_info->stream_type == BURST_STREAM) {
+ if (stream_info->runtime_num_burst_capture == 0 ||
+ (stream_info->runtime_num_burst_capture == 1 &&
+ stream_info->activated_framedrop_period == 1))
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ }
+
+ if (stream_info->undelivered_request_cnt > 0)
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+
+ /*
+ * re-configure the period pattern, only if it's not already
+ * set to what we want
+ */
+ if (stream_info->current_framedrop_period !=
+ stream_info->requested_framedrop_period) {
+ msm_isp_cfg_framedrop_reg(stream_info);
+ }
+}
+
+void msm_isp_process_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src,
+ enum msm_isp_comp_irq_types irq,
+ struct msm_isp_timestamp *ts)
{
int i;
- struct msm_vfe_axi_shared_data *axi_data = NULL;
struct msm_vfe_axi_stream *stream_info;
unsigned long flags;
-
- if (msm_isp_check_epoch_status(&vfe_dev, frame_src) != 1)
- return;
-
- axi_data = &vfe_dev->axi_data;
+ int ret;
for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
+ if (SRC_TO_INTF(stream_info->stream_src) !=
frame_src) {
continue;
}
- stream_info = &axi_data->stream_info[i];
- if (stream_info->state != ACTIVE)
+ if (stream_info->state == AVAILABLE ||
+ stream_info->state == INACTIVE)
continue;
spin_lock_irqsave(&stream_info->lock, flags);
- if (BURST_STREAM == stream_info->stream_type) {
- if (0 == stream_info->runtime_num_burst_capture)
- stream_info->current_framedrop_period =
- MSM_VFE_STREAM_STOP_PERIOD;
+ ret = msm_isp_composite_irq(vfe_dev, stream_info, irq);
+ if (ret) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (ret < 0) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return;
+ }
+ continue;
}
- if (stream_info->undelivered_request_cnt > 0)
- stream_info->current_framedrop_period =
- MSM_VFE_STREAM_STOP_PERIOD;
-
- /*
- * re-configure the period pattern, only if it's not already
- * set to what we want
- */
- if (stream_info->current_framedrop_period !=
- stream_info->requested_framedrop_period) {
- msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ switch (irq) {
+ case MSM_ISP_COMP_IRQ_REG_UPD:
+ stream_info->activated_framedrop_period =
+ stream_info->requested_framedrop_period;
+ __msm_isp_axi_stream_update(stream_info, ts);
+ break;
+ case MSM_ISP_COMP_IRQ_EPOCH:
+ if (stream_info->state == ACTIVE)
+ msm_isp_update_framedrop_reg(stream_info);
+ break;
+ default:
+ WARN(1, "Invalid irq %d\n", irq);
}
spin_unlock_irqrestore(&stream_info->lock, flags);
}
@@ -708,7 +671,7 @@ void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
stream_info->frame_skip_pattern);
}
- msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ msm_isp_cfg_framedrop_reg(stream_info);
ISP_DBG("%s: init frame drop: %d\n", __func__,
stream_info->init_frame_drop);
ISP_DBG("%s: num_burst_capture: %d\n", __func__,
@@ -741,10 +704,9 @@ void msm_isp_check_for_output_error(struct vfe_device *vfe_dev,
vfe_dev->reg_update_requested;
}
for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- struct msm_vfe_axi_stream *temp_stream_info;
-
- stream_info = &axi_data->stream_info[i];
- stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ i);
+ stream_idx = HANDLE_TO_IDX(stream_info->stream_handle[0]);
/*
* Process drop only if controllable ACTIVE PIX stream &&
@@ -761,10 +723,8 @@ void msm_isp_check_for_output_error(struct vfe_device *vfe_dev,
if (stream_info->controllable_output &&
!vfe_dev->reg_updated) {
- temp_stream_info =
- msm_isp_get_controllable_stream(vfe_dev,
- stream_info);
- if (temp_stream_info->undelivered_request_cnt) {
+ if (stream_info->undelivered_request_cnt) {
+ pr_err("Drop frame no reg update\n");
if (msm_isp_drop_frame(vfe_dev, stream_info, ts,
sof_info)) {
pr_err("drop frame failed\n");
@@ -991,7 +951,7 @@ void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
spin_unlock_irqrestore(&vfe_dev->common_data->
common_dev_data_lock, flags);
} else {
- if (frame_src == VFE_PIX_0) {
+ if (frame_src <= VFE_RAW_2) {
msm_isp_check_for_output_error(vfe_dev, ts,
&event_data.u.sof_info);
}
@@ -1010,7 +970,7 @@ void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
/**
* msm_isp_calculate_framedrop() - Setup frame period and pattern
- * @axi_data: Structure describing the h/w streams.
+ * @vfe_dev: vfe device.
* @stream_cfg_cmd: User space input parameter for perion/pattern.
*
* Initialize the h/w stream framedrop period and pattern sent
@@ -1018,16 +978,16 @@ void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
*
* Returns 0 on success else error code.
*/
-int msm_isp_calculate_framedrop(
- struct msm_vfe_axi_shared_data *axi_data,
+static int msm_isp_calculate_framedrop(
+ struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
{
uint32_t framedrop_period = 0;
struct msm_vfe_axi_stream *stream_info = NULL;
if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)
< VFE_AXI_SRC_MAX) {
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
} else {
pr_err("%s: Invalid stream handle", __func__);
return -EINVAL;
@@ -1059,26 +1019,36 @@ int msm_isp_calculate_framedrop(
return 0;
}
-void msm_isp_calculate_bandwidth(
- struct msm_vfe_axi_shared_data *axi_data,
+static void msm_isp_calculate_bandwidth(
struct msm_vfe_axi_stream *stream_info)
{
int bpp = 0;
+ struct msm_vfe_axi_shared_data *axi_data;
+ int i;
+
if (stream_info->stream_src < RDI_INTF_0) {
- stream_info->bandwidth =
- (axi_data->src_info[VFE_PIX_0].pixel_clock /
- axi_data->src_info[VFE_PIX_0].width) *
- stream_info->max_width;
- stream_info->bandwidth = (unsigned long)stream_info->bandwidth *
- stream_info->format_factor / ISP_Q2;
+ for (i = 0; i < stream_info->num_isp; i++) {
+ axi_data = &stream_info->vfe_dev[i]->axi_data;
+ stream_info->bandwidth[i] =
+ (axi_data->src_info[VFE_PIX_0].pixel_clock /
+ axi_data->src_info[VFE_PIX_0].width) *
+ stream_info->max_width[i];
+ stream_info->bandwidth[i] =
+ (unsigned long)stream_info->bandwidth[i] *
+ stream_info->format_factor / ISP_Q2;
+ }
} else {
int rdi = SRC_TO_INTF(stream_info->stream_src);
bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
- if (rdi < VFE_SRC_MAX)
- stream_info->bandwidth =
+ if (rdi < VFE_SRC_MAX) {
+ for (i = 0; i < stream_info->num_isp; i++) {
+ axi_data = &stream_info->vfe_dev[i]->axi_data;
+ stream_info->bandwidth[i] =
(axi_data->src_info[rdi].pixel_clock / 8) * bpp;
- else
+ }
+ } else {
pr_err("%s: Invalid rdi interface\n", __func__);
+ }
}
}
@@ -1133,37 +1103,40 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
uint32_t io_format = 0;
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
struct msm_vfe_axi_stream *stream_info;
+ unsigned long flags;
+
+ if (stream_cfg_cmd->stream_src >= VFE_AXI_SRC_MAX) {
+ pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
+ stream_cfg_cmd->stream_src);
+ return -EINVAL;
+ }
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ stream_cfg_cmd->stream_src);
+
+ spin_lock_irqsave(&stream_info->lock, flags);
rc = msm_isp_axi_create_stream(vfe_dev,
- &vfe_dev->axi_data, stream_cfg_cmd);
+ &vfe_dev->axi_data, stream_cfg_cmd, stream_info);
if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: create stream failed\n", __func__);
return rc;
}
rc = msm_isp_validate_axi_request(
- &vfe_dev->axi_data, stream_cfg_cmd);
+ vfe_dev, stream_info, stream_cfg_cmd);
if (rc) {
+ msm_isp_axi_destroy_stream(vfe_dev, stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: Request validation failed\n", __func__);
- if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) <
- VFE_AXI_SRC_MAX)
- msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
- HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
return rc;
}
- stream_info = &vfe_dev->axi_data.
- stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
- if (!stream_info) {
- pr_err("%s: can not find stream handle %x\n", __func__,
- stream_cfg_cmd->axi_stream_handle);
- return -EINVAL;
- }
stream_info->memory_input = stream_cfg_cmd->memory_input;
vfe_dev->reg_update_requested &=
~(BIT(SRC_TO_INTF(stream_info->stream_src)));
- msm_isp_axi_reserve_wm(vfe_dev, &vfe_dev->axi_data, stream_info);
+ msm_isp_axi_reserve_wm(vfe_dev, stream_info);
if (stream_info->stream_src < RDI_INTF_0) {
io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
@@ -1183,16 +1156,67 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
goto done;
}
}
- rc = msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
- if (rc)
- goto done;
+
+ if (!stream_info->controllable_output) {
+ /*
+ * check that the parameters passed from second vfe is same
+ * as first vfe, do this only for non controllable stream
+ * right now because user driver has bug where it sends
+ * mismatch info for controllable streams
+ */
+ if (stream_info->num_isp > 1) {
+ if (stream_cfg_cmd->init_frame_drop !=
+ stream_info->init_frame_drop) {
+ pr_err("%s: stream %d init drop mismatch %d/%d\n",
+ __func__, stream_info->stream_id,
+ stream_info->init_frame_drop,
+ stream_cfg_cmd->init_frame_drop);
+ rc = -EINVAL;
+ }
+ if (stream_cfg_cmd->frame_skip_pattern !=
+ stream_info->frame_skip_pattern) {
+ pr_err("%s: stream %d skip pattern mismatch %d/%d\n",
+ __func__, stream_info->stream_id,
+ stream_info->frame_skip_pattern,
+ stream_cfg_cmd->frame_skip_pattern);
+ rc = -EINVAL;
+ }
+ if (stream_info->stream_type == CONTINUOUS_STREAM &&
+ stream_cfg_cmd->burst_count > 0) {
+ pr_err("%s: stream %d stream type mismatch\n",
+ __func__, stream_info->stream_id);
+ rc = -EINVAL;
+ }
+ if (stream_info->stream_type == BURST_STREAM &&
+ stream_info->num_burst_capture !=
+ stream_cfg_cmd->burst_count) {
+ pr_err("%s: stream %d stream burst count mismatch %d/%d\n",
+ __func__, stream_info->stream_id,
+ stream_info->num_burst_capture,
+ stream_cfg_cmd->burst_count);
+ rc = -EINVAL;
+ }
+ } else {
+ rc = msm_isp_calculate_framedrop(vfe_dev,
+ stream_cfg_cmd);
+ }
+ if (rc)
+ goto done;
+ } else {
+ stream_info->stream_type = BURST_STREAM;
+ stream_info->num_burst_capture = 0;
+ stream_info->frame_skip_pattern = NO_SKIP;
+ stream_info->init_frame_drop = stream_cfg_cmd->init_frame_drop;
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ }
if (stream_cfg_cmd->vt_enable && !vfe_dev->vt_enable) {
vfe_dev->vt_enable = stream_cfg_cmd->vt_enable;
msm_isp_start_avtimer();
}
+
if (stream_info->num_planes > 1)
- msm_isp_axi_reserve_comp_mask(
- &vfe_dev->axi_data, stream_info);
+ msm_isp_axi_reserve_comp_mask(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
vfe_dev->hw_info->vfe_ops.axi_ops.
@@ -1201,16 +1225,17 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
vfe_dev->hw_info->vfe_ops.axi_ops.
cfg_wm_xbar_reg(vfe_dev, stream_info, i);
}
- /* initialize the WM ping pong with scratch buffer */
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PING_FLAG);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PONG_FLAG);
-
+ if (stream_info->state == INACTIVE) {
+ /* initialize the WM ping pong with scratch buffer */
+ msm_isp_cfg_stream_scratch(stream_info, VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(stream_info, VFE_PONG_FLAG);
+ }
done:
if (rc) {
- msm_isp_axi_free_wm(&vfe_dev->axi_data, stream_info);
- msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
- HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
+ msm_isp_axi_free_wm(vfe_dev, stream_info);
+ msm_isp_axi_destroy_stream(vfe_dev, stream_info);
}
+ spin_unlock_irqrestore(&stream_info->lock, flags);
return rc;
}
@@ -1218,26 +1243,42 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0, i;
struct msm_vfe_axi_stream_release_cmd *stream_release_cmd = arg;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_stream_cfg_cmd stream_cfg;
-
+ int vfe_idx;
+ unsigned long flags;
if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >=
VFE_AXI_SRC_MAX) {
pr_err("%s: Invalid stream handle\n", __func__);
return -EINVAL;
}
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_release_cmd->stream_handle)];
- if (stream_info->state == AVAILABLE) {
- pr_err("%s: Stream already released\n", __func__);
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(stream_release_cmd->stream_handle));
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+
+ vfe_idx = msm_isp_get_vfe_idx_for_stream_user(vfe_dev, stream_info);
+ if (vfe_idx == -ENOTTY ||
+ stream_release_cmd->stream_handle !=
+ stream_info->stream_handle[vfe_idx]) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: Invalid stream %p handle %x/%x vfe_idx %d vfe_dev %d num_isp %d\n",
+ __func__, stream_info,
+ stream_release_cmd->stream_handle,
+ vfe_idx != -ENOTTY ?
+ stream_info->stream_handle[vfe_idx] : 0, vfe_idx,
+ vfe_dev->pdev->id, stream_info->num_isp);
return -EINVAL;
- } else if (stream_info->state != INACTIVE) {
+ }
+
+ if (stream_info->state != INACTIVE && stream_info->state != AVAILABLE) {
stream_cfg.cmd = STOP_STREAM;
stream_cfg.num_streams = 1;
stream_cfg.stream_handle[0] = stream_release_cmd->stream_handle;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg);
+ spin_lock_irqsave(&stream_info->lock, flags);
}
for (i = 0; i < stream_info->num_planes; i++) {
@@ -1249,33 +1290,75 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
}
if (stream_info->num_planes > 1)
- msm_isp_axi_free_comp_mask(&vfe_dev->axi_data, stream_info);
+ msm_isp_axi_free_comp_mask(vfe_dev, stream_info);
vfe_dev->hw_info->vfe_ops.axi_ops.clear_framedrop(vfe_dev, stream_info);
- msm_isp_axi_free_wm(axi_data, stream_info);
+ msm_isp_axi_free_wm(vfe_dev, stream_info);
- msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
- HANDLE_TO_IDX(stream_release_cmd->stream_handle));
+ msm_isp_axi_destroy_stream(vfe_dev, stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
return rc;
}
-static int msm_isp_axi_stream_enable_cfg(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info, int32_t dual_vfe_sync)
+void msm_isp_release_all_axi_stream(struct vfe_device *vfe_dev)
{
- int i, vfe_id = 0, enable_wm = 0;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
- struct dual_vfe_resource *dual_vfe_res = NULL;
+ struct msm_vfe_axi_stream_release_cmd
+ stream_release_cmd[VFE_AXI_SRC_MAX];
+ struct msm_vfe_axi_stream_cfg_cmd stream_cfg_cmd;
+ struct msm_vfe_axi_stream *stream_info;
+ int i;
+ int vfe_idx;
+ int num_stream = 0;
+ unsigned long flags;
- if (stream_idx >= VFE_AXI_SRC_MAX) {
- pr_err("%s: Invalid stream_idx", __func__);
- goto error;
+ stream_cfg_cmd.cmd = STOP_STREAM;
+ stream_cfg_cmd.num_streams = 0;
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ vfe_idx = msm_isp_get_vfe_idx_for_stream_user(
+ vfe_dev, stream_info);
+ if (-ENOTTY == vfe_idx) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ stream_release_cmd[num_stream++].stream_handle =
+ stream_info->stream_handle[vfe_idx];
+ if (stream_info->state == INACTIVE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ stream_cfg_cmd.stream_handle[
+ stream_cfg_cmd.num_streams] =
+ stream_info->stream_handle[vfe_idx];
+ stream_cfg_cmd.num_streams++;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
+ if (stream_cfg_cmd.num_streams)
+ msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg_cmd);
- if (stream_info->state == INACTIVE)
- goto error;
+ for (i = 0; i < num_stream; i++)
+ msm_isp_release_axi_stream(vfe_dev, &stream_release_cmd[i]);
+}
+
+static void msm_isp_axi_stream_enable_cfg(
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int enable_wm = 0;
+ struct vfe_device *vfe_dev;
+ struct msm_vfe_axi_shared_data *axi_data;
+ uint32_t stream_idx = stream_info->stream_src;
+ int k;
+ int i;
+
+ WARN_ON(stream_idx >= VFE_AXI_SRC_MAX);
+
+ WARN_ON(stream_info->state != START_PENDING &&
+ stream_info->state != RESUME_PENDING &&
+ stream_info->state != STOP_PENDING &&
+ stream_info->state != PAUSE_PENDING);
if (stream_info->state == START_PENDING ||
stream_info->state == RESUME_PENDING) {
@@ -1283,50 +1366,24 @@ static int msm_isp_axi_stream_enable_cfg(
} else {
enable_wm = 0;
}
- for (i = 0; i < stream_info->num_planes; i++) {
- /*
- * In case when sensor is streaming, use dual vfe sync mode
- * to enable wm together and avoid split.
- */
- if ((stream_info->stream_src < RDI_INTF_0) &&
- vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1 &&
- dual_vfe_sync) {
- dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
- if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
- !dual_vfe_res->axi_data[ISP_VFE0] ||
- !dual_vfe_res->vfe_base[ISP_VFE1] ||
- !dual_vfe_res->axi_data[ISP_VFE1]) {
- pr_err("%s:%d failed vfe0 %pK %pK vfe %pK %pK\n",
- __func__, __LINE__,
- dual_vfe_res->vfe_base[ISP_VFE0],
- dual_vfe_res->axi_data[ISP_VFE0],
- dual_vfe_res->vfe_base[ISP_VFE1],
- dual_vfe_res->axi_data[ISP_VFE1]);
- goto error;
- }
- for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- enable_wm(dual_vfe_res->vfe_base[vfe_id],
- dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx].wm[i],
- enable_wm);
- }
- } else if (!vfe_dev->is_split ||
- (stream_info->stream_src >= RDI_INTF_0 &&
- stream_info->stream_src <= RDI_INTF_2) ||
- !dual_vfe_sync) {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- enable_wm(vfe_dev->vfe_base, stream_info->wm[i],
- enable_wm);
- }
- if (!enable_wm) {
- /* Issue a reg update for Raw Snapshot Case
+
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ axi_data = &vfe_dev->axi_data;
+ for (i = 0; i < stream_info->num_planes; i++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.enable_wm(
+ vfe_dev->vfe_base,
+ stream_info->wm[k][i], enable_wm);
+ if (enable_wm)
+ continue;
+ /*
+ * Issue a reg update for Raw Snapshot Case
* since we dont have reg update ack
- */
+ */
if (vfe_dev->axi_data.src_info[VFE_PIX_0].
raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
+ stream_count == 0) {
if (stream_info->stream_src == CAMIF_RAW ||
stream_info->stream_src == IDEAL_RAW) {
vfe_dev->hw_info->vfe_ops.core_ops.
@@ -1335,70 +1392,103 @@ static int msm_isp_axi_stream_enable_cfg(
}
}
}
+ if (stream_info->state == START_PENDING)
+ axi_data->num_active_stream++;
+ else if (stream_info->state == STOP_PENDING)
+ axi_data->num_active_stream--;
+ }
+}
+
+static void __msm_isp_axi_stream_update(
+ struct msm_vfe_axi_stream *stream_info,
+ struct msm_isp_timestamp *ts)
+{
+ int j;
+ int intf = SRC_TO_INTF(stream_info->stream_src);
+ struct vfe_device *vfe_dev;
+ int k;
+
+ switch (stream_info->state) {
+ case UPDATING:
+ stream_info->state = ACTIVE;
+ break;
+ case STOP_PENDING:
+ msm_isp_axi_stream_enable_cfg(stream_info);
+ stream_info->state = STOPPING;
+ break;
+ case START_PENDING:
+ msm_isp_axi_stream_enable_cfg(stream_info);
+ stream_info->state = STARTING;
+ break;
+ case STOPPING:
+ stream_info->state = INACTIVE;
+ for (k = 0; k < MSM_ISP_COMP_IRQ_MAX; k++)
+ stream_info->composite_irq[k] = 0;
+ complete_all(&stream_info->inactive_comp);
+ break;
+ case STARTING:
+ stream_info->state = ACTIVE;
+ complete_all(&stream_info->active_comp);
+ break;
+ case PAUSING:
+ stream_info->state = PAUSED;
+ msm_isp_reload_ping_pong_offset(stream_info);
+ for (j = 0; j < stream_info->num_planes; j++) {
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, j);
+ }
+ }
+ stream_info->state = RESUME_PENDING;
+ msm_isp_axi_stream_enable_cfg(stream_info);
+ stream_info->state = RESUMING;
+ break;
+ case RESUMING:
+ stream_info->runtime_output_format = stream_info->output_format;
+ stream_info->state = ACTIVE;
+ complete_all(&stream_info->active_comp);
+ for (j = 0; j < stream_info->num_isp; j++) {
+ /* notify that all streams have been updated */
+ msm_isp_notify(stream_info->vfe_dev[j],
+ ISP_EVENT_STREAM_UPDATE_DONE, intf, ts);
+ atomic_set(&stream_info->vfe_dev[j]->
+ axi_data.axi_cfg_update[intf], 0);
+ }
+ stream_info->update_vfe_mask = 0;
+ break;
+ default:
+ break;
}
- if (stream_info->state == START_PENDING)
- axi_data->num_active_stream++;
- else if (stream_info->state == STOP_PENDING)
- axi_data->num_active_stream--;
- return 0;
-error:
- return -EINVAL;
}
void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src)
+ enum msm_vfe_input_src frame_src,
+ struct msm_isp_timestamp *ts)
{
int i;
unsigned long flags;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
+ if (SRC_TO_INTF(stream_info->stream_src) !=
frame_src) {
ISP_DBG("%s stream_src %d frame_src %d\n", __func__,
SRC_TO_INTF(
- axi_data->stream_info[i].stream_src),
+ stream_info->stream_src),
frame_src);
continue;
}
- if (axi_data->stream_info[i].state == UPDATING)
- axi_data->stream_info[i].state = ACTIVE;
- else if (axi_data->stream_info[i].state == START_PENDING ||
- axi_data->stream_info[i].state == STOP_PENDING) {
- msm_isp_axi_stream_enable_cfg(
- vfe_dev, &axi_data->stream_info[i],
- axi_data->stream_info[i].state ==
- START_PENDING ? 1 : 0);
- axi_data->stream_info[i].state =
- axi_data->stream_info[i].state ==
- START_PENDING ? STARTING : STOPPING;
- } else if (axi_data->stream_info[i].state == STARTING ||
- axi_data->stream_info[i].state == STOPPING) {
- axi_data->stream_info[i].state =
- axi_data->stream_info[i].state == STARTING ?
- ACTIVE : INACTIVE;
- }
- }
-
- spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
- if (vfe_dev->axi_data.stream_update[frame_src]) {
- vfe_dev->axi_data.stream_update[frame_src]--;
- }
- spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
-
- if (vfe_dev->axi_data.pipeline_update == DISABLE_CAMIF ||
- (vfe_dev->axi_data.pipeline_update ==
- DISABLE_CAMIF_IMMEDIATELY)) {
- vfe_dev->hw_info->vfe_ops.stats_ops.
- enable_module(vfe_dev, 0xFF, 0);
- vfe_dev->axi_data.pipeline_update = NO_UPDATE;
+ if (stream_info->state == AVAILABLE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ __msm_isp_axi_stream_update(stream_info, ts);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
-
- if (vfe_dev->axi_data.stream_update[frame_src] == 0)
- complete(&vfe_dev->stream_config_complete);
}
-static void msm_isp_reload_ping_pong_offset(struct vfe_device *vfe_dev,
+static void msm_isp_reload_ping_pong_offset(
struct msm_vfe_axi_stream *stream_info)
{
int i, j;
@@ -1406,120 +1496,70 @@ static void msm_isp_reload_ping_pong_offset(struct vfe_device *vfe_dev,
struct msm_isp_buffer *buf;
int32_t buf_size_byte = 0;
int32_t word_per_line = 0;
+ int k;
+ struct vfe_device *vfe_dev;
+
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ for (i = 0; i < 2; i++) {
+ buf = stream_info->buf[i];
+ if (!buf)
+ continue;
- for (i = 0; i < 2; i++) {
- buf = stream_info->buf[i];
- if (!buf)
- continue;
-
- bit = i ? 0 : 1;
+ bit = i ? 0 : 1;
- for (j = 0; j < stream_info->num_planes; j++) {
- word_per_line = msm_isp_cal_word_per_line(
+ for (j = 0; j < stream_info->num_planes; j++) {
+ word_per_line = msm_isp_cal_word_per_line(
stream_info->output_format, stream_info->
- plane_cfg[j].output_stride);
- if (word_per_line < 0) {
- /* 0 means no prefetch*/
- word_per_line = 0;
- buf_size_byte = 0;
- } else {
- buf_size_byte = (word_per_line * 8 *
- stream_info->plane_cfg[j].
+ plane_cfg[k][j].output_stride);
+ if (word_per_line < 0) {
+ /* 0 means no prefetch*/
+ word_per_line = 0;
+ buf_size_byte = 0;
+ } else {
+ buf_size_byte = (word_per_line * 8 *
+ stream_info->plane_cfg[k][j].
output_scan_lines) - stream_info->
- plane_cfg[j].plane_addr_offset;
- }
-
- vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
- vfe_dev->vfe_base, stream_info->wm[j], bit,
- buf->mapped_info[j].paddr +
- stream_info->plane_cfg[j].plane_addr_offset,
- buf_size_byte);
- }
- }
-}
-
-void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src)
-{
- int i, j;
- uint32_t update_state;
- unsigned long flags;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- struct msm_vfe_axi_stream *stream_info;
- int num_stream = 0;
+ plane_cfg[k][j].plane_addr_offset;
+ }
- spin_lock_irqsave(&vfe_dev->common_data->common_dev_data_lock, flags);
- for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
- frame_src) {
- continue;
- }
- num_stream++;
- stream_info = &axi_data->stream_info[i];
- if ((stream_info->stream_type == BURST_STREAM &&
- !stream_info->controllable_output) ||
- stream_info->state == AVAILABLE)
- continue;
- spin_lock_irqsave(&stream_info->lock, flags);
- if (stream_info->state == PAUSING) {
- /*AXI Stopped, apply update*/
- stream_info->state = PAUSED;
- msm_isp_reload_ping_pong_offset(vfe_dev, stream_info);
- for (j = 0; j < stream_info->num_planes; j++)
vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_wm_reg(vfe_dev, stream_info, j);
- /*Resume AXI*/
- stream_info->state = RESUME_PENDING;
- if (vfe_dev->is_split) {
- msm_isp_update_dual_HW_axi(vfe_dev,
- stream_info);
- } else {
- msm_isp_axi_stream_enable_cfg(
- vfe_dev,
- &axi_data->stream_info[i], 1);
- stream_info->state = RESUMING;
+ update_ping_pong_addr(
+ vfe_dev->vfe_base,
+ stream_info->wm[k][j],
+ bit,
+ buf->mapped_info[j].paddr +
+ stream_info->plane_cfg[k][j].
+ plane_addr_offset,
+ buf_size_byte);
}
- } else if (stream_info->state == RESUMING) {
- stream_info->runtime_output_format =
- stream_info->output_format;
- stream_info->state = ACTIVE;
}
- spin_unlock_irqrestore(&stream_info->lock, flags);
}
- spin_unlock_irqrestore(&vfe_dev->common_data->common_dev_data_lock,
- flags);
- if (num_stream)
- update_state = atomic_dec_return(
- &axi_data->axi_cfg_update[frame_src]);
}
static int msm_isp_update_deliver_count(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_bit)
{
- struct msm_vfe_axi_stream *temp_stream_info;
int rc = 0;
if (!stream_info->controllable_output)
goto done;
- temp_stream_info =
- msm_isp_get_controllable_stream(vfe_dev, stream_info);
-
- if (!temp_stream_info->undelivered_request_cnt) {
+ if (!stream_info->undelivered_request_cnt) {
pr_err_ratelimited("%s:%d error undelivered_request_cnt 0\n",
__func__, __LINE__);
rc = -EINVAL;
goto done;
} else {
- temp_stream_info->undelivered_request_cnt--;
- if (pingpong_bit != temp_stream_info->sw_ping_pong_bit) {
+ stream_info->undelivered_request_cnt--;
+ if (pingpong_bit != stream_info->sw_ping_pong_bit) {
pr_err("%s:%d ping pong bit actual %d sw %d\n",
__func__, __LINE__, pingpong_bit,
- temp_stream_info->sw_ping_pong_bit);
+ stream_info->sw_ping_pong_bit);
rc = -EINVAL;
goto done;
}
- temp_stream_info->sw_ping_pong_bit ^= 1;
+ stream_info->sw_ping_pong_bit ^= 1;
}
done:
return rc;
@@ -1527,7 +1567,6 @@ done:
void msm_isp_halt_send_error(struct vfe_device *vfe_dev, uint32_t event)
{
- uint32_t i = 0;
struct msm_isp_event_data error_event;
struct msm_vfe_axi_halt_cmd halt_cmd;
@@ -1545,10 +1584,6 @@ void msm_isp_halt_send_error(struct vfe_device *vfe_dev, uint32_t event)
/* heavy spin lock in axi halt, avoid spin lock outside. */
msm_isp_axi_halt(vfe_dev, &halt_cmd);
- for (i = 0; i < VFE_AXI_SRC_MAX; i++)
- vfe_dev->axi_data.stream_info[i].state =
- INACTIVE;
-
error_event.frame_id =
vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
@@ -1562,31 +1597,49 @@ int msm_isp_print_ping_pong_address(struct vfe_device *vfe_dev,
struct msm_isp_buffer *buf = NULL;
uint32_t pingpong_bit;
struct msm_vfe_axi_stream *stream_info = NULL;
+ int k;
for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
- stream_info = &vfe_dev->axi_data.stream_info[j];
- if (stream_info->state == INACTIVE)
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, j);
+ if (stream_info->state == INACTIVE ||
+ stream_info->state == AVAILABLE)
continue;
for (pingpong_bit = 0; pingpong_bit < 2; pingpong_bit++) {
+ dma_addr_t temp;
+
+ buf = stream_info->buf[pingpong_bit];
+ if (buf == NULL) {
+ pr_err("%s: buf NULL for stream %x num_isp %d\n",
+ __func__,
+ stream_info->stream_src,
+ stream_info->num_isp);
+ continue;
+ }
+ temp = buf->mapped_info[0].paddr +
+ buf->mapped_info[0].len;
+ pr_err("%s: stream %x ping bit %d uses buffer %pa-%pa, num_isp %d\n",
+ __func__, stream_info->stream_src,
+ pingpong_bit,
+ &buf->mapped_info[0].paddr, &temp,
+ stream_info->num_isp);
+
for (i = 0; i < stream_info->num_planes; i++) {
- buf = stream_info->buf[pingpong_bit];
- if (buf == NULL) {
- pr_err("%s: buf NULL\n", __func__);
- continue;
- }
- pr_debug("%s: stream_id %x ping-pong %d plane %d start_addr %lu addr_offset %x len %zx stride %d scanline %d\n"
+ for (k = 0; k < stream_info->num_isp; k++) {
+ pr_debug(
+ "%s: stream_id %x ping-pong %d plane %d start_addr %lu addr_offset %x len %zx stride %d scanline %d\n"
, __func__, stream_info->stream_id,
pingpong_bit, i, (unsigned long)
buf->mapped_info[i].paddr,
stream_info->
- plane_cfg[i].plane_addr_offset,
+ plane_cfg[k][i].plane_addr_offset,
buf->mapped_info[i].len,
stream_info->
- plane_cfg[i].output_stride,
+ plane_cfg[k][i].output_stride,
stream_info->
- plane_cfg[i].output_scan_lines
+ plane_cfg[k][i].output_scan_lines
);
+ }
}
}
}
@@ -1601,36 +1654,35 @@ static struct msm_isp_buffer *msm_isp_get_stream_buffer(
int rc = 0;
uint32_t bufq_handle = 0;
struct msm_isp_buffer *buf = NULL;
- struct msm_vfe_axi_stream *temp_stream_info = NULL;
struct msm_vfe_frame_request_queue *queue_req;
+ uint32_t buf_index = MSM_ISP_INVALID_BUF_INDEX;
if (!stream_info->controllable_output) {
bufq_handle = stream_info->bufq_handle
[VFE_BUF_QUEUE_DEFAULT];
} else {
- temp_stream_info = msm_isp_get_controllable_stream(
- vfe_dev, stream_info);
queue_req = list_first_entry_or_null(
- &temp_stream_info->request_q,
+ &stream_info->request_q,
struct msm_vfe_frame_request_queue, list);
if (!queue_req)
return buf;
- bufq_handle = temp_stream_info->
+ bufq_handle = stream_info->
bufq_handle[queue_req->buff_queue_id];
if (!bufq_handle ||
- temp_stream_info->request_q_cnt <= 0) {
+ stream_info->request_q_cnt <= 0) {
pr_err_ratelimited("%s: Drop request. Shared stream is stopped.\n",
__func__);
return buf;
}
+ buf_index = queue_req->buf_index;
queue_req->cmd_used = 0;
list_del(&queue_req->list);
- temp_stream_info->request_q_cnt--;
+ stream_info->request_q_cnt--;
}
rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id, bufq_handle, &buf);
+ vfe_dev->pdev->id, bufq_handle, buf_index, &buf);
if (rc == -EFAULT) {
msm_isp_halt_send_error(vfe_dev,
@@ -1649,144 +1701,74 @@ static struct msm_isp_buffer *msm_isp_get_stream_buffer(
return buf;
}
-static int msm_isp_cfg_ping_pong_address(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
- int scratch)
+static int msm_isp_cfg_ping_pong_address(
+ struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status)
{
int i;
- struct msm_isp_buffer *buf = NULL;
+ int j;
uint32_t pingpong_bit;
- uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ struct vfe_device *vfe_dev = stream_info->vfe_dev[0];
uint32_t buffer_size_byte = 0;
int32_t word_per_line = 0;
dma_addr_t paddr;
- struct dual_vfe_resource *dual_vfe_res = NULL;
- uint32_t vfe_id = 0;
- unsigned long flags;
+ struct msm_isp_buffer *buf = NULL;
- if (stream_idx >= VFE_AXI_SRC_MAX) {
- pr_err("%s: Invalid stream_idx", __func__);
- return -EINVAL;
- }
- /* make sure that streams are in right state */
- if ((stream_info->stream_src < RDI_INTF_0) &&
- vfe_dev->is_split) {
- dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
- if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
- !dual_vfe_res->axi_data[ISP_VFE0] ||
- !dual_vfe_res->vfe_base[ISP_VFE1] ||
- !dual_vfe_res->axi_data[ISP_VFE1]) {
- pr_err("%s:%d failed vfe0 %pK %pK vfe %pK %pK\n",
- __func__, __LINE__,
- dual_vfe_res->vfe_base[ISP_VFE0],
- dual_vfe_res->axi_data[ISP_VFE0],
- dual_vfe_res->vfe_base[ISP_VFE1],
- dual_vfe_res->axi_data[ISP_VFE1]);
- return -EINVAL;
- }
- } else if (!vfe_dev->is_split ||
- (stream_info->stream_src >= RDI_INTF_0 &&
- stream_info->stream_src <= RDI_INTF_2)) {
- dual_vfe_res = NULL;
- } else {
- pr_err("%s: Error! Should not reach this case is_split %d stream_src %d\n",
- __func__, vfe_dev->is_split, stream_info->stream_src);
- msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
+ /* Isolate pingpong_bit from pingpong_status */
+ pingpong_bit = ((pingpong_status >>
+ stream_info->wm[0][0]) & 0x1);
+
+ /* return if buffer already present */
+ if (stream_info->buf[!pingpong_bit]) {
+ pr_err("stream %x buffer already set for pingpong %d\n",
+ stream_info->stream_src, pingpong_bit);
return 0;
}
- if (!scratch)
- buf = msm_isp_get_stream_buffer(vfe_dev, stream_info);
+ buf = msm_isp_get_stream_buffer(vfe_dev, stream_info);
- /* Isolate pingpong_bit from pingpong_status */
- pingpong_bit = ((pingpong_status >>
- stream_info->wm[0]) & 0x1);
+ if (!buf) {
+ msm_isp_cfg_stream_scratch(stream_info, pingpong_status);
+ return 0;
+ }
for (i = 0; i < stream_info->num_planes; i++) {
- if (buf) {
- word_per_line = msm_isp_cal_word_per_line(
- stream_info->output_format, stream_info->
- plane_cfg[i].output_stride);
+ paddr = buf->mapped_info[i].paddr;
+ ISP_DBG(
+ "%s: vfe %d config buf %d to pingpong %d stream %x\n",
+ __func__, vfe_dev->pdev->id,
+ buf->buf_idx, !pingpong_bit,
+ stream_info->stream_id);
+ for (j = 0; j < stream_info->num_isp; j++) {
+ vfe_dev = stream_info->vfe_dev[j];
+ word_per_line =
+ msm_isp_cal_word_per_line(
+ stream_info->output_format,
+ stream_info->plane_cfg[j][i].output_stride);
if (word_per_line < 0) {
/* 0 means no prefetch*/
word_per_line = 0;
buffer_size_byte = 0;
} else {
- buffer_size_byte = (word_per_line * 8 *
- stream_info->plane_cfg[i].
- output_scan_lines) - stream_info->
- plane_cfg[i].plane_addr_offset;
+ buffer_size_byte =
+ (word_per_line * 8 *
+ stream_info->plane_cfg[j][i].
+ output_scan_lines) -
+ stream_info->plane_cfg[j][i].
+ plane_addr_offset;
}
-
- paddr = buf->mapped_info[i].paddr;
- ISP_DBG(
- "%s: vfe %d config buf %d to pingpong %d stream %x\n",
- __func__, vfe_dev->pdev->id,
- buf->buf_idx, !pingpong_bit,
- stream_info->stream_id);
- }
-
- if (dual_vfe_res) {
- for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
- if (vfe_id != vfe_dev->pdev->id)
- spin_lock_irqsave(
- &dual_vfe_res->
- axi_data[vfe_id]->
- stream_info[stream_idx].
- lock, flags);
-
- if (buf)
- vfe_dev->hw_info->vfe_ops.axi_ops.
- update_ping_pong_addr(
- dual_vfe_res->vfe_base[vfe_id],
- dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx].wm[i],
- pingpong_bit, paddr +
- dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx].
- plane_cfg[i].plane_addr_offset,
- buffer_size_byte);
- else
- msm_isp_cfg_stream_scratch(
- dual_vfe_res->vfe_dev[vfe_id],
- &(dual_vfe_res->axi_data
- [vfe_id]->
- stream_info[stream_idx]),
- pingpong_status);
-
- if (i == 0) {
- dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx].
- buf[!pingpong_bit] =
- buf;
- }
- if (vfe_id != vfe_dev->pdev->id)
- spin_unlock_irqrestore(
- &dual_vfe_res->
- axi_data[vfe_id]->
- stream_info[stream_idx].
- lock, flags);
- }
- } else {
- if (buf)
- vfe_dev->hw_info->vfe_ops.axi_ops.
+ vfe_dev->hw_info->vfe_ops.axi_ops.
update_ping_pong_addr(
- vfe_dev->vfe_base, stream_info->wm[i],
+ vfe_dev->vfe_base,
+ stream_info->wm[j][i],
pingpong_bit, paddr +
- stream_info->plane_cfg[i].
- plane_addr_offset,
+ stream_info->plane_cfg[j][i].
+ plane_addr_offset,
buffer_size_byte);
- else
- msm_isp_cfg_stream_scratch(vfe_dev,
- stream_info, pingpong_status);
- if (0 == i)
- stream_info->buf[!pingpong_bit] = buf;
}
- if (0 == i && buf)
- buf->pingpong_bit = !pingpong_bit;
}
-
+ stream_info->buf[!pingpong_bit] = buf;
+ buf->pingpong_bit = !pingpong_bit;
return 0;
}
@@ -1803,10 +1785,14 @@ static void msm_isp_handle_done_buf_frame_id_mismatch(
vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
error_event.u.error_info.err_type =
ISP_ERROR_FRAME_ID_MISMATCH;
- ret = vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
- buf->bufq_handle, buf->buf_idx, time_stamp,
- frame_id,
- stream_info->runtime_output_format);
+ if (stream_info->buf_divert)
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ else
+ ret = vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx, time_stamp,
+ frame_id,
+ stream_info->runtime_output_format);
if (ret == -EFAULT) {
msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
return;
@@ -1825,7 +1811,7 @@ static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
int rc;
unsigned long flags;
struct msm_isp_event_data buf_event;
- uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ uint32_t stream_idx = stream_info->stream_src;
uint32_t buf_src;
uint8_t drop_frame = 0;
struct msm_isp_bufq *bufq = NULL;
@@ -1875,11 +1861,16 @@ static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
buf->buf_debug.put_state_last] =
MSM_ISP_BUFFER_STATE_DROP_SKIP;
buf->buf_debug.put_state_last ^= 1;
- rc = vfe_dev->buf_mgr->ops->buf_done(
- vfe_dev->buf_mgr,
- buf->bufq_handle, buf->buf_idx,
- time_stamp, frame_id,
- stream_info->runtime_output_format);
+ if (stream_info->buf_divert)
+ vfe_dev->buf_mgr->ops->put_buf(
+ vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ else
+ rc = vfe_dev->buf_mgr->ops->buf_done(
+ vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx,
+ time_stamp, frame_id,
+ stream_info->runtime_output_format);
if (rc == -EFAULT) {
msm_isp_halt_send_error(vfe_dev,
@@ -1918,6 +1909,11 @@ static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
return -EINVAL;
}
+ /* divert native buffers */
+ vfe_dev->buf_mgr->ops->buf_divert(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx, time_stamp,
+ frame_id);
+
if ((bufq != NULL) && bufq->buf_type == ISP_SHARE_BUF)
msm_isp_send_event(vfe_dev->common_data->
dual_vfe_res->vfe_dev[ISP_VFE1],
@@ -1957,6 +1953,7 @@ int msm_isp_drop_frame(struct vfe_device *vfe_dev,
unsigned long flags;
struct msm_isp_bufq *bufq = NULL;
uint32_t pingpong_bit;
+ int vfe_idx;
if (!vfe_dev || !stream_info || !ts || !sof_info) {
pr_err("%s %d vfe_dev %pK stream_info %pK ts %pK op_info %pK\n",
@@ -1964,11 +1961,14 @@ int msm_isp_drop_frame(struct vfe_device *vfe_dev,
sof_info);
return -EINVAL;
}
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
pingpong_status =
~vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
spin_lock_irqsave(&stream_info->lock, flags);
- pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+ pingpong_bit =
+ (~(pingpong_status >> stream_info->wm[vfe_idx][0]) & 0x1);
done_buf = stream_info->buf[pingpong_bit];
if (done_buf) {
bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
@@ -1994,93 +1994,149 @@ int msm_isp_drop_frame(struct vfe_device *vfe_dev,
return 0;
}
-static void msm_isp_get_camif_update_state_and_halt(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
- enum msm_isp_camif_update_state *camif_update,
- int *halt)
+/**
+ * msm_isp_input_disable() - Disable the input for given vfe
+ * @vfe_dev: The vfe device whose input is to be disabled
+ *
+ * Returns - void
+ *
+ * If stream count on an input line is 0 then disable the input
+ */
+static void msm_isp_input_disable(struct vfe_device *vfe_dev)
{
- int i;
- struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint8_t pix_stream_cnt = 0, cur_pix_stream_cnt;
- cur_pix_stream_cnt =
- axi_data->src_info[VFE_PIX_0].pix_stream_count +
- axi_data->src_info[VFE_PIX_0].raw_stream_count;
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- stream_info =
- &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
- if (stream_info->stream_src < RDI_INTF_0)
- pix_stream_cnt++;
- }
+ int ext_read =
+ (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
+ int stream_count;
+ int total_stream_count = 0;
+ int i;
- if (vfe_dev->axi_data.num_active_stream == stream_cfg_cmd->num_streams
- && (stream_cfg_cmd->cmd == STOP_STREAM ||
- stream_cfg_cmd->cmd == STOP_IMMEDIATELY))
- *halt = 1;
- else
- *halt = 0;
-
- if ((pix_stream_cnt) &&
- (axi_data->src_info[VFE_PIX_0].input_mux != EXTERNAL_READ)) {
- if (cur_pix_stream_cnt == 0 && pix_stream_cnt &&
- stream_cfg_cmd->cmd == START_STREAM)
- *camif_update = ENABLE_CAMIF;
- else if (cur_pix_stream_cnt &&
- (cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
- (stream_cfg_cmd->cmd == STOP_STREAM ||
- stream_cfg_cmd->cmd == STOP_IMMEDIATELY)) {
- if (*halt)
- *camif_update = DISABLE_CAMIF_IMMEDIATELY;
- else
- *camif_update = DISABLE_CAMIF;
- }
+ for (i = 0; i < VFE_SRC_MAX; i++)
+ total_stream_count += axi_data->src_info[i].stream_count +
+ axi_data->src_info[i].raw_stream_count;
+
+ for (i = 0; i < VFE_SRC_MAX; i++) {
+ stream_count = axi_data->src_info[i].stream_count +
+ axi_data->src_info[i].raw_stream_count;
+ if (stream_count)
+ continue;
+ if (axi_data->src_info[i].active == 0)
+ continue;
+ /* deactivate the input line */
+ axi_data->src_info[i].active = 0;
+
+ if (i != VFE_PIX_0 || ext_read)
+ continue;
+ /* halt camif */
+ if (total_stream_count == 0)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev,
+ DISABLE_CAMIF_IMMEDIATELY);
else
- *camif_update = NO_UPDATE;
- } else
- *camif_update = NO_UPDATE;
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, DISABLE_CAMIF);
+ }
+
+ /* halt and reset hardware if all streams are disabled */
+ if (total_stream_count == 0) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+ vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 1);
+ vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+
+ }
}
-static void msm_isp_update_camif_output_count(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+/**
+ * msm_isp_input_enable() - Enable the input for given vfe
+ * @vfe_dev: The vfe device whose input is to be enabled
+ *
+ * Returns - void
+ *
+ * Enable inout line if it is not enabled
+ */
+static void msm_isp_input_enable(struct vfe_device *vfe_dev,
+ int sync_frame_id_src)
{
- int i;
- struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int ext_read =
+ (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
+ int stream_count;
+ int i;
- if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
- return;
-
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
- VFE_AXI_SRC_MAX) {
- return;
+ for (i = 0; i < VFE_SRC_MAX; i++) {
+ stream_count = axi_data->src_info[i].stream_count +
+ axi_data->src_info[i].raw_stream_count;
+ if (stream_count == 0)
+ continue;
+ if (axi_data->src_info[i].active)
+ continue;
+ /* activate the input since it is deactivated */
+ axi_data->src_info[i].frame_id = 0;
+ axi_data->src_info[i].active = 1;
+ if (i >= VFE_RAW_0 && sync_frame_id_src) {
+ /*
+ * Incase PIX and RDI streams are part
+ * of same session, this will ensure
+ * RDI stream will have same frame id
+ * as of PIX stream
+ */
+ axi_data->src_info[i].frame_id =
+ axi_data->src_info[VFE_PIX_0].frame_id;
}
- stream_info =
- &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
- if (stream_info->stream_src >= RDI_INTF_0)
+ if (i != VFE_PIX_0 || ext_read)
continue;
- if (stream_info->stream_src == PIX_ENCODER ||
- stream_info->stream_src == PIX_VIEWFINDER ||
- stream_info->stream_src == PIX_VIDEO ||
- stream_info->stream_src == IDEAL_RAW) {
- if (stream_cfg_cmd->cmd == START_STREAM)
- vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count++;
+ /* for camif input the camif needs enabling */
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, ENABLE_CAMIF);
+ }
+}
+
+/**
+ * msm_isp_update_intf_stream_cnt() - Update the stream count in axi interface
+ * @stream_info: The stream that is either being enabled/disabled
+ * @enable: 0 means stream is being disabled, else enabled
+ *
+ * Returns - void
+ */
+static void msm_isp_update_intf_stream_cnt(
+ struct msm_vfe_axi_stream *stream_info,
+ int enable)
+{
+ int i;
+
+ switch (stream_info->stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
+ case PIX_VIDEO:
+ case IDEAL_RAW:
+ case RDI_INTF_0:
+ case RDI_INTF_1:
+ case RDI_INTF_2:
+ for (i = 0; i < stream_info->num_isp; i++) {
+ if (enable)
+ stream_info->vfe_dev[i]->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
+ stream_count++;
else
- vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count--;
- } else if (stream_info->stream_src == CAMIF_RAW) {
- if (stream_cfg_cmd->cmd == START_STREAM)
- vfe_dev->axi_data.src_info[VFE_PIX_0].
+ stream_info->vfe_dev[i]->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
+ stream_count--;
+ }
+ break;
+ case CAMIF_RAW:
+ for (i = 0; i < stream_info->num_isp; i++) {
+ if (enable)
+ stream_info->vfe_dev[i]->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
raw_stream_count++;
else
- vfe_dev->axi_data.src_info[VFE_PIX_0].
+ stream_info->vfe_dev[i]->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
raw_stream_count--;
}
+ break;
+ default:
+ WARN(1, "Invalid steam src %d\n", stream_info->stream_src);
}
}
@@ -2091,20 +2147,24 @@ static int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev)
{
int i, rc = 0;
struct msm_vfe_axi_stream *stream_info;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
uint64_t total_pix_bandwidth = 0, total_rdi_bandwidth = 0;
uint32_t num_pix_streams = 0;
uint64_t total_bandwidth = 0;
+ int vfe_idx;
for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- stream_info = &axi_data->stream_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
if (stream_info->state == ACTIVE ||
stream_info->state == START_PENDING) {
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev,
+ stream_info);
if (stream_info->stream_src < RDI_INTF_0) {
- total_pix_bandwidth += stream_info->bandwidth;
+ total_pix_bandwidth +=
+ stream_info->bandwidth[vfe_idx];
num_pix_streams++;
} else {
- total_rdi_bandwidth += stream_info->bandwidth;
+ total_rdi_bandwidth +=
+ stream_info->bandwidth[vfe_idx];
}
}
}
@@ -2119,121 +2179,88 @@ static int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev)
return rc;
}
-static int msm_isp_axi_wait_for_cfg_done(struct vfe_device *vfe_dev,
- enum msm_isp_camif_update_state camif_update,
- uint32_t src_mask, int regUpdateCnt)
-{
- int rc;
- unsigned long flags;
- enum msm_vfe_input_src i = 0;
- spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
-
- for (i = 0; i < VFE_SRC_MAX; i++) {
- if (src_mask & (1 << i)) {
- if (vfe_dev->axi_data.stream_update[i] > 0) {
- pr_err("%s:Stream Update in progress. cnt %d\n",
- __func__,
- vfe_dev->axi_data.stream_update[i]);
- spin_unlock_irqrestore(
- &vfe_dev->shared_data_lock, flags);
- return -EINVAL;
- }
- vfe_dev->axi_data.stream_update[i] = regUpdateCnt;
- }
- }
- if (src_mask) {
- init_completion(&vfe_dev->stream_config_complete);
- vfe_dev->axi_data.pipeline_update = camif_update;
- }
- spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
- rc = wait_for_completion_timeout(
- &vfe_dev->stream_config_complete,
- msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
- if (rc == 0) {
- for (i = 0; i < VFE_SRC_MAX; i++) {
- if (src_mask & (1 << i)) {
- spin_lock_irqsave(&vfe_dev->shared_data_lock,
- flags);
- vfe_dev->axi_data.stream_update[i] = 0;
- spin_unlock_irqrestore(&vfe_dev->
- shared_data_lock, flags);
- }
- }
- pr_err("%s: wait timeout\n", __func__);
- rc = -EBUSY;
- } else {
- rc = 0;
- }
- return rc;
-}
-
static int msm_isp_init_stream_ping_pong_reg(
- struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
int rc = 0;
- if ((vfe_dev->is_split && vfe_dev->pdev->id == 1 &&
- stream_info->stream_src < RDI_INTF_0) ||
- !vfe_dev->is_split || stream_info->stream_src >= RDI_INTF_0) {
- /* Set address for both PING & PONG register */
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PING_FLAG, 0);
- if (rc < 0) {
- pr_err("%s: No free buffer for ping\n",
- __func__);
- return rc;
- }
-
- if (stream_info->stream_type != BURST_STREAM ||
- stream_info->runtime_num_burst_capture > 1)
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PONG_FLAG, 0);
+ /* Set address for both PING & PO NG register */
+ rc = msm_isp_cfg_ping_pong_address(
+ stream_info, VFE_PING_FLAG);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n",
+ __func__);
+ return rc;
+ }
+ if (stream_info->stream_type != BURST_STREAM ||
+ stream_info->runtime_num_burst_capture > 1)
+ rc = msm_isp_cfg_ping_pong_address(
+ stream_info, VFE_PONG_FLAG);
- if (rc < 0) {
- pr_err("%s: No free buffer for pong\n",
- __func__);
- return rc;
- }
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n",
+ __func__);
+ return rc;
}
return rc;
}
static void msm_isp_get_stream_wm_mask(
+ struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info,
uint32_t *wm_reload_mask)
{
int i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++)
- *wm_reload_mask |= (1 << stream_info->wm[i]);
+ *wm_reload_mask |= (1 << stream_info->wm[vfe_idx][i]);
}
int msm_isp_axi_halt(struct vfe_device *vfe_dev,
struct msm_vfe_axi_halt_cmd *halt_cmd)
{
int rc = 0;
+ int i;
+ struct vfe_device *halt_vfes[MAX_VFE] = { NULL, NULL };
- if (atomic_read(&vfe_dev->error_info.overflow_state) ==
- OVERFLOW_DETECTED) {
- ISP_DBG("%s: VFE%d already halted, direct return\n",
- __func__, vfe_dev->pdev->id);
- return rc;
- }
+ if (vfe_dev->is_split)
+ for (i = 0; i < MAX_VFE; i++)
+ halt_vfes[i] = vfe_dev->common_data->
+ dual_vfe_res->vfe_dev[i];
+ else
+ halt_vfes[vfe_dev->pdev->id] = vfe_dev;
- if (halt_cmd->overflow_detected) {
- atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
- NO_OVERFLOW, OVERFLOW_DETECTED);
- pr_err("%s: VFE%d Bus overflow detected: start recovery!\n",
- __func__, vfe_dev->pdev->id);
- }
+ for (i = 0; i < MAX_VFE; i++) {
+ vfe_dev = halt_vfes[i];
+ if (!vfe_dev)
+ continue;
+ if (atomic_read(&vfe_dev->error_info.overflow_state) ==
+ OVERFLOW_DETECTED) {
+ ISP_DBG("%s: VFE%d already halted, direct return\n",
+ __func__, vfe_dev->pdev->id);
+ continue;
+ }
- if (halt_cmd->stop_camif) {
- vfe_dev->hw_info->vfe_ops.core_ops.
- update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+ if (halt_cmd->overflow_detected) {
+ atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
+ NO_OVERFLOW, OVERFLOW_DETECTED);
+ pr_err("%s: VFE%d Bus overflow detected: start recovery!\n",
+ __func__, vfe_dev->pdev->id);
+ }
+
+ if (halt_cmd->stop_camif) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev,
+ DISABLE_CAMIF_IMMEDIATELY);
+ }
+ rc |= vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev,
+ halt_cmd->blocking_halt);
+
+ /* take care of pending items in tasklet after halt */
+ msm_isp_flush_tasklet(vfe_dev);
}
- rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev,
- halt_cmd->blocking_halt);
return rc;
}
@@ -2241,12 +2268,13 @@ int msm_isp_axi_halt(struct vfe_device *vfe_dev,
int msm_isp_axi_reset(struct vfe_device *vfe_dev,
struct msm_vfe_axi_reset_cmd *reset_cmd)
{
- int rc = 0, i, j;
+ int rc = 0, i, k;
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
uint32_t bufq_handle = 0, bufq_id = 0;
struct msm_isp_timestamp timestamp;
unsigned long flags;
+ struct vfe_device *update_vfes[MAX_VFE] = {0, 0};
if (!reset_cmd) {
pr_err("%s: NULL pointer reset cmd %pK\n", __func__, reset_cmd);
@@ -2256,49 +2284,74 @@ int msm_isp_axi_reset(struct vfe_device *vfe_dev,
rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
0, reset_cmd->blocking);
+ if (vfe_dev->is_split) {
+ for (i = 0; i < MAX_VFE; i++)
+ update_vfes[i] = vfe_dev->common_data->dual_vfe_res->
+ vfe_dev[i];
+ } else {
+ update_vfes[vfe_dev->pdev->id] = vfe_dev;
+ }
msm_isp_get_timestamp(&timestamp);
- for (i = 0, j = 0; j < axi_data->num_active_stream &&
- i < VFE_AXI_SRC_MAX; i++, j++) {
- stream_info = &axi_data->stream_info[i];
- if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
- rc = -1;
- pr_err("%s invalid stream src = %d\n", __func__,
- stream_info->stream_src);
- break;
- }
- if (stream_info->state != ACTIVE) {
- j--;
+ for (k = 0; k < MAX_VFE; k++) {
+ vfe_dev = update_vfes[k];
+ if (!vfe_dev)
continue;
- }
+ rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
+ 0, reset_cmd->blocking);
- for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) {
- bufq_handle = stream_info->bufq_handle[bufq_id];
- if (!bufq_handle)
- continue;
- /* set ping pong address to scratch before flush */
- spin_lock_irqsave(&stream_info->lock, flags);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PING_FLAG);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PONG_FLAG);
- spin_unlock_irqrestore(&stream_info->lock, flags);
- rc = vfe_dev->buf_mgr->ops->flush_buf(
- vfe_dev->buf_mgr, vfe_dev->pdev->id,
- bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
- &timestamp.buf_time, reset_cmd->frame_id);
- if (rc == -EFAULT) {
- msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- return rc;
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ stream_info = msm_isp_get_stream_common_data(
+ vfe_dev, i);
+ if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
+ rc = -1;
+ pr_err("%s invalid stream src = %d\n",
+ __func__,
+ stream_info->stream_src);
+ break;
}
+ if (stream_info->state == AVAILABLE ||
+ stream_info->state == INACTIVE)
+ continue;
- axi_data->src_info[SRC_TO_INTF(stream_info->
- stream_src)].frame_id = reset_cmd->frame_id;
- msm_isp_reset_burst_count_and_frame_drop(vfe_dev,
- stream_info);
+ /* handle dual stream on ISP_VFE1 turn */
+ if (stream_info->num_isp > 1 &&
+ vfe_dev->pdev->id == ISP_VFE0)
+ continue;
+
+ for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX;
+ bufq_id++) {
+ bufq_handle = stream_info->bufq_handle[bufq_id];
+ if (!bufq_handle)
+ continue;
+
+ /* set ping pong to scratch before flush */
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_cfg_stream_scratch(stream_info,
+ VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(stream_info,
+ VFE_PONG_FLAG);
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ rc = vfe_dev->buf_mgr->ops->flush_buf(
+ vfe_dev->buf_mgr,
+ bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
+ &timestamp.buf_time,
+ reset_cmd->frame_id);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+
+ axi_data->src_info[SRC_TO_INTF(stream_info->
+ stream_src)].frame_id =
+ reset_cmd->frame_id;
+ msm_isp_reset_burst_count_and_frame_drop(
+ vfe_dev, stream_info);
+ }
}
}
@@ -2308,46 +2361,67 @@ int msm_isp_axi_reset(struct vfe_device *vfe_dev,
return rc;
}
-int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+int msm_isp_axi_restart(struct vfe_device *vfe_dev_ioctl,
struct msm_vfe_axi_restart_cmd *restart_cmd)
{
- int rc = 0, i, j;
+ int rc = 0, i, k, j;
struct msm_vfe_axi_stream *stream_info;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t wm_reload_mask = 0x0;
+ uint32_t wm_reload_mask = 0;
unsigned long flags;
+ struct vfe_device *update_vfes[MAX_VFE] = {0, 0};
+ struct vfe_device *vfe_dev;
+
+ if (vfe_dev_ioctl->is_split) {
+ for (i = 0; i < MAX_VFE; i++)
+ update_vfes[i] = vfe_dev_ioctl->common_data->
+ dual_vfe_res->vfe_dev[i];
+ } else {
+ update_vfes[vfe_dev_ioctl->pdev->id] = vfe_dev_ioctl;
+ }
- vfe_dev->buf_mgr->frameId_mismatch_recovery = 0;
- for (i = 0, j = 0; j < axi_data->num_active_stream &&
- i < VFE_AXI_SRC_MAX; i++, j++) {
- stream_info = &axi_data->stream_info[i];
- if (stream_info->state != ACTIVE) {
- j--;
+ vfe_dev_ioctl->buf_mgr->frameId_mismatch_recovery = 0;
+ for (k = 0; k < MAX_VFE; k++) {
+ vfe_dev = update_vfes[k];
+ if (!vfe_dev)
continue;
+ vfe_dev->buf_mgr->frameId_mismatch_recovery = 0;
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ stream_info = msm_isp_get_stream_common_data(
+ vfe_dev, i);
+ if (stream_info->state == AVAILABLE ||
+ stream_info->state == INACTIVE)
+ continue;
+ msm_isp_get_stream_wm_mask(vfe_dev, stream_info,
+ &wm_reload_mask);
+ /* handle dual stream on ISP_VFE1 turn */
+ if (stream_info->num_isp > 1 &&
+ vfe_dev->pdev->id == ISP_VFE0)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ for (j = 0; j < MSM_ISP_COMP_IRQ_MAX; j++)
+ stream_info->composite_irq[j] = 0;
+ msm_isp_init_stream_ping_pong_reg(stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
- msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
- spin_lock_irqsave(&stream_info->lock, flags);
- msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
- spin_unlock_irqrestore(&stream_info->lock, flags);
- }
- vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
- vfe_dev->vfe_base, wm_reload_mask);
- rc = vfe_dev->hw_info->vfe_ops.axi_ops.restart(vfe_dev, 0,
- restart_cmd->enable_camif);
- if (rc < 0)
- pr_err("%s Error restarting HW\n", __func__);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base, wm_reload_mask);
+ vfe_dev->hw_info->vfe_ops.axi_ops.restart(vfe_dev, 0,
+ restart_cmd->enable_camif);
+ }
return rc;
}
-static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev,
+static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev_ioctl,
struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
uint8_t cgc_override)
{
int i = 0, j = 0;
struct msm_vfe_axi_stream *stream_info;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int k;
+ struct vfe_device *vfe_dev;
+ int vfe_idx;
if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
return -EINVAL;
@@ -2357,14 +2431,21 @@ static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev,
VFE_AXI_SRC_MAX) {
return -EINVAL;
}
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
for (j = 0; j < stream_info->num_planes; j++) {
- if (vfe_dev->hw_info->vfe_ops.axi_ops.
- update_cgc_override)
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ if (!vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_cgc_override)
+ continue;
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(
+ vfe_dev, stream_info);
vfe_dev->hw_info->vfe_ops.axi_ops.
update_cgc_override(vfe_dev,
- stream_info->wm[j], cgc_override);
+ stream_info->wm[vfe_idx][j],
+ cgc_override);
+ }
}
}
return 0;
@@ -2456,8 +2537,7 @@ static int msm_isp_update_dual_HW_ms_info_at_start(
static int msm_isp_update_dual_HW_ms_info_at_stop(
struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
- enum msm_isp_camif_update_state camif_update)
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
{
int i, rc = 0;
uint8_t slave_id;
@@ -2476,13 +2556,13 @@ static int msm_isp_update_dual_HW_ms_info_at_stop(
VFE_AXI_SRC_MAX) {
return -EINVAL;
}
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
stream_src = SRC_TO_INTF(stream_info->stream_src);
/* Remove PIX if DISABLE CAMIF */
- if (stream_src == VFE_PIX_0 && !((camif_update == DISABLE_CAMIF)
- || (camif_update == DISABLE_CAMIF_IMMEDIATELY)))
+ if (stream_src == VFE_PIX_0 &&
+ axi_data->src_info[VFE_PIX_0].active)
continue;
src_info = &axi_data->src_info[stream_src];
@@ -2517,404 +2597,489 @@ static int msm_isp_update_dual_HW_ms_info_at_stop(
return rc;
}
-static int msm_isp_update_dual_HW_axi(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info)
+/**
+ * msm_isp_axi_wait_for_stream_cfg_done() - Wait for a stream completion
+ * @stream_info: The stream to wait on
+ * @active: Reset means wait for stream to be INACTIVE else wait for ACTIVE
+ *
+ * Returns - 0 on success else error code
+ */
+static int msm_isp_axi_wait_for_stream_cfg_done(
+ struct msm_vfe_axi_stream *stream_info, int active)
+{
+ int rc = -1;
+ unsigned long flags;
+
+ /* No need to wait if stream is already in required state */
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (active && ACTIVE == stream_info->state)
+ rc = 0;
+ if (!active && INACTIVE == stream_info->state)
+ rc = 0;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (rc == 0)
+ return rc;
+
+ rc = wait_for_completion_timeout(
+ active ? &stream_info->active_comp :
+ &stream_info->inactive_comp,
+ msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+
+ if (rc <= 0) {
+ rc = rc ? rc : -ETIMEDOUT;
+ pr_err("%s: wait for stream %x/%x state %d config failed %d\n",
+ __func__,
+ stream_info->stream_id,
+ stream_info->stream_src,
+ stream_info->state,
+ rc);
+ rc = -EINVAL;
+ } else {
+ rc = 0;
+ }
+ return rc;
+}
+
+/**
+ * msm_isp_axi_wait_for_streams() - Wait for completion of a number of streams
+ * @streams: The streams to wait on
+ * @num_stream: Number of streams to wait on
+ * @active: Reset means wait for stream to be INACTIVE else wait for ACTIVE
+ *
+ * Returns - 0 on success else error code
+ */
+static int msm_isp_axi_wait_for_streams(struct msm_vfe_axi_stream **streams,
+ int num_stream, int active)
{
+ int i;
int rc = 0;
- int vfe_id;
- uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
- struct dual_vfe_resource *dual_vfe_res = NULL;
+ struct msm_vfe_axi_stream *stream_info;
- if (stream_idx >= VFE_AXI_SRC_MAX) {
- pr_err("%s: Invalid stream idx %d\n", __func__, stream_idx);
+ for (i = 0; i < num_stream; i++) {
+ stream_info = streams[i];
+ rc |= msm_isp_axi_wait_for_stream_cfg_done(stream_info, active);
+ }
+ return rc;
+}
+
+static int __msm_isp_check_stream_state(struct msm_vfe_axi_stream *stream_info,
+ int cmd)
+{
+ switch (stream_info->state) {
+ case AVAILABLE:
return -EINVAL;
+ case PAUSING:
+ case RESUMING:
+ case RESUME_PENDING:
+ case ACTIVE:
+ if (cmd != 0)
+ return -EALREADY;
+ break;
+ case INACTIVE:
+ if (cmd == 0)
+ return -EALREADY;
+ break;
+ /*
+ * stream cannot be in following states since we always
+ * wait in ioctl for stream to be active or inactive
+ */
+ case UPDATING:
+ case START_PENDING:
+ case STARTING:
+ case STOPPING:
+ case STOP_PENDING:
+ case PAUSE_PENDING:
+ default:
+ WARN(1, "Invalid state %d\n", stream_info->state);
}
+ return 0;
+}
- dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
- if (!dual_vfe_res->vfe_dev[ISP_VFE0] ||
- !dual_vfe_res->vfe_dev[ISP_VFE1] ||
- !dual_vfe_res->axi_data[ISP_VFE0] ||
- !dual_vfe_res->axi_data[ISP_VFE1]) {
- pr_err("%s: Error in dual vfe resource\n", __func__);
- rc = -EINVAL;
- } else {
- if (stream_info->state == RESUME_PENDING &&
- (dual_vfe_res->axi_data[!vfe_dev->pdev->id]->
- stream_info[stream_idx].state == RESUME_PENDING)) {
- /* Update the AXI only after both ISPs receiving the
- Reg update interrupt*/
- for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
- rc = msm_isp_axi_stream_enable_cfg(
- dual_vfe_res->vfe_dev[vfe_id],
- &dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx], 1);
- dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx].state =
- RESUMING;
+static void __msm_isp_stop_axi_streams(struct msm_vfe_axi_stream **streams,
+ int num_streams, int cmd_type)
+{
+ int i;
+ struct msm_vfe_axi_shared_data *axi_data;
+ struct msm_isp_timestamp timestamp;
+ int total_stream_count = 0;
+ uint32_t bufq_id = 0, bufq_handle = 0;
+ struct msm_vfe_axi_stream *stream_info;
+ unsigned long flags;
+ uint32_t intf;
+ int rc;
+ struct vfe_device *vfe_dev;
+ struct vfe_device *update_vfes[MAX_VFE] = {0, 0};
+ int k;
+
+ msm_isp_get_timestamp(&timestamp);
+
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ spin_lock_irqsave(&stream_info->lock, flags);
+ /*
+ * since we can get here from start axi stream error path due
+ * to which the stream may be intermittent state like
+ * STARTING/START_PENDING, force the stream to move out of
+ * intermittent state so it can be made INACTIVE. The
+ * intermittent states update variables so better to go through
+ * those state transitions instead of directly forcing stream to
+ * be INACTIVE
+ */
+ while (stream_info->state != ACTIVE)
+ __msm_isp_axi_stream_update(stream_info,
+ &timestamp);
+ msm_isp_cfg_stream_scratch(stream_info, VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(stream_info, VFE_PONG_FLAG);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ if (stream_info->num_planes > 1)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_comp_mask(vfe_dev, stream_info);
+ else
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+ update_vfes[vfe_dev->pdev->id] = vfe_dev;
+ }
+ init_completion(&stream_info->inactive_comp);
+ stream_info->state = STOP_PENDING;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ msm_isp_update_intf_stream_cnt(stream_info, 0);
+ }
+
+ for (k = 0; k < MAX_VFE; k++) {
+ int ext_read;
+
+ if (!update_vfes[k])
+ continue;
+ vfe_dev = update_vfes[k];
+ axi_data = &vfe_dev->axi_data;
+ ext_read =
+ (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
+ for (i = 0; i < VFE_SRC_MAX; i++) {
+ total_stream_count +=
+ axi_data->src_info[i].stream_count +
+ axi_data->src_info[i].raw_stream_count;
+ if (i != VFE_PIX_0)
+ continue;
+ if (axi_data->src_info[i].stream_count == 0) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ enable_module(vfe_dev, 0xFF, 0);
+ /* reg update for PIX with 0 streams active */
+ if (ext_read == 0)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, VFE_PIX_0);
}
}
+
+ }
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ intf = SRC_TO_INTF(stream_info->stream_src);
+ if (total_stream_count == 0 ||
+ ((stream_info->stream_type == BURST_STREAM) &&
+ stream_info->runtime_num_burst_capture == 0)) {
+ spin_lock_irqsave(&stream_info->lock, flags);
+ while (stream_info->state != INACTIVE)
+ __msm_isp_axi_stream_update(
+ stream_info, &timestamp);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ }
+
+ rc = msm_isp_axi_wait_for_streams(streams, num_streams, 0);
+ if (rc) {
+ pr_err("%s: wait for stream comp failed, retry...\n", __func__);
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ if (stream_info->state == INACTIVE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ __msm_isp_axi_stream_update(stream_info,
+ &timestamp);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+ rc = msm_isp_axi_wait_for_streams(streams, num_streams, 0);
+ if (rc) {
+ pr_err("%s: wait for stream comp failed, force streams to inactive\n",
+ __func__);
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ if (stream_info->state == INACTIVE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ while (stream_info->state != INACTIVE)
+ __msm_isp_axi_stream_update(
+ stream_info, &timestamp);
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ }
+ }
+ }
+ /* clear buffers that are dequeued */
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) {
+ bufq_handle = stream_info->bufq_handle[bufq_id];
+ if (!bufq_handle)
+ continue;
+ vfe_dev = stream_info->vfe_dev[0];
+ rc = vfe_dev->buf_mgr->ops->flush_buf(
+ vfe_dev->buf_mgr,
+ bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
+ &timestamp.buf_time, 0);
+ if (rc == -EFAULT)
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ }
+ }
+
+ for (k = 0; k < MAX_VFE; k++) {
+ if (!update_vfes[k])
+ continue;
+ msm_isp_update_stream_bandwidth(update_vfes[k]);
+ msm_isp_input_disable(update_vfes[k]);
+ }
+
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ intf = SRC_TO_INTF(stream_info->stream_src);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ axi_data = &vfe_dev->axi_data;
+ if (axi_data->src_info[intf].stream_count == 0)
+ vfe_dev->reg_update_requested &=
+ ~(BIT(intf));
+ }
}
- return rc;
}
-static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
- enum msm_isp_camif_update_state camif_update)
+static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
{
int i, rc = 0;
- uint8_t src_state, wait_for_complete = 0;
- uint32_t wm_reload_mask = 0x0;
+ uint8_t src_state;
+ uint32_t wm_reload_mask[MAX_VFE] = {0, 0};
struct msm_vfe_axi_stream *stream_info;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
uint32_t src_mask = 0;
unsigned long flags;
+ struct msm_vfe_axi_stream *streams[MAX_NUM_STREAM];
+ int num_streams = 0;
+ struct msm_isp_timestamp timestamp;
+ struct vfe_device *update_vfes[MAX_VFE] = {0, 0};
+ int k;
+ uint32_t num_active_streams[MAX_VFE] = {0, 0};
+ struct vfe_device *vfe_dev;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev_ioctl->axi_data;
if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
return -EINVAL;
- if (camif_update == ENABLE_CAMIF) {
- ISP_DBG("%s: vfe %d camif enable\n", __func__,
- vfe_dev->pdev->id);
- vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id = 0;
- }
+ msm_isp_get_timestamp(&timestamp);
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
- VFE_AXI_SRC_MAX) {
- return -EINVAL;
- }
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
src_state = axi_data->src_info[
SRC_TO_INTF(stream_info->stream_src)].active;
+
else {
ISP_DBG("%s: invalid src info index\n", __func__);
- return -EINVAL;
+ rc = -EINVAL;
+ goto error;
}
-
- msm_isp_calculate_bandwidth(axi_data, stream_info);
- msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
spin_lock_irqsave(&stream_info->lock, flags);
- msm_isp_reset_framedrop(vfe_dev, stream_info);
- rc = msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+ rc = __msm_isp_check_stream_state(stream_info, 1);
+ if (-EALREADY == rc) {
+ rc = 0;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ goto error;
+ }
+
+ msm_isp_calculate_bandwidth(stream_info);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ msm_isp_get_stream_wm_mask(stream_info->vfe_dev[k],
+ stream_info, &wm_reload_mask[
+ stream_info->vfe_dev[k]->pdev->id]);
+ src_state = stream_info->vfe_dev[k]->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].active;
+ if (update_vfes[stream_info->vfe_dev[k]->pdev->id])
+ continue;
+ update_vfes[stream_info->vfe_dev[k]->pdev->id] =
+ stream_info->vfe_dev[k];
+ num_active_streams[stream_info->vfe_dev[k]->pdev->id] =
+ stream_info->vfe_dev[k]->axi_data.
+ num_active_stream;
+ }
+ msm_isp_reset_framedrop(vfe_dev_ioctl, stream_info);
+ rc = msm_isp_init_stream_ping_pong_reg(stream_info);
if (rc < 0) {
pr_err("%s: No buffer for stream%d\n", __func__,
HANDLE_TO_IDX(
stream_cfg_cmd->stream_handle[i]));
spin_unlock_irqrestore(&stream_info->lock, flags);
- return rc;
+ goto error;
}
- spin_unlock_irqrestore(&stream_info->lock, flags);
- if (stream_info->num_planes > 1) {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_comp_mask(vfe_dev, stream_info);
- } else {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_wm_irq_mask(vfe_dev, stream_info);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ if (stream_info->num_planes > 1) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_comp_mask(vfe_dev, stream_info);
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
+ }
}
+ init_completion(&stream_info->active_comp);
stream_info->state = START_PENDING;
+ msm_isp_update_intf_stream_cnt(stream_info, 1);
- ISP_DBG("%s, Stream 0x%x src %d src_state %d on vfe %d\n",
- __func__, stream_info->stream_id,
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]),
- src_state, vfe_dev->pdev->id);
-
+ ISP_DBG("%s, Stream 0x%x src_state %d on vfe %d\n", __func__,
+ stream_info->stream_src, src_state,
+ vfe_dev_ioctl->pdev->id);
if (src_state) {
src_mask |= (1 << SRC_TO_INTF(stream_info->stream_src));
- wait_for_complete = 1;
} else {
- if (vfe_dev->dump_reg)
- msm_camera_io_dump(vfe_dev->vfe_base,
- 0x1000, 1);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+
+ if (vfe_dev->dump_reg)
+ msm_camera_io_dump(vfe_dev->vfe_base,
+ 0x1000, 1);
+ }
- /*Configure AXI start bits to start immediately*/
- msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info, 0);
- stream_info->state = ACTIVE;
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
- SRC_TO_INTF(stream_info->stream_src));
+ /* Configure AXI start bits to start immediately */
+ while (stream_info->state != ACTIVE)
+ __msm_isp_axi_stream_update(
+ stream_info, &timestamp);
- /*
- * Active bit is set in enable_camif for PIX.
- * For RDI, set it here
- */
- if (SRC_TO_INTF(stream_info->stream_src) >= VFE_RAW_0 &&
- SRC_TO_INTF(stream_info->stream_src) <
- VFE_SRC_MAX) {
- /* Incase PIX and RDI streams are part of same
- * session, this will ensure RDI stream will
- * have same frame id as of PIX stream
- */
- if (stream_cfg_cmd->sync_frame_id_src)
- vfe_dev->axi_data.src_info[SRC_TO_INTF(
- stream_info->stream_src)].frame_id =
- vfe_dev->axi_data.src_info[VFE_PIX_0]
- .frame_id;
- else
- vfe_dev->axi_data.src_info[SRC_TO_INTF(
- stream_info->stream_src)].frame_id = 0;
- vfe_dev->axi_data.src_info[SRC_TO_INTF(
- stream_info->stream_src)].active = 1;
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev,
+ SRC_TO_INTF(stream_info->stream_src));
}
}
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ streams[num_streams++] = stream_info;
}
- msm_isp_update_stream_bandwidth(vfe_dev);
- vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
- vfe_dev->vfe_base, wm_reload_mask);
- msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
- if (camif_update == ENABLE_CAMIF) {
- vfe_dev->hw_info->vfe_ops.core_ops.
- update_camif_state(vfe_dev, camif_update);
- vfe_dev->axi_data.camif_state = CAMIF_ENABLE;
- vfe_dev->common_data->dual_vfe_res->epoch_sync_mask = 0;
+ for (i = 0; i < MAX_VFE; i++) {
+ vfe_dev = update_vfes[i];
+ if (!vfe_dev)
+ continue;
+ if (num_active_streams[i] == 0) {
+ /* Configure UB */
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_ub(vfe_dev);
+ /* when start reset overflow state */
+ atomic_set(&vfe_dev->error_info.overflow_state,
+ NO_OVERFLOW);
+ }
+ msm_isp_update_stream_bandwidth(vfe_dev);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base, wm_reload_mask[i]);
+
+ msm_isp_input_enable(vfe_dev,
+ stream_cfg_cmd->sync_frame_id_src);
}
- if (wait_for_complete) {
- rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update,
- src_mask, 2);
- if (rc < 0) {
- pr_err("%s: wait for config done failed\n", __func__);
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(
- stream_cfg_cmd->stream_handle[i])];
- stream_info->state = STOPPING;
- msm_isp_axi_stream_enable_cfg(
- vfe_dev, stream_info, 0);
- stream_cfg_cmd->cmd = STOP_IMMEDIATELY;
- msm_isp_update_camif_output_count(vfe_dev,
- stream_cfg_cmd);
- }
- }
+ rc = msm_isp_axi_wait_for_streams(streams, num_streams, 1);
+ if (rc < 0) {
+ pr_err("%s: wait for config done failed\n", __func__);
+ goto error;
}
+ return 0;
+error:
+ __msm_isp_stop_axi_streams(streams, num_streams,
+ STOP_STREAM);
+
return rc;
}
-static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
- enum msm_isp_camif_update_state camif_update,
- int halt)
+static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev_ioctl,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
{
int i, rc = 0;
- uint8_t wait_for_complete_for_this_stream = 0;
struct msm_vfe_axi_stream *stream_info = NULL;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- int ext_read =
- (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
- uint32_t src_mask = 0, intf, bufq_id = 0, bufq_handle = 0;
+ struct msm_vfe_axi_stream *streams[MAX_NUM_STREAM];
+ int num_streams = 0;
unsigned long flags;
- struct msm_isp_timestamp timestamp;
if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM ||
stream_cfg_cmd->num_streams == 0)
return -EINVAL;
- msm_isp_get_timestamp(&timestamp);
-
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
- VFE_AXI_SRC_MAX) {
- return -EINVAL;
- }
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
- /* set ping pong address to scratch before stream stop */
spin_lock_irqsave(&stream_info->lock, flags);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PING_FLAG);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PONG_FLAG);
+ rc = __msm_isp_check_stream_state(stream_info, 0);
spin_unlock_irqrestore(&stream_info->lock, flags);
- wait_for_complete_for_this_stream = 0;
-
- if (stream_info->num_planes > 1)
- vfe_dev->hw_info->vfe_ops.axi_ops.
- clear_comp_mask(vfe_dev, stream_info);
- else
- vfe_dev->hw_info->vfe_ops.axi_ops.
- clear_wm_irq_mask(vfe_dev, stream_info);
-
- stream_info->state = STOP_PENDING;
-
- if (!halt && !ext_read &&
- !(stream_info->stream_type == BURST_STREAM &&
- stream_info->runtime_num_burst_capture == 0))
- wait_for_complete_for_this_stream = 1;
-
- ISP_DBG("%s: stream 0x%x, vfe %d camif %d halt %d wait %d\n",
- __func__,
- stream_info->stream_id,
- vfe_dev->pdev->id,
- camif_update,
- halt,
- wait_for_complete_for_this_stream);
-
- intf = SRC_TO_INTF(stream_info->stream_src);
- if (!wait_for_complete_for_this_stream ||
- stream_info->state == INACTIVE ||
- !vfe_dev->axi_data.src_info[intf].active) {
- msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info, 0);
- stream_info->state = INACTIVE;
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
- SRC_TO_INTF(stream_info->stream_src));
-
+ if (rc) {
/*
- * Active bit is reset in disble_camif for PIX.
- * For RDI, reset it here for not wait_for_complete
- * This is assuming there is only 1 stream mapped to
- * each RDI.
+ * continue stopping other streams as error here means
+ * stream is already not active
*/
- if (intf >= VFE_RAW_0 &&
- intf < VFE_SRC_MAX) {
- vfe_dev->axi_data.src_info[intf].active = 0;
- }
- } else
- src_mask |= (1 << intf);
-
- }
-
- if (src_mask) {
- rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update,
- src_mask, 2);
- if (rc < 0) {
- pr_err("%s: wait for config done failed, retry...\n",
- __func__);
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(
- stream_cfg_cmd->stream_handle[i])];
- stream_info->state = STOPPING;
- msm_isp_axi_stream_enable_cfg(
- vfe_dev, stream_info, 0);
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
- vfe_dev,
- SRC_TO_INTF(stream_info->stream_src));
- rc = msm_isp_axi_wait_for_cfg_done(vfe_dev,
- camif_update, src_mask, 1);
- if (rc < 0) {
- pr_err("%s: vfe%d cfg done failed\n",
- __func__, vfe_dev->pdev->id);
- stream_info->state = INACTIVE;
- } else
- pr_err("%s: vfe%d retry success! report err!\n",
- __func__, vfe_dev->pdev->id);
-
- rc = -EBUSY;
- }
- }
-
- /*
- * Active bit is reset in disble_camif for PIX.
- * For RDI, reset it here after wait_for_complete
- * This is assuming there is only 1 stream mapped to each RDI
- */
- for (i = VFE_RAW_0; i < VFE_SRC_MAX; i++) {
- if (src_mask & (1 << i)) {
- vfe_dev->axi_data.src_info[i].active = 0;
- }
- }
- }
-
- if (camif_update == DISABLE_CAMIF) {
- vfe_dev->hw_info->vfe_ops.core_ops.
- update_camif_state(vfe_dev, DISABLE_CAMIF);
- vfe_dev->axi_data.camif_state = CAMIF_DISABLE;
- } else if ((camif_update == DISABLE_CAMIF_IMMEDIATELY) ||
- (ext_read)) {
- if (!ext_read)
- vfe_dev->hw_info->vfe_ops.core_ops.
- update_camif_state(vfe_dev,
- DISABLE_CAMIF_IMMEDIATELY);
- vfe_dev->axi_data.camif_state = CAMIF_STOPPED;
- }
- if (halt) {
- /*during stop immediately, stop output then stop input*/
- vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
- vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 1);
- vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
- }
-
- msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
- msm_isp_update_stream_bandwidth(vfe_dev);
-
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
- for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) {
- bufq_handle = stream_info->bufq_handle[bufq_id];
- if (!bufq_handle)
- continue;
-
- rc = vfe_dev->buf_mgr->ops->flush_buf(
- vfe_dev->buf_mgr, vfe_dev->pdev->id,
- bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
- &timestamp.buf_time, 0);
- if (rc == -EFAULT) {
- msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- return rc;
- }
+ rc = 0;
+ continue;
}
- vfe_dev->reg_update_requested &=
- ~(BIT(SRC_TO_INTF(stream_info->stream_src)));
+ streams[num_streams++] = stream_info;
}
+ __msm_isp_stop_axi_streams(streams, num_streams,
+ stream_cfg_cmd->cmd);
return rc;
}
-
int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0, ret;
struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd = arg;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- enum msm_isp_camif_update_state camif_update;
- int halt = 0;
-
- rc = msm_isp_axi_check_stream_state(vfe_dev, stream_cfg_cmd);
- if (rc < 0) {
- pr_err("%s: Invalid stream state\n", __func__);
- return rc;
- }
+ int i;
- if (axi_data->num_active_stream == 0) {
- /*Configure UB*/
- vfe_dev->hw_info->vfe_ops.axi_ops.cfg_ub(vfe_dev);
- /*when start reset overflow state*/
- atomic_set(&vfe_dev->error_info.overflow_state,
- NO_OVERFLOW);
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX)
+ return -EINVAL;
}
- msm_isp_get_camif_update_state_and_halt(vfe_dev, stream_cfg_cmd,
- &camif_update, &halt);
- if (camif_update == DISABLE_CAMIF)
- vfe_dev->axi_data.camif_state = CAMIF_STOPPING;
if (stream_cfg_cmd->cmd == START_STREAM) {
msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 1);
rc = msm_isp_start_axi_stream(
- vfe_dev, stream_cfg_cmd, camif_update);
+ vfe_dev, stream_cfg_cmd);
} else {
rc = msm_isp_stop_axi_stream(
- vfe_dev, stream_cfg_cmd, camif_update, halt);
+ vfe_dev, stream_cfg_cmd);
msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 0);
- if (axi_data->num_active_stream == 0) {
- /* Reset hvx state */
- vfe_dev->hvx_cmd = HVX_DISABLE;
- }
/*
* Use different ret value to not overwrite the error from
* msm_isp_stop_axi_stream
*/
ret = msm_isp_update_dual_HW_ms_info_at_stop(
- vfe_dev, stream_cfg_cmd, camif_update);
+ vfe_dev, stream_cfg_cmd);
if (ret < 0)
pr_warn("%s: Warning! Update dual_cam failed\n",
__func__);
+ if (vfe_dev->axi_data.num_active_stream == 0)
+ vfe_dev->hvx_cmd = HVX_DISABLE;
+ if (vfe_dev->is_split) {
+ struct vfe_device *vfe_temp =
+ vfe_dev->common_data->
+ dual_vfe_res->vfe_dev[ISP_VFE0];
+ if (vfe_temp->axi_data.num_active_stream == 0)
+ vfe_temp->hvx_cmd = HVX_DISABLE;
+ }
}
if (rc < 0)
@@ -2925,7 +3090,8 @@ int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t user_stream_id,
- uint32_t frame_id, enum msm_vfe_input_src frame_src)
+ uint32_t frame_id, uint32_t buf_index,
+ enum msm_vfe_input_src frame_src)
{
int rc = -1;
struct msm_isp_buffer *buf = NULL;
@@ -2940,7 +3106,7 @@ static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
return -EINVAL;
}
- stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ stream_idx = stream_info->stream_src;
if (!stream_info->controllable_output)
return -EINVAL;
@@ -2961,7 +3127,7 @@ static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id, bufq_handle, &buf);
+ vfe_dev->pdev->id, bufq_handle, buf_index, &buf);
if (rc == -EFAULT) {
msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
return rc;
@@ -2999,7 +3165,7 @@ static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
static int msm_isp_request_frame(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t user_stream_id,
- uint32_t frame_id)
+ uint32_t frame_id, uint32_t buf_index)
{
struct msm_vfe_axi_stream_request_cmd stream_cfg_cmd;
struct msm_vfe_frame_request_queue *queue_req;
@@ -3007,10 +3173,9 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
unsigned long flags;
int rc = 0;
enum msm_vfe_input_src frame_src = 0;
- struct dual_vfe_resource *dual_vfe_res =
- vfe_dev->common_data->dual_vfe_res;
- uint32_t vfe_id = 0;
- bool dual_vfe = false;
+ int k;
+ uint32_t wm_mask = 0;
+ int vfe_idx;
if (!vfe_dev || !stream_info) {
pr_err("%s %d failed: vfe_dev %pK stream_info %pK\n", __func__,
@@ -3018,16 +3183,9 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
return -EINVAL;
}
- if (vfe_dev->is_split) {
- if (stream_info->stream_src < RDI_INTF_0) {
- if (vfe_dev->pdev->id == ISP_VFE1) {
- dual_vfe = true;
- } else {
- /* return early for dual vfe0 */
- return 0;
- }
- }
- }
+ /* return early for dual vfe0 */
+ if (stream_info->num_isp > 1 && vfe_dev->pdev->id == ISP_VFE0)
+ return 0;
if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
pr_err("%s:%d invalid stream src %d\n", __func__, __LINE__,
@@ -3052,7 +3210,7 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
vfe_dev->axi_data.src_info[VFE_PIX_0].active);
rc = msm_isp_return_empty_buffer(vfe_dev, stream_info,
- user_stream_id, frame_id, frame_src);
+ user_stream_id, frame_id, buf_index, frame_src);
if (rc < 0)
pr_err("%s:%d failed: return_empty_buffer src %d\n",
__func__, __LINE__, frame_src);
@@ -3067,13 +3225,13 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
stream_info->stream_id);
rc = msm_isp_return_empty_buffer(vfe_dev, stream_info,
- user_stream_id, frame_id, frame_src);
+ user_stream_id, frame_id, buf_index, frame_src);
if (rc < 0)
pr_err("%s:%d failed: return_empty_buffer src %d\n",
__func__, __LINE__, frame_src);
stream_info->current_framedrop_period =
MSM_VFE_STREAM_STOP_PERIOD;
- msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ msm_isp_cfg_framedrop_reg(stream_info);
return 0;
}
@@ -3093,10 +3251,12 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
if (!stream_info->bufq_handle[queue_req->buff_queue_id]) {
spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s:%d request frame failed on hw stream 0x%x, request stream %d due to no bufq idx: %d\n",
- __func__, __LINE__, stream_info->stream_handle,
+ __func__, __LINE__,
+ stream_info->stream_handle[0],
user_stream_id, queue_req->buff_queue_id);
return 0;
}
+ queue_req->buf_index = buf_index;
queue_req->cmd_used = 1;
stream_info->request_q_idx =
@@ -3105,14 +3265,15 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
stream_info->request_q_cnt++;
stream_info->undelivered_request_cnt++;
- stream_cfg_cmd.axi_stream_handle = stream_info->stream_handle;
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ stream_cfg_cmd.axi_stream_handle = stream_info->stream_handle[vfe_idx];
stream_cfg_cmd.frame_skip_pattern = NO_SKIP;
stream_cfg_cmd.init_frame_drop = 0;
stream_cfg_cmd.burst_count = stream_info->request_q_cnt;
if (stream_info->undelivered_request_cnt == 1) {
- rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
- VFE_PING_FLAG, 0);
+ rc = msm_isp_cfg_ping_pong_address(stream_info,
+ VFE_PING_FLAG);
if (rc) {
spin_unlock_irqrestore(&stream_info->lock, flags);
stream_info->undelivered_request_cnt--;
@@ -3121,41 +3282,23 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
return rc;
}
- vfe_id = vfe_dev->pdev->id;
- if (dual_vfe) {
- struct msm_vfe_axi_stream *temp_stream_info;
-
- temp_stream_info = msm_isp_vfe_get_stream(dual_vfe_res,
- ISP_VFE0,
- HANDLE_TO_IDX(
- stream_info->stream_handle));
- msm_isp_get_stream_wm_mask(temp_stream_info,
- &dual_vfe_res->wm_reload_mask[ISP_VFE0]);
- msm_isp_get_stream_wm_mask(stream_info,
- &dual_vfe_res->wm_reload_mask[ISP_VFE1]);
- vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
- dual_vfe_res->vfe_base[ISP_VFE0],
- dual_vfe_res->wm_reload_mask[ISP_VFE0]);
- vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
- dual_vfe_res->vfe_base[ISP_VFE1],
- dual_vfe_res->wm_reload_mask[ISP_VFE1]);
- dual_vfe_res->wm_reload_mask[ISP_VFE0] = 0;
- dual_vfe_res->wm_reload_mask[ISP_VFE1] = 0;
- } else {
- msm_isp_get_stream_wm_mask(stream_info,
- &dual_vfe_res->wm_reload_mask[vfe_id]);
- vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
- vfe_dev->vfe_base,
- dual_vfe_res->wm_reload_mask[vfe_id]);
- dual_vfe_res->wm_reload_mask[vfe_id] = 0;
+ for (k = 0; k < stream_info->num_isp; k++) {
+ wm_mask = 0;
+ msm_isp_get_stream_wm_mask(stream_info->vfe_dev[k],
+ stream_info, &wm_mask);
+ stream_info->vfe_dev[k]->
+ hw_info->vfe_ops.axi_ops.reload_wm(
+ stream_info->vfe_dev[k],
+ stream_info->vfe_dev[k]->vfe_base, wm_mask);
+
}
stream_info->sw_ping_pong_bit = 0;
} else if (stream_info->undelivered_request_cnt == 2) {
pingpong_status =
vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(
vfe_dev);
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, pingpong_status, 0);
+ rc = msm_isp_cfg_ping_pong_address(
+ stream_info, pingpong_status);
if (rc) {
stream_info->undelivered_request_cnt--;
spin_unlock_irqrestore(&stream_info->lock,
@@ -3172,7 +3315,7 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
return -EINVAL;
}
- rc = msm_isp_calculate_framedrop(&vfe_dev->axi_data, &stream_cfg_cmd);
+ rc = msm_isp_calculate_framedrop(vfe_dev, &stream_cfg_cmd);
if (0 == rc)
msm_isp_reset_framedrop(vfe_dev, stream_info);
@@ -3186,25 +3329,45 @@ static int msm_isp_add_buf_queue(struct vfe_device *vfe_dev,
{
int rc = 0;
uint32_t bufq_id = 0;
+ unsigned long flags;
if (stream_id == stream_info->stream_id)
bufq_id = VFE_BUF_QUEUE_DEFAULT;
else
bufq_id = VFE_BUF_QUEUE_SHARED;
+ spin_lock_irqsave(&stream_info->lock, flags);
- stream_info->bufq_handle[bufq_id] =
- vfe_dev->buf_mgr->ops->get_bufq_handle(vfe_dev->buf_mgr,
- stream_info->session_id, stream_id);
if (stream_info->bufq_handle[bufq_id] == 0) {
- pr_err("%s: failed: No valid buffer queue for stream: 0x%x\n",
- __func__, stream_id);
- rc = -EINVAL;
+ stream_info->bufq_handle[bufq_id] =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(vfe_dev->buf_mgr,
+ stream_info->session_id, stream_id);
+ if (stream_info->bufq_handle[bufq_id] == 0) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: failed: No valid buffer queue for stream: 0x%x\n",
+ __func__, stream_id);
+ return -EINVAL;
+ }
+ } else {
+ uint32_t bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr,
+ stream_info->session_id,
+ stream_id);
+ if (bufq_handle != stream_info->bufq_handle[bufq_id]) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: Stream %x already has buffer q %x cannot add handle %x\n",
+ __func__, stream_id,
+ stream_info->bufq_handle[bufq_id], bufq_handle);
+ return -EINVAL;
+ }
}
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
ISP_DBG("%d: Add bufq handle:0x%x, idx:%d, for stream %d on VFE %d\n",
__LINE__, stream_info->bufq_handle[bufq_id],
- bufq_id, stream_info->stream_handle, vfe_dev->pdev->id);
+ bufq_id, stream_info->stream_handle[0],
+ vfe_dev->pdev->id);
return rc;
}
@@ -3221,18 +3384,102 @@ static void msm_isp_remove_buf_queue(struct vfe_device *vfe_dev,
bufq_id = VFE_BUF_QUEUE_SHARED;
spin_lock_irqsave(&stream_info->lock, flags);
- stream_info->bufq_handle[bufq_id] = 0;
+
+ if (stream_info->bufq_handle[bufq_id]) {
+ stream_info->bufq_handle[bufq_id] = 0;
+ if (stream_info->state == ACTIVE)
+ stream_info->state = UPDATING;
+ }
spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (stream_info->state == UPDATING)
+ msm_isp_axi_wait_for_stream_cfg_done(stream_info, 1);
+
+}
+
+/**
+ * msm_isp_stream_axi_cfg_update() - Apply axi config update to a stream
+ * @vfe_dev: The vfe device on which the update is to be applied
+ * @stream_info: Stream for which update is to be applied
+ * @update_info: Parameters of the update
+ *
+ * Returns - 0 on success else error code
+ *
+ * For dual vfe stream apply the update once update for both vfe is
+ * received.
+ */
+static int msm_isp_stream_axi_cfg_update(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ struct msm_vfe_axi_stream_cfg_update_info *update_info)
+{
+ int j;
+ int k;
+ unsigned long flags;
+ int vfe_idx;
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[
+ SRC_TO_INTF(stream_info->stream_src)])) {
+ pr_err("%s: Update in progress for vfe %d intf %d\n",
+ __func__, vfe_dev->pdev->id,
+ SRC_TO_INTF(stream_info->stream_src));
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state != ACTIVE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("Invalid stream state for axi update %d\n",
+ stream_info->state);
+ return -EINVAL;
+ }
+ if (stream_info->update_vfe_mask) {
+ if (stream_info->update_vfe_mask & (1 << vfe_dev->pdev->id)) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: Stream %p/%x Update already in progress for vfe %d\n",
+ __func__, stream_info, stream_info->stream_src,
+ vfe_dev->pdev->id);
+ return -EINVAL;
+ }
+ }
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ for (j = 0; j < stream_info->num_planes; j++)
+ stream_info->plane_cfg[vfe_idx][j] = update_info->plane_cfg[j];
+
+ stream_info->update_vfe_mask |= (1 << vfe_dev->pdev->id);
+ /* wait for update from all vfe's under stream before applying */
+ if (stream_info->update_vfe_mask != stream_info->vfe_mask) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ return 0;
+ }
+
+ atomic_set(&vfe_dev->axi_data.axi_cfg_update[
+ SRC_TO_INTF(stream_info->stream_src)], 1);
+ stream_info->output_format = update_info->output_format;
+ init_completion(&stream_info->active_comp);
+ if (((vfe_dev->hw_info->runtime_axi_update == 0) ||
+ (vfe_dev->dual_vfe_enable == 1))) {
+ stream_info->state = PAUSE_PENDING;
+ msm_isp_axi_stream_enable_cfg(stream_info);
+ stream_info->state = PAUSING;
+ } else {
+ for (j = 0; j < stream_info->num_planes; j++) {
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, j);
+ }
+ }
+ stream_info->state = RESUMING;
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ return 0;
}
int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
- int rc = 0, i, j;
+ int rc = 0, i;
struct msm_vfe_axi_stream *stream_info;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
- struct msm_vfe_axi_stream_cfg_update_info *update_info;
+ struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
struct msm_isp_sw_framskip *sw_skip_info = NULL;
unsigned long flags;
struct msm_isp_timestamp timestamp;
@@ -3243,14 +3490,15 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
return -EINVAL;
for (i = 0; i < update_cmd->num_streams; i++) {
- update_info = &update_cmd->update_info[i];
+ update_info = (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
/*check array reference bounds*/
if (HANDLE_TO_IDX(update_info->stream_handle) >=
VFE_AXI_SRC_MAX) {
return -EINVAL;
}
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(update_info->stream_handle)];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
if (SRC_TO_INTF(stream_info->stream_src) >= VFE_SRC_MAX)
continue;
if (stream_info->state != ACTIVE &&
@@ -3267,37 +3515,45 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
return -EINVAL;
}
if (update_cmd->update_type == UPDATE_STREAM_AXI_CONFIG &&
- atomic_read(&axi_data->axi_cfg_update[
- SRC_TO_INTF(stream_info->stream_src)])) {
+ stream_info->state != ACTIVE) {
pr_err("%s: AXI stream config updating\n", __func__);
return -EBUSY;
}
}
- for (i = 0; i < update_cmd->num_streams; i++) {
- update_info = &update_cmd->update_info[i];
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(update_info->stream_handle)];
-
- switch (update_cmd->update_type) {
- case ENABLE_STREAM_BUF_DIVERT:
+ switch (update_cmd->update_type) {
+ case ENABLE_STREAM_BUF_DIVERT:
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
stream_info->buf_divert = 1;
- break;
- case DISABLE_STREAM_BUF_DIVERT:
+ }
+ break;
+ case DISABLE_STREAM_BUF_DIVERT:
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
stream_info->buf_divert = 0;
msm_isp_get_timestamp(&timestamp);
frame_id = vfe_dev->axi_data.src_info[
SRC_TO_INTF(stream_info->stream_src)].frame_id;
/* set ping pong address to scratch before flush */
spin_lock_irqsave(&stream_info->lock, flags);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PING_FLAG);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PONG_FLAG);
+ msm_isp_cfg_stream_scratch(stream_info,
+ VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(stream_info,
+ VFE_PONG_FLAG);
spin_unlock_irqrestore(&stream_info->lock, flags);
- rc = vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id,
- stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT],
+ rc = vfe_dev->buf_mgr->ops->flush_buf(
+ vfe_dev->buf_mgr,
+ stream_info->bufq_handle
+ [VFE_BUF_QUEUE_DEFAULT],
MSM_ISP_BUFFER_FLUSH_DIVERTED,
&timestamp.buf_time, frame_id);
if (rc == -EFAULT) {
@@ -3305,11 +3561,18 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
ISP_EVENT_BUF_FATAL_ERROR);
return rc;
}
- break;
- case UPDATE_STREAM_FRAMEDROP_PATTERN: {
+ }
+ break;
+ case UPDATE_STREAM_FRAMEDROP_PATTERN: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
uint32_t framedrop_period =
msm_isp_get_framedrop_period(
- update_info->skip_pattern);
+ update_info->skip_pattern);
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
spin_lock_irqsave(&stream_info->lock, flags);
/* no change then break early */
if (stream_info->current_framedrop_period ==
@@ -3331,11 +3594,18 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
stream_info->current_framedrop_period =
framedrop_period;
if (stream_info->stream_type != BURST_STREAM)
- msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ msm_isp_cfg_framedrop_reg(stream_info);
spin_unlock_irqrestore(&stream_info->lock, flags);
- break;
}
- case UPDATE_STREAM_SW_FRAME_DROP: {
+ break;
+ }
+ case UPDATE_STREAM_SW_FRAME_DROP: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
sw_skip_info = &update_info->sw_skip_info;
if (sw_skip_info->stream_src_mask != 0) {
/* SW image buffer drop */
@@ -3350,88 +3620,88 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
spin_unlock_irqrestore(&stream_info->lock,
flags);
}
- break;
}
- case UPDATE_STREAM_AXI_CONFIG: {
- for (j = 0; j < stream_info->num_planes; j++) {
- stream_info->plane_cfg[j] =
- update_info->plane_cfg[j];
- }
- stream_info->output_format = update_info->output_format;
- if ((stream_info->state == ACTIVE) &&
- ((vfe_dev->hw_info->runtime_axi_update == 0) ||
- (vfe_dev->dual_vfe_enable == 1))) {
- spin_lock_irqsave(&stream_info->lock, flags);
- stream_info->state = PAUSE_PENDING;
- msm_isp_axi_stream_enable_cfg(
- vfe_dev, stream_info, 1);
- stream_info->state = PAUSING;
- atomic_set(&axi_data->
- axi_cfg_update[SRC_TO_INTF(
- stream_info->stream_src)],
- UPDATE_REQUESTED);
- spin_unlock_irqrestore(&stream_info->lock,
- flags);
- } else {
- for (j = 0; j < stream_info->num_planes; j++) {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_wm_reg(vfe_dev, stream_info, j);
- }
-
- spin_lock_irqsave(&stream_info->lock, flags);
- if (stream_info->state != ACTIVE) {
- stream_info->runtime_output_format =
- stream_info->output_format;
- } else {
- stream_info->state = RESUMING;
- atomic_set(&axi_data->
- axi_cfg_update[SRC_TO_INTF(
- stream_info->stream_src)],
- APPLYING_UPDATE_RESUME);
- }
- spin_unlock_irqrestore(&stream_info->lock,
- flags);
- }
- break;
+ break;
+ }
+ case UPDATE_STREAM_AXI_CONFIG: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
+ rc = msm_isp_stream_axi_cfg_update(vfe_dev, stream_info,
+ update_info);
+ if (rc)
+ return rc;
}
- case UPDATE_STREAM_REQUEST_FRAMES: {
+ break;
+ }
+ case UPDATE_STREAM_REQUEST_FRAMES: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
rc = msm_isp_request_frame(vfe_dev, stream_info,
update_info->user_stream_id,
- update_info->frame_id);
+ update_info->frame_id,
+ MSM_ISP_INVALID_BUF_INDEX);
if (rc)
pr_err("%s failed to request frame!\n",
__func__);
- break;
}
- case UPDATE_STREAM_ADD_BUFQ: {
+ break;
+ }
+ case UPDATE_STREAM_ADD_BUFQ: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
rc = msm_isp_add_buf_queue(vfe_dev, stream_info,
update_info->user_stream_id);
if (rc)
pr_err("%s failed to add bufq!\n", __func__);
- break;
}
- case UPDATE_STREAM_REMOVE_BUFQ: {
+ break;
+ }
+ case UPDATE_STREAM_REMOVE_BUFQ: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
msm_isp_remove_buf_queue(vfe_dev, stream_info,
update_info->user_stream_id);
pr_debug("%s, Remove bufq for Stream 0x%x\n",
__func__, stream_info->stream_id);
- if (stream_info->state == ACTIVE) {
- stream_info->state = UPDATING;
- rc = msm_isp_axi_wait_for_cfg_done(vfe_dev,
- NO_UPDATE, (1 << SRC_TO_INTF(
- stream_info->stream_src)), 2);
- if (rc < 0)
- pr_err("%s: wait for update failed\n",
- __func__);
- }
-
- break;
- }
- default:
- pr_err("%s: Invalid update type\n", __func__);
- return -EINVAL;
}
+ break;
+ }
+ case UPDATE_STREAM_REQUEST_FRAMES_VER2: {
+ struct msm_vfe_axi_stream_cfg_update_info_req_frm *req_frm =
+ &update_cmd->req_frm_ver2;
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(req_frm->stream_handle));
+ rc = msm_isp_request_frame(vfe_dev, stream_info,
+ req_frm->user_stream_id,
+ req_frm->frame_id,
+ req_frm->buf_index);
+ if (rc)
+ pr_err("%s failed to request frame!\n",
+ __func__);
+ break;
}
+ default:
+ pr_err("%s: Invalid update type %d\n", __func__,
+ update_cmd->update_type);
+ return -EINVAL;
+ }
+
return rc;
}
@@ -3446,7 +3716,7 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
unsigned long flags;
struct timeval *time_stamp;
uint32_t frame_id, buf_index = -1;
- struct msm_vfe_axi_stream *temp_stream;
+ int vfe_idx;
if (!ts) {
pr_err("%s: Error! Invalid argument\n", __func__);
@@ -3464,10 +3734,13 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
spin_lock_irqsave(&stream_info->lock, flags);
- pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ pingpong_bit = (~(pingpong_status >>
+ stream_info->wm[vfe_idx][0]) & 0x1);
for (i = 0; i < stream_info->num_planes; i++) {
if (pingpong_bit !=
- (~(pingpong_status >> stream_info->wm[i]) & 0x1)) {
+ (~(pingpong_status >>
+ stream_info->wm[vfe_idx][i]) & 0x1)) {
spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: Write master ping pong mismatch. Status: 0x%x\n",
__func__, pingpong_status);
@@ -3476,15 +3749,23 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
return;
}
}
-
if (stream_info->state == INACTIVE) {
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
- pingpong_status);
+ WARN_ON(stream_info->buf[pingpong_bit] != NULL);
spin_unlock_irqrestore(&stream_info->lock, flags);
- pr_err_ratelimited("%s: Warning! Stream already inactive. Drop irq handling\n",
- __func__);
return;
}
+
+ /* composite the irq for dual vfe */
+ rc = msm_isp_composite_irq(vfe_dev, stream_info,
+ MSM_ISP_COMP_IRQ_PING_BUFDONE + pingpong_bit);
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (rc < 0)
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return;
+ }
+
done_buf = stream_info->buf[pingpong_bit];
if (vfe_dev->buf_mgr->frameId_mismatch_recovery == 1) {
@@ -3494,46 +3775,28 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
return;
}
- stream_info->frame_id++;
if (done_buf)
buf_index = done_buf->buf_idx;
- rc = vfe_dev->buf_mgr->ops->update_put_buf_cnt(vfe_dev->buf_mgr,
+ ISP_DBG("%s: vfe %d: stream 0x%x, frame id %d, pingpong bit %d\n",
+ __func__,
vfe_dev->pdev->id,
- done_buf ? done_buf->bufq_handle :
- stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT], buf_index,
- time_stamp, frame_id, pingpong_bit);
+ stream_info->stream_id,
+ frame_id,
+ pingpong_bit);
- if (rc < 0) {
- spin_unlock_irqrestore(&stream_info->lock, flags);
- /* this usually means a serious scheduling error */
- msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
- return;
- }
- /*
- * Buf divert return value represent whether the buf
- * can be diverted. A positive return value means
- * other ISP hardware is still processing the frame.
- * A negative value is error. Return in both cases.
- */
- if (rc != 0) {
- spin_unlock_irqrestore(&stream_info->lock, flags);
- return;
- }
+ stream_info->frame_id++;
+ stream_info->buf[pingpong_bit] = NULL;
if (stream_info->stream_type == CONTINUOUS_STREAM ||
stream_info->runtime_num_burst_capture > 1) {
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, pingpong_status, 0);
+ rc = msm_isp_cfg_ping_pong_address(
+ stream_info, pingpong_status);
if (rc < 0)
ISP_DBG("%s: Error configuring ping_pong\n",
__func__);
} else if (done_buf) {
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, pingpong_status, 1);
- if (rc < 0)
- ISP_DBG("%s: Error configuring ping_pong\n",
- __func__);
+ msm_isp_cfg_stream_scratch(stream_info, pingpong_status);
}
if (!done_buf) {
@@ -3547,28 +3810,12 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
return;
}
- temp_stream = msm_isp_get_controllable_stream(vfe_dev,
- stream_info);
- if (temp_stream->stream_type == BURST_STREAM &&
- temp_stream->runtime_num_burst_capture) {
+ if (stream_info->stream_type == BURST_STREAM &&
+ stream_info->runtime_num_burst_capture) {
ISP_DBG("%s: burst_frame_count: %d\n",
__func__,
- temp_stream->runtime_num_burst_capture);
- temp_stream->runtime_num_burst_capture--;
- /*
- * For non controllable stream decrement the burst count for
- * dual stream as well here
- */
- if (!stream_info->controllable_output && vfe_dev->is_split &&
- RDI_INTF_0 > stream_info->stream_src) {
- temp_stream = msm_isp_vfe_get_stream(
- vfe_dev->common_data->dual_vfe_res,
- ((vfe_dev->pdev->id == ISP_VFE0) ?
- ISP_VFE1 : ISP_VFE0),
- HANDLE_TO_IDX(
- stream_info->stream_handle));
- temp_stream->runtime_num_burst_capture--;
- }
+ stream_info->runtime_num_burst_capture);
+ stream_info->runtime_num_burst_capture--;
}
rc = msm_isp_update_deliver_count(vfe_dev, stream_info,
@@ -3637,7 +3884,8 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
continue;
}
stream_idx = HANDLE_TO_IDX(comp_info->stream_handle);
- stream_info = &axi_data->stream_info[stream_idx];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ stream_idx);
msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
pingpong_status, ts);
@@ -3656,7 +3904,8 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
pingpong_status);
continue;
}
- stream_info = &axi_data->stream_info[stream_idx];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ stream_idx);
msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
pingpong_status, ts);
@@ -3670,6 +3919,7 @@ void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev)
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
int i, j;
+ int vfe_idx;
if (!vfe_dev || !axi_data) {
pr_err("%s: error %pK %pK\n", __func__, vfe_dev, axi_data);
@@ -3677,14 +3927,16 @@ void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev)
}
for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- stream_info = &axi_data->stream_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
if (stream_info->state != ACTIVE)
continue;
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev,
+ stream_info);
for (j = 0; j < stream_info->num_planes; j++)
vfe_dev->hw_info->vfe_ops.axi_ops.enable_wm(
vfe_dev->vfe_base,
- stream_info->wm[j], 0);
+ stream_info->wm[vfe_idx][j], 0);
}
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
index 08053aa410e7..84720f3d8625 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
@@ -14,37 +14,15 @@
#include "msm_isp.h"
+#define HANDLE_TO_IDX(handle) (handle & 0xFF)
#define SRC_TO_INTF(src) \
((src < RDI_INTF_0 || src == VFE_AXI_SRC_MAX) ? VFE_PIX_0 : \
(VFE_RAW_0 + src - RDI_INTF_0))
-int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
-
-void msm_isp_axi_destroy_stream(
- struct msm_vfe_axi_shared_data *axi_data, int stream_idx);
-
-int msm_isp_validate_axi_request(
- struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
-
-void msm_isp_axi_reserve_wm(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream *stream_info);
-
-void msm_isp_axi_reserve_comp_mask(
- struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream *stream_info);
-
int msm_isp_axi_check_stream_state(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd);
-int msm_isp_calculate_framedrop(
- struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info);
@@ -62,10 +40,13 @@ int msm_isp_axi_restart(struct vfe_device *vfe_dev,
struct msm_vfe_axi_restart_cmd *restart_cmd);
void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src);
+ enum msm_vfe_input_src frame_src,
+ struct msm_isp_timestamp *ts);
-void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src);
+void msm_isp_process_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src,
+ enum msm_isp_comp_irq_types irq,
+ struct msm_isp_timestamp *ts);
void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts);
@@ -94,6 +75,34 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
uint32_t pingpong_status,
struct msm_isp_timestamp *ts);
+void msm_isp_release_all_axi_stream(struct vfe_device *vfe_dev);
+
+static inline int msm_isp_get_vfe_idx_for_stream_user(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int vfe_idx;
+
+ for (vfe_idx = 0; vfe_idx < stream_info->num_isp; vfe_idx++) {
+ if (stream_info->vfe_dev[vfe_idx] == vfe_dev)
+ return vfe_idx;
+ }
+ return -ENOTTY;
+}
+
+static inline int msm_isp_get_vfe_idx_for_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream_user(vfe_dev, stream_info);
+
+ if (vfe_idx < 0) {
+		WARN(1, "%s vfe index missing for stream %d, vfe %d\n",
+			__func__, stream_info->stream_src, vfe_dev->pdev->id);
+ vfe_idx = 0;
+ }
+ return vfe_idx;
+}
+
static inline void msm_isp_cfg_wm_scratch(struct vfe_device *vfe_dev,
int wm,
uint32_t pingpong_bit)
@@ -103,18 +112,48 @@ static inline void msm_isp_cfg_wm_scratch(struct vfe_device *vfe_dev,
pingpong_bit, vfe_dev->buf_mgr->scratch_buf_addr, 0);
}
-static inline void msm_isp_cfg_stream_scratch(struct vfe_device *vfe_dev,
+static inline void msm_isp_cfg_stream_scratch(
struct msm_vfe_axi_stream *stream_info,
uint32_t pingpong_status)
{
int i;
+ int j;
uint32_t pingpong_bit;
-
- pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
- for (i = 0; i < stream_info->num_planes; i++)
- msm_isp_cfg_wm_scratch(vfe_dev, stream_info->wm[i],
+ int vfe_idx;
+
+ pingpong_bit = (~(pingpong_status >> stream_info->wm[0][0]) & 0x1);
+ for (i = 0; i < stream_info->num_planes; i++) {
+ for (j = 0; j < stream_info->num_isp; j++) {
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(
+ stream_info->vfe_dev[j], stream_info);
+ msm_isp_cfg_wm_scratch(stream_info->vfe_dev[j],
+ stream_info->wm[vfe_idx][i],
~pingpong_bit);
+ }
+ }
stream_info->buf[pingpong_bit] = NULL;
}
+static inline struct msm_vfe_axi_stream *msm_isp_get_stream_common_data(
+ struct vfe_device *vfe_dev, int stream_idx)
+{
+ struct msm_vfe_common_dev_data *common_data = vfe_dev->common_data;
+ struct msm_vfe_axi_stream *stream_info;
+
+ if (vfe_dev->is_split && stream_idx < RDI_INTF_0)
+ stream_info = &common_data->streams[stream_idx];
+ else
+ stream_info = &common_data->streams[VFE_AXI_SRC_MAX *
+ vfe_dev->pdev->id + stream_idx];
+ return stream_info;
+}
+
+static inline struct msm_vfe_axi_stream *msm_isp_vfe_get_stream(
+ struct dual_vfe_resource *dual_vfe_res,
+ int vfe_id, uint32_t index)
+{
+ return msm_isp_get_stream_common_data(dual_vfe_res->vfe_dev[vfe_id],
+ index);
+}
+
#endif /* __MSM_ISP_AXI_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index 4aef6b5c7f38..f851e8c9289e 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -22,59 +22,89 @@ static inline void msm_isp_stats_cfg_wm_scratch(struct vfe_device *vfe_dev,
uint32_t pingpong_status)
{
vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr(
- vfe_dev->vfe_base, stream_info,
+ vfe_dev, stream_info,
pingpong_status, vfe_dev->buf_mgr->scratch_buf_addr);
}
-static inline void msm_isp_stats_cfg_stream_scratch(struct vfe_device *vfe_dev,
+static inline void msm_isp_stats_cfg_stream_scratch(
struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status)
{
- uint32_t stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_idx = STATS_IDX(stream_info->stream_handle[0]);
uint32_t pingpong_bit;
- uint32_t stats_pingpong_offset =
- vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
- stats_idx];
+ uint32_t stats_pingpong_offset;
+ struct vfe_device *vfe_dev;
+ int i;
+ stats_pingpong_offset = stream_info->vfe_dev[0]->hw_info->
+ stats_hw_info->stats_ping_pong_offset[stats_idx];
pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
- msm_isp_stats_cfg_wm_scratch(vfe_dev, stream_info,
- pingpong_status);
+ for (i = 0; i < stream_info->num_isp; i++) {
+ vfe_dev = stream_info->vfe_dev[i];
+ msm_isp_stats_cfg_wm_scratch(vfe_dev, stream_info,
+ pingpong_status);
+ }
+
stream_info->buf[pingpong_bit] = NULL;
}
-static int msm_isp_stats_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+static int msm_isp_composite_stats_irq(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info,
+ enum msm_isp_comp_irq_types irq)
+{
+ /* interrupt recv on same vfe w/o recv on other vfe */
+ if (stream_info->composite_irq[irq] & (1 << vfe_dev->pdev->id)) {
+ pr_err("%s: irq %d out of sync for dual vfe on vfe %d\n",
+ __func__, irq, vfe_dev->pdev->id);
+ return -EFAULT;
+ }
+
+ stream_info->composite_irq[irq] |= (1 << vfe_dev->pdev->id);
+ if (stream_info->composite_irq[irq] != stream_info->vfe_mask)
+ return 1;
+
+ stream_info->composite_irq[irq] = 0;
+
+ return 0;
+}
+
+static int msm_isp_stats_cfg_ping_pong_address(
struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status)
{
- int rc = -1, vfe_id = 0;
- struct msm_isp_buffer *buf;
- uint32_t pingpong_bit = 0;
- uint32_t stats_pingpong_offset;
+ int rc = -1;
+ struct msm_isp_buffer *buf = NULL;
uint32_t bufq_handle = stream_info->bufq_handle;
- uint32_t stats_idx = STATS_IDX(stream_info->stream_handle);
- struct dual_vfe_resource *dual_vfe_res = NULL;
- struct msm_vfe_stats_stream *dual_vfe_stream_info = NULL;
+ uint32_t stats_idx = STATS_IDX(stream_info->stream_handle[0]);
+ struct vfe_device *vfe_dev = stream_info->vfe_dev[0];
+ uint32_t stats_pingpong_offset;
+ uint32_t pingpong_bit;
+ int k;
if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type ||
stats_idx >= MSM_ISP_STATS_MAX) {
pr_err("%s Invalid stats index %d", __func__, stats_idx);
return -EINVAL;
}
-
- stats_pingpong_offset =
- vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
- stats_idx];
-
+ stats_pingpong_offset = vfe_dev->hw_info->stats_hw_info->
+ stats_ping_pong_offset[stats_idx];
pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
+ /* if buffer already exists then no need to replace */
+ if (stream_info->buf[pingpong_bit])
+ return 0;
rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id, bufq_handle, &buf);
+ vfe_dev->pdev->id, bufq_handle,
+ MSM_ISP_INVALID_BUF_INDEX, &buf);
if (rc == -EFAULT) {
msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
return rc;
}
- if (rc < 0 || NULL == buf)
- vfe_dev->error_info.stats_framedrop_count[stats_idx]++;
+ if (rc < 0 || NULL == buf) {
+ for (k = 0; k < stream_info->num_isp; k++)
+ stream_info->vfe_dev[k]->error_info.
+ stats_framedrop_count[stats_idx]++;
+ }
if (buf && buf->num_planes != 1) {
pr_err("%s: Invalid buffer\n", __func__);
@@ -82,58 +112,22 @@ static int msm_isp_stats_cfg_ping_pong_address(struct vfe_device *vfe_dev,
rc = -EINVAL;
goto buf_error;
}
- if (vfe_dev->is_split) {
- dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
- if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
- !dual_vfe_res->stats_data[ISP_VFE0] ||
- !dual_vfe_res->vfe_base[ISP_VFE1] ||
- !dual_vfe_res->stats_data[ISP_VFE1]) {
- pr_err("%s:%d error vfe0 %pK %pK vfe1 %pK %pK\n",
- __func__, __LINE__,
- dual_vfe_res->vfe_base[ISP_VFE0],
- dual_vfe_res->stats_data[ISP_VFE0],
- dual_vfe_res->vfe_base[ISP_VFE1],
- dual_vfe_res->stats_data[ISP_VFE1]);
- } else {
- for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
- dual_vfe_stream_info = &dual_vfe_res->
- stats_data[vfe_id]->
- stream_info[stats_idx];
- if (buf)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- update_ping_pong_addr(
- dual_vfe_res->vfe_base[vfe_id],
- dual_vfe_stream_info,
- pingpong_status,
- buf->mapped_info[0].paddr +
- dual_vfe_stream_info->
- buffer_offset);
- else
- msm_isp_stats_cfg_stream_scratch(
- vfe_dev,
- dual_vfe_stream_info,
- pingpong_status);
- dual_vfe_stream_info->buf[pingpong_bit]
- = buf;
- }
- }
- } else {
- if (buf)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- update_ping_pong_addr(
- vfe_dev->vfe_base, stream_info,
- pingpong_status, buf->mapped_info[0].paddr +
- stream_info->buffer_offset);
- else
- msm_isp_stats_cfg_stream_scratch(vfe_dev,
- stream_info, pingpong_status);
-
- stream_info->buf[pingpong_bit] = buf;
+ if (!buf) {
+ msm_isp_stats_cfg_stream_scratch(stream_info,
+ pingpong_status);
+ return 0;
}
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr(
+ vfe_dev, stream_info, pingpong_status,
+ buf->mapped_info[0].paddr +
+ stream_info->buffer_offset[k]);
+ }
+ stream_info->buf[pingpong_bit] = buf;
+ buf->pingpong_bit = pingpong_bit;
- if (buf)
- buf->pingpong_bit = pingpong_bit;
return 0;
buf_error:
vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
@@ -155,6 +149,8 @@ static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev,
struct msm_isp_buffer *done_buf;
uint32_t stats_pingpong_offset;
uint32_t stats_idx;
+ int vfe_idx;
+ unsigned long flags;
if (!vfe_dev || !ts || !buf_event || !stream_info) {
pr_err("%s:%d failed: invalid params %pK %pK %pK %pK\n",
@@ -163,6 +159,9 @@ static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev,
return -EINVAL;
}
frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+
sw_skip = &stream_info->sw_skip;
stats_event = &buf_event->u.stats;
@@ -182,73 +181,62 @@ static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev,
(struct msm_isp_sw_framskip));
}
}
- stats_idx = STATS_IDX(stream_info->stream_handle);
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev, stream_info);
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
stats_pingpong_offset =
vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
stats_idx];
pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
- done_buf = stream_info->buf[pingpong_bit];
-
- if (done_buf)
- buf_index = done_buf->buf_idx;
-
- rc = vfe_dev->buf_mgr->ops->update_put_buf_cnt(
- vfe_dev->buf_mgr, vfe_dev->pdev->id, stream_info->bufq_handle,
- buf_index, &ts->buf_time,
- frame_id, pingpong_bit);
-
- if (rc < 0) {
- if (rc == -EFAULT)
+ rc = msm_isp_composite_stats_irq(vfe_dev, stream_info,
+ MSM_ISP_COMP_IRQ_PING_BUFDONE + pingpong_bit);
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (rc < 0)
msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- pr_err("stats_buf_divert: update put buf cnt fail\n");
- return rc;
- }
-
- if (rc > 0) {
- ISP_DBG("%s: vfe_id %d buf_id %d bufq %x put_cnt 1\n", __func__,
- vfe_dev->pdev->id, buf_index,
- stream_info->bufq_handle);
+ ISP_EVENT_BUF_FATAL_ERROR);
return rc;
}
+ done_buf = stream_info->buf[pingpong_bit];
/* Program next buffer */
- rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev, stream_info,
+ stream_info->buf[pingpong_bit] = NULL;
+ rc = msm_isp_stats_cfg_ping_pong_address(stream_info,
pingpong_status);
- if (rc)
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+ if (!done_buf)
return rc;
- if (drop_buffer && done_buf) {
- rc = vfe_dev->buf_mgr->ops->buf_done(
+ buf_index = done_buf->buf_idx;
+ if (drop_buffer) {
+ vfe_dev->buf_mgr->ops->put_buf(
vfe_dev->buf_mgr,
done_buf->bufq_handle,
- done_buf->buf_idx, &ts->buf_time, frame_id, 0);
- if (rc == -EFAULT)
- msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- return rc;
+ done_buf->buf_idx);
+ } else {
+ /* divert native buffers */
+ vfe_dev->buf_mgr->ops->buf_divert(vfe_dev->buf_mgr,
+ done_buf->bufq_handle, done_buf->buf_idx,
+ &ts->buf_time, frame_id);
}
-
- if (done_buf) {
- stats_event->stats_buf_idxs
- [stream_info->stats_type] =
- done_buf->buf_idx;
- if (NULL == comp_stats_type_mask) {
- stats_event->stats_mask =
- 1 << stream_info->stats_type;
- ISP_DBG("%s: stats frameid: 0x%x %d bufq %x\n",
- __func__, buf_event->frame_id,
- stream_info->stats_type, done_buf->bufq_handle);
- msm_isp_send_event(vfe_dev,
- ISP_EVENT_STATS_NOTIFY +
- stream_info->stats_type,
- buf_event);
- } else {
- *comp_stats_type_mask |=
- 1 << stream_info->stats_type;
- }
+ stats_event->stats_buf_idxs
+ [stream_info->stats_type] =
+ done_buf->buf_idx;
+ if (comp_stats_type_mask == NULL) {
+ stats_event->stats_mask =
+ 1 << stream_info->stats_type;
+ ISP_DBG("%s: stats frameid: 0x%x %d bufq %x\n",
+ __func__, buf_event->frame_id,
+ stream_info->stats_type, done_buf->bufq_handle);
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_STATS_NOTIFY +
+ stream_info->stats_type,
+ buf_event);
+ } else {
+ *comp_stats_type_mask |=
+ 1 << stream_info->stats_type;
}
return rc;
@@ -275,8 +263,9 @@ static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev,
for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
if (!(stats_irq_mask & (1 << i)))
continue;
- stream_info = &vfe_dev->stats_data.stream_info[i];
- if (stream_info->state == STATS_INACTIVE) {
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
+ if (stream_info->state == STATS_INACTIVE ||
+ stream_info->state == STATS_STOPPING) {
pr_debug("%s: Warning! Stream already inactive. Drop irq handling\n",
__func__);
continue;
@@ -353,12 +342,17 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
}
int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
- struct msm_vfe_stats_stream_request_cmd *stream_req_cmd)
+ struct msm_vfe_stats_stream_request_cmd *stream_req_cmd,
+ struct msm_vfe_stats_stream *stream_info)
{
- int rc = -1;
- struct msm_vfe_stats_stream *stream_info = NULL;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ int rc = 0;
uint32_t stats_idx;
+ uint32_t framedrop_pattern;
+ uint32_t framedrop_period;
+ int i;
+
+ stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_stats_idx(stream_req_cmd->stats_type);
if (!(vfe_dev->hw_info->stats_hw_info->stats_capability_mask &
(1 << stream_req_cmd->stats_type))) {
@@ -366,16 +360,7 @@ int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
return rc;
}
- stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
- get_stats_idx(stream_req_cmd->stats_type);
-
- if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
- pr_err("%s Invalid stats index %d", __func__, stats_idx);
- return -EINVAL;
- }
-
- stream_info = &stats_data->stream_info[stats_idx];
- if (stream_info->state != STATS_AVALIABLE) {
+ if (stream_info->state != STATS_AVAILABLE) {
pr_err("%s: Stats already requested\n", __func__);
return rc;
}
@@ -389,17 +374,74 @@ int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
pr_err("%s: Invalid irq subsample pattern\n", __func__);
return rc;
}
+ if (stream_req_cmd->composite_flag >
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask) {
+ pr_err("%s: comp grp %d exceed max %d\n",
+ __func__, stream_req_cmd->composite_flag,
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask);
+ return -EINVAL;
+ }
- stream_info->session_id = stream_req_cmd->session_id;
- stream_info->stream_id = stream_req_cmd->stream_id;
- stream_info->composite_flag = stream_req_cmd->composite_flag;
- stream_info->stats_type = stream_req_cmd->stats_type;
- stream_info->buffer_offset = stream_req_cmd->buffer_offset;
- stream_info->framedrop_pattern = stream_req_cmd->framedrop_pattern;
- stream_info->init_stats_frame_drop = stream_req_cmd->init_frame_drop;
- stream_info->irq_subsample_pattern =
- stream_req_cmd->irq_subsample_pattern;
- stream_info->state = STATS_INACTIVE;
+ if (stream_info->num_isp == 0) {
+ stream_info->session_id = stream_req_cmd->session_id;
+ stream_info->stream_id = stream_req_cmd->stream_id;
+ stream_info->composite_flag = stream_req_cmd->composite_flag;
+ stream_info->stats_type = stream_req_cmd->stats_type;
+ framedrop_pattern = stream_req_cmd->framedrop_pattern;
+ if (framedrop_pattern == SKIP_ALL)
+ framedrop_pattern = 0;
+ else
+ framedrop_pattern = 1;
+ stream_info->framedrop_pattern = framedrop_pattern;
+ stream_info->init_stats_frame_drop =
+ stream_req_cmd->init_frame_drop;
+ stream_info->irq_subsample_pattern =
+ stream_req_cmd->irq_subsample_pattern;
+ framedrop_period = msm_isp_get_framedrop_period(
+ stream_req_cmd->framedrop_pattern);
+ stream_info->framedrop_period = framedrop_period;
+ } else {
+ if (stream_info->vfe_mask & (1 << vfe_dev->pdev->id)) {
+ pr_err("%s: stats %d already requested for vfe %d\n",
+ __func__, stats_idx, vfe_dev->pdev->id);
+ return -EINVAL;
+ }
+ if (stream_info->session_id != stream_req_cmd->session_id)
+ rc = -EINVAL;
+		if (stream_info->stream_id != stream_req_cmd->stream_id)
+			rc = -EINVAL;
+ if (stream_info->composite_flag !=
+ stream_req_cmd->composite_flag)
+ rc = -EINVAL;
+ if (stream_info->stats_type != stream_req_cmd->stats_type)
+ rc = -EINVAL;
+ framedrop_pattern = stream_req_cmd->framedrop_pattern;
+ if (framedrop_pattern == SKIP_ALL)
+ framedrop_pattern = 0;
+ else
+ framedrop_pattern = 1;
+ if (stream_info->framedrop_pattern != framedrop_pattern)
+ rc = -EINVAL;
+ framedrop_period = msm_isp_get_framedrop_period(
+ stream_req_cmd->framedrop_pattern);
+ if (stream_info->framedrop_period != framedrop_period)
+ rc = -EINVAL;
+ if (rc) {
+ pr_err("%s: Stats stream param mismatch between vfe\n",
+ __func__);
+ return rc;
+ }
+ }
+ stream_info->buffer_offset[stream_info->num_isp] =
+ stream_req_cmd->buffer_offset;
+ stream_info->vfe_dev[stream_info->num_isp] = vfe_dev;
+ stream_info->vfe_mask |= (1 << vfe_dev->pdev->id);
+ stream_info->num_isp++;
+ if (!vfe_dev->is_split || stream_info->num_isp == MAX_VFE) {
+ stream_info->state = STATS_INACTIVE;
+ for (i = 0; i < MSM_ISP_COMP_IRQ_MAX; i++)
+ stream_info->composite_irq[i] = 0;
+ }
if ((vfe_dev->stats_data.stream_handle_cnt << 8) == 0)
vfe_dev->stats_data.stream_handle_cnt++;
@@ -407,7 +449,8 @@ int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
stream_req_cmd->stream_handle =
(++vfe_dev->stats_data.stream_handle_cnt) << 8 | stats_idx;
- stream_info->stream_handle = stream_req_cmd->stream_handle;
+ stream_info->stream_handle[stream_info->num_isp - 1] =
+ stream_req_cmd->stream_handle;
return 0;
}
@@ -416,42 +459,39 @@ int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
int rc = -1;
struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
struct msm_vfe_stats_stream *stream_info = NULL;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
- uint32_t framedrop_period;
uint32_t stats_idx;
+ unsigned long flags;
- rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd);
- if (rc < 0) {
- pr_err("%s: create stream failed\n", __func__);
- return rc;
- }
-
- stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
+ stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_stats_idx(stream_req_cmd->stats_type);
if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
pr_err("%s Invalid stats index %d", __func__, stats_idx);
return -EINVAL;
}
- stream_info = &stats_data->stream_info[stats_idx];
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, stats_idx);
- framedrop_period = msm_isp_get_framedrop_period(
- stream_req_cmd->framedrop_pattern);
+ spin_lock_irqsave(&stream_info->lock, flags);
- if (stream_req_cmd->framedrop_pattern == SKIP_ALL)
- stream_info->framedrop_pattern = 0x0;
- else
- stream_info->framedrop_pattern = 0x1;
- stream_info->framedrop_period = framedrop_period - 1;
+ rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd, stream_info);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: create stream failed\n", __func__);
+ return rc;
+ }
if (stream_info->init_stats_frame_drop == 0)
vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev,
stream_info);
- msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
+ if (stream_info->state == STATS_INACTIVE) {
+ msm_isp_stats_cfg_stream_scratch(stream_info,
VFE_PING_FLAG);
- msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
+ msm_isp_stats_cfg_stream_scratch(stream_info,
VFE_PONG_FLAG);
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
return rc;
}
@@ -460,32 +500,112 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
int rc = -1;
struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd;
struct msm_vfe_stats_stream_release_cmd *stream_release_cmd = arg;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
int stats_idx = STATS_IDX(stream_release_cmd->stream_handle);
struct msm_vfe_stats_stream *stream_info = NULL;
+ int vfe_idx;
+ int i;
+ int k;
+ unsigned long flags;
if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
pr_err("%s Invalid stats index %d", __func__, stats_idx);
return -EINVAL;
}
- stream_info = &stats_data->stream_info[stats_idx];
- if (stream_info->state == STATS_AVALIABLE) {
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, stats_idx);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(
+ vfe_dev, stream_info);
+ if (vfe_idx == -ENOTTY || stream_info->stream_handle[vfe_idx] !=
+ stream_release_cmd->stream_handle) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: Invalid stream handle %x, expected %x\n",
+ __func__, stream_release_cmd->stream_handle,
+ vfe_idx != -ENOTTY ?
+ stream_info->stream_handle[vfe_idx] : 0);
+ return -EINVAL;
+ }
+ if (stream_info->state == STATS_AVAILABLE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: stream already release\n", __func__);
return rc;
- } else if (stream_info->state != STATS_INACTIVE) {
+ }
+ vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info);
+
+ if (stream_info->state != STATS_INACTIVE) {
stream_cfg_cmd.enable = 0;
stream_cfg_cmd.num_streams = 1;
stream_cfg_cmd.stream_handle[0] =
stream_release_cmd->stream_handle;
- rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+ spin_lock_irqsave(&stream_info->lock, flags);
}
- vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info);
- memset(stream_info, 0, sizeof(struct msm_vfe_stats_stream));
+ for (i = vfe_idx, k = vfe_idx + 1; k < stream_info->num_isp; k++, i++) {
+ stream_info->vfe_dev[i] = stream_info->vfe_dev[k];
+ stream_info->stream_handle[i] = stream_info->stream_handle[k];
+ stream_info->buffer_offset[i] = stream_info->buffer_offset[k];
+ }
+
+ stream_info->vfe_dev[stream_info->num_isp] = 0;
+ stream_info->stream_handle[stream_info->num_isp] = 0;
+ stream_info->buffer_offset[stream_info->num_isp] = 0;
+ stream_info->num_isp--;
+ stream_info->vfe_mask &= ~(1 << vfe_dev->pdev->id);
+ if (stream_info->num_isp == 0)
+ stream_info->state = STATS_AVAILABLE;
+
+ spin_unlock_irqrestore(&stream_info->lock, flags);
return 0;
}
+void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_stats_stream_release_cmd
+ stream_release_cmd[MSM_ISP_STATS_MAX];
+ struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd;
+ struct msm_vfe_stats_stream *stream_info;
+ int i;
+ int vfe_idx;
+ int num_stream = 0;
+ unsigned long flags;
+
+ stream_cfg_cmd.enable = 0;
+ stream_cfg_cmd.num_streams = 0;
+
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state == STATS_AVAILABLE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(vfe_dev,
+ stream_info);
+ if (vfe_idx == -ENOTTY) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ stream_release_cmd[num_stream++].stream_handle =
+ stream_info->stream_handle[vfe_idx];
+ if (stream_info->state == STATS_INACTIVE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ stream_cfg_cmd.stream_handle[
+ stream_cfg_cmd.num_streams] =
+ stream_info->stream_handle[vfe_idx];
+ stream_cfg_cmd.num_streams++;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+ if (stream_cfg_cmd.num_streams)
+ msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+
+ for (i = 0; i < num_stream; i++)
+ msm_isp_release_stats_stream(vfe_dev, &stream_release_cmd[i]);
+}
+
static int msm_isp_init_stats_ping_pong_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
@@ -497,108 +617,205 @@ static int msm_isp_init_stats_ping_pong_reg(
stream_info->stream_id);
if (stream_info->bufq_handle == 0) {
pr_err("%s: no buf configured for stream: 0x%x\n",
- __func__, stream_info->stream_handle);
+ __func__, stream_info->stream_handle[0]);
return -EINVAL;
}
- if ((vfe_dev->is_split && vfe_dev->pdev->id == 1) ||
- !vfe_dev->is_split) {
- rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PING_FLAG);
- if (rc < 0) {
- pr_err("%s: No free buffer for ping\n", __func__);
- return rc;
- }
- rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PONG_FLAG);
- if (rc < 0) {
- pr_err("%s: No free buffer for pong\n", __func__);
- return rc;
- }
+ rc = msm_isp_stats_cfg_ping_pong_address(
+ stream_info, VFE_PING_FLAG);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n", __func__);
+ return rc;
+ }
+ rc = msm_isp_stats_cfg_ping_pong_address(
+ stream_info, VFE_PONG_FLAG);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n", __func__);
+ return rc;
}
return rc;
}
-void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev)
+void __msm_isp_update_stats_framedrop_reg(
+ struct msm_vfe_stats_stream *stream_info)
{
- int i;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
- struct msm_vfe_stats_stream *stream_info = NULL;
+ int k;
+ struct vfe_device *vfe_dev;
- for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
- stream_info = &stats_data->stream_info[i];
- if (stream_info->state != STATS_ACTIVE)
- continue;
+ if (!stream_info->init_stats_frame_drop)
+ return;
+ stream_info->init_stats_frame_drop--;
+ if (stream_info->init_stats_frame_drop)
+ return;
- if (stream_info->init_stats_frame_drop) {
- stream_info->init_stats_frame_drop--;
- if (stream_info->init_stats_frame_drop == 0) {
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
- vfe_dev, stream_info);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev,
+ stream_info);
+
+ }
+}
+
+static void __msm_isp_stats_stream_update(
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t enable = 0;
+ uint8_t comp_flag = 0;
+ int k;
+ struct vfe_device *vfe_dev;
+ int index = STATS_IDX(stream_info->stream_handle[0]);
+
+ switch (stream_info->state) {
+ case STATS_INACTIVE:
+ case STATS_ACTIVE:
+ case STATS_AVAILABLE:
+ break;
+ case STATS_START_PENDING:
+		enable = 1; /* fallthrough */
+ case STATS_STOP_PENDING:
+ stream_info->state =
+ (stream_info->state == STATS_START_PENDING ?
+ STATS_STARTING : STATS_STOPPING);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, BIT(index), enable);
+ comp_flag = stream_info->composite_flag;
+ if (comp_flag) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ cfg_comp_mask(vfe_dev, BIT(index),
+ (comp_flag - 1), enable);
+ } else {
+ if (enable)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ cfg_wm_irq_mask(vfe_dev,
+ stream_info);
+ else
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ clear_wm_irq_mask(vfe_dev,
+ stream_info);
}
}
+ break;
+ case STATS_STARTING:
+ stream_info->state = STATS_ACTIVE;
+ complete_all(&stream_info->active_comp);
+ break;
+ case STATS_STOPPING:
+ stream_info->state = STATS_INACTIVE;
+ complete_all(&stream_info->inactive_comp);
+ break;
}
}
+
void msm_isp_stats_stream_update(struct vfe_device *vfe_dev)
{
int i;
- uint32_t enable = 0;
- uint8_t comp_flag = 0;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
- struct msm_vfe_stats_ops *stats_ops =
- &vfe_dev->hw_info->vfe_ops.stats_ops;
+ struct msm_vfe_stats_stream *stream_info;
+ unsigned long flags;
for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
- if (stats_data->stream_info[i].state == STATS_START_PENDING ||
- stats_data->stream_info[i].state ==
- STATS_STOP_PENDING) {
- enable = stats_data->stream_info[i].state ==
- STATS_START_PENDING ? 1 : 0;
- stats_data->stream_info[i].state =
- stats_data->stream_info[i].state ==
- STATS_START_PENDING ?
- STATS_STARTING : STATS_STOPPING;
- vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
- vfe_dev, BIT(i), enable);
- comp_flag = stats_data->stream_info[i].composite_flag;
- if (comp_flag)
- stats_ops->cfg_comp_mask(vfe_dev, BIT(i),
- (comp_flag - 1), enable);
- } else if (stats_data->stream_info[i].state == STATS_STARTING ||
- stats_data->stream_info[i].state == STATS_STOPPING) {
- stats_data->stream_info[i].state =
- stats_data->stream_info[i].state ==
- STATS_STARTING ? STATS_ACTIVE : STATS_INACTIVE;
- }
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
+ if (stream_info->state == STATS_AVAILABLE ||
+ stream_info->state == STATS_INACTIVE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ __msm_isp_stats_stream_update(stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
- atomic_sub(1, &stats_data->stats_update);
- if (!atomic_read(&stats_data->stats_update))
- complete(&vfe_dev->stats_config_complete);
}
-static int msm_isp_stats_wait_for_cfg_done(struct vfe_device *vfe_dev)
+void msm_isp_process_stats_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
+ enum msm_isp_comp_irq_types irq)
{
+ int i;
+ struct msm_vfe_stats_stream *stream_info;
+ unsigned long flags;
int rc;
- init_completion(&vfe_dev->stats_config_complete);
- atomic_set(&vfe_dev->stats_data.stats_update, 2);
- rc = wait_for_completion_timeout(
- &vfe_dev->stats_config_complete,
+
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
+ if (stream_info->state == STATS_AVAILABLE ||
+ stream_info->state == STATS_INACTIVE)
+ continue;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+
+ rc = msm_isp_composite_stats_irq(vfe_dev, stream_info, irq);
+
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (-EFAULT == rc) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return;
+ }
+ continue;
+ }
+
+ if (irq == MSM_ISP_COMP_IRQ_REG_UPD)
+ __msm_isp_stats_stream_update(stream_info);
+ else if (irq == MSM_ISP_COMP_IRQ_EPOCH &&
+ stream_info->state == STATS_ACTIVE)
+ __msm_isp_update_stats_framedrop_reg(stream_info);
+
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+}
+
+static int msm_isp_stats_wait_for_stream_cfg_done(
+ struct msm_vfe_stats_stream *stream_info,
+ int active)
+{
+ int rc = -1;
+
+ if (active && stream_info->state == STATS_ACTIVE)
+ rc = 0;
+ if (!active && stream_info->state == STATS_INACTIVE)
+ rc = 0;
+ if (rc == 0)
+ return rc;
+
+ rc = wait_for_completion_timeout(active ? &stream_info->active_comp :
+ &stream_info->inactive_comp,
msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
- if (rc == 0) {
- pr_err("%s: wait timeout\n", __func__);
- rc = -1;
+ if (rc <= 0) {
+ rc = rc ? rc : -ETIMEDOUT;
+ pr_err("%s: wait for stats stream %x idx %d state %d active %d config failed %d\n",
+ __func__, stream_info->stream_id,
+ STATS_IDX(stream_info->stream_handle[0]),
+ stream_info->state, active, rc);
} else {
rc = 0;
}
return rc;
}
+static int msm_isp_stats_wait_for_streams(
+ struct msm_vfe_stats_stream **streams,
+ int num_stream, int active)
+{
+ int rc = 0;
+ int i;
+ struct msm_vfe_stats_stream *stream_info;
+
+ for (i = 0; i < num_stream; i++) {
+ stream_info = streams[i];
+ rc |= msm_isp_stats_wait_for_stream_cfg_done(stream_info,
+ active);
+ }
+ return rc;
+}
+
static int msm_isp_stats_update_cgc_override(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
{
int i;
uint32_t stats_mask = 0, idx;
+ struct vfe_device *update_vfes[MAX_VFE] = {NULL, NULL};
+ struct msm_vfe_stats_stream *stream_info;
+ int k;
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
@@ -607,12 +824,33 @@ static int msm_isp_stats_update_cgc_override(struct vfe_device *vfe_dev,
pr_err("%s Invalid stats index %d", __func__, idx);
return -EINVAL;
}
- stats_mask |= 1 << idx;
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev,
+ idx);
+ if (stream_info->state == STATS_AVAILABLE)
+ continue;
+
+ /*
+ * we update cgc after making streams inactive or before
+ * starting streams, so stream should be in inactive state
+ */
+ if (stream_info->state == STATS_INACTIVE)
+ stats_mask |= 1 << idx;
+ for (k = 0; k < stream_info->num_isp; k++) {
+ if (update_vfes[stream_info->vfe_dev[k]->pdev->id])
+ continue;
+ update_vfes[stream_info->vfe_dev[k]->pdev->id] =
+ stream_info->vfe_dev[k];
+ }
}
- if (vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override) {
- vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override(
- vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ for (k = 0; k < MAX_VFE; k++) {
+ if (!update_vfes[k])
+ continue;
+ vfe_dev = update_vfes[k];
+ if (vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override(
+ vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ }
}
return 0;
}
@@ -621,61 +859,108 @@ int msm_isp_stats_reset(struct vfe_device *vfe_dev)
{
int i = 0, rc = 0;
struct msm_vfe_stats_stream *stream_info = NULL;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
struct msm_isp_timestamp timestamp;
+ struct vfe_device *update_vfes[MAX_VFE] = {NULL, NULL};
+ unsigned long flags;
+ int k;
msm_isp_get_timestamp(&timestamp);
- for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
- stream_info = &stats_data->stream_info[i];
- if (stream_info->state != STATS_ACTIVE)
+ if (vfe_dev->is_split) {
+ for (i = 0; i < MAX_VFE; i++)
+ update_vfes[i] = vfe_dev->common_data->dual_vfe_res->
+ vfe_dev[i];
+ } else {
+ update_vfes[vfe_dev->pdev->id] = vfe_dev;
+ }
+
+ for (k = 0; k < MAX_VFE; k++) {
+ vfe_dev = update_vfes[k];
+ if (!vfe_dev)
continue;
- rc = vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id, stream_info->bufq_handle,
- MSM_ISP_BUFFER_FLUSH_ALL, &timestamp.buf_time,
- vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
- if (rc == -EFAULT) {
- msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- return rc;
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = msm_isp_get_stats_stream_common_data(
+ vfe_dev, i);
+ if (stream_info->state == STATS_AVAILABLE ||
+ stream_info->state == STATS_INACTIVE)
+ continue;
+
+ if (stream_info->num_isp > 1 &&
+ vfe_dev->pdev->id == ISP_VFE0)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_stats_cfg_stream_scratch(stream_info,
+ VFE_PING_FLAG);
+ msm_isp_stats_cfg_stream_scratch(stream_info,
+ VFE_PONG_FLAG);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ rc = vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+ stream_info->bufq_handle,
+ MSM_ISP_BUFFER_FLUSH_ALL, &timestamp.buf_time,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
}
}
return rc;
}
-int msm_isp_stats_restart(struct vfe_device *vfe_dev)
+int msm_isp_stats_restart(struct vfe_device *vfe_dev_ioctl)
{
int i = 0;
struct msm_vfe_stats_stream *stream_info = NULL;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ unsigned long flags;
+ struct vfe_device *update_vfes[MAX_VFE] = {NULL, NULL};
+ struct vfe_device *vfe_dev;
+ int k;
+ int j;
+
+ if (vfe_dev_ioctl->is_split) {
+ for (i = 0; i < MAX_VFE; i++)
+ update_vfes[i] = vfe_dev_ioctl->common_data->
+ dual_vfe_res->vfe_dev[i];
+ } else {
+ update_vfes[vfe_dev_ioctl->pdev->id] = vfe_dev_ioctl;
+ }
- for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
- stream_info = &stats_data->stream_info[i];
- if (stream_info->state < STATS_ACTIVE)
+ for (k = 0; k < MAX_VFE; k++) {
+ vfe_dev = update_vfes[k];
+ if (!vfe_dev)
+ continue;
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = msm_isp_get_stats_stream_common_data(
+ vfe_dev, i);
+ if (stream_info->state == STATS_AVAILABLE ||
+ stream_info->state == STATS_INACTIVE)
+ continue;
+ if (stream_info->num_isp > 1 &&
+ vfe_dev->pdev->id == ISP_VFE0)
continue;
- msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ for (j = 0; j < MSM_ISP_COMP_IRQ_MAX; j++)
+ stream_info->composite_irq[j] = 0;
+ msm_isp_init_stats_ping_pong_reg(vfe_dev_ioctl,
+ stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
}
return 0;
}
-static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
+static int msm_isp_check_stream_cfg_cmd(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
{
- int i, rc = 0;
- uint32_t stats_mask = 0, idx;
- uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
- uint32_t num_stats_comp_mask = 0;
+ int i;
struct msm_vfe_stats_stream *stream_info;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
- num_stats_comp_mask =
- vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
- rc = vfe_dev->hw_info->vfe_ops.stats_ops.check_streams(
- stats_data->stream_info);
- if (rc < 0)
- return rc;
+ uint32_t idx;
+ int vfe_idx;
+
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
@@ -683,63 +968,99 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
pr_err("%s Invalid stats index %d", __func__, idx);
return -EINVAL;
}
-
- stream_info = &stats_data->stream_info[idx];
- if (stream_info->stream_handle !=
- stream_cfg_cmd->stream_handle[i]) {
- pr_err("%s: Invalid stream handle: 0x%x received\n",
- __func__, stream_cfg_cmd->stream_handle[i]);
- continue;
+ stream_info = msm_isp_get_stats_stream_common_data(
+ vfe_dev, idx);
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(vfe_dev,
+ stream_info);
+ if (vfe_idx == -ENOTTY || stream_info->stream_handle[vfe_idx] !=
+ stream_cfg_cmd->stream_handle[i]) {
+ pr_err("%s: Invalid stream handle: 0x%x received expected %x\n",
+ __func__, stream_cfg_cmd->stream_handle[i],
+ vfe_idx == -ENOTTY ? 0 :
+ stream_info->stream_handle[vfe_idx]);
+ return -EINVAL;
}
+ }
+ return 0;
+}
- if (stream_info->composite_flag > num_stats_comp_mask) {
- pr_err("%s: comp grp %d exceed max %d\n",
- __func__, stream_info->composite_flag,
- num_stats_comp_mask);
- return -EINVAL;
+static void __msm_isp_stop_stats_streams(
+ struct msm_vfe_stats_stream **streams,
+ int num_streams,
+ struct msm_isp_timestamp timestamp)
+{
+ int i;
+ int k;
+ struct msm_vfe_stats_stream *stream_info;
+ struct vfe_device *vfe_dev;
+ struct msm_vfe_stats_shared_data *stats_data;
+ unsigned long flags;
+
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ spin_lock_irqsave(&stream_info->lock, flags);
+ init_completion(&stream_info->inactive_comp);
+ stream_info->state = STATS_STOP_PENDING;
+ if (stream_info->vfe_dev[0]->
+ axi_data.src_info[VFE_PIX_0].active == 0) {
+ while (stream_info->state != STATS_INACTIVE)
+ __msm_isp_stats_stream_update(stream_info);
}
- rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
- if (rc < 0) {
- pr_err("%s: No buffer for stream%d\n", __func__, idx);
- return rc;
+ for (k = 0; k < stream_info->num_isp; k++) {
+ stats_data = &stream_info->vfe_dev[k]->stats_data;
+ stats_data->num_active_stream--;
}
- if (!stream_info->composite_flag)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- cfg_wm_irq_mask(vfe_dev, stream_info);
-
- if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
- stream_info->state = STATS_START_PENDING;
- else
- stream_info->state = STATS_ACTIVE;
-
- stats_data->num_active_stream++;
- stats_mask |= 1 << idx;
-
- if (stream_info->composite_flag > 0)
- comp_stats_mask[stream_info->composite_flag-1] |=
- 1 << idx;
-
- ISP_DBG("%s: stats_mask %x %x active streams %d\n",
- __func__, comp_stats_mask[0],
- comp_stats_mask[1],
- stats_data->num_active_stream);
+ msm_isp_stats_cfg_stream_scratch(
+ stream_info, VFE_PING_FLAG);
+ msm_isp_stats_cfg_stream_scratch(
+ stream_info, VFE_PONG_FLAG);
+ vfe_dev = stream_info->vfe_dev[0];
+		if (vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+			stream_info->bufq_handle,
+			MSM_ISP_BUFFER_FLUSH_ALL, &timestamp.buf_time,
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id) ==
+			-EFAULT)
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
- if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
- rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
- } else {
- vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
- vfe_dev, stats_mask, stream_cfg_cmd->enable);
- for (i = 0; i < num_stats_comp_mask; i++) {
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
- vfe_dev, comp_stats_mask[i], i, 1);
+ if (msm_isp_stats_wait_for_streams(streams, num_streams, 0)) {
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ if (stream_info->state == STATS_INACTIVE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ while (stream_info->state != STATS_INACTIVE)
+ __msm_isp_stats_stream_update(stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
}
- return rc;
}
-static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
+static int msm_isp_check_stats_stream_state(
+ struct msm_vfe_stats_stream *stream_info,
+ int cmd)
+{
+ switch (stream_info->state) {
+ case STATS_AVAILABLE:
+ return -EINVAL;
+ case STATS_INACTIVE:
+ if (cmd == 0)
+ return -EALREADY;
+ break;
+ case STATS_ACTIVE:
+ if (cmd)
+ return -EALREADY;
+ break;
+ default:
+ WARN(1, "Invalid stats state %d\n", stream_info->state);
+ }
+ return 0;
+}
+
+static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
{
int i, rc = 0;
@@ -747,95 +1068,125 @@ static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
uint32_t num_stats_comp_mask = 0;
struct msm_vfe_stats_stream *stream_info;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_vfe_stats_shared_data *stats_data;
+ int num_stream = 0;
+ struct msm_vfe_stats_stream *streams[MSM_ISP_STATS_MAX];
struct msm_isp_timestamp timestamp;
+ unsigned long flags;
+ int k;
+ struct vfe_device *update_vfes[MAX_VFE] = {NULL, NULL};
+ uint32_t num_active_streams[MAX_VFE] = {0, 0};
+ struct vfe_device *vfe_dev;
msm_isp_get_timestamp(&timestamp);
num_stats_comp_mask =
- vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
-
+ vfe_dev_ioctl->hw_info->stats_hw_info->num_stats_comp_mask;
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
-
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
-
- if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
- pr_err("%s Invalid stats index %d", __func__, idx);
- return -EINVAL;
- }
-
- stream_info = &stats_data->stream_info[idx];
- if (stream_info->stream_handle !=
- stream_cfg_cmd->stream_handle[i]) {
- pr_err("%s: Invalid stream handle: 0x%x received\n",
- __func__, stream_cfg_cmd->stream_handle[i]);
+ stream_info = msm_isp_get_stats_stream_common_data(
+ vfe_dev_ioctl, idx);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ rc = msm_isp_check_stats_stream_state(stream_info, 1);
+ if (rc == -EALREADY) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ rc = 0;
continue;
}
-
- if (stream_info->composite_flag > num_stats_comp_mask) {
- pr_err("%s: comp grp %d exceed max %d\n",
- __func__, stream_info->composite_flag,
- num_stats_comp_mask);
- return -EINVAL;
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ goto error;
}
+ rc = msm_isp_init_stats_ping_pong_reg(vfe_dev_ioctl,
+ stream_info);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: No buffer for stream%d\n", __func__, idx);
+ return rc;
+ }
+ init_completion(&stream_info->active_comp);
+ stream_info->state = STATS_START_PENDING;
+ if (vfe_dev_ioctl->axi_data.src_info[VFE_PIX_0].active == 0) {
+ while (stream_info->state != STATS_ACTIVE)
+ __msm_isp_stats_stream_update(stream_info);
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
- if (!stream_info->composite_flag)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- clear_wm_irq_mask(vfe_dev, stream_info);
-
- if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
- stream_info->state = STATS_STOP_PENDING;
- else
- stream_info->state = STATS_INACTIVE;
-
- stats_data->num_active_stream--;
stats_mask |= 1 << idx;
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ if (update_vfes[vfe_dev->pdev->id])
+ continue;
+ update_vfes[vfe_dev->pdev->id] = vfe_dev;
+ stats_data = &vfe_dev->stats_data;
+ num_active_streams[vfe_dev->pdev->id] =
+ stats_data->num_active_stream;
+ stats_data->num_active_stream++;
+ }
- if (stream_info->composite_flag > 0)
+ if (stream_info->composite_flag)
comp_stats_mask[stream_info->composite_flag-1] |=
1 << idx;
- msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PING_FLAG);
- msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PONG_FLAG);
-
ISP_DBG("%s: stats_mask %x %x active streams %d\n",
__func__, comp_stats_mask[0],
comp_stats_mask[1],
stats_data->num_active_stream);
+ streams[num_stream++] = stream_info;
}
- if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
- rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
- } else {
- vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
- vfe_dev, stats_mask, stream_cfg_cmd->enable);
- for (i = 0; i < num_stats_comp_mask; i++) {
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
- vfe_dev, comp_stats_mask[i], i, 0);
- }
+ for (k = 0; k < MAX_VFE; k++) {
+ if (!update_vfes[k] || num_active_streams[k])
+ continue;
+ vfe_dev = update_vfes[k];
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
}
+ rc = msm_isp_stats_wait_for_streams(streams, num_stream, 1);
+ if (rc)
+ goto error;
+ return 0;
+error:
+ __msm_isp_stop_stats_streams(streams, num_stream, timestamp);
+ return rc;
+}
+
+static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i, rc = 0;
+ uint32_t idx;
+ uint32_t num_stats_comp_mask = 0;
+ struct msm_vfe_stats_stream *stream_info;
+ struct msm_isp_timestamp timestamp;
+ int num_stream = 0;
+ struct msm_vfe_stats_stream *streams[MSM_ISP_STATS_MAX];
+ unsigned long flags;
+
+ msm_isp_get_timestamp(&timestamp);
+
+ num_stats_comp_mask =
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
- if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
- pr_err("%s Invalid stats index %d", __func__, idx);
- return -EINVAL;
- }
+ idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
- stream_info = &stats_data->stream_info[idx];
- rc = vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id, stream_info->bufq_handle,
- MSM_ISP_BUFFER_FLUSH_ALL, &timestamp.buf_time,
- vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
- if (rc == -EFAULT) {
- msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- return rc;
+ stream_info = msm_isp_get_stats_stream_common_data(
+ vfe_dev, idx);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ rc = msm_isp_check_stats_stream_state(stream_info, 0);
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ rc = 0;
+ continue;
}
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ streams[num_stream++] = stream_info;
}
+
+ __msm_isp_stop_stats_streams(streams, num_stream, timestamp);
+
return rc;
}
@@ -843,8 +1194,10 @@ int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0;
struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
- if (vfe_dev->stats_data.num_active_stream == 0)
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
+
+ rc = msm_isp_check_stream_cfg_cmd(vfe_dev, stream_cfg_cmd);
+ if (rc)
+ return rc;
if (stream_cfg_cmd->enable) {
msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
@@ -863,31 +1216,37 @@ int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0, i;
struct msm_vfe_stats_stream *stream_info;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
struct msm_isp_sw_framskip *sw_skip_info = NULL;
+ int vfe_idx;
+ int k;
/*validate request*/
for (i = 0; i < update_cmd->num_streams; i++) {
- update_info = &update_cmd->update_info[i];
+ update_info = (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
/*check array reference bounds*/
if (STATS_IDX(update_info->stream_handle)
> vfe_dev->hw_info->stats_hw_info->num_stats_type) {
pr_err("%s: stats idx %d out of bound!", __func__,
- STATS_IDX(update_info->stream_handle));
+ STATS_IDX(update_info->stream_handle));
return -EINVAL;
}
}
for (i = 0; i < update_cmd->num_streams; i++) {
- update_info = &update_cmd->update_info[i];
- stream_info = &stats_data->stream_info[
- STATS_IDX(update_info->stream_handle)];
- if (stream_info->stream_handle !=
+ update_info = (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev,
+ STATS_IDX(update_info->stream_handle));
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(vfe_dev,
+ stream_info);
+ if (vfe_idx == -ENOTTY || stream_info->stream_handle[vfe_idx] !=
update_info->stream_handle) {
pr_err("%s: stats stream handle %x %x mismatch!\n",
- __func__, stream_info->stream_handle,
+ __func__, vfe_idx != -ENOTTY ?
+ stream_info->stream_handle[vfe_idx] : 0,
update_info->stream_handle);
continue;
}
@@ -897,18 +1256,22 @@ int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
uint32_t framedrop_period =
msm_isp_get_framedrop_period(
update_info->skip_pattern);
- if (update_info->skip_pattern == SKIP_ALL)
+ if (update_info->skip_pattern ==
+ SKIP_ALL)
stream_info->framedrop_pattern = 0x0;
else
stream_info->framedrop_pattern = 0x1;
stream_info->framedrop_period = framedrop_period - 1;
if (stream_info->init_stats_frame_drop == 0)
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
- vfe_dev, stream_info);
+ for (k = 0; k < stream_info->num_isp; k++)
+ stream_info->vfe_dev[k]->hw_info->
+ vfe_ops.stats_ops.cfg_wm_reg(
+						stream_info->vfe_dev[k], stream_info);
break;
}
case UPDATE_STREAM_SW_FRAME_DROP: {
- sw_skip_info = &update_info->sw_skip_info;
+ sw_skip_info =
+ &update_info->sw_skip_info;
if (!stream_info->sw_skip.stream_src_mask)
stream_info->sw_skip = *sw_skip_info;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
index 01120b65be92..e9728f33fae1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,8 +23,58 @@ int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg);
-void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev);
void msm_isp_stats_disable(struct vfe_device *vfe_dev);
int msm_isp_stats_reset(struct vfe_device *vfe_dev);
int msm_isp_stats_restart(struct vfe_device *vfe_dev);
+void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev);
+void msm_isp_process_stats_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
+ enum msm_isp_comp_irq_types irq);
+
+static inline int msm_isp_get_vfe_idx_for_stats_stream_user(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int vfe_idx;
+
+ for (vfe_idx = 0; vfe_idx < stream_info->num_isp; vfe_idx++)
+ if (stream_info->vfe_dev[vfe_idx] == vfe_dev)
+ return vfe_idx;
+ return -ENOTTY;
+}
+
+static inline int msm_isp_get_vfe_idx_for_stats_stream(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(vfe_dev,
+ stream_info);
+
+ if (vfe_idx < 0) {
+ WARN(1, "%s vfe index missing for stream %d vfe %d\n",
+ __func__, stream_info->stats_type, vfe_dev->pdev->id);
+ vfe_idx = 0;
+ }
+ return vfe_idx;
+}
+
+static inline struct msm_vfe_stats_stream *
+ msm_isp_get_stats_stream_common_data(
+ struct vfe_device *vfe_dev,
+ enum msm_isp_stats_type idx)
+{
+ if (vfe_dev->is_split)
+ return &vfe_dev->common_data->stats_streams[idx];
+ else
+ return &vfe_dev->common_data->stats_streams[idx +
+ MSM_ISP_STATS_MAX * vfe_dev->pdev->id];
+}
+
+static inline struct msm_vfe_stats_stream *
+ msm_isp_get_stats_stream(struct dual_vfe_resource *dual_vfe_res,
+ int vfe_id,
+ enum msm_isp_stats_type idx)
+{
+ return msm_isp_get_stats_stream_common_data(
+ dual_vfe_res->vfe_dev[vfe_id], idx);
+}
#endif /* __MSM_ISP_STATS_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index e47a8de30aa9..a4eb80f31984 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -25,6 +25,22 @@
static DEFINE_MUTEX(bandwidth_mgr_mutex);
static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
+#define MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev) { \
+ if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) { \
+ struct vfe_device *vfe1_dev = vfe_dev->common_data-> \
+ dual_vfe_res->vfe_dev[ISP_VFE1]; \
+ mutex_lock(&vfe1_dev->core_mutex); \
+ } \
+}
+
+#define MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev) { \
+ if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) { \
+ struct vfe_device *vfe1_dev = vfe_dev->common_data-> \
+ dual_vfe_res->vfe_dev[ISP_VFE1]; \
+ mutex_unlock(&vfe1_dev->core_mutex); \
+ } \
+}
+
static uint64_t msm_isp_cpp_clk_rate;
#define VFE40_8974V2_VERSION 0x1001001A
@@ -354,9 +370,6 @@ void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
struct msm_vfe_fetch_engine_info *fetch_engine_info)
{
struct msm_isp_event_data fe_rd_done_event;
- if (!fetch_engine_info->is_busy)
- return;
-
memset(&fe_rd_done_event, 0, sizeof(struct msm_isp_event_data));
fe_rd_done_event.frame_id =
vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
@@ -762,26 +775,39 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
}
case VIDIOC_MSM_ISP_REQUEST_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_request_axi_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_RELEASE_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_release_axi_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_CFG_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_AXI_HALT:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_axi_halt(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_AXI_RESET:
mutex_lock(&vfe_dev->core_mutex);
+ /* For dual vfe reset both on vfe1 call */
+ if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) {
+ mutex_unlock(&vfe_dev->core_mutex);
+ return 0;
+ }
if (atomic_read(&vfe_dev->error_info.overflow_state)
!= HALT_ENFORCED) {
rc = msm_isp_stats_reset(vfe_dev);
@@ -796,6 +822,11 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
break;
case VIDIOC_MSM_ISP_AXI_RESTART:
mutex_lock(&vfe_dev->core_mutex);
+ /* For dual vfe restart both on vfe1 call */
+ if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) {
+ mutex_unlock(&vfe_dev->core_mutex);
+ return 0;
+ }
if (atomic_read(&vfe_dev->error_info.overflow_state)
!= HALT_ENFORCED) {
rc = msm_isp_stats_restart(vfe_dev);
@@ -848,27 +879,37 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
break;
case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_request_stats_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_release_stats_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_update_stats_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_UPDATE_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_update_axi_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_SMMU_ATTACH:
@@ -883,10 +924,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
vfe_dev->isp_raw2_debug = 0;
break;
case MSM_SD_UNNOTIFY_FREEZE:
- break;
case MSM_SD_SHUTDOWN:
- while (vfe_dev->vfe_open_cnt != 0)
- msm_isp_close_node(sd, NULL);
break;
default:
@@ -1631,8 +1669,8 @@ static int msm_isp_process_iommu_page_fault(struct vfe_device *vfe_dev)
{
int rc = vfe_dev->buf_mgr->pagefault_debug_disable;
- pr_err("%s:%d] VFE%d Handle Page fault! vfe_dev %pK\n", __func__,
- __LINE__, vfe_dev->pdev->id, vfe_dev);
+ pr_err("%s:%d] VFE%d Handle Page fault!\n", __func__,
+ __LINE__, vfe_dev->pdev->id);
msm_isp_halt_send_error(vfe_dev, ISP_EVENT_IOMMU_P_FAULT);
@@ -1899,6 +1937,7 @@ static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
if (vfe_dev->vfe_open_cnt > 0) {
atomic_set(&vfe_dev->error_info.overflow_state,
HALT_ENFORCED);
+ pr_err("%s: fault address is %lx\n", __func__, iova);
msm_isp_process_iommu_page_fault(vfe_dev);
} else {
pr_err("%s: no handling, vfe open cnt = %d\n",
@@ -1928,9 +1967,6 @@ int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
return -EINVAL;
}
- if (vfe_dev->pdev->id == ISP_VFE0)
- vfe_dev->common_data->dual_vfe_res->epoch_sync_mask = 0;
-
mutex_lock(&vfe_dev->realtime_mutex);
mutex_lock(&vfe_dev->core_mutex);
@@ -2032,6 +2068,10 @@ int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_unlock(&vfe_dev->realtime_mutex);
return 0;
}
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
+ msm_isp_release_all_axi_stream(vfe_dev);
+ msm_isp_release_all_stats_stream(vfe_dev);
+
/* Unregister page fault handler */
cam_smmu_reg_client_page_fault_handler(
vfe_dev->buf_mgr->iommu_hdl,
@@ -2059,6 +2099,7 @@ int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
msm_isp_end_avtimer();
vfe_dev->vt_enable = 0;
}
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
vfe_dev->is_split = 0;
mutex_unlock(&vfe_dev->core_mutex);
@@ -2088,25 +2129,3 @@ void msm_isp_flush_tasklet(struct vfe_device *vfe_dev)
return;
}
-void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src)
-{
- struct msm_vfe_axi_stream *stream_info = NULL;
- uint32_t j = 0;
- unsigned long flags;
-
- for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
- stream_info = &vfe_dev->axi_data.stream_info[j];
- if (stream_info->state != ACTIVE)
- continue;
- if (frame_src != SRC_TO_INTF(stream_info->stream_src))
- continue;
-
- stream_info =
- &vfe_dev->axi_data.stream_info[j];
- spin_lock_irqsave(&stream_info->lock, flags);
- stream_info->activated_framedrop_period =
- stream_info->requested_framedrop_period;
- spin_unlock_irqrestore(&stream_info->lock, flags);
- }
-}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
index 9df60c0d7383..16e3198f35b7 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
@@ -70,7 +70,5 @@ void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
struct msm_vfe_fetch_engine_info *fetch_engine_info);
void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format);
void msm_isp_flush_tasklet(struct vfe_device *vfe_dev);
-void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src);
void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp);
#endif /* __MSM_ISP_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c
index 2af70b2ee1bf..3b38882c4c45 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c
@@ -1390,7 +1390,7 @@ int msm_jpegdma_hw_get_max_downscale(struct msm_jpegdma_device *dma)
*/
int msm_jpegdma_hw_get_qos(struct msm_jpegdma_device *dma)
{
- int i;
+ int i, j;
int ret;
unsigned int cnt;
const void *property;
@@ -1401,32 +1401,37 @@ int msm_jpegdma_hw_get_qos(struct msm_jpegdma_device *dma)
dev_dbg(dma->dev, "Missing qos settings\n");
return 0;
}
+
cnt /= 4;
+ if (cnt % 2)
+ return -EINVAL;
+
+ dma->qos_regs_num = cnt / 2;
- dma->qos_regs = kzalloc((sizeof(*dma->qos_regs) * cnt), GFP_KERNEL);
+ dma->qos_regs = kzalloc((sizeof(struct jpegdma_reg_cfg) *
+ dma->qos_regs_num), GFP_KERNEL);
if (!dma->qos_regs)
return -ENOMEM;
- for (i = 0; i < cnt; i = i + 2) {
+ for (i = 0, j = 0; i < cnt; i += 2, j++) {
ret = of_property_read_u32_index(dma->dev->of_node,
"qcom,qos-reg-settings", i,
- &dma->qos_regs[i].reg);
+ &dma->qos_regs[j].reg);
if (ret < 0) {
- dev_err(dma->dev, "can not read qos reg %d\n", i);
+ dev_err(dma->dev, "can not read qos reg %d\n", j);
goto error;
}
ret = of_property_read_u32_index(dma->dev->of_node,
"qcom,qos-reg-settings", i + 1,
- &dma->qos_regs[i].val);
+ &dma->qos_regs[j].val);
if (ret < 0) {
- dev_err(dma->dev, "can not read qos setting %d\n", i);
+ dev_err(dma->dev, "can not read qos setting %d\n", j);
goto error;
}
- dev_dbg(dma->dev, "Qos idx %d, reg %x val %x\n", i,
- dma->qos_regs[i].reg, dma->qos_regs[i].val);
+ dev_dbg(dma->dev, "Qos idx %d, reg %x val %x\n", j,
+ dma->qos_regs[j].reg, dma->qos_regs[j].val);
}
- dma->qos_regs_num = cnt;
return 0;
error:
@@ -1452,7 +1457,7 @@ void msm_jpegdma_hw_put_qos(struct msm_jpegdma_device *dma)
*/
int msm_jpegdma_hw_get_vbif(struct msm_jpegdma_device *dma)
{
- int i;
+ int i, j;
int ret;
unsigned int cnt;
const void *property;
@@ -1463,33 +1468,38 @@ int msm_jpegdma_hw_get_vbif(struct msm_jpegdma_device *dma)
dev_dbg(dma->dev, "Missing vbif settings\n");
return 0;
}
+
cnt /= 4;
+ if (cnt % 2)
+ return -EINVAL;
- dma->vbif_regs = kzalloc((sizeof(*dma->vbif_regs) * cnt), GFP_KERNEL);
+ dma->vbif_regs_num = cnt / 2;
+
+ dma->vbif_regs = kzalloc((sizeof(struct jpegdma_reg_cfg) *
+ dma->vbif_regs_num), GFP_KERNEL);
if (!dma->vbif_regs)
return -ENOMEM;
- for (i = 0; i < cnt; i = i + 2) {
+ for (i = 0, j = 0; i < cnt; i += 2, j++) {
ret = of_property_read_u32_index(dma->dev->of_node,
"qcom,vbif-reg-settings", i,
- &dma->vbif_regs[i].reg);
+ &dma->vbif_regs[j].reg);
if (ret < 0) {
- dev_err(dma->dev, "can not read vbif reg %d\n", i);
+ dev_err(dma->dev, "can not read vbif reg %d\n", j);
goto error;
}
ret = of_property_read_u32_index(dma->dev->of_node,
"qcom,vbif-reg-settings", i + 1,
- &dma->vbif_regs[i].val);
+ &dma->vbif_regs[j].val);
if (ret < 0) {
- dev_err(dma->dev, "can not read vbif setting %d\n", i);
+ dev_err(dma->dev, "can not read vbif setting %d\n", j);
goto error;
}
- dev_dbg(dma->dev, "Vbif idx %d, reg %x val %x\n", i,
- dma->vbif_regs[i].reg, dma->vbif_regs[i].val);
+ dev_dbg(dma->dev, "Vbif idx %d, reg %x val %x\n", j,
+ dma->vbif_regs[j].reg, dma->vbif_regs[j].val);
}
- dma->vbif_regs_num = cnt;
return 0;
error:
@@ -1515,7 +1525,7 @@ void msm_jpegdma_hw_put_vbif(struct msm_jpegdma_device *dma)
*/
int msm_jpegdma_hw_get_prefetch(struct msm_jpegdma_device *dma)
{
- int i;
+ int i, j;
int ret;
unsigned int cnt;
const void *property;
@@ -1526,35 +1536,39 @@ int msm_jpegdma_hw_get_prefetch(struct msm_jpegdma_device *dma)
dev_dbg(dma->dev, "Missing prefetch settings\n");
return 0;
}
+
cnt /= 4;
+ if (cnt % 2)
+ return -EINVAL;
+
+ dma->prefetch_regs_num = cnt / 2;
- dma->prefetch_regs = kcalloc(cnt, sizeof(*dma->prefetch_regs),
- GFP_KERNEL);
+ dma->prefetch_regs = kzalloc((sizeof(struct jpegdma_reg_cfg) *
+ dma->prefetch_regs_num), GFP_KERNEL);
if (!dma->prefetch_regs)
return -ENOMEM;
- for (i = 0; i < cnt; i = i + 2) {
+ for (i = 0, j = 0; i < cnt; i += 2, j++) {
ret = of_property_read_u32_index(dma->dev->of_node,
"qcom,prefetch-reg-settings", i,
- &dma->prefetch_regs[i].reg);
+ &dma->prefetch_regs[j].reg);
if (ret < 0) {
- dev_err(dma->dev, "can not read prefetch reg %d\n", i);
+ dev_err(dma->dev, "can not read prefetch reg %d\n", j);
goto error;
}
ret = of_property_read_u32_index(dma->dev->of_node,
"qcom,prefetch-reg-settings", i + 1,
- &dma->prefetch_regs[i].val);
+ &dma->prefetch_regs[j].val);
if (ret < 0) {
dev_err(dma->dev, "can not read prefetch setting %d\n",
- i);
+ j);
goto error;
}
- dev_dbg(dma->dev, "Prefetch idx %d, reg %x val %x\n", i,
- dma->prefetch_regs[i].reg, dma->prefetch_regs[i].val);
+ dev_dbg(dma->dev, "Prefetch idx %d, reg %x val %x\n", j,
+ dma->prefetch_regs[j].reg, dma->prefetch_regs[j].val);
}
- dma->prefetch_regs_num = cnt;
return 0;
error:
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h
index c2b928d2b5d3..198d130b24fc 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h
@@ -101,4 +101,61 @@ struct csiphy_reg_3ph_parms_t csiphy_v5_0_3ph = {
{0x38, 0xFE},
{0x81c, 0x6},
};
+
+struct csiphy_settings_t csiphy_combo_mode_v5_0 = {
+ {
+ {0x818, 0x1},
+ {0x81c, 0x2},
+ {0x004, 0x08},
+ {0x704, 0x08},
+ {0x204, 0x08},
+ {0x404, 0x08},
+ {0x604, 0x08},
+ {0x02c, 0x1},
+ {0x22c, 0x1},
+ {0x42c, 0x1},
+ {0x62c, 0x1},
+ {0x72c, 0x1},
+ {0x034, 0x0f},
+ {0x234, 0x0f},
+ {0x434, 0x0f},
+ {0x634, 0x0f},
+ {0x734, 0x0f},
+ {0x01c, 0x0a},
+ {0x21c, 0x0a},
+ {0x41c, 0x0a},
+ {0x61c, 0x0a},
+ {0x71c, 0x0a},
+ {0x014, 0x60},
+ {0x214, 0x60},
+ {0x414, 0x60},
+ {0x614, 0x60},
+ {0x714, 0x60},
+ {0x728, 0x4},
+ {0x428, 0x0a},
+ {0x628, 0x0e},
+ {0x03c, 0xb8},
+ {0x73c, 0xb8},
+ {0x23c, 0xb8},
+ {0x43c, 0xb8},
+ {0x63c, 0xb8},
+ {0x000, 0x91},
+ {0x700, 0x80},
+ {0x200, 0x91},
+ {0x400, 0x91},
+ {0x600, 0x80},
+ {0x70c, 0xA5},
+ {0x60c, 0xA5},
+ {0x010, 0x52},
+ {0x710, 0x52},
+ {0x210, 0x52},
+ {0x410, 0x52},
+ {0x610, 0x52},
+ {0x038, 0xfe},
+ {0x738, 0x1f},
+ {0x238, 0xfe},
+ {0x438, 0xfe},
+ {0x638, 0x1f},
+ }
+};
#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index 30af06dc64f5..bdee620e8d45 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -46,7 +46,7 @@
#define NUM_LANES_OFFSET 4
#define CSI_3PHASE_HW 1
-#define MAX_LANES 4
+#define MAX_DPHY_DATA_LN 4
#define CLOCK_OFFSET 0x700
#define CSIPHY_SOF_DEBUG_COUNT 2
@@ -55,6 +55,22 @@
static struct v4l2_file_operations msm_csiphy_v4l2_subdev_fops;
+static void msm_csiphy_write_settings(
+ struct csiphy_device *csiphy_dev,
+ struct csiphy_settings_t csiphy_settings)
+{
+ int i = 0;
+
+ for (i = 0; i < MAX_CSIPHY_SETTINGS; i++) {
+ if (csiphy_settings.settings[i].addr == 0 &&
+ csiphy_settings.settings[i].data == 0)
+ break;
+
+ msm_camera_io_w(csiphy_settings.settings[i].data,
+ csiphy_dev->base + csiphy_settings.settings[i].addr);
+ }
+}
+
static void msm_csiphy_cphy_irq_config(
struct csiphy_device *csiphy_dev,
struct msm_camera_csiphy_params *csiphy_params)
@@ -418,7 +434,7 @@ static int msm_csiphy_2phase_lane_config(
csiphybase = csiphy_dev->base;
lane_mask = csiphy_params->lane_mask & 0x1f;
- for (i = 0; i < MAX_LANES; i++) {
+ for (i = 0; i < MAX_DPHY_DATA_LN; i++) {
if (mask == 0x2) {
if (lane_mask & mask)
lane_enable |= 0x80;
@@ -437,7 +453,7 @@ static int msm_csiphy_2phase_lane_config(
csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
mipi_csiphy_3ph_cmn_ctrl6.addr);
- for (i = 0, mask = 0x1; i < MAX_LANES; i++) {
+ for (i = 0, mask = 0x1; i < MAX_DPHY_DATA_LN; i++) {
if (!(lane_mask & mask)) {
if (mask == 0x2)
i--;
@@ -562,112 +578,134 @@ static int msm_csiphy_2phase_lane_config_v50(
struct csiphy_device *csiphy_dev,
struct msm_camera_csiphy_params *csiphy_params)
{
- uint32_t val = 0, lane_enable = 0, clk_lane, mask = 1;
+ uint32_t lane_enable = 0, mask = 1;
uint16_t lane_mask = 0, i = 0, offset;
void __iomem *csiphybase;
csiphybase = csiphy_dev->base;
lane_mask = csiphy_params->lane_mask & 0x1f;
- for (i = 0; i < MAX_LANES; i++) {
+
+ lane_enable = msm_camera_io_r(csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl5.addr);
+
+ /* write settle count and lane_enable */
+ for (i = 0; i < MAX_DPHY_DATA_LN; i++) {
if (mask == 0x2) {
if (lane_mask & mask)
lane_enable |= 0x80;
i--;
- } else if (lane_mask & mask)
+ offset = CLOCK_OFFSET;
+ } else if (lane_mask & mask) {
lane_enable |= 0x1 << (i<<1);
+ offset = 0x200*i;
+ }
+
+ if (lane_mask & mask)
+ msm_camera_io_w((csiphy_params->settle_cnt & 0xFF),
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg2.addr + offset);
mask <<= 1;
}
- CDBG("%s:%d lane_enable: %d\n", __func__, __LINE__, lane_enable);
+ CDBG("%s:%d lane_enable: 0x%x\n", __func__, __LINE__, lane_enable);
msm_camera_io_w(lane_enable,
csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
mipi_csiphy_3ph_cmn_ctrl5.addr);
- msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_3ph_cmn_ctrl6.data,
- csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_3ph_cmn_ctrl6.addr);
- msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_3ph_cmn_ctrl7.data,
- csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_3ph_cmn_ctrl7.addr);
-
- for (i = 0, mask = 0x1; i < MAX_LANES; i++) {
- if (!(lane_mask & mask)) {
- if (mask == 0x2)
- i--;
- mask <<= 0x1;
- continue;
- }
- if (mask == 0x2) {
- val = 4;
- offset = CLOCK_OFFSET;
- clk_lane = 1;
- i--;
- } else {
- offset = 0x200*i;
- val = 0;
- clk_lane = 0;
- }
- if (csiphy_params->combo_mode == 1) {
- val |= 0xA;
- if (mask == csiphy_dev->ctrl_reg->
- csiphy_reg.combo_clk_mask) {
- val |= 0x4;
- clk_lane = 1;
- }
- }
- msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl11.data,
- csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl11.addr + offset);
- msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl13.data,
- csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl13.addr + offset);
+ /* write mode specific settings */
+ if (csiphy_params->combo_mode == 1)
+ msm_csiphy_write_settings(csiphy_dev,
+ csiphy_dev->ctrl_reg->csiphy_combo_mode_settings);
+ else {
msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_cfg7.data,
- csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_cfg7.addr + offset);
- msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_cfg5.data,
+ mipi_csiphy_3ph_cmn_ctrl6.data,
csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_cfg5.addr + offset);
- if (clk_lane == 1)
- msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnck_ctrl10.data,
- csiphybase +
- csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnck_ctrl10.addr);
- msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl15.data,
- csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl15.addr + offset);
- msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl0.data,
- csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl0.addr + offset);
+ mipi_csiphy_3ph_cmn_ctrl6.addr);
msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_cfg1.data,
- csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_cfg1.addr + offset);
- msm_camera_io_w((csiphy_params->settle_cnt & 0xFF),
+ mipi_csiphy_3ph_cmn_ctrl7.data,
csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_cfg2.addr + offset);
- if (clk_lane == 1)
- msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnck_ctrl3.data, csiphybase +
- csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnck_ctrl3.addr);
+ mipi_csiphy_3ph_cmn_ctrl7.addr);
msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_cfg4.data, csiphybase +
+ mipi_csiphy_2ph_lnck_ctrl10.data,
+ csiphybase +
csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_cfg4.addr + offset);
+ mipi_csiphy_2ph_lnck_ctrl10.addr);
msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl14.data,
- csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl14.addr + offset);
- mask <<= 1;
+ mipi_csiphy_2ph_lnck_ctrl3.data, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnck_ctrl3.addr);
+
+ for (i = 0, mask = 0x1; i < MAX_DPHY_DATA_LN; i++) {
+ if (!(lane_mask & mask)) {
+ if (mask == 0x2)
+ i--;
+ mask <<= 0x1;
+ continue;
+ }
+ if (mask == 0x2) {
+ offset = CLOCK_OFFSET;
+ i--;
+ } else {
+ offset = 0x200*i;
+ }
+
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl11.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl11.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl13.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl13.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg7.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg7.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg5.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg5.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl15.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl15.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl0.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl0.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg1.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg1.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg4.data, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg4.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl14.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl14.addr + offset);
+ mask <<= 1;
+ }
}
msm_csiphy_cphy_irq_config(csiphy_dev, csiphy_params);
return 0;
@@ -986,19 +1024,20 @@ static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
}
CDBG("%s:%d called\n", __func__, __LINE__);
+ if (csiphy_dev->ref_count++) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return rc;
+ }
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
if (csiphy_dev->csiphy_state == CSIPHY_POWER_UP) {
pr_err("%s: csiphy invalid state %d\n", __func__,
csiphy_dev->csiphy_state);
rc = -EINVAL;
return rc;
}
- CDBG("%s:%d called\n", __func__, __LINE__);
- if (csiphy_dev->ref_count++) {
- CDBG("%s csiphy refcount = %d\n", __func__,
- csiphy_dev->ref_count);
- return rc;
- }
CDBG("%s:%d called\n", __func__, __LINE__);
rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
@@ -1063,6 +1102,14 @@ static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
return rc;
}
csiphy_dev->csiphy_sof_debug_count = 0;
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ if (csiphy_dev->ref_count++) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return rc;
+ }
+
CDBG("%s:%d called\n", __func__, __LINE__);
if (csiphy_dev->csiphy_state == CSIPHY_POWER_UP) {
pr_err("%s: csiphy invalid state %d\n", __func__,
@@ -1070,13 +1117,7 @@ static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
rc = -EINVAL;
return rc;
}
- CDBG("%s:%d called\n", __func__, __LINE__);
- if (csiphy_dev->ref_count++) {
- CDBG("%s csiphy refcount = %d\n", __func__,
- csiphy_dev->ref_count);
- return rc;
- }
CDBG("%s:%d called\n", __func__, __LINE__);
rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CSIPHY,
CAM_AHB_SVS_VOTE);
@@ -1651,6 +1692,8 @@ static int csiphy_probe(struct platform_device *pdev)
new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v5_0;
new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V50;
new_csiphy_dev->csiphy_3phase = CSI_3PHASE_HW;
+ new_csiphy_dev->ctrl_reg->csiphy_combo_mode_settings =
+ csiphy_combo_mode_v5_0;
} else {
pr_err("%s:%d, invalid hw version : 0x%x\n", __func__, __LINE__,
new_csiphy_dev->hw_dts_version);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index 4944ac606bc8..aba88da1157e 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -24,12 +24,17 @@
#define MAX_CSIPHY 3
#define CSIPHY_NUM_CLK_MAX 16
+#define MAX_CSIPHY_SETTINGS 120
struct csiphy_reg_t {
uint32_t addr;
uint32_t data;
};
+struct csiphy_settings_t {
+ struct csiphy_reg_t settings[MAX_CSIPHY_SETTINGS];
+};
+
struct csiphy_reg_parms_t {
/*MIPI CSI PHY registers*/
uint32_t mipi_csiphy_lnn_cfg1_addr;
@@ -140,6 +145,7 @@ struct csiphy_reg_3ph_parms_t {
struct csiphy_ctrl_t {
struct csiphy_reg_parms_t csiphy_reg;
struct csiphy_reg_3ph_parms_t csiphy_3ph_reg;
+ struct csiphy_settings_t csiphy_combo_mode_settings;
};
enum msm_csiphy_state_t {
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
index d0ce1de1162a..5f749bd46273 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
+#include <linux/leds-qpnp-flash.h>
#include "msm_flash.h"
#include "msm_camera_dt_util.h"
#include "msm_cci.h"
@@ -491,6 +492,45 @@ static int32_t msm_flash_init(
return 0;
}
+static int32_t msm_flash_prepare(
+ struct msm_flash_ctrl_t *flash_ctrl)
+{
+ int32_t ret = 0;
+
+ CDBG("%s:%d: State : %d\n",
+ __func__, __LINE__, flash_ctrl->flash_state);
+
+ if (flash_ctrl->switch_trigger == NULL) {
+ pr_err("%s:%d Invalid argument\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT &&
+ flash_ctrl->is_regulator_enabled == 0) {
+ ret = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+ ENABLE_REGULATOR, NULL);
+ if (ret < 0) {
+ pr_err("%s:%d regulator enable failed ret = %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
+ flash_ctrl->is_regulator_enabled = 1;
+ } else if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_RELEASE &&
+ flash_ctrl->is_regulator_enabled) {
+ ret = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+ DISABLE_REGULATOR, NULL);
+ if (ret < 0) {
+ pr_err("%s:%d regulator disable failed ret = %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
+ flash_ctrl->is_regulator_enabled = 0;
+ }
+ CDBG("%s:%d:Exit\n", __func__, __LINE__);
+ return ret;
+}
+
static int32_t msm_flash_low(
struct msm_flash_ctrl_t *flash_ctrl,
struct msm_flash_cfg_data_t *flash_data)
@@ -564,15 +604,33 @@ static int32_t msm_flash_high(
return 0;
}
+static int32_t msm_flash_query_current(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_query_data_t *flash_query_data)
+{
+ int32_t ret = -EINVAL;
+ int32_t max_current = -EINVAL;
+
+ if (flash_ctrl->switch_trigger) {
+ ret = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+ QUERY_MAX_CURRENT, &max_current);
+ if (ret < 0) {
+ pr_err("%s:%d Query max_avail_curr failed ret = %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
+ }
+
+ flash_query_data->max_avail_curr = max_current;
+ CDBG("%s: %d: max_avail_curr : %d\n", __func__, __LINE__,
+ flash_query_data->max_avail_curr);
+ return 0;
+}
+
static int32_t msm_flash_release(
struct msm_flash_ctrl_t *flash_ctrl)
{
int32_t rc = 0;
- if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_RELEASE) {
- pr_err("%s:%d Invalid flash state = %d",
- __func__, __LINE__, flash_ctrl->flash_state);
- return 0;
- }
rc = flash_ctrl->func_tbl->camera_flash_off(flash_ctrl, NULL);
if (rc < 0) {
@@ -600,24 +658,49 @@ static int32_t msm_flash_config(struct msm_flash_ctrl_t *flash_ctrl,
rc = msm_flash_init(flash_ctrl, flash_data);
break;
case CFG_FLASH_RELEASE:
- if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)
+ if (flash_ctrl->flash_state != MSM_CAMERA_FLASH_RELEASE) {
rc = flash_ctrl->func_tbl->camera_flash_release(
flash_ctrl);
+ } else {
+ CDBG(pr_fmt("Invalid state : %d\n"),
+ flash_ctrl->flash_state);
+ }
break;
case CFG_FLASH_OFF:
- if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)
+ if ((flash_ctrl->flash_state != MSM_CAMERA_FLASH_RELEASE) &&
+ (flash_ctrl->flash_state != MSM_CAMERA_FLASH_OFF)) {
rc = flash_ctrl->func_tbl->camera_flash_off(
flash_ctrl, flash_data);
+ if (!rc)
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_OFF;
+ } else {
+ CDBG(pr_fmt("Invalid state : %d\n"),
+ flash_ctrl->flash_state);
+ }
break;
case CFG_FLASH_LOW:
- if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)
+ if ((flash_ctrl->flash_state == MSM_CAMERA_FLASH_OFF) ||
+ (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)) {
rc = flash_ctrl->func_tbl->camera_flash_low(
flash_ctrl, flash_data);
+ if (!rc)
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_LOW;
+ } else {
+ CDBG(pr_fmt("Invalid state : %d\n"),
+ flash_ctrl->flash_state);
+ }
break;
case CFG_FLASH_HIGH:
- if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)
+ if ((flash_ctrl->flash_state == MSM_CAMERA_FLASH_OFF) ||
+ (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)) {
rc = flash_ctrl->func_tbl->camera_flash_high(
flash_ctrl, flash_data);
+ if (!rc)
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_HIGH;
+ } else {
+ CDBG(pr_fmt("Invalid state : %d\n"),
+ flash_ctrl->flash_state);
+ }
break;
default:
rc = -EFAULT;
@@ -626,11 +709,55 @@ static int32_t msm_flash_config(struct msm_flash_ctrl_t *flash_ctrl,
mutex_unlock(flash_ctrl->flash_mutex);
+ rc = msm_flash_prepare(flash_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d Enable/Disable Regulator failed ret = %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
CDBG("Exit %s type %d\n", __func__, flash_data->cfg_type);
return rc;
}
+static int32_t msm_flash_query_data(struct msm_flash_ctrl_t *flash_ctrl,
+ void __user *argp)
+{
+ int32_t rc = -EINVAL, i = 0;
+ struct msm_flash_query_data_t *flash_query =
+ (struct msm_flash_query_data_t *) argp;
+
+ CDBG("Enter %s type %d\n", __func__, flash_query->query_type);
+
+ switch (flash_query->query_type) {
+ case FLASH_QUERY_CURRENT:
+ if (flash_ctrl->func_tbl && flash_ctrl->func_tbl->
+ camera_flash_query_current != NULL)
+ rc = flash_ctrl->func_tbl->
+ camera_flash_query_current(
+ flash_ctrl, flash_query);
+ else {
+ flash_query->max_avail_curr = 0;
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++) {
+ flash_query->max_avail_curr +=
+ flash_ctrl->flash_op_current[i];
+ }
+ rc = 0;
+ CDBG("%s: max_avail_curr: %d\n", __func__,
+ flash_query->max_avail_curr);
+ }
+ break;
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+ CDBG("Exit %s type %d\n", __func__, flash_query->query_type);
+
+ return rc;
+}
+
static long msm_flash_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
@@ -662,8 +789,11 @@ static long msm_flash_subdev_ioctl(struct v4l2_subdev *sd,
pr_err("fctrl->func_tbl NULL\n");
return -EINVAL;
} else {
- return fctrl->func_tbl->camera_flash_release(fctrl);
+ fctrl->func_tbl->camera_flash_release(fctrl);
+ return msm_flash_prepare(fctrl);
}
+ case VIDIOC_MSM_FLASH_QUERY_DATA:
+ return msm_flash_query_data(fctrl, argp);
default:
pr_err_ratelimited("invalid cmd %d\n", cmd);
return -ENOIOCTLCMD;
@@ -1140,6 +1270,7 @@ static struct msm_flash_table msm_pmic_flash_table = {
.camera_flash_off = msm_flash_off,
.camera_flash_low = msm_flash_low,
.camera_flash_high = msm_flash_high,
+ .camera_flash_query_current = msm_flash_query_current,
},
};
@@ -1151,6 +1282,7 @@ static struct msm_flash_table msm_gpio_flash_table = {
.camera_flash_off = msm_flash_off,
.camera_flash_low = msm_flash_low,
.camera_flash_high = msm_flash_high,
+ .camera_flash_query_current = NULL,
},
};
@@ -1162,6 +1294,7 @@ static struct msm_flash_table msm_i2c_flash_table = {
.camera_flash_off = msm_flash_i2c_write_setting_array,
.camera_flash_low = msm_flash_i2c_write_setting_array,
.camera_flash_high = msm_flash_i2c_write_setting_array,
+ .camera_flash_query_current = NULL,
},
};
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h
index c82e48cddcaf..ad619fa4eb63 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,9 @@
enum msm_camera_flash_state_t {
MSM_CAMERA_FLASH_INIT,
+ MSM_CAMERA_FLASH_OFF,
+ MSM_CAMERA_FLASH_LOW,
+ MSM_CAMERA_FLASH_HIGH,
MSM_CAMERA_FLASH_RELEASE,
};
@@ -42,6 +45,9 @@ struct msm_flash_func_t {
struct msm_flash_cfg_data_t *);
int32_t (*camera_flash_high)(struct msm_flash_ctrl_t *,
struct msm_flash_cfg_data_t *);
+ int32_t (*camera_flash_query_current)(struct msm_flash_ctrl_t *,
+ struct msm_flash_query_data_t *);
+
};
struct msm_flash_table {
@@ -67,6 +73,7 @@ struct msm_flash_ctrl_t {
/* Switch node to trigger led */
const char *switch_trigger_name;
struct led_trigger *switch_trigger;
+ uint32_t is_regulator_enabled;
/* Flash */
uint32_t flash_num_sources;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/Makefile b/drivers/media/platform/msm/camera_v2/sensor/io/Makefile
index ec958697ae13..549c35a806f7 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/Makefile
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/Makefile
@@ -2,4 +2,5 @@ ccflags-y += -Idrivers/media/platform/msm/camera_v2/
ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
-obj-$(CONFIG_MSMB_CAMERA) += msm_camera_cci_i2c.o msm_camera_qup_i2c.o msm_camera_spi.o msm_camera_dt_util.o
+ccflags-y += -Idrivers/misc/
+obj-$(CONFIG_MSMB_CAMERA) += msm_camera_cci_i2c.o msm_camera_qup_i2c.o msm_camera_spi.o msm_camera_dt_util.o msm_camera_tz_i2c.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h
index 0fbe35713d8e..785dd54d65e1 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h
@@ -152,4 +152,60 @@ int32_t msm_camera_qup_i2c_poll(struct msm_camera_i2c_client *client,
uint32_t addr, uint16_t data,
enum msm_camera_i2c_data_type data_type, uint32_t delay_ms);
+int32_t msm_camera_tz_i2c_register_sensor(void *s_ctrl_p);
+
+int32_t msm_camera_tz_i2c_power_up(struct msm_camera_i2c_client *client);
+
+int32_t msm_camera_tz_i2c_power_down(struct msm_camera_i2c_client *client);
+
+int32_t msm_camera_tz_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_tz_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_tz_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_tz_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_tz_i2c_write_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_async(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_sync(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_sync_block(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_seq_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_sensor_tz_i2c_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd);
+
+int32_t msm_camera_tz_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
new file mode 100644
index 000000000000..25c152be2b71
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
@@ -0,0 +1,1093 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <soc/qcom/camera2.h>
+#include "qseecom_kernel.h"
+#include "msm_camera_i2c.h"
+#include "msm_camera_io_util.h"
+#include "msm_cci.h"
+#include "msm_sensor.h"
+
+#define QSEECOM_SBUFF_SIZE SZ_128K
+#define MAX_TA_NAME 32
+#define EMPTY_QSEECOM_HANDLE NULL
+
+#ifndef CONFIG_MSM_SEC_CCI_TA_NAME
+ #define CONFIG_MSM_SEC_CCI_TA_NAME "seccamdemo64"
+#endif /* CONFIG_MSM_SEC_CCI_TA_NAME */
+
+/* Update version major number in case the HLOS-TA interface is changed*/
+#define TA_IF_VERSION_MAJ 0
+#define TA_IF_VERSION_MIN 1
+
+#undef CDBG
+#ifdef CONFIG_MSM_SEC_CCI_DEBUG
+
+#define CDBG(fmt, args...) pr_info(CONFIG_MSM_SEC_CCI_TA_NAME "::%s:%d - " fmt,\
+ __func__, __LINE__, ##args)
+#define TZ_I2C_FN_RETURN(ret, i2c_fn, ...) \
+ ((ret < 0) ? i2c_fn(__VA_ARGS__):ret)
+
+#else /* CONFIG_MSM_SEC_CCI_DEBUG */
+
+#define CDBG(fmt, args...) pr_info("%s:%d - " fmt, __func__, __LINE__, ##args)
+#define TZ_I2C_FN_RETURN(ret, i2c_fn, ...) \
+ ((ret < 0) ? -EFAULT:ret)
+
+#endif /* CONFIG_MSM_SEC_CCI_DEBUG */
+
+#pragma pack(push, msm_camera_tz_i2c, 1)
+
+enum msm_camera_tz_i2c_cmd_id_t {
+ TZ_I2C_CMD_GET_NONE,
+ TZ_I2C_CMD_GET_IF_VERSION,
+ TZ_I2C_CMD_POWER_UP,
+ TZ_I2C_CMD_POWER_DOWN,
+ TZ_I2C_CMD_CCI_GENERIC,
+ TZ_I2C_CMD_CCI_READ,
+ TZ_I2C_CMD_CCI_READ_SEQ,
+ TZ_I2C_CMD_CCI_WRITE,
+ TZ_I2C_CMD_CCI_WRITE_SEQ,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_ASYNC,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC_BLOCK,
+ TZ_I2C_CMD_CCI_WRITE_TABLE,
+ TZ_I2C_CMD_CCI_WRITE_SEQ_TABLE,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_W_MICRODELAY,
+ TZ_I2C_CMD_CCI_POLL,
+ TZ_I2C_CMD_CCI_WRITE_CONF_TBL,
+ TZ_I2C_CMD_CCI_UTIL,
+};
+
+enum msm_camera_tz_i2c_status_t {
+ TZ_I2C_STATUS_SUCCESS = 0,
+ TZ_I2C_STATUS_GENERAL_FAILURE = -1,
+ TZ_I2C_STATUS_INVALID_INPUT_PARAMS = -2,
+ TZ_I2C_STATUS_INVALID_SENSOR_ID = -3,
+ TZ_I2C_STATUS_BYPASS = -4,
+ TZ_I2C_STATUS_ERR_SIZE = 0x7FFFFFFF
+};
+
+struct msm_camera_tz_i2c_generic_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+};
+
+struct msm_camera_tz_i2c_generic_rsp_t {
+ enum msm_camera_tz_i2c_status_t rc;
+};
+
+#define msm_camera_tz_i2c_get_if_version_req_t msm_camera_tz_i2c_generic_req_t
+
+struct msm_camera_tz_i2c_get_if_version_rsp_t {
+ enum msm_camera_tz_i2c_status_t rc;
+ uint32_t if_version_maj;
+ uint32_t if_version_min;
+};
+
+struct msm_camera_tz_i2c_power_up_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+};
+
+#define msm_camera_tz_i2c_power_up_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_power_down_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+};
+
+#define msm_camera_tz_i2c_power_down_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_cci_generic_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ enum msm_camera_tz_i2c_cmd_id_t cci_cmd_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+};
+
+#define msm_camera_tz_i2c_cci_generic_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_cci_read_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint32_t addr;
+ uint32_t data_type;
+};
+
+struct msm_camera_tz_i2c_cci_read_rsp_t {
+ enum msm_camera_tz_i2c_status_t rc;
+ uint16_t data;
+};
+
+struct msm_camera_tz_i2c_cci_write_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint32_t addr;
+ uint16_t data;
+ uint32_t data_type;
+};
+
+#define msm_camera_tz_i2c_cci_write_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_cci_util_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint16_t cci_cmd;
+};
+
+#define msm_camera_tz_i2c_cci_util_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+#pragma pack(pop, msm_camera_tz_i2c)
+
+struct msm_camera_tz_i2c_sensor_info_t {
+ struct msm_sensor_ctrl_t *s_ctrl;
+ struct msm_camera_i2c_fn_t *saved_sensor_i2c_fn;
+ uint32_t secure;
+ uint32_t ta_enabled;
+ struct qseecom_handle *ta_qseecom_handle;
+ const char *ta_name;
+};
+
+struct msm_camera_tz_i2c_ctrl_t {
+ struct mutex lock;
+ uint32_t lock_ready;
+ uint32_t secure_mode;
+};
+
+static struct msm_camera_tz_i2c_ctrl_t msm_camera_tz_i2c_ctrl;
+
+static struct msm_camera_tz_i2c_sensor_info_t sensor_info[MAX_CAMERAS] = {
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_SEC_CCI_TA_NAME},
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_SEC_CCI_TA_NAME},
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_SEC_CCI_TA_NAME},
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_SEC_CCI_TA_NAME},
+};
+
+static int32_t msm_camera_tz_i2c_is_sensor_secure(
+ struct msm_camera_i2c_client *client)
+{
+ uint32_t index;
+
+ if (client == NULL) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Enter\n");
+ for (index = 0; index < MAX_CAMERAS; index++) {
+ if ((sensor_info[index].s_ctrl != NULL) &&
+ sensor_info[index].secure &&
+ (sensor_info[index].s_ctrl->sensor_i2c_client ==
+ client)) {
+ CDBG("Found secure sensor ID = %d\n",
+ sensor_info[index].s_ctrl->id);
+ return sensor_info[index].s_ctrl->id;
+ }
+ }
+ return -EINVAL;
+}
+
+static int32_t get_cmd_rsp_buffers(
+ struct qseecom_handle *ta_qseecom_handle,
+ void **cmd, int *cmd_len,
+ void **rsp, int *rsp_len)
+{
+
+ CDBG("Enter\n");
+ if ((ta_qseecom_handle == NULL) ||
+ (cmd == NULL) || (cmd_len == NULL) ||
+ (rsp == NULL) || (rsp_len == NULL)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (*cmd_len & QSEECOM_ALIGN_MASK)
+ *cmd_len = QSEECOM_ALIGN(*cmd_len);
+
+ if (*rsp_len & QSEECOM_ALIGN_MASK)
+ *rsp_len = QSEECOM_ALIGN(*rsp_len);
+
+ if ((*rsp_len + *cmd_len) > QSEECOM_SBUFF_SIZE) {
+ pr_err("%s:%d - Shared buffer too small to hold cmd=%d and rsp=%d\n",
+ __func__, __LINE__,
+ *cmd_len, *rsp_len);
+ return -ENOMEM;
+ }
+
+ *cmd = ta_qseecom_handle->sbuf;
+ *rsp = ta_qseecom_handle->sbuf + *cmd_len;
+ return 0;
+}
+
+static int32_t msm_camera_tz_i2c_ta_get_if_version(
+ struct qseecom_handle *ta_qseecom_handle,
+ uint32_t *if_version_maj,
+ uint32_t *if_version_min)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_get_if_version_req_t *cmd;
+ struct msm_camera_tz_i2c_get_if_version_rsp_t *rsp;
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ if ((ta_qseecom_handle == NULL) ||
+ (if_version_maj == NULL) || (if_version_min == NULL)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ cmd_len = sizeof(struct msm_camera_tz_i2c_get_if_version_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_get_if_version_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_GET_IF_VERSION;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Unable to get if version info, rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+
+ if (rsp->rc < 0) {
+ CDBG("TZ I2C App error, rc=%d\n", rsp->rc);
+ rc = -EFAULT;
+ } else {
+ *if_version_maj = rsp->if_version_maj;
+ *if_version_min = rsp->if_version_min;
+ CDBG("TZ I2C If version %d.%d\n", *if_version_maj,
+ *if_version_min);
+ }
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_power_up(
+ struct qseecom_handle *ta_qseecom_handle,
+ int32_t sensor_id,
+ uint32_t *sensor_secure)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_power_up_req_t *cmd;
+ struct msm_camera_tz_i2c_power_up_rsp_t *rsp;
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+
+ if (sensor_secure == NULL)
+ return -EINVAL;
+
+ *sensor_secure = 0;
+ if ((ta_qseecom_handle == NULL) ||
+ (sensor_secure == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ cmd_len = sizeof(struct msm_camera_tz_i2c_power_up_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_power_up_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_POWER_UP;
+ cmd->sensor_id = sensor_id;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Unable to get sensor secure status, rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+
+ if (rsp->rc == TZ_I2C_STATUS_SUCCESS)
+ *sensor_secure = 1;
+ CDBG("Sensor %d is %s\n", sensor_id,
+ (*sensor_secure)?"SECURE":"NON-SECURE");
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_power_down(
+ struct qseecom_handle *ta_qseecom_handle,
+ int32_t sensor_id)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_power_down_req_t *cmd;
+ struct msm_camera_tz_i2c_power_down_rsp_t *rsp;
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+
+ if ((ta_qseecom_handle == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ cmd_len = sizeof(struct msm_camera_tz_i2c_power_down_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_power_down_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_POWER_DOWN;
+ cmd->sensor_id = sensor_id;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_generic(
+ struct msm_camera_i2c_client *client,
+ enum msm_camera_tz_i2c_cmd_id_t cci_cmd_id)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_generic_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_generic_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, cci_cmd_id=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ cci_cmd_id);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_generic_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_generic_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_GENERIC;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_cmd_id = cci_cmd_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+ CDBG("Done: rc=%d, cci_cmd_id=%d\n", rc, cci_cmd_id);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_read(
+ struct msm_camera_i2c_client *client,
+ uint32_t addr,
+ uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_read_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_read_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (data == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, Addr=0x%X, Type=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ data_type);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_read_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_read_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_READ;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+ cmd->addr = addr;
+ cmd->data_type = data_type;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+ *data = rsp->data;
+
+ CDBG("Done: rc=%d, addr=0x%X, data=0x%X\n", rc,
+ addr, *data);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_write(
+ struct msm_camera_i2c_client *client,
+ uint32_t addr,
+ uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_write_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_write_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, Addr=0x%X, Data=0x%X Type=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ data,
+ data_type);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_write_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_write_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_WRITE;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+ cmd->addr = addr;
+ cmd->data = data;
+ cmd->data_type = data_type;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed:, rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+
+ CDBG("Done: rc=%d, addr=0x%X, data=0x%X\n", rc,
+ addr, data);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_util(
+ struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_util_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_util_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, cci_cmd=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ cci_cmd);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_util_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_util_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_UTIL;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+ cmd->cci_cmd = cci_cmd;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+ CDBG("Done: rc=%d, cci_cmd=%d\n", rc, cci_cmd);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_probe(
+ struct msm_camera_i2c_client *client)
+{
+ int32_t sensor_id = -1;
+
+ CDBG("Enter\n");
+ sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+ if ((sensor_id >= 0) && sensor_info[sensor_id].ta_enabled
+ && msm_camera_tz_i2c_ctrl.lock_ready) {
+ mutex_lock(&msm_camera_tz_i2c_ctrl.lock);
+ return sensor_id;
+ }
+ return -EINVAL;
+}
+
+static int32_t msm_camera_tz_i2c_ta_done(void)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ if (msm_camera_tz_i2c_ctrl.lock_ready)
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ return rc;
+}
+
+int32_t msm_camera_tz_i2c_power_up(
+ struct msm_camera_i2c_client *client)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if (!msm_camera_tz_i2c_ctrl.lock_ready) {
+ msm_camera_tz_i2c_ctrl.lock_ready = 1;
+ mutex_init(&msm_camera_tz_i2c_ctrl.lock);
+ }
+
+ CDBG("Enter (sensor_id=%d)\n", sensor_id);
+ if (sensor_id >= 0) {
+ ktime_t startTime;
+
+ mutex_lock(&msm_camera_tz_i2c_ctrl.lock);
+ if (msm_camera_tz_i2c_ctrl.secure_mode) {
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ return rc;
+ }
+ startTime = ktime_get();
+
+ CDBG("Switch to secure mode (secure sensor=%d)\n",
+ sensor_id);
+ /* Start the TA */
+ if ((sensor_info[sensor_id].ta_qseecom_handle == NULL)
+ && (sensor_info[sensor_id].ta_name != NULL) &&
+ ('\0' != sensor_info[sensor_id].ta_name[0])) {
+ uint32_t if_version_maj = 0;
+ uint32_t if_version_min = 0;
+
+ sensor_info[sensor_id].ta_enabled = 0;
+ rc = qseecom_start_app(
+ &sensor_info[sensor_id].ta_qseecom_handle,
+ (char *)sensor_info[sensor_id].ta_name,
+ QSEECOM_SBUFF_SIZE);
+ if (!rc) {
+ rc = msm_camera_tz_i2c_ta_get_if_version(
+ sensor_info[sensor_id].
+ ta_qseecom_handle,
+ &if_version_maj, &if_version_min);
+ }
+
+ if (!rc) {
+ if (if_version_maj != TA_IF_VERSION_MAJ) {
+ CDBG("TA ver mismatch %d.%d != %d.%d\n",
+ if_version_maj, if_version_min,
+ TA_IF_VERSION_MAJ,
+ TA_IF_VERSION_MIN);
+ rc = qseecom_shutdown_app(
+ &sensor_info[sensor_id].
+ ta_qseecom_handle);
+ sensor_info[sensor_id].ta_qseecom_handle
+ = EMPTY_QSEECOM_HANDLE;
+ rc = -EFAULT;
+ } else {
+ uint32_t sensor_secure = 0;
+ /*Notify TA & get sensor secure status*/
+ rc = msm_camera_tz_i2c_ta_power_up(
+ sensor_info[sensor_id].
+ ta_qseecom_handle,
+ sensor_id,
+ &sensor_secure);
+ if (!rc && sensor_secure)
+ /* Sensor validated by TA*/
+ sensor_info[sensor_id].
+ ta_enabled = 1;
+ else {
+ qseecom_shutdown_app(
+ &sensor_info[sensor_id].
+ ta_qseecom_handle);
+ sensor_info[sensor_id].
+ ta_qseecom_handle
+ = EMPTY_QSEECOM_HANDLE;
+ rc = -EFAULT;
+ }
+ }
+ }
+ }
+ CDBG("Init TA %s - %s(%d) - %llu\n",
+ sensor_info[sensor_id].ta_name,
+ (sensor_info[sensor_id].ta_enabled)?"Ok" :
+ "Failed", rc, ktime_us_delta(ktime_get(),
+ startTime));
+ if (!rc)
+ msm_camera_tz_i2c_ctrl.secure_mode++;
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ }
+ return rc;
+}
+
+int32_t msm_camera_tz_i2c_power_down(
+ struct msm_camera_i2c_client *client)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if (!msm_camera_tz_i2c_ctrl.lock_ready) {
+ msm_camera_tz_i2c_ctrl.lock_ready = 1;
+ mutex_init(&msm_camera_tz_i2c_ctrl.lock);
+ }
+
+ CDBG("Enter (sensor_id=%d)\n", sensor_id);
+ if ((sensor_id >= 0) && (msm_camera_tz_i2c_ctrl.secure_mode != 0)) {
+ mutex_lock(&msm_camera_tz_i2c_ctrl.lock);
+ if (msm_camera_tz_i2c_ctrl.secure_mode == 1) {
+ ktime_t startTime = ktime_get();
+
+ CDBG("Switch to non-secure mode (secure sensor=%d)\n",
+ sensor_id);
+ /* Shutdown the TA */
+ if (sensor_info[sensor_id].ta_qseecom_handle != NULL) {
+ msm_camera_tz_i2c_ta_power_down(
+ sensor_info[sensor_id].
+ ta_qseecom_handle,
+ sensor_id);
+ rc = qseecom_shutdown_app(&sensor_info[
+ sensor_id].ta_qseecom_handle);
+ sensor_info[sensor_id].ta_qseecom_handle
+ = EMPTY_QSEECOM_HANDLE;
+ }
+ CDBG("Unload TA %s - %s(%d) - %llu\n",
+ sensor_info[sensor_id].ta_name,
+ (!rc)?"Ok":"Failed", rc,
+ ktime_us_delta(ktime_get(), startTime));
+ }
+ msm_camera_tz_i2c_ctrl.secure_mode--;
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ }
+ return rc;
+}
+
+int32_t msm_camera_tz_i2c_register_sensor(
+ void *s_ctrl_p)
+{
+ struct msm_sensor_ctrl_t *s_ctrl = (struct msm_sensor_ctrl_t *)s_ctrl_p;
+
+ if (s_ctrl == NULL) {
+ pr_err("%s:%d - invalid parameter)\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (s_ctrl->id >= MAX_CAMERAS) {
+ pr_err("%s:%d - invalid ID: %d\n",
+ __func__, __LINE__, s_ctrl->id);
+ return -EINVAL;
+ }
+
+ CDBG("id=%d, client=%p\n", s_ctrl->id, s_ctrl);
+ sensor_info[s_ctrl->id].s_ctrl = s_ctrl;
+ sensor_info[s_ctrl->id].secure = s_ctrl->is_secure;
+ return 0;
+}
+
+int32_t msm_camera_tz_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_read(
+ client, addr, data, data_type);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_read, client, addr, data, data_type);
+}
+
+int32_t msm_camera_tz_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X, num=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ num_byte);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_READ_SEQ);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_read_seq, client, addr, data, num_byte);
+}
+
+int32_t msm_camera_tz_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_write(
+ client, addr, data, data_type);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write, client, addr, data, data_type);
+}
+
+int32_t msm_camera_tz_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X, num=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ num_byte);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_SEQ);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_seq, client, addr, data, num_byte);
+}
+
+int32_t msm_camera_tz_i2c_write_table_async(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_ASYNC);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_async, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table_sync(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_sync, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table_sync_block(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC_BLOCK);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_sync_block, client,
+ write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_seq_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_SEQ_TABLE);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_seq_table, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_W_MICRODELAY);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_w_microdelay, client,
+ write_setting);
+}
+
+int32_t msm_camera_tz_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_POLL);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_poll, client, addr, data, data_type);
+}
+
+int32_t msm_camera_tz_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_CONF_TBL);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_conf_tbl, client, reg_conf_tbl, size,
+ data_type);
+}
+
+int32_t msm_sensor_tz_i2c_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, cci_cmd=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid, cci_cmd);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_util(client, cci_cmd);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_sensor_cci_i2c_util, client, cci_cmd);
+}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
index 22d90a2baf7d..e1143c356721 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
@@ -21,6 +21,9 @@
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl;
+static struct msm_camera_i2c_fn_t msm_sensor_secure_func_tbl;
+
static void msm_sensor_adjust_mclk(struct msm_camera_power_ctrl_t *ctrl)
{
int idx;
@@ -132,6 +135,11 @@ int msm_sensor_power_down(struct msm_sensor_ctrl_t *s_ctrl)
__func__, __LINE__, power_info, sensor_i2c_client);
return -EINVAL;
}
+
+ /* Power down secure session if it exist*/
+ if (s_ctrl->is_secure)
+ msm_camera_tz_i2c_power_down(sensor_i2c_client);
+
return msm_camera_power_down(power_info, sensor_device_type,
sensor_i2c_client);
}
@@ -170,7 +178,27 @@ int msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl)
if (s_ctrl->set_mclk_23880000)
msm_sensor_adjust_mclk(power_info);
+ CDBG("Sensor %d tagged as %s\n", s_ctrl->id,
+ (s_ctrl->is_secure)?"SECURE":"NON-SECURE");
+
for (retry = 0; retry < 3; retry++) {
+ if (s_ctrl->is_secure) {
+ rc = msm_camera_tz_i2c_power_up(sensor_i2c_client);
+ if (rc < 0) {
+#ifdef CONFIG_MSM_SEC_CCI_DEBUG
+ CDBG("Secure Sensor %d use cci\n", s_ctrl->id);
+ /* session is not secure */
+ s_ctrl->sensor_i2c_client->i2c_func_tbl =
+ &msm_sensor_cci_func_tbl;
+#else /* CONFIG_MSM_SEC_CCI_DEBUG */
+ return rc;
+#endif /* CONFIG_MSM_SEC_CCI_DEBUG */
+ } else {
+ /* session is secure */
+ s_ctrl->sensor_i2c_client->i2c_func_tbl =
+ &msm_sensor_secure_func_tbl;
+ }
+ }
rc = msm_camera_power_up(power_info, s_ctrl->sensor_device_type,
sensor_i2c_client);
if (rc < 0)
@@ -1433,6 +1461,21 @@ static struct msm_camera_i2c_fn_t msm_sensor_qup_func_tbl = {
.i2c_write_table_sync_block = msm_camera_qup_i2c_write_table,
};
+static struct msm_camera_i2c_fn_t msm_sensor_secure_func_tbl = {
+ .i2c_read = msm_camera_tz_i2c_read,
+ .i2c_read_seq = msm_camera_tz_i2c_read_seq,
+ .i2c_write = msm_camera_tz_i2c_write,
+ .i2c_write_table = msm_camera_tz_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_tz_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_tz_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_tz_i2c_util,
+ .i2c_write_conf_tbl = msm_camera_tz_i2c_write_conf_tbl,
+ .i2c_write_table_async = msm_camera_tz_i2c_write_table_async,
+ .i2c_write_table_sync = msm_camera_tz_i2c_write_table_sync,
+ .i2c_write_table_sync_block = msm_camera_tz_i2c_write_table_sync_block,
+};
+
int32_t msm_sensor_init_default_params(struct msm_sensor_ctrl_t *s_ctrl)
{
struct msm_camera_cci_client *cci_client = NULL;
@@ -1466,6 +1509,9 @@ int32_t msm_sensor_init_default_params(struct msm_sensor_ctrl_t *s_ctrl)
/* Get CCI subdev */
cci_client->cci_subdev = msm_cci_get_subdev();
+ if (s_ctrl->is_secure)
+ msm_camera_tz_i2c_register_sensor((void *)s_ctrl);
+
/* Update CCI / I2C function table */
if (!s_ctrl->sensor_i2c_client->i2c_func_tbl)
s_ctrl->sensor_i2c_client->i2c_func_tbl =
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h
index bd12588eada9..5d57ec8c28ff 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -88,6 +88,7 @@ struct msm_sensor_ctrl_t {
enum msm_camera_stream_type_t camera_stream_type;
uint32_t set_mclk_23880000;
uint8_t is_csid_tg_mode;
+ uint32_t is_secure;
};
int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index 02b83c969958..43aadffa2983 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -992,7 +992,7 @@ CSID_TG:
}
/* Update sensor mount angle and position in media entity flag */
is_yuv = (slave_info->output_format == MSM_SENSOR_YCBCR) ? 1 : 0;
- mount_pos = is_yuv << 25 |
+ mount_pos = ((s_ctrl->is_secure & 0x1) << 26) | is_yuv << 25 |
(s_ctrl->sensordata->sensor_info->position << 16) |
((s_ctrl->sensordata->
sensor_info->sensor_mount_angle / 90) << 8);
@@ -1079,6 +1079,16 @@ static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl)
goto FREE_VREG_DATA;
}
+ /* Get custom mode */
+ rc = of_property_read_u32(of_node, "qcom,secure",
+ &s_ctrl->is_secure);
+ CDBG("qcom,secure = %d, rc %d", s_ctrl->is_secure, rc);
+ if (rc < 0) {
+ /* Set default to non-secure mode */
+ s_ctrl->is_secure = 0;
+ rc = 0;
+ }
+
/* Get CCI master */
rc = of_property_read_u32(of_node, "qcom,cci-master",
&s_ctrl->cci_i2c_master);
diff --git a/drivers/media/platform/msm/sde/Kconfig b/drivers/media/platform/msm/sde/Kconfig
index fcd461bb1167..85f5f4257ddb 100644
--- a/drivers/media/platform/msm/sde/Kconfig
+++ b/drivers/media/platform/msm/sde/Kconfig
@@ -5,4 +5,13 @@ config MSM_SDE_ROTATOR
select VIDEOBUF2_CORE
select SW_SYNC if SYNC
---help---
- Enable support of V4L2 rotator driver. \ No newline at end of file
+ Enable support of V4L2 rotator driver.
+
+config MSM_SDE_ROTATOR_EVTLOG_DEBUG
+ depends on MSM_SDE_ROTATOR
+ bool "Enable sde rotator debugging"
+ ---help---
+ The SDE rotator debugging provides support to enable rotator debugging
+ features to: Dump rotator registers during driver errors, panic
+ driver during fatal errors and enable some rotator driver logging
+ into an internal buffer (this avoids logging overhead).
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index b3e81705bdf4..67c6b11329d8 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -92,6 +92,12 @@ enum sde_bus_clients {
SDE_MAX_BUS_CLIENTS
};
+enum sde_rot_regdump_access {
+ SDE_ROT_REGDUMP_READ,
+ SDE_ROT_REGDUMP_WRITE,
+ SDE_ROT_REGDUMP_MAX
+};
+
struct reg_bus_client {
char name[MAX_CLIENT_NAME_LEN];
short usecase_ndx;
@@ -107,6 +113,21 @@ struct sde_smmu_client {
bool domain_attached;
};
+struct sde_rot_vbif_debug_bus {
+ u32 disable_bus_addr;
+ u32 block_bus_addr;
+ u32 bit_offset;
+ u32 block_cnt;
+ u32 test_pnt_cnt;
+};
+
+struct sde_rot_regdump {
+ char *name;
+ u32 offset;
+ u32 len;
+ enum sde_rot_regdump_access access;
+};
+
struct sde_rot_data_type {
u32 mdss_version;
@@ -140,6 +161,14 @@ struct sde_rot_data_type {
int iommu_attached;
int iommu_ref_cnt;
+
+ struct sde_rot_vbif_debug_bus *nrt_vbif_dbg_bus;
+ u32 nrt_vbif_dbg_bus_size;
+
+ struct sde_rot_regdump *regdump;
+ u32 regdump_size;
+
+ void *sde_rot_hw;
};
int sde_rotator_base_init(struct sde_rot_data_type **pmdata,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index e3e71936b8e4..3e5cbdecfba4 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -35,6 +35,7 @@
#include "sde_rotator_r1.h"
#include "sde_rotator_r3.h"
#include "sde_rotator_trace.h"
+#include "sde_rotator_debug.h"
/* waiting for hw time out, 3 vsync for 30fps*/
#define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
@@ -127,6 +128,7 @@ static int sde_rotator_bus_scale_set_quota(struct sde_rot_bus_data_type *bus,
bus->curr_bw_uc_idx = new_uc_idx;
bus->curr_quota_val = quota;
+ SDEROT_EVTLOG(new_uc_idx, quota);
SDEROT_DBG("uc_idx=%d quota=%llu\n", new_uc_idx, quota);
ATRACE_BEGIN("msm_bus_scale_req_rot");
ret = msm_bus_scale_client_update_request(bus->bus_hdl,
@@ -274,6 +276,7 @@ static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
return;
}
+ SDEROT_EVTLOG(on);
SDEROT_DBG("%s: rotator regulators", on ? "Enable" : "Disable");
ret = sde_rot_enable_vreg(mgr->module_power.vreg_config,
mgr->module_power.num_vreg, on);
@@ -307,6 +310,7 @@ static int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable)
}
if (changed) {
+ SDEROT_EVTLOG(enable);
SDEROT_DBG("Rotator clk %s\n", enable ? "enable" : "disable");
for (i = 0; i < mgr->num_rot_clk; i++) {
clk = mgr->rot_clk[i].clk;
@@ -394,6 +398,7 @@ static bool sde_rotator_is_work_pending(struct sde_rot_mgr *mgr,
static void sde_rotator_clear_fence(struct sde_rot_entry *entry)
{
if (entry->input_fence) {
+ SDEROT_EVTLOG(entry->input_fence, 1111);
SDEROT_DBG("sys_fence_put i:%p\n", entry->input_fence);
sde_rotator_put_sync_fence(entry->input_fence);
entry->input_fence = NULL;
@@ -404,6 +409,7 @@ static void sde_rotator_clear_fence(struct sde_rot_entry *entry)
if (entry->fenceq && entry->fenceq->timeline)
sde_rotator_resync_timeline(entry->fenceq->timeline);
+ SDEROT_EVTLOG(entry->output_fence, 2222);
SDEROT_DBG("sys_fence_put o:%p\n", entry->output_fence);
sde_rotator_put_sync_fence(entry->output_fence);
entry->output_fence = NULL;
@@ -565,6 +571,7 @@ static struct sde_rot_perf *sde_rotator_find_session(
static void sde_rotator_release_data(struct sde_rot_entry *entry)
{
+ SDEROT_EVTLOG(entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr);
sde_mdp_data_free(&entry->src_buf, true, DMA_TO_DEVICE);
sde_mdp_data_free(&entry->dst_buf, true, DMA_FROM_DEVICE);
}
@@ -719,6 +726,10 @@ static struct sde_rot_hw_resource *sde_rotator_get_hw_resource(
}
}
atomic_inc(&hw->num_active);
+ SDEROT_EVTLOG(atomic_read(&hw->num_active), hw->pending_count,
+ mgr->rdot_limit, entry->perf->rdot_limit,
+ mgr->wrot_limit, entry->perf->wrot_limit,
+ entry->item.session_id, entry->item.sequence_id);
SDEROT_DBG("active=%d pending=%d rdot=%u/%u wrot=%u/%u s:%d.%d\n",
atomic_read(&hw->num_active), hw->pending_count,
mgr->rdot_limit, entry->perf->rdot_limit,
@@ -766,6 +777,8 @@ static void sde_rotator_put_hw_resource(struct sde_rot_queue *queue,
if (hw_res)
wake_up(&hw_res->wait_queue);
}
+ SDEROT_EVTLOG(atomic_read(&hw->num_active), hw->pending_count,
+ entry->item.session_id, entry->item.sequence_id);
SDEROT_DBG("active=%d pending=%d s:%d.%d\n",
atomic_read(&hw->num_active), hw->pending_count,
entry->item.session_id, entry->item.sequence_id);
@@ -1125,6 +1138,15 @@ static void sde_rotator_commit_handler(struct work_struct *work)
mgr = entry->private->mgr;
+ SDEROT_EVTLOG(
+ entry->item.session_id, entry->item.sequence_id,
+ entry->item.src_rect.x, entry->item.src_rect.y,
+ entry->item.src_rect.w, entry->item.src_rect.h,
+ entry->item.dst_rect.x, entry->item.dst_rect.y,
+ entry->item.dst_rect.w, entry->item.dst_rect.h,
+ entry->item.flags,
+ entry->dnsc_factor_w, entry->dnsc_factor_h);
+
SDEDEV_DBG(mgr->device,
"commit handler s:%d.%u src:(%d,%d,%d,%d) dst:(%d,%d,%d,%d) f:0x%x dnsc:%u/%u\n",
entry->item.session_id, entry->item.sequence_id,
@@ -1233,11 +1255,13 @@ static void sde_rotator_done_handler(struct work_struct *work)
entry->item.flags,
entry->dnsc_factor_w, entry->dnsc_factor_h);
+ SDEROT_EVTLOG(entry->item.session_id, 0);
ret = mgr->ops_wait_for_entry(hw, entry);
if (ret) {
SDEROT_ERR("fail to wait for completion %d\n", ret);
atomic_inc(&request->failed_count);
}
+ SDEROT_EVTLOG(entry->item.session_id, 1);
if (entry->item.ts)
entry->item.ts[SDE_ROTATOR_TS_DONE] = ktime_get();
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index dae1b51bfaa8..c609dbd2036e 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -22,6 +22,619 @@
#include "sde_rotator_core.h"
#include "sde_rotator_dev.h"
+#ifdef CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG
+#define SDE_EVTLOG_DEFAULT_ENABLE 1
+#else
+#define SDE_EVTLOG_DEFAULT_ENABLE 0
+#endif
+#define SDE_EVTLOG_DEFAULT_PANIC 1
+#define SDE_EVTLOG_DEFAULT_REGDUMP SDE_ROT_DBG_DUMP_IN_MEM
+#define SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
+
+/*
+ * evtlog will print this number of entries when it is called through
+ * sysfs node or panic. This prevents kernel log from evtlog message
+ * flood.
+ */
+#define SDE_ROT_EVTLOG_PRINT_ENTRY 256
+
+/*
+ * evtlog keeps this number of entries in memory for debug purpose. This
+ * number must be greater than print entry to prevent out of bound evtlog
+ * entry array access.
+ */
+#define SDE_ROT_EVTLOG_ENTRY (SDE_ROT_EVTLOG_PRINT_ENTRY * 4)
+#define SDE_ROT_EVTLOG_MAX_DATA 15
+#define SDE_ROT_EVTLOG_BUF_MAX 512
+#define SDE_ROT_EVTLOG_BUF_ALIGN 32
+#define SDE_ROT_DEBUG_BASE_MAX 10
+
+static DEFINE_SPINLOCK(sde_rot_xlock);
+
+/*
+ * tlog - EVTLOG entry structure
+ * @counter - EVTLOG entriy counter
+ * @time - timestamp of EVTLOG entry
+ * @name - function name of EVTLOG entry
+ * @line - line number of EVTLOG entry
+ * @data - EVTLOG data contents
+ * @data_cnt - number of data contents
+ * @pid - pid of current calling thread
+ */
+struct tlog {
+ u32 counter;
+ s64 time;
+ const char *name;
+ int line;
+ u32 data[SDE_ROT_EVTLOG_MAX_DATA];
+ u32 data_cnt;
+ int pid;
+};
+
+/*
+ * sde_rot_dbg_evtlog - EVTLOG debug data structure
+ * @logs - EVTLOG entries
+ * @first - first entry index in the EVTLOG
+ * @last - last entry index in the EVTLOG
+ * @curr - curr entry index in the EVTLOG
+ * @evtlog - EVTLOG debugfs handle
+ * @evtlog_enable - boolean indicates EVTLOG enable/disable
+ * @panic_on_err - boolean indicates issue panic after EVTLOG dump
+ * @enable_reg_dump - control in-log/memory dump for rotator registers
+ * @enable_vbif_dbgbus_dump - control in-log/memory dump for VBIF debug bus
+ * @evtlog_dump_work - schedule work strucutre for timeout handler
+ * @work_dump_reg - storage for register dump control in schedule work
+ * @work_panic - storage for panic control in schedule work
+ * @work_vbif_dbgbus - storage for VBIF debug bus control in schedule work
+ * @nrt_vbif_dbgbus_dump - memory buffer for VBIF debug bus dumping
+ * @reg_dump_array - memory buffer for rotator registers dumping
+ */
+struct sde_rot_dbg_evtlog {
+ struct tlog logs[SDE_ROT_EVTLOG_ENTRY];
+ u32 first;
+ u32 last;
+ u32 curr;
+ struct dentry *evtlog;
+ u32 evtlog_enable;
+ u32 panic_on_err;
+ u32 enable_reg_dump;
+ u32 enable_vbif_dbgbus_dump;
+ struct work_struct evtlog_dump_work;
+ bool work_dump_reg;
+ bool work_panic;
+ bool work_vbif_dbgbus;
+ u32 *nrt_vbif_dbgbus_dump; /* address for the nrt vbif debug bus dump */
+ u32 *reg_dump_array[SDE_ROT_DEBUG_BASE_MAX];
+} sde_rot_dbg_evtlog;
+
+/*
+ * sde_rot_evtlog_is_enabled - helper function for checking EVTLOG
+ * enable/disable
+ * @flag - EVTLOG option flag
+ */
+static inline bool sde_rot_evtlog_is_enabled(u32 flag)
+{
+ return (flag & sde_rot_dbg_evtlog.evtlog_enable) ||
+ (flag == SDE_ROT_EVTLOG_ALL &&
+ sde_rot_dbg_evtlog.evtlog_enable);
+}
+
+/*
+ * __vbif_debug_bus - helper function for VBIF debug bus dump
+ * @head - VBIF debug bus data structure
+ * @vbif_base - VBIF IO mapped address
+ * @dump_addr - output buffer for memory dump option
+ * @in_log - boolean indicates in-log dump option
+ */
+static void __vbif_debug_bus(struct sde_rot_vbif_debug_bus *head,
+ void __iomem *vbif_base, u32 *dump_addr, bool in_log)
+{
+ int i, j;
+ u32 val;
+
+ if (!dump_addr && !in_log)
+ return;
+
+ for (i = 0; i < head->block_cnt; i++) {
+ writel_relaxed(1 << (i + head->bit_offset),
+ vbif_base + head->block_bus_addr);
+ /* make sure that current bus blcok enable */
+ wmb();
+ for (j = 0; j < head->test_pnt_cnt; j++) {
+ writel_relaxed(j, vbif_base + head->block_bus_addr + 4);
+ /* make sure that test point is enabled */
+ wmb();
+ val = readl_relaxed(vbif_base + MMSS_VBIF_TEST_BUS_OUT);
+ if (dump_addr) {
+ *dump_addr++ = head->block_bus_addr;
+ *dump_addr++ = i;
+ *dump_addr++ = j;
+ *dump_addr++ = val;
+ }
+ if (in_log)
+ pr_err("testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+ head->block_bus_addr, i, j, val);
+ }
+ }
+}
+
+/*
+ * sde_rot_dump_vbif_debug_bus - VBIF debug bus dump
+ * @bus_dump_flag - dump flag controlling in-log/memory dump option
+ * @dump_mem - output buffer for memory dump location
+ */
+static void sde_rot_dump_vbif_debug_bus(u32 bus_dump_flag,
+ u32 **dump_mem)
+{
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ bool in_log, in_mem;
+ u32 *dump_addr = NULL;
+ u32 value;
+ struct sde_rot_vbif_debug_bus *head;
+ phys_addr_t phys = 0;
+ int i, list_size = 0;
+ void __iomem *vbif_base;
+ struct sde_rot_vbif_debug_bus *dbg_bus;
+ u32 bus_size;
+
+ pr_info("======== NRT VBIF Debug bus DUMP =========\n");
+ vbif_base = mdata->vbif_nrt_io.base;
+ dbg_bus = mdata->nrt_vbif_dbg_bus;
+ bus_size = mdata->nrt_vbif_dbg_bus_size;
+
+ if (!vbif_base || !dbg_bus || !bus_size)
+ return;
+
+ /* allocate memory for each test point */
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+ list_size += (head->block_cnt * head->test_pnt_cnt);
+ }
+
+ /* 4 bytes * 4 entries for each test point*/
+ list_size *= 16;
+
+ in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
+ in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
+ __func__, dump_addr, dump_addr + list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
+
+ sde_smmu_ctrl(1);
+
+ value = readl_relaxed(vbif_base + MMSS_VBIF_CLKON);
+ writel_relaxed(value | BIT(1), vbif_base + MMSS_VBIF_CLKON);
+
+ /* make sure that vbif core is on */
+ wmb();
+
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+
+ writel_relaxed(0, vbif_base + head->disable_bus_addr);
+ writel_relaxed(BIT(0), vbif_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+ /* make sure that other bus is off */
+ wmb();
+
+ __vbif_debug_bus(head, vbif_base, dump_addr, in_log);
+ if (dump_addr)
+ dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+ }
+
+ sde_smmu_ctrl(0);
+
+ pr_info("========End VBIF Debug bus=========\n");
+}
+
+/*
+ * sde_rot_dump_reg - helper function for dumping rotator register set content
+ * @dump_name - register set name
+ * @reg_dump_flag - dumping flag controlling in-log/memory dump location
+ * @addr - starting address offset for dumping
+ * @len - range of the register set
+ * @dump_mem - output buffer for memory dump location option
+ */
+void sde_rot_dump_reg(const char *dump_name, u32 reg_dump_flag, u32 addr,
+ int len, u32 **dump_mem)
+{
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ bool in_log, in_mem;
+ u32 *dump_addr = NULL;
+ phys_addr_t phys = 0;
+ int i;
+
+ in_log = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
+ in_mem = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
+
+ pr_debug("reg_dump_flag=%d in_log=%d in_mem=%d\n",
+ reg_dump_flag, in_log, in_mem);
+
+ if (len % 16)
+ len += 16;
+ len /= 16;
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
+ len * 16, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ pr_info("%s: start_addr:0x%p end_addr:0x%p reg_addr=0x%X\n",
+ dump_name, dump_addr, dump_addr + (u32)len * 16,
+ addr);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: kzalloc fails!\n");
+ }
+ }
+
+
+ for (i = 0; i < len; i++) {
+ u32 x0, x4, x8, xc;
+
+ x0 = readl_relaxed(mdata->sde_io.base + addr+0x0);
+ x4 = readl_relaxed(mdata->sde_io.base + addr+0x4);
+ x8 = readl_relaxed(mdata->sde_io.base + addr+0x8);
+ xc = readl_relaxed(mdata->sde_io.base + addr+0xc);
+
+ if (in_log)
+ pr_info("0x%08X : %08x %08x %08x %08x\n",
+ addr, x0, x4, x8, xc);
+
+ if (dump_addr && in_mem) {
+ dump_addr[i*4] = x0;
+ dump_addr[i*4 + 1] = x4;
+ dump_addr[i*4 + 2] = x8;
+ dump_addr[i*4 + 3] = xc;
+ }
+
+ addr += 16;
+ }
+}
+
+/*
+ * sde_rot_dump_reg_all - dumping all SDE rotator registers
+ */
+static void sde_rot_dump_reg_all(void)
+{
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ struct sde_rot_regdump *head, *regdump;
+ u32 regdump_size;
+ int i;
+
+ regdump = mdata->regdump;
+ regdump_size = mdata->regdump_size;
+
+ if (!regdump || !regdump_size)
+ return;
+
+ /* Enable clock to rotator if not yet enabled */
+ sde_smmu_ctrl(1);
+
+ for (i = 0; (i < regdump_size) && (i < SDE_ROT_DEBUG_BASE_MAX); i++) {
+ head = &regdump[i];
+
+ if (head->access == SDE_ROT_REGDUMP_WRITE) {
+ writel_relaxed(1, mdata->sde_io.base + head->offset);
+ /* Make sure write go through */
+ wmb();
+ } else {
+ sde_rot_dump_reg(head->name,
+ sde_rot_dbg_evtlog.enable_reg_dump,
+ head->offset, head->len,
+ &sde_rot_dbg_evtlog.reg_dump_array[i]);
+ }
+ }
+
+ /* Disable rotator clock */
+ sde_smmu_ctrl(0);
+}
+
+/*
+ * __sde_rot_evtlog_dump_calc_range - calculate dump range for EVTLOG
+ */
+static bool __sde_rot_evtlog_dump_calc_range(void)
+{
+ static u32 next;
+ bool need_dump = true;
+ unsigned long flags;
+ struct sde_rot_dbg_evtlog *evtlog = &sde_rot_dbg_evtlog;
+
+ spin_lock_irqsave(&sde_rot_xlock, flags);
+
+ evtlog->first = next;
+
+ if (evtlog->last == evtlog->first) {
+ need_dump = false;
+ goto dump_exit;
+ }
+
+ if (evtlog->last < evtlog->first) {
+ evtlog->first %= SDE_ROT_EVTLOG_ENTRY;
+ if (evtlog->last < evtlog->first)
+ evtlog->last += SDE_ROT_EVTLOG_ENTRY;
+ }
+
+ if ((evtlog->last - evtlog->first) > SDE_ROT_EVTLOG_PRINT_ENTRY) {
+ pr_warn("evtlog buffer overflow before dump: %d\n",
+ evtlog->last - evtlog->first);
+ evtlog->first = evtlog->last - SDE_ROT_EVTLOG_PRINT_ENTRY;
+ }
+ next = evtlog->first + 1;
+
+dump_exit:
+ spin_unlock_irqrestore(&sde_rot_xlock, flags);
+
+ return need_dump;
+}
+
+/*
+ * sde_rot_evtlog_dump_entry - helper function for EVTLOG content dumping
+ * @evtlog_buf: EVTLOG dump output buffer
+ * @evtlog_buf_size: EVTLOG output buffer size
+ */
+static ssize_t sde_rot_evtlog_dump_entry(char *evtlog_buf,
+ ssize_t evtlog_buf_size)
+{
+ int i;
+ ssize_t off = 0;
+ struct tlog *log, *prev_log;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sde_rot_xlock, flags);
+
+ log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.first %
+ SDE_ROT_EVTLOG_ENTRY];
+
+ prev_log = &sde_rot_dbg_evtlog.logs[(sde_rot_dbg_evtlog.first - 1) %
+ SDE_ROT_EVTLOG_ENTRY];
+
+ off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d",
+ log->name, log->line);
+
+ if (off < SDE_ROT_EVTLOG_BUF_ALIGN) {
+ memset((evtlog_buf + off), 0x20,
+ (SDE_ROT_EVTLOG_BUF_ALIGN - off));
+ off = SDE_ROT_EVTLOG_BUF_ALIGN;
+ }
+
+ off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
+ "=>[%-8d:%-11llu:%9llu][%-4d]:", sde_rot_dbg_evtlog.first,
+ log->time, (log->time - prev_log->time), log->pid);
+
+ for (i = 0; i < log->data_cnt; i++)
+ off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
+ "%x ", log->data[i]);
+
+ off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n");
+
+ spin_unlock_irqrestore(&sde_rot_xlock, flags);
+
+ return off;
+}
+
+/*
+ * sde_rot_evtlog_dump_all - Dumping all content in EVTLOG buffer
+ */
+static void sde_rot_evtlog_dump_all(void)
+{
+ char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX];
+
+ while (__sde_rot_evtlog_dump_calc_range()) {
+ sde_rot_evtlog_dump_entry(evtlog_buf, SDE_ROT_EVTLOG_BUF_MAX);
+ pr_info("%s", evtlog_buf);
+ }
+}
+
+/*
+ * sde_rot_evtlog_dump_open - debugfs open handler for evtlog dump
+ * @inode: debugfs inode
+ * @file: file handler
+ */
+static int sde_rot_evtlog_dump_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+/*
+ * sde_rot_evtlog_dump_read - debugfs read handler for evtlog dump
+ * @file: file handler
+ * @buff: user buffer content for debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_rot_evtlog_dump_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ ssize_t len = 0;
+ char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX];
+
+ if (__sde_rot_evtlog_dump_calc_range()) {
+ len = sde_rot_evtlog_dump_entry(evtlog_buf,
+ SDE_ROT_EVTLOG_BUF_MAX);
+ if (copy_to_user(buff, evtlog_buf, len))
+ return -EFAULT;
+ *ppos += len;
+ }
+
+ return len;
+}
+
+/*
+ * sde_rot_evtlog_dump_write - debugfs write handler for evtlog dump
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_rot_evtlog_dump_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ sde_rot_evtlog_dump_all();
+
+ sde_rot_dump_reg_all();
+
+ if (sde_rot_dbg_evtlog.panic_on_err)
+ panic("evtlog_dump_write");
+
+ return count;
+}
+
+/*
+ * sde_rot_evtlog_dump_helper - helper function for evtlog dump
+ * @dead: boolean indicates panic after dump
+ * @panic_name: Panic signature name show up in log
+ * @dump_rot: boolean indicates rotator register dump
+ * @dump_vbif_debug_bus: boolean indicates VBIF debug bus dump
+ */
+static void sde_rot_evtlog_dump_helper(bool dead, const char *panic_name,
+ bool dump_rot, bool dump_vbif_debug_bus)
+{
+ sde_rot_evtlog_dump_all();
+
+ if (dump_rot)
+ sde_rot_dump_reg_all();
+
+ if (dump_vbif_debug_bus)
+ sde_rot_dump_vbif_debug_bus(
+ sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump,
+ &sde_rot_dbg_evtlog.nrt_vbif_dbgbus_dump);
+
+ if (dead)
+ panic(panic_name);
+}
+
+/*
+ * sde_rot_evtlog_debug_work - schedule work function for evtlog dump
+ * @work: schedule work structure
+ */
+static void sde_rot_evtlog_debug_work(struct work_struct *work)
+{
+ sde_rot_evtlog_dump_helper(
+ sde_rot_dbg_evtlog.work_panic,
+ "evtlog_workitem",
+ sde_rot_dbg_evtlog.work_dump_reg,
+ sde_rot_dbg_evtlog.work_vbif_dbgbus);
+}
+
+/*
+ * sde_rot_dump_panic - Issue evtlog dump and generic panic
+ */
+void sde_rot_dump_panic(void)
+{
+ sde_rot_evtlog_dump_all();
+ sde_rot_dump_reg_all();
+
+ panic("sde_rotator");
+}
+
+/*
+ * sde_rot_evtlog_tout_handler - log dump timeout handler
+ * @queue: boolean indicate putting log dump into queue
+ * @name: function name having timeout
+ */
+void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...)
+{
+ int i;
+ bool dead = false;
+ bool dump_rot = false;
+ bool dump_vbif_dbgbus = false;
+ char *blk_name = NULL;
+ va_list args;
+
+ if (!sde_rot_evtlog_is_enabled(SDE_ROT_EVTLOG_DEFAULT))
+ return;
+
+ if (queue && work_pending(&sde_rot_dbg_evtlog.evtlog_dump_work))
+ return;
+
+ va_start(args, name);
+ for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) {
+ blk_name = va_arg(args, char*);
+ if (IS_ERR_OR_NULL(blk_name))
+ break;
+
+ if (!strcmp(blk_name, "rot"))
+ dump_rot = true;
+
+ if (!strcmp(blk_name, "vbif_dbg_bus"))
+ dump_vbif_dbgbus = true;
+
+ if (!strcmp(blk_name, "panic"))
+ dead = true;
+ }
+ va_end(args);
+
+ if (queue) {
+ /* schedule work to dump later */
+ sde_rot_dbg_evtlog.work_panic = dead;
+ sde_rot_dbg_evtlog.work_dump_reg = dump_rot;
+ sde_rot_dbg_evtlog.work_vbif_dbgbus = dump_vbif_dbgbus;
+ schedule_work(&sde_rot_dbg_evtlog.evtlog_dump_work);
+ } else {
+ sde_rot_evtlog_dump_helper(dead, name, dump_rot,
+ dump_vbif_dbgbus);
+ }
+}
+
+/*
+ * sde_rot_evtlog - log contents into memory for dump analysis
+ * @name: Name of function calling evtlog
+ * @line: line number of calling function
+ * @flag: Log control flag
+ */
+void sde_rot_evtlog(const char *name, int line, int flag, ...)
+{
+ unsigned long flags;
+ int i, val = 0;
+ va_list args;
+ struct tlog *log;
+
+ if (!sde_rot_evtlog_is_enabled(flag))
+ return;
+
+ spin_lock_irqsave(&sde_rot_xlock, flags);
+ log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.curr];
+ log->time = ktime_to_us(ktime_get());
+ log->name = name;
+ log->line = line;
+ log->data_cnt = 0;
+ log->pid = current->pid;
+
+ va_start(args, flag);
+ for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) {
+
+ val = va_arg(args, int);
+ if (val == SDE_ROT_DATA_LIMITER)
+ break;
+
+ log->data[i] = val;
+ }
+ va_end(args);
+ log->data_cnt = i;
+ sde_rot_dbg_evtlog.curr =
+ (sde_rot_dbg_evtlog.curr + 1) % SDE_ROT_EVTLOG_ENTRY;
+ sde_rot_dbg_evtlog.last++;
+
+ spin_unlock_irqrestore(&sde_rot_xlock, flags);
+}
+
/*
* sde_rotator_stat_show - Show statistics on read to this debugfs file
* @s: Pointer to sequence file structure
@@ -249,6 +862,58 @@ static int sde_rotator_core_create_debugfs(
return 0;
}
+static const struct file_operations sde_rot_evtlog_fops = {
+ .open = sde_rot_evtlog_dump_open,
+ .read = sde_rot_evtlog_dump_read,
+ .write = sde_rot_evtlog_dump_write,
+};
+
+static int sde_rotator_evtlog_create_debugfs(
+ struct sde_rot_mgr *mgr,
+ struct dentry *debugfs_root)
+{
+ int i;
+
+ sde_rot_dbg_evtlog.evtlog = debugfs_create_dir("evtlog", debugfs_root);
+ if (IS_ERR_OR_NULL(sde_rot_dbg_evtlog.evtlog)) {
+ pr_err("debugfs_create_dir fail, error %ld\n",
+ PTR_ERR(sde_rot_dbg_evtlog.evtlog));
+ sde_rot_dbg_evtlog.evtlog = NULL;
+ return -ENODEV;
+ }
+
+ INIT_WORK(&sde_rot_dbg_evtlog.evtlog_dump_work,
+ sde_rot_evtlog_debug_work);
+ sde_rot_dbg_evtlog.work_panic = false;
+
+ for (i = 0; i < SDE_ROT_EVTLOG_ENTRY; i++)
+ sde_rot_dbg_evtlog.logs[i].counter = i;
+
+ debugfs_create_file("dump", 0644, sde_rot_dbg_evtlog.evtlog, NULL,
+ &sde_rot_evtlog_fops);
+ debugfs_create_u32("enable", 0644, sde_rot_dbg_evtlog.evtlog,
+ &sde_rot_dbg_evtlog.evtlog_enable);
+ debugfs_create_u32("panic", 0644, sde_rot_dbg_evtlog.evtlog,
+ &sde_rot_dbg_evtlog.panic_on_err);
+ debugfs_create_u32("reg_dump", 0644, sde_rot_dbg_evtlog.evtlog,
+ &sde_rot_dbg_evtlog.enable_reg_dump);
+ debugfs_create_u32("vbif_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
+ &sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump);
+
+ sde_rot_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE;
+ sde_rot_dbg_evtlog.panic_on_err = SDE_EVTLOG_DEFAULT_PANIC;
+ sde_rot_dbg_evtlog.enable_reg_dump = SDE_EVTLOG_DEFAULT_REGDUMP;
+ sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump =
+ SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP;
+
+ pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
+ sde_rot_dbg_evtlog.evtlog_enable,
+ sde_rot_dbg_evtlog.panic_on_err,
+ sde_rot_dbg_evtlog.enable_reg_dump);
+
+ return 0;
+}
+
/*
* struct sde_rotator_stat_ops - processed statistics file operations
*/
@@ -335,6 +1000,12 @@ struct dentry *sde_rotator_create_debugfs(
return NULL;
}
+ if (sde_rotator_evtlog_create_debugfs(rot_dev->mgr, debugfs_root)) {
+ SDEROT_ERR("fail create evtlog debugfs\n");
+ debugfs_remove_recursive(debugfs_root);
+ return NULL;
+ }
+
return debugfs_root;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
index 2ed1b759f3e9..dcda54274fad 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
@@ -16,6 +16,32 @@
#include <linux/types.h>
#include <linux/dcache.h>
+#define SDE_ROT_DATA_LIMITER (-1)
+#define SDE_ROT_EVTLOG_TOUT_DATA_LIMITER (NULL)
+
+enum sde_rot_dbg_reg_dump_flag {
+ SDE_ROT_DBG_DUMP_IN_LOG = BIT(0),
+ SDE_ROT_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+enum sde_rot_dbg_evtlog_flag {
+ SDE_ROT_EVTLOG_DEFAULT = BIT(0),
+ SDE_ROT_EVTLOG_IOMMU = BIT(1),
+ SDE_ROT_EVTLOG_DBG = BIT(6),
+ SDE_ROT_EVTLOG_ALL = BIT(7)
+};
+
+#define SDEROT_EVTLOG(...) sde_rot_evtlog(__func__, __LINE__, \
+ SDE_ROT_EVTLOG_DEFAULT, ##__VA_ARGS__, SDE_ROT_DATA_LIMITER)
+
+#define SDEROT_EVTLOG_TOUT_HANDLER(...) \
+ sde_rot_evtlog_tout_handler(false, __func__, ##__VA_ARGS__, \
+ SDE_ROT_EVTLOG_TOUT_DATA_LIMITER)
+
+void sde_rot_evtlog(const char *name, int line, int flag, ...);
+void sde_rot_dump_panic(void);
+void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...);
+
struct sde_rotator_device;
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c
index 2cf22ae6a3ed..e0f44be222d6 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c
@@ -316,7 +316,7 @@ static int sde_mdp_wb_wait4comp(struct sde_mdp_ctl *ctl, void *arg)
{
struct sde_mdp_writeback_ctx *ctx;
int rc = 0;
- u64 rot_time;
+ u64 rot_time = 0;
u32 status, mask, isr;
ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 9b08c4fe0989..548131393835 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -34,6 +34,7 @@
#include "sde_rotator_r3_hwio.h"
#include "sde_rotator_r3_debug.h"
#include "sde_rotator_trace.h"
+#include "sde_rotator_debug.h"
/* XIN mapping */
#define XIN_SSPP 0
@@ -198,6 +199,29 @@ static u32 sde_hw_rotator_output_pixfmts[] = {
SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};
+static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
+ {0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
+ {0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
+ {0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
+};
+
+static struct sde_rot_regdump sde_rot_r3_regdump[] = {
+ { "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
+ { "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
+ { "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
+ { "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
+ SDE_ROT_REGDUMP_READ },
+ /*
+ * Need to perform a SW reset to REGDMA in order to access the
+ * REGDMA RAM especially if REGDMA is waiting for Rotator IDLE.
+	 * REGDMA RAM should be dumped last.
+ */
+ { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
+ SDE_ROT_REGDUMP_WRITE },
+ { "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
+ SDE_ROT_REGDUMP_READ },
+};
+
/* Invalid software timestamp value for initialization */
#define SDE_REGDMA_SWTS_INVALID (~0)
@@ -332,6 +356,8 @@ static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
SDE_ROTREG_READ(rot->mdss_base,
REGDMA_CSR_REGDMA_FSM_STATE));
+
+ SDEROT_EVTLOG_TOUT_HANDLER("rot", "vbif_dbg_bus", "panic");
}
/**
@@ -434,6 +460,14 @@ static void sde_hw_rotator_setup_timestamp_packet(
SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
+ /*
+ * Must clear secure buffer setting for SW timestamp because
+	 * SW timestamp buffer allocation is always in a non-secure region.
+ */
+ if (ctx->is_secure) {
+ SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
+ SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
+ }
SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
@@ -611,6 +645,9 @@ static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
if (flags & SDE_ROT_FLAG_SECURE_OVERLAY_SESSION) {
SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
ctx->is_secure = true;
+ } else {
+ SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
+ ctx->is_secure = false;
}
/* Update command queue write ptr */
@@ -703,6 +740,11 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
cfg->dst_rect->x | (cfg->dst_rect->y << 16));
+ if (flags & SDE_ROT_FLAG_SECURE_OVERLAY_SESSION)
+ SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
+ else
+ SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
+
/*
* setup Downscale factor
*/
@@ -1468,6 +1510,10 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
&entry->dst_buf);
}
+ SDEROT_EVTLOG(flags, item->input.width, item->input.height,
+ item->output.width, item->output.height,
+ entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr);
+
if (mdata->default_ot_rd_limit) {
struct sde_mdp_set_ot_params ot_params;
@@ -1566,8 +1612,8 @@ static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
if (!ctx) {
SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
entry->item.session_id);
+ return -EINVAL;
}
- WARN_ON(ctx == NULL);
ret = sde_smmu_ctrl(1);
if (IS_ERR_VALUE(ret)) {
@@ -1609,8 +1655,8 @@ static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
if (!ctx) {
SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
entry->item.session_id);
+ return -EINVAL;
}
- WARN_ON(ctx == NULL);
ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
@@ -1661,6 +1707,13 @@ static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
}
+ mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
+ mdata->nrt_vbif_dbg_bus_size =
+ ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
+
+ mdata->regdump = sde_rot_r3_regdump;
+ mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
+
return 0;
}
@@ -1745,8 +1798,10 @@ static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
q_id = ROT_QUEUE_LOW_PRIORITY;
ts = (ts >> SDE_REGDMA_SWTS_SHIFT) &
SDE_REGDMA_SWTS_MASK;
+ } else {
+ SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
+ goto done_isr_handle;
}
-
ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
/*
@@ -1766,6 +1821,7 @@ static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
[ts & SDE_HW_ROT_REGDMA_SEG_MASK];
};
+done_isr_handle:
spin_unlock(&rot->rotisr_lock);
ret = IRQ_HANDLED;
} else if (isr & REGDMA_INT_ERR_MASK) {
@@ -2183,6 +2239,7 @@ int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
clk_set_flags(mgr->rot_clk[mgr->core_clk_idx].clk,
CLKFLAG_NORETAIN_PERIPH);
+ mdata->sde_rot_hw = rot;
return 0;
error_hw_rev_init:
if (rot->irq_num >= 0)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index 6cc975e22cd4..7bbd8aa53342 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -33,6 +33,7 @@
#include "sde_rotator_util.h"
#include "sde_rotator_io_util.h"
#include "sde_rotator_smmu.h"
+#include "sde_rotator_debug.h"
#define SMMU_SDE_ROT_SEC "qcom,smmu_sde_rot_sec"
#define SMMU_SDE_ROT_UNSEC "qcom,smmu_sde_rot_unsec"
@@ -332,6 +333,8 @@ int sde_smmu_ctrl(int enable)
int rc = 0;
mutex_lock(&sde_smmu_ref_cnt_lock);
+ SDEROT_EVTLOG(__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
+ mdata->iommu_attached);
SDEROT_DBG("%pS: enable:%d ref_cnt:%d attach:%d\n",
__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
mdata->iommu_attached);
@@ -407,9 +410,10 @@ static int sde_smmu_fault_handler(struct iommu_domain *domain,
sde_smmu = (struct sde_smmu_client *)token;
- /* TODO: trigger rotator panic and dump */
- SDEROT_ERR("TODO: trigger rotator panic and dump, iova=0x%08lx\n",
- iova);
+ /* trigger rotator panic and dump */
+ SDEROT_ERR("trigger rotator panic and dump, iova=0x%08lx\n", iova);
+
+ sde_rot_dump_panic();
return rc;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
index 44f7af089ee9..eed177ea5bab 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
@@ -586,7 +586,7 @@ void sde_rot_ubwc_data_calc_offset(struct sde_mdp_data *data, u16 x, u16 y,
struct sde_mdp_plane_sizes *ps, struct sde_mdp_format_params *fmt)
{
u16 macro_w, micro_w, micro_h;
- u32 offset;
+ u32 offset = 0;
int ret;
ret = sde_rot_get_ubwc_micro_dim(fmt->format, &micro_w, &micro_h);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 566441e9c546..f0a3875a8f28 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -90,6 +90,7 @@ static void msm_comm_generate_session_error(struct msm_vidc_inst *inst);
static void msm_comm_generate_sys_error(struct msm_vidc_inst *inst);
static void handle_session_error(enum hal_command_response cmd, void *data);
static void msm_vidc_print_running_insts(struct msm_vidc_core *core);
+static void msm_comm_print_debug_info(struct msm_vidc_inst *inst);
bool msm_comm_turbo_session(struct msm_vidc_inst *inst)
{
@@ -905,7 +906,7 @@ static int wait_for_sess_signal_receipt(struct msm_vidc_inst *inst,
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
dprintk(VIDC_ERR,
"sess resp timeout can potentially crash the system\n");
-
+ msm_comm_print_debug_info(inst);
BUG_ON(inst->core->resources.debug_timeout);
rc = -EIO;
} else {
@@ -1601,6 +1602,7 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
struct msm_vidc_cb_cmd_done *response = data;
struct msm_vidc_core *core = NULL;
struct hfi_device *hdev = NULL;
+ struct msm_vidc_inst *inst = NULL;
int rc = 0;
subsystem_crashed("venus");
@@ -1640,6 +1642,19 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
dprintk(VIDC_ERR,
"SYS_ERROR can potentially crash the system\n");
+ /*
+ * For SYS_ERROR, there will not be any inst pointer.
+	 * Just grab one of the insts from the instances list and
+ * use it.
+ */
+
+ mutex_lock(&core->lock);
+ inst = list_first_entry(&core->instances,
+ struct msm_vidc_inst, list);
+ mutex_unlock(&core->lock);
+
+ msm_comm_print_debug_info(inst);
+
BUG_ON(core->resources.debug_timeout);
}
@@ -2450,6 +2465,7 @@ static int msm_comm_session_abort(struct msm_vidc_inst *inst)
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
dprintk(VIDC_ERR,
"ABORT timeout can potentially crash the system\n");
+ msm_comm_print_debug_info(inst);
BUG_ON(inst->core->resources.debug_timeout);
rc = -EBUSY;
@@ -2522,6 +2538,7 @@ int msm_comm_check_core_init(struct msm_vidc_core *core)
{
int rc = 0;
struct hfi_device *hdev;
+ struct msm_vidc_inst *inst = NULL;
mutex_lock(&core->lock);
if (core->state >= VIDC_CORE_INIT_DONE) {
@@ -2540,6 +2557,17 @@ int msm_comm_check_core_init(struct msm_vidc_core *core)
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
dprintk(VIDC_ERR,
"SYS_INIT timeout can potentially crash the system\n");
+ /*
+ * For SYS_INIT, there will not be any inst pointer.
+	 * Just grab one of the insts from the instances list and
+ * use it.
+ */
+ inst = list_first_entry(&core->instances,
+ struct msm_vidc_inst, list);
+
+ mutex_unlock(&core->lock);
+ msm_comm_print_debug_info(inst);
+ mutex_lock(&core->lock);
BUG_ON(core->resources.debug_timeout);
rc = -EIO;
@@ -4017,6 +4045,8 @@ int msm_comm_try_get_prop(struct msm_vidc_inst *inst, enum hal_property ptype,
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
dprintk(VIDC_ERR,
"SESS_PROP timeout can potentially crash the system\n");
+ if (inst->core->resources.debug_timeout)
+ msm_comm_print_debug_info(inst);
BUG_ON(inst->core->resources.debug_timeout);
rc = -ETIMEDOUT;
@@ -5232,3 +5262,92 @@ int msm_vidc_comm_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a)
exit:
return rc;
}
+
+void msm_comm_print_inst_info(struct msm_vidc_inst *inst)
+{
+ struct buffer_info *temp;
+ struct internal_buf *buf;
+ int i = 0;
+ bool is_decode = false;
+ enum vidc_ports port;
+
+ if (!inst) {
+ dprintk(VIDC_ERR, "%s - invalid param %p\n",
+ __func__, inst);
+ return;
+ }
+
+ is_decode = inst->session_type == MSM_VIDC_DECODER;
+ port = is_decode ? OUTPUT_PORT : CAPTURE_PORT;
+ dprintk(VIDC_ERR,
+ "%s session, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n",
+ is_decode ? "Decode" : "Encode", inst->fmts[port]->name,
+ inst->prop.height[port], inst->prop.width[port],
+ inst->prop.fps, inst->prop.bitrate,
+ !inst->bit_depth ? "8" : "10");
+
+ dprintk(VIDC_ERR,
+ "---Buffer details for inst: %p of type: %d---\n",
+ inst, inst->session_type);
+ mutex_lock(&inst->registeredbufs.lock);
+ dprintk(VIDC_ERR, "registered buffer list:\n");
+ list_for_each_entry(temp, &inst->registeredbufs.list, list)
+ for (i = 0; i < temp->num_planes; i++)
+ dprintk(VIDC_ERR,
+ "type: %d plane: %d addr: %pa size: %d\n",
+ temp->type, i, &temp->device_addr[i],
+ temp->size[i]);
+
+ mutex_unlock(&inst->registeredbufs.lock);
+
+ mutex_lock(&inst->scratchbufs.lock);
+ dprintk(VIDC_ERR, "scratch buffer list:\n");
+ list_for_each_entry(buf, &inst->scratchbufs.list, list)
+ dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
+ buf->buffer_type, &buf->handle->device_addr,
+ buf->handle->size);
+ mutex_unlock(&inst->scratchbufs.lock);
+
+ mutex_lock(&inst->persistbufs.lock);
+ dprintk(VIDC_ERR, "persist buffer list:\n");
+ list_for_each_entry(buf, &inst->persistbufs.list, list)
+ dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
+ buf->buffer_type, &buf->handle->device_addr,
+ buf->handle->size);
+ mutex_unlock(&inst->persistbufs.lock);
+
+ mutex_lock(&inst->outputbufs.lock);
+ dprintk(VIDC_ERR, "dpb buffer list:\n");
+ list_for_each_entry(buf, &inst->outputbufs.list, list)
+ dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
+ buf->buffer_type, &buf->handle->device_addr,
+ buf->handle->size);
+ mutex_unlock(&inst->outputbufs.lock);
+}
+
+static void msm_comm_print_debug_info(struct msm_vidc_inst *inst)
+{
+ struct msm_vidc_core *core = NULL;
+ struct msm_vidc_inst *temp = NULL;
+
+ if (!inst || !inst->core) {
+ dprintk(VIDC_ERR, "%s - invalid param %p %p\n",
+ __func__, inst, core);
+ return;
+ }
+ core = inst->core;
+
+ dprintk(VIDC_ERR, "Venus core frequency = %lu",
+ msm_comm_get_clock_rate(core));
+ dprintk(VIDC_ERR, "Printing instance info that caused Error\n");
+ msm_comm_print_inst_info(inst);
+ dprintk(VIDC_ERR, "Printing remaining instances info\n");
+ mutex_lock(&core->lock);
+ list_for_each_entry(temp, &core->instances, list) {
+ /* inst already printed above. Hence don't repeat.*/
+ if (temp == inst)
+ continue;
+ msm_comm_print_inst_info(temp);
+ }
+ mutex_unlock(&core->lock);
+}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 337760508eb1..eac7f658eb31 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -98,4 +98,5 @@ int msm_comm_ctrl_deinit(struct msm_vidc_inst *inst);
void msm_comm_cleanup_internal_buffers(struct msm_vidc_inst *inst);
int msm_vidc_comm_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a);
bool msm_comm_turbo_session(struct msm_vidc_inst *inst);
+void msm_comm_print_inst_info(struct msm_vidc_inst *inst);
#endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 1bdc5bf2c93d..25fccab99fb3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -1238,11 +1238,6 @@ int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
{
struct msm_vidc_core *core = token;
struct msm_vidc_inst *inst;
- struct buffer_info *temp;
- struct internal_buf *buf;
- int i = 0;
- bool is_decode = false;
- enum vidc_ports port;
if (!domain || !core) {
dprintk(VIDC_ERR, "%s - invalid param %pK %pK\n",
@@ -1257,52 +1252,7 @@ int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
mutex_lock(&core->lock);
list_for_each_entry(inst, &core->instances, list) {
- is_decode = inst->session_type == MSM_VIDC_DECODER;
- port = is_decode ? OUTPUT_PORT : CAPTURE_PORT;
- dprintk(VIDC_ERR,
- "%s session, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n",
- is_decode ? "Decode" : "Encode", inst->fmts[port]->name,
- inst->prop.height[port], inst->prop.width[port],
- inst->prop.fps, inst->prop.bitrate,
- !inst->bit_depth ? "8" : "10");
-
- dprintk(VIDC_ERR,
- "---Buffer details for inst: %pK of type: %d---\n",
- inst, inst->session_type);
- mutex_lock(&inst->registeredbufs.lock);
- dprintk(VIDC_ERR, "registered buffer list:\n");
- list_for_each_entry(temp, &inst->registeredbufs.list, list)
- for (i = 0; i < temp->num_planes; i++)
- dprintk(VIDC_ERR,
- "type: %d plane: %d addr: %pa size: %d\n",
- temp->type, i, &temp->device_addr[i],
- temp->size[i]);
-
- mutex_unlock(&inst->registeredbufs.lock);
-
- mutex_lock(&inst->scratchbufs.lock);
- dprintk(VIDC_ERR, "scratch buffer list:\n");
- list_for_each_entry(buf, &inst->scratchbufs.list, list)
- dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
- buf->buffer_type, &buf->handle->device_addr,
- buf->handle->size);
- mutex_unlock(&inst->scratchbufs.lock);
-
- mutex_lock(&inst->persistbufs.lock);
- dprintk(VIDC_ERR, "persist buffer list:\n");
- list_for_each_entry(buf, &inst->persistbufs.list, list)
- dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
- buf->buffer_type, &buf->handle->device_addr,
- buf->handle->size);
- mutex_unlock(&inst->persistbufs.lock);
-
- mutex_lock(&inst->outputbufs.lock);
- dprintk(VIDC_ERR, "dpb buffer list:\n");
- list_for_each_entry(buf, &inst->outputbufs.list, list)
- dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n",
- buf->buffer_type, &buf->handle->device_addr,
- buf->handle->size);
- mutex_unlock(&inst->outputbufs.lock);
+ msm_comm_print_inst_info(inst);
}
core->smmu_fault_handled = true;
mutex_unlock(&core->lock);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h
index 23d84e9f7f7a..f6120dc7d1d5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h
@@ -16,6 +16,7 @@
#define DT_PARSE
#include <linux/of.h>
#include "msm_vidc_resources.h"
+#include "msm_vidc_common.h"
void msm_vidc_free_platform_resources(
struct msm_vidc_platform_resources *res);
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 5a51fbcf9318..e0fb31de38ff 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -119,7 +119,7 @@ static inline bool __core_in_valid_state(struct venus_hfi_device *device)
return device->state != VENUS_STATE_DEINIT;
}
-static void __dump_packet(u8 *packet)
+static void __dump_packet(u8 *packet, enum vidc_msg_prio log_level)
{
u32 c = 0, packet_size = *(u32 *)packet;
const int row_size = 32;
@@ -132,7 +132,7 @@ static void __dump_packet(u8 *packet)
packet_size % row_size : row_size;
hex_dump_to_buffer(packet + c * row_size, bytes_to_read,
row_size, 4, row, sizeof(row), false);
- dprintk(VIDC_PKT, "%s\n", row);
+ dprintk(log_level, "%s\n", row);
}
}
@@ -342,7 +342,7 @@ static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
if (msm_vidc_debug & VIDC_PKT) {
dprintk(VIDC_PKT, "%s: %pK\n", __func__, qinfo);
- __dump_packet(packet);
+ __dump_packet(packet, VIDC_PKT);
}
packet_size_in_words = (*(u32 *)packet) >> 2;
@@ -548,7 +548,7 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
if (msm_vidc_debug & VIDC_PKT) {
dprintk(VIDC_PKT, "%s: %pK\n", __func__, qinfo);
- __dump_packet(packet);
+ __dump_packet(packet, VIDC_PKT);
}
return rc;
@@ -2517,7 +2517,6 @@ static int venus_hfi_session_clean(void *session)
mutex_lock(&device->lock);
__session_clean(sess_close);
- __flush_debug_queue(device, NULL);
mutex_unlock(&device->lock);
return 0;
@@ -3337,6 +3336,7 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
{
bool local_packet = false;
enum vidc_msg_prio log_level = VIDC_FW;
+ unsigned int pending_packet_count = 0;
if (!device) {
dprintk(VIDC_ERR, "%s: Invalid params\n", __func__);
@@ -3361,6 +3361,23 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
log_level = VIDC_ERR;
}
+ /*
+ * In FATAL situation, print all the pending messages in msg
+ * queue. This is useful for debugging. At this time, message
+ * queues may be corrupted. Hence don't trust them and just print
+ * first max_packets packets.
+ */
+
+ if (local_packet) {
+ dprintk(VIDC_ERR,
+ "Printing all pending messages in message Queue\n");
+ while (!__iface_msgq_read(device, packet) &&
+ pending_packet_count < max_packets) {
+ __dump_packet(packet, log_level);
+ pending_packet_count++;
+ }
+ }
+
while (!__iface_dbgq_read(device, packet)) {
struct hfi_msg_sys_coverage_packet *pkt =
(struct hfi_msg_sys_coverage_packet *) packet;
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index 0c5754341991..b6a476cd882d 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -450,10 +450,22 @@ int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
{
int i, ret;
u8 irq_level[wcd9xxx_res->num_irq_regs];
+ struct irq_domain *domain;
+ struct device_node *pnode;
mutex_init(&wcd9xxx_res->irq_lock);
mutex_init(&wcd9xxx_res->nested_irq_lock);
+ pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node);
+ if (unlikely(!pnode))
+ return -EINVAL;
+
+ domain = irq_find_host(pnode);
+ if (unlikely(!domain))
+ return -EINVAL;
+
+ wcd9xxx_res->domain = domain;
+
wcd9xxx_res->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx_res);
if (!wcd9xxx_res->irq) {
pr_warn("%s: irq driver is not yet initialized\n", __func__);
@@ -553,7 +565,6 @@ void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res)
if (wcd9xxx_res->irq) {
disable_irq_wake(wcd9xxx_res->irq);
free_irq(wcd9xxx_res->irq, wcd9xxx_res);
- /* Release parent's of node */
wcd9xxx_res->irq = 0;
wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
}
@@ -626,19 +637,14 @@ wcd9xxx_irq_add_domain(struct device_node *node,
static struct wcd9xxx_irq_drv_data *
wcd9xxx_get_irq_drv_d(const struct wcd9xxx_core_resource *wcd9xxx_res)
{
- struct device_node *pnode;
struct irq_domain *domain;
- pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node);
- /* Shouldn't happen */
- if (unlikely(!pnode))
- return NULL;
+ domain = wcd9xxx_res->domain;
- domain = irq_find_host(pnode);
- if (unlikely(!domain))
+ if (domain)
+ return domain->host_data;
+ else
return NULL;
-
- return (struct wcd9xxx_irq_drv_data *)domain->host_data;
}
static int phyirq_to_virq(struct wcd9xxx_core_resource *wcd9xxx_res, int offset)
@@ -669,10 +675,6 @@ static unsigned int wcd9xxx_irq_get_upstream_irq(
{
struct wcd9xxx_irq_drv_data *data;
- /* Hold parent's of node */
- if (!of_node_get(of_irq_find_parent(wcd9xxx_res->dev->of_node)))
- return -EINVAL;
-
data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
if (!data) {
pr_err("%s: interrupt controller is not registerd\n", __func__);
@@ -686,8 +688,7 @@ static unsigned int wcd9xxx_irq_get_upstream_irq(
static void wcd9xxx_irq_put_upstream_irq(
struct wcd9xxx_core_resource *wcd9xxx_res)
{
- /* Hold parent's of node */
- of_node_put(of_irq_find_parent(wcd9xxx_res->dev->of_node));
+ wcd9xxx_res->domain = NULL;
}
static int wcd9xxx_map_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
diff --git a/drivers/misc/qcom/Kconfig b/drivers/misc/qcom/Kconfig
index a62297e913d2..9c73960f01ff 100644
--- a/drivers/misc/qcom/Kconfig
+++ b/drivers/misc/qcom/Kconfig
@@ -1,6 +1,7 @@
config MSM_QDSP6V2_CODECS
bool "Audio QDSP6V2 APR support"
depends on MSM_SMD
+ select SND_SOC_QDSP6V2
help
Enable Audio codecs with APR IPC protocol support between
application processor and QDSP6 for B-family. APR is
@@ -9,6 +10,7 @@ config MSM_QDSP6V2_CODECS
config MSM_ULTRASOUND
bool "QDSP6V2 HW Ultrasound support"
+ select SND_SOC_QDSP6V2
help
Enable HW Ultrasound support in QDSP6V2.
QDSP6V2 can support HW encoder & decoder and
diff --git a/drivers/misc/qcom/qdsp6v2/aac_in.c b/drivers/misc/qcom/qdsp6v2/aac_in.c
index c9d5dbb0b313..7176c114f85b 100644
--- a/drivers/misc/qcom/qdsp6v2/aac_in.c
+++ b/drivers/misc/qcom/qdsp6v2/aac_in.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -421,6 +421,8 @@ static long aac_in_compat_ioctl(struct file *file, unsigned int cmd,
struct msm_audio_aac_enc_config cfg;
struct msm_audio_aac_enc_config32 cfg_32;
+ memset(&cfg_32, 0, sizeof(cfg_32));
+
cmd = AUDIO_GET_AAC_ENC_CONFIG;
rc = aac_in_ioctl_shared(file, cmd, &cfg);
if (rc) {
diff --git a/drivers/misc/qcom/qdsp6v2/amrnb_in.c b/drivers/misc/qcom/qdsp6v2/amrnb_in.c
index eb92137f0671..1bb441bd2ff4 100644
--- a/drivers/misc/qcom/qdsp6v2/amrnb_in.c
+++ b/drivers/misc/qcom/qdsp6v2/amrnb_in.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2010-2012, 2014 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2012, 2014, 2016 The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -221,6 +222,8 @@ static long amrnb_in_compat_ioctl(struct file *file,
struct msm_audio_amrnb_enc_config_v2 *amrnb_config;
struct msm_audio_amrnb_enc_config_v2_32 amrnb_config_32;
+ memset(&amrnb_config_32, 0, sizeof(amrnb_config_32));
+
amrnb_config =
(struct msm_audio_amrnb_enc_config_v2 *)audio->enc_cfg;
amrnb_config_32.band_mode = amrnb_config->band_mode;
diff --git a/drivers/misc/qcom/qdsp6v2/amrwb_in.c b/drivers/misc/qcom/qdsp6v2/amrwb_in.c
index 4cea3dc63389..4f94ed2673e6 100644
--- a/drivers/misc/qcom/qdsp6v2/amrwb_in.c
+++ b/drivers/misc/qcom/qdsp6v2/amrwb_in.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2011-2012, 2014 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, 2014, 2016 The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -216,6 +217,8 @@ static long amrwb_in_compat_ioctl(struct file *file,
struct msm_audio_amrwb_enc_config *amrwb_config;
struct msm_audio_amrwb_enc_config_32 amrwb_config_32;
+ memset(&amrwb_config_32, 0, sizeof(amrwb_config_32));
+
amrwb_config =
(struct msm_audio_amrwb_enc_config *)audio->enc_cfg;
amrwb_config_32.band_mode = amrwb_config->band_mode;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_alac.c b/drivers/misc/qcom/qdsp6v2/audio_alac.c
index 9748db30fac3..3de204c1ebc8 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_alac.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_alac.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -196,6 +196,8 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd,
struct msm_audio_alac_config *alac_config;
struct msm_audio_alac_config_32 alac_config_32;
+ memset(&alac_config_32, 0, sizeof(alac_config_32));
+
alac_config = (struct msm_audio_alac_config *)audio->codec_cfg;
alac_config_32.frameLength = alac_config->frameLength;
alac_config_32.compatVersion =
diff --git a/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c b/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c
index ee5991177687..bfd730017d41 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -205,6 +205,10 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd,
struct msm_audio_amrwbplus_config_v2 *amrwbplus_config;
struct msm_audio_amrwbplus_config_v2_32
amrwbplus_config_32;
+
+ memset(&amrwbplus_config_32, 0,
+ sizeof(amrwbplus_config_32));
+
amrwbplus_config =
(struct msm_audio_amrwbplus_config_v2 *)
audio->codec_cfg;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_ape.c b/drivers/misc/qcom/qdsp6v2/audio_ape.c
index b4c2ddb947de..670ec555b8c6 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_ape.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_ape.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -180,6 +180,8 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd,
struct msm_audio_ape_config *ape_config;
struct msm_audio_ape_config_32 ape_config_32;
+ memset(&ape_config_32, 0, sizeof(ape_config_32));
+
ape_config = (struct msm_audio_ape_config *)audio->codec_cfg;
ape_config_32.compatibleVersion = ape_config->compatibleVersion;
ape_config_32.compressionLevel =
diff --git a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
index 3a8834446ea4..3632fc2b961b 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
@@ -630,6 +630,8 @@ static long audio_effects_compat_ioctl(struct file *file, unsigned int cmd,
case AUDIO_EFFECTS_GET_BUF_AVAIL32: {
struct msm_hwacc_buf_avail32 buf_avail;
+ memset(&buf_avail, 0, sizeof(buf_avail));
+
buf_avail.input_num_avail = atomic_read(&effects->in_count);
buf_avail.output_num_avail = atomic_read(&effects->out_count);
pr_debug("%s: write buf avail: %d, read buf avail: %d\n",
diff --git a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
index a15fd87c7be8..91bbba176dfd 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -302,6 +302,8 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd,
struct msm_audio_aac_config *aac_config;
struct msm_audio_aac_config32 aac_config_32;
+ memset(&aac_config_32, 0, sizeof(aac_config_32));
+
aac_config = (struct msm_audio_aac_config *)audio->codec_cfg;
aac_config_32.format = aac_config->format;
aac_config_32.audio_object = aac_config->audio_object;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils.c b/drivers/misc/qcom/qdsp6v2/audio_utils.c
index cad0220a4960..065b426ca6d0 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,15 @@
#include <asm/ioctls.h>
#include "audio_utils.h"
+/*
+ * Define maximum buffer size. Below values are chosen considering the higher
+ * values used among all native drivers.
+ */
+#define MAX_FRAME_SIZE 1536
+#define MAX_FRAMES 5
+#define META_SIZE (sizeof(struct meta_out_dsp))
+#define MAX_BUFFER_SIZE (1 + ((MAX_FRAME_SIZE + META_SIZE) * MAX_FRAMES))
+
static int audio_in_pause(struct q6audio_in *audio)
{
int rc;
@@ -329,6 +338,10 @@ long audio_in_ioctl(struct file *file,
rc = -EINVAL;
break;
}
+ if (cfg.buffer_size > MAX_BUFFER_SIZE) {
+ rc = -EINVAL;
+ break;
+ }
audio->str_cfg.buffer_size = cfg.buffer_size;
audio->str_cfg.buffer_count = cfg.buffer_count;
if (audio->opened) {
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
index 5c23da7f8857..f7ad8f61f2e7 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
@@ -1936,6 +1936,7 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd,
case AUDIO_GET_CONFIG_32: {
struct msm_audio_config32 cfg_32;
mutex_lock(&audio->lock);
+ memset(&cfg_32, 0, sizeof(cfg_32));
cfg_32.buffer_size = audio->pcm_cfg.buffer_size;
cfg_32.buffer_count = audio->pcm_cfg.buffer_count;
cfg_32.channel_count = audio->pcm_cfg.channel_count;
@@ -2032,6 +2033,7 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd,
audio->buf_cfg.frames_per_buf);
mutex_lock(&audio->lock);
+ memset(&cfg_32, 0, sizeof(cfg_32));
cfg_32.meta_info_enable = audio->buf_cfg.meta_info_enable;
cfg_32.frames_per_buf = audio->buf_cfg.frames_per_buf;
if (copy_to_user((void *)arg, &cfg_32,
diff --git a/drivers/misc/qcom/qdsp6v2/audio_wmapro.c b/drivers/misc/qcom/qdsp6v2/audio_wmapro.c
index 8d96b99d8f84..21ad33b7fd5d 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_wmapro.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_wmapro.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -217,6 +217,8 @@ static long audio_compat_ioctl(struct file *file, unsigned int cmd,
struct msm_audio_wmapro_config *wmapro_config;
struct msm_audio_wmapro_config32 wmapro_config_32;
+ memset(&wmapro_config_32, 0, sizeof(wmapro_config_32));
+
wmapro_config =
(struct msm_audio_wmapro_config *)audio->codec_cfg;
wmapro_config_32.armdatareqthr = wmapro_config->armdatareqthr;
diff --git a/drivers/misc/qcom/qdsp6v2/evrc_in.c b/drivers/misc/qcom/qdsp6v2/evrc_in.c
index 2f931be226c6..aab8e27c0094 100644
--- a/drivers/misc/qcom/qdsp6v2/evrc_in.c
+++ b/drivers/misc/qcom/qdsp6v2/evrc_in.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -224,6 +224,8 @@ static long evrc_in_compat_ioctl(struct file *file,
struct msm_audio_evrc_enc_config32 cfg_32;
struct msm_audio_evrc_enc_config *enc_cfg;
+ memset(&cfg_32, 0, sizeof(cfg_32));
+
enc_cfg = audio->enc_cfg;
cfg_32.cdma_rate = enc_cfg->cdma_rate;
cfg_32.min_bit_rate = enc_cfg->min_bit_rate;
diff --git a/drivers/misc/qcom/qdsp6v2/qcelp_in.c b/drivers/misc/qcom/qdsp6v2/qcelp_in.c
index b5d5ad113722..aabf5d33a507 100644
--- a/drivers/misc/qcom/qdsp6v2/qcelp_in.c
+++ b/drivers/misc/qcom/qdsp6v2/qcelp_in.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -225,6 +225,8 @@ static long qcelp_in_compat_ioctl(struct file *file,
struct msm_audio_qcelp_enc_config32 cfg_32;
struct msm_audio_qcelp_enc_config *enc_cfg;
+ memset(&cfg_32, 0, sizeof(cfg_32));
+
enc_cfg = (struct msm_audio_qcelp_enc_config *)audio->enc_cfg;
cfg_32.cdma_rate = enc_cfg->cdma_rate;
cfg_32.min_bit_rate = enc_cfg->min_bit_rate;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index a15211fd33aa..bd554667de96 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1607,6 +1607,7 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
list_for_each_entry(ptr_svc,
&qseecom.registered_listener_list_head, list) {
if (ptr_svc->svc.listener_id == lstnr) {
+ ptr_svc->listener_in_use = true;
ptr_svc->rcv_req_flag = 1;
wake_up_interruptible(&ptr_svc->rcv_req_wq);
break;
@@ -1689,6 +1690,7 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
(const void *)&send_data_rsp,
sizeof(send_data_rsp), resp,
sizeof(*resp));
+ ptr_svc->listener_in_use = false;
if (ret) {
pr_err("scm_call() failed with err: %d (app_id = %d)\n",
ret, data->client.app_id);
@@ -1712,6 +1714,101 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
return ret;
}
+int __qseecom_process_reentrancy_blocked_on_listener(
+ struct qseecom_command_scm_resp *resp,
+ struct qseecom_registered_app_list *ptr_app,
+ struct qseecom_dev_handle *data)
+{
+ struct qseecom_registered_listener_list *list_ptr;
+ int ret = 0;
+ struct qseecom_continue_blocked_request_ireq ireq;
+ struct qseecom_command_scm_resp continue_resp;
+ sigset_t new_sigset, old_sigset;
+ unsigned long flags;
+ bool found_app = false;
+
+ if (!resp || !data) {
+ pr_err("invalid resp or data pointer\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* find app_id & img_name from list */
+ if (!ptr_app) {
+ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+ list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+ list) {
+ if ((ptr_app->app_id == data->client.app_id) &&
+ (!strcmp(ptr_app->app_name,
+ data->client.app_name))) {
+ found_app = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+ flags);
+ if (!found_app) {
+ pr_err("app_id %d (%s) is not found\n",
+ data->client.app_id,
+ (char *)data->client.app_name);
+ ret = -ENOENT;
+ goto exit;
+ }
+ }
+
+ list_ptr = __qseecom_find_svc(resp->data);
+ if (!list_ptr) {
+ pr_err("Invalid listener ID\n");
+ ret = -ENODATA;
+ goto exit;
+ }
+ pr_debug("lsntr %d in_use = %d\n",
+ resp->data, list_ptr->listener_in_use);
+ ptr_app->blocked_on_listener_id = resp->data;
+ /* sleep until listener is available */
+ do {
+ qseecom.app_block_ref_cnt++;
+ ptr_app->app_blocked = true;
+ sigfillset(&new_sigset);
+ sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+ mutex_unlock(&app_access_lock);
+ do {
+ if (!wait_event_freezable(
+ list_ptr->listener_block_app_wq,
+ !list_ptr->listener_in_use)) {
+ break;
+ }
+ } while (1);
+ mutex_lock(&app_access_lock);
+ sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+ ptr_app->app_blocked = false;
+ qseecom.app_block_ref_cnt--;
+ } while (list_ptr->listener_in_use == true);
+ ptr_app->blocked_on_listener_id = 0;
+ /* notify the blocked app that listener is available */
+ pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
+ resp->data, data->client.app_id,
+ data->client.app_name);
+ ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+ ireq.app_id = data->client.app_id;
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(ireq),
+ &continue_resp, sizeof(continue_resp));
+ if (ret) {
+ pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
+ data->client.app_id,
+ data->client.app_name, ret);
+ goto exit;
+ }
+ /*
+ * After TZ app is unblocked, then continue to next case
+ * for incomplete request processing
+ */
+ resp->result = QSEOS_RESULT_INCOMPLETE;
+exit:
+ return ret;
+}
+
static int __qseecom_reentrancy_process_incomplete_cmd(
struct qseecom_dev_handle *data,
struct qseecom_command_scm_resp *resp)
@@ -1725,7 +1822,7 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
sigset_t new_sigset;
sigset_t old_sigset;
- while (resp->result == QSEOS_RESULT_INCOMPLETE) {
+ while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
lstnr = resp->data;
/*
* Wake up blocking lsitener service with the lstnr id
@@ -1812,16 +1909,37 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
if (ret) {
pr_err("scm_call() failed with err: %d (app_id = %d)\n",
ret, data->client.app_id);
- if (lstnr == RPMB_SERVICE)
- __qseecom_disable_clk(CLK_QSEE);
- return ret;
+ goto exit;
}
- if ((resp->result != QSEOS_RESULT_SUCCESS) &&
- (resp->result != QSEOS_RESULT_INCOMPLETE)) {
+
+ switch (resp->result) {
+ case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+ pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
+ lstnr, data->client.app_id, resp->data);
+ if (lstnr == resp->data) {
+ pr_err("lstnr %d should not be blocked!\n",
+ lstnr);
+ ret = -EINVAL;
+ goto exit;
+ }
+ ret = __qseecom_process_reentrancy_blocked_on_listener(
+ resp, NULL, data);
+ if (ret) {
+ pr_err("failed to process App(%d) %s blocked on listener %d\n",
+ data->client.app_id,
+ data->client.app_name, resp->data);
+ goto exit;
+ }
+ case QSEOS_RESULT_SUCCESS:
+ case QSEOS_RESULT_INCOMPLETE:
+ break;
+ default:
pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
resp->result, data->client.app_id, lstnr);
ret = -EINVAL;
+ goto exit;
}
+exit:
if (lstnr == RPMB_SERVICE)
__qseecom_disable_clk(CLK_QSEE);
@@ -2669,63 +2787,21 @@ int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
struct qseecom_registered_app_list *ptr_app,
struct qseecom_dev_handle *data)
{
- struct qseecom_registered_listener_list *list_ptr;
int ret = 0;
- struct qseecom_continue_blocked_request_ireq ireq;
- struct qseecom_command_scm_resp continue_resp;
- sigset_t new_sigset, old_sigset;
switch (resp->result) {
case QSEOS_RESULT_BLOCKED_ON_LISTENER:
- pr_debug("App(%d) %s is blocked on listener %d\n",
+ pr_warn("App(%d) %s is blocked on listener %d\n",
data->client.app_id, data->client.app_name,
resp->data);
- list_ptr = __qseecom_find_svc(resp->data);
- if (!list_ptr) {
- pr_err("Invalid listener ID\n");
- return -ENODATA;
- }
- ptr_app->blocked_on_listener_id = resp->data;
- list_ptr->listener_in_use = true;
- /* sleep until listener is available */
- while (list_ptr->listener_in_use == true) {
- qseecom.app_block_ref_cnt++;
- ptr_app->app_blocked = true;
- sigfillset(&new_sigset);
- sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
- mutex_unlock(&app_access_lock);
- do {
- if (!wait_event_freezable(
- list_ptr->listener_block_app_wq,
- !list_ptr->listener_in_use)) {
- break;
- }
- } while (1);
- mutex_lock(&app_access_lock);
- sigprocmask(SIG_SETMASK, &old_sigset, NULL);
- ptr_app->app_blocked = false;
- qseecom.app_block_ref_cnt--;
- }
- /* notify the blocked app that listener is available */
- pr_debug("Lsntr %d is available, unblock app(%d) %s in TZ\n",
- resp->data, data->client.app_id,
- data->client.app_name);
- ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
- ireq.app_id = data->client.app_id;
- ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
- &ireq, sizeof(ireq),
- &continue_resp, sizeof(continue_resp));
+ ret = __qseecom_process_reentrancy_blocked_on_listener(
+ resp, ptr_app, data);
if (ret) {
- pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
- data->client.app_id,
- data->client.app_name, ret);
+ pr_err("failed to process App(%d) %s is blocked on listener %d\n",
+ data->client.app_id, data->client.app_name, resp->data);
return ret;
}
- /*
- * After TZ app is unblocked, then continue to next case
- * for incomplete request processing
- */
- resp->result = QSEOS_RESULT_INCOMPLETE;
+
case QSEOS_RESULT_INCOMPLETE:
qseecom.app_block_ref_cnt++;
ptr_app->app_blocked = true;
@@ -3249,6 +3325,7 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
}
}
len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
+ sg = sg_ptr->sgl;
goto cleanup;
}
sg = sg_ptr->sgl;
@@ -4015,12 +4092,21 @@ int qseecom_start_app(struct qseecom_handle **handle,
data->client.user_virt_sb_base = 0;
data->client.ihandle = NULL;
+ /* Allocate sglistinfo buffer for kernel client */
+ data->sglistinfo_ptr = kzalloc(SGLISTINFO_TABLE_SIZE, GFP_KERNEL);
+ if (!(data->sglistinfo_ptr)) {
+ kfree(data);
+ kfree(*handle);
+ *handle = NULL;
+ return -ENOMEM;
+ }
init_waitqueue_head(&data->abort_wq);
data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
ION_HEAP(ION_QSECOM_HEAP_ID), 0);
if (IS_ERR_OR_NULL(data->client.ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
+ kfree(data->sglistinfo_ptr);
kfree(data);
kfree(*handle);
*handle = NULL;
@@ -4118,6 +4204,7 @@ int qseecom_start_app(struct qseecom_handle **handle,
return 0;
err:
+ kfree(data->sglistinfo_ptr);
kfree(data);
kfree(*handle);
*handle = NULL;
@@ -4165,6 +4252,7 @@ int qseecom_shutdown_app(struct qseecom_handle **handle)
mutex_unlock(&app_access_lock);
if (ret == 0) {
+ kzfree(data->sglistinfo_ptr);
kzfree(data);
kzfree(*handle);
kzfree(kclient);
diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c
new file mode 100644
index 000000000000..185c69c9738a
--- /dev/null
+++ b/drivers/misc/uid_stat.c
@@ -0,0 +1,153 @@
+/* drivers/misc/uid_stat.c
+ *
+ * Copyright (C) 2008 - 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/uid_stat.h>
+#include <net/activity_stats.h>
+
+static DEFINE_SPINLOCK(uid_lock);
+static LIST_HEAD(uid_list);
+static struct proc_dir_entry *parent;
+
+struct uid_stat {
+ struct list_head link;
+ uid_t uid;
+ atomic_t tcp_rcv;
+ atomic_t tcp_snd;
+};
+
+static struct uid_stat *find_uid_stat(uid_t uid) {
+ struct uid_stat *entry;
+
+ list_for_each_entry(entry, &uid_list, link) {
+ if (entry->uid == uid) {
+ return entry;
+ }
+ }
+ return NULL;
+}
+
+static int uid_stat_atomic_int_show(struct seq_file *m, void *v)
+{
+ unsigned int bytes;
+ atomic_t *counter = m->private;
+
+ bytes = (unsigned int) (atomic_read(counter) + INT_MIN);
+ seq_printf(m, "%u\n", bytes);
+ return seq_has_overflowed(m) ? -ENOSPC : 0;
+}
+
+static int uid_stat_read_atomic_int_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uid_stat_atomic_int_show, PDE_DATA(inode));
+}
+
+static const struct file_operations uid_stat_read_atomic_int_fops = {
+ .open = uid_stat_read_atomic_int_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/* Create a new entry for tracking the specified uid. */
+static struct uid_stat *create_stat(uid_t uid) {
+ struct uid_stat *new_uid;
+ /* Create the uid stat struct and append it to the list. */
+ new_uid = kmalloc(sizeof(struct uid_stat), GFP_ATOMIC);
+ if (!new_uid)
+ return NULL;
+
+ new_uid->uid = uid;
+ /* Counters start at INT_MIN, so we can track 4GB of network traffic. */
+ atomic_set(&new_uid->tcp_rcv, INT_MIN);
+ atomic_set(&new_uid->tcp_snd, INT_MIN);
+
+ list_add_tail(&new_uid->link, &uid_list);
+ return new_uid;
+}
+
+static void create_stat_proc(struct uid_stat *new_uid)
+{
+ char uid_s[32];
+ struct proc_dir_entry *entry;
+ sprintf(uid_s, "%d", new_uid->uid);
+ entry = proc_mkdir(uid_s, parent);
+
+ /* Keep reference to uid_stat so we know what uid to read stats from. */
+ proc_create_data("tcp_snd", S_IRUGO, entry,
+ &uid_stat_read_atomic_int_fops, &new_uid->tcp_snd);
+
+ proc_create_data("tcp_rcv", S_IRUGO, entry,
+ &uid_stat_read_atomic_int_fops, &new_uid->tcp_rcv);
+}
+
+static struct uid_stat *find_or_create_uid_stat(uid_t uid)
+{
+ struct uid_stat *entry;
+ unsigned long flags;
+ spin_lock_irqsave(&uid_lock, flags);
+ entry = find_uid_stat(uid);
+ if (entry) {
+ spin_unlock_irqrestore(&uid_lock, flags);
+ return entry;
+ }
+ entry = create_stat(uid);
+ spin_unlock_irqrestore(&uid_lock, flags);
+ if (entry)
+ create_stat_proc(entry);
+ return entry;
+}
+
+int uid_stat_tcp_snd(uid_t uid, int size) {
+ struct uid_stat *entry;
+ activity_stats_update();
+ entry = find_or_create_uid_stat(uid);
+ if (!entry)
+ return -1;
+ atomic_add(size, &entry->tcp_snd);
+ return 0;
+}
+
+int uid_stat_tcp_rcv(uid_t uid, int size) {
+ struct uid_stat *entry;
+ activity_stats_update();
+ entry = find_or_create_uid_stat(uid);
+ if (!entry)
+ return -1;
+ atomic_add(size, &entry->tcp_rcv);
+ return 0;
+}
+
+static int __init uid_stat_init(void)
+{
+ parent = proc_mkdir("uid_stat", NULL);
+ if (!parent) {
+ pr_err("uid_stat: failed to create proc entry\n");
+ return -1;
+ }
+ return 0;
+}
+
+__initcall(uid_stat_init);
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 5562308699bc..6142ec1b9dfb 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -68,3 +68,15 @@ config MMC_TEST
This driver is only of interest to those developing or
testing a host driver. Most people should say N here.
+
+config MMC_SIMULATE_MAX_SPEED
+ bool "Turn on maximum speed control per block device"
+ depends on MMC_BLOCK
+ help
+ Say Y here to enable MMC device speed limiting. Used to test and
+ simulate the behavior of the system when confronted with a slow MMC.
+
+ Enables max_read_speed, max_write_speed and cache_size attributes to
+ control the write or read maximum KB/second speed behaviors.
+
+ If unsure, say N here.
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b116122c5767..7547463928d6 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -38,7 +38,6 @@
#include <linux/pm_runtime.h>
#include <linux/ioprio.h>
-#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>
#include <linux/mmc/ioctl.h>
@@ -329,6 +328,249 @@ out:
return ret;
}
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+
+static int max_read_speed, max_write_speed, cache_size = 4;
+
+module_param(max_read_speed, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(max_read_speed, "maximum KB/s read speed 0=off");
+module_param(max_write_speed, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(max_write_speed, "maximum KB/s write speed 0=off");
+module_param(cache_size, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(cache_size, "MB high speed memory or SLC cache");
+
+/*
+ * helper macros and expectations:
+ * size - unsigned long number of bytes
+ * jiffies - unsigned long HZ timestamp difference
+ * speed - unsigned KB/s transfer rate
+ */
+#define size_and_speed_to_jiffies(size, speed) \
+ ((size) * HZ / (speed) / 1024UL)
+#define jiffies_and_speed_to_size(jiffies, speed) \
+ (((speed) * (jiffies) * 1024UL) / HZ)
+#define jiffies_and_size_to_speed(jiffies, size) \
+ ((size) * HZ / (jiffies) / 1024UL)
+
+/* Limits to report warning */
+/* jiffies_and_size_to_speed(10*HZ, queue_max_hw_sectors(q) * 512UL) ~ 25 */
+#define MIN_SPEED(q) 250 /* 10 times faster than a floppy disk */
+#define MAX_SPEED(q) jiffies_and_size_to_speed(1, queue_max_sectors(q) * 512UL)
+
+#define speed_valid(speed) ((speed) > 0)
+
+static const char off[] = "off\n";
+
+static int max_speed_show(int speed, char *buf)
+{
+ if (speed)
+ return scnprintf(buf, PAGE_SIZE, "%uKB/s\n", speed);
+ else
+ return scnprintf(buf, PAGE_SIZE, off);
+}
+
+static int max_speed_store(const char *buf, struct request_queue *q)
+{
+ unsigned int limit, set = 0;
+
+ if (!strncasecmp(off, buf, sizeof(off) - 2))
+ return set;
+ if (kstrtouint(buf, 0, &set) || (set > INT_MAX))
+ return -EINVAL;
+ if (set == 0)
+ return set;
+ limit = MAX_SPEED(q);
+ if (set > limit)
+ pr_warn("max speed %u ineffective above %u\n", set, limit);
+ limit = MIN_SPEED(q);
+ if (set < limit)
+ pr_warn("max speed %u painful below %u\n", set, limit);
+ return set;
+}
+
+static ssize_t max_write_speed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ int ret = max_speed_show(atomic_read(&md->queue.max_write_speed), buf);
+
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t max_write_speed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ int set = max_speed_store(buf, md->queue.queue);
+
+ if (set < 0) {
+ mmc_blk_put(md);
+ return set;
+ }
+
+ atomic_set(&md->queue.max_write_speed, set);
+ mmc_blk_put(md);
+ return count;
+}
+
+static const DEVICE_ATTR(max_write_speed, S_IRUGO | S_IWUSR,
+ max_write_speed_show, max_write_speed_store);
+
+static ssize_t max_read_speed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ int ret = max_speed_show(atomic_read(&md->queue.max_read_speed), buf);
+
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t max_read_speed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ int set = max_speed_store(buf, md->queue.queue);
+
+ if (set < 0) {
+ mmc_blk_put(md);
+ return set;
+ }
+
+ atomic_set(&md->queue.max_read_speed, set);
+ mmc_blk_put(md);
+ return count;
+}
+
+static const DEVICE_ATTR(max_read_speed, S_IRUGO | S_IWUSR,
+ max_read_speed_show, max_read_speed_store);
+
+static ssize_t cache_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_queue *mq = &md->queue;
+ int cache_size = atomic_read(&mq->cache_size);
+ int ret;
+
+ if (!cache_size)
+ ret = scnprintf(buf, PAGE_SIZE, off);
+ else {
+ int speed = atomic_read(&mq->max_write_speed);
+
+ if (!speed_valid(speed))
+ ret = scnprintf(buf, PAGE_SIZE, "%uMB\n", cache_size);
+ else { /* We accept race between cache_jiffies and cache_used */
+ unsigned long size = jiffies_and_speed_to_size(
+ jiffies - mq->cache_jiffies, speed);
+ long used = atomic_long_read(&mq->cache_used);
+
+ if (size >= used)
+ size = 0;
+ else
+ size = (used - size) * 100 / cache_size
+ / 1024UL / 1024UL;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%uMB %lu%% used\n",
+ cache_size, size);
+ }
+ }
+
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t cache_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mmc_blk_data *md;
+ unsigned int set = 0;
+
+ if (strncasecmp(off, buf, sizeof(off) - 2)
+ && (kstrtouint(buf, 0, &set) || (set > INT_MAX)))
+ return -EINVAL;
+
+ md = mmc_blk_get(dev_to_disk(dev));
+ atomic_set(&md->queue.cache_size, set);
+ mmc_blk_put(md);
+ return count;
+}
+
+static const DEVICE_ATTR(cache_size, S_IRUGO | S_IWUSR,
+ cache_size_show, cache_size_store);
+
+/* correct for write-back */
+static long mmc_blk_cache_used(struct mmc_queue *mq, unsigned long waitfor)
+{
+ long used = 0;
+ int speed = atomic_read(&mq->max_write_speed);
+
+ if (speed_valid(speed)) {
+ unsigned long size = jiffies_and_speed_to_size(
+ waitfor - mq->cache_jiffies, speed);
+ used = atomic_long_read(&mq->cache_used);
+
+ if (size >= used)
+ used = 0;
+ else
+ used -= size;
+ }
+
+ atomic_long_set(&mq->cache_used, used);
+ mq->cache_jiffies = waitfor;
+
+ return used;
+}
+
+static void mmc_blk_simulate_delay(
+ struct mmc_queue *mq,
+ struct request *req,
+ unsigned long waitfor)
+{
+ int max_speed;
+
+ if (!req)
+ return;
+
+ max_speed = (rq_data_dir(req) == READ)
+ ? atomic_read(&mq->max_read_speed)
+ : atomic_read(&mq->max_write_speed);
+ if (speed_valid(max_speed)) {
+ unsigned long bytes = blk_rq_bytes(req);
+
+ if (rq_data_dir(req) != READ) {
+ int cache_size = atomic_read(&mq->cache_size);
+
+ if (cache_size) {
+ unsigned long size = cache_size * 1024L * 1024L;
+ long used = mmc_blk_cache_used(mq, waitfor);
+
+ used += bytes;
+ atomic_long_set(&mq->cache_used, used);
+ bytes = 0;
+ if (used > size)
+ bytes = used - size;
+ }
+ }
+ waitfor += size_and_speed_to_jiffies(bytes, max_speed);
+ if (time_is_after_jiffies(waitfor)) {
+ long msecs = jiffies_to_msecs(waitfor - jiffies);
+
+ if (likely(msecs > 0))
+ msleep(msecs);
+ }
+ }
+}
+
+#else
+
+#define mmc_blk_simulate_delay(mq, req, waitfor)
+
+#endif
static ssize_t
num_wr_reqs_to_start_packing_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1870,6 +2112,23 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
}
end_req:
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+ else if (atomic_read(&mq->cache_size)) {
+ long used = mmc_blk_cache_used(mq, jiffies);
+
+ if (used) {
+ int speed = atomic_read(&mq->max_write_speed);
+
+ if (speed_valid(speed)) {
+ unsigned long msecs = jiffies_to_msecs(
+ size_and_speed_to_jiffies(
+ used, speed));
+ if (msecs)
+ msleep(msecs);
+ }
+ }
+ }
+#endif
blk_end_request_all(req, ret);
return ret ? 0 : 1;
@@ -3346,6 +3605,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
struct mmc_async_req *areq;
const u8 packed_nr = 2;
u8 reqs = 0;
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+ unsigned long waitfor = jiffies;
+#endif
if (!rqc && !mq->mqrq_prev->req)
return 0;
@@ -3396,6 +3658,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
*/
mmc_blk_reset_success(md, type);
+ mmc_blk_simulate_delay(mq, rqc, waitfor);
+
if (mmc_packed_cmd(mq_rq->cmd_type)) {
ret = mmc_blk_end_packed_req(mq_rq);
break;
@@ -3968,6 +4232,14 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
card->ext_csd.boot_ro_lockable)
device_remove_file(disk_to_dev(md->disk),
&md->power_ro_lock);
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+ device_remove_file(disk_to_dev(md->disk),
+ &dev_attr_max_write_speed);
+ device_remove_file(disk_to_dev(md->disk),
+ &dev_attr_max_read_speed);
+ device_remove_file(disk_to_dev(md->disk),
+ &dev_attr_cache_size);
+#endif
del_gendisk(md->disk);
}
@@ -4003,6 +4275,24 @@ static int mmc_add_disk(struct mmc_blk_data *md)
ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
if (ret)
goto force_ro_fail;
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+ atomic_set(&md->queue.max_write_speed, max_write_speed);
+ ret = device_create_file(disk_to_dev(md->disk),
+ &dev_attr_max_write_speed);
+ if (ret)
+ goto max_write_speed_fail;
+ atomic_set(&md->queue.max_read_speed, max_read_speed);
+ ret = device_create_file(disk_to_dev(md->disk),
+ &dev_attr_max_read_speed);
+ if (ret)
+ goto max_read_speed_fail;
+ atomic_set(&md->queue.cache_size, cache_size);
+ atomic_long_set(&md->queue.cache_used, 0);
+ md->queue.cache_jiffies = jiffies;
+ ret = device_create_file(disk_to_dev(md->disk), &dev_attr_cache_size);
+ if (ret)
+ goto cache_size_fail;
+#endif
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
card->ext_csd.boot_ro_lockable) {
@@ -4056,6 +4346,14 @@ no_pack_for_random_fails:
num_wr_reqs_to_start_packing_fail:
device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
power_ro_lock_fail:
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+ device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size);
+cache_size_fail:
+ device_remove_file(disk_to_dev(md->disk), &dev_attr_max_read_speed);
+max_read_speed_fail:
+ device_remove_file(disk_to_dev(md->disk), &dev_attr_max_write_speed);
+max_write_speed_fail:
+#endif
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
del_gendisk(md->disk);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d8a33ac55c91..253979b51c84 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -77,6 +77,14 @@ struct mmc_queue {
int (*err_check_fn) (struct mmc_card *, struct mmc_async_req *);
void (*packed_test_fn) (struct request_queue *, struct mmc_queue_req *);
void (*cmdq_shutdown)(struct mmc_queue *);
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+ atomic_t max_write_speed;
+ atomic_t max_read_speed;
+ atomic_t cache_size;
+ /* i/o tracking */
+ atomic_long_t cache_used;
+ unsigned long cache_jiffies;
+#endif
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index b76bb7a74049..3762f698e1ee 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -33,6 +33,7 @@
#include <linux/pm.h>
#include <linux/jiffies.h>
+#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>
#include <linux/mmc/card.h>
@@ -51,6 +52,11 @@
#include "sd_ops.h"
#include "sdio_ops.h"
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_erase_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_erase_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_rw_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_rw_end);
+
/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 895dfbb7fbf5..e7ebc3cb8eda 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -22,6 +22,7 @@
#include "core.h"
#include "bus.h"
+#include "host.h"
#include "sd.h"
#include "sdio_bus.h"
#include "mmc_ops.h"
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 0568769fd94a..be3ccf2536d9 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -3707,6 +3707,19 @@ static void sdhci_msm_init(struct sdhci_host *host)
msm_host->pdata->pm_qos_data.latency);
}
+static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
+ u32 max_curr = 0;
+
+ if (curr_slot && curr_slot->vdd_data)
+ max_curr = curr_slot->vdd_data->hpm_uA;
+
+ return max_curr;
+}
+
static struct sdhci_ops sdhci_msm_ops = {
.crypto_engine_cfg = sdhci_msm_ice_cfg,
.crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
@@ -3732,6 +3745,7 @@ static struct sdhci_ops sdhci_msm_ops = {
.init = sdhci_msm_init,
.pre_req = sdhci_msm_pre_req,
.post_req = sdhci_msm_post_req,
+ .get_current_limit = sdhci_msm_get_current_limit,
};
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 14ed8e775e6a..0542ba51445f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2337,9 +2337,10 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
switch (host->timing) {
/* HS400 tuning is done in HS200 mode */
case MMC_TIMING_MMC_HS400:
- err = -EINVAL;
- goto out_unlock;
-
+ if (!(mmc->caps2 & MMC_CAP2_HS400_POST_TUNING)) {
+ err = -EINVAL;
+ goto out_unlock;
+ } /* else fall through: HS400 tuning is done in HS200 mode */
case MMC_TIMING_MMC_HS200:
/*
* Periodic re-tuning for HS400 is not expected to be needed, so
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index e4cf44b1e815..282aec4860eb 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -150,7 +150,7 @@ config PPPOL2TP
if TTY
config PPPOLAC
- bool "PPP on L2TP Access Concentrator"
+ tristate "PPP on L2TP Access Concentrator"
depends on PPP && INET
help
L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
@@ -159,7 +159,7 @@ config PPPOLAC
fairly simple and suited for clients.
config PPPOPNS
- bool "PPP on PPTP Network Server"
+ tristate "PPP on PPTP Network Server"
depends on PPP && INET
help
PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
index 1b8180cc1d4d..0184c96579e9 100644
--- a/drivers/net/ppp/pppolac.c
+++ b/drivers/net/ppp/pppolac.c
@@ -206,11 +206,10 @@ static void pppolac_xmit_core(struct work_struct *delivery_work)
while ((skb = skb_dequeue(&delivery_queue))) {
struct sock *sk_udp = skb->sk;
struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
- struct msghdr msg = {
- .msg_iov = (struct iovec *)&iov,
- .msg_iovlen = 1,
- .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
- };
+ struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
+
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
+ skb->len);
sk_udp->sk_prot->sendmsg(sk_udp, &msg, skb->len);
kfree_skb(skb);
}
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
index 568bb45cfeac..d9e06039794e 100644
--- a/drivers/net/ppp/pppopns.c
+++ b/drivers/net/ppp/pppopns.c
@@ -189,11 +189,10 @@ static void pppopns_xmit_core(struct work_struct *delivery_work)
while ((skb = skb_dequeue(&delivery_queue))) {
struct sock *sk_raw = skb->sk;
struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
- struct msghdr msg = {
- .msg_iov = (struct iovec *)&iov,
- .msg_iovlen = 1,
- .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
- };
+ struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
+
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
+ skb->len);
sk_raw->sk_prot->sendmsg(sk_raw, &msg, skb->len);
kfree_skb(skb);
}
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index 801c94859084..5d9329168699 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -2761,14 +2761,8 @@ static void cnss_crash_shutdown(const struct subsys_desc *subsys)
wdrv = penv->driver;
pdev = penv->pdev;
- penv->dump_data.version = CNSS_DUMP_FORMAT_VER;
- strlcpy(penv->dump_data.name, CNSS_DUMP_NAME,
- sizeof(penv->dump_data.name));
-
if (pdev && wdrv && wdrv->crash_shutdown)
wdrv->crash_shutdown(pdev);
-
- penv->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2;
}
void cnss_device_self_recovery(void)
@@ -2829,6 +2823,28 @@ static struct notifier_block mnb = {
.notifier_call = cnss_modem_notifier_nb,
};
+static int cnss_init_dump_entry(void)
+{
+ struct msm_dump_entry dump_entry;
+
+ if (!penv)
+ return -ENODEV;
+
+ if (!penv->ramdump_dynamic)
+ return 0;
+
+ penv->dump_data.addr = penv->ramdump_phys;
+ penv->dump_data.len = penv->ramdump_size;
+ penv->dump_data.version = CNSS_DUMP_FORMAT_VER;
+ penv->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2;
+ strlcpy(penv->dump_data.name, CNSS_DUMP_NAME,
+ sizeof(penv->dump_data.name));
+ dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+ dump_entry.addr = virt_to_phys(&penv->dump_data);
+
+ return msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+}
+
static int cnss_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -2836,7 +2852,6 @@ static int cnss_probe(struct platform_device *pdev)
const char *client_desc;
struct device *dev = &pdev->dev;
u32 rc_num;
- struct msm_dump_entry dump_entry;
struct resource *res;
u32 ramdump_size = 0;
u32 smmu_iova_address[2];
@@ -2952,18 +2967,10 @@ static int cnss_probe(struct platform_device *pdev)
goto skip_ramdump;
}
- if (penv->ramdump_dynamic) {
- penv->dump_data.addr = penv->ramdump_phys;
- penv->dump_data.len = penv->ramdump_size;
- dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
- dump_entry.addr = virt_to_phys(&penv->dump_data);
-
- ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
- if (ret) {
- pr_err("%s: Dump table setup failed: %d\n",
- __func__, ret);
- goto err_ramdump_create;
- }
+ ret = cnss_init_dump_entry();
+ if (ret) {
+ pr_err("%s: Dump table setup failed: %d\n", __func__, ret);
+ goto err_ramdump_create;
}
penv->ramdump_dev = create_ramdump_device(penv->subsysdesc.name,
diff --git a/drivers/net/wireless/cnss/cnss_sdio.c b/drivers/net/wireless/cnss/cnss_sdio.c
index f773c5993d44..01b969ec627f 100644
--- a/drivers/net/wireless/cnss/cnss_sdio.c
+++ b/drivers/net/wireless/cnss/cnss_sdio.c
@@ -21,6 +21,8 @@
#include <linux/slab.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/io.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
@@ -46,7 +48,7 @@
/* Values for Dynamic Ramdump Collection*/
#define CNSS_DUMP_FORMAT_VER 0x11
#define CNSS_DUMP_MAGIC_VER_V2 0x42445953
-#define CNSS_DUMP_NAME "CNSS_WLAN"
+#define CNSS_DUMP_NAME "CNSS_WLAN_SDIO"
#define CNSS_PINCTRL_SLEEP_STATE "sleep"
#define CNSS_PINCTRL_ACTIVE_STATE "active"
@@ -60,7 +62,11 @@ struct cnss_sdio_regulator {
struct cnss_sdio_info {
struct cnss_sdio_wlan_driver *wdrv;
struct sdio_func *func;
+ struct mmc_card *card;
+ struct mmc_host *host;
+ struct device *dev;
const struct sdio_device_id *id;
+ bool skip_wlan_en_toggle;
};
struct cnss_ssr_info {
@@ -212,19 +218,103 @@ void cnss_sdio_remove_pm_qos(void)
}
EXPORT_SYMBOL(cnss_sdio_remove_pm_qos);
+static int cnss_put_hw_resources(struct device *dev)
+{
+ int ret = -EINVAL;
+ struct cnss_sdio_info *info;
+ struct mmc_host *host;
+
+ if (!cnss_pdata)
+ return ret;
+
+ info = &cnss_pdata->cnss_sdio_info;
+
+ if (info->skip_wlan_en_toggle) {
+ pr_debug("%s: HW doesn't support wlan toggling\n", __func__);
+ return 0;
+ }
+
+ host = info->host;
+
+ if (!host) {
+ pr_err("%s: MMC host is invalid\n", __func__);
+ return 0;
+ }
+
+ ret = mmc_power_save_host(host);
+ if (ret) {
+ pr_err("%s: Failed to Power Save Host err:%d\n", __func__,
+ ret);
+ return ret;
+ }
+
+ if (!cnss_pdata->regulator.wlan_vreg) {
+ pr_debug("%s: wlan_vreg regulator is invalid\n", __func__);
+ return ret;
+ }
+
+ regulator_disable(cnss_pdata->regulator.wlan_vreg);
+
+ return ret;
+}
+
+static int cnss_get_hw_resources(struct device *dev)
+{
+ int ret = -EINVAL;
+ struct mmc_host *host;
+ struct cnss_sdio_info *info;
+
+ if (!cnss_pdata)
+ return ret;
+
+ info = &cnss_pdata->cnss_sdio_info;
+
+ if (info->skip_wlan_en_toggle) {
+ pr_debug("%s: HW doesn't support wlan toggling\n", __func__);
+ return 0;
+ }
+
+ host = info->host;
+
+ ret = regulator_enable(cnss_pdata->regulator.wlan_vreg);
+ if (ret) {
+ pr_err("%s: Failed to enable wlan vreg\n", __func__);
+ return ret;
+ }
+
+ ret = mmc_power_restore_host(host);
+ if (ret) {
+ pr_err("%s: Failed to restore host power ret:%d\n", __func__,
+ ret);
+ regulator_disable(cnss_pdata->regulator.wlan_vreg);
+ }
+
+ return ret;
+}
+
static int cnss_sdio_shutdown(const struct subsys_desc *subsys, bool force_stop)
{
struct cnss_sdio_info *cnss_info;
struct cnss_sdio_wlan_driver *wdrv;
+ int ret = 0;
if (!cnss_pdata)
return -ENODEV;
cnss_info = &cnss_pdata->cnss_sdio_info;
wdrv = cnss_info->wdrv;
- if (wdrv && wdrv->shutdown)
- wdrv->shutdown(cnss_info->func);
- return 0;
+ if (!wdrv)
+ return 0;
+ if (!wdrv->shutdown)
+ return 0;
+
+ wdrv->shutdown(cnss_info->func);
+ ret = cnss_put_hw_resources(cnss_info->dev);
+
+ if (ret)
+ pr_err("%s: Failed to put hw resources\n", __func__);
+
+ return ret;
}
static int cnss_sdio_powerup(const struct subsys_desc *subsys)
@@ -238,11 +328,23 @@ static int cnss_sdio_powerup(const struct subsys_desc *subsys)
cnss_info = &cnss_pdata->cnss_sdio_info;
wdrv = cnss_info->wdrv;
- if (wdrv && wdrv->reinit) {
- ret = wdrv->reinit(cnss_info->func, cnss_info->id);
- if (ret)
- pr_err("%s: wlan reinit error=%d\n", __func__, ret);
+
+ if (!wdrv)
+ return 0;
+
+ if (!wdrv->reinit)
+ return 0;
+
+ ret = cnss_get_hw_resources(cnss_info->dev);
+ if (ret) {
+ pr_err("%s: Failed to power up HW\n", __func__);
+ return ret;
}
+
+ ret = wdrv->reinit(cnss_info->func, cnss_info->id);
+ if (ret)
+ pr_err("%s: wlan reinit error=%d\n", __func__, ret);
+
return ret;
}
@@ -551,25 +653,41 @@ int cnss_get_restart_level(void)
}
EXPORT_SYMBOL(cnss_get_restart_level);
-static int cnss_sdio_wlan_inserted(
- struct sdio_func *func,
- const struct sdio_device_id *id)
+static int cnss_sdio_wlan_inserted(struct sdio_func *func,
+ const struct sdio_device_id *id)
{
+ struct cnss_sdio_info *info;
+
if (!cnss_pdata)
return -ENODEV;
- cnss_pdata->cnss_sdio_info.func = func;
- cnss_pdata->cnss_sdio_info.id = id;
+ info = &cnss_pdata->cnss_sdio_info;
+
+ info->func = func;
+ info->card = func->card;
+ info->host = func->card->host;
+ info->id = id;
+ info->dev = &func->dev;
+
+ cnss_put_hw_resources(cnss_pdata->cnss_sdio_info.dev);
+
+ pr_info("%s: SDIO Device is Probed\n", __func__);
return 0;
}
static void cnss_sdio_wlan_removed(struct sdio_func *func)
{
+ struct cnss_sdio_info *info;
+
if (!cnss_pdata)
return;
- cnss_pdata->cnss_sdio_info.func = NULL;
- cnss_pdata->cnss_sdio_info.id = NULL;
+ info = &cnss_pdata->cnss_sdio_info;
+
+ info->host = NULL;
+ info->card = NULL;
+ info->func = NULL;
+ info->id = NULL;
}
#if defined(CONFIG_PM)
@@ -577,6 +695,8 @@ static int cnss_sdio_wlan_suspend(struct device *dev)
{
struct cnss_sdio_wlan_driver *wdrv;
struct cnss_sdio_bus_bandwidth *bus_bandwidth;
+ struct sdio_func *func;
+
int error = 0;
if (!cnss_pdata)
@@ -588,11 +708,13 @@ static int cnss_sdio_wlan_suspend(struct device *dev)
bus_bandwidth->bus_client, CNSS_BUS_WIDTH_NONE);
}
+ func = cnss_pdata->cnss_sdio_info.func;
wdrv = cnss_pdata->cnss_sdio_info.wdrv;
if (!wdrv) {
/* This can happen when no wlan driver loaded (no register to
* platform driver).
*/
+ sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
pr_debug("wlan driver not registered\n");
return 0;
}
@@ -692,29 +814,49 @@ EXPORT_SYMBOL(cnss_sdio_configure_spdt);
int cnss_sdio_wlan_register_driver(struct cnss_sdio_wlan_driver *driver)
{
struct cnss_sdio_info *cnss_info;
- int error = 0;
+ struct device *dev;
+ int error = -EINVAL;
if (!cnss_pdata)
return -ENODEV;
cnss_info = &cnss_pdata->cnss_sdio_info;
+ dev = cnss_info->dev;
+
if (cnss_info->wdrv)
pr_debug("%s:wdrv already exists wdrv(%p)\n", __func__,
cnss_info->wdrv);
+ cnss_info->wdrv = driver;
+
+ if (!driver)
+ return error;
+
+ error = cnss_get_hw_resources(dev);
+ if (error) {
+ pr_err("%s: Failed to restore power err:%d\n", __func__, error);
+ return error;
+ }
+
error = cnss_set_pinctrl_state(cnss_pdata, PINCTRL_ACTIVE);
if (error) {
pr_err("%s: Fail to set pinctrl to active state\n", __func__);
- return -EFAULT;
+ goto put_hw;
}
- cnss_info->wdrv = driver;
- if (driver->probe) {
- error = driver->probe(cnss_info->func, cnss_info->id);
- if (error)
- pr_err("%s: wlan probe failed error=%d\n", __func__,
- error);
+ error = driver->probe ? driver->probe(cnss_info->func,
+ cnss_info->id) : error;
+ if (error) {
+ pr_err("%s: wlan probe failed error=%d\n", __func__, error);
+ goto pinctrl_sleep;
}
+
+ return error;
+
+pinctrl_sleep:
+ cnss_set_pinctrl_state(cnss_pdata, PINCTRL_SLEEP);
+put_hw:
+ cnss_put_hw_resources(dev);
return error;
}
EXPORT_SYMBOL(cnss_sdio_wlan_register_driver);
@@ -746,10 +888,17 @@ cnss_sdio_wlan_unregister_driver(struct cnss_sdio_wlan_driver *driver)
pr_err("%s: driver not registered\n", __func__);
return;
}
- if (cnss_info->wdrv->remove)
- cnss_info->wdrv->remove(cnss_info->func);
+
+ if (!driver)
+ return;
+
+ if (!driver->remove)
+ return;
+
+ driver->remove(cnss_info->func);
cnss_info->wdrv = NULL;
cnss_set_pinctrl_state(cnss_pdata, PINCTRL_SLEEP);
+ cnss_put_hw_resources(cnss_info->dev);
}
EXPORT_SYMBOL(cnss_sdio_wlan_unregister_driver);
@@ -1051,6 +1200,8 @@ static int cnss_sdio_init_bus_bandwidth(void)
static int cnss_sdio_probe(struct platform_device *pdev)
{
int error;
+ struct device *dev = &pdev->dev;
+ struct cnss_sdio_info *info;
if (pdev->dev.of_node) {
cnss_pdata = devm_kzalloc(
@@ -1065,6 +1216,7 @@ static int cnss_sdio_probe(struct platform_device *pdev)
return -EINVAL;
cnss_pdata->pdev = pdev;
+ info = &cnss_pdata->cnss_sdio_info;
error = cnss_sdio_pinctrl_init(cnss_pdata, pdev);
if (error) {
@@ -1103,6 +1255,9 @@ static int cnss_sdio_probe(struct platform_device *pdev)
}
}
+ info->skip_wlan_en_toggle = of_property_read_bool(dev->of_node,
+ "qcom,skip-wlan-en-toggle");
+
error = cnss_sdio_wlan_init();
if (error) {
dev_err(&pdev->dev, "cnss wlan init failed error=%d\n", error);
@@ -1152,15 +1307,20 @@ err_wlan_enable_regulator:
static int cnss_sdio_remove(struct platform_device *pdev)
{
+ struct cnss_sdio_info *info;
+
if (!cnss_pdata)
return -ENODEV;
+ info = &cnss_pdata->cnss_sdio_info;
+
cnss_sdio_deinit_bus_bandwidth();
cnss_sdio_wlan_exit();
cnss_subsys_exit();
cnss_ramdump_cleanup();
+ cnss_put_hw_resources(info->dev);
cnss_sdio_release_resource();
-
+ cnss_pdata = NULL;
return 0;
}
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index e278aab1e530..3f186137e730 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -501,6 +501,7 @@ struct msm_pcie_clk_info_t {
struct clk *hdl;
char *name;
u32 freq;
+ bool config_mem;
bool required;
};
@@ -710,49 +711,49 @@ static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
static struct msm_pcie_clk_info_t
msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
{
- {NULL, "pcie_0_ref_clk_src", 0, false},
- {NULL, "pcie_0_aux_clk", 1010000, true},
- {NULL, "pcie_0_cfg_ahb_clk", 0, true},
- {NULL, "pcie_0_mstr_axi_clk", 0, true},
- {NULL, "pcie_0_slv_axi_clk", 0, true},
- {NULL, "pcie_0_ldo", 0, true},
- {NULL, "pcie_0_smmu_clk", 0, false},
- {NULL, "pcie_phy_cfg_ahb_clk", 0, false},
- {NULL, "pcie_phy_aux_clk", 0, false},
- {NULL, "pcie_phy_reset", 0, false},
- {NULL, "pcie_phy_com_reset", 0, false},
- {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false},
- {NULL, "pcie_0_phy_reset", 0, true}
+ {NULL, "pcie_0_ref_clk_src", 0, false, false},
+ {NULL, "pcie_0_aux_clk", 1010000, false, true},
+ {NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
+ {NULL, "pcie_0_mstr_axi_clk", 0, true, true},
+ {NULL, "pcie_0_slv_axi_clk", 0, true, true},
+ {NULL, "pcie_0_ldo", 0, false, true},
+ {NULL, "pcie_0_smmu_clk", 0, false, false},
+ {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+ {NULL, "pcie_phy_aux_clk", 0, false, false},
+ {NULL, "pcie_phy_reset", 0, false, false},
+ {NULL, "pcie_phy_com_reset", 0, false, false},
+ {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false, false},
+ {NULL, "pcie_0_phy_reset", 0, false, true}
},
{
- {NULL, "pcie_1_ref_clk_src", 0, false},
- {NULL, "pcie_1_aux_clk", 1010000, true},
- {NULL, "pcie_1_cfg_ahb_clk", 0, true},
- {NULL, "pcie_1_mstr_axi_clk", 0, true},
- {NULL, "pcie_1_slv_axi_clk", 0, true},
- {NULL, "pcie_1_ldo", 0, true},
- {NULL, "pcie_1_smmu_clk", 0, false},
- {NULL, "pcie_phy_cfg_ahb_clk", 0, false},
- {NULL, "pcie_phy_aux_clk", 0, false},
- {NULL, "pcie_phy_reset", 0, false},
- {NULL, "pcie_phy_com_reset", 0, false},
- {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false},
- {NULL, "pcie_1_phy_reset", 0, true}
+ {NULL, "pcie_1_ref_clk_src", 0, false, false},
+ {NULL, "pcie_1_aux_clk", 1010000, false, true},
+ {NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
+ {NULL, "pcie_1_mstr_axi_clk", 0, true, true},
+ {NULL, "pcie_1_slv_axi_clk", 0, true, true},
+ {NULL, "pcie_1_ldo", 0, false, true},
+ {NULL, "pcie_1_smmu_clk", 0, false, false},
+ {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+ {NULL, "pcie_phy_aux_clk", 0, false, false},
+ {NULL, "pcie_phy_reset", 0, false, false},
+ {NULL, "pcie_phy_com_reset", 0, false, false},
+ {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false, false},
+ {NULL, "pcie_1_phy_reset", 0, false, true}
},
{
- {NULL, "pcie_2_ref_clk_src", 0, false},
- {NULL, "pcie_2_aux_clk", 1010000, true},
- {NULL, "pcie_2_cfg_ahb_clk", 0, true},
- {NULL, "pcie_2_mstr_axi_clk", 0, true},
- {NULL, "pcie_2_slv_axi_clk", 0, true},
- {NULL, "pcie_2_ldo", 0, true},
- {NULL, "pcie_2_smmu_clk", 0, false},
- {NULL, "pcie_phy_cfg_ahb_clk", 0, false},
- {NULL, "pcie_phy_aux_clk", 0, false},
- {NULL, "pcie_phy_reset", 0, false},
- {NULL, "pcie_phy_com_reset", 0, false},
- {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false},
- {NULL, "pcie_2_phy_reset", 0, true}
+ {NULL, "pcie_2_ref_clk_src", 0, false, false},
+ {NULL, "pcie_2_aux_clk", 1010000, false, true},
+ {NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
+ {NULL, "pcie_2_mstr_axi_clk", 0, true, true},
+ {NULL, "pcie_2_slv_axi_clk", 0, true, true},
+ {NULL, "pcie_2_ldo", 0, false, true},
+ {NULL, "pcie_2_smmu_clk", 0, false, false},
+ {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+ {NULL, "pcie_phy_aux_clk", 0, false, false},
+ {NULL, "pcie_phy_reset", 0, false, false},
+ {NULL, "pcie_phy_com_reset", 0, false, false},
+ {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false, false},
+ {NULL, "pcie_2_phy_reset", 0, false, true}
}
};
@@ -760,13 +761,13 @@ static struct msm_pcie_clk_info_t
static struct msm_pcie_clk_info_t
msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
{
- {NULL, "pcie_0_pipe_clk", 125000000, true},
+ {NULL, "pcie_0_pipe_clk", 125000000, true, true},
},
{
- {NULL, "pcie_1_pipe_clk", 125000000, true},
+ {NULL, "pcie_1_pipe_clk", 125000000, true, true},
},
{
- {NULL, "pcie_2_pipe_clk", 125000000, true},
+ {NULL, "pcie_2_pipe_clk", 125000000, true, true},
}
};
@@ -861,6 +862,32 @@ static inline void msm_pcie_write_reg_field(void *base, u32 offset,
wmb();
}
+static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
+ struct msm_pcie_clk_info_t *info)
+{
+ int ret;
+
+ ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
+ if (ret)
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't configure core memory for clk %s: %d.\n",
+ dev->rc_idx, info->name, ret);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d configured core memory for clk %s.\n",
+ dev->rc_idx, info->name);
+
+ ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
+ if (ret)
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
+ dev->rc_idx, info->name, ret);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d configured peripheral memory for clk %s.\n",
+ dev->rc_idx, info->name);
+}
+
#if defined(CONFIG_ARCH_FSM9010)
#define PCIE20_PARF_PHY_STTS 0x3c
#define PCIE2_PHY_RESET_CTRL 0x44
@@ -3450,6 +3477,9 @@ static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
if (i >= MSM_PCIE_MAX_CLK - (dev->common_phy ? 4 : 1))
clk_reset(info->hdl, CLK_RESET_DEASSERT);
+ if (info->config_mem)
+ msm_pcie_config_clock_mem(dev, info);
+
if (info->freq) {
rc = clk_set_rate(info->hdl, info->freq);
if (rc) {
@@ -3543,6 +3573,9 @@ static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
clk_reset(info->hdl, CLK_RESET_DEASSERT);
+ if (info->config_mem)
+ msm_pcie_config_clock_mem(dev, info);
+
if (info->freq) {
rc = clk_set_rate(info->hdl, info->freq);
if (rc) {
@@ -3921,8 +3954,8 @@ static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
cnt = of_property_count_strings((&pdev->dev)->of_node,
"clock-names");
if (cnt > 0) {
- clkfreq = kzalloc(cnt * sizeof(*clkfreq),
- GFP_KERNEL);
+ clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
+ sizeof(*clkfreq), GFP_KERNEL);
if (!clkfreq) {
PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
dev->rc_idx);
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
index 35179c8be471..7acef104d5b7 100644
--- a/drivers/phy/phy-qcom-ufs-i.h
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -152,6 +152,7 @@ struct ufs_qcom_phy {
* and writes to QSERDES_RX_SIGDET_CNTRL attribute
* @configure_lpm: pointer to a function that configures the phy
* for low power mode.
+ * @dbg_register_dump: pointer to a function that dumps phy registers for debug.
*/
struct ufs_qcom_phy_specific_ops {
int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
@@ -161,6 +162,7 @@ struct ufs_qcom_phy_specific_ops {
void (*ctrl_rx_linecfg)(struct ufs_qcom_phy *phy, bool ctrl);
void (*power_control)(struct ufs_qcom_phy *phy, bool val);
int (*configure_lpm)(struct ufs_qcom_phy *phy, bool enable);
+ void (*dbg_register_dump)(struct ufs_qcom_phy *phy);
};
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
@@ -184,5 +186,6 @@ int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
struct ufs_qcom_phy_calibration *tbl,
int tbl_size);
-
+void ufs_qcom_phy_dump_regs(struct ufs_qcom_phy *phy,
+ int offset, int len, char *prefix);
#endif
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.c b/drivers/phy/phy-qcom-ufs-qmp-v3.c
index 57c23f70eb63..a9ad3a6f87cc 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.c
@@ -194,6 +194,22 @@ out:
return err;
}
+static void ufs_qcom_phy_qmp_v3_dbg_register_dump(struct ufs_qcom_phy *phy)
+{
+ ufs_qcom_phy_dump_regs(phy, COM_BASE, COM_SIZE,
+ "PHY QSERDES COM Registers ");
+ ufs_qcom_phy_dump_regs(phy, PHY_BASE, PHY_SIZE,
+ "PHY Registers ");
+ ufs_qcom_phy_dump_regs(phy, RX_BASE(0), RX_SIZE,
+ "PHY RX0 Registers ");
+ ufs_qcom_phy_dump_regs(phy, TX_BASE(0), TX_SIZE,
+ "PHY TX0 Registers ");
+ ufs_qcom_phy_dump_regs(phy, RX_BASE(1), RX_SIZE,
+ "PHY RX1 Registers ");
+ ufs_qcom_phy_dump_regs(phy, TX_BASE(1), TX_SIZE,
+ "PHY TX1 Registers ");
+}
+
struct phy_ops ufs_qcom_phy_qmp_v3_phy_ops = {
.init = ufs_qcom_phy_qmp_v3_init,
.exit = ufs_qcom_phy_exit,
@@ -210,6 +226,7 @@ struct ufs_qcom_phy_specific_ops phy_v3_ops = {
.ctrl_rx_linecfg = ufs_qcom_phy_qmp_v3_ctrl_rx_linecfg,
.power_control = ufs_qcom_phy_qmp_v3_power_control,
.configure_lpm = ufs_qcom_phy_qmp_v3_configure_lpm,
+ .dbg_register_dump = ufs_qcom_phy_qmp_v3_dbg_register_dump,
};
static int ufs_qcom_phy_qmp_v3_probe(struct platform_device *pdev)
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.h b/drivers/phy/phy-qcom-ufs-qmp-v3.h
index cda57855acb5..8b1e03eab639 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.h
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.h
@@ -18,10 +18,18 @@
#include "phy-qcom-ufs-i.h"
/* QCOM UFS PHY control registers */
-#define COM_OFF(x) (0x000 + x)
-#define PHY_OFF(x) (0xC00 + x)
-#define TX_OFF(n, x) (0x400 + (0x400 * n) + x)
-#define RX_OFF(n, x) (0x600 + (0x400 * n) + x)
+#define COM_BASE 0x000
+#define COM_SIZE 0x18C
+#define PHY_BASE 0xC00
+#define PHY_SIZE 0x1DC
+#define TX_BASE(n) (0x400 + (0x400 * n))
+#define TX_SIZE 0x128
+#define RX_BASE(n) (0x600 + (0x400 * n))
+#define RX_SIZE 0x1FC
+#define COM_OFF(x) (COM_BASE + x)
+#define PHY_OFF(x) (PHY_BASE + x)
+#define TX_OFF(n, x) (TX_BASE(n) + x)
+#define RX_OFF(n, x) (RX_BASE(n) + x)
/* UFS PHY QSERDES COM registers */
#define QSERDES_COM_ATB_SEL1 COM_OFF(0x00)
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index de32b75f4f57..b2c58430785a 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -788,3 +788,21 @@ int ufs_qcom_phy_configure_lpm(struct phy *generic_phy, bool enable)
return ret;
}
EXPORT_SYMBOL(ufs_qcom_phy_configure_lpm);
+
+void ufs_qcom_phy_dump_regs(struct ufs_qcom_phy *phy, int offset,
+ int len, char *prefix)
+{
+ print_hex_dump(KERN_ERR, prefix,
+ len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+ 16, 4, phy->mmio + offset, len, false);
+}
+EXPORT_SYMBOL(ufs_qcom_phy_dump_regs);
+
+void ufs_qcom_phy_dbg_register_dump(struct phy *generic_phy)
+{
+ struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+
+ if (ufs_qcom_phy->phy_spec_ops->dbg_register_dump)
+ ufs_qcom_phy->phy_spec_ops->dbg_register_dump(ufs_qcom_phy);
+}
+EXPORT_SYMBOL(ufs_qcom_phy_dbg_register_dump);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 6c42ca14d2fd..9753fbb596cf 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinmux.h>
@@ -39,6 +40,8 @@
#define PMIC_GPIO_SUBTYPE_GPIOC_4CH 0x5
#define PMIC_GPIO_SUBTYPE_GPIO_8CH 0x9
#define PMIC_GPIO_SUBTYPE_GPIOC_8CH 0xd
+#define PMIC_GPIO_SUBTYPE_GPIO_LV 0x10
+#define PMIC_GPIO_SUBTYPE_GPIO_MV 0x11
#define PMIC_MPP_REG_RT_STS 0x10
#define PMIC_MPP_REG_RT_STS_VAL_MASK 0x1
@@ -47,8 +50,11 @@
#define PMIC_GPIO_REG_MODE_CTL 0x40
#define PMIC_GPIO_REG_DIG_VIN_CTL 0x41
#define PMIC_GPIO_REG_DIG_PULL_CTL 0x42
+#define PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL 0x44
+#define PMIC_GPIO_REG_DIG_IN_CTL 0x43
#define PMIC_GPIO_REG_DIG_OUT_CTL 0x45
#define PMIC_GPIO_REG_EN_CTL 0x46
+#define PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL 0x4A
/* PMIC_GPIO_REG_MODE_CTL */
#define PMIC_GPIO_REG_MODE_VALUE_SHIFT 0x1
@@ -57,6 +63,13 @@
#define PMIC_GPIO_REG_MODE_DIR_SHIFT 4
#define PMIC_GPIO_REG_MODE_DIR_MASK 0x7
+#define PMIC_GPIO_MODE_DIGITAL_INPUT 0
+#define PMIC_GPIO_MODE_DIGITAL_OUTPUT 1
+#define PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT 2
+#define PMIC_GPIO_MODE_ANALOG_PASS_THRU 3
+
+#define PMIC_GPIO_REG_LV_MV_MODE_DIR_MASK 0x3
+
/* PMIC_GPIO_REG_DIG_VIN_CTL */
#define PMIC_GPIO_REG_VIN_SHIFT 0
#define PMIC_GPIO_REG_VIN_MASK 0x7
@@ -68,6 +81,16 @@
#define PMIC_GPIO_PULL_DOWN 4
#define PMIC_GPIO_PULL_DISABLE 5
+/* PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL for LV/MV */
+#define PMIC_GPIO_LV_MV_OUTPUT_INVERT 0x80
+#define PMIC_GPIO_LV_MV_OUTPUT_INVERT_SHIFT 7
+#define PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK 0xF
+
+/* PMIC_GPIO_REG_DIG_IN_CTL */
+#define PMIC_GPIO_LV_MV_DIG_IN_DTEST_EN 0x80
+#define PMIC_GPIO_LV_MV_DIG_IN_DTEST_SEL_MASK 0x7
+#define PMIC_GPIO_DIG_IN_DTEST_SEL_MASK 0xf
+
/* PMIC_GPIO_REG_DIG_OUT_CTL */
#define PMIC_GPIO_REG_OUT_STRENGTH_SHIFT 0
#define PMIC_GPIO_REG_OUT_STRENGTH_MASK 0x3
@@ -87,9 +110,29 @@
#define PMIC_GPIO_PHYSICAL_OFFSET 1
+/* PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL */
+#define PMIC_GPIO_LV_MV_ANA_MUX_SEL_MASK 0x3
+
/* Qualcomm specific pin configurations */
#define PMIC_GPIO_CONF_PULL_UP (PIN_CONFIG_END + 1)
#define PMIC_GPIO_CONF_STRENGTH (PIN_CONFIG_END + 2)
+#define PMIC_GPIO_CONF_ATEST (PIN_CONFIG_END + 3)
+#define PMIC_GPIO_CONF_DTEST_BUFFER (PIN_CONFIG_END + 4)
+
+/* The index of each function in pmic_gpio_functions[] array */
+enum pmic_gpio_func_index {
+ PMIC_GPIO_FUNC_INDEX_NORMAL = 0x00,
+ PMIC_GPIO_FUNC_INDEX_PAIRED = 0x01,
+ PMIC_GPIO_FUNC_INDEX_FUNC1 = 0x02,
+ PMIC_GPIO_FUNC_INDEX_FUNC2 = 0x03,
+ PMIC_GPIO_FUNC_INDEX_FUNC3 = 0x04,
+ PMIC_GPIO_FUNC_INDEX_FUNC4 = 0x05,
+ PMIC_GPIO_FUNC_INDEX_DTEST1 = 0x06,
+ PMIC_GPIO_FUNC_INDEX_DTEST2 = 0x07,
+ PMIC_GPIO_FUNC_INDEX_DTEST3 = 0x08,
+ PMIC_GPIO_FUNC_INDEX_DTEST4 = 0x09,
+ PMIC_GPIO_FUNC_INDEX_ANALOG = 0x10,
+};
/**
* struct pmic_gpio_pad - keep current GPIO settings
@@ -101,12 +144,16 @@
* open-drain or open-source mode.
* @output_enabled: Set to true if GPIO output logic is enabled.
* @input_enabled: Set to true if GPIO input buffer logic is enabled.
+ * @lv_mv_type: Set to true if GPIO subtype is GPIO_LV(0x10) or GPIO_MV(0x11).
* @num_sources: Number of power-sources supported by this GPIO.
* @power_source: Current power-source used.
* @buffer_type: Push-pull, open-drain or open-source.
* @pullup: Constant current which flow trough GPIO output buffer.
* @strength: No, Low, Medium, High
* @function: See pmic_gpio_functions[]
+ * @atest: the ATEST selection for GPIO analog-pass-through mode
+ * @dtest_buffer: the DTEST buffer selection for digital input mode,
+ * the default value is INT_MAX if not used.
*/
struct pmic_gpio_pad {
u16 base;
@@ -116,12 +163,15 @@ struct pmic_gpio_pad {
bool have_buffer;
bool output_enabled;
bool input_enabled;
+ bool lv_mv_type;
unsigned int num_sources;
unsigned int power_source;
unsigned int buffer_type;
unsigned int pullup;
unsigned int strength;
unsigned int function;
+ unsigned int atest;
+ unsigned int dtest_buffer;
};
struct pmic_gpio_state {
@@ -134,12 +184,15 @@ struct pmic_gpio_state {
static const struct pinconf_generic_params pmic_gpio_bindings[] = {
{"qcom,pull-up-strength", PMIC_GPIO_CONF_PULL_UP, 0},
{"qcom,drive-strength", PMIC_GPIO_CONF_STRENGTH, 0},
+ {"qcom,atest", PMIC_GPIO_CONF_ATEST, 0},
+ {"qcom,dtest-buffer", PMIC_GPIO_CONF_DTEST_BUFFER, 0},
};
#ifdef CONFIG_DEBUG_FS
static const struct pin_config_item pmic_conf_items[ARRAY_SIZE(pmic_gpio_bindings)] = {
PCONFDUMP(PMIC_GPIO_CONF_PULL_UP, "pull up strength", NULL, true),
PCONFDUMP(PMIC_GPIO_CONF_STRENGTH, "drive-strength", NULL, true),
+ PCONFDUMP(PMIC_GPIO_CONF_ATEST, "atest", NULL, true),
};
#endif
@@ -151,11 +204,25 @@ static const char *const pmic_gpio_groups[] = {
"gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
};
+/*
+ * Treat LV/MV GPIO analog-pass-through mode as a function, add it
+ * to the end of the function list. Add placeholder for the reserved
+ * functions defined in LV/MV OUTPUT_SOURCE_SEL register.
+ */
static const char *const pmic_gpio_functions[] = {
- PMIC_GPIO_FUNC_NORMAL, PMIC_GPIO_FUNC_PAIRED,
- PMIC_GPIO_FUNC_FUNC1, PMIC_GPIO_FUNC_FUNC2,
- PMIC_GPIO_FUNC_DTEST1, PMIC_GPIO_FUNC_DTEST2,
- PMIC_GPIO_FUNC_DTEST3, PMIC_GPIO_FUNC_DTEST4,
+ [PMIC_GPIO_FUNC_INDEX_NORMAL] = PMIC_GPIO_FUNC_NORMAL,
+ [PMIC_GPIO_FUNC_INDEX_PAIRED] = PMIC_GPIO_FUNC_PAIRED,
+ [PMIC_GPIO_FUNC_INDEX_FUNC1] = PMIC_GPIO_FUNC_FUNC1,
+ [PMIC_GPIO_FUNC_INDEX_FUNC2] = PMIC_GPIO_FUNC_FUNC2,
+ [PMIC_GPIO_FUNC_INDEX_FUNC3] = PMIC_GPIO_FUNC_FUNC3,
+ [PMIC_GPIO_FUNC_INDEX_FUNC4] = PMIC_GPIO_FUNC_FUNC4,
+ [PMIC_GPIO_FUNC_INDEX_DTEST1] = PMIC_GPIO_FUNC_DTEST1,
+ [PMIC_GPIO_FUNC_INDEX_DTEST2] = PMIC_GPIO_FUNC_DTEST2,
+ [PMIC_GPIO_FUNC_INDEX_DTEST3] = PMIC_GPIO_FUNC_DTEST3,
+ [PMIC_GPIO_FUNC_INDEX_DTEST4] = PMIC_GPIO_FUNC_DTEST4,
+ "reserved-a", "reserved-b", "reserved-c",
+ "reserved-d", "reserved-e", "reserved-f",
+ [PMIC_GPIO_FUNC_INDEX_ANALOG] = PMIC_GPIO_FUNC_ANALOG,
};
static inline struct pmic_gpio_state *to_gpio_state(struct gpio_chip *chip)
@@ -252,21 +319,74 @@ static int pmic_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function,
pad->function = function;
- val = 0;
+ val = PMIC_GPIO_MODE_DIGITAL_INPUT;
if (pad->output_enabled) {
if (pad->input_enabled)
- val = 2;
+ val = PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT;
else
- val = 1;
+ val = PMIC_GPIO_MODE_DIGITAL_OUTPUT;
+ }
+
+ if (function > PMIC_GPIO_FUNC_INDEX_DTEST4 &&
+ function < PMIC_GPIO_FUNC_INDEX_ANALOG) {
+ pr_err("reserved function: %s hasn't been enabled\n",
+ pmic_gpio_functions[function]);
+ return -EINVAL;
}
- val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
- val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
- val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+ if (pad->lv_mv_type) {
+ if (pad->function == PMIC_GPIO_FUNC_INDEX_ANALOG) {
+ val = PMIC_GPIO_MODE_ANALOG_PASS_THRU;
+ ret = pmic_gpio_write(state, pad,
+ PMIC_GPIO_REG_MODE_CTL, val);
+ if (ret < 0)
+ return ret;
- ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
- if (ret < 0)
- return ret;
+ ret = pmic_gpio_write(state, pad,
+ PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL,
+ pad->atest);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = pmic_gpio_write(state, pad,
+ PMIC_GPIO_REG_MODE_CTL, val);
+ if (ret < 0)
+ return ret;
+
+ val = pad->out_value
+ << PMIC_GPIO_LV_MV_OUTPUT_INVERT_SHIFT;
+ val |= pad->function
+ & PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK;
+ ret = pmic_gpio_write(state, pad,
+ PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL, val);
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ /*
+ * GPIO not of LV/MV subtype doesn't have "func3", "func4"
+ * "analog" functions, and "dtest1" to "dtest4" functions
+ * have register value 2 bits lower than the function index
+ * in pmic_gpio_functions[].
+ */
+ if (function == PMIC_GPIO_FUNC_INDEX_FUNC3
+ || function == PMIC_GPIO_FUNC_INDEX_FUNC4
+ || function == PMIC_GPIO_FUNC_INDEX_ANALOG) {
+ return -EINVAL;
+ } else if (function >= PMIC_GPIO_FUNC_INDEX_DTEST1 &&
+ function <= PMIC_GPIO_FUNC_INDEX_DTEST4) {
+ pad->function -= (PMIC_GPIO_FUNC_INDEX_DTEST1 -
+ PMIC_GPIO_FUNC_INDEX_FUNC3);
+ }
+
+ val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
+ val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
+ val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+
+ ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
+ if (ret < 0)
+ return ret;
+ }
val = pad->is_enabled << PMIC_GPIO_REG_MASTER_EN_SHIFT;
@@ -326,6 +446,12 @@ static int pmic_gpio_config_get(struct pinctrl_dev *pctldev,
case PMIC_GPIO_CONF_STRENGTH:
arg = pad->strength;
break;
+ case PMIC_GPIO_CONF_ATEST:
+ arg = pad->atest;
+ break;
+ case PMIC_GPIO_CONF_DTEST_BUFFER:
+ arg = pad->dtest_buffer;
+ break;
default:
return -EINVAL;
}
@@ -379,7 +505,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
pad->is_enabled = false;
break;
case PIN_CONFIG_POWER_SOURCE:
- if (arg > pad->num_sources)
+ if (arg >= pad->num_sources)
return -EINVAL;
pad->power_source = arg;
break;
@@ -400,6 +526,18 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -EINVAL;
pad->strength = arg;
break;
+ case PMIC_GPIO_CONF_ATEST:
+ if (arg > PMIC_GPIO_AOUT_ATEST4)
+ return -EINVAL;
+ pad->atest = arg;
+ break;
+ case PMIC_GPIO_CONF_DTEST_BUFFER:
+ if ((pad->lv_mv_type && arg > PMIC_GPIO_DIN_DTEST4)
+ || (!pad->lv_mv_type && arg >
+ PMIC_GPIO_DIG_IN_DTEST_SEL_MASK))
+ return -EINVAL;
+ pad->dtest_buffer = arg;
+ break;
default:
return -EINVAL;
}
@@ -424,19 +562,64 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (ret < 0)
return ret;
- val = 0;
+ val = PMIC_GPIO_MODE_DIGITAL_INPUT;
if (pad->output_enabled) {
if (pad->input_enabled)
- val = 2;
+ val = PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT;
else
- val = 1;
+ val = PMIC_GPIO_MODE_DIGITAL_OUTPUT;
+ }
+
+ if (pad->dtest_buffer != INT_MAX) {
+ val = pad->dtest_buffer;
+ if (pad->lv_mv_type)
+ val |= PMIC_GPIO_LV_MV_DIG_IN_DTEST_EN;
+
+ ret = pmic_gpio_write(state, pad,
+ PMIC_GPIO_REG_DIG_IN_CTL, val);
+ if (ret < 0)
+ return ret;
}
- val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
- val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
- val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+ if (pad->lv_mv_type) {
+ if (pad->function == PMIC_GPIO_FUNC_INDEX_ANALOG) {
+ val = PMIC_GPIO_MODE_ANALOG_PASS_THRU;
+ ret = pmic_gpio_write(state, pad,
+ PMIC_GPIO_REG_MODE_CTL, val);
+ if (ret < 0)
+ return ret;
+
+ ret = pmic_gpio_write(state, pad,
+ PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL,
+ pad->atest);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = pmic_gpio_write(state, pad,
+ PMIC_GPIO_REG_MODE_CTL, val);
+ if (ret < 0)
+ return ret;
+
+ val = pad->out_value
+ << PMIC_GPIO_LV_MV_OUTPUT_INVERT_SHIFT;
+ val |= pad->function
+ & PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK;
+ ret = pmic_gpio_write(state, pad,
+ PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL, val);
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
+ val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
+ val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+
+ ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
+ if (ret < 0)
+ return ret;
+ }
- return pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
+ return ret;
}
static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
@@ -444,7 +627,7 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
{
struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev);
struct pmic_gpio_pad *pad;
- int ret, val;
+ int ret, val, function;
static const char *const biases[] = {
"pull-up 30uA", "pull-up 1.5uA", "pull-up 31.5uA",
@@ -475,14 +658,28 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
pad->out_value = ret;
}
+ /*
+ * For GPIO not of LV/MV subtypes, the register value of
+ * the function mapping from "dtest1" to "dtest4" is 2 bits
+ * lower than the function index in pmic_gpio_functions[].
+ */
+ if (!pad->lv_mv_type &&
+ pad->function >= PMIC_GPIO_FUNC_INDEX_FUNC3) {
+ function = pad->function + (PMIC_GPIO_FUNC_INDEX_DTEST1
+ - PMIC_GPIO_FUNC_INDEX_FUNC3);
+ } else {
+ function = pad->function;
+ }
seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
- seq_printf(s, " %-7s", pmic_gpio_functions[pad->function]);
+ seq_printf(s, " %-7s", pmic_gpio_functions[function]);
seq_printf(s, " vin-%d", pad->power_source);
seq_printf(s, " %-27s", biases[pad->pullup]);
seq_printf(s, " %-10s", buffer_types[pad->buffer_type]);
seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
seq_printf(s, " %-7s", strengths[pad->strength]);
+ if (pad->dtest_buffer != INT_MAX)
+ seq_printf(s, " dtest buffer %d", pad->dtest_buffer);
}
}
@@ -622,40 +819,72 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
case PMIC_GPIO_SUBTYPE_GPIOC_8CH:
pad->num_sources = 8;
break;
+ case PMIC_GPIO_SUBTYPE_GPIO_LV:
+ pad->num_sources = 1;
+ pad->have_buffer = true;
+ pad->lv_mv_type = true;
+ break;
+ case PMIC_GPIO_SUBTYPE_GPIO_MV:
+ pad->num_sources = 2;
+ pad->have_buffer = true;
+ pad->lv_mv_type = true;
+ break;
default:
dev_err(state->dev, "unknown GPIO type 0x%x\n", subtype);
return -ENODEV;
}
- val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_MODE_CTL);
- if (val < 0)
- return val;
+ if (pad->lv_mv_type) {
+ val = pmic_gpio_read(state, pad,
+ PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL);
+ if (val < 0)
+ return val;
+
+ pad->out_value = !!(val & PMIC_GPIO_LV_MV_OUTPUT_INVERT);
+ pad->function = val & PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK;
- pad->out_value = val & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+ val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_MODE_CTL);
+ if (val < 0)
+ return val;
+
+ dir = val & PMIC_GPIO_REG_LV_MV_MODE_DIR_MASK;
+ } else {
+ val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_MODE_CTL);
+ if (val < 0)
+ return val;
+
+ pad->out_value = val & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+
+ dir = val >> PMIC_GPIO_REG_MODE_DIR_SHIFT;
+ dir &= PMIC_GPIO_REG_MODE_DIR_MASK;
+ pad->function = val >> PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
+ pad->function &= PMIC_GPIO_REG_MODE_FUNCTION_MASK;
+ }
- dir = val >> PMIC_GPIO_REG_MODE_DIR_SHIFT;
- dir &= PMIC_GPIO_REG_MODE_DIR_MASK;
switch (dir) {
- case 0:
+ case PMIC_GPIO_MODE_DIGITAL_INPUT:
pad->input_enabled = true;
pad->output_enabled = false;
break;
- case 1:
+ case PMIC_GPIO_MODE_DIGITAL_OUTPUT:
pad->input_enabled = false;
pad->output_enabled = true;
break;
- case 2:
+ case PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT:
pad->input_enabled = true;
pad->output_enabled = true;
break;
+ case PMIC_GPIO_MODE_ANALOG_PASS_THRU:
+ if (pad->lv_mv_type)
+ pad->function = PMIC_GPIO_FUNC_INDEX_ANALOG;
+ else
+ return -ENODEV;
+ break;
default:
dev_err(state->dev, "unknown GPIO direction\n");
return -ENODEV;
}
- pad->function = val >> PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
- pad->function &= PMIC_GPIO_REG_MODE_FUNCTION_MASK;
-
val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_VIN_CTL);
if (val < 0)
return val;
@@ -670,6 +899,17 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
pad->pullup = val >> PMIC_GPIO_REG_PULL_SHIFT;
pad->pullup &= PMIC_GPIO_REG_PULL_MASK;
+ val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_IN_CTL);
+ if (val < 0)
+ return val;
+
+ if (pad->lv_mv_type && (val & PMIC_GPIO_LV_MV_DIG_IN_DTEST_EN))
+ pad->dtest_buffer = val & PMIC_GPIO_LV_MV_DIG_IN_DTEST_SEL_MASK;
+ else if (!pad->lv_mv_type)
+ pad->dtest_buffer = val & PMIC_GPIO_DIG_IN_DTEST_SEL_MASK;
+ else
+ pad->dtest_buffer = INT_MAX;
+
val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL);
if (val < 0)
return val;
@@ -680,6 +920,13 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
pad->buffer_type = val >> PMIC_GPIO_REG_OUT_TYPE_SHIFT;
pad->buffer_type &= PMIC_GPIO_REG_OUT_TYPE_MASK;
+ if (pad->function == PMIC_GPIO_FUNC_INDEX_ANALOG) {
+ val = pmic_gpio_read(state, pad,
+ PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL);
+ if (val < 0)
+ return val;
+ pad->atest = val & PMIC_GPIO_LV_MV_ANA_MUX_SEL_MASK;
+ }
/* Pin could be disabled with PIN_CONFIG_BIAS_HIGH_IMPEDANCE */
pad->is_enabled = true;
return 0;
@@ -693,18 +940,19 @@ static int pmic_gpio_probe(struct platform_device *pdev)
struct pmic_gpio_pad *pad, *pads;
struct pmic_gpio_state *state;
int ret, npins, i;
- u32 res[2];
+ u32 reg;
- ret = of_property_read_u32_array(dev->of_node, "reg", res, 2);
+ ret = of_property_read_u32(dev->of_node, "reg", &reg);
if (ret < 0) {
- dev_err(dev, "missing base address and/or range");
+ dev_err(dev, "missing base address");
return ret;
}
- npins = res[1] / PMIC_GPIO_ADDRESS_RANGE;
-
+ npins = platform_irq_count(pdev);
if (!npins)
return -EINVAL;
+ if (npins < 0)
+ return npins;
BUG_ON(npins > ARRAY_SIZE(pmic_gpio_groups));
@@ -752,7 +1000,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
if (pad->irq < 0)
return pad->irq;
- pad->base = res[0] + i * PMIC_GPIO_ADDRESS_RANGE;
+ pad->base = reg + i * PMIC_GPIO_ADDRESS_RANGE;
ret = pmic_gpio_populate(state, pad);
if (ret < 0)
@@ -805,6 +1053,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8916-gpio" }, /* 4 GPIO's */
{ .compatible = "qcom,pm8941-gpio" }, /* 36 GPIO's */
{ .compatible = "qcom,pma8084-gpio" }, /* 22 GPIO's */
+ { .compatible = "qcom,spmi-gpio" }, /* Generic */
{ },
};
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 9ce0e30e33e8..73547abc5cf5 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinmux.h>
@@ -87,6 +88,10 @@
#define PMIC_MPP_REG_AIN_ROUTE_SHIFT 0
#define PMIC_MPP_REG_AIN_ROUTE_MASK 0x7
+/* PMIC_MPP_REG_SINK_CTL */
+#define PMIC_MPP_REG_CURRENT_SINK_MASK 0x7
+#define MPP_CURRENT_SINK_MA_STEP_SIZE 5
+
#define PMIC_MPP_MODE_DIGITAL_INPUT 0
#define PMIC_MPP_MODE_DIGITAL_OUTPUT 1
#define PMIC_MPP_MODE_DIGITAL_BIDIR 2
@@ -106,6 +111,7 @@
#define PMIC_MPP_CONF_ANALOG_LEVEL (PIN_CONFIG_END + 2)
#define PMIC_MPP_CONF_DTEST_SELECTOR (PIN_CONFIG_END + 3)
#define PMIC_MPP_CONF_PAIRED (PIN_CONFIG_END + 4)
+#define PMIC_MPP_CONF_DTEST_BUFFER (PIN_CONFIG_END + 5)
/**
* struct pmic_mpp_pad - keep current MPP settings
@@ -124,6 +130,7 @@
* @function: See pmic_mpp_functions[].
* @drive_strength: Amount of current in sink mode
* @dtest: DTEST route selector
+ * @dtest_buffer: the DTEST buffer selection for digital input mode
*/
struct pmic_mpp_pad {
u16 base;
@@ -141,6 +148,7 @@ struct pmic_mpp_pad {
unsigned int function;
unsigned int drive_strength;
unsigned int dtest;
+ unsigned int dtest_buffer;
};
struct pmic_mpp_state {
@@ -155,6 +163,7 @@ static const struct pinconf_generic_params pmic_mpp_bindings[] = {
{"qcom,analog-level", PMIC_MPP_CONF_ANALOG_LEVEL, 0},
{"qcom,dtest", PMIC_MPP_CONF_DTEST_SELECTOR, 0},
{"qcom,paired", PMIC_MPP_CONF_PAIRED, 0},
+ {"qcom,dtest-buffer", PMIC_MPP_CONF_DTEST_BUFFER, 0},
};
#ifdef CONFIG_DEBUG_FS
@@ -163,6 +172,7 @@ static const struct pin_config_item pmic_conf_items[] = {
PCONFDUMP(PMIC_MPP_CONF_ANALOG_LEVEL, "analog level", NULL, true),
PCONFDUMP(PMIC_MPP_CONF_DTEST_SELECTOR, "dtest", NULL, true),
PCONFDUMP(PMIC_MPP_CONF_PAIRED, "paired", NULL, false),
+ PCONFDUMP(PMIC_MPP_CONF_DTEST_BUFFER, "dtest buffer", NULL, true),
};
#endif
@@ -392,6 +402,9 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
case PMIC_MPP_CONF_ANALOG_LEVEL:
arg = pad->aout_level;
break;
+ case PMIC_MPP_CONF_DTEST_BUFFER:
+ arg = pad->dtest_buffer;
+ break;
default:
return -EINVAL;
}
@@ -457,7 +470,7 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
pad->dtest = arg;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- arg = pad->drive_strength;
+ pad->drive_strength = arg;
break;
case PMIC_MPP_CONF_AMUX_ROUTE:
if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
@@ -470,6 +483,15 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PMIC_MPP_CONF_PAIRED:
pad->paired = !!arg;
break;
+ case PMIC_MPP_CONF_DTEST_BUFFER:
+ /*
+ * 0xf is the max value which selects
+ * 4 dtest rails simultaneously
+ */
+ if (arg > 0xf)
+ return -EINVAL;
+ pad->dtest_buffer = arg;
+ break;
default:
return -EINVAL;
}
@@ -481,6 +503,11 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (ret < 0)
return ret;
+ val = pad->dtest_buffer;
+ ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_IN_CTL, val);
+ if (ret < 0)
+ return ret;
+
val = pad->pullup << PMIC_MPP_REG_PULL_SHIFT;
ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_PULL_CTL, val);
@@ -497,6 +524,16 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (ret < 0)
return ret;
+ val = 0;
+ if (pad->drive_strength >= MPP_CURRENT_SINK_MA_STEP_SIZE)
+ val = DIV_ROUND_UP(pad->drive_strength,
+ MPP_CURRENT_SINK_MA_STEP_SIZE) - 1;
+
+ val &= PMIC_MPP_REG_CURRENT_SINK_MASK;
+ ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL, val);
+ if (ret < 0)
+ return ret;
+
ret = pmic_mpp_write_mode_ctl(state, pad);
if (ret < 0)
return ret;
@@ -544,6 +581,8 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, " dtest%d", pad->dtest);
if (pad->paired)
seq_puts(s, " paired");
+ if (pad->dtest_buffer)
+ seq_printf(s, " dtest buffer %d", pad->dtest_buffer);
}
}
@@ -741,7 +780,7 @@ static int pmic_mpp_populate(struct pmic_mpp_state *state,
sel &= PMIC_MPP_REG_MODE_FUNCTION_MASK;
if (sel >= PMIC_MPP_SELECTOR_DTEST_FIRST)
- pad->dtest = sel + 1;
+ pad->dtest = sel - PMIC_MPP_SELECTOR_DTEST_FIRST + 1;
else if (sel == PMIC_MPP_SELECTOR_PAIRED)
pad->paired = true;
@@ -752,6 +791,12 @@ static int pmic_mpp_populate(struct pmic_mpp_state *state,
pad->power_source = val >> PMIC_MPP_REG_VIN_SHIFT;
pad->power_source &= PMIC_MPP_REG_VIN_MASK;
+ val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_IN_CTL);
+ if (val < 0)
+ return val;
+
+ pad->dtest_buffer = val;
+
val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_PULL_CTL);
if (val < 0)
return val;
@@ -770,7 +815,8 @@ static int pmic_mpp_populate(struct pmic_mpp_state *state,
if (val < 0)
return val;
- pad->drive_strength = val;
+ val &= PMIC_MPP_REG_CURRENT_SINK_MASK;
+ pad->drive_strength = (val + 1) * MPP_CURRENT_SINK_MA_STEP_SIZE;
val = pmic_mpp_read(state, pad, PMIC_MPP_REG_AOUT_CTL);
if (val < 0)
@@ -795,17 +841,19 @@ static int pmic_mpp_probe(struct platform_device *pdev)
struct pmic_mpp_pad *pad, *pads;
struct pmic_mpp_state *state;
int ret, npins, i;
- u32 res[2];
+ u32 reg;
- ret = of_property_read_u32_array(dev->of_node, "reg", res, 2);
+ ret = of_property_read_u32(dev->of_node, "reg", &reg);
if (ret < 0) {
- dev_err(dev, "missing base address and/or range");
+ dev_err(dev, "missing base address");
return ret;
}
- npins = res[1] / PMIC_MPP_ADDRESS_RANGE;
+ npins = platform_irq_count(pdev);
if (!npins)
return -EINVAL;
+ if (npins < 0)
+ return npins;
BUG_ON(npins > ARRAY_SIZE(pmic_mpp_groups));
@@ -854,7 +902,7 @@ static int pmic_mpp_probe(struct platform_device *pdev)
if (pad->irq < 0)
return pad->irq;
- pad->base = res[0] + i * PMIC_MPP_ADDRESS_RANGE;
+ pad->base = reg + i * PMIC_MPP_ADDRESS_RANGE;
ret = pmic_mpp_populate(state, pad);
if (ret < 0)
@@ -908,6 +956,7 @@ static const struct of_device_id pmic_mpp_of_match[] = {
{ .compatible = "qcom,pm8916-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8941-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,pma8084-mpp" }, /* 8 MPP's */
+ { .compatible = "qcom,spmi-mpp" }, /* Generic */
{ },
};
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index 19a3c3bc2f1f..e51176ec83d2 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -23,6 +23,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
@@ -650,11 +651,12 @@ static int pm8xxx_pin_populate(struct pm8xxx_gpio *pctrl,
}
static const struct of_device_id pm8xxx_gpio_of_match[] = {
- { .compatible = "qcom,pm8018-gpio", .data = (void *)6 },
- { .compatible = "qcom,pm8038-gpio", .data = (void *)12 },
- { .compatible = "qcom,pm8058-gpio", .data = (void *)40 },
- { .compatible = "qcom,pm8917-gpio", .data = (void *)38 },
- { .compatible = "qcom,pm8921-gpio", .data = (void *)44 },
+ { .compatible = "qcom,pm8018-gpio" },
+ { .compatible = "qcom,pm8038-gpio" },
+ { .compatible = "qcom,pm8058-gpio" },
+ { .compatible = "qcom,pm8917-gpio" },
+ { .compatible = "qcom,pm8921-gpio" },
+ { .compatible = "qcom,ssbi-gpio" },
{ },
};
MODULE_DEVICE_TABLE(of, pm8xxx_gpio_of_match);
@@ -665,14 +667,19 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
struct pinctrl_pin_desc *pins;
struct pm8xxx_gpio *pctrl;
int ret;
- int i;
+ int i, npins;
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
return -ENOMEM;
pctrl->dev = &pdev->dev;
- pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
+ npins = platform_irq_count(pdev);
+ if (!npins)
+ return -EINVAL;
+ if (npins < 0)
+ return npins;
+ pctrl->npins = npins;
pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!pctrl->regmap) {
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index b868ef1766a0..e9f01de51e18 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -23,6 +23,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
@@ -741,11 +742,12 @@ static int pm8xxx_pin_populate(struct pm8xxx_mpp *pctrl,
}
static const struct of_device_id pm8xxx_mpp_of_match[] = {
- { .compatible = "qcom,pm8018-mpp", .data = (void *)6 },
- { .compatible = "qcom,pm8038-mpp", .data = (void *)6 },
- { .compatible = "qcom,pm8917-mpp", .data = (void *)10 },
- { .compatible = "qcom,pm8821-mpp", .data = (void *)4 },
- { .compatible = "qcom,pm8921-mpp", .data = (void *)12 },
+ { .compatible = "qcom,pm8018-mpp" },
+ { .compatible = "qcom,pm8038-mpp" },
+ { .compatible = "qcom,pm8917-mpp" },
+ { .compatible = "qcom,pm8821-mpp" },
+ { .compatible = "qcom,pm8921-mpp" },
+ { .compatible = "qcom,ssbi-mpp" },
{ },
};
MODULE_DEVICE_TABLE(of, pm8xxx_mpp_of_match);
@@ -756,14 +758,19 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
struct pinctrl_pin_desc *pins;
struct pm8xxx_mpp *pctrl;
int ret;
- int i;
+ int i, npins;
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
return -ENOMEM;
pctrl->dev = &pdev->dev;
- pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
+ npins = platform_irq_count(pdev);
+ if (!npins)
+ return -EINVAL;
+ if (npins < 0)
+ return npins;
+ pctrl->npins = npins;
pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!pctrl->regmap) {
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 6addf14d7126..a02247d3e938 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -2435,6 +2435,7 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params)
int res;
struct ipa_rm_create_params mhi_prod_params;
struct ipa_rm_create_params mhi_cons_params;
+ struct ipa_rm_perf_profile profile;
IPA_MHI_FUNC_ENTRY();
@@ -2506,6 +2507,14 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params)
goto fail_create_rm_prod;
}
+ memset(&profile, 0, sizeof(profile));
+ profile.max_supported_bandwidth_mbps = 1000;
+ res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_PROD, &profile);
+ if (res) {
+ IPA_MHI_ERR("fail to set profile to MHI_PROD\n");
+ goto fail_perf_rm_prod;
+ }
+
/* Create CONS in IPA RM */
memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
@@ -2518,6 +2527,14 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params)
goto fail_create_rm_cons;
}
+ memset(&profile, 0, sizeof(profile));
+ profile.max_supported_bandwidth_mbps = 1000;
+ res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_CONS, &profile);
+ if (res) {
+ IPA_MHI_ERR("fail to set profile to MHI_CONS\n");
+ goto fail_perf_rm_cons;
+ }
+
/* Initialize uC interface */
ipa_uc_mhi_init(ipa_mhi_uc_ready_cb,
ipa_mhi_uc_wakeup_request_cb);
@@ -2530,7 +2547,10 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params)
IPA_MHI_FUNC_EXIT();
return 0;
+fail_perf_rm_cons:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
fail_create_rm_cons:
+fail_perf_rm_prod:
ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
fail_create_rm_prod:
destroy_workqueue(ipa_mhi_client_ctx->wq);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 8e583203abda..838b78c1934d 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -2136,11 +2136,11 @@ int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
connect_fail:
ipa3_usb_release_xdci_channel(dl_out_params->clnt_hdl,
- dl_chan_params->teth_prot);
+ IPA3_USB_GET_TTYPE(dl_chan_params->teth_prot));
alloc_dl_chan_fail:
if (connect_params->teth_prot != IPA_USB_DIAG)
ipa3_usb_release_xdci_channel(ul_out_params->clnt_hdl,
- ul_chan_params->teth_prot);
+ IPA3_USB_GET_TTYPE(ul_chan_params->teth_prot));
bad_params:
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return result;
diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c
index bf6352452283..209264d69b26 100644
--- a/drivers/platform/msm/ipa/ipa_rm.c
+++ b/drivers/platform/msm/ipa/ipa_rm.c
@@ -267,17 +267,18 @@ static int _ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
time = wait_for_completion_timeout(
&((struct ipa_rm_resource_cons *)consumer)->
request_consumer_in_progress,
- HZ);
+ HZ * 5);
result = 0;
if (!time) {
IPA_RM_ERR("TIMEOUT waiting for %s GRANT event.",
ipa_rm_resource_str(depends_on_name));
result = -ETIME;
- }
- IPA_RM_DBG("%s waited for %s GRANT %lu time.\n",
+ } else {
+ IPA_RM_DBG("%s waited for %s GRANT %lu time.\n",
ipa_rm_resource_str(resource_name),
ipa_rm_resource_str(depends_on_name),
time);
+ }
}
IPA_RM_DBG("EXIT with %d\n", result);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 66553205b520..9cb0b1f3c379 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -575,6 +575,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct ipa_ioc_v4_nat_del nat_del;
struct ipa_ioc_rm_dependency rm_depend;
size_t sz;
+ int pre_entry;
IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
@@ -623,11 +624,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
+ pre_entry =
+ ((struct ipa_ioc_nat_dma_cmd *)header)->entries;
pyld_sz =
sizeof(struct ipa_ioc_nat_dma_cmd) +
- ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
- sizeof(struct ipa_ioc_nat_dma_one);
+ pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -638,7 +639,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_nat_dma_cmd *)param)->entries,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
retval = -EFAULT;
break;
@@ -663,10 +672,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_hdr *)header)->num_hdrs;
pyld_sz =
sizeof(struct ipa_ioc_add_hdr) +
- ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
- sizeof(struct ipa_hdr_add);
+ pre_entry * sizeof(struct ipa_hdr_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -676,6 +686,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_hdr *)param)->num_hdrs,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_add_hdr((struct ipa_ioc_add_hdr *)param)) {
retval = -EFAULT;
break;
@@ -692,10 +711,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_hdr *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_hdr) +
- ((struct ipa_ioc_del_hdr *)header)->num_hdls *
- sizeof(struct ipa_hdr_del);
+ pre_entry * sizeof(struct ipa_hdr_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -705,6 +725,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_hdr *)param)->num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_del_hdr((struct ipa_ioc_del_hdr *)param)) {
retval = -EFAULT;
break;
@@ -721,10 +750,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_rt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_rt_rule) +
- ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
- sizeof(struct ipa_rt_rule_add);
+ pre_entry * sizeof(struct ipa_rt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -734,6 +764,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_rt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -750,10 +790,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_mdfy_rt_rule) +
- ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules *
- sizeof(struct ipa_rt_rule_mdfy);
+ pre_entry * sizeof(struct ipa_rt_rule_mdfy);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -763,6 +804,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_mdfy_rt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -779,10 +830,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_rt_rule) +
- ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
- sizeof(struct ipa_rt_rule_del);
+ pre_entry * sizeof(struct ipa_rt_rule_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -792,6 +844,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -808,10 +869,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_flt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_flt_rule) +
- ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
- sizeof(struct ipa_flt_rule_add);
+ pre_entry * sizeof(struct ipa_flt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -821,6 +883,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_flt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -837,10 +909,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_flt_rule) +
- ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
- sizeof(struct ipa_flt_rule_del);
+ pre_entry * sizeof(struct ipa_flt_rule_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -850,6 +923,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_flt_rule *)param)->
+ num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -866,10 +949,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_mdfy_flt_rule) +
- ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules *
- sizeof(struct ipa_flt_rule_mdfy);
+ pre_entry * sizeof(struct ipa_flt_rule_mdfy);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -879,6 +963,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_mdfy_flt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -992,9 +1086,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_tx_props *)
- header)->num_tx_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_tx_props *)
+ header)->num_tx_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_tx_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1005,6 +1100,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
+ param)->num_tx_props
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_tx_props *)
+ param)->num_tx_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa_query_intf_tx_props(
(struct ipa_ioc_query_intf_tx_props *)param)) {
retval = -1;
@@ -1027,9 +1132,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_rx_props *)
- header)->num_rx_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_rx_props *)
+ header)->num_rx_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_rx_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1040,6 +1146,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
+ param)->num_rx_props != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_rx_props *)
+ param)->num_rx_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa_query_intf_rx_props(
(struct ipa_ioc_query_intf_rx_props *)param)) {
retval = -1;
@@ -1062,9 +1177,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_ext_props *)
- header)->num_ext_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_ext_props *)
+ header)->num_ext_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_ext_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1075,6 +1191,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
+ param)->num_ext_props != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_ext_props *)
+ param)->num_ext_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa_query_intf_ext_props(
(struct ipa_ioc_query_intf_ext_props *)param)) {
retval = -1;
@@ -1091,8 +1216,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- pyld_sz = sizeof(struct ipa_msg_meta) +
+ pre_entry =
((struct ipa_msg_meta *)header)->msg_len;
+ pyld_sz = sizeof(struct ipa_msg_meta) +
+ pre_entry;
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1102,6 +1229,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_msg_meta *)param)->msg_len
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_msg_meta *)param)->msg_len,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa_pull_msg((struct ipa_msg_meta *)param,
(char *)param + sizeof(struct ipa_msg_meta),
((struct ipa_msg_meta *)param)->msg_len) !=
@@ -1218,10 +1354,12 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_hdr_proc_ctx *)
+ header)->num_proc_ctxs;
pyld_sz =
sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
- ((struct ipa_ioc_add_hdr_proc_ctx *)header)->num_proc_ctxs *
- sizeof(struct ipa_hdr_proc_ctx_add);
+ pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1231,6 +1369,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
+ param)->num_proc_ctxs != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_hdr_proc_ctx *)
+ param)->num_proc_ctxs, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_add_hdr_proc_ctx(
(struct ipa_ioc_add_hdr_proc_ctx *)param)) {
retval = -EFAULT;
@@ -1247,10 +1394,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
- ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls *
- sizeof(struct ipa_hdr_proc_ctx_del);
+ pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1260,6 +1408,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
+ param)->num_hdls != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_hdr_proc_ctx *)param)->
+ num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_del_hdr_proc_ctx(
(struct ipa_ioc_del_hdr_proc_ctx *)param)) {
retval = -EFAULT;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index c0ac544fa271..695c8bc4cbc0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -2397,7 +2397,7 @@ begin:
if (skb->len < IPA_PKT_STATUS_SIZE) {
WARN_ON(sys->prev_skb != NULL);
IPADBG("status straddles buffer\n");
- sys->prev_skb = skb;
+ sys->prev_skb = skb_copy(skb, GFP_KERNEL);
sys->len_partial = skb->len;
return rc;
}
@@ -2482,7 +2482,7 @@ begin:
!status->exception) {
WARN_ON(sys->prev_skb != NULL);
IPADBG("Ins header in next buffer\n");
- sys->prev_skb = skb;
+ sys->prev_skb = skb_copy(skb, GFP_KERNEL);
sys->len_partial = skb->len;
return rc;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
index a1c927c5ec0f..249de808ec5c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -348,6 +348,11 @@ int ipa_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
return result;
}
+static void ipa2_send_msg_free(void *buff, u32 len, u32 type)
+{
+ kfree(buff);
+}
+
/**
* ipa2_send_msg() - Send "message" from kernel client to IPA driver
* @meta: [in] message meta-data
@@ -367,6 +372,7 @@ int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff,
ipa_msg_free_fn callback)
{
struct ipa_push_msg *msg;
+ void *data = NULL;
if (unlikely(!ipa_ctx)) {
IPAERR("IPA driver was not initialized\n");
@@ -392,8 +398,17 @@ int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff,
}
msg->meta = *meta;
- msg->buff = buff;
- msg->callback = callback;
+ if (meta->msg_len > 0 && buff) {
+ data = kmalloc(meta->msg_len, GFP_KERNEL);
+ if (data == NULL) {
+ IPAERR("fail to alloc data container\n");
+ kfree(msg);
+ return -ENOMEM;
+ }
+ memcpy(data, buff, meta->msg_len);
+ msg->buff = data;
+ msg->callback = ipa2_send_msg_free;
+ }
mutex_lock(&ipa_ctx->msg_lock);
list_add_tail(&msg->link, &ipa_ctx->msg_list);
@@ -401,6 +416,8 @@ int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff,
IPA_STATS_INC_CNT(ipa_ctx->stats.msg_w[meta->msg_type]);
wake_up(&ipa_ctx->msg_waitq);
+ if (buff)
+ callback(buff, meta->msg_len, meta->msg_type);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 2420dd78b4c0..1be9a6745531 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -2394,18 +2394,20 @@ static void rmnet_ipa_get_stats_and_update(bool reset)
}
rc = ipa_qmi_get_data_stats(&req, resp);
+ if (rc) {
+ IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc);
+ kfree(resp);
+ return;
+ }
- if (!rc) {
- memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
- msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
- msg_meta.msg_len =
- sizeof(struct ipa_get_data_stats_resp_msg_v01);
- rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
- if (rc) {
- IPAWANERR("ipa2_send_msg failed: %d\n", rc);
- kfree(resp);
- return;
- }
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
+ msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
+ rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+ if (rc) {
+ IPAWANERR("ipa2_send_msg failed: %d\n", rc);
+ kfree(resp);
+ return;
}
}
@@ -2454,18 +2456,20 @@ static void rmnet_ipa_get_network_stats_and_update(void)
req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id;
rc = ipa_qmi_get_network_stats(&req, resp);
+ if (rc) {
+ IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc);
+ kfree(resp);
+ return;
+ }
- if (!rc) {
- memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
- msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
- msg_meta.msg_len =
- sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
- rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
- if (rc) {
- IPAWANERR("ipa2_send_msg failed: %d\n", rc);
- kfree(resp);
- return;
- }
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
+ msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
+ rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+ if (rc) {
+ IPAWANERR("ipa2_send_msg failed: %d\n", rc);
+ kfree(resp);
+ return;
}
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index c9120ce83da8..1df2bc6b902c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -596,6 +596,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct ipa_ioc_v4_nat_del nat_del;
struct ipa_ioc_rm_dependency rm_depend;
size_t sz;
+ int pre_entry;
IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
@@ -649,11 +650,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
+ pre_entry =
+ ((struct ipa_ioc_nat_dma_cmd *)header)->entries;
pyld_sz =
sizeof(struct ipa_ioc_nat_dma_cmd) +
- ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
- sizeof(struct ipa_ioc_nat_dma_one);
+ pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -664,7 +665,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_nat_dma_cmd *)param)->entries,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
retval = -EFAULT;
break;
@@ -689,10 +698,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_hdr *)header)->num_hdrs;
pyld_sz =
sizeof(struct ipa_ioc_add_hdr) +
- ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
- sizeof(struct ipa_hdr_add);
+ pre_entry * sizeof(struct ipa_hdr_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -702,6 +712,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_hdr *)param)->num_hdrs,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
retval = -EFAULT;
break;
@@ -718,10 +737,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_hdr *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_hdr) +
- ((struct ipa_ioc_del_hdr *)header)->num_hdls *
- sizeof(struct ipa_hdr_del);
+ pre_entry * sizeof(struct ipa_hdr_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -731,6 +751,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_hdr *)param)->num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_del_hdr((struct ipa_ioc_del_hdr *)param)) {
retval = -EFAULT;
break;
@@ -747,10 +776,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_rt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_rt_rule) +
- ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
- sizeof(struct ipa_rt_rule_add);
+ pre_entry * sizeof(struct ipa_rt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -760,6 +790,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_rt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -776,10 +816,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_rt_rule_after) +
- ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules *
- sizeof(struct ipa_rt_rule_add);
+ pre_entry * sizeof(struct ipa_rt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -789,6 +830,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
+ num_rules != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_rt_rule_after *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_rt_rule_after(
(struct ipa_ioc_add_rt_rule_after *)param)) {
@@ -807,10 +858,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_mdfy_rt_rule) +
- ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules *
- sizeof(struct ipa_rt_rule_mdfy);
+ pre_entry * sizeof(struct ipa_rt_rule_mdfy);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -820,6 +872,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_mdfy_rt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -836,10 +898,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_rt_rule) +
- ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
- sizeof(struct ipa_rt_rule_del);
+ pre_entry * sizeof(struct ipa_rt_rule_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -849,6 +912,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -865,10 +937,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_flt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_flt_rule) +
- ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
- sizeof(struct ipa_flt_rule_add);
+ pre_entry * sizeof(struct ipa_flt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -878,6 +951,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_flt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -895,10 +978,12 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_flt_rule_after *)header)->
+ num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_flt_rule_after) +
- ((struct ipa_ioc_add_flt_rule_after *)header)->num_rules *
- sizeof(struct ipa_flt_rule_add);
+ pre_entry * sizeof(struct ipa_flt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -908,6 +993,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
+ num_rules != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_flt_rule_after *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_flt_rule_after(
(struct ipa_ioc_add_flt_rule_after *)param)) {
retval = -EFAULT;
@@ -925,10 +1020,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_flt_rule) +
- ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
- sizeof(struct ipa_flt_rule_del);
+ pre_entry * sizeof(struct ipa_flt_rule_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -938,6 +1034,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_flt_rule *)param)->
+ num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -954,10 +1060,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_mdfy_flt_rule) +
- ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules *
- sizeof(struct ipa_flt_rule_mdfy);
+ pre_entry * sizeof(struct ipa_flt_rule_mdfy);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -967,6 +1074,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_mdfy_flt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -1080,9 +1197,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_tx_props *)
- header)->num_tx_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_tx_props *)
+ header)->num_tx_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_tx_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1093,6 +1211,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
+ param)->num_tx_props
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_tx_props *)
+ param)->num_tx_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_query_intf_tx_props(
(struct ipa_ioc_query_intf_tx_props *)param)) {
retval = -1;
@@ -1115,9 +1243,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_rx_props *)
- header)->num_rx_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_rx_props *)
+ header)->num_rx_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_rx_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1128,6 +1257,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
+ param)->num_rx_props != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_rx_props *)
+ param)->num_rx_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_query_intf_rx_props(
(struct ipa_ioc_query_intf_rx_props *)param)) {
retval = -1;
@@ -1150,9 +1288,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_ext_props *)
- header)->num_ext_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_ext_props *)
+ header)->num_ext_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_ext_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1163,6 +1302,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
+ param)->num_ext_props != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_ext_props *)
+ param)->num_ext_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_query_intf_ext_props(
(struct ipa_ioc_query_intf_ext_props *)param)) {
retval = -1;
@@ -1179,8 +1327,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- pyld_sz = sizeof(struct ipa_msg_meta) +
+ pre_entry =
((struct ipa_msg_meta *)header)->msg_len;
+ pyld_sz = sizeof(struct ipa_msg_meta) +
+ pre_entry;
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1190,6 +1340,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_msg_meta *)param)->msg_len
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_msg_meta *)param)->msg_len,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_pull_msg((struct ipa_msg_meta *)param,
(char *)param + sizeof(struct ipa_msg_meta),
((struct ipa_msg_meta *)param)->msg_len) !=
@@ -1306,10 +1465,12 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_hdr_proc_ctx *)
+ header)->num_proc_ctxs;
pyld_sz =
sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
- ((struct ipa_ioc_add_hdr_proc_ctx *)header)->num_proc_ctxs *
- sizeof(struct ipa_hdr_proc_ctx_add);
+ pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1319,6 +1480,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
+ param)->num_proc_ctxs != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_hdr_proc_ctx *)
+ param)->num_proc_ctxs, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_hdr_proc_ctx(
(struct ipa_ioc_add_hdr_proc_ctx *)param)) {
retval = -EFAULT;
@@ -1335,10 +1505,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
- ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls *
- sizeof(struct ipa_hdr_proc_ctx_del);
+ pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1348,6 +1519,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
+ param)->num_hdls != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_hdr_proc_ctx *)param)->
+ num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_del_hdr_proc_ctx(
(struct ipa_ioc_del_hdr_proc_ctx *)param)) {
retval = -EFAULT;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 19eb1ee9c881..9e346f12a108 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -3836,6 +3836,7 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
union __packed gsi_channel_scratch ch_scratch;
struct ipa_gsi_ep_config *gsi_ep_info;
dma_addr_t dma_addr;
+ dma_addr_t evt_dma_addr;
int result;
if (!ep) {
@@ -3844,13 +3845,13 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
}
ep->gsi_evt_ring_hdl = ~0;
+ memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
/*
* allocate event ring for all interrupt-policy
* pipes and IPA consumers pipes
*/
if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
IPA_CLIENT_IS_CONS(ep->client)) {
- memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
gsi_evt_ring_props.intr = GSI_INTR_IRQ;
gsi_evt_ring_props.re_size =
@@ -3859,8 +3860,13 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
gsi_evt_ring_props.ring_len = IPA_GSI_EVT_RING_LEN;
gsi_evt_ring_props.ring_base_vaddr =
dma_alloc_coherent(ipa3_ctx->pdev, IPA_GSI_EVT_RING_LEN,
- &dma_addr, 0);
- gsi_evt_ring_props.ring_base_addr = dma_addr;
+ &evt_dma_addr, GFP_KERNEL);
+ if (!gsi_evt_ring_props.ring_base_vaddr) {
+ IPAERR("fail to dma alloc %u bytes\n",
+ IPA_GSI_EVT_RING_LEN);
+ return -ENOMEM;
+ }
+ gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
/* copy mem info */
ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
@@ -3895,7 +3901,7 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
if (!gsi_ep_info) {
IPAERR("Invalid ep number\n");
result = -EINVAL;
- goto fail_alloc_evt_ring;
+ goto fail_get_gsi_ep_info;
} else
gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
@@ -3914,7 +3920,12 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
gsi_channel_props.ring_len = 2 * in->desc_fifo_sz;
gsi_channel_props.ring_base_vaddr =
dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
- &dma_addr, 0);
+ &dma_addr, GFP_KERNEL);
+ if (!gsi_channel_props.ring_base_vaddr) {
+ IPAERR("fail to dma alloc %u bytes\n",
+ gsi_channel_props.ring_len);
+ goto fail_alloc_channel_ring;
+ }
gsi_channel_props.ring_base_addr = dma_addr;
/* copy mem info */
@@ -3950,7 +3961,7 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to write scratch %d\n", result);
- goto fail_start_channel;
+ goto fail_write_channel_scratch;
}
result = gsi_start_channel(ep->gsi_chan_hdl);
@@ -3962,17 +3973,25 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
return 0;
fail_start_channel:
+fail_write_channel_scratch:
if (gsi_dealloc_channel(ep->gsi_chan_hdl)
!= GSI_STATUS_SUCCESS) {
IPAERR("Failed to dealloc GSI chan.\n");
BUG();
}
fail_alloc_channel:
+ dma_free_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+ gsi_channel_props.ring_base_vaddr, dma_addr);
+fail_alloc_channel_ring:
+fail_get_gsi_ep_info:
if (ep->gsi_evt_ring_hdl != ~0) {
gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
ep->gsi_evt_ring_hdl = ~0;
}
fail_alloc_evt_ring:
+ if (gsi_evt_ring_props.ring_base_vaddr)
+ dma_free_coherent(ipa3_ctx->pdev, IPA_GSI_EVT_RING_LEN,
+ gsi_evt_ring_props.ring_base_vaddr, evt_dma_addr);
IPAERR("Return with err: %d\n", result);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index 32c5004dda95..22756c1fb168 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -358,6 +358,11 @@ int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
return result;
}
+static void ipa3_send_msg_free(void *buff, u32 len, u32 type)
+{
+ kfree(buff);
+}
+
/**
* ipa3_send_msg() - Send "message" from kernel client to IPA driver
* @meta: [in] message meta-data
@@ -377,6 +382,7 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
ipa_msg_free_fn callback)
{
struct ipa3_push_msg *msg;
+ void *data = NULL;
if (meta == NULL || (buff == NULL && callback != NULL) ||
(buff != NULL && callback == NULL)) {
@@ -397,8 +403,17 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
}
msg->meta = *meta;
- msg->buff = buff;
- msg->callback = callback;
+ if (meta->msg_len > 0 && buff) {
+ data = kmalloc(meta->msg_len, GFP_KERNEL);
+ if (data == NULL) {
+ IPAERR("fail to alloc data container\n");
+ kfree(msg);
+ return -ENOMEM;
+ }
+ memcpy(data, buff, meta->msg_len);
+ msg->buff = data;
+ msg->callback = ipa3_send_msg_free;
+ }
mutex_lock(&ipa3_ctx->msg_lock);
list_add_tail(&msg->link, &ipa3_ctx->msg_list);
@@ -406,6 +421,8 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]);
wake_up(&ipa3_ctx->msg_waitq);
+ if (buff)
+ callback(buff, meta->msg_len, meta->msg_type);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index aebdaab3ac77..2cd08d77df6e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -2467,18 +2467,20 @@ static void rmnet_ipa_get_stats_and_update(void)
req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
rc = ipa3_qmi_get_data_stats(&req, resp);
+ if (rc) {
+ IPAWANERR("ipa3_qmi_get_data_stats failed: %d\n", rc);
+ kfree(resp);
+ return;
+ }
- if (!rc) {
- memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
- msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
- msg_meta.msg_len =
- sizeof(struct ipa_get_data_stats_resp_msg_v01);
- rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
- if (rc) {
- IPAWANERR("ipa_send_msg failed: %d\n", rc);
- kfree(resp);
- return;
- }
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
+ msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
+ rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+ if (rc) {
+ IPAWANERR("ipa_send_msg failed: %d\n", rc);
+ kfree(resp);
+ return;
}
}
@@ -2527,18 +2529,20 @@ static void rmnet_ipa_get_network_stats_and_update(void)
req.mux_id_list[0] = ipa3_rmnet_ctx.metered_mux_id;
rc = ipa3_qmi_get_network_stats(&req, resp);
+ if (rc) {
+ IPAWANERR("ipa3_qmi_get_network_stats failed: %d\n", rc);
+ kfree(resp);
+ return;
+ }
- if (!rc) {
- memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
- msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
- msg_meta.msg_len =
- sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
- rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
- if (rc) {
- IPAWANERR("ipa_send_msg failed: %d\n", rc);
- kfree(resp);
- return;
- }
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
+ msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
+ rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+ if (rc) {
+ IPAWANERR("ipa_send_msg failed: %d\n", rc);
+ kfree(resp);
+ return;
}
}
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
index e1686e608906..c20fd2b42487 100644
--- a/drivers/platform/msm/ipa/test/Makefile
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
-ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o
+ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o
diff --git a/drivers/platform/msm/ipa/test/ipa_test_dma.c b/drivers/platform/msm/ipa/test/ipa_test_dma.c
new file mode 100644
index 000000000000..af1b584b0d8c
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_test_dma.c
@@ -0,0 +1,931 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa.h>
+#include "../ipa_v3/ipa_i.h"
+#include "ipa_ut_framework.h"
+
+#define IPA_TEST_DMA_WQ_NAME_BUFF_SZ 64
+#define IPA_TEST_DMA_MT_TEST_NUM_WQ 500
+#define IPA_TEST_DMA_MEMCPY_BUFF_SIZE 16384
+#define IPA_TEST_DMA_MAX_PKT_SIZE 0xFF00
+#define IPA_DMA_TEST_LOOP_NUM 1000
+#define IPA_DMA_TEST_INT_LOOP_NUM 50
+#define IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM 128
+#define IPA_DMA_RUN_TEST_UNIT_IN_LOOP(test_unit, iters, rc, args...) \
+ do { \
+ int __i; \
+ for (__i = 0; __i < iters; __i++) { \
+ IPA_UT_LOG(#test_unit " START iter %d\n", __i); \
+ rc = test_unit(args); \
+ if (!rc) \
+ continue; \
+ IPA_UT_LOG(#test_unit " failed %d\n", rc); \
+ break; \
+ } \
+ } while (0)
+
+/**
+ * struct ipa_test_dma_async_user_data - user_data structure for async memcpy
+ * @src_mem: source memory buffer
+ * @dest_mem: destination memory buffer
+ * @call_serial_number: Id of the caller
+ * @copy_done: Completion object
+ */
+struct ipa_test_dma_async_user_data {
+ struct ipa_mem_buffer src_mem;
+ struct ipa_mem_buffer dest_mem;
+ int call_serial_number;
+ struct completion copy_done;
+};
+
+/**
+ * ipa_test_dma_setup() - Suite setup function
+ */
+static int ipa_test_dma_setup(void **ppriv)
+{
+ int rc;
+
+ IPA_UT_DBG("Start Setup\n");
+
+ if (!ipa3_ctx) {
+ IPA_UT_ERR("No IPA ctx\n");
+ return -EINVAL;
+ }
+
+ rc = ipa_dma_init();
+ if (rc)
+ IPA_UT_ERR("Fail to init ipa_dma - return code %d\n", rc);
+ else
+ IPA_UT_DBG("ipa_dma_init() Completed successfully!\n");
+
+ *ppriv = NULL;
+
+ return rc;
+}
+
+/**
+ * ipa_test_dma_teardown() - Suite teardown function
+ */
+static int ipa_test_dma_teardown(void *priv)
+{
+ IPA_UT_DBG("Start Teardown\n");
+ ipa_dma_destroy();
+ return 0;
+}
+
+static int ipa_test_dma_alloc_buffs(struct ipa_mem_buffer *src,
+ struct ipa_mem_buffer *dest,
+ int size)
+{
+ int i;
+ static int val = 1;
+ int rc;
+
+ val++;
+ src->size = size;
+ src->base = dma_alloc_coherent(ipa3_ctx->pdev, src->size,
+ &src->phys_base, GFP_KERNEL);
+ if (!src->base) {
+ IPA_UT_LOG("fail to alloc dma mem %d bytes\n", size);
+ IPA_UT_TEST_FAIL_REPORT("fail to alloc dma mem");
+ return -ENOMEM;
+ }
+
+ dest->size = size;
+ dest->base = dma_alloc_coherent(ipa3_ctx->pdev, dest->size,
+ &dest->phys_base, GFP_KERNEL);
+ if (!dest->base) {
+ IPA_UT_LOG("fail to alloc dma mem %d bytes\n", size);
+ IPA_UT_TEST_FAIL_REPORT("fail to alloc dma mem");
+ rc = -ENOMEM;
+ goto fail_alloc_dest;
+ }
+
+ memset(dest->base, 0, dest->size);
+ for (i = 0; i < src->size; i++)
+ memset(src->base + i, (val + i) & 0xFF, 1);
+ rc = memcmp(dest->base, src->base, dest->size);
+ if (rc == 0) {
+ IPA_UT_LOG("dest & src buffers are equal\n");
+ IPA_UT_TEST_FAIL_REPORT("dest & src buffers are equal");
+ rc = -EFAULT;
+ goto fail_buf_cmp;
+ }
+
+ return 0;
+
+fail_buf_cmp:
+ dma_free_coherent(ipa3_ctx->pdev, dest->size, dest->base,
+ dest->phys_base);
+fail_alloc_dest:
+ dma_free_coherent(ipa3_ctx->pdev, src->size, src->base,
+ src->phys_base);
+ return rc;
+}
+
+static void ipa_test_dma_destroy_buffs(struct ipa_mem_buffer *src,
+ struct ipa_mem_buffer *dest)
+{
+ dma_free_coherent(ipa3_ctx->pdev, src->size, src->base,
+ src->phys_base);
+ dma_free_coherent(ipa3_ctx->pdev, dest->size, dest->base,
+ dest->phys_base);
+}
+
+/**
+ * ipa_test_dma_memcpy_sync() - memcpy in sync mode
+ *
+ * @size: buffer size
+ * @expect_fail: test expects the memcpy to fail
+ *
+ * To be run during tests
+ * 1. Alloc src and dst buffers
+ * 2. sync memcpy src to dst via dma
+ * 3. compare src and dst if memcpy succeeded as expected
+ */
+static int ipa_test_dma_memcpy_sync(int size, bool expect_fail)
+{
+ int rc = 0;
+ int i;
+ struct ipa_mem_buffer src_mem;
+ struct ipa_mem_buffer dest_mem;
+ u8 *src;
+ u8 *dest;
+
+ rc = ipa_test_dma_alloc_buffs(&src_mem, &dest_mem, size);
+ if (rc) {
+ IPA_UT_LOG("fail to alloc buffers\n");
+ IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers");
+ return rc;
+ }
+
+ rc = ipa_dma_sync_memcpy(dest_mem.phys_base, src_mem.phys_base, size);
+ if (!expect_fail && rc) {
+ IPA_UT_LOG("fail to sync memcpy - rc = %d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("sync memcpy failed");
+ goto free_buffs;
+ }
+ if (expect_fail && !rc) {
+ IPA_UT_LOG("sync memcpy succeeded while expected to fail\n");
+ IPA_UT_TEST_FAIL_REPORT(
+ "sync memcpy succeeded while expected to fail");
+ rc = -EFAULT;
+ goto free_buffs;
+ }
+
+ if (!rc) {
+ /* if memcpy succeeded, compare the buffers */
+ rc = memcmp(dest_mem.base, src_mem.base, size);
+ if (rc) {
+ IPA_UT_LOG("BAD memcpy - buffs are not equals\n");
+ IPA_UT_TEST_FAIL_REPORT(
+ "BAD memcpy - buffs are not equals");
+ src = src_mem.base;
+ dest = dest_mem.base;
+ for (i = 0; i < size; i++) {
+ if (*(src + i) != *(dest + i)) {
+ IPA_UT_LOG("byte: %d 0x%x != 0x%x\n",
+ i, *(src + i), *(dest + i));
+ }
+ }
+ }
+ } else {
+ /* if memcpy failed as expected, update the rc */
+ rc = 0;
+ }
+
+free_buffs:
+ ipa_test_dma_destroy_buffs(&src_mem, &dest_mem);
+ return rc;
+}
+
+static void ipa_test_dma_async_memcpy_cb(void *comp_obj)
+{
+ struct completion *xfer_done;
+
+ if (!comp_obj) {
+ IPA_UT_ERR("Invalid Input\n");
+ return;
+ }
+ xfer_done = (struct completion *)comp_obj;
+ complete(xfer_done);
+}
+
+static void ipa_test_dma_async_memcpy_cb_user_data(void *user_param)
+{
+ int rc;
+ int i;
+ u8 *src;
+ u8 *dest;
+ struct ipa_test_dma_async_user_data *udata =
+ (struct ipa_test_dma_async_user_data *)user_param;
+
+ if (!udata) {
+ IPA_UT_ERR("Invalid user param\n");
+ return;
+ }
+
+ rc = memcmp(udata->dest_mem.base, udata->src_mem.base,
+ udata->src_mem.size);
+ if (rc) {
+ IPA_UT_LOG("BAD memcpy - buffs are not equal sn=%d\n",
+ udata->call_serial_number);
+ IPA_UT_TEST_FAIL_REPORT(
+ "BAD memcpy - buffs are not equal");
+ src = udata->src_mem.base;
+ dest = udata->dest_mem.base;
+ for (i = 0; i < udata->src_mem.size; i++) {
+ if (*(src + i) != *(dest + i)) {
+ IPA_UT_ERR("byte: %d 0x%x != 0x%x\n", i,
+ *(src + i), *(dest + i));
+ }
+ }
+ return;
+ }
+
+ IPA_UT_LOG("Notify on async memcopy sn=%d\n",
+ udata->call_serial_number);
+ complete(&(udata->copy_done));
+}
+
+/**
+ * ipa_test_dma_memcpy_async() - memcpy in async mode
+ *
+ * @size: buffer size
+ * @expect_fail: test expected the memcpy to fail
+ *
+ * To be run during tests
+ * 1. Alloc src and dst buffers
+ * 2. async memcpy src to dst via dma and wait for completion
+ * 3. compare src and dst if memcpy succeeded as expected
+ */
+static int ipa_test_dma_memcpy_async(int size, bool expect_fail)
+{
+ int rc = 0;
+ int i;
+ struct ipa_mem_buffer src_mem;
+ struct ipa_mem_buffer dest_mem;
+ u8 *src;
+ u8 *dest;
+ struct completion xfer_done;
+
+ rc = ipa_test_dma_alloc_buffs(&src_mem, &dest_mem, size);
+ if (rc) {
+ IPA_UT_LOG("fail to alloc buffers\n");
+ IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers");
+ return rc;
+ }
+
+ init_completion(&xfer_done);
+ rc = ipa_dma_async_memcpy(dest_mem.phys_base, src_mem.phys_base, size,
+ ipa_test_dma_async_memcpy_cb, &xfer_done);
+ if (!expect_fail && rc) {
+ IPA_UT_LOG("fail to initiate async memcpy - rc=%d\n",
+ rc);
+ IPA_UT_TEST_FAIL_REPORT("async memcpy initiate failed");
+ goto free_buffs;
+ }
+ if (expect_fail && !rc) {
+ IPA_UT_LOG("async memcpy succeeded while expected to fail\n");
+ IPA_UT_TEST_FAIL_REPORT(
+ "async memcpy succeeded while expected to fail");
+ rc = -EFAULT;
+ goto free_buffs;
+ }
+
+ if (!rc) {
+ /* if memcpy succeeded, compare the buffers */
+ wait_for_completion(&xfer_done);
+ rc = memcmp(dest_mem.base, src_mem.base, size);
+ if (rc) {
+ IPA_UT_LOG("BAD memcpy - buffs are not equals\n");
+ IPA_UT_TEST_FAIL_REPORT(
+ "BAD memcpy - buffs are not equals");
+ src = src_mem.base;
+ dest = dest_mem.base;
+ for (i = 0; i < size; i++) {
+ if (*(src + i) != *(dest + i)) {
+ IPA_UT_LOG("byte: %d 0x%x != 0x%x\n",
+ i, *(src + i), *(dest + i));
+ }
+ }
+ }
+ } else {
+ /* if memcpy failed as expected, update the rc */
+ rc = 0;
+ }
+
+free_buffs:
+ ipa_test_dma_destroy_buffs(&src_mem, &dest_mem);
+ return rc;
+}
+
+/**
+ * ipa_test_dma_sync_async_memcpy() - memcpy in sync and then async mode
+ *
+ * @size: buffer size
+ *
+ * To be run during tests
+ * 1. several sync memcopy in row
+ * 2. several async memcopy -
+ * back-to-back (next async try initiated after prev is completed)
+ */
+static int ipa_test_dma_sync_async_memcpy(int size)
+{
+ int rc;
+
+ IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_sync,
+ IPA_DMA_TEST_INT_LOOP_NUM, rc, size, false);
+ if (rc) {
+ IPA_UT_LOG("sync memcopy fail rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("sync memcopy fail");
+ return rc;
+ }
+
+ IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_async,
+ IPA_DMA_TEST_INT_LOOP_NUM, rc, size, false);
+ if (rc) {
+ IPA_UT_LOG("async memcopy fail rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("async memcopy fail");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: test control API - enable/disable dma
+ * 1. enable dma
+ * 2. disable dma
+ */
+static int ipa_test_dma_control_api(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: memcpy before dma enable
+ *
+ * 1. sync memcpy - should fail
+ * 2. async memcpy - should fail
+ */
+static int ipa_test_dma_memcpy_before_enable(void *priv)
+{
+	int rc;
+
+	IPA_UT_LOG("Test Start\n");
+
+	rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, true);
+	if (rc) {
+		IPA_UT_LOG("sync memcpy succeeded unexpectedly rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("sync memcpy succeeded unexpectedly");
+		return rc;
+	}
+
+	rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, true);
+	if (rc) {
+		IPA_UT_LOG("async memcpy succeeded unexpectedly rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("async memcpy succeeded unexpectedly");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: Sync memory copy
+ *
+ * 1. dma enable
+ * 2. sync memcpy
+ * 3. dma disable
+ */
+static int ipa_test_dma_sync_memcpy(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("sync memcpy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("sync memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: Async memory copy
+ *
+ * 1. dma enable
+ * 2. async memcpy
+ * 3. dma disable
+ */
+static int ipa_test_dma_async_memcpy(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("async memcpy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("async memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: Iteration of sync memory copy
+ *
+ * 1. dma enable
+ * 2. sync memcpy in loop - in row
+ * 3. dma disable
+ */
+static int ipa_test_dma_sync_memcpy_in_loop(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_sync,
+ IPA_DMA_TEST_LOOP_NUM, rc,
+ IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("Iterations of sync memcpy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("Iterations of sync memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: Iteration of async memory copy
+ *
+ * 1. dma enable
+ * 2. async memcpy in loop - back-to-back
+ * next async copy is initiated once previous one completed
+ * 3. dma disable
+ */
+static int ipa_test_dma_async_memcpy_in_loop(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_memcpy_async,
+ IPA_DMA_TEST_LOOP_NUM, rc,
+ IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("Iterations of async memcpy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("Iterations of async memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * TEST: Iteration of interleaved sync and async memory copy
+ *
+ * 1. dma enable
+ * 2. sync and async memcpy in loop - interleaved
+ * 3. dma disable
+ */
+static int ipa_test_dma_interleaved_sync_async_memcpy_in_loop(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ IPA_DMA_RUN_TEST_UNIT_IN_LOOP(ipa_test_dma_sync_async_memcpy,
+ IPA_DMA_TEST_INT_LOOP_NUM, rc,
+ IPA_TEST_DMA_MEMCPY_BUFF_SIZE);
+ if (rc) {
+ IPA_UT_LOG(
+ "Iterations of interleaved sync async memcpy failed rc=%d\n"
+ , rc);
+ IPA_UT_TEST_FAIL_REPORT(
+ "Iterations of interleaved sync async memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+static atomic_t ipa_test_dma_mt_test_pass;
+
+static void ipa_test_dma_wrapper_test_one_sync(struct work_struct *work)
+{
+ int rc;
+
+ rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("fail sync memcpy from thread rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail sync memcpy from thread");
+ return;
+ }
+ atomic_inc(&ipa_test_dma_mt_test_pass);
+}
+
+static void ipa_test_dma_wrapper_test_one_async(struct work_struct *work)
+{
+ int rc;
+
+ rc = ipa_test_dma_memcpy_async(IPA_TEST_DMA_MEMCPY_BUFF_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("fail async memcpy from thread rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail async memcpy from thread");
+ return;
+ }
+ atomic_inc(&ipa_test_dma_mt_test_pass);
+}
+
+/**
+ * TEST: Multiple threads running sync and async mem copy
+ *
+ * 1. dma enable
+ * 2. In-loop
+ * 2.1 create wq for sync memcpy
+ * 2.2 create wq for async memcpy
+ * 2.3 queue sync memcpy work
+ * 2.4 queue async memcpy work
+ * 3. In-loop
+ * 3.1 flush and destroy wq sync
+ * 3.2 flush and destroy wq async
+ * 4. dma disable
+ */
+static int ipa_test_dma_mt_sync_async(void *priv)
+{
+	int rc;
+	int i;
+	static struct workqueue_struct *wq_sync[IPA_TEST_DMA_MT_TEST_NUM_WQ];
+	static struct workqueue_struct *wq_async[IPA_TEST_DMA_MT_TEST_NUM_WQ];
+	static struct work_struct work_async[IPA_TEST_DMA_MT_TEST_NUM_WQ];
+	static struct work_struct work_sync[IPA_TEST_DMA_MT_TEST_NUM_WQ];
+	char buff[IPA_TEST_DMA_WQ_NAME_BUFF_SZ];
+
+	memset(wq_sync, 0, sizeof(wq_sync));
+	memset(wq_async, 0, sizeof(wq_async));
+	memset(work_async, 0, sizeof(work_async));
+	memset(work_sync, 0, sizeof(work_sync));
+
+	rc = ipa_dma_enable();
+	if (rc) {
+		IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+		return rc;
+	}
+
+	atomic_set(&ipa_test_dma_mt_test_pass, 0);
+	for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) {
+		snprintf(buff, sizeof(buff), "ipa_test_dmaSwq%d", i);
+		wq_sync[i] = create_singlethread_workqueue(buff);
+		if (!wq_sync[i]) {
+			IPA_UT_ERR("failed to create sync wq#%d\n", i);
+			rc = -EFAULT;
+			goto fail_create_wq;
+		}
+		snprintf(buff, sizeof(buff), "ipa_test_dmaAwq%d", i);
+		wq_async[i] = create_singlethread_workqueue(buff);
+		if (!wq_async[i]) {
+			IPA_UT_ERR("failed to create async wq#%d\n", i);
+			rc = -EFAULT;
+			goto fail_create_wq;
+		}
+
+		INIT_WORK(&work_sync[i], ipa_test_dma_wrapper_test_one_sync);
+		queue_work(wq_sync[i], &work_sync[i]);
+		INIT_WORK(&work_async[i], ipa_test_dma_wrapper_test_one_async);
+		queue_work(wq_async[i], &work_async[i]);
+	}
+
+	for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) {
+		flush_workqueue(wq_sync[i]);
+		destroy_workqueue(wq_sync[i]);
+		flush_workqueue(wq_async[i]);
+		destroy_workqueue(wq_async[i]);
+	}
+
+	rc = ipa_dma_disable();
+	if (rc) {
+		IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+		return rc;
+	}
+
+	if ((2 * IPA_TEST_DMA_MT_TEST_NUM_WQ) !=
+		atomic_read(&ipa_test_dma_mt_test_pass)) {
+		IPA_UT_LOG(
+			"Multi-threaded sync/async memcopy failed passed=%d\n"
+			, atomic_read(&ipa_test_dma_mt_test_pass));
+		IPA_UT_TEST_FAIL_REPORT(
+			"Multi-threaded sync/async memcopy failed");
+		return -EFAULT;
+	}
+
+	return 0;
+
+fail_create_wq:
+	(void)ipa_dma_disable();
+	for (i = 0; i < IPA_TEST_DMA_MT_TEST_NUM_WQ; i++) {
+		if (wq_sync[i])
+			destroy_workqueue(wq_sync[i]);
+		if (wq_async[i])
+			destroy_workqueue(wq_async[i]);
+	}
+
+	return rc;
+}
+
+/**
+ * TEST: Several parallel async memory copy iterations
+ *
+ * 1. create several user_data structures - one per iteration
+ * 2. allocate buffs. Give slice for each iteration
+ * 3. iterations of async mem copy
+ * 4. wait for all to complete
+ * 5. dma disable
+ */
+static int ipa_test_dma_parallel_async_memcpy_in_loop(void *priv)
+{
+	int rc;
+	struct ipa_test_dma_async_user_data *udata;
+	struct ipa_mem_buffer all_src_mem;
+	struct ipa_mem_buffer all_dest_mem;
+	int i;
+	bool is_fail = false;
+
+	IPA_UT_LOG("Test Start\n");
+
+	rc = ipa_dma_enable();
+	if (rc) {
+		IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+		return rc;
+	}
+
+	udata = kzalloc(IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM *
+		sizeof(struct ipa_test_dma_async_user_data), GFP_KERNEL);
+	if (!udata) {
+		IPA_UT_ERR("fail allocate user_data array\n");
+		(void)ipa_dma_disable();
+		return -ENOMEM;
+	}
+
+	rc = ipa_test_dma_alloc_buffs(&all_src_mem, &all_dest_mem,
+		IPA_TEST_DMA_MEMCPY_BUFF_SIZE);
+	if (rc) {
+		IPA_UT_LOG("fail to alloc buffers\n");
+		IPA_UT_TEST_FAIL_REPORT("fail to alloc buffers");
+		kfree(udata);
+		(void)ipa_dma_disable();
+		return rc;
+	}
+
+	for (i = 0 ; i < IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM ; i++) {
+		udata[i].src_mem.size =
+			IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+			IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM;
+		udata[i].src_mem.base = all_src_mem.base + i *
+			(IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+			IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM);
+		udata[i].src_mem.phys_base = all_src_mem.phys_base + i *
+			(IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+			IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM);
+
+		udata[i].dest_mem.size =
+			(IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+			IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM);
+		udata[i].dest_mem.base = all_dest_mem.base + i *
+			(IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+			IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM);
+		udata[i].dest_mem.phys_base = all_dest_mem.phys_base + i *
+			(IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+			IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM);
+
+		udata[i].call_serial_number = i + 1;
+		init_completion(&(udata[i].copy_done));
+		rc = ipa_dma_async_memcpy(udata[i].dest_mem.phys_base,
+			udata[i].src_mem.phys_base,
+			(IPA_TEST_DMA_MEMCPY_BUFF_SIZE /
+			IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM),
+			ipa_test_dma_async_memcpy_cb_user_data, &udata[i]);
+		if (rc) {
+			IPA_UT_LOG("async memcpy initiation fail i=%d rc=%d\n",
+				i, rc);
+			is_fail = true; complete(&udata[i].copy_done);
+		}
+	}
+
+	for (i = 0; i < IPA_DMA_TEST_ASYNC_PARALLEL_LOOP_NUM ; i++)
+		wait_for_completion(&udata[i].copy_done);
+
+	ipa_test_dma_destroy_buffs(&all_src_mem, &all_dest_mem);
+	kfree(udata);
+	rc = ipa_dma_disable();
+	if (rc) {
+		IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+		return rc;
+	}
+
+	if (is_fail) {
+		IPA_UT_LOG("async memcopy failed\n");
+		IPA_UT_TEST_FAIL_REPORT("async memcopy failed");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: Sync memory copy
+ *
+ * 1. dma enable
+ * 2. sync memcpy with max packet size
+ * 3. dma disable
+ */
+static int ipa_test_dma_sync_memcpy_max_pkt_size(void *priv)
+{
+ int rc;
+
+ IPA_UT_LOG("Test Start\n");
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ IPA_UT_LOG("DMA enable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail enable dma");
+ return rc;
+ }
+
+ rc = ipa_test_dma_memcpy_sync(IPA_TEST_DMA_MAX_PKT_SIZE, false);
+ if (rc) {
+ IPA_UT_LOG("sync memcpy failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("sync memcpy failed");
+ (void)ipa_dma_disable();
+ return rc;
+ }
+
+ rc = ipa_dma_disable();
+ if (rc) {
+ IPA_UT_LOG("DMA disable failed rc=%d\n", rc);
+ IPA_UT_TEST_FAIL_REPORT("fail disable dma");
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Suite definition block */
+IPA_UT_DEFINE_SUITE_START(dma, "DMA for GSI",
+ ipa_test_dma_setup, ipa_test_dma_teardown)
+{
+ IPA_UT_ADD_TEST(control_api,
+ "Control API",
+ ipa_test_dma_control_api,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(memcpy_before_enable,
+ "Call memcpy before dma enable and expect it to fail",
+ ipa_test_dma_memcpy_before_enable,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(sync_memcpy,
+ "Sync memory copy",
+ ipa_test_dma_sync_memcpy,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(async_memcpy,
+ "Async memory copy",
+ ipa_test_dma_async_memcpy,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(sync_memcpy_in_loop,
+ "Several sync memory copy iterations",
+ ipa_test_dma_sync_memcpy_in_loop,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(async_memcpy_in_loop,
+ "Several async memory copy iterations",
+ ipa_test_dma_async_memcpy_in_loop,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(interleaved_sync_async_memcpy_in_loop,
+ "Several interleaved sync and async memory copy iterations",
+ ipa_test_dma_interleaved_sync_async_memcpy_in_loop,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(multi_threaded_multiple_sync_async_memcpy,
+ "Several multi-threaded sync and async memory copy iterations",
+ ipa_test_dma_mt_sync_async,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(parallel_async_memcpy_in_loop,
+ "Several parallel async memory copy iterations",
+ ipa_test_dma_parallel_async_memcpy_in_loop,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(sync_memcpy_max_pkt_size,
+ "Sync memory copy with max packet size",
+ ipa_test_dma_sync_memcpy_max_pkt_size,
+ true, IPA_HW_v3_0, IPA_HW_MAX),
+} IPA_UT_DEFINE_SUITE_END(dma);
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
index 944800f8e4be..bbb8b771ba0b 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
+++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
@@ -21,6 +21,7 @@
* No importance for order.
*/
IPA_UT_DECLARE_SUITE(mhi);
+IPA_UT_DECLARE_SUITE(dma);
IPA_UT_DECLARE_SUITE(example);
@@ -31,6 +32,7 @@ IPA_UT_DECLARE_SUITE(example);
IPA_UT_DEFINE_ALL_SUITES_START
{
IPA_UT_REGISTER_SUITE(mhi),
+ IPA_UT_REGISTER_SUITE(dma),
IPA_UT_REGISTER_SUITE(example),
} IPA_UT_DEFINE_ALL_SUITES_END;
diff --git a/drivers/platform/msm/sps/sps_bam.c b/drivers/platform/msm/sps/sps_bam.c
index f34242f29e2b..38a6474dd06f 100644
--- a/drivers/platform/msm/sps/sps_bam.c
+++ b/drivers/platform/msm/sps/sps_bam.c
@@ -1083,6 +1083,7 @@ int sps_bam_pipe_disconnect(struct sps_bam *dev, u32 pipe_index)
{
struct sps_pipe *pipe;
int result;
+ unsigned long flags;
if (pipe_index >= dev->props.num_pipes) {
SPS_ERR(dev, "sps:Invalid BAM %pa pipe: %d\n", BAM_ID(dev),
@@ -1094,8 +1095,10 @@ int sps_bam_pipe_disconnect(struct sps_bam *dev, u32 pipe_index)
pipe = dev->pipes[pipe_index];
if (BAM_PIPE_IS_ASSIGNED(pipe)) {
if ((dev->pipe_active_mask & (1UL << pipe_index))) {
+ spin_lock_irqsave(&dev->isr_lock, flags);
list_del(&pipe->list);
dev->pipe_active_mask &= ~(1UL << pipe_index);
+ spin_unlock_irqrestore(&dev->isr_lock, flags);
}
dev->pipe_remote_mask &= ~(1UL << pipe_index);
if (pipe->connect.options & SPS_O_NO_DISABLE)
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index b1c3441b285a..545a1e684b25 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -227,6 +227,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(usb_otg),
POWER_SUPPLY_ATTR(battery_charging_enabled),
POWER_SUPPLY_ATTR(charging_enabled),
+ POWER_SUPPLY_ATTR(step_charging_enabled),
+ POWER_SUPPLY_ATTR(step_charging_step),
POWER_SUPPLY_ATTR(pin_enabled),
POWER_SUPPLY_ATTR(input_suspend),
POWER_SUPPLY_ATTR(input_voltage_regulation),
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index 90a93064ca84..a7dd0bfc66ad 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -38,15 +38,15 @@ static struct smb_params v1_params = {
.fv = {
.name = "float voltage",
.reg = FLOAT_VOLTAGE_CFG_REG,
- .min_u = 2500000,
- .max_u = 5000000,
- .step_u = 10000,
+ .min_u = 3487500,
+ .max_u = 4920000,
+ .step_u = 7500,
},
.usb_icl = {
.name = "usb input current limit",
.reg = USBIN_CURRENT_LIMIT_CFG_REG,
.min_u = 0,
- .max_u = 6000000,
+ .max_u = 4800000,
.step_u = 25000,
},
.icl_stat = {
@@ -112,8 +112,90 @@ static struct smb_params v1_params = {
.max_u = 1575000,
.step_u = 25000,
},
+ .step_soc_threshold[0] = {
+ .name = "step charge soc threshold 1",
+ .reg = STEP_CHG_SOC_OR_BATT_V_TH1_REG,
+ .min_u = 0,
+ .max_u = 100,
+ .step_u = 1,
+ },
+ .step_soc_threshold[1] = {
+ .name = "step charge soc threshold 2",
+ .reg = STEP_CHG_SOC_OR_BATT_V_TH2_REG,
+ .min_u = 0,
+ .max_u = 100,
+ .step_u = 1,
+ },
+ .step_soc_threshold[2] = {
+ .name = "step charge soc threshold 3",
+ .reg = STEP_CHG_SOC_OR_BATT_V_TH3_REG,
+ .min_u = 0,
+ .max_u = 100,
+ .step_u = 1,
+ },
+ .step_soc_threshold[3] = {
+ .name = "step charge soc threshold 4",
+ .reg = STEP_CHG_SOC_OR_BATT_V_TH4_REG,
+ .min_u = 0,
+ .max_u = 100,
+ .step_u = 1,
+ },
+ .step_soc = {
+ .name = "step charge soc",
+ .reg = STEP_CHG_SOC_VBATT_V_REG,
+ .min_u = 0,
+ .max_u = 100,
+ .step_u = 1,
+ .set_proc = smblib_mapping_soc_from_field_value,
+ },
+ .step_cc_delta[0] = {
+ .name = "step charge current delta 1",
+ .reg = STEP_CHG_CURRENT_DELTA1_REG,
+ .min_u = 100000,
+ .max_u = 3200000,
+ .step_u = 100000,
+ .get_proc = smblib_mapping_cc_delta_to_field_value,
+ .set_proc = smblib_mapping_cc_delta_from_field_value,
+ },
+ .step_cc_delta[1] = {
+ .name = "step charge current delta 2",
+ .reg = STEP_CHG_CURRENT_DELTA2_REG,
+ .min_u = 100000,
+ .max_u = 3200000,
+ .step_u = 100000,
+ .get_proc = smblib_mapping_cc_delta_to_field_value,
+ .set_proc = smblib_mapping_cc_delta_from_field_value,
+ },
+ .step_cc_delta[2] = {
+ .name = "step charge current delta 3",
+ .reg = STEP_CHG_CURRENT_DELTA3_REG,
+ .min_u = 100000,
+ .max_u = 3200000,
+ .step_u = 100000,
+ .get_proc = smblib_mapping_cc_delta_to_field_value,
+ .set_proc = smblib_mapping_cc_delta_from_field_value,
+ },
+ .step_cc_delta[3] = {
+ .name = "step charge current delta 4",
+ .reg = STEP_CHG_CURRENT_DELTA4_REG,
+ .min_u = 100000,
+ .max_u = 3200000,
+ .step_u = 100000,
+ .get_proc = smblib_mapping_cc_delta_to_field_value,
+ .set_proc = smblib_mapping_cc_delta_from_field_value,
+ },
+ .step_cc_delta[4] = {
+ .name = "step charge current delta 5",
+ .reg = STEP_CHG_CURRENT_DELTA5_REG,
+ .min_u = 100000,
+ .max_u = 3200000,
+ .step_u = 100000,
+ .get_proc = smblib_mapping_cc_delta_to_field_value,
+ .set_proc = smblib_mapping_cc_delta_from_field_value,
+ },
};
+#define STEP_CHARGING_MAX_STEPS 5
struct smb_dt_props {
bool suspend_input;
int fcc_ua;
@@ -121,6 +203,8 @@ struct smb_dt_props {
int dc_icl_ua;
int fv_uv;
int wipower_max_uw;
+ u32 step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
+ s32 step_cc_delta[STEP_CHARGING_MAX_STEPS];
};
struct smb2 {
@@ -150,6 +234,28 @@ static int smb2_parse_dt(struct smb2 *chip)
return -EINVAL;
}
+ chg->step_chg_enabled = true;
+
+ if (of_property_count_u32_elems(node, "qcom,step-soc-thresholds")
+ != STEP_CHARGING_MAX_STEPS - 1)
+ chg->step_chg_enabled = false;
+
+ rc = of_property_read_u32_array(node, "qcom,step-soc-thresholds",
+ chip->dt.step_soc_threshold,
+ STEP_CHARGING_MAX_STEPS - 1);
+ if (rc < 0)
+ chg->step_chg_enabled = false;
+
+ if (of_property_count_u32_elems(node, "qcom,step-current-deltas")
+ != STEP_CHARGING_MAX_STEPS)
+ chg->step_chg_enabled = false;
+
+ rc = of_property_read_u32_array(node, "qcom,step-current-deltas",
+ chip->dt.step_cc_delta,
+ STEP_CHARGING_MAX_STEPS);
+ if (rc < 0)
+ chg->step_chg_enabled = false;
+
chip->dt.suspend_input = of_property_read_bool(node,
"qcom,suspend-input");
@@ -173,10 +279,10 @@ static int smb2_parse_dt(struct smb2 *chip)
if (rc < 0)
chip->dt.dc_icl_ua = -EINVAL;
- rc = of_property_read_u32(node,
- "qcom,wipower-max-uw", &chip->dt.wipower_max_uw);
+ rc = of_property_read_u32(node, "qcom,wipower-max-uw",
+ &chip->dt.wipower_max_uw);
if (rc < 0)
- chip->dt.wipower_max_uw = SMB2_DEFAULT_WPWR_UW;
+ chip->dt.wipower_max_uw = -EINVAL;
if (of_find_property(node, "qcom,thermal-mitigation", &byte_len)) {
chg->thermal_mitigation = devm_kzalloc(chg->dev, byte_len,
@@ -218,6 +324,7 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_PD_ALLOWED,
POWER_SUPPLY_PROP_PD_ACTIVE,
POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
};
static int smb2_usb_get_prop(struct power_supply *psy,
@@ -277,6 +384,9 @@ static int smb2_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
rc = smblib_get_prop_input_current_settled(chg, val);
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
+ rc = smblib_get_prop_usb_current_now(chg, val);
+ break;
default:
pr_err("get prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -476,42 +586,55 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+ POWER_SUPPLY_PROP_CHARGER_TEMP,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
};
static int smb2_batt_get_prop(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
+ int rc;
struct smb_charger *chg = power_supply_get_drvdata(psy);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- smblib_get_prop_batt_status(chg, val);
+ rc = smblib_get_prop_batt_status(chg, val);
break;
case POWER_SUPPLY_PROP_HEALTH:
- smblib_get_prop_batt_health(chg, val);
+ rc = smblib_get_prop_batt_health(chg, val);
break;
case POWER_SUPPLY_PROP_PRESENT:
- smblib_get_prop_batt_present(chg, val);
+ rc = smblib_get_prop_batt_present(chg, val);
break;
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
- smblib_get_prop_input_suspend(chg, val);
+ rc = smblib_get_prop_input_suspend(chg, val);
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
- smblib_get_prop_batt_charge_type(chg, val);
+ rc = smblib_get_prop_batt_charge_type(chg, val);
break;
case POWER_SUPPLY_PROP_CAPACITY:
- smblib_get_prop_batt_capacity(chg, val);
+ rc = smblib_get_prop_batt_capacity(chg, val);
break;
case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
- smblib_get_prop_system_temp_level(chg, val);
+ rc = smblib_get_prop_system_temp_level(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP:
+ rc = smblib_get_prop_charger_temp(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+ rc = smblib_get_prop_charger_temp_max(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ rc = smblib_get_prop_input_current_limited(chg, val);
break;
default:
pr_err("batt power supply prop %d not supported\n", psp);
return -EINVAL;
}
- return 0;
+ return rc;
}
static int smb2_batt_set_prop(struct power_supply *psy,
@@ -669,6 +792,73 @@ static int smb2_init_vconn_regulator(struct smb2 *chip)
/***************************
* HARDWARE INITIALIZATION *
***************************/
+static int smb2_config_step_charging(struct smb2 *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+ int i;
+
+ if (!chg->step_chg_enabled)
+ return rc;
+
+ for (i = 0; i < STEP_CHARGING_MAX_STEPS - 1; i++) {
+ rc = smblib_set_charge_param(chg,
+ &chg->param.step_soc_threshold[i],
+ chip->dt.step_soc_threshold[i]);
+ if (rc < 0) {
+ pr_err("Couldn't configure soc thresholds rc = %d\n",
+ rc);
+ goto err_out;
+ }
+ }
+
+ for (i = 0; i < STEP_CHARGING_MAX_STEPS; i++) {
+ rc = smblib_set_charge_param(chg, &chg->param.step_cc_delta[i],
+ chip->dt.step_cc_delta[i]);
+ if (rc < 0) {
+ pr_err("Couldn't configure cc delta rc = %d\n",
+ rc);
+ goto err_out;
+ }
+ }
+
+ rc = smblib_write(chg, STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_REG,
+ STEP_CHG_UPDATE_REQUEST_TIMEOUT_40S);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure soc request timeout reg rc=%d\n",
+ rc);
+ goto err_out;
+ }
+
+ rc = smblib_write(chg, STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_REG,
+ STEP_CHG_UPDATE_FAIL_TIMEOUT_120S);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure soc fail timeout reg rc=%d\n",
+ rc);
+ goto err_out;
+ }
+
+ /*
+ * enable step charging, source soc, standard mode, go to final
+ * state in case of failure.
+ */
+ rc = smblib_write(chg, CHGR_STEP_CHG_MODE_CFG_REG,
+ STEP_CHARGING_ENABLE_BIT |
+ STEP_CHARGING_SOURCE_SELECT_BIT |
+ STEP_CHARGING_SOC_FAIL_OPTION_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
+ goto err_out;
+ }
+
+ return 0;
+err_out:
+ chg->step_chg_enabled = false;
+ return rc;
+}
+
static int smb2_config_wipower_input_power(struct smb2 *chip, int uw)
{
int rc;
@@ -676,6 +866,9 @@ static int smb2_config_wipower_input_power(struct smb2 *chip, int uw)
struct smb_charger *chg = &chip->chg;
s64 nw = (s64)uw * 1000;
+ if (uw < 0)
+ return 0;
+
ua = div_s64(nw, ZIN_ICL_PT_MAX_MV);
rc = smblib_set_charge_param(chg, &chg->param.dc_icl_pt_lv, ua);
if (rc < 0) {
@@ -833,6 +1026,14 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ /* configure step charging */
+ rc = smb2_config_step_charging(chip);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure step charging rc=%d\n",
+ rc);
+ return rc;
+ }
+
/* configure wipower watts */
rc = smb2_config_wipower_input_power(chip, chip->dt.wipower_max_uw);
if (rc < 0) {
@@ -840,6 +1041,16 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ /* configure PMI stat output to enable and disable parallel charging */
+ rc = smblib_masked_write(chg, STAT_CFG_REG,
+ STAT_PARALLEL_CFG_BIT | STAT_SW_OVERRIDE_CFG_BIT,
+ STAT_PARALLEL_CFG_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure signal for parallel rc=%d\n", rc);
+ return rc;
+ }
+
return rc;
}
@@ -856,6 +1067,8 @@ static int smb2_determine_initial_status(struct smb2 *chip)
smblib_handle_usb_source_change(0, &irq_data);
smblib_handle_chg_state_change(0, &irq_data);
smblib_handle_icl_change(0, &irq_data);
+ smblib_handle_step_chg_state_change(0, &irq_data);
+ smblib_handle_step_chg_soc_update_request(0, &irq_data);
return 0;
}
@@ -875,9 +1088,12 @@ static struct smb2_irq_info smb2_irqs[] = {
/* CHARGER IRQs */
{ "chg-error", smblib_handle_debug },
{ "chg-state-change", smblib_handle_chg_state_change, true },
- { "step-chg-state-change", smblib_handle_debug },
- { "step-chg-soc-update-fail", smblib_handle_debug },
- { "step-chg-soc-update-request", smblib_handle_debug },
+ { "step-chg-state-change", smblib_handle_step_chg_state_change,
+ true },
+ { "step-chg-soc-update-fail", smblib_handle_step_chg_soc_update_fail,
+ true },
+ { "step-chg-soc-update-request",
+ smblib_handle_step_chg_soc_update_request, true },
/* OTG IRQs */
{ "otg-fail", smblib_handle_debug },
{ "otg-overcurrent", smblib_handle_debug },
@@ -912,7 +1128,7 @@ static struct smb2_irq_info smb2_irqs[] = {
{ "wdog-bark", NULL },
{ "aicl-fail", smblib_handle_debug },
{ "aicl-done", smblib_handle_debug },
- { "high-duty-cycle", smblib_handle_debug },
+ { "high-duty-cycle", smblib_handle_high_duty_cycle, true },
{ "input-current-limiting", smblib_handle_debug },
{ "temperature-change", smblib_handle_debug },
{ "switcher-power-ok", smblib_handle_debug },
@@ -1023,7 +1239,11 @@ static int smb2_probe(struct platform_device *pdev)
return -EINVAL;
}
- smblib_init(chg);
+ rc = smblib_init(chg);
+ if (rc < 0) {
+ pr_err("Smblib_init failed rc=%d\n", rc);
+ goto cleanup;
+ }
rc = smb2_parse_dt(chip);
if (rc < 0) {
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index 60534bdddd26..21b330127369 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/regmap.h>
+#include <linux/iio/consumer.h>
#include <linux/power_supply.h>
#include <linux/regulator/driver.h>
#include <linux/irq.h>
@@ -82,12 +83,40 @@ unlock:
return rc;
}
+static int smblib_get_step_charging_adjustment(struct smb_charger *chg,
+ int *cc_offset)
+{
+ int step_state;
+ int rc;
+ u8 stat;
+
+ if (!chg->step_chg_enabled) {
+ *cc_offset = 0;
+ return 0;
+ }
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ step_state = (stat & STEP_CHARGING_STATUS_MASK) >> 3;
+ rc = smblib_get_charge_param(chg, &chg->param.step_cc_delta[step_state],
+ cc_offset);
+
+ return rc;
+}
+
static void smblib_fcc_split_ua(struct smb_charger *chg, int total_fcc,
int *master_ua, int *slave_ua)
{
int rc, cc_reduction_ua = 0;
+ int step_cc_delta;
int master_percent = min(max(*chg->pl.master_percent, 0), 100);
union power_supply_propval pval = {0, };
+ int effective_fcc;
/*
* if master_percent is 0, s/w will configure master's fcc to zero and
@@ -111,10 +140,21 @@ static void smblib_fcc_split_ua(struct smb_charger *chg, int total_fcc,
}
}
- total_fcc = max(0, total_fcc - cc_reduction_ua);
- *master_ua = (total_fcc * master_percent) / 100;
- *slave_ua = (total_fcc - *master_ua) * chg->pl.taper_percent / 100;
- *master_ua += cc_reduction_ua;
+ rc = smblib_get_step_charging_adjustment(chg, &step_cc_delta);
+ if (rc < 0)
+ step_cc_delta = 0;
+
+ /*
+ * During JEITA condition and with step_charging enabled, PMI will
+ * pick the lower of the two value: (FCC - JEITA current compensation)
+ * or (FCC + step_charging current delta)
+ */
+
+ effective_fcc = min(max(0, total_fcc - cc_reduction_ua),
+ max(0, total_fcc + step_cc_delta));
+ *master_ua = (effective_fcc * master_percent) / 100;
+ *slave_ua = (effective_fcc - *master_ua) * chg->pl.taper_percent / 100;
+ *master_ua = max(0, *master_ua + total_fcc - effective_fcc);
}
/********************
@@ -246,6 +286,28 @@ int smblib_set_charge_param(struct smb_charger *chg,
return rc;
}
+static int step_charge_soc_update(struct smb_charger *chg, int capacity)
+{
+ int rc = 0;
+
+ rc = smblib_set_charge_param(chg, &chg->param.step_soc, capacity);
+ if (rc < 0) {
+ dev_err(chg->dev, "Error in updating soc, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smblib_write(chg, STEP_CHG_SOC_VBATT_V_UPDATE_REG,
+ STEP_CHG_SOC_VBATT_V_UPDATE_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't set STEP_CHG_SOC_VBATT_V_UPDATE_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return rc;
+}
+
int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
{
int rc = 0;
@@ -394,6 +456,41 @@ static int smblib_register_notifier(struct smb_charger *chg)
return 0;
}
+int smblib_mapping_soc_from_field_value(struct smb_chg_param *param,
+ int val_u, u8 *val_raw)
+{
+ if (val_u > param->max_u || val_u < param->min_u)
+ return -EINVAL;
+
+ *val_raw = val_u << 1;
+
+ return 0;
+}
+
+int smblib_mapping_cc_delta_to_field_value(struct smb_chg_param *param,
+ u8 val_raw)
+{
+ int val_u = val_raw * param->step_u + param->min_u;
+
+ if (val_u > param->max_u)
+ val_u -= param->max_u * 2;
+
+ return val_u;
+}
+
+int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
+ int val_u, u8 *val_raw)
+{
+ if (val_u > param->max_u || val_u < param->min_u - param->max_u)
+ return -EINVAL;
+
+ val_u += param->max_u * 2 - param->min_u;
+ val_u %= param->max_u * 2;
+ *val_raw = val_u / param->step_u;
+
+ return 0;
+}
+
/*********************
* VOTABLE CALLBACKS *
*********************/
@@ -891,6 +988,21 @@ int smblib_get_prop_system_temp_level(struct smb_charger *chg,
return 0;
}
+int smblib_get_prop_input_current_limited(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ u8 stat;
+ int rc;
+
+ rc = smblib_read(chg, AICL_STATUS_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read AICL_STATUS rc=%d\n", rc);
+ return rc;
+ }
+ val->intval = (stat & SOFT_ILIMIT_BIT) || chg->is_hdc;
+ return 0;
+}
+
/***********************
* BATTERY PSY SETTERS *
***********************/
@@ -1076,12 +1188,20 @@ int smblib_get_prop_usb_online(struct smb_charger *chg,
int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
union power_supply_propval *val)
{
- if (chg->vbus_present)
- val->intval = MICRO_5V;
- else
- val->intval = 0;
+ int rc = 0;
- return 0;
+ rc = smblib_get_prop_usb_present(chg, val);
+ if (rc < 0 || !val->intval)
+ return rc;
+
+ if (!chg->iio.usbin_v_chan ||
+ PTR_ERR(chg->iio.usbin_v_chan) == -EPROBE_DEFER)
+ chg->iio.usbin_v_chan = iio_channel_get(chg->dev, "usbin_v");
+
+ if (IS_ERR(chg->iio.usbin_v_chan))
+ return PTR_ERR(chg->iio.usbin_v_chan);
+
+ return iio_read_channel_processed(chg->iio.usbin_v_chan, &val->intval);
}
int smblib_get_prop_usb_current_max(struct smb_charger *chg,
@@ -1091,6 +1211,59 @@ int smblib_get_prop_usb_current_max(struct smb_charger *chg,
return 0;
}
+int smblib_get_prop_usb_current_now(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc = 0;
+
+ rc = smblib_get_prop_usb_present(chg, val);
+ if (rc < 0 || !val->intval)
+ return rc;
+
+ if (!chg->iio.usbin_i_chan ||
+ PTR_ERR(chg->iio.usbin_i_chan) == -EPROBE_DEFER)
+ chg->iio.usbin_i_chan = iio_channel_get(chg->dev, "usbin_i");
+
+ if (IS_ERR(chg->iio.usbin_i_chan))
+ return PTR_ERR(chg->iio.usbin_i_chan);
+
+ return iio_read_channel_processed(chg->iio.usbin_i_chan, &val->intval);
+}
+
+int smblib_get_prop_charger_temp(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->iio.temp_chan ||
+ PTR_ERR(chg->iio.temp_chan) == -EPROBE_DEFER)
+ chg->iio.temp_chan = iio_channel_get(chg->dev, "charger_temp");
+
+ if (IS_ERR(chg->iio.temp_chan))
+ return PTR_ERR(chg->iio.temp_chan);
+
+ rc = iio_read_channel_processed(chg->iio.temp_chan, &val->intval);
+ val->intval /= 100;
+ return rc;
+}
+
+int smblib_get_prop_charger_temp_max(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->iio.temp_max_chan ||
+ PTR_ERR(chg->iio.temp_max_chan) == -EPROBE_DEFER)
+ chg->iio.temp_max_chan = iio_channel_get(chg->dev,
+ "charger_temp_max");
+ if (IS_ERR(chg->iio.temp_max_chan))
+ return PTR_ERR(chg->iio.temp_max_chan);
+
+ rc = iio_read_channel_processed(chg->iio.temp_max_chan, &val->intval);
+ val->intval /= 100;
+ return rc;
+}
+
int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
union power_supply_propval *val)
{
@@ -1427,6 +1600,57 @@ irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
return IRQ_HANDLED;
}
+irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+ if (chg->step_chg_enabled)
+ rerun_election(chg->fcc_votable);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+ if (chg->step_chg_enabled)
+ rerun_election(chg->fcc_votable);
+
+ return IRQ_HANDLED;
+}
+
+#define STEP_SOC_REQ_MS 3000
+irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ int rc;
+ union power_supply_propval pval = {0, };
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+ if (!chg->bms_psy) {
+ schedule_delayed_work(&chg->step_soc_req_work,
+ msecs_to_jiffies(STEP_SOC_REQ_MS));
+ return IRQ_HANDLED;
+ }
+
+ rc = smblib_get_prop_batt_capacity(chg, &pval);
+ if (rc < 0)
+ dev_err(chg->dev, "Couldn't get batt capacity rc=%d\n", rc);
+ else
+ step_charge_soc_update(chg, pval.intval);
+
+ return IRQ_HANDLED;
+}
+
irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
@@ -1756,6 +1980,17 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
return IRQ_HANDLED;
}
+irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ chg->is_hdc = true;
+ schedule_delayed_work(&chg->clear_hdc_work, msecs_to_jiffies(60));
+
+ return IRQ_HANDLED;
+}
+
/***************
* Work Queues *
***************/
@@ -1774,13 +2009,29 @@ static void smblib_hvdcp_detect_work(struct work_struct *work)
}
}
-static void smblib_bms_update_work(struct work_struct *work)
+static void bms_update_work(struct work_struct *work)
{
struct smb_charger *chg = container_of(work, struct smb_charger,
bms_update_work);
power_supply_changed(chg->batt_psy);
}
+static void step_soc_req_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ step_soc_req_work.work);
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ rc = smblib_get_prop_batt_capacity(chg, &pval);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't get batt capacity rc=%d\n", rc);
+ return;
+ }
+
+ step_charge_soc_update(chg, pval.intval);
+}
+
static void smblib_pl_detect_work(struct work_struct *work)
{
struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -1829,7 +2080,15 @@ done:
vote(chg->awake_votable, PL_VOTER, false, 0);
}
-int smblib_create_votables(struct smb_charger *chg)
+static void clear_hdc_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ clear_hdc_work.work);
+
+ chg->is_hdc = 0;
+}
+
+static int smblib_create_votables(struct smb_charger *chg)
{
int rc = 0;
@@ -1923,15 +2182,53 @@ int smblib_create_votables(struct smb_charger *chg)
return rc;
}
+static void smblib_destroy_votables(struct smb_charger *chg)
+{
+ if (chg->usb_suspend_votable)
+ destroy_votable(chg->usb_suspend_votable);
+ if (chg->dc_suspend_votable)
+ destroy_votable(chg->dc_suspend_votable);
+ if (chg->fcc_max_votable)
+ destroy_votable(chg->fcc_max_votable);
+ if (chg->fcc_votable)
+ destroy_votable(chg->fcc_votable);
+ if (chg->fv_votable)
+ destroy_votable(chg->fv_votable);
+ if (chg->usb_icl_votable)
+ destroy_votable(chg->usb_icl_votable);
+ if (chg->dc_icl_votable)
+ destroy_votable(chg->dc_icl_votable);
+ if (chg->pd_allowed_votable)
+ destroy_votable(chg->pd_allowed_votable);
+ if (chg->awake_votable)
+ destroy_votable(chg->awake_votable);
+ if (chg->pl_disable_votable)
+ destroy_votable(chg->pl_disable_votable);
+}
+
+static void smblib_iio_deinit(struct smb_charger *chg)
+{
+ if (!IS_ERR_OR_NULL(chg->iio.temp_chan))
+ iio_channel_release(chg->iio.temp_chan);
+ if (!IS_ERR_OR_NULL(chg->iio.temp_max_chan))
+ iio_channel_release(chg->iio.temp_max_chan);
+ if (!IS_ERR_OR_NULL(chg->iio.usbin_i_chan))
+ iio_channel_release(chg->iio.usbin_i_chan);
+ if (!IS_ERR_OR_NULL(chg->iio.usbin_v_chan))
+ iio_channel_release(chg->iio.usbin_v_chan);
+}
+
int smblib_init(struct smb_charger *chg)
{
int rc = 0;
mutex_init(&chg->write_lock);
- INIT_WORK(&chg->bms_update_work, smblib_bms_update_work);
+ INIT_WORK(&chg->bms_update_work, bms_update_work);
INIT_WORK(&chg->pl_detect_work, smblib_pl_detect_work);
INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
INIT_DELAYED_WORK(&chg->pl_taper_work, smblib_pl_taper_work);
+ INIT_DELAYED_WORK(&chg->step_soc_req_work, step_soc_req_work);
+ INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
chg->fake_capacity = -EINVAL;
switch (chg->mode) {
@@ -1943,6 +2240,7 @@ int smblib_init(struct smb_charger *chg)
return rc;
}
+ chg->bms_psy = power_supply_get_by_name("bms");
chg->pl.psy = power_supply_get_by_name("parallel");
rc = smblib_register_notifier(chg);
@@ -1965,18 +2263,19 @@ int smblib_init(struct smb_charger *chg)
int smblib_deinit(struct smb_charger *chg)
{
- destroy_votable(chg->usb_suspend_votable);
- destroy_votable(chg->dc_suspend_votable);
- destroy_votable(chg->fcc_max_votable);
- destroy_votable(chg->fcc_votable);
- destroy_votable(chg->fv_votable);
- destroy_votable(chg->usb_icl_votable);
- destroy_votable(chg->dc_icl_votable);
- destroy_votable(chg->pd_allowed_votable);
- destroy_votable(chg->awake_votable);
- destroy_votable(chg->pl_disable_votable);
-
- power_supply_unreg_notifier(&chg->nb);
+ switch (chg->mode) {
+ case PARALLEL_MASTER:
+ power_supply_unreg_notifier(&chg->nb);
+ smblib_destroy_votables(chg);
+ break;
+ case PARALLEL_SLAVE:
+ break;
+ default:
+ dev_err(chg->dev, "Unsupported mode %d\n", chg->mode);
+ return -EINVAL;
+ }
+
+ smblib_iio_deinit(chg);
return 0;
}
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index b56cd24adde1..aeb1eb2c454f 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -76,6 +76,9 @@ struct smb_params {
struct smb_chg_param dc_icl_div2_mid_hv;
struct smb_chg_param dc_icl_div2_hv;
struct smb_chg_param jeita_cc_comp;
+ struct smb_chg_param step_soc_threshold[4];
+ struct smb_chg_param step_soc;
+ struct smb_chg_param step_cc_delta[5];
};
struct parallel_params {
@@ -85,10 +88,18 @@ struct parallel_params {
int slave_fcc;
};
+struct smb_iio {
+ struct iio_channel *temp_chan;
+ struct iio_channel *temp_max_chan;
+ struct iio_channel *usbin_i_chan;
+ struct iio_channel *usbin_v_chan;
+};
+
struct smb_charger {
struct device *dev;
struct regmap *regmap;
struct smb_params param;
+ struct smb_iio iio;
int *debug_mask;
enum smb_mode mode;
@@ -133,6 +144,8 @@ struct smb_charger {
struct delayed_work hvdcp_detect_work;
struct delayed_work ps_change_timeout_work;
struct delayed_work pl_taper_work;
+ struct delayed_work step_soc_req_work;
+ struct delayed_work clear_hdc_work;
/* cached status */
int voltage_min_uv;
@@ -145,6 +158,9 @@ struct smb_charger {
int *thermal_mitigation;
int fake_capacity;
+
+ bool step_chg_enabled;
+ bool is_hdc;
};
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -161,6 +177,13 @@ int smblib_set_charge_param(struct smb_charger *chg,
int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend);
int smblib_set_dc_suspend(struct smb_charger *chg, bool suspend);
+int smblib_mapping_soc_from_field_value(struct smb_chg_param *param,
+ int val_u, u8 *val_raw);
+int smblib_mapping_cc_delta_to_field_value(struct smb_chg_param *param,
+ u8 val_raw);
+int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
+ int val_u, u8 *val_raw);
+
int smblib_vbus_regulator_enable(struct regulator_dev *rdev);
int smblib_vbus_regulator_disable(struct regulator_dev *rdev);
int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev);
@@ -171,6 +194,9 @@ int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev);
irqreturn_t smblib_handle_debug(int irq, void *data);
irqreturn_t smblib_handle_chg_state_change(int irq, void *data);
+irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data);
+irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data);
+irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data);
irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data);
irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data);
irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data);
@@ -178,6 +204,7 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data);
irqreturn_t smblib_handle_usb_source_change(int irq, void *data);
irqreturn_t smblib_handle_icl_change(int irq, void *data);
irqreturn_t smblib_handle_usb_typec_change(int irq, void *data);
+irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data);
int smblib_get_prop_input_suspend(struct smb_charger *chg,
union power_supply_propval *val);
@@ -193,6 +220,8 @@ int smblib_get_prop_batt_health(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_system_temp_level(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_input_current_limited(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_set_prop_input_suspend(struct smb_charger *chg,
const union power_supply_propval *val);
@@ -220,6 +249,8 @@ int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_usb_current_max(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_usb_current_now(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_typec_mode(struct smb_charger *chg,
@@ -230,6 +261,10 @@ int smblib_get_prop_pd_allowed(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_input_current_settled(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_charger_temp(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_charger_temp_max(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_set_prop_usb_current_max(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index b03e8a7e0403..0d5222ec08f8 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -283,6 +283,36 @@ enum {
#define IADC_SYNC_CNV_SEL_BIT BIT(1)
#define VADC_SYNC_CNV_SEL_BIT BIT(0)
+#define CHGR_STEP_CHG_MODE_CFG_REG (CHGR_BASE + 0xB0)
+#define STEP_CHARGING_SOC_FAIL_OPTION_BIT BIT(3)
+#define STEP_CHARGING_MODE_SELECT_BIT BIT(2)
+#define STEP_CHARGING_SOURCE_SELECT_BIT BIT(1)
+#define STEP_CHARGING_ENABLE_BIT BIT(0)
+
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_REG (CHGR_BASE + 0xB1)
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_MASK GENMASK(0, 1)
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_5S 0
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_10S 1
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_20S 2
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_40S 3
+
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_REG (CHGR_BASE + 0xB2)
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_MASK GENMASK(0, 1)
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_10S 0
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_30S 1
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_60S 2
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_120S 3
+
+#define STEP_CHG_SOC_OR_BATT_V_TH1_REG (CHGR_BASE + 0xB3)
+#define STEP_CHG_SOC_OR_BATT_V_TH2_REG (CHGR_BASE + 0xB4)
+#define STEP_CHG_SOC_OR_BATT_V_TH3_REG (CHGR_BASE + 0xB5)
+#define STEP_CHG_SOC_OR_BATT_V_TH4_REG (CHGR_BASE + 0xB6)
+#define STEP_CHG_CURRENT_DELTA1_REG (CHGR_BASE + 0xB7)
+#define STEP_CHG_CURRENT_DELTA2_REG (CHGR_BASE + 0xB8)
+#define STEP_CHG_CURRENT_DELTA3_REG (CHGR_BASE + 0xB9)
+#define STEP_CHG_CURRENT_DELTA4_REG (CHGR_BASE + 0xBA)
+#define STEP_CHG_CURRENT_DELTA5_REG (CHGR_BASE + 0xBB)
+
/* OTG Peripheral Registers */
#define RID_CC_CONTROL_23_16_REG (OTG_BASE + 0x06)
#define RID_CC_CONTROL_23_BIT BIT(7)
diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c
index 259e9f31e243..a77b0bbc193c 100644
--- a/drivers/power/qcom-charger/smb138x-charger.c
+++ b/drivers/power/qcom-charger/smb138x-charger.c
@@ -47,8 +47,8 @@ static struct smb_params v1_params = {
.name = "fast charge current",
.reg = FAST_CHARGE_CURRENT_CFG_REG,
.min_u = 0,
- .max_u = 4500000,
- .step_u = 25000,
+ .max_u = 5000000,
+ .step_u = 50000,
},
.fv = {
.name = "float voltage",
@@ -267,6 +267,8 @@ static enum power_supply_property smb138x_batt_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGER_TEMP,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
};
static int smb138x_batt_get_prop(struct power_supply *psy,
@@ -296,6 +298,12 @@ static int smb138x_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
rc = smblib_get_prop_batt_capacity(chg, val);
break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP:
+ rc = smblib_get_prop_charger_temp(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+ rc = smblib_get_prop_charger_temp_max(chg, val);
+ break;
default:
pr_err("batt power supply get prop %d not supported\n",
prop);
@@ -381,6 +389,8 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_INPUT_SUSPEND,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CHARGER_TEMP,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
};
static int smb138x_parallel_get_prop(struct power_supply *psy,
@@ -415,6 +425,12 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
rc = smblib_get_charge_param(chg, &chg->param.fcc,
&val->intval);
break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP:
+ rc = smblib_get_prop_charger_temp(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+ rc = smblib_get_prop_charger_temp_max(chg, val);
+ break;
default:
pr_err("parallel power supply get prop %d not supported\n",
prop);
diff --git a/drivers/power/qcom/debug_core.c b/drivers/power/qcom/debug_core.c
index d3620bbbeafa..ccef04ae9eb2 100644
--- a/drivers/power/qcom/debug_core.c
+++ b/drivers/power/qcom/debug_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,8 @@
#include "soc/qcom/msm-core.h"
#define MAX_PSTATES 50
+#define NUM_OF_PENTRY 3 /* number of variables for ptable node */
+#define NUM_OF_EENTRY 2 /* number of variables for enable node */
enum arg_offset {
CPU_OFFSET,
@@ -82,15 +84,28 @@ static struct debugfs_blob_wrapper help_msg = {
};
-static void add_to_ptable(uint64_t *arg)
+static void add_to_ptable(unsigned int *arg)
{
struct core_debug *node;
int i, cpu = arg[CPU_OFFSET];
+ uint32_t freq = arg[FREQ_OFFSET];
+ uint32_t power = arg[POWER_OFFSET];
if (!cpu_possible(cpu))
return;
+ if ((freq == 0) || (power == 0)) {
+ pr_warn("Incorrect power data\n");
+ return;
+ }
+
node = &per_cpu(c_dgfs, cpu);
+
+ if (node->len >= MAX_PSTATES) {
+ pr_warn("Dropped ptable update - no space left.\n");
+ return;
+ }
+
if (!node->head) {
node->head = kzalloc(sizeof(struct cpu_pstate_pwr) *
(MAX_PSTATES + 1),
@@ -98,24 +113,18 @@ static void add_to_ptable(uint64_t *arg)
if (!node->head)
return;
}
- for (i = 0; i < MAX_PSTATES; i++) {
- if (node->head[i].freq == arg[FREQ_OFFSET]) {
- node->head[i].power = arg[POWER_OFFSET];
+
+ for (i = 0; i < node->len; i++) {
+ if (node->head[i].freq == freq) {
+ node->head[i].power = power;
return;
}
- if (node->head[i].freq == 0)
- break;
- }
-
- if (i == MAX_PSTATES) {
- pr_warn("Dropped ptable update - no space left.\n");
- return;
}
/* Insert a new frequency (may need to move things around to
keep in ascending order). */
for (i = MAX_PSTATES - 1; i > 0; i--) {
- if (node->head[i-1].freq > arg[FREQ_OFFSET]) {
+ if (node->head[i-1].freq > freq) {
node->head[i].freq = node->head[i-1].freq;
node->head[i].power = node->head[i-1].power;
} else if (node->head[i-1].freq != 0) {
@@ -123,23 +132,29 @@ static void add_to_ptable(uint64_t *arg)
}
}
- node->head[i].freq = arg[FREQ_OFFSET];
- node->head[i].power = arg[POWER_OFFSET];
- node->len++;
+ if (node->len < MAX_PSTATES) {
+ node->head[i].freq = freq;
+ node->head[i].power = power;
+ node->len++;
+ }
if (node->ptr)
node->ptr->len = node->len;
}
-static int split_ptable_args(char *line, uint64_t *arg)
+static int split_ptable_args(char *line, unsigned int *arg, uint32_t n)
{
char *args;
int i;
int ret = 0;
- for (i = 0; line; i++) {
+ for (i = 0; i < n; i++) {
+ if (!line)
+ break;
args = strsep(&line, " ");
- ret = kstrtoull(args, 10, &arg[i]);
+ ret = kstrtouint(args, 10, &arg[i]);
+ if (ret)
+ return ret;
}
return ret;
}
@@ -149,7 +164,7 @@ static ssize_t msm_core_ptable_write(struct file *file,
{
char *kbuf;
int ret;
- uint64_t arg[3];
+ unsigned int arg[3];
if (len == 0)
return 0;
@@ -163,7 +178,7 @@ static ssize_t msm_core_ptable_write(struct file *file,
goto done;
}
kbuf[len] = '\0';
- ret = split_ptable_args(kbuf, arg);
+ ret = split_ptable_args(kbuf, arg, NUM_OF_PENTRY);
if (!ret) {
add_to_ptable(arg);
ret = len;
@@ -201,7 +216,7 @@ static int msm_core_ptable_read(struct seq_file *m, void *data)
seq_printf(m, "--- CPU%d - Live numbers at %ldC---\n",
cpu, node->ptr->temp);
print_table(m, msm_core_data[cpu].ptable,
- msm_core_data[cpu].len);
+ node->driver_len);
}
}
return 0;
@@ -212,7 +227,7 @@ static ssize_t msm_core_enable_write(struct file *file,
{
char *kbuf;
int ret;
- uint64_t arg[3];
+ unsigned int arg[3];
int cpu;
if (len == 0)
@@ -227,7 +242,7 @@ static ssize_t msm_core_enable_write(struct file *file,
goto done;
}
kbuf[len] = '\0';
- ret = split_ptable_args(kbuf, arg);
+ ret = split_ptable_args(kbuf, arg, NUM_OF_EENTRY);
if (ret)
goto done;
cpu = arg[CPU_OFFSET];
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d49d8606da15..27a5deb1213e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -150,7 +150,7 @@ static void regulator_lock_supply(struct regulator_dev *rdev)
{
int i;
- for (i = 0; rdev->supply; rdev = rdev_get_supply(rdev), i++)
+ for (i = 0; rdev; rdev = rdev_get_supply(rdev), i++)
mutex_lock_nested(&rdev->mutex, i);
}
diff --git a/drivers/regulator/cpr3-mmss-regulator.c b/drivers/regulator/cpr3-mmss-regulator.c
index fe5dbbeac15e..b0439871c41a 100644
--- a/drivers/regulator/cpr3-mmss-regulator.c
+++ b/drivers/regulator/cpr3-mmss-regulator.c
@@ -242,8 +242,8 @@ static const int msmcobalt_v2_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
#define MSMCOBALT_MMSS_CPR_SENSOR_COUNT 35
-#define MSMCOBALT_MMSS_AGING_SENSOR_ID 17
-#define MSMCOBALT_MMSS_AGING_BYPASS_MASK0 0
+#define MSMCOBALT_MMSS_AGING_SENSOR_ID 29
+#define MSMCOBALT_MMSS_AGING_BYPASS_MASK0 (GENMASK(23, 0))
#define MSMCOBALT_MMSS_MAX_TEMP_POINTS 3
#define MSMCOBALT_MMSS_TEMP_SENSOR_ID_START 12
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index 6e8db03fe16e..232373092746 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -3523,7 +3523,7 @@ cleanup:
if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
rc2 = cpr3_ctrl_clear_cpr4_config(ctrl);
- if (rc) {
+ if (rc2) {
cpr3_err(ctrl, "failed to clear CPR4 configuration,rc=%d\n",
rc2);
rc = rc2;
@@ -3725,6 +3725,17 @@ static int cpr3_regulator_aging_adjust(struct cpr3_controller *ctrl)
return 0;
}
+ /*
+ * Verify that the aging possible register (if specified) has an
+ * acceptable value.
+ */
+ if (ctrl->aging_possible_reg) {
+ reg = readl_relaxed(ctrl->aging_possible_reg);
+ reg &= ctrl->aging_possible_mask;
+ if (reg != ctrl->aging_possible_val)
+ return 0;
+ }
+
restore_current_corner = kcalloc(vreg_count,
sizeof(*restore_current_corner), GFP_KERNEL);
restore_vreg_enabled = kcalloc(vreg_count,
@@ -3798,7 +3809,7 @@ static int cpr3_regulator_aging_adjust(struct cpr3_controller *ctrl)
max_aging_volt = max(max_aging_volt, aging_volt);
} else {
cpr3_err(ctrl, "CPR aging measurement failed after %d tries, rc=%d\n",
- rc, CPR3_AGING_RETRY_COUNT);
+ j, rc);
ctrl->aging_failed = true;
ctrl->aging_required = false;
goto cleanup;
@@ -5992,6 +6003,21 @@ int cpr3_regulator_register(struct platform_device *pdev,
}
ctrl->cpr_ctrl_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (ctrl->aging_possible_mask) {
+ /*
+ * Aging possible register address is required if an aging
+ * possible mask has been specified.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "aging_allowed");
+ if (!res || !res->start) {
+ cpr3_err(ctrl, "CPR aging allowed address is missing\n");
+ return -ENXIO;
+ }
+ ctrl->aging_possible_reg = devm_ioremap(dev, res->start,
+ resource_size(res));
+ }
+
if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH) {
ctrl->irq = platform_get_irq_byname(pdev, "cpr");
if (ctrl->irq < 0) {
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 0907518722df..8897def3ef76 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -532,6 +532,9 @@ struct cpr3_panic_regs_info {
* that this CPR3 controller manages.
* @cpr_ctrl_base: Virtual address of the CPR3 controller base register
* @fuse_base: Virtual address of fuse row 0
+ * @aging_possible_reg: Virtual address of an optional platform-specific
+ * register that must be read to determine if it is
+ * possible to perform an aging measurement.
* @list: list head used in a global cpr3-regulator list so that
* cpr3-regulator structs can be found easily in RAM dumps
* @thread: Array of CPR3 threads managed by the CPR3 controller
@@ -671,6 +674,11 @@ struct cpr3_panic_regs_info {
* @aging_sensor: Array of CPR3 aging sensors which are used to perform
* aging measurements at a runtime.
* @aging_sensor_count: Number of elements in the aging_sensor array
+ * @aging_possible_mask: Optional bitmask used to mask off the
+ * aging_possible_reg register.
+ * @aging_possible_val: Optional value that the masked aging_possible_reg
+ * register must have in order for a CPR aging measurement
+ * to be possible.
* @step_quot_fixed: Fixed step quotient value used for target quotient
* adjustment if use_dynamic_step_quot is not set.
* This parameter is only relevant for CPR4 controllers
@@ -721,6 +729,7 @@ struct cpr3_controller {
int ctrl_id;
void __iomem *cpr_ctrl_base;
void __iomem *fuse_base;
+ void __iomem *aging_possible_reg;
struct list_head list;
struct cpr3_thread *thread;
int thread_count;
@@ -784,6 +793,8 @@ struct cpr3_controller {
bool aging_failed;
struct cpr3_aging_sensor_info *aging_sensor;
int aging_sensor_count;
+ u32 aging_possible_mask;
+ u32 aging_possible_val;
u32 step_quot_fixed;
u32 initial_temp_band;
diff --git a/drivers/regulator/cpr3-util.c b/drivers/regulator/cpr3-util.c
index 9d55e9af2e7c..51179f28fcf5 100644
--- a/drivers/regulator/cpr3-util.c
+++ b/drivers/regulator/cpr3-util.c
@@ -1169,6 +1169,24 @@ int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl)
of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-aging-ref-voltage",
&ctrl->aging_ref_volt);
+ /* Aging possible bitmask is optional */
+ ctrl->aging_possible_mask = 0;
+ of_property_read_u32(ctrl->dev->of_node,
+ "qcom,cpr-aging-allowed-reg-mask",
+ &ctrl->aging_possible_mask);
+
+ if (ctrl->aging_possible_mask) {
+ /*
+ * Aging possible register value required if bitmask is
+ * specified
+ */
+ rc = cpr3_parse_ctrl_u32(ctrl,
+ "qcom,cpr-aging-allowed-reg-value",
+ &ctrl->aging_possible_val, 0, UINT_MAX);
+ if (rc)
+ return rc;
+ }
+
if (of_find_property(ctrl->dev->of_node, "clock-names", NULL)) {
ctrl->core_clk = devm_clk_get(ctrl->dev, "core_clk");
if (IS_ERR(ctrl->core_clk)) {
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 083459f96ac4..284180b0e72f 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -69,8 +69,9 @@ struct cprh_msmcobalt_kbss_fuses {
/*
* Fuse combos 0 - 7 map to CPR fusing revision 0 - 7 with speed bin fuse = 0.
+ * Fuse combos 8 - 15 map to CPR fusing revision 0 - 7 with speed bin fuse = 1.
*/
-#define CPRH_MSMCOBALT_KBSS_FUSE_COMBO_COUNT 8
+#define CPRH_MSMCOBALT_KBSS_FUSE_COMBO_COUNT 16
/*
* Constants which define the name of each fuse corner.
@@ -206,11 +207,19 @@ msmcobalt_v1_kbss_fuse_ref_volt[MSMCOBALT_KBSS_FUSE_CORNERS] = {
* Open loop voltage fuse reference voltages in microvolts for MSMCOBALT v2
*/
static const int
-msmcobalt_v2_kbss_fuse_ref_volt[MSMCOBALT_KBSS_FUSE_CORNERS] = {
- 688000,
- 756000,
- 828000,
- 1056000,
+msmcobalt_v2_kbss_fuse_ref_volt[2][MSMCOBALT_KBSS_FUSE_CORNERS] = {
+ [MSMCOBALT_KBSS_POWER_CLUSTER_ID] = {
+ 688000,
+ 756000,
+ 828000,
+ 1056000,
+ },
+ [MSMCOBALT_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ 756000,
+ 756000,
+ 828000,
+ 1056000,
+ },
};
#define MSMCOBALT_KBSS_FUSE_STEP_VOLT 10000
@@ -391,7 +400,7 @@ static int cprh_msmcobalt_kbss_calculate_open_loop_voltages(
{
struct device_node *node = vreg->of_node;
struct cprh_msmcobalt_kbss_fuses *fuse = vreg->platform_fuses;
- int i, j, soc_revision, rc = 0;
+ int i, j, soc_revision, id, rc = 0;
bool allow_interpolation;
u64 freq_low, volt_low, freq_high, volt_high;
const int *ref_volt;
@@ -407,13 +416,12 @@ static int cprh_msmcobalt_kbss_calculate_open_loop_voltages(
goto done;
}
+ id = vreg->thread->ctrl->ctrl_id;
soc_revision = vreg->thread->ctrl->soc_revision;
if (soc_revision == 1)
ref_volt = msmcobalt_v1_kbss_fuse_ref_volt;
- else if (soc_revision == 2)
- ref_volt = msmcobalt_v2_kbss_fuse_ref_volt;
else
- ref_volt = msmcobalt_v2_kbss_fuse_ref_volt;
+ ref_volt = msmcobalt_v2_kbss_fuse_ref_volt[id];
for (i = 0; i < vreg->fuse_corner_count; i++) {
fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse(
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index bfa82ca64499..ad4b6ffef36e 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -349,6 +349,28 @@ out:
return err;
}
+static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
+{
+ struct ufs_clk_info *clki;
+
+ /*
+ * Configure the behavior of ufs clocks core and peripheral
+ * memory state when they are turned off.
+ * This configuration is required to allow retaining
+ * ICE crypto configuration (including keys) when
+ * core_clk_ice is turned off, and powering down
+ * non-ICE RAMs of host controller.
+ */
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, "core_clk_ice"))
+ clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
+ else
+ clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
+ clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH);
+ clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR);
+ }
+}
+
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
@@ -357,6 +379,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
+ ufs_qcom_force_mem_config(hba);
ufs_qcom_power_up_sequence(hba);
/*
* The PHY PLL output is the source of tx/rx lane symbol
@@ -741,21 +764,29 @@ out:
static int ufs_qcom_full_reset(struct ufs_hba *hba)
{
- struct ufs_clk_info *clki;
int ret = -ENOTSUPP;
- list_for_each_entry(clki, &hba->clk_list_head, list) {
- if (!strcmp(clki->name, "core_clk")) {
- ret = clk_reset(clki->clk, CLK_RESET_ASSERT);
- if (ret)
- goto out;
- /* Very small delay, per the documented requirement */
- usleep_range(1, 2);
-
- ret = clk_reset(clki->clk, CLK_RESET_DEASSERT);
- break;
- }
+ if (!hba->core_reset) {
+ dev_err(hba->dev, "%s: failed, err = %d\n", __func__,
+ ret);
+ goto out;
+ }
+
+ ret = reset_control_assert(hba->core_reset);
+ if (ret) {
+ dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+ __func__, ret);
+ goto out;
}
+
+ /* Very small delay, per the documented requirement */
+ usleep_range(1, 2);
+
+ ret = reset_control_deassert(hba->core_reset);
+ if (ret)
+ dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
+ __func__, ret);
+
out:
return ret;
}
@@ -996,7 +1027,7 @@ static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
}
}
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
int err = 0;
@@ -1027,7 +1058,7 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
vote = ufs_qcom_get_bus_vote(host, mode);
if (vote >= 0)
- err = ufs_qcom_set_bus_vote(host, vote);
+ err = __ufs_qcom_set_bus_vote(host, vote);
else
err = vote;
@@ -1038,6 +1069,35 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
return err;
}
+static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int vote, err;
+
+ /*
+ * In case ufs_qcom_init() is not yet done, simply ignore.
+ * This ufs_qcom_set_bus_vote() shall be called from
+ * ufs_qcom_init() after init is done.
+ */
+ if (!host)
+ return 0;
+
+ if (on) {
+ vote = host->bus_vote.saved_vote;
+ if (vote == host->bus_vote.min_bw_vote)
+ ufs_qcom_update_bus_bw_vote(host);
+ } else {
+ vote = host->bus_vote.min_bw_vote;
+ }
+
+ err = __ufs_qcom_set_bus_vote(host, vote);
+ if (err)
+ dev_err(hba->dev, "%s: set bus vote failed %d\n",
+ __func__, err);
+
+ return err;
+}
+
static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1403,7 +1463,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
- int vote = 0;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1428,9 +1487,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
- vote = host->bus_vote.saved_vote;
- if (vote == host->bus_vote.min_bw_vote)
- ufs_qcom_update_bus_bw_vote(host);
err = ufs_qcom_ice_resume(host);
if (err)
@@ -1449,14 +1505,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
}
- vote = host->bus_vote.min_bw_vote;
}
- err = ufs_qcom_set_bus_vote(host, vote);
- if (err)
- dev_err(hba->dev, "%s: set bus vote failed %d\n",
- __func__, err);
-
out:
return err;
}
@@ -2011,6 +2061,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
+ ufs_qcom_set_bus_vote(hba, true);
ufs_qcom_setup_clocks(hba, true, false);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
@@ -2362,17 +2413,21 @@ void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
- if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+ if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
+ UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
- else
+ } else {
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+ }
}
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
/* provide a legal default configuration */
- host->testbus.select_major = TSTBUS_UAWM;
- host->testbus.select_minor = 1;
+ host->testbus.select_major = TSTBUS_UNIPRO;
+ host->testbus.select_minor = 37;
}
static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
@@ -2389,7 +2444,7 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
* mappings of select_minor, since there is no harm in
* configuring a non-existent select_minor
*/
- if (host->testbus.select_minor > 0x1F) {
+ if (host->testbus.select_minor > 0xFF) {
dev_err(host->hba->dev,
"%s: 0x%05X is not a legal testbus option\n",
__func__, host->testbus.select_minor);
@@ -2458,7 +2513,8 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
break;
case TSTBUS_UNIPRO:
reg = UFS_UNIPRO_CFG;
- offset = 1;
+ offset = 20;
+ mask = 0xFFF;
break;
/*
* No need for a default case, since
@@ -2477,6 +2533,11 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
(u32)host->testbus.select_minor << offset,
reg);
ufs_qcom_enable_test_bus(host);
+ /*
+ * Make sure the test bus configuration is
+ * committed before returning.
+ */
+ mb();
ufshcd_release(host->hba, false);
pm_runtime_put_sync(host->hba->dev);
@@ -2488,15 +2549,44 @@ static void ufs_qcom_testbus_read(struct ufs_hba *hba)
ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
}
+static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ u32 *testbus = NULL;
+ int i, nminor = 256, testbus_len = nminor * sizeof(u32);
+
+ testbus = kmalloc(testbus_len, GFP_KERNEL);
+ if (!testbus)
+ return;
+
+ host->testbus.select_major = TSTBUS_UNIPRO;
+ for (i = 0; i < nminor; i++) {
+ host->testbus.select_minor = i;
+ ufs_qcom_testbus_config(host);
+ testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+ }
+ print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
+ 16, 4, testbus, testbus_len, false);
+ kfree(testbus);
+}
+
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
"HCI Vendor Specific Registers ");
+ /* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+ usleep_range(1000, 1100);
ufs_qcom_testbus_read(hba);
+ usleep_range(1000, 1100);
+ ufs_qcom_print_unipro_testbus(hba);
+ usleep_range(1000, 1100);
+ ufs_qcom_phy_dbg_register_dump(phy);
+ usleep_range(1000, 1100);
ufs_qcom_ice_print_regs(host);
}
@@ -2521,6 +2611,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.full_reset = ufs_qcom_full_reset,
.update_sec_cfg = ufs_qcom_update_sec_cfg,
.get_scale_down_gear = ufs_qcom_get_scale_down_gear,
+ .set_bus_vote = ufs_qcom_set_bus_vote,
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
#ifdef CONFIG_DEBUG_FS
.add_debugfs = ufs_qcom_dbg_add_debugfs,
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index ba36d9883a0f..394de8302fd2 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -100,6 +100,7 @@ enum {
#define QUNIPRO_SEL UFS_BIT(0)
#define TEST_BUS_EN BIT(18)
#define TEST_BUS_SEL GENMASK(22, 19)
+#define UFS_REG_TEST_BUS_EN BIT(30)
/* bit definitions for REG_UFS_CFG2 register */
#define UAWM_HW_CGC_EN (1 << 0)
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 19c378c72083..5a9564326099 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -40,6 +40,22 @@
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
+static int ufshcd_parse_reset_info(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ hba->core_reset = devm_reset_control_get(hba->dev,
+ "core_reset");
+ if (IS_ERR(hba->core_reset)) {
+ ret = PTR_ERR(hba->core_reset);
+ dev_err(hba->dev, "core_reset unavailable,err = %d\n",
+ ret);
+ hba->core_reset = NULL;
+ }
+
+ return ret;
+}
+
static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
int ret = 0;
@@ -338,6 +354,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
goto dealloc_host;
}
+ err = ufshcd_parse_reset_info(hba);
+ if (err) {
+ dev_err(&pdev->dev, "%s: reset parse failed %d\n",
+ __func__, err);
+ goto dealloc_host;
+ }
+
ufshcd_parse_pm_levels(hba);
if (!dev->dma_mask)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ce779d760c69..d478767ad3dd 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3932,8 +3932,12 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
ret = (status != PWR_OK) ? status : -1;
}
out:
- if (ret)
+ if (ret) {
ufsdbg_set_err_state(hba);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
ufshcd_save_tstamp_of_last_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4644,8 +4648,12 @@ link_startup:
ret = ufshcd_make_hba_operational(hba);
out:
- if (ret)
+ if (ret) {
dev_err(hba->dev, "link startup failed %d\n", ret);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
return ret;
}
@@ -7555,6 +7563,13 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
if (!head || list_empty(head))
goto out;
+ /* call vendor specific bus vote before enabling the clocks */
+ if (on) {
+ ret = ufshcd_vops_set_bus_vote(hba, on);
+ if (ret)
+ return ret;
+ }
+
/*
* vendor specific setup_clocks ops may depend on clocks managed by
* this standard driver hence call the vendor specific setup_clocks
@@ -7593,11 +7608,24 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
* this standard driver hence call the vendor specific setup_clocks
* after enabling the clocks managed here.
*/
- if (on)
+ if (on) {
ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
+ if (ret)
+ goto out;
+ }
+
+ /*
+ * call vendor specific bus vote to remove the vote after
+ * disabling the clocks.
+ */
+ if (!on)
+ ret = ufshcd_vops_set_bus_vote(hba, on);
out:
if (ret) {
+ if (on)
+ /* Can't do much if this fails */
+ (void) ufshcd_vops_set_bus_vote(hba, false);
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 552d50081e3f..a6298f614a0b 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -55,6 +55,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include "unipro.h"
#include <asm/irq.h>
@@ -309,6 +310,7 @@ struct ufs_pwr_mode_info {
* @update_sec_cfg: called to restore host controller secure configuration
* @get_scale_down_gear: called to get the minimum supported gear to
* scale down
+ * @set_bus_vote: called to vote for the required bus bandwidth
* @add_debugfs: used to add debugfs entries
* @remove_debugfs: used to remove debugfs entries
*/
@@ -335,6 +337,7 @@ struct ufs_hba_variant_ops {
void (*dbg_register_dump)(struct ufs_hba *hba);
int (*update_sec_cfg)(struct ufs_hba *hba, bool restore_sec_cfg);
u32 (*get_scale_down_gear)(struct ufs_hba *);
+ int (*set_bus_vote)(struct ufs_hba *, bool);
#ifdef CONFIG_DEBUG_FS
void (*add_debugfs)(struct ufs_hba *hba, struct dentry *root);
void (*remove_debugfs)(struct ufs_hba *hba);
@@ -891,6 +894,7 @@ struct ufs_hba {
struct rw_semaphore clk_scaling_lock;
+ struct reset_control *core_reset;
/* If set, don't gate device ref_clk during clock gating */
bool no_ref_clk_gating;
@@ -1259,6 +1263,13 @@ static inline u32 ufshcd_vops_get_scale_down_gear(struct ufs_hba *hba)
return UFS_HS_G1;
}
+static inline int ufshcd_vops_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+ if (hba->var && hba->var->vops && hba->var->vops->set_bus_vote)
+ return hba->var->vops->set_bus_vote(hba, on);
+ return 0;
+}
+
#ifdef CONFIG_DEBUG_FS
static inline void ufshcd_vops_add_debugfs(struct ufs_hba *hba,
struct dentry *root)
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index ea4dd2ce4e1d..9af9ce323bc3 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -167,6 +167,7 @@ static int ngd_qmi_available(struct notifier_block *n, unsigned long code,
SLIM_INFO(dev, "Slimbus QMI NGD CB received event:%ld\n", code);
switch (code) {
case QMI_SERVER_ARRIVE:
+ atomic_set(&dev->ssr_in_progress, 0);
schedule_work(&dev->dsp.dom_up);
break;
default:
@@ -214,6 +215,8 @@ static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code,
switch (code) {
case SUBSYS_BEFORE_SHUTDOWN:
case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01:
+ SLIM_INFO(dev, "SLIM DSP SSR notify cb:%lu\n", code);
+ atomic_set(&dev->ssr_in_progress, 1);
/* wait for current transaction */
mutex_lock(&dev->tx_lock);
/* make sure autosuspend is not called until ADSP comes up*/
@@ -225,6 +228,16 @@ static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code,
break;
case LOCATOR_UP:
reg = _cmd;
+ if (!reg || reg->total_domains != 1) {
+ SLIM_WARN(dev, "error locating audio-PD\n");
+ if (reg)
+ SLIM_WARN(dev, "audio-PDs matched:%d\n",
+ reg->total_domains);
+
+ /* Fall back to SSR */
+ ngd_reg_ssr(dev);
+ return NOTIFY_DONE;
+ }
dev->dsp.domr = service_notif_register_notifier(
reg->domain_list->name,
reg->domain_list->instance_id,
@@ -866,7 +879,7 @@ static int ngd_bulk_wr(struct slim_controller *ctrl, u8 la, u8 mt, u8 mc,
}
if (dev->bulk.size > dev->bulk.buf_sz) {
void *temp = krealloc(dev->bulk.base, dev->bulk.size,
- GFP_KERNEL);
+ GFP_KERNEL | GFP_DMA);
if (!temp) {
ret = -ENOMEM;
goto retpath;
@@ -1316,8 +1329,10 @@ hw_init_retry:
if (ret) {
SLIM_WARN(dev, "SLIM power req failed:%d, retry:%d\n",
ret, retries);
- msm_slim_qmi_power_request(dev, false);
- if (retries < INIT_MX_RETRIES) {
+ if (!atomic_read(&dev->ssr_in_progress))
+ msm_slim_qmi_power_request(dev, false);
+ if (retries < INIT_MX_RETRIES &&
+ !atomic_read(&dev->ssr_in_progress)) {
retries++;
goto hw_init_retry;
}
@@ -1416,7 +1431,8 @@ capability_retry:
SLIM_WARN(dev,
"slim capability time-out:%d, stat:0x%x,cfg:0x%x\n",
retries, laddr, cfg);
- if (retries < INIT_MX_RETRIES) {
+ if ((retries < INIT_MX_RETRIES) &&
+ !atomic_read(&dev->ssr_in_progress)) {
retries++;
goto capability_retry;
}
@@ -1487,8 +1503,7 @@ static int ngd_slim_rx_msgq_thread(void *data)
int retries = 0;
u8 wbuf[8];
- set_current_state(TASK_INTERRUPTIBLE);
- wait_for_completion(notify);
+ wait_for_completion_interruptible(notify);
txn.dt = SLIM_MSG_DEST_LOGICALADDR;
txn.ec = 0;
@@ -1549,8 +1564,7 @@ static int ngd_notify_slaves(void *data)
}
while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
- wait_for_completion(&dev->qmi.slave_notify);
+ wait_for_completion_interruptible(&dev->qmi.slave_notify);
/* Probe devices for first notification */
if (!i) {
i++;
@@ -1683,7 +1697,7 @@ static int ngd_slim_probe(struct platform_device *pdev)
/* typical txn numbers and size used in bulk operation */
dev->bulk.buf_sz = SLIM_MAX_TXNS * 8;
- dev->bulk.base = kzalloc(dev->bulk.buf_sz, GFP_KERNEL);
+ dev->bulk.base = kzalloc(dev->bulk.buf_sz, GFP_KERNEL | GFP_DMA);
if (!dev->bulk.base) {
ret = -ENOMEM;
goto err_nobulk;
@@ -1780,6 +1794,7 @@ static int ngd_slim_probe(struct platform_device *pdev)
dev->ee = 1;
dev->irq = irq->start;
dev->bam.irq = bam_irq->start;
+ atomic_set(&dev->ssr_in_progress, 0);
if (rxreg_access)
dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
@@ -1938,6 +1953,7 @@ static int ngd_slim_runtime_resume(struct device *device)
struct platform_device *pdev = to_platform_device(device);
struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
int ret = 0;
+
mutex_lock(&dev->tx_lock);
if (dev->state >= MSM_CTRL_ASLEEP)
ret = ngd_slim_power_up(dev, false);
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index 7616e714299c..65b9fae8040b 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -314,6 +314,7 @@ struct msm_slim_ctrl {
void (*rx_slim)(struct msm_slim_ctrl *dev, u8 *buf);
u32 current_rx_buf[10];
int current_count;
+ atomic_t ssr_in_progress;
};
struct msm_sat_chan {
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 0e74093eeb2b..9c27344165be 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -127,6 +127,17 @@ config MSM_SPCOM
spcom provides clients/server API, although currently only one client
or server is allowed per logical channel.
+config MSM_SPSS_UTILS
+ depends on MSM_PIL
+ bool "Secure Processor Utilities"
+ help
+ spss-utils driver selects Secure Processor firmware file name.
+ The firmware file name for test or production is selected based
+ on a test fuse.
+ A different file name is used for different SPSS HW versions,
+ because the SPSS firmware size is too small to support multiple
+ HW versions.
+
config MSM_SMEM_LOGGING
depends on MSM_SMEM
bool "MSM Shared Memory Logger"
@@ -417,6 +428,15 @@ config ICNSS
control messages to FW over QMI channel. It is also responsible for
handling WLAN PD restart notifications.
+config ICNSS_DEBUG
+ bool "ICNSS debug support"
+ depends on ICNSS
+ ---help---
+ Say 'Y' here to enable ICNSS driver debug support. Debug support
+ primarily consists of logs consisting of information related to
+ hardware register access and enabling BUG_ON for certain cases to aid
+ the debugging.
+
config MSM_SECURE_BUFFER
bool "Helper functions for securing buffers through TZ"
help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index e9e65ea443dd..d9134a558be6 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_MSM_IPC_ROUTER_HSIC_XPRT) += ipc_router_hsic_xprt.o
obj-$(CONFIG_MSM_IPC_ROUTER_MHI_XPRT) += ipc_router_mhi_xprt.o
obj-$(CONFIG_MSM_IPC_ROUTER_GLINK_XPRT) += ipc_router_glink_xprt.o
obj-$(CONFIG_MSM_SPCOM) += spcom.o
+obj-$(CONFIG_MSM_SPSS_UTILS) += spss_utils.o
obj-y += qdsp6v2/
obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR) += system_health_monitor_v01.o
obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR) += system_health_monitor.o
@@ -70,7 +71,7 @@ obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
obj-$(CONFIG_QCOM_COMMON_LOG) += common_log.o
obj-$(CONFIG_QCOM_IRQ_HELPER) += irq-helper.o
obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o
-obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o
+obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o icnss_utils.o
obj-$(CONFIG_SOC_BUS) += socinfo.o
obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/
obj-$(CONFIG_MSM_SERVICE_NOTIFIER) += service-notifier.o
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index f54d9c3f4f3d..5612075ba60c 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -30,6 +30,7 @@
#include "glink_private.h"
#include "glink_xprt_if.h"
+#define GLINK_CTX_CANARY 0x58544324 /* "$CTX" */
/* Number of internal IPC Logging log pages */
#define NUM_LOG_PAGES 10
#define GLINK_PM_QOS_HOLDOFF_MS 10
@@ -38,6 +39,8 @@
#define GLINK_QOS_DEF_MTU 2048
#define GLINK_KTHREAD_PRIO 1
+
+static rwlock_t magic_lock;
/**
* struct glink_qos_priority_bin - Packet Scheduler's priority bucket
* @max_rate_kBps: Maximum rate supported by the priority bucket.
@@ -308,6 +311,7 @@ struct channel_ctx {
unsigned long req_rate_kBps;
uint32_t tx_intent_cnt;
uint32_t tx_cnt;
+ uint32_t magic_number;
};
static struct glink_core_if core_impl;
@@ -436,6 +440,37 @@ static void glink_core_deinit_xprt_qos_cfg(
#define GLINK_GET_CH_TX_STATE(ctx) \
((ctx)->tx_intent_cnt || (ctx)->tx_cnt)
+static int glink_get_ch_ctx(struct channel_ctx *ctx)
+{
+ unsigned long flags;
+
+ if (!ctx)
+ return -EINVAL;
+ read_lock_irqsave(&magic_lock, flags);
+ if (ctx->magic_number != GLINK_CTX_CANARY) {
+ read_unlock_irqrestore(&magic_lock, flags);
+ return -EINVAL;
+ }
+ rwref_get(&ctx->ch_state_lhb2);
+ read_unlock_irqrestore(&magic_lock, flags);
+ return 0;
+}
+
+static int glink_put_ch_ctx(struct channel_ctx *ctx, bool update_magic)
+{
+ unsigned long flags;
+
+ if (!update_magic) {
+ rwref_put(&ctx->ch_state_lhb2);
+ return 0;
+ }
+ write_lock_irqsave(&magic_lock, flags);
+ ctx->magic_number = 0;
+ rwref_put(&ctx->ch_state_lhb2);
+ write_unlock_irqrestore(&magic_lock, flags);
+ return 0;
+}
+
/**
* glink_ssr() - Clean up locally for SSR by simulating remote close
* @subsystem: The name of the subsystem being restarted
@@ -2583,7 +2618,7 @@ void *glink_open(const struct glink_open_config *cfg)
GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
__func__, ctx);
-
+ ctx->magic_number = GLINK_CTX_CANARY;
return ctx;
}
EXPORT_SYMBOL(glink_open);
@@ -2681,15 +2716,19 @@ int glink_close(void *handle)
unsigned long flags;
bool is_empty = false;
- if (!ctx)
- return -EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
- if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+ if (ctx->local_open_state == GLINK_CHANNEL_CLOSED) {
+ glink_put_ch_ctx(ctx, false);
return 0;
+ }
if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
/* close already pending */
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
@@ -2754,6 +2793,7 @@ relock: xprt_ctx = ctx->transport_ptr;
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&xprt_ctx->xprt_state_lhb0);
+ glink_put_ch_ctx(ctx, true);
return ret;
}
EXPORT_SYMBOL(glink_close);
@@ -2812,29 +2852,30 @@ static int glink_tx_common(void *handle, void *pkt_priv,
if (!size)
return -EINVAL;
- if (!ctx)
- return -EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
if (!(vbuf_provider || pbuf_provider)) {
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EINVAL;
+ ret = -EINVAL;
+ goto glink_tx_common_err;
}
if (!ch_is_fully_opened(ctx)) {
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EBUSY;
+ ret = -EBUSY;
+ goto glink_tx_common_err;
}
if (size > GLINK_MAX_PKT_SIZE) {
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EINVAL;
+ ret = -EINVAL;
+ goto glink_tx_common_err;
}
if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto glink_tx_common_err;
}
tracer_pkt_log_event(data, GLINK_CORE_TX);
}
@@ -2846,16 +2887,16 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_ERR_CH(ctx,
"%s: R[%u]:%zu Intent not present for lcid\n",
__func__, riid, size);
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto glink_tx_common_err;
}
if (is_atomic && !(ctx->transport_ptr->capabilities &
GCAP_AUTO_QUEUE_RX_INT)) {
GLINK_ERR_CH(ctx,
"%s: Cannot request intent in atomic context\n",
__func__);
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EINVAL;
+ ret = -EINVAL;
+ goto glink_tx_common_err;
}
/* request intent of correct size */
@@ -2865,20 +2906,18 @@ static int glink_tx_common(void *handle, void *pkt_priv,
if (ret) {
GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
__func__, ret);
- rwref_read_put(&ctx->ch_state_lhb2);
- return ret;
+ goto glink_tx_common_err;
}
while (ch_pop_remote_rx_intent(ctx, size, &riid,
&intent_size, &cookie)) {
- rwref_get(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
if (is_atomic) {
GLINK_ERR_CH(ctx,
"%s Intent of size %zu not ready\n",
__func__, size);
- rwref_put(&ctx->ch_state_lhb2);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto glink_tx_common_err_2;
}
if (ctx->transport_ptr->local_state == GLINK_XPRT_DOWN
@@ -2886,8 +2925,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_ERR_CH(ctx,
"%s: Channel closed while waiting for intent\n",
__func__);
- rwref_put(&ctx->ch_state_lhb2);
- return -EBUSY;
+ ret = -EBUSY;
+ goto glink_tx_common_err_2;
}
/* wait for the remote intent req ack */
@@ -2897,8 +2936,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_ERR_CH(ctx,
"%s: Intent request ack with size: %zu not granted for lcid\n",
__func__, size);
- rwref_put(&ctx->ch_state_lhb2);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto glink_tx_common_err_2;
}
if (!ctx->int_req_ack) {
@@ -2906,8 +2945,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
"%s: Intent Request with size: %zu %s",
__func__, size,
"not granted for lcid\n");
- rwref_put(&ctx->ch_state_lhb2);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto glink_tx_common_err_2;
}
/* wait for the rx_intent from remote side */
@@ -2917,13 +2956,12 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_ERR_CH(ctx,
"%s: Intent request with size: %zu not granted for lcid\n",
__func__, size);
- rwref_put(&ctx->ch_state_lhb2);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto glink_tx_common_err_2;
}
reinit_completion(&ctx->int_req_complete);
rwref_read_get(&ctx->ch_state_lhb2);
- rwref_put(&ctx->ch_state_lhb2);
}
}
@@ -2943,8 +2981,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
if (!tx_info) {
GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
- rwref_read_put(&ctx->ch_state_lhb2);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto glink_tx_common_err;
}
rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
INIT_LIST_HEAD(&tx_info->list_done);
@@ -2970,7 +3008,10 @@ static int glink_tx_common(void *handle, void *pkt_priv,
else
xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
+glink_tx_common_err:
rwref_read_put(&ctx->ch_state_lhb2);
+glink_tx_common_err_2:
+ glink_put_ch_ctx(ctx, false);
return ret;
}
@@ -3011,13 +3052,15 @@ int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
struct glink_core_rx_intent *intent_ptr;
int ret = 0;
- if (!ctx)
- return -EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
/* Can only queue rx intents if channel is fully opened */
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
@@ -3026,13 +3069,16 @@ int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
GLINK_ERR_CH(ctx,
"%s: Intent pointer allocation failed size[%zu]\n",
__func__, size);
+ glink_put_ch_ctx(ctx, false);
return -ENOMEM;
}
GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
intent_ptr->intent_size);
- if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+ if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
+ glink_put_ch_ctx(ctx, false);
return ret;
+ }
/* notify remote side of rx intent */
ret = ctx->transport_ptr->ops->tx_cmd_local_rx_intent(
@@ -3040,7 +3086,7 @@ int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
if (ret)
/* unable to transmit, dequeue intent */
ch_remove_local_rx_intent(ctx, intent_ptr->id);
-
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_queue_rx_intent);
@@ -3059,20 +3105,25 @@ bool glink_rx_intent_exists(void *handle, size_t size)
struct channel_ctx *ctx = (struct channel_ctx *)handle;
struct glink_core_rx_intent *intent;
unsigned long flags;
+ int ret;
if (!ctx || !ch_is_fully_opened(ctx))
return false;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return false;
spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
if (size <= intent->intent_size) {
spin_unlock_irqrestore(
&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ glink_put_ch_ctx(ctx, false);
return true;
}
}
spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
-
+ glink_put_ch_ctx(ctx, false);
return false;
}
EXPORT_SYMBOL(glink_rx_intent_exists);
@@ -3093,11 +3144,15 @@ int glink_rx_done(void *handle, const void *ptr, bool reuse)
uint32_t id;
int ret = 0;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
liid_ptr = ch_get_local_rx_intent_notified(ctx, ptr);
if (IS_ERR_OR_NULL(liid_ptr)) {
/* invalid pointer */
GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
+ glink_put_ch_ctx(ctx, false);
return -EINVAL;
}
@@ -3123,7 +3178,7 @@ int glink_rx_done(void *handle, const void *ptr, bool reuse)
/* send rx done */
ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
ctx->lcid, id, reuse);
-
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_rx_done);
@@ -3171,12 +3226,13 @@ int glink_sigs_set(void *handle, uint32_t sigs)
struct channel_ctx *ctx = (struct channel_ctx *)handle;
int ret;
- if (!ctx)
- return -EINVAL;
-
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
@@ -3186,6 +3242,7 @@ int glink_sigs_set(void *handle, uint32_t sigs)
ctx->lcid, ctx->lsigs);
GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_sigs_set);
@@ -3201,17 +3258,22 @@ EXPORT_SYMBOL(glink_sigs_set);
int glink_sigs_local_get(void *handle, uint32_t *sigs)
{
struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
- if (!ctx || !sigs)
+ if (!sigs)
return -EINVAL;
-
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
*sigs = ctx->lsigs;
+ glink_put_ch_ctx(ctx, false);
return 0;
}
EXPORT_SYMBOL(glink_sigs_local_get);
@@ -3227,17 +3289,23 @@ EXPORT_SYMBOL(glink_sigs_local_get);
int glink_sigs_remote_get(void *handle, uint32_t *sigs)
{
struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
- if (!ctx || !sigs)
+ if (!sigs)
return -EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
*sigs = ctx->rsigs;
+ glink_put_ch_ctx(ctx, false);
return 0;
}
EXPORT_SYMBOL(glink_sigs_remote_get);
@@ -3333,12 +3401,16 @@ int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size)
int ret;
unsigned long req_rate_kBps;
- if (!ctx || !latency_us || !pkt_size)
+ if (!latency_us || !pkt_size)
return -EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
@@ -3348,7 +3420,7 @@ int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size)
if (ret < 0)
GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
__func__, latency_us, pkt_size);
-
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_qos_latency);
@@ -3366,16 +3438,18 @@ int glink_qos_cancel(void *handle)
struct channel_ctx *ctx = (struct channel_ctx *)handle;
int ret;
- if (!ctx)
- return -EINVAL;
-
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
ret = glink_qos_reset_priority(ctx);
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_qos_cancel);
@@ -3396,12 +3470,13 @@ int glink_qos_start(void *handle)
int ret;
unsigned long flags;
- if (!ctx)
- return -EINVAL;
-
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
@@ -3410,6 +3485,7 @@ int glink_qos_start(void *handle)
ret = glink_qos_add_ch_tx_intent(ctx);
spin_unlock(&ctx->tx_lists_lock_lhc3);
spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_qos_start);
@@ -3428,16 +3504,20 @@ EXPORT_SYMBOL(glink_qos_start);
unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size)
{
struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
- if (!ctx)
- return (unsigned long)-EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return (unsigned long)ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return (unsigned long)-EBUSY;
}
+ glink_put_ch_ctx(ctx, false);
return ctx->transport_ptr->ops->get_power_vote_ramp_time(
ctx->transport_ptr->ops,
glink_prio_to_power_state(ctx->transport_ptr,
@@ -3521,12 +3601,16 @@ EXPORT_SYMBOL(glink_rpm_mask_rx_interrupt);
int glink_wait_link_down(void *handle)
{
struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
- if (!ctx)
- return -EINVAL;
- if (!ctx->transport_ptr)
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
+ if (!ctx->transport_ptr) {
+ glink_put_ch_ctx(ctx, false);
return -EOPNOTSUPP;
-
+ }
+ glink_put_ch_ctx(ctx, false);
return ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
}
EXPORT_SYMBOL(glink_wait_link_down);
@@ -4029,6 +4113,37 @@ static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx(
return xprt_ptr;
}
+static struct channel_ctx *get_first_ch_ctx(
+ struct glink_core_xprt_ctx *xprt_ctx)
+{
+ unsigned long flags;
+ struct channel_ctx *ctx;
+
+ spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+ if (!list_empty(&xprt_ctx->channels)) {
+ ctx = list_first_entry(&xprt_ctx->channels,
+ struct channel_ctx, port_list_node);
+ rwref_get(&ctx->ch_state_lhb2);
+ } else {
+ ctx = NULL;
+ }
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+ return ctx;
+}
+
+static void glink_core_move_ch_node(struct glink_core_xprt_ctx *xprt_ptr,
+ struct glink_core_xprt_ctx *dummy_xprt_ctx, struct channel_ctx *ctx)
+{
+ unsigned long flags, d_flags;
+
+ spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+ spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+ rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
+ list_move_tail(&ctx->port_list_node, &dummy_xprt_ctx->channels);
+ spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+ spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+}
+
/**
* glink_core_channel_cleanup() - cleanup all channels for the transport
*
@@ -4039,7 +4154,7 @@ static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx(
static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
{
unsigned long flags, d_flags;
- struct channel_ctx *ctx, *tmp_ctx;
+ struct channel_ctx *ctx;
struct channel_lcid *temp_lcid, *temp_lcid1;
struct glink_core_xprt_ctx *dummy_xprt_ctx;
@@ -4048,29 +4163,18 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
GLINK_ERR("%s: Dummy Transport creation failed\n", __func__);
return;
}
-
rwref_read_get(&dummy_xprt_ctx->xprt_state_lhb0);
rwref_read_get(&xprt_ptr->xprt_state_lhb0);
- spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
- spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
-
- list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels,
- port_list_node) {
+ ctx = get_first_ch_ctx(xprt_ptr);
+ while (ctx) {
rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
ctx->local_open_state == GLINK_CHANNEL_OPENING) {
- rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
- list_move_tail(&ctx->port_list_node,
- &dummy_xprt_ctx->channels);
ctx->transport_ptr = dummy_xprt_ctx;
rwref_write_put(&ctx->ch_state_lhb2);
+ glink_core_move_ch_node(xprt_ptr, dummy_xprt_ctx, ctx);
} else {
/* local state is in either CLOSED or CLOSING */
- spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1,
- flags);
- spin_unlock_irqrestore(
- &dummy_xprt_ctx->xprt_ctx_lock_lhb1,
- d_flags);
glink_core_remote_close_common(ctx, true);
if (ctx->local_open_state == GLINK_CHANNEL_CLOSING)
glink_core_ch_close_ack_common(ctx, true);
@@ -4078,22 +4182,21 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
if (ch_is_fully_closed(ctx))
glink_delete_ch_from_list(ctx, false);
rwref_write_put(&ctx->ch_state_lhb2);
- spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
- d_flags);
- spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
}
+ rwref_put(&ctx->ch_state_lhb2);
+ ctx = get_first_ch_ctx(xprt_ptr);
}
+ spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
list_for_each_entry_safe(temp_lcid, temp_lcid1,
&xprt_ptr->free_lcid_list, list_node) {
list_del(&temp_lcid->list_node);
kfree(&temp_lcid->list_node);
}
- dummy_xprt_ctx->dummy_in_use = false;
spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
- spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
rwref_read_put(&xprt_ptr->xprt_state_lhb0);
spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+ dummy_xprt_ctx->dummy_in_use = false;
while (!list_empty(&dummy_xprt_ctx->channels)) {
ctx = list_first_entry(&dummy_xprt_ctx->channels,
struct channel_ctx, port_list_node);
@@ -5273,7 +5376,7 @@ static int glink_scheduler_tx(struct channel_ctx *ctx,
struct glink_core_xprt_ctx *xprt_ctx)
{
unsigned long flags;
- struct glink_core_tx_pkt *tx_info;
+ struct glink_core_tx_pkt *tx_info, *temp_tx_info;
size_t txd_len = 0;
size_t tx_len = 0;
uint32_t num_pkts = 0;
@@ -5308,6 +5411,20 @@ static int glink_scheduler_tx(struct channel_ctx *ctx,
ctx->lcid, tx_info);
}
spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+ if (!list_empty(&ctx->tx_active)) {
+ /*
+ * Verify that the same tx_info still exists in the
+ * tx_active list and was not removed during the tx.
+ * It can happen if SSR and tx done both happen
+ * before tx_lists_lock_lhc3 is taken.
+ */
+ temp_tx_info = list_first_entry(&ctx->tx_active,
+ struct glink_core_tx_pkt, list_node);
+ if (temp_tx_info != tx_info)
+ continue;
+ } else {
+ break;
+ }
if (ret == -EAGAIN) {
/*
* transport unable to send at the moment and will call
@@ -5334,6 +5451,7 @@ static int glink_scheduler_tx(struct channel_ctx *ctx,
* Break out of the loop so that the scheduler can
* continue with the next channel.
*/
+ rwref_put(&tx_info->pkt_ref);
break;
} else {
txd_len += tx_len;
@@ -5342,8 +5460,8 @@ static int glink_scheduler_tx(struct channel_ctx *ctx,
if (!tx_info->size_remaining) {
num_pkts++;
list_del_init(&tx_info->list_node);
- rwref_put(&tx_info->pkt_ref);
}
+ rwref_put(&tx_info->pkt_ref);
}
ctx->txd_len += txd_len;
@@ -5392,6 +5510,7 @@ static void tx_func(struct kthread_work *work)
glink_pm_qos_vote(xprt_ptr);
ch_ptr = list_first_entry(&xprt_ptr->prio_bin[prio].tx_ready,
struct channel_ctx, tx_ready_list_node);
+ rwref_get(&ch_ptr->ch_state_lhb2);
spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
if (tx_ready_head == NULL || tx_ready_head_prio < prio) {
@@ -5403,6 +5522,7 @@ static void tx_func(struct kthread_work *work)
GLINK_ERR_XPRT(xprt_ptr,
"%s: Unable to send data on this transport.\n",
__func__);
+ rwref_put(&ch_ptr->ch_state_lhb2);
break;
}
transmitted_successfully = false;
@@ -5413,6 +5533,7 @@ static void tx_func(struct kthread_work *work)
* transport unable to send at the moment and will call
* tx_resume() when it can send again.
*/
+ rwref_put(&ch_ptr->ch_state_lhb2);
break;
} else if (ret < 0) {
/*
@@ -5425,6 +5546,7 @@ static void tx_func(struct kthread_work *work)
GLINK_ERR_XPRT(xprt_ptr,
"%s: unrecoverable xprt failure %d\n",
__func__, ret);
+ rwref_put(&ch_ptr->ch_state_lhb2);
break;
} else if (!ret) {
/*
@@ -5436,6 +5558,7 @@ static void tx_func(struct kthread_work *work)
list_rotate_left(&xprt_ptr->prio_bin[prio].tx_ready);
spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3,
flags);
+ rwref_put(&ch_ptr->ch_state_lhb2);
continue;
}
@@ -5453,6 +5576,7 @@ static void tx_func(struct kthread_work *work)
tx_ready_head = NULL;
transmitted_successfully = true;
+ rwref_put(&ch_ptr->ch_state_lhb2);
}
glink_pm_qos_unvote(xprt_ptr);
GLINK_PERF("%s: worker exiting\n", __func__);
@@ -6019,6 +6143,7 @@ EXPORT_SYMBOL(glink_get_xprt_log_ctx);
static int glink_init(void)
{
log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
+ rwlock_init(&magic_lock);
if (!log_ctx)
GLINK_ERR("%s: unable to create log context\n", __func__);
glink_debugfs_init();
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index aff9683b394f..05a9ff4aeb1c 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -34,6 +34,8 @@
#include <linux/qmi_encdec.h>
#include <linux/ipc_logging.h>
#include <linux/msm-bus.h>
+#include <linux/uaccess.h>
+#include <linux/qpnp/qpnp-adc.h>
#include <soc/qcom/memory_dump.h>
#include <soc/qcom/icnss.h>
#include <soc/qcom/msm_qmi_interface.h>
@@ -44,11 +46,12 @@
#include "wlan_firmware_service_v01.h"
-#define ICNSS_PANIC 1
#define WLFW_TIMEOUT_MS 3000
#define WLFW_SERVICE_INS_ID_V01 0
#define MAX_PROP_SIZE 32
-#define NUM_LOG_PAGES 4
+#define NUM_LOG_PAGES 10
+#define NUM_REG_LOG_PAGES 4
+#define ICNSS_MAGIC 0x5abc5abc
/*
* Registers: MPM2_PSHOLD
@@ -57,9 +60,12 @@
#define MPM_WCSSAON_CONFIG_OFFSET 0x18
#define MPM_WCSSAON_CONFIG_ARES_N BIT(0)
#define MPM_WCSSAON_CONFIG_WLAN_DISABLE BIT(1)
+#define MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD BIT(6)
+#define MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD_VAL BIT(7)
#define MPM_WCSSAON_CONFIG_FORCE_ACTIVE BIT(14)
#define MPM_WCSSAON_CONFIG_FORCE_XO_ENABLE BIT(19)
#define MPM_WCSSAON_CONFIG_DISCONNECT_CLR BIT(21)
+#define MPM_WCSSAON_CONFIG_M2W_CLAMP_EN BIT(22)
/*
* Registers: WCSS_SR_SHADOW_REGISTERS
@@ -148,14 +154,31 @@
#define ICNSS_HW_REG_RETRY 10
+#define WCSS_HM_A_PMM_HW_VERSION_V10 0x40000000
+#define WCSS_HM_A_PMM_HW_VERSION_V20 0x40010000
+#define WCSS_HM_A_PMM_HW_VERSION_Q10 0x40010001
+
#define ICNSS_SERVICE_LOCATION_CLIENT_NAME "ICNSS-WLAN"
#define ICNSS_WLAN_SERVICE_NAME "wlan/fw"
+#define ICNSS_THRESHOLD_HIGH 3600000
+#define ICNSS_THRESHOLD_LOW 3450000
+#define ICNSS_THRESHOLD_GUARD 20000
+
#define icnss_ipc_log_string(_x...) do { \
if (icnss_ipc_log_context) \
ipc_log_string(icnss_ipc_log_context, _x); \
} while (0)
+#ifdef CONFIG_ICNSS_DEBUG
+#define icnss_ipc_log_long_string(_x...) do { \
+ if (icnss_ipc_log_long_context) \
+ ipc_log_string(icnss_ipc_log_long_context, _x); \
+ } while (0)
+#else
+#define icnss_ipc_log_long_string(_x...)
+#endif
+
#define icnss_pr_err(_fmt, ...) do { \
pr_err(_fmt, ##__VA_ARGS__); \
icnss_ipc_log_string("ERR: " pr_fmt(_fmt), \
@@ -180,7 +203,13 @@
##__VA_ARGS__); \
} while (0)
-#ifdef ICNSS_PANIC
+#define icnss_reg_dbg(_fmt, ...) do { \
+ pr_debug(_fmt, ##__VA_ARGS__); \
+ icnss_ipc_log_long_string("REG: " pr_fmt(_fmt), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#ifdef CONFIG_ICNSS_DEBUG
#define ICNSS_ASSERT(_condition) do { \
if (!(_condition)) { \
icnss_pr_err("ASSERT at line %d\n", \
@@ -208,15 +237,19 @@ enum icnss_debug_quirks {
PDR_ONLY,
};
-#define ICNSS_QUIRKS_DEFAULT ( \
- BIT(SSR_ONLY) \
- )
+#define ICNSS_QUIRKS_DEFAULT 0
unsigned long quirks = ICNSS_QUIRKS_DEFAULT;
module_param(quirks, ulong, 0600);
void *icnss_ipc_log_context;
+#ifdef CONFIG_ICNSS_DEBUG
+void *icnss_ipc_log_long_context;
+#endif
+
+#define ICNSS_EVENT_PENDING 2989
+
enum icnss_driver_event_type {
ICNSS_DRIVER_EVENT_SERVER_ARRIVE,
ICNSS_DRIVER_EVENT_SERVER_EXIT,
@@ -243,6 +276,8 @@ enum icnss_driver_state {
ICNSS_DRIVER_PROBED,
ICNSS_FW_TEST_MODE,
ICNSS_SUSPEND,
+ ICNSS_PM_SUSPEND,
+ ICNSS_PM_SUSPEND_NOIRQ,
ICNSS_SSR_ENABLED,
ICNSS_PDR_ENABLED,
ICNSS_PD_RESTART,
@@ -298,6 +333,15 @@ struct icnss_stats {
uint32_t disable;
} ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
+ uint32_t pm_suspend;
+ uint32_t pm_suspend_err;
+ uint32_t pm_resume;
+ uint32_t pm_resume_err;
+ uint32_t pm_suspend_noirq;
+ uint32_t pm_suspend_noirq_err;
+ uint32_t pm_resume_noirq;
+ uint32_t pm_resume_noirq_err;
+
uint32_t ind_register_req;
uint32_t ind_register_resp;
uint32_t ind_register_err;
@@ -321,9 +365,13 @@ struct icnss_stats {
uint32_t ini_req;
uint32_t ini_resp;
uint32_t ini_req_err;
+ uint32_t vbatt_req;
+ uint32_t vbatt_resp;
+ uint32_t vbatt_req_err;
};
static struct icnss_priv {
+ uint32_t magic;
struct platform_device *pdev;
struct icnss_driver_ops *ops;
struct ce_irq_list ce_irq_list[ICNSS_MAX_IRQ_REGISTRATIONS];
@@ -370,6 +418,15 @@ static struct icnss_priv {
struct notifier_block get_service_nb;
void *modem_notify_handler;
struct notifier_block modem_ssr_nb;
+ struct wakeup_source ws;
+ uint32_t diag_reg_read_addr;
+ uint32_t diag_reg_read_mem_type;
+ uint32_t diag_reg_read_len;
+ uint8_t *diag_reg_read_buf;
+ struct qpnp_adc_tm_btm_param vph_monitor_params;
+ struct qpnp_adc_tm_chip *adc_tm_dev;
+ struct qpnp_vadc_chip *vadc_dev;
+ uint64_t vph_pwr;
} *penv;
static void icnss_hw_write_reg(void *base, u32 offset, u32 val)
@@ -382,7 +439,7 @@ static u32 icnss_hw_read_reg(void *base, u32 offset)
{
u32 rdata = readl_relaxed(base + offset);
- icnss_pr_dbg(" READ: offset: 0x%06x 0x%08x\n", offset, rdata);
+ icnss_reg_dbg(" READ: offset: 0x%06x 0x%08x\n", offset, rdata);
return rdata;
}
@@ -394,7 +451,7 @@ static void icnss_hw_write_reg_field(void *base, u32 offset, u32 mask, u32 val)
val = (rdata & ~mask) | (val << shift);
- icnss_pr_dbg("WRITE: offset: 0x%06x 0x%08x -> 0x%08x\n",
+ icnss_reg_dbg("WRITE: offset: 0x%06x 0x%08x -> 0x%08x\n",
offset, rdata, val);
icnss_hw_write_reg(base, offset, val);
@@ -413,12 +470,12 @@ static int icnss_hw_poll_reg_field(void *base, u32 offset, u32 mask, u32 val,
rdata = readl_relaxed(base + offset);
- icnss_pr_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
+ icnss_reg_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
offset, val, rdata, mask);
while ((rdata & mask) != val) {
if (retry != 0 && r >= retry) {
- icnss_pr_err(" POLL FAILED: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
+ icnss_pr_err("POLL FAILED: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
offset, val, rdata, mask);
return -EIO;
@@ -429,8 +486,8 @@ static int icnss_hw_poll_reg_field(void *base, u32 offset, u32 mask, u32 val,
rdata = readl_relaxed(base + offset);
if (retry)
- icnss_pr_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
- offset, val, rdata, mask);
+ icnss_reg_dbg(" POLL: offset: 0x%06x 0x%08x == 0x%08x & 0x%08x\n",
+ offset, val, rdata, mask);
}
@@ -486,6 +543,7 @@ static int icnss_driver_event_post(enum icnss_driver_event_type type,
event->type = type;
event->data = data;
init_completion(&event->complete);
+ event->ret = ICNSS_EVENT_PENDING;
event->sync = sync;
spin_lock_irqsave(&penv->event_lock, flags);
@@ -494,16 +552,213 @@ static int icnss_driver_event_post(enum icnss_driver_event_type type,
penv->stats.events[type].posted++;
queue_work(penv->event_wq, &penv->event_work);
- if (sync) {
- ret = wait_for_completion_interruptible(&event->complete);
- if (ret == 0)
- ret = event->ret;
- kfree(event);
+
+ if (!sync)
+ return ret;
+
+ ret = wait_for_completion_interruptible(&event->complete);
+
+ icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
+ icnss_driver_event_to_str(type), type, penv->state, ret,
+ event->ret);
+
+ spin_lock_irqsave(&penv->event_lock, flags);
+ if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
+ event->sync = false;
+ spin_unlock_irqrestore(&penv->event_lock, flags);
+ return ret;
+ }
+ spin_unlock_irqrestore(&penv->event_lock, flags);
+
+ ret = event->ret;
+ kfree(event);
+
+ return ret;
+}
+
+static int wlfw_vbatt_send_sync_msg(struct icnss_priv *priv,
+ uint64_t voltage_uv)
+{
+ int ret;
+ struct wlfw_vbatt_req_msg_v01 req;
+ struct wlfw_vbatt_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+
+ if (!priv->wlfw_clnt) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ icnss_pr_dbg("Sending Vbatt message, state: 0x%lx\n",
+ penv->state);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.voltage_uv = voltage_uv;
+
+ req_desc.max_msg_len = WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_VBATT_REQ_V01;
+ req_desc.ei_array = wlfw_vbatt_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_VBATT_RESP_V01;
+ resp_desc.ei_array = wlfw_vbatt_resp_msg_v01_ei;
+
+ priv->stats.vbatt_req++;
+
+ ret = qmi_send_req_wait(priv->wlfw_clnt, &req_desc, &req, sizeof(req),
+ &resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ icnss_pr_err("Send vbatt req failed %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ icnss_pr_err("QMI vbatt request failed %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
}
+ priv->stats.vbatt_resp++;
+out:
+ if (ret) priv->stats.vbatt_req_err++;
return ret;
}
+static int icnss_get_phone_power(struct icnss_priv *priv, uint64_t *result_uv)
+{
+ int ret = 0;
+ struct qpnp_vadc_result adc_result;
+
+ if (!priv->vadc_dev) {
+ icnss_pr_err("VADC dev doesn't exists\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = qpnp_vadc_read(penv->vadc_dev, VADC_VPH_PWR, &adc_result);
+ if (ret) {
+ icnss_pr_err("Error reading ADC channel %d, ret = %d\n",
+ VADC_VPH_PWR, ret);
+ goto out;
+ }
+
+ icnss_pr_dbg("Phone power read phy=%lld meas=0x%llx\n",
+ adc_result.physical, adc_result.measurement);
+
+ *result_uv = adc_result.physical;
+out:
+ return ret;
+}
+
+static void icnss_vph_notify(enum qpnp_tm_state state, void *ctx)
+{
+ struct icnss_priv *priv = ctx;
+ uint64_t vph_pwr = 0;
+ uint64_t vph_pwr_prev;
+ int ret = 0;
+ bool update = true;
+
+ if (!priv) {
+ icnss_pr_err("Priv pointer is NULL\n");
+ return;
+ }
+
+ vph_pwr_prev = priv->vph_pwr;
+
+ ret = icnss_get_phone_power(priv, &vph_pwr);
+ if (ret)
+ return;
+
+ if (vph_pwr < ICNSS_THRESHOLD_LOW) {
+ if (vph_pwr_prev < ICNSS_THRESHOLD_LOW)
+ update = false;
+ priv->vph_monitor_params.state_request =
+ ADC_TM_HIGH_THR_ENABLE;
+ priv->vph_monitor_params.high_thr = ICNSS_THRESHOLD_LOW +
+ ICNSS_THRESHOLD_GUARD;
+ priv->vph_monitor_params.low_thr = 0;
+ } else if (vph_pwr > ICNSS_THRESHOLD_HIGH) {
+ if (vph_pwr_prev > ICNSS_THRESHOLD_HIGH)
+ update = false;
+ priv->vph_monitor_params.state_request =
+ ADC_TM_LOW_THR_ENABLE;
+ priv->vph_monitor_params.low_thr = ICNSS_THRESHOLD_HIGH -
+ ICNSS_THRESHOLD_GUARD;
+ priv->vph_monitor_params.high_thr = 0;
+ } else {
+ if (vph_pwr_prev > ICNSS_THRESHOLD_LOW &&
+ vph_pwr_prev < ICNSS_THRESHOLD_HIGH)
+ update = false;
+ priv->vph_monitor_params.state_request =
+ ADC_TM_HIGH_LOW_THR_ENABLE;
+ priv->vph_monitor_params.low_thr = ICNSS_THRESHOLD_LOW;
+ priv->vph_monitor_params.high_thr = ICNSS_THRESHOLD_HIGH;
+ }
+
+ priv->vph_pwr = vph_pwr;
+
+ if (update)
+ wlfw_vbatt_send_sync_msg(priv, vph_pwr);
+
+ icnss_pr_dbg("set low threshold to %d, high threshold to %d\n",
+ priv->vph_monitor_params.low_thr,
+ priv->vph_monitor_params.high_thr);
+ ret = qpnp_adc_tm_channel_measure(priv->adc_tm_dev,
+ &priv->vph_monitor_params);
+ if (ret)
+ icnss_pr_err("TM channel setup failed %d\n", ret);
+}
+
+static int icnss_setup_vph_monitor(struct icnss_priv *priv)
+{
+ int ret = 0;
+
+ if (!priv->adc_tm_dev) {
+ icnss_pr_err("ADC TM handler is NULL\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->vph_monitor_params.low_thr = ICNSS_THRESHOLD_LOW;
+ priv->vph_monitor_params.high_thr = ICNSS_THRESHOLD_HIGH;
+ priv->vph_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
+ priv->vph_monitor_params.channel = VADC_VPH_PWR;
+ priv->vph_monitor_params.btm_ctx = priv;
+ priv->vph_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S;
+ priv->vph_monitor_params.threshold_notification = &icnss_vph_notify;
+ icnss_pr_dbg("Set low threshold to %d, high threshold to %d\n",
+ priv->vph_monitor_params.low_thr,
+ priv->vph_monitor_params.high_thr);
+
+ ret = qpnp_adc_tm_channel_measure(priv->adc_tm_dev,
+ &priv->vph_monitor_params);
+ if (ret)
+ icnss_pr_err("TM channel setup failed %d\n", ret);
+out:
+ return ret;
+}
+
+static int icnss_init_vph_monitor(struct icnss_priv *priv)
+{
+ int ret = 0;
+
+ ret = icnss_get_phone_power(priv, &priv->vph_pwr);
+ if (ret)
+ goto out;
+
+ wlfw_vbatt_send_sync_msg(priv, priv->vph_pwr);
+
+ ret = icnss_setup_vph_monitor(priv);
+ if (ret)
+ goto out;
+out:
+ return ret;
+}
+
+
static int icnss_qmi_pin_connect_result_ind(void *msg, unsigned int msg_len)
{
struct msg_desc ind_desc;
@@ -742,6 +997,32 @@ static void icnss_hw_top_level_reset(struct icnss_priv *priv)
ICNSS_HW_REG_RETRY);
}
+static void icnss_hw_io_reset(struct icnss_priv *priv, bool on)
+{
+ u32 hw_version = priv->soc_info.soc_id;
+
+ if (on && !test_bit(ICNSS_FW_READY, &priv->state))
+ return;
+
+ icnss_pr_dbg("HW io reset: %s, SoC: 0x%x, state: 0x%lx\n",
+ on ? "ON" : "OFF", priv->soc_info.soc_id, priv->state);
+
+ if (hw_version == WCSS_HM_A_PMM_HW_VERSION_V10 ||
+ hw_version == WCSS_HM_A_PMM_HW_VERSION_V20) {
+ icnss_hw_write_reg_field(priv->mpm_config_va,
+ MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD_VAL, 0);
+ icnss_hw_write_reg_field(priv->mpm_config_va,
+ MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_MSM_CLAMP_EN_OVRD, on);
+ } else if (hw_version == WCSS_HM_A_PMM_HW_VERSION_Q10) {
+ icnss_hw_write_reg_field(priv->mpm_config_va,
+ MPM_WCSSAON_CONFIG_OFFSET,
+ MPM_WCSSAON_CONFIG_M2W_CLAMP_EN,
+ on);
+ }
+}
+
int icnss_hw_reset_wlan_ss_power_down(struct icnss_priv *priv)
{
u32 rdata;
@@ -1039,7 +1320,7 @@ int icnss_hw_reset(struct icnss_priv *priv)
MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 1);
icnss_hw_poll_reg_field(priv->mem_base_va, SR_WCSSAON_SR_LSB_OFFSET,
- SR_WCSSAON_SR_LSB_RETENTION_STATUS, 1, 10,
+ SR_WCSSAON_SR_LSB_RETENTION_STATUS, 1, 100,
ICNSS_HW_REG_RETRY);
for (i = 0; i < ICNSS_HW_REG_RETRY; i++) {
@@ -1107,7 +1388,7 @@ static int icnss_hw_power_on(struct icnss_priv *priv)
int ret = 0;
unsigned long flags;
- icnss_pr_dbg("Power on: state: 0x%lx\n", priv->state);
+ icnss_pr_dbg("HW Power on: state: 0x%lx\n", priv->state);
spin_lock_irqsave(&priv->on_off_lock, flags);
if (test_bit(ICNSS_POWER_ON, &priv->state)) {
@@ -1127,6 +1408,8 @@ static int icnss_hw_power_on(struct icnss_priv *priv)
icnss_hw_top_level_release_reset(priv);
+ icnss_hw_io_reset(penv, 1);
+
return ret;
out:
clear_bit(ICNSS_POWER_ON, &priv->state);
@@ -1141,7 +1424,7 @@ static int icnss_hw_power_off(struct icnss_priv *priv)
if (test_bit(HW_ALWAYS_ON, &quirks))
return 0;
- icnss_pr_dbg("Power off: 0x%lx\n", priv->state);
+ icnss_pr_dbg("HW Power off: 0x%lx\n", priv->state);
spin_lock_irqsave(&priv->on_off_lock, flags);
if (!test_bit(ICNSS_POWER_ON, &priv->state)) {
@@ -1151,6 +1434,8 @@ static int icnss_hw_power_off(struct icnss_priv *priv)
clear_bit(ICNSS_POWER_ON, &priv->state);
spin_unlock_irqrestore(&priv->on_off_lock, flags);
+ icnss_hw_io_reset(penv, 0);
+
icnss_hw_reset(priv);
icnss_clk_deinit(priv);
@@ -1175,6 +1460,8 @@ int icnss_power_on(struct device *dev)
return -EINVAL;
}
+ icnss_pr_dbg("Power On: 0x%lx\n", priv->state);
+
return icnss_hw_power_on(priv);
}
EXPORT_SYMBOL(icnss_power_on);
@@ -1189,6 +1476,8 @@ int icnss_power_off(struct device *dev)
return -EINVAL;
}
+ icnss_pr_dbg("Power Off: 0x%lx\n", priv->state);
+
return icnss_hw_power_off(priv);
}
EXPORT_SYMBOL(icnss_power_off);
@@ -1219,11 +1508,11 @@ int icnss_map_msa_permissions(struct icnss_priv *priv, u32 index)
ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
dest_vmids, dest_perms, dest_nelems);
if (ret) {
- icnss_pr_err("region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n",
+ icnss_pr_err("Region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n",
index, &addr, size, ret);
goto out;
}
- icnss_pr_dbg("hypervisor map for region %u: source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n",
+ icnss_pr_dbg("Hypervisor map for region %u: source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n",
index, source_vmlist[0], dest_nelems,
dest_vmids[0], dest_vmids[1], dest_vmids[2]);
out:
@@ -1255,7 +1544,7 @@ int icnss_unmap_msa_permissions(struct icnss_priv *priv, u32 index)
ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
dest_vmids, dest_perms, dest_nelems);
if (ret) {
- icnss_pr_err("region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n",
+ icnss_pr_err("Region %u hyp_assign_phys failed IPA=%pa size=%u err=%d\n",
index, &addr, size, ret);
goto out;
}
@@ -1294,16 +1583,14 @@ static void icnss_remove_msa_permissions(struct icnss_priv *priv)
static int wlfw_msa_mem_info_send_sync_msg(void)
{
- int ret = 0;
+ int ret;
int i;
struct wlfw_msa_info_req_msg_v01 req;
struct wlfw_msa_info_resp_msg_v01 resp;
struct msg_desc req_desc, resp_desc;
- if (!penv || !penv->wlfw_clnt) {
- ret = -ENODEV;
- goto out;
- }
+ if (!penv || !penv->wlfw_clnt)
+ return -ENODEV;
icnss_pr_dbg("Sending MSA mem info, state: 0x%lx\n", penv->state);
@@ -1326,15 +1613,14 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS);
if (ret < 0) {
- icnss_pr_err("Send req failed %d\n", ret);
+ icnss_pr_err("Send MSA Mem info req failed %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI request failed %d %d\n",
+ icnss_pr_err("QMI MSA Mem info request failed %d %d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
- penv->stats.msa_info_err++;
goto out;
}
@@ -1345,7 +1631,6 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
icnss_pr_err("Invalid memory region length received: %d\n",
resp.mem_region_info_len);
ret = -EINVAL;
- penv->stats.msa_info_err++;
goto out;
}
@@ -1363,7 +1648,11 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
penv->icnss_mem_region[i].secure_flag);
}
+ return 0;
+
out:
+ penv->stats.msa_info_err++;
+ ICNSS_ASSERT(false);
return ret;
}
@@ -1374,10 +1663,8 @@ static int wlfw_msa_ready_send_sync_msg(void)
struct wlfw_msa_ready_resp_msg_v01 resp;
struct msg_desc req_desc, resp_desc;
- if (!penv || !penv->wlfw_clnt) {
- ret = -ENODEV;
- goto out;
- }
+ if (!penv || !penv->wlfw_clnt)
+ return -ENODEV;
icnss_pr_dbg("Sending MSA ready request message, state: 0x%lx\n",
penv->state);
@@ -1397,20 +1684,23 @@ static int wlfw_msa_ready_send_sync_msg(void)
ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS);
if (ret < 0) {
- penv->stats.msa_ready_err++;
- icnss_pr_err("Send req failed %d\n", ret);
+ icnss_pr_err("Send MSA ready req failed %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI request failed %d %d\n",
+ icnss_pr_err("QMI MSA ready request failed %d %d\n",
resp.resp.result, resp.resp.error);
- penv->stats.msa_ready_err++;
ret = resp.resp.result;
goto out;
}
penv->stats.msa_ready_resp++;
+
+ return 0;
+
out:
+ penv->stats.msa_ready_err++;
+ ICNSS_ASSERT(false);
return ret;
}
@@ -1421,10 +1711,8 @@ static int wlfw_ind_register_send_sync_msg(void)
struct wlfw_ind_register_resp_msg_v01 resp;
struct msg_desc req_desc, resp_desc;
- if (!penv || !penv->wlfw_clnt) {
- ret = -ENODEV;
- goto out;
- }
+ if (!penv || !penv->wlfw_clnt)
+ return -ENODEV;
icnss_pr_dbg("Sending indication register message, state: 0x%lx\n",
penv->state);
@@ -1452,21 +1740,24 @@ static int wlfw_ind_register_send_sync_msg(void)
ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
WLFW_TIMEOUT_MS);
- penv->stats.ind_register_resp++;
if (ret < 0) {
- icnss_pr_err("Send req failed %d\n", ret);
- penv->stats.ind_register_err++;
+ icnss_pr_err("Send indication register req failed %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI request failed %d %d\n",
+ icnss_pr_err("QMI indication register request failed %d %d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
- penv->stats.ind_register_err++;
goto out;
}
+ penv->stats.ind_register_resp++;
+
+ return 0;
+
out:
+ penv->stats.ind_register_err++;
+ ICNSS_ASSERT(false);
return ret;
}
@@ -1477,10 +1768,8 @@ static int wlfw_cap_send_sync_msg(void)
struct wlfw_cap_resp_msg_v01 resp;
struct msg_desc req_desc, resp_desc;
- if (!penv || !penv->wlfw_clnt) {
- ret = -ENODEV;
- goto out;
- }
+ if (!penv || !penv->wlfw_clnt)
+ return -ENODEV;
icnss_pr_dbg("Sending capability message, state: 0x%lx\n", penv->state);
@@ -1499,16 +1788,14 @@ static int wlfw_cap_send_sync_msg(void)
&resp_desc, &resp, sizeof(resp),
WLFW_TIMEOUT_MS);
if (ret < 0) {
- icnss_pr_err("Send req failed %d\n", ret);
- penv->stats.cap_err++;
+ icnss_pr_err("Send capability req failed %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI request failed %d %d\n",
+ icnss_pr_err("QMI capability request failed %d %d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
- penv->stats.cap_err++;
goto out;
}
@@ -1530,7 +1817,12 @@ static int wlfw_cap_send_sync_msg(void)
penv->board_info.board_id, penv->soc_info.soc_id,
penv->fw_version_info.fw_version,
penv->fw_version_info.fw_build_timestamp);
+
+ return 0;
+
out:
+ penv->stats.cap_err++;
+ ICNSS_ASSERT(false);
return ret;
}
@@ -1541,10 +1833,15 @@ static int wlfw_wlan_mode_send_sync_msg(enum wlfw_driver_mode_enum_v01 mode)
struct wlfw_wlan_mode_resp_msg_v01 resp;
struct msg_desc req_desc, resp_desc;
- if (!penv || !penv->wlfw_clnt) {
- ret = -ENODEV;
- goto out;
- }
+ if (!penv || !penv->wlfw_clnt)
+ return -ENODEV;
+
+ /* During recovery do not send mode request for WLAN OFF as
+ * FW not able to process it.
+ */
+ if (test_bit(ICNSS_PD_RESTART, &penv->state) &&
+ mode == QMI_WLFW_OFF_V01)
+ return 0;
icnss_pr_dbg("Sending Mode request, state: 0x%lx, mode: %d\n",
penv->state, mode);
@@ -1569,20 +1866,24 @@ static int wlfw_wlan_mode_send_sync_msg(enum wlfw_driver_mode_enum_v01 mode)
&resp_desc, &resp, sizeof(resp),
WLFW_TIMEOUT_MS);
if (ret < 0) {
- icnss_pr_err("Send req failed %d\n", ret);
- penv->stats.mode_req_err++;
+ icnss_pr_err("Send mode req failed, mode: %d ret: %d\n",
+ mode, ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI request failed %d %d\n",
- resp.resp.result, resp.resp.error);
+ icnss_pr_err("QMI mode request failed mode: %d, %d %d\n",
+ mode, resp.resp.result, resp.resp.error);
ret = resp.resp.result;
- penv->stats.mode_req_err++;
goto out;
}
penv->stats.mode_resp++;
+
+ return 0;
+
out:
+ penv->stats.mode_req_err++;
+ ICNSS_ASSERT(false);
return ret;
}
@@ -1593,10 +1894,8 @@ static int wlfw_wlan_cfg_send_sync_msg(struct wlfw_wlan_cfg_req_msg_v01 *data)
struct wlfw_wlan_cfg_resp_msg_v01 resp;
struct msg_desc req_desc, resp_desc;
- if (!penv || !penv->wlfw_clnt) {
+ if (!penv || !penv->wlfw_clnt)
return -ENODEV;
- goto out;
- }
icnss_pr_dbg("Sending config request, state: 0x%lx\n", penv->state);
@@ -1618,20 +1917,23 @@ static int wlfw_wlan_cfg_send_sync_msg(struct wlfw_wlan_cfg_req_msg_v01 *data)
&resp_desc, &resp, sizeof(resp),
WLFW_TIMEOUT_MS);
if (ret < 0) {
- icnss_pr_err("Send req failed %d\n", ret);
- penv->stats.cfg_req_err++;
+ icnss_pr_err("Send config req failed %d\n", ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI request failed %d %d\n",
+ icnss_pr_err("QMI config request failed %d %d\n",
resp.resp.result, resp.resp.error);
ret = resp.resp.result;
- penv->stats.cfg_req_err++;
goto out;
}
penv->stats.cfg_resp++;
+
+ return 0;
+
out:
+ penv->stats.cfg_req_err++;
+ ICNSS_ASSERT(false);
return ret;
}
@@ -1642,10 +1944,8 @@ static int wlfw_ini_send_sync_msg(bool enable_fw_log)
struct wlfw_ini_resp_msg_v01 resp;
struct msg_desc req_desc, resp_desc;
- if (!penv || !penv->wlfw_clnt) {
- ret = -ENODEV;
- goto out;
- }
+ if (!penv || !penv->wlfw_clnt)
+ return -ENODEV;
icnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log: %d\n",
penv->state, enable_fw_log);
@@ -1669,20 +1969,145 @@ static int wlfw_ini_send_sync_msg(bool enable_fw_log)
ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS);
if (ret < 0) {
- icnss_pr_err("send req failed %d\n", ret);
- penv->stats.ini_req_err++;
+ icnss_pr_err("Send INI req failed fw_log: %d, ret: %d\n",
+ enable_fw_log, ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI request failed %d %d\n",
- resp.resp.result, resp.resp.error);
+ icnss_pr_err("QMI INI request failed fw_log: %d, %d %d\n",
+ enable_fw_log, resp.resp.result, resp.resp.error);
ret = resp.resp.result;
- penv->stats.ini_req_err++;
goto out;
}
penv->stats.ini_resp++;
+
+ return 0;
+
+out:
+ penv->stats.ini_req_err++;
+ ICNSS_ASSERT(false);
+ return ret;
+}
+
+/* Read @data_len bytes of WLAN FW memory over the QMI ATHDIAG
+ * service.
+ *
+ * @priv:     driver context; wlfw_clnt must be connected
+ * @offset:   address within @mem_type to read from
+ * @mem_type: FW memory region selector
+ * @data_len: bytes requested; @data must hold at least this many
+ * @data:     output buffer, filled on success
+ *
+ * Returns 0 on success, -errno or a positive QMI result on failure.
+ */
+static int wlfw_athdiag_read_send_sync_msg(struct icnss_priv *priv,
+ uint32_t offset, uint32_t mem_type,
+ uint32_t data_len, uint8_t *data)
+{
+ int ret;
+ struct wlfw_athdiag_read_req_msg_v01 req;
+ struct wlfw_athdiag_read_resp_msg_v01 *resp = NULL;
+ struct msg_desc req_desc, resp_desc;
+
+ if (!priv->wlfw_clnt) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ icnss_pr_dbg("Diag read: state 0x%lx, offset %x, mem_type %x, data_len %u\n",
+ priv->state, offset, mem_type, data_len);
+
+ /* Response carries the data payload and is large; heap-allocate. */
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memset(&req, 0, sizeof(req));
+
+ req.offset = offset;
+ req.mem_type = mem_type;
+ req.data_len = data_len;
+
+ req_desc.max_msg_len = WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_ATHDIAG_READ_REQ_V01;
+ req_desc.ei_array = wlfw_athdiag_read_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_ATHDIAG_READ_RESP_V01;
+ resp_desc.ei_array = wlfw_athdiag_read_resp_msg_v01_ei;
+
+ /* Use the handle we null-checked above, not the global penv. */
+ ret = qmi_send_req_wait(priv->wlfw_clnt, &req_desc, &req, sizeof(req),
+ &resp_desc, resp, sizeof(*resp),
+ WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ icnss_pr_err("send athdiag read req failed %d\n", ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ icnss_pr_err("QMI athdiag read request failed %d %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = resp->resp.result;
+ goto out;
+ }
+
+ /* Reject a reply with no payload or one larger than the caller's
+ * buffer. The previous check (resp->data_len <= data_len) was
+ * inverted: it failed every correctly-sized read and only let
+ * oversized replies through to overflow @data in the memcpy below.
+ */
+ if (!resp->data_valid || resp->data_len > data_len) {
+ icnss_pr_err("Athdiag read data is invalid, data_valid = %u, data_len = %u\n",
+ resp->data_valid, resp->data_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(data, resp->data, resp->data_len);
+
+out:
+ kfree(resp);
+ return ret;
+}
+
+/* Write @data_len bytes of WLAN FW memory over the QMI ATHDIAG
+ * service.
+ *
+ * @priv:     driver context; wlfw_clnt must be connected
+ * @offset:   address within @mem_type to write to
+ * @mem_type: FW memory region selector
+ * @data_len: bytes to write; callers bound this by
+ *            QMI_WLFW_MAX_DATA_SIZE_V01
+ * @data:     input buffer holding the bytes to write
+ *
+ * Returns 0 on success, -errno or a positive QMI result on failure.
+ */
+static int wlfw_athdiag_write_send_sync_msg(struct icnss_priv *priv,
+ uint32_t offset, uint32_t mem_type,
+ uint32_t data_len, uint8_t *data)
+{
+ int ret;
+ struct wlfw_athdiag_write_req_msg_v01 *req = NULL;
+ struct wlfw_athdiag_write_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+
+ if (!priv->wlfw_clnt) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ icnss_pr_dbg("Diag write: state 0x%lx, offset %x, mem_type %x, data_len %u, data %p\n",
+ priv->state, offset, mem_type, data_len, data);
+
+ /* Request carries the data payload and is large; heap-allocate. */
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memset(&resp, 0, sizeof(resp));
+
+ req->offset = offset;
+ req->mem_type = mem_type;
+ req->data_len = data_len;
+ memcpy(req->data, data, data_len);
+
+ req_desc.max_msg_len = WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_REQ_V01;
+ req_desc.ei_array = wlfw_athdiag_write_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_RESP_V01;
+ resp_desc.ei_array = wlfw_athdiag_write_resp_msg_v01_ei;
+
+ /* Use the handle we null-checked above, not the global penv. */
+ ret = qmi_send_req_wait(priv->wlfw_clnt, &req_desc, req, sizeof(*req),
+ &resp_desc, &resp, sizeof(resp),
+ WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ icnss_pr_err("send athdiag write req failed %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ icnss_pr_err("QMI athdiag write request failed %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+out:
+ kfree(req);
+ return ret;
+}
@@ -1766,12 +2191,11 @@ static int icnss_driver_event_server_arrive(void *data)
goto out;
}
- ret = qmi_connect_to_service(penv->wlfw_clnt,
- WLFW_SERVICE_ID_V01,
- WLFW_SERVICE_VERS_V01,
- WLFW_SERVICE_INS_ID_V01);
+ ret = qmi_connect_to_service(penv->wlfw_clnt, WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01,
+ WLFW_SERVICE_INS_ID_V01);
if (ret < 0) {
- icnss_pr_err("Server not found : %d\n", ret);
+ icnss_pr_err("QMI WLAN Service not found : %d\n", ret);
goto fail;
}
@@ -1792,10 +2216,8 @@ static int icnss_driver_event_server_arrive(void *data)
goto fail;
ret = wlfw_ind_register_send_sync_msg();
- if (ret < 0) {
- icnss_pr_err("Failed to send indication message: %d\n", ret);
+ if (ret < 0)
goto err_power_on;
- }
if (!penv->msa_va) {
icnss_pr_err("Invalid MSA address\n");
@@ -1804,27 +2226,23 @@ static int icnss_driver_event_server_arrive(void *data)
}
ret = wlfw_msa_mem_info_send_sync_msg();
- if (ret < 0) {
- icnss_pr_err("Failed to send MSA info: %d\n", ret);
+ if (ret < 0)
goto err_power_on;
- }
+
ret = icnss_setup_msa_permissions(penv);
- if (ret < 0) {
- icnss_pr_err("Failed to setup msa permissions: %d\n",
- ret);
+ if (ret < 0)
goto err_power_on;
- }
+
ret = wlfw_msa_ready_send_sync_msg();
- if (ret < 0) {
- icnss_pr_err("Failed to send MSA ready : %d\n", ret);
+ if (ret < 0)
goto err_setup_msa;
- }
ret = wlfw_cap_send_sync_msg();
- if (ret < 0) {
- icnss_pr_err("Failed to get capability: %d\n", ret);
+ if (ret < 0)
goto err_setup_msa;
- }
+
+ icnss_init_vph_monitor(penv);
+
return ret;
err_setup_msa:
@@ -1846,6 +2264,10 @@ static int icnss_driver_event_server_exit(void *data)
icnss_pr_info("QMI Service Disconnected: 0x%lx\n", penv->state);
+ if (penv->adc_tm_dev)
+ qpnp_adc_tm_disable_chan_meas(penv->adc_tm_dev,
+ &penv->vph_monitor_params);
+
qmi_handle_destroy(penv->wlfw_clnt);
clear_bit(ICNSS_WLFW_QMI_CONNECTED, &penv->state);
@@ -1886,6 +2308,9 @@ static int icnss_call_driver_reinit(struct icnss_priv *priv)
if (!priv->ops || !priv->ops->reinit)
goto out;
+ if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
+ goto out;
+
icnss_hw_power_on(priv);
ret = priv->ops->reinit(&priv->pdev->dev);
@@ -1916,6 +2341,8 @@ static int icnss_driver_event_fw_ready_ind(void *data)
if (!penv)
return -ENODEV;
+ __pm_stay_awake(&penv->ws);
+
set_bit(ICNSS_FW_READY, &penv->state);
icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state);
@@ -1933,7 +2360,10 @@ static int icnss_driver_event_fw_ready_ind(void *data)
else
ret = icnss_call_driver_probe(penv);
+ __pm_relax(&penv->ws);
+
out:
+ __pm_relax(&penv->ws);
return ret;
}
@@ -1941,10 +2371,10 @@ static int icnss_driver_event_register_driver(void *data)
{
int ret = 0;
- if (penv->ops) {
- ret = -EEXIST;
- goto out;
- }
+ if (penv->ops)
+ return -EEXIST;
+
+ __pm_stay_awake(&penv->ws);
penv->ops = data;
@@ -1971,16 +2401,21 @@ static int icnss_driver_event_register_driver(void *data)
set_bit(ICNSS_DRIVER_PROBED, &penv->state);
+ __pm_relax(&penv->ws);
+
return 0;
power_off:
icnss_hw_power_off(penv);
out:
+ __pm_relax(&penv->ws);
return ret;
}
static int icnss_driver_event_unregister_driver(void *data)
{
+ __pm_stay_awake(&penv->ws);
+
if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state)) {
penv->ops = NULL;
goto out;
@@ -1996,6 +2431,7 @@ static int icnss_driver_event_unregister_driver(void *data)
icnss_hw_power_off(penv);
out:
+ __pm_relax(&penv->ws);
return 0;
}
@@ -2012,6 +2448,9 @@ static int icnss_qmi_pd_event_service_down(struct icnss_priv *priv, void *data)
if (!priv->ops || !priv->ops->shutdown)
goto out;
+ if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
+ goto out;
+
priv->ops->shutdown(&priv->pdev->dev);
out:
@@ -2071,11 +2510,15 @@ static void icnss_driver_event_work(struct work_struct *work)
penv->stats.events[event->type].processed++;
+ spin_lock_irqsave(&penv->event_lock, flags);
if (event->sync) {
event->ret = ret;
complete(&event->complete);
- } else
- kfree(event);
+ continue;
+ }
+ spin_unlock_irqrestore(&penv->event_lock, flags);
+
+ kfree(event);
spin_lock_irqsave(&penv->event_lock, flags);
}
@@ -2372,6 +2815,9 @@ int icnss_register_driver(struct icnss_driver_ops *ops)
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
true, ops);
+ if (ret == -ERESTARTSYS)
+ ret = 0;
+
out:
return ret;
}
@@ -2571,6 +3017,78 @@ int icnss_set_fw_debug_mode(bool enable_fw_log)
}
EXPORT_SYMBOL(icnss_set_fw_debug_mode);
+int icnss_athdiag_read(struct device *dev, uint32_t offset,
+ uint32_t mem_type, uint32_t data_len,
+ uint8_t *output)
+{
+ int ret = 0;
+ struct icnss_priv *priv = dev_get_drvdata(dev);
+
+ if (priv->magic != ICNSS_MAGIC) {
+ icnss_pr_err("Invalid drvdata for diag read: dev %p, data %p, magic 0x%x\n",
+ dev, priv, priv->magic);
+ return -EINVAL;
+ }
+
+ if (!output || data_len == 0
+ || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ icnss_pr_err("Invalid parameters for diag read: output %p, data_len %u\n",
+ output, data_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!test_bit(ICNSS_FW_READY, &priv->state) ||
+ !test_bit(ICNSS_POWER_ON, &priv->state)) {
+ icnss_pr_err("Invalid state for diag read: 0x%lx\n",
+ priv->state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wlfw_athdiag_read_send_sync_msg(priv, offset, mem_type,
+ data_len, output);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(icnss_athdiag_read);
+
+int icnss_athdiag_write(struct device *dev, uint32_t offset,
+ uint32_t mem_type, uint32_t data_len,
+ uint8_t *input)
+{
+ int ret = 0;
+ struct icnss_priv *priv = dev_get_drvdata(dev);
+
+ if (priv->magic != ICNSS_MAGIC) {
+ icnss_pr_err("Invalid drvdata for diag write: dev %p, data %p, magic 0x%x\n",
+ dev, priv, priv->magic);
+ return -EINVAL;
+ }
+
+ if (!input || data_len == 0
+ || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ icnss_pr_err("Invalid parameters for diag write: input %p, data_len %u\n",
+ input, data_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!test_bit(ICNSS_FW_READY, &priv->state) ||
+ !test_bit(ICNSS_POWER_ON, &priv->state)) {
+ icnss_pr_err("Invalid state for diag write: 0x%lx\n",
+ priv->state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wlfw_athdiag_write_send_sync_msg(priv, offset, mem_type,
+ data_len, input);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(icnss_athdiag_write);
+
int icnss_wlan_enable(struct icnss_wlan_enable_cfg *config,
enum icnss_driver_mode mode,
const char *host_version)
@@ -2633,14 +3151,10 @@ int icnss_wlan_enable(struct icnss_wlan_enable_cfg *config,
sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req.shadow_reg_len);
ret = wlfw_wlan_cfg_send_sync_msg(&req);
- if (ret) {
- icnss_pr_err("Failed to send cfg, ret = %d\n", ret);
+ if (ret)
goto out;
- }
skip:
ret = wlfw_wlan_mode_send_sync_msg(mode);
- if (ret)
- icnss_pr_err("Failed to send mode, ret = %d\n", ret);
out:
if (test_bit(SKIP_QMI, &quirks))
ret = 0;
@@ -2890,7 +3404,7 @@ static int icnss_get_vreg_info(struct device *dev,
reg = devm_regulator_get_optional(dev, vreg_info->name);
- if (IS_ERR(reg) == -EPROBE_DEFER) {
+ if (PTR_ERR(reg) == -EPROBE_DEFER) {
icnss_pr_err("EPROBE_DEFER for regulator: %s\n",
vreg_info->name);
ret = PTR_ERR(reg);
@@ -2910,7 +3424,6 @@ static int icnss_get_vreg_info(struct device *dev,
vreg_info->name, ret);
goto done;
}
-
}
vreg_info->reg = reg;
@@ -3201,6 +3714,12 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
case ICNSS_FW_TEST_MODE:
seq_puts(s, "FW TEST MODE");
continue;
+ case ICNSS_PM_SUSPEND:
+ seq_puts(s, "PM SUSPEND");
+ continue;
+ case ICNSS_PM_SUSPEND_NOIRQ:
+ seq_puts(s, "PM SUSPEND_NOIRQ");
+ continue;
case ICNSS_SSR_ENABLED:
seq_puts(s, "SSR ENABLED");
continue;
@@ -3300,6 +3819,19 @@ static int icnss_stats_show(struct seq_file *s, void *data)
ICNSS_STATS_DUMP(s, priv, ini_req);
ICNSS_STATS_DUMP(s, priv, ini_resp);
ICNSS_STATS_DUMP(s, priv, ini_req_err);
+ ICNSS_STATS_DUMP(s, priv, vbatt_req);
+ ICNSS_STATS_DUMP(s, priv, vbatt_resp);
+ ICNSS_STATS_DUMP(s, priv, vbatt_req_err);
+
+ seq_puts(s, "\n<------------------ PM stats ------------------->\n");
+ ICNSS_STATS_DUMP(s, priv, pm_suspend);
+ ICNSS_STATS_DUMP(s, priv, pm_suspend_err);
+ ICNSS_STATS_DUMP(s, priv, pm_resume);
+ ICNSS_STATS_DUMP(s, priv, pm_resume_err);
+ ICNSS_STATS_DUMP(s, priv, pm_suspend_noirq);
+ ICNSS_STATS_DUMP(s, priv, pm_suspend_noirq_err);
+ ICNSS_STATS_DUMP(s, priv, pm_resume_noirq);
+ ICNSS_STATS_DUMP(s, priv, pm_resume_noirq_err);
icnss_stats_show_irqs(s, priv);
@@ -3327,6 +3859,207 @@ static const struct file_operations icnss_stats_fops = {
.llseek = seq_lseek,
};
+/* debugfs reg_write read handler: print usage, and warn when FW is
+ * not yet ready (writes are rejected in that state).
+ */
+static int icnss_regwrite_show(struct seq_file *s, void *data)
+{
+ struct icnss_priv *priv = s->private;
+
+ seq_puts(s, "\nUsage: echo <mem_type> <offset> <reg_val> > <debugfs>/icnss/reg_write\n");
+
+ if (!test_bit(ICNSS_FW_READY, &priv->state))
+ seq_puts(s, "Firmware is not ready yet!, wait for FW READY\n");
+
+ return 0;
+}
+
+/* debugfs reg_write handler: parse "<mem_type> <offset> <reg_val>"
+ * from userspace and issue a 4-byte ATHDIAG write to FW memory.
+ * Requires FW READY and POWER ON. Returns @count on success.
+ */
+static ssize_t icnss_regwrite_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct icnss_priv *priv =
+ ((struct seq_file *)fp->private_data)->private;
+ char buf[64];
+ char *sptr, *token;
+ unsigned int len = 0;
+ uint32_t reg_offset, mem_type, reg_val;
+ const char *delim = " ";
+ int ret = 0;
+
+ if (!test_bit(ICNSS_FW_READY, &priv->state) ||
+ !test_bit(ICNSS_POWER_ON, &priv->state))
+ return -EINVAL;
+
+ /* Copy at most 63 bytes; longer input is silently truncated. */
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ /* Token 1: memory type. */
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &mem_type))
+ return -EINVAL;
+
+ /* Token 2: register offset. */
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_offset))
+ return -EINVAL;
+
+ /* Token 3: 32-bit value to write. */
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_val))
+ return -EINVAL;
+
+ ret = wlfw_athdiag_write_send_sync_msg(priv, reg_offset, mem_type,
+ sizeof(uint32_t),
+ (uint8_t *)&reg_val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+/* Open reg_write through seq_file so read() shows the usage text. */
+static int icnss_regwrite_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, icnss_regwrite_show, inode->i_private);
+}
+
+/* File operations for <debugfs>/icnss/reg_write. */
+static const struct file_operations icnss_regwrite_fops = {
+ .read = seq_read,
+ .write = icnss_regwrite_write,
+ .open = icnss_regwrite_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+/* debugfs reg_read read handler: print usage when no read is pending;
+ * otherwise hex-dump the buffer captured by the preceding write and
+ * free it (one-shot: the next read shows usage again).
+ */
+static int icnss_regread_show(struct seq_file *s, void *data)
+{
+ struct icnss_priv *priv = s->private;
+
+ if (!priv->diag_reg_read_buf) {
+ seq_puts(s, "Usage: echo <mem_type> <offset> <data_len> > <debugfs>/icnss/reg_read\n");
+
+ if (!test_bit(ICNSS_FW_READY, &priv->state))
+ seq_puts(s, "Firmware is not ready yet!, wait for FW READY\n");
+
+ return 0;
+ }
+
+ seq_printf(s, "REGREAD: Addr 0x%x Type 0x%x Length 0x%x\n",
+ priv->diag_reg_read_addr, priv->diag_reg_read_mem_type,
+ priv->diag_reg_read_len);
+
+ seq_hex_dump(s, "", DUMP_PREFIX_OFFSET, 32, 4, priv->diag_reg_read_buf,
+ priv->diag_reg_read_len, false);
+
+ /* Consume the pending result so the next show starts clean. */
+ priv->diag_reg_read_len = 0;
+ kfree(priv->diag_reg_read_buf);
+ priv->diag_reg_read_buf = NULL;
+
+ return 0;
+}
+
+/* debugfs reg_read handler: parse "<mem_type> <offset> <data_len>",
+ * fetch @data_len bytes from FW memory via ATHDIAG, and stash the
+ * result in priv->diag_reg_read_* for the next show(). Requires FW
+ * READY and POWER ON. Returns @count on success.
+ */
+static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct icnss_priv *priv =
+ ((struct seq_file *)fp->private_data)->private;
+ char buf[64];
+ char *sptr, *token;
+ unsigned int len = 0;
+ uint32_t reg_offset, mem_type;
+ uint32_t data_len = 0;
+ uint8_t *reg_buf = NULL;
+ const char *delim = " ";
+ int ret = 0;
+
+ if (!test_bit(ICNSS_FW_READY, &priv->state) ||
+ !test_bit(ICNSS_POWER_ON, &priv->state))
+ return -EINVAL;
+
+ /* Copy at most 63 bytes; longer input is silently truncated. */
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ /* Token 1: memory type. */
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &mem_type))
+ return -EINVAL;
+
+ /* Token 2: register offset. */
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (!sptr)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_offset))
+ return -EINVAL;
+
+ /* Token 3: number of bytes to read, bounded by the QMI limit. */
+ token = strsep(&sptr, delim);
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &data_len))
+ return -EINVAL;
+
+ if (data_len == 0 ||
+ data_len > QMI_WLFW_MAX_DATA_SIZE_V01)
+ return -EINVAL;
+
+ reg_buf = kzalloc(data_len, GFP_KERNEL);
+ if (!reg_buf)
+ return -ENOMEM;
+
+ ret = wlfw_athdiag_read_send_sync_msg(priv, reg_offset,
+ mem_type, data_len,
+ reg_buf);
+ if (ret) {
+ kfree(reg_buf);
+ return ret;
+ }
+
+ /* Ownership of reg_buf passes to priv; show() frees it. */
+ priv->diag_reg_read_addr = reg_offset;
+ priv->diag_reg_read_mem_type = mem_type;
+ priv->diag_reg_read_len = data_len;
+ priv->diag_reg_read_buf = reg_buf;
+
+ return count;
+}
+
+/* Open reg_read through seq_file so read() can dump the result. */
+static int icnss_regread_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, icnss_regread_show, inode->i_private);
+}
+
+/* File operations for <debugfs>/icnss/reg_read. */
+static const struct file_operations icnss_regread_fops = {
+ .read = seq_read,
+ .write = icnss_regread_write,
+ .open = icnss_regread_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
static int icnss_debugfs_create(struct icnss_priv *priv)
{
int ret = 0;
@@ -3347,6 +4080,10 @@ static int icnss_debugfs_create(struct icnss_priv *priv)
debugfs_create_file("stats", 0644, root_dentry, priv,
&icnss_stats_fops);
+ debugfs_create_file("reg_read", 0600, root_dentry, priv,
+ &icnss_regread_fops);
+ debugfs_create_file("reg_write", 0644, root_dentry, priv,
+ &icnss_regwrite_fops);
out:
return ret;
@@ -3357,6 +4094,44 @@ static void icnss_debugfs_destroy(struct icnss_priv *priv)
debugfs_remove_recursive(priv->root_dentry);
}
+/* Look up the QPNP ADC-TM and VADC handles used for VBATT (phone
+ * power) monitoring. Returns -EPROBE_DEFER when either provider is
+ * not ready yet so probe can retry; any other error just disables
+ * VBATT monitoring for this boot.
+ */
+static int icnss_get_vbatt_info(struct icnss_priv *priv)
+{
+ struct qpnp_adc_tm_chip *adc_tm_dev = NULL;
+ struct qpnp_vadc_chip *vadc_dev = NULL;
+ int ret = 0;
+
+ adc_tm_dev = qpnp_get_adc_tm(&priv->pdev->dev, "icnss");
+ /* NOTE(review): PTR_ERR() is evaluated before IS_ERR(); on a valid
+ * pointer this compares the pointer value itself to -EPROBE_DEFER.
+ * Harmless in practice, but IS_ERR() first would be cleaner.
+ */
+ if (PTR_ERR(adc_tm_dev) == -EPROBE_DEFER) {
+ icnss_pr_err("adc_tm_dev probe defer\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (IS_ERR(adc_tm_dev)) {
+ ret = PTR_ERR(adc_tm_dev);
+ icnss_pr_err("Not able to get ADC dev, VBATT monitoring is disabled: %d\n",
+ ret);
+ return ret;
+ }
+
+ vadc_dev = qpnp_get_vadc(&priv->pdev->dev, "icnss");
+ if (PTR_ERR(vadc_dev) == -EPROBE_DEFER) {
+ icnss_pr_err("vadc_dev probe defer\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (IS_ERR(vadc_dev)) {
+ ret = PTR_ERR(vadc_dev);
+ icnss_pr_err("Not able to get VADC dev, VBATT monitoring is disabled: %d\n",
+ ret);
+ return ret;
+ }
+
+ priv->adc_tm_dev = adc_tm_dev;
+ priv->vadc_dev = vadc_dev;
+
+ return 0;
+}
+
static int icnss_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -3370,14 +4145,21 @@ static int icnss_probe(struct platform_device *pdev)
return -EEXIST;
}
+ icnss_pr_dbg("Platform driver probe\n");
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->magic = ICNSS_MAGIC;
dev_set_drvdata(dev, priv);
priv->pdev = pdev;
+ ret = icnss_get_vbatt_info(priv);
+ if (ret == -EPROBE_DEFER)
+ goto out;
+
memcpy(priv->vreg_info, icnss_vreg_info, sizeof(icnss_vreg_info));
for (i = 0; i < ICNSS_VREG_INFO_SIZE; i++) {
ret = icnss_get_vreg_info(dev, &priv->vreg_info[i]);
@@ -3469,9 +4251,8 @@ static int icnss_probe(struct platform_device *pdev)
} else {
priv->smmu_iova_start = res->start;
priv->smmu_iova_len = resource_size(res);
- icnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: %zu\n",
- &priv->smmu_iova_start,
- priv->smmu_iova_len);
+ icnss_pr_dbg("SMMU IOVA start: %pa, len: %zu\n",
+ &priv->smmu_iova_start, priv->smmu_iova_len);
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM,
@@ -3481,7 +4262,7 @@ static int icnss_probe(struct platform_device *pdev)
} else {
priv->smmu_iova_ipa_start = res->start;
priv->smmu_iova_ipa_len = resource_size(res);
- icnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: %zu\n",
+ icnss_pr_dbg("SMMU IOVA IPA start: %pa, len: %zu\n",
&priv->smmu_iova_ipa_start,
priv->smmu_iova_ipa_len);
}
@@ -3502,6 +4283,8 @@ static int icnss_probe(struct platform_device *pdev)
spin_lock_init(&priv->event_lock);
spin_lock_init(&priv->on_off_lock);
+ wakeup_source_init(&priv->ws, "icnss_ws");
+
priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
if (!priv->event_wq) {
icnss_pr_err("Workqueue creation failed\n");
@@ -3563,6 +4346,8 @@ static int icnss_remove(struct platform_device *pdev)
icnss_bw_deinit(penv);
+ wakeup_source_trash(&penv->ws);
+
icnss_hw_power_off(penv);
dev_set_drvdata(&pdev->dev, NULL);
@@ -3619,6 +4404,131 @@ out:
return ret;
}
+#ifdef CONFIG_PM_SLEEP
+/* System-sleep suspend hook: forward to the registered WLAN driver's
+ * pm_suspend op (only when probed) and account the outcome in stats.
+ */
+static int icnss_pm_suspend(struct device *dev)
+{
+ struct icnss_priv *priv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ /* drvdata may be absent if probe never completed; check the
+ * pointer before reading magic to avoid a NULL dereference.
+ */
+ if (!priv || priv->magic != ICNSS_MAGIC) {
+ icnss_pr_err("Invalid drvdata for pm suspend: dev %p, data %p, magic 0x%x\n",
+ dev, priv, priv ? priv->magic : 0);
+ return -EINVAL;
+ }
+
+ icnss_pr_dbg("PM Suspend, state: 0x%lx\n", priv->state);
+
+ if (!priv->ops || !priv->ops->pm_suspend ||
+ !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+ goto out;
+
+ ret = priv->ops->pm_suspend(dev);
+
+out:
+ if (ret == 0) {
+ priv->stats.pm_suspend++;
+ set_bit(ICNSS_PM_SUSPEND, &priv->state);
+ } else {
+ priv->stats.pm_suspend_err++;
+ }
+ return ret;
+}
+
+/* System-sleep resume hook: forward to the registered WLAN driver's
+ * pm_resume op (only when probed) and account the outcome in stats.
+ */
+static int icnss_pm_resume(struct device *dev)
+{
+ struct icnss_priv *priv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ /* drvdata may be absent if probe never completed; check the
+ * pointer before reading magic to avoid a NULL dereference.
+ */
+ if (!priv || priv->magic != ICNSS_MAGIC) {
+ icnss_pr_err("Invalid drvdata for pm resume: dev %p, data %p, magic 0x%x\n",
+ dev, priv, priv ? priv->magic : 0);
+ return -EINVAL;
+ }
+
+ icnss_pr_dbg("PM resume, state: 0x%lx\n", priv->state);
+
+ if (!priv->ops || !priv->ops->pm_resume ||
+ !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+ goto out;
+
+ ret = priv->ops->pm_resume(dev);
+
+out:
+ if (ret == 0) {
+ priv->stats.pm_resume++;
+ clear_bit(ICNSS_PM_SUSPEND, &priv->state);
+ } else {
+ priv->stats.pm_resume_err++;
+ }
+ return ret;
+}
+
+/* Late (noirq) suspend hook: forward to the registered WLAN driver's
+ * suspend_noirq op (only when probed) and account the outcome.
+ */
+static int icnss_pm_suspend_noirq(struct device *dev)
+{
+ struct icnss_priv *priv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ /* drvdata may be absent if probe never completed; check the
+ * pointer before reading magic to avoid a NULL dereference.
+ */
+ if (!priv || priv->magic != ICNSS_MAGIC) {
+ icnss_pr_err("Invalid drvdata for pm suspend_noirq: dev %p, data %p, magic 0x%x\n",
+ dev, priv, priv ? priv->magic : 0);
+ return -EINVAL;
+ }
+
+ icnss_pr_dbg("PM suspend_noirq, state: 0x%lx\n", priv->state);
+
+ if (!priv->ops || !priv->ops->suspend_noirq ||
+ !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+ goto out;
+
+ ret = priv->ops->suspend_noirq(dev);
+
+out:
+ if (ret == 0) {
+ priv->stats.pm_suspend_noirq++;
+ set_bit(ICNSS_PM_SUSPEND_NOIRQ, &priv->state);
+ } else {
+ priv->stats.pm_suspend_noirq_err++;
+ }
+ return ret;
+}
+
+/* Early (noirq) resume hook: forward to the registered WLAN driver's
+ * resume_noirq op (only when probed) and account the outcome.
+ */
+static int icnss_pm_resume_noirq(struct device *dev)
+{
+ struct icnss_priv *priv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ /* drvdata may be absent if probe never completed; check the
+ * pointer before reading magic to avoid a NULL dereference.
+ */
+ if (!priv || priv->magic != ICNSS_MAGIC) {
+ icnss_pr_err("Invalid drvdata for pm resume_noirq: dev %p, data %p, magic 0x%x\n",
+ dev, priv, priv ? priv->magic : 0);
+ return -EINVAL;
+ }
+
+ icnss_pr_dbg("PM resume_noirq, state: 0x%lx\n", priv->state);
+
+ if (!priv->ops || !priv->ops->resume_noirq ||
+ !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
+ goto out;
+
+ ret = priv->ops->resume_noirq(dev);
+
+out:
+ if (ret == 0) {
+ priv->stats.pm_resume_noirq++;
+ clear_bit(ICNSS_PM_SUSPEND_NOIRQ, &priv->state);
+ } else {
+ priv->stats.pm_resume_noirq_err++;
+ }
+ return ret;
+}
+#endif
+
+/* Wire the suspend/resume (and noirq) callbacks into driver-model PM;
+ * these macros expand to nothing when CONFIG_PM_SLEEP is disabled.
+ */
+static const struct dev_pm_ops icnss_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(icnss_pm_suspend,
+ icnss_pm_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(icnss_pm_suspend_noirq,
+ icnss_pm_resume_noirq)
+};
+
static const struct of_device_id icnss_dt_match[] = {
{.compatible = "qcom,icnss"},
{}
@@ -3633,11 +4543,32 @@ static struct platform_driver icnss_driver = {
.resume = icnss_resume,
.driver = {
.name = "icnss",
+ .pm = &icnss_pm_ops,
.owner = THIS_MODULE,
.of_match_table = icnss_dt_match,
},
};
+#ifdef CONFIG_ICNSS_DEBUG
+/*
+ * Create the "long" IPC logging context used for verbose register traces
+ * (CONFIG_ICNSS_DEBUG builds only).  A failure is logged but non-fatal:
+ * the driver continues without long logging.
+ */
+static void __init icnss_ipc_log_long_context_init(void)
+{
+ icnss_ipc_log_long_context = ipc_log_context_create(NUM_REG_LOG_PAGES,
+ "icnss_long", 0);
+ if (!icnss_ipc_log_long_context)
+ icnss_pr_err("Unable to create register log context\n");
+}
+
+/* Tear down the long IPC log context and NULL the pointer so a stale
+ * handle cannot be used after module exit.
+ */
+static void __exit icnss_ipc_log_long_context_destroy(void)
+{
+ ipc_log_context_destroy(icnss_ipc_log_long_context);
+ icnss_ipc_log_long_context = NULL;
+}
+#else
+
+static void __init icnss_ipc_log_long_context_init(void) { }
+static void __exit icnss_ipc_log_long_context_destroy(void) { }
+#endif
+
static int __init icnss_initialize(void)
{
icnss_ipc_log_context = ipc_log_context_create(NUM_LOG_PAGES,
@@ -3645,6 +4576,8 @@ static int __init icnss_initialize(void)
if (!icnss_ipc_log_context)
icnss_pr_err("Unable to create log context\n");
+ icnss_ipc_log_long_context_init();
+
return platform_driver_register(&icnss_driver);
}
@@ -3653,6 +4586,8 @@ static void __exit icnss_exit(void)
platform_driver_unregister(&icnss_driver);
ipc_log_context_destroy(icnss_ipc_log_context);
icnss_ipc_log_context = NULL;
+
+ icnss_ipc_log_long_context_destroy();
}
diff --git a/drivers/soc/qcom/icnss_utils.c b/drivers/soc/qcom/icnss_utils.c
new file mode 100644
index 000000000000..5e187d5df8b3
--- /dev/null
+++ b/drivers/soc/qcom/icnss_utils.c
@@ -0,0 +1,132 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define ICNSS_MAX_CH_NUM 45
+
+static DEFINE_MUTEX(unsafe_channel_list_lock);
+static DEFINE_MUTEX(dfs_nol_info_lock);
+
+static struct icnss_unsafe_channel_list {
+ u16 unsafe_ch_count;
+ u16 unsafe_ch_list[ICNSS_MAX_CH_NUM];
+} unsafe_channel_list;
+
+static struct icnss_dfs_nol_info {
+ void *dfs_nol_info;
+ u16 dfs_nol_info_len;
+} dfs_nol_info;
+
+/*
+ * icnss_set_wlan_unsafe_channel() - cache the WLAN unsafe-channel list.
+ * @unsafe_ch_list: caller's array of channel numbers (must be non-NULL)
+ * @ch_count: number of valid entries; capped at ICNSS_MAX_CH_NUM
+ *
+ * Copies the caller's list into the module-global unsafe_channel_list
+ * under unsafe_channel_list_lock.  A ch_count of 0 is valid and simply
+ * clears the count (the stale array contents are left in place but are
+ * never read past unsafe_ch_count).  Returns 0 or -EINVAL.
+ */
+int icnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
+{
+ mutex_lock(&unsafe_channel_list_lock);
+ if ((!unsafe_ch_list) || (ch_count > ICNSS_MAX_CH_NUM)) {
+ mutex_unlock(&unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ unsafe_channel_list.unsafe_ch_count = ch_count;
+
+ if (ch_count != 0) {
+ memcpy(
+ (char *)unsafe_channel_list.unsafe_ch_list,
+ (char *)unsafe_ch_list, ch_count * sizeof(u16));
+ }
+ mutex_unlock(&unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(icnss_set_wlan_unsafe_channel);
+
+/*
+ * icnss_get_wlan_unsafe_channel() - read back the cached unsafe-channel list.
+ * @unsafe_ch_list: output array for channel numbers
+ * @ch_count: output for the number of entries copied
+ * @buf_len: capacity of @unsafe_ch_list in BYTES (compared against
+ *           count * sizeof(u16))
+ *
+ * Copies the module-global list under unsafe_channel_list_lock.
+ * Returns 0 on success, -EINVAL on NULL output pointers, or -ENOMEM when
+ * the caller's buffer is too small to hold the whole list.
+ */
+int icnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list,
+ u16 *ch_count, u16 buf_len)
+{
+ mutex_lock(&unsafe_channel_list_lock);
+ if (!unsafe_ch_list || !ch_count) {
+ mutex_unlock(&unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ if (buf_len < (unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
+ mutex_unlock(&unsafe_channel_list_lock);
+ return -ENOMEM;
+ }
+
+ *ch_count = unsafe_channel_list.unsafe_ch_count;
+ memcpy(
+ (char *)unsafe_ch_list,
+ (char *)unsafe_channel_list.unsafe_ch_list,
+ unsafe_channel_list.unsafe_ch_count * sizeof(u16));
+ mutex_unlock(&unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(icnss_get_wlan_unsafe_channel);
+
+/*
+ * icnss_wlan_set_dfs_nol() - store an opaque DFS NOL blob.
+ * @info: caller's buffer (treated as opaque bytes)
+ * @info_len: blob length; must be non-zero
+ *
+ * Allocates a private copy of @info, then atomically (under
+ * dfs_nol_info_lock) frees any previously stored blob and installs the
+ * new one.  The copy-before-free ordering means the old blob survives an
+ * allocation failure.  Returns 0, -EINVAL, or -ENOMEM.
+ */
+int icnss_wlan_set_dfs_nol(const void *info, u16 info_len)
+{
+ void *temp;
+ struct icnss_dfs_nol_info *dfs_info;
+
+ mutex_lock(&dfs_nol_info_lock);
+ if (!info || !info_len) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -EINVAL;
+ }
+
+ temp = kmalloc(info_len, GFP_KERNEL);
+ if (!temp) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -ENOMEM;
+ }
+
+ memcpy(temp, info, info_len);
+ dfs_info = &dfs_nol_info;
+ /* kfree(NULL) is a no-op, so the first call needs no guard. */
+ kfree(dfs_info->dfs_nol_info);
+
+ dfs_info->dfs_nol_info = temp;
+ dfs_info->dfs_nol_info_len = info_len;
+ mutex_unlock(&dfs_nol_info_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(icnss_wlan_set_dfs_nol);
+
+/*
+ * icnss_wlan_get_dfs_nol() - copy out the stored DFS NOL blob.
+ * @info: destination buffer
+ * @info_len: destination capacity in bytes
+ *
+ * Copies min(info_len, stored length) bytes under dfs_nol_info_lock.
+ * Returns the number of bytes copied (>0) on success — note the positive
+ * return, unlike the other 0-on-success APIs in this file — or -EINVAL /
+ * -ENOENT (nothing stored yet) on error.
+ */
+int icnss_wlan_get_dfs_nol(void *info, u16 info_len)
+{
+ int len;
+ struct icnss_dfs_nol_info *dfs_info;
+
+ mutex_lock(&dfs_nol_info_lock);
+ if (!info || !info_len) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -EINVAL;
+ }
+
+ dfs_info = &dfs_nol_info;
+
+ if (dfs_info->dfs_nol_info == NULL ||
+ dfs_info->dfs_nol_info_len == 0) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -ENOENT;
+ }
+
+ len = min(info_len, dfs_info->dfs_nol_info_len);
+
+ memcpy(info, dfs_info->dfs_nol_info, len);
+ mutex_unlock(&dfs_nol_info_lock);
+
+ return len;
+}
+EXPORT_SYMBOL(icnss_wlan_get_dfs_nol);
diff --git a/drivers/soc/qcom/irq-helper.c b/drivers/soc/qcom/irq-helper.c
index 7bb371f7991e..370801291230 100644
--- a/drivers/soc/qcom/irq-helper.c
+++ b/drivers/soc/qcom/irq-helper.c
@@ -78,10 +78,12 @@ IRQ_HELPER_ATTR(irq_blacklist_on, 0444, show_deploy, NULL);
static struct irq_helper *irq_h;
+/* Do not call this API in an atomic context */
int irq_blacklist_on(void)
{
bool flag = false;
+ might_sleep();
if (!irq_h) {
pr_err("%s: init function is not called", __func__);
return -EPERM;
@@ -103,10 +105,12 @@ int irq_blacklist_on(void)
}
EXPORT_SYMBOL(irq_blacklist_on);
+/* Do not call this API in an atomic context */
int irq_blacklist_off(void)
{
bool flag = false;
+ might_sleep();
if (!irq_h) {
pr_err("%s: init function is not called", __func__);
return -EPERM;
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
index f44f6f97ecdd..e1e91f56526d 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.c
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -203,6 +203,7 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
int dest_vmids[1] = {VMID_HLOS};
int dest_perms[1] = {PERM_READ|PERM_WRITE};
+ mutex_lock(&memsh_drv->mem_share);
switch (code) {
case SUBSYS_BEFORE_SHUTDOWN:
@@ -264,6 +265,7 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
break;
}
+ mutex_unlock(&memsh_drv->mem_share);
return NOTIFY_DONE;
}
diff --git a/drivers/soc/qcom/remoteqdss.c b/drivers/soc/qcom/remoteqdss.c
index e66ca587adca..5e2a5babdcc8 100644
--- a/drivers/soc/qcom/remoteqdss.c
+++ b/drivers/soc/qcom/remoteqdss.c
@@ -28,8 +28,8 @@ static struct dentry *remoteqdss_dir;
#define REMOTEQDSS_ERR(fmt, ...) \
pr_debug("%s: " fmt, __func__, ## __VA_ARGS__)
-#define REMOTEQDSS_ERR_CALLER(fmt, ...) \
- pr_debug("%pf: " fmt, __builtin_return_address(1), ## __VA_ARGS__)
+#define REMOTEQDSS_ERR_CALLER(fmt, caller, ...) \
+ pr_debug("%pf: " fmt, caller, ## __VA_ARGS__)
struct qdss_msg_translation {
u64 val;
@@ -97,7 +97,7 @@ struct remoteqdss_query_swentity_fmt {
/* msgs is a null terminated array */
static void remoteqdss_err_translation(struct qdss_msg_translation *msgs,
- u64 err)
+ u64 err, const void *caller)
{
static DEFINE_RATELIMIT_STATE(rl, 5 * HZ, 2);
struct qdss_msg_translation *msg;
@@ -110,12 +110,13 @@ static void remoteqdss_err_translation(struct qdss_msg_translation *msgs,
for (msg = msgs; msg->msg; msg++) {
if (err == msg->val && __ratelimit(&rl)) {
- REMOTEQDSS_ERR_CALLER("0x%llx: %s\n", err, msg->msg);
+ REMOTEQDSS_ERR_CALLER("0x%llx: %s\n", caller, err,
+ msg->msg);
return;
}
}
- REMOTEQDSS_ERR_CALLER("Error 0x%llx\n", err);
+ REMOTEQDSS_ERR_CALLER("Error 0x%llx\n", caller, err);
}
/* Shared across all remoteqdss scm functions */
@@ -160,7 +161,7 @@ static void free_remoteqdss_data(struct remoteqdss_data *data)
}
static int remoteqdss_do_scm_call(struct scm_desc *desc,
- dma_addr_t addr, size_t size)
+ dma_addr_t addr, size_t size, const void *caller)
{
int ret;
@@ -175,7 +176,7 @@ static int remoteqdss_do_scm_call(struct scm_desc *desc,
if (ret)
return ret;
- remoteqdss_err_translation(remoteqdss_scm_msgs, desc->ret[0]);
+ remoteqdss_err_translation(remoteqdss_scm_msgs, desc->ret[0], caller);
ret = desc->ret[0] ? -EINVAL : 0;
return ret;
}
@@ -194,7 +195,8 @@ static int remoteqdss_scm_query_swtrace(void *priv, u64 *val)
fmt->subsys_id = data->id;
fmt->cmd_id = CMD_ID_QUERY_SWTRACE_STATE;
- ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+ __builtin_return_address(0));
*val = desc.ret[1];
dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
@@ -216,7 +218,8 @@ static int remoteqdss_scm_filter_swtrace(void *priv, u64 val)
fmt->h.cmd_id = CMD_ID_FILTER_SWTRACE_STATE;
fmt->state = (uint32_t)val;
- ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+ __builtin_return_address(0));
dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
return ret;
@@ -241,7 +244,8 @@ static int remoteqdss_scm_query_tag(void *priv, u64 *val)
fmt->subsys_id = data->id;
fmt->cmd_id = CMD_ID_QUERY_SWEVENT_TAG;
- ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+ __builtin_return_address(0));
*val = desc.ret[1];
dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
@@ -268,7 +272,8 @@ static int remoteqdss_scm_query_swevent(void *priv, u64 *val)
fmt->h.cmd_id = CMD_ID_QUERY_SWEVENT;
fmt->event_group = data->sw_event_group;
- ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+ __builtin_return_address(0));
*val = desc.ret[1];
dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
@@ -291,7 +296,8 @@ static int remoteqdss_scm_filter_swevent(void *priv, u64 val)
fmt->event_group = data->sw_event_group;
fmt->event_mask = (uint32_t)val;
- ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+ __builtin_return_address(0));
dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
return ret;
@@ -317,7 +323,8 @@ static int remoteqdss_scm_query_swentity(void *priv, u64 *val)
fmt->h.cmd_id = CMD_ID_QUERY_SWENTITY;
fmt->entity_group = data->sw_entity_group;
- ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+ __builtin_return_address(0));
*val = desc.ret[1];
dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
@@ -340,7 +347,8 @@ static int remoteqdss_scm_filter_swentity(void *priv, u64 val)
fmt->entity_group = data->sw_entity_group;
fmt->entity_mask = (uint32_t)val;
- ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt));
+ ret = remoteqdss_do_scm_call(&desc, addr, sizeof(*fmt),
+ __builtin_return_address(0));
dma_free_coherent(&dma_dev, sizeof(*fmt), fmt, addr);
return ret;
diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c
index 5eaf2db32d21..03a1591e5b09 100644
--- a/drivers/soc/qcom/rpm-smd.c
+++ b/drivers/soc/qcom/rpm-smd.c
@@ -795,45 +795,23 @@ static int msm_rpm_read_sleep_ack(void)
{
int ret;
char buf[MAX_ERR_BUFFER_SIZE] = {0};
- uint32_t msg_id;
if (glink_enabled)
ret = msm_rpm_glink_rx_poll(glink_data->glink_handle);
else {
ret = msm_rpm_read_smd_data(buf);
- if (!ret) {
- /*
- * Mimic Glink behavior to ensure that the
- * data is read and the msg is removed from
- * the wait list. We should have gotten here
- * only when there are no drivers waiting on
- * ACKs. msm_rpm_get_entry_from_msg_id()
- * return non-NULL only then.
- */
- msg_id = msm_rpm_get_msg_id_from_ack(buf);
- msm_rpm_process_ack(msg_id, 0);
+ if (!ret)
ret = smd_is_pkt_avail(msm_rpm_data.ch_info);
- }
}
return ret;
}
-static void msm_rpm_flush_noack_messages(void)
-{
- while (!list_empty(&msm_rpm_wait_list)) {
- if (!msm_rpm_read_sleep_ack())
- break;
- }
-}
-
static int msm_rpm_flush_requests(bool print)
{
struct rb_node *t;
int ret;
int count = 0;
- msm_rpm_flush_noack_messages();
-
for (t = rb_first(&tr_root); t; t = rb_next(t)) {
struct slp_buf *s = rb_entry(t, struct slp_buf, node);
@@ -1102,18 +1080,14 @@ static void msm_rpm_notify(void *data, unsigned event)
bool msm_rpm_waiting_for_ack(void)
{
- bool ret = false;
+ bool ret;
unsigned long flags;
- struct msm_rpm_wait_data *elem = NULL;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
- elem = list_first_entry_or_null(&msm_rpm_wait_list,
- struct msm_rpm_wait_data, list);
- if (elem)
- ret = !elem->delete_on_ack;
+ ret = list_empty(&msm_rpm_wait_list);
spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
- return ret;
+ return !ret;
}
static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 23e32214756a..ea4f557fcd70 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -538,6 +538,9 @@ static struct msm_soc_info cpu_of_id[] = {
/* falcon ID */
[317] = {MSM_CPU_FALCON, "MSMFALCON"},
+ /* triton ID */
+ [318] = {MSM_CPU_TRITON, "MSMTRITON"},
+
/* Uninitialized IDs are not known to run Linux.
MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
considered as unknown CPU. */
@@ -1207,6 +1210,10 @@ static void * __init setup_dummy_socinfo(void)
dummy_socinfo.id = 317;
strlcpy(dummy_socinfo.build_id, "msmfalcon - ",
sizeof(dummy_socinfo.build_id));
+ } else if (early_machine_is_msmtriton()) {
+ dummy_socinfo.id = 318;
+ strlcpy(dummy_socinfo.build_id, "msmtriton - ",
+ sizeof(dummy_socinfo.build_id));
} else if (early_machine_is_apqcobalt()) {
dummy_socinfo.id = 319;
strlcpy(dummy_socinfo.build_id, "apqcobalt - ",
diff --git a/drivers/soc/qcom/spss_utils.c b/drivers/soc/qcom/spss_utils.c
new file mode 100644
index 000000000000..9f799dfd9003
--- /dev/null
+++ b/drivers/soc/qcom/spss_utils.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Secure-Processor-SubSystem (SPSS) utilities.
+ *
+ * This driver provides utilities for the Secure Processor (SP).
+ *
+ * The SP daemon needs to load different SPSS images based on:
+ *
+ * 1. Test/Production key used to sign the SPSS image (read fuse).
+ * 2. SPSS HW version (selected via Device Tree).
+ *
+ */
+
+#define pr_fmt(fmt) "spss_utils [%s]: " fmt, __func__
+
+#include <linux/kernel.h> /* min() */
+#include <linux/module.h> /* MODULE_LICENSE */
+#include <linux/device.h> /* class_create() */
+#include <linux/slab.h> /* kzalloc() */
+#include <linux/fs.h> /* file_operations */
+#include <linux/cdev.h> /* cdev_add() */
+#include <linux/errno.h> /* EINVAL, ETIMEDOUT */
+#include <linux/printk.h> /* pr_err() */
+#include <linux/bitops.h> /* BIT(x) */
+#include <linux/platform_device.h> /* platform_driver_register() */
+#include <linux/of.h> /* of_property_count_strings() */
+#include <linux/io.h> /* ioremap_nocache() */
+
+#include <soc/qcom/subsystem_restart.h>
+
+/* driver name */
+#define DEVICE_NAME "spss-utils"
+
+static bool is_test_fuse_set;
+static const char *test_firmware_name;
+static const char *prod_firmware_name;
+static const char *firmware_name;
+static struct device *spss_dev;
+
+/*==========================================================================*/
+/* Device Sysfs */
+/*==========================================================================*/
+
+/*
+ * sysfs read handler for /sys/.../firmware_name.
+ * Prints the firmware name chosen at probe time, or "unknown" if probe
+ * has not yet selected one.  Returns the snprintf byte count.
+ */
+static ssize_t firmware_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+
+ /* Defensive: sysfs core should never pass NULLs here. */
+ if (!dev || !attr || !buf) {
+ pr_err("invalid param.\n");
+ return -EINVAL;
+ }
+
+ if (firmware_name == NULL)
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", "unknown");
+ else
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", firmware_name);
+
+ return ret;
+}
+
+static ssize_t firmware_name_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ pr_err("set firmware name is not allowed.\n");
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(firmware_name, 0444,
+ firmware_name_show, firmware_name_store);
+
+/*
+ * sysfs read handler for /sys/.../test_fuse_state.
+ * Reports "test" or "prod" based on the fuse bit sampled in
+ * spss_parse_dt().  Returns the snprintf byte count.
+ */
+static ssize_t test_fuse_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+
+ /* Defensive: sysfs core should never pass NULLs here. */
+ if (!dev || !attr || !buf) {
+ pr_err("invalid param.\n");
+ return -EINVAL;
+ }
+
+ if (is_test_fuse_set)
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", "test");
+ else
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", "prod");
+
+ return ret;
+}
+
+static ssize_t test_fuse_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ pr_err("set test fuse state is not allowed.\n");
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(test_fuse_state, 0444,
+ test_fuse_state_show, test_fuse_state_store);
+
+/*
+ * spss_create_sysfs() - publish the read-only firmware_name and
+ * test_fuse_state attributes on @dev.
+ *
+ * Returns 0 on success or the device_create_file() error.  On partial
+ * failure the already-created attribute is removed so the device is left
+ * with either both files or neither (the original leaked the first file
+ * when the second creation failed).
+ */
+static int spss_create_sysfs(struct device *dev)
+{
+ int ret;
+
+ ret = device_create_file(dev, &dev_attr_firmware_name);
+ if (ret < 0) {
+ pr_err("failed to create sysfs file for firmware_name.\n");
+ return ret;
+ }
+
+ ret = device_create_file(dev, &dev_attr_test_fuse_state);
+ if (ret < 0) {
+ pr_err("failed to create sysfs file for test_fuse_state.\n");
+ /* Undo the first attribute so we fail cleanly. */
+ device_remove_file(dev, &dev_attr_firmware_name);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*==========================================================================*/
+/* Device Tree */
+/*==========================================================================*/
+
+/**
+ * spss_parse_dt() - Parse Device Tree info.
+ */
+/**
+ * spss_parse_dt() - Parse Device Tree info.
+ *
+ * Reads the test/prod firmware names and the fuse register address plus
+ * bit index from @node, then maps the fuse register once, samples it with
+ * readl_relaxed(), and latches is_test_fuse_set if the configured bit is
+ * set.  The mapping is released before returning.
+ *
+ * Returns 0 on success; -EFAULT if any required property is missing or
+ * the fuse register cannot be mapped.  (NOTE(review): -EFAULT for DT
+ * parse failures is unusual — -EINVAL/-ENODEV would be more conventional,
+ * but the existing return contract is preserved here.)
+ */
+static int spss_parse_dt(struct device_node *node)
+{
+ int ret;
+ u32 spss_fuse_addr = 0;
+ u32 spss_fuse_bit = 0;
+ u32 spss_fuse_mask = 0;
+ void __iomem *spss_fuse_reg = NULL;
+ u32 val = 0;
+
+ ret = of_property_read_string(node, "qcom,spss-test-firmware-name",
+ &test_firmware_name);
+ if (ret < 0) {
+ pr_err("can't get test fw name.\n");
+ return -EFAULT;
+ }
+
+ ret = of_property_read_string(node, "qcom,spss-prod-firmware-name",
+ &prod_firmware_name);
+ if (ret < 0) {
+ pr_err("can't get prod fw name.\n");
+ return -EFAULT;
+ }
+
+ ret = of_property_read_u32(node, "qcom,spss-fuse-addr",
+ &spss_fuse_addr);
+ if (ret < 0) {
+ pr_err("can't get fuse addr.\n");
+ return -EFAULT;
+ }
+
+ ret = of_property_read_u32(node, "qcom,spss-fuse-bit",
+ &spss_fuse_bit);
+ if (ret < 0) {
+ pr_err("can't get fuse bit.\n");
+ return -EFAULT;
+ }
+
+ spss_fuse_mask = BIT(spss_fuse_bit);
+
+ pr_debug("spss_fuse_addr [0x%x] , spss_fuse_bit [%d] .\n",
+ (int) spss_fuse_addr, (int) spss_fuse_bit);
+
+ spss_fuse_reg = ioremap_nocache(spss_fuse_addr, sizeof(u32));
+
+ if (!spss_fuse_reg) {
+ pr_err("can't map fuse addr.\n");
+ return -EFAULT;
+ }
+
+ val = readl_relaxed(spss_fuse_reg);
+
+ pr_debug("spss fuse register value [0x%x].\n", (int) val);
+
+ if (val & spss_fuse_mask)
+ is_test_fuse_set = true;
+
+ iounmap(spss_fuse_reg);
+
+ return 0;
+}
+
+/**
+ * spss_probe() - initialization sequence
+ */
+/**
+ * spss_probe() - initialization sequence.
+ *
+ * Parses the DT node, selects the firmware image based on the test fuse,
+ * registers that name with the PIL subsystem layer, and exposes the
+ * result through sysfs.  Returns 0 on success or a negative errno.
+ *
+ * Changes vs. original: the dead "dev == NULL" check (dev was already
+ * assigned &pdev->dev and stored before the check, and can never be NULL)
+ * is removed, and the previously ignored spss_create_sysfs() return value
+ * is now propagated so a partially initialized device is not reported as
+ * success.
+ */
+static int spss_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device_node *np = NULL;
+ struct device *dev = NULL;
+
+ if (!pdev) {
+ pr_err("invalid pdev.\n");
+ return -ENODEV;
+ }
+
+ np = pdev->dev.of_node;
+ if (!np) {
+ pr_err("invalid DT node.\n");
+ return -EINVAL;
+ }
+
+ dev = &pdev->dev;
+ spss_dev = dev;
+
+ platform_set_drvdata(pdev, dev);
+
+ ret = spss_parse_dt(np);
+ if (ret < 0) {
+ pr_err("fail to parse device tree.\n");
+ return -EFAULT;
+ }
+
+ /* The fuse decides which signed image the SP daemon must load. */
+ if (is_test_fuse_set)
+ firmware_name = test_firmware_name;
+ else
+ firmware_name = prod_firmware_name;
+
+ ret = subsystem_set_fwname("spss", firmware_name);
+ if (ret < 0) {
+ pr_err("fail to set fw name.\n");
+ return -EFAULT;
+ }
+
+ ret = spss_create_sysfs(dev);
+ if (ret < 0)
+ return ret;
+
+ pr_info("Initialization completed ok, firmware_name [%s].\n",
+ firmware_name);
+
+ return 0;
+}
+
+static const struct of_device_id spss_match_table[] = {
+ { .compatible = "qcom,spss-utils", },
+ { },
+};
+
+static struct platform_driver spss_driver = {
+ .probe = spss_probe,
+ .driver = {
+ .name = DEVICE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spss_match_table),
+ },
+};
+
+/*==========================================================================*/
+/* Driver Init/Exit */
+/*==========================================================================*/
+/*
+ * spss_init() - module entry point; registers the platform driver.
+ *
+ * Fix vs. original: the platform_driver_register() result was logged but
+ * the function always returned 0, so a failed registration left the
+ * module loaded in a useless state.  Propagate the error instead.
+ */
+static int __init spss_init(void)
+{
+ int ret = 0;
+
+ pr_info("spss-utils driver Ver 1.0 12-Sep-2016.\n");
+
+ ret = platform_driver_register(&spss_driver);
+ if (ret)
+ pr_err("register platform driver failed, ret [%d]\n", ret);
+
+ return ret;
+}
+late_initcall(spss_init); /* start after PIL driver */
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Secure Processor Utilities");
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index 0ed8a6533e00..76d941ceb77e 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -746,6 +746,28 @@ static void subsys_stop(struct subsys_device *subsys)
notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_SHUTDOWN, NULL);
}
+/*
+ * subsystem_set_fwname() - override the firmware image name of a subsystem.
+ * @name: subsystem name as registered with subsystem-restart
+ * @fw_name: new firmware base name; copied (bounded) into desc->fw_name
+ *
+ * Looks the subsystem up via find_subsys() and overwrites its descriptor's
+ * fw_name with strlcpy, so the string is always NUL-terminated.  Returns 0
+ * on success, -EINVAL on NULL arguments or unknown subsystem.
+ * NOTE(review): no locking is taken here — presumably callers run before
+ * the subsystem is brought up; verify against subsystem_get() users.
+ */
+int subsystem_set_fwname(const char *name, const char *fw_name)
+{
+ struct subsys_device *subsys;
+
+ if (!name)
+ return -EINVAL;
+
+ if (!fw_name)
+ return -EINVAL;
+
+ subsys = find_subsys(name);
+ if (!subsys)
+ return -EINVAL;
+
+ pr_debug("Changing subsys [%s] fw_name to [%s]\n", name, fw_name);
+ strlcpy(subsys->desc->fw_name, fw_name,
+ sizeof(subsys->desc->fw_name));
+
+ return 0;
+}
+EXPORT_SYMBOL(subsystem_set_fwname);
+
void *__subsystem_get(const char *name, const char *fw_name)
{
struct subsys_device *subsys;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 670eda466438..9fdb06e08d4b 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -197,13 +198,13 @@ struct msm_port {
static
void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
{
- writel_relaxed(val, port->membase + off);
+ writel_relaxed_no_log(val, port->membase + off);
}
static
unsigned int msm_read(struct uart_port *port, unsigned int off)
{
- return readl_relaxed(port->membase + off);
+ return readl_relaxed_no_log(port->membase + off);
}
/*
@@ -403,8 +404,12 @@ static void msm_stop_tx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return;
msm_port->imr &= ~UART_IMR_TXLEV;
msm_write(port, msm_port->imr, UART_IMR);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
}
static void msm_start_tx(struct uart_port *port)
@@ -416,8 +421,12 @@ static void msm_start_tx(struct uart_port *port)
if (dma->count)
return;
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return;
msm_port->imr |= UART_IMR_TXLEV;
msm_write(port, msm_port->imr, UART_IMR);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
}
static void msm_reset_dm_count(struct uart_port *port, int count)
@@ -439,6 +448,8 @@ static void msm_complete_tx_dma(void *args)
unsigned int count;
u32 val;
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return;
spin_lock_irqsave(&port->lock, flags);
/* Already stopped */
@@ -475,6 +486,8 @@ static void msm_complete_tx_dma(void *args)
msm_handle_tx(port);
done:
spin_unlock_irqrestore(&port->lock, flags);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
}
static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
@@ -547,6 +560,8 @@ static void msm_complete_rx_dma(void *args)
unsigned long flags;
u32 val;
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return;
spin_lock_irqsave(&port->lock, flags);
/* Already stopped */
@@ -598,6 +613,8 @@ done:
if (count)
tty_flip_buffer_push(tport);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
}
static void msm_start_rx_dma(struct msm_port *msm_port)
@@ -672,19 +689,28 @@ static void msm_stop_rx(struct uart_port *port)
struct msm_port *msm_port = UART_TO_MSM(port);
struct msm_dma *dma = &msm_port->rx_dma;
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return;
msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
msm_write(port, msm_port->imr, UART_IMR);
if (dma->chan)
msm_stop_dma(port, dma);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
}
static void msm_enable_ms(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return;
+
msm_port->imr |= UART_IMR_DELTA_CTS;
msm_write(port, msm_port->imr, UART_IMR);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
}
static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
@@ -931,6 +957,8 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
unsigned int misr;
u32 val;
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return IRQ_NONE;
spin_lock_irqsave(&port->lock, flags);
misr = msm_read(port, UART_MISR);
msm_write(port, 0, UART_IMR); /* disable interrupt */
@@ -964,13 +992,25 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
spin_unlock_irqrestore(&port->lock, flags);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
return IRQ_HANDLED;
}
static unsigned int msm_tx_empty(struct uart_port *port)
{
- return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+ int ret;
+
+ ret = pm_runtime_get_sync(port->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = msm_read(port, UART_SR) & UART_SR_TX_EMPTY ? TIOCSER_TEMT : 0;
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+
+ return ret;
}
static unsigned int msm_get_mctrl(struct uart_port *port)
@@ -999,6 +1039,8 @@ static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int mr;
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return;
mr = msm_read(port, UART_MR1);
if (!(mctrl & TIOCM_RTS)) {
@@ -1009,14 +1051,20 @@ static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
mr |= UART_MR1_RX_RDY_CTL;
msm_write(port, mr, UART_MR1);
}
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
}
static void msm_break_ctl(struct uart_port *port, int break_ctl)
{
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return;
if (break_ctl)
msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
else
msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
}
struct msm_baud_map {
@@ -1157,15 +1205,6 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
return baud;
}
-static void msm_init_clock(struct uart_port *port)
-{
- struct msm_port *msm_port = UART_TO_MSM(port);
-
- clk_prepare_enable(msm_port->clk);
- clk_prepare_enable(msm_port->pclk);
- msm_serial_set_mnd_regs(port);
-}
-
static int msm_startup(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
@@ -1180,7 +1219,22 @@ static int msm_startup(struct uart_port *port)
if (unlikely(ret))
return ret;
- msm_init_clock(port);
+ /*
+ * UART clk must be kept enabled to
+ * avoid losing received character
+ */
+ ret = clk_prepare_enable(msm_port->clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare(msm_port->pclk);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(port->dev);
+ if (ret < 0)
+ goto err;
+
+ msm_serial_set_mnd_regs(port);
if (likely(port->fifosize > 12))
rfr_level = port->fifosize - 12;
@@ -1206,19 +1260,34 @@ static int msm_startup(struct uart_port *port)
msm_request_rx_dma(msm_port, msm_port->uart.mapbase);
}
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+
return 0;
+
+err:
+ clk_unprepare(msm_port->pclk);
+ clk_disable_unprepare(msm_port->clk);
+ free_irq(port->irq, port);
+ return ret;
}
static void msm_shutdown(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return;
+
msm_port->imr = 0;
msm_write(port, 0, UART_IMR); /* disable interrupts */
if (msm_port->is_uartdm)
msm_release_dma(msm_port);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+ clk_unprepare(msm_port->pclk);
clk_disable_unprepare(msm_port->clk);
free_irq(port->irq, port);
@@ -1232,6 +1301,8 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned long flags;
unsigned int baud, mr;
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return;
spin_lock_irqsave(&port->lock, flags);
if (dma->chan) /* Terminate if any */
@@ -1305,6 +1376,8 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
msm_start_rx_dma(msm_port);
spin_unlock_irqrestore(&port->lock, flags);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
}
static const char *msm_type(struct uart_port *port)
@@ -1385,12 +1458,26 @@ static void msm_power(struct uart_port *port, unsigned int state,
switch (state) {
case 0:
- clk_prepare_enable(msm_port->clk);
- clk_prepare_enable(msm_port->pclk);
+ /*
+ * UART clk must be kept enabled to
+ * avoid losing received character
+ */
+ if (clk_prepare_enable(msm_port->clk))
+ return;
+ if (clk_prepare(msm_port->pclk)) {
+ clk_disable_unprepare(msm_port->clk);
+ return;
+ }
+ if (pm_runtime_get_sync(port->dev) < 0) {
+ clk_unprepare(msm_port->pclk);
+ clk_disable_unprepare(msm_port->clk);
+ return;
+ }
break;
case 3:
+ pm_runtime_put(port->dev);
+ clk_unprepare(msm_port->pclk);
clk_disable_unprepare(msm_port->clk);
- clk_disable_unprepare(msm_port->pclk);
break;
default:
pr_err("msm_serial: Unknown PM state %d\n", state);
@@ -1632,7 +1719,11 @@ static void msm_console_write(struct console *co, const char *s,
port = msm_get_port_from_line(co->index);
msm_port = UART_TO_MSM(port);
+ if (pm_runtime_get_sync(port->dev) < 0)
+ return;
__msm_console_write(port, s, count, msm_port->is_uartdm);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
}
static int __init msm_console_setup(struct console *co, char *options)
@@ -1651,7 +1742,7 @@ static int __init msm_console_setup(struct console *co, char *options)
if (unlikely(!port->membase))
return -ENXIO;
- msm_init_clock(port);
+ msm_serial_set_mnd_regs(port);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1794,6 +1885,12 @@ static int msm_serial_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, port);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_irq_safe(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
return uart_add_one_port(&msm_uart_driver, port);
}
@@ -1802,6 +1899,7 @@ static int msm_serial_remove(struct platform_device *pdev)
struct uart_port *port = platform_get_drvdata(pdev);
uart_remove_one_port(&msm_uart_driver, port);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -1812,12 +1910,67 @@ static const struct of_device_id msm_match_table[] = {
{}
};
+#ifdef CONFIG_PM
+/*
+ * Runtime-PM suspend: gate only the UARTDM pclk (clk_disable, not
+ * unprepare — this path must stay usable from atomic context since the
+ * driver calls pm_runtime_irq_safe()).  The core clk is deliberately left
+ * running to avoid losing received characters.
+ */
+static int msm_serial_runtime_suspend(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ if (msm_port->is_uartdm)
+ clk_disable(msm_port->pclk);
+
+ return 0;
+}
+
+/*
+ * Runtime-PM resume: re-enable the UARTDM pclk gated by
+ * msm_serial_runtime_suspend().  clk_enable (not prepare_enable) keeps
+ * this safe for the irq-safe runtime-PM path.  Propagates the clk error.
+ */
+static int msm_serial_runtime_resume(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ int ret;
+
+ if (msm_port->is_uartdm) {
+ ret = clk_enable(msm_port->pclk);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System-sleep suspend: delegate to the serial core, which quiesces and
+ * shuts down the port.  uart_suspend_port()'s return value is ignored
+ * here, matching the driver's existing best-effort style.
+ */
+static int msm_serial_suspend(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+
+ uart_suspend_port(&msm_uart_driver, port);
+
+ return 0;
+}
+
+/*
+ * System-sleep resume: let the serial core restart the port it suspended
+ * in msm_serial_suspend().
+ */
+static int msm_serial_resume(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+
+ uart_resume_port(&msm_uart_driver, port);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops msm_serial_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(msm_serial_suspend, msm_serial_resume)
+ SET_RUNTIME_PM_OPS(msm_serial_runtime_suspend,
+ msm_serial_runtime_resume, NULL)
+};
+
static struct platform_driver msm_platform_driver = {
.remove = msm_serial_remove,
.probe = msm_serial_probe,
.driver = {
.name = "msm_serial",
.of_match_table = msm_match_table,
+ .pm = &msm_serial_pm_ops,
},
};
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 38ae877c46e3..3ffb01ff6549 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1203,10 +1203,11 @@ static int proc_getdriver(struct usb_dev_state *ps, void __user *arg)
static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg)
{
- struct usbdevfs_connectinfo ci = {
- .devnum = ps->dev->devnum,
- .slow = ps->dev->speed == USB_SPEED_LOW
- };
+ struct usbdevfs_connectinfo ci;
+
+ memset(&ci, 0, sizeof(ci));
+ ci.devnum = ps->dev->devnum;
+ ci.slow = ps->dev->speed == USB_SPEED_LOW;
if (copy_to_user(arg, &ci, sizeof(ci)))
return -EFAULT;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 2c26ae1e3eb7..1fb5ce9caf98 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1006,6 +1006,7 @@ struct dwc3 {
/* IRQ timing statistics */
int irq;
unsigned long irq_cnt;
+ unsigned long ep_cmd_timeout_cnt;
unsigned bh_completion_time[MAX_INTR_STATS];
unsigned bh_handled_evt_cnt[MAX_INTR_STATS];
unsigned bh_dbg_index;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 37a3c954a1dd..83a265f0211e 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -45,6 +45,7 @@
#include <linux/msm-bus.h>
#include <linux/irq.h>
#include <linux/extcon.h>
+#include <linux/reset.h>
#include "power.h"
#include "core.h"
@@ -159,6 +160,7 @@ struct dwc3_msm {
struct clk *utmi_clk_src;
struct clk *bus_aggr_clk;
struct clk *cfg_ahb_clk;
+ struct reset_control *core_reset;
struct regulator *dwc3_gdsc;
struct usb_phy *hs_phy, *ss_phy;
@@ -1517,19 +1519,19 @@ static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
clk_disable_unprepare(mdwc->sleep_clk);
clk_disable_unprepare(mdwc->core_clk);
clk_disable_unprepare(mdwc->iface_clk);
- ret = clk_reset(mdwc->core_clk, CLK_RESET_ASSERT);
+ ret = reset_control_assert(mdwc->core_reset);
if (ret)
- dev_err(mdwc->dev, "dwc3 core_clk assert failed\n");
+ dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
} else {
dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
- ret = clk_reset(mdwc->core_clk, CLK_RESET_DEASSERT);
+ ret = reset_control_deassert(mdwc->core_reset);
+ if (ret)
+ dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
ndelay(200);
clk_prepare_enable(mdwc->iface_clk);
clk_prepare_enable(mdwc->core_clk);
clk_prepare_enable(mdwc->sleep_clk);
clk_prepare_enable(mdwc->utmi_clk);
- if (ret)
- dev_err(mdwc->dev, "dwc3 core_clk deassert failed\n");
enable_irq(mdwc->pwr_event_irq);
}
@@ -1694,7 +1696,7 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned event)
break;
case DWC3_CONTROLLER_RESTART_USB_SESSION:
dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
- dwc3_restart_usb_work(&mdwc->restart_usb_work);
+ schedule_work(&mdwc->restart_usb_work);
break;
default:
dev_dbg(mdwc->dev, "unknown dwc3 event\n");
@@ -2041,11 +2043,16 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
dwc3_msm_config_gdsc(mdwc, 1);
-
- clk_reset(mdwc->core_clk, CLK_RESET_ASSERT);
+ ret = reset_control_assert(mdwc->core_reset);
+ if (ret)
+ dev_err(mdwc->dev, "%s:core_reset assert failed\n",
+ __func__);
/* HW requires a short delay for reset to take place properly */
usleep_range(1000, 1200);
- clk_reset(mdwc->core_clk, CLK_RESET_DEASSERT);
+ ret = reset_control_deassert(mdwc->core_reset);
+ if (ret)
+ dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
+ __func__);
clk_prepare_enable(mdwc->sleep_clk);
}
@@ -2366,6 +2373,17 @@ static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX);
}
+ mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
+ if (IS_ERR(mdwc->core_reset)) {
+ dev_err(mdwc->dev, "failed to get core_reset\n");
+ return PTR_ERR(mdwc->core_reset);
+ }
+
+ /*
+ * Get Max supported clk frequency for USB Core CLK and request
+ * to set the same.
+ */
+ mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX);
if (IS_ERR_VALUE(mdwc->core_clk_rate)) {
dev_err(mdwc->dev, "fail to get core clk max freq.\n");
} else {
@@ -3191,7 +3209,7 @@ static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
{
- union power_supply_propval pval = {1000 * mA};
+ union power_supply_propval pval = {0};
int ret;
if (mdwc->charging_disabled)
@@ -3208,9 +3226,14 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
}
}
+ power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval);
+ if (pval.intval != POWER_SUPPLY_TYPE_USB)
+ return 0;
+
dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
/* Set max current limit in uA */
+ pval.intval = 1000 * mA;
ret = power_supply_set_property(mdwc->usb_psy,
POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
if (ret) {
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index acaa99615d33..d90df256796c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -340,7 +340,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
struct dwc3_ep *dep = dwc->eps[ep];
- u32 timeout = 1500;
+ u32 timeout = 3000;
u32 reg;
trace_dwc3_gadget_ep_cmd(dep, cmd, params);
@@ -380,6 +380,11 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
"Command Timed Out");
dev_err(dwc->dev, "%s command timeout for %s\n",
dwc3_gadget_ep_cmd_string(cmd), dep->name);
+ if (!(cmd & DWC3_DEPCMD_ENDTRANSFER)) {
+ dwc->ep_cmd_timeout_cnt++;
+ dwc3_notify_event(dwc,
+ DWC3_CONTROLLER_RESTART_USB_SESSION);
+ }
return -ETIMEDOUT;
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 3d17fd93c787..a99405261306 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -27,6 +27,14 @@
#define SSUSB_GADGET_VBUS_DRAW_UNITS 8
#define HSUSB_GADGET_VBUS_DRAW_UNITS 2
+/*
+ * Based on enumerated USB speed, draw power with set_config and resume
+ * HSUSB: 500mA, SSUSB: 900mA
+ */
+#define USB_VBUS_DRAW(speed)\
+ (speed == USB_SPEED_SUPER ?\
+ SSUSB_GADGET_VBUS_DRAW : CONFIG_USB_GADGET_VBUS_DRAW)
+
static bool enable_l1_for_hs;
module_param(enable_l1_for_hs, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_l1_for_hs, "Enable support for L1 LPM for HS devices");
@@ -749,7 +757,6 @@ static int set_config(struct usb_composite_dev *cdev,
struct usb_gadget *gadget = cdev->gadget;
struct usb_configuration *c = NULL;
int result = -EINVAL;
- unsigned power = gadget_is_otg(gadget) ? 8 : 100;
int tmp;
/*
@@ -863,14 +870,8 @@ static int set_config(struct usb_composite_dev *cdev,
}
}
- /* Allow 900mA to draw with Super-Speed */
- if (gadget->speed == USB_SPEED_SUPER)
- power = SSUSB_GADGET_VBUS_DRAW;
- else
- power = CONFIG_USB_GADGET_VBUS_DRAW;
-
done:
- usb_gadget_vbus_draw(gadget, power);
+ usb_gadget_vbus_draw(gadget, USB_VBUS_DRAW(gadget->speed));
if (result >= 0 && cdev->delayed_status)
result = USB_GADGET_DELAYED_STATUS;
return result;
@@ -2319,7 +2320,6 @@ void composite_resume(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
struct usb_function *f;
- u16 maxpower;
int ret;
unsigned long flags;
@@ -2352,10 +2352,7 @@ void composite_resume(struct usb_gadget *gadget)
f->resume(f);
}
- maxpower = cdev->config->MaxPower;
-
- usb_gadget_vbus_draw(gadget, maxpower ?
- maxpower : CONFIG_USB_GADGET_VBUS_DRAW);
+ usb_gadget_vbus_draw(gadget, USB_VBUS_DRAW(gadget->speed));
}
spin_unlock_irqrestore(&cdev->lock, flags);
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 908cfb33bf99..2f6f393ba7c9 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -448,12 +448,19 @@ static void acc_hid_close(struct hid_device *hid)
{
}
+static int acc_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype, int reqtype)
+{
+ return 0;
+}
+
static struct hid_ll_driver acc_hid_ll_driver = {
.parse = acc_hid_parse,
.start = acc_hid_start,
.stop = acc_hid_stop,
.open = acc_hid_open,
.close = acc_hid_close,
+ .raw_request = acc_hid_raw_request,
};
static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
@@ -609,8 +616,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
{
struct acc_dev *dev = fp->private_data;
struct usb_request *req;
- ssize_t r = count;
- unsigned xfer;
+ ssize_t r = count, xfer, len;
int ret = 0;
pr_debug("acc_read(%zu)\n", count);
@@ -623,6 +629,8 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
if (count > BULK_BUFFER_SIZE)
count = BULK_BUFFER_SIZE;
+ len = ALIGN(count, dev->ep_out->maxpacket);
+
/* we will block until we're online */
pr_debug("acc_read: waiting for online\n");
ret = wait_event_interruptible(dev->read_wq, dev->online);
@@ -640,7 +648,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
requeue_req:
/* queue a request */
req = dev->rx_req[0];
- req->length = count;
+ req->length = len;
dev->rx_done = 0;
ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
if (ret < 0) {
@@ -936,6 +944,8 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev,
memset(dev->serial, 0, sizeof(dev->serial));
dev->start_requested = 0;
dev->audio_mode = 0;
+ strlcpy(dev->manufacturer, "Android", ACC_STRING_SIZE);
+ strlcpy(dev->model, "Android", ACC_STRING_SIZE);
}
}
@@ -1239,13 +1249,13 @@ static int acc_setup(void)
INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
INIT_WORK(&dev->hid_work, acc_hid_work);
- /* _acc_dev must be set before calling usb_gadget_register_driver */
- _acc_dev = dev;
-
ret = misc_register(&acc_device);
if (ret)
goto err;
+ /* _acc_dev must be set before calling usb_gadget_register_driver */
+ _acc_dev = dev;
+
return 0;
err:
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index 39645be93502..bcd817439dbf 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -583,6 +583,11 @@ static void audio_disable(struct usb_function *f)
usb_ep_disable(audio->in_ep);
}
+static void audio_free_func(struct usb_function *f)
+{
+ /* no-op */
+}
+
/*-------------------------------------------------------------------------*/
static void audio_build_desc(struct audio_dev *audio)
@@ -827,6 +832,7 @@ static struct audio_dev _audio_dev = {
.set_alt = audio_set_alt,
.setup = audio_setup,
.disable = audio_disable,
+ .free_func = audio_free_func,
},
.lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
.idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index e32348d17b26..92738ae2704d 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -36,6 +36,7 @@ MODULE_PARM_DESC(num_out_bufs,
static struct workqueue_struct *ipa_usb_wq;
+static void gsi_rndis_ipa_reset_trigger(struct f_gsi *rndis);
static void ipa_disconnect_handler(struct gsi_data_port *d_port);
static int gsi_ctrl_send_notification(struct f_gsi *gsi,
enum gsi_ctrl_notify_state);
@@ -553,6 +554,7 @@ static void ipa_work_handler(struct work_struct *w)
struct device *dev;
struct device *gad_dev;
struct f_gsi *gsi;
+ bool block_db;
event = read_event(d_port);
@@ -665,7 +667,27 @@ static void ipa_work_handler(struct work_struct *w)
}
break;
case STATE_CONNECTED:
- if (event == EVT_DISCONNECTED) {
+ if (event == EVT_DISCONNECTED || event == EVT_HOST_NRDY) {
+ if (peek_event(d_port) == EVT_HOST_READY) {
+ read_event(d_port);
+ log_event_dbg("%s: NO_OP NRDY_RDY", __func__);
+ break;
+ }
+
+ if (event == EVT_HOST_NRDY) {
+ log_event_dbg("%s: ST_CON_HOST_NRDY\n",
+ __func__);
+ block_db = true;
+ /* stop USB ringing doorbell to GSI(OUT_EP) */
+ usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
+ GSI_EP_OP_SET_CLR_BLOCK_DBL);
+ gsi_rndis_ipa_reset_trigger(gsi);
+ usb_gsi_ep_op(d_port->in_ep, NULL,
+ GSI_EP_OP_ENDXFER);
+ usb_gsi_ep_op(d_port->out_ep, NULL,
+ GSI_EP_OP_ENDXFER);
+ }
+
ipa_disconnect_work_handler(d_port);
d_port->sm_state = STATE_INITIALIZED;
usb_gadget_autopm_put_async(d_port->gadget);
@@ -1269,7 +1291,7 @@ static void gsi_rndis_open(struct f_gsi *rndis)
rndis_signal_connect(rndis->params);
}
-void gsi_rndis_ipa_reset_trigger(struct f_gsi *rndis)
+static void gsi_rndis_ipa_reset_trigger(struct f_gsi *rndis)
{
unsigned long flags;
@@ -1301,12 +1323,11 @@ void gsi_rndis_flow_ctrl_enable(bool enable, struct rndis_params *param)
d_port = &rndis->d_port;
- if (enable) {
- gsi_rndis_ipa_reset_trigger(rndis);
- usb_gsi_ep_op(d_port->in_ep, NULL, GSI_EP_OP_ENDXFER);
- usb_gsi_ep_op(d_port->out_ep, NULL, GSI_EP_OP_ENDXFER);
- post_event(d_port, EVT_DISCONNECTED);
+ if (enable) {
+ log_event_dbg("%s: posting HOST_NRDY\n", __func__);
+ post_event(d_port, EVT_HOST_NRDY);
} else {
+ log_event_dbg("%s: posting HOST_READY\n", __func__);
post_event(d_port, EVT_HOST_READY);
}
@@ -1359,6 +1380,12 @@ static int gsi_ctrl_send_notification(struct f_gsi *gsi,
return -ENODEV;
}
+ if (atomic_inc_return(&gsi->c_port.notify_count) != 1) {
+ log_event_dbg("delay ep_queue: notify req is busy %d",
+ atomic_read(&gsi->c_port.notify_count));
+ return 0;
+ }
+
event = req->buf;
switch (state) {
@@ -1414,12 +1441,6 @@ static int gsi_ctrl_send_notification(struct f_gsi *gsi,
log_event_dbg("send Notify type %02x", event->bNotificationType);
- if (atomic_inc_return(&gsi->c_port.notify_count) != 1) {
- log_event_dbg("delay ep_queue: notify req is busy %d",
- atomic_read(&gsi->c_port.notify_count));
- return 0;
- }
-
return queue_notification_request(gsi);
}
@@ -1469,7 +1490,8 @@ static void gsi_rndis_response_available(void *_rndis)
{
struct f_gsi *gsi = _rndis;
- gsi_ctrl_send_notification(gsi, GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE);
+ gsi_ctrl_send_notification(gsi,
+ GSI_CTRL_NOTIFY_RESPONSE_AVAILABLE);
}
static void gsi_rndis_command_complete(struct usb_ep *ep,
@@ -1937,14 +1959,14 @@ static int gsi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (gsi->prot_id == IPA_USB_ECM)
gsi->d_port.cdc_filter = DEFAULT_FILTER;
- post_event(&gsi->d_port, EVT_CONNECT_IN_PROGRESS);
/*
* For RNDIS the event is posted from the flow control
* handler which is invoked when the host sends the
* GEN_CURRENT_PACKET_FILTER message.
*/
if (gsi->prot_id != IPA_USB_RNDIS)
- post_event(&gsi->d_port, EVT_HOST_READY);
+ post_event(&gsi->d_port,
+ EVT_CONNECT_IN_PROGRESS);
queue_work(gsi->d_port.ipa_usb_wq,
&gsi->d_port.usb_ipa_w);
}
@@ -2056,6 +2078,8 @@ static void gsi_suspend(struct usb_function *f)
queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
}
+ log_event_dbg("%s: notify_count = %d\n",
+ __func__, atomic_read(&gsi->c_port.notify_count));
log_event_dbg("gsi suspended");
}
@@ -2080,6 +2104,21 @@ static void gsi_resume(struct usb_function *f)
else
remote_wakeup_allowed = f->config->cdev->gadget->remote_wakeup;
+ if (gsi->c_port.notify && !gsi->c_port.notify->desc)
+ config_ep_by_speed(cdev->gadget, f, gsi->c_port.notify);
+
+ log_event_dbg("%s: notify_count = %d\n",
+ __func__, atomic_read(&gsi->c_port.notify_count));
+
+ /* Send notification to host for RMNET, RNDIS and MBIM Interface */
+ if ((gsi->prot_id == IPA_USB_MBIM ||
+ gsi->prot_id == IPA_USB_RNDIS ||
+ gsi->prot_id == IPA_USB_RMNET) &&
+ (atomic_read(&gsi->c_port.notify_count) >= 1)) {
+ log_event_dbg("%s: force_queue\n", __func__);
+ queue_notification_request(gsi);
+ }
+
if (!remote_wakeup_allowed) {
/* Configure EPs for GSI */
@@ -2110,11 +2149,6 @@ static void gsi_resume(struct usb_function *f)
post_event(&gsi->d_port, EVT_RESUMED);
queue_work(gsi->d_port.ipa_usb_wq, &gsi->d_port.usb_ipa_w);
-
- if (gsi->c_port.notify && !gsi->c_port.notify->desc)
- config_ep_by_speed(cdev->gadget, f, gsi->c_port.notify);
-
- atomic_set(&gsi->c_port.notify_count, 0);
log_event_dbg("%s: completed", __func__);
}
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 3b3b1b011407..aa186781ef22 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -158,7 +158,7 @@ static struct usb_interface_descriptor ptp_interface_desc = {
.bInterfaceProtocol = 1,
};
-static struct usb_endpoint_descriptor mtp_superspeed_in_desc = {
+static struct usb_endpoint_descriptor mtp_ss_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
@@ -166,8 +166,8 @@ static struct usb_endpoint_descriptor mtp_superspeed_in_desc = {
.wMaxPacketSize = cpu_to_le16(1024),
};
-static struct usb_ss_ep_comp_descriptor mtp_superspeed_in_comp_desc = {
- .bLength = sizeof(mtp_superspeed_in_comp_desc),
+static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
+ .bLength = sizeof(mtp_ss_in_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 2 values can be tweaked if necessary */
@@ -175,7 +175,8 @@ static struct usb_ss_ep_comp_descriptor mtp_superspeed_in_comp_desc = {
/* .bmAttributes = 0, */
};
-static struct usb_endpoint_descriptor mtp_superspeed_out_desc = {
+
+static struct usb_endpoint_descriptor mtp_ss_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
@@ -183,8 +184,8 @@ static struct usb_endpoint_descriptor mtp_superspeed_out_desc = {
.wMaxPacketSize = cpu_to_le16(1024),
};
-static struct usb_ss_ep_comp_descriptor mtp_superspeed_out_comp_desc = {
- .bLength = sizeof(mtp_superspeed_out_comp_desc),
+static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
+ .bLength = sizeof(mtp_ss_out_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 2 values can be tweaked if necessary */
@@ -231,8 +232,8 @@ static struct usb_endpoint_descriptor mtp_intr_desc = {
.bInterval = 6,
};
-static struct usb_ss_ep_comp_descriptor mtp_superspeed_intr_comp_desc = {
- .bLength = sizeof(mtp_superspeed_intr_comp_desc),
+static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
+ .bLength = sizeof(mtp_intr_ss_comp_desc),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 3 values can be tweaked if necessary */
@@ -259,12 +260,12 @@ static struct usb_descriptor_header *hs_mtp_descs[] = {
static struct usb_descriptor_header *ss_mtp_descs[] = {
(struct usb_descriptor_header *) &mtp_interface_desc,
- (struct usb_descriptor_header *) &mtp_superspeed_in_desc,
- (struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
- (struct usb_descriptor_header *) &mtp_superspeed_out_desc,
- (struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
+ (struct usb_descriptor_header *) &mtp_ss_in_desc,
+ (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &mtp_ss_out_desc,
+ (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
(struct usb_descriptor_header *) &mtp_intr_desc,
- (struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
+ (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
NULL,
};
@@ -286,12 +287,12 @@ static struct usb_descriptor_header *hs_ptp_descs[] = {
static struct usb_descriptor_header *ss_ptp_descs[] = {
(struct usb_descriptor_header *) &ptp_interface_desc,
- (struct usb_descriptor_header *) &mtp_superspeed_in_desc,
- (struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
- (struct usb_descriptor_header *) &mtp_superspeed_out_desc,
- (struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
+ (struct usb_descriptor_header *) &mtp_ss_in_desc,
+ (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &mtp_ss_out_desc,
+ (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
(struct usb_descriptor_header *) &mtp_intr_desc,
- (struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
+ (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
NULL,
};
@@ -352,7 +353,7 @@ struct ext_mtp_desc mtp_ext_config_desc = {
.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
.bcdVersion = __constant_cpu_to_le16(0x0100),
.wIndex = __constant_cpu_to_le16(4),
- .bCount = __constant_cpu_to_le16(1),
+ .bCount = 1,
},
.function = {
.bFirstInterfaceNumber = 0,
@@ -395,6 +396,8 @@ struct mtp_instance {
struct usb_function_instance func_inst;
const char *name;
struct mtp_dev *dev;
+ char mtp_ext_compat_id[16];
+ struct usb_os_desc mtp_os_desc;
};
/* temporary variable used between mtp_open() and mtp_gadget_bind() */
@@ -1389,6 +1392,7 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
struct mtp_dev *dev = func_to_mtp(f);
int id;
int ret;
+ struct mtp_instance *fi_mtp;
dev->cdev = cdev;
DBG(cdev, "mtp_function_bind dev: %p\n", dev);
@@ -1406,6 +1410,18 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
mtp_interface_desc.iInterface = ret;
}
+
+ fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
+
+ if (cdev->use_os_string) {
+ f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+ GFP_KERNEL);
+ if (!f->os_desc_table)
+ return -ENOMEM;
+ f->os_desc_n = 1;
+ f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc;
+ }
+
/* allocate endpoints */
ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
&mtp_fullspeed_out_desc, &mtp_intr_desc);
@@ -1419,18 +1435,32 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
mtp_highspeed_out_desc.bEndpointAddress =
mtp_fullspeed_out_desc.bEndpointAddress;
}
+ /* support super speed hardware */
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ unsigned max_burst;
+
+ /* Calculate bMaxBurst, we know packet size is 1024 */
+ max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15);
+ mtp_ss_in_desc.bEndpointAddress =
+ mtp_fullspeed_in_desc.bEndpointAddress;
+ mtp_ss_in_comp_desc.bMaxBurst = max_burst;
+ mtp_ss_out_desc.bEndpointAddress =
+ mtp_fullspeed_out_desc.bEndpointAddress;
+ mtp_ss_out_comp_desc.bMaxBurst = max_burst;
+ }
/* support super speed hardware */
if (gadget_is_superspeed(c->cdev->gadget)) {
- mtp_superspeed_in_desc.bEndpointAddress =
+ mtp_ss_in_desc.bEndpointAddress =
mtp_fullspeed_in_desc.bEndpointAddress;
- mtp_superspeed_out_desc.bEndpointAddress =
+ mtp_ss_out_desc.bEndpointAddress =
mtp_fullspeed_out_desc.bEndpointAddress;
}
DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
- f->name, dev->ep_in->name, dev->ep_out->name);
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
+ f->name, dev->ep_in->name, dev->ep_out->name);
return 0;
}
@@ -1449,6 +1479,8 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
mtp_request_free(req, dev->ep_intr);
dev->state = STATE_OFFLINE;
dev->is_ptp = false;
+ kfree(f->os_desc_table);
+ f->os_desc_n = 0;
}
static int mtp_function_set_alt(struct usb_function *f,
@@ -1748,6 +1780,7 @@ static void mtp_free_inst(struct usb_function_instance *fi)
fi_mtp = to_fi_mtp(fi);
kfree(fi_mtp->name);
mtp_cleanup();
+ kfree(fi_mtp->mtp_os_desc.group.default_groups);
kfree(fi_mtp);
}
@@ -1755,6 +1788,8 @@ struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
{
struct mtp_instance *fi_mtp;
int ret = 0;
+ struct usb_os_desc *descs[1];
+ char *names[1];
fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL);
if (!fi_mtp)
@@ -1762,6 +1797,13 @@ struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
fi_mtp->func_inst.set_inst_name = mtp_set_inst_name;
fi_mtp->func_inst.free_func_inst = mtp_free_inst;
+ fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id;
+ INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop);
+ descs[0] = &fi_mtp->mtp_os_desc;
+ names[0] = "MTP";
+ usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
+ descs, names, THIS_MODULE);
+
if (mtp_config) {
ret = mtp_setup_configfs(fi_mtp);
if (ret) {
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 76b25445c6ad..dd73dfe5dcab 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1014,6 +1014,7 @@ struct net_device *gether_setup_name_default(const char *netname)
spin_lock_init(&dev->lock);
spin_lock_init(&dev->req_lock);
INIT_WORK(&dev->work, eth_work);
+ INIT_WORK(&dev->rx_work, process_rx_w);
INIT_LIST_HEAD(&dev->tx_reqs);
INIT_LIST_HEAD(&dev->rx_reqs);
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index b011efe189e7..1bb7082be8e6 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -1257,8 +1257,11 @@ static void usbpd_sm(struct work_struct *w)
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
if (pd->current_pr == PR_SRC) {
- regulator_disable(pd->vconn);
regulator_disable(pd->vbus);
+ if (pd->vconn_enabled) {
+ regulator_disable(pd->vconn);
+ pd->vconn_enabled = false;
+ }
}
if (pd->current_dr == DR_UFP)
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 23a4ac11af36..4ecdc350fbd4 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -27,6 +27,7 @@
#include <linux/regulator/machine.h>
#include <linux/usb/phy.h>
#include <linux/usb/msm_hsusb.h>
+#include <linux/reset.h>
#define QUSB2PHY_PWR_CTRL1 0x210
#define PWR_CTRL1_POWR_DOWN BIT(0)
@@ -34,12 +35,8 @@
#define QUSB2PHY_PLL_COMMON_STATUS_ONE 0x1A0
#define CORE_READY_STATUS BIT(0)
-/* In case Efuse register shows zero, use this value */
-#define TUNE2_DEFAULT_HIGH_NIBBLE 0xB
-#define TUNE2_DEFAULT_LOW_NIBBLE 0x3
-
-/* Get TUNE2's high nibble value read from efuse */
-#define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask) ((val >> pos) & mask)
+/* Get TUNE value from efuse bit-mask */
+#define TUNE_VAL_MASK(val, pos, mask) ((val >> pos) & mask)
#define QUSB2PHY_INTR_CTRL 0x22C
#define DMSE_INTR_HIGH_SEL BIT(4)
@@ -52,7 +49,7 @@
#define DMSE_INTERRUPT BIT(1)
#define DPSE_INTERRUPT BIT(0)
-#define QUSB2PHY_PORT_TUNE2 0x240
+#define QUSB2PHY_PORT_TUNE1 0x23c
#define QUSB2PHY_1P8_VOL_MIN 1800000 /* uV */
#define QUSB2PHY_1P8_VOL_MAX 1800000 /* uV */
@@ -65,19 +62,22 @@
#define LINESTATE_DP BIT(0)
#define LINESTATE_DM BIT(1)
-unsigned int phy_tune2;
-module_param(phy_tune2, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(phy_tune2, "QUSB PHY v2 TUNE2");
+#define QUSB2PHY_PLL_ANALOG_CONTROLS_ONE 0x0
+
+unsigned int phy_tune1;
+module_param(phy_tune1, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
struct qusb_phy {
struct usb_phy phy;
void __iomem *base;
- void __iomem *tune2_efuse_reg;
+ void __iomem *efuse_reg;
+ void __iomem *tcsr_clamp_dig_n;
struct clk *ref_clk_src;
struct clk *ref_clk;
struct clk *cfg_ahb_clk;
- struct clk *phy_reset;
+ struct reset_control *phy_reset;
struct regulator *vdd;
struct regulator *vdda33;
@@ -88,9 +88,9 @@ struct qusb_phy {
int host_init_seq_len;
int *qusb_phy_host_init_seq;
- u32 tune2_val;
- int tune2_efuse_bit_pos;
- int tune2_efuse_num_of_bits;
+ u32 tune_val;
+ int efuse_bit_pos;
+ int efuse_num_of_bits;
bool power_enabled;
bool clocks_enabled;
@@ -324,40 +324,34 @@ static int qusb_phy_update_dpdm(struct usb_phy *phy, int value)
return ret;
}
-static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
+static void qusb_phy_get_tune1_param(struct qusb_phy *qphy)
{
- u8 num_of_bits;
+ u8 reg;
u32 bit_mask = 1;
pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
- qphy->tune2_efuse_num_of_bits,
- qphy->tune2_efuse_bit_pos);
+ qphy->efuse_num_of_bits,
+ qphy->efuse_bit_pos);
/* get bit mask based on number of bits to use with efuse reg */
- if (qphy->tune2_efuse_num_of_bits) {
- num_of_bits = qphy->tune2_efuse_num_of_bits;
- bit_mask = (bit_mask << num_of_bits) - 1;
- }
+ bit_mask = (bit_mask << qphy->efuse_num_of_bits) - 1;
/*
- * Read EFUSE register having TUNE2 parameter's high nibble.
- * If efuse register shows value as 0x0, then use default value
- * as 0xB as high nibble. Otherwise use efuse register based
- * value for this purpose.
+ * if efuse reg is updated (i.e non-zero) then use it to program
+ * tune parameters
*/
- qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
- pr_debug("%s(): bit_mask:%d efuse based tune2 value:%d\n",
- __func__, bit_mask, qphy->tune2_val);
-
- qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
- qphy->tune2_efuse_bit_pos, bit_mask);
-
- if (!qphy->tune2_val)
- qphy->tune2_val = TUNE2_DEFAULT_HIGH_NIBBLE;
-
- /* Get TUNE2 byte value using high and low nibble value */
- qphy->tune2_val = ((qphy->tune2_val << 0x4) |
- TUNE2_DEFAULT_LOW_NIBBLE);
+ qphy->tune_val = readl_relaxed(qphy->efuse_reg);
+ pr_debug("%s(): bit_mask:%d efuse based tune1 value:%d\n",
+ __func__, bit_mask, qphy->tune_val);
+
+ qphy->tune_val = TUNE_VAL_MASK(qphy->tune_val,
+ qphy->efuse_bit_pos, bit_mask);
+ reg = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE1);
+ if (qphy->tune_val) {
+ reg = reg & 0x0f;
+ reg |= (qphy->tune_val << 4);
+ }
+ qphy->tune_val = reg;
}
static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
@@ -377,14 +371,19 @@ static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
static void qusb_phy_host_init(struct usb_phy *phy)
{
u8 reg;
+ int ret;
struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
dev_dbg(phy->dev, "%s\n", __func__);
/* Perform phy reset */
- clk_reset(qphy->phy_reset, CLK_RESET_ASSERT);
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
usleep_range(100, 150);
- clk_reset(qphy->phy_reset, CLK_RESET_DEASSERT);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
qusb_phy_write_seq(qphy->base, qphy->qusb_phy_host_init_seq,
qphy->host_init_seq_len, 0);
@@ -418,9 +417,13 @@ static int qusb_phy_init(struct usb_phy *phy)
qusb_phy_enable_clocks(qphy, true);
/* Perform phy reset */
- clk_reset(qphy->phy_reset, CLK_RESET_ASSERT);
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
usleep_range(100, 150);
- clk_reset(qphy->phy_reset, CLK_RESET_DEASSERT);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
if (qphy->emulation) {
if (qphy->emu_init_seq)
@@ -454,27 +457,22 @@ static int qusb_phy_init(struct usb_phy *phy)
if (qphy->qusb_phy_init_seq)
qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
qphy->init_seq_len, 0);
- /*
- * Check for EFUSE value only if tune2_efuse_reg is available
- * and try to read EFUSE value only once i.e. not every USB
- * cable connect case.
- */
- if (qphy->tune2_efuse_reg) {
- if (!qphy->tune2_val)
- qusb_phy_get_tune2_param(qphy);
+ if (qphy->efuse_reg) {
+ if (!qphy->tune_val)
+ qusb_phy_get_tune1_param(qphy);
- pr_debug("%s(): Programming TUNE2 parameter as:%x\n", __func__,
- qphy->tune2_val);
- writel_relaxed(qphy->tune2_val,
- qphy->base + QUSB2PHY_PORT_TUNE2);
+ pr_debug("%s(): Programming TUNE1 parameter as:%x\n", __func__,
+ qphy->tune_val);
+ writel_relaxed(qphy->tune_val,
+ qphy->base + QUSB2PHY_PORT_TUNE1);
}
- /* If phy_tune2 modparam set, override tune2 value */
- if (phy_tune2) {
- pr_debug("%s(): (modparam) TUNE2 val:0x%02x\n",
- __func__, phy_tune2);
- writel_relaxed(phy_tune2,
- qphy->base + QUSB2PHY_PORT_TUNE2);
+ /* If phy_tune1 modparam set, override tune1 value */
+ if (phy_tune1) {
+ pr_debug("%s(): (modparam) TUNE1 val:0x%02x\n",
+ __func__, phy_tune1);
+ writel_relaxed(phy_tune1,
+ qphy->base + QUSB2PHY_PORT_TUNE1);
}
/* ensure above writes are completed before re-enabling PHY */
@@ -543,6 +541,7 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
{
struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
u32 linestate = 0, intr_mask = 0;
+ int ret;
if (qphy->suspended && suspend) {
dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
@@ -554,6 +553,11 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
/* Bus suspend case */
if (qphy->cable_connected ||
(qphy->phy.flags & PHY_HOST_MODE)) {
+
+ /* enable clock bypass */
+ writel_relaxed(0x90,
+ qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_ONE);
+
/* Disable all interrupts */
writel_relaxed(0x00,
qphy->base + QUSB2PHY_INTR_CTRL);
@@ -584,15 +588,26 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
wmb();
qusb_phy_enable_clocks(qphy, false);
} else { /* Cable disconnect case */
- /* Disable all interrupts */
- writel_relaxed(0x00,
- qphy->base + QUSB2PHY_INTR_CTRL);
-
- /* Put PHY into non-driving mode */
- writel_relaxed(0x23,
- qphy->base + QUSB2PHY_PWR_CTRL1);
- /* Makes sure that above write goes through */
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset assert failed\n",
+ __func__);
+ usleep_range(100, 150);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset deassert failed\n",
+ __func__);
+
+ /* enable clock bypass */
+ writel_relaxed(0x90,
+ qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_ONE);
+
+ writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+ /*
+ * clamp needs asserted before
+ * power/clocks can be turned off
+ */
wmb();
qusb_phy_enable_clocks(qphy, false);
@@ -604,6 +619,11 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
if (qphy->cable_connected ||
(qphy->phy.flags & PHY_HOST_MODE)) {
qusb_phy_enable_clocks(qphy, true);
+
+ /* disable clock bypass */
+ writel_relaxed(0x80,
+ qphy->base + QUSB2PHY_PLL_ANALOG_CONTROLS_ONE);
+
/* Clear all interrupts on resume */
writel_relaxed(0x00,
qphy->base + QUSB2PHY_INTR_CTRL);
@@ -611,6 +631,19 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
/* Makes sure that above write goes through */
wmb();
} else { /* Cable connect case */
+ writel_relaxed(0x1, qphy->tcsr_clamp_dig_n);
+
+ /*
+ * clamp needs de-asserted before
+ * power/clocks can be turned on
+ */
+ wmb();
+
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset deassert failed\n",
+ __func__);
+
qusb_phy_enable_power(qphy, true, true);
qusb_phy_enable_clocks(qphy, true);
}
@@ -735,23 +768,33 @@ static int qusb_phy_probe(struct platform_device *pdev)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "tune2_efuse_addr");
+ "tcsr_clamp_dig_n_1p8");
+ if (res) {
+ qphy->tcsr_clamp_dig_n = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qphy->tcsr_clamp_dig_n)) {
+ dev_dbg(dev, "couldn't ioremap tcsr_clamp_dig_n\n");
+ return PTR_ERR(qphy->tcsr_clamp_dig_n);
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "efuse_addr");
if (res) {
- qphy->tune2_efuse_reg = devm_ioremap_nocache(dev, res->start,
+ qphy->efuse_reg = devm_ioremap_nocache(dev, res->start,
resource_size(res));
- if (!IS_ERR_OR_NULL(qphy->tune2_efuse_reg)) {
+ if (!IS_ERR_OR_NULL(qphy->efuse_reg)) {
ret = of_property_read_u32(dev->of_node,
- "qcom,tune2-efuse-bit-pos",
- &qphy->tune2_efuse_bit_pos);
+ "qcom,efuse-bit-pos",
+ &qphy->efuse_bit_pos);
if (!ret) {
ret = of_property_read_u32(dev->of_node,
- "qcom,tune2-efuse-num-bits",
- &qphy->tune2_efuse_num_of_bits);
+ "qcom,efuse-num-bits",
+ &qphy->efuse_num_of_bits);
}
if (ret) {
dev_err(dev,
- "DT Value for tune2 efuse is invalid.\n");
+ "DT Value for efuse is invalid.\n");
return -EINVAL;
}
}
@@ -779,7 +822,7 @@ static int qusb_phy_probe(struct platform_device *pdev)
}
}
- qphy->phy_reset = devm_clk_get(dev, "phy_reset");
+ qphy->phy_reset = devm_reset_control_get(dev, "phy_reset");
if (IS_ERR(qphy->phy_reset))
return PTR_ERR(qphy->phy_reset);
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index af7ec03314f5..5456b9e8733b 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -27,6 +27,7 @@
#include <linux/regulator/machine.h>
#include <linux/usb/phy.h>
#include <linux/usb/msm_hsusb.h>
+#include <linux/reset.h>
#define QUSB2PHY_PLL_STATUS 0x38
#define QUSB2PHY_PLL_LOCK BIT(5)
@@ -112,7 +113,7 @@ struct qusb_phy {
struct clk *ref_clk_src;
struct clk *ref_clk;
struct clk *cfg_ahb_clk;
- struct clk *phy_reset;
+ struct reset_control *phy_reset;
struct regulator *vdd;
struct regulator *vdda33;
@@ -443,9 +444,13 @@ static int qusb_phy_init(struct usb_phy *phy)
}
/* Perform phy reset */
- clk_reset(qphy->phy_reset, CLK_RESET_ASSERT);
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
usleep_range(100, 150);
- clk_reset(qphy->phy_reset, CLK_RESET_DEASSERT);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
if (qphy->emulation) {
if (qphy->emu_init_seq)
@@ -858,7 +863,7 @@ static int qusb_phy_probe(struct platform_device *pdev)
if (IS_ERR(qphy->cfg_ahb_clk))
return PTR_ERR(qphy->cfg_ahb_clk);
- qphy->phy_reset = devm_clk_get(dev, "phy_reset");
+ qphy->phy_reset = devm_reset_control_get(dev, "phy_reset");
if (IS_ERR(qphy->phy_reset))
return PTR_ERR(qphy->phy_reset);
@@ -1011,8 +1016,11 @@ static int qusb_phy_probe(struct platform_device *pdev)
* not used, there is leakage current seen with QUSB PHY related voltage
* rail. Hence keep QUSB PHY into reset state explicitly here.
*/
- if (hold_phy_reset)
- clk_reset(qphy->phy_reset, CLK_RESET_ASSERT);
+ if (hold_phy_reset) {
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(dev, "%s:phy_reset assert failed\n", __func__);
+ }
ret = usb_add_phy_dev(&qphy->phy);
if (ret)
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 529e4ea76a96..fc61e3172d0b 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -25,6 +25,7 @@
#include <linux/usb/msm_hsusb.h>
#include <linux/clk.h>
#include <linux/clk/msm-clk.h>
+#include <linux/reset.h>
enum core_ldo_levels {
CORE_LEVEL_NONE = 0,
@@ -87,8 +88,10 @@ struct msm_ssphy_qmp {
struct clk *aux_clk;
struct clk *cfg_ahb_clk;
struct clk *pipe_clk;
- struct clk *phy_reset;
- struct clk *phy_phy_reset;
+ bool power_enabled;
+ struct reset_control *phy_reset;
+ struct reset_control *phy_phy_reset;
+
bool clk_enabled;
bool cable_connected;
bool in_suspend;
@@ -159,38 +162,47 @@ static void msm_ssusb_qmp_enable_autonomous(struct msm_ssphy_qmp *phy,
}
}
-
-static int msm_ssusb_qmp_config_vdd(struct msm_ssphy_qmp *phy, int high)
+static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on)
{
- int min, ret;
+ int min, rc = 0;
- min = high ? 1 : 0; /* low or none? */
- ret = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
- phy->vdd_levels[2]);
- if (ret) {
- dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
- return ret;
- }
+ dev_dbg(phy->phy.dev, "reg (%s)\n", on ? "HPM" : "LPM");
- dev_dbg(phy->phy.dev, "min_vol:%d max_vol:%d\n",
- phy->vdd_levels[min], phy->vdd_levels[2]);
- return ret;
-}
+ if (phy->power_enabled == on) {
+ dev_dbg(phy->phy.dev, "PHYs' regulators status %d\n",
+ phy->power_enabled);
+ return 0;
+ }
-static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on)
-{
- int rc = 0;
+ phy->power_enabled = on;
- dev_dbg(phy->phy.dev, "reg (%s)\n", on ? "HPM" : "LPM");
+ min = on ? 1 : 0; /* low or none? */
if (!on)
goto disable_regulators;
+ rc = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
+ phy->vdd_levels[2]);
+ if (rc) {
+ dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
+ return rc;
+ }
+
+ dev_dbg(phy->phy.dev, "min_vol:%d max_vol:%d\n",
+ phy->vdd_levels[min], phy->vdd_levels[2]);
+
+ rc = regulator_enable(phy->vdd);
+ if (rc) {
+ dev_err(phy->phy.dev,
+ "regulator_enable(phy->vdd) failed, ret=%d",
+ rc);
+ goto unconfig_vdd;
+ }
rc = regulator_set_load(phy->core_ldo, USB_SSPHY_HPM_LOAD);
if (rc < 0) {
dev_err(phy->phy.dev, "Unable to set HPM of core_ldo\n");
- return rc;
+ goto disable_vdd;
}
rc = regulator_set_voltage(phy->core_ldo,
@@ -226,6 +238,18 @@ put_core_ldo_lpm:
if (rc < 0)
dev_err(phy->phy.dev, "Unable to set LPM of core_ldo\n");
+disable_vdd:
+ rc = regulator_disable(phy->vdd);
+ if (rc)
+ dev_err(phy->phy.dev, "regulator_disable(phy->vdd) failed, ret=%d",
+ rc);
+
+unconfig_vdd:
+ rc = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
+ phy->vdd_levels[2]);
+ if (rc)
+ dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
+
return rc < 0 ? rc : 0;
}
@@ -263,6 +287,14 @@ static int msm_ssphy_qmp_init(struct usb_phy *uphy)
if (phy->emulation)
return 0;
+ ret = msm_ssusb_qmp_ldo_enable(phy, 1);
+ if (ret) {
+ dev_err(phy->phy.dev,
+ "msm_ssusb_qmp_ldo_enable(1) failed, ret=%d\n",
+ ret);
+ return ret;
+ }
+
if (!phy->clk_enabled) {
if (phy->ref_clk_src)
clk_prepare_enable(phy->ref_clk_src);
@@ -341,56 +373,39 @@ static int msm_ssphy_qmp_reset(struct usb_phy *uphy)
dev_dbg(uphy->dev, "Resetting QMP phy\n");
/* Assert USB3 PHY reset */
- if (phy->phy_phy_reset) {
- ret = clk_reset(phy->phy_phy_reset, CLK_RESET_ASSERT);
- if (ret) {
- dev_err(uphy->dev, "phy_phy reset assert failed\n");
- goto exit;
- }
- } else {
- ret = clk_reset(phy->pipe_clk, CLK_RESET_ASSERT);
- if (ret) {
- dev_err(uphy->dev, "pipe_clk reset assert failed\n");
- goto exit;
- }
+ ret = reset_control_assert(phy->phy_phy_reset);
+ if (ret) {
+ dev_err(uphy->dev, "phy_phy_reset assert failed\n");
+ goto exit;
}
/* Assert USB3 PHY CSR reset */
- ret = clk_reset(phy->phy_reset, CLK_RESET_ASSERT);
+ ret = reset_control_assert(phy->phy_reset);
if (ret) {
- dev_err(uphy->dev, "phy_reset clk assert failed\n");
+ dev_err(uphy->dev, "phy_reset assert failed\n");
goto deassert_phy_phy_reset;
}
/* Deassert USB3 PHY CSR reset */
- ret = clk_reset(phy->phy_reset, CLK_RESET_DEASSERT);
+ ret = reset_control_deassert(phy->phy_reset);
if (ret) {
- dev_err(uphy->dev, "phy_reset clk deassert failed\n");
+ dev_err(uphy->dev, "phy_reset deassert failed\n");
goto deassert_phy_phy_reset;
}
/* Deassert USB3 PHY reset */
- if (phy->phy_phy_reset) {
- ret = clk_reset(phy->phy_phy_reset, CLK_RESET_DEASSERT);
- if (ret) {
- dev_err(uphy->dev, "phy_phy reset deassert failed\n");
- goto exit;
- }
- } else {
- ret = clk_reset(phy->pipe_clk, CLK_RESET_DEASSERT);
- if (ret) {
- dev_err(uphy->dev, "pipe_clk reset deassert failed\n");
- goto exit;
- }
+ ret = reset_control_deassert(phy->phy_phy_reset);
+ if (ret) {
+ dev_err(uphy->dev, "phy_phy_reset deassert failed\n");
+ goto exit;
}
return 0;
deassert_phy_phy_reset:
-	if (phy->phy_phy_reset)
-		clk_reset(phy->phy_phy_reset, CLK_RESET_DEASSERT);
-	else
-		clk_reset(phy->pipe_clk, CLK_RESET_DEASSERT);
+	/* best-effort deassert on the error path; must not clobber 'ret' */
+	if (reset_control_deassert(phy->phy_phy_reset))
+		dev_err(uphy->dev, "phy_phy_reset deassert failed\n");
exit:
phy->in_suspend = false;
@@ -408,12 +423,6 @@ static int msm_ssphy_power_enable(struct msm_ssphy_qmp *phy, bool on)
*/
if (!host && !phy->cable_connected) {
if (on) {
- ret = regulator_enable(phy->vdd);
- if (ret)
- dev_err(phy->phy.dev,
- "regulator_enable(phy->vdd) failed, ret=%d",
- ret);
-
ret = msm_ssusb_qmp_ldo_enable(phy, 1);
if (ret)
dev_err(phy->phy.dev,
@@ -425,11 +434,6 @@ static int msm_ssphy_power_enable(struct msm_ssphy_qmp *phy, bool on)
dev_err(phy->phy.dev,
"msm_ssusb_qmp_ldo_enable(0) failed, ret=%d\n",
ret);
-
- ret = regulator_disable(phy->vdd);
- if (ret)
- dev_err(phy->phy.dev, "regulator_disable(phy->vdd) failed, ret=%d",
- ret);
}
}
@@ -575,26 +579,18 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev)
goto err;
}
- if (of_property_match_string(pdev->dev.of_node,
- "clock-names", "phy_reset") >= 0) {
- phy->phy_reset = clk_get(&pdev->dev, "phy_reset");
- if (IS_ERR(phy->phy_reset)) {
- ret = PTR_ERR(phy->phy_reset);
- phy->phy_reset = NULL;
- dev_dbg(dev, "failed to get phy_reset\n");
- goto err;
- }
+ phy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+ if (IS_ERR(phy->phy_reset)) {
+ ret = PTR_ERR(phy->phy_reset);
+ dev_dbg(dev, "failed to get phy_reset\n");
+ goto err;
}
- if (of_property_match_string(pdev->dev.of_node,
- "clock-names", "phy_phy_reset") >= 0) {
- phy->phy_phy_reset = clk_get(dev, "phy_phy_reset");
- if (IS_ERR(phy->phy_phy_reset)) {
- ret = PTR_ERR(phy->phy_phy_reset);
- phy->phy_phy_reset = NULL;
- dev_dbg(dev, "phy_phy_reset unavailable\n");
- goto err;
- }
+ phy->phy_phy_reset = devm_reset_control_get(dev, "phy_phy_reset");
+ if (IS_ERR(phy->phy_phy_reset)) {
+ ret = PTR_ERR(phy->phy_phy_reset);
+ dev_dbg(dev, "failed to get phy_phy_reset\n");
+ goto err;
}
of_get_property(dev->of_node, "qcom,qmp-phy-reg-offset", &size);
@@ -733,24 +729,6 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev)
goto err;
}
- ret = msm_ssusb_qmp_config_vdd(phy, 1);
- if (ret) {
- dev_err(dev, "ssusb vdd_dig configuration failed\n");
- goto err;
- }
-
- ret = regulator_enable(phy->vdd);
- if (ret) {
- dev_err(dev, "unable to enable the ssusb vdd_dig\n");
- goto unconfig_ss_vdd;
- }
-
- ret = msm_ssusb_qmp_ldo_enable(phy, 1);
- if (ret) {
- dev_err(dev, "ssusb vreg enable failed\n");
- goto disable_ss_vdd;
- }
-
phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
if (IS_ERR(phy->ref_clk_src))
phy->ref_clk_src = NULL;
@@ -772,16 +750,7 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev)
phy->phy.type = USB_PHY_TYPE_USB3;
ret = usb_add_phy_dev(&phy->phy);
- if (ret)
- goto disable_ss_ldo;
- return 0;
-disable_ss_ldo:
- msm_ssusb_qmp_ldo_enable(phy, 0);
-disable_ss_vdd:
- regulator_disable(phy->vdd);
-unconfig_ss_vdd:
- msm_ssusb_qmp_config_vdd(phy, 0);
err:
return ret;
}
@@ -799,8 +768,6 @@ static int msm_ssphy_qmp_remove(struct platform_device *pdev)
if (phy->ref_clk_src)
clk_disable_unprepare(phy->ref_clk_src);
msm_ssusb_qmp_ldo_enable(phy, 0);
- regulator_disable(phy->vdd);
- msm_ssusb_qmp_config_vdd(phy, 0);
clk_disable_unprepare(phy->aux_clk);
clk_disable_unprepare(phy->cfg_ahb_clk);
clk_disable_unprepare(phy->pipe_clk);
diff --git a/drivers/video/adf/Kconfig b/drivers/video/adf/Kconfig
index 33858b73d8bb..2777db48fae0 100644
--- a/drivers/video/adf/Kconfig
+++ b/drivers/video/adf/Kconfig
@@ -11,4 +11,4 @@ menuconfig ADF_FBDEV
menuconfig ADF_MEMBLOCK
depends on ADF
depends on HAVE_MEMBLOCK
- tristate "Helper for using memblocks as buffers in ADF drivers"
+ bool "Helper for using memblocks as buffers in ADF drivers"
diff --git a/drivers/video/adf/Makefile b/drivers/video/adf/Makefile
index 78d0915122f4..cdf34a666dc7 100644
--- a/drivers/video/adf/Makefile
+++ b/drivers/video/adf/Makefile
@@ -2,13 +2,15 @@ ccflags-y := -Idrivers/staging/android
CFLAGS_adf.o := -I$(src)
-obj-$(CONFIG_ADF) += adf.o \
+obj-$(CONFIG_ADF) += adf_core.o
+
+adf_core-y := adf.o \
adf_client.o \
adf_fops.o \
adf_format.o \
adf_sysfs.o
-obj-$(CONFIG_COMPAT) += adf_fops32.o
+adf_core-$(CONFIG_COMPAT) += adf_fops32.o
obj-$(CONFIG_ADF_FBDEV) += adf_fbdev.o
diff --git a/drivers/video/adf/adf_client.c b/drivers/video/adf/adf_client.c
index 8061d8e6b9fb..75b2f0b18522 100644
--- a/drivers/video/adf/adf_client.c
+++ b/drivers/video/adf/adf_client.c
@@ -305,8 +305,10 @@ static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
}
done:
- if (ret < 0)
+ if (ret < 0) {
adf_buffer_mapping_cleanup(mapping, buf);
+ memset(mapping, 0, sizeof(*mapping));
+ }
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 4724f4378e23..609a7aed4977 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -191,6 +191,7 @@ enum mdss_qos_settings {
MDSS_QOS_TS_PREFILL,
MDSS_QOS_REMAPPER,
MDSS_QOS_IB_NOCR,
+ MDSS_QOS_WB2_WRITE_GATHER_EN,
MDSS_QOS_MAX,
};
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index c8b415df4bce..f25a6e185051 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -1434,10 +1434,54 @@ static ssize_t mdss_dp_rda_connected(struct device *dev,
return ret;
}
+
+static ssize_t mdss_dp_sysfs_wta_s3d_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret, s3d_mode;
+ struct mdss_dp_drv_pdata *dp = mdss_dp_get_drvdata(dev);
+
+ if (!dp) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+ ret = kstrtoint(buf, 10, &s3d_mode);
+ if (ret) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, ret);
+ goto end;
+ }
+
+ dp->s3d_mode = s3d_mode;
+ ret = strnlen(buf, PAGE_SIZE);
+ DEV_DBG("%s: %d\n", __func__, dp->s3d_mode);
+end:
+ return ret;
+}
+
+static ssize_t mdss_dp_sysfs_rda_s3d_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct mdss_dp_drv_pdata *dp = mdss_dp_get_drvdata(dev);
+
+ if (!dp) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", dp->s3d_mode);
+ DEV_DBG("%s: '%d'\n", __func__, dp->s3d_mode);
+
+ return ret;
+}
+
static DEVICE_ATTR(connected, S_IRUGO, mdss_dp_rda_connected, NULL);
+static DEVICE_ATTR(s3d_mode, S_IRUGO | S_IWUSR, mdss_dp_sysfs_rda_s3d_mode,
+ mdss_dp_sysfs_wta_s3d_mode);
static struct attribute *mdss_dp_fs_attrs[] = {
&dev_attr_connected.attr,
+ &dev_attr_s3d_mode.attr,
NULL,
};
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index b724aa655424..9a8534677c5e 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -404,6 +404,7 @@ struct mdss_dp_drv_pdata {
struct mutex pd_msg_mutex;
struct mutex hdcp_mutex;
bool cable_connected;
+ u32 s3d_mode;
u32 aux_cmd_busy;
u32 aux_cmd_i2c;
int aux_trans_num;
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index fe6ce30d0c89..e8d68059581f 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -27,7 +27,6 @@
#include "mdss_dba_utils.h"
#define DT_CMD_HDR 6
-#define MIN_REFRESH_RATE 48
#define DEFAULT_MDP_TRANSFER_TIME 14000
#define VSYNC_DELAY msecs_to_jiffies(17)
@@ -1907,10 +1906,10 @@ static int mdss_dsi_set_refresh_rate_range(struct device_node *pan_node,
__func__, __LINE__);
/*
- * Since min refresh rate is not specified when dynamic
- * fps is enabled, using minimum as 30
+ * If min refresh rate is not specified, set it to the
+ * default panel refresh rate.
*/
- pinfo->min_fps = MIN_REFRESH_RATE;
+ pinfo->min_fps = pinfo->mipi.frame_rate;
rc = 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index f35156a2cfcb..1b5c1b7d51e1 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1364,6 +1364,7 @@ static void mdss_mdp_memory_retention_enter(void)
}
}
+ __mdss_mdp_reg_access_clk_enable(mdata, true);
if (mdss_mdp_clk) {
clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
@@ -1375,6 +1376,7 @@ static void mdss_mdp_memory_retention_enter(void)
clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_PERIPH_OFF_SET);
clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_NORETAIN_PERIPH);
}
+ __mdss_mdp_reg_access_clk_enable(mdata, false);
}
static void mdss_mdp_memory_retention_exit(void)
@@ -1396,7 +1398,7 @@ static void mdss_mdp_memory_retention_exit(void)
}
}
-
+ __mdss_mdp_reg_access_clk_enable(mdata, true);
if (mdss_mdp_clk) {
clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
@@ -1408,6 +1410,7 @@ static void mdss_mdp_memory_retention_exit(void)
clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_RETAIN_PERIPH);
clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_PERIPH_OFF_CLEAR);
}
+ __mdss_mdp_reg_access_clk_enable(mdata, false);
}
/**
@@ -1981,6 +1984,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
set_bit(MDSS_QOS_IB_NOCR, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_WB2_WRITE_GATHER_EN, mdata->mdss_qos_map);
set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
mdata->mdss_caps_map);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index ed55057e1d7e..abc048866313 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -4315,9 +4315,11 @@ void mdss_mdp_check_ctl_reset_status(struct mdss_mdp_ctl *ctl)
return;
pr_debug("hw ctl reset is set for ctl:%d\n", ctl->num);
- status = mdss_mdp_poll_ctl_reset_status(ctl, 5);
+ /* poll for at least ~1 frame */
+ status = mdss_mdp_poll_ctl_reset_status(ctl, 320);
if (status) {
- pr_err("hw recovery is not complete for ctl:%d\n", ctl->num);
+ pr_err("hw recovery is not complete for ctl:%d status:0x%x\n",
+ ctl->num, status);
MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt", "dbg_bus",
"vbif_dbg_bus", "panic");
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
index f54cbb575535..5d8c83126b1b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -829,6 +829,7 @@ enum mdss_mdp_pingpong_index {
#define MMSS_VBIF_CLKON 0x4
#define MMSS_VBIF_RD_LIM_CONF 0x0B0
#define MMSS_VBIF_WR_LIM_CONF 0x0C0
+#define MDSS_VBIF_WRITE_GATHER_EN 0x0AC
#define MMSS_VBIF_XIN_HALT_CTRL0 0x200
#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index cee168a33f85..3aadb3950442 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -411,22 +411,28 @@ static void mdss_mdp_video_avr_vtotal_setup(struct mdss_mdp_ctl *ctl,
if (test_bit(MDSS_CAPS_AVR_SUPPORTED, mdata->mdss_caps_map)) {
struct mdss_panel_data *pdata = ctl->panel_data;
- u32 hsync_period = p->hsync_pulse_width + p->h_back_porch +
- p->width + p->h_front_porch;
- u32 vsync_period = p->vsync_pulse_width + p->v_back_porch +
- p->height + p->v_front_porch;
- u32 min_fps = pdata->panel_info.min_fps;
- u32 diff_fps = abs(pdata->panel_info.default_fps - min_fps);
- u32 vtotal = mdss_panel_get_vtotal(&pdata->panel_info);
-
- int add_porches = mult_frac(vtotal, diff_fps, min_fps);
-
- u32 vsync_period_slow = vsync_period + add_porches;
- u32 avr_vtotal = vsync_period_slow * hsync_period;
+ struct mdss_panel_info *pinfo = &pdata->panel_info;
+ u32 avr_vtotal = pinfo->saved_avr_vtotal;
+
+ if (!pinfo->saved_avr_vtotal) {
+ u32 hsync_period = p->hsync_pulse_width +
+ p->h_back_porch + p->width + p->h_front_porch;
+ u32 vsync_period = p->vsync_pulse_width +
+ p->v_back_porch + p->height + p->v_front_porch;
+ u32 min_fps = pinfo->min_fps;
+ u32 default_fps = mdss_panel_get_framerate(pinfo);
+ u32 diff_fps = abs(default_fps - min_fps);
+ u32 vtotal = mdss_panel_get_vtotal(pinfo);
+ int add_porches = mult_frac(vtotal, diff_fps, min_fps);
+ u32 vsync_period_slow = vsync_period + add_porches;
+
+ avr_vtotal = vsync_period_slow * hsync_period;
+ pinfo->saved_avr_vtotal = avr_vtotal;
+ }
mdp_video_write(ctx, MDSS_MDP_REG_INTF_AVR_VTOTAL, avr_vtotal);
- MDSS_XLOG(min_fps, vsync_period, vsync_period_slow, avr_vtotal);
+ MDSS_XLOG(pinfo->min_fps, pinfo->default_fps, avr_vtotal);
}
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index 9026b99cd87a..40b10e368309 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -488,6 +488,10 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
mdss_mdp_writeback_cwb_overflow, sctl);
}
+ if (test_bit(MDSS_QOS_WB2_WRITE_GATHER_EN, ctl->mdata->mdss_qos_map))
+ MDSS_VBIF_WRITE(ctl->mdata, MDSS_VBIF_WRITE_GATHER_EN,
+ BIT(6), false);
+
if (ctl->mdata->default_ot_wr_limit || ctl->mdata->default_ot_rd_limit)
mdss_mdp_set_ot_limit_wb(ctx, false);
@@ -907,6 +911,10 @@ static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
return ret;
}
+ if (test_bit(MDSS_QOS_WB2_WRITE_GATHER_EN, ctl->mdata->mdss_qos_map))
+ MDSS_VBIF_WRITE(ctl->mdata, MDSS_VBIF_WRITE_GATHER_EN,
+ BIT(6), false);
+
mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
mdss_mdp_writeback_intr_done, ctl);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index efd09302de45..ee1cd8fd623e 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -2152,7 +2152,7 @@ static void pp_dspp_opmode_config(struct mdss_mdp_ctl *ctl, u32 num,
bool pa_side_enabled = false;
side = pp_num_to_side(ctl, num);
- if (side < 0)
+ if (side < 0 || !pp_sts)
return;
if (pp_driver_ops.pp_opmode_config) {
@@ -2217,7 +2217,7 @@ static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer)
{
u32 ad_flags, flags, dspp_num, opmode = 0, ad_bypass;
struct mdp_pgc_lut_data *pgc_config;
- struct pp_sts_type *pp_sts;
+ struct pp_sts_type *pp_sts = NULL;
char __iomem *base, *addr = NULL;
int ret = 0;
struct mdss_data_type *mdata;
diff --git a/drivers/video/fbdev/msm/mdss_panel.c b/drivers/video/fbdev/msm/mdss_panel.c
index 61911810b2c0..97025b3a9c23 100644
--- a/drivers/video/fbdev/msm/mdss_panel.c
+++ b/drivers/video/fbdev/msm/mdss_panel.c
@@ -644,7 +644,6 @@ void mdss_panel_info_from_timing(struct mdss_panel_timing *pt,
pinfo->dsc_enc_total = pt->dsc_enc_total;
pinfo->fbc = pt->fbc;
pinfo->compression_mode = pt->compression_mode;
- pinfo->default_fps = pinfo->mipi.frame_rate;
pinfo->roi_alignment = pt->roi_alignment;
pinfo->te = pt->te;
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index 1137c4475cab..81b6fa7d35b3 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -722,6 +722,9 @@ struct mdss_panel_info {
/* debugfs structure for the panel */
struct mdss_panel_debugfs_info *debugfs_info;
+
+ /* stores initial adaptive variable refresh vtotal value */
+ u32 saved_avr_vtotal;
};
struct mdss_panel_timing {