summaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/android/binder.c31
-rw-r--r--drivers/char/adsprpc.c2
-rw-r--r--drivers/clk/msm/clock-gpu-cobalt.c3
-rw-r--r--drivers/clk/msm/clock-osm.c451
-rw-r--r--drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c4
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c18
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c247
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h23
-rw-r--r--drivers/clk/qcom/clk-pll.h9
-rw-r--r--drivers/clk/qcom/gcc-msmfalcon.c15
-rw-r--r--drivers/clk/qcom/gpucc-msmfalcon.c14
-rw-r--r--drivers/clk/qcom/vdd-level-falcon.h42
-rw-r--r--drivers/cpufreq/qcom-cpufreq.c32
-rw-r--r--drivers/crypto/Kconfig10
-rw-r--r--drivers/crypto/msm/qce50.c21
-rw-r--r--drivers/crypto/msm/qcedev.c68
-rw-r--r--drivers/devfreq/devfreq.c13
-rw-r--r--drivers/gpio/qpnp-pin.c12
-rw-r--r--drivers/gpu/msm/a5xx_reg.h2
-rw-r--r--drivers/gpu/msm/adreno-gpulist.h2
-rw-r--r--drivers/gpu/msm/adreno.c15
-rw-r--r--drivers/gpu/msm/adreno.h2
-rw-r--r--drivers/gpu/msm/adreno_a3xx.c40
-rw-r--r--drivers/gpu/msm/adreno_a4xx_preempt.c2
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c26
-rw-r--r--drivers/gpu/msm/adreno_perfcounter.c32
-rw-r--r--drivers/gpu/msm/kgsl.c49
-rw-r--r--drivers/gpu/msm/kgsl.h3
-rw-r--r--drivers/gpu/msm/kgsl_debugfs.c93
-rw-r--r--drivers/gpu/msm/kgsl_mmu.c20
-rw-r--r--drivers/gpu/msm/kgsl_pool.c132
-rw-r--r--drivers/gpu/msm/kgsl_pool.h2
-rw-r--r--drivers/gpu/msm/kgsl_sharedmem.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c3
-rw-r--r--drivers/iio/adc/qcom-rradc.c19
-rw-r--r--drivers/iio/adc/qcom-tadc.c2
-rw-r--r--drivers/iommu/arm-smmu.c50
-rw-r--r--drivers/iommu/iommu-debug.c2
-rw-r--r--drivers/leds/leds-qpnp-flash-v2.c368
-rw-r--r--drivers/media/platform/msm/camera_v2/common/cam_soc_api.c9
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c6
-rw-r--r--drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c4
-rw-r--r--drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c4
-rw-r--r--drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c42
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c2
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c285
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h11
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c34
-rw-r--r--drivers/media/platform/msm/vidc/hfi_packetization.c27
-rw-r--r--drivers/media/platform/msm/vidc/msm_v4l2_vidc.c20
-rw-r--r--drivers/media/platform/msm/vidc/msm_venc.c47
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc.c4
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.c14
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_debug.c2
-rw-r--r--drivers/media/platform/msm/vidc/venus_hfi.c18
-rw-r--r--drivers/media/platform/msm/vidc/venus_hfi.h3
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi_api.h8
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi_helper.h10
-rw-r--r--drivers/mfd/wcd9xxx-utils.c23
-rw-r--r--drivers/misc/hdcp.c59
-rw-r--r--drivers/misc/qseecom.c333
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.c55
-rw-r--r--drivers/nfc/nq-nci.c104
-rw-r--r--drivers/nfc/nq-nci.h9
-rw-r--r--drivers/pci/host/pci-msm.c37
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa.c6
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_dp.c32
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_i.h2
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c23
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dp.c27
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c25
-rw-r--r--drivers/platform/msm/msm_11ad/msm_11ad.c48
-rw-r--r--drivers/power/qcom-charger/fg-core.h44
-rw-r--r--drivers/power/qcom-charger/fg-memif.c193
-rw-r--r--drivers/power/qcom-charger/fg-reg.h25
-rw-r--r--drivers/power/qcom-charger/fg-util.c76
-rw-r--r--drivers/power/qcom-charger/qpnp-fg-gen3.c516
-rw-r--r--drivers/power/qcom-charger/qpnp-smb2.c94
-rw-r--r--drivers/power/qcom-charger/smb-lib.c415
-rw-r--r--drivers/power/qcom-charger/smb-lib.h24
-rw-r--r--drivers/power/qcom-charger/smb-reg.h8
-rw-r--r--drivers/power/qcom-charger/smb138x-charger.c4
-rw-r--r--drivers/regulator/qpnp-labibb-regulator.c341
-rw-r--r--drivers/soc/qcom/glink.c1
-rw-r--r--drivers/soc/qcom/memshare/msm_memshare.c14
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c4
-rw-r--r--drivers/soc/qcom/pil-q6v5-mss.c3
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_tal_glink.c51
-rw-r--r--drivers/soc/qcom/service-locator.c2
-rw-r--r--drivers/soc/qcom/service-notifier.c5
-rw-r--r--drivers/soc/qcom/sysmon.c6
-rw-r--r--drivers/spmi/spmi-pmic-arb.c51
-rw-r--r--drivers/usb/core/hub.c11
-rw-r--r--drivers/usb/gadget/configfs.c9
-rw-r--r--drivers/usb/gadget/function/f_mtp.c14
-rw-r--r--drivers/usb/host/xhci-hub.c4
-rw-r--r--drivers/usb/host/xhci-mem.c30
-rw-r--r--drivers/usb/host/xhci-plat.c4
-rw-r--r--drivers/video/fbdev/msm/mdss.h13
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.c144
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.h5
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_aux.c18
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_util.c56
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_util.h11
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi.h1
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_panel.c121
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c119
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.h34
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_ctl.c56
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_debug.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c18
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_layer.c126
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_overlay.c340
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pipe.c22
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_util.c31
-rw-r--r--drivers/video/fbdev/msm/mdss_panel.h89
-rw-r--r--drivers/video/fbdev/msm/mdss_smmu.c37
-rw-r--r--drivers/video/fbdev/msm/mdss_smmu.h34
124 files changed, 5325 insertions, 1142 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f80cfc36a354..a3f458fd2238 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1074,7 +1074,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- uint32_t desc)
+ uint32_t desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1082,12 +1082,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc)
+ if (desc < ref->desc) {
n = n->rb_left;
- else if (desc > ref->desc)
+ } else if (desc > ref->desc) {
n = n->rb_right;
- else
+ } else if (need_strong_ref && !ref->strong) {
+ binder_user_error("tried to use weak ref as strong ref\n");
+ return NULL;
+ } else {
return ref;
+ }
}
return NULL;
}
@@ -1357,7 +1361,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
@@ -1452,7 +1457,7 @@ static void binder_transaction(struct binder_proc *proc,
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle);
+ ref = binder_get_ref(proc, tr->target.handle, true);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
@@ -1649,7 +1654,9 @@ static void binder_transaction(struct binder_proc *proc,
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
fp->handle = ref->desc;
+ fp->cookie = 0;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
@@ -1661,7 +1668,8 @@ static void binder_transaction(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@@ -1696,7 +1704,9 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
+ fp->binder = 0;
fp->handle = new_ref->desc;
+ fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
@@ -1750,6 +1760,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
+ fp->binder = 0;
fp->handle = target_fd;
} break;
@@ -1880,7 +1891,9 @@ static int binder_thread_write(struct binder_proc *proc,
ref->desc);
}
} else
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target,
+ cmd == BC_ACQUIRE ||
+ cmd == BC_RELEASE);
if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target);
@@ -2076,7 +2089,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index ef8aaac6e0a2..7767086df849 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -490,7 +490,7 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map)
ion_free(fl->apps->client, map->handle);
if (sess->smmu.enabled) {
if (map->size || map->phys)
- msm_dma_unmap_sg(fl->sctx->dev,
+ msm_dma_unmap_sg(sess->dev,
map->table->sgl,
map->table->nents, DMA_BIDIRECTIONAL,
map->buf);
diff --git a/drivers/clk/msm/clock-gpu-cobalt.c b/drivers/clk/msm/clock-gpu-cobalt.c
index 9d93351a083e..7cec9be1f42c 100644
--- a/drivers/clk/msm/clock-gpu-cobalt.c
+++ b/drivers/clk/msm/clock-gpu-cobalt.c
@@ -173,6 +173,7 @@ static struct clk_freq_tbl ftbl_gfx3d_clk_src_v2[] = {
F_SLEW( 515000000, 1030000000, gpu_pll0_pll_out_even, 1, 0, 0),
F_SLEW( 596000000, 1192000000, gpu_pll0_pll_out_even, 1, 0, 0),
F_SLEW( 670000000, 1340000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 710000000, 1420000000, gpu_pll0_pll_out_even, 1, 0, 0),
F_END
};
@@ -611,7 +612,7 @@ static void msm_gfxcc_hamster_fixup(void)
static void msm_gfxcc_cobalt_v2_fixup(void)
{
- gpu_pll0_pll.c.fmax[VDD_DIG_MIN] = 1340000500;
+ gpu_pll0_pll.c.fmax[VDD_DIG_MIN] = 1420000500;
gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_v2;
}
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 5391ef456aae..3e45aee1c0f7 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -49,6 +49,7 @@ enum clk_osm_bases {
OSM_BASE,
PLL_BASE,
EFUSE_BASE,
+ ACD_BASE,
NUM_BASES,
};
@@ -228,11 +229,43 @@ enum clk_osm_trace_packet_id {
#define MSMCOBALTV2_PWRCL_BOOT_RATE 1555200000
#define MSMCOBALTV2_PERFCL_BOOT_RATE 1728000000
+/* ACD registers */
+#define ACD_HW_VERSION 0x0
+#define ACDCR 0x4
+#define ACDTD 0x8
+#define ACDSSCR 0x28
+#define ACD_EXTINT_CFG 0x30
+#define ACD_DCVS_SW 0x34
+#define ACD_GFMUX_CFG 0x3c
+#define ACD_READOUT_CFG 0x48
+#define ACD_AUTOXFER_CFG 0x80
+#define ACD_AUTOXFER 0x84
+#define ACD_AUTOXFER_CTL 0x88
+#define ACD_AUTOXFER_STATUS 0x8c
+#define ACD_WRITE_CTL 0x90
+#define ACD_WRITE_STATUS 0x94
+#define ACD_READOUT 0x98
+
+#define ACD_MASTER_ONLY_REG_ADDR 0x80
+#define ACD_WRITE_CTL_UPDATE_EN BIT(0)
+#define ACD_WRITE_CTL_SELECT_SHIFT 1
+#define ACD_GFMUX_CFG_SELECT BIT(0)
+#define ACD_AUTOXFER_START_CLEAR 0
+#define ACD_AUTOXFER_START_SET BIT(0)
+#define AUTO_XFER_DONE_MASK BIT(0)
+#define ACD_DCVS_SW_DCVS_IN_PRGR_SET BIT(0)
+#define ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR 0
+#define ACD_LOCAL_TRANSFER_TIMEOUT_NS 500
+
static void __iomem *virt_base;
static void __iomem *debug_base;
#define lmh_lite_clk_src_source_val 1
+#define ACD_REG_RELATIVE_ADDR(addr) (addr / 4)
+#define ACD_REG_RELATIVE_ADDR_BITMASK(addr) \
+ (1 << (ACD_REG_RELATIVE_ADDR(addr)))
+
#define FIXDIV(div) (div ? (2 * (div) - 1) : (0))
#define F(f, s, div, m, n) \
@@ -341,6 +374,14 @@ struct clk_osm {
u32 apm_ctrl_status;
u32 osm_clk_rate;
u32 xo_clk_rate;
+ u32 acd_td;
+ u32 acd_cr;
+ u32 acd_sscr;
+ u32 acd_extint0_cfg;
+ u32 acd_extint1_cfg;
+ u32 acd_autoxfer_ctl;
+ u32 acd_debugfs_addr;
+ bool acd_init;
bool secure_init;
bool red_fsm_en;
bool boost_fsm_en;
@@ -394,6 +435,161 @@ static inline int clk_osm_mb(struct clk_osm *c, int base)
return readl_relaxed_no_log((char *)c->vbases[base] + VERSION_REG);
}
+static inline int clk_osm_acd_mb(struct clk_osm *c)
+{
+ return readl_relaxed_no_log((char *)c->vbases[ACD_BASE] +
+ ACD_HW_VERSION);
+}
+
+static inline void clk_osm_acd_master_write_reg(struct clk_osm *c,
+ u32 val, u32 offset)
+{
+ writel_relaxed(val, (char *)c->vbases[ACD_BASE] + offset);
+}
+
+static int clk_osm_acd_local_read_reg(struct clk_osm *c, u32 offset)
+{
+ u32 reg = 0;
+ int timeout;
+
+ if (offset >= ACD_MASTER_ONLY_REG_ADDR) {
+ pr_err("ACD register at offset=0x%x not locally readable\n",
+ offset);
+ return -EINVAL;
+ }
+
+ /* Set select field in read control register */
+ writel_relaxed(ACD_REG_RELATIVE_ADDR(offset),
+ (char *)c->vbases[ACD_BASE] + ACD_READOUT_CFG);
+
+ /* Clear write control register */
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Set select and update_en fields in write control register */
+ reg = (ACD_REG_RELATIVE_ADDR(ACD_READOUT_CFG)
+ << ACD_WRITE_CTL_SELECT_SHIFT)
+ | ACD_WRITE_CTL_UPDATE_EN;
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Ensure writes complete before polling */
+ clk_osm_acd_mb(c);
+
+ /* Poll write status register */
+ for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS; timeout > 0;
+ timeout -= 100) {
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+ + ACD_WRITE_STATUS);
+ if ((reg & (ACD_REG_RELATIVE_ADDR_BITMASK(ACD_READOUT_CFG))))
+ break;
+ ndelay(100);
+ }
+
+ if (!timeout) {
+ pr_err("local read timed out, offset=0x%x status=0x%x\n",
+ offset, reg);
+ return -ETIMEDOUT;
+ }
+
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+ + ACD_READOUT);
+ return reg;
+}
+
+static int clk_osm_acd_local_write_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+ u32 reg = 0;
+ int timeout;
+
+ if (offset >= ACD_MASTER_ONLY_REG_ADDR) {
+ pr_err("ACD register at offset=0x%x not transferrable\n",
+ offset);
+ return -EINVAL;
+ }
+
+ /* Clear write control register */
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Set select and update_en fields in write control register */
+ reg = (ACD_REG_RELATIVE_ADDR(offset) << ACD_WRITE_CTL_SELECT_SHIFT)
+ | ACD_WRITE_CTL_UPDATE_EN;
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Ensure writes complete before polling */
+ clk_osm_acd_mb(c);
+
+ /* Poll write status register */
+ for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS; timeout > 0;
+ timeout -= 100) {
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+ + ACD_WRITE_STATUS);
+ if ((reg & (ACD_REG_RELATIVE_ADDR_BITMASK(offset))))
+ break;
+ ndelay(100);
+ }
+
+ if (!timeout) {
+ pr_err("local write timed out, offset=0x%x val=0x%x status=0x%x\n",
+ offset, val, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int clk_osm_acd_master_write_through_reg(struct clk_osm *c,
+ u32 val, u32 offset)
+{
+ writel_relaxed(val, (char *)c->vbases[ACD_BASE] + offset);
+
+ /* Ensure writes complete before transfer to local copy */
+ clk_osm_acd_mb(c);
+
+ return clk_osm_acd_local_write_reg(c, val, offset);
+}
+
+static int clk_osm_acd_auto_local_write_reg(struct clk_osm *c, u32 mask)
+{
+ u32 numregs, bitmask = mask;
+ u32 reg = 0;
+ int timeout;
+
+ /* count number of bits set in register mask */
+ for (numregs = 0; bitmask; numregs++)
+ bitmask &= bitmask - 1;
+
+	/* Program auto-transfer mask */
+ writel_relaxed(mask, (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER_CFG);
+
+ /* Clear start field in auto-transfer register */
+ writel_relaxed(ACD_AUTOXFER_START_CLEAR,
+ (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER);
+
+ /* Set start field in auto-transfer register */
+ writel_relaxed(ACD_AUTOXFER_START_SET,
+ (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER);
+
+ /* Ensure writes complete before polling */
+ clk_osm_acd_mb(c);
+
+ /* Poll auto-transfer status register */
+ for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS * numregs;
+ timeout > 0; timeout -= 100) {
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+ + ACD_AUTOXFER_STATUS);
+ if (reg & AUTO_XFER_DONE_MASK)
+ break;
+ ndelay(100);
+ }
+
+ if (!timeout) {
+ pr_err("local register auto-transfer timed out, mask=0x%x registers=%d status=0x%x\n",
+ mask, numregs, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
{
u64 temp;
@@ -813,6 +1009,74 @@ static int clk_osm_parse_dt_configs(struct platform_device *pdev)
LLM_SW_OVERRIDE_CNT + i,
&perfcl_clk.llm_sw_overr[i]);
+ if (pwrcl_clk.acd_init || perfcl_clk.acd_init) {
+ rc = of_property_read_u32_array(of, "qcom,acdtd-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdtd-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_td = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_td = array[perfcl_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdcr-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdcr-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_cr = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_cr = array[perfcl_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdsscr-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdsscr-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_sscr = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_sscr = array[perfcl_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdextint0-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdextint0-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_extint0_cfg = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_extint0_cfg = array[perfcl_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdextint1-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdextint1-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_extint1_cfg = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_extint1_cfg = array[perfcl_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdautoxfer-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdautoxfer-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_autoxfer_ctl = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_autoxfer_ctl = array[perfcl_clk.cluster_num];
+ }
+
rc = of_property_read_u32(of, "qcom,xo-clk-rate",
&pwrcl_clk.xo_clk_rate);
if (rc) {
@@ -1037,6 +1301,40 @@ static int clk_osm_resources_init(struct platform_device *pdev)
perfcl_clk.vbases[EFUSE_BASE] = vbase;
}
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "pwrcl_acd");
+ if (res) {
+ pbase = (unsigned long)res->start;
+ vbase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map in pwrcl_acd base\n");
+ return -ENOMEM;
+ }
+ pwrcl_clk.pbases[ACD_BASE] = pbase;
+ pwrcl_clk.vbases[ACD_BASE] = vbase;
+ pwrcl_clk.acd_init = true;
+ } else {
+ pwrcl_clk.acd_init = false;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "perfcl_acd");
+ if (res) {
+ pbase = (unsigned long)res->start;
+ vbase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map in perfcl_acd base\n");
+ return -ENOMEM;
+ }
+ perfcl_clk.pbases[ACD_BASE] = pbase;
+ perfcl_clk.vbases[ACD_BASE] = vbase;
+ perfcl_clk.acd_init = true;
+ } else {
+ perfcl_clk.acd_init = false;
+ }
+
vdd_pwrcl = devm_regulator_get(&pdev->dev, "vdd-pwrcl");
if (IS_ERR(vdd_pwrcl)) {
rc = PTR_ERR(vdd_pwrcl);
@@ -2402,6 +2700,55 @@ DEFINE_SIMPLE_ATTRIBUTE(debugfs_perf_state_deviation_corrected_irq_fops,
debugfs_set_perf_state_deviation_corrected_irq,
"%llu\n");
+static int debugfs_get_debug_reg(void *data, u64 *val)
+{
+ struct clk_osm *c = data;
+
+ if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
+ *val = readl_relaxed((char *)c->vbases[ACD_BASE] +
+ c->acd_debugfs_addr);
+ else
+ *val = clk_osm_acd_local_read_reg(c, c->acd_debugfs_addr);
+ return 0;
+}
+
+static int debugfs_set_debug_reg(void *data, u64 val)
+{
+ struct clk_osm *c = data;
+
+ if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
+ clk_osm_acd_master_write_reg(c, val, c->acd_debugfs_addr);
+ else
+ clk_osm_acd_master_write_through_reg(c, val,
+ c->acd_debugfs_addr);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_fops,
+ debugfs_get_debug_reg,
+ debugfs_set_debug_reg,
+ "0x%llx\n");
+
+static int debugfs_get_debug_reg_addr(void *data, u64 *val)
+{
+ struct clk_osm *c = data;
+
+ *val = c->acd_debugfs_addr;
+ return 0;
+}
+
+static int debugfs_set_debug_reg_addr(void *data, u64 val)
+{
+ struct clk_osm *c = data;
+
+ c->acd_debugfs_addr = val;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_addr_fops,
+ debugfs_get_debug_reg_addr,
+ debugfs_set_debug_reg_addr,
+ "%llu\n");
+
static void populate_debugfs_dir(struct clk_osm *c)
{
struct dentry *temp;
@@ -2493,6 +2840,24 @@ static void populate_debugfs_dir(struct clk_osm *c)
goto exit;
}
+ temp = debugfs_create_file("acd_debug_reg",
+ S_IRUGO | S_IWUSR,
+ c->debugfs, c,
+ &debugfs_acd_debug_reg_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ pr_err("debugfs_acd_debug_reg_fops debugfs file creation failed\n");
+ goto exit;
+ }
+
+ temp = debugfs_create_file("acd_debug_reg_addr",
+ S_IRUGO | S_IWUSR,
+ c->debugfs, c,
+ &debugfs_acd_debug_reg_addr_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ pr_err("debugfs_acd_debug_reg_addr_fops debugfs file creation failed\n");
+ goto exit;
+ }
+
exit:
if (IS_ERR_OR_NULL(temp))
debugfs_remove_recursive(c->debugfs);
@@ -2537,6 +2902,81 @@ static int clk_osm_panic_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
+static int clk_osm_acd_init(struct clk_osm *c)
+{
+
+ int rc = 0;
+ u32 auto_xfer_mask = 0;
+
+ if (!c->acd_init)
+ return 0;
+
+ c->acd_debugfs_addr = ACD_HW_VERSION;
+
+ /* Program ACD tunable-length delay register */
+ clk_osm_acd_master_write_reg(c, c->acd_td, ACDTD);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDTD);
+
+ /* Program ACD control register */
+ clk_osm_acd_master_write_reg(c, c->acd_cr, ACDCR);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDCR);
+
+ /* Program ACD soft start control register */
+ clk_osm_acd_master_write_reg(c, c->acd_sscr, ACDSSCR);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDSSCR);
+
+ /* Program initial ACD external interface configuration register */
+ clk_osm_acd_master_write_reg(c, c->acd_extint0_cfg, ACD_EXTINT_CFG);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_EXTINT_CFG);
+
+ /* Program ACD auto-register transfer control register */
+ clk_osm_acd_master_write_reg(c, c->acd_autoxfer_ctl, ACD_AUTOXFER_CTL);
+
+ /* Ensure writes complete before transfers to local copy */
+ clk_osm_acd_mb(c);
+
+ /* Transfer master copies */
+ rc = clk_osm_acd_auto_local_write_reg(c, auto_xfer_mask);
+ if (rc)
+ return rc;
+
+ /* Switch CPUSS clock source to ACD clock */
+ rc = clk_osm_acd_master_write_through_reg(c, ACD_GFMUX_CFG_SELECT,
+ ACD_GFMUX_CFG);
+ if (rc)
+ return rc;
+
+ /* Program ACD_DCVS_SW */
+ rc = clk_osm_acd_master_write_through_reg(c,
+ ACD_DCVS_SW_DCVS_IN_PRGR_SET,
+ ACD_DCVS_SW);
+ if (rc)
+ return rc;
+
+ rc = clk_osm_acd_master_write_through_reg(c,
+ ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR,
+ ACD_DCVS_SW);
+ if (rc)
+ return rc;
+
+ udelay(1);
+
+ /* Program final ACD external interface configuration register */
+ rc = clk_osm_acd_master_write_through_reg(c, c->acd_extint1_cfg,
+ ACD_EXTINT_CFG);
+ if (rc)
+ return rc;
+
+ /*
+ * ACDCR, ACDTD, ACDSSCR, ACD_EXTINT_CFG, ACD_GFMUX_CFG
+ * must be copied from master to local copy on PC exit.
+ */
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_GFMUX_CFG);
+ clk_osm_acd_master_write_reg(c, auto_xfer_mask, ACD_AUTOXFER_CFG);
+
+ return 0;
+}
+
static unsigned long init_rate = 300000000;
static unsigned long osm_clk_init_rate = 200000000;
@@ -2717,6 +3157,17 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
clk_osm_setup_cluster_pll(&perfcl_clk);
}
+ rc = clk_osm_acd_init(&pwrcl_clk);
+ if (rc) {
+ pr_err("failed to initialize ACD for pwrcl, rc=%d\n", rc);
+ return rc;
+ }
+ rc = clk_osm_acd_init(&perfcl_clk);
+ if (rc) {
+ pr_err("failed to initialize ACD for perfcl, rc=%d\n", rc);
+ return rc;
+ }
+
spin_lock_init(&pwrcl_clk.lock);
spin_lock_init(&perfcl_clk.lock);
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
index a574a9cd2b5a..93bbcf5d40f5 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
@@ -275,7 +275,7 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
MDSS_PLL_REG_W(dp_res->pll_base,
- QSERDES_COM_DIV_FRAC_START3_MODE0, 0xa0);
+ QSERDES_COM_DIV_FRAC_START3_MODE0, 0x0a);
MDSS_PLL_REG_W(dp_res->pll_base,
QSERDES_COM_CMN_CONFIG, 0x12);
MDSS_PLL_REG_W(dp_res->pll_base,
@@ -733,7 +733,7 @@ unsigned long dp_vco_get_rate(struct clk *c)
{
struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
int rc;
- u32 div, hsclk_div, link2xclk_div;
+ u32 div, hsclk_div, link2xclk_div = 0;
u64 vco_rate;
struct mdss_pll_resources *pll = vco->priv;
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c b/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
index 4b2d8bba0940..299934c86d05 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
@@ -205,18 +205,34 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_cobalt *pll,
struct dsi_pll_regs *regs = &pll->reg_setup;
u64 target_freq;
u64 fref = rsc->vco_ref_clk_rate;
- u32 computed_output_div, div_log;
+ u32 computed_output_div, div_log = 0;
u64 pll_freq;
u64 divider;
u64 dec, dec_multiple;
u32 frac;
u64 multiplier;
+ u32 i;
target_freq = rsc->vco_current_rate;
pr_debug("target_freq = %llu\n", target_freq);
if (config->div_override) {
computed_output_div = config->output_div;
+
+ /*
+ * Computed_output_div = 2 ^ div_log
+ * To get div_log from output div just get the index of the
+ * 1 bit in the value.
+ * div_log ranges from 0-3. so check the 4 lsbs
+ */
+
+ for (i = 0; i < 4; i++) {
+ if (computed_output_div & (1 << i)) {
+ div_log = i;
+ break;
+ }
+ }
+
} else {
if (target_freq < MHZ_375) {
computed_output_div = 8;
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 4d46d65898d1..085b9acfb9d5 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,28 +20,34 @@
#include "clk-alpha-pll.h"
#define PLL_MODE 0x00
-# define PLL_OUTCTRL BIT(0)
-# define PLL_BYPASSNL BIT(1)
-# define PLL_RESET_N BIT(2)
-# define PLL_LOCK_COUNT_SHIFT 8
-# define PLL_LOCK_COUNT_MASK 0x3f
-# define PLL_BIAS_COUNT_SHIFT 14
-# define PLL_BIAS_COUNT_MASK 0x3f
-# define PLL_VOTE_FSM_ENA BIT(20)
-# define PLL_VOTE_FSM_RESET BIT(21)
-# define PLL_ACTIVE_FLAG BIT(30)
-# define PLL_LOCK_DET BIT(31)
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+#define PLL_LOCK_COUNT_SHIFT 8
+#define PLL_LOCK_COUNT_MASK 0x3f
+#define PLL_LOCK_COUNT_VAL 0x0
+#define PLL_BIAS_COUNT_SHIFT 14
+#define PLL_BIAS_COUNT_MASK 0x3f
+#define PLL_BIAS_COUNT_VAL 0x6
+#define PLL_LATCH_INTERFACE BIT(11)
+#define PLL_VOTE_FSM_ENA BIT(20)
+#define PLL_VOTE_FSM_RESET BIT(21)
+#define PLL_UPDATE BIT(22)
+#define PLL_HW_UPDATE_LOGIC_BYPASS BIT(23)
+#define PLL_ALPHA_EN BIT(24)
+#define PLL_ACTIVE_FLAG BIT(30)
+#define PLL_LOCK_DET BIT(31)
+#define PLL_ACK_LATCH BIT(29)
#define PLL_L_VAL 0x04
#define PLL_ALPHA_VAL 0x08
#define PLL_ALPHA_VAL_U 0x0c
#define PLL_USER_CTL 0x10
-# define PLL_POST_DIV_SHIFT 8
-# define PLL_POST_DIV_MASK 0xf
-# define PLL_ALPHA_EN BIT(24)
-# define PLL_VCO_SHIFT 20
-# define PLL_VCO_MASK 0x3
+#define PLL_POST_DIV_SHIFT 8
+#define PLL_POST_DIV_MASK 0xf
+#define PLL_VCO_SHIFT 20
+#define PLL_VCO_MASK 0x3
#define PLL_USER_CTL_U 0x14
@@ -79,7 +85,7 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
if (ret)
return ret;
- if (inverse && (val & mask))
+ if (inverse && !(val & mask))
return 0;
else if ((val & mask) == mask)
return 0;
@@ -106,6 +112,10 @@ static int wait_for_pll_offline(struct clk_alpha_pll *pll, u32 mask)
return wait_for_pll(pll, mask, 0, "offline");
}
+static int wait_for_pll_latch_ack(struct clk_alpha_pll *pll, u32 mask)
+{
+ return wait_for_pll(pll, mask, 0, "latch_ack");
+}
/* alpha pll with hwfsm support */
@@ -114,29 +124,103 @@ static int wait_for_pll_offline(struct clk_alpha_pll *pll, u32 mask)
#define PLL_OFFLINE_ACK BIT(28)
#define PLL_ACTIVE_FLAG BIT(30)
+static void clk_alpha_set_fsm_mode(struct clk_alpha_pll *pll)
+{
+ u32 val;
+
+ regmap_read(pll->clkr.regmap, pll->offset + PLL_MODE, &val);
+
+ /* De-assert reset to FSM */
+ val &= ~PLL_VOTE_FSM_RESET;
+
+ /* Program bias count */
+ val &= ~(PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT);
+ val |= PLL_BIAS_COUNT_VAL << PLL_BIAS_COUNT_SHIFT;
+
+ /* Program lock count */
+ val &= ~(PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT);
+ val |= PLL_LOCK_COUNT_VAL << PLL_LOCK_COUNT_SHIFT;
+
+ /* Enable PLL FSM voting */
+ val |= PLL_VOTE_FSM_ENA;
+
+ regmap_write(pll->clkr.regmap, pll->offset + PLL_MODE, val);
+}
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
- const struct pll_config *config)
+ const struct pll_config *config)
{
u32 val, mask;
- regmap_write(regmap, pll->offset + PLL_CONFIG_CTL,
- config->config_ctl_val);
+ if (config->l)
+ regmap_write(regmap, pll->offset + PLL_L_VAL,
+ config->l);
+ if (config->alpha)
+ regmap_write(regmap, pll->offset + PLL_ALPHA_VAL,
+ config->alpha);
+ if (config->alpha_u)
+ regmap_write(regmap, pll->offset + PLL_ALPHA_VAL_U,
+ config->alpha_u);
+ if (config->config_ctl_val)
+ regmap_write(regmap, pll->offset + PLL_CONFIG_CTL,
+ config->config_ctl_val);
+
+ if (config->main_output_mask || config->aux_output_mask ||
+ config->aux2_output_mask || config->early_output_mask ||
+ config->vco_val || config->alpha_en_mask) {
+
+ val = config->main_output_mask;
+ val |= config->aux_output_mask;
+ val |= config->aux2_output_mask;
+ val |= config->early_output_mask;
+ val |= config->vco_val;
+ val |= config->alpha_en_mask;
+
+ mask = config->main_output_mask;
+ mask |= config->aux_output_mask;
+ mask |= config->aux2_output_mask;
+ mask |= config->early_output_mask;
+ mask |= config->vco_mask;
+ mask |= config->alpha_en_mask;
+
+ regmap_update_bits(regmap, pll->offset + PLL_USER_CTL,
+ mask, val);
+ }
- val = config->main_output_mask;
- val |= config->aux_output_mask;
- val |= config->aux2_output_mask;
- val |= config->early_output_mask;
- val |= config->post_div_val;
+ if (config->post_div_mask) {
+ mask = config->post_div_mask;
+ val = config->post_div_val;
+ regmap_update_bits(regmap, pll->offset + PLL_USER_CTL,
+ mask, val);
+ }
- mask = config->main_output_mask;
- mask |= config->aux_output_mask;
- mask |= config->aux2_output_mask;
- mask |= config->early_output_mask;
- mask |= config->post_div_mask;
+ /* Do not bypass the latch interface */
+ if (pll->flags & SUPPORTS_SLEW)
+ regmap_update_bits(regmap, pll->offset + PLL_USER_CTL_U,
+ PLL_LATCH_INTERFACE, (u32)~PLL_LATCH_INTERFACE);
- regmap_update_bits(regmap, pll->offset + PLL_USER_CTL, mask, val);
+ if (pll->flags & SUPPORTS_DYNAMIC_UPDATE) {
+ regmap_update_bits(regmap, pll->offset + PLL_MODE,
+ PLL_HW_UPDATE_LOGIC_BYPASS,
+ PLL_HW_UPDATE_LOGIC_BYPASS);
+ }
- return;
+ if (config->test_ctl_lo_mask) {
+ mask = config->test_ctl_lo_mask;
+ val = config->test_ctl_lo_val;
+ regmap_update_bits(regmap, pll->offset + PLL_TEST_CTL,
+ mask, val);
+ }
+
+ if (config->test_ctl_hi_mask) {
+ mask = config->test_ctl_hi_mask;
+ val = config->test_ctl_hi_val;
+ regmap_update_bits(regmap, pll->offset + PLL_TEST_CTL_U,
+ mask, val);
+ }
+
+ if (pll->flags & SUPPORTS_FSM_MODE)
+ clk_alpha_set_fsm_mode(pll);
}
static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
@@ -190,8 +274,9 @@ static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
PLL_FSM_ENA, 0);
if (ret)
return;
+
wait_for_pll_disable(pll, PLL_ACTIVE_FLAG);
- return;
+
}
static int clk_alpha_pll_enable(struct clk_hw *hw)
@@ -201,7 +286,6 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
u32 val, mask, off;
off = pll->offset;
-
mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
if (ret)
@@ -339,7 +423,53 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
a >>= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
}
- return alpha_pll_calc_rate(prate, l, a);
+ ctl >>= PLL_POST_DIV_SHIFT;
+ ctl &= PLL_POST_DIV_MASK;
+
+ return alpha_pll_calc_rate(prate, l, a) >> fls(ctl);
+}
+
+static int clk_alpha_pll_dynamic_update(struct clk_alpha_pll *pll)
+{
+ int ret;
+
+ /* Latch the input to the PLL */
+ regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE,
+ PLL_UPDATE, PLL_UPDATE);
+
+ /* Wait for 2 reference cycle before checking ACK bit */
+ udelay(1);
+
+ ret = wait_for_pll_latch_ack(pll, PLL_ACK_LATCH);
+ if (ret)
+ return ret;
+
+ /* Return latch input to 0 */
+ regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE,
+ PLL_UPDATE, (u32)~PLL_UPDATE);
+
+ ret = wait_for_pll_enable(pll, PLL_LOCK_DET);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct pll_vco_data
+ *find_vco_data(const struct pll_vco_data *data,
+ unsigned long rate, size_t size)
+{
+ int i;
+
+ if (!data)
+ return NULL;
+
+ for (i = 0; i < size; i++) {
+ if (rate == data[i].freq)
+ return &data[i];
+ }
+
+ return &data[i - 1];
}
static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -347,16 +477,36 @@ static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
const struct pll_vco *vco;
+ const struct pll_vco_data *data;
+ bool is_enabled;
u32 l, off = pll->offset;
u64 a;
+ unsigned long rrate;
+
+ rrate = alpha_pll_round_rate(rate, prate, &l, &a);
- rate = alpha_pll_round_rate(rate, prate, &l, &a);
- vco = alpha_pll_find_vco(pll, rate);
+ if (rrate != rate) {
+ pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ vco = alpha_pll_find_vco(pll, rrate);
if (!vco) {
pr_err("alpha pll not in a valid vco range\n");
return -EINVAL;
}
+ is_enabled = clk_hw_is_enabled(hw);
+
+ /*
+ * For PLLs that do not support dynamic programming (dynamic_update
+ * is not set), ensure PLL is off before changing rate. For
+ * optimization reasons, assume no downstream clock is actively
+ * using it.
+ */
+ if (is_enabled && !(pll->flags & SUPPORTS_DYNAMIC_UPDATE))
+ hw->init->ops->disable(hw);
+
a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
@@ -367,9 +517,27 @@ static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
PLL_VCO_MASK << PLL_VCO_SHIFT,
vco->val << PLL_VCO_SHIFT);
+ data = find_vco_data(pll->vco_data, rate, pll->num_vco_data);
+ if (data) {
+ if (data->freq == rate)
+ regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL,
+ PLL_POST_DIV_MASK << PLL_POST_DIV_SHIFT,
+ data->post_div_val << PLL_POST_DIV_SHIFT);
+ else
+ regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL,
+ PLL_POST_DIV_MASK << PLL_POST_DIV_SHIFT,
+ 0x0 << PLL_VCO_SHIFT);
+ }
+
regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL, PLL_ALPHA_EN,
PLL_ALPHA_EN);
+ if (is_enabled && (pll->flags & SUPPORTS_DYNAMIC_UPDATE))
+ clk_alpha_pll_dynamic_update(pll);
+
+ if (is_enabled && !(pll->flags & SUPPORTS_DYNAMIC_UPDATE))
+ hw->init->ops->enable(hw);
+
return 0;
}
@@ -381,6 +549,9 @@ static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
u64 a;
unsigned long min_freq, max_freq;
+ if (rate < pll->min_supported_freq)
+ return pll->min_supported_freq;
+
rate = alpha_pll_round_rate(rate, *prate, &l, &a);
if (alpha_pll_find_vco(pll, rate))
return rate;
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 7718bd5beefc..9b1d3ee61cac 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,11 @@
#include "clk-regmap.h"
#include "clk-pll.h"
/* Maps one exact output frequency to the post-divider to program for it. */
struct pll_vco_data {
	unsigned long freq;	/* exact rate this entry applies to, in Hz */
	u8 post_div_val;	/* raw POST_DIV field value for that rate */
};
+
struct pll_vco {
unsigned long min_freq;
unsigned long max_freq;
@@ -28,21 +33,35 @@ struct pll_vco {
* struct clk_alpha_pll - phase locked loop (PLL)
* @offset: base address of registers
* @vco_table: array of VCO settings
+ * @vco_data: array of VCO data settings like post div
* @clkr: regmap clock handle
*/
struct clk_alpha_pll {
	u32 offset;			/* base address of the PLL registers */
	struct pll_config *config;	/* static configuration applied at init */
	const struct pll_vco *vco_table;
	size_t num_vco;
	const struct pll_vco_data *vco_data;	/* per-rate post-div overrides */
	size_t num_vco_data;

	u8 flags;
#define SUPPORTS_FSM_MODE BIT(0)
	/*
	 * Some PLLs support dynamically updating their rate without
	 * disabling the PLL first. Set this flag to enable this support.
	 */
#define SUPPORTS_DYNAMIC_UPDATE BIT(1)
#define SUPPORTS_SLEW BIT(2)

	struct clk_regmap clkr;
#define PLLOUT_MAIN BIT(0)
#define PLLOUT_AUX BIT(1)
#define PLLOUT_AUX2 BIT(2)
#define PLLOUT_EARLY BIT(3)
	u32 pllout_flags;
	/* lowest rate round_rate() will report as supported */
	unsigned long min_supported_freq;
};
/**
diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h
index 1e64fe9326d6..4f779fda785b 100644
--- a/drivers/clk/qcom/clk-pll.h
+++ b/drivers/clk/qcom/clk-pll.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016 The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -70,6 +70,8 @@ struct pll_config {
u16 l;
u32 m;
u32 n;
+ u32 alpha;
+ u32 alpha_u;
u32 vco_val;
u32 vco_mask;
u32 pre_div_val;
@@ -77,11 +79,16 @@ struct pll_config {
u32 post_div_val;
u32 post_div_mask;
u32 mn_ena_mask;
+ u32 alpha_en_mask;
u32 main_output_mask;
u32 aux_output_mask;
u32 aux2_output_mask;
u32 early_output_mask;
u32 config_ctl_val;
+ u32 test_ctl_lo_val;
+ u32 test_ctl_lo_mask;
+ u32 test_ctl_hi_val;
+ u32 test_ctl_hi_mask;
};
void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
diff --git a/drivers/clk/qcom/gcc-msmfalcon.c b/drivers/clk/qcom/gcc-msmfalcon.c
index 42b91d70aa54..b5f7e18cf495 100644
--- a/drivers/clk/qcom/gcc-msmfalcon.c
+++ b/drivers/clk/qcom/gcc-msmfalcon.c
@@ -35,8 +35,8 @@
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
-static DEFINE_VDD_REGULATORS(vdd_dig_ao, VDD_DIG_NUM, 1, vdd_corner, NULL);
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_dig_ao, VDD_DIG_NUM, 1, vdd_corner);
enum {
P_CORE_BI_PLL_TEST_SE,
@@ -2201,7 +2201,7 @@ static struct clk_branch gcc_ufs_axi_hw_ctl_clk = {
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_hw_ctl_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2249,7 +2249,7 @@ static struct clk_branch gcc_ufs_ice_core_hw_ctl_clk = {
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_hw_ctl_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2284,7 +2284,7 @@ static struct clk_branch gcc_ufs_phy_aux_hw_ctl_clk = {
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_hw_ctl_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2355,7 +2355,7 @@ static struct clk_branch gcc_ufs_unipro_core_hw_ctl_clk = {
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_hw_ctl_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2753,9 +2753,8 @@ MODULE_DEVICE_TABLE(of, gcc_falcon_match_table);
static int gcc_falcon_probe(struct platform_device *pdev)
{
- int ret = 0, i;
+ int ret = 0;
struct regmap *regmap;
- struct clk *clk;
regmap = qcom_cc_map(pdev, &gcc_falcon_desc);
if (IS_ERR(regmap))
diff --git a/drivers/clk/qcom/gpucc-msmfalcon.c b/drivers/clk/qcom/gpucc-msmfalcon.c
index f194abb471cd..fe7cff443250 100644
--- a/drivers/clk/qcom/gpucc-msmfalcon.c
+++ b/drivers/clk/qcom/gpucc-msmfalcon.c
@@ -35,8 +35,8 @@
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
#define F_GFX(f, s, h, m, n, sf) { (f), (s), (2 * (h) - 1), (m), (n), (sf) }
-static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
-static DEFINE_VDD_REGULATORS(vdd_mx, VDD_DIG_NUM, 1, vdd_corner, NULL);
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_DIG_NUM, 1, vdd_corner);
static DEFINE_VDD_REGS_INIT(vdd_gfx, 1);
enum {
@@ -181,6 +181,7 @@ static struct clk_init_data gpu_clks_init[] = {
* | 465000000 | 930000000 | 1 | 2 |
* | 588000000 | 1176000000 | 1 | 2 |
* | 647000000 | 1294000000 | 1 | 2 |
+ * | 700000000 | 1400000000 | 1 | 2 |
* | 750000000 | 1500000000 | 1 | 2 |
* ====================================================
*/
@@ -193,6 +194,7 @@ static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
F_GFX(465000000, 0, 2, 0, 0, 930000000),
F_GFX(588000000, 0, 2, 0, 0, 1176000000),
F_GFX(647000000, 0, 2, 0, 0, 1294000000),
+ F_GFX(700000000, 0, 2, 0, 0, 1400000000),
F_GFX(750000000, 0, 2, 0, 0, 1500000000),
{ }
};
@@ -376,9 +378,9 @@ static int of_get_fmax_vdd_class(struct platform_device *pdev,
if (!vdd->vdd_uv)
return -ENOMEM;
- gpu_clks_init[index].fmax = devm_kzalloc(&pdev->dev, prop_len *
+ gpu_clks_init[index].rate_max = devm_kzalloc(&pdev->dev, prop_len *
sizeof(unsigned long), GFP_KERNEL);
- if (!gpu_clks_init[index].fmax)
+ if (!gpu_clks_init[index].rate_max)
return -ENOMEM;
array = devm_kzalloc(&pdev->dev, prop_len * sizeof(u32) * num,
@@ -388,7 +390,7 @@ static int of_get_fmax_vdd_class(struct platform_device *pdev,
of_property_read_u32_array(of, prop_name, array, prop_len * num);
for (i = 0; i < prop_len; i++) {
- gpu_clks_init[index].fmax[i] = array[num * i];
+ gpu_clks_init[index].rate_max[i] = array[num * i];
for (j = 1; j < num; j++) {
vdd->vdd_uv[(num - 1) * i + (j - 1)] =
array[num * i + j];
@@ -398,7 +400,7 @@ static int of_get_fmax_vdd_class(struct platform_device *pdev,
devm_kfree(&pdev->dev, array);
vdd->num_levels = prop_len;
vdd->cur_level = prop_len;
- gpu_clks_init[index].num_fmax = prop_len;
+ gpu_clks_init[index].num_rate_max = prop_len;
return 0;
}
diff --git a/drivers/clk/qcom/vdd-level-falcon.h b/drivers/clk/qcom/vdd-level-falcon.h
index e8699358cf91..d54e801ecc67 100644
--- a/drivers/clk/qcom/vdd-level-falcon.h
+++ b/drivers/clk/qcom/vdd-level-falcon.h
@@ -19,50 +19,52 @@
#define VDD_DIG_FMAX_MAP1(l1, f1) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
+
#define VDD_DIG_FMAX_MAP2(l1, f1, l2, f2) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP3(l1, f1, l2, f2, l3, f3) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
+
#define VDD_DIG_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
[VDD_DIG_##l4] = (f4), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP5(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
[VDD_DIG_##l4] = (f4), \
[VDD_DIG_##l5] = (f5), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP6(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
@@ -70,12 +72,12 @@
[VDD_DIG_##l5] = (f5), \
[VDD_DIG_##l6] = (f6), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP7(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6, \
l7, f7) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
@@ -84,27 +86,27 @@
[VDD_DIG_##l6] = (f6), \
[VDD_DIG_##l7] = (f7), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP1_AO(l1, f1) \
.vdd_class = &vdd_dig_ao, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP3_AO(l1, f1, l2, f2, l3, f3) \
.vdd_class = &vdd_dig_ao, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_GPU_PLL_FMAX_MAP6(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6) \
.vdd_class = &vdd_mx, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
@@ -112,7 +114,7 @@
[VDD_DIG_##l5] = (f5), \
[VDD_DIG_##l6] = (f6), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
enum vdd_dig_levels {
VDD_DIG_NONE,
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
index 82861b1dbe46..2aa7b783f276 100644
--- a/drivers/cpufreq/qcom-cpufreq.c
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -3,7 +3,7 @@
* MSM architecture cpufreq driver
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2016, The Linux Foundation. All rights reserved.
* Author: Mike A. Chan <mikechan@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -320,7 +320,7 @@ static struct cpufreq_driver msm_cpufreq_driver = {
static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
char *tbl_name, int cpu)
{
- int ret, nf, i;
+ int ret, nf, i, j;
u32 *data;
struct cpufreq_frequency_table *ftbl;
@@ -344,6 +344,7 @@ static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
if (!ftbl)
return ERR_PTR(-ENOMEM);
+ j = 0;
for (i = 0; i < nf; i++) {
unsigned long f;
@@ -353,29 +354,20 @@ static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
f /= 1000;
/*
- * Check if this is the last feasible frequency in the table.
+ * Don't repeat frequencies if they round up to the same clock
+ * frequency.
*
- * The table listing frequencies higher than what the HW can
- * support is not an error since the table might be shared
- * across CPUs in different speed bins. It's also not
- * sufficient to check if the rounded rate is lower than the
- * requested rate as it doesn't cover the following example:
- *
- * Table lists: 2.2 GHz and 2.5 GHz.
- * Rounded rate returns: 2.2 GHz and 2.3 GHz.
- *
- * In this case, we can CPUfreq to use 2.2 GHz and 2.3 GHz
- * instead of rejecting the 2.5 GHz table entry.
*/
- if (i > 0 && f <= ftbl[i-1].frequency)
- break;
+ if (j > 0 && f <= ftbl[j - 1].frequency)
+ continue;
- ftbl[i].driver_data = i;
- ftbl[i].frequency = f;
+ ftbl[j].driver_data = j;
+ ftbl[j].frequency = f;
+ j++;
}
- ftbl[i].driver_data = i;
- ftbl[i].frequency = CPUFREQ_TABLE_END;
+ ftbl[j].driver_data = j;
+ ftbl[j].frequency = CPUFREQ_TABLE_END;
devm_kfree(dev, data);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 386c85fc714b..77aea1f41714 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -372,18 +372,18 @@ config CRYPTO_DEV_QCRYPTO
config CRYPTO_DEV_QCOM_MSM_QCE
tristate "Qualcomm Crypto Engine (QCE) module"
- select CRYPTO_DEV_QCE50 if ARCH_APQ8084 || ARCH_MSM8916 || ARCH_MSM8994 || ARCH_MSM8996 || ARCH_MSM8992 || ARCH_MSMTITANIUM || ARCH_MSM8909 || ARCH_MSMCOBALT || ARCH_MSMFALCON
+ select CRYPTO_DEV_QCE50 if ARCH_APQ8084 || ARCH_MSM8916 || ARCH_MSM8994 || ARCH_MSM8996 || ARCH_MSM8992 || ARCH_MSMTITANIUM || ARCH_MSM8909 || ARCH_MSMCOBALT || ARCH_MSMFALCON || ARCH_MSMTRITON
default n
help
This driver supports Qualcomm Crypto Engine in MSM7x30, MSM8660
MSM8x55, MSM8960, MSM9615, MSM8916, MSM8994, MSM8996, FSM9900,
- MSMTITANINUM, APQ8084, MSMCOBALT and MSMFALCON.
+	  MSMTITANIUM, APQ8084, MSMCOBALT, MSMFALCON and MSMTRITON.
To compile this driver as a module, choose M here: the
For MSM7x30 MSM8660 and MSM8x55 the module is called qce
For MSM8960, APQ8064 and MSM9615 the module is called qce40
For MSM8974, MSM8916, MSM8994, MSM8996, MSM8992, MSMTITANIUM,
- APQ8084, MSMCOBALT and MSMFALCON the module is called qce50.
+ APQ8084, MSMCOBALT, MSMFALCON and MSMTRITON the module is called qce50.
config CRYPTO_DEV_QCEDEV
tristate "QCEDEV Interface to CE module"
@@ -391,8 +391,8 @@ config CRYPTO_DEV_QCEDEV
help
This driver supports Qualcomm QCEDEV Crypto in MSM7x30, MSM8660,
MSM8960, MSM9615, APQ8064, MSM8974, MSM8916, MSM8994, MSM8996,
- APQ8084, MSMCOBALT, MSMFALCON. This exposes the interface to the QCE hardware
- accelerator via IOCTLs.
+ APQ8084, MSMCOBALT, MSMFALCON, MSMTRITON. This exposes the
+ interface to the QCE hardware accelerator via IOCTLs.
To compile this driver as a module, choose M here: the
module will be called qcedev.
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 3562de7fc967..55c043b44cea 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -3023,6 +3023,7 @@ static void qce_multireq_timeout(unsigned long data)
struct qce_device *pce_dev = (struct qce_device *)data;
int ret = 0;
int last_seq;
+ unsigned long flags;
last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
if (last_seq == 0 ||
@@ -3032,21 +3033,33 @@ static void qce_multireq_timeout(unsigned long data)
return;
}
/* last bunch mode command time out */
+
+ /*
+ * From here to dummy request finish sps request and set owner back
+ * to none, we disable interrupt.
+	 * So it won't get preempted or interrupted. If BAM interrupts happen
+ * between, and completion callback gets called from BAM, a new
+ * request may be issued by the client driver. Deadlock may happen.
+ */
+ local_irq_save(flags);
if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT)
!= QCE_OWNER_NONE) {
+ local_irq_restore(flags);
mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
return;
}
- del_timer(&(pce_dev->timer));
- pce_dev->mode = IN_INTERRUPT_MODE;
- pce_dev->qce_stats.no_of_timeouts++;
- pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
ret = qce_dummy_req(pce_dev);
if (ret)
pr_warn("pcedev %d: Failed to insert dummy req\n",
pce_dev->dev_no);
cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE);
+ pce_dev->mode = IN_INTERRUPT_MODE;
+ local_irq_restore(flags);
+
+ del_timer(&(pce_dev->timer));
+ pce_dev->qce_stats.no_of_timeouts++;
+ pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
}
void qce_get_driver_stats(void *handle)
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index e63f061175ad..e2099c4e7877 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1234,44 +1234,6 @@ static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
struct qcedev_cipher_op_req *saved_req;
struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
- /* Verify Source Address's */
- for (i = 0; i < areq->cipher_op_req.entries; i++)
- if (!access_ok(VERIFY_READ,
- (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
- areq->cipher_op_req.vbuf.src[i].len))
- return -EFAULT;
-
- /* Verify Destination Address's */
- if (creq->in_place_op != 1) {
- for (i = 0, total = 0; i < QCEDEV_MAX_BUFFERS; i++) {
- if ((areq->cipher_op_req.vbuf.dst[i].vaddr != 0) &&
- (total < creq->data_len)) {
- if (!access_ok(VERIFY_WRITE,
- (void __user *)creq->vbuf.dst[i].vaddr,
- creq->vbuf.dst[i].len)) {
- pr_err("%s:DST WR_VERIFY err %d=0x%lx\n",
- __func__, i, (uintptr_t)
- creq->vbuf.dst[i].vaddr);
- return -EFAULT;
- }
- total += creq->vbuf.dst[i].len;
- }
- }
- } else {
- for (i = 0, total = 0; i < creq->entries; i++) {
- if (total < creq->data_len) {
- if (!access_ok(VERIFY_WRITE,
- (void __user *)creq->vbuf.src[i].vaddr,
- creq->vbuf.src[i].len)) {
- pr_err("%s:SRC WR_VERIFY err %d=0x%lx\n",
- __func__, i, (uintptr_t)
- creq->vbuf.src[i].vaddr);
- return -EFAULT;
- }
- total += creq->vbuf.src[i].len;
- }
- }
- }
total = 0;
if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
@@ -1569,6 +1531,36 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
__func__, total, req->data_len);
goto error;
}
+ /* Verify Source Address's */
+ for (i = 0, total = 0; i < req->entries; i++) {
+ if (total < req->data_len) {
+ if (!access_ok(VERIFY_READ,
+ (void __user *)req->vbuf.src[i].vaddr,
+ req->vbuf.src[i].len)) {
+ pr_err("%s:SRC RD_VERIFY err %d=0x%lx\n",
+ __func__, i, (uintptr_t)
+ req->vbuf.src[i].vaddr);
+ goto error;
+ }
+ total += req->vbuf.src[i].len;
+ }
+ }
+
+ /* Verify Destination Address's */
+ for (i = 0, total = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ if ((req->vbuf.dst[i].vaddr != 0) &&
+ (total < req->data_len)) {
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *)req->vbuf.dst[i].vaddr,
+ req->vbuf.dst[i].len)) {
+ pr_err("%s:DST WR_VERIFY err %d=0x%lx\n",
+ __func__, i, (uintptr_t)
+ req->vbuf.dst[i].vaddr);
+ goto error;
+ }
+ total += req->vbuf.dst[i].len;
+ }
+ }
return 0;
error:
return -EINVAL;
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index bc1c5f6dd4bd..844a8ad666a9 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -806,7 +806,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
struct devfreq *df = to_devfreq(dev);
int ret;
char str_governor[DEVFREQ_NAME_LEN + 1];
- struct devfreq_governor *governor;
+ const struct devfreq_governor *governor, *prev_gov;
ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
if (ret != 1)
@@ -831,12 +831,21 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
goto out;
}
}
+ prev_gov = df->governor;
df->governor = governor;
strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
- if (ret)
+ if (ret) {
dev_warn(dev, "%s: Governor %s not started(%d)\n",
__func__, df->governor->name, ret);
+ if (prev_gov) {
+ df->governor = prev_gov;
+ strncpy(df->governor_name, prev_gov->name,
+ DEVFREQ_NAME_LEN);
+ df->governor->event_handler(df, DEVFREQ_GOV_START,
+ NULL);
+ }
+ }
out:
mutex_unlock(&devfreq_list_lock);
diff --git a/drivers/gpio/qpnp-pin.c b/drivers/gpio/qpnp-pin.c
index 182c6074985e..483bb9338ac3 100644
--- a/drivers/gpio/qpnp-pin.c
+++ b/drivers/gpio/qpnp-pin.c
@@ -827,9 +827,17 @@ static int qpnp_pin_get(struct gpio_chip *gpio_chip, unsigned offset)
if (WARN_ON(!q_spec))
return -ENODEV;
+ if (is_gpio_lv_mv(q_spec)) {
+ mask = Q_REG_LV_MV_MODE_SEL_MASK;
+ shift = Q_REG_LV_MV_MODE_SEL_SHIFT;
+ } else {
+ mask = Q_REG_MODE_SEL_MASK;
+ shift = Q_REG_MODE_SEL_SHIFT;
+ }
+
/* gpio val is from RT status iff input is enabled */
- if ((q_spec->regs[Q_REG_I_MODE_CTL] & Q_REG_MODE_SEL_MASK)
- == QPNP_PIN_MODE_DIG_IN) {
+ if (q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL], shift, mask)
+ == QPNP_PIN_MODE_DIG_IN) {
rc = regmap_read(q_chip->regmap,
Q_REG_ADDR(q_spec, Q_REG_STATUS1), &val);
buf[0] = (u8)val;
diff --git a/drivers/gpu/msm/a5xx_reg.h b/drivers/gpu/msm/a5xx_reg.h
index f3b4e6622043..436b6949c414 100644
--- a/drivers/gpu/msm/a5xx_reg.h
+++ b/drivers/gpu/msm/a5xx_reg.h
@@ -608,6 +608,7 @@
#define A5XX_PC_PERFCTR_PC_SEL_7 0xD17
/* HLSQ registers */
+#define A5XX_HLSQ_DBG_ECO_CNTL 0xE04
#define A5XX_HLSQ_ADDR_MODE_CNTL 0xE05
#define A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0xE10
#define A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0xE11
@@ -632,6 +633,7 @@
#define A5XX_VFD_PERFCTR_VFD_SEL_7 0xE57
/* VPC registers */
+#define A5XX_VPC_DBG_ECO_CNTL 0xE60
#define A5XX_VPC_ADDR_MODE_CNTL 0xE61
#define A5XX_VPC_PERFCTR_VPC_SEL_0 0xE64
#define A5XX_VPC_PERFCTR_VPC_SEL_1 0xE65
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 3615be45b6d9..a02ed40ba9d5 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -269,7 +269,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.patchid = ANY_ID,
.features = ADRENO_PREEMPTION | ADRENO_64BIT |
ADRENO_CONTENT_PROTECTION |
- ADRENO_GPMU | ADRENO_SPTP_PC,
+ ADRENO_GPMU | ADRENO_SPTP_PC | ADRENO_LM,
.pm4fw_name = "a530_pm4.fw",
.pfpfw_name = "a530_pfp.fw",
.zap_name = "a540_zap",
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 6160aa567fbf..11226472d801 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -843,6 +843,8 @@ static struct {
{ ADRENO_QUIRK_FAULT_DETECT_MASK, "qcom,gpu-quirk-fault-detect-mask" },
{ ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING,
"qcom,gpu-quirk-dp2clockgating-disable" },
+ { ADRENO_QUIRK_DISABLE_LMLOADKILL,
+ "qcom,gpu-quirk-lmloadkill-disable" },
};
static int adreno_of_get_power(struct adreno_device *adreno_dev,
@@ -2109,8 +2111,6 @@ static int adreno_soft_reset(struct kgsl_device *device)
adreno_support_64bit(adreno_dev))
gpudev->enable_64bit(adreno_dev);
- /* Restore physical performance counter values after soft reset */
- adreno_perfcounter_restore(adreno_dev);
/* Reinitialize the GPU */
gpudev->start(adreno_dev);
@@ -2137,6 +2137,9 @@ static int adreno_soft_reset(struct kgsl_device *device)
set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
}
+ /* Restore physical performance counter values after soft reset */
+ adreno_perfcounter_restore(adreno_dev);
+
return ret;
}
@@ -2291,9 +2294,9 @@ static void adreno_read(struct kgsl_device *device, void __iomem *base,
unsigned int mem_len)
{
- unsigned int __iomem *reg;
+ void __iomem *reg;
BUG_ON(offsetwords*sizeof(uint32_t) >= mem_len);
- reg = (unsigned int __iomem *)(base + (offsetwords << 2));
+ reg = (base + (offsetwords << 2));
if (!in_interrupt())
kgsl_pre_hwaccess(device);
@@ -2333,7 +2336,7 @@ static void adreno_regwrite(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
- unsigned int __iomem *reg;
+ void __iomem *reg;
BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);
@@ -2343,7 +2346,7 @@ static void adreno_regwrite(struct kgsl_device *device,
trace_kgsl_regwrite(device, offsetwords, value);
kgsl_cffdump_regwrite(device, offsetwords << 2, value);
- reg = (unsigned int __iomem *)(device->reg_virt + (offsetwords << 2));
+ reg = (device->reg_virt + (offsetwords << 2));
/*ensure previous writes post before this one,
* i.e. act like normal writel() */
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index a2af26c81f50..d81142db5b58 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -123,6 +123,8 @@
#define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(3)
/* Disable RB sampler datapath clock gating optimization */
#define ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING BIT(4)
+/* Disable local memory(LM) feature to avoid corner case error */
+#define ADRENO_QUIRK_DISABLE_LMLOADKILL BIT(5)
/* Flags to control command packet settings */
#define KGSL_CMD_FLAGS_NONE 0
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 3f5a9c6318f6..423071811b43 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -151,6 +151,43 @@ static const unsigned int _a3xx_pwron_fixup_fs_instructions[] = {
0x00000000, 0x03000000, 0x00000000, 0x00000000,
};
+static void a3xx_efuse_speed_bin(struct adreno_device *adreno_dev)
+{
+ unsigned int val;
+ unsigned int speed_bin[3];
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ if (of_property_read_u32_array(device->pdev->dev.of_node,
+ "qcom,gpu-speed-bin", speed_bin, 3))
+ return;
+
+ adreno_efuse_read_u32(adreno_dev, speed_bin[0], &val);
+
+ adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
+}
+
/*
 * Table of eFuse-driven feature probes: when check() matches the
 * current GPU, func() is run to read and apply the corresponding fuse.
 */
static const struct {
	int (*check)(struct adreno_device *adreno_dev);
	void (*func)(struct adreno_device *adreno_dev);
} a3xx_efuse_funcs[] = {
	{ adreno_is_a306a, a3xx_efuse_speed_bin },
};
+
+static void a3xx_check_features(struct adreno_device *adreno_dev)
+{
+ unsigned int i;
+
+ if (adreno_efuse_map(adreno_dev))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(a3xx_efuse_funcs); i++) {
+ if (a3xx_efuse_funcs[i].check(adreno_dev))
+ a3xx_efuse_funcs[i].func(adreno_dev);
+ }
+
+ adreno_efuse_unmap(adreno_dev);
+}
+
/**
* _a3xx_pwron_fixup() - Initialize a special command buffer to run a
* post-power collapse shader workaround
@@ -604,6 +641,9 @@ static void a3xx_platform_setup(struct adreno_device *adreno_dev)
gpudev->vbif_xin_halt_ctrl0_mask =
A30X_VBIF_XIN_HALT_CTRL0_MASK;
}
+
+	/* Check efuse bits for various capabilities */
+ a3xx_check_features(adreno_dev);
}
static int a3xx_send_me_init(struct adreno_device *adreno_dev,
diff --git a/drivers/gpu/msm/adreno_a4xx_preempt.c b/drivers/gpu/msm/adreno_a4xx_preempt.c
index 4087ac60c89e..ef837dc4b7ea 100644
--- a/drivers/gpu/msm/adreno_a4xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a4xx_preempt.c
@@ -146,6 +146,8 @@ static int a4xx_submit_preempt_token(struct adreno_ringbuffer *rb,
&ptname, PT_INFO_OFFSET(current_rb_ptname));
pt = kgsl_mmu_get_pt_from_ptname(&(device->mmu),
ptname);
+ if (IS_ERR_OR_NULL(pt))
+ return (pt == NULL) ? -ENOENT : PTR_ERR(pt);
/* set the ringbuffer for incoming RB */
pt_switch_sizedwords =
adreno_iommu_set_pt_generate_cmds(incoming_rb,
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 860f6d2925f1..f652e955b07c 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -196,6 +196,8 @@ static void a5xx_platform_setup(struct adreno_device *adreno_dev)
/* A510 has 3 XIN ports in VBIF */
gpudev->vbif_xin_halt_ctrl0_mask =
A510_VBIF_XIN_HALT_CTRL0_MASK;
+ } else if (adreno_is_a540(adreno_dev)) {
+ gpudev->snapshot_data->sect_sizes->cp_merciu = 1024;
}
/* Calculate SP local and private mem addresses */
@@ -1534,12 +1536,12 @@ static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
const char *name, struct clk *clk)
{
if (adreno_is_a540(adreno_dev)) {
- if (!strcmp(name, "mem_iface_clk"))
- clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
- clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
- if (!strcmp(name, "core_clk")) {
+ if (!strcmp(name, "mem_iface_clk")) {
clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+ } else if (!strcmp(name, "core_clk")) {
+ clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH);
+ clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
}
}
}
@@ -1781,11 +1783,11 @@ static void a5xx_start(struct adreno_device *adreno_dev)
set_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
gpudev->irq->mask |= (1 << A5XX_INT_MISC_HANG_DETECT);
/*
- * Set hang detection threshold to 1 million cycles
- * (0xFFFF*16)
+ * Set hang detection threshold to 4 million cycles
+ * (0x3FFFF*16)
*/
kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
- (1 << 30) | 0xFFFF);
+ (1 << 30) | 0x3FFFF);
}
@@ -1944,6 +1946,16 @@ static void a5xx_start(struct adreno_device *adreno_dev)
}
+ /*
+ * VPC corner case with local memory load kill leads to corrupt
+ * internal state. Normal Disable does not work for all a5x chips.
+ * So do the following setting to disable it.
+ */
+ if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_DISABLE_LMLOADKILL)) {
+ kgsl_regrmw(device, A5XX_VPC_DBG_ECO_CNTL, 0, 0x1 << 23);
+ kgsl_regrmw(device, A5XX_HLSQ_DBG_ECO_CNTL, 0x1 << 18, 0);
+ }
+
a5xx_preemption_start(adreno_dev);
a5xx_protect_init(adreno_dev);
}
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 42f8119ad8b4..f5f99c3ebb4a 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -522,12 +522,18 @@ int adreno_perfcounter_get(struct adreno_device *adreno_dev,
if (empty == -1)
return -EBUSY;
+ /* initialize the new counter */
+ group->regs[empty].countable = countable;
+
/* enable the new counter */
ret = adreno_perfcounter_enable(adreno_dev, groupid, empty, countable);
- if (ret)
+ if (ret) {
+ /* Put back the perfcounter */
+ if (!(group->flags & ADRENO_PERFCOUNTER_GROUP_FIXED))
+ group->regs[empty].countable =
+ KGSL_PERFCOUNTER_NOT_USED;
return ret;
- /* initialize the new counter */
- group->regs[empty].countable = countable;
+ }
/* set initial kernel and user count */
if (flags & PERFCOUNTER_FLAG_KERNEL) {
@@ -720,10 +726,22 @@ static int _perfcounter_enable_default(struct adreno_device *adreno_dev,
/* wait for the above commands submitted to complete */
ret = adreno_ringbuffer_waittimestamp(rb, rb->timestamp,
ADRENO_IDLE_TIMEOUT);
- if (ret)
- KGSL_DRV_ERR(device,
- "Perfcounter %u/%u/%u start via commands failed %d\n",
- group, counter, countable, ret);
+ if (ret) {
+ /*
+ * If we were woken up because of cancelling rb events
+ * either due to soft reset or adreno_stop, ignore the
+ * error and return 0 here. The perfcounter is already
+ * set up in software and it will be programmed in
+ * hardware when we wake up or come up after soft reset,
+ * by adreno_perfcounter_restore.
+ */
+ if (ret == -EAGAIN)
+ ret = 0;
+ else
+ KGSL_DRV_ERR(device,
+ "Perfcounter %u/%u/%u start via commands failed %d\n",
+ group, counter, countable, ret);
+ }
} else {
/* Select the desired perfcounter */
kgsl_regwrite(device, reg->select, countable);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index add4590bbb90..554eb2dffae4 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2507,6 +2507,8 @@ static int kgsl_setup_dma_buf(struct kgsl_device *device,
meta->dmabuf = dmabuf;
meta->attach = attach;
+ attach->priv = entry;
+
entry->priv_data = meta;
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = 0;
@@ -2557,6 +2559,45 @@ out:
}
#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
+ int *egl_surface_count, int *egl_image_count)
+{
+ struct kgsl_dma_buf_meta *meta = entry->priv_data;
+ struct dma_buf *dmabuf = meta->dmabuf;
+ struct dma_buf_attachment *mem_entry_buf_attachment = meta->attach;
+ struct device *buf_attachment_dev = mem_entry_buf_attachment->dev;
+ struct dma_buf_attachment *attachment = NULL;
+
+ mutex_lock(&dmabuf->lock);
+ list_for_each_entry(attachment, &dmabuf->attachments, node) {
+ struct kgsl_mem_entry *scan_mem_entry = NULL;
+
+ if (attachment->dev != buf_attachment_dev)
+ continue;
+
+ scan_mem_entry = attachment->priv;
+ if (!scan_mem_entry)
+ continue;
+
+ switch (kgsl_memdesc_get_memtype(&scan_mem_entry->memdesc)) {
+ case KGSL_MEMTYPE_EGL_SURFACE:
+ (*egl_surface_count)++;
+ break;
+ case KGSL_MEMTYPE_EGL_IMAGE:
+ (*egl_image_count)++;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf->lock);
+}
+#else
+void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
+ int *egl_surface_count, int *egl_image_count)
+{
+}
+#endif
+
long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
@@ -3891,7 +3932,7 @@ kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
return -EINVAL;
}
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
result = remap_pfn_range(vma, vma->vm_start,
device->memstore.physaddr >> PAGE_SHIFT,
@@ -4527,6 +4568,9 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
if (status)
goto error_close_mmu;
+ /* Initialize the memory pools */
+ kgsl_init_page_pools(device->pdev);
+
status = kgsl_allocate_global(device, &device->memstore,
KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG, "memstore");
@@ -4581,9 +4625,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
/* Initialize common sysfs entries */
kgsl_pwrctrl_init_sysfs(device);
- /* Initialize the memory pools */
- kgsl_init_page_pools();
-
return 0;
error_free_memstore:
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 826c4edb3582..fbf9197b6d1b 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -435,6 +435,9 @@ long kgsl_ioctl_sparse_unbind(struct kgsl_device_private *dev_priv,
void kgsl_mem_entry_destroy(struct kref *kref);
+void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
+ int *egl_surface_count, int *egl_image_count);
+
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr);
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 2f293e4da398..7758fc956055 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -125,13 +125,15 @@ static char get_cacheflag(const struct kgsl_memdesc *m)
}
-static int print_mem_entry(int id, void *ptr, void *data)
+static int print_mem_entry(void *data, void *ptr)
{
struct seq_file *s = data;
struct kgsl_mem_entry *entry = ptr;
char flags[10];
char usage[16];
struct kgsl_memdesc *m = &entry->memdesc;
+ unsigned int usermem_type = kgsl_memdesc_usermem_type(m);
+ int egl_surface_count = 0, egl_image_count = 0;
if (m->flags & KGSL_MEMFLAGS_SPARSE_VIRT)
return 0;
@@ -149,12 +151,17 @@ static int print_mem_entry(int id, void *ptr, void *data)
kgsl_get_memory_usage(usage, sizeof(usage), m->flags);
- seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16llu",
+ if (usermem_type == KGSL_MEM_ENTRY_ION)
+ kgsl_get_egl_counts(entry, &egl_surface_count,
+ &egl_image_count);
+
+ seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16llu %6d %6d",
(uint64_t *)(uintptr_t) m->gpuaddr,
(unsigned long *) m->useraddr,
m->size, entry->id, flags,
- memtype_str(kgsl_memdesc_usermem_type(m)),
- usage, (m->sgt ? m->sgt->nents : 0), m->mapsize);
+ memtype_str(usermem_type),
+ usage, (m->sgt ? m->sgt->nents : 0), m->mapsize,
+ egl_surface_count, egl_image_count);
if (entry->metadata[0] != 0)
seq_printf(s, " %s", entry->metadata);
@@ -164,25 +171,83 @@ static int print_mem_entry(int id, void *ptr, void *data)
return 0;
}
-static int process_mem_print(struct seq_file *s, void *unused)
+static struct kgsl_mem_entry *process_mem_seq_find(struct seq_file *s,
+ void *ptr, loff_t pos)
{
+ struct kgsl_mem_entry *entry = ptr;
struct kgsl_process_private *private = s->private;
+ int id = 0;
+ loff_t temp_pos = 1;
- seq_printf(s, "%16s %16s %16s %5s %9s %10s %16s %5s %16s\n",
- "gpuaddr", "useraddr", "size", "id", "flags", "type",
- "usage", "sglen", "mapsize");
+ if (entry != SEQ_START_TOKEN)
+ id = entry->id + 1;
spin_lock(&private->mem_lock);
- idr_for_each(&private->mem_idr, print_mem_entry, s);
+ for (entry = idr_get_next(&private->mem_idr, &id); entry;
+ id++, entry = idr_get_next(&private->mem_idr, &id),
+ temp_pos++) {
+ if (temp_pos == pos && kgsl_mem_entry_get(entry)) {
+ spin_unlock(&private->mem_lock);
+ goto found;
+ }
+ }
spin_unlock(&private->mem_lock);
- return 0;
+ entry = NULL;
+found:
+ if (ptr != SEQ_START_TOKEN)
+ kgsl_mem_entry_put(ptr);
+
+ return entry;
+}
+
+static void *process_mem_seq_start(struct seq_file *s, loff_t *pos)
+{
+ loff_t seq_file_offset = *pos;
+
+ if (seq_file_offset == 0)
+ return SEQ_START_TOKEN;
+ else
+ return process_mem_seq_find(s, SEQ_START_TOKEN,
+ seq_file_offset);
+}
+
+static void process_mem_seq_stop(struct seq_file *s, void *ptr)
+{
+ if (ptr && ptr != SEQ_START_TOKEN)
+ kgsl_mem_entry_put(ptr);
}
+static void *process_mem_seq_next(struct seq_file *s, void *ptr,
+ loff_t *pos)
+{
+ ++*pos;
+ return process_mem_seq_find(s, ptr, 1);
+}
+
+static int process_mem_seq_show(struct seq_file *s, void *ptr)
+{
+ if (ptr == SEQ_START_TOKEN) {
+ seq_printf(s, "%16s %16s %16s %5s %9s %10s %16s %5s %16s %6s %6s\n",
+ "gpuaddr", "useraddr", "size", "id", "flags", "type",
+ "usage", "sglen", "mapsize", "eglsrf", "eglimg");
+ return 0;
+ } else
+ return print_mem_entry(s, ptr);
+}
+
+static const struct seq_operations process_mem_seq_fops = {
+ .start = process_mem_seq_start,
+ .stop = process_mem_seq_stop,
+ .next = process_mem_seq_next,
+ .show = process_mem_seq_show,
+};
+
static int process_mem_open(struct inode *inode, struct file *file)
{
int ret;
pid_t pid = (pid_t) (unsigned long) inode->i_private;
+ struct seq_file *s = NULL;
struct kgsl_process_private *private = NULL;
private = kgsl_process_private_find(pid);
@@ -190,9 +255,13 @@ static int process_mem_open(struct inode *inode, struct file *file)
if (!private)
return -ENODEV;
- ret = single_open(file, process_mem_print, private);
+ ret = seq_open(file, &process_mem_seq_fops);
if (ret)
kgsl_process_private_put(private);
+ else {
+ s = file->private_data;
+ s->private = private;
+ }
return ret;
}
@@ -205,7 +274,7 @@ static int process_mem_release(struct inode *inode, struct file *file)
if (private)
kgsl_process_private_put(private);
- return single_release(inode, file);
+ return seq_release(inode, file);
}
static const struct file_operations process_mem_fops = {
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index ba564b2851f9..f516b7cd245a 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -390,6 +390,13 @@ kgsl_mmu_map(struct kgsl_pagetable *pagetable,
if (!memdesc->gpuaddr)
return -EINVAL;
+ if (!(memdesc->flags & (KGSL_MEMFLAGS_SPARSE_VIRT |
+ KGSL_MEMFLAGS_SPARSE_PHYS))) {
+ /* Only global mappings should be mapped multiple times */
+ if (!kgsl_memdesc_is_global(memdesc) &&
+ (KGSL_MEMDESC_MAPPED & memdesc->priv))
+ return -EINVAL;
+ }
size = kgsl_memdesc_footprint(memdesc);
@@ -403,6 +410,9 @@ kgsl_mmu_map(struct kgsl_pagetable *pagetable,
atomic_inc(&pagetable->stats.entries);
KGSL_STATS_ADD(size, &pagetable->stats.mapped,
&pagetable->stats.max_mapped);
+
+ /* This is needed for non-sparse mappings */
+ memdesc->priv |= KGSL_MEMDESC_MAPPED;
}
return 0;
@@ -455,6 +465,13 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
if (memdesc->size == 0)
return -EINVAL;
+ if (!(memdesc->flags & (KGSL_MEMFLAGS_SPARSE_VIRT |
+ KGSL_MEMFLAGS_SPARSE_PHYS))) {
+ /* Only global mappings should be mapped multiple times */
+ if (!(KGSL_MEMDESC_MAPPED & memdesc->priv))
+ return -EINVAL;
+ }
+
if (PT_OP_VALID(pagetable, mmu_unmap)) {
uint64_t size;
@@ -464,6 +481,9 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
atomic_dec(&pagetable->stats.entries);
atomic_long_sub(size, &pagetable->stats.mapped);
+
+ if (!kgsl_memdesc_is_global(memdesc))
+ memdesc->priv &= ~KGSL_MEMDESC_MAPPED;
}
return ret;
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index f5402fdc7e57..6ecbab466c7c 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -21,6 +21,10 @@
#include "kgsl_device.h"
#include "kgsl_pool.h"
+#define KGSL_MAX_POOLS 4
+#define KGSL_MAX_POOL_ORDER 8
+#define KGSL_MAX_RESERVED_PAGES 4096
+
/**
* struct kgsl_page_pool - Structure to hold information for the pool
* @pool_order: Page order describing the size of the page
@@ -40,41 +44,10 @@ struct kgsl_page_pool {
struct list_head page_list;
};
-static struct kgsl_page_pool kgsl_pools[] = {
- {
- .pool_order = 0,
- .reserved_pages = 2048,
- .allocation_allowed = true,
- .list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[0].list_lock),
- .page_list = LIST_HEAD_INIT(kgsl_pools[0].page_list),
- },
-#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
- {
- .pool_order = 1,
- .reserved_pages = 1024,
- .allocation_allowed = true,
- .list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[1].list_lock),
- .page_list = LIST_HEAD_INIT(kgsl_pools[1].page_list),
- },
- {
- .pool_order = 4,
- .reserved_pages = 256,
- .allocation_allowed = false,
- .list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[2].list_lock),
- .page_list = LIST_HEAD_INIT(kgsl_pools[2].page_list),
- },
- {
- .pool_order = 8,
- .reserved_pages = 32,
- .allocation_allowed = false,
- .list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[3].list_lock),
- .page_list = LIST_HEAD_INIT(kgsl_pools[3].page_list),
- },
+static struct kgsl_page_pool kgsl_pools[KGSL_MAX_POOLS];
+static int kgsl_num_pools;
+static int kgsl_pool_max_pages;
-#endif
-};
-
-#define KGSL_NUM_POOLS ARRAY_SIZE(kgsl_pools)
/* Returns KGSL pool corresponding to input page order*/
static struct kgsl_page_pool *
@@ -82,7 +55,7 @@ _kgsl_get_pool_from_order(unsigned int order)
{
int i;
- for (i = 0; i < KGSL_NUM_POOLS; i++) {
+ for (i = 0; i < kgsl_num_pools; i++) {
if (kgsl_pools[i].pool_order == order)
return &kgsl_pools[i];
}
@@ -154,7 +127,7 @@ static int kgsl_pool_size_total(void)
int i;
int total = 0;
- for (i = 0; i < KGSL_NUM_POOLS; i++)
+ for (i = 0; i < kgsl_num_pools; i++)
total += kgsl_pool_size(&kgsl_pools[i]);
return total;
}
@@ -207,7 +180,7 @@ kgsl_pool_reduce(unsigned int target_pages, bool exit)
total_pages = kgsl_pool_size_total();
- for (i = (KGSL_NUM_POOLS - 1); i >= 0; i--) {
+ for (i = (kgsl_num_pools - 1); i >= 0; i--) {
pool = &kgsl_pools[i];
/*
@@ -300,7 +273,7 @@ static int kgsl_pool_idx_lookup(unsigned int order)
{
int i;
- for (i = 0; i < KGSL_NUM_POOLS; i++)
+ for (i = 0; i < kgsl_num_pools; i++)
if (order == kgsl_pools[i].pool_order)
return i;
@@ -384,10 +357,13 @@ void kgsl_pool_free_page(struct page *page)
page_order = compound_order(page);
- pool = _kgsl_get_pool_from_order(page_order);
- if (pool != NULL) {
- _kgsl_pool_add_page(pool, page);
- return;
+ if (!kgsl_pool_max_pages ||
+ (kgsl_pool_size_total() < kgsl_pool_max_pages)) {
+ pool = _kgsl_get_pool_from_order(page_order);
+ if (pool != NULL) {
+ _kgsl_pool_add_page(pool, page);
+ return;
+ }
}
/* Give back to system as not added to pool */
@@ -398,7 +374,7 @@ static void kgsl_pool_reserve_pages(void)
{
int i, j;
- for (i = 0; i < KGSL_NUM_POOLS; i++) {
+ for (i = 0; i < kgsl_num_pools; i++) {
struct page *page;
for (j = 0; j < kgsl_pools[i].reserved_pages; j++) {
@@ -445,8 +421,76 @@ static struct shrinker kgsl_pool_shrinker = {
.batch = 0,
};
-void kgsl_init_page_pools(void)
+static void kgsl_pool_config(unsigned int order, unsigned int reserved_pages,
+ bool allocation_allowed)
{
+#ifdef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
+ if (order > 0) {
+ pr_info("%s: Pool order:%d not supprted.!!\n", __func__, order);
+ return;
+ }
+#endif
+ if ((order > KGSL_MAX_POOL_ORDER) ||
+ (reserved_pages > KGSL_MAX_RESERVED_PAGES))
+ return;
+
+ kgsl_pools[kgsl_num_pools].pool_order = order;
+ kgsl_pools[kgsl_num_pools].reserved_pages = reserved_pages;
+ kgsl_pools[kgsl_num_pools].allocation_allowed = allocation_allowed;
+ spin_lock_init(&kgsl_pools[kgsl_num_pools].list_lock);
+ INIT_LIST_HEAD(&kgsl_pools[kgsl_num_pools].page_list);
+ kgsl_num_pools++;
+}
+
+static void kgsl_of_parse_mempools(struct device_node *node)
+{
+ struct device_node *child;
+ unsigned int page_size, reserved_pages = 0;
+ bool allocation_allowed;
+
+ for_each_child_of_node(node, child) {
+ unsigned int index;
+
+ if (of_property_read_u32(child, "reg", &index))
+ return;
+
+ if (index >= KGSL_MAX_POOLS)
+ continue;
+
+ if (of_property_read_u32(child, "qcom,mempool-page-size",
+ &page_size))
+ return;
+
+ of_property_read_u32(child, "qcom,mempool-reserved",
+ &reserved_pages);
+
+ allocation_allowed = of_property_read_bool(child,
+ "qcom,mempool-allocate");
+
+ kgsl_pool_config(ilog2(page_size >> PAGE_SHIFT), reserved_pages,
+ allocation_allowed);
+ }
+}
+
+static void kgsl_of_get_mempools(struct device_node *parent)
+{
+ struct device_node *node;
+
+ node = of_find_compatible_node(parent, NULL, "qcom,gpu-mempools");
+ if (node != NULL) {
+ /* Get Max pages limit for mempool */
+ of_property_read_u32(node, "qcom,mempool-max-pages",
+ &kgsl_pool_max_pages);
+ kgsl_of_parse_mempools(node);
+ }
+}
+
+void kgsl_init_page_pools(struct platform_device *pdev)
+{
+
+ /* Get GPU mempools data and configure pools */
+ kgsl_of_get_mempools(pdev->dev.of_node);
+
/* Reserve the appropriate number of pages for each pool */
kgsl_pool_reserve_pages();
diff --git a/drivers/gpu/msm/kgsl_pool.h b/drivers/gpu/msm/kgsl_pool.h
index efbfa96f1498..d55e1ada123b 100644
--- a/drivers/gpu/msm/kgsl_pool.h
+++ b/drivers/gpu/msm/kgsl_pool.h
@@ -35,7 +35,7 @@ kgsl_gfp_mask(unsigned int page_order)
void kgsl_pool_free_sgt(struct sg_table *sgt);
void kgsl_pool_free_pages(struct page **pages, unsigned int page_count);
-void kgsl_init_page_pools(void);
+void kgsl_init_page_pools(struct platform_device *pdev);
void kgsl_exit_page_pools(void);
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
unsigned int pages_len, unsigned int *align);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 72895c18119f..618e9e9a33a3 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -574,12 +574,11 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
void *addr = (memdesc->hostptr) ?
memdesc->hostptr : (void *) memdesc->useraddr;
- /* Make sure that size is non-zero */
- if (!size)
+ if (size == 0 || size > UINT_MAX)
return -EINVAL;
- /* Make sure that the offset + size isn't bigger than we can handle */
- if ((offset + size) > ULONG_MAX)
+ /* Make sure that the offset + size does not overflow */
+ if ((offset + size < offset) || (offset + size < size))
return -ERANGE;
/* Make sure the offset + size do not overflow the address */
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 5697ad3b1d13..a37b5ce9a6b2 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -2801,6 +2801,10 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
put_online_cpus();
+ ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
+
pm_runtime_put(&adev->dev);
mutex_lock(&drvdata->mutex);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 294444d5f59e..cc8d957e0581 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1545,8 +1545,8 @@ static ssize_t mem_size_store(struct device *dev,
mutex_lock(&drvdata->mem_lock);
if (kstrtoul(buf, 16, &val)) {
- return -EINVAL;
mutex_unlock(&drvdata->mem_lock);
+ return -EINVAL;
}
drvdata->mem_size = val;
@@ -1900,6 +1900,10 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
}
+ ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
+
pm_runtime_put(&adev->dev);
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 7baa1e750a23..3fd080b94069 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -162,6 +162,9 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
/* Disable tpiu to support older devices */
tpiu_disable_hw(drvdata);
+ ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
pm_runtime_put(&adev->dev);
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index ebb49230d4d7..e08de7a808eb 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -165,7 +165,8 @@
#define FG_ADC_RR_CHG_THRESHOLD_SCALE 4
#define FG_ADC_RR_VOLT_INPUT_FACTOR 8
-#define FG_ADC_RR_CURR_INPUT_FACTOR 2
+#define FG_ADC_RR_CURR_INPUT_FACTOR 2000
+#define FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL 1886
#define FG_ADC_SCALE_MILLI_FACTOR 1000
#define FG_ADC_KELVINMIL_CELSIUSMIL 273150
@@ -323,12 +324,20 @@ static int rradc_post_process_curr(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_ua)
{
- int64_t ua = 0;
+ int64_t ua = 0, scale = 0;
- /* 0.5 V/A; 2.5V ADC full scale */
- ua = ((int64_t)adc_code * FG_ADC_RR_CURR_INPUT_FACTOR);
+ if (!prop)
+ return -EINVAL;
+
+ if (prop->channel == RR_ADC_USBIN_I)
+ scale = FG_ADC_RR_CURR_USBIN_INPUT_FACTOR_MIL;
+ else
+ scale = FG_ADC_RR_CURR_INPUT_FACTOR;
+
+ /* scale * V/A; 2.5V ADC full scale */
+ ua = ((int64_t)adc_code * scale);
ua *= (FG_ADC_RR_FS_VOLTAGE_MV * FG_ADC_SCALE_MILLI_FACTOR);
- ua = div64_s64(ua, FG_MAX_ADC_READINGS);
+ ua = div64_s64(ua, (FG_MAX_ADC_READINGS * 1000));
*result_ua = ua;
return 0;
diff --git a/drivers/iio/adc/qcom-tadc.c b/drivers/iio/adc/qcom-tadc.c
index 3cc2694f9a03..4a56847a43e7 100644
--- a/drivers/iio/adc/qcom-tadc.c
+++ b/drivers/iio/adc/qcom-tadc.c
@@ -398,7 +398,7 @@ static int tadc_do_conversion(struct tadc_chip *chip, u8 channels, s16 *adc)
}
for (i = 0; i < TADC_NUM_CH; i++)
- adc[i] = val[i * 2] | val[i * 2 + 1] << BITS_PER_BYTE;
+ adc[i] = (s16)(val[i * 2] | (u16)val[i * 2 + 1] << 8);
return jiffies_to_msecs(timeout - timeleft);
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fda10abae58a..4ed3fd177f15 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -528,6 +528,8 @@ static bool arm_smmu_is_static_cb(struct arm_smmu_device *smmu);
static bool arm_smmu_is_slave_side_secure(struct arm_smmu_domain *smmu_domain);
static bool arm_smmu_has_secure_vmid(struct arm_smmu_domain *smmu_domain);
+static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
+
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
@@ -1604,7 +1606,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
/* SCTLR */
reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;
- if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
+ if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
+ !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
!stage1)
reg |= SCTLR_M;
if (stage1)
@@ -3043,6 +3046,11 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
ret = 0;
break;
}
+ case DOMAIN_ATTR_EARLY_MAP:
+ *((int *)data) = !!(smmu_domain->attributes
+ & (1 << DOMAIN_ATTR_EARLY_MAP));
+ ret = 0;
+ break;
default:
ret = -ENODEV;
break;
@@ -3148,6 +3156,24 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
ret = 0;
break;
+ case DOMAIN_ATTR_EARLY_MAP: {
+ int early_map = *((int *)data);
+
+ ret = 0;
+ if (early_map) {
+ smmu_domain->attributes |=
+ 1 << DOMAIN_ATTR_EARLY_MAP;
+ } else {
+ if (smmu_domain->smmu)
+ ret = arm_smmu_enable_s1_translations(
+ smmu_domain);
+
+ if (!ret)
+ smmu_domain->attributes &=
+ ~(1 << DOMAIN_ATTR_EARLY_MAP);
+ }
+ break;
+ }
default:
ret = -ENODEV;
break;
@@ -3158,6 +3184,28 @@ out_unlock:
return ret;
}
+
+static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *cb_base;
+ u32 reg;
+ int ret;
+
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ ret = arm_smmu_enable_clocks(smmu);
+ if (ret)
+ return ret;
+
+ reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+ reg |= SCTLR_M;
+
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
+ arm_smmu_disable_clocks(smmu);
+ return ret;
+}
+
static int arm_smmu_dma_supported(struct iommu_domain *domain,
struct device *dev, u64 mask)
{
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 3b54fd4a77e6..7b0c1ae5a48d 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -70,6 +70,8 @@ static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
return "DOMAIN_ATTR_S1_BYPASS";
case DOMAIN_ATTR_FAST:
return "DOMAIN_ATTR_FAST";
+ case DOMAIN_ATTR_EARLY_MAP:
+ return "DOMAIN_ATTR_EARLY_MAP";
default:
return "Unknown attr!";
}
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 51fd79f101c8..ea3d0eed47e1 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -49,6 +49,10 @@
#define FLASH_LED_REG_VPH_DROOP_THRESHOLD(base) (base + 0x61)
#define FLASH_LED_REG_VPH_DROOP_DEBOUNCE(base) (base + 0x62)
#define FLASH_LED_REG_ILED_GRT_THRSH(base) (base + 0x67)
+#define FLASH_LED_REG_LED1N2_ICLAMP_LOW(base) (base + 0x68)
+#define FLASH_LED_REG_LED1N2_ICLAMP_MID(base) (base + 0x69)
+#define FLASH_LED_REG_LED3_ICLAMP_LOW(base) (base + 0x6A)
+#define FLASH_LED_REG_LED3_ICLAMP_MID(base) (base + 0x6B)
#define FLASH_LED_REG_MITIGATION_SEL(base) (base + 0x6E)
#define FLASH_LED_REG_MITIGATION_SW(base) (base + 0x6F)
#define FLASH_LED_REG_LMH_LEVEL(base) (base + 0x70)
@@ -173,18 +177,12 @@ struct flash_node_data {
bool led_on;
};
-struct flash_regulator_data {
- struct regulator *vreg;
- const char *reg_name;
- u32 max_volt_uv;
-};
struct flash_switch_data {
struct platform_device *pdev;
+ struct regulator *vreg;
struct led_classdev cdev;
- struct flash_regulator_data *reg_data;
int led_mask;
- int num_regulators;
bool regulator_on;
bool enabled;
};
@@ -202,6 +200,10 @@ struct flash_led_platform_data {
int rpara_uohm;
int lmh_rbatt_threshold_uohm;
int lmh_ocv_threshold_uv;
+ u32 led1n2_iclamp_low_ma;
+ u32 led1n2_iclamp_mid_ma;
+ u32 led3_iclamp_low_ma;
+ u32 led3_iclamp_mid_ma;
u8 isc_delay;
u8 warmup_delay;
u8 current_derate_en_cfg;
@@ -385,6 +387,46 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
if (rc < 0)
return rc;
+ if (led->pdata->led1n2_iclamp_low_ma) {
+ val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_low_ma,
+ led->fnode[0].ires_ua);
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_LED1N2_ICLAMP_LOW(led->base),
+ FLASH_LED_CURRENT_MASK, val);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (led->pdata->led1n2_iclamp_mid_ma) {
+ val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_mid_ma,
+ led->fnode[0].ires_ua);
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_LED1N2_ICLAMP_MID(led->base),
+ FLASH_LED_CURRENT_MASK, val);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (led->pdata->led3_iclamp_low_ma) {
+ val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_low_ma,
+ led->fnode[3].ires_ua);
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_LED3_ICLAMP_LOW(led->base),
+ FLASH_LED_CURRENT_MASK, val);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (led->pdata->led3_iclamp_mid_ma) {
+ val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_mid_ma,
+ led->fnode[3].ires_ua);
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_LED3_ICLAMP_MID(led->base),
+ FLASH_LED_CURRENT_MASK, val);
+ if (rc < 0)
+ return rc;
+ }
+
return 0;
}
@@ -426,34 +468,27 @@ static int qpnp_flash_led_hw_strobe_enable(struct flash_node_data *fnode,
static int qpnp_flash_led_regulator_enable(struct qpnp_flash_led *led,
struct flash_switch_data *snode, bool on)
{
- int i, rc = 0;
+ int rc = 0;
+
+ if (!snode || !snode->vreg)
+ return 0;
if (snode->regulator_on == on)
return 0;
- if (on == false) {
- i = snode->num_regulators;
- goto out;
- }
+ if (on)
+ rc = regulator_enable(snode->vreg);
+ else
+ rc = regulator_disable(snode->vreg);
- for (i = 0; i < snode->num_regulators; i++) {
- rc = regulator_enable(snode->reg_data[i].vreg);
- if (rc < 0) {
- dev_err(&led->pdev->dev,
- "regulator enable failed, rc=%d\n", rc);
- goto out;
- }
+ if (rc < 0) {
+ dev_err(&led->pdev->dev, "regulator_%s failed, rc=%d\n",
+ on ? "enable" : "disable", rc);
+ return rc;
}
- snode->regulator_on = true;
- return rc;
-
-out:
- while (i--)
- regulator_disable(snode->reg_data[i].vreg);
-
- snode->regulator_on = false;
- return rc;
+ snode->regulator_on = on ? true : false;
+ return 0;
}
static int get_property_from_fg(struct qpnp_flash_led *led,
@@ -835,7 +870,7 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
u8 val, mask;
if (snode->enabled == on) {
- dev_warn(&led->pdev->dev, "Switch node is already %s!\n",
+ dev_dbg(&led->pdev->dev, "Switch node is already %s!\n",
on ? "enabled" : "disabled");
return 0;
}
@@ -1050,6 +1085,32 @@ static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
spin_unlock(&led->lock);
}
+/* sysfs show function for flash_max_current */
+static ssize_t qpnp_flash_led_max_current_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rc;
+ struct flash_switch_data *snode;
+ struct qpnp_flash_led *led;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ snode = container_of(led_cdev, struct flash_switch_data, cdev);
+ led = dev_get_drvdata(&snode->pdev->dev);
+
+ rc = qpnp_flash_led_get_max_avail_current(led);
+ if (rc < 0)
+ dev_err(&led->pdev->dev, "query max current failed, rc=%d\n",
+ rc);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", rc);
+}
+
+/* sysfs attributes exported by flash_led */
+static struct device_attribute qpnp_flash_led_attrs[] = {
+ __ATTR(max_current, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_flash_led_max_current_show, NULL),
+};
+
static int flash_led_psy_notifier_call(struct notifier_block *nb,
unsigned long ev, void *v)
{
@@ -1156,104 +1217,6 @@ int qpnp_flash_led_unregister_irq_notifier(struct notifier_block *nb)
return atomic_notifier_chain_unregister(&irq_notifier_list, nb);
}
-static int qpnp_flash_led_regulator_setup(struct qpnp_flash_led *led,
- struct flash_switch_data *snode, bool on)
-{
- int i, rc = 0;
-
- if (on == false) {
- i = snode->num_regulators;
- goto out;
- }
-
- for (i = 0; i < snode->num_regulators; i++) {
- snode->reg_data[i].vreg = regulator_get(snode->cdev.dev,
- snode->reg_data[i].reg_name);
- if (IS_ERR(snode->reg_data[i].vreg)) {
- rc = PTR_ERR(snode->reg_data[i].vreg);
- dev_err(&led->pdev->dev,
- "Failed to get regulator, rc=%d\n", rc);
- goto out;
- }
-
- if (regulator_count_voltages(snode->reg_data[i].vreg) > 0) {
- rc = regulator_set_voltage(snode->reg_data[i].vreg,
- snode->reg_data[i].max_volt_uv,
- snode->reg_data[i].max_volt_uv);
- if (rc < 0) {
- dev_err(&led->pdev->dev,
- "regulator set voltage failed, rc=%d\n",
- rc);
- regulator_put(snode->reg_data[i].vreg);
- goto out;
- }
- }
- }
-
- return rc;
-
-out:
- while (i--) {
- if (regulator_count_voltages(snode->reg_data[i].vreg) > 0)
- regulator_set_voltage(snode->reg_data[i].vreg, 0,
- snode->reg_data[i].max_volt_uv);
-
- regulator_put(snode->reg_data[i].vreg);
- }
-
- return rc;
-}
-
-static int qpnp_flash_led_regulator_parse_dt(struct qpnp_flash_led *led,
- struct flash_switch_data *snode,
- struct device_node *node) {
-
- int i = 0, rc = 0, num_regs = 0;
- struct device_node *temp = NULL;
- const char *temp_string;
- u32 val;
-
- while ((temp = of_get_next_available_child(node, temp))) {
- if (of_find_property(temp, "regulator-name", NULL))
- num_regs++;
- }
- snode->num_regulators = num_regs;
-
- if (snode->num_regulators == 0)
- return 0;
-
- snode->reg_data = devm_kcalloc(&led->pdev->dev, snode->num_regulators,
- sizeof(*snode->reg_data),
- GFP_KERNEL);
- if (!snode->reg_data)
- return -ENOMEM;
-
- for_each_available_child_of_node(node, temp) {
- rc = of_property_read_string(temp, "regulator-name",
- &temp_string);
- if (!rc)
- snode->reg_data[i].reg_name = temp_string;
- else {
- dev_err(&led->pdev->dev,
- "Unable to read regulator name, rc=%d\n", rc);
- return rc;
- }
-
- rc = of_property_read_u32(temp, "max-voltage-uv", &val);
- if (!rc) {
- snode->reg_data[i].max_volt_uv = val;
- } else if (rc != -EINVAL) {
- dev_err(&led->pdev->dev,
- "Unable to read max voltage, rc=%d\n", rc);
- return rc;
- }
-
- i++;
- }
-
- return 0;
-}
-
static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
struct flash_node_data *fnode, struct device_node *node)
{
@@ -1440,7 +1403,7 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
fnode->pinctrl = devm_pinctrl_get(fnode->cdev.dev);
if (IS_ERR_OR_NULL(fnode->pinctrl)) {
- dev_warn(&led->pdev->dev, "No pinctrl defined\n");
+ dev_dbg(&led->pdev->dev, "No pinctrl defined\n");
fnode->pinctrl = NULL;
} else {
fnode->gpio_state_active =
@@ -1471,7 +1434,8 @@ static int qpnp_flash_led_parse_and_register_switch(struct qpnp_flash_led *led,
struct flash_switch_data *snode,
struct device_node *node)
{
- int rc = 0;
+ int rc = 0, num;
+ char reg_name[16], reg_sup_name[16];
rc = of_property_read_string(node, "qcom,led-name", &snode->cdev.name);
if (rc < 0) {
@@ -1480,6 +1444,12 @@ static int qpnp_flash_led_parse_and_register_switch(struct qpnp_flash_led *led,
return rc;
}
+ rc = sscanf(snode->cdev.name, "led:switch_%d", &num);
+ if (!rc) {
+ pr_err("No number for switch device?\n");
+ return -EINVAL;
+ }
+
rc = of_property_read_string(node, "qcom,default-led-trigger",
&snode->cdev.default_trigger);
if (rc < 0) {
@@ -1499,18 +1469,16 @@ static int qpnp_flash_led_parse_and_register_switch(struct qpnp_flash_led *led,
return -EINVAL;
}
- rc = qpnp_flash_led_regulator_parse_dt(led, snode, node);
- if (rc < 0) {
- dev_err(&led->pdev->dev,
- "Unable to parse regulator data, rc=%d\n", rc);
- return rc;
- }
-
- if (snode->num_regulators) {
- rc = qpnp_flash_led_regulator_setup(led, snode, true);
- if (rc < 0) {
- dev_err(&led->pdev->dev,
- "Unable to setup regulator, rc=%d\n", rc);
+ scnprintf(reg_name, sizeof(reg_name), "switch%d-supply", num);
+ if (of_find_property(led->pdev->dev.of_node, reg_name, NULL)) {
+ scnprintf(reg_sup_name, sizeof(reg_sup_name), "switch%d", num);
+ snode->vreg = devm_regulator_get(&led->pdev->dev, reg_sup_name);
+ if (IS_ERR_OR_NULL(snode->vreg)) {
+ rc = PTR_ERR(snode->vreg);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&led->pdev->dev, "Failed to get regulator, rc=%d\n",
+ rc);
+ snode->vreg = NULL;
return rc;
}
}
@@ -1651,6 +1619,42 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
return rc;
}
+ rc = of_property_read_u32(node, "qcom,led1n2-iclamp-low-ma", &val);
+ if (!rc) {
+ led->pdata->led1n2_iclamp_low_ma = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read led1n2_iclamp_low current, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,led1n2-iclamp-mid-ma", &val);
+ if (!rc) {
+ led->pdata->led1n2_iclamp_mid_ma = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read led1n2_iclamp_mid current, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,led3-iclamp-low-ma", &val);
+ if (!rc) {
+ led->pdata->led3_iclamp_low_ma = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read led3_iclamp_low current, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,led3-iclamp-mid-ma", &val);
+ if (!rc) {
+ led->pdata->led3_iclamp_mid_ma = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read led3_iclamp_mid current, rc=%d\n",
+ rc);
+ return rc;
+ }
+
led->pdata->vled_max_uv = FLASH_LED_VLED_MAX_DEFAULT_UV;
rc = of_property_read_u32(node, "qcom,vled-max-uv", &val);
if (!rc) {
@@ -1785,7 +1789,7 @@ static int qpnp_flash_led_probe(struct platform_device *pdev)
struct device_node *node, *temp;
const char *temp_string;
unsigned int base;
- int rc, i = 0;
+ int rc, i = 0, j = 0;
node = pdev->dev.of_node;
if (!node) {
@@ -1863,26 +1867,35 @@ static int qpnp_flash_led_probe(struct platform_device *pdev)
return -ENOMEM;
temp = NULL;
- for (i = 0; i < led->num_fnodes; i++) {
- temp = of_get_next_available_child(node, temp);
- rc = qpnp_flash_led_parse_each_led_dt(led,
- &led->fnode[i], temp);
+ i = 0;
+ j = 0;
+ for_each_available_child_of_node(node, temp) {
+ rc = of_property_read_string(temp, "label", &temp_string);
if (rc < 0) {
dev_err(&pdev->dev,
- "Unable to parse flash node %d rc=%d\n", i, rc);
- goto error_led_register;
+ "Failed to parse label, rc=%d\n", rc);
+ return rc;
}
- }
- for (i = 0; i < led->num_snodes; i++) {
- temp = of_get_next_available_child(node, temp);
- rc = qpnp_flash_led_parse_and_register_switch(led,
- &led->snode[i], temp);
- if (rc < 0) {
- dev_err(&pdev->dev,
- "Unable to parse and register switch node, rc=%d\n",
- rc);
- goto error_switch_register;
+ if (!strcmp("flash", temp_string) ||
+ !strcmp("torch", temp_string)) {
+ rc = qpnp_flash_led_parse_each_led_dt(led,
+ &led->fnode[i++], temp);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Unable to parse flash node %d rc=%d\n",
+ i, rc);
+ goto error_led_register;
+ }
+ }
+
+ if (!strcmp("switch", temp_string)) {
+ rc = qpnp_flash_led_parse_and_register_switch(led,
+ &led->snode[j++], temp);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Unable to parse and register switch node, rc=%d\n",
+ rc);
+ goto error_switch_register;
+ }
}
}
@@ -1946,12 +1959,36 @@ static int qpnp_flash_led_probe(struct platform_device *pdev)
goto unreg_notifier;
}
+ for (i = 0; i < led->num_snodes; i++) {
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++) {
+ rc = sysfs_create_file(&led->snode[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "sysfs creation failed, rc=%d\n",
+ rc);
+ goto sysfs_fail;
+ }
+ }
+ }
+
spin_lock_init(&led->lock);
dev_set_drvdata(&pdev->dev, led);
return 0;
+sysfs_fail:
+ for (--j; j >= 0; j--)
+ sysfs_remove_file(&led->snode[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+
+ for (--i; i >= 0; i--) {
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++)
+ sysfs_remove_file(&led->snode[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ }
+
+ i = led->num_snodes;
unreg_notifier:
power_supply_unreg_notifier(&led->nb);
error_switch_register:
@@ -1968,20 +2005,21 @@ error_led_register:
static int qpnp_flash_led_remove(struct platform_device *pdev)
{
struct qpnp_flash_led *led = dev_get_drvdata(&pdev->dev);
- int i;
+ int i, j;
for (i = 0; i < led->num_snodes; i++) {
- if (led->snode[i].num_regulators) {
- if (led->snode[i].regulator_on)
- qpnp_flash_led_regulator_enable(led,
- &led->snode[i], false);
- qpnp_flash_led_regulator_setup(led,
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++)
+ sysfs_remove_file(&led->snode[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+
+ if (led->snode[i].regulator_on)
+ qpnp_flash_led_regulator_enable(led,
&led->snode[i], false);
- }
}
while (i > 0)
led_classdev_unregister(&led->snode[--i].cdev);
+
i = led->num_fnodes;
while (i > 0)
led_classdev_unregister(&led->fnode[--i].cdev);
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
index d6bb18522e0c..6c09f3820dfd 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
@@ -460,7 +460,7 @@ int msm_camera_set_clk_flags(struct clk *clk, unsigned long flags)
if (!clk)
return -EINVAL;
- CDBG("clk : %p, flags : %ld\n", clk, flags);
+ CDBG("clk : %pK, flags : %ld\n", clk, flags);
return clk_set_flags(clk, flags);
}
@@ -1040,8 +1040,11 @@ uint32_t msm_camera_unregister_bus_client(enum cam_bus_client id)
mutex_destroy(&g_cv[id].lock);
msm_bus_scale_unregister_client(g_cv[id].bus_client);
- msm_bus_cl_clear_pdata(g_cv[id].pdata);
- memset(&g_cv[id], 0, sizeof(struct msm_cam_bus_pscale_data));
+ g_cv[id].bus_client = 0;
+ g_cv[id].num_usecases = 0;
+ g_cv[id].num_paths = 0;
+ g_cv[id].vector_index = 0;
+ g_cv[id].dyn_vote = 0;
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 1529e2aa740c..96f9e61578f2 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -1027,7 +1027,7 @@ static int msm_vfe40_start_fetch_engine(struct vfe_device *vfe_dev,
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
if (rc < 0 || !buf) {
- pr_err("%s: No fetch buffer rc= %d buf= %p\n",
+ pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index fdc98436a105..e8289f05d28f 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -67,7 +67,7 @@ static int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
} else {
/* check if the stream has been added for the vfe-device */
if (stream_info->vfe_mask & (1 << vfe_dev->pdev->id)) {
- pr_err("%s: stream %p/%x is already added for vfe dev %d vfe_mask %x\n",
+ pr_err("%s: stream %pK/%x is already added for vfe dev %d vfe_mask %x\n",
__func__, stream_info, stream_info->stream_id,
vfe_dev->pdev->id, stream_info->vfe_mask);
return -EINVAL;
@@ -1255,7 +1255,7 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
if (vfe_idx == -ENOTTY ||
stream_release_cmd->stream_handle !=
stream_info->stream_handle[vfe_idx]) {
- pr_err("%s: Invalid stream %p handle %x/%x vfe_idx %d vfe_dev %d num_isp %d\n",
+ pr_err("%s: Invalid stream %pK handle %x/%x vfe_idx %d vfe_dev %d num_isp %d\n",
__func__, stream_info,
stream_release_cmd->stream_handle,
vfe_idx != -ENOTTY ?
@@ -3483,7 +3483,7 @@ static int msm_isp_stream_axi_cfg_update(struct vfe_device *vfe_dev,
if (stream_info->update_vfe_mask) {
if (stream_info->update_vfe_mask & (1 << vfe_dev->pdev->id)) {
spin_unlock_irqrestore(&stream_info->lock, flags);
- pr_err("%s: Stream %p/%x Update already in progress for vfe %d\n",
+ pr_err("%s: Stream %pK/%x Update already in progress for vfe %d\n",
__func__, stream_info, stream_info->stream_src,
vfe_dev->pdev->id);
return -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index df95e5cb9b99..094a7861831a 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -840,7 +840,7 @@ static uint16_t msm_ispif_get_cids_mask_from_cfg(
uint16_t cids_mask = 0;
BUG_ON(!entry);
- for (i = 0; i < entry->num_cids; i++)
+ for (i = 0; i < entry->num_cids && i < MAX_CID_CH_PARAM_ENTRY; i++)
cids_mask |= (1 << entry->cids[i]);
return cids_mask;
@@ -970,7 +970,7 @@ static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits,
pr_err("%s: invalid interface type\n", __func__);
return;
}
- if (params->entries[i].num_cids > MAX_CID_CH) {
+ if (params->entries[i].num_cids > MAX_CID_CH_PARAM_ENTRY) {
pr_err("%s: out of range of cid_num %d\n",
__func__, params->entries[i].num_cids);
return;
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
index 59135225cb15..c779ee46c19a 100644
--- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
@@ -286,7 +286,7 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id,
break;
}
if (vb2_v4l2_buf != vb) {
- pr_err("VB buffer is INVALID vb=%p, ses_id=%d, str_id=%d\n",
+ pr_err("VB buffer is INVALID vb=%pK, ses_id=%d, str_id=%d\n",
vb, session_id, stream_id);
spin_unlock_irqrestore(&stream->stream_lock, flags);
return -EINVAL;
@@ -329,7 +329,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
break;
}
if (vb2_v4l2_buf != vb) {
- pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%p\n",
+ pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%pK\n",
session_id, stream_id, vb);
spin_unlock_irqrestore(&stream->stream_lock, flags);
return -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 106d76aae3bb..ab074ffbcdfb 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -1074,6 +1074,13 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
goto end;
}
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
+ CAM_AHB_NOMINAL_VOTE);
+ if (rc < 0) {
+ pr_err("%s:%d: failed to vote for AHB\n", __func__, __LINE__);
+ goto end;
+ }
+
msm_camera_io_w(0x1, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
msm_camera_io_w(0x1, cpp_dev->base +
MSM_CPP_MICRO_BOOT_START);
@@ -1082,7 +1089,7 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
MSM_CPP_MSG_ID_CMD, rc);
- goto end;
+ goto vote;
}
msm_camera_io_w(0xFFFFFFFF, cpp_dev->base +
@@ -1092,7 +1099,7 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll rx empty failed %d",
__func__, __LINE__, rc);
- goto end;
+ goto vote;
}
/*Start firmware loading*/
msm_cpp_write(MSM_CPP_CMD_FW_LOAD, cpp_dev->base);
@@ -1102,7 +1109,7 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll rx empty failed %d",
__func__, __LINE__, rc);
- goto end;
+ goto vote;
}
for (i = 0; i < cpp_dev->fw->size/4; i++) {
msm_cpp_write(*ptr_bin, cpp_dev->base);
@@ -1111,7 +1118,7 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll rx empty failed %d",
__func__, __LINE__, rc);
- goto end;
+ goto vote;
}
}
ptr_bin++;
@@ -1124,21 +1131,21 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
MSM_CPP_MSG_ID_OK, rc);
- goto end;
+ goto vote;
}
rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
if (rc) {
pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
MSM_CPP_MSG_ID_CMD, rc);
- goto end;
+ goto vote;
}
rc = msm_cpp_poll_rx_empty(cpp_dev->base);
if (rc) {
pr_err("%s:%d] poll rx empty failed %d",
__func__, __LINE__, rc);
- goto end;
+ goto vote;
}
/*Trigger MC to jump to start address*/
msm_cpp_write(MSM_CPP_CMD_EXEC_JUMP, cpp_dev->base);
@@ -1148,21 +1155,21 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
MSM_CPP_MSG_ID_CMD, rc);
- goto end;
+ goto vote;
}
rc = msm_cpp_poll(cpp_dev->base, 0x1);
if (rc) {
pr_err("%s:%d] poll command 0x1 failed %d", __func__, __LINE__,
rc);
- goto end;
+ goto vote;
}
rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_JUMP_ACK);
if (rc) {
pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
MSM_CPP_MSG_ID_JUMP_ACK, rc);
- goto end;
+ goto vote;
}
rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
@@ -1171,6 +1178,11 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
MSM_CPP_MSG_ID_JUMP_ACK, rc);
}
+vote:
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0)
+ pr_err("%s:%d: failed to vote for AHB\n", __func__, __LINE__);
end:
return rc;
}
@@ -4186,7 +4198,7 @@ static int cpp_probe(struct platform_device *pdev)
cpp_dev->state = CPP_STATE_BOOT;
rc = cpp_init_hardware(cpp_dev);
if (rc < 0)
- goto cpp_probe_init_error;
+ goto bus_de_init;
media_entity_init(&cpp_dev->msm_sd.sd.entity, 0, NULL, 0);
cpp_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
@@ -4225,7 +4237,7 @@ static int cpp_probe(struct platform_device *pdev)
if (!cpp_dev->work) {
pr_err("no enough memory\n");
rc = -ENOMEM;
- goto cpp_probe_init_error;
+ goto bus_de_init;
}
INIT_WORK((struct work_struct *)cpp_dev->work, msm_cpp_do_timeout_work);
@@ -4245,6 +4257,12 @@ static int cpp_probe(struct platform_device *pdev)
else
CPP_DBG("FAILED.");
return rc;
+
+bus_de_init:
+ if (cpp_dev->bus_master_flag)
+ msm_cpp_deinit_bandwidth_mgr(cpp_dev);
+ else
+ msm_isp_deinit_bandwidth_mgr(ISP_CPP);
cpp_probe_init_error:
media_entity_cleanup(&cpp_dev->msm_sd.sd.entity);
msm_sd_unregister(&cpp_dev->msm_sd);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
index 5f749bd46273..0d27de5c9b4b 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
@@ -645,7 +645,7 @@ static int32_t msm_flash_release(
static int32_t msm_flash_config(struct msm_flash_ctrl_t *flash_ctrl,
void __user *argp)
{
- int32_t rc = -EINVAL;
+ int32_t rc = 0;
struct msm_flash_cfg_data_t *flash_data =
(struct msm_flash_cfg_data_t *) argp;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
index 25c152be2b71..5a330db0f9a5 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
@@ -794,7 +794,7 @@ int32_t msm_camera_tz_i2c_register_sensor(
return -EINVAL;
}
- CDBG("id=%d, client=%p\n", s_ctrl->id, s_ctrl);
+ CDBG("id=%d, client=%pK\n", s_ctrl->id, s_ctrl);
sensor_info[s_ctrl->id].s_ctrl = s_ctrl;
sensor_info[s_ctrl->id].secure = s_ctrl->is_secure;
return 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index 94223b557990..cf33bc6437cc 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -49,6 +49,10 @@
#define SDE_ROT_EVTLOG_BUF_ALIGN 32
#define SDE_ROT_DEBUG_BASE_MAX 10
+#define SDE_ROT_DEFAULT_BASE_REG_CNT 0x100
+#define GROUP_BYTES 4
+#define ROW_BYTES 16
+
static DEFINE_SPINLOCK(sde_rot_xlock);
/*
@@ -963,6 +967,273 @@ static const struct file_operations sde_rotator_raw_ops = {
.release = single_release
};
+static int sde_rotator_debug_base_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int sde_rotator_debug_base_release(struct inode *inode,
+ struct file *file)
+{
+ struct sde_rotator_debug_base *dbg = file->private_data;
+
+ if (dbg && dbg->buf) {
+ kfree(dbg->buf);
+ dbg->buf_len = 0;
+ dbg->buf = NULL;
+ }
+ return 0;
+}
+
+static ssize_t sde_rotator_debug_base_offset_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct sde_rotator_debug_base *dbg = file->private_data;
+ u32 off = 0;
+ u32 cnt = SDE_ROT_DEFAULT_BASE_REG_CNT;
+ char buf[24];
+
+ if (!dbg)
+ return -ENODEV;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0;
+
+ if (sscanf(buf, "%5x %x", &off, &cnt) < 2)
+ return -EINVAL;
+
+ if (off > dbg->max_offset)
+ return -EINVAL;
+
+ if (cnt > (dbg->max_offset - off))
+ cnt = dbg->max_offset - off;
+
+ dbg->off = off;
+ dbg->cnt = cnt;
+
+ SDEROT_DBG("offset=%x cnt=%x\n", off, cnt);
+
+ return count;
+}
+
+static ssize_t sde_rotator_debug_base_offset_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct sde_rotator_debug_base *dbg = file->private_data;
+ int len = 0;
+ char buf[24] = {'\0'};
+
+ if (!dbg)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static ssize_t sde_rotator_debug_base_reg_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct sde_rotator_debug_base *dbg = file->private_data;
+ size_t off;
+ u32 data, cnt;
+ char buf[24];
+
+ if (!dbg)
+ return -ENODEV;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0;
+
+ cnt = sscanf(buf, "%zx %x", &off, &data);
+
+ if (cnt < 2)
+ return -EFAULT;
+
+ if (off >= dbg->max_offset)
+ return -EFAULT;
+
+ /* Enable Clock for register access */
+ sde_rotator_clk_ctrl(dbg->mgr, true);
+
+ writel_relaxed(data, dbg->base + off);
+
+ /* Disable Clock after register access */
+ sde_rotator_clk_ctrl(dbg->mgr, false);
+
+ SDEROT_DBG("addr=%zx data=%x\n", off, data);
+
+ return count;
+}
+
+static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct sde_rotator_debug_base *dbg = file->private_data;
+ size_t len;
+
+ if (!dbg) {
+ SDEROT_ERR("invalid handle\n");
+ return -ENODEV;
+ }
+
+ if (!dbg->buf) {
+ char dump_buf[64];
+ char *ptr;
+ int cnt, tot;
+
+ dbg->buf_len = sizeof(dump_buf) *
+ DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
+ dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);
+
+ if (!dbg->buf) {
+ SDEROT_ERR("not enough memory to hold reg dump\n");
+ return -ENOMEM;
+ }
+
+ ptr = dbg->base + dbg->off;
+ tot = 0;
+
+ /* Enable clock for register access */
+ sde_rotator_clk_ctrl(dbg->mgr, true);
+
+ for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
+ hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
+ ROW_BYTES, GROUP_BYTES, dump_buf,
+ sizeof(dump_buf), false);
+ len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
+ "0x%08x: %s\n",
+ ((int) (unsigned long) ptr) -
+ ((int) (unsigned long) dbg->base),
+ dump_buf);
+
+ ptr += ROW_BYTES;
+ tot += len;
+ if (tot >= dbg->buf_len)
+ break;
+ }
+ /* Disable clock after register access */
+ sde_rotator_clk_ctrl(dbg->mgr, false);
+
+ dbg->buf_len = tot;
+ }
+
+ if (*ppos >= dbg->buf_len)
+ return 0; /* done reading */
+
+ len = min(count, dbg->buf_len - (size_t) *ppos);
+ if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
+ SDEROT_ERR("failed to copy to user\n");
+ return -EFAULT;
+ }
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static const struct file_operations sde_rotator_off_fops = {
+ .open = sde_rotator_debug_base_open,
+ .release = sde_rotator_debug_base_release,
+ .read = sde_rotator_debug_base_offset_read,
+ .write = sde_rotator_debug_base_offset_write,
+};
+
+static const struct file_operations sde_rotator_reg_fops = {
+ .open = sde_rotator_debug_base_open,
+ .release = sde_rotator_debug_base_release,
+ .read = sde_rotator_debug_base_reg_read,
+ .write = sde_rotator_debug_base_reg_write,
+};
+
+int sde_rotator_debug_register_base(struct sde_rotator_device *rot_dev,
+ struct dentry *debugfs_root,
+ const char *name,
+ struct sde_io_data *io_data)
+{
+ struct sde_rotator_debug_base *dbg;
+ struct dentry *ent_off, *ent_reg;
+ char dbgname[80] = "";
+ int prefix_len = 0;
+
+ if (!io_data)
+ return -EINVAL;
+
+ dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ return -ENOMEM;
+
+ if (name)
+ strlcpy(dbg->name, name, sizeof(dbg->name));
+ dbg->base = io_data->base;
+ dbg->max_offset = io_data->len;
+ dbg->off = 0;
+ dbg->cnt = SDE_ROT_DEFAULT_BASE_REG_CNT;
+
+ if (name) {
+ if (strcmp(name, "sde"))
+ prefix_len = snprintf(dbgname, sizeof(dbgname), "%s_",
+ name);
+ else
+ /*
+ * For SDE Rotator registers block, the IO base address
+ * is based on MDP IO address base. It is necessary to
+ * apply the initial offset to it from the first
+ * regdump setting.
+ */
+ dbg->base += rot_dev->mdata->regdump ?
+ rot_dev->mdata->regdump[0].offset : 0;
+ }
+
+ strlcpy(dbgname + prefix_len, "off", sizeof(dbgname) - prefix_len);
+ ent_off = debugfs_create_file(dbgname, 0644, debugfs_root, dbg,
+ &sde_rotator_off_fops);
+ if (IS_ERR_OR_NULL(ent_off)) {
+ SDEROT_ERR("debugfs_create_file: offset fail\n");
+ goto off_fail;
+ }
+
+ strlcpy(dbgname + prefix_len, "reg", sizeof(dbgname) - prefix_len);
+ ent_reg = debugfs_create_file(dbgname, 0644, debugfs_root, dbg,
+ &sde_rotator_reg_fops);
+ if (IS_ERR_OR_NULL(ent_reg)) {
+ SDEROT_ERR("debugfs_create_file: reg fail\n");
+ goto reg_fail;
+ }
+
+ dbg->mgr = rot_dev->mgr;
+
+ return 0;
+reg_fail:
+ debugfs_remove(ent_off);
+off_fail:
+ kfree(dbg);
+ return -ENODEV;
+}
+
/*
* sde_rotator_create_debugfs - Setup rotator debugfs directory structure.
* @rot_dev: Pointer to rotator device
@@ -1040,6 +1311,20 @@ struct dentry *sde_rotator_create_debugfs(
return NULL;
}
+ if (sde_rotator_debug_register_base(rot_dev, debugfs_root,
+ "sde", &rot_dev->mdata->sde_io)) {
+ SDEROT_ERR("fail create debug register for sde rotator\n");
+ debugfs_remove_recursive(debugfs_root);
+ return NULL;
+ }
+
+ if (sde_rotator_debug_register_base(rot_dev, debugfs_root,
+ "vbif_nrt", &rot_dev->mdata->vbif_nrt_io)) {
+ SDEROT_ERR("fail create debug register for sderot vbif_nrt\n");
+ debugfs_remove_recursive(debugfs_root);
+ return NULL;
+ }
+
return debugfs_root;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
index dcda54274fad..c2c6f9775602 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
@@ -44,6 +44,17 @@ void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...);
struct sde_rotator_device;
+struct sde_rotator_debug_base {
+ char name[80];
+ void __iomem *base;
+ size_t off;
+ size_t cnt;
+ size_t max_offset;
+ char *buf;
+ size_t buf_len;
+ struct sde_rot_mgr *mgr;
+};
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *sde_rotator_create_debugfs(
struct sde_rotator_device *rot_dev);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index b88f03ce89ae..08075ae90507 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -305,6 +305,39 @@ static void sde_rotator_buf_queue(struct vb2_buffer *vb)
}
/*
+ * sde_rotator_buf_finish - vb2_ops buf_finish to finalize buffer before going
+ * back to user space
+ * @vb: Pointer to vb2 buffer struct.
+ */
+static void sde_rotator_buf_finish(struct vb2_buffer *vb)
+{
+ struct sde_rotator_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ int i;
+
+ SDEDEV_DBG(ctx->rot_dev->dev,
+ "buf_finish t:%d i:%d s:%d m:%u np:%d up:%lu\n",
+ vb->type, vb->index, vb->state,
+ vb->vb2_queue->memory,
+ vb->num_planes,
+ vb->planes[0].m.userptr);
+
+ if (vb->vb2_queue->memory != VB2_MEMORY_USERPTR)
+ return;
+
+ /*
+ * We use userptr to tunnel fd, and fd can be the same across qbuf
+ * even though the underlying buffer is different. Since vb2 layer
+ * optimizes memory mapping for userptr by first checking if userptr
+ * has changed, it will not trigger put_userptr if fd value does
+ * not change. In order to force buffer release, we need to clear
+ * userptr when the current buffer is done and ready to go back to
+ * user mode. Since 0 is a valid fd, reset userptr to -1 instead.
+ */
+ for (i = 0; i < vb->num_planes; i++)
+ vb->planes[i].m.userptr = ~0;
+}
+
+/*
* sde_rotator_return_all_buffers - Return all buffers with the given status.
* @q: Pointer to vb2 buffer queue struct.
* @state: State of the buffer
@@ -460,6 +493,7 @@ static struct vb2_ops sde_rotator_vb2_q_ops = {
.stop_streaming = sde_rotator_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
+ .buf_finish = sde_rotator_buf_finish,
};
/*
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 0b44896bf6b3..7388dab92c34 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -2153,6 +2153,33 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) + sizeof(*signal_info);
break;
}
+ case HAL_PARAM_VENC_IFRAMESIZE_TYPE:
+ {
+ enum hal_iframesize_type hal =
+ *(enum hal_iframesize_type *)pdata;
+ struct hfi_iframe_size *hfi = (struct hfi_iframe_size *)
+ &pkt->rg_property_data[1];
+
+ switch (hal) {
+ case HAL_IFRAMESIZE_TYPE_DEFAULT:
+ hfi->type = HFI_IFRAME_SIZE_DEFAULT;
+ break;
+ case HAL_IFRAMESIZE_TYPE_MEDIUM:
+ hfi->type = HFI_IFRAME_SIZE_MEDIUM;
+ break;
+ case HAL_IFRAMESIZE_TYPE_HUGE:
+ hfi->type = HFI_IFRAME_SIZE_HIGH;
+ break;
+ case HAL_IFRAMESIZE_TYPE_UNLIMITED:
+ hfi->type = HFI_IFRAME_SIZE_UNLIMITED;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_VENC_IFRAMESIZE;
+ pkt->size += sizeof(u32) + sizeof(struct hfi_iframe_size);
+ break;
+ }
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
case HAL_CONFIG_BUFFER_REQUIREMENTS:
case HAL_CONFIG_PRIORITY:
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 3633d1fb3cd1..fbdccea56f67 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -36,7 +36,7 @@
struct msm_vidc_drv *vidc_driver;
-uint32_t msm_vidc_pwr_collapse_delay = 2000;
+uint32_t msm_vidc_pwr_collapse_delay = 3000;
static inline struct msm_vidc_inst *get_vidc_inst(struct file *filp, void *fh)
{
@@ -138,12 +138,20 @@ int msm_v4l2_reqbufs(struct file *file, void *fh,
{
struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
int rc = 0;
- if (!b->count)
+ if (!b->count) {
rc = msm_vidc_release_buffers(vidc_inst, b->type);
- if (rc)
- dprintk(VIDC_WARN,
- "Failed in %s for release output buffers\n", __func__);
- return msm_vidc_reqbufs((void *)vidc_inst, b);
+ if (rc)
+ dprintk(VIDC_WARN,
+ "Failed in %s for release output buffers\n",
+ __func__);
+ } else {
+ rc = msm_vidc_reqbufs((void *)vidc_inst, b);
+ if (rc)
+ dprintk(VIDC_WARN,
+ "Failed in %s for buffer requirements\n",
+ __func__);
+ }
+ return rc;
}
int msm_v4l2_prepare_buf(struct file *file, void *fh,
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index f071aae3ccab..4ec331d121d9 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -185,6 +185,13 @@ static const char *const timestamp_mode[] = {
"Ignore",
};
+static const char *const iframe_sizes[] = {
+ "Default",
+ "Medium",
+ "Huge",
+ "Unlimited"
+};
+
static struct msm_vidc_ctrl msm_venc_ctrls[] = {
{
.id = V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD,
@@ -1281,6 +1288,20 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
.step = 1,
.qmenu = NULL,
},
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_TYPE,
+ .name = "Bounds of I-frame size",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT,
+ .maximum = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED,
+ .default_value = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT,
+ .menu_skip_mask = ~(
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_MEDIUM) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_HUGE) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED)),
+ .qmenu = iframe_sizes,
+ },
};
@@ -2117,6 +2138,19 @@ static inline int venc_v4l2_to_hal(int id, int value)
default:
goto unknown_value;
}
+ case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_TYPE:
+ switch (value) {
+ case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_DEFAULT:
+ return HAL_IFRAMESIZE_TYPE_DEFAULT;
+ case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_MEDIUM:
+ return HAL_IFRAMESIZE_TYPE_MEDIUM;
+ case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_HUGE:
+ return HAL_IFRAMESIZE_TYPE_HUGE;
+ case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED:
+ return HAL_IFRAMESIZE_TYPE_UNLIMITED;
+ default:
+ goto unknown_value;
+ }
}
unknown_value:
@@ -2159,6 +2193,7 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
int frameqp = 0;
int pic_order_cnt = 0;
struct hal_video_signal_info signal_info = {0};
+ enum hal_iframesize_type iframesize_type = HAL_IFRAMESIZE_TYPE_DEFAULT;
if (!inst || !inst->core || !inst->core->device) {
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
@@ -2940,7 +2975,10 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL:
switch (ctrl->val) {
case V4L2_CID_MPEG_VIDC_PERF_LEVEL_NOMINAL:
- inst->flags &= ~VIDC_TURBO;
+ if (inst->flags & VIDC_TURBO) {
+ inst->flags &= ~VIDC_TURBO;
+ msm_dcvs_init_load(inst);
+ }
break;
case V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO:
inst->flags |= VIDC_TURBO;
@@ -3239,6 +3277,13 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
}
pdata = &enable;
break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_TYPE:
+ property_id = HAL_PARAM_VENC_IFRAMESIZE_TYPE;
+ iframesize_type = venc_v4l2_to_hal(
+ V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_TYPE,
+ ctrl->val);
+ pdata = &iframesize_type;
+ break;
default:
dprintk(VIDC_ERR, "Unsupported index: %x\n", ctrl->id);
rc = -ENOTSUPP;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 93e32ef4ac35..c4d06b658b30 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -652,10 +652,6 @@ int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
return -EINVAL;
}
- if (binfo->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return 0;
-
-
for (i = 0; i < binfo->num_planes; i++) {
if (binfo->handle[i]) {
rc = msm_comm_smem_cache_operations(inst,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index e612c6ed11c7..fa2ad1754e77 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -5286,24 +5286,28 @@ void msm_comm_print_inst_info(struct msm_vidc_inst *inst)
int i = 0;
bool is_decode = false;
enum vidc_ports port;
+ bool is_secure = false;
if (!inst) {
- dprintk(VIDC_ERR, "%s - invalid param %p\n",
+ dprintk(VIDC_ERR, "%s - invalid param %pK\n",
__func__, inst);
return;
}
is_decode = inst->session_type == MSM_VIDC_DECODER;
port = is_decode ? OUTPUT_PORT : CAPTURE_PORT;
+ is_secure = inst->flags & VIDC_SECURE;
dprintk(VIDC_ERR,
- "%s session, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n",
- is_decode ? "Decode" : "Encode", inst->fmts[port].name,
+ "%s session, %s, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n",
+ is_decode ? "Decode" : "Encode",
+ is_secure ? "Secure" : "Non-Secure",
+ inst->fmts[port].name,
inst->prop.height[port], inst->prop.width[port],
inst->prop.fps, inst->prop.bitrate,
!inst->bit_depth ? "8" : "10");
dprintk(VIDC_ERR,
- "---Buffer details for inst: %p of type: %d---\n",
+ "---Buffer details for inst: %pK of type: %d---\n",
inst, inst->session_type);
mutex_lock(&inst->registeredbufs.lock);
dprintk(VIDC_ERR, "registered buffer list:\n");
@@ -5347,7 +5351,7 @@ static void msm_comm_print_debug_info(struct msm_vidc_inst *inst)
struct msm_vidc_inst *temp = NULL;
if (!inst || !inst->core) {
- dprintk(VIDC_ERR, "%s - invalid param %p %p\n",
+ dprintk(VIDC_ERR, "%s - invalid param %pK %pK\n",
__func__, inst, core);
return;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 011941c6d4eb..2e1df75cb248 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -24,7 +24,7 @@ EXPORT_SYMBOL(msm_vidc_debug_out);
int msm_vidc_fw_debug = 0x18;
int msm_vidc_fw_debug_mode = 1;
int msm_vidc_fw_low_power_mode = 1;
-int msm_vidc_hw_rsp_timeout = 1000;
+int msm_vidc_hw_rsp_timeout = 2000;
bool msm_vidc_fw_coverage = false;
bool msm_vidc_vpe_csc_601_to_709 = false;
bool msm_vidc_dec_dcvs_mode = true;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index c87b6fc585c7..787ee43ccbd2 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -3407,10 +3407,12 @@ static int __response_handler(struct venus_hfi_device *device)
packets = device->response_pkt;
- raw_packet = kzalloc(VIDC_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_TEMPORARY);
+ raw_packet = device->raw_packet;
+
if (!raw_packet || !packets) {
- dprintk(VIDC_ERR, "%s: Failed to allocate memory\n", __func__);
- kfree(raw_packet);
+ dprintk(VIDC_ERR,
+ "%s: Invalid args : Res packet = %p, Raw packet = %p\n",
+ __func__, packets, raw_packet);
return 0;
}
@@ -3566,7 +3568,6 @@ static int __response_handler(struct venus_hfi_device *device)
exit:
__flush_debug_queue(device, raw_packet);
- kfree(raw_packet);
return packet_count;
}
@@ -4553,6 +4554,13 @@ static struct venus_hfi_device *__add_device(u32 device_id,
goto err_cleanup;
}
+ hdevice->raw_packet =
+ kzalloc(VIDC_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_TEMPORARY);
+ if (!hdevice->raw_packet) {
+ dprintk(VIDC_ERR, "failed to allocate raw packet\n");
+ goto err_cleanup;
+ }
+
rc = __init_regs_and_interrupts(hdevice, res);
if (rc)
goto err_cleanup;
@@ -4590,6 +4598,7 @@ err_cleanup:
if (hdevice->vidc_workq)
destroy_workqueue(hdevice->vidc_workq);
kfree(hdevice->response_pkt);
+ kfree(hdevice->raw_packet);
kfree(hdevice);
exit:
return NULL;
@@ -4631,6 +4640,7 @@ void venus_hfi_delete_device(void *device)
iounmap(dev->hal_data->register_base);
kfree(close->hal_data);
kfree(close->response_pkt);
+ kfree(close->raw_packet);
kfree(close);
break;
}
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h
index 7cc71a470c2e..1d2ca88a3c1d 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.h
+++ b/drivers/media/platform/msm/vidc/venus_hfi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -247,6 +247,7 @@ struct venus_hfi_device {
struct hfi_packetization_ops *pkt_ops;
enum hfi_packetization_type packetization_type;
struct msm_vidc_cb_info *response_pkt;
+ u8 *raw_packet;
struct pm_qos_request qos;
unsigned int skip_pc_count;
struct msm_vidc_capability *sys_init_capabilities;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index aa566159c393..116ce12c8dba 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -242,6 +242,7 @@ enum hal_property {
HAL_PARAM_VENC_SESSION_QP_RANGE_PACKED,
HAL_PARAM_VENC_H264_TRANSFORM_8x8,
HAL_PARAM_VENC_VIDEO_SIGNAL_INFO,
+ HAL_PARAM_VENC_IFRAMESIZE_TYPE,
};
enum hal_domain {
@@ -1002,6 +1003,13 @@ struct hal_video_signal_info {
bool full_range;
};
+enum hal_iframesize_type {
+ HAL_IFRAMESIZE_TYPE_DEFAULT,
+ HAL_IFRAMESIZE_TYPE_MEDIUM,
+ HAL_IFRAMESIZE_TYPE_HUGE,
+ HAL_IFRAMESIZE_TYPE_UNLIMITED,
+};
+
enum vidc_resource_id {
VIDC_RESOURCE_NONE,
VIDC_RESOURCE_OCMEM,
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 5e5ef6abc303..bb9958b0a819 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -386,6 +386,8 @@ struct hfi_buffer_info {
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x031)
#define HFI_PROPERTY_PARAM_VENC_VQZIP_SEI_TYPE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x033)
+#define HFI_PROPERTY_PARAM_VENC_IFRAMESIZE \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x034)
#define HFI_PROPERTY_CONFIG_VENC_COMMON_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
@@ -888,6 +890,14 @@ struct hfi_aspect_ratio {
u32 aspect_height;
};
+#define HFI_IFRAME_SIZE_DEFAULT (HFI_COMMON_BASE + 0x1)
+#define HFI_IFRAME_SIZE_MEDIUM (HFI_COMMON_BASE + 0x2)
+#define HFI_IFRAME_SIZE_HIGH (HFI_COMMON_BASE + 0x3)
+#define HFI_IFRAME_SIZE_UNLIMITED (HFI_COMMON_BASE + 0x4)
+struct hfi_iframe_size {
+ u32 type;
+};
+
#define HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM (0)
#define HFI_MVC_BUFFER_LAYOUT_SIDEBYSIDE (1)
#define HFI_MVC_BUFFER_LAYOUT_SEQ (2)
diff --git a/drivers/mfd/wcd9xxx-utils.c b/drivers/mfd/wcd9xxx-utils.c
index fab594992df3..2b0a5f8ce7f2 100644
--- a/drivers/mfd/wcd9xxx-utils.c
+++ b/drivers/mfd/wcd9xxx-utils.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/delay.h>
+#include <linux/sched.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wcd9xxx/pdata.h>
#include <linux/mfd/wcd9xxx/core.h>
@@ -310,6 +311,7 @@ struct wcd9xxx_pdata *wcd9xxx_populate_dt_data(struct device *dev)
u32 ecpp_dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
u32 dmic_clk_drive = WCD9XXX_DMIC_CLK_DRIVE_UNDEFINED;
u32 prop_val;
+ int rc = 0;
if (!dev || !dev->of_node)
return NULL;
@@ -368,9 +370,13 @@ struct wcd9xxx_pdata *wcd9xxx_populate_dt_data(struct device *dev)
pdata->mclk_rate,
"mad_dmic_rate");
- if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-ecpp-dmic-rate",
- &prop_val)))
- ecpp_dmic_sample_rate = prop_val;
+ if (of_find_property(dev->of_node, "qcom,cdc-ecpp-dmic-rate", NULL)) {
+ rc = wcd9xxx_read_of_property_u32(dev,
+ "qcom,cdc-ecpp-dmic-rate",
+ &prop_val);
+ if (!rc)
+ ecpp_dmic_sample_rate = prop_val;
+ }
pdata->ecpp_dmic_sample_rate = wcd9xxx_validate_dmic_sample_rate(dev,
ecpp_dmic_sample_rate,
@@ -379,13 +385,14 @@ struct wcd9xxx_pdata *wcd9xxx_populate_dt_data(struct device *dev)
if (!(of_property_read_u32(dev->of_node,
"qcom,cdc-dmic-clk-drv-strength",
- &prop_val)))
+ &prop_val))) {
dmic_clk_drive = prop_val;
- if (dmic_clk_drive != 2 && dmic_clk_drive != 4 &&
- dmic_clk_drive != 8 && dmic_clk_drive != 16)
- dev_err(dev, "Invalid cdc-dmic-clk-drv-strength %d\n",
- dmic_clk_drive);
+ if (dmic_clk_drive != 2 && dmic_clk_drive != 4 &&
+ dmic_clk_drive != 8 && dmic_clk_drive != 16)
+ dev_err(dev, "Invalid cdc-dmic-clk-drv-strength %d\n",
+ dmic_clk_drive);
+ }
pdata->dmic_clk_drv = dmic_clk_drive;
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
index 69ec7127102c..56ddf8467d16 100644
--- a/drivers/misc/hdcp.c
+++ b/drivers/misc/hdcp.c
@@ -255,6 +255,15 @@ struct __attribute__ ((__packed__)) hdcp_version_rsp {
uint32_t appversion;
};
+struct __attribute__ ((__packed__)) hdcp_verify_key_req {
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_verify_key_rsp {
+ uint32_t status;
+ uint32_t commandId;
+};
+
struct __attribute__ ((__packed__)) hdcp_lib_init_req_v1 {
uint32_t commandid;
};
@@ -794,6 +803,48 @@ exit:
return rc;
}
+static int hdcp_lib_verify_keys(struct hdcp_lib_handle *handle)
+{
+ int rc = -EINVAL;
+ struct hdcp_verify_key_req *req_buf;
+ struct hdcp_verify_key_rsp *rsp_buf;
+
+ if (!handle) {
+ pr_err("invalid input\n");
+ goto exit;
+ }
+
+ if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+ pr_err("app not loaded\n");
+ goto exit;
+ }
+
+ req_buf = (struct hdcp_verify_key_req *)handle->qseecom_handle->sbuf;
+ req_buf->commandid = HDCP_TXMTR_VERIFY_KEY;
+
+ rsp_buf = (struct hdcp_verify_key_rsp *)
+ (handle->qseecom_handle->sbuf +
+ QSEECOM_ALIGN(sizeof(struct hdcp_verify_key_req)));
+
+ rc = qseecom_send_command(handle->qseecom_handle,
+ req_buf,
+ QSEECOM_ALIGN(sizeof
+ (struct hdcp_verify_key_req)),
+ rsp_buf,
+ QSEECOM_ALIGN(sizeof
+ (struct hdcp_verify_key_rsp)));
+
+ if (rc < 0) {
+ pr_err("qseecom cmd failed err = %d\n", rc);
+ goto exit;
+ }
+
+ return rsp_buf->status;
+exit:
+ return rc;
+}
+
+
static int hdcp_app_init_legacy(struct hdcp_lib_handle *handle)
{
int rc = 0;
@@ -1456,10 +1507,12 @@ static bool hdcp_lib_client_feature_supported(void *phdcpcontext)
rc = hdcp_lib_library_load(handle);
if (!rc) {
- pr_debug("HDCP2p2 supported\n");
- handle->feature_supported = true;
+ if (!hdcp_lib_verify_keys(handle)) {
+ pr_debug("HDCP2p2 supported\n");
+ handle->feature_supported = true;
+ supported = true;
+ }
hdcp_lib_library_unload(handle);
- supported = true;
}
exit:
return supported;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index ff838ebefba6..7dc7271b05c1 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -131,6 +131,35 @@ static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);
+struct sglist_info {
+ uint32_t indexAndFlags;
+ uint32_t sizeOrCount;
+};
+
+/*
+ * Bit 31 indicates whether the request buffer holds a single physical
+ * address or multiple ones. If it is set, the index locates a single
+ * physical addr inside the request buffer, and `sizeOrCount` is the size
+ * of the memory being shared at that physical address.
+ * Otherwise, the index locates an array of {start, len} pairs (a
+ * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
+ * that array.
+ *
+ * Bit 30 indicates a 64-bit or 32-bit address: when it is set, physical
+ * addr and scatter gather entry sizes are 64-bit values; otherwise they
+ * are 32-bit values.
+ *
+ * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
+ */
+#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
+ ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
+
+#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
+
+#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
+
+#define MAKE_WHITELIST_VERSION(major, minor, patch) \
+ (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
+
struct qseecom_registered_listener_list {
struct list_head list;
struct qseecom_register_listener_req svc;
@@ -145,6 +174,8 @@ struct qseecom_registered_listener_list {
bool listener_in_use;
/* wq for thread blocked on this listener*/
wait_queue_head_t listener_block_app_wq;
+ struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+ uint32_t sglist_cnt;
};
struct qseecom_registered_app_list {
@@ -268,30 +299,6 @@ struct qseecom_listener_handle {
static struct qseecom_control qseecom;
-struct sglist_info {
- uint32_t indexAndFlags;
- uint32_t sizeOrCount;
-};
-
-/*
- * The 31th bit indicates only one or multiple physical address inside
- * the request buffer. If it is set, the index locates a single physical addr
- * inside the request buffer, and `sizeOrCount` is the size of the memory being
- * shared at that physical address.
- * Otherwise, the index locates an array of {start, len} pairs (a
- * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
- * that array.
- *
- * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
- * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
- *
- * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
- */
-#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
- ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
-
-#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
-
struct qseecom_dev_handle {
enum qseecom_client_handle_type type;
union {
@@ -305,8 +312,9 @@ struct qseecom_dev_handle {
bool perf_enabled;
bool fast_load_enabled;
enum qseecom_bandwidth_request_mode mode;
- struct sglist_info *sglistinfo_ptr;
+ struct sglist_info sglistinfo_ptr[MAX_ION_FD];
uint32_t sglist_cnt;
+ bool use_legacy_cmd;
};
struct qseecom_key_id_usage_desc {
@@ -584,6 +592,34 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
ret = scm_call2(smc_id, &desc);
break;
}
+ case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
+ struct qseecom_client_listener_data_irsp *req;
+ struct qseecom_client_listener_data_64bit_irsp *req_64;
+
+ smc_id =
+ TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
+ desc.arginfo =
+ TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req =
+ (struct qseecom_client_listener_data_irsp *)
+ req_buf;
+ desc.args[0] = req->listener_id;
+ desc.args[1] = req->status;
+ desc.args[2] = req->sglistinfo_ptr;
+ desc.args[3] = req->sglistinfo_len;
+ } else {
+ req_64 =
+ (struct qseecom_client_listener_data_64bit_irsp *)
+ req_buf;
+ desc.args[0] = req_64->listener_id;
+ desc.args[1] = req_64->status;
+ desc.args[2] = req_64->sglistinfo_ptr;
+ desc.args[3] = req_64->sglistinfo_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
struct qseecom_load_app_ireq *req;
struct qseecom_load_app_64bit_ireq *req_64bit;
@@ -1128,7 +1164,7 @@ static int qseecom_register_listener(struct qseecom_dev_handle *data,
return -EBUSY;
}
- new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
if (!new_entry) {
pr_err("kmalloc failed\n");
return -ENOMEM;
@@ -1593,6 +1629,16 @@ static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
return ret;
}
+static void __qseecom_clean_listener_sglistinfo(
+ struct qseecom_registered_listener_list *ptr_svc)
+{
+ if (ptr_svc->sglist_cnt) {
+ memset(ptr_svc->sglistinfo_ptr, 0,
+ SGLISTINFO_TABLE_SIZE);
+ ptr_svc->sglist_cnt = 0;
+ }
+}
+
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
struct qseecom_command_scm_resp *resp)
{
@@ -1601,9 +1647,14 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
uint32_t lstnr;
unsigned long flags;
struct qseecom_client_listener_data_irsp send_data_rsp;
+ struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
struct qseecom_registered_listener_list *ptr_svc = NULL;
sigset_t new_sigset;
sigset_t old_sigset;
+ uint32_t status;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ struct sglist_info *table = NULL;
while (resp->result == QSEOS_RESULT_INCOMPLETE) {
lstnr = resp->data;
@@ -1677,15 +1728,42 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
data->client.app_id, lstnr, ret);
rc = -ENODEV;
- send_data_rsp.status = QSEOS_RESULT_FAILURE;
+ status = QSEOS_RESULT_FAILURE;
} else {
- send_data_rsp.status = QSEOS_RESULT_SUCCESS;
+ status = QSEOS_RESULT_SUCCESS;
}
qseecom.send_resp_flag = 0;
ptr_svc->send_resp_flag = 0;
- send_data_rsp.qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
- send_data_rsp.listener_id = lstnr;
+ table = ptr_svc->sglistinfo_ptr;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ send_data_rsp.listener_id = lstnr;
+ send_data_rsp.status = status;
+ send_data_rsp.sglistinfo_ptr =
+ (uint32_t)virt_to_phys(table);
+ send_data_rsp.sglistinfo_len =
+ SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp;
+ cmd_len = sizeof(send_data_rsp);
+ } else {
+ send_data_rsp_64bit.listener_id = lstnr;
+ send_data_rsp_64bit.status = status;
+ send_data_rsp_64bit.sglistinfo_ptr =
+ virt_to_phys(table);
+ send_data_rsp_64bit.sglistinfo_len =
+ SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp_64bit;
+ cmd_len = sizeof(send_data_rsp_64bit);
+ }
+ if (qseecom.whitelist_support == false)
+ *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+ else
+ *(uint32_t *)cmd_buf =
+ QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
if (ptr_svc)
msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
ptr_svc->sb_virt, ptr_svc->sb_length,
@@ -1695,10 +1773,9 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
__qseecom_enable_clk(CLK_QSEE);
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
- (const void *)&send_data_rsp,
- sizeof(send_data_rsp), resp,
- sizeof(*resp));
+ cmd_buf, cmd_len, resp, sizeof(*resp));
ptr_svc->listener_in_use = false;
+ __qseecom_clean_listener_sglistinfo(ptr_svc);
if (ret) {
pr_err("scm_call() failed with err: %d (app_id = %d)\n",
ret, data->client.app_id);
@@ -1826,9 +1903,14 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
uint32_t lstnr;
unsigned long flags;
struct qseecom_client_listener_data_irsp send_data_rsp;
+ struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
struct qseecom_registered_listener_list *ptr_svc = NULL;
sigset_t new_sigset;
sigset_t old_sigset;
+ uint32_t status;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ struct sglist_info *table = NULL;
while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
lstnr = resp->data;
@@ -1891,13 +1973,38 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
data->client.app_id, lstnr, ret);
rc = -ENODEV;
- send_data_rsp.status = QSEOS_RESULT_FAILURE;
+ status = QSEOS_RESULT_FAILURE;
} else {
- send_data_rsp.status = QSEOS_RESULT_SUCCESS;
+ status = QSEOS_RESULT_SUCCESS;
}
-
- send_data_rsp.qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
- send_data_rsp.listener_id = lstnr;
+ table = ptr_svc->sglistinfo_ptr;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ send_data_rsp.listener_id = lstnr;
+ send_data_rsp.status = status;
+ send_data_rsp.sglistinfo_ptr =
+ (uint32_t)virt_to_phys(table);
+ send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp;
+ cmd_len = sizeof(send_data_rsp);
+ } else {
+ send_data_rsp_64bit.listener_id = lstnr;
+ send_data_rsp_64bit.status = status;
+ send_data_rsp_64bit.sglistinfo_ptr =
+ virt_to_phys(table);
+ send_data_rsp_64bit.sglistinfo_len =
+ SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp_64bit;
+ cmd_len = sizeof(send_data_rsp_64bit);
+ }
+ if (qseecom.whitelist_support == false)
+ *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+ else
+ *(uint32_t *)cmd_buf =
+ QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
if (ptr_svc)
msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
ptr_svc->sb_virt, ptr_svc->sb_length,
@@ -1907,11 +2014,9 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
__qseecom_enable_clk(CLK_QSEE);
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
- (const void *)&send_data_rsp,
- sizeof(send_data_rsp), resp,
- sizeof(*resp));
-
+ cmd_buf, cmd_len, resp, sizeof(*resp));
ptr_svc->listener_in_use = false;
+ __qseecom_clean_listener_sglistinfo(ptr_svc);
wake_up_interruptible(&ptr_svc->listener_block_app_wq);
if (ret) {
@@ -2079,6 +2184,7 @@ static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
struct qseecom_load_app_64bit_ireq load_req_64bit;
void *cmd_buf = NULL;
size_t cmd_len;
+ bool first_time = false;
/* Copy the relevant information needed for loading the image */
if (copy_from_user(&load_img_req,
@@ -2150,6 +2256,7 @@ static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
&qseecom.registered_app_list_lock, flags);
ret = 0;
} else {
+ first_time = true;
pr_warn("App (%s) does'nt exist, loading apps for first time\n",
(char *)(load_img_req.img_name));
/* Get the handle of the shared fd */
@@ -2281,8 +2388,15 @@ static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
load_img_req.app_id = app_id;
if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
pr_err("copy_to_user failed\n");
- kzfree(entry);
ret = -EFAULT;
+ if (first_time == true) {
+ spin_lock_irqsave(
+ &qseecom.registered_app_list_lock, flags);
+ list_del(&entry->list);
+ spin_unlock_irqrestore(
+ &qseecom.registered_app_list_lock, flags);
+ kzfree(entry);
+ }
}
loadapp_err:
@@ -2910,7 +3024,7 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
}
- if (qseecom.whitelist_support == false)
+ if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
else
*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
@@ -3015,6 +3129,8 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
struct qseecom_send_modfd_cmd_req *req = NULL;
struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
struct qseecom_registered_listener_list *this_lstnr = NULL;
+ uint32_t offset;
+ struct sg_table *sg_ptr;
if ((data->type != QSEECOM_LISTENER_SERVICE) &&
(data->type != QSEECOM_CLIENT_APP))
@@ -3036,7 +3152,6 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
}
for (i = 0; i < MAX_ION_FD; i++) {
- struct sg_table *sg_ptr = NULL;
if ((data->type != QSEECOM_LISTENER_SERVICE) &&
(req->ifd_data[i].fd > 0)) {
ihandle = ion_import_dma_buf(qseecom.ion_clnt,
@@ -3178,14 +3293,25 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
ihandle, NULL, len,
ION_IOC_CLEAN_INV_CACHES);
if (data->type == QSEECOM_CLIENT_APP) {
+ offset = req->ifd_data[i].cmd_buf_offset;
data->sglistinfo_ptr[i].indexAndFlags =
SGLISTINFO_SET_INDEX_FLAG(
- (sg_ptr->nents == 1), 0,
- req->ifd_data[i].cmd_buf_offset);
+ (sg_ptr->nents == 1), 0, offset);
data->sglistinfo_ptr[i].sizeOrCount =
(sg_ptr->nents == 1) ?
sg->length : sg_ptr->nents;
data->sglist_cnt = i + 1;
+ } else {
+ offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+ + (uintptr_t)lstnr_resp->resp_buf_ptr -
+ (uintptr_t)this_lstnr->sb_virt);
+ this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 0, offset);
+ this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ this_lstnr->sglist_cnt = i + 1;
}
}
/* Deallocate the handle */
@@ -3258,6 +3384,8 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
struct qseecom_send_modfd_cmd_req *req = NULL;
struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
struct qseecom_registered_listener_list *this_lstnr = NULL;
+ uint32_t offset;
+ struct sg_table *sg_ptr;
if ((data->type != QSEECOM_LISTENER_SERVICE) &&
(data->type != QSEECOM_CLIENT_APP))
@@ -3279,7 +3407,6 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
}
for (i = 0; i < MAX_ION_FD; i++) {
- struct sg_table *sg_ptr = NULL;
if ((data->type != QSEECOM_LISTENER_SERVICE) &&
(req->ifd_data[i].fd > 0)) {
ihandle = ion_import_dma_buf(qseecom.ion_clnt,
@@ -3396,14 +3523,25 @@ cleanup:
ihandle, NULL, len,
ION_IOC_CLEAN_INV_CACHES);
if (data->type == QSEECOM_CLIENT_APP) {
+ offset = req->ifd_data[i].cmd_buf_offset;
data->sglistinfo_ptr[i].indexAndFlags =
SGLISTINFO_SET_INDEX_FLAG(
- (sg_ptr->nents == 1), 1,
- req->ifd_data[i].cmd_buf_offset);
+ (sg_ptr->nents == 1), 1, offset);
data->sglistinfo_ptr[i].sizeOrCount =
(sg_ptr->nents == 1) ?
sg->length : sg_ptr->nents;
data->sglist_cnt = i + 1;
+ } else {
+ offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+ + (uintptr_t)lstnr_resp->resp_buf_ptr -
+ (uintptr_t)this_lstnr->sb_virt);
+ this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 1, offset);
+ this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ this_lstnr->sglist_cnt = i + 1;
}
}
/* Deallocate the handle */
@@ -4122,21 +4260,12 @@ int qseecom_start_app(struct qseecom_handle **handle,
data->client.user_virt_sb_base = 0;
data->client.ihandle = NULL;
- /* Allocate sglistinfo buffer for kernel client */
- data->sglistinfo_ptr = kzalloc(SGLISTINFO_TABLE_SIZE, GFP_KERNEL);
- if (!(data->sglistinfo_ptr)) {
- kfree(data);
- kfree(*handle);
- *handle = NULL;
- return -ENOMEM;
- }
init_waitqueue_head(&data->abort_wq);
data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
ION_HEAP(ION_QSECOM_HEAP_ID), 0);
if (IS_ERR_OR_NULL(data->client.ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
- kfree(data->sglistinfo_ptr);
kfree(data);
kfree(*handle);
*handle = NULL;
@@ -4239,7 +4368,6 @@ int qseecom_start_app(struct qseecom_handle **handle,
return 0;
err:
- kfree(data->sglistinfo_ptr);
kfree(data);
kfree(*handle);
*handle = NULL;
@@ -4287,7 +4415,6 @@ int qseecom_shutdown_app(struct qseecom_handle **handle)
mutex_unlock(&app_access_lock);
if (ret == 0) {
- kzfree(data->sglistinfo_ptr);
kzfree(data);
kzfree(*handle);
kzfree(kclient);
@@ -4353,8 +4480,11 @@ int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
}
perf_enabled = true;
}
+ if (!strcmp(data->client.app_name, "securemm"))
+ data->use_legacy_cmd = true;
ret = __qseecom_send_cmd(data, &req);
+ data->use_legacy_cmd = false;
if (qseecom.support_bus_scaling)
__qseecom_add_bw_scale_down_timer(
QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
@@ -7030,6 +7160,7 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
wake_up_all(&data->abort_wq);
if (ret)
pr_err("failed qseecom_send_mod_resp: %d\n", ret);
+ __qseecom_clean_data_sglistinfo(data);
break;
}
case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
@@ -7179,12 +7310,6 @@ static int qseecom_open(struct inode *inode, struct file *file)
data->mode = INACTIVE;
init_waitqueue_head(&data->abort_wq);
atomic_set(&data->ioctl_count, 0);
-
- data->sglistinfo_ptr = kzalloc(SGLISTINFO_TABLE_SIZE, GFP_KERNEL);
- if (!(data->sglistinfo_ptr)) {
- kzfree(data);
- return -ENOMEM;
- }
return ret;
}
@@ -7239,7 +7364,6 @@ static int qseecom_release(struct inode *inode, struct file *file)
if (data->perf_enabled == true)
qsee_disable_clock_vote(data, CLK_DFAB);
}
- kfree(data->sglistinfo_ptr);
kfree(data);
return ret;
@@ -7988,73 +8112,14 @@ out:
}
/*
- * Check if whitelist feature is supported by making a test scm_call
- * to send a whitelist command to an invalid app ID 0
+ * Check for the whitelist feature; if the TZ feature version is < 1.0.0,
+ * the whitelist feature is not supported.
*/
static int qseecom_check_whitelist_feature(void)
{
- struct qseecom_client_send_data_ireq send_data_req = {0};
- struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
- struct qseecom_command_scm_resp resp;
- uint32_t buf_size = 128;
- void *buf = NULL;
- void *cmd_buf = NULL;
- size_t cmd_len;
- int ret = 0;
- phys_addr_t pa;
+ int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- pa = virt_to_phys(buf);
- if (qseecom.qsee_version < QSEE_VERSION_40) {
- send_data_req.qsee_cmd_id =
- QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
- send_data_req.app_id = 0;
- send_data_req.req_ptr = (uint32_t)pa;
- send_data_req.req_len = buf_size;
- send_data_req.rsp_ptr = (uint32_t)pa;
- send_data_req.rsp_len = buf_size;
- send_data_req.sglistinfo_ptr = (uint32_t)pa;
- send_data_req.sglistinfo_len = buf_size;
- cmd_buf = (void *)&send_data_req;
- cmd_len = sizeof(struct qseecom_client_send_data_ireq);
- } else {
- send_data_req_64bit.qsee_cmd_id =
- QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
- send_data_req_64bit.app_id = 0;
- send_data_req_64bit.req_ptr = (uint64_t)pa;
- send_data_req_64bit.req_len = buf_size;
- send_data_req_64bit.rsp_ptr = (uint64_t)pa;
- send_data_req_64bit.rsp_len = buf_size;
- send_data_req_64bit.sglistinfo_ptr = (uint64_t)pa;
- send_data_req_64bit.sglistinfo_len = buf_size;
- cmd_buf = (void *)&send_data_req_64bit;
- cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
- }
- ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
- cmd_buf, cmd_len,
- &resp, sizeof(resp));
-/*
- * If this cmd exists and whitelist is supported, scm_call return -2 (scm
- * driver remap it to -EINVAL) and resp.result 0xFFFFFFED(-19); Otherwise,
- * scm_call return -1 (remap to -EIO).
- */
- if (ret == -EIO) {
- qseecom.whitelist_support = false;
- ret = 0;
- } else if (ret == -EINVAL &&
- resp.result == QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD) {
- qseecom.whitelist_support = true;
- ret = 0;
- } else {
- pr_info("Check whitelist with ret = %d, result = 0x%x\n",
- ret, resp.result);
- qseecom.whitelist_support = false;
- ret = 0;
- }
- kfree(buf);
- return ret;
+ return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
}
static int qseecom_probe(struct platform_device *pdev)
@@ -8305,11 +8370,7 @@ static int qseecom_probe(struct platform_device *pdev)
qseecom.qsee_perf_client = msm_bus_scale_register_client(
qseecom_platform_support);
- rc = qseecom_check_whitelist_feature();
- if (rc) {
- rc = -EINVAL;
- goto exit_destroy_ion_client;
- }
+ qseecom.whitelist_support = qseecom_check_whitelist_feature();
pr_warn("qseecom.whitelist_support = %d\n",
qseecom.whitelist_support);
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 5ca0307a3274..b9faae0278c9 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -54,6 +54,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
struct pmc_ctx *pmc = &wil->pmc;
struct device *dev = wil_to_dev(wil);
struct wmi_pmc_cmd pmc_cmd = {0};
+ int last_cmd_err = -ENOMEM;
mutex_lock(&pmc->lock);
@@ -62,6 +63,29 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
goto no_release_err;
}
+ if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
+ wil_err(wil,
+ "Invalid params num_descriptors(%d), descriptor_size(%d)\n",
+ num_descriptors, descriptor_size);
+ last_cmd_err = -EINVAL;
+ goto no_release_err;
+ }
+
+ if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
+ wil_err(wil,
+ "num_descriptors(%d) exceeds max ring size %d\n",
+ num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
+ last_cmd_err = -EINVAL;
+ goto no_release_err;
+ }
+
+ if (num_descriptors > INT_MAX / descriptor_size) {
+ wil_err(wil,
+ "Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
+ num_descriptors, descriptor_size);
+ last_cmd_err = -EINVAL;
+ goto no_release_err;
+ }
pmc->num_descriptors = num_descriptors;
pmc->descriptor_size = descriptor_size;
@@ -189,7 +213,7 @@ release_pmc_skb_list:
pmc->descriptors = NULL;
no_release_err:
- pmc->last_cmd_status = -ENOMEM;
+ pmc->last_cmd_status = last_cmd_err;
mutex_unlock(&pmc->lock);
}
@@ -295,7 +319,7 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
size_t retval = 0;
unsigned long long idx;
loff_t offset;
- size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+ size_t pmc_size;
mutex_lock(&pmc->lock);
@@ -306,6 +330,8 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
return -EPERM;
}
+ pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
wil_dbg_misc(wil,
"%s: size %u, pos %lld\n",
__func__, (unsigned)count, *f_pos);
@@ -345,7 +371,18 @@ loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
loff_t newpos;
struct wil6210_priv *wil = filp->private_data;
struct pmc_ctx *pmc = &wil->pmc;
- size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+ size_t pmc_size;
+
+ mutex_lock(&pmc->lock);
+
+ if (!wil_is_pmc_allocated(pmc)) {
+ wil_err(wil, "error, pmc is not allocated!\n");
+ pmc->last_cmd_status = -EPERM;
+ mutex_unlock(&pmc->lock);
+ return -EPERM;
+ }
+
+ pmc_size = pmc->descriptor_size * pmc->num_descriptors;
switch (whence) {
case 0: /* SEEK_SET */
@@ -361,15 +398,21 @@ loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
break;
default: /* can't happen */
- return -EINVAL;
+ newpos = -EINVAL;
+ goto out;
}
- if (newpos < 0)
- return -EINVAL;
+ if (newpos < 0) {
+ newpos = -EINVAL;
+ goto out;
+ }
if (newpos > pmc_size)
newpos = pmc_size;
filp->f_pos = newpos;
+out:
+ mutex_unlock(&pmc->lock);
+
return newpos;
}
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 154310020997..2dd18dd78677 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -53,6 +53,7 @@ struct nqx_dev {
struct mutex read_mutex;
struct i2c_client *client;
struct miscdevice nqx_device;
+ union nqx_uinfo nqx_info;
/* NFC GPIO variables */
unsigned int irq_gpio;
unsigned int en_gpio;
@@ -467,6 +468,25 @@ int nfc_ioctl_core_reset_ntf(struct file *filp)
return nqx_dev->core_reset_ntf;
}
+/*
+ * Inside nfc_ioctl_nfcc_info
+ *
+ * @brief nfc_ioctl_nfcc_info
+ *
+ * Check the NQ Chipset and firmware version details
+ */
+unsigned int nfc_ioctl_nfcc_info(struct file *filp, unsigned long arg)
+{
+ unsigned int r = 0;
+ struct nqx_dev *nqx_dev = filp->private_data;
+
+ r = nqx_dev->nqx_info.i;
+ dev_dbg(&nqx_dev->client->dev,
+ "nqx nfc : nfc_ioctl_nfcc_info r = %d\n", r);
+
+ return r;
+}
+
static long nfc_ioctl(struct file *pfile, unsigned int cmd,
unsigned long arg)
{
@@ -489,6 +509,9 @@ static long nfc_ioctl(struct file *pfile, unsigned int cmd,
case NFCC_INITIAL_CORE_RESET_NTF:
r = nfc_ioctl_core_reset_ntf(pfile);
break;
+ case NFCC_GET_INFO:
+ r = nfc_ioctl_nfcc_info(pfile, arg);
+ break;
default:
r = -ENOIOCTLCMD;
}
@@ -508,13 +531,16 @@ static const struct file_operations nfc_dev_fops = {
};
/* Check for availability of NQ_ NFC controller hardware */
-static int nfcc_hw_check(struct i2c_client *client, unsigned int enable_gpio)
+static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
{
int ret = 0;
unsigned char raw_nci_reset_cmd[] = {0x20, 0x00, 0x01, 0x00};
+ unsigned char raw_nci_init_cmd[] = {0x20, 0x01, 0x00};
+ unsigned char nci_init_rsp[28];
unsigned char nci_reset_rsp[6];
-
+ unsigned char init_rsp_len = 0;
+ unsigned int enable_gpio = nqx_dev->en_gpio;
/* making sure that the NFCC starts in a clean state. */
gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
/* hardware dependent delay */
@@ -536,16 +562,75 @@ static int nfcc_hw_check(struct i2c_client *client, unsigned int enable_gpio)
/* Read Response of RESET command */
ret = i2c_master_recv(client, nci_reset_rsp,
- sizeof(nci_reset_rsp));
+ sizeof(nci_reset_rsp));
dev_err(&client->dev,
- "%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
- __func__, nci_reset_rsp[0],
- nci_reset_rsp[1], nci_reset_rsp[2]);
+ "%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+ __func__, nci_reset_rsp[0],
+ nci_reset_rsp[1], nci_reset_rsp[2]);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s: - i2c_master_recv Error\n", __func__);
+ goto err_nfcc_hw_check;
+ }
+ ret = i2c_master_send(client, raw_nci_init_cmd,
+ sizeof(raw_nci_init_cmd));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s: - i2c_master_send Error\n", __func__);
+ goto err_nfcc_hw_check;
+ }
+ /* hardware dependent delay */
+ msleep(30);
+ /* Read Response of INIT command */
+ ret = i2c_master_recv(client, nci_init_rsp,
+ sizeof(nci_init_rsp));
if (ret < 0) {
dev_err(&client->dev,
"%s: - i2c_master_recv Error\n", __func__);
goto err_nfcc_hw_check;
}
+ init_rsp_len = 2 + nci_init_rsp[2]; /*payload + len*/
+ if (init_rsp_len > PAYLOAD_HEADER_LENGTH) {
+ nqx_dev->nqx_info.info.chip_type =
+ nci_init_rsp[init_rsp_len - 3];
+ nqx_dev->nqx_info.info.rom_version =
+ nci_init_rsp[init_rsp_len - 2];
+ nqx_dev->nqx_info.info.fw_major =
+ nci_init_rsp[init_rsp_len - 1];
+ nqx_dev->nqx_info.info.fw_minor =
+ nci_init_rsp[init_rsp_len];
+ }
+ dev_dbg(&nqx_dev->client->dev, "NQ NFCC chip_type = %x\n",
+ nqx_dev->nqx_info.info.chip_type);
+ dev_dbg(&nqx_dev->client->dev, "NQ fw version = %x.%x.%x\n",
+ nqx_dev->nqx_info.info.rom_version,
+ nqx_dev->nqx_info.info.fw_major,
+ nqx_dev->nqx_info.info.fw_minor);
+
+ switch (nqx_dev->nqx_info.info.chip_type) {
+ case NFCC_NQ_210:
+ dev_dbg(&client->dev,
+ "%s: ## NFCC == NQ210 ##\n", __func__);
+ break;
+ case NFCC_NQ_220:
+ dev_dbg(&client->dev,
+ "%s: ## NFCC == NQ220 ##\n", __func__);
+ break;
+ case NFCC_NQ_310:
+ dev_dbg(&client->dev,
+ "%s: ## NFCC == NQ310 ##\n", __func__);
+ break;
+ case NFCC_NQ_330:
+ dev_dbg(&client->dev,
+ "%s: ## NFCC == NQ330 ##\n", __func__);
+ break;
+ default:
+ dev_err(&client->dev,
+ "%s: - NFCC HW not Supported\n", __func__);
+ break;
+ }
+
+ /*Disable NFC by default to save power on boot*/
gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
ret = 0;
goto done;
@@ -566,9 +651,7 @@ done:
static int nqx_clock_select(struct nqx_dev *nqx_dev)
{
int r = 0;
-
- nqx_dev->s_clk =
- clk_get(&nqx_dev->client->dev, "ref_clk");
+ nqx_dev->s_clk = clk_get(&nqx_dev->client->dev, "ref_clk");
if (nqx_dev->s_clk == NULL)
goto err_clk;
@@ -867,8 +950,7 @@ static int nqx_probe(struct i2c_client *client,
* present before attempting further hardware initialisation.
*
*/
-
- r = nfcc_hw_check(client, platform_data->en_gpio);
+ r = nfcc_hw_check(client, nqx_dev);
if (r) {
/* make sure NFCC is not enabled */
gpio_set_value(platform_data->en_gpio, 0);
diff --git a/drivers/nfc/nq-nci.h b/drivers/nfc/nq-nci.h
index d62100c2d15a..c635e818b1f3 100644
--- a/drivers/nfc/nq-nci.h
+++ b/drivers/nfc/nq-nci.h
@@ -22,6 +22,7 @@
#include <linux/ioctl.h>
#include <linux/miscdevice.h>
+#include <linux/nfcinfo.h>
#define NFC_SET_PWR _IOW(0xE9, 0x01, unsigned int)
#define ESE_SET_PWR _IOW(0xE9, 0x02, unsigned int)
@@ -42,4 +43,12 @@ enum nfcc_initial_core_reset_ntf {
DEFAULT_INITIAL_CORE_RESET_NTF, /*2*/
};
+enum nfcc_chip_variant {
+ NFCC_NQ_210 = 0x48, /**< NFCC NQ210 */
+ NFCC_NQ_220 = 0x58, /**< NFCC NQ220 */
+ NFCC_NQ_310 = 0x40, /**< NFCC NQ310 */
+ NFCC_NQ_330 = 0x51, /**< NFCC NQ330 */
+ NFCC_NOT_SUPPORTED = 0xFF /**< NFCC is not supported */
+};
+
#endif
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 14af8ca66d1c..240bf2903308 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -619,6 +619,7 @@ struct msm_pcie_dev_t {
bool ext_ref_clk;
bool common_phy;
uint32_t ep_latency;
+ uint32_t wr_halt_size;
uint32_t cpl_timeout;
uint32_t current_bdf;
short current_short_bdf;
@@ -1976,6 +1977,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
dev->common_phy);
PCIE_DBG_FS(dev, "ep_latency: %dms\n",
dev->ep_latency);
+ PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
+ dev->wr_halt_size);
PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
dev->cpl_timeout);
PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
@@ -4495,8 +4498,19 @@ int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
if (dev->use_msi) {
PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
- msm_pcie_write_mask(dev->parf +
- PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT, 0, BIT(31));
+ val = dev->wr_halt_size ? dev->wr_halt_size :
+ readl_relaxed(dev->parf +
+ PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ msm_pcie_write_reg(dev->parf,
+ PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
+ BIT(31) | val);
+
+ PCIE_DBG(dev,
+ "RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
+ dev->rc_idx,
+ readl_relaxed(dev->parf +
+ PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
}
mutex_lock(&com_phy_lock);
@@ -5385,9 +5399,6 @@ static irqreturn_t handle_linkdown_irq(int irq, void *data)
dev->link_status = MSM_PCIE_LINK_DISABLED;
dev->shadow_en = false;
- pcie_phy_dump(dev);
- pcie_parf_dump(dev);
-
if (dev->linkdown_panic)
panic("User has chosen to panic on linkdown\n");
@@ -5420,7 +5431,7 @@ static irqreturn_t handle_msi_irq(int irq, void *data)
struct msm_pcie_dev_t *dev = data;
void __iomem *ctrl_status;
- PCIE_DBG(dev, "irq=%d\n", irq);
+ PCIE_DUMP(dev, "irq: %d\n", irq);
/* check for set bits, clear it by setting that bit
and trigger corresponding irq */
@@ -5696,7 +5707,7 @@ static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
irq_set_msi_desc(firstirq, desc);
msg.address_hi = 0;
msg.address_lo = dev->msi_gicm_addr;
- msg.data = dev->msi_gicm_base;
+ msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
write_msi_msg(firstirq, &msg);
return 0;
@@ -6071,6 +6082,18 @@ static int msm_pcie_probe(struct platform_device *pdev)
PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
rc_idx, msm_pcie_dev[rc_idx].ep_latency);
+ msm_pcie_dev[rc_idx].wr_halt_size = 0;
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,wr-halt-size",
+ &msm_pcie_dev[rc_idx].wr_halt_size);
+ if (ret)
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: wr-halt-size not specified in dt. Use default value.\n",
+ rc_idx);
+ else
+ PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
+ rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
+
msm_pcie_dev[rc_idx].cpl_timeout = 0;
ret = of_property_read_u32((&pdev->dev)->of_node,
"qcom,cpl-timeout",
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 804c89dc9533..73add50cf224 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -4907,20 +4907,16 @@ int ipa_iommu_map(struct iommu_domain *domain,
IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
- /* make sure no overlapping */
+ /* Checking the address overlapping */
if (domain == ipa2_get_smmu_domain()) {
if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
IPAERR("iommu AP overlap addr 0x%lx\n", iova);
- ipa_assert();
- return -EFAULT;
}
} else if (domain == ipa2_get_wlan_smmu_domain()) {
/* wlan is one time map */
} else if (domain == ipa2_get_uc_smmu_domain()) {
if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
IPAERR("iommu uC overlap addr 0x%lx\n", iova);
- ipa_assert();
- return -EFAULT;
}
} else {
IPAERR("Unexpected domain 0x%p\n", domain);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 3c2a6d4620ba..d51e9ac97fe0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -18,6 +18,7 @@
#include "ipa_i.h"
#include "ipa_trace.h"
+#define IPA_WAN_AGGR_PKT_CNT 5
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
#define POLLING_INACTIVITY_TX 40
@@ -1099,16 +1100,18 @@ int ipa2_rx_poll(u32 clnt_hdl, int weight)
break;
ipa_wq_rx_common(ep->sys, iov.size);
- cnt += 5;
+ cnt += IPA_WAN_AGGR_PKT_CNT;
};
- if (cnt == 0) {
+ if (cnt == 0 || cnt < weight) {
ep->inactive_cycles++;
ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
ep->switch_to_intr = true;
delay = 0;
+ } else if (cnt < weight) {
+ delay = 0;
}
queue_delayed_work(ep->sys->wq,
&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
@@ -1165,8 +1168,11 @@ void ipa_update_repl_threshold(enum ipa_client_type ipa_client)
* Determine how many buffers/descriptors remaining will
* cause to drop below the yellow WM bar.
*/
- ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
- / ep->sys->rx_buff_sz;
+ if (ep->sys->rx_buff_sz)
+ ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
+ / ep->sys->rx_buff_sz;
+ else
+ ep->rx_replenish_threshold = 0;
}
/**
@@ -1361,8 +1367,11 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
* Determine how many buffers/descriptors remaining will
* cause to drop below the yellow WM bar.
*/
- ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
- / ep->sys->rx_buff_sz;
+ if (ep->sys->rx_buff_sz)
+ ep->rx_replenish_threshold =
+ ipa_get_sys_yellow_wm(ep->sys) / ep->sys->rx_buff_sz;
+ else
+ ep->rx_replenish_threshold = 0;
/* Only when the WAN pipes are setup, actual threshold will
* be read from the register. So update LAN_CONS ep again with
* right value.
@@ -3162,14 +3171,9 @@ static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
sys->repl_hdlr =
ipa_replenish_rx_cache;
}
- if (in->napi_enabled) {
- sys->rx_pool_sz =
- IPA_WAN_NAPI_CONS_RX_POOL_SZ;
- if (in->recycle_enabled) {
- sys->repl_hdlr =
- ipa_replenish_rx_cache_recycle;
- }
- }
+ if (in->napi_enabled && in->recycle_enabled)
+ sys->repl_hdlr =
+ ipa_replenish_rx_cache_recycle;
sys->ep->wakelock_client =
IPA_WAKELOCK_REF_CLIENT_WAN_RX;
in->ipa_ep_cfg.aggr.aggr_sw_eof_active
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 73206abf9cfd..866170d3324d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -51,8 +51,6 @@
#define IPA_UC_FINISH_MAX 6
#define IPA_UC_WAIT_MIN_SLEEP 1000
#define IPA_UC_WAII_MAX_SLEEP 1200
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
-#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
#define IPA_MAX_STATUS_STAT_NUM 30
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 520f139ee38a..c2e43a62ab69 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -62,6 +62,7 @@
#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
#define NAPI_WEIGHT 60
+#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024
static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
@@ -100,6 +101,7 @@ struct ipa_rmnet_plat_drv_res {
bool ipa_loaduC;
bool ipa_advertise_sg_support;
bool ipa_napi_enable;
+ u32 wan_rx_desc_size;
};
static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
@@ -1291,10 +1293,8 @@ static int handle_ingress_format(struct net_device *dev,
ipa_to_apps_ep_cfg.priv = dev;
ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
- if (ipa_to_apps_ep_cfg.napi_enabled)
- ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
- else
- ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ ipa_to_apps_ep_cfg.desc_fifo_sz =
+ ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);
mutex_lock(&ipa_to_apps_pipe_handle_guard);
if (atomic_read(&is_ssr)) {
@@ -1925,6 +1925,9 @@ static struct notifier_block ssr_notifier = {
static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
{
+ int result;
+
+ ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
ipa_rmnet_drv_res->ipa_rmnet_ssr =
of_property_read_bool(pdev->dev.of_node,
"qcom,rmnet-ipa-ssr");
@@ -1947,6 +1950,18 @@ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
"qcom,ipa-napi-enable");
pr_info("IPA Napi Enable = %s\n",
ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+
+ /* Get IPA WAN RX desc fifo size */
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,wan-rx-desc-size",
+ &ipa_rmnet_drv_res->wan_rx_desc_size);
+ if (result)
+ pr_info("using default for wan-rx-desc-size = %u\n",
+ ipa_rmnet_drv_res->wan_rx_desc_size);
+ else
+ IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
+ ipa_rmnet_drv_res->wan_rx_desc_size);
+
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index cc1cb456ab8a..4b0cd46082a3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -21,6 +21,7 @@
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"
+#define IPA_WAN_AGGR_PKT_CNT 5
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
#define POLLING_MIN_SLEEP_RX 1010
@@ -60,7 +61,6 @@
#define IPA_ODU_RX_POOL_SZ 64
#define IPA_SIZE_DL_CSUM_META_TRAILER 8
-#define IPA_GSI_EVT_RING_LEN 4096
#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
#define IPA_GSI_EVT_RING_INT_MODT 3200 /* 0.1s under 32KHz clock */
@@ -3298,9 +3298,6 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
sys->repl_hdlr =
ipa3_replenish_rx_cache;
}
- if (in->napi_enabled)
- sys->rx_pool_sz =
- IPA_WAN_NAPI_CONS_RX_POOL_SZ;
if (in->napi_enabled && in->recycle_enabled)
sys->repl_hdlr =
ipa3_replenish_rx_cache_recycle;
@@ -3965,13 +3962,19 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
gsi_evt_ring_props.re_size =
GSI_EVT_RING_RE_SIZE_16B;
- gsi_evt_ring_props.ring_len = IPA_GSI_EVT_RING_LEN;
+ /*
+ * GSI ring length is calculated based on the desc_fifo_sz
+ * which was meant to define the BAM desc fifo. GSI descriptors
+ * are 16B as opposed to 8B for BAM.
+ */
+ gsi_evt_ring_props.ring_len = 2 * in->desc_fifo_sz;
+
gsi_evt_ring_props.ring_base_vaddr =
- dma_alloc_coherent(ipa3_ctx->pdev, IPA_GSI_EVT_RING_LEN,
- &evt_dma_addr, GFP_KERNEL);
+ dma_alloc_coherent(ipa3_ctx->pdev,
+ gsi_evt_ring_props.ring_len, &evt_dma_addr, GFP_KERNEL);
if (!gsi_evt_ring_props.ring_base_vaddr) {
IPAERR("fail to dma alloc %u bytes\n",
- IPA_GSI_EVT_RING_LEN);
+ gsi_evt_ring_props.ring_len);
return -ENOMEM;
}
gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
@@ -4098,7 +4101,7 @@ fail_get_gsi_ep_info:
}
fail_alloc_evt_ring:
if (gsi_evt_ring_props.ring_base_vaddr)
- dma_free_coherent(ipa3_ctx->pdev, IPA_GSI_EVT_RING_LEN,
+ dma_free_coherent(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len,
gsi_evt_ring_props.ring_base_vaddr, evt_dma_addr);
IPAERR("Return with err: %d\n", result);
return result;
@@ -4280,16 +4283,18 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
break;
ipa3_wq_rx_common(ep->sys, mem_info.size);
- cnt += 5;
+ cnt += IPA_WAN_AGGR_PKT_CNT;
};
- if (cnt == 0) {
+ if (cnt == 0 || cnt < weight) {
ep->inactive_cycles++;
ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
ep->switch_to_intr = true;
delay = 0;
+ } else if (cnt < weight) {
+ delay = 0;
}
queue_delayed_work(ep->sys->wq,
&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 40f1e93653f9..1b78835cda6b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -53,8 +53,6 @@
#define IPA_UC_FINISH_MAX 6
#define IPA_UC_WAIT_MIN_SLEEP 1000
#define IPA_UC_WAII_MAX_SLEEP 1200
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
-#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
#define IPA_MAX_STATUS_STAT_NUM 30
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 8f6c303d2867..0419249890e9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -65,6 +65,7 @@
((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
rmnet_ipa3_ctx->wwan_priv->net : NULL)
+#define IPA_WWAN_CONS_DESC_FIFO_SZ 256
static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
@@ -89,6 +90,7 @@ struct ipa3_rmnet_plat_drv_res {
bool ipa_loaduC;
bool ipa_advertise_sg_support;
bool ipa_napi_enable;
+ u32 wan_rx_desc_size;
};
/**
@@ -1275,7 +1277,7 @@ static int handle3_ingress_format(struct net_device *dev,
ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
in->u.ingress_format.agg_count;
- if (ipa_wan_ep_cfg->napi_enabled) {
+ if (ipa3_rmnet_res.ipa_napi_enable) {
ipa_wan_ep_cfg->recycle_enabled = true;
ep_cfg = (struct rmnet_phys_ep_conf_s *)
rcu_dereference(dev->rx_handler_data);
@@ -1303,10 +1305,8 @@ static int handle3_ingress_format(struct net_device *dev,
ipa_wan_ep_cfg->priv = dev;
ipa_wan_ep_cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable;
- if (ipa_wan_ep_cfg->napi_enabled)
- ipa_wan_ep_cfg->desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
- else
- ipa_wan_ep_cfg->desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ ipa_wan_ep_cfg->desc_fifo_sz =
+ ipa3_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);
mutex_lock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
@@ -1957,6 +1957,9 @@ static struct notifier_block ipa3_ssr_notifier = {
static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res)
{
+ int result;
+
+ ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
ipa_rmnet_drv_res->ipa_rmnet_ssr =
of_property_read_bool(pdev->dev.of_node,
"qcom,rmnet-ipa-ssr");
@@ -1979,6 +1982,18 @@ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
"qcom,ipa-napi-enable");
pr_info("IPA Napi Enable = %s\n",
ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+
+ /* Get IPA WAN RX desc fifo size */
+ result = of_property_read_u32(pdev->dev.of_node,
+ "qcom,wan-rx-desc-size",
+ &ipa_rmnet_drv_res->wan_rx_desc_size);
+ if (result)
+ pr_info("using default for wan-rx-desc-size = %u\n",
+ ipa_rmnet_drv_res->wan_rx_desc_size);
+ else
+ IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
+ ipa_rmnet_drv_res->wan_rx_desc_size);
+
return 0;
}
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 45fedfa72bda..5810f7bf7f2f 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -51,6 +51,9 @@
#define VDDIO_MAX_UV 2040000
#define VDDIO_MAX_UA 70300
+#define DISABLE_PCIE_L1_MASK 0xFFFFFFFD
+#define PCIE20_CAP_LINKCTRLSTATUS 0x80
+
struct device;
static const char * const gpio_en_name = "qcom,wigig-en";
@@ -505,6 +508,7 @@ static int ops_resume(void *handle)
int rc;
struct msm11ad_ctx *ctx = handle;
struct pci_dev *pcidev;
+ u32 val;
pr_info("%s(%p)\n", __func__, handle);
if (!ctx) {
@@ -548,6 +552,27 @@ static int ops_resume(void *handle)
goto err_suspend_rc;
}
+ /* Disable L1 */
+ rc = pci_read_config_dword(ctx->pcidev,
+ PCIE20_CAP_LINKCTRLSTATUS, &val);
+ if (rc) {
+ dev_err(ctx->dev,
+ "reading PCIE20_CAP_LINKCTRLSTATUS failed:%d\n",
+ rc);
+ goto err_suspend_rc;
+ }
+ val &= DISABLE_PCIE_L1_MASK; /* disable bit 1 */
+ dev_dbg(ctx->dev, "writing PCIE20_CAP_LINKCTRLSTATUS (val 0x%x)\n",
+ val);
+ rc = pci_write_config_dword(ctx->pcidev,
+ PCIE20_CAP_LINKCTRLSTATUS, val);
+ if (rc) {
+ dev_err(ctx->dev,
+ "writing PCIE20_CAP_LINKCTRLSTATUS (val 0x%x) failed:%d\n",
+ val, rc);
+ goto err_suspend_rc;
+ }
+
return 0;
err_suspend_rc:
@@ -792,6 +817,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
struct device_node *rc_node;
struct pci_dev *pcidev = NULL;
int rc;
+ u32 val;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -907,6 +933,28 @@ static int msm_11ad_probe(struct platform_device *pdev)
goto out_rc;
}
ctx->pcidev = pcidev;
+
+ /* Disable L1 */
+ rc = pci_read_config_dword(ctx->pcidev,
+ PCIE20_CAP_LINKCTRLSTATUS, &val);
+ if (rc) {
+ dev_err(ctx->dev,
+ "reading PCIE20_CAP_LINKCTRLSTATUS failed:%d\n",
+ rc);
+ goto out_rc;
+ }
+ val &= DISABLE_PCIE_L1_MASK; /* disable bit 1 */
+ dev_dbg(ctx->dev, "writing PCIE20_CAP_LINKCTRLSTATUS (val 0x%x)\n",
+ val);
+ rc = pci_write_config_dword(ctx->pcidev,
+ PCIE20_CAP_LINKCTRLSTATUS, val);
+ if (rc) {
+ dev_err(ctx->dev,
+ "writing PCIE20_CAP_LINKCTRLSTATUS (val 0x%x) failed:%d\n",
+ val, rc);
+ goto out_rc;
+ }
+
rc = pci_save_state(pcidev);
if (rc) {
dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
diff --git a/drivers/power/qcom-charger/fg-core.h b/drivers/power/qcom-charger/fg-core.h
index a703b208f6e4..d612016b1b79 100644
--- a/drivers/power/qcom-charger/fg-core.h
+++ b/drivers/power/qcom-charger/fg-core.h
@@ -74,6 +74,7 @@ enum fg_debug_flag {
FG_BUS_WRITE = BIT(5), /* Show REGMAP writes */
FG_BUS_READ = BIT(6), /* Show REGMAP reads */
FG_CAP_LEARN = BIT(7), /* Show capacity learning */
+ FG_TTF = BIT(8), /* Show time to full */
};
/* SRAM access */
@@ -128,6 +129,7 @@ enum {
*/
enum fg_sram_param_id {
FG_SRAM_BATT_SOC = 0,
+ FG_SRAM_FULL_SOC,
FG_SRAM_VOLTAGE_PRED,
FG_SRAM_OCV,
FG_SRAM_RSLOW,
@@ -222,7 +224,6 @@ struct fg_batt_props {
int float_volt_uv;
int vbatt_full_mv;
int fastchg_curr_ma;
- int batt_id_kohm;
};
struct fg_cyc_ctr_data {
@@ -251,6 +252,29 @@ struct fg_irq_info {
int irq;
};
+struct fg_circ_buf {
+ int arr[20];
+ int size;
+ int head;
+};
+
+struct fg_pt {
+ s32 x;
+ s32 y;
+};
+
+static const struct fg_pt fg_ln_table[] = {
+ { 1000, 0 },
+ { 2000, 693 },
+ { 4000, 1386 },
+ { 6000, 1792 },
+ { 8000, 2079 },
+ { 16000, 2773 },
+ { 32000, 3466 },
+ { 64000, 4159 },
+ { 128000, 4852 },
+};
+
struct fg_chip {
struct device *dev;
struct pmic_revid_data *pmic_rev_id;
@@ -260,6 +284,7 @@ struct fg_chip {
struct power_supply *batt_psy;
struct power_supply *usb_psy;
struct power_supply *dc_psy;
+ struct power_supply *parallel_psy;
struct iio_channel *batt_id_chan;
struct fg_memif *sram;
struct fg_irq_info *irqs;
@@ -275,12 +300,15 @@ struct fg_chip {
struct fg_cap_learning cl;
struct mutex bus_lock;
struct mutex sram_rw_lock;
+ struct mutex batt_avg_lock;
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
- int batt_id;
- int status;
+ int batt_id_kohms;
+ int charge_status;
+ int prev_charge_status;
int charge_done;
+ int charge_type;
int last_soc;
int last_batt_temp;
int health;
@@ -291,11 +319,15 @@ struct fg_chip {
bool charge_full;
bool recharge_soc_adjusted;
bool ki_coeff_dischg_en;
+ bool esr_fcc_ctrl_en;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
struct work_struct status_change_work;
struct work_struct cycle_count_work;
+ struct delayed_work batt_avg_work;
+ struct fg_circ_buf ibatt_circ_buf;
+ struct fg_circ_buf vbatt_circ_buf;
};
/* Debugfs data structures are below */
@@ -340,9 +372,15 @@ extern int fg_read(struct fg_chip *chip, int addr, u8 *val, int len);
extern int fg_write(struct fg_chip *chip, int addr, u8 *val, int len);
extern int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val);
extern int fg_ima_init(struct fg_chip *chip);
+extern int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts);
+extern int fg_clear_dma_errors_if_any(struct fg_chip *chip);
extern int fg_debugfs_create(struct fg_chip *chip);
extern void fill_string(char *str, size_t str_len, u8 *buf, int buf_len);
extern int64_t twos_compliment_extend(int64_t val, int s_bit_pos);
extern s64 fg_float_decode(u16 val);
extern bool is_input_present(struct fg_chip *chip);
+extern void fg_circ_buf_add(struct fg_circ_buf *, int);
+extern void fg_circ_buf_clr(struct fg_circ_buf *);
+extern int fg_circ_buf_avg(struct fg_circ_buf *, int *);
+extern int fg_lerp(const struct fg_pt *, size_t, s32, s32 *);
#endif
diff --git a/drivers/power/qcom-charger/fg-memif.c b/drivers/power/qcom-charger/fg-memif.c
index c271b24adfc4..a98ff7d765e3 100644
--- a/drivers/power/qcom-charger/fg-memif.c
+++ b/drivers/power/qcom-charger/fg-memif.c
@@ -64,44 +64,90 @@ static int fg_config_access_mode(struct fg_chip *chip, bool access, bool burst)
static int fg_run_iacs_clear_sequence(struct fg_chip *chip)
{
- u8 tmp;
- int rc;
+ u8 val, hw_sts, exp_sts;
+ int rc, tries = 250;
/*
* Values to write for running IACS clear sequence comes from
* hardware documentation.
*/
- rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip), IACS_CLR_BIT,
- IACS_CLR_BIT);
+ rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip),
+ IACS_CLR_BIT | STATIC_CLK_EN_BIT,
+ IACS_CLR_BIT | STATIC_CLK_EN_BIT);
if (rc < 0) {
pr_err("failed to write 0x%04x, rc=%d\n", MEM_IF_IMA_CFG(chip),
rc);
return rc;
}
- tmp = 0x4;
- rc = fg_write(chip, MEM_IF_ADDR_MSB(chip), &tmp, 1);
+ rc = fg_config_access_mode(chip, FG_READ, false);
if (rc < 0) {
- pr_err("failed to write 0x%04x, rc=%d\n", MEM_IF_ADDR_LSB(chip),
- rc);
+ pr_err("failed to write to 0x%04x, rc=%d\n",
+ MEM_IF_IMA_CTL(chip), rc);
return rc;
}
- tmp = 0x0;
- rc = fg_write(chip, MEM_IF_WR_DATA3(chip), &tmp, 1);
+ rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+ MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT,
+ MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT);
if (rc < 0) {
- pr_err("failed to write 0x%04x, rc=%d\n", MEM_IF_WR_DATA3(chip),
- rc);
+ pr_err("failed to set ima_req_access bit rc=%d\n", rc);
return rc;
}
- rc = fg_read(chip, MEM_IF_RD_DATA3(chip), &tmp, 1);
- if (rc < 0) {
- pr_err("failed to read 0x%04x, rc=%d\n", MEM_IF_RD_DATA3(chip),
- rc);
- return rc;
+ /* Delay for the clock to reach FG */
+ usleep_range(35, 40);
+
+ while (1) {
+ val = 0;
+ rc = fg_write(chip, MEM_IF_ADDR_MSB(chip), &val, 1);
+ if (rc < 0) {
+ pr_err("failed to write 0x%04x, rc=%d\n",
+ MEM_IF_ADDR_MSB(chip), rc);
+ return rc;
+ }
+
+ val = 0;
+ rc = fg_write(chip, MEM_IF_WR_DATA3(chip), &val, 1);
+ if (rc < 0) {
+ pr_err("failed to write 0x%04x, rc=%d\n",
+ MEM_IF_WR_DATA3(chip), rc);
+ return rc;
+ }
+
+ rc = fg_read(chip, MEM_IF_RD_DATA3(chip), &val, 1);
+ if (rc < 0) {
+ pr_err("failed to read 0x%04x, rc=%d\n",
+ MEM_IF_RD_DATA3(chip), rc);
+ return rc;
+ }
+
+ /* Delay for IMA hardware to clear */
+ usleep_range(35, 40);
+
+ rc = fg_read(chip, MEM_IF_IMA_HW_STS(chip), &hw_sts, 1);
+ if (rc < 0) {
+ pr_err("failed to read ima_hw_sts rc=%d\n", rc);
+ return rc;
+ }
+
+ if (hw_sts != 0)
+ continue;
+
+ rc = fg_read(chip, MEM_IF_IMA_EXP_STS(chip), &exp_sts, 1);
+ if (rc < 0) {
+ pr_err("failed to read ima_exp_sts rc=%d\n", rc);
+ return rc;
+ }
+
+ if (exp_sts == 0 || !(--tries))
+ break;
}
+ if (!tries)
+ pr_err("Failed to clear the error? hw_sts: %x exp_sts: %d\n",
+ hw_sts, exp_sts);
+
rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip), IACS_CLR_BIT, 0);
if (rc < 0) {
pr_err("failed to write 0x%04x, rc=%d\n", MEM_IF_IMA_CFG(chip),
@@ -109,14 +155,65 @@ static int fg_run_iacs_clear_sequence(struct fg_chip *chip)
return rc;
}
+ udelay(5);
+
+ rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+ MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
+ if (rc < 0) {
+ pr_err("failed to write to 0x%04x, rc=%d\n",
+ MEM_IF_MEM_INTF_CFG(chip), rc);
+ return rc;
+ }
+
+ /* Delay before next transaction is attempted */
+ usleep_range(35, 40);
fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "IACS clear sequence complete\n");
return rc;
}
-static int fg_check_for_ima_errors(struct fg_chip *chip)
+int fg_clear_dma_errors_if_any(struct fg_chip *chip)
+{
+ int rc;
+ u8 dma_sts;
+
+ rc = fg_read(chip, MEM_IF_DMA_STS(chip), &dma_sts, 1);
+ if (rc < 0) {
+ pr_err("failed to read addr=0x%04x, rc=%d\n",
+ MEM_IF_DMA_STS(chip), rc);
+ return rc;
+ }
+ fg_dbg(chip, FG_STATUS, "dma_sts: %x\n", dma_sts);
+
+ if (dma_sts & (DMA_WRITE_ERROR_BIT | DMA_READ_ERROR_BIT)) {
+ rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip),
+ DMA_CLEAR_LOG_BIT, DMA_CLEAR_LOG_BIT);
+ if (rc < 0) {
+ pr_err("failed to write addr=0x%04x, rc=%d\n",
+ MEM_IF_DMA_CTL(chip), rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts)
{
int rc = 0;
u8 err_sts, exp_sts = 0, hw_sts = 0;
+ bool run_err_clr_seq = false;
+
+ rc = fg_read(chip, MEM_IF_IMA_EXP_STS(chip), &exp_sts, 1);
+ if (rc < 0) {
+ pr_err("failed to read ima_exp_sts rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_read(chip, MEM_IF_IMA_HW_STS(chip), &hw_sts, 1);
+ if (rc < 0) {
+ pr_err("failed to read ima_hw_sts rc=%d\n", rc);
+ return rc;
+ }
rc = fg_read(chip, MEM_IF_IMA_ERR_STS(chip), &err_sts, 1);
if (rc < 0) {
@@ -124,22 +221,30 @@ static int fg_check_for_ima_errors(struct fg_chip *chip)
return rc;
}
- if (err_sts & (ADDR_STBL_ERR_BIT | WR_ACS_ERR_BIT | RD_ACS_ERR_BIT)) {
- rc = fg_read(chip, MEM_IF_IMA_EXP_STS(chip), &exp_sts, 1);
- if (rc < 0) {
- pr_err("failed to read ima_exp_sts rc=%d\n", rc);
- return rc;
- }
+ fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+ err_sts, exp_sts, hw_sts);
- rc = fg_read(chip, MEM_IF_IMA_HW_STS(chip), &hw_sts, 1);
- if (rc < 0) {
- pr_err("failed to read ima_hw_sts rc=%d\n", rc);
- return rc;
+ if (check_hw_sts) {
+ /*
+ * Lower nibble should be equal to upper nibble before SRAM
+ * transactions begins from SW side. If they are unequal, then
+ * the error clear sequence should be run irrespective of IMA
+ * exception errors.
+ */
+ if ((hw_sts & 0x0F) != hw_sts >> 4) {
+ pr_err("IMA HW not in correct state, hw_sts=%x\n",
+ hw_sts);
+ run_err_clr_seq = true;
}
+ }
- pr_err("ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
- err_sts, exp_sts, hw_sts);
+ if (exp_sts & (IACS_ERR_BIT | XCT_TYPE_ERR_BIT | DATA_RD_ERR_BIT |
+ DATA_WR_ERR_BIT | ADDR_BURST_WRAP_BIT | ADDR_STABLE_ERR_BIT)) {
+ pr_err("IMA exception bit set, exp_sts=%x\n", exp_sts);
+ run_err_clr_seq = true;
+ }
+ if (run_err_clr_seq) {
/* clear the error */
rc = fg_run_iacs_clear_sequence(chip);
if (rc < 0) {
@@ -156,7 +261,7 @@ static int fg_check_for_ima_errors(struct fg_chip *chip)
static int fg_check_iacs_ready(struct fg_chip *chip)
{
- int rc = 0, timeout = 250;
+ int rc = 0, tries = 250;
u8 ima_opr_sts = 0;
/*
@@ -176,17 +281,17 @@ static int fg_check_iacs_ready(struct fg_chip *chip)
if (ima_opr_sts & IACS_RDY_BIT)
break;
- if (!(--timeout))
+ if (!(--tries))
break;
/* delay for iacs_ready to be asserted */
usleep_range(5000, 7000);
}
- if (!timeout) {
+ if (!tries) {
pr_err("IACS_RDY not set\n");
-
- rc = fg_check_for_ima_errors(chip);
+ /* check for error condition */
+ rc = fg_clear_ima_errors_if_any(chip, false);
if (rc < 0) {
pr_err("Failed to check for ima errors rc=%d\n", rc);
return rc;
@@ -250,7 +355,7 @@ static int __fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
}
/* check for error condition */
- rc = fg_check_for_ima_errors(chip);
+ rc = fg_clear_ima_errors_if_any(chip, false);
if (rc < 0) {
pr_err("Failed to check for ima errors rc=%d\n", rc);
return rc;
@@ -296,7 +401,7 @@ static int __fg_interleaved_mem_read(struct fg_chip *chip, u16 address,
offset = 0;
/* check for error condition */
- rc = fg_check_for_ima_errors(chip);
+ rc = fg_clear_ima_errors_if_any(chip, false);
if (rc < 0) {
pr_err("Failed to check for ima errors rc=%d\n", rc);
return rc;
@@ -581,5 +686,19 @@ int fg_ima_init(struct fg_chip *chip)
return rc;
}
+ /* Clear DMA errors if any before clearing IMA errors */
+ rc = fg_clear_dma_errors_if_any(chip);
+ if (rc < 0) {
+ pr_err("Error in checking DMA errors rc:%d\n", rc);
+ return rc;
+ }
+
+ /* Clear IMA errors if any before SRAM transactions can begin */
+ rc = fg_clear_ima_errors_if_any(chip, true);
+ if (rc < 0 && rc != -EAGAIN) {
+ pr_err("Error in checking IMA errors rc:%d\n", rc);
+ return rc;
+ }
+
return 0;
}
diff --git a/drivers/power/qcom-charger/fg-reg.h b/drivers/power/qcom-charger/fg-reg.h
index 431e28a7eb1f..ffc46f328f91 100644
--- a/drivers/power/qcom-charger/fg-reg.h
+++ b/drivers/power/qcom-charger/fg-reg.h
@@ -26,6 +26,9 @@
#define BATT_SOC_LOW_PWR_CFG(chip) (chip->batt_soc_base + 0x52)
#define BATT_SOC_LOW_PWR_STS(chip) (chip->batt_soc_base + 0x56)
+/* BATT_SOC_INT_RT_STS */
+#define MSOC_EMPTY_BIT BIT(5)
+
/* BATT_SOC_EN_CTL */
#define FG_ALGORITHM_EN_BIT BIT(7)
@@ -258,6 +261,7 @@
#define ESR_REQ_CTL_EN_BIT BIT(0)
/* FG_MEM_IF register and bit definitions */
+#define MEM_IF_INT_RT_STS(chip) ((chip->mem_if_base) + 0x10)
#define MEM_IF_MEM_INTF_CFG(chip) ((chip->mem_if_base) + 0x50)
#define MEM_IF_IMA_CTL(chip) ((chip->mem_if_base) + 0x51)
#define MEM_IF_IMA_CFG(chip) ((chip->mem_if_base) + 0x52)
@@ -273,6 +277,11 @@
#define MEM_IF_WR_DATA3(chip) ((chip->mem_if_base) + 0x66)
#define MEM_IF_RD_DATA0(chip) ((chip->mem_if_base) + 0x67)
#define MEM_IF_RD_DATA3(chip) ((chip->mem_if_base) + 0x6A)
+#define MEM_IF_DMA_STS(chip) ((chip->mem_if_base) + 0x70)
+#define MEM_IF_DMA_CTL(chip) ((chip->mem_if_base) + 0x71)
+
+/* MEM_IF_INT_RT_STS */
+#define MEM_XCP_BIT BIT(1)
/* MEM_IF_MEM_INTF_CFG */
#define MEM_ACCESS_REQ_BIT BIT(7)
@@ -286,10 +295,19 @@
/* MEM_IF_IMA_CFG */
#define IACS_CLR_BIT BIT(2)
#define IACS_INTR_SRC_SLCT_BIT BIT(3)
+#define STATIC_CLK_EN_BIT BIT(4)
/* MEM_IF_IMA_OPR_STS */
#define IACS_RDY_BIT BIT(1)
+/* MEM_IF_IMA_EXP_STS */
+#define IACS_ERR_BIT BIT(0)
+#define XCT_TYPE_ERR_BIT BIT(1)
+#define DATA_RD_ERR_BIT BIT(3)
+#define DATA_WR_ERR_BIT BIT(4)
+#define ADDR_BURST_WRAP_BIT BIT(5)
+#define ADDR_STABLE_ERR_BIT BIT(7)
+
/* MEM_IF_IMA_ERR_STS */
#define ADDR_STBL_ERR_BIT BIT(7)
#define WR_ACS_ERR_BIT BIT(6)
@@ -297,4 +315,11 @@
/* MEM_IF_FG_BEAT_COUNT */
#define BEAT_COUNT_MASK GENMASK(3, 0)
+
+/* MEM_IF_DMA_STS */
+#define DMA_WRITE_ERROR_BIT BIT(1)
+#define DMA_READ_ERROR_BIT BIT(2)
+
+/* MEM_IF_DMA_CTL */
+#define DMA_CLEAR_LOG_BIT BIT(0)
#endif
diff --git a/drivers/power/qcom-charger/fg-util.c b/drivers/power/qcom-charger/fg-util.c
index 0e3c7dbb5731..405d875ea7df 100644
--- a/drivers/power/qcom-charger/fg-util.c
+++ b/drivers/power/qcom-charger/fg-util.c
@@ -14,6 +14,82 @@
#include "fg-core.h"
+void fg_circ_buf_add(struct fg_circ_buf *buf, int val)
+{
+ buf->arr[buf->head] = val;
+ buf->head = (buf->head + 1) % ARRAY_SIZE(buf->arr);
+ buf->size = min(++buf->size, (int)ARRAY_SIZE(buf->arr));
+}
+
+void fg_circ_buf_clr(struct fg_circ_buf *buf)
+{
+ memset(buf, 0, sizeof(*buf));
+}
+
+int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg)
+{
+ s64 result = 0;
+ int i;
+
+ if (buf->size == 0)
+ return -ENODATA;
+
+ for (i = 0; i < buf->size; i++)
+ result += buf->arr[i];
+
+ *avg = div_s64(result, buf->size);
+ return 0;
+}
+
+int fg_lerp(const struct fg_pt *pts, size_t tablesize, s32 input, s32 *output)
+{
+ int i;
+ s64 temp;
+
+ if (pts == NULL) {
+ pr_err("Table is NULL\n");
+ return -EINVAL;
+ }
+
+ if (tablesize < 1) {
+ pr_err("Table has no entries\n");
+ return -ENOENT;
+ }
+
+ if (tablesize == 1) {
+ *output = pts[0].y;
+ return 0;
+ }
+
+ if (pts[0].x > pts[1].x) {
+ pr_err("Table is not in acending order\n");
+ return -EINVAL;
+ }
+
+ if (input <= pts[0].x) {
+ *output = pts[0].y;
+ return 0;
+ }
+
+ if (input >= pts[tablesize - 1].x) {
+ *output = pts[tablesize - 1].y;
+ return 0;
+ }
+
+ for (i = 1; i < tablesize; i++) {
+ if (input >= pts[i].x)
+ continue;
+
+ temp = (s64)(pts[i].y - pts[i - 1].y) *
+ (s64)(input - pts[i - 1].x);
+ temp = div_s64(temp, pts[i].x - pts[i - 1].x);
+ *output = temp + pts[i - 1].y;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static struct fg_dbgfs dbgfs_data = {
.help_msg = {
.data =
diff --git a/drivers/power/qcom-charger/qpnp-fg-gen3.c b/drivers/power/qcom-charger/qpnp-fg-gen3.c
index 4ee94b990382..100153280d9e 100644
--- a/drivers/power/qcom-charger/qpnp-fg-gen3.c
+++ b/drivers/power/qcom-charger/qpnp-fg-gen3.c
@@ -146,6 +146,8 @@ static void fg_encode_default(struct fg_sram_param *sp,
static struct fg_sram_param pmicobalt_v1_sram_params[] = {
PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
fg_decode_default),
+ PARAM(FULL_SOC, FULL_SOC_WORD, FULL_SOC_OFFSET, 2, 1, 1, 0, NULL,
+ fg_decode_default),
PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 244141,
1000, 0, NULL, fg_decode_voltage_15b),
PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 244141, 1000, 0, NULL,
@@ -198,6 +200,8 @@ static struct fg_sram_param pmicobalt_v1_sram_params[] = {
static struct fg_sram_param pmicobalt_v2_sram_params[] = {
PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
fg_decode_default),
+ PARAM(FULL_SOC, FULL_SOC_WORD, FULL_SOC_OFFSET, 2, 1, 1, 0, NULL,
+ fg_decode_default),
PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 244141,
1000, 0, NULL, fg_decode_voltage_15b),
PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 244141, 1000, 0, NULL,
@@ -655,12 +659,71 @@ static int fg_get_msoc_raw(struct fg_chip *chip, int *val)
return 0;
}
+static bool is_batt_empty(struct fg_chip *chip)
+{
+ u8 status;
+ int rc, vbatt_uv, msoc;
+
+ rc = fg_read(chip, BATT_SOC_INT_RT_STS(chip), &status, 1);
+ if (rc < 0) {
+ pr_err("failed to read addr=0x%04x, rc=%d\n",
+ BATT_SOC_INT_RT_STS(chip), rc);
+ return false;
+ }
+
+ if (!(status & MSOC_EMPTY_BIT))
+ return false;
+
+ rc = fg_get_battery_voltage(chip, &vbatt_uv);
+ if (rc < 0) {
+ pr_err("failed to get battery voltage, rc=%d\n", rc);
+ return false;
+ }
+
+ rc = fg_get_msoc_raw(chip, &msoc);
+ if (!rc)
+ pr_warn("batt_soc_rt_sts: %x vbatt: %d uV msoc:%d\n", status,
+ vbatt_uv, msoc);
+
+ return ((vbatt_uv < chip->dt.cutoff_volt_mv * 1000) ? true : false);
+}
+
+#define DEBUG_BATT_ID_KOHMS 7
+static bool is_debug_batt_id(struct fg_chip *chip)
+{
+ int batt_id_delta = 0;
+
+ if (!chip->batt_id_kohms)
+ return false;
+
+ batt_id_delta = abs(chip->batt_id_kohms - DEBUG_BATT_ID_KOHMS);
+ if (batt_id_delta <= 1) {
+ fg_dbg(chip, FG_POWER_SUPPLY, "Debug battery id: %dKohms\n",
+ chip->batt_id_kohms);
+ return true;
+ }
+
+ return false;
+}
+
#define FULL_CAPACITY 100
#define FULL_SOC_RAW 255
+#define DEBUG_BATT_SOC 67
+#define EMPTY_SOC 0
static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
{
int rc, msoc;
+ if (is_debug_batt_id(chip)) {
+ *val = DEBUG_BATT_SOC;
+ return 0;
+ }
+
+ if (is_batt_empty(chip)) {
+ *val = EMPTY_SOC;
+ return 0;
+ }
+
if (chip->charge_full) {
*val = FULL_CAPACITY;
return 0;
@@ -725,7 +788,7 @@ static int fg_get_batt_profile(struct fg_chip *chip)
}
batt_id /= 1000;
- chip->batt_id = batt_id;
+ chip->batt_id_kohms = batt_id;
batt_node = of_find_node_by_name(node, "qcom,battery-data");
if (!batt_node) {
pr_err("Batterydata not available\n");
@@ -879,6 +942,17 @@ static bool is_charger_available(struct fg_chip *chip)
return true;
}
+static bool is_parallel_charger_available(struct fg_chip *chip)
+{
+ if (!chip->parallel_psy)
+ chip->parallel_psy = power_supply_get_by_name("parallel");
+
+ if (!chip->parallel_psy)
+ return false;
+
+ return true;
+}
+
static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
{
int16_t cc_mah;
@@ -1126,11 +1200,11 @@ static void fg_cap_learning_update(struct fg_chip *chip)
batt_soc = (u32)batt_soc >> 24;
fg_dbg(chip, FG_CAP_LEARN, "Chg_status: %d cl_active: %d batt_soc: %d\n",
- chip->status, chip->cl.active, batt_soc);
+ chip->charge_status, chip->cl.active, batt_soc);
/* Initialize the starting point of learning capacity */
if (!chip->cl.active) {
- if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
rc = fg_cap_learning_begin(chip, batt_soc);
chip->cl.active = (rc == 0);
}
@@ -1146,7 +1220,7 @@ static void fg_cap_learning_update(struct fg_chip *chip)
chip->cl.init_cc_uah = 0;
}
- if (chip->status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
+ if (chip->charge_status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
batt_soc);
chip->cl.active = false;
@@ -1176,7 +1250,7 @@ static int fg_adjust_ki_coeff_dischg(struct fg_chip *chip)
return rc;
}
- if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) {
for (i = KI_COEFF_SOC_LEVELS - 1; i >= 0; i--) {
if (msoc < chip->dt.ki_coeff_soc[i]) {
ki_coeff_med = chip->dt.ki_coeff_med_dischg[i];
@@ -1250,7 +1324,7 @@ static int fg_charge_full_update(struct fg_chip *chip)
}
fg_dbg(chip, FG_STATUS, "msoc: %d health: %d status: %d\n", msoc,
- chip->health, chip->status);
+ chip->health, chip->charge_status);
if (chip->charge_done) {
if (msoc >= 99 && chip->health == POWER_SUPPLY_HEALTH_GOOD)
chip->charge_full = true;
@@ -1351,6 +1425,88 @@ static int fg_adjust_recharge_soc(struct fg_chip *chip)
return 0;
}
+static int fg_esr_fcc_config(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0, };
+ int rc;
+ bool parallel_en = false;
+
+ if (is_parallel_charger_available(chip)) {
+ rc = power_supply_get_property(chip->parallel_psy,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop);
+ if (rc < 0) {
+ pr_err("Error in reading charging_enabled from parallel_psy, rc=%d\n",
+ rc);
+ return rc;
+ }
+ parallel_en = prop.intval;
+ }
+
+ fg_dbg(chip, FG_POWER_SUPPLY, "charge_status: %d parallel_en: %d esr_fcc_ctrl_en: %d\n",
+ chip->charge_status, parallel_en, chip->esr_fcc_ctrl_en);
+
+ if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+ parallel_en) {
+ if (chip->esr_fcc_ctrl_en)
+ return 0;
+
+ /*
+ * When parallel charging is enabled, configure ESR FCC to
+ * 300mA to trigger an ESR pulse. Without this, FG can ask
+ * the main charger to increase FCC when it is supposed to
+ * decrease it.
+ */
+ rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
+ ESR_FAST_CRG_IVAL_MASK |
+ ESR_FAST_CRG_CTL_EN_BIT,
+ ESR_FCC_300MA | ESR_FAST_CRG_CTL_EN_BIT);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_INFO_ESR_FAST_CRG_CFG(chip), rc);
+ return rc;
+ }
+
+ chip->esr_fcc_ctrl_en = true;
+ } else {
+ if (!chip->esr_fcc_ctrl_en)
+ return 0;
+
+ /*
+ * If we're here, then it means either the device is not in
+ * charging state or parallel charging is disabled. Disable
+ * ESR fast charge current control in SW.
+ */
+ rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
+ ESR_FAST_CRG_CTL_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_INFO_ESR_FAST_CRG_CFG(chip), rc);
+ return rc;
+ }
+
+ chip->esr_fcc_ctrl_en = false;
+ }
+
+ fg_dbg(chip, FG_STATUS, "esr_fcc_ctrl_en set to %d\n",
+ chip->esr_fcc_ctrl_en);
+ return 0;
+}
+
+static void fg_batt_avg_update(struct fg_chip *chip)
+{
+ if (chip->charge_status == chip->prev_charge_status)
+ return;
+
+ cancel_delayed_work_sync(&chip->batt_avg_work);
+ fg_circ_buf_clr(&chip->ibatt_circ_buf);
+ fg_circ_buf_clr(&chip->vbatt_circ_buf);
+
+ if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING ||
+ chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING)
+ schedule_delayed_work(&chip->batt_avg_work,
+ msecs_to_jiffies(2000));
+}
+
static void status_change_work(struct work_struct *work)
{
struct fg_chip *chip = container_of(work,
@@ -1370,7 +1526,16 @@ static void status_change_work(struct work_struct *work)
goto out;
}
- chip->status = prop.intval;
+ chip->prev_charge_status = chip->charge_status;
+ chip->charge_status = prop.intval;
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charge type, rc=%d\n", rc);
+ goto out;
+ }
+
+ chip->charge_type = prop.intval;
rc = power_supply_get_property(chip->batt_psy,
POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
if (rc < 0) {
@@ -1379,9 +1544,6 @@ static void status_change_work(struct work_struct *work)
}
chip->charge_done = prop.intval;
- fg_dbg(chip, FG_POWER_SUPPLY, "curr_status:%d charge_done: %d\n",
- chip->status, chip->charge_done);
-
if (chip->cyc_ctr.en)
schedule_work(&chip->cycle_count_work);
@@ -1398,7 +1560,16 @@ static void status_change_work(struct work_struct *work)
rc = fg_adjust_ki_coeff_dischg(chip);
if (rc < 0)
pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
+ rc = fg_esr_fcc_config(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
+
+ fg_batt_avg_update(chip);
+
out:
+ fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
+ chip->charge_status, chip->charge_type, chip->charge_done);
pm_relax(chip->dev);
}
@@ -1485,7 +1656,7 @@ static void cycle_count_work(struct work_struct *work)
/* We need only the most significant byte here */
batt_soc = (u32)batt_soc >> 24;
- if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
/* Find out which bucket the SOC falls in */
bucket = batt_soc / BUCKET_SOC_PCT;
pr_debug("batt_soc: %d bucket: %d\n", batt_soc, bucket);
@@ -1556,6 +1727,10 @@ static void dump_sram(u8 *buf, int len)
}
}
+#define PROFILE_LOAD_BIT BIT(0)
+#define BOOTLOADER_LOAD_BIT BIT(1)
+#define BOOTLOADER_RESTART_BIT BIT(2)
+#define HLOS_RESTART_BIT BIT(3)
static bool is_profile_load_required(struct fg_chip *chip)
{
u8 buf[PROFILE_COMP_LEN], val;
@@ -1570,7 +1745,7 @@ static bool is_profile_load_required(struct fg_chip *chip)
}
/* Check if integrity bit is set */
- if (val == 0x01) {
+ if (val & PROFILE_LOAD_BIT) {
fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
@@ -1728,7 +1903,7 @@ static void profile_load_work(struct work_struct *work)
fg_dbg(chip, FG_STATUS, "SOC is ready\n");
/* Set the profile integrity bit */
- val = 0x1;
+ val = HLOS_RESTART_BIT | PROFILE_LOAD_BIT;
rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
if (rc < 0) {
@@ -1798,6 +1973,229 @@ static struct kernel_param_ops fg_restart_ops = {
module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
+#define BATT_AVG_POLL_PERIOD_MS 10000
+static void batt_avg_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ batt_avg_work.work);
+ int rc, ibatt_now, vbatt_now;
+
+ mutex_lock(&chip->batt_avg_lock);
+ rc = fg_get_battery_current(chip, &ibatt_now);
+ if (rc < 0) {
+ pr_err("failed to get battery current, rc=%d\n", rc);
+ goto reschedule;
+ }
+
+ rc = fg_get_battery_voltage(chip, &vbatt_now);
+ if (rc < 0) {
+ pr_err("failed to get battery voltage, rc=%d\n", rc);
+ goto reschedule;
+ }
+
+ fg_circ_buf_add(&chip->ibatt_circ_buf, ibatt_now);
+ fg_circ_buf_add(&chip->vbatt_circ_buf, vbatt_now);
+
+reschedule:
+ mutex_unlock(&chip->batt_avg_lock);
+ schedule_delayed_work(&chip->batt_avg_work,
+ msecs_to_jiffies(BATT_AVG_POLL_PERIOD_MS));
+}
+
#define DECI_TAU_SCALE 13
#define HOURS_TO_SECONDS 3600
#define OCV_SLOPE_UV 10869
#define MILLI_UNIT 1000
#define MICRO_UNIT 1000000
/*
 * Estimate the time to full (in seconds) and store it in *val.
 *
 * The charge is modeled in two phases:
 *  - CC (constant current): charge needed to move from the present SOC
 *    to the estimated SOC at the CC->CV transition, divided by the
 *    average of the present current and the predicted transition
 *    current.
 *  - CV (constant voltage): exponential current decay modeled as an RC
 *    time constant (DECI_TAU_SCALE/10 * rbatt * capacity) times
 *    ln(I_start / I_term) from the fg_ln_table lookup.
 *
 * Returns 0 on success, -ENODATA when no battery profile or charger is
 * present, or a negative errno on a read failure.
 */
static int fg_get_time_to_full(struct fg_chip *chip, int *val)
{
	int rc, ibatt_avg, vbatt_avg, rbatt, msoc, ocv_cc2cv, full_soc,
		act_cap_uah;
	s32 i_cc2cv, soc_cc2cv, ln_val;
	s64 t_predicted_cc = 0, t_predicted_cv = 0;

	/* a non-positive float voltage means the profile is not loaded */
	if (chip->bp.float_volt_uv <= 0) {
		pr_err("battery profile is not loaded\n");
		return -ENODATA;
	}

	if (!is_charger_available(chip)) {
		fg_dbg(chip, FG_TTF, "charger is not available\n");
		return -ENODATA;
	}

	if (chip->charge_status == POWER_SUPPLY_STATUS_FULL) {
		*val = 0;
		return 0;
	}

	/* averages come from batt_avg_work's circular buffers */
	mutex_lock(&chip->batt_avg_lock);
	rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg);
	if (rc < 0) {
		/* try to get instantaneous current */
		rc = fg_get_battery_current(chip, &ibatt_avg);
		if (rc < 0) {
			mutex_unlock(&chip->batt_avg_lock);
			pr_err("failed to get battery current, rc=%d\n", rc);
			return rc;
		}
	}

	rc = fg_circ_buf_avg(&chip->vbatt_circ_buf, &vbatt_avg);
	if (rc < 0) {
		/* try to get instantaneous voltage */
		rc = fg_get_battery_voltage(chip, &vbatt_avg);
		if (rc < 0) {
			mutex_unlock(&chip->batt_avg_lock);
			pr_err("failed to get battery voltage, rc=%d\n", rc);
			return rc;
		}
	}

	mutex_unlock(&chip->batt_avg_lock);
	fg_dbg(chip, FG_TTF, "vbatt_avg=%d\n", vbatt_avg);

	/* clamp ibatt_avg to -150mA (charging current is negative here) */
	if (ibatt_avg > -150000)
		ibatt_avg = -150000;
	fg_dbg(chip, FG_TTF, "ibatt_avg=%d\n", ibatt_avg);

	/* reverse polarity to be consistent with unsigned current settings */
	ibatt_avg = abs(ibatt_avg);

	/* estimated battery current at the CC to CV transition */
	i_cc2cv = div_s64((s64)ibatt_avg * vbatt_avg, chip->bp.float_volt_uv);
	fg_dbg(chip, FG_TTF, "i_cc2cv=%d\n", i_cc2cv);

	rc = fg_get_battery_resistance(chip, &rbatt);
	if (rc < 0) {
		pr_err("failed to get battery resistance rc=%d\n", rc);
		return rc;
	}

	/* clamp rbatt to 50mOhms */
	if (rbatt < 50000)
		rbatt = 50000;

	fg_dbg(chip, FG_TTF, "rbatt=%d\n", rbatt);

	rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah);
	if (rc < 0) {
		pr_err("failed to get ACT_BATT_CAP rc=%d\n", rc);
		return rc;
	}
	/* scale by 1000 (mAh -> uAh, per the variable naming) */
	act_cap_uah *= MILLI_UNIT;
	fg_dbg(chip, FG_TTF, "actual_capacity_uah=%d\n", act_cap_uah);

	rc = fg_get_prop_capacity(chip, &msoc);
	if (rc < 0) {
		pr_err("failed to get msoc rc=%d\n", rc);
		return rc;
	}
	fg_dbg(chip, FG_TTF, "msoc=%d\n", msoc);

	rc = fg_get_sram_prop(chip, FG_SRAM_FULL_SOC, &full_soc);
	if (rc < 0) {
		pr_err("failed to get full soc rc=%d\n", rc);
		return rc;
	}
	/* scale the MSB of the raw full SOC to a 0..100 percentage */
	full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY,
		FULL_SOC_RAW);
	fg_dbg(chip, FG_TTF, "full_soc=%d\n", full_soc);

	/* if we are already in CV state then we can skip estimating CC */
	if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
		goto skip_cc_estimate;

	/* if the charger is current limited then use power approximation */
	if (ibatt_avg > chip->bp.fastchg_curr_ma * MILLI_UNIT - 50000)
		ocv_cc2cv = div_s64((s64)rbatt * ibatt_avg, MICRO_UNIT);
	else
		ocv_cc2cv = div_s64((s64)rbatt * i_cc2cv, MICRO_UNIT);
	/* OCV at the transition = float voltage minus the IR drop */
	ocv_cc2cv = chip->bp.float_volt_uv - ocv_cc2cv;
	fg_dbg(chip, FG_TTF, "ocv_cc2cv=%d\n", ocv_cc2cv);

	soc_cc2cv = div_s64(chip->bp.float_volt_uv - ocv_cc2cv, OCV_SLOPE_UV);
	/* estimated SOC at the CC to CV transition */
	soc_cc2cv = 100 - soc_cc2cv;
	fg_dbg(chip, FG_TTF, "soc_cc2cv=%d\n", soc_cc2cv);

	/* the estimated SOC may be lower than the current SOC */
	if (soc_cc2cv - msoc <= 0)
		goto skip_cc_estimate;

	/* CC time = charge from msoc to soc_cc2cv / average of the currents */
	t_predicted_cc = div_s64((s64)full_soc * act_cap_uah, 100);
	t_predicted_cc = div_s64(t_predicted_cc * (soc_cc2cv - msoc), 100);
	t_predicted_cc *= HOURS_TO_SECONDS;
	t_predicted_cc = div_s64(t_predicted_cc, (ibatt_avg + i_cc2cv) / 2);

skip_cc_estimate:
	fg_dbg(chip, FG_TTF, "t_predicted_cc=%lld\n", t_predicted_cc);

	/* CV estimate starts here */
	if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
		ln_val = ibatt_avg / abs(chip->dt.sys_term_curr_ma);
	else
		ln_val = i_cc2cv / abs(chip->dt.sys_term_curr_ma);

	fg_dbg(chip, FG_TTF, "ln_in=%d\n", ln_val);
	/*
	 * NOTE(review): rc from fg_lerp is not checked; on failure ln_val
	 * retains its input (ratio) value — confirm this fallback is intended.
	 */
	rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), ln_val, &ln_val);
	fg_dbg(chip, FG_TTF, "ln_out=%d\n", ln_val);
	/* CV time = tau * ln(I_start/I_term); tau = rbatt * capacity * 1.3 */
	t_predicted_cv = div_s64((s64)act_cap_uah * rbatt, MICRO_UNIT);
	t_predicted_cv = div_s64(t_predicted_cv * DECI_TAU_SCALE, 10);
	t_predicted_cv = div_s64(t_predicted_cv * ln_val, MILLI_UNIT);
	t_predicted_cv = div_s64(t_predicted_cv * HOURS_TO_SECONDS, MICRO_UNIT);
	fg_dbg(chip, FG_TTF, "t_predicted_cv=%lld\n", t_predicted_cv);
	*val = t_predicted_cc + t_predicted_cv;
	return 0;
}
+
#define CENTI_ICORRECT_C0 105
#define CENTI_ICORRECT_C1 20
/*
 * Estimate the time to empty (in seconds) and store it in *val.
 *
 * Model: remaining charge (msoc% of actual capacity) divided by the
 * average discharge current, corrected by a SOC-dependent factor
 * (CENTI_ICORRECT_C0 + CENTI_ICORRECT_C1 * msoc) / 10000 applied to
 * the current.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int fg_get_time_to_empty(struct fg_chip *chip, int *val)
{
	int rc, ibatt_avg, msoc, act_cap_uah;
	s32 divisor;
	s64 t_predicted;

	rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg);
	if (rc < 0) {
		/* try to get instantaneous current */
		rc = fg_get_battery_current(chip, &ibatt_avg);
		if (rc < 0) {
			pr_err("failed to get battery current, rc=%d\n", rc);
			return rc;
		}
	}

	/* clamp ibatt_avg to 150mA */
	if (ibatt_avg < 150000)
		ibatt_avg = 150000;

	rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah);
	if (rc < 0) {
		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
		return rc;
	}
	/* scale by 1000 (mAh -> uAh, per the variable naming) */
	act_cap_uah *= MILLI_UNIT;

	rc = fg_get_prop_capacity(chip, &msoc);
	if (rc < 0) {
		pr_err("Error in getting capacity, rc=%d\n", rc);
		return rc;
	}

	/* remaining charge, scaled to seconds of current draw */
	t_predicted = div_s64((s64)msoc * act_cap_uah, 100);
	t_predicted *= HOURS_TO_SECONDS;
	/* SOC-corrected average current (uA) used as the divisor */
	divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc;
	divisor = div_s64((s64)divisor * ibatt_avg, 10000);
	/* divisor is always > 0 given the clamps above; guarded anyway */
	if (divisor > 0)
		t_predicted = div_s64(t_predicted, divisor);

	*val = t_predicted;
	return 0;
}
+
/* PSY CALLBACKS STAY HERE */
static int fg_psy_get_property(struct power_supply *psy,
@@ -1859,11 +2257,22 @@ static int fg_psy_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
rc = fg_get_cc_soc_sw(chip, &pval->intval);
break;
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+ rc = fg_get_time_to_full(chip, &pval->intval);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ rc = fg_get_time_to_empty(chip, &pval->intval);
+ break;
default:
+ pr_err("unsupported property %d\n", psp);
+ rc = -EINVAL;
break;
}
- return rc;
+ if (rc < 0)
+ return -ENODATA;
+
+ return 0;
}
static int fg_psy_set_property(struct power_supply *psy,
@@ -1946,6 +2355,8 @@ static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
};
static const struct power_supply_desc fg_psy_desc = {
@@ -2152,6 +2563,37 @@ static int fg_memif_init(struct fg_chip *chip)
/* INTERRUPT HANDLERS STAY HERE */
+static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data)
+{
+ struct fg_chip *chip = data;
+ u8 status;
+ int rc;
+
+ rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &status, 1);
+ if (rc < 0) {
+ pr_err("failed to read addr=0x%04x, rc=%d\n",
+ MEM_IF_INT_RT_STS(chip), rc);
+ return IRQ_HANDLED;
+ }
+
+ fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
+ if (status & MEM_XCP_BIT) {
+ rc = fg_clear_dma_errors_if_any(chip);
+ if (rc < 0) {
+ pr_err("Error in clearing DMA error, rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ mutex_lock(&chip->sram_rw_lock);
+ rc = fg_clear_ima_errors_if_any(chip, true);
+ if (rc < 0 && rc != -EAGAIN)
+ pr_err("Error in checking IMA errors rc:%d\n", rc);
+ mutex_unlock(&chip->sram_rw_lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t fg_vbatt_low_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
@@ -2248,14 +2690,10 @@ static irqreturn_t fg_delta_soc_irq_handler(int irq, void *data)
struct fg_chip *chip = data;
int rc;
+ fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
if (chip->cyc_ctr.en)
schedule_work(&chip->cycle_count_work);
- if (is_charger_available(chip))
- power_supply_changed(chip->batt_psy);
-
- fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
-
if (chip->cl.active)
fg_cap_learning_update(chip);
@@ -2267,6 +2705,9 @@ static irqreturn_t fg_delta_soc_irq_handler(int irq, void *data)
if (rc < 0)
pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+ if (is_charger_available(chip))
+ power_supply_changed(chip->batt_psy);
+
return IRQ_HANDLED;
}
@@ -2274,10 +2715,10 @@ static irqreturn_t fg_empty_soc_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
+ fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
if (is_charger_available(chip))
power_supply_changed(chip->batt_psy);
- fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
return IRQ_HANDLED;
}
@@ -2291,9 +2732,7 @@ static irqreturn_t fg_soc_irq_handler(int irq, void *data)
static irqreturn_t fg_dummy_irq_handler(int irq, void *data)
{
- struct fg_chip *chip = data;
-
- fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+ pr_debug("irq %d triggered\n", irq);
return IRQ_HANDLED;
}
@@ -2367,7 +2806,7 @@ static struct fg_irq_info fg_irqs[FG_IRQ_MAX] = {
},
[MEM_XCP_IRQ] = {
.name = "mem-xcp",
- .handler = fg_dummy_irq_handler,
+ .handler = fg_mem_xcp_irq_handler,
},
[IMA_RDY_IRQ] = {
.name = "ima-rdy",
@@ -2495,7 +2934,7 @@ static int fg_parse_ki_coefficients(struct fg_chip *chip)
}
#define DEFAULT_CUTOFF_VOLT_MV 3200
-#define DEFAULT_EMPTY_VOLT_MV 3100
+#define DEFAULT_EMPTY_VOLT_MV 2800
#define DEFAULT_CHG_TERM_CURR_MA 100
#define DEFAULT_SYS_TERM_CURR_MA -125
#define DEFAULT_DELTA_SOC_THR 1
@@ -2612,7 +3051,7 @@ static int fg_parse_dt(struct fg_chip *chip)
rc = fg_get_batt_profile(chip);
if (rc < 0)
pr_warn("profile for batt_id=%dKOhms not found..using OTP, rc:%d\n",
- chip->batt_id, rc);
+ chip->batt_id_kohms, rc);
/* Read all the optional properties below */
rc = of_property_read_u32(node, "qcom,fg-cutoff-voltage", &temp);
@@ -2783,7 +3222,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
{
struct fg_chip *chip;
struct power_supply_config fg_psy_cfg;
- int rc;
+ int rc, msoc, volt_uv, batt_temp;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -2792,6 +3231,8 @@ static int fg_gen3_probe(struct platform_device *pdev)
chip->dev = &pdev->dev;
chip->debug_mask = &fg_gen3_debug_mask;
chip->irqs = fg_irqs;
+ chip->charge_status = -EINVAL;
+ chip->prev_charge_status = -EINVAL;
chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
if (!chip->regmap) {
dev_err(chip->dev, "Parent regmap is unavailable\n");
@@ -2816,11 +3257,13 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->sram_rw_lock);
mutex_init(&chip->cyc_ctr.lock);
mutex_init(&chip->cl.lock);
+ mutex_init(&chip->batt_avg_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
INIT_WORK(&chip->status_change_work, status_change_work);
INIT_WORK(&chip->cycle_count_work, cycle_count_work);
+ INIT_DELAYED_WORK(&chip->batt_avg_work, batt_avg_work);
rc = fg_memif_init(chip);
if (rc < 0) {
@@ -2876,11 +3319,22 @@ static int fg_gen3_probe(struct platform_device *pdev)
goto exit;
}
+ rc = fg_get_battery_voltage(chip, &volt_uv);
+ if (!rc)
+ rc = fg_get_prop_capacity(chip, &msoc);
+
+ if (!rc)
+ rc = fg_get_battery_temp(chip, &batt_temp);
+
+ if (!rc)
+ pr_info("battery SOC:%d voltage: %duV temp: %d id: %dKOhms\n",
+ msoc, volt_uv, batt_temp, chip->batt_id_kohms);
+
+ device_init_wakeup(chip->dev, true);
if (chip->profile_available)
schedule_delayed_work(&chip->profile_load_work, 0);
- device_init_wakeup(chip->dev, true);
- pr_debug("FG GEN3 driver successfully probed\n");
+ pr_debug("FG GEN3 driver probed successfully\n");
return 0;
exit:
fg_cleanup(chip);
@@ -2902,6 +3356,7 @@ static int fg_gen3_suspend(struct device *dev)
}
}
+ cancel_delayed_work_sync(&chip->batt_avg_work);
return 0;
}
@@ -2920,6 +3375,9 @@ static int fg_gen3_resume(struct device *dev)
}
}
+ fg_circ_buf_clr(&chip->ibatt_circ_buf);
+ fg_circ_buf_clr(&chip->vbatt_circ_buf);
+ schedule_delayed_work(&chip->batt_avg_work, 0);
return 0;
}
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index f9d76c56aa2e..6968ab2ab11c 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -225,6 +225,7 @@ struct smb_dt_props {
s32 step_cc_delta[STEP_CHARGING_MAX_STEPS];
struct device_node *revid_dev_node;
int float_option;
+ int chg_inhibit_thr_mv;
bool hvdcp_disable;
};
@@ -335,6 +336,14 @@ static int smb2_parse_dt(struct smb2 *chip)
chip->dt.hvdcp_disable = of_property_read_bool(node,
"qcom,hvdcp-disable");
+ of_property_read_u32(node, "qcom,chg-inhibit-threshold-mv",
+ &chip->dt.chg_inhibit_thr_mv);
+ if ((chip->dt.chg_inhibit_thr_mv < 0 ||
+ chip->dt.chg_inhibit_thr_mv > 300)) {
+ pr_err("qcom,chg-inhibit-threshold-mv is incorrect\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1040,6 +1049,7 @@ static int smb2_init_hw(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
int rc;
+ u8 stat;
if (chip->dt.no_battery)
chg->fake_capacity = 50;
@@ -1060,6 +1070,21 @@ static int smb2_init_hw(struct smb2 *chip)
chg->otg_cl_ua = chip->dt.otg_cl_ua;
+ rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
+ if (rc < 0) {
+ pr_err("Couldn't read APSD_RESULT_STATUS rc=%d\n", rc);
+ return rc;
+ }
+
+ /* clear the ICL override if it is set */
+ if (stat & ICL_OVERRIDE_LATCH_BIT) {
+ rc = smblib_write(chg, CMD_APSD_REG, ICL_OVERRIDE_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't disable ICL override rc=%d\n", rc);
+ return rc;
+ }
+ }
+
/* votes must be cast before configuring software control */
vote(chg->pl_disable_votable,
PL_INDIRECT_VOTER, true, 0);
@@ -1152,6 +1177,15 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ /* increase VCONN softstart */
+ rc = smblib_masked_write(chg, TYPE_C_CFG_2_REG,
+ VCONN_SOFTSTART_CFG_MASK, VCONN_SOFTSTART_CFG_MASK);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't increase VCONN softstart rc=%d\n",
+ rc);
+ return rc;
+ }
+
rc = smblib_masked_write(chg, QNOVO_PT_ENABLE_CMD_REG,
QNOVO_PT_ENABLE_CMD_BIT, QNOVO_PT_ENABLE_CMD_BIT);
if (rc < 0) {
@@ -1174,13 +1208,12 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
- /* configure PMI stat output to enable and disable parallel charging */
+ /* disable SW STAT override */
rc = smblib_masked_write(chg, STAT_CFG_REG,
- STAT_PARALLEL_CFG_BIT | STAT_SW_OVERRIDE_CFG_BIT,
- STAT_PARALLEL_CFG_BIT);
+ STAT_SW_OVERRIDE_CFG_BIT, 0);
if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't configure signal for parallel rc=%d\n", rc);
+ dev_err(chg->dev, "Couldn't disable SW STAT override rc=%d\n",
+ rc);
return rc;
}
@@ -1213,6 +1246,40 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ switch (chip->dt.chg_inhibit_thr_mv) {
+ case 50:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ CHARGE_INHIBIT_THRESHOLD_50MV);
+ break;
+ case 100:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ CHARGE_INHIBIT_THRESHOLD_100MV);
+ break;
+ case 200:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ CHARGE_INHIBIT_THRESHOLD_200MV);
+ break;
+ case 300:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ CHARGE_INHIBIT_THRESHOLD_300MV);
+ break;
+ case 0:
+ rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+ CHARGER_INHIBIT_BIT, 0);
+ default:
+ break;
+ }
+
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure charge inhibit threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+
return rc;
}
@@ -1241,8 +1308,11 @@ static int smb2_setup_wa_flags(struct smb2 *chip)
switch (pmic_rev_id->pmic_subtype) {
case PMICOBALT_SUBTYPE:
+ chip->chg.wa_flags |= BOOST_BACK_WA;
if (pmic_rev_id->rev4 == PMICOBALT_V1P1_REV4) /* PMI rev 1.1 */
chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT;
+ if (pmic_rev_id->rev4 == PMICOBALT_V2P0_REV4) /* PMI rev 2.0 */
+ chg->wa_flags |= TYPEC_CC2_REMOVAL_WA_BIT;
break;
default:
pr_err("PMIC subtype %d not supported\n",
@@ -1451,7 +1521,8 @@ static struct smb2_irq_info smb2_irqs[] = {
},
{
.name = "switcher-power-ok",
- .handler = smblib_handle_debug,
+ .handler = smblib_handle_switcher_power_ok,
+ .storm_data = {true, 1000, 3},
},
};
@@ -1746,6 +1817,16 @@ static int smb2_remove(struct platform_device *pdev)
return 0;
}
+static void smb2_shutdown(struct platform_device *pdev)
+{
+ struct smb2 *chip = platform_get_drvdata(pdev);
+ struct smb_charger *chg = &chip->chg;
+
+ smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+ HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT, 0);
+ smblib_write(chg, CMD_HVDCP_2_REG, FORCE_5V_BIT);
+}
+
static const struct of_device_id match_table[] = {
{ .compatible = "qcom,qpnp-smb2", },
{ },
@@ -1759,6 +1840,7 @@ static struct platform_driver smb2_driver = {
},
.probe = smb2_probe,
.remove = smb2_remove,
+ .shutdown = smb2_shutdown,
};
module_platform_driver(smb2_driver);
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index 198e77469bbe..6aae7d49271f 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -490,7 +490,7 @@ static int try_rerun_apsd_for_hvdcp(struct smb_charger *chg)
/* ensure hvdcp is enabled */
if (!get_effective_result(chg->hvdcp_disable_votable)) {
apsd_result = smblib_get_apsd_result(chg);
- if (apsd_result->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
+ if (apsd_result->bit & (QC_2P0_BIT | QC_3P0_BIT)) {
/* rerun APSD */
smblib_dbg(chg, PR_MISC, "rerun APSD\n");
smblib_masked_write(chg, CMD_APSD_REG,
@@ -596,7 +596,11 @@ static int smblib_usb_suspend_vote_callback(struct votable *votable, void *data,
{
struct smb_charger *chg = data;
- return smblib_set_usb_suspend(chg, suspend);
+ /* resume input if suspend is invalid */
+ if (suspend < 0)
+ suspend = 0;
+
+ return smblib_set_usb_suspend(chg, (bool)suspend);
}
static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
@@ -604,10 +608,11 @@ static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
{
struct smb_charger *chg = data;
+ /* resume input if suspend is invalid */
if (suspend < 0)
- suspend = false;
+ suspend = 0;
- return smblib_set_dc_suspend(chg, suspend);
+ return smblib_set_dc_suspend(chg, (bool)suspend);
}
static int smblib_fcc_max_vote_callback(struct votable *votable, void *data,
@@ -956,12 +961,13 @@ static int smblib_apsd_disable_vote_callback(struct votable *votable,
* OTG REGULATOR *
*****************/
-#define OTG_SOFT_START_DELAY_MS 20
+#define MAX_SOFTSTART_TRIES 2
int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
u8 stat;
int rc = 0;
+ int tries = MAX_SOFTSTART_TRIES;
rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
ENG_BUCKBOOST_HALT1_8_MODE_BIT,
@@ -978,14 +984,25 @@ int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
return rc;
}
- msleep(OTG_SOFT_START_DELAY_MS);
- rc = smblib_read(chg, OTG_STATUS_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read OTG_STATUS_REG rc=%d\n", rc);
- return rc;
- }
- if (stat & BOOST_SOFTSTART_DONE_BIT)
- smblib_otg_cl_config(chg, chg->otg_cl_ua);
+ /* waiting for boost readiness, usually ~1ms, 2ms in worst case */
+ do {
+ usleep_range(1000, 1100);
+
+ rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read OTG_STATUS_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ if (stat & BOOST_SOFTSTART_DONE_BIT) {
+ smblib_otg_cl_config(chg, chg->otg_cl_ua);
+ break;
+ }
+ } while (--tries);
+
+ if (tries == 0)
+ smblib_err(chg, "Timeout waiting for boost softstart rc=%d\n",
+ rc);
return rc;
}
@@ -1447,21 +1464,17 @@ int smblib_set_prop_system_temp_level(struct smb_charger *chg,
int smblib_get_prop_dc_present(struct smb_charger *chg,
union power_supply_propval *val)
{
- int rc = 0;
+ int rc;
u8 stat;
- rc = smblib_read(chg, DC_INT_RT_STS_REG, &stat);
+ rc = smblib_read(chg, DCIN_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
- smblib_err(chg, "Couldn't read DC_INT_RT_STS_REG rc=%d\n",
- rc);
+ smblib_err(chg, "Couldn't read DCIN_RT_STS rc=%d\n", rc);
return rc;
}
- smblib_dbg(chg, PR_REGISTER, "DC_INT_RT_STS_REG = 0x%02x\n",
- stat);
val->intval = (bool)(stat & DCIN_PLUGIN_RT_STS_BIT);
-
- return rc;
+ return 0;
}
int smblib_get_prop_dc_online(struct smb_charger *chg,
@@ -1517,20 +1530,17 @@ int smblib_set_prop_dc_current_max(struct smb_charger *chg,
int smblib_get_prop_usb_present(struct smb_charger *chg,
union power_supply_propval *val)
{
- int rc = 0;
+ int rc;
u8 stat;
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read USBIN_RT_STS rc=%d\n", rc);
return rc;
}
- smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n",
- stat);
-
- val->intval = (bool)(stat & CC_ATTACHED_BIT);
- return rc;
+ val->intval = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+ return 0;
}
int smblib_get_prop_usb_online(struct smb_charger *chg,
@@ -1997,6 +2007,45 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
"Couldn't enable vconn on CC line rc=%d\n", rc);
return rc;
}
+
+ rc = vote(chg->usb_icl_votable, PD_VOTER, true, USBIN_500MA);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't vote for USB ICL rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+ USBIN_MODE_CHG_BIT, USBIN_MODE_CHG_BIT);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't change USB mode rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smblib_masked_write(chg, CMD_APSD_REG,
+ ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't override APSD rc=%d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = smblib_masked_write(chg, CMD_APSD_REG,
+ ICL_OVERRIDE_BIT, 0);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't override APSD rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+ USBIN_MODE_CHG_BIT, 0);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't change USB mode rc=%d\n", rc);
+ return rc;
+ }
}
/* CC pin selection s/w override in PD session; h/w otherwise. */
@@ -2027,6 +2076,146 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
return rc;
}
+int smblib_reg_block_update(struct smb_charger *chg,
+ struct reg_info *entry)
+{
+ int rc = 0;
+
+ while (entry && entry->reg) {
+ rc = smblib_read(chg, entry->reg, &entry->bak);
+ if (rc < 0) {
+ dev_err(chg->dev, "Error in reading %s rc=%d\n",
+ entry->desc, rc);
+ break;
+ }
+ entry->bak &= entry->mask;
+
+ rc = smblib_masked_write(chg, entry->reg,
+ entry->mask, entry->val);
+ if (rc < 0) {
+ dev_err(chg->dev, "Error in writing %s rc=%d\n",
+ entry->desc, rc);
+ break;
+ }
+ entry++;
+ }
+
+ return rc;
+}
+
+int smblib_reg_block_restore(struct smb_charger *chg,
+ struct reg_info *entry)
+{
+ int rc = 0;
+
+ while (entry && entry->reg) {
+ rc = smblib_masked_write(chg, entry->reg,
+ entry->mask, entry->bak);
+ if (rc < 0) {
+ dev_err(chg->dev, "Error in writing %s rc=%d\n",
+ entry->desc, rc);
+ break;
+ }
+ entry++;
+ }
+
+ return rc;
+}
+
+static struct reg_info cc2_detach_settings[] = {
+ {
+ .reg = TYPE_C_CFG_2_REG,
+ .mask = TYPE_C_UFP_MODE_BIT | EN_TRY_SOURCE_MODE_BIT,
+ .val = TYPE_C_UFP_MODE_BIT,
+ .desc = "TYPE_C_CFG_2_REG",
+ },
+ {
+ .reg = TYPE_C_CFG_3_REG,
+ .mask = EN_TRYSINK_MODE_BIT,
+ .val = 0,
+ .desc = "TYPE_C_CFG_3_REG",
+ },
+ {
+ .reg = TAPER_TIMER_SEL_CFG_REG,
+ .mask = TYPEC_SPARE_CFG_BIT,
+ .val = TYPEC_SPARE_CFG_BIT,
+ .desc = "TAPER_TIMER_SEL_CFG_REG",
+ },
+ {
+ .reg = TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ .mask = VCONN_EN_ORIENTATION_BIT,
+ .val = 0,
+ .desc = "TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG",
+ },
+ {
+ .reg = MISC_CFG_REG,
+ .mask = TCC_DEBOUNCE_20MS_BIT,
+ .val = TCC_DEBOUNCE_20MS_BIT,
+ .desc = "Tccdebounce time"
+ },
+ {
+ },
+};
+
+static int smblib_cc2_sink_removal_enter(struct smb_charger *chg)
+{
+ int rc = 0;
+ union power_supply_propval cc2_val = {0, };
+
+ if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
+ return rc;
+
+ if (chg->cc2_sink_detach_flag != CC2_SINK_NONE)
+ return rc;
+
+ rc = smblib_get_prop_typec_cc_orientation(chg, &cc2_val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get cc orientation rc=%d\n", rc);
+ return rc;
+ }
+ if (cc2_val.intval == 1)
+ return rc;
+
+ rc = smblib_get_prop_typec_mode(chg, &cc2_val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (cc2_val.intval) {
+ case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+ smblib_reg_block_update(chg, cc2_detach_settings);
+ chg->cc2_sink_detach_flag = CC2_SINK_STD;
+ schedule_work(&chg->rdstd_cc2_detach_work);
+ break;
+ case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+ case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+ chg->cc2_sink_detach_flag = CC2_SINK_MEDIUM_HIGH;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static int smblib_cc2_sink_removal_exit(struct smb_charger *chg)
+{
+ int rc = 0;
+
+ if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
+ return rc;
+
+ if (chg->cc2_sink_detach_flag == CC2_SINK_STD) {
+ cancel_work_sync(&chg->rdstd_cc2_detach_work);
+ smblib_reg_block_restore(chg, cc2_detach_settings);
+ }
+
+ chg->cc2_sink_detach_flag = CC2_SINK_NONE;
+
+ return rc;
+}
+
int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
const union power_supply_propval *val)
{
@@ -2035,9 +2224,24 @@ int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
EXIT_SNK_BASED_ON_CC_BIT,
(val->intval) ? EXIT_SNK_BASED_ON_CC_BIT : 0);
+ if (rc < 0) {
+ smblib_err(chg, "Could not set EXIT_SNK_BASED_ON_CC rc=%d\n",
+ rc);
+ return rc;
+ }
vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, val->intval, 0);
+ if (val->intval)
+ rc = smblib_cc2_sink_removal_enter(chg);
+ else
+ rc = smblib_cc2_sink_removal_exit(chg);
+
+ if (rc < 0) {
+ smblib_err(chg, "Could not detect cc2 removal rc=%d\n", rc);
+ return rc;
+ }
+
return rc;
}
@@ -2208,6 +2412,7 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
struct smb_charger *chg = irq_data->parent_data;
int rc;
u8 stat;
+ bool vbus_rising;
rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
@@ -2215,9 +2420,9 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
return IRQ_HANDLED;
}
- chg->vbus_present = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+ vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
smblib_set_opt_freq_buck(chg,
- chg->vbus_present ? FSW_600HZ_FOR_5V : FSW_1MHZ_FOR_REMOVAL);
+ vbus_rising ? FSW_600HZ_FOR_5V : FSW_1MHZ_FOR_REMOVAL);
/* fetch the DPDM regulator */
if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
@@ -2230,11 +2435,8 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
}
}
- if (!chg->dpdm_reg)
- goto skip_dpdm_float;
-
- if (chg->vbus_present) {
- if (!regulator_is_enabled(chg->dpdm_reg)) {
+ if (vbus_rising) {
+ if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
rc = regulator_enable(chg->dpdm_reg);
if (rc < 0)
@@ -2242,7 +2444,14 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
rc);
}
} else {
- if (regulator_is_enabled(chg->dpdm_reg)) {
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ vote(chg->usb_suspend_votable,
+ BOOST_BACK_VOTER, false, 0);
+ vote(chg->dc_suspend_votable,
+ BOOST_BACK_VOTER, false, 0);
+ }
+
+ if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
rc = regulator_disable(chg->dpdm_reg);
if (rc < 0)
@@ -2251,10 +2460,9 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
}
}
-skip_dpdm_float:
power_supply_changed(chg->usb_psy);
smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s %s\n",
- irq_data->name, chg->vbus_present ? "attached" : "detached");
+ irq_data->name, vbus_rising ? "attached" : "detached");
return IRQ_HANDLED;
}
@@ -2621,6 +2829,24 @@ static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
if (rc < 0)
smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
+ /*
+ * HW BUG - after cable is removed, medium or high rd reading
+ * falls to std. Use it for signal of typec cc detachment in
+ * software WA.
+ */
+ if (chg->cc2_sink_detach_flag == CC2_SINK_MEDIUM_HIGH
+ && pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
+
+ chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE;
+
+ rc = smblib_masked_write(chg,
+ TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ EXIT_SNK_BASED_ON_CC_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't get prop typec mode rc=%d\n",
+ rc);
+ }
+
smblib_dbg(chg, PR_INTERRUPT, "IRQ: debounce-done %s; Type-C %s detected\n",
rising ? "rising" : "falling",
smblib_typec_mode_name[pval.intval]);
@@ -2634,6 +2860,10 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
u8 stat;
bool debounce_done, sink_attached, legacy_cable;
+ /* WA - not when PD hard_reset WIP on cc2 in sink mode */
+ if (chg->cc2_sink_detach_flag == CC2_SINK_STD)
+ return IRQ_HANDLED;
+
rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
if (rc < 0) {
smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
@@ -2683,6 +2913,39 @@ irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data)
return IRQ_HANDLED;
}
+irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ int rc;
+ u8 stat;
+
+ if (!(chg->wa_flags & BOOST_BACK_WA))
+ return IRQ_HANDLED;
+
+ rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ if ((stat & USE_USBIN_BIT) &&
+ get_effective_result(chg->usb_suspend_votable))
+ return IRQ_HANDLED;
+
+ if ((stat & USE_DCIN_BIT) &&
+ get_effective_result(chg->dc_suspend_votable))
+ return IRQ_HANDLED;
+
+ if (is_storming(&irq_data->storm_data)) {
+ smblib_dbg(chg, PR_MISC, "reverse boost detected; suspending input\n");
+ vote(chg->usb_suspend_votable, BOOST_BACK_VOTER, true, 0);
+ vote(chg->dc_suspend_votable, BOOST_BACK_VOTER, true, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
/***************
* Work Queues *
***************/
@@ -2743,7 +3006,9 @@ static void smblib_pl_taper_work(struct work_struct *work)
union power_supply_propval pval = {0, };
int rc;
+ smblib_dbg(chg, PR_PARALLEL, "starting parallel taper work\n");
if (chg->pl.slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
+ smblib_dbg(chg, PR_PARALLEL, "parallel taper is done\n");
vote(chg->pl_disable_votable, TAPER_END_VOTER, true, 0);
goto done;
}
@@ -2755,6 +3020,7 @@ static void smblib_pl_taper_work(struct work_struct *work)
}
if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+ smblib_dbg(chg, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
vote(chg->awake_votable, PL_TAPER_WORK_RUNNING_VOTER, true, 0);
/* Reduce the taper percent by 25 percent */
chg->pl.taper_pct = chg->pl.taper_pct
@@ -2768,6 +3034,8 @@ static void smblib_pl_taper_work(struct work_struct *work)
/*
* Master back to Fast Charge, get out of this round of taper reduction
*/
+ smblib_dbg(chg, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
+
done:
vote(chg->awake_votable, PL_TAPER_WORK_RUNNING_VOTER, false, 0);
}
@@ -2780,6 +3048,74 @@ static void clear_hdc_work(struct work_struct *work)
chg->is_hdc = 0;
}
+static void rdstd_cc2_detach_work(struct work_struct *work)
+{
+ int rc;
+ u8 stat;
+ struct smb_irq_data irq_data = {NULL, "cc2-removal-workaround"};
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ rdstd_cc2_detach_work);
+
+ /*
+ * WA steps -
+ * 1. Enable both UFP and DFP, wait for 10ms.
+ * 2. Disable DFP, wait for 30ms.
+ * 3. Removal detected if both TYPEC_DEBOUNCE_DONE_STATUS
+ * and TIMER_STAGE bits are gone, otherwise repeat all by
+ * work rescheduling.
+ * Note, work will be cancelled when pd_hard_reset is 0.
+ */
+
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ UFP_EN_CMD_BIT | DFP_EN_CMD_BIT,
+ UFP_EN_CMD_BIT | DFP_EN_CMD_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write TYPE_C_CTRL_REG rc=%d\n", rc);
+ return;
+ }
+
+ usleep_range(10000, 11000);
+
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ UFP_EN_CMD_BIT | DFP_EN_CMD_BIT,
+ UFP_EN_CMD_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write TYPE_C_CTRL_REG rc=%d\n", rc);
+ return;
+ }
+
+ usleep_range(30000, 31000);
+
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+ rc);
+ return;
+ }
+ if (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT)
+ goto rerun;
+
+ rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't read TYPE_C_STATUS_5_REG rc=%d\n", rc);
+ return;
+ }
+ if (stat & TIMER_STAGE_2_BIT)
+ goto rerun;
+
+ /* Bingo, cc2 removal detected */
+ smblib_reg_block_restore(chg, cc2_detach_settings);
+ chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE;
+ irq_data.parent_data = chg;
+ smblib_handle_usb_typec_change(0, &irq_data);
+
+ return;
+
+rerun:
+ schedule_work(&chg->rdstd_cc2_detach_work);
+}
+
static int smblib_create_votables(struct smb_charger *chg)
{
int rc = 0;
@@ -2962,6 +3298,7 @@ int smblib_init(struct smb_charger *chg)
mutex_init(&chg->write_lock);
INIT_WORK(&chg->bms_update_work, bms_update_work);
INIT_WORK(&chg->pl_detect_work, smblib_pl_detect_work);
+ INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
INIT_DELAYED_WORK(&chg->pl_taper_work, smblib_pl_taper_work);
INIT_DELAYED_WORK(&chg->step_soc_req_work, step_soc_req_work);
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index 4be06ffcfb25..a0237412ee8b 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -46,6 +46,7 @@ enum print_reason {
#define VBUS_CC_SHORT_VOTER "VBUS_CC_SHORT_VOTER"
#define LEGACY_CABLE_VOTER "LEGACY_CABLE_VOTER"
#define PD_INACTIVE_VOTER "PD_INACTIVE_VOTER"
+#define BOOST_BACK_VOTER "BOOST_BACK_VOTER"
enum smb_mode {
PARALLEL_MASTER = 0,
@@ -53,8 +54,17 @@ enum smb_mode {
NUM_MODES,
};
+enum cc2_sink_type {
+ CC2_SINK_NONE = 0,
+ CC2_SINK_STD,
+ CC2_SINK_MEDIUM_HIGH,
+ CC2_SINK_WA_DONE,
+};
+
enum {
- QC_CHARGER_DETECTION_WA_BIT = BIT(0),
+ QC_CHARGER_DETECTION_WA_BIT = BIT(0),
+ BOOST_BACK_WA = BIT(1),
+ TYPEC_CC2_REMOVAL_WA_BIT = BIT(2),
};
struct smb_regulator {
@@ -116,6 +126,14 @@ struct smb_iio {
struct iio_channel *batt_i_chan;
};
+struct reg_info {
+ u16 reg;
+ u8 mask;
+ u8 val;
+ u8 bak;
+ const char *desc;
+};
+
struct smb_charger {
struct device *dev;
char *name;
@@ -167,6 +185,7 @@ struct smb_charger {
/* work */
struct work_struct bms_update_work;
struct work_struct pl_detect_work;
+ struct work_struct rdstd_cc2_detach_work;
struct delayed_work hvdcp_detect_work;
struct delayed_work ps_change_timeout_work;
struct delayed_work pl_taper_work;
@@ -177,7 +196,6 @@ struct smb_charger {
int voltage_min_uv;
int voltage_max_uv;
int pd_active;
- bool vbus_present;
bool system_suspend_supported;
int system_temp_level;
@@ -195,6 +213,7 @@ struct smb_charger {
/* workaround flag */
u32 wa_flags;
+ enum cc2_sink_type cc2_sink_detach_flag;
};
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -240,6 +259,7 @@ irqreturn_t smblib_handle_icl_change(int irq, void *data);
irqreturn_t smblib_handle_usb_typec_change(int irq, void *data);
irqreturn_t smblib_handle_dc_plugin(int irq, void *data);
irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data);
+irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data);
int smblib_get_prop_input_suspend(struct smb_charger *chg,
union power_supply_propval *val);
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index a74fcf730a8c..c2a2b0c86d73 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -184,6 +184,10 @@ enum {
#define CHARGE_INHIBIT_THRESHOLD_CFG_REG (CHGR_BASE + 0x72)
#define CHARGE_INHIBIT_THRESHOLD_MASK GENMASK(1, 0)
+#define CHARGE_INHIBIT_THRESHOLD_50MV 0
+#define CHARGE_INHIBIT_THRESHOLD_100MV 1
+#define CHARGE_INHIBIT_THRESHOLD_200MV 2
+#define CHARGE_INHIBIT_THRESHOLD_300MV 3
#define RECHARGE_THRESHOLD_CFG_REG (CHGR_BASE + 0x73)
#define RECHARGE_THRESHOLD_MASK GENMASK(1, 0)
@@ -701,9 +705,6 @@ enum {
#define WIPWR_RANGE_STATUS_REG (DCIN_BASE + 0x08)
#define WIPWR_RANGE_STATUS_MASK GENMASK(4, 0)
-#define DC_INT_RT_STS_REG (DCIN_BASE + 0x10)
-#define DCIN_PLUGIN_RT_STS_BIT BIT(4)
-
/* DCIN Interrupt Bits */
#define WIPWR_VOLTAGE_RANGE_RT_STS_BIT BIT(7)
#define DCIN_ICL_CHANGE_RT_STS_BIT BIT(6)
@@ -901,6 +902,7 @@ enum {
#define MISC_CFG_REG (MISC_BASE + 0x52)
#define GSM_PA_ON_ADJ_SEL_BIT BIT(0)
+#define TCC_DEBOUNCE_20MS_BIT BIT(5)
#define SNARL_BARK_BITE_WD_CFG_REG (MISC_BASE + 0x53)
#define BITE_WDOG_DISABLE_CHARGING_CFG_BIT BIT(7)
diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c
index 9a87ff5fb081..9dc528a6bb45 100644
--- a/drivers/power/qcom-charger/smb138x-charger.c
+++ b/drivers/power/qcom-charger/smb138x-charger.c
@@ -397,6 +397,7 @@ static int smb138x_init_batt_psy(struct smb138x *chip)
*****************************/
static enum power_supply_property smb138x_parallel_props[] = {
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_CHARGING_ENABLED,
POWER_SUPPLY_PROP_PIN_ENABLED,
POWER_SUPPLY_PROP_INPUT_SUSPEND,
@@ -417,6 +418,9 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
u8 temp;
switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ rc = smblib_get_prop_batt_charge_type(chg, val);
+ break;
case POWER_SUPPLY_PROP_CHARGING_ENABLED:
rc = smblib_read(chg, BATTERY_CHARGER_STATUS_5_REG,
&temp);
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index 2e41401f8580..3d0be1f8a5b5 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -32,6 +32,7 @@
#define QPNP_LABIBB_REGULATOR_DRIVER_NAME "qcom,qpnp-labibb-regulator"
+#define REG_REVISION_2 0x01
#define REG_PERPH_TYPE 0x04
#define QPNP_LAB_TYPE 0x24
@@ -60,6 +61,7 @@
#define REG_LAB_PRECHARGE_CTL 0x5E
#define REG_LAB_SOFT_START_CTL 0x5F
#define REG_LAB_SPARE_CTL 0x60
+#define REG_LAB_PFM_CTL 0x62
/* LAB register bits definitions */
@@ -91,9 +93,9 @@
#define LAB_IBB_EN_RDY_EN BIT(7)
/* REG_LAB_CURRENT_LIMIT */
-#define LAB_CURRENT_LIMIT_BITS 3
-#define LAB_CURRENT_LIMIT_MASK ((1 << LAB_CURRENT_LIMIT_BITS) - 1)
-#define LAB_CURRENT_LIMIT_EN BIT(7)
+#define LAB_CURRENT_LIMIT_MASK GENMASK(2, 0)
+#define LAB_CURRENT_LIMIT_EN_BIT BIT(7)
+#define LAB_OVERRIDE_CURRENT_MAX_BIT BIT(3)
/* REG_LAB_CURRENT_SENSE */
#define LAB_CURRENT_SENSE_GAIN_BITS 2
@@ -129,6 +131,9 @@
#define LAB_SPARE_TOUCH_WAKE_BIT BIT(3)
#define LAB_SPARE_DISABLE_SCP_BIT BIT(0)
+/* REG_LAB_PFM_CTL */
+#define LAB_PFM_EN_BIT BIT(7)
+
/* IBB register offset definitions */
#define REG_IBB_REVISION4 0x03
#define REG_IBB_STATUS1 0x08
@@ -253,12 +258,6 @@
#define SWIRE_DEFAULT_2ND_CMD_DLY_MS 20
#define SWIRE_DEFAULT_IBB_PS_ENABLE_DLY_MS 200
-enum pmic_subtype {
- PMI8994 = 10,
- PMI8950 = 17,
- PMI8996 = 19,
-};
-
/**
* enum qpnp_labibb_mode - working mode of LAB/IBB regulators
* %QPNP_LABIBB_LCD_MODE: configure LAB and IBB regulators
@@ -350,6 +349,10 @@ static const int lab_current_limit_plan[] = {
400,
600,
800,
+ 1000,
+ 1200,
+ 1400,
+ 1600,
};
static const char * const lab_current_sense_plan[] = {
@@ -477,6 +480,8 @@ struct qpnp_labibb {
struct pmic_revid_data *pmic_rev_id;
u16 lab_base;
u16 ibb_base;
+ u8 lab_dig_major;
+ u8 ibb_dig_major;
struct lab_regulator lab_vreg;
struct ibb_regulator ibb_vreg;
enum qpnp_labibb_mode mode;
@@ -487,6 +492,7 @@ struct qpnp_labibb {
bool swire_control;
bool ttw_force_lab_on;
bool skip_2nd_swire_cmd;
+ bool pfm_enable;
u32 swire_2nd_cmd_delay;
u32 swire_ibb_ps_enable_delay;
};
@@ -539,60 +545,58 @@ static struct settings lab_settings[LAB_SETTINGS_MAX] = {
SETTING(LAB_RDSON_MNGMNT, false),
};
-static int
-qpnp_labibb_read(struct qpnp_labibb *labibb, u8 *val,
- u16 base, int count)
+static int qpnp_labibb_read(struct qpnp_labibb *labibb, u8 *val, u16 address,
+ int count)
{
int rc = 0;
struct platform_device *pdev = labibb->pdev;
- if (base == 0) {
- pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
- base, to_spmi_device(pdev->dev.parent)->usid, rc);
+ if (address == 0) {
+ pr_err("address cannot be zero address=0x%02x sid=0x%02x rc=%d\n",
+ address, to_spmi_device(pdev->dev.parent)->usid, rc);
return -EINVAL;
}
- rc = regmap_bulk_read(labibb->regmap, base, val, count);
+ rc = regmap_bulk_read(labibb->regmap, address, val, count);
if (rc) {
- pr_err("SPMI read failed base=0x%02x sid=0x%02x rc=%d\n", base,
- to_spmi_device(pdev->dev.parent)->usid, rc);
+ pr_err("SPMI read failed address=0x%02x sid=0x%02x rc=%d\n",
+ address, to_spmi_device(pdev->dev.parent)->usid, rc);
return rc;
}
+
return 0;
}
-static int
-qpnp_labibb_write(struct qpnp_labibb *labibb, u16 base,
- u8 *val, int count)
+static int qpnp_labibb_write(struct qpnp_labibb *labibb, u16 address, u8 *val,
+ int count)
{
int rc = 0;
struct platform_device *pdev = labibb->pdev;
- if (base == 0) {
- pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
- base, to_spmi_device(pdev->dev.parent)->usid, rc);
+ if (address == 0) {
+ pr_err("address cannot be zero address=0x%02x sid=0x%02x rc=%d\n",
+ address, to_spmi_device(pdev->dev.parent)->usid, rc);
return -EINVAL;
}
- rc = regmap_bulk_write(labibb->regmap, base, val, count);
+ rc = regmap_bulk_write(labibb->regmap, address, val, count);
if (rc) {
- pr_err("write failed base=0x%02x sid=0x%02x rc=%d\n",
- base, to_spmi_device(pdev->dev.parent)->usid, rc);
+ pr_err("write failed address=0x%02x sid=0x%02x rc=%d\n",
+ address, to_spmi_device(pdev->dev.parent)->usid, rc);
return rc;
}
return 0;
}
-static int
-qpnp_labibb_masked_write(struct qpnp_labibb *labibb, u16 base,
- u8 mask, u8 val)
+static int qpnp_labibb_masked_write(struct qpnp_labibb *labibb, u16 address,
+ u8 mask, u8 val)
{
int rc;
- rc = regmap_update_bits(labibb->regmap, base, mask, val);
+ rc = regmap_update_bits(labibb->regmap, address, mask, val);
if (rc) {
- pr_err("spmi write failed: addr=%03X, rc=%d\n", base, rc);
+ pr_err("spmi write failed: addr=%03X, rc=%d\n", address, rc);
return rc;
}
@@ -709,18 +713,24 @@ static int qpnp_lab_dt_init(struct qpnp_labibb *labibb,
u8 i, val;
u32 tmp;
- if (labibb->mode == QPNP_LABIBB_LCD_MODE)
- val = REG_LAB_IBB_LCD_MODE;
- else
- val = REG_LAB_IBB_AMOLED_MODE;
+ /*
+ * Do not configure LCD_AMOLED_SEL for pmicobalt as it will be done by
+ * GPIO selector.
+ */
+ if (labibb->pmic_rev_id->pmic_subtype != PMICOBALT_SUBTYPE) {
+ if (labibb->mode == QPNP_LABIBB_LCD_MODE)
+ val = REG_LAB_IBB_LCD_MODE;
+ else
+ val = REG_LAB_IBB_AMOLED_MODE;
- rc = qpnp_labibb_sec_write(labibb, labibb->lab_base,
- REG_LAB_LCD_AMOLED_SEL, &val, 1);
+ rc = qpnp_labibb_sec_write(labibb, labibb->lab_base,
+ REG_LAB_LCD_AMOLED_SEL, &val, 1);
- if (rc) {
- pr_err("qpnp_lab_sec_write register %x failed rc = %d\n",
- REG_LAB_LCD_AMOLED_SEL, rc);
- return rc;
+ if (rc) {
+ pr_err("qpnp_lab_sec_write register %x failed rc = %d\n",
+ REG_LAB_LCD_AMOLED_SEL, rc);
+ return rc;
+ }
}
val = 0;
@@ -785,7 +795,7 @@ static int qpnp_lab_dt_init(struct qpnp_labibb *labibb,
if (of_property_read_bool(of_node,
"qcom,qpnp-lab-limit-max-current-enable"))
- val |= LAB_CURRENT_LIMIT_EN;
+ val |= LAB_CURRENT_LIMIT_EN_BIT;
rc = qpnp_labibb_write(labibb, labibb->lab_base +
REG_LAB_CURRENT_LIMIT, &val, 1);
@@ -938,6 +948,88 @@ static int qpnp_lab_dt_init(struct qpnp_labibb *labibb,
return rc;
}
+#define LAB_CURRENT_MAX_1600MA 0x7
+#define LAB_CURRENT_MAX_400MA 0x1
+static int qpnp_lab_pfm_disable(struct qpnp_labibb *labibb)
+{
+ int rc = 0;
+ u8 val, mask;
+
+ mutex_lock(&(labibb->lab_vreg.lab_mutex));
+ if (!labibb->pfm_enable) {
+ pr_debug("PFM already disabled\n");
+ goto out;
+ }
+
+ val = 0;
+ mask = LAB_PFM_EN_BIT;
+ rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+ REG_LAB_PFM_CTL, mask, val);
+ if (rc < 0) {
+ pr_err("Write register %x failed rc = %d\n",
+ REG_LAB_PFM_CTL, rc);
+ goto out;
+ }
+
+ val = LAB_CURRENT_MAX_1600MA;
+ mask = LAB_OVERRIDE_CURRENT_MAX_BIT | LAB_CURRENT_LIMIT_MASK;
+ rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+ REG_LAB_CURRENT_LIMIT, mask, val);
+ if (rc < 0) {
+ pr_err("Write register %x failed rc = %d\n",
+ REG_LAB_CURRENT_LIMIT, rc);
+ goto out;
+ }
+
+ labibb->pfm_enable = false;
+out:
+ mutex_unlock(&(labibb->lab_vreg.lab_mutex));
+ return rc;
+}
+
+static int qpnp_lab_pfm_enable(struct qpnp_labibb *labibb)
+{
+ int rc = 0;
+ u8 val, mask;
+
+ mutex_lock(&(labibb->lab_vreg.lab_mutex));
+ if (labibb->pfm_enable) {
+ pr_debug("PFM already enabled\n");
+ goto out;
+ }
+
+ /* Wait for ~100uS */
+ usleep_range(100, 105);
+
+ val = LAB_OVERRIDE_CURRENT_MAX_BIT | LAB_CURRENT_MAX_400MA;
+ mask = LAB_OVERRIDE_CURRENT_MAX_BIT | LAB_CURRENT_LIMIT_MASK;
+ rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+ REG_LAB_CURRENT_LIMIT, mask, val);
+ if (rc < 0) {
+ pr_err("Write register %x failed rc = %d\n",
+ REG_LAB_CURRENT_LIMIT, rc);
+ goto out;
+ }
+
+ /* Wait for ~100uS */
+ usleep_range(100, 105);
+
+ val = LAB_PFM_EN_BIT;
+ mask = LAB_PFM_EN_BIT;
+ rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+ REG_LAB_PFM_CTL, mask, val);
+ if (rc < 0) {
+ pr_err("Write register %x failed rc = %d\n",
+ REG_LAB_PFM_CTL, rc);
+ goto out;
+ }
+
+ labibb->pfm_enable = true;
+out:
+ mutex_unlock(&(labibb->lab_vreg.lab_mutex));
+ return rc;
+}
+
static int qpnp_labibb_restore_settings(struct qpnp_labibb *labibb)
{
int rc, i;
@@ -1172,7 +1264,7 @@ static int qpnp_labibb_regulator_ttw_mode_enter(struct qpnp_labibb *labibb)
}
val = LAB_SPARE_DISABLE_SCP_BIT;
- if (labibb->pmic_rev_id->pmic_subtype != PMI8950)
+ if (labibb->pmic_rev_id->pmic_subtype != PMI8950_SUBTYPE)
val |= LAB_SPARE_TOUCH_WAKE_BIT;
rc = qpnp_labibb_write(labibb, labibb->lab_base +
REG_LAB_SPARE_CTL, &val, 1);
@@ -1199,10 +1291,10 @@ static int qpnp_labibb_regulator_ttw_mode_enter(struct qpnp_labibb *labibb)
}
switch (labibb->pmic_rev_id->pmic_subtype) {
- case PMI8996:
+ case PMI8996_SUBTYPE:
rc = qpnp_labibb_ttw_enter_ibb_pmi8996(labibb);
break;
- case PMI8950:
+ case PMI8950_SUBTYPE:
rc = qpnp_labibb_ttw_enter_ibb_pmi8950(labibb);
break;
}
@@ -1282,9 +1374,9 @@ static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb)
}
switch (labibb->pmic_rev_id->pmic_subtype) {
- case PMI8996:
- case PMI8994:
- case PMI8950:
+ case PMI8996_SUBTYPE:
+ case PMI8994_SUBTYPE:
+ case PMI8950_SUBTYPE:
rc = qpnp_labibb_ttw_exit_ibb_common(labibb);
break;
}
@@ -1437,6 +1529,15 @@ static int qpnp_labibb_regulator_disable(struct qpnp_labibb *labibb)
return -EINVAL;
}
+ if (labibb->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE &&
+ labibb->mode == QPNP_LABIBB_LCD_MODE) {
+ rc = qpnp_lab_pfm_disable(labibb);
+ if (rc < 0) {
+ pr_err("Error in disabling PFM, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
labibb->lab_vreg.vreg_enabled = 0;
labibb->ibb_vreg.vreg_enabled = 0;
@@ -1646,9 +1747,17 @@ static irqreturn_t lab_vreg_ok_handler(int irq, void *_labibb)
struct qpnp_labibb *labibb = _labibb;
int rc;
- rc = qpnp_skip_swire_command(labibb);
- if (rc)
- pr_err("Failed in 'qpnp_skip_swire_command' rc=%d\n", rc);
+ if (labibb->skip_2nd_swire_cmd && labibb->lab_dig_major < 2) {
+ rc = qpnp_skip_swire_command(labibb);
+ if (rc < 0)
+ pr_err("Failed in 'qpnp_skip_swire_command' rc=%d\n",
+ rc);
+ } else if (labibb->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE &&
+ labibb->mode == QPNP_LABIBB_LCD_MODE) {
+ rc = qpnp_lab_pfm_enable(labibb);
+ if (rc < 0)
+ pr_err("Failed to config PFM, rc=%d\n", rc);
+ }
return IRQ_HANDLED;
}
@@ -1663,6 +1772,23 @@ static int qpnp_lab_regulator_get_voltage(struct regulator_dev *rdev)
return labibb->lab_vreg.curr_volt;
}
+static bool is_lab_vreg_ok_irq_available(struct qpnp_labibb *labibb)
+{
+ /*
+ * LAB VREG_OK interrupt is used only to skip 2nd SWIRE command in
+ * dig_major < 2 targets. For pmicobalt, it is used to enable PFM in
+ * LCD mode.
+ */
+ if (labibb->skip_2nd_swire_cmd && labibb->lab_dig_major < 2)
+ return true;
+
+ if (labibb->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE &&
+ labibb->mode == QPNP_LABIBB_LCD_MODE)
+ return true;
+
+ return false;
+}
+
static struct regulator_ops qpnp_lab_ops = {
.enable = qpnp_lab_regulator_enable,
.disable = qpnp_lab_regulator_disable,
@@ -1784,16 +1910,16 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
}
if (of_find_property(of_node,
- "qpnp,qpnp-lab-current-sense", NULL)) {
+ "qcom,qpnp-lab-current-sense", NULL)) {
config_current_sense = true;
rc = of_property_read_string(of_node,
- "qpnp,qpnp-lab-current-sense",
+ "qcom,qpnp-lab-current-sense",
&current_sense_str);
if (!rc) {
val = qpnp_labibb_get_matching_idx(
current_sense_str);
} else {
- pr_err("qpnp,qpnp-lab-current-sense configured incorrectly rc = %d\n",
+ pr_err("qcom,qpnp-lab-current-sense configured incorrectly rc = %d\n",
rc);
return rc;
}
@@ -1811,19 +1937,6 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
}
}
- if (labibb->skip_2nd_swire_cmd) {
- rc = devm_request_threaded_irq(labibb->dev,
- labibb->lab_vreg.lab_vreg_ok_irq, NULL,
- lab_vreg_ok_handler,
- IRQF_ONESHOT | IRQF_TRIGGER_RISING,
- "lab-vreg-ok", labibb);
- if (rc) {
- pr_err("Failed to register 'lab-vreg-ok' irq rc=%d\n",
- rc);
- return rc;
- }
- }
-
val = (labibb->standalone) ? 0 : LAB_IBB_EN_RDY_EN;
rc = qpnp_labibb_sec_write(labibb, labibb->lab_base,
REG_LAB_IBB_EN_RDY, &val, 1);
@@ -1901,6 +2014,19 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
labibb->lab_vreg.vreg_enabled = 1;
}
+ if (is_lab_vreg_ok_irq_available(labibb)) {
+ rc = devm_request_threaded_irq(labibb->dev,
+ labibb->lab_vreg.lab_vreg_ok_irq, NULL,
+ lab_vreg_ok_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "lab-vreg-ok", labibb);
+ if (rc) {
+ pr_err("Failed to register 'lab-vreg-ok' irq rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
rc = qpnp_labibb_read(labibb, &val,
labibb->lab_base + REG_LAB_MODULE_RDY, 1);
if (rc) {
@@ -1955,7 +2081,6 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
return -EINVAL;
}
- mutex_init(&(labibb->lab_vreg.lab_mutex));
return 0;
}
@@ -1966,18 +2091,37 @@ static int qpnp_ibb_dt_init(struct qpnp_labibb *labibb,
u32 i, tmp;
u8 val;
- if (labibb->mode == QPNP_LABIBB_LCD_MODE)
- val = REG_LAB_IBB_LCD_MODE;
- else
- val = REG_LAB_IBB_AMOLED_MODE;
+ /*
+ * Do not configure LCD_AMOLED_SEL for pmicobalt as it will be done by
+ * GPIO selector. Override the labibb->mode with what was configured
+ * by the bootloader.
+ */
+ if (labibb->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE) {
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->ibb_base + REG_IBB_LCD_AMOLED_SEL, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_read register %x failed rc = %d\n",
+ REG_IBB_LCD_AMOLED_SEL, rc);
+ return rc;
+ }
- rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
- REG_LAB_LCD_AMOLED_SEL, &val, 1);
+ if (val == REG_LAB_IBB_AMOLED_MODE)
+ labibb->mode = QPNP_LABIBB_AMOLED_MODE;
+ else
+ labibb->mode = QPNP_LABIBB_LCD_MODE;
+ } else {
+ if (labibb->mode == QPNP_LABIBB_LCD_MODE)
+ val = REG_LAB_IBB_LCD_MODE;
+ else
+ val = REG_LAB_IBB_AMOLED_MODE;
- if (rc) {
- pr_err("qpnp_labibb_sec_write register %x failed rc = %d\n",
- REG_IBB_LCD_AMOLED_SEL, rc);
- return rc;
+ rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+ REG_LAB_LCD_AMOLED_SEL, &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_sec_write register %x failed rc = %d\n",
+ REG_IBB_LCD_AMOLED_SEL, rc);
+ return rc;
+ }
}
rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-lab-pwrdn-delay",
@@ -2482,6 +2626,13 @@ static int register_qpnp_ibb_regulator(struct qpnp_labibb *labibb,
return rc;
}
+ /*
+ * For pmicobalt, override swire_control with what was configured
+ * before by the bootloader.
+ */
+ if (labibb->pmic_rev_id->pmic_subtype == PMICOBALT_SUBTYPE)
+ labibb->swire_control = val & IBB_ENABLE_CTL_SWIRE_RDY;
+
if (ibb_enable_ctl &
(IBB_ENABLE_CTL_SWIRE_RDY | IBB_ENABLE_CTL_MODULE_EN)) {
/* SWIRE_RDY or IBB_MODULE_EN enabled */
@@ -2659,14 +2810,13 @@ static int register_qpnp_ibb_regulator(struct qpnp_labibb *labibb,
return -EINVAL;
}
- mutex_init(&(labibb->ibb_vreg.ibb_mutex));
return 0;
}
static int qpnp_lab_register_irq(struct device_node *child,
struct qpnp_labibb *labibb)
{
- if (labibb->skip_2nd_swire_cmd) {
+ if (is_lab_vreg_ok_irq_available(labibb)) {
labibb->lab_vreg.lab_vreg_ok_irq =
of_irq_get_byname(child, "lab-vreg-ok");
if (labibb->lab_vreg.lab_vreg_ok_irq < 0) {
@@ -2684,7 +2834,7 @@ static int qpnp_labibb_check_ttw_supported(struct qpnp_labibb *labibb)
u8 val;
switch (labibb->pmic_rev_id->pmic_subtype) {
- case PMI8996:
+ case PMI8996_SUBTYPE:
rc = qpnp_labibb_read(labibb, &val,
labibb->ibb_base + REG_IBB_REVISION4, 1);
if (rc) {
@@ -2702,7 +2852,7 @@ static int qpnp_labibb_check_ttw_supported(struct qpnp_labibb *labibb)
/* FORCE_LAB_ON in TTW is not required for PMI8996 */
labibb->ttw_force_lab_on = false;
break;
- case PMI8950:
+ case PMI8950_SUBTYPE:
/* TTW supported for all revisions */
break;
default:
@@ -2721,7 +2871,7 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
unsigned int base;
struct device_node *child, *revid_dev_node;
const char *mode_name;
- u8 type;
+ u8 type, revision;
int rc = 0;
labibb = devm_kzalloc(&pdev->dev,
@@ -2739,6 +2889,9 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
labibb->dev = &(pdev->dev);
labibb->pdev = pdev;
+ mutex_init(&(labibb->lab_vreg.lab_mutex));
+ mutex_init(&(labibb->ibb_vreg.ibb_mutex));
+
revid_dev_node = of_parse_phandle(labibb->dev->of_node,
"qcom,pmic-revid", 0);
if (!revid_dev_node) {
@@ -2753,19 +2906,19 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
}
rc = of_property_read_string(labibb->dev->of_node,
- "qpnp,qpnp-labibb-mode", &mode_name);
+ "qcom,qpnp-labibb-mode", &mode_name);
if (!rc) {
if (strcmp("lcd", mode_name) == 0) {
labibb->mode = QPNP_LABIBB_LCD_MODE;
} else if (strcmp("amoled", mode_name) == 0) {
labibb->mode = QPNP_LABIBB_AMOLED_MODE;
} else {
- pr_err("Invalid device property in qpnp,qpnp-labibb-mode: %s\n",
+ pr_err("Invalid device property in qcom,qpnp-labibb-mode: %s\n",
mode_name);
return -EINVAL;
}
} else {
- pr_err("qpnp_labibb: qpnp,qpnp-labibb-mode is missing.\n");
+ pr_err("qpnp_labibb: qcom,qpnp-labibb-mode is missing.\n");
return rc;
}
@@ -2783,15 +2936,17 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
labibb->dev->of_node, "qcom,labibb-ttw-force-lab-on");
labibb->swire_control = of_property_read_bool(labibb->dev->of_node,
- "qpnp,swire-control");
+ "qcom,swire-control");
if (labibb->swire_control && labibb->mode != QPNP_LABIBB_AMOLED_MODE) {
pr_err("Invalid mode for SWIRE control\n");
return -EINVAL;
}
+
if (labibb->swire_control) {
labibb->skip_2nd_swire_cmd =
of_property_read_bool(labibb->dev->of_node,
"qcom,skip-2nd-swire-cmd");
+
rc = of_property_read_u32(labibb->dev->of_node,
"qcom,swire-2nd-cmd-delay",
&labibb->swire_2nd_cmd_delay);
@@ -2811,6 +2966,7 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
pr_err("no child nodes\n");
return -ENXIO;
}
+
for_each_available_child_of_node(pdev->dev.of_node, child) {
rc = of_property_read_u32(child, "reg", &base);
if (rc < 0) {
@@ -2820,6 +2976,13 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
return rc;
}
+ rc = qpnp_labibb_read(labibb, &revision, base + REG_REVISION_2,
+ 1);
+ if (rc) {
+ pr_err("Reading REVISION_2 failed rc=%d\n", rc);
+ goto fail_registration;
+ }
+
rc = qpnp_labibb_read(labibb, &type,
base + REG_PERPH_TYPE, 1);
if (rc) {
@@ -2830,6 +2993,7 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
switch (type) {
case QPNP_LAB_TYPE:
labibb->lab_base = base;
+ labibb->lab_dig_major = revision;
rc = qpnp_lab_register_irq(child, labibb);
if (rc) {
pr_err("Failed to register LAB IRQ rc=%d\n",
@@ -2843,6 +3007,7 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
case QPNP_IBB_TYPE:
labibb->ibb_base = base;
+ labibb->ibb_dig_major = revision;
rc = register_qpnp_ibb_regulator(labibb, child);
if (rc)
goto fail_registration;
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 9cfca014c8ad..382245eb90b6 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -3948,7 +3948,6 @@ int glink_core_register_transport(struct glink_transport_if *if_ptr,
xprt_ptr->edge, xprt_ptr->name);
if (IS_ERR_OR_NULL(xprt_ptr->tx_task)) {
GLINK_ERR("%s: unable to run thread\n", __func__);
- glink_core_deinit_xprt_qos_cfg(xprt_ptr);
kfree(xprt_ptr);
return -ENOMEM;
}
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
index 00cc5e12709b..dcca82fc25c6 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.c
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -39,6 +39,7 @@ static DECLARE_DELAYED_WORK(work_recv_msg, mem_share_svc_recv_msg);
static struct workqueue_struct *mem_share_svc_workqueue;
static uint64_t bootup_request;
static void *memshare_ramdump_dev[MAX_CLIENTS];
+static struct device *memshare_dev[MAX_CLIENTS];
/* Memshare Driver Structure */
struct memshare_driver {
@@ -145,9 +146,14 @@ static int mem_share_configure_ramdump(void)
}
snprintf(client_name, 18, "memshare_%s", clnt);
-
- memshare_ramdump_dev[num_clients] = create_ramdump_device(client_name,
- NULL);
+ if (memshare_dev[num_clients]) {
+ memshare_ramdump_dev[num_clients] =
+ create_ramdump_device(client_name,
+ memshare_dev[num_clients]);
+ } else {
+ pr_err("memshare:%s: invalid memshare device\n", __func__);
+ return -ENODEV;
+ }
if (IS_ERR_OR_NULL(memshare_ramdump_dev[num_clients])) {
pr_err("memshare: %s: Unable to create memshare ramdump device.\n",
__func__);
@@ -957,6 +963,8 @@ static int memshare_child_probe(struct platform_device *pdev)
* memshare clients
*/
+ memshare_dev[num_clients] = &pdev->dev;
+
if (!memblock[num_clients].file_created) {
rc = mem_share_configure_ramdump();
if (rc)
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
index e4c8f1f446df..a876484859eb 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is Mree software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -133,7 +133,7 @@ static ssize_t bus_floor_vote_store_api(struct device *dev,
return 0;
}
- if (sscanf(buf, "%s %llu", name, &vote_khz) != 2) {
+ if (sscanf(buf, "%9s %llu", name, &vote_khz) != 2) {
pr_err("%s:return error", __func__);
return -EINVAL;
}
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 31a5ae89174e..bf6b11194111 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -277,7 +277,8 @@ static int pil_mss_loadable_init(struct modem_data *drv,
q6->restart_reg_sec = true;
}
- q6->restart_reg = devm_ioremap_resource(&pdev->dev, res);
+ q6->restart_reg = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
if (!q6->restart_reg)
return -ENOMEM;
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index e8969a5e533b..45ac48eb2241 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -35,6 +35,7 @@
struct apr_tx_buf {
struct list_head list;
+ struct apr_pkt_priv pkt_priv;
char buf[APR_MAX_BUF];
};
@@ -67,29 +68,28 @@ static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = {
static struct apr_svc_ch_dev
apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX];
-static int apr_get_free_buf(int len, void **buf)
+static struct apr_tx_buf *apr_get_free_buf(int len)
{
struct apr_tx_buf *tx_buf;
unsigned long flags;
- if (!buf || len > APR_MAX_BUF) {
+ if (len > APR_MAX_BUF) {
pr_err("%s: buf too large [%d]\n", __func__, len);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
spin_lock_irqsave(&buf_list.lock, flags);
if (list_empty(&buf_list.list)) {
spin_unlock_irqrestore(&buf_list.lock, flags);
pr_err("%s: No buf available\n", __func__);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
tx_buf = list_first_entry(&buf_list.list, struct apr_tx_buf, list);
list_del(&tx_buf->list);
spin_unlock_irqrestore(&buf_list.lock, flags);
- *buf = tx_buf->buf;
- return 0;
+ return tx_buf;
}
static void apr_buf_add_tail(const void *buf)
@@ -130,16 +130,22 @@ int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
{
int rc = 0, retries = 0;
void *pkt_data = NULL;
+ struct apr_tx_buf *tx_buf;
+ struct apr_pkt_priv *pkt_priv_ptr = pkt_priv;
if (!apr_ch->handle || !pkt_priv)
return -EINVAL;
if (pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER) {
- rc = apr_get_free_buf(len, &pkt_data);
- if (rc)
+ tx_buf = apr_get_free_buf(len);
+ if (IS_ERR_OR_NULL(tx_buf)) {
+ rc = -EINVAL;
goto exit;
-
- memcpy(pkt_data, data, len);
+ }
+ memcpy(tx_buf->buf, data, len);
+ memcpy(&tx_buf->pkt_priv, pkt_priv, sizeof(tx_buf->pkt_priv));
+ pkt_priv_ptr = &tx_buf->pkt_priv;
+ pkt_data = tx_buf->buf;
} else {
pkt_data = data;
}
@@ -148,7 +154,7 @@ int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
if (rc == -EAGAIN)
udelay(50);
- rc = __apr_tal_write(apr_ch, pkt_data, pkt_priv, len);
+ rc = __apr_tal_write(apr_ch, pkt_data, pkt_priv_ptr, len);
} while (rc == -EAGAIN && retries++ < APR_MAXIMUM_NUM_OF_RETRIES);
if (rc < 0) {
@@ -180,6 +186,28 @@ void apr_tal_notify_rx(void *handle, const void *priv, const void *pkt_priv,
glink_rx_done(apr_ch->handle, ptr, true);
}
+static void apr_tal_notify_tx_abort(void *handle, const void *priv,
+ const void *pkt_priv)
+{
+ struct apr_pkt_priv *apr_pkt_priv_ptr =
+ (struct apr_pkt_priv *)pkt_priv;
+ struct apr_tx_buf *list_node;
+
+ if (!apr_pkt_priv_ptr) {
+ pr_err("%s: Invalid pkt_priv\n", __func__);
+ return;
+ }
+
+ pr_debug("%s: tx_abort received for apr_pkt_priv_ptr:%pK\n",
+ __func__, apr_pkt_priv_ptr);
+
+ if (apr_pkt_priv_ptr->pkt_owner == APR_PKT_OWNER_DRIVER) {
+ list_node = container_of(apr_pkt_priv_ptr,
+ struct apr_tx_buf, pkt_priv);
+ apr_buf_add_tail(list_node->buf);
+ }
+}
+
void apr_tal_notify_tx_done(void *handle, const void *priv,
const void *pkt_priv, const void *ptr)
{
@@ -315,6 +343,7 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest, uint32_t dl,
open_cfg.notify_state = apr_tal_notify_state;
open_cfg.notify_rx_intent_req = apr_tal_notify_rx_intent_req;
open_cfg.notify_remote_rx_intent = apr_tal_notify_remote_rx_intent;
+ open_cfg.notify_tx_abort = apr_tal_notify_tx_abort;
open_cfg.priv = apr_ch;
open_cfg.transport = "smem";
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 24018c544b06..2b708732760f 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -149,7 +149,7 @@ static void service_locator_recv_msg(struct work_struct *work)
do {
pr_debug("Notified about a Receive event\n");
ret = qmi_recv_msg(service_locator.clnt_handle);
- if (ret != -ENOMSG)
+ if (ret < 0)
pr_err("Error receiving message rc:%d. Retrying...\n",
ret);
} while (ret == 0);
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 8cba88742cb8..a244bc168136 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -162,7 +162,7 @@ static void root_service_clnt_recv_msg(struct work_struct *work)
data->instance_id);
} while ((ret = qmi_recv_msg(data->clnt_handle)) == 0);
- pr_info("Notified about a Receive event (instance-id: %d)\n",
+ pr_debug("Notified about a Receive event (instance-id: %d)\n",
data->instance_id);
}
@@ -227,7 +227,8 @@ static void root_service_service_ind_cb(struct qmi_handle *handle,
struct qmi_client_info *data = (struct qmi_client_info *)ind_cb_priv;
struct service_notif_info *service_notif;
struct msg_desc ind_desc;
- struct qmi_servreg_notif_state_updated_ind_msg_v01 ind_msg;
+ struct qmi_servreg_notif_state_updated_ind_msg_v01 ind_msg = {
+ QMI_STATE_MIN_VAL, "", 0xFFFF };
int rc;
ind_desc.msg_id = SERVREG_NOTIF_STATE_UPDATED_IND_MSG;
diff --git a/drivers/soc/qcom/sysmon.c b/drivers/soc/qcom/sysmon.c
index 8a12341a6f91..f8d0f10b1173 100644
--- a/drivers/soc/qcom/sysmon.c
+++ b/drivers/soc/qcom/sysmon.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -206,7 +206,7 @@ int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc)
return -ENODEV;
mutex_lock(&ss->lock);
- ret = sysmon_send_msg(ss, tx_buf, ARRAY_SIZE(tx_buf));
+ ret = sysmon_send_msg(ss, tx_buf, strlen(tx_buf));
if (ret) {
pr_err("Message sending failed %d\n", ret);
goto out;
@@ -257,7 +257,7 @@ int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
return -ENODEV;
mutex_lock(&ss->lock);
- ret = sysmon_send_msg(ss, tx_buf, ARRAY_SIZE(tx_buf));
+ ret = sysmon_send_msg(ss, tx_buf, strlen(tx_buf));
if (ret) {
pr_err("Message sending failed %d\n", ret);
goto out;
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index c0b936d802ef..0ae654a921f8 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spmi.h>
+#include <linux/syscore_ops.h>
/* PMIC Arbiter configuration registers */
#define PMIC_ARB_VERSION 0x0000
@@ -159,6 +160,7 @@ struct spmi_pmic_arb {
u16 last_apid;
struct apid_data apid_data[PMIC_ARB_MAX_PERIPHS];
};
+static struct spmi_pmic_arb *the_pa;
/**
* pmic_arb_ver: version dependent functionality.
@@ -525,7 +527,7 @@ static void cleanup_irq(struct spmi_pmic_arb *pa, u16 apid, int id)
irq_mask, ppid);
}
-static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid)
+static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid, bool show)
{
unsigned int irq;
u32 status;
@@ -542,22 +544,32 @@ static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid)
cleanup_irq(pa, apid, id);
continue;
}
- generic_handle_irq(irq);
+ if (show) {
+ struct irq_desc *desc;
+ const char *name = "null";
+
+ desc = irq_to_desc(irq);
+ if (desc == NULL)
+ name = "stray irq";
+ else if (desc->action && desc->action->name)
+ name = desc->action->name;
+
+ pr_warn("spmi_show_resume_irq: %d triggered [0x%01x, 0x%02x, 0x%01x] %s\n",
+ irq, sid, per, id, name);
+ } else {
+ generic_handle_irq(irq);
+ }
}
}
-static void pmic_arb_chained_irq(struct irq_desc *desc)
+static void __pmic_arb_chained_irq(struct spmi_pmic_arb *pa, bool show)
{
- struct spmi_pmic_arb *pa = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
void __iomem *intr = pa->intr;
int first = pa->min_apid >> 5;
int last = pa->max_apid >> 5;
u32 status, enable;
int i, id, apid;
- chained_irq_enter(chip, desc);
-
for (i = first; i <= last; ++i) {
status = readl_relaxed(intr +
pa->ver_ops->owner_acc_status(pa->ee, i));
@@ -568,10 +580,18 @@ static void pmic_arb_chained_irq(struct irq_desc *desc)
enable = readl_relaxed(intr +
pa->ver_ops->acc_enable(apid));
if (enable & SPMI_PIC_ACC_ENABLE_BIT)
- periph_interrupt(pa, apid);
+ periph_interrupt(pa, apid, show);
}
}
+}
+
+static void pmic_arb_chained_irq(struct irq_desc *desc)
+{
+ struct spmi_pmic_arb *pa = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ chained_irq_enter(chip, desc);
+ __pmic_arb_chained_irq(pa, false);
chained_irq_exit(chip, desc);
}
@@ -988,6 +1008,16 @@ static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
.xlate = qpnpint_irq_domain_dt_translate,
};
+static void spmi_pmic_arb_resume(void)
+{
+ if (spmi_show_resume_irq())
+ __pmic_arb_chained_irq(the_pa, true);
+}
+
+static struct syscore_ops spmi_pmic_arb_syscore_ops = {
+ .resume = spmi_pmic_arb_resume,
+};
+
static int spmi_pmic_arb_probe(struct platform_device *pdev)
{
struct spmi_pmic_arb *pa;
@@ -1153,6 +1183,8 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
if (err)
goto err_domain_remove;
+ the_pa = pa;
+ register_syscore_ops(&spmi_pmic_arb_syscore_ops);
return 0;
err_domain_remove:
@@ -1167,8 +1199,11 @@ static int spmi_pmic_arb_remove(struct platform_device *pdev)
{
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
+
spmi_controller_remove(ctrl);
irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
+ unregister_syscore_ops(&spmi_pmic_arb_syscore_ops);
+ the_pa = NULL;
irq_domain_remove(pa->domain);
spmi_controller_put(ctrl);
return 0;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 269c1ee2da44..5839111ab4e0 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -48,6 +48,11 @@ static void hub_event(struct work_struct *work);
/* synchronize hub-port add/remove and peering operations */
DEFINE_MUTEX(usb_port_peer_mutex);
+static bool skip_extended_resume_delay = 1;
+module_param(skip_extended_resume_delay, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(skip_extended_resume_delay,
+ "removes extra delay added to finish bus resume");
+
/* cycle leds on hubs that aren't blinking for attention */
static bool blinkenlights = 0;
module_param(blinkenlights, bool, S_IRUGO);
@@ -3434,7 +3439,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
/* drive resume for USB_RESUME_TIMEOUT msec */
dev_dbg(&udev->dev, "usb %sresume\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""));
- msleep(USB_RESUME_TIMEOUT);
+ if (!skip_extended_resume_delay)
+ usleep_range(USB_RESUME_TIMEOUT * 1000,
+ (USB_RESUME_TIMEOUT + 1) * 1000);
/* Virtual root hubs can trigger on GET_PORT_STATUS to
* stop resume signaling. Then finish the resume
@@ -3443,7 +3450,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
status = hub_port_status(hub, port1, &portstatus, &portchange);
/* TRSMRCY = 10 msec */
- msleep(10);
+ usleep_range(10000, 10500);
}
SuspendCleared:
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 5db4fe9e3cdf..be29dc4bef89 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1528,7 +1528,14 @@ static int android_setup(struct usb_gadget *gadget,
static void android_disconnect(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
- struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
+ struct gadget_info *gi;
+
+ if (!cdev) {
+ pr_err("%s: gadget is not connected\n", __func__);
+ return;
+ }
+
+ gi = container_of(cdev, struct gadget_info, cdev);
/* accessory HID support can be active while the
accessory function is not actually enabled,
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 5e50fe245a59..33f7304eac84 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -43,6 +43,7 @@
#include "configfs.h"
#define MTP_RX_BUFFER_INIT_SIZE 1048576
+#define MTP_TX_BUFFER_INIT_SIZE 1048576
#define MTP_BULK_BUFFER_SIZE 16384
#define INTR_BUFFER_SIZE 28
#define MAX_INST_NAME_LEN 40
@@ -81,7 +82,7 @@
unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;
module_param(mtp_rx_req_len, uint, S_IRUGO | S_IWUSR);
-unsigned int mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
+unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE;
module_param(mtp_tx_req_len, uint, S_IRUGO | S_IWUSR);
unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
@@ -551,9 +552,6 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
dev->ep_intr = ep;
retry_tx_alloc:
- if (mtp_tx_req_len > MTP_BULK_BUFFER_SIZE)
- mtp_tx_reqs = 4;
-
/* now allocate requests for our endpoints */
for (i = 0; i < mtp_tx_reqs; i++) {
req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
@@ -753,8 +751,8 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
break;
}
- if (count > MTP_BULK_BUFFER_SIZE)
- xfer = MTP_BULK_BUFFER_SIZE;
+ if (count > mtp_tx_req_len)
+ xfer = mtp_tx_req_len;
else
xfer = count;
if (xfer && copy_from_user(req->buf, buf, xfer)) {
@@ -850,8 +848,8 @@ static void send_file_work(struct work_struct *data)
break;
}
- if (count > MTP_BULK_BUFFER_SIZE)
- xfer = MTP_BULK_BUFFER_SIZE;
+ if (count > mtp_tx_req_len)
+ xfer = mtp_tx_req_len;
else
xfer = count;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0f51d078416e..a7b055bc279a 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1165,7 +1165,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ usleep_range(21000, 21500);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
@@ -1409,7 +1409,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
if (need_usb2_u3_exit) {
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ usleep_range(21000, 21500);
spin_lock_irqsave(&xhci->lock, flags);
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ac298e632d73..29dc6ab252b1 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1789,6 +1789,8 @@ void xhci_free_command(struct xhci_hcd *xhci,
int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
{
int size;
+ u32 iman_reg;
+ u64 erdp_reg;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct device *dev = xhci_to_hcd(xhci)->self.controller;
@@ -1800,14 +1802,38 @@ int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
size =
sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
- if (xhci->sec_erst[intr_num].entries)
+ if (xhci->sec_erst[intr_num].entries) {
+ /*
+ * disable irq, ack pending interrupt and clear EHB for xHC to
+ * generate interrupt again when new event ring is setup
+ */
+ iman_reg =
+ readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+ iman_reg &= ~IMAN_IE;
+ writel_relaxed(iman_reg,
+ &xhci->sec_ir_set[intr_num]->irq_pending);
+ iman_reg =
+ readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+ if (iman_reg & IMAN_IP)
+ writel_relaxed(iman_reg,
+ &xhci->sec_ir_set[intr_num]->irq_pending);
+ /* make sure IP gets cleared before clearing EHB */
+ mb();
+
+ erdp_reg = xhci_read_64(xhci,
+ &xhci->sec_ir_set[intr_num]->erst_dequeue);
+ xhci_write_64(xhci, erdp_reg | ERST_EHB,
+ &xhci->sec_ir_set[intr_num]->erst_dequeue);
+
dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
xhci->sec_erst[intr_num].erst_dma_addr);
- xhci->sec_erst[intr_num].entries = NULL;
+ xhci->sec_erst[intr_num].entries = NULL;
+ }
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed SEC ERST#%d",
intr_num);
if (xhci->sec_event_ring[intr_num])
xhci_ring_free(xhci, xhci->sec_event_ring[intr_num]);
+
xhci->sec_event_ring[intr_num] = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed sec event ring");
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 084dbbc81a6d..c025dccdd8f1 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -332,7 +332,7 @@ static int xhci_plat_runtime_suspend(struct device *dev)
dev_dbg(dev, "xhci-plat runtime suspend\n");
- return xhci_suspend(xhci, true);
+ return 0;
}
static int xhci_plat_runtime_resume(struct device *dev)
@@ -346,7 +346,7 @@ static int xhci_plat_runtime_resume(struct device *dev)
dev_dbg(dev, "xhci-plat runtime resume\n");
- ret = xhci_resume(xhci, false);
+ ret = 0;
pm_runtime_mark_last_busy(dev);
return ret;
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 1e93a5b2e9ba..5a24a1995af9 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -178,6 +178,7 @@ enum mdss_hw_capabilities {
MDSS_CAPS_CWB_SUPPORTED,
MDSS_CAPS_MDP_VOTE_CLK_NOT_SUPPORTED,
MDSS_CAPS_AVR_SUPPORTED,
+ MDSS_CAPS_SEC_DETACH_SMMU,
MDSS_CAPS_MAX,
};
@@ -221,6 +222,7 @@ struct mdss_smmu_client {
bool domain_attached;
bool handoff_pending;
void __iomem *mmu_base;
+ int domain;
};
struct mdss_mdp_qseed3_lut_tbl {
@@ -327,6 +329,7 @@ struct mdss_data_type {
u32 wfd_mode;
u32 has_no_lut_read;
atomic_t sd_client_count;
+ atomic_t sc_client_count;
u8 has_wb_ad;
u8 has_non_scalar_rgb;
bool has_src_split;
@@ -519,6 +522,8 @@ struct mdss_data_type {
u32 max_dest_scaler_input_width;
u32 max_dest_scaler_output_width;
struct mdss_mdp_destination_scaler *ds;
+ u32 sec_disp_en;
+ u32 sec_cam_en;
};
extern struct mdss_data_type *mdss_res;
@@ -579,6 +584,14 @@ static inline int mdss_get_sd_client_cnt(void)
return atomic_read(&mdss_res->sd_client_count);
}
+static inline int mdss_get_sc_client_cnt(void)
+{
+ if (!mdss_res)
+ return 0;
+ else
+ return atomic_read(&mdss_res->sc_client_count);
+}
+
static inline void mdss_set_quirk(struct mdss_data_type *mdata,
enum mdss_hw_quirk bit)
{
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 75e42ca8cd88..29fc4e6fd65b 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -45,6 +45,11 @@
#define VDDA_UA_ON_LOAD 100000 /* uA units */
#define VDDA_UA_OFF_LOAD 100 /* uA units */
+struct mdss_dp_attention_node {
+ u32 vdo;
+ struct list_head list;
+};
+
#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
static u32 supported_modes[] = {
HDMI_VFRMT_640x480p60_4_3,
@@ -60,6 +65,7 @@ static u32 supported_modes[] = {
static int mdss_dp_off_irq(struct mdss_dp_drv_pdata *dp_drv);
static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata);
static inline void mdss_dp_link_retraining(struct mdss_dp_drv_pdata *dp);
+static void mdss_dp_handle_attention(struct mdss_dp_drv_pdata *dp_drv);
static void mdss_dp_put_dt_clk_data(struct device *dev,
struct dss_module_power *module_power)
@@ -1133,7 +1139,7 @@ static void mdss_dp_configure_source_params(struct mdss_dp_drv_pdata *dp,
mdss_dp_fill_link_cfg(dp);
mdss_dp_mainlink_ctrl(&dp->ctrl_io, true);
mdss_dp_config_ctrl(dp);
- mdss_dp_sw_mvid_nvid(&dp->ctrl_io);
+ mdss_dp_sw_config_msa(&dp->ctrl_io, dp->link_rate, &dp->dp_cc_io);
mdss_dp_timing_cfg(&dp->ctrl_io, &dp->panel_data.panel_info);
}
@@ -1290,7 +1296,6 @@ link_training:
dp_drv->cont_splash = 0;
dp_drv->power_on = true;
- mdss_dp_ack_state(dp_drv, true);
pr_debug("End-\n");
exit:
@@ -1559,6 +1564,7 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
goto edid_error;
}
+ mdss_dp_update_cable_status(dp_drv, true);
mdss_dp_notify_clients(dp_drv, true);
dp_drv->dp_initialized = true;
@@ -1883,8 +1889,13 @@ static void mdss_dp_update_hdcp_info(struct mdss_dp_drv_pdata *dp)
}
/* update internal data about hdcp */
- dp->hdcp.data = fd;
- dp->hdcp.ops = ops;
+ if (dp->hdcp.hdcp2_present || dp->hdcp.hdcp1_present) {
+ dp->hdcp.data = fd;
+ dp->hdcp.ops = ops;
+ } else {
+ dp->hdcp.data = NULL;
+ dp->hdcp.ops = NULL;
+ }
}
static inline bool dp_is_hdcp_enabled(struct mdss_dp_drv_pdata *dp_drv)
@@ -1920,6 +1931,8 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
if (dp->hdcp.ops && dp->hdcp.ops->authenticate)
rc = dp->hdcp.ops->authenticate(dp->hdcp.data);
+
+ mdss_dp_ack_state(dp, true);
break;
case MDSS_EVENT_PANEL_OFF:
rc = mdss_dp_off(pdata);
@@ -2041,6 +2054,12 @@ static int mdss_retrieve_dp_ctrl_resources(struct platform_device *pdev,
return rc;
}
+ if (msm_dss_ioremap_byname(pdev, &dp_drv->dp_cc_io, "dp_mmss_cc")) {
+ pr_err("%d unable to remap dp MMSS_CC resources\n",
+ __LINE__);
+ return rc;
+ }
+
if (msm_dss_ioremap_byname(pdev, &dp_drv->qfprom_io,
"qfprom_physical"))
pr_warn("unable to remap dp qfprom resources\n");
@@ -2117,6 +2136,9 @@ static void mdss_dp_event_work(struct work_struct *work)
case EV_IDLE_PATTERNS_SENT:
mdss_dp_idle_patterns_sent(dp);
break;
+ case EV_USBPD_ATTENTION:
+ mdss_dp_handle_attention(dp);
+ break;
case EV_USBPD_DISCOVER_MODES:
usbpd_send_svdm(dp->pd, USB_C_DP_SID, USBPD_SVDM_DISCOVER_MODES,
SVDM_CMD_TYPE_INITIATOR, 0x0, 0x0, 0x0);
@@ -2225,6 +2247,7 @@ static int mdss_dp_event_setup(struct mdss_dp_drv_pdata *dp)
INIT_WORK(&dp->work, mdss_dp_event_work);
INIT_DELAYED_WORK(&dp->hdcp_cb_work, mdss_dp_hdcp_cb_work);
+ INIT_LIST_HEAD(&dp->attention_head);
return 0;
}
@@ -2508,6 +2531,7 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
const u32 *vdos, int num_vdos)
{
struct mdss_dp_drv_pdata *dp_drv;
+ struct mdss_dp_attention_node *node;
dp_drv = container_of(hdlr, struct mdss_dp_drv_pdata, svid_handler);
if (!dp_drv->pd) {
@@ -2535,45 +2559,14 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
dp_send_events(dp_drv, EV_USBPD_DP_STATUS);
break;
case USBPD_SVDM_ATTENTION:
- dp_drv->alt_mode.dp_status.response = *vdos;
- mdss_dp_usbpd_ext_dp_status(&dp_drv->alt_mode.dp_status);
-
- dp_drv->hpd_irq_toggled = dp_drv->hpd_irq_on !=
- dp_drv->alt_mode.dp_status.hpd_irq;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node->vdo = *vdos;
- if (dp_drv->alt_mode.dp_status.hpd_irq) {
- pr_debug("Attention: hpd_irq high\n");
-
- if (dp_drv->power_on && dp_drv->hdcp.ops &&
- dp_drv->hdcp.ops->cp_irq)
- dp_drv->hdcp.ops->cp_irq(dp_drv->hdcp.data);
-
- if (!mdss_dp_process_hpd_irq_high(dp_drv))
- break;
- } else if (dp_drv->hpd_irq_toggled) {
- if (!mdss_dp_process_hpd_irq_low(dp_drv))
- break;
- }
-
- if (!dp_drv->alt_mode.dp_status.hpd_high) {
- pr_debug("Attention: HPD low\n");
- mdss_dp_update_cable_status(dp_drv, false);
- mdss_dp_notify_clients(dp_drv, false);
- pr_debug("Attention: Notified clients\n");
- break;
- }
-
- pr_debug("Attention: HPD high\n");
-
- mdss_dp_update_cable_status(dp_drv, true);
-
- dp_drv->alt_mode.current_state |= DP_STATUS_DONE;
-
- if (dp_drv->alt_mode.current_state & DP_CONFIGURE_DONE)
- mdss_dp_host_init(&dp_drv->panel_data);
- else
- dp_send_events(dp_drv, EV_USBPD_DP_CONFIGURE);
+ mutex_lock(&dp_drv->attention_lock);
+ list_add_tail(&node->list, &dp_drv->attention_head);
+ mutex_unlock(&dp_drv->attention_lock);
+ dp_send_events(dp_drv, EV_USBPD_ATTENTION);
break;
case DP_VDM_STATUS:
dp_drv->alt_mode.dp_status.response = *vdos;
@@ -2597,6 +2590,74 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
}
}
+static void mdss_dp_process_attention(struct mdss_dp_drv_pdata *dp_drv)
+{
+ dp_drv->hpd_irq_toggled = dp_drv->hpd_irq_on !=
+ dp_drv->alt_mode.dp_status.hpd_irq;
+
+ if (dp_drv->alt_mode.dp_status.hpd_irq) {
+ pr_debug("Attention: hpd_irq high\n");
+
+ if (dp_drv->power_on && dp_drv->hdcp.ops &&
+ dp_drv->hdcp.ops->cp_irq)
+ dp_drv->hdcp.ops->cp_irq(dp_drv->hdcp.data);
+
+ if (!mdss_dp_process_hpd_irq_high(dp_drv))
+ return;
+ } else if (dp_drv->hpd_irq_toggled) {
+ if (!mdss_dp_process_hpd_irq_low(dp_drv))
+ return;
+ }
+
+ if (!dp_drv->alt_mode.dp_status.hpd_high) {
+ pr_debug("Attention: HPD low\n");
+ mdss_dp_update_cable_status(dp_drv, false);
+ mdss_dp_notify_clients(dp_drv, false);
+ pr_debug("Attention: Notified clients\n");
+ return;
+ }
+
+ pr_debug("Attention: HPD high\n");
+
+ mdss_dp_update_cable_status(dp_drv, true);
+
+ dp_drv->alt_mode.current_state |= DP_STATUS_DONE;
+
+ if (dp_drv->alt_mode.current_state & DP_CONFIGURE_DONE)
+ mdss_dp_host_init(&dp_drv->panel_data);
+ else
+ dp_send_events(dp_drv, EV_USBPD_DP_CONFIGURE);
+
+ pr_debug("exit\n");
+}
+
+static void mdss_dp_handle_attention(struct mdss_dp_drv_pdata *dp)
+{
+ int i = 0;
+
+ while (!list_empty_careful(&dp->attention_head)) {
+ struct mdss_dp_attention_node *node;
+ u32 vdo;
+
+ pr_debug("processing item %d in the list\n", ++i);
+
+ mutex_lock(&dp->attention_lock);
+ node = list_first_entry(&dp->attention_head,
+ struct mdss_dp_attention_node, list);
+
+ vdo = node->vdo;
+ list_del(&node->list);
+ mutex_unlock(&dp->attention_lock);
+
+ kzfree(node);
+
+ dp->alt_mode.dp_status.response = vdo;
+ mdss_dp_usbpd_ext_dp_status(&dp->alt_mode.dp_status);
+ mdss_dp_process_attention(dp);
+ };
+
+}
+
static int mdss_dp_usbpd_setup(struct mdss_dp_drv_pdata *dp_drv)
{
int ret = 0;
@@ -2670,6 +2731,7 @@ static int mdss_dp_probe(struct platform_device *pdev)
dp_drv->mask2 = EDP_INTR_MASK2;
mutex_init(&dp_drv->emutex);
mutex_init(&dp_drv->pd_msg_mutex);
+ mutex_init(&dp_drv->attention_lock);
mutex_init(&dp_drv->hdcp_mutex);
spin_lock_init(&dp_drv->lock);
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index e801eceeef1b..04abe9221acc 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -205,6 +205,7 @@ struct dp_alt_mode {
#define EV_USBPD_DP_CONFIGURE BIT(10)
#define EV_USBPD_CC_PIN_POLARITY BIT(11)
#define EV_USBPD_EXIT_MODE BIT(12)
+#define EV_USBPD_ATTENTION BIT(13)
/* dp state ctrl */
#define ST_TRAIN_PATTERN_1 BIT(0)
@@ -406,6 +407,7 @@ struct mdss_dp_drv_pdata {
struct dss_io_data ctrl_io;
struct dss_io_data phy_io;
struct dss_io_data tcsr_reg_io;
+ struct dss_io_data dp_cc_io;
struct dss_io_data qfprom_io;
struct dss_io_data hdcp_io;
int base_size;
@@ -454,6 +456,7 @@ struct mdss_dp_drv_pdata {
struct mutex aux_mutex;
struct mutex train_mutex;
struct mutex pd_msg_mutex;
+ struct mutex attention_lock;
struct mutex hdcp_mutex;
bool cable_connected;
u32 s3d_mode;
@@ -497,6 +500,8 @@ struct mdss_dp_drv_pdata {
struct dpcd_test_request test_data;
struct dpcd_sink_count sink_count;
+
+ struct list_head attention_head;
};
enum dp_lane_count {
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index 9014e3a02d21..b64518194926 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -514,7 +514,7 @@ char mdss_dp_gen_link_clk(struct mdss_panel_info *pinfo, char lane_cnt)
{
const u32 encoding_factx10 = 8;
const u32 ln_to_link_ratio = 10;
- u32 min_link_rate;
+ u32 min_link_rate, reminder = 0;
char calc_link_rate = 0;
pr_debug("clk_rate=%llu, bpp= %d, lane_cnt=%d\n",
@@ -527,13 +527,19 @@ char mdss_dp_gen_link_clk(struct mdss_panel_info *pinfo, char lane_cnt)
* Any changes in the section of code should
* consider this limitation.
*/
- min_link_rate = pinfo->clk_rate
- / (lane_cnt * encoding_factx10);
+ min_link_rate = (u32)div_u64(pinfo->clk_rate,
+ (lane_cnt * encoding_factx10));
min_link_rate /= ln_to_link_ratio;
min_link_rate = (min_link_rate * pinfo->bpp);
- min_link_rate = (u32)div_u64(min_link_rate * 10,
- DP_LINK_RATE_MULTIPLIER);
+ min_link_rate = (u32)div_u64_rem(min_link_rate * 10,
+ DP_LINK_RATE_MULTIPLIER, &reminder);
+ /*
+ * To avoid any fractional values,
+ * increment the min_link_rate
+ */
+ if (reminder)
+ min_link_rate += 1;
pr_debug("min_link_rate = %d\n", min_link_rate);
if (min_link_rate <= DP_LINK_RATE_162)
@@ -1647,6 +1653,8 @@ int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
clear:
dp_clear_training_pattern(dp);
if (ret != -EINVAL) {
+ mdss_dp_config_misc_settings(&dp->ctrl_io,
+ &dp->panel_data.panel_info);
mdss_dp_setup_tr_unit(&dp->ctrl_io, dp->link_rate,
dp->lane_cnt, dp->vic);
mdss_dp_state_ctrl(&dp->ctrl_io, ST_SEND_VIDEO);
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c
index 2d24d8499105..86edc4492599 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.c
+++ b/drivers/video/fbdev/msm/mdss_dp_util.c
@@ -253,10 +253,48 @@ void mdss_dp_timing_cfg(struct dss_io_data *ctrl_io,
writel_relaxed(data, ctrl_io->base + DP_ACTIVE_HOR_VER);
}
-void mdss_dp_sw_mvid_nvid(struct dss_io_data *ctrl_io)
+void mdss_dp_sw_config_msa(struct dss_io_data *ctrl_io,
+ char lrate, struct dss_io_data *dp_cc_io)
{
- writel_relaxed(0x37, ctrl_io->base + DP_SOFTWARE_MVID);
- writel_relaxed(0x3c, ctrl_io->base + DP_SOFTWARE_NVID);
+ u32 pixel_m, pixel_n;
+ u32 mvid, nvid;
+
+ pixel_m = readl_relaxed(dp_cc_io->base + MMSS_DP_PIXEL_M);
+ pixel_n = readl_relaxed(dp_cc_io->base + MMSS_DP_PIXEL_N);
+ pr_debug("pixel_m=0x%x, pixel_n=0x%x\n",
+ pixel_m, pixel_n);
+
+ mvid = (pixel_m & 0xFFFF) * 5;
+ nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+ if (lrate == DP_LINK_RATE_540)
+ nvid = nvid * 2;
+ pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+ writel_relaxed(mvid, ctrl_io->base + DP_SOFTWARE_MVID);
+ writel_relaxed(nvid, ctrl_io->base + DP_SOFTWARE_NVID);
+}
+
+void mdss_dp_config_misc_settings(struct dss_io_data *ctrl_io,
+ struct mdss_panel_info *pinfo)
+{
+ u32 bpp = pinfo->bpp;
+ u32 misc_val = 0x0;
+
+ switch (bpp) {
+ case 18:
+ misc_val |= (0x0 << 5);
+ break;
+ case 30:
+ misc_val |= (0x2 << 5);
+ break;
+ case 24:
+ default:
+ misc_val |= (0x1 << 5);
+ }
+
+ misc_val |= BIT(0); /* Configure clock to synchronous mode */
+
+ pr_debug("Misc settings = 0x%x\n", misc_val);
+ writel_relaxed(misc_val, ctrl_io->base + DP_MISC1_MISC0);
}
void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
@@ -267,8 +305,6 @@ void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
u32 valid_boundary2 = 0x0;
struct dp_vc_tu_mapping_table const *tu_entry = tu_table;
- writel_relaxed(0x21, ctrl_io->base + DP_MISC1_MISC0);
-
for (; tu_entry != tu_table + ARRAY_SIZE(tu_table); ++tu_entry) {
if ((tu_entry->vic == res) &&
(tu_entry->lanes == ln_cnt) &&
@@ -454,8 +490,14 @@ u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp)
pin_cfg = dp->alt_mode.dp_cap.dlink_pin_config;
for (pin = PIN_ASSIGNMENT_A; pin < PIN_ASSIGNMENT_MAX; pin++) {
- if (pin_cfg & BIT(pin))
- break;
+ if (pin_cfg & BIT(pin)) {
+ if (dp->alt_mode.dp_status.multi_func) {
+ if (pin == PIN_ASSIGNMENT_D)
+ break;
+ } else {
+ break;
+ }
+ }
}
if (pin == PIN_ASSIGNMENT_MAX)
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.h b/drivers/video/fbdev/msm/mdss_dp_util.h
index 334c0071050d..4b28d98177be 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.h
+++ b/drivers/video/fbdev/msm/mdss_dp_util.h
@@ -164,6 +164,12 @@
#define TCSR_USB3_DP_PHYMODE 0x48
#define EDID_START_ADDRESS 0x50
+/* DP MMSS_CC registers */
+#define MMSS_DP_LINK_CMD_RCGR 0x0000
+#define MMSS_DP_LINK_CFG_RCGR 0x0004
+#define MMSS_DP_PIXEL_M 0x0048
+#define MMSS_DP_PIXEL_N 0x004C
+
/* DP HDCP 1.3 registers */
#define DP_HDCP_CTRL (0x0A0)
#define DP_HDCP_STATUS (0x0A4)
@@ -271,6 +277,8 @@ void mdss_dp_switch_usb3_phy_to_dp_mode(struct dss_io_data *tcsr_reg_io);
void mdss_dp_assert_phy_reset(struct dss_io_data *ctrl_io, bool assert);
void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
u8 ln_cnt, u32 res);
+void mdss_dp_config_misc_settings(struct dss_io_data *ctrl_io,
+ struct mdss_panel_info *pinfo);
void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io);
void mdss_dp_hpd_configure(struct dss_io_data *ctrl_io, bool enable);
void mdss_dp_aux_ctrl(struct dss_io_data *ctrl_io, bool enable);
@@ -285,7 +293,8 @@ void mdss_dp_state_ctrl(struct dss_io_data *ctrl_io, u32 data);
int mdss_dp_irq_setup(struct mdss_dp_drv_pdata *dp_drv);
void mdss_dp_irq_enable(struct mdss_dp_drv_pdata *dp_drv);
void mdss_dp_irq_disable(struct mdss_dp_drv_pdata *dp_drv);
-void mdss_dp_sw_mvid_nvid(struct dss_io_data *ctrl_io);
+void mdss_dp_sw_config_msa(struct dss_io_data *ctrl_io,
+ char lrate, struct dss_io_data *dp_cc_io);
void mdss_dp_usbpd_ext_capabilities(struct usbpd_dp_capabilities *dp_cap);
void mdss_dp_usbpd_ext_dp_status(struct usbpd_dp_status *dp_status);
u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp);
diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h
index 7091dc2f38b9..3536cb2d294d 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.h
+++ b/drivers/video/fbdev/msm/mdss_dsi.h
@@ -468,6 +468,7 @@ struct mdss_dsi_ctrl_pdata {
bool cmd_sync_wait_trigger;
struct mdss_rect roi;
+ struct mdss_dsi_dual_pu_roi dual_roi;
struct pwm_device *pwm_bl;
u32 pclk_rate;
u32 byte_clk_rate;
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index bd0c2ad32c05..7c36bb627043 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -447,28 +447,82 @@ static int mdss_dsi_roi_merge(struct mdss_dsi_ctrl_pdata *ctrl,
static char caset[] = {0x2a, 0x00, 0x00, 0x03, 0x00}; /* DTYPE_DCS_LWRITE */
static char paset[] = {0x2b, 0x00, 0x00, 0x05, 0x00}; /* DTYPE_DCS_LWRITE */
+/*
+ * Some panels can support multiple ROIs as part of the below commands
+ */
+static char caset_dual[] = {0x2a, 0x00, 0x00, 0x03, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x00};/* DTYPE_DCS_LWRITE */
+static char paset_dual[] = {0x2b, 0x00, 0x00, 0x05, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x00};/* DTYPE_DCS_LWRITE */
+
/* pack into one frame before sent */
static struct dsi_cmd_desc set_col_page_addr_cmd[] = {
{{DTYPE_DCS_LWRITE, 0, 0, 0, 1, sizeof(caset)}, caset}, /* packed */
{{DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(paset)}, paset},
};
+/* pack into one frame before sent */
+static struct dsi_cmd_desc set_dual_col_page_addr_cmd[] = { /*packed*/
+ {{DTYPE_DCS_LWRITE, 0, 0, 0, 1, sizeof(caset_dual)}, caset_dual},
+ {{DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(paset_dual)}, paset_dual},
+};
+
+
+static void __mdss_dsi_send_col_page_addr(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct mdss_rect *roi, bool dual_roi)
+{
+ if (dual_roi) {
+ struct mdss_rect *first, *second;
+
+ first = &ctrl->panel_data.panel_info.dual_roi.first_roi;
+ second = &ctrl->panel_data.panel_info.dual_roi.second_roi;
+
+ caset_dual[1] = (((first->x) & 0xFF00) >> 8);
+ caset_dual[2] = (((first->x) & 0xFF));
+ caset_dual[3] = (((first->x - 1 + first->w) & 0xFF00) >> 8);
+ caset_dual[4] = (((first->x - 1 + first->w) & 0xFF));
+ /* skip the MPU setting byte*/
+ caset_dual[6] = (((second->x) & 0xFF00) >> 8);
+ caset_dual[7] = (((second->x) & 0xFF));
+ caset_dual[8] = (((second->x - 1 + second->w) & 0xFF00) >> 8);
+ caset_dual[9] = (((second->x - 1 + second->w) & 0xFF));
+ set_dual_col_page_addr_cmd[0].payload = caset_dual;
+
+ paset_dual[1] = (((first->y) & 0xFF00) >> 8);
+ paset_dual[2] = (((first->y) & 0xFF));
+ paset_dual[3] = (((first->y - 1 + first->h) & 0xFF00) >> 8);
+ paset_dual[4] = (((first->y - 1 + first->h) & 0xFF));
+ /* skip the MPU setting byte */
+ paset_dual[6] = (((second->y) & 0xFF00) >> 8);
+ paset_dual[7] = (((second->y) & 0xFF));
+ paset_dual[8] = (((second->y - 1 + second->h) & 0xFF00) >> 8);
+ paset_dual[9] = (((second->y - 1 + second->h) & 0xFF));
+ set_dual_col_page_addr_cmd[1].payload = paset_dual;
+ } else {
+ caset[1] = (((roi->x) & 0xFF00) >> 8);
+ caset[2] = (((roi->x) & 0xFF));
+ caset[3] = (((roi->x - 1 + roi->w) & 0xFF00) >> 8);
+ caset[4] = (((roi->x - 1 + roi->w) & 0xFF));
+ set_col_page_addr_cmd[0].payload = caset;
+
+ paset[1] = (((roi->y) & 0xFF00) >> 8);
+ paset[2] = (((roi->y) & 0xFF));
+ paset[3] = (((roi->y - 1 + roi->h) & 0xFF00) >> 8);
+ paset[4] = (((roi->y - 1 + roi->h) & 0xFF));
+ set_col_page_addr_cmd[1].payload = paset;
+ }
+ pr_debug("%s Sending 2A 2B cmnd with dual_roi=%d\n", __func__,
+ dual_roi);
+
+}
static void mdss_dsi_send_col_page_addr(struct mdss_dsi_ctrl_pdata *ctrl,
struct mdss_rect *roi, int unicast)
{
struct dcs_cmd_req cmdreq;
+ struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+ bool dual_roi = pinfo->dual_roi.enabled;
- caset[1] = (((roi->x) & 0xFF00) >> 8);
- caset[2] = (((roi->x) & 0xFF));
- caset[3] = (((roi->x - 1 + roi->w) & 0xFF00) >> 8);
- caset[4] = (((roi->x - 1 + roi->w) & 0xFF));
- set_col_page_addr_cmd[0].payload = caset;
-
- paset[1] = (((roi->y) & 0xFF00) >> 8);
- paset[2] = (((roi->y) & 0xFF));
- paset[3] = (((roi->y - 1 + roi->h) & 0xFF00) >> 8);
- paset[4] = (((roi->y - 1 + roi->h) & 0xFF));
- set_col_page_addr_cmd[1].payload = paset;
+ __mdss_dsi_send_col_page_addr(ctrl, roi, dual_roi);
memset(&cmdreq, 0, sizeof(cmdreq));
cmdreq.cmds_cnt = 2;
@@ -478,7 +532,9 @@ static void mdss_dsi_send_col_page_addr(struct mdss_dsi_ctrl_pdata *ctrl,
cmdreq.rlen = 0;
cmdreq.cb = NULL;
- cmdreq.cmds = set_col_page_addr_cmd;
+ /* Send default or dual roi 2A/2B cmd */
+ cmdreq.cmds = dual_roi ? set_dual_col_page_addr_cmd :
+ set_col_page_addr_cmd;
mdss_dsi_cmdlist_put(ctrl, &cmdreq);
}
@@ -1837,20 +1893,28 @@ error:
pinfo->esd_check_enabled = false;
}
-static int mdss_dsi_parse_panel_features(struct device_node *np,
- struct mdss_dsi_ctrl_pdata *ctrl)
+static void mdss_dsi_parse_partial_update_caps(struct device_node *np,
+ struct mdss_dsi_ctrl_pdata *ctrl)
{
struct mdss_panel_info *pinfo;
-
- if (!np || !ctrl) {
- pr_err("%s: Invalid arguments\n", __func__);
- return -ENODEV;
- }
+ const char *data;
pinfo = &ctrl->panel_data.panel_info;
- pinfo->partial_update_supported = of_property_read_bool(np,
- "qcom,partial-update-enabled");
+ data = of_get_property(np, "qcom,partial-update-enabled", NULL);
+ if (data && !strcmp(data, "single_roi"))
+ pinfo->partial_update_supported =
+ PU_SINGLE_ROI;
+ else if (data && !strcmp(data, "dual_roi"))
+ pinfo->partial_update_supported =
+ PU_DUAL_ROI;
+ else if (data && !strcmp(data, "none"))
+ pinfo->partial_update_supported =
+ PU_NOT_SUPPORTED;
+ else
+ pinfo->partial_update_supported =
+ PU_NOT_SUPPORTED;
+
if (pinfo->mipi.mode == DSI_CMD_MODE) {
pinfo->partial_update_enabled = pinfo->partial_update_supported;
pr_info("%s: partial_update_enabled=%d\n", __func__,
@@ -1862,6 +1926,21 @@ static int mdss_dsi_parse_panel_features(struct device_node *np,
"qcom,partial-update-roi-merge");
}
}
+}
+
+static int mdss_dsi_parse_panel_features(struct device_node *np,
+ struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ struct mdss_panel_info *pinfo;
+
+ if (!np || !ctrl) {
+ pr_err("%s: Invalid arguments\n", __func__);
+ return -ENODEV;
+ }
+
+ pinfo = &ctrl->panel_data.panel_info;
+
+ mdss_dsi_parse_partial_update_caps(np, ctrl);
pinfo->dcs_cmd_by_left = of_property_read_bool(np,
"qcom,dcs-cmd-by-left");
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index bcd23d3c19f2..08e06c75522a 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -564,7 +564,7 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
"min_fps=%d\nmax_fps=%d\npanel_name=%s\n"
"primary_panel=%d\nis_pluggable=%d\ndisplay_id=%s\n"
"is_cec_supported=%d\nis_pingpong_split=%d\n"
- "dfps_porch_mode=%d\n",
+ "dfps_porch_mode=%d\npu_roi_cnt=%d\ndual_dsi=%d",
pinfo->partial_update_enabled,
pinfo->roi_alignment.xstart_pix_align,
pinfo->roi_alignment.width_pix_align,
@@ -577,7 +577,8 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
pinfo->panel_name, pinfo->is_prim_panel,
pinfo->is_pluggable, pinfo->display_id,
pinfo->is_cec_supported, is_pingpong_split(mfd),
- dfps_porch_mode);
+ dfps_porch_mode, pinfo->partial_update_enabled,
+ is_panel_split(mfd));
return ret;
}
@@ -3282,6 +3283,7 @@ int mdss_fb_atomic_commit(struct fb_info *info,
mfd->msm_fb_backup.atomic_commit = true;
mfd->msm_fb_backup.disp_commit.l_roi = commit_v1->left_roi;
mfd->msm_fb_backup.disp_commit.r_roi = commit_v1->right_roi;
+ mfd->msm_fb_backup.disp_commit.flags = commit_v1->flags;
mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 04e8fa4ba576..1dae41391795 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -47,6 +47,8 @@
#include <linux/msm-bus-board.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/rpm-smd.h>
+#include "soc/qcom/secure_buffer.h"
+#include <asm/cacheflush.h>
#include "mdss.h"
#include "mdss_fb.h"
@@ -64,6 +66,8 @@
#define RES_1080p (1088*1920)
#define RES_UHD (3840*2160)
+#define MDP_DEVICE_ID 0x1A
+
struct mdss_data_type *mdss_res;
static u32 mem_protect_sd_ctrl_id;
@@ -87,6 +91,7 @@ struct msm_mdp_interface mdp5 = {
#define MEM_PROTECT_SD_CTRL 0xF
#define MEM_PROTECT_SD_CTRL_FLAT 0x14
+#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
static DEFINE_SPINLOCK(mdp_lock);
static DEFINE_SPINLOCK(mdss_mdp_intr_lock);
@@ -1329,7 +1334,9 @@ int mdss_iommu_ctrl(int enable)
if (mdata->iommu_ref_cnt == 0) {
rc = mdss_smmu_detach(mdata);
if (mdss_has_quirk(mdata,
- MDSS_QUIRK_MIN_BUS_VOTE))
+ MDSS_QUIRK_MIN_BUS_VOTE) &&
+ (!mdata->sec_disp_en ||
+ !mdata->sec_cam_en))
mdss_bus_scale_set_quota(MDSS_HW_RT,
0, 0);
}
@@ -1985,6 +1992,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
mdata->pixel_ram_size = 50 * 1024;
mdata->rects_per_sspp[MDSS_MDP_PIPE_TYPE_DMA] = 2;
+ mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_SWITCH;
set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
set_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map);
set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
@@ -2015,6 +2023,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
mdata->has_wb_ubwc = true;
set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
set_bit(MDSS_CAPS_AVR_SUPPORTED, mdata->mdss_caps_map);
+ set_bit(MDSS_CAPS_SEC_DETACH_SMMU, mdata->mdss_caps_map);
break;
default:
mdata->max_target_zorder = 4; /* excluding base layer */
@@ -4939,29 +4948,115 @@ static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
}
}
-int mdss_mdp_secure_display_ctrl(unsigned int enable)
+int mdss_mdp_secure_session_ctrl(unsigned int enable, u64 flags)
{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
struct sd_ctrl_req {
unsigned int enable;
} __attribute__ ((__packed__)) request;
unsigned int resp = -1;
int ret = 0;
+ uint32_t sid_info;
struct scm_desc desc;
- desc.args[0] = request.enable = enable;
- desc.arginfo = SCM_ARGS(1);
+ if (test_bit(MDSS_CAPS_SEC_DETACH_SMMU, mdata->mdss_caps_map)) {
+ /*
+ * Prepare syscall to hypervisor to switch the secure_vmid
+ * between secure and non-secure contexts
+ */
+ /* MDP secure SID */
+ sid_info = 0x1;
+ desc.arginfo = SCM_ARGS(4, SCM_VAL, SCM_RW, SCM_VAL, SCM_VAL);
+ desc.args[0] = MDP_DEVICE_ID;
+ desc.args[1] = SCM_BUFFER_PHYS(&sid_info);
+ desc.args[2] = sizeof(uint32_t);
+
- if (!is_scm_armv8()) {
- ret = scm_call(SCM_SVC_MP, MEM_PROTECT_SD_CTRL,
- &request, sizeof(request), &resp, sizeof(resp));
- } else {
- ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ pr_debug("Enable/Disable: %d, Flags %llx\n", enable, flags);
+ if (enable) {
+ if (flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) {
+ desc.args[3] = VMID_CP_SEC_DISPLAY;
+ mdata->sec_disp_en = 1;
+ } else if (flags & MDP_SECURE_CAMERA_OVERLAY_SESSION) {
+ desc.args[3] = VMID_CP_CAMERA_PREVIEW;
+ mdata->sec_cam_en = 1;
+ } else {
+ return 0;
+ }
+
+ /* detach smmu contexts */
+ ret = mdss_smmu_detach(mdata);
+ if (ret) {
+ pr_err("Error while detaching smmu contexts ret = %d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ /* let the driver think smmu is still attached */
+ mdata->iommu_attached = true;
+
+ dmac_flush_range(&sid_info, &sid_info + 1);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
mem_protect_sd_ctrl_id), &desc);
- resp = desc.ret[0];
- }
+ if (ret) {
+ pr_err("Error scm_call MEM_PROTECT_SD_CTRL(%u) ret=%dm resp=%x\n",
+ enable, ret, resp);
+ return -EINVAL;
+ }
+ resp = desc.ret[0];
- pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x",
+ pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x\n",
+ enable, ret, resp);
+ } else {
+ desc.args[3] = VMID_CP_PIXEL;
+ if (flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
+ mdata->sec_disp_en = 0;
+ else if (flags & MDP_SECURE_CAMERA_OVERLAY_SESSION)
+ mdata->sec_cam_en = 0;
+
+ dmac_flush_range(&sid_info, &sid_info + 1);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ mem_protect_sd_ctrl_id), &desc);
+ if (ret)
+ MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl",
+ "dsi0_phy", "dsi1_ctrl",
+ "dsi1_phy", "vbif", "vbif_nrt",
+ "dbg_bus", "vbif_dbg_bus",
+ "panic");
+ resp = desc.ret[0];
+
+ pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x\n",
+ enable, ret, resp);
+
+ /* re-attach smmu contexts */
+ mdata->iommu_attached = false;
+ ret = mdss_smmu_attach(mdata);
+ if (ret) {
+ pr_err("Error while attaching smmu contexts ret = %d\n",
+ ret);
+ return -EINVAL;
+ }
+ }
+ MDSS_XLOG(enable);
+ } else {
+ desc.args[0] = request.enable = enable;
+ desc.arginfo = SCM_ARGS(1);
+
+ if (!is_scm_armv8()) {
+ ret = scm_call(SCM_SVC_MP, MEM_PROTECT_SD_CTRL,
+ &request,
+ sizeof(request),
+ &resp,
+ sizeof(resp));
+ } else {
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ mem_protect_sd_ctrl_id), &desc);
+ resp = desc.ret[0];
+ }
+
+ pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x\n",
enable, ret, resp);
+ }
if (ret)
return ret;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 93f5f9a51a63..e1c3841c82de 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -92,6 +92,8 @@
*/
#define ENABLE_PIXEL_EXT_ONLY 0x80000000
+/* Pipe flag to indicate this pipe contains secure camera buffer */
+#define MDP_SECURE_CAMERA_OVERLAY_SESSION 0x100000000
/**
* Destination Scaler control flags setting
*
@@ -562,6 +564,8 @@ struct mdss_mdp_mixer {
bool valid_roi;
bool roi_changed;
struct mdss_rect roi;
+ bool dsc_enabled;
+ bool dsc_merge_enabled;
u8 cursor_enabled;
u16 cursor_hotx;
@@ -627,7 +631,7 @@ struct mdss_mdp_img_data {
dma_addr_t addr;
unsigned long len;
u32 offset;
- u32 flags;
+ u64 flags;
u32 dir;
u32 domain;
bool mapped;
@@ -811,7 +815,7 @@ struct mdss_mdp_pipe {
struct file *file;
bool is_handed_off;
- u32 flags;
+ u64 flags;
u32 bwc_mode;
/* valid only when pipe's output is crossing both layer mixers */
@@ -827,6 +831,9 @@ struct mdss_mdp_pipe {
struct mdss_mdp_format_params *src_fmt;
struct mdss_mdp_plane_sizes src_planes;
+ /* flag to re-store roi in case of pu dual-roi validation error */
+ bool restore_roi;
+
/* compression ratio from the source format */
struct mult_factor comp_ratio;
@@ -916,6 +923,7 @@ struct mdss_overlay_private {
u32 splash_mem_addr;
u32 splash_mem_size;
u32 sd_enabled;
+ u32 sc_enabled;
struct sw_sync_timeline *vsync_timeline;
struct mdss_mdp_vsync_handler vsync_retire_handler;
@@ -1289,6 +1297,15 @@ static inline void mdss_update_sd_client(struct mdss_data_type *mdata,
atomic_add_unless(&mdss_res->sd_client_count, -1, 0);
}
+static inline void mdss_update_sc_client(struct mdss_data_type *mdata,
+ bool status)
+{
+ if (status)
+ atomic_inc(&mdata->sc_client_count);
+ else
+ atomic_add_unless(&mdss_res->sc_client_count, -1, 0);
+}
+
static inline int mdss_mdp_get_wb_ctl_support(struct mdss_data_type *mdata,
bool rotator_session)
{
@@ -1506,6 +1523,7 @@ static inline bool mdss_mdp_is_map_needed(struct mdss_data_type *mdata,
struct mdss_mdp_img_data *data)
{
u32 is_secure_ui = data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION;
+ u64 is_secure_camera = data->flags & MDP_SECURE_CAMERA_OVERLAY_SESSION;
/*
* For ULT Targets we need SMMU Map, to issue map call for secure Display.
@@ -1513,6 +1531,10 @@ static inline bool mdss_mdp_is_map_needed(struct mdss_data_type *mdata,
if (is_secure_ui && !mdss_has_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP))
return false;
+ if (is_secure_camera && test_bit(MDSS_CAPS_SEC_DETACH_SMMU,
+ mdata->mdss_caps_map))
+ return false;
+
return true;
}
@@ -1569,7 +1591,7 @@ unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked);
int mdss_mdp_vsync_clk_enable(int enable, bool locked);
void mdss_mdp_clk_ctrl(int enable);
struct mdss_data_type *mdss_mdp_get_mdata(void);
-int mdss_mdp_secure_display_ctrl(unsigned int enable);
+int mdss_mdp_secure_session_ctrl(unsigned int enable, u64 flags);
int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd);
int mdss_mdp_dfps_update_params(struct msm_fb_data_type *mfd,
@@ -1603,7 +1625,7 @@ int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd);
void mdss_mdp_overlay_set_chroma_sample(
struct mdss_mdp_pipe *pipe);
int mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
- u32 flags);
+ u64 flags);
int mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe);
struct mdss_mdp_pipe *mdss_mdp_pipe_assign(struct mdss_data_type *mdata,
struct mdss_mdp_mixer *mixer, u32 ndx,
@@ -1836,7 +1858,7 @@ struct mult_factor *mdss_mdp_get_comp_factor(u32 format,
int mdss_mdp_data_map(struct mdss_mdp_data *data, bool rotator, int dir);
void mdss_mdp_data_free(struct mdss_mdp_data *data, bool rotator, int dir);
int mdss_mdp_data_get_and_validate_size(struct mdss_mdp_data *data,
- struct msmfb_data *planes, int num_planes, u32 flags,
+ struct msmfb_data *planes, int num_planes, u64 flags,
struct device *dev, bool rotator, int dir,
struct mdp_layer_buffer *buffer);
u32 mdss_get_panel_framerate(struct msm_fb_data_type *mfd);
@@ -1847,7 +1869,7 @@ void mdss_mdp_intersect_rect(struct mdss_rect *res_rect,
const struct mdss_rect *sci_rect);
void mdss_mdp_crop_rect(struct mdss_rect *src_rect,
struct mdss_rect *dst_rect,
- const struct mdss_rect *sci_rect);
+ const struct mdss_rect *sci_rect, bool normalize);
void rect_copy_mdss_to_mdp(struct mdp_rect *user, struct mdss_rect *kernel);
void rect_copy_mdp_to_mdss(struct mdp_rect *user, struct mdss_rect *kernel);
bool mdss_rect_overlap_check(struct mdss_rect *rect1, struct mdss_rect *rect2);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 1d61653b76bb..9ed44937efe6 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -728,7 +728,7 @@ int mdss_mdp_get_pipe_overlap_bw(struct mdss_mdp_pipe *pipe,
/* crop rectangles */
if (roi && !mixer->ctl->is_video_mode && !pipe->src_split_req)
- mdss_mdp_crop_rect(&src, &dst, roi);
+ mdss_mdp_crop_rect(&src, &dst, roi, true);
/*
* when doing vertical decimation lines will be skipped, hence there is
@@ -1108,7 +1108,7 @@ int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
/* crop rectangles */
if (roi && !mixer->ctl->is_video_mode && !pipe->src_split_req)
- mdss_mdp_crop_rect(&src, &dst, roi);
+ mdss_mdp_crop_rect(&src, &dst, roi, true);
pr_debug("v_total=%d, xres=%d fps=%d\n", v_total, xres, fps);
pr_debug("src(w,h)(%d,%d) dst(w,h)(%d,%d) dst_y=%d bpp=%d yuv=%d\n",
@@ -2743,6 +2743,7 @@ static inline void __dsc_enable(struct mdss_mdp_mixer *mixer)
{
mdss_mdp_pingpong_write(mixer->pingpong_base,
MDSS_MDP_REG_PP_DSC_MODE, 1);
+ mixer->dsc_enabled = true;
}
static inline void __dsc_disable(struct mdss_mdp_mixer *mixer)
@@ -2762,6 +2763,13 @@ static inline void __dsc_disable(struct mdss_mdp_mixer *mixer)
return;
}
writel_relaxed(0, offset + MDSS_MDP_REG_DSC_COMMON_MODE);
+ mixer->dsc_enabled = false;
+ mixer->dsc_merge_enabled = false;
+}
+
+static bool __is_dsc_merge_enabled(u32 common_mode)
+{
+ return common_mode & BIT(1);
}
static void __dsc_config(struct mdss_mdp_mixer *mixer,
@@ -2774,6 +2782,7 @@ static void __dsc_config(struct mdss_mdp_mixer *mixer,
u32 initial_lines = dsc->initial_lines;
bool is_cmd_mode = !(mode & BIT(2));
+ mixer->dsc_merge_enabled = __is_dsc_merge_enabled(mode);
data = mdss_mdp_pingpong_read(mixer->pingpong_base,
MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP);
data |= BIT(18); /* endian flip */
@@ -2928,11 +2937,6 @@ static void __dsc_config_thresh(struct mdss_mdp_mixer *mixer,
}
}
-static bool __is_dsc_merge_enabled(u32 common_mode)
-{
- return common_mode & BIT(1);
-}
-
static bool __dsc_is_3d_mux_enabled(struct mdss_mdp_ctl *ctl,
struct mdss_panel_info *pinfo)
{
@@ -3291,8 +3295,19 @@ void mdss_mdp_ctl_dsc_setup(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_ctl *sctl;
struct mdss_panel_info *spinfo;
- if (!is_dsc_compression(pinfo))
+ /*
+ * Check for dynamic resolution switch from DSC On to DSC Off
+ * and disable DSC
+ */
+ if ((ctl->pending_mode_switch == SWITCH_RESOLUTION) &&
+ ctl->is_master &&
+ (!is_dsc_compression(pinfo))) {
+ if (ctl->mixer_left && ctl->mixer_left->dsc_enabled)
+ __dsc_disable(ctl->mixer_left);
+ if (ctl->mixer_right && ctl->mixer_right->dsc_enabled)
+ __dsc_disable(ctl->mixer_right);
return;
+ }
if (!ctl->is_master) {
pr_debug("skip slave ctl because master will program for both\n");
@@ -3703,6 +3718,30 @@ skip_intf_reconfig:
ctl->mixer_right->width = ctl->width / 2;
ctl->mixer_right->height = ctl->height;
}
+
+ /*
+ * If we are transitioning from DSC On + DSC Merge to DSC Off
+ * the 3D mux needs to be enabled
+ */
+ if (!is_dsc_compression(&pdata->panel_info) &&
+ ctl->mixer_left &&
+ ctl->mixer_left->dsc_enabled &&
+ ctl->mixer_left->dsc_merge_enabled) {
+ ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+ MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
+ }
+
+ /*
+ * If we are transitioning from DSC Off to DSC On + DSC Merge
+ * the 3D mux needs to be disabled
+ */
+ if (is_dsc_compression(&pdata->panel_info) &&
+ ctl->mixer_left &&
+ !ctl->mixer_left->dsc_enabled &&
+ pdata->panel_info.dsc_enc_total != 1) {
+ ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+ MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
+ }
} else {
/*
* Handles MDP_SPLIT_MODE_NONE, MDP_DUAL_LM_DUAL_DISPLAY and
@@ -3717,7 +3756,6 @@ skip_intf_reconfig:
ctl->border_x_off = pdata->panel_info.lcdc.border_left;
ctl->border_y_off = pdata->panel_info.lcdc.border_top;
-
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_debug.c
index 4c4fa9ea98d0..711d2d222c7d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_debug.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.c
@@ -1868,7 +1868,7 @@ static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe)
int smps[4];
int i;
- seq_printf(s, "\nSSPP #%d type=%s ndx=%x flags=0x%08x play_cnt=%u xin_id=%d\n",
+ seq_printf(s, "\nSSPP #%d type=%s ndx=%x flags=0x%16llx play_cnt=%u xin_id=%d\n",
pipe->num, mdss_mdp_pipetype2str(pipe->type),
pipe->ndx, pipe->flags, pipe->play_cnt, pipe->xin_id);
seq_printf(s, "\tstage=%d alpha=0x%x transp=0x%x blend_op=%d\n",
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 4eb121f01aca..f08af5d6edd3 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -2029,8 +2029,22 @@ static void mdss_mdp_cmd_dsc_reconfig(struct mdss_mdp_ctl *ctl)
return;
pinfo = &ctl->panel_data->panel_info;
- if (pinfo->compression_mode != COMPRESSION_DSC)
- return;
+ if (pinfo->compression_mode != COMPRESSION_DSC) {
+ /*
+ * Check for a dynamic resolution switch from DSC On to
+ * DSC Off and call mdss_mdp_ctl_dsc_setup to disable DSC
+ */
+ if (ctl->pending_mode_switch == SWITCH_RESOLUTION) {
+ if (ctl->mixer_left && ctl->mixer_left->dsc_enabled)
+ changed = true;
+ if (is_split_lm(ctl->mfd) &&
+ ctl->mixer_right &&
+ ctl->mixer_right->dsc_enabled)
+ changed = true;
+ } else {
+ return;
+ }
+ }
changed = ctl->mixer_left->roi_changed;
if (is_split_lm(ctl->mfd))
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 353d07ad64ac..036e4e39efda 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -522,6 +522,56 @@ static void __update_avr_info(struct mdss_mdp_ctl *ctl,
}
/*
+ * __validate_dual_partial_update() - validation function for
+ * dual partial update ROIs
+ *
+ * - This function uses the commit structs "left_roi" and "right_roi"
+ * to pass the first and second ROI information for the multiple
+ * partial update feature.
+ * - Supports only SINGLE DSI with a max of 2 PU ROIs.
+ * - Not supported along with destination scalar.
+ * - Not supported when source-split is disabled.
+ * - Not supported with ping-pong split enabled.
+ */
+static int __validate_dual_partial_update(
+ struct mdss_mdp_ctl *ctl, struct mdp_layer_commit_v1 *commit)
+{
+ struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+ struct mdss_data_type *mdata = ctl->mdata;
+ struct mdss_rect first_roi, second_roi;
+ int ret = 0;
+ struct mdp_destination_scaler_data *ds_data = commit->dest_scaler;
+
+ if (!mdata->has_src_split
+ || (is_panel_split(ctl->mfd))
+ || (is_pingpong_split(ctl->mfd))
+ || (ds_data && commit->dest_scaler_cnt &&
+ ds_data->flags & MDP_DESTSCALER_ENABLE)) {
+ pr_err("Invalid mode multi pu src_split:%d, split_mode:%d, ds_cnt:%d\n",
+ mdata->has_src_split, ctl->mfd->split_mode,
+ commit->dest_scaler_cnt);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ rect_copy_mdp_to_mdss(&commit->left_roi, &first_roi);
+ rect_copy_mdp_to_mdss(&commit->right_roi, &second_roi);
+
+ if (!is_valid_pu_dual_roi(pinfo, &first_roi, &second_roi))
+ ret = -EINVAL;
+
+ MDSS_XLOG(ctl->num, first_roi.x, first_roi.y, first_roi.w, first_roi.h,
+ second_roi.x, second_roi.y, second_roi.w, second_roi.h,
+ ret);
+ pr_debug("Multiple PU ROIs - roi0:{%d,%d,%d,%d}, roi1{%d,%d,%d,%d}, ret:%d\n",
+ first_roi.x, first_roi.y, first_roi.w, first_roi.h,
+ second_roi.x, second_roi.y, second_roi.w,
+ second_roi.h, ret);
+end:
+ return ret;
+}
+
+/*
* __layer_needs_src_split() - check needs source split configuration
* @layer: input layer
*
@@ -936,7 +986,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
{
int ret = 0;
u32 left_lm_w = left_lm_w_from_mfd(mfd);
- u32 flags;
+ u64 flags;
struct mdss_mdp_mixer *mixer = NULL;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
@@ -978,6 +1028,8 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
pipe->flags |= MDP_BWC_EN;
if (layer->flags & MDP_LAYER_PP)
pipe->flags |= MDP_OVERLAY_PP_CFG_EN;
+ if (layer->flags & MDP_LAYER_SECURE_CAMERA_SESSION)
+ pipe->flags |= MDP_SECURE_CAMERA_OVERLAY_SESSION;
pipe->scaler.enable = (layer->flags & SCALER_ENABLED);
pipe->is_fg = layer->flags & MDP_LAYER_FORGROUND;
@@ -1000,6 +1052,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
pipe->is_handed_off = false;
pipe->async_update = (layer->flags & MDP_LAYER_ASYNC) ? true : false;
pipe->csc_coeff_set = layer->color_space;
+ pipe->restore_roi = false;
if (mixer->ctl) {
pipe->dst.x += mixer->ctl->border_x_off;
@@ -1007,7 +1060,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
pr_debug("border{%d,%d}\n", mixer->ctl->border_x_off,
mixer->ctl->border_y_off);
}
- pr_debug("src{%d,%d,%d,%d}, dst{%d,%d,%d,%d}\n",
+ pr_debug("pipe:%d src{%d,%d,%d,%d}, dst{%d,%d,%d,%d}\n", pipe->num,
pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
@@ -1348,7 +1401,7 @@ static struct mdss_mdp_data *__map_layer_buffer(struct msm_fb_data_type *mfd,
struct mdp_layer_buffer *buffer;
struct msmfb_data image;
int i, ret;
- u32 flags;
+ u64 flags;
struct mdss_mdp_validate_info_t *vitem;
for (i = 0; i < layer_count; i++) {
@@ -1374,7 +1427,8 @@ static struct mdss_mdp_data *__map_layer_buffer(struct msm_fb_data_type *mfd,
}
flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
- MDP_SECURE_DISPLAY_OVERLAY_SESSION));
+ MDP_SECURE_DISPLAY_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION));
if (buffer->planes[0].fd < 0) {
pr_err("invalid file descriptor for layer buffer\n");
@@ -1585,34 +1639,48 @@ end:
}
/*
- * __validate_secure_display() - validate secure display
+ * __validate_secure_session() - validate various secure sessions
*
* This function travers through used pipe list and checks if any pipe
- * is with secure display enabled flag. It fails if client tries to stage
- * unsecure content with secure display session.
+ * is with secure display, secure video and secure camera enabled flag.
+ * It fails if client tries to stage unsecure content with
+ * secure display session and secure camera with secure video sessions.
*
*/
-static int __validate_secure_display(struct mdss_overlay_private *mdp5_data)
+static int __validate_secure_session(struct mdss_overlay_private *mdp5_data)
{
struct mdss_mdp_pipe *pipe, *tmp;
uint32_t sd_pipes = 0, nonsd_pipes = 0;
+ uint32_t secure_vid_pipes = 0, secure_cam_pipes = 0;
mutex_lock(&mdp5_data->list_lock);
list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
sd_pipes++;
+ else if (pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+ secure_vid_pipes++;
+ else if (pipe->flags & MDP_SECURE_CAMERA_OVERLAY_SESSION)
+ secure_cam_pipes++;
else
nonsd_pipes++;
}
mutex_unlock(&mdp5_data->list_lock);
- pr_debug("pipe count:: secure display:%d non-secure:%d\n",
- sd_pipes, nonsd_pipes);
+ pr_debug("pipe count:: secure display:%d non-secure:%d secure-vid:%d,secure-cam:%d\n",
+ sd_pipes, nonsd_pipes, secure_vid_pipes, secure_cam_pipes);
- if ((sd_pipes || mdss_get_sd_client_cnt()) && nonsd_pipes) {
+ if ((sd_pipes || mdss_get_sd_client_cnt()) &&
+ (nonsd_pipes || secure_vid_pipes ||
+ secure_cam_pipes)) {
pr_err("non-secure layer validation request during secure display session\n");
- pr_err(" secure client cnt:%d secure pipe cnt:%d non-secure pipe cnt:%d\n",
- mdss_get_sd_client_cnt(), sd_pipes, nonsd_pipes);
+ pr_err(" secure client cnt:%d secure pipe:%d non-secure pipe:%d, secure-vid:%d, secure-cam:%d\n",
+ mdss_get_sd_client_cnt(), sd_pipes, nonsd_pipes,
+ secure_vid_pipes, secure_cam_pipes);
+ return -EINVAL;
+ } else if (secure_cam_pipes && (secure_vid_pipes || sd_pipes)) {
+ pr_err(" incompatible layers during secure camera session\n");
+ pr_err("secure-camera cnt:%d secure video:%d secure display:%d\n",
+ secure_cam_pipes, secure_vid_pipes, sd_pipes);
return -EINVAL;
} else {
return 0;
@@ -2388,7 +2456,7 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
validate_skip:
__handle_free_list(mdp5_data, validate_info_list, layer_count);
- ret = __validate_secure_display(mdp5_data);
+ ret = __validate_secure_session(mdp5_data);
validate_exit:
pr_debug("err=%d total_layer:%d left:%d right:%d rec0_rel_ndx=0x%x rec1_rel_ndx=0x%x rec0_destroy_ndx=0x%x rec1_destroy_ndx=0x%x processed=%d\n",
@@ -2402,16 +2470,20 @@ validate_exit:
mutex_lock(&mdp5_data->list_lock);
list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
if (IS_ERR_VALUE(ret)) {
- if ((pipe->ndx & rec_release_ndx[0]) ||
- (pipe->ndx & rec_release_ndx[1])) {
+ if (((pipe->ndx & rec_release_ndx[0]) &&
+ (pipe->multirect.num == 0)) ||
+ ((pipe->ndx & rec_release_ndx[1]) &&
+ (pipe->multirect.num == 1))) {
mdss_mdp_smp_unreserve(pipe);
pipe->params_changed = 0;
pipe->dirty = true;
if (!list_empty(&pipe->list))
list_del_init(&pipe->list);
mdss_mdp_pipe_destroy(pipe);
- } else if ((pipe->ndx & rec_destroy_ndx[0]) ||
- (pipe->ndx & rec_destroy_ndx[1])) {
+ } else if (((pipe->ndx & rec_destroy_ndx[0]) &&
+ (pipe->multirect.num == 0)) ||
+ ((pipe->ndx & rec_destroy_ndx[1]) &&
+ (pipe->multirect.num == 1))) {
/*
* cleanup/destroy list pipes should move back
* to destroy list. Next/current kickoff cycle
@@ -2625,6 +2697,7 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
{
struct mdss_overlay_private *mdp5_data;
struct mdp_destination_scaler_data *ds_data;
+ struct mdss_panel_info *pinfo;
int rc = 0;
if (!mfd || !commit) {
@@ -2658,6 +2731,23 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
}
}
+ pinfo = mfd->panel_info;
+ if (pinfo->partial_update_enabled == PU_DUAL_ROI) {
+ if (commit->flags & MDP_COMMIT_PARTIAL_UPDATE_DUAL_ROI) {
+ rc = __validate_dual_partial_update(mdp5_data->ctl,
+ commit);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("Multiple pu pre-validate fail\n");
+ return rc;
+ }
+ }
+ } else {
+ if (commit->flags & MDP_COMMIT_PARTIAL_UPDATE_DUAL_ROI) {
+ pr_err("Multiple partial update not supported!\n");
+ return -EINVAL;
+ }
+ }
+
ds_data = commit->dest_scaler;
if (ds_data && commit->dest_scaler_cnt &&
(ds_data->flags & MDP_DESTSCALER_ENABLE)) {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 9bdc66232dd5..946e33033ab7 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -396,7 +396,7 @@ int mdss_mdp_overlay_req_check(struct msm_fb_data_type *mfd,
}
int mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
- u32 flags)
+ u64 flags)
{
struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
struct mdss_mdp_perf_params perf;
@@ -1188,11 +1188,10 @@ static void __overlay_pipe_cleanup(struct msm_fb_data_type *mfd,
list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
/*
- * in case of secure UI, the buffer needs to be released as
- * soon as session is closed.
+ * free the buffers on the same cycle instead of waiting for
+ * next kickoff
*/
- if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
- mdss_mdp_overlay_buf_free(mfd, buf);
+ mdss_mdp_overlay_buf_free(mfd, buf);
}
mdss_mdp_pipe_destroy(pipe);
@@ -1477,7 +1476,7 @@ static int __overlay_queue_pipes(struct msm_fb_data_type *mfd)
*/
if (mdss_get_sd_client_cnt() &&
!(pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)) {
- pr_warn("Non secure pipe during secure display: %u: %08X, skip\n",
+ pr_warn("Non secure pipe during secure display: %u: %16llx, skip\n",
pipe->num, pipe->flags);
continue;
}
@@ -1861,12 +1860,109 @@ int mdss_mode_switch_post(struct msm_fb_data_type *mfd, u32 mode)
return rc;
}
+static void __restore_pipe(struct mdss_mdp_pipe *pipe)
+{
+
+ if (!pipe->restore_roi)
+ return;
+
+ pr_debug("restoring pipe:%d dst from:{%d,%d,%d,%d} to:{%d,%d,%d,%d}\n",
+ pipe->num, pipe->dst.x, pipe->dst.y,
+ pipe->dst.w, pipe->dst.h, pipe->layer.dst_rect.x,
+ pipe->layer.dst_rect.y, pipe->layer.dst_rect.w,
+ pipe->layer.dst_rect.h);
+ pr_debug("restoring pipe:%d src from:{%d,%d,%d,%d} to:{%d,%d,%d,%d}\n",
+ pipe->num, pipe->src.x, pipe->src.y,
+ pipe->src.w, pipe->src.h, pipe->layer.src_rect.x,
+ pipe->layer.src_rect.y, pipe->layer.src_rect.w,
+ pipe->layer.src_rect.h);
+
+ pipe->src.x = pipe->layer.src_rect.x;
+ pipe->src.y = pipe->layer.src_rect.y;
+ pipe->src.w = pipe->layer.src_rect.w;
+ pipe->src.h = pipe->layer.src_rect.h;
+
+ pipe->dst.x = pipe->layer.dst_rect.x;
+ pipe->dst.y = pipe->layer.dst_rect.y;
+ pipe->dst.w = pipe->layer.dst_rect.w;
+ pipe->dst.h = pipe->layer.dst_rect.h;
+}
+
+/**
+ * __crop_adjust_pipe_rect() - Adjust pipe roi for dual partial
+ * update feature.
+ * @pipe: pipe to check against.
+ * @dual_roi: ROIs for the dual partial update.
+ *
+ * For dual PU ROI case, the layer mixer is configured
+ * by merging the two width aligned ROIs (first_roi and
+ * second_roi) vertically. So, the y-offset of all the
+ * pipes belonging to the second_roi needs to be adjusted
+ * accordingly. Also the cropping of the pipe's src/dst
+ * rect has to be done with respect to the ROI the pipe
+ * is intersecting with, before the adjustment.
+ */
+static int __crop_adjust_pipe_rect(struct mdss_mdp_pipe *pipe,
+ struct mdss_dsi_dual_pu_roi *dual_roi)
+{
+ u32 adjust_h;
+ u32 roi_y_pos;
+ int ret = 0;
+
+ pipe->restore_roi = false;
+ if (mdss_rect_overlap_check(&pipe->dst, &dual_roi->first_roi)) {
+ mdss_mdp_crop_rect(&pipe->src, &pipe->dst,
+ &dual_roi->first_roi, false);
+ pipe->restore_roi = true;
+
+ } else if (mdss_rect_overlap_check(&pipe->dst, &dual_roi->second_roi)) {
+ mdss_mdp_crop_rect(&pipe->src, &pipe->dst,
+ &dual_roi->second_roi, false);
+ adjust_h = dual_roi->second_roi.y;
+ roi_y_pos = dual_roi->first_roi.y + dual_roi->first_roi.h;
+
+ if (adjust_h > roi_y_pos) {
+ adjust_h = adjust_h - roi_y_pos;
+ pipe->dst.y -= adjust_h;
+ } else {
+ pr_err("wrong y-pos adjust_y:%d roi_y_pos:%d\n",
+ adjust_h, roi_y_pos);
+ ret = -EINVAL;
+ }
+ pipe->restore_roi = true;
+
+ } else {
+ ret = -EINVAL;
+ }
+
+ pr_debug("crop/adjusted p:%d src:{%d,%d,%d,%d} dst:{%d,%d,%d,%d} r:%d\n",
+ pipe->num, pipe->src.x, pipe->src.y,
+ pipe->src.w, pipe->src.h, pipe->dst.x,
+ pipe->dst.y, pipe->dst.w, pipe->dst.h,
+ pipe->restore_roi);
+
+ if (ret) {
+ pr_err("dual roi error p%d dst{%d,%d,%d,%d}",
+ pipe->num, pipe->dst.x, pipe->dst.y, pipe->dst.w,
+ pipe->dst.h);
+ pr_err(" roi1{%d,%d,%d,%d} roi2{%d,%d,%d,%d}\n",
+ dual_roi->first_roi.x, dual_roi->first_roi.y,
+ dual_roi->first_roi.w, dual_roi->first_roi.h,
+ dual_roi->second_roi.x, dual_roi->second_roi.y,
+ dual_roi->second_roi.w, dual_roi->second_roi.h);
+ }
+
+ return ret;
+}
+
static void __validate_and_set_roi(struct msm_fb_data_type *mfd,
struct mdp_display_commit *commit)
{
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+ struct mdss_dsi_dual_pu_roi *dual_roi = &pinfo->dual_roi;
struct mdss_rect l_roi = {0}, r_roi = {0};
struct mdp_rect tmp_roi = {0};
bool skip_partial_update = true;
@@ -1881,6 +1977,39 @@ static void __validate_and_set_roi(struct msm_fb_data_type *mfd,
rect_copy_mdp_to_mdss(&commit->l_roi, &l_roi);
rect_copy_mdp_to_mdss(&commit->r_roi, &r_roi);
+ /*
+ * In case of dual partial update ROI, update the two ROIs to dual_roi
+ * struct and combine both the ROIs and assign it as a merged ROI in
+ * l_roi, as MDP would need only the merged ROI information for all
+ * LM settings.
+ */
+ if (pinfo->partial_update_enabled == PU_DUAL_ROI) {
+ if (commit->flags & MDP_COMMIT_PARTIAL_UPDATE_DUAL_ROI) {
+
+ if (!is_valid_pu_dual_roi(pinfo, &l_roi, &r_roi)) {
+ pr_err("Invalid dual roi - fall back to full screen update\n");
+ goto set_roi;
+ }
+
+ dual_roi->first_roi = (struct mdss_rect)
+ {l_roi.x, l_roi.y, l_roi.w, l_roi.h};
+ dual_roi->second_roi = (struct mdss_rect)
+ {r_roi.x, r_roi.y, r_roi.w, r_roi.h};
+ dual_roi->enabled = true;
+
+ l_roi.h += r_roi.h;
+ memset(&r_roi, 0, sizeof(struct mdss_rect));
+
+ pr_debug("Dual ROI - first_roi:{%d,%d,%d,%d}, second_roi:{%d,%d,%d,%d}\n",
+ dual_roi->first_roi.x, dual_roi->first_roi.y,
+ dual_roi->first_roi.w, dual_roi->first_roi.h,
+ dual_roi->second_roi.x, dual_roi->second_roi.y,
+ dual_roi->second_roi.w, dual_roi->second_roi.h);
+ } else {
+ dual_roi->enabled = false;
+ }
+ }
+
pr_debug("input: l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
l_roi.x, l_roi.y, l_roi.w, l_roi.h,
r_roi.x, r_roi.y, r_roi.w, r_roi.h);
@@ -1926,12 +2055,24 @@ static void __validate_and_set_roi(struct msm_fb_data_type *mfd,
}
list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+ pr_debug("pipe:%d src:{%d,%d,%d,%d} dst:{%d,%d,%d,%d}\n",
+ pipe->num, pipe->src.x, pipe->src.y,
+ pipe->src.w, pipe->src.h, pipe->dst.x,
+ pipe->dst.y, pipe->dst.w, pipe->dst.h);
+
+ if (dual_roi->enabled) {
+ if (__crop_adjust_pipe_rect(pipe, dual_roi)) {
+ skip_partial_update = true;
+ break;
+ }
+ }
+
if (!__is_roi_valid(pipe, &l_roi, &r_roi)) {
skip_partial_update = true;
- pr_err("error. invalid pu config for pipe%d: %d,%d,%d,%d\n",
- pipe->num,
- pipe->dst.x, pipe->dst.y,
- pipe->dst.w, pipe->dst.h);
+ pr_err("error. invalid pu config for pipe%d: %d,%d,%d,%d, dual_pu_roi:%d\n",
+ pipe->num, pipe->dst.x, pipe->dst.y,
+ pipe->dst.w, pipe->dst.h,
+ dual_roi->enabled);
break;
}
}
@@ -1946,17 +2087,143 @@ set_roi:
ctl->mixer_right->width,
ctl->mixer_right->height};
}
+
+ if (pinfo->partial_update_enabled == PU_DUAL_ROI) {
+ if (dual_roi->enabled) {
+ /* we failed pu validation, restore pipes */
+ list_for_each_entry(pipe,
+ &mdp5_data->pipes_used, list)
+ __restore_pipe(pipe);
+ }
+ dual_roi->enabled = false;
+ }
}
- pr_debug("after processing: %s l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
+ pr_debug("after processing: %s l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d, dual_pu_roi:%d\n",
(l_roi.w && l_roi.h && r_roi.w && r_roi.h) ? "left+right" :
((l_roi.w && l_roi.h) ? "left-only" : "right-only"),
l_roi.x, l_roi.y, l_roi.w, l_roi.h,
- r_roi.x, r_roi.y, r_roi.w, r_roi.h);
+ r_roi.x, r_roi.y, r_roi.w, r_roi.h,
+ dual_roi->enabled);
mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
}
+/*
+ * Enables/disable secure (display or camera) sessions
+ */
+static int __overlay_secure_ctrl(struct msm_fb_data_type *mfd)
+{
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+ struct mdss_mdp_pipe *pipe;
+ int ret = 0;
+ int sd_in_pipe = 0;
+ int sc_in_pipe = 0;
+ u64 pipes_flags = 0;
+
+ list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+ pipes_flags |= pipe->flags;
+ if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) {
+ sd_in_pipe = 1;
+ pr_debug("Secure pipe: %u : %16llx\n",
+ pipe->num, pipe->flags);
+ } else if (pipe->flags & MDP_SECURE_CAMERA_OVERLAY_SESSION) {
+ sc_in_pipe = 1;
+ pr_debug("Secure camera: %u: %16llx\n",
+ pipe->num, pipe->flags);
+ }
+ }
+
+ MDSS_XLOG(sd_in_pipe, sc_in_pipe, pipes_flags,
+ mdp5_data->sc_enabled, mdp5_data->sd_enabled);
+ pr_debug("sd:%d sd_in_pipe:%d sc:%d sc_in_pipe:%d flags:0x%llx\n",
+ mdp5_data->sd_enabled, sd_in_pipe,
+ mdp5_data->sc_enabled, sc_in_pipe, pipes_flags);
+
+ /*
+ * Return early in only two conditions:
+ * 1. All the features are already disabled and state remains
+ * disabled for the pipes.
+ * 2. One of the features is already enabled and state remains
+ * enabled for the pipes.
+ */
+ if (!sd_in_pipe && !mdp5_data->sd_enabled &&
+ !sc_in_pipe && !mdp5_data->sc_enabled)
+ return ret;
+ else if ((sd_in_pipe && mdp5_data->sd_enabled) ||
+ (sc_in_pipe && mdp5_data->sc_enabled))
+ return ret;
+
+ /* Secure Display */
+ if (!mdp5_data->sd_enabled && sd_in_pipe) {
+ if (!mdss_get_sd_client_cnt()) {
+ MDSS_XLOG(0x11);
+ /*wait for ping pong done */
+ if (ctl->ops.wait_pingpong)
+ mdss_mdp_display_wait4pingpong(ctl, true);
+ ret = mdss_mdp_secure_session_ctrl(1,
+ MDP_SECURE_DISPLAY_OVERLAY_SESSION);
+ if (ret) {
+ pr_err("secure display enable fail:%d", ret);
+ return ret;
+ }
+ }
+ mdp5_data->sd_enabled = 1;
+ mdss_update_sd_client(mdp5_data->mdata, true);
+ } else if (mdp5_data->sd_enabled && !sd_in_pipe) {
+ /* disable the secure display on last client */
+ if (mdss_get_sd_client_cnt() == 1) {
+ MDSS_XLOG(0x22);
+ if (ctl->ops.wait_pingpong)
+ mdss_mdp_display_wait4pingpong(ctl, true);
+ ret = mdss_mdp_secure_session_ctrl(0,
+ MDP_SECURE_DISPLAY_OVERLAY_SESSION);
+ if (ret) {
+ pr_err("secure display disable fail:%d\n", ret);
+ return ret;
+ }
+ }
+ mdss_update_sd_client(mdp5_data->mdata, false);
+ mdp5_data->sd_enabled = 0;
+ }
+
+ /* Secure Camera */
+ if (!mdp5_data->sc_enabled && sc_in_pipe) {
+ if (!mdss_get_sc_client_cnt()) {
+ MDSS_XLOG(0x33);
+ if (ctl->ops.wait_pingpong)
+ mdss_mdp_display_wait4pingpong(ctl, true);
+ ret = mdss_mdp_secure_session_ctrl(1,
+ MDP_SECURE_CAMERA_OVERLAY_SESSION);
+ if (ret) {
+ pr_err("secure camera enable fail:%d\n", ret);
+ return ret;
+ }
+ }
+ mdp5_data->sc_enabled = 1;
+ mdss_update_sc_client(mdp5_data->mdata, true);
+ } else if (mdp5_data->sc_enabled && !sc_in_pipe) {
+ /* disable the secure camera on last client */
+ if (mdss_get_sc_client_cnt() == 1) {
+ MDSS_XLOG(0x44);
+ if (ctl->ops.wait_pingpong)
+ mdss_mdp_display_wait4pingpong(ctl, true);
+ ret = mdss_mdp_secure_session_ctrl(0,
+ MDP_SECURE_CAMERA_OVERLAY_SESSION);
+ if (ret) {
+ pr_err("secure camera disable fail:%d\n", ret);
+ return ret;
+ }
+ }
+ mdss_update_sc_client(mdp5_data->mdata, false);
+ mdp5_data->sc_enabled = 0;
+ }
+
+ MDSS_XLOG(ret);
+ return ret;
+}
+
int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
struct mdp_display_commit *data)
{
@@ -1964,7 +2231,6 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
struct mdss_mdp_pipe *pipe, *tmp;
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
int ret = 0;
- int sd_in_pipe = 0;
struct mdss_mdp_commit_cb commit_cb;
if (!ctl)
@@ -1995,30 +2261,12 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
mutex_unlock(ctl->shared_lock);
return ret;
}
- mutex_lock(&mdp5_data->list_lock);
-
- /*
- * check if there is a secure display session
- */
- list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
- if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) {
- sd_in_pipe = 1;
- pr_debug("Secure pipe: %u : %08X\n",
- pipe->num, pipe->flags);
- }
- }
- /*
- * start secure display session if there is secure display session and
- * sd_enabled is not true.
- */
- if (!mdp5_data->sd_enabled && sd_in_pipe) {
- if (!mdss_get_sd_client_cnt())
- ret = mdss_mdp_secure_display_ctrl(1);
- if (!ret) {
- mdp5_data->sd_enabled = 1;
- mdss_update_sd_client(mdp5_data->mdata, true);
- }
+ mutex_lock(&mdp5_data->list_lock);
+ ret = __overlay_secure_ctrl(mfd);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("secure operation failed %d\n", ret);
+ goto commit_fail;
}
if (!ctl->shared_lock)
@@ -2108,19 +2356,6 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
}
mutex_lock(&mdp5_data->ov_lock);
- /*
- * If there is no secure display session and sd_enabled, disable the
- * secure display session
- */
- if (mdp5_data->sd_enabled && !sd_in_pipe && !ret) {
- /* disable the secure display on last client */
- if (mdss_get_sd_client_cnt() == 1)
- ret = mdss_mdp_secure_display_ctrl(0);
- if (!ret) {
- mdss_update_sd_client(mdp5_data->mdata, false);
- mdp5_data->sd_enabled = 0;
- }
- }
mdss_fb_update_notify_update(mfd);
commit_fail:
@@ -2272,7 +2507,7 @@ static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
struct mdss_mdp_data *src_data;
struct mdp_layer_buffer buffer;
int ret;
- u32 flags;
+ u64 flags;
pipe = __overlay_find_pipe(mfd, req->id);
if (!pipe) {
@@ -2298,7 +2533,8 @@ static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
pr_warn("Unexpected buffer queue to a solid fill pipe\n");
flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
- MDP_SECURE_DISPLAY_OVERLAY_SESSION));
+ MDP_SECURE_DISPLAY_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION));
mutex_lock(&mdp5_data->list_lock);
src_data = mdss_mdp_overlay_buf_alloc(mfd, pipe);
@@ -5223,6 +5459,7 @@ static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
mdss_mdp_overlay_kickoff(mfd, NULL);
}
+ctl_stop:
/*
* If retire fences are still active wait for a vsync time
* for retire fence to be updated.
@@ -5253,7 +5490,6 @@ static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
flush_work(&mdp5_data->retire_work);
}
-ctl_stop:
mutex_lock(&mdp5_data->ov_lock);
/* set the correct pipe_mapped before ctl_stop */
mdss_mdp_mixer_update_pipe_map(mdp5_data->ctl,
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index 8f211a977aa4..bcf5309993b9 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -1340,10 +1340,11 @@ static struct mdss_mdp_pipe *__pipe_lookup(struct mdss_mdp_pipe *pipe_list,
bool (*cmp)(struct mdss_mdp_pipe *, void *), void *data)
{
struct mdss_mdp_pipe *pipe;
- int i, j;
+ int i, j, max_rects;
for (i = 0, pipe = pipe_list; i < count; i++) {
- for (j = 0; j < pipe->multirect.max_rects; j++, pipe++)
+ max_rects = pipe->multirect.max_rects;
+ for (j = 0; j < max_rects; j++, pipe++)
if ((rect_num == pipe->multirect.num) &&
cmp(pipe, data))
return pipe;
@@ -1878,7 +1879,8 @@ static void mdss_mdp_pipe_stride_update(struct mdss_mdp_pipe *pipe)
if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_NONE) {
memcpy(&ystride, &pipe->src_planes.ystride,
sizeof(u32) * MAX_PLANES);
- if (pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+ if (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION))
secure = 0xF;
} else {
if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
@@ -1891,12 +1893,14 @@ static void mdss_mdp_pipe_stride_update(struct mdss_mdp_pipe *pipe)
ystride[0] = rec0_pipe->src_planes.ystride[0];
ystride[2] = rec0_pipe->src_planes.ystride[2];
- if (rec0_pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+ if (rec0_pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION))
secure |= 0x5;
ystride[1] = rec1_pipe->src_planes.ystride[0];
ystride[3] = rec1_pipe->src_planes.ystride[2];
- if (rec1_pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+ if (rec1_pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION))
secure |= 0xA;
}
@@ -1998,7 +2002,7 @@ static int mdss_mdp_image_setup(struct mdss_mdp_pipe *pipe,
dst.x -= left_lm_w_from_mfd(pipe->mfd);
}
- mdss_mdp_crop_rect(&src, &dst, &roi);
+ mdss_mdp_crop_rect(&src, &dst, &roi, true);
if (mdata->has_src_split && is_right_mixer) {
/*
@@ -2320,11 +2324,13 @@ static int mdss_mdp_pipe_solidfill_setup(struct mdss_mdp_pipe *pipe)
}
format = MDSS_MDP_FMT_SOLID_FILL;
- secure = (pipe->flags & MDP_SECURE_OVERLAY_SESSION ? 0xF : 0x0);
+ secure = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION)
+ ? 0xF : 0x0);
/* support ARGB color format only */
unpack = (C3_ALPHA << 24) | (C2_R_Cr << 16) |
- (C1_B_Cb << 8) | (C0_G_Y << 0);
+ (C0_G_Y << 8) | (C1_B_Cb << 0);
if (pipe->scaler.enable)
opmode |= (1 << 31);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c
index 8b0ebc3fdf05..199c2b66d90e 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_util.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_util.c
@@ -241,7 +241,7 @@ void mdss_mdp_intersect_rect(struct mdss_rect *res_rect,
void mdss_mdp_crop_rect(struct mdss_rect *src_rect,
struct mdss_rect *dst_rect,
- const struct mdss_rect *sci_rect)
+ const struct mdss_rect *sci_rect, bool normalize)
{
struct mdss_rect res;
mdss_mdp_intersect_rect(&res, dst_rect, sci_rect);
@@ -253,9 +253,17 @@ void mdss_mdp_crop_rect(struct mdss_rect *src_rect,
src_rect->w = res.w;
src_rect->h = res.h;
}
- *dst_rect = (struct mdss_rect)
- {(res.x - sci_rect->x), (res.y - sci_rect->y),
- res.w, res.h};
+
+ /* adjust dest rect based on the sci_rect starting */
+ if (normalize) {
+ *dst_rect = (struct mdss_rect) {(res.x - sci_rect->x),
+ (res.y - sci_rect->y), res.w, res.h};
+
+ /* return the actual cropped intersecting rect */
+ } else {
+ *dst_rect = (struct mdss_rect) {res.x, res.y,
+ res.w, res.h};
+ }
}
}
@@ -963,16 +971,17 @@ static int mdss_mdp_put_img(struct mdss_mdp_img_data *data, bool rotator,
data->srcp_dma_buf = NULL;
}
}
- } else if (data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) {
+ } else if ((data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) ||
+ (data->flags & MDP_SECURE_CAMERA_OVERLAY_SESSION)) {
/*
- * skip memory unmapping - secure display uses physical
- * address which does not require buffer unmapping
+	 * skip memory unmapping - secure display and secure camera use
+	 * physical addresses which do not require buffer unmapping
*
* For LT targets in secure display usecase, srcp_dma_buf will
* be filled due to map call which will be unmapped above.
*
*/
- pr_debug("skip memory unmapping for secure display content\n");
+ pr_debug("skip memory unmapping for secure display/camera content\n");
} else {
return -ENOMEM;
}
@@ -1188,7 +1197,7 @@ err_unmap:
}
static int mdss_mdp_data_get(struct mdss_mdp_data *data,
- struct msmfb_data *planes, int num_planes, u32 flags,
+ struct msmfb_data *planes, int num_planes, u64 flags,
struct device *dev, bool rotator, int dir)
{
int i, rc = 0;
@@ -1201,7 +1210,7 @@ static int mdss_mdp_data_get(struct mdss_mdp_data *data,
rc = mdss_mdp_get_img(&planes[i], &data->p[i], dev, rotator,
dir);
if (rc) {
- pr_err("failed to get buf p=%d flags=%x\n", i, flags);
+ pr_err("failed to get buf p=%d flags=%llx\n", i, flags);
while (i > 0) {
i--;
mdss_mdp_put_img(&data->p[i], rotator, dir);
@@ -1251,7 +1260,7 @@ void mdss_mdp_data_free(struct mdss_mdp_data *data, bool rotator, int dir)
}
int mdss_mdp_data_get_and_validate_size(struct mdss_mdp_data *data,
- struct msmfb_data *planes, int num_planes, u32 flags,
+ struct msmfb_data *planes, int num_planes, u64 flags,
struct device *dev, bool rotator, int dir,
struct mdp_layer_buffer *buffer)
{
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index 18a93f9d3c3e..0483e3d42873 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -137,6 +137,25 @@ enum {
SIM_HW_TE_MODE,
};
+
+/*
+ * enum partial_update_mode - Different modes for partial update feature
+ *
+ * @PU_NOT_SUPPORTED: Feature is not supported on target.
+ * @PU_SINGLE_ROI: Default mode, only one ROI is triggered to the
+ *			panel (one on each DSI in case of split dsi)
+ * @PU_DUAL_ROI: Support for sending two roi's that are clubbed
+ * together as one big single ROI. This is only
+ * supported on certain panels that have this
+ * capability in their DDIC.
+ *
+ */
+enum {
+ PU_NOT_SUPPORTED = 0,
+ PU_SINGLE_ROI,
+ PU_DUAL_ROI,
+};
+
struct mdss_rect {
u16 x;
u16 y;
@@ -664,6 +683,50 @@ struct mdss_panel_roi_alignment {
u32 min_height;
};
+
+/*
+ * Nomenclature used to represent partial ROI in case of
+ * dual roi when the panel supports it. Region marked (XXX) is
+ * the extended roi to align with the second roi since LM output
+ * has to be rectangle.
+ *
+ * For single ROI, only the first ROI will be used in the struct.
+ * DSI driver will merge it based on the partial_update_roi_merge
+ * property.
+ *
+ * -------------------------------
+ * | DSI0 | DSI1 |
+ * -------------------------------
+ * | | |
+ * | | |
+ * | =========|=======----+ |
+ * | | | |XXXX| |
+ * | | First| Roi |XXXX| |
+ * | | | |XXXX| |
+ * | =========|=======----+ |
+ * | | |
+ * | | |
+ * | | |
+ * | +----================= |
+ * | |XXXX| | | |
+ * | |XXXX| Second Roi | |
+ * | |XXXX| | | |
+ * | +----====|============ |
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * ------------------------------
+ *
+ */
+
+struct mdss_dsi_dual_pu_roi {
+ struct mdss_rect first_roi;
+ struct mdss_rect second_roi;
+ bool enabled;
+};
+
struct mdss_panel_info {
u32 xres;
u32 yres;
@@ -689,6 +752,7 @@ struct mdss_panel_info {
u32 vic; /* video identification code */
u32 deep_color;
struct mdss_rect roi;
+ struct mdss_dsi_dual_pu_roi dual_roi;
int pwm_pmic_gpio;
int pwm_lpg_chan;
int pwm_period;
@@ -723,8 +787,8 @@ struct mdss_panel_info {
u32 cont_splash_enabled;
bool esd_rdy;
- bool partial_update_supported; /* value from dts if pu is supported */
- bool partial_update_enabled; /* is pu currently allowed */
+ u32 partial_update_supported; /* value from dts if pu is supported */
+ u32 partial_update_enabled; /* is pu currently allowed */
u32 dcs_cmd_by_left;
u32 partial_update_roi_merge;
struct ion_handle *splash_ihdl;
@@ -1000,6 +1064,27 @@ static inline bool is_lm_configs_dsc_compatible(struct mdss_panel_info *pinfo,
return true;
}
+static inline bool is_valid_pu_dual_roi(struct mdss_panel_info *pinfo,
+ struct mdss_rect *first_roi, struct mdss_rect *second_roi)
+{
+ if ((first_roi->x != second_roi->x) || (first_roi->w != second_roi->w)
+ || (first_roi->y > second_roi->y)
+ || ((first_roi->y + first_roi->h) > second_roi->y)
+ || (is_dsc_compression(pinfo) &&
+ !is_lm_configs_dsc_compatible(pinfo,
+ first_roi->w, first_roi->h) &&
+ !is_lm_configs_dsc_compatible(pinfo,
+ second_roi->w, second_roi->h))) {
+ pr_err("Invalid multiple PU ROIs, roi0:{%d,%d,%d,%d}, roi1{%d,%d,%d,%d}\n",
+ first_roi->x, first_roi->y, first_roi->w,
+ first_roi->h, second_roi->x, second_roi->y,
+ second_roi->w, second_roi->h);
+ return false;
+ }
+
+ return true;
+}
+
int mdss_register_panel(struct platform_device *pdev,
struct mdss_panel_data *pdata);
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index eab7bcaaa156..2239791fdad0 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -162,12 +162,15 @@ end:
}
/*
- * mdss_smmu_v2_attach()
+ * mdss_smmu_attach_v2()
*
* Associates each configured VA range with the corresponding smmu context
* bank device. Enables the clks as smmu_v2 requires voting it before the usage.
* And iommu attach is done only once during the initial attach and it is never
* detached as smmu v2 uses a feature called 'retention'.
+ * Only detach both the secure and non-secure contexts for the secure
+ * display use case, and only the secure context for secure camera use
+ * cases, on platforms that have MDSS_CAPS_SEC_DETACH_SMMU enabled
*/
static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
{
@@ -191,7 +194,9 @@ static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
}
mdss_smmu->handoff_pending = false;
- if (!mdss_smmu->domain_attached) {
+ if (!mdss_smmu->domain_attached &&
+ mdss_smmu_is_valid_domain_condition(mdata,
+ i, true)) {
rc = arm_iommu_attach_device(mdss_smmu->dev,
mdss_smmu->mmu_mapping);
if (rc) {
@@ -229,10 +234,11 @@ err:
}
/*
- * mdss_smmu_v2_detach()
+ * mdss_smmu_detach_v2()
*
- * Only disables the clks as it is not required to detach the iommu mapped
- * VA range from the device in smmu_v2 as explained in the mdss_smmu_v2_attach
+ * Disables the clks only when it is not required to detach the iommu mapped
+ * VA range (as long as not in a secure display or secure camera use case)
+ * from the device in smmu_v2 as explained in mdss_smmu_attach_v2
*/
static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
{
@@ -245,8 +251,24 @@ static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
continue;
mdss_smmu = mdss_smmu_get_cb(i);
- if (mdss_smmu && mdss_smmu->dev && !mdss_smmu->handoff_pending)
- mdss_smmu_enable_power(mdss_smmu, false);
+ if (mdss_smmu && mdss_smmu->dev) {
+ if (!mdss_smmu->handoff_pending &&
+ mdss_smmu->domain_attached &&
+ mdss_smmu_is_valid_domain_condition(mdata,
+ i, false)) {
+ /*
+				 * if entering a secure display or
+				 * secure camera use case (for secured
+				 * contexts), leave the smmu clocks on
+				 * and only detach the smmu contexts
+ */
+ arm_iommu_detach_device(mdss_smmu->dev);
+ mdss_smmu->domain_attached = false;
+ pr_debug("iommu v2 domain[%i] detached\n", i);
+ } else {
+ mdss_smmu_enable_power(mdss_smmu, false);
+ }
+ }
}
mutex_unlock(&mdp_iommu_lock);
@@ -609,6 +631,7 @@ int mdss_smmu_probe(struct platform_device *pdev)
}
mdss_smmu = &mdata->mdss_smmu[smmu_domain.domain];
+ mdss_smmu->domain = smmu_domain.domain;
mp = &mdss_smmu->mp;
memset(mp, 0, sizeof(struct dss_module_power));
diff --git a/drivers/video/fbdev/msm/mdss_smmu.h b/drivers/video/fbdev/msm/mdss_smmu.h
index a987066cc773..f7e6e275c16a 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.h
+++ b/drivers/video/fbdev/msm/mdss_smmu.h
@@ -73,6 +73,38 @@ static inline bool mdss_smmu_is_valid_domain_type(struct mdss_data_type *mdata,
return true;
}
+static inline bool mdss_smmu_is_valid_domain_condition(
+ struct mdss_data_type *mdata,
+ int domain_type,
+ bool is_attach)
+{
+ if (is_attach) {
+ if (test_bit(MDSS_CAPS_SEC_DETACH_SMMU,
+ mdata->mdss_caps_map) &&
+ (mdata->sec_disp_en ||
+ (mdata->sec_cam_en &&
+ domain_type == MDSS_IOMMU_DOMAIN_SECURE))) {
+ pr_debug("SMMU attach not attempted, sd:%d, sc:%d\n",
+ mdata->sec_disp_en, mdata->sec_cam_en);
+ return false;
+ } else {
+ return true;
+ }
+ } else {
+ if (test_bit(MDSS_CAPS_SEC_DETACH_SMMU,
+ mdata->mdss_caps_map) &&
+ (mdata->sec_disp_en ||
+ (mdata->sec_cam_en &&
+ domain_type == MDSS_IOMMU_DOMAIN_SECURE))) {
+ pr_debug("SMMU detach attempted, sd:%d, sc:%d\n",
+ mdata->sec_disp_en, mdata->sec_cam_en);
+ return true;
+ } else {
+ return false;
+ }
+ }
+}
+
static inline struct mdss_smmu_client *mdss_smmu_get_cb(u32 domain)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -96,7 +128,7 @@ static inline int is_mdss_iommu_attached(void)
return mdata ? mdata->iommu_attached : false;
}
-static inline int mdss_smmu_get_domain_type(u32 flags, bool rotator)
+static inline int mdss_smmu_get_domain_type(u64 flags, bool rotator)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
int type;