Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/dma-removed.c | 3
-rw-r--r--  drivers/block/zram/zram_drv.c | 5
-rw-r--r--  drivers/bluetooth/btfm_slim.c | 2
-rw-r--r--  drivers/bluetooth/btfm_slim.h | 4
-rw-r--r--  drivers/bluetooth/btfm_slim_codec.c | 60
-rw-r--r--  drivers/bluetooth/btfm_slim_wcn3990.c | 4
-rw-r--r--  drivers/char/diag/diagchar_core.c | 3
-rw-r--r--  drivers/char/diag/diagfwd.c | 94
-rw-r--r--  drivers/char/diag/diagfwd.h | 5
-rw-r--r--  drivers/char/diag/diagfwd_glink.c | 44
-rw-r--r--  drivers/char/diag/diagfwd_glink.h | 4
-rw-r--r--  drivers/clk/msm/clock-gcc-8998.c | 49
-rw-r--r--  drivers/clk/msm/clock-generic.c | 14
-rw-r--r--  drivers/clk/msm/clock-osm.c | 62
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c | 1
-rw-r--r--  drivers/clk/qcom/clk-cpu-osm.c | 27
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 22
-rw-r--r--  drivers/clk/qcom/clk-smd-rpm.c | 4
-rw-r--r--  drivers/clk/qcom/gcc-sdm660.c | 61
-rw-r--r--  drivers/clk/qcom/mmcc-sdm660.c | 18
-rw-r--r--  drivers/crypto/msm/qcedev.c | 10
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 20
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 22
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 523
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 115
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h | 1365
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 201
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx.xml.h | 3497
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 1345
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 187
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 509
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 383
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_snapshot.c | 796
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 49
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 117
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 467
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 221
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 160
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 6
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 11
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 5
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.xml.h | 5
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c | 1182
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h | 346
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c | 393
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c | 388
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h | 300
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 38
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 6
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 10
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 5
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 5
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 19
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 49
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 5
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 5
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 43
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 3
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_common.xml.h | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 203
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 103
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c | 26
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 199
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 37
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 38
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 256
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 340
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 122
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 245
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.h | 37
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 13
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.h | 19
-rw-r--r--  drivers/gpu/drm/msm/msm_smmu.c | 18
-rw-r--r--  drivers/gpu/drm/msm/msm_snapshot.c | 105
-rw-r--r--  drivers/gpu/drm/msm/msm_snapshot.h | 85
-rw-r--r--  drivers/gpu/drm/msm/msm_snapshot_api.h | 121
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.c | 38
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.h | 17
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_core_irq.c | 95
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_core_irq.h | 16
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys.h | 4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c | 29
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_formats.c | 20
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_formats.h | 11
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_catalog.c | 8
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_irq.c | 80
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.c | 159
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.h | 7
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_plane.c | 12
-rw-r--r--  drivers/gpu/drm/msm/sde_io_util.c | 502
-rw-r--r--  drivers/gpu/drm/msm/sde_power_handle.c | 2
-rw-r--r--  drivers/gpu/msm/adreno_debugfs.c | 3
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c | 6
-rw-r--r--  drivers/gpu/msm/adreno_drawctxt.c | 10
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c | 3
-rw-r--r--  drivers/gpu/msm/kgsl.c | 5
-rw-r--r--  drivers/gpu/msm/kgsl_device.h | 6
-rw-r--r--  drivers/hwtracing/coresight/Kconfig | 11
-rw-r--r--  drivers/input/touchscreen/Kconfig | 11
-rw-r--r--  drivers/input/touchscreen/Makefile | 1
-rw-r--r--  drivers/input/touchscreen/msg21xx_ts.c | 2260
-rw-r--r--  drivers/iommu/arm-smmu.c | 29
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 158
-rw-r--r--  drivers/iommu/io-pgtable.h | 1
-rw-r--r--  drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c | 32
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c | 22
-rw-r--r--  drivers/media/platform/msm/camera_v2/msm.c | 20
-rw-r--r--  drivers/media/platform/msm/camera_v2/msm.h | 4
-rw-r--r--  drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c | 43
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_base.h | 1
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c | 37
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h | 1
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c | 21
-rw-r--r--  drivers/mmc/card/block.c | 12
-rw-r--r--  drivers/mmc/core/core.c | 12
-rw-r--r--  drivers/mmc/host/cmdq_hci.c | 74
-rw-r--r--  drivers/mmc/host/cmdq_hci.h | 8
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 11
-rw-r--r--  drivers/net/ethernet/msm/msm_rmnet_mhi.c | 4
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 109
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.h | 61
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.c | 54
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 57
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 24
-rw-r--r--  drivers/net/wireless/ath/wil6210/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 82
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 135
-rw-r--r--  drivers/net/wireless/ath/wil6210/ethtool.c | 10
-rw-r--r--  drivers/net/wireless/ath/wil6210/interrupt.c | 30
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 66
-rw-r--r--  drivers/net/wireless/ath/wil6210/netdev.c | 17
-rw-r--r--  drivers/net/wireless/ath/wil6210/p2p.c | 36
-rw-r--r--  drivers/net/wireless/ath/wil6210/pcie_bus.c | 57
-rw-r--r--  drivers/net/wireless/ath/wil6210/pm.c | 17
-rw-r--r--  drivers/net/wireless/ath/wil6210/pmc.c | 79
-rw-r--r--  drivers/net/wireless/ath/wil6210/rx_reorder.c | 8
-rw-r--r--  drivers/net/wireless/ath/wil6210/sysfs.c | 126
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 75
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil_crash_dump.c | 18
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 62
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.h | 46
-rw-r--r--  drivers/pci/host/pci-msm.c | 14
-rw-r--r--  drivers/phy/phy-qcom-ufs.c | 8
-rw-r--r--  drivers/platform/msm/gsi/gsi.c | 70
-rw-r--r--  drivers/platform/msm/gsi/gsi.h | 27
-rw-r--r--  drivers/platform/msm/gsi/gsi_reg.h | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_client.c | 42
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c | 8
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 58
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 42
-rw-r--r--  drivers/platform/msm/mhi/mhi.h | 154
-rw-r--r--  drivers/platform/msm/mhi/mhi_bhi.c | 27
-rw-r--r--  drivers/platform/msm/mhi/mhi_event.c | 63
-rw-r--r--  drivers/platform/msm/mhi/mhi_hwio.h | 18
-rw-r--r--  drivers/platform/msm/mhi/mhi_iface.c | 128
-rw-r--r--  drivers/platform/msm/mhi/mhi_init.c | 109
-rw-r--r--  drivers/platform/msm/mhi/mhi_isr.c | 329
-rw-r--r--  drivers/platform/msm/mhi/mhi_macros.h | 13
-rw-r--r--  drivers/platform/msm/mhi/mhi_main.c | 932
-rw-r--r--  drivers/platform/msm/mhi/mhi_mmio_ops.c | 62
-rw-r--r--  drivers/platform/msm/mhi/mhi_pm.c | 306
-rw-r--r--  drivers/platform/msm/mhi/mhi_ring_ops.c | 12
-rw-r--r--  drivers/platform/msm/mhi/mhi_ssr.c | 74
-rw-r--r--  drivers/platform/msm/mhi/mhi_states.c | 785
-rw-r--r--  drivers/platform/msm/mhi/mhi_sys.c | 127
-rw-r--r--  drivers/platform/msm/mhi/mhi_sys.h | 4
-rw-r--r--  drivers/platform/msm/mhi_uci/mhi_uci.c | 2
-rw-r--r--  drivers/power/supply/qcom/battery.c | 61
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c | 21
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 321
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h | 10
-rw-r--r--  drivers/power/supply/qcom/smb-reg.h | 3
-rw-r--r--  drivers/power/supply/qcom/smb1351-charger.c | 9
-rw-r--r--  drivers/power/supply/qcom/smb138x-charger.c | 77
-rw-r--r--  drivers/regulator/cpr4-mmss-ldo-regulator.c | 44
-rw-r--r--  drivers/regulator/cprh-kbss-regulator.c | 121
-rw-r--r--  drivers/regulator/qpnp-labibb-regulator.c | 59
-rw-r--r--  drivers/regulator/qpnp-lcdb-regulator.c | 23
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 179
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h | 1
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 8
-rw-r--r--  drivers/soc/qcom/glink.c | 4
-rw-r--r--  drivers/soc/qcom/msm_smem.c | 2
-rw-r--r--  drivers/soc/qcom/peripheral-loader.c | 27
-rw-r--r--  drivers/soc/qcom/pil-msa.c | 46
-rw-r--r--  drivers/soc/qcom/pil-msa.h | 3
-rw-r--r--  drivers/soc/qcom/pil-q6v5-mss.c | 42
-rw-r--r--  drivers/soc/qcom/qdsp6v2/audio_notifier.c | 11
-rw-r--r--  drivers/soc/qcom/ramdump.c | 10
-rw-r--r--  drivers/soc/qcom/rpm_rail_stats.c | 4
-rw-r--r--  drivers/soc/qcom/smcinvoke.c | 7
-rw-r--r--  drivers/spi/spi_qsd.c | 55
-rw-r--r--  drivers/usb/gadget/function/f_gsi.c | 6
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 2
-rw-r--r--  drivers/usb/pd/policy_engine.c | 14
-rw-r--r--  drivers/usb/phy/phy-msm-qusb.c | 44
-rw-r--r--  drivers/video/fbdev/msm/mdss.h | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_fb.c | 7
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c | 10
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c | 23
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_overlay.c | 20
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_pp.c | 8
-rw-r--r--  drivers/video/fbdev/msm/mdss_smmu.c | 14
-rw-r--r--  drivers/virtio/virtio_balloon.c | 51
221 files changed, 18979 insertions, 6130 deletions
diff --git a/drivers/base/dma-removed.c b/drivers/base/dma-removed.c
index 87c743995a50..5fa3c6bdeea0 100644
--- a/drivers/base/dma-removed.c
+++ b/drivers/base/dma-removed.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2000-2004 Russell King
*
* This program is free software; you can redistribute it and/or modify
@@ -296,6 +296,7 @@ void removed_free(struct device *dev, size_t size, void *cpu_addr,
attrs);
struct removed_region *dma_mem = dev->removed_mem;
+ size = PAGE_ALIGN(size);
if (!no_kernel_mapping)
iounmap(cpu_addr);
mutex_lock(&dma_mem->lock);
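The removed_free() hunk above mirrors the allocator side: requests are rounded up to whole pages when carved out of the removed region, so the free path must round the same way before returning the range. A minimal standalone sketch of the alignment arithmetic (the PAGE_ALIGN definition and 4 KiB page size are assumptions matching the common kernel layout):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL                                    /* assumed page size */
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)   /* round up to a page */

int main(void)
{
	size_t sizes[] = { 1, 4096, 4097, 10000 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%zu -> %lu\n", sizes[i],
		       (unsigned long)PAGE_ALIGN(sizes[i]));
	/* 1 -> 4096, 4096 -> 4096, 4097 -> 8192, 10000 -> 12288 */
	return 0;
}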
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 72d088b1863c..3c59417b485a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -521,7 +521,7 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
goto out_error;
}
- meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
+ meta->mem_pool = zs_create_pool(pool_name);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
goto out_error;
@@ -725,7 +725,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
src = uncmem;
}
- handle = zs_malloc(meta->mem_pool, clen);
+ handle = zs_malloc(meta->mem_pool, clen, GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (!handle) {
if (printk_timed_ratelimit(&zram_rs_time,
ALLOC_ERROR_LOG_RATE_MS))
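The zram hunks track the zsmalloc API change in which GFP flags moved from zs_create_pool() to each zs_malloc() call, so flags such as __GFP_MOVABLE can be chosen per allocation. A standalone sketch of the calling convention (the stubs and flag values are stand-ins for illustration, not the kernel's definitions):

#include <stdio.h>
#include <stddef.h>

typedef unsigned int gfp_t;
#define GFP_NOIO        0x01u   /* stand-in flag values */
#define __GFP_HIGHMEM   0x02u
#define __GFP_MOVABLE   0x08u

struct zs_pool { const char *name; };

static struct zs_pool *zs_create_pool(const char *name)   /* no gfp argument */
{
	static struct zs_pool pool;

	pool.name = name;
	return &pool;
}

static unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
	/* Flags now travel with each allocation request. */
	printf("%s: alloc %zu bytes, gfp=%#x\n", pool->name, size, gfp);
	return 1;   /* nonzero handle on success */
}

int main(void)
{
	struct zs_pool *pool = zs_create_pool("zram0");

	/* Per-allocation flags, as in zram_bvec_write() above. */
	zs_malloc(pool, 4096, GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
	return 0;
}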
diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c
index 37cc6284cf97..969f755f5dc4 100644
--- a/drivers/bluetooth/btfm_slim.c
+++ b/drivers/bluetooth/btfm_slim.c
@@ -127,7 +127,7 @@ int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
if (!btfmslim || !ch)
return -EINVAL;
- BTFMSLIM_DBG("port:%d", ch->port);
+ BTFMSLIM_DBG("port: %d ch: %d", ch->port, ch->ch);
/* Define the channel with below parameters */
prop.prot = SLIM_AUTO_ISO;
diff --git a/drivers/bluetooth/btfm_slim.h b/drivers/bluetooth/btfm_slim.h
index dbb4c563d802..5d105fba2193 100644
--- a/drivers/bluetooth/btfm_slim.h
+++ b/drivers/bluetooth/btfm_slim.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,7 +13,7 @@
#define BTFM_SLIM_H
#include <linux/slimbus/slimbus.h>
-#define BTFMSLIM_DBG(fmt, arg...) pr_debug("%s: " fmt "\n", __func__, ## arg)
+#define BTFMSLIM_DBG(fmt, arg...) pr_debug(fmt "\n", ## arg)
#define BTFMSLIM_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
#define BTFMSLIM_ERR(fmt, arg...) pr_err("%s: " fmt "\n", __func__, ## arg)
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
index d7d24ff07801..061177173ab8 100644
--- a/drivers/bluetooth/btfm_slim_codec.c
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -54,8 +54,8 @@ static int btfm_slim_dai_startup(struct snd_pcm_substream *substream,
int ret;
struct btfmslim *btfmslim = dai->dev->platform_data;
- BTFMSLIM_DBG("substream = %s stream = %d",
- substream->name, substream->stream);
+ BTFMSLIM_DBG("substream = %s stream = %d dai->name = %s",
+ substream->name, substream->stream, dai->name);
ret = btfm_slim_hw_init(btfmslim);
return ret;
}
@@ -63,33 +63,12 @@ static int btfm_slim_dai_startup(struct snd_pcm_substream *substream,
static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct btfmslim *btfmslim = dai->dev->platform_data;
-
- BTFMSLIM_DBG("substream = %s stream = %d",
- substream->name, substream->stream);
- btfm_slim_hw_deinit(btfmslim);
-}
-
-static int btfm_slim_dai_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- BTFMSLIM_DBG("dai_name = %s DAI-ID %x rate %d num_ch %d",
- dai->name, dai->id, params_rate(params),
- params_channels(params));
-
- return 0;
-}
-
-int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- int i, ret = -EINVAL;
+ int i;
struct btfmslim *btfmslim = dai->dev->platform_data;
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+ BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
switch (dai->id) {
@@ -110,7 +89,7 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
case BTFM_SLIM_NUM_CODEC_DAIS:
default:
BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
- return ret;
+ return;
}
/* Search for dai->id matched port handler */
@@ -122,14 +101,25 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
(ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
BTFMSLIM_ERR("ch is invalid!!");
- return ret;
+ return;
}
- ret = btfm_slim_enable_ch(btfmslim, ch, rxport, dai->rate, grp, nchan);
- return ret;
+ btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+ btfm_slim_hw_deinit(btfmslim);
+}
+
+static int btfm_slim_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ BTFMSLIM_DBG("dai->name = %s DAI-ID %x rate %d num_ch %d",
+ dai->name, dai->id, params_rate(params),
+ params_channels(params));
+
+ return 0;
}
-int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
+int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
int i, ret = -EINVAL;
@@ -137,7 +127,7 @@ int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+ BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
switch (dai->id) {
@@ -172,7 +162,8 @@ int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
BTFMSLIM_ERR("ch is invalid!!");
return ret;
}
- ret = btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+
+ ret = btfm_slim_enable_ch(btfmslim, ch, rxport, dai->rate, grp, nchan);
return ret;
}
@@ -315,7 +306,6 @@ static struct snd_soc_dai_ops btfmslim_dai_ops = {
.shutdown = btfm_slim_dai_shutdown,
.hw_params = btfm_slim_dai_hw_params,
.prepare = btfm_slim_dai_prepare,
- .hw_free = btfm_slim_dai_hw_free,
.set_channel_map = btfm_slim_dai_set_channel_map,
.get_channel_map = btfm_slim_dai_get_channel_map,
};
@@ -384,7 +374,7 @@ static struct snd_soc_dai_driver btfmslim_dai[] = {
static struct snd_soc_codec_driver btfmslim_codec = {
.probe = btfm_slim_codec_probe,
.remove = btfm_slim_codec_remove,
- .read = btfm_slim_codec_read,
+ .read = btfm_slim_codec_read,
.write = btfm_slim_codec_write,
};
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
index 7d7bd2441c6e..72e28da4bd3b 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.c
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -76,7 +76,7 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
uint8_t reg_val = 0;
uint16_t reg;
- BTFMSLIM_DBG("enable(%d)", enable);
+ BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
if (rxport) {
/* Port enable */
reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index facdb0f40e44..335064352789 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -2606,7 +2606,8 @@ static int diag_user_process_raw_data(const char __user *buf, int len)
info = diag_md_session_get_pid(current->tgid);
ret = diag_process_apps_pkt(user_space_data, len, info);
if (ret == 1)
- diag_send_error_rsp((void *)(user_space_data), len);
+ diag_send_error_rsp((void *)(user_space_data), len,
+ info);
}
fail:
diagmem_free(driver, user_space_data, mempool);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index a7069bc0edf3..99a16dd47cd4 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -239,10 +239,11 @@ void chk_logging_wakeup(void)
}
}
-static void pack_rsp_and_send(unsigned char *buf, int len)
+static void pack_rsp_and_send(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
{
int err;
- int retry_count = 0;
+ int retry_count = 0, i, rsp_ctxt;
uint32_t write_len = 0;
unsigned long flags;
unsigned char *rsp_ptr = driver->encoded_rsp_buf;
@@ -257,6 +258,15 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
return;
}
+ if (info && info->peripheral_mask) {
+ for (i = 0; i <= NUM_PERIPHERALS; i++) {
+ if (info->peripheral_mask & (1 << i))
+ break;
+ }
+ rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+ } else
+ rsp_ctxt = driver->rsp_buf_ctxt;
+
/*
* Keep trying till we get the buffer back. It should probably
* take one or two iterations. When this loops till UINT_MAX, it
@@ -298,8 +308,7 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
*(uint8_t *)(rsp_ptr + write_len) = CONTROL_CHAR;
write_len += sizeof(uint8_t);
- err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len,
- driver->rsp_buf_ctxt);
+ err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len, rsp_ctxt);
if (err) {
pr_err("diag: In %s, unable to write to mux, err: %d\n",
__func__, err);
@@ -309,12 +318,13 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
}
}
-static void encode_rsp_and_send(unsigned char *buf, int len)
+static void encode_rsp_and_send(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
{
struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
unsigned char *rsp_ptr = driver->encoded_rsp_buf;
- int err, retry_count = 0;
+ int err, i, rsp_ctxt, retry_count = 0;
unsigned long flags;
if (!rsp_ptr || !buf)
@@ -326,6 +336,15 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
return;
}
+ if (info && info->peripheral_mask) {
+ for (i = 0; i <= NUM_PERIPHERALS; i++) {
+ if (info->peripheral_mask & (1 << i))
+ break;
+ }
+ rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+ } else
+ rsp_ctxt = driver->rsp_buf_ctxt;
+
/*
* Keep trying till we get the buffer back. It should probably
* take one or two iterations. When this loops till UINT_MAX, it
@@ -369,7 +388,7 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
diag_hdlc_encode(&send, &enc);
driver->encoded_rsp_len = (int)(enc.dest - (void *)rsp_ptr);
err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, driver->encoded_rsp_len,
- driver->rsp_buf_ctxt);
+ rsp_ctxt);
if (err) {
pr_err("diag: In %s, Unable to write to device, err: %d\n",
__func__, err);
@@ -380,21 +399,22 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
memset(buf, '\0', DIAG_MAX_RSP_SIZE);
}
-void diag_send_rsp(unsigned char *buf, int len)
+void diag_send_rsp(unsigned char *buf, int len, struct diag_md_session_t *info)
{
struct diag_md_session_t *session_info = NULL;
uint8_t hdlc_disabled;
- session_info = diag_md_session_get_peripheral(APPS_DATA);
+ session_info = (info) ? info :
+ diag_md_session_get_peripheral(APPS_DATA);
if (session_info)
hdlc_disabled = session_info->hdlc_disabled;
else
hdlc_disabled = driver->hdlc_disabled;
if (hdlc_disabled)
- pack_rsp_and_send(buf, len);
+ pack_rsp_and_send(buf, len, session_info);
else
- encode_rsp_and_send(buf, len);
+ encode_rsp_and_send(buf, len, session_info);
}
void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type)
@@ -865,7 +885,8 @@ static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len,
return write_len;
}
-void diag_send_error_rsp(unsigned char *buf, int len)
+void diag_send_error_rsp(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
{
/* -1 to accommodate the first byte 0x13 */
if (len > (DIAG_MAX_RSP_SIZE - 1)) {
@@ -875,7 +896,7 @@ void diag_send_error_rsp(unsigned char *buf, int len)
*(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR;
memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len);
- diag_send_rsp(driver->apps_rsp_buf, len + 1);
+ diag_send_rsp(driver->apps_rsp_buf, len + 1, info);
}
int diag_process_apps_pkt(unsigned char *buf, int len,
@@ -895,7 +916,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
/* Check if the command is a supported mask command */
mask_ret = diag_process_apps_masks(buf, len, info);
if (mask_ret > 0) {
- diag_send_rsp(driver->apps_rsp_buf, mask_ret);
+ diag_send_rsp(driver->apps_rsp_buf, mask_ret, info);
return 0;
}
@@ -917,7 +938,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
@@ -933,7 +954,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
} else {
if (MD_PERIPHERAL_MASK(reg_item->proc) &
driver->logging_mask)
- diag_send_error_rsp(buf, len);
+ diag_send_error_rsp(buf, len, info);
else
write_len = diag_send_data(reg_item, buf, len);
}
@@ -949,13 +970,13 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
for (i = 0; i < 4; i++)
*(driver->apps_rsp_buf+i) = *(buf+i);
*(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE;
- diag_send_rsp(driver->apps_rsp_buf, 8);
+ diag_send_rsp(driver->apps_rsp_buf, 8, info);
return 0;
} else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
(*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
if (len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, len);
+ diag_send_rsp(driver->apps_rsp_buf, len, info);
return 0;
}
return len;
@@ -968,7 +989,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
/* Check for time sync switch command */
@@ -979,14 +1000,14 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
/* Check for download command */
else if ((chk_apps_master()) && (*buf == 0x3A)) {
/* send response back */
driver->apps_rsp_buf[0] = *buf;
- diag_send_rsp(driver->apps_rsp_buf, 1);
+ diag_send_rsp(driver->apps_rsp_buf, 1, info);
msleep(5000);
/* call download API */
msm_set_restart_mode(RESTART_DLOAD);
@@ -1006,7 +1027,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
for (i = 0; i < 13; i++)
driver->apps_rsp_buf[i+3] = 0;
- diag_send_rsp(driver->apps_rsp_buf, 16);
+ diag_send_rsp(driver->apps_rsp_buf, 16, info);
return 0;
}
}
@@ -1015,7 +1036,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
(*(buf+2) == 0x04) && (*(buf+3) == 0x0)) {
memcpy(driver->apps_rsp_buf, buf, 4);
driver->apps_rsp_buf[4] = wrap_enabled;
- diag_send_rsp(driver->apps_rsp_buf, 5);
+ diag_send_rsp(driver->apps_rsp_buf, 5, info);
return 0;
}
/* Wrap the Delayed Rsp ID */
@@ -1024,7 +1045,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
wrap_enabled = true;
memcpy(driver->apps_rsp_buf, buf, 4);
driver->apps_rsp_buf[4] = wrap_count;
- diag_send_rsp(driver->apps_rsp_buf, 6);
+ diag_send_rsp(driver->apps_rsp_buf, 6, info);
return 0;
}
/* Mobile ID Rsp */
@@ -1035,7 +1056,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
}
@@ -1055,7 +1076,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
for (i = 0; i < 55; i++)
driver->apps_rsp_buf[i] = 0;
- diag_send_rsp(driver->apps_rsp_buf, 55);
+ diag_send_rsp(driver->apps_rsp_buf, 55, info);
return 0;
}
/* respond to 0x7c command */
@@ -1068,14 +1089,14 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
chk_config_get_id();
*(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
*(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
- diag_send_rsp(driver->apps_rsp_buf, 14);
+ diag_send_rsp(driver->apps_rsp_buf, 14, info);
return 0;
}
}
write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf,
@@ -1087,7 +1108,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
* before disabling HDLC encoding on Apps processor.
*/
mutex_lock(&driver->hdlc_disable_mutex);
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
/*
* Set the value of hdlc_disabled after sending the response to
* the tools. This is required since the tools is expecting a
@@ -1107,7 +1128,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
/* We have now come to the end of the function. */
if (chk_apps_only())
- diag_send_error_rsp(buf, len);
+ diag_send_error_rsp(buf, len, info);
return 0;
}
@@ -1190,7 +1211,7 @@ fail:
* recovery algorithm. Send an error response if the
* packet is not in expected format.
*/
- diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len);
+ diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len, info);
driver->hdlc_buf_len = 0;
end:
mutex_unlock(&driver->diag_hdlc_mutex);
@@ -1446,7 +1467,7 @@ start:
if (actual_pkt->start != CONTROL_CHAR) {
diag_hdlc_start_recovery(buf, len, info);
- diag_send_error_rsp(buf, len);
+ diag_send_error_rsp(buf, len, info);
goto end;
}
@@ -1528,15 +1549,14 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
case TYPE_CMD:
if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
diagfwd_write_done(peripheral, type, num);
- } else if (peripheral == APPS_DATA) {
+ }
+ if (peripheral == APPS_DATA ||
+ ctxt == DIAG_MEMORY_DEVICE_MODE) {
spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
driver->rsp_buf_busy = 0;
driver->encoded_rsp_len = 0;
spin_unlock_irqrestore(&driver->rsp_buf_busy_lock,
flags);
- } else {
- pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
- peripheral, __func__, type);
}
break;
default:
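Across the diagfwd.c hunks above, both pack_rsp_and_send() and encode_rsp_and_send() now derive the mux write context from the md session's peripheral mask, picking the lowest-numbered peripheral in the mask and falling back to the driver default otherwise. A standalone sketch of that selection (NUM_PERIPHERALS, TYPE_CMD and the SET_BUF_CTXT packing are stand-ins; the real definitions live in the diag headers):

#include <stdio.h>

#define NUM_PERIPHERALS 8   /* assumed count for the sketch */
#define TYPE_CMD        3   /* stand-in value */
/* Stand-in for the driver's SET_BUF_CTXT(); the real bit packing differs. */
#define SET_BUF_CTXT(peripheral, type, num) \
	(((peripheral) << 16) | ((type) << 8) | (num))

static int rsp_ctxt_for_mask(unsigned int peripheral_mask, int default_ctxt)
{
	int i;

	if (!peripheral_mask)
		return default_ctxt;
	/* Pick the lowest-numbered peripheral in the session mask,
	 * as the loops in the hunks above do. */
	for (i = 0; i <= NUM_PERIPHERALS; i++)
		if (peripheral_mask & (1u << i))
			break;
	return SET_BUF_CTXT(i, TYPE_CMD, 1);
}

int main(void)
{
	printf("ctxt=%#x\n", rsp_ctxt_for_mask(0x4, 0));   /* peripheral 2 */
	return 0;
}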
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
index 0023e0638aa2..4c6d86fc36ae 100644
--- a/drivers/char/diag/diagfwd.h
+++ b/drivers/char/diag/diagfwd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -45,7 +45,8 @@ void diag_update_userspace_clients(unsigned int type);
void diag_update_sleeping_process(int process_id, int data_type);
int diag_process_apps_pkt(unsigned char *buf, int len,
struct diag_md_session_t *info);
-void diag_send_error_rsp(unsigned char *buf, int len);
+void diag_send_error_rsp(unsigned char *buf, int len,
+ struct diag_md_session_t *info);
void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type);
int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
void diag_md_hdlc_reset_timer_func(unsigned long pid);
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index e761e6ec4e39..37f3bd2626c8 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -462,6 +462,31 @@ static int diag_glink_write(void *ctxt, unsigned char *buf, int len)
return err;
}
+
+static void diag_glink_connect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ connect_work);
+ if (!glink_info || glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 1);
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+ diagfwd_late_open(glink_info->fwd_ctxt);
+}
+
+static void diag_glink_remote_disconnect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ remote_disconnect_work);
+ if (!glink_info || glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 0);
+ diagfwd_channel_close(glink_info->fwd_ctxt);
+ atomic_set(&glink_info->tx_intent_ready, 0);
+}
+
static void diag_glink_transport_notify_state(void *handle, const void *priv,
unsigned event)
{
@@ -475,9 +500,7 @@ static void diag_glink_transport_notify_state(void *handle, const void *priv,
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s received channel connect for periph:%d\n",
glink_info->name, glink_info->peripheral);
- atomic_set(&glink_info->opened, 1);
- diagfwd_channel_open(glink_info->fwd_ctxt);
- diagfwd_late_open(glink_info->fwd_ctxt);
+ queue_work(glink_info->wq, &glink_info->connect_work);
break;
case GLINK_LOCAL_DISCONNECTED:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -489,9 +512,7 @@ static void diag_glink_transport_notify_state(void *handle, const void *priv,
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s received channel remote disconnect for periph:%d\n",
glink_info->name, glink_info->peripheral);
- atomic_set(&glink_info->opened, 0);
- diagfwd_channel_close(glink_info->fwd_ctxt);
- atomic_set(&glink_info->tx_intent_ready, 0);
+ queue_work(glink_info->wq, &glink_info->remote_disconnect_work);
break;
default:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -641,6 +662,9 @@ static void __diag_glink_init(struct diag_glink_info *glink_info)
INIT_WORK(&(glink_info->open_work), diag_glink_open_work_fn);
INIT_WORK(&(glink_info->close_work), diag_glink_close_work_fn);
INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
+ INIT_WORK(&(glink_info->connect_work), diag_glink_connect_work_fn);
+ INIT_WORK(&(glink_info->remote_disconnect_work),
+ diag_glink_remote_disconnect_work_fn);
link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
link_info.transport = NULL;
link_info.edge = glink_info->edge;
@@ -681,6 +705,8 @@ int diag_glink_init(void)
struct diag_glink_info *glink_info = NULL;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
glink_info = &glink_cntl[peripheral];
__diag_glink_init(glink_info);
diagfwd_cntl_register(TRANSPORT_GLINK, glink_info->peripheral,
@@ -719,6 +745,8 @@ void diag_glink_early_exit(void)
int peripheral = 0;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
__diag_glink_exit(&glink_cntl[peripheral]);
glink_unregister_link_state_cb(&glink_cntl[peripheral].hdl);
}
@@ -729,6 +757,8 @@ void diag_glink_exit(void)
int peripheral = 0;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
__diag_glink_exit(&glink_data[peripheral]);
__diag_glink_exit(&glink_cmd[peripheral]);
__diag_glink_exit(&glink_dci[peripheral]);
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
index bad4629b5ab8..5c1abeffd498 100644
--- a/drivers/char/diag/diagfwd_glink.h
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@ struct diag_glink_info {
struct work_struct open_work;
struct work_struct close_work;
struct work_struct read_work;
+ struct work_struct connect_work;
+ struct work_struct remote_disconnect_work;
struct diagfwd_info *fwd_ctxt;
};
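The glink hunks above move channel open/close handling out of diag_glink_transport_notify_state(), which may run in atomic context, onto the channel's workqueue via the new connect_work and remote_disconnect_work items. A minimal kernel-style sketch of that deferral pattern (the my_info type is hypothetical; this fragment compiles only inside a kernel tree):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_info {
	struct workqueue_struct *wq;
	struct work_struct connect_work;
};

static void my_connect_work_fn(struct work_struct *work)
{
	struct my_info *info = container_of(work, struct my_info,
					    connect_work);

	/* Safe to sleep here: open the forwarding channel, etc. */
	(void)info;
}

static void my_notify_state(struct my_info *info, unsigned int event)
{
	/* The callback may run in atomic context, so never block here;
	 * just queue the heavy lifting. */
	queue_work(info->wq, &info->connect_work);
}

/* During init: INIT_WORK(&info->connect_work, my_connect_work_fn); */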
diff --git a/drivers/clk/msm/clock-gcc-8998.c b/drivers/clk/msm/clock-gcc-8998.c
index 8b7efbd093b5..f9d713a22c76 100644
--- a/drivers/clk/msm/clock-gcc-8998.c
+++ b/drivers/clk/msm/clock-gcc-8998.c
@@ -239,28 +239,6 @@ static struct pll_vote_clk gpll4 = {
};
DEFINE_EXT_CLK(gpll4_out_main, &gpll4.c);
-static struct clk_freq_tbl ftbl_hmss_ahb_clk_src[] = {
- F( 19200000, cxo_clk_src_ao, 1, 0, 0),
- F( 50000000, gpll0_out_main, 12, 0, 0),
- F( 100000000, gpll0_out_main, 6, 0, 0),
- F_END
-};
-
-static struct rcg_clk hmss_ahb_clk_src = {
- .cmd_rcgr_reg = GCC_HMSS_AHB_CMD_RCGR,
- .set_rate = set_rate_hid,
- .freq_tbl = ftbl_hmss_ahb_clk_src,
- .current_freq = &rcg_dummy_freq,
- .base = &virt_base,
- .c = {
- .dbg_name = "hmss_ahb_clk_src",
- .ops = &clk_ops_rcg,
- VDD_DIG_FMAX_MAP3_AO(LOWER, 19200000, LOW, 50000000,
- NOMINAL, 100000000),
- CLK_INIT(hmss_ahb_clk_src.c),
- },
-};
-
static struct clk_freq_tbl ftbl_usb30_master_clk_src[] = {
F( 19200000, cxo_clk_src, 1, 0, 0),
F( 60000000, gpll0_out_main, 10, 0, 0),
@@ -1732,20 +1710,6 @@ static struct branch_clk gcc_gpu_iref_clk = {
},
};
-static struct local_vote_clk gcc_hmss_ahb_clk = {
- .cbcr_reg = GCC_HMSS_AHB_CBCR,
- .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
- .en_mask = BIT(21),
- .base = &virt_base,
- .c = {
- .dbg_name = "gcc_hmss_ahb_clk",
- .always_on = true,
- .parent = &hmss_ahb_clk_src.c,
- .ops = &clk_ops_vote,
- CLK_INIT(gcc_hmss_ahb_clk.c),
- },
-};
-
static struct branch_clk gcc_hmss_dvm_bus_clk = {
.cbcr_reg = GCC_HMSS_DVM_BUS_CBCR,
.has_sibling = 1,
@@ -2408,7 +2372,6 @@ static struct mux_clk gcc_debug_mux = {
{ &gcc_ce1_ahb_m_clk.c, 0x0099 },
{ &measure_only_bimc_hmss_axi_clk.c, 0x00bb },
{ &gcc_bimc_gfx_clk.c, 0x00ac },
- { &gcc_hmss_ahb_clk.c, 0x00ba },
{ &gcc_hmss_rbcpr_clk.c, 0x00bc },
{ &gcc_gp1_clk.c, 0x00df },
{ &gcc_gp2_clk.c, 0x00e0 },
@@ -2531,7 +2494,6 @@ static struct clk_lookup msm_clocks_gcc_8998[] = {
CLK_LIST(gcc_gpu_gpll0_div_clk),
CLK_LIST(gpll4),
CLK_LIST(gpll4_out_main),
- CLK_LIST(hmss_ahb_clk_src),
CLK_LIST(usb30_master_clk_src),
CLK_LIST(pcie_aux_clk_src),
CLK_LIST(ufs_axi_clk_src),
@@ -2629,7 +2591,6 @@ static struct clk_lookup msm_clocks_gcc_8998[] = {
CLK_LIST(gcc_gpu_bimc_gfx_clk),
CLK_LIST(gcc_gpu_cfg_ahb_clk),
CLK_LIST(gcc_gpu_iref_clk),
- CLK_LIST(gcc_hmss_ahb_clk),
CLK_LIST(gcc_hmss_dvm_bus_clk),
CLK_LIST(gcc_hmss_rbcpr_clk),
CLK_LIST(gcc_mmss_noc_cfg_ahb_clk),
@@ -2742,12 +2703,12 @@ static int msm_gcc_8998_probe(struct platform_device *pdev)
}
/*
- * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
- * turned off by hardware during certain apps low power modes.
+ * Clear the HMSS_AHB_CLK_ENA bit to allow the gcc_hmss_ahb_clk clock
+ * to be gated by RPM during VDD_MIN.
*/
- regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
- regval |= BIT(21);
- writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
+ regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
+ regval &= ~BIT(21);
+ writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
if (IS_ERR(vdd_dig.regulator[0])) {
diff --git a/drivers/clk/msm/clock-generic.c b/drivers/clk/msm/clock-generic.c
index eeb940e434cf..69e47a103402 100644
--- a/drivers/clk/msm/clock-generic.c
+++ b/drivers/clk/msm/clock-generic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -491,6 +491,9 @@ static long __slave_div_round_rate(struct clk *c, unsigned long rate,
if (best_div)
*best_div = div;
+ if (d->data.is_half_divider)
+ p_rate *= 2;
+
return p_rate / div;
}
@@ -530,9 +533,16 @@ static int slave_div_set_rate(struct clk *c, unsigned long rate)
static unsigned long slave_div_get_rate(struct clk *c)
{
struct div_clk *d = to_div_clk(c);
+ unsigned long rate;
+
if (!d->data.div)
return 0;
- return clk_get_rate(c->parent) / d->data.div;
+
+ rate = clk_get_rate(c->parent) / d->data.div;
+ if (d->data.is_half_divider)
+ rate *= 2;
+
+ return rate;
}
struct clk_ops clk_ops_slave_div = {
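The clock-generic.c change makes the slave divider honor is_half_divider in both the round-rate and get-rate paths: the stored div field holds twice the effective divider, so the computed rate is doubled. A small standalone sketch of the arithmetic:

#include <stdio.h>

/* Sketch of the half-divider math in slave_div_get_rate() above: the
 * div field stores 2x the effective divider when is_half_divider is set. */
static unsigned long slave_div_rate(unsigned long parent_rate,
				    unsigned int div, int is_half_divider)
{
	unsigned long rate = parent_rate / div;

	if (is_half_divider)
		rate *= 2;   /* effective divider is div / 2 */
	return rate;
}

int main(void)
{
	/* 200 MHz parent, div = 5 -> effective divider 2.5 -> 80 MHz */
	printf("%lu\n", slave_div_rate(200000000UL, 5, 1));
	return 0;
}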
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 1639b1b7f94b..9d9aa61c480a 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -89,6 +89,7 @@ enum clk_osm_trace_packet_id {
#define SINGLE_CORE 1
#define MAX_CORE_COUNT 4
#define LLM_SW_OVERRIDE_CNT 3
+#define OSM_SEQ_MINUS_ONE 0xff
#define ENABLE_REG 0x1004
#define INDEX_REG 0x1150
@@ -367,8 +368,10 @@ struct clk_osm {
u32 cluster_num;
u32 irq;
u32 apm_crossover_vc;
+ u32 apm_threshold_pre_vc;
u32 apm_threshold_vc;
u32 mem_acc_crossover_vc;
+ u32 mem_acc_threshold_pre_vc;
u32 mem_acc_threshold_vc;
u32 cycle_counter_reads;
u32 cycle_counter_delay;
@@ -1646,10 +1649,26 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
if (corner_volt >= apm_threshold) {
c->apm_threshold_vc = c->osm_table[i].virtual_corner;
+ /*
+ * Handle case where VC 0 has open-loop
+ * greater than or equal to APM threshold voltage.
+ */
+ c->apm_threshold_pre_vc = c->apm_threshold_vc ?
+ c->apm_threshold_vc - 1 : OSM_SEQ_MINUS_ONE;
break;
}
}
+ /*
+ * This assumes the OSM table uses corners
+ * 0 to MAX_VIRTUAL_CORNER - 1.
+ */
+ if (!c->apm_threshold_vc &&
+ c->apm_threshold_pre_vc != OSM_SEQ_MINUS_ONE) {
+ c->apm_threshold_vc = MAX_VIRTUAL_CORNER;
+ c->apm_threshold_pre_vc = c->apm_threshold_vc - 1;
+ }
+
/* Determine MEM ACC threshold virtual corner */
if (mem_acc_threshold) {
for (i = 0; i < OSM_TABLE_SIZE; i++) {
@@ -1660,6 +1679,15 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
if (corner_volt >= mem_acc_threshold) {
c->mem_acc_threshold_vc
= c->osm_table[i].virtual_corner;
+ /*
+ * Handle case where VC 0 has open-loop
+ * greater than or equal to MEM-ACC threshold
+ * voltage.
+ */
+ c->mem_acc_threshold_pre_vc =
+ c->mem_acc_threshold_vc ?
+ c->mem_acc_threshold_vc - 1 :
+ OSM_SEQ_MINUS_ONE;
break;
}
}
@@ -1668,9 +1696,13 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
* This assumes the OSM table uses corners
* 0 to MAX_VIRTUAL_CORNER - 1.
*/
- if (!c->mem_acc_threshold_vc)
+ if (!c->mem_acc_threshold_vc && c->mem_acc_threshold_pre_vc
+ != OSM_SEQ_MINUS_ONE) {
c->mem_acc_threshold_vc =
MAX_VIRTUAL_CORNER;
+ c->mem_acc_threshold_pre_vc =
+ c->mem_acc_threshold_vc - 1;
+ }
}
return 0;
@@ -2021,13 +2053,20 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
* highest MEM ACC threshold if it is specified instead of the
* fixed mapping in the LUT.
*/
- if (c->mem_acc_threshold_vc) {
- threshold_vc[2] = c->mem_acc_threshold_vc - 1;
+ if (c->mem_acc_threshold_vc || c->mem_acc_threshold_pre_vc
+ == OSM_SEQ_MINUS_ONE) {
+ threshold_vc[2] = c->mem_acc_threshold_pre_vc;
threshold_vc[3] = c->mem_acc_threshold_vc;
- if (threshold_vc[1] >= threshold_vc[2])
- threshold_vc[1] = threshold_vc[2] - 1;
- if (threshold_vc[0] >= threshold_vc[1])
- threshold_vc[0] = threshold_vc[1] - 1;
+
+ if (c->mem_acc_threshold_pre_vc == OSM_SEQ_MINUS_ONE) {
+ threshold_vc[1] = threshold_vc[0] =
+ c->mem_acc_threshold_pre_vc;
+ } else {
+ if (threshold_vc[1] >= threshold_vc[2])
+ threshold_vc[1] = threshold_vc[2] - 1;
+ if (threshold_vc[0] >= threshold_vc[1])
+ threshold_vc[0] = threshold_vc[1] - 1;
+ }
}
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(55),
@@ -2045,7 +2084,8 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
* Program L_VAL corresponding to the first virtual
* corner with MEM ACC level 3.
*/
- if (c->mem_acc_threshold_vc)
+ if (c->mem_acc_threshold_vc ||
+ c->mem_acc_threshold_pre_vc == OSM_SEQ_MINUS_ONE)
for (i = 0; i < c->num_entries; i++)
if (c->mem_acc_threshold_vc == table[i].virtual_corner)
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(32),
@@ -2365,8 +2405,7 @@ static void clk_osm_apm_vc_setup(struct clk_osm *c)
SEQ_REG(8));
clk_osm_write_reg(c, c->apm_threshold_vc,
SEQ_REG(15));
- clk_osm_write_reg(c, c->apm_threshold_vc != 0 ?
- c->apm_threshold_vc - 1 : 0xff,
+ clk_osm_write_reg(c, c->apm_threshold_pre_vc,
SEQ_REG(31));
clk_osm_write_reg(c, 0x3b | c->apm_threshold_vc << 6,
SEQ_REG(73));
@@ -2390,8 +2429,7 @@ static void clk_osm_apm_vc_setup(struct clk_osm *c)
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(15),
c->apm_threshold_vc);
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(31),
- c->apm_threshold_vc != 0 ?
- c->apm_threshold_vc - 1 : 0xff);
+ c->apm_threshold_pre_vc);
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(76),
0x39 | c->apm_threshold_vc << 6);
}
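The clock-osm.c hunks cache a pre-threshold virtual corner alongside each APM and MEM-ACC threshold corner, using 0xff (OSM_SEQ_MINUS_ONE) as the sequencer's sentinel for "one below corner 0". A standalone sketch of that selection:

#include <stdio.h>

#define OSM_SEQ_MINUS_ONE 0xff   /* sequencer sentinel for "VC - 1" of corner 0 */

/* Sketch of the pre-threshold corner computation added above. */
static unsigned int pre_vc(unsigned int threshold_vc)
{
	return threshold_vc ? threshold_vc - 1 : OSM_SEQ_MINUS_ONE;
}

int main(void)
{
	printf("%#x %#x\n", pre_vc(5), pre_vc(0));   /* 0x4 0xff */
	return 0;
}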
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index e6054444599c..1d9085cff82c 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -876,5 +876,6 @@ const struct clk_ops clk_alpha_pll_slew_ops = {
.recalc_rate = clk_alpha_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
.set_rate = clk_alpha_pll_slew_set_rate,
+ .list_registers = clk_alpha_pll_list_registers,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_slew_ops);
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index e88d70f07a1c..6b00bee337a1 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -1952,16 +1952,21 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
}
}
+ threshold_vc[0] = mem_acc_level_map[0];
+ threshold_vc[1] = mem_acc_level_map[0] + 1;
+ threshold_vc[2] = mem_acc_level_map[1];
+ threshold_vc[3] = mem_acc_level_map[1] + 1;
+
if (c->secure_init) {
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(1), SEQ_REG(51));
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(2), SEQ_REG(52));
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(3), SEQ_REG(53));
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(4), SEQ_REG(54));
clk_osm_write_reg(c, MEM_ACC_APM_READ_MASK, SEQ_REG(59));
- clk_osm_write_reg(c, mem_acc_level_map[0], SEQ_REG(55));
- clk_osm_write_reg(c, mem_acc_level_map[0] + 1, SEQ_REG(56));
- clk_osm_write_reg(c, mem_acc_level_map[1], SEQ_REG(57));
- clk_osm_write_reg(c, mem_acc_level_map[1] + 1, SEQ_REG(58));
+ clk_osm_write_reg(c, threshold_vc[0], SEQ_REG(55));
+ clk_osm_write_reg(c, threshold_vc[1], SEQ_REG(56));
+ clk_osm_write_reg(c, threshold_vc[2], SEQ_REG(57));
+ clk_osm_write_reg(c, threshold_vc[3], SEQ_REG(58));
clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(28),
SEQ_REG(49));
@@ -1977,11 +1982,6 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(88),
c->mem_acc_crossover_vc);
- threshold_vc[0] = mem_acc_level_map[0];
- threshold_vc[1] = mem_acc_level_map[0] + 1;
- threshold_vc[2] = mem_acc_level_map[1];
- threshold_vc[3] = mem_acc_level_map[1] + 1;
-
/*
* Use dynamic MEM ACC threshold voltage based value for the
* highest MEM ACC threshold if it is specified instead of the
@@ -2011,11 +2011,10 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
* Program L_VAL corresponding to the first virtual
* corner with MEM ACC level 3.
*/
- if (c->mem_acc_threshold_vc)
- for (i = 0; i < c->num_entries; i++)
- if (c->mem_acc_threshold_vc == table[i].virtual_corner)
- scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(32),
- L_VAL(table[i].freq_data));
+ for (i = 0; i < c->num_entries; i++)
+ if (threshold_vc[3] == table[i].virtual_corner)
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(32),
+ L_VAL(table[i].freq_data));
}
void clk_osm_setup_sequencer(struct clk_osm *c)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a07deb902577..9e5c0b6f7a0e 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -307,7 +307,18 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
u32 cfg, mask, old_cfg;
struct clk_hw *hw = &rcg->clkr.hw;
- int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ int ret, index;
+
+ /*
+ * In case the frequency table of cxo_f is used, the src in parent_map
+ * and the source in cxo_f.src could be different. Update the index to
+ * '0' since it's assumed that CXO is always fed to port 0 of the RCGs
+ * that HLOS controls.
+ */
+ if (f == &cxo_f)
+ index = 0;
+ else
+ index = qcom_find_src_index(hw, rcg->parent_map, f->src);
if (index < 0)
return index;
@@ -507,6 +518,15 @@ static int clk_rcg2_enable(struct clk_hw *hw)
if (!f)
return -EINVAL;
+ /*
+ * If CXO is not listed as a supported frequency in the frequency
+ * table, the above API would return the lowest supported frequency
+ * instead. This will lead to incorrect configuration of the RCG.
+ * Check if the RCG rate is CXO and configure it accordingly.
+ */
+ if (rate == cxo_f.freq)
+ f = &cxo_f;
+
clk_rcg_set_force_enable(hw);
clk_rcg2_configure(rcg, f);
clk_rcg_clear_force_enable(hw);
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 095530e72b78..0d76b6b28985 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -570,8 +570,6 @@ static DEFINE_CLK_VOTER(pnoc_msmbus_clk, pnoc_clk, LONG_MAX);
static DEFINE_CLK_VOTER(pnoc_msmbus_a_clk, pnoc_a_clk, LONG_MAX);
static DEFINE_CLK_VOTER(pnoc_pm_clk, pnoc_clk, LONG_MAX);
static DEFINE_CLK_VOTER(pnoc_sps_clk, pnoc_clk, 0);
-static DEFINE_CLK_VOTER(mmssnoc_a_cpu_clk, mmssnoc_axi_a_clk,
- 19200000);
/* Voter Branch clocks */
static DEFINE_CLK_BRANCH_VOTER(cxo_dwc3_clk, cxo);
@@ -740,7 +738,6 @@ static struct clk_hw *sdm660_clks[] = {
[CXO_PIL_LPASS_CLK] = &cxo_pil_lpass_clk.hw,
[CXO_PIL_CDSP_CLK] = &cxo_pil_cdsp_clk.hw,
[CNOC_PERIPH_KEEPALIVE_A_CLK] = &cnoc_periph_keepalive_a_clk.hw,
- [MMSSNOC_A_CLK_CPU_VOTE] = &mmssnoc_a_cpu_clk.hw
};
static const struct rpm_smd_clk_desc rpm_clk_sdm660 = {
@@ -861,7 +858,6 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
/* Hold an active set vote for the cnoc_periph resource */
clk_set_rate(cnoc_periph_keepalive_a_clk.hw.clk, 19200000);
clk_prepare_enable(cnoc_periph_keepalive_a_clk.hw.clk);
- clk_prepare_enable(mmssnoc_a_cpu_clk.hw.clk);
}
dev_info(&pdev->dev, "Registered RPM clocks\n");
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 3413859a56ef..c282253da584 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -730,32 +730,6 @@ static struct clk_rcg2 gp3_clk_src = {
},
};
-static const struct freq_tbl ftbl_hmss_ahb_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
- F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- { }
-};
-
-static struct clk_rcg2 hmss_ahb_clk_src = {
- .cmd_rcgr = 0x48014,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = gcc_parent_map_1,
- .freq_tbl = ftbl_hmss_ahb_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "hmss_ahb_clk_src",
- .parent_names = gcc_parent_names_ao_1,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- VDD_DIG_FMAX_MAP3_AO(
- LOWER, 19200000,
- LOW, 50000000,
- NOMINAL, 100000000),
- },
-};
-
static const struct freq_tbl ftbl_hmss_gpll0_clk_src[] = {
F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
{ }
@@ -1823,24 +1797,6 @@ static struct clk_branch gcc_gpu_gpll0_div_clk = {
},
};
-static struct clk_branch gcc_hmss_ahb_clk = {
- .halt_reg = 0x48000,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x52004,
- .enable_mask = BIT(21),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_hmss_ahb_clk",
- .parent_names = (const char *[]){
- "hmss_ahb_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_hmss_dvm_bus_clk = {
.halt_reg = 0x4808c,
.halt_check = BRANCH_HALT,
@@ -2493,14 +2449,15 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
},
};
-static struct clk_gate2 gcc_usb3_phy_pipe_clk = {
- .udelay = 50,
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0x50004,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x50004,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_pipe_clk",
- .ops = &clk_gate2_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2640,7 +2597,6 @@ static struct clk_regmap *gcc_660_clocks[] = {
[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
[GCC_GPU_GPLL0_CLK] = &gcc_gpu_gpll0_clk.clkr,
[GCC_GPU_GPLL0_DIV_CLK] = &gcc_gpu_gpll0_div_clk.clkr,
- [GCC_HMSS_AHB_CLK] = &gcc_hmss_ahb_clk.clkr,
[GCC_HMSS_DVM_BUS_CLK] = &gcc_hmss_dvm_bus_clk.clkr,
[GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr,
[GCC_MMSS_GPLL0_CLK] = &gcc_mmss_gpll0_clk.clkr,
@@ -2690,7 +2646,6 @@ static struct clk_regmap *gcc_660_clocks[] = {
[GPLL1] = &gpll1_out_main.clkr,
[GPLL4] = &gpll4_out_main.clkr,
[HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &hlos1_vote_lpass_adsp_smmu_clk.clkr,
- [HMSS_AHB_CLK_SRC] = &hmss_ahb_clk_src.clkr,
[HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr,
[HMSS_GPLL4_CLK_SRC] = &hmss_gpll4_clk_src.clkr,
[HMSS_RBCPR_CLK_SRC] = &hmss_rbcpr_clk_src.clkr,
@@ -2764,12 +2719,6 @@ static int gcc_660_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- /*
- * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
- * turned off by hardware during certain apps low power modes.
- */
- regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
-
vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
if (IS_ERR(vdd_dig.regulator[0])) {
if (!(PTR_ERR(vdd_dig.regulator[0]) == -EPROBE_DEFER))
@@ -2882,7 +2831,6 @@ static const char *const debug_mux_parent_names[] = {
"gcc_gp3_clk",
"gcc_gpu_bimc_gfx_clk",
"gcc_gpu_cfg_ahb_clk",
- "gcc_hmss_ahb_clk",
"gcc_hmss_dvm_bus_clk",
"gcc_hmss_rbcpr_clk",
"gcc_mmss_noc_cfg_ahb_clk",
@@ -3061,7 +3009,6 @@ static struct clk_debug_mux gcc_debug_mux = {
{ "gcc_gp3_clk", 0x0E1 },
{ "gcc_gpu_bimc_gfx_clk", 0x13F },
{ "gcc_gpu_cfg_ahb_clk", 0x13B },
- { "gcc_hmss_ahb_clk", 0x0BA },
{ "gcc_hmss_dvm_bus_clk", 0x0BF },
{ "gcc_hmss_rbcpr_clk", 0x0BC },
{ "gcc_mmss_noc_cfg_ahb_clk", 0x020 },
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index aec73d62bc18..87e6f8c6be0a 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -529,7 +529,7 @@ static struct clk_rcg2 ahb_clk_src = {
.hid_width = 5,
.parent_map = mmcc_parent_map_10,
.freq_tbl = ftbl_ahb_clk_src,
- .flags = FORCE_ENABLE_RCGR,
+ .enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "ahb_clk_src",
.parent_names = mmcc_parent_names_10,
@@ -3002,6 +3002,7 @@ static const struct qcom_cc_desc mmcc_660_desc = {
static const struct of_device_id mmcc_660_match_table[] = {
{ .compatible = "qcom,mmcc-sdm660" },
+ { .compatible = "qcom,mmcc-sdm630" },
{ }
};
MODULE_DEVICE_TABLE(of, mmcc_660_match_table);
@@ -3010,11 +3011,15 @@ static int mmcc_660_probe(struct platform_device *pdev)
{
int ret = 0;
struct regmap *regmap;
+ bool is_sdm630 = 0;
regmap = qcom_cc_map(pdev, &mmcc_660_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+ is_sdm630 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,mmcc-sdm630");
+
/* PLLs connected on Mx rails of MMSS_CC */
vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx_mmss");
if (IS_ERR(vdd_mx.regulator[0])) {
@@ -3048,6 +3053,17 @@ static int mmcc_660_probe(struct platform_device *pdev)
clk_alpha_pll_configure(&mmpll8_pll_out_main, regmap, &mmpll8_config);
clk_alpha_pll_configure(&mmpll10_pll_out_main, regmap, &mmpll10_config);
+ if (is_sdm630) {
+ mmcc_660_desc.clks[BYTE1_CLK_SRC] = 0;
+ mmcc_660_desc.clks[MMSS_MDSS_BYTE1_CLK] = 0;
+ mmcc_660_desc.clks[MMSS_MDSS_BYTE1_INTF_DIV_CLK] = 0;
+ mmcc_660_desc.clks[MMSS_MDSS_BYTE1_INTF_CLK] = 0;
+ mmcc_660_desc.clks[ESC1_CLK_SRC] = 0;
+ mmcc_660_desc.clks[MMSS_MDSS_ESC1_CLK] = 0;
+ mmcc_660_desc.clks[PCLK1_CLK_SRC] = 0;
+ mmcc_660_desc.clks[MMSS_MDSS_PCLK1_CLK] = 0;
+ }
+
ret = qcom_cc_really_probe(pdev, &mmcc_660_desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register MMSS clocks\n");
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 5ce87a6edcc3..7459401979fe 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1490,6 +1490,11 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all dst length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
+ if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
+ pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
+ __func__, i, req->vbuf.dst[i].len);
+ goto error;
+ }
if (req->vbuf.dst[i].len >= U32_MAX - total) {
pr_err("%s: Integer overflow on total req dst vbuf length\n",
__func__);
@@ -1504,6 +1509,11 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all src length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
+ if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
+ pr_err("%s: NULL req src vbuf[%d] with length %d\n",
+ __func__, i, req->vbuf.src[i].len);
+ goto error;
+ }
if (req->vbuf.src[i].len > U32_MAX - total) {
pr_err("%s: Integer overflow on total req src vbuf length\n",
__func__);
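The qcedev.c hunks reject any source or destination entry that has a NULL vaddr but a nonzero length, on top of the existing overflow-guarded length summation. A standalone sketch of the combined check (the vbuf_entry type and return values are stand-ins for the driver's structures):

#include <stdint.h>
#include <stdio.h>

struct vbuf_entry {
	void     *vaddr;
	uint32_t  len;
};

/* Sketch of the validation added above: reject NULL buffers that claim
 * a length, and sum lengths without overflowing the running total. */
static int check_entries(const struct vbuf_entry *v, unsigned int n,
			 uint32_t data_len)
{
	uint32_t total = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!v[i].vaddr && v[i].len)
			return -1;   /* NULL buffer with nonzero length */
		if (v[i].len >= UINT32_MAX - total)
			return -1;   /* would overflow the running total */
		total += v[i].len;
	}
	return total == data_len ? 0 : -1;
}

int main(void)
{
	struct vbuf_entry ok[] = {
		{ (void *)0x1000, 16 },
		{ (void *)0x2000, 16 },
	};

	printf("%d\n", check_entries(ok, 2, 32));   /* prints 0 */
	return 0;
}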
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index afd94a1e85d3..62675198d6ac 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -10,6 +10,7 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select BACKLIGHT_CLASS_DEVICE
default y
help
DRM/KMS driver for MSM/snapdragon.
@@ -88,3 +89,9 @@ config DRM_SDE_WB
help
Choose this option for writeback connector support.
+config DRM_SDE_HDMI
+ bool "Enable HDMI driver support in DRM SDE driver"
+ depends on DRM_MSM
+ default y
+ help
+ Choose this option if HDMI connector support is needed in the SDE driver.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 4ca16fc01e1c..d81ec8918ce7 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,4 +1,6 @@
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi-staging
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
@@ -45,14 +47,19 @@ msm_drm-y := \
sde/sde_backlight.o \
sde/sde_color_processing.o \
sde/sde_vbif.o \
- sde_dbg_evtlog.o
+ sde_dbg_evtlog.o \
+ sde_io_util.o \
# use drm gpu driver only if qcom_kgsl driver not available
ifneq ($(CONFIG_QCOM_KGSL),y)
msm_drm-y += adreno/adreno_device.o \
adreno/adreno_gpu.o \
adreno/a3xx_gpu.o \
- adreno/a4xx_gpu.o
+ adreno/a4xx_gpu.o \
+ adreno/a5xx_gpu.o \
+ adreno/a5xx_power.o \
+ adreno/a5xx_preempt.o \
+ adreno/a5xx_snapshot.o
endif
msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
@@ -90,6 +97,11 @@ msm_drm-$(CONFIG_DRM_MSM_DSI_STAGING) += dsi-staging/dsi_phy.o \
dsi-staging/dsi_panel.o \
dsi-staging/dsi_display_test.o
+msm_drm-$(CONFIG_DRM_SDE_HDMI) += \
+ hdmi-staging/sde_hdmi.o \
+ hdmi-staging/sde_hdmi_bridge.o \
+ hdmi-staging/sde_hdmi_audio.o
+
msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
dsi/pll/dsi_pll_28nm.o
@@ -121,12 +133,14 @@ msm_drm-$(CONFIG_DRM_MSM) += \
msm_gem.o \
msm_gem_prime.o \
msm_gem_submit.o \
+ msm_gem_vma.o \
msm_gpu.o \
msm_iommu.o \
msm_smmu.o \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
- msm_prop.o
+ msm_prop.o \
+ msm_snapshot.o
obj-$(CONFIG_DRM_MSM) += msm_drm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 9e2aceb4ffe6..8d16b21ef8a5 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 97dc1c6ec107..d521b13bdf9f 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -111,10 +113,14 @@ enum a3xx_vtx_fmt {
VFMT_8_8_SNORM = 53,
VFMT_8_8_8_SNORM = 54,
VFMT_8_8_8_8_SNORM = 55,
- VFMT_10_10_10_2_UINT = 60,
- VFMT_10_10_10_2_UNORM = 61,
- VFMT_10_10_10_2_SINT = 62,
- VFMT_10_10_10_2_SNORM = 63,
+ VFMT_10_10_10_2_UINT = 56,
+ VFMT_10_10_10_2_UNORM = 57,
+ VFMT_10_10_10_2_SINT = 58,
+ VFMT_10_10_10_2_SNORM = 59,
+ VFMT_2_10_10_10_UINT = 60,
+ VFMT_2_10_10_10_UNORM = 61,
+ VFMT_2_10_10_10_SINT = 62,
+ VFMT_2_10_10_10_SNORM = 63,
};
enum a3xx_tex_fmt {
@@ -124,10 +130,14 @@ enum a3xx_tex_fmt {
TFMT_Z16_UNORM = 9,
TFMT_X8Z24_UNORM = 10,
TFMT_Z32_FLOAT = 11,
- TFMT_NV12_UV_TILED = 17,
- TFMT_NV12_Y_TILED = 19,
- TFMT_NV12_UV = 21,
- TFMT_NV12_Y = 23,
+ TFMT_UV_64X32 = 16,
+ TFMT_VU_64X32 = 17,
+ TFMT_Y_64X32 = 18,
+ TFMT_NV12_64X32 = 19,
+ TFMT_UV_LINEAR = 20,
+ TFMT_VU_LINEAR = 21,
+ TFMT_Y_LINEAR = 22,
+ TFMT_NV12_LINEAR = 23,
TFMT_I420_Y = 24,
TFMT_I420_U = 26,
TFMT_I420_V = 27,
@@ -138,10 +148,12 @@ enum a3xx_tex_fmt {
TFMT_DXT1 = 36,
TFMT_DXT3 = 37,
TFMT_DXT5 = 38,
+ TFMT_2_10_10_10_UNORM = 40,
TFMT_10_10_10_2_UNORM = 41,
TFMT_9_9_9_E5_FLOAT = 42,
TFMT_11_11_10_FLOAT = 43,
TFMT_A8_UNORM = 44,
+ TFMT_L8_UNORM = 45,
TFMT_L8_A8_UNORM = 47,
TFMT_8_UNORM = 48,
TFMT_8_8_UNORM = 49,
@@ -183,6 +195,8 @@ enum a3xx_tex_fmt {
TFMT_32_SINT = 92,
TFMT_32_32_SINT = 93,
TFMT_32_32_32_32_SINT = 95,
+ TFMT_2_10_10_10_UINT = 96,
+ TFMT_10_10_10_2_UINT = 97,
TFMT_ETC2_RG11_SNORM = 112,
TFMT_ETC2_RG11_UNORM = 113,
TFMT_ETC2_R11_SNORM = 114,
@@ -215,6 +229,9 @@ enum a3xx_color_fmt {
RB_R8_UINT = 14,
RB_R8_SINT = 15,
RB_R10G10B10A2_UNORM = 16,
+ RB_A2R10G10B10_UNORM = 17,
+ RB_R10G10B10A2_UINT = 18,
+ RB_A2R10G10B10_UINT = 19,
RB_A8_UNORM = 20,
RB_R8_UNORM = 21,
RB_R16_FLOAT = 24,
@@ -244,38 +261,273 @@ enum a3xx_color_fmt {
RB_R32G32B32A32_UINT = 59,
};
+enum a3xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_AHB_PFPTRANS_WAIT = 3,
+ CP_AHB_NRTTRANS_WAIT = 6,
+ CP_CSF_NRT_READ_WAIT = 8,
+ CP_CSF_I1_FIFO_FULL = 9,
+ CP_CSF_I2_FIFO_FULL = 10,
+ CP_CSF_ST_FIFO_FULL = 11,
+ CP_RESERVED_12 = 12,
+ CP_CSF_RING_ROQ_FULL = 13,
+ CP_CSF_I1_ROQ_FULL = 14,
+ CP_CSF_I2_ROQ_FULL = 15,
+ CP_CSF_ST_ROQ_FULL = 16,
+ CP_RESERVED_17 = 17,
+ CP_MIU_TAG_MEM_FULL = 18,
+ CP_MIU_NRT_WRITE_STALLED = 22,
+ CP_MIU_NRT_READ_STALLED = 23,
+ CP_ME_REGS_RB_DONE_FIFO_FULL = 26,
+ CP_ME_REGS_VS_EVENT_FIFO_FULL = 27,
+ CP_ME_REGS_PS_EVENT_FIFO_FULL = 28,
+ CP_ME_REGS_CF_EVENT_FIFO_FULL = 29,
+ CP_ME_MICRO_RB_STARVED = 30,
+ CP_AHB_RBBM_DWORD_SENT = 40,
+ CP_ME_BUSY_CLOCKS = 41,
+ CP_ME_WAIT_CONTEXT_AVAIL = 42,
+ CP_PFP_TYPE0_PACKET = 43,
+ CP_PFP_TYPE3_PACKET = 44,
+ CP_CSF_RB_WPTR_NEQ_RPTR = 45,
+ CP_CSF_I1_SIZE_NEQ_ZERO = 46,
+ CP_CSF_I2_SIZE_NEQ_ZERO = 47,
+ CP_CSF_RBI1I2_FETCHING = 48,
+};
+
+enum a3xx_gras_tse_perfcounter_select {
+ GRAS_TSEPERF_INPUT_PRIM = 0,
+ GRAS_TSEPERF_INPUT_NULL_PRIM = 1,
+ GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2,
+ GRAS_TSEPERF_CLIPPED_PRIM = 3,
+ GRAS_TSEPERF_NEW_PRIM = 4,
+ GRAS_TSEPERF_ZERO_AREA_PRIM = 5,
+ GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6,
+ GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7,
+ GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8,
+ GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9,
+ GRAS_TSEPERF_PRE_CLIP_PRIM = 10,
+ GRAS_TSEPERF_POST_CLIP_PRIM = 11,
+ GRAS_TSEPERF_WORKING_CYCLES = 12,
+ GRAS_TSEPERF_PC_STARVE = 13,
+ GRAS_TSERASPERF_STALL = 14,
+};
+
+enum a3xx_gras_ras_perfcounter_select {
+ GRAS_RASPERF_16X16_TILES = 0,
+ GRAS_RASPERF_8X8_TILES = 1,
+ GRAS_RASPERF_4X4_TILES = 2,
+ GRAS_RASPERF_WORKING_CYCLES = 3,
+ GRAS_RASPERF_STALL_CYCLES_BY_RB = 4,
+ GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5,
+ GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6,
+};
+
+enum a3xx_hlsq_perfcounter_select {
+ HLSQ_PERF_SP_VS_CONSTANT = 0,
+ HLSQ_PERF_SP_VS_INSTRUCTIONS = 1,
+ HLSQ_PERF_SP_FS_CONSTANT = 2,
+ HLSQ_PERF_SP_FS_INSTRUCTIONS = 3,
+ HLSQ_PERF_TP_STATE = 4,
+ HLSQ_PERF_QUADS = 5,
+ HLSQ_PERF_PIXELS = 6,
+ HLSQ_PERF_VERTICES = 7,
+ HLSQ_PERF_FS8_THREADS = 8,
+ HLSQ_PERF_FS16_THREADS = 9,
+ HLSQ_PERF_FS32_THREADS = 10,
+ HLSQ_PERF_VS8_THREADS = 11,
+ HLSQ_PERF_VS16_THREADS = 12,
+ HLSQ_PERF_SP_VS_DATA_BYTES = 13,
+ HLSQ_PERF_SP_FS_DATA_BYTES = 14,
+ HLSQ_PERF_ACTIVE_CYCLES = 15,
+ HLSQ_PERF_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_PERF_STALL_CYCLES_SP_VS = 17,
+ HLSQ_PERF_STALL_CYCLES_SP_FS = 18,
+ HLSQ_PERF_STALL_CYCLES_UCHE = 19,
+ HLSQ_PERF_RBBM_LOAD_CYCLES = 20,
+ HLSQ_PERF_DI_TO_VS_START_SP0 = 21,
+ HLSQ_PERF_DI_TO_FS_START_SP0 = 22,
+ HLSQ_PERF_VS_START_TO_DONE_SP0 = 23,
+ HLSQ_PERF_FS_START_TO_DONE_SP0 = 24,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26,
+ HLSQ_PERF_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_PERF_UCHE_LATENCY_COUNT = 28,
+};
+
+enum a3xx_pc_perfcounter_select {
+ PC_PCPERF_VISIBILITY_STREAMS = 0,
+ PC_PCPERF_TOTAL_INSTANCES = 1,
+ PC_PCPERF_PRIMITIVES_PC_VPC = 2,
+ PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3,
+ PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4,
+ PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5,
+ PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6,
+ PC_PCPERF_VERTICES_TO_VFD = 7,
+ PC_PCPERF_REUSED_VERTICES = 8,
+ PC_PCPERF_CYCLES_STALLED_BY_VFD = 9,
+ PC_PCPERF_CYCLES_STALLED_BY_TSE = 10,
+ PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11,
+ PC_PCPERF_CYCLES_IS_WORKING = 12,
+};
+
+enum a3xx_rb_perfcounter_select {
+ RB_RBPERF_ACTIVE_CYCLES_ANY = 0,
+ RB_RBPERF_ACTIVE_CYCLES_ALL = 1,
+ RB_RBPERF_STARVE_CYCLES_BY_SP = 2,
+ RB_RBPERF_STARVE_CYCLES_BY_RAS = 3,
+ RB_RBPERF_STARVE_CYCLES_BY_MARB = 4,
+ RB_RBPERF_STALL_CYCLES_BY_MARB = 5,
+ RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6,
+ RB_RBPERF_RB_MARB_DATA = 7,
+ RB_RBPERF_SP_RB_QUAD = 8,
+ RB_RBPERF_RAS_EARLY_Z_QUADS = 9,
+ RB_RBPERF_GMEM_CH0_READ = 10,
+ RB_RBPERF_GMEM_CH1_READ = 11,
+ RB_RBPERF_GMEM_CH0_WRITE = 12,
+ RB_RBPERF_GMEM_CH1_WRITE = 13,
+ RB_RBPERF_CP_CONTEXT_DONE = 14,
+ RB_RBPERF_CP_CACHE_FLUSH = 15,
+ RB_RBPERF_CP_ZPASS_DONE = 16,
+};
+
+enum a3xx_rbbm_perfcounter_select {
+ RBBM_ALAWYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TEX_BUSY = 12,
+ RBBM_ANY_USP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_RBBM_STATUS_MASKED = 22,
+};
+
enum a3xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_UCHE_LOAD_INSTRUCTIONS = 3,
+ SP_UCHE_STORE_INSTRUCTIONS = 4,
+ SP_UCHE_ATOMICS = 5,
+ SP_VS_TEX_INSTRUCTIONS = 6,
+ SP_VS_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_EFU_INSTRUCTIONS = 8,
+ SP_VS_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_TEX_INSTRUCTIONS = 11,
SP_FS_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_EFU_INSTRUCTIONS = 13,
SP_FS_FULL_ALU_INSTRUCTIONS = 14,
- SP0_ICL1_MISSES = 26,
+ SP_FS_HALF_ALU_INSTRUCTIONS = 15,
+ SP_FS_BARY_INSTRUCTIONS = 16,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
SP_ALU_ACTIVE_CYCLES = 29,
+ SP_EFU_ACTIVE_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_ACTIVE_CYCLES_ANY = 35,
+ SP_ACTIVE_CYCLES_ALL = 36,
+};
+
+enum a3xx_tp_perfcounter_select {
+ TPL1_TPPERF_L1_REQUESTS = 0,
+ TPL1_TPPERF_TP0_L1_REQUESTS = 1,
+ TPL1_TPPERF_TP0_L1_MISSES = 2,
+ TPL1_TPPERF_TP1_L1_REQUESTS = 3,
+ TPL1_TPPERF_TP1_L1_MISSES = 4,
+ TPL1_TPPERF_TP2_L1_REQUESTS = 5,
+ TPL1_TPPERF_TP2_L1_MISSES = 6,
+ TPL1_TPPERF_TP3_L1_REQUESTS = 7,
+ TPL1_TPPERF_TP3_L1_MISSES = 8,
+ TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9,
+ TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10,
+ TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11,
+ TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12,
+ TPL1_TPPERF_BILINEAR_OPS = 13,
+ TPL1_TPPERF_QUADSQUADS_OFFSET = 14,
+ TPL1_TPPERF_QUADQUADS_SHADOW = 15,
+ TPL1_TPPERF_QUADS_ARRAY = 16,
+ TPL1_TPPERF_QUADS_PROJECTION = 17,
+ TPL1_TPPERF_QUADS_GRADIENT = 18,
+ TPL1_TPPERF_QUADS_1D2D = 19,
+ TPL1_TPPERF_QUADS_3DCUBE = 20,
+ TPL1_TPPERF_ZERO_LOD = 21,
+ TPL1_TPPERF_OUTPUT_TEXELS = 22,
+ TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23,
+ TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24,
+ TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25,
+ TPL1_TPPERF_LATENCY = 26,
+ TPL1_TPPERF_LATENCY_TRANS = 27,
+};
+
+enum a3xx_vfd_perfcounter_select {
+ VFD_PERF_UCHE_BYTE_FETCHED = 0,
+ VFD_PERF_UCHE_TRANS = 1,
+ VFD_PERF_VPC_BYPASS_COMPONENTS = 2,
+ VFD_PERF_FETCH_INSTRUCTIONS = 3,
+ VFD_PERF_DECODE_INSTRUCTIONS = 4,
+ VFD_PERF_ACTIVE_CYCLES = 5,
+ VFD_PERF_STALL_CYCLES_UCHE = 6,
+ VFD_PERF_STALL_CYCLES_HLSQ = 7,
+ VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9,
};
-enum a3xx_rop_code {
- ROP_CLEAR = 0,
- ROP_NOR = 1,
- ROP_AND_INVERTED = 2,
- ROP_COPY_INVERTED = 3,
- ROP_AND_REVERSE = 4,
- ROP_INVERT = 5,
- ROP_XOR = 6,
- ROP_NAND = 7,
- ROP_AND = 8,
- ROP_EQUIV = 9,
- ROP_NOOP = 10,
- ROP_OR_INVERTED = 11,
- ROP_COPY = 12,
- ROP_OR_REVERSE = 13,
- ROP_OR = 14,
- ROP_SET = 15,
+enum a3xx_vpc_perfcounter_select {
+ VPC_PERF_SP_LM_PRIMITIVES = 0,
+ VPC_PERF_COMPONENTS_FROM_SP = 1,
+ VPC_PERF_SP_LM_COMPONENTS = 2,
+ VPC_PERF_ACTIVE_CYCLES = 3,
+ VPC_PERF_STALL_CYCLES_LM = 4,
+ VPC_PERF_STALL_CYCLES_RAS = 5,
};
-enum a3xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
+enum a3xx_uche_perfcounter_select {
+ UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4,
+ UCHE_UCHEPERF_READ_REQUESTS_TP = 8,
+ UCHE_UCHEPERF_READ_REQUESTS_VFD = 9,
+ UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10,
+ UCHE_UCHEPERF_READ_REQUESTS_MARB = 11,
+ UCHE_UCHEPERF_READ_REQUESTS_SP = 12,
+ UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13,
+ UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14,
+ UCHE_UCHEPERF_TAG_CHECK_FAILS = 15,
+ UCHE_UCHEPERF_EVICTS = 16,
+ UCHE_UCHEPERF_FLUSHES = 17,
+ UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18,
+ UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19,
+ UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
};
enum a3xx_intp_mode {
@@ -1138,13 +1390,14 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mod
{
return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
}
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
{
return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
}
-#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1217,7 +1470,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
@@ -1429,15 +1682,23 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
-#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000030
#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
{
return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
}
#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE 0x00000100
#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK 0x00fff000
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX 0x02000000
#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
@@ -1451,17 +1712,39 @@ static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
-#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x000000c0
#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
{
return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
}
#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
-#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
-#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK;
+}
#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK 0x000003fc
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK 0x03fc0000
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT 18
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK;
+}
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
@@ -1478,13 +1761,13 @@ static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
}
#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
{
@@ -1498,13 +1781,13 @@ static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
}
#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
{
@@ -1518,13 +1801,13 @@ static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
}
#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
{
return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
}
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
{
@@ -1532,13 +1815,13 @@ static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
}
#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
{
return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
}
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
{
@@ -1620,12 +1903,24 @@ static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
}
#define REG_A3XX_VFD_CONTROL_1 0x00002241
-#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000000f
#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
{
return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
}
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK 0x000000f0
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT 4
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK 0x00000f00
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT 8
+static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK;
+}
#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
@@ -2008,24 +2303,19 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffe
return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE 0x00000008
#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2033,8 +2323,6 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
-#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
-#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE 0x00800000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
@@ -2075,7 +2363,8 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
}
-#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
+#define A3XX_SP_VS_PARAM_REG_POS2DMODE 0x00010000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0x01f00000
#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
{
@@ -2085,24 +2374,26 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
-#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
}
+#define A3XX_SP_VS_OUT_REG_A_HALF 0x00000100
#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
}
-#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
}
+#define A3XX_SP_VS_OUT_REG_B_HALF 0x01000000
#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
@@ -2113,25 +2404,25 @@ static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x00007f00
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x007f0000
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0x7f000000
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
{
@@ -2139,6 +2430,12 @@ static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
}
#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2155,8 +2452,38 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
@@ -2182,24 +2509,22 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffe
return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
}
#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE 0x00000008
#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
+#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE 0x00020000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP 0x00040000
+#define A3XX_SP_FS_CTRL_REG0_OUTORDERED 0x00080000
#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2235,7 +2560,7 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
}
-#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x7f000000
#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
{
@@ -2243,6 +2568,12 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
}
#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2259,8 +2590,38 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
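
All of the register definitions in this generated header follow one pattern: a __MASK/__SHIFT pair per field, a static inline packer that shifts and masks the value, and bare defines for single-bit flags, so a full register word is built by OR-ing packed fields together. A small self-contained illustration with made-up FAKE_REG_* names (not registers from this header):

/*
 * Hedged sketch of the rnndb-generated helper pattern: each field packs
 * as (val << SHIFT) & MASK, so out-of-range input is clipped, and a
 * register value is the OR of its packed fields.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_REG_FIELDA__MASK	0x000000ff
#define FAKE_REG_FIELDA__SHIFT	0
static inline uint32_t FAKE_REG_FIELDA(uint32_t val)
{
	return (val << FAKE_REG_FIELDA__SHIFT) & FAKE_REG_FIELDA__MASK;
}

#define FAKE_REG_FIELDB__MASK	0x0000ff00
#define FAKE_REG_FIELDB__SHIFT	8
static inline uint32_t FAKE_REG_FIELDB(uint32_t val)
{
	return (val << FAKE_REG_FIELDB__SHIFT) & FAKE_REG_FIELDB__MASK;
}

#define FAKE_REG_ENABLE		0x00010000	/* single-bit flags are bare defines */

int main(void)
{
	/* compose a register value; masking clips out-of-range input */
	uint32_t reg = FAKE_REG_FIELDA(0x12) |
		       FAKE_REG_FIELDB(0x345) |	/* clipped to 0x45 */
		       FAKE_REG_ENABLE;

	printf("reg = 0x%08x\n", reg);		/* prints 0x00014512 */
	return 0;
}
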
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index fd266ed963b6..c4f886fd6037 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -40,10 +40,11 @@
extern bool hang_debug;
static void a3xx_dump(struct msm_gpu *gpu);
+static bool a3xx_idle(struct msm_gpu *gpu);
-static void a3xx_me_init(struct msm_gpu *gpu)
+static bool a3xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -64,8 +65,8 @@ static void a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ gpu->funcs->flush(gpu, ring);
+ return a3xx_idle(gpu);
}
static int a3xx_hw_init(struct msm_gpu *gpu)
@@ -294,9 +295,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
- a3xx_me_init(gpu);
-
- return 0;
+ return a3xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a3xx_recover(struct msm_gpu *gpu)
@@ -330,17 +329,22 @@ static void a3xx_destroy(struct msm_gpu *gpu)
kfree(a3xx_gpu);
}
-static void a3xx_idle(struct msm_gpu *gpu)
+static bool a3xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
- A3XX_RBBM_STATUS_GPU_BUSY)))
+ A3XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
}
static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
@@ -419,91 +423,13 @@ static void a3xx_dump(struct msm_gpu *gpu)
}
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A3XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A3XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A3XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A3XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A3XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A3XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A3XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A3XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A3XX_SP_VS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A3XX_SP_FS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_A3XX_RBBM_PM_OVERRIDE2),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_A3XX_SQ_GPR_MANAGEMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_A3XX_SQ_INST_STORE_MANAGMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A3XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static const struct adreno_gpu_funcs funcs = {
@@ -514,9 +440,10 @@ static const struct adreno_gpu_funcs funcs = {
.pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover,
.last_fence = adreno_last_fence,
+ .submitted_fence = adreno_submitted_fence,
.submit = adreno_submit,
.flush = adreno_flush,
- .idle = a3xx_idle,
+ .active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
@@ -564,7 +491,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
@@ -583,7 +510,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
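
The refactor above turns the idle helpers from void into bool so that a ringbuffer-drain or RBBM-status timeout propagates out of a3xx_hw_init() as -EINVAL instead of being logged and ignored. A rough userspace sketch of that control flow, with read_status() and the retry count as stand-ins for the real register read and spin_until() timeout:

/*
 * Hedged sketch of the idle-wait refactor: the wait helper returns bool
 * so a timeout becomes a hard init error rather than a silent log line.
 */
#include <stdbool.h>
#include <stdio.h>

#define GPU_BUSY 0x1

static unsigned int fake_status = GPU_BUSY;

static unsigned int read_status(void)
{
	/* pretend the GPU goes idle after a few polls */
	static int polls;

	if (++polls > 3)
		fake_status = 0;
	return fake_status;
}

static bool gpu_idle(void)
{
	int retries = 100;		/* stand-in for spin_until()'s timeout */

	while (retries--) {
		if (!(read_status() & GPU_BUSY))
			return true;
	}
	fprintf(stderr, "timeout waiting for GPU to idle!\n");
	return false;
}

static int hw_init(void)
{
	/* ... submit CP_ME_INIT packets, flush the ring ... */
	return gpu_idle() ? 0 : -22;	/* -EINVAL, as in a3xx_hw_init() */
}

int main(void)
{
	printf("hw_init -> %d\n", hw_init());
	return 0;
}
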
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 99de8271dba8..1004d4ecf8fe 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -45,13 +47,18 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
enum a4xx_color_fmt {
RB4_A8_UNORM = 1,
RB4_R8_UNORM = 2,
+ RB4_R8_SNORM = 3,
+ RB4_R8_UINT = 4,
+ RB4_R8_SINT = 5,
RB4_R4G4B4A4_UNORM = 8,
RB4_R5G5B5A1_UNORM = 10,
- RB4_R5G6R5_UNORM = 14,
+ RB4_R5G6B5_UNORM = 14,
RB4_R8G8_UNORM = 15,
RB4_R8G8_SNORM = 16,
RB4_R8G8_UINT = 17,
RB4_R8G8_SINT = 18,
+ RB4_R16_UNORM = 19,
+ RB4_R16_SNORM = 20,
RB4_R16_FLOAT = 21,
RB4_R16_UINT = 22,
RB4_R16_SINT = 23,
@@ -63,12 +70,16 @@ enum a4xx_color_fmt {
RB4_R10G10B10A2_UNORM = 31,
RB4_R10G10B10A2_UINT = 34,
RB4_R11G11B10_FLOAT = 39,
+ RB4_R16G16_UNORM = 40,
+ RB4_R16G16_SNORM = 41,
RB4_R16G16_FLOAT = 42,
RB4_R16G16_UINT = 43,
RB4_R16G16_SINT = 44,
RB4_R32_FLOAT = 45,
RB4_R32_UINT = 46,
RB4_R32_SINT = 47,
+ RB4_R16G16B16A16_UNORM = 52,
+ RB4_R16G16B16A16_SNORM = 53,
RB4_R16G16B16A16_FLOAT = 54,
RB4_R16G16B16A16_UINT = 55,
RB4_R16G16B16A16_SINT = 56,
@@ -82,17 +93,10 @@ enum a4xx_color_fmt {
enum a4xx_tile_mode {
TILE4_LINEAR = 0,
+ TILE4_2 = 2,
TILE4_3 = 3,
};
-enum a4xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
enum a4xx_vtx_fmt {
VFMT4_32_FLOAT = 1,
VFMT4_32_32_FLOAT = 2,
@@ -106,6 +110,7 @@ enum a4xx_vtx_fmt {
VFMT4_32_32_FIXED = 10,
VFMT4_32_32_32_FIXED = 11,
VFMT4_32_32_32_32_FIXED = 12,
+ VFMT4_11_11_10_FLOAT = 13,
VFMT4_16_SINT = 16,
VFMT4_16_16_SINT = 17,
VFMT4_16_16_16_SINT = 18,
@@ -146,52 +151,76 @@ enum a4xx_vtx_fmt {
VFMT4_8_8_SNORM = 53,
VFMT4_8_8_8_SNORM = 54,
VFMT4_8_8_8_8_SNORM = 55,
- VFMT4_10_10_10_2_UINT = 60,
- VFMT4_10_10_10_2_UNORM = 61,
- VFMT4_10_10_10_2_SINT = 62,
- VFMT4_10_10_10_2_SNORM = 63,
+ VFMT4_10_10_10_2_UINT = 56,
+ VFMT4_10_10_10_2_UNORM = 57,
+ VFMT4_10_10_10_2_SINT = 58,
+ VFMT4_10_10_10_2_SNORM = 59,
+ VFMT4_2_10_10_10_UINT = 60,
+ VFMT4_2_10_10_10_UNORM = 61,
+ VFMT4_2_10_10_10_SINT = 62,
+ VFMT4_2_10_10_10_SNORM = 63,
};
enum a4xx_tex_fmt {
- TFMT4_5_6_5_UNORM = 11,
- TFMT4_5_5_5_1_UNORM = 10,
- TFMT4_4_4_4_4_UNORM = 8,
- TFMT4_X8Z24_UNORM = 71,
- TFMT4_10_10_10_2_UNORM = 33,
TFMT4_A8_UNORM = 3,
- TFMT4_L8_A8_UNORM = 13,
TFMT4_8_UNORM = 4,
- TFMT4_8_8_UNORM = 14,
- TFMT4_8_8_8_8_UNORM = 28,
TFMT4_8_SNORM = 5,
- TFMT4_8_8_SNORM = 15,
- TFMT4_8_8_8_8_SNORM = 29,
TFMT4_8_UINT = 6,
- TFMT4_8_8_UINT = 16,
- TFMT4_8_8_8_8_UINT = 30,
TFMT4_8_SINT = 7,
+ TFMT4_4_4_4_4_UNORM = 8,
+ TFMT4_5_5_5_1_UNORM = 9,
+ TFMT4_5_6_5_UNORM = 11,
+ TFMT4_L8_A8_UNORM = 13,
+ TFMT4_8_8_UNORM = 14,
+ TFMT4_8_8_SNORM = 15,
+ TFMT4_8_8_UINT = 16,
TFMT4_8_8_SINT = 17,
- TFMT4_8_8_8_8_SINT = 31,
+ TFMT4_16_UNORM = 18,
+ TFMT4_16_SNORM = 19,
+ TFMT4_16_FLOAT = 20,
TFMT4_16_UINT = 21,
- TFMT4_16_16_UINT = 41,
- TFMT4_16_16_16_16_UINT = 54,
TFMT4_16_SINT = 22,
+ TFMT4_8_8_8_8_UNORM = 28,
+ TFMT4_8_8_8_8_SNORM = 29,
+ TFMT4_8_8_8_8_UINT = 30,
+ TFMT4_8_8_8_8_SINT = 31,
+ TFMT4_9_9_9_E5_FLOAT = 32,
+ TFMT4_10_10_10_2_UNORM = 33,
+ TFMT4_10_10_10_2_UINT = 34,
+ TFMT4_11_11_10_FLOAT = 37,
+ TFMT4_16_16_UNORM = 38,
+ TFMT4_16_16_SNORM = 39,
+ TFMT4_16_16_FLOAT = 40,
+ TFMT4_16_16_UINT = 41,
TFMT4_16_16_SINT = 42,
- TFMT4_16_16_16_16_SINT = 55,
+ TFMT4_32_FLOAT = 43,
TFMT4_32_UINT = 44,
- TFMT4_32_32_UINT = 57,
- TFMT4_32_32_32_32_UINT = 64,
TFMT4_32_SINT = 45,
- TFMT4_32_32_SINT = 58,
- TFMT4_32_32_32_32_SINT = 65,
- TFMT4_16_FLOAT = 20,
- TFMT4_16_16_FLOAT = 40,
+ TFMT4_16_16_16_16_UNORM = 51,
+ TFMT4_16_16_16_16_SNORM = 52,
TFMT4_16_16_16_16_FLOAT = 53,
- TFMT4_32_FLOAT = 43,
+ TFMT4_16_16_16_16_UINT = 54,
+ TFMT4_16_16_16_16_SINT = 55,
TFMT4_32_32_FLOAT = 56,
+ TFMT4_32_32_UINT = 57,
+ TFMT4_32_32_SINT = 58,
+ TFMT4_32_32_32_FLOAT = 59,
+ TFMT4_32_32_32_UINT = 60,
+ TFMT4_32_32_32_SINT = 61,
TFMT4_32_32_32_32_FLOAT = 63,
- TFMT4_9_9_9_E5_FLOAT = 32,
- TFMT4_11_11_10_FLOAT = 37,
+ TFMT4_32_32_32_32_UINT = 64,
+ TFMT4_32_32_32_32_SINT = 65,
+ TFMT4_X8Z24_UNORM = 71,
+ TFMT4_DXT1 = 86,
+ TFMT4_DXT3 = 87,
+ TFMT4_DXT5 = 88,
+ TFMT4_RGTC1_UNORM = 90,
+ TFMT4_RGTC1_SNORM = 91,
+ TFMT4_RGTC2_UNORM = 94,
+ TFMT4_RGTC2_SNORM = 95,
+ TFMT4_BPTC_UFLOAT = 97,
+ TFMT4_BPTC_FLOAT = 98,
+ TFMT4_BPTC = 99,
TFMT4_ATC_RGB = 100,
TFMT4_ATC_RGBA_EXPLICIT = 101,
TFMT4_ATC_RGBA_INTERPOLATED = 102,
@@ -240,6 +269,545 @@ enum a4xx_tess_spacing {
EVEN_SPACING = 3,
};
+enum a4xx_ccu_perfcounter_select {
+ CCU_BUSY_CYCLES = 0,
+ CCU_RB_DEPTH_RETURN_STALL = 2,
+ CCU_RB_COLOR_RETURN_STALL = 3,
+ CCU_DEPTH_BLOCKS = 6,
+ CCU_COLOR_BLOCKS = 7,
+ CCU_DEPTH_BLOCK_HIT = 8,
+ CCU_COLOR_BLOCK_HIT = 9,
+ CCU_DEPTH_FLAG1_COUNT = 10,
+ CCU_DEPTH_FLAG2_COUNT = 11,
+ CCU_DEPTH_FLAG3_COUNT = 12,
+ CCU_DEPTH_FLAG4_COUNT = 13,
+ CCU_COLOR_FLAG1_COUNT = 14,
+ CCU_COLOR_FLAG2_COUNT = 15,
+ CCU_COLOR_FLAG3_COUNT = 16,
+ CCU_COLOR_FLAG4_COUNT = 17,
+ CCU_PARTIAL_BLOCK_READ = 18,
+};
+
+enum a4xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_BUSY = 1,
+ CP_PFP_IDLE = 2,
+ CP_PFP_BUSY_WORKING = 3,
+ CP_PFP_STALL_CYCLES_ANY = 4,
+ CP_PFP_STARVE_CYCLES_ANY = 5,
+ CP_PFP_STARVED_PER_LOAD_ADDR = 6,
+ CP_PFP_STALLED_PER_STORE_ADDR = 7,
+ CP_PFP_PC_PROFILE = 8,
+ CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
+ CP_PFP_COND_INDIRECT_DISCARDED = 10,
+ CP_LONG_RESUMPTIONS = 11,
+ CP_RESUME_CYCLES = 12,
+ CP_RESUME_TO_BOUNDARY_CYCLES = 13,
+ CP_LONG_PREEMPTIONS = 14,
+ CP_PREEMPT_CYCLES = 15,
+ CP_PREEMPT_TO_BOUNDARY_CYCLES = 16,
+ CP_ME_FIFO_EMPTY_PFP_IDLE = 17,
+ CP_ME_FIFO_EMPTY_PFP_BUSY = 18,
+ CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19,
+ CP_ME_FIFO_FULL_ME_BUSY = 20,
+ CP_ME_FIFO_FULL_ME_NON_WORKING = 21,
+ CP_ME_WAITING_FOR_PACKETS = 22,
+ CP_ME_BUSY_WORKING = 23,
+ CP_ME_STARVE_CYCLES_ANY = 24,
+ CP_ME_STARVE_CYCLES_PER_PROFILE = 25,
+ CP_ME_STALL_CYCLES_PER_PROFILE = 26,
+ CP_ME_PC_PROFILE = 27,
+ CP_RCIU_FIFO_EMPTY = 28,
+ CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29,
+ CP_RCIU_FIFO_FULL = 30,
+ CP_RCIU_FIFO_FULL_NO_CONTEXT = 31,
+ CP_RCIU_FIFO_FULL_AHB_MASTER = 32,
+ CP_RCIU_FIFO_FULL_OTHER = 33,
+ CP_AHB_IDLE = 34,
+ CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35,
+ CP_AHB_STALL_ON_GRANT_SPLIT = 36,
+ CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37,
+ CP_AHB_BUSY_WORKING = 38,
+ CP_AHB_BUSY_STALL_ON_HRDY = 39,
+ CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40,
+};
+
+enum a4xx_gras_ras_perfcounter_select {
+ RAS_SUPER_TILES = 0,
+ RAS_8X8_TILES = 1,
+ RAS_4X4_TILES = 2,
+ RAS_BUSY_CYCLES = 3,
+ RAS_STALL_CYCLES_BY_RB = 4,
+ RAS_STALL_CYCLES_BY_VSC = 5,
+ RAS_STARVE_CYCLES_BY_TSE = 6,
+ RAS_SUPERTILE_CYCLES = 7,
+ RAS_TILE_CYCLES = 8,
+ RAS_FULLY_COVERED_SUPER_TILES = 9,
+ RAS_FULLY_COVERED_8X8_TILES = 10,
+ RAS_4X4_PRIM = 11,
+ RAS_8X4_4X8_PRIM = 12,
+ RAS_8X8_PRIM = 13,
+};
+
+enum a4xx_gras_tse_perfcounter_select {
+ TSE_INPUT_PRIM = 0,
+ TSE_INPUT_NULL_PRIM = 1,
+ TSE_TRIVAL_REJ_PRIM = 2,
+ TSE_CLIPPED_PRIM = 3,
+ TSE_NEW_PRIM = 4,
+ TSE_ZERO_AREA_PRIM = 5,
+ TSE_FACENESS_CULLED_PRIM = 6,
+ TSE_ZERO_PIXEL_PRIM = 7,
+ TSE_OUTPUT_NULL_PRIM = 8,
+ TSE_OUTPUT_VISIBLE_PRIM = 9,
+ TSE_PRE_CLIP_PRIM = 10,
+ TSE_POST_CLIP_PRIM = 11,
+ TSE_BUSY_CYCLES = 12,
+ TSE_PC_STARVE = 13,
+ TSE_RAS_STALL = 14,
+ TSE_STALL_BARYPLANE_FIFO_FULL = 15,
+ TSE_STALL_ZPLANE_FIFO_FULL = 16,
+};
+
+enum a4xx_hlsq_perfcounter_select {
+ HLSQ_SP_VS_STAGE_CONSTANT = 0,
+ HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1,
+ HLSQ_SP_FS_STAGE_CONSTANT = 2,
+ HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3,
+ HLSQ_TP_STATE = 4,
+ HLSQ_QUADS = 5,
+ HLSQ_PIXELS = 6,
+ HLSQ_VERTICES = 7,
+ HLSQ_SP_VS_STAGE_DATA_BYTES = 13,
+ HLSQ_SP_FS_STAGE_DATA_BYTES = 14,
+ HLSQ_BUSY_CYCLES = 15,
+ HLSQ_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_STALL_CYCLES_SP_VS_STAGE = 17,
+ HLSQ_STALL_CYCLES_SP_FS_STAGE = 18,
+ HLSQ_STALL_CYCLES_UCHE = 19,
+ HLSQ_RBBM_LOAD_CYCLES = 20,
+ HLSQ_DI_TO_VS_START_SP = 21,
+ HLSQ_DI_TO_FS_START_SP = 22,
+ HLSQ_VS_STAGE_START_TO_DONE_SP = 23,
+ HLSQ_FS_STAGE_START_TO_DONE_SP = 24,
+ HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25,
+ HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26,
+ HLSQ_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_UCHE_LATENCY_COUNT = 28,
+ HLSQ_STARVE_CYCLES_VFD = 29,
+};
+
+enum a4xx_pc_perfcounter_select {
+ PC_VIS_STREAMS_LOADED = 0,
+ PC_VPC_PRIMITIVES = 2,
+ PC_DEAD_PRIM = 3,
+ PC_LIVE_PRIM = 4,
+ PC_DEAD_DRAWCALLS = 5,
+ PC_LIVE_DRAWCALLS = 6,
+ PC_VERTEX_MISSES = 7,
+ PC_STALL_CYCLES_VFD = 9,
+ PC_STALL_CYCLES_TSE = 10,
+ PC_STALL_CYCLES_UCHE = 11,
+ PC_WORKING_CYCLES = 12,
+ PC_IA_VERTICES = 13,
+ PC_GS_PRIMITIVES = 14,
+ PC_HS_INVOCATIONS = 15,
+ PC_DS_INVOCATIONS = 16,
+ PC_DS_PRIMITIVES = 17,
+ PC_STARVE_CYCLES_FOR_INDEX = 20,
+ PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21,
+ PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22,
+ PC_STALL_CYCLES_TESS = 23,
+ PC_STARVE_CYCLES_FOR_POSITION = 24,
+ PC_MODE0_DRAWCALL = 25,
+ PC_MODE1_DRAWCALL = 26,
+ PC_MODE2_DRAWCALL = 27,
+ PC_MODE3_DRAWCALL = 28,
+ PC_MODE4_DRAWCALL = 29,
+ PC_PREDICATED_DEAD_DRAWCALL = 30,
+ PC_STALL_CYCLES_BY_TSE_ONLY = 31,
+ PC_STALL_CYCLES_BY_VPC_ONLY = 32,
+ PC_VPC_POS_DATA_TRANSACTION = 33,
+ PC_BUSY_CYCLES = 34,
+ PC_STARVE_CYCLES_DI = 35,
+ PC_STALL_CYCLES_VPC = 36,
+ TESS_WORKING_CYCLES = 37,
+ TESS_NUM_CYCLES_SETUP_WORKING = 38,
+ TESS_NUM_CYCLES_PTGEN_WORKING = 39,
+ TESS_NUM_CYCLES_CONNGEN_WORKING = 40,
+ TESS_BUSY_CYCLES = 41,
+ TESS_STARVE_CYCLES_PC = 42,
+ TESS_STALL_CYCLES_PC = 43,
+};
+
+enum a4xx_pwr_perfcounter_select {
+ PWR_CORE_CLOCK_CYCLES = 0,
+ PWR_BUSY_CLOCK_CYCLES = 1,
+};
+
+enum a4xx_rb_perfcounter_select {
+ RB_BUSY_CYCLES = 0,
+ RB_BUSY_CYCLES_BINNING = 1,
+ RB_BUSY_CYCLES_RENDERING = 2,
+ RB_BUSY_CYCLES_RESOLVE = 3,
+ RB_STARVE_CYCLES_BY_SP = 4,
+ RB_STARVE_CYCLES_BY_RAS = 5,
+ RB_STARVE_CYCLES_BY_MARB = 6,
+ RB_STALL_CYCLES_BY_MARB = 7,
+ RB_STALL_CYCLES_BY_HLSQ = 8,
+ RB_RB_RB_MARB_DATA = 9,
+ RB_SP_RB_QUAD = 10,
+ RB_RAS_RB_Z_QUADS = 11,
+ RB_GMEM_CH0_READ = 12,
+ RB_GMEM_CH1_READ = 13,
+ RB_GMEM_CH0_WRITE = 14,
+ RB_GMEM_CH1_WRITE = 15,
+ RB_CP_CONTEXT_DONE = 16,
+ RB_CP_CACHE_FLUSH = 17,
+ RB_CP_ZPASS_DONE = 18,
+ RB_STALL_FIFO0_FULL = 19,
+ RB_STALL_FIFO1_FULL = 20,
+ RB_STALL_FIFO2_FULL = 21,
+ RB_STALL_FIFO3_FULL = 22,
+ RB_RB_HLSQ_TRANSACTIONS = 23,
+ RB_Z_READ = 24,
+ RB_Z_WRITE = 25,
+ RB_C_READ = 26,
+ RB_C_WRITE = 27,
+ RB_C_READ_LATENCY = 28,
+ RB_Z_READ_LATENCY = 29,
+ RB_STALL_BY_UCHE = 30,
+ RB_MARB_UCHE_TRANSACTIONS = 31,
+ RB_CACHE_STALL_MISS = 32,
+ RB_CACHE_STALL_FIFO_FULL = 33,
+ RB_8BIT_BLENDER_UNITS_ACTIVE = 34,
+ RB_16BIT_BLENDER_UNITS_ACTIVE = 35,
+ RB_SAMPLER_UNITS_ACTIVE = 36,
+ RB_TOTAL_PASS = 38,
+ RB_Z_PASS = 39,
+ RB_Z_FAIL = 40,
+ RB_S_FAIL = 41,
+ RB_POWER0 = 42,
+ RB_POWER1 = 43,
+ RB_POWER2 = 44,
+ RB_POWER3 = 45,
+ RB_POWER4 = 46,
+ RB_POWER5 = 47,
+ RB_POWER6 = 48,
+ RB_POWER7 = 49,
+};
+
+enum a4xx_rbbm_perfcounter_select {
+ RBBM_ALWAYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TPL1_BUSY = 12,
+ RBBM_ANY_SP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_STATUS_MASKED = 22,
+ RBBM_CP_BUSY_GFX_CORE_IDLE = 23,
+ RBBM_TESS_BUSY = 24,
+ RBBM_COM_BUSY = 25,
+ RBBM_DCOM_BUSY = 32,
+ RBBM_ANY_CCU_BUSY = 33,
+ RBBM_DPM_BUSY = 34,
+};
+
+enum a4xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_GM_LOAD_INSTRUCTIONS = 3,
+ SP_GM_STORE_INSTRUCTIONS = 4,
+ SP_GM_ATOMICS = 5,
+ SP_VS_STAGE_TEX_INSTRUCTIONS = 6,
+ SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_STAGE_EFU_INSTRUCTIONS = 8,
+ SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_STAGE_TEX_INSTRUCTIONS = 11,
+ SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_STAGE_EFU_INSTRUCTIONS = 13,
+ SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14,
+ SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
+ SP_ALU_WORKING_CYCLES = 29,
+ SP_EFU_WORKING_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_BUSY_CYCLES = 35,
+ SP_HS_INSTRUCTIONS = 36,
+ SP_DS_INSTRUCTIONS = 37,
+ SP_GS_INSTRUCTIONS = 38,
+ SP_CS_INSTRUCTIONS = 39,
+ SP_SCHEDULER_NON_WORKING = 40,
+ SP_WAVE_CONTEXTS = 41,
+ SP_WAVE_CONTEXT_CYCLES = 42,
+ SP_POWER0 = 43,
+ SP_POWER1 = 44,
+ SP_POWER2 = 45,
+ SP_POWER3 = 46,
+ SP_POWER4 = 47,
+ SP_POWER5 = 48,
+ SP_POWER6 = 49,
+ SP_POWER7 = 50,
+ SP_POWER8 = 51,
+ SP_POWER9 = 52,
+ SP_POWER10 = 53,
+ SP_POWER11 = 54,
+ SP_POWER12 = 55,
+ SP_POWER13 = 56,
+ SP_POWER14 = 57,
+ SP_POWER15 = 58,
+};
+
+enum a4xx_tp_perfcounter_select {
+ TP_L1_REQUESTS = 0,
+ TP_L1_MISSES = 1,
+ TP_QUADS_OFFSET = 8,
+ TP_QUAD_SHADOW = 9,
+ TP_QUADS_ARRAY = 10,
+ TP_QUADS_GRADIENT = 11,
+ TP_QUADS_1D2D = 12,
+ TP_QUADS_3DCUBE = 13,
+ TP_BUSY_CYCLES = 16,
+ TP_STALL_CYCLES_BY_ARB = 17,
+ TP_STATE_CACHE_REQUESTS = 20,
+ TP_STATE_CACHE_MISSES = 21,
+ TP_POWER0 = 22,
+ TP_POWER1 = 23,
+ TP_POWER2 = 24,
+ TP_POWER3 = 25,
+ TP_POWER4 = 26,
+ TP_POWER5 = 27,
+ TP_POWER6 = 28,
+ TP_POWER7 = 29,
+};
+
+enum a4xx_uche_perfcounter_select {
+ UCHE_VBIF_READ_BEATS_TP = 0,
+ UCHE_VBIF_READ_BEATS_VFD = 1,
+ UCHE_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_VBIF_READ_BEATS_MARB = 3,
+ UCHE_VBIF_READ_BEATS_SP = 4,
+ UCHE_READ_REQUESTS_TP = 5,
+ UCHE_READ_REQUESTS_VFD = 6,
+ UCHE_READ_REQUESTS_HLSQ = 7,
+ UCHE_READ_REQUESTS_MARB = 8,
+ UCHE_READ_REQUESTS_SP = 9,
+ UCHE_WRITE_REQUESTS_MARB = 10,
+ UCHE_WRITE_REQUESTS_SP = 11,
+ UCHE_TAG_CHECK_FAILS = 12,
+ UCHE_EVICTS = 13,
+ UCHE_FLUSHES = 14,
+ UCHE_VBIF_LATENCY_CYCLES = 15,
+ UCHE_VBIF_LATENCY_SAMPLES = 16,
+ UCHE_BUSY_CYCLES = 17,
+ UCHE_VBIF_READ_BEATS_PC = 18,
+ UCHE_READ_REQUESTS_PC = 19,
+ UCHE_WRITE_REQUESTS_VPC = 20,
+ UCHE_STALL_BY_VBIF = 21,
+ UCHE_WRITE_REQUESTS_VSC = 22,
+ UCHE_POWER0 = 23,
+ UCHE_POWER1 = 24,
+ UCHE_POWER2 = 25,
+ UCHE_POWER3 = 26,
+ UCHE_POWER4 = 27,
+ UCHE_POWER5 = 28,
+ UCHE_POWER6 = 29,
+ UCHE_POWER7 = 30,
+};
+
+enum a4xx_vbif_perfcounter_select {
+ AXI_READ_REQUESTS_ID_0 = 0,
+ AXI_READ_REQUESTS_ID_1 = 1,
+ AXI_READ_REQUESTS_ID_2 = 2,
+ AXI_READ_REQUESTS_ID_3 = 3,
+ AXI_READ_REQUESTS_ID_4 = 4,
+ AXI_READ_REQUESTS_ID_5 = 5,
+ AXI_READ_REQUESTS_ID_6 = 6,
+ AXI_READ_REQUESTS_ID_7 = 7,
+ AXI_READ_REQUESTS_ID_8 = 8,
+ AXI_READ_REQUESTS_ID_9 = 9,
+ AXI_READ_REQUESTS_ID_10 = 10,
+ AXI_READ_REQUESTS_ID_11 = 11,
+ AXI_READ_REQUESTS_ID_12 = 12,
+ AXI_READ_REQUESTS_ID_13 = 13,
+ AXI_READ_REQUESTS_ID_14 = 14,
+ AXI_READ_REQUESTS_ID_15 = 15,
+ AXI0_READ_REQUESTS_TOTAL = 16,
+ AXI1_READ_REQUESTS_TOTAL = 17,
+ AXI2_READ_REQUESTS_TOTAL = 18,
+ AXI3_READ_REQUESTS_TOTAL = 19,
+ AXI_READ_REQUESTS_TOTAL = 20,
+ AXI_WRITE_REQUESTS_ID_0 = 21,
+ AXI_WRITE_REQUESTS_ID_1 = 22,
+ AXI_WRITE_REQUESTS_ID_2 = 23,
+ AXI_WRITE_REQUESTS_ID_3 = 24,
+ AXI_WRITE_REQUESTS_ID_4 = 25,
+ AXI_WRITE_REQUESTS_ID_5 = 26,
+ AXI_WRITE_REQUESTS_ID_6 = 27,
+ AXI_WRITE_REQUESTS_ID_7 = 28,
+ AXI_WRITE_REQUESTS_ID_8 = 29,
+ AXI_WRITE_REQUESTS_ID_9 = 30,
+ AXI_WRITE_REQUESTS_ID_10 = 31,
+ AXI_WRITE_REQUESTS_ID_11 = 32,
+ AXI_WRITE_REQUESTS_ID_12 = 33,
+ AXI_WRITE_REQUESTS_ID_13 = 34,
+ AXI_WRITE_REQUESTS_ID_14 = 35,
+ AXI_WRITE_REQUESTS_ID_15 = 36,
+ AXI0_WRITE_REQUESTS_TOTAL = 37,
+ AXI1_WRITE_REQUESTS_TOTAL = 38,
+ AXI2_WRITE_REQUESTS_TOTAL = 39,
+ AXI3_WRITE_REQUESTS_TOTAL = 40,
+ AXI_WRITE_REQUESTS_TOTAL = 41,
+ AXI_TOTAL_REQUESTS = 42,
+ AXI_READ_DATA_BEATS_ID_0 = 43,
+ AXI_READ_DATA_BEATS_ID_1 = 44,
+ AXI_READ_DATA_BEATS_ID_2 = 45,
+ AXI_READ_DATA_BEATS_ID_3 = 46,
+ AXI_READ_DATA_BEATS_ID_4 = 47,
+ AXI_READ_DATA_BEATS_ID_5 = 48,
+ AXI_READ_DATA_BEATS_ID_6 = 49,
+ AXI_READ_DATA_BEATS_ID_7 = 50,
+ AXI_READ_DATA_BEATS_ID_8 = 51,
+ AXI_READ_DATA_BEATS_ID_9 = 52,
+ AXI_READ_DATA_BEATS_ID_10 = 53,
+ AXI_READ_DATA_BEATS_ID_11 = 54,
+ AXI_READ_DATA_BEATS_ID_12 = 55,
+ AXI_READ_DATA_BEATS_ID_13 = 56,
+ AXI_READ_DATA_BEATS_ID_14 = 57,
+ AXI_READ_DATA_BEATS_ID_15 = 58,
+ AXI0_READ_DATA_BEATS_TOTAL = 59,
+ AXI1_READ_DATA_BEATS_TOTAL = 60,
+ AXI2_READ_DATA_BEATS_TOTAL = 61,
+ AXI3_READ_DATA_BEATS_TOTAL = 62,
+ AXI_READ_DATA_BEATS_TOTAL = 63,
+ AXI_WRITE_DATA_BEATS_ID_0 = 64,
+ AXI_WRITE_DATA_BEATS_ID_1 = 65,
+ AXI_WRITE_DATA_BEATS_ID_2 = 66,
+ AXI_WRITE_DATA_BEATS_ID_3 = 67,
+ AXI_WRITE_DATA_BEATS_ID_4 = 68,
+ AXI_WRITE_DATA_BEATS_ID_5 = 69,
+ AXI_WRITE_DATA_BEATS_ID_6 = 70,
+ AXI_WRITE_DATA_BEATS_ID_7 = 71,
+ AXI_WRITE_DATA_BEATS_ID_8 = 72,
+ AXI_WRITE_DATA_BEATS_ID_9 = 73,
+ AXI_WRITE_DATA_BEATS_ID_10 = 74,
+ AXI_WRITE_DATA_BEATS_ID_11 = 75,
+ AXI_WRITE_DATA_BEATS_ID_12 = 76,
+ AXI_WRITE_DATA_BEATS_ID_13 = 77,
+ AXI_WRITE_DATA_BEATS_ID_14 = 78,
+ AXI_WRITE_DATA_BEATS_ID_15 = 79,
+ AXI0_WRITE_DATA_BEATS_TOTAL = 80,
+ AXI1_WRITE_DATA_BEATS_TOTAL = 81,
+ AXI2_WRITE_DATA_BEATS_TOTAL = 82,
+ AXI3_WRITE_DATA_BEATS_TOTAL = 83,
+ AXI_WRITE_DATA_BEATS_TOTAL = 84,
+ AXI_DATA_BEATS_TOTAL = 85,
+ CYCLES_HELD_OFF_ID_0 = 86,
+ CYCLES_HELD_OFF_ID_1 = 87,
+ CYCLES_HELD_OFF_ID_2 = 88,
+ CYCLES_HELD_OFF_ID_3 = 89,
+ CYCLES_HELD_OFF_ID_4 = 90,
+ CYCLES_HELD_OFF_ID_5 = 91,
+ CYCLES_HELD_OFF_ID_6 = 92,
+ CYCLES_HELD_OFF_ID_7 = 93,
+ CYCLES_HELD_OFF_ID_8 = 94,
+ CYCLES_HELD_OFF_ID_9 = 95,
+ CYCLES_HELD_OFF_ID_10 = 96,
+ CYCLES_HELD_OFF_ID_11 = 97,
+ CYCLES_HELD_OFF_ID_12 = 98,
+ CYCLES_HELD_OFF_ID_13 = 99,
+ CYCLES_HELD_OFF_ID_14 = 100,
+ CYCLES_HELD_OFF_ID_15 = 101,
+ AXI_READ_REQUEST_HELD_OFF = 102,
+ AXI_WRITE_REQUEST_HELD_OFF = 103,
+ AXI_REQUEST_HELD_OFF = 104,
+ AXI_WRITE_DATA_HELD_OFF = 105,
+ OCMEM_AXI_READ_REQUEST_HELD_OFF = 106,
+ OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107,
+ OCMEM_AXI_REQUEST_HELD_OFF = 108,
+ OCMEM_AXI_WRITE_DATA_HELD_OFF = 109,
+ ELAPSED_CYCLES_DDR = 110,
+ ELAPSED_CYCLES_OCMEM = 111,
+};
+
+enum a4xx_vfd_perfcounter_select {
+ VFD_UCHE_BYTE_FETCHED = 0,
+ VFD_UCHE_TRANS = 1,
+ VFD_FETCH_INSTRUCTIONS = 3,
+ VFD_BUSY_CYCLES = 5,
+ VFD_STALL_CYCLES_UCHE = 6,
+ VFD_STALL_CYCLES_HLSQ = 7,
+ VFD_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_STALL_CYCLES_VPC_ALLOC = 9,
+ VFD_MODE_0_FIBERS = 13,
+ VFD_MODE_1_FIBERS = 14,
+ VFD_MODE_2_FIBERS = 15,
+ VFD_MODE_3_FIBERS = 16,
+ VFD_MODE_4_FIBERS = 17,
+ VFD_BFIFO_STALL = 18,
+ VFD_NUM_VERTICES_TOTAL = 19,
+ VFD_PACKER_FULL = 20,
+ VFD_UCHE_REQUEST_FIFO_FULL = 21,
+ VFD_STARVE_CYCLES_PC = 22,
+ VFD_STARVE_CYCLES_UCHE = 23,
+};
+
+enum a4xx_vpc_perfcounter_select {
+ VPC_SP_LM_COMPONENTS = 2,
+ VPC_SP0_LM_BYTES = 3,
+ VPC_SP1_LM_BYTES = 4,
+ VPC_SP2_LM_BYTES = 5,
+ VPC_SP3_LM_BYTES = 6,
+ VPC_WORKING_CYCLES = 7,
+ VPC_STALL_CYCLES_LM = 8,
+ VPC_STARVE_CYCLES_RAS = 9,
+ VPC_STREAMOUT_CYCLES = 10,
+ VPC_UCHE_TRANSACTIONS = 12,
+ VPC_STALL_CYCLES_UCHE = 13,
+ VPC_BUSY_CYCLES = 14,
+ VPC_STARVE_CYCLES_SP = 15,
+};
+
+enum a4xx_vsc_perfcounter_select {
+ VSC_BUSY_CYCLES = 0,
+ VSC_WORKING_CYCLES = 1,
+ VSC_STALL_CYCLES_UCHE = 2,
+ VSC_STARVE_CYCLES_RAS = 3,
+ VSC_EOT_NUM = 4,
+};
+
enum a4xx_tex_filter {
A4XX_TEX_NEAREST = 0,
A4XX_TEX_LINEAR = 1,
@@ -326,6 +894,12 @@ static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_0 0x00000ccf
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_1 0x00000cd0
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_2 0x00000cd1
+
#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2
#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
@@ -363,6 +937,7 @@ static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
{
return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
}
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
@@ -400,8 +975,13 @@ static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4
#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010
#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020
-#define A4XX_RB_MRT_CONTROL_FASTCLEAR 0x00000400
-#define A4XX_RB_MRT_CONTROL_B11 0x00000800
+#define A4XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000040
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
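+/* Headergen field helpers like the one above shift a value into its
+ * register slot; the fields of one register are OR'd together. A minimal
+ * sketch (hypothetical values), assuming ROP_COPY comes from
+ * enum a3xx_rop_code:
+ *
+ *   uint32_t mrt = A4XX_RB_MRT_CONTROL_ROP_ENABLE |
+ *                  A4XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY) |
+ *                  A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(0xf);
+ */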
#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
@@ -461,7 +1041,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
}
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
}
@@ -479,7 +1059,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
}
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
}
@@ -490,13 +1070,19 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
}
-#define REG_A4XX_RB_BLEND_RED 0x000020f3
-#define A4XX_RB_BLEND_RED_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_RED 0x000020f0
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
}
+#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
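+/* RB_BLEND_RED packs three encodings of the same blend-color channel:
+ * an unsigned byte in bits 0-7, a signed byte in bits 8-15, and a
+ * half-float in the high 16 bits; the RB_BLEND_RED_F32 register below
+ * carries the full 32-bit float form. The same layout repeats for the
+ * GREEN/BLUE/ALPHA channels.
+ */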
#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
@@ -504,13 +1090,27 @@ static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
}
-#define REG_A4XX_RB_BLEND_GREEN 0x000020f4
-#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_RED_F32 0x000020f1
+#define A4XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_GREEN 0x000020f2
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
}
+#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
@@ -518,13 +1118,27 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
}
-#define REG_A4XX_RB_BLEND_BLUE 0x000020f5
-#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_GREEN_F32 0x000020f3
+#define A4XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_BLUE 0x000020f4
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
}
+#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
@@ -532,13 +1146,27 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
}
+#define REG_A4XX_RB_BLEND_BLUE_F32 0x000020f5
+#define A4XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK;
+}
+
#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x00007fff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
}
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
@@ -546,6 +1174,14 @@ static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
}
+#define REG_A4XX_RB_BLEND_ALPHA_F32 0x000020f7
+#define A4XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
@@ -568,7 +1204,7 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val)
{
return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK;
}
-#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100
+#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND 0x00000100
#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
@@ -734,8 +1370,9 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
+#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102
@@ -996,8 +1633,386 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x
#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d
+#define REG_A4XX_RBBM_POWER_CNTL_IP 0x00000098
+#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE 0x00000001
+#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON 0x00100000
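+/* The a4xx_pm_resume()/a4xx_pm_suspend() paths in a4xx_gpu.c (later in
+ * this patch) write 0x778000 to leave SW_COLLAPSE clear (powering the
+ * SP/TP blocks up) or 0x778001 to set it, then poll SP_TP_PWR_ON in
+ * RBBM_POWER_STATUS to confirm power-up completed.
+ */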
+
#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c
+#define REG_A4XX_RBBM_PERFCTR_CP_0_HI 0x0000009d
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_LO 0x0000009e
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_HI 0x0000009f
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_LO 0x000000a0
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_HI 0x000000a1
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_LO 0x000000a2
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_HI 0x000000a3
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_LO 0x000000a4
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_HI 0x000000a5
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_LO 0x000000a6
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_HI 0x000000a7
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_LO 0x000000a8
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_HI 0x000000a9
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_LO 0x000000aa
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_HI 0x000000ab
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO 0x000000ac
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI 0x000000ad
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO 0x000000ae
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI 0x000000af
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO 0x000000b0
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI 0x000000b1
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO 0x000000b2
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI 0x000000b3
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_LO 0x000000b4
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_HI 0x000000b5
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_LO 0x000000b6
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_HI 0x000000b7
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_LO 0x000000b8
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_HI 0x000000b9
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_LO 0x000000ba
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_HI 0x000000bb
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_LO 0x000000bc
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_HI 0x000000bd
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_LO 0x000000be
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_HI 0x000000bf
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_LO 0x000000c0
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_HI 0x000000c1
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_LO 0x000000c2
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_HI 0x000000c3
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO 0x000000c4
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI 0x000000c5
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO 0x000000c6
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI 0x000000c7
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO 0x000000c8
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI 0x000000c9
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO 0x000000ca
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI 0x000000cb
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO 0x000000cc
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI 0x000000cd
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO 0x000000ce
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI 0x000000cf
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO 0x000000d0
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI 0x000000d1
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO 0x000000d2
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI 0x000000d3
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000d4
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000d5
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000d6
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000d7
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000d8
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000d9
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000da
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000db
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000dc
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000dd
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000de
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000df
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO 0x000000e0
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI 0x000000e1
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO 0x000000e2
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI 0x000000e3
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO 0x000000e4
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI 0x000000e5
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO 0x000000e6
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI 0x000000e7
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO 0x000000e8
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI 0x000000e9
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO 0x000000ea
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI 0x000000eb
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO 0x000000ec
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI 0x000000ed
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO 0x000000ee
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI 0x000000ef
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO 0x000000f0
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI 0x000000f1
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO 0x000000f2
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI 0x000000f3
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO 0x000000f4
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI 0x000000f5
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO 0x000000f6
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI 0x000000f7
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO 0x000000f8
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI 0x000000f9
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO 0x000000fa
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_HI 0x000000fb
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO 0x000000fc
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI 0x000000fd
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO 0x000000fe
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI 0x000000ff
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO 0x00000100
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI 0x00000101
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO 0x00000102
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI 0x00000103
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO 0x00000104
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI 0x00000105
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO 0x00000106
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI 0x00000107
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO 0x00000108
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI 0x00000109
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO 0x0000010a
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI 0x0000010b
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO 0x0000010c
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI 0x0000010d
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO 0x0000010e
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI 0x0000010f
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO 0x00000110
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI 0x00000111
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO 0x00000112
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI 0x00000113
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115
+

+#define REG_A4XX_RBBM_PERFCTR_TP_1_LO 0x00000116
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_HI 0x00000117
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_LO 0x00000118
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_HI 0x00000119
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_LO 0x0000011a
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_HI 0x0000011b
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_LO 0x0000011c
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_HI 0x0000011d
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_LO 0x0000011e
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_HI 0x0000011f
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_LO 0x00000120
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_HI 0x00000121
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_LO 0x00000122
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_HI 0x00000123
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_LO 0x00000124
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_HI 0x00000125
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_LO 0x00000126
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_HI 0x00000127
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_LO 0x00000128
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_HI 0x00000129
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_LO 0x0000012a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_HI 0x0000012b
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_LO 0x0000012c
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_HI 0x0000012d
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_LO 0x0000012e
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_HI 0x0000012f
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_LO 0x00000130
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_HI 0x00000131
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_LO 0x00000132
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_HI 0x00000133
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_LO 0x00000134
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_HI 0x00000135
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_LO 0x00000136
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_HI 0x00000137
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_LO 0x00000138
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_HI 0x00000139
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_LO 0x0000013a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_HI 0x0000013b
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_LO 0x0000013c
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_HI 0x0000013d
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_LO 0x0000013e
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_HI 0x0000013f
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_LO 0x00000140
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_HI 0x00000141
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_LO 0x00000142
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_HI 0x00000143
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_LO 0x00000144
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_HI 0x00000145
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_LO 0x00000146
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_HI 0x00000147
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_LO 0x00000148
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_HI 0x00000149
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_LO 0x0000014a
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_HI 0x0000014b
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO 0x0000014c
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI 0x0000014d
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO 0x0000014e
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI 0x0000014f
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO 0x00000166
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI 0x00000167
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI 0x00000169
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO 0x0000016e
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI 0x0000016f
+
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
@@ -1046,6 +2061,10 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { r
static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0 0x00000099
+
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x0000009a
+
#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
@@ -1060,6 +2079,14 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000176
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000177
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000178
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3 0x00000179
+
#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a
#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d
@@ -1099,6 +2126,11 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f
+#define REG_A4XX_RBBM_POWER_STATUS 0x000001b0
+#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON 0x00100000
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2 0x000001b8
+
#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228
#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229
@@ -1167,11 +2199,23 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
-#define REG_A4XX_CP_PROTECT_REG_0 0x00000240
-
static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000
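+/* A sketch of composing a protect entry (hypothetical window), assuming
+ * MASK_LEN is the log2 of the protected window size as on other adreno
+ * generations:
+ *
+ *   gpu_write(gpu, REG_A4XX_CP_PROTECT_REG(0),
+ *             A4XX_CP_PROTECT_REG_BASE_ADDR(0x2000) |
+ *             A4XX_CP_PROTECT_REG_MASK_LEN(5) |
+ *             A4XX_CP_PROTECT_REG_TRAP_WRITE);
+ */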
#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
@@ -1191,6 +2235,20 @@ static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240
#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500
+#define REG_A4XX_CP_PERFCTR_CP_SEL_1 0x00000501
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_2 0x00000502
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_3 0x00000503
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_4 0x00000504
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_5 0x00000505
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_6 0x00000506
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_7 0x00000507
+
#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
@@ -1201,6 +2259,28 @@ static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578
#define REG_A4XX_SP_MODE_CONTROL 0x00000ec3
+#define REG_A4XX_SP_PERFCTR_SP_SEL_0 0x00000ec4
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_1 0x00000ec5
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_2 0x00000ec6
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_3 0x00000ec7
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_4 0x00000ec8
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_5 0x00000ec9
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_6 0x00000eca
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_7 0x00000ecb
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_8 0x00000ecc
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_9 0x00000ecd
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_10 0x00000ece
+
#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
@@ -1226,7 +2306,7 @@ static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -1374,7 +2454,7 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -1699,6 +2779,12 @@ static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0 0x00000e65
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1 0x00000e66
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2 0x00000e67
+
#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
#define REG_A4XX_VPC_ATTR 0x00002140
@@ -1811,6 +2897,20 @@ static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0
#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0 0x00000e43
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1 0x00000e44
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2 0x00000e45
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3 0x00000e46
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4 0x00000e47
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5 0x00000e48
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6 0x00000e49
+
#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
#define REG_A4XX_VGT_CL_INITIATOR 0x000021d0
@@ -1967,6 +3067,20 @@ static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
#define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0 0x00000f04
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1 0x00000f05
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2 0x00000f06
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3 0x00000f07
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4 0x00000f08
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5 0x00000f09
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6 0x00000f0a
+
#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
@@ -2021,9 +3135,25 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c89
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c8a
+
#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c8c
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c8d
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c8e
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c8f
+
#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
+#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
#define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
@@ -2114,6 +3244,7 @@ static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
+#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS 0x00000008
#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
@@ -2285,6 +3416,20 @@ static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e8e
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e8f
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e90
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e91
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e92
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e93
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e94
+
#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
@@ -2295,6 +3440,22 @@ static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e06
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e07
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e08
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e09
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e0a
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e0b
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e0c
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e0d
+
#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
@@ -2545,14 +3706,42 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
+
#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+#define REG_A4XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
#define REG_A4XX_PC_BIN_BASE 0x000021c0
+#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
@@ -2564,7 +3753,20 @@ static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val)
#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
-#define REG_A4XX_UNKNOWN_21C5 0x000021c5
+#define REG_A4XX_PC_PRIM_VTX_CNTL2 0x000021c5
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE 0x00000040
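+/* A sketch, assuming PC_DRAW_LINES comes from enum adreno_pa_su_sc_draw:
+ * wireframe rendering would select line polymode for both faces, e.g.
+ *
+ *   A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(PC_DRAW_LINES) |
+ *   A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(PC_DRAW_LINES) |
+ *   A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE
+ */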
#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
@@ -2602,12 +3804,8 @@ static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
{
return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
}
-#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK 0x01800000
-#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT 23
-static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK;
-}
+#define A4XX_PC_HS_PARAM_CW 0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
#define REG_A4XX_VBIF_VERSION 0x00003000
@@ -2646,20 +3844,6 @@ static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
#define REG_A4XX_UNKNOWN_20EF 0x000020ef
-#define REG_A4XX_UNKNOWN_20F0 0x000020f0
-
-#define REG_A4XX_UNKNOWN_20F1 0x000020f1
-
-#define REG_A4XX_UNKNOWN_20F2 0x000020f2
-
-#define REG_A4XX_UNKNOWN_20F7 0x000020f7
-#define A4XX_UNKNOWN_20F7__MASK 0xffffffff
-#define A4XX_UNKNOWN_20F7__SHIFT 0
-static inline uint32_t A4XX_UNKNOWN_20F7(float val)
-{
- return ((fui(val)) << A4XX_UNKNOWN_20F7__SHIFT) & A4XX_UNKNOWN_20F7__MASK;
-}
-
#define REG_A4XX_UNKNOWN_2152 0x00002152
#define REG_A4XX_UNKNOWN_2153 0x00002153
@@ -2720,6 +3904,12 @@ static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val)
{
return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK;
}
+#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
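+/* The helper above stores the bias as signed fixed point with 8 fractional
+ * bits (val * 256.0) in a 13-bit field, e.g. a LOD bias of 1.5 packs as
+ * 384 (0x180) before the shift into bits 19-31.
+ */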
#define REG_A4XX_TEX_SAMP_1 0x00000001
#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
@@ -2728,6 +3918,7 @@ static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val
{
return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
}
+#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
@@ -2796,7 +3987,7 @@ static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
{
return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
}
-#define A4XX_TEX_CONST_1_WIDTH__MASK 0x1fff8000
+#define A4XX_TEX_CONST_1_WIDTH__MASK 0x3fff8000
#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15
static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
{
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index a53f1be05f75..534a7c3fbdca 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -31,6 +31,7 @@
extern bool hang_debug;
static void a4xx_dump(struct msm_gpu *gpu);
+static bool a4xx_idle(struct msm_gpu *gpu);
/*
* a4xx_enable_hwcg() - Program the clock control registers
@@ -102,14 +103,20 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
+ /* Early A430s have a timing issue with SP/TP power collapse;
+  * disabling HW clock gating prevents it. */
+ if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
+ else
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
}
-static void a4xx_me_init(struct msm_gpu *gpu)
+
+static bool a4xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -130,8 +137,8 @@ static void a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ gpu->funcs->flush(gpu, ring);
+ return a4xx_idle(gpu);
}
static int a4xx_hw_init(struct msm_gpu *gpu)
@@ -141,7 +148,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
uint32_t *ptr, len;
int i, ret;
- if (adreno_is_a4xx(adreno_gpu)) {
+ if (adreno_is_a420(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
@@ -150,6 +157,13 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else if (adreno_is_a430(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
} else {
BUG();
}
@@ -161,6 +175,10 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+ if (adreno_is_a430(adreno_gpu))
+ gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
+
/* Enable the RBBM error reporting bits */
gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
@@ -183,6 +201,14 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
/* Turn on performance counters: */
gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
+ /* Use the first CP counter for timestamp queries; userspace may set
+  * this as well, but it selects the same counter/countable:
+ */
+ gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);
+
+ if (adreno_is_a430(adreno_gpu))
+ gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
+
/* Disable L2 bypass to avoid UCHE out of bounds errors */
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
@@ -190,6 +216,15 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
+ /* On A430 enable SP regfile sleep for power savings */
+ /* TODO downstream does this for !a420, so maybe it applies to a405 too? */
+ if (!adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
+ 0x00000441);
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
+ 0x00000441);
+ }
+
a4xx_enable_hwcg(gpu);
/*
@@ -204,10 +239,6 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
}
- ret = adreno_hw_init(gpu);
- if (ret)
- return ret;
-
/* setup access protection: */
gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
@@ -262,8 +293,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
- a4xx_me_init(gpu);
- return 0;
+ return a4xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a4xx_recover(struct msm_gpu *gpu)
@@ -297,17 +327,21 @@ static void a4xx_destroy(struct msm_gpu *gpu)
kfree(a4xx_gpu);
}
-static void a4xx_idle(struct msm_gpu *gpu)
+static bool a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
- A4XX_RBBM_STATUS_GPU_BUSY)))
+ A4XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ return true;
}
static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
@@ -422,87 +456,13 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A4XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A4XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A4XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A4XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A4XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A4XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A4XX_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A4XX_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A4XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A4XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_VS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_FS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A4XX_SP_VS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A4XX_SP_FS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A4XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A4XX_UCHE_INVALIDATE0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static void a4xx_dump(struct msm_gpu *gpu)
@@ -512,23 +472,68 @@ static void a4xx_dump(struct msm_gpu *gpu)
adreno_dump(gpu);
}
+static int a4xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ unsigned int reg;
+ /* Set the default register values; set SW_COLLAPSE to 0 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
+ do {
+ udelay(5);
+ reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
+ } while (!(reg & A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON));
+ }
+ return 0;
+}
+
+static int a4xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_suspend(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ /* Set the default register values; set SW_COLLAPSE to 1 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
+ }
+ return 0;
+}
+
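+/* CP counter 0 is programmed to CP_ALWAYS_COUNT in a4xx_hw_init(), so the
+ * 64-bit value read here advances continuously and can serve as a coarse
+ * timestamp source.
+ */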
+static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A4XX_RBBM_PERFCTR_CP_0_HI);
+
+ return 0;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
.hw_init = a4xx_hw_init,
- .pm_suspend = msm_gpu_pm_suspend,
- .pm_resume = msm_gpu_pm_resume,
+ .pm_suspend = a4xx_pm_suspend,
+ .pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
.last_fence = adreno_last_fence,
+ .submitted_fence = adreno_submitted_fence,
.submit = adreno_submit,
.flush = adreno_flush,
- .idle = a4xx_idle,
+ .active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
#ifdef CONFIG_DEBUG_FS
.show = a4xx_show,
#endif
},
+ .get_timestamp = a4xx_get_timestamp,
};
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
@@ -563,7 +568,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
@@ -582,7 +587,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
new file mode 100644
index 000000000000..56dad2217289
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -0,0 +1,3497 @@
+#ifndef A5XX_XML
+#define A5XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /local3/projects/drm/envytools/rnndb//adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a5xx.xml ( 81546 bytes, from 2016-10-31 16:38:41)
+- /local3/projects/drm/envytools/rnndb//adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a5xx_color_fmt {
+ RB5_R8_UNORM = 3,
+ RB5_R5G5B5A1_UNORM = 10,
+ RB5_R8G8B8A8_UNORM = 48,
+ RB5_R8G8B8_UNORM = 49,
+ RB5_R8G8B8A8_UINT = 51,
+ RB5_R10G10B10A2_UINT = 58,
+ RB5_R16G16B16A16_FLOAT = 98,
+};
+
+enum a5xx_tile_mode {
+ TILE5_LINEAR = 0,
+ TILE5_2 = 2,
+ TILE5_3 = 3,
+};
+
+enum a5xx_vtx_fmt {
+ VFMT5_8_UNORM = 3,
+ VFMT5_8_SNORM = 4,
+ VFMT5_8_UINT = 5,
+ VFMT5_8_SINT = 6,
+ VFMT5_8_8_UNORM = 15,
+ VFMT5_8_8_SNORM = 16,
+ VFMT5_8_8_UINT = 17,
+ VFMT5_8_8_SINT = 18,
+ VFMT5_16_UNORM = 21,
+ VFMT5_16_SNORM = 22,
+ VFMT5_16_FLOAT = 23,
+ VFMT5_16_UINT = 24,
+ VFMT5_16_SINT = 25,
+ VFMT5_8_8_8_UNORM = 33,
+ VFMT5_8_8_8_SNORM = 34,
+ VFMT5_8_8_8_UINT = 35,
+ VFMT5_8_8_8_SINT = 36,
+ VFMT5_8_8_8_8_UNORM = 48,
+ VFMT5_8_8_8_8_SNORM = 50,
+ VFMT5_8_8_8_8_UINT = 51,
+ VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_16_16_UNORM = 67,
+ VFMT5_16_16_SNORM = 68,
+ VFMT5_16_16_FLOAT = 69,
+ VFMT5_16_16_UINT = 70,
+ VFMT5_16_16_SINT = 71,
+ VFMT5_32_UNORM = 72,
+ VFMT5_32_SNORM = 73,
+ VFMT5_32_FLOAT = 74,
+ VFMT5_32_UINT = 75,
+ VFMT5_32_SINT = 76,
+ VFMT5_32_FIXED = 77,
+ VFMT5_16_16_16_UNORM = 88,
+ VFMT5_16_16_16_SNORM = 89,
+ VFMT5_16_16_16_FLOAT = 90,
+ VFMT5_16_16_16_UINT = 91,
+ VFMT5_16_16_16_SINT = 92,
+ VFMT5_16_16_16_16_UNORM = 96,
+ VFMT5_16_16_16_16_SNORM = 97,
+ VFMT5_16_16_16_16_FLOAT = 98,
+ VFMT5_16_16_16_16_UINT = 99,
+ VFMT5_16_16_16_16_SINT = 100,
+ VFMT5_32_32_UNORM = 101,
+ VFMT5_32_32_SNORM = 102,
+ VFMT5_32_32_FLOAT = 103,
+ VFMT5_32_32_UINT = 104,
+ VFMT5_32_32_SINT = 105,
+ VFMT5_32_32_FIXED = 106,
+ VFMT5_32_32_32_UNORM = 112,
+ VFMT5_32_32_32_SNORM = 113,
+ VFMT5_32_32_32_UINT = 114,
+ VFMT5_32_32_32_SINT = 115,
+ VFMT5_32_32_32_FLOAT = 116,
+ VFMT5_32_32_32_FIXED = 117,
+ VFMT5_32_32_32_32_UNORM = 128,
+ VFMT5_32_32_32_32_SNORM = 129,
+ VFMT5_32_32_32_32_FLOAT = 130,
+ VFMT5_32_32_32_32_UINT = 131,
+ VFMT5_32_32_32_32_SINT = 132,
+ VFMT5_32_32_32_32_FIXED = 133,
+};
+
+enum a5xx_tex_fmt {
+ TFMT5_A8_UNORM = 2,
+ TFMT5_8_UNORM = 3,
+ TFMT5_4_4_4_4_UNORM = 8,
+ TFMT5_5_6_5_UNORM = 14,
+ TFMT5_L8_A8_UNORM = 19,
+ TFMT5_16_FLOAT = 23,
+ TFMT5_8_8_8_8_UNORM = 48,
+ TFMT5_10_10_10_2_UNORM = 54,
+ TFMT5_16_16_FLOAT = 69,
+ TFMT5_32_FLOAT = 74,
+ TFMT5_16_16_16_16_FLOAT = 98,
+ TFMT5_32_32_FLOAT = 103,
+ TFMT5_32_32_32_32_FLOAT = 130,
+ TFMT5_X8Z24_UNORM = 160,
+};
+
+enum a5xx_tex_fetchsize {
+ TFETCH5_1_BYTE = 0,
+ TFETCH5_2_BYTE = 1,
+ TFETCH5_4_BYTE = 2,
+ TFETCH5_8_BYTE = 3,
+ TFETCH5_16_BYTE = 4,
+};
+
+enum a5xx_depth_format {
+ DEPTH5_NONE = 0,
+ DEPTH5_16 = 1,
+ DEPTH5_24_8 = 2,
+ DEPTH5_32 = 4,
+};
+
+enum a5xx_debugbus {
+ A5XX_RBBM_DBGBUS_CP = 1,
+ A5XX_RBBM_DBGBUS_RBBM = 2,
+ A5XX_RBBM_DBGBUS_VBIF = 3,
+ A5XX_RBBM_DBGBUS_HLSQ = 4,
+ A5XX_RBBM_DBGBUS_UCHE = 5,
+ A5XX_RBBM_DBGBUS_DPM = 6,
+ A5XX_RBBM_DBGBUS_TESS = 7,
+ A5XX_RBBM_DBGBUS_PC = 8,
+ A5XX_RBBM_DBGBUS_VFDP = 9,
+ A5XX_RBBM_DBGBUS_VPC = 10,
+ A5XX_RBBM_DBGBUS_TSE = 11,
+ A5XX_RBBM_DBGBUS_RAS = 12,
+ A5XX_RBBM_DBGBUS_VSC = 13,
+ A5XX_RBBM_DBGBUS_COM = 14,
+ A5XX_RBBM_DBGBUS_DCOM = 15,
+ A5XX_RBBM_DBGBUS_LRZ = 16,
+ A5XX_RBBM_DBGBUS_A2D_DSP = 17,
+ A5XX_RBBM_DBGBUS_CCUFCHE = 18,
+ A5XX_RBBM_DBGBUS_GPMU = 19,
+ A5XX_RBBM_DBGBUS_RBP = 20,
+ A5XX_RBBM_DBGBUS_HM = 21,
+ A5XX_RBBM_DBGBUS_RBBM_CFG = 22,
+ A5XX_RBBM_DBGBUS_VBIF_CX = 23,
+ A5XX_RBBM_DBGBUS_GPC = 29,
+ A5XX_RBBM_DBGBUS_LARC = 30,
+ A5XX_RBBM_DBGBUS_HLSQ_SPTP = 31,
+ A5XX_RBBM_DBGBUS_RB_0 = 32,
+ A5XX_RBBM_DBGBUS_RB_1 = 33,
+ A5XX_RBBM_DBGBUS_RB_2 = 34,
+ A5XX_RBBM_DBGBUS_RB_3 = 35,
+ A5XX_RBBM_DBGBUS_CCU_0 = 40,
+ A5XX_RBBM_DBGBUS_CCU_1 = 41,
+ A5XX_RBBM_DBGBUS_CCU_2 = 42,
+ A5XX_RBBM_DBGBUS_CCU_3 = 43,
+ A5XX_RBBM_DBGBUS_A2D_RAS_0 = 48,
+ A5XX_RBBM_DBGBUS_A2D_RAS_1 = 49,
+ A5XX_RBBM_DBGBUS_A2D_RAS_2 = 50,
+ A5XX_RBBM_DBGBUS_A2D_RAS_3 = 51,
+ A5XX_RBBM_DBGBUS_VFD_0 = 56,
+ A5XX_RBBM_DBGBUS_VFD_1 = 57,
+ A5XX_RBBM_DBGBUS_VFD_2 = 58,
+ A5XX_RBBM_DBGBUS_VFD_3 = 59,
+ A5XX_RBBM_DBGBUS_SP_0 = 64,
+ A5XX_RBBM_DBGBUS_SP_1 = 65,
+ A5XX_RBBM_DBGBUS_SP_2 = 66,
+ A5XX_RBBM_DBGBUS_SP_3 = 67,
+ A5XX_RBBM_DBGBUS_TPL1_0 = 72,
+ A5XX_RBBM_DBGBUS_TPL1_1 = 73,
+ A5XX_RBBM_DBGBUS_TPL1_2 = 74,
+ A5XX_RBBM_DBGBUS_TPL1_3 = 75,
+};
+
+enum a5xx_shader_blocks {
+ A5XX_TP_W_MEMOBJ = 1,
+ A5XX_TP_W_SAMPLER = 2,
+ A5XX_TP_W_MIPMAP_BASE = 3,
+ A5XX_TP_W_MEMOBJ_TAG = 4,
+ A5XX_TP_W_SAMPLER_TAG = 5,
+ A5XX_TP_S_3D_MEMOBJ = 6,
+ A5XX_TP_S_3D_SAMPLER = 7,
+ A5XX_TP_S_3D_MEMOBJ_TAG = 8,
+ A5XX_TP_S_3D_SAMPLER_TAG = 9,
+ A5XX_TP_S_CS_MEMOBJ = 10,
+ A5XX_TP_S_CS_SAMPLER = 11,
+ A5XX_TP_S_CS_MEMOBJ_TAG = 12,
+ A5XX_TP_S_CS_SAMPLER_TAG = 13,
+ A5XX_SP_W_INSTR = 14,
+ A5XX_SP_W_CONST = 15,
+ A5XX_SP_W_UAV_SIZE = 16,
+ A5XX_SP_W_CB_SIZE = 17,
+ A5XX_SP_W_UAV_BASE = 18,
+ A5XX_SP_W_CB_BASE = 19,
+ A5XX_SP_W_INST_TAG = 20,
+ A5XX_SP_W_STATE = 21,
+ A5XX_SP_S_3D_INSTR = 22,
+ A5XX_SP_S_3D_CONST = 23,
+ A5XX_SP_S_3D_CB_BASE = 24,
+ A5XX_SP_S_3D_CB_SIZE = 25,
+ A5XX_SP_S_3D_UAV_BASE = 26,
+ A5XX_SP_S_3D_UAV_SIZE = 27,
+ A5XX_SP_S_CS_INSTR = 28,
+ A5XX_SP_S_CS_CONST = 29,
+ A5XX_SP_S_CS_CB_BASE = 30,
+ A5XX_SP_S_CS_CB_SIZE = 31,
+ A5XX_SP_S_CS_UAV_BASE = 32,
+ A5XX_SP_S_CS_UAV_SIZE = 33,
+ A5XX_SP_S_3D_INSTR_DIRTY = 34,
+ A5XX_SP_S_3D_CONST_DIRTY = 35,
+ A5XX_SP_S_3D_CB_BASE_DIRTY = 36,
+ A5XX_SP_S_3D_CB_SIZE_DIRTY = 37,
+ A5XX_SP_S_3D_UAV_BASE_DIRTY = 38,
+ A5XX_SP_S_3D_UAV_SIZE_DIRTY = 39,
+ A5XX_SP_S_CS_INSTR_DIRTY = 40,
+ A5XX_SP_S_CS_CONST_DIRTY = 41,
+ A5XX_SP_S_CS_CB_BASE_DIRTY = 42,
+ A5XX_SP_S_CS_CB_SIZE_DIRTY = 43,
+ A5XX_SP_S_CS_UAV_BASE_DIRTY = 44,
+ A5XX_SP_S_CS_UAV_SIZE_DIRTY = 45,
+ A5XX_HLSQ_ICB = 46,
+ A5XX_HLSQ_ICB_DIRTY = 47,
+ A5XX_HLSQ_ICB_CB_BASE_DIRTY = 48,
+ A5XX_SP_POWER_RESTORE_RAM = 64,
+ A5XX_SP_POWER_RESTORE_RAM_TAG = 65,
+ A5XX_TP_POWER_RESTORE_RAM = 66,
+ A5XX_TP_POWER_RESTORE_RAM_TAG = 67,
+};
+
+enum a5xx_tex_filter {
+ A5XX_TEX_NEAREST = 0,
+ A5XX_TEX_LINEAR = 1,
+ A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+ A5XX_TEX_REPEAT = 0,
+ A5XX_TEX_CLAMP_TO_EDGE = 1,
+ A5XX_TEX_MIRROR_REPEAT = 2,
+ A5XX_TEX_CLAMP_TO_BORDER = 3,
+ A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+ A5XX_TEX_ANISO_1 = 0,
+ A5XX_TEX_ANISO_2 = 1,
+ A5XX_TEX_ANISO_4 = 2,
+ A5XX_TEX_ANISO_8 = 3,
+ A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+ A5XX_TEX_X = 0,
+ A5XX_TEX_Y = 1,
+ A5XX_TEX_Z = 2,
+ A5XX_TEX_W = 3,
+ A5XX_TEX_ZERO = 4,
+ A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+ A5XX_TEX_1D = 0,
+ A5XX_TEX_2D = 1,
+ A5XX_TEX_CUBE = 2,
+ A5XX_TEX_3D = 3,
+};
+
+#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080
+#define A5XX_INT0_CP_SW 0x00000100
+#define A5XX_INT0_CP_HW_ERROR 0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_INT0_CP_IB2 0x00002000
+#define A5XX_INT0_CP_IB1 0x00004000
+#define A5XX_INT0_CP_RB 0x00008000
+#define A5XX_INT0_CP_UNUSED_1 0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS 0x00040000
+#define A5XX_INT0_UNKNOWN_1 0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_INT0_UNUSED_2 0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT 0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0 0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1 0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
+#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define REG_A5XX_CP_RB_BASE 0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A5XX_CP_RB_CNTL 0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A5XX_CP_RB_RPTR 0x00000806
+
+#define REG_A5XX_CP_RB_WPTR 0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE 0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830
+
+#define REG_A5XX_CP_CNTL 0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG 0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT 0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE 0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE 0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
+
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000
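+
+/* Illustrative usage (not part of the generated XML): a protect entry
+ * guards a power-of-two-sized register range, where MASK_LEN is the log2
+ * of the range length. E.g. to trap writes to 0x800..0x81f (32 registers):
+ *
+ *	gpu_write(gpu, REG_A5XX_CP_PROTECT(n),
+ *		A5XX_CP_PROTECT_REG_TRAP_WRITE |
+ *		A5XX_CP_PROTECT_REG_BASE_ADDR(0x800) |
+ *		A5XX_CP_PROTECT_REG_MASK_LEN(5));
+ */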
+
+#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT 0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
+}
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
+}
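+
+/* Illustrative usage (assumption, not from the XML): a debugbus block from
+ * enum a5xx_debugbus and a signal index are packed into one select write,
+ * e.g. to sample signal 0 of SP_0:
+ *
+ *	gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_A,
+ *		A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(A5XX_RBBM_DBGBUS_SP_0) |
+ *		A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(0));
+ */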
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A5XX_RBBM_INT_0_MASK 0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+
+#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD 0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
+
+#define REG_A5XX_RBBM_STATUS 0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000
+#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000
+#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000
+#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000
+#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000
+#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000
+#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000
+#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000
+#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000
+#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000
+#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800
+#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400
+#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
+
+#define REG_A5XX_RBBM_STATUS3 0x00000530
+
+#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A5XX_VSC_PIPE_DATA_LENGTH_0 0x00000c00
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
+
+#define REG_A5XX_VSC_BIN_SIZE 0x00000cdd
+#define A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_VSC_BIN_SIZE_X__MASK 0x00007fff
+#define A5XX_VSC_BIN_SIZE_X__SHIFT 0
+static inline uint32_t A5XX_VSC_BIN_SIZE_X(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_X__SHIFT) & A5XX_VSC_BIN_SIZE_X__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_Y__MASK 0x7fff0000
+#define A5XX_VSC_BIN_SIZE_Y__SHIFT 16
+static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_Y__SHIFT) & A5XX_VSC_BIN_SIZE_Y__MASK;
+}
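+
+/* Illustrative usage (assumption): bin width and height are packed into a
+ * single word, e.g. for a 32x32 bin:
+ *
+ *	gpu_write(gpu, REG_A5XX_VSC_BIN_SIZE,
+ *		A5XX_VSC_BIN_SIZE_X(32) | A5XX_VSC_BIN_SIZE_Y(32));
+ */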
+
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
+
+#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef
+
+#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100
+
+#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01
+
+#define REG_A5XX_PC_MODE_CNTL 0x00000d02
+
+#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+
+#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
+
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
+
+#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17
+
+#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08
+
+#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK 0x0000ff00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DBG_READ_SEL_STATETYPE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT) & A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK;
+}
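+
+/* Illustrative usage (assumption): selecting a state type here routes that
+ * HLSQ state into the AHB read aperture defined below, so it can be dumped
+ * register-by-register from the CPU:
+ *
+ *	gpu_write(gpu, REG_A5XX_HLSQ_DBG_READ_SEL,
+ *		A5XX_HLSQ_DBG_READ_SEL_STATETYPE(statetype));
+ */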
+
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000
+
+#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41
+
+#define REG_A5XX_VFD_MODE_CNTL 0x00000e42
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
+
+#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
+
+#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
+
+#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67
+
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+
+#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88
+
+#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89
+
+#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
+
+#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
+
+#define REG_A5XX_VBIF_VERSION 0x00003000
+
+#define REG_A5XX_VBIF_CLKON 0x00003001
+#define A5XX_VBIF_CLKON_FORCE_ON 0x00000001
+#define A5XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002
+
+#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+#define A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN 0x00000001
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK 0x0000000f
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT 0
+static inline uint32_t A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK;
+}
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK 0x0000001f
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT 0
+static inline uint32_t A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(uint32_t val)
+{
+ return ((val) << A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK;
+}
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+
+#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
+
+#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
+
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK 0x0000000f
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT 0
+static inline uint32_t A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS(uint32_t val)
+{
+ return ((val) << A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT) & A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK;
+}
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_BCL_ENABLED 0x00000002
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_LLM_ENABLED 0x00000200
+
+#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_IDLE_FULL_LM 0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK 0x00000030
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT 4
+static inline uint32_t A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD(uint32_t val)
+{
+ return ((val) << A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT) & A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK;
+}
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_IDLE_FULL_ACK 0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_WAKEUP_ACK 0x00000002
+
+#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN 0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK 0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
+
+#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+
+#define REG_A5XX_UNKNOWN_E001 0x0000e001
+
+#define REG_A5XX_UNKNOWN_E004 0x0000e004
+
+#define REG_A5XX_GRAS_CLEAR_CNTL 0x0000e005
+#define A5XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
+
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
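+
+/*
+ * As throughout this generated header, each multi-bit field gets a
+ * __MASK/__SHIFT pair plus an inline packing helper; packed fields are
+ * OR'd together before the register write.  Hypothetical usage, assuming
+ * the gpu_write() helper from msm_gpu.h:
+ *
+ *   gpu_write(gpu, REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ,
+ *             A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(horz) |
+ *             A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(vert));
+ */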
+
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
+#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+
+#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
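+
+/*
+ * The "val * 16.0" conversions above imply the point size and min/max
+ * fields are fixed point with four fractional bits (12.4 for the 16-bit
+ * min/max fields); this is inferred from the helpers rather than stated
+ * by the register database.
+ */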
+
+#define REG_A5XX_UNKNOWN_E093 0x0000e093
+
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_ALPHA_TEST_ENABLE 0x00000001
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
+
+#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
+
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
+
+#define REG_A5XX_RB_CNTL 0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS 0x00020000
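+
+/*
+ * The ">> 5" in the WIDTH/HEIGHT packers suggests the bin dimensions are
+ * programmed in units of 32 pixels; an inference from the helper code,
+ * not documented behaviour.
+ */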
+
+#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK 0xff000000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK;
+}
+
+#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A5XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A5XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+
+#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+
+#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
+
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000007f
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK 0x0007ffff
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT) & A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK 0x01ffffff
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
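+
+/*
+ * The RB_MRT state repeats per render target with a stride of 7 dwords,
+ * so the helpers above take the MRT index.  Hypothetical usage for
+ * target n (illustrative only):
+ *
+ *   gpu_write(gpu, REG_A5XX_RB_MRT_CONTROL(n),
+ *             A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(0xf));
+ */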
+
+#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
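+
+/*
+ * Note the two encodings: the 16-bit FLOAT field above packs a half-float
+ * via util_float_to_half(), while the *_F32 register takes the raw
+ * IEEE-754 bits via fui().  Both helpers are assumed to be provided by
+ * the code including this header.
+ */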
+
+#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
+#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
+
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c2
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12
+static inline uint32_t A5XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+ return ((val >> 12) << A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
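+
+/*
+ * STENCIL_BASE discards the low 12 address bits (val >> 12), so the
+ * separate-stencil buffer base presumably needs to be 4 KiB aligned.
+ */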
+
+#define REG_A5XX_UNKNOWN_E1C3 0x0000e1c3
+
+#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
+
+#define REG_A5XX_VPC_CNTL_0 0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
+
+#define REG_A5XX_VPC_PACK 0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
+
+#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_LO_0 0x0000e2a7
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_HI_0 0x0000e2a8
+
+#define REG_A5XX_VPC_SO_BUFFER_SIZE_0 0x0000e2a9
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_LO_0 0x0000e2ac
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_HI_0 0x0000e2ad
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+
+#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
+
+#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+
+#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+
+#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
+
+#define REG_A5XX_VFD_CONTROL_0 0x0000e400
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x0000003f
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_1 0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+
+#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+
+#define REG_A5XX_VFD_CONTROL_4 0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5 0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
+
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
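+
+/*
+ * Per-stream vertex fetch state: BASE_LO/BASE_HI/SIZE/STRIDE repeat with
+ * a stride of 4 dwords, and the helpers above just compute the register
+ * offset for stream i0.
+ */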
+
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL 0x0000e580
+
+#define REG_A5XX_SP_VS_CONTROL_REG 0x0000e584
+#define A5XX_SP_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONTROL_REG 0x0000e585
+#define A5XX_SP_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONTROL_REG 0x0000e586
+#define A5XX_SP_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONTROL_REG 0x0000e587
+#define A5XX_SP_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_GS_CONTROL_REG 0x0000e588
+#define A5XX_SP_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_CS_CONFIG 0x0000e589
+
+#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a
+
+#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b
+
+#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac
+
+#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
+
+#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3
+
+#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
+
+#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
+
+#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x0000007f
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+
+#define REG_A5XX_SP_CS_CNTL_0 0x0000e5f0
+
+#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
+
+#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75a
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75b
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75e
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75f
+
+#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764
+
+#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784
+
+#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785
+
+#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a
+
+#define REG_A5XX_HLSQ_VS_CONTROL_REG 0x0000e78b
+#define A5XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CONTROL_REG 0x0000e78c
+#define A5XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CONTROL_REG 0x0000e78d
+#define A5XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CONTROL_REG 0x0000e78e
+#define A5XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CONTROL_REG 0x0000e78f
+#define A5XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790
+
+#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791
+
+#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792
+
+#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+
+#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
+
+#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_3 0x0000e7dc
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_4 0x0000e7dd
+
+#define REG_A5XX_TEX_SAMP_0 0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
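+/*
+ * Illustrative sketch (not part of the generated header): the field helpers
+ * above are meant to be OR'd together into a single register value. Assuming
+ * the usual a5xx_tex_filter/a5xx_tex_clamp enum names, a sampler dword for
+ * linear filtering with repeat wrapping could be built as:
+ *
+ *	uint32_t samp0 = A5XX_TEX_SAMP_0_XY_MAG(A5XX_TEX_LINEAR) |
+ *		A5XX_TEX_SAMP_0_XY_MIN(A5XX_TEX_LINEAR) |
+ *		A5XX_TEX_SAMP_0_WRAP_S(A5XX_TEX_REPEAT) |
+ *		A5XX_TEX_SAMP_0_WRAP_T(A5XX_TEX_REPEAT) |
+ *		A5XX_TEX_SAMP_0_LOD_BIAS(0.0f);
+ *
+ * Each helper shifts its value into position and masks it so a stray bit
+ * cannot corrupt a neighboring field.
+ */
+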
+#define REG_A5XX_TEX_SAMP_1 0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_2 0x00000002
+
+#define REG_A5XX_TEX_SAMP_3 0x00000003
+
+#define REG_A5XX_TEX_CONST_0 0x00000000
+#define A5XX_TEX_CONST_0_TILED 0x00000001
+#define A5XX_TEX_CONST_0_SRGB 0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_1 0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2 0x00000002
+#define A5XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val)
+{
+ return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff00
+#define A5XX_TEX_CONST_2_PITCH__SHIFT 8
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK 0x60000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+ return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3 0x00000003
+#define A5XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff
+#define A5XX_TEX_CONST_3_LAYERSZ__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ__MASK;
+}
+#define A5XX_TEX_CONST_3_LAYERSZ2__MASK 0xff800000
+#define A5XX_TEX_CONST_3_LAYERSZ2__SHIFT 23
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_4 0x00000004
+#define A5XX_TEX_CONST_4_BASE__MASK 0xffffffe0
+#define A5XX_TEX_CONST_4_BASE__SHIFT 5
+static inline uint32_t A5XX_TEX_CONST_4_BASE(uint32_t val)
+{
+ return ((val >> 5) << A5XX_TEX_CONST_4_BASE__SHIFT) & A5XX_TEX_CONST_4_BASE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_5 0x00000005
+#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_6 0x00000006
+
+#define REG_A5XX_TEX_CONST_7 0x00000007
+
+#define REG_A5XX_TEX_CONST_8 0x00000008
+
+#define REG_A5XX_TEX_CONST_9 0x00000009
+
+#define REG_A5XX_TEX_CONST_10 0x0000000a
+
+#define REG_A5XX_TEX_CONST_11 0x0000000b
+
+#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
new file mode 100644
index 000000000000..f5847bc60c49
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -0,0 +1,1345 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "a5xx_gpu.h"
+
+static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ struct msm_gem_address_space *aspace)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_mmu *mmu = aspace->mmu;
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ if (!iommu->ttbr0)
+ return;
+
+ /* Turn off protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn on APRIV mode to access critical regions */
+ OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+ OUT_RING(ring, 1);
+
+ /* Make sure the ME is synchronized before starting the update */
+ OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
+
+ /* Execute the table update */
+ OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 3);
+ OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+ OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+ OUT_RING(ring, iommu->contextidr);
+
+ /*
+ * Write the new TTBR0 to the preemption records - this will be used to
+ * reload the pagetable if the current ring gets preempted out.
+ */
+ OUT_PKT7(ring, CP_MEM_WRITE, 4);
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+ OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+ OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+
+ /* Also write the current contextidr (ASID) */
+ OUT_PKT7(ring, CP_MEM_WRITE, 3);
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id,
+ contextidr)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id,
+ contextidr)));
+ OUT_RING(ring, iommu->contextidr);
+
+ /* Invalidate the draw state so we start off fresh */
+ OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
+ OUT_RING(ring, 0x40000);
+ OUT_RING(ring, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn off APRIV */
+ OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn protected mode back on */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+}
+
+static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[submit->ring];
+ unsigned int i, ibs = 0;
+
+ a5xx_set_pagetable(gpu, ring, submit->aspace);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ /* Enable local preemption for fine-grained preemption */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+ }
+
+ /*
+ * Write the render mode to NULL (0) to indicate to the CP that the IBs
+ * are done rendering - otherwise a lucky preemption would start
+ * replaying from the last checkpoint
+ */
+ OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ /* Turn off IB level preemptions */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->fence);
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+ * timestamp is written to the memory and then triggers the interrupt
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+ OUT_RING(ring, submit->fence);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ /*
+ * If dword[2:1] are non-zero, they specify an address for the CP to
+ * write the value of dword[3] to on preemption complete. Write 0 to
+ * skip the write.
+ */
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ /* Data value - not used if the address above is 0 */
+ OUT_RING(ring, 0x01);
+ /* Set bit 0 to trigger an interrupt on preempt complete */
+ OUT_RING(ring, 0x01);
+
+ a5xx_flush(gpu, ring);
+
+ /* Check to see if we need to start preemption */
+ a5xx_preempt_trigger(gpu);
+
+ return 0;
+}
+
+static const struct {
+ u32 offset;
+ u32 value;
+} a5xx_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+};
+
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+ gpu_write(gpu, a5xx_hwcg[i].offset,
+ state ? a5xx_hwcg[i].value : 0);
+
+ /* There are a few additional registers just for A540 */
+ if (adreno_is_a540(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU,
+ state ? 0x770 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU,
+ state ? 0x004 : 0);
+ }
+
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
+}
+
+static int a5xx_me_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002F);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* Specify workarounds for various microcode issues */
+ if (adreno_is_a530(adreno_gpu)) {
+ /* Workaround for token end syncs
+ * Force a WFI after every direct-render 3D mode draw and every
+ * 2D mode 3 draw
+ */
+ OUT_RING(ring, 0x0000000B);
+ } else {
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+ }
+
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu, ring);
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static int a5xx_preempt_start(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (gpu->nr_rings == 1)
+ return 0;
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x00);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x01);
+
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x01);
+ OUT_RING(ring, 0x01);
+
+ gpu->funcs->flush(gpu, ring);
+
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova)
+{
+ struct drm_device *drm = gpu->dev;
+ struct drm_gem_object *bo;
+ void *ptr;
+
+ mutex_lock(&drm->struct_mutex);
+ bo = msm_gem_new(drm, fw->size - 4,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(bo))
+ return bo;
+
+ ptr = msm_gem_vaddr(bo);
+ if (!ptr) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (iova) {
+ int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
+
+ if (ret) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(ret);
+ }
+ }
+
+ memcpy(ptr, &fw->data[4], fw->size - 4);
+ return bo;
+}
+
+static int a5xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int ret;
+
+ if (!a5xx_gpu->pm4_bo) {
+ a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
+ &a5xx_gpu->pm4_iova);
+
+ if (IS_ERR(a5xx_gpu->pm4_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (!a5xx_gpu->pfp_bo) {
+ a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
+ &a5xx_gpu->pfp_iova);
+
+ if (IS_ERR(a5xx_gpu->pfp_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
+ REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+
+ gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
+ REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+
+ return 0;
+}
+
+#ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ const char *name;
+ void *ptr;
+
+ /* If no zap shader was defined, we'll assume that none is needed */
+ if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,zap-shader", &name))
+ return;
+
+ /*
+ * If the zap shader has already been loaded then just ask the SCM to
+ * re-initialize the registers (not needed if CPZ retention is a thing)
+ */
+ if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+ int ret;
+ struct scm_desc desc = { 0 };
+
+ if (of_property_read_bool(GPU_OF_NODE(gpu),
+ "qcom,cpz-retention"))
+ return;
+
+ desc.args[0] = 0;
+ desc.args[1] = 13;
+ desc.arginfo = SCM_ARGS(2);
+
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, 0x0A), &desc);
+ if (ret)
+ DRM_ERROR(
+ "%s: zap-shader resume failed with error %d\n",
+ gpu->name, ret);
+
+ return;
+ }
+
+ ptr = subsystem_get(name);
+
+ if (IS_ERR_OR_NULL(ptr)) {
+ DRM_ERROR("%s: Unable to load the zap shader: %ld\n", gpu->name,
+ IS_ERR(ptr) ? PTR_ERR(ptr) : -ENODEV);
+ } else {
+ set_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags);
+ }
+}
+#else
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ if (!of_find_property(GPU_OF_NODE(gpu), "qcom,zap-shader", NULL))
+ return;
+
+ DRM_INFO_ONCE("%s: Zap shader is defined but loader isn't available\n",
+ gpu->name);
+}
+#endif
+
+#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
+ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
+ A5XX_RBBM_INT_0_MASK_CP_SW | \
+ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+
+static int a5xx_hw_init(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int ret, bit = 0;
+
+ pm_qos_update_request(&gpu->pm_qos_req_dma, 101);
+
+ gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ if (adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+ /* Enable RBBM error reporting bits */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+ /*
+ * Mask out the activity signals from RB1-3 to avoid false
+ * positives
+ */
+
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
+ 0xF0000000);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
+ 0xFFFFFFFF);
+ }
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0xFFFF);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+ /* Increase VFD cache access so LRZ and other data gets evicted less */
+ gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+ /* Set the GMEM VA range [0x100000:0x100000 + adreno_gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO,
+ REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+
+ gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+ REG_A5XX_UCHE_GMEM_RANGE_MAX_HI,
+ 0x00100000 + adreno_gpu->gmem - 1);
+
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+ gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+
+ /* Enable USE_RETENTION_FLOPS */
+ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+ /* Enable ME/PFP split notification */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+ /* Enable HWCG */
+ a5xx_set_hwcg(gpu, true);
+
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+ /* Set the highest bank bit if specified in the device tree */
+ if (!of_property_read_u32(pdev->dev.of_node, "qcom,highest-bank-bit",
+ &bit)) {
+ if (bit >= 13 && bit <= 16) {
+ bit -= 13;
+
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, bit << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, bit << 1);
+
+ if (adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2,
+ bit);
+ }
+ }
+
+ /* Try to load and initialize the zap shader if applicable */
+ a5xx_zap_shader_init(gpu);
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
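+ /*
+ * An assumption about the ADRENO_PROTECT_RW() encoding (it is not defined
+ * in this file): each entry protects a power-of-two sized window of
+ * registers starting at the given base, which is why the counts below are
+ * all powers of two.
+ */
+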
+ /* RBBM */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+ /* Content protect */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+ /* CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+ /* RB */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+ /* VPC */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+
+ /* UCHE */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+ ADRENO_PROTECT_RW(0x10000, 0x8000));
+
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+ /*
+ * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Put the GPU into 64 bit by default */
+ gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
+ /* Load the GPMU firmware before starting the HW init */
+ a5xx_gpmu_ucode_init(gpu);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ a5xx_preempt_hw_init(gpu);
+
+ ret = a5xx_ucode_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Enable the RBBM interrupts */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+ /* Clear ME_HALT to start the micro engine */
+ gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+ ret = a5xx_me_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Send a pipeline event stat to get misbehaving counters to start
+ * ticking correctly
+ */
+ if (adreno_is_a530(adreno_gpu)) {
+ OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
+ OUT_RING(gpu->rb[0], 0x0F);
+
+ gpu->funcs->flush(gpu, gpu->rb[0]);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ }
+
+ /*
+ * If a zap shader was specified in the device tree, assume that we are
+ * on a secure device that blocks access to the RBBM_SECVID registers
+ * so we need to use the CP to switch out of secure mode. If a zap
+ * shader was NOT specified then we assume we are on an unlocked device.
+ * If we guessed wrong then the access to the register will probably
+ * cause a XPU violation.
+ */
+ if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu, gpu->rb[0]);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else {
+ /* Print a warning so if we die, we know why */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ }
+
+ /* Next, start the power */
+ ret = a5xx_power_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Last step - yield the ringbuffer */
+ a5xx_preempt_start(gpu);
+
+ pm_qos_update_request(&gpu->pm_qos_req_dma, 501);
+
+ return 0;
+}
+
+static void a5xx_recover(struct msm_gpu *gpu)
+{
+ adreno_dump_info(gpu);
+
+ msm_gpu_snapshot(gpu, gpu->snapshot);
+
+ /* Reset the GPU so it can work again */
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
+
+ adreno_recover(gpu);
+}
+
+static void a5xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ a5xx_preempt_fini(gpu);
+
+ if (a5xx_gpu->pm4_bo) {
+ if (a5xx_gpu->pm4_iova)
+ msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ if (a5xx_gpu->pfp_iova)
+ msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+ }
+
+ if (a5xx_gpu->gpmu_bo) {
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+ }
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a5xx_gpu);
+}
+
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+ if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
+ return false;
+
+ /*
+ * Nearly every abnormality ends up pausing the GPU and triggering a
+ * fault so we can safely just watch for this one interrupt to fire
+ */
+ return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+}
+
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (ring != a5xx_gpu->cur_ring) {
+ WARN(1, "Tried to idle a non-current ringbuffer\n");
+ return false;
+ }
+
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a5xx_check_idle(gpu))) {
+ DRM_ERROR(
+ "%s: timeout waiting for GPU RB %d to idle: status %8.8X rptr/wptr: %4.4X/%4.4X irq %8.8X\n",
+ gpu->name, ring->id,
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+ gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
+
+ return false;
+ }
+
+ return true;
+}
+
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+ if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+ /*
+ * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+ * read it twice
+ */
+
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+ val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+ dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
+
+ if (status & A5XX_CP_INT_CP_DMA_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
+
+ if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 24) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, val);
+ }
+
+ if (status & A5XX_CP_INT_CP_AHB_ERROR) {
+ u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
+ const char *access[16] = { "reserved", "reserved",
+ "timestamp lo", "timestamp hi", "pfp read", "pfp write",
+ "", "", "me read", "me write", "", "", "crashdump read",
+ "crashdump write" };
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
+ status & 0xFFFFF, access[(status >> 24) & 0xF],
+ (status & (1 << 31)), status);
+ }
+}
+
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
+{
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
+ val & (1 << 28) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
+ (val >> 24) & 0xF);
+
+ /* Clear the error */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+
+ /* Clear the interrupt */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
+}
+
+static void a5xx_uche_err_irq(struct msm_gpu *gpu)
+{
+ uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI);
+
+ addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
+
+ dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
+ addr);
+}
+
+static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
+{
+ dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
+}
+
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+ dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, adreno_submitted_fence(gpu, ring),
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+ gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+#define RBBM_ERROR_MASK \
+ (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+
+static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+ /*
+ * Clear all the interrupts except for RBBM_AHB_ERROR
+ * which needs to be cleared after the error condition
+ * is cleared otherwise it will storm
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+
+ if (status & RBBM_ERROR_MASK)
+ a5xx_rbbm_err_irq(gpu, status);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a5xx_cp_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
+ a5xx_fault_detect_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ a5xx_uche_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+ a5xx_gpmu_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
+ a5xx_preempt_irq(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ REG_A5XX_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
+};
+
+static const u32 a5xx_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002b,
+ 0x002e, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
+ 0x0097, 0x00bb, 0x03a0, 0x0464, 0x0469, 0x046f, 0x04d2, 0x04d3,
+ 0x04e0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081a, 0x081f, 0x0841,
+ 0x0860, 0x0860, 0x0880, 0x08a0, 0x0b00, 0x0b12, 0x0b14, 0x0b28,
+ 0x0b78, 0x0b7f, 0x0bb0, 0x0bbd, 0x0bc0, 0x0bc6, 0x0bd0, 0x0c53,
+ 0x0c60, 0x0c61, 0x0c80, 0x0c82, 0x0c84, 0x0c85, 0x0c90, 0x0c9b,
+ 0x0ca0, 0x0ca0, 0x0cb0, 0x0cb2, 0x0cc1, 0x0cc1, 0x0cc4, 0x0cc7,
+ 0x0ccc, 0x0ccc, 0x0cd0, 0x0cdb, 0x0ce0, 0x0ce5, 0x0ce8, 0x0ce8,
+ 0x0cec, 0x0cf1, 0x0cfb, 0x0d0e, 0x0d10, 0x0d17, 0x0d20, 0x0d23,
+ 0x0d30, 0x0d30, 0x0e40, 0x0e43, 0x0e4a, 0x0e4a, 0x0e50, 0x0e57,
+ 0x0e60, 0x0e7c, 0x0e80, 0x0e8e, 0x0e90, 0x0e96, 0x0ea0, 0x0eab,
+ 0x0eb0, 0x0eb2, 0x2100, 0x211e, 0x2140, 0x2145, 0x2180, 0x2185,
+ 0x2500, 0x251e, 0x2540, 0x2545, 0x2580, 0x2585, 0x3000, 0x3014,
+ 0x3018, 0x302c, 0x3030, 0x3030, 0x3034, 0x3036, 0x303c, 0x303d,
+ 0x3040, 0x3040, 0x3042, 0x3042, 0x3049, 0x3049, 0x3058, 0x3058,
+ 0x305a, 0x3061, 0x3064, 0x3068, 0x306c, 0x306d, 0x3080, 0x3088,
+ 0x308b, 0x308c, 0x3090, 0x3094, 0x3098, 0x3098, 0x309c, 0x309c,
+ 0x3124, 0x3124, 0x340c, 0x340c, 0x3410, 0x3410, 0x3800, 0x3801,
+ 0xa800, 0xa800, 0xa820, 0xa828, 0xa840, 0xa87d, 0xa880, 0xa88d,
+ 0xa890, 0xa8a3, 0xa8a8, 0xa8aa, 0xa8c0, 0xa8c3, 0xa8c6, 0xa8ca,
+ 0xa8cc, 0xa8cf, 0xa8d1, 0xa8d8, 0xa8dc, 0xa8dc, 0xa8e0, 0xa8f5,
+ 0xac00, 0xac06, 0xac40, 0xac47, 0xac60, 0xac62, 0xac80, 0xac82,
+ 0xb800, 0xb808, 0xb80c, 0xb812, 0xb814, 0xb817, 0xb900, 0xb904,
+ 0xb906, 0xb90a, 0xb90c, 0xb90f, 0xb920, 0xb924, 0xb926, 0xb92a,
+ 0xb92c, 0xb92f, 0xb940, 0xb944, 0xb946, 0xb94a, 0xb94c, 0xb94f,
+ 0xb960, 0xb964, 0xb966, 0xb96a, 0xb96c, 0xb96f, 0xb980, 0xb984,
+ 0xb986, 0xb98a, 0xb98c, 0xb98f, 0xb9a0, 0xb9b0, 0xb9b8, 0xb9ba,
+ 0xd200, 0xd23f, 0xe000, 0xe006, 0xe010, 0xe09a, 0xe0a0, 0xe0a4,
+ 0xe0aa, 0xe0eb, 0xe100, 0xe105, 0xe140, 0xe147, 0xe150, 0xe187,
+ 0xe1a0, 0xe1a9, 0xe1b0, 0xe1b6, 0xe1c0, 0xe1c7, 0xe1d0, 0xe1d1,
+ 0xe200, 0xe201, 0xe210, 0xe21c, 0xe240, 0xe268, 0xe280, 0xe280,
+ 0xe282, 0xe2a3, 0xe2a5, 0xe2c2, 0xe380, 0xe38f, 0xe3b0, 0xe3b0,
+ 0xe400, 0xe405, 0xe408, 0xe4e9, 0xe4f0, 0xe4f0, 0xe800, 0xe806,
+ 0xe810, 0xe89a, 0xe8a0, 0xe8a4, 0xe8aa, 0xe8eb, 0xe900, 0xe905,
+ 0xe940, 0xe947, 0xe950, 0xe987, 0xe9a0, 0xe9a9, 0xe9b0, 0xe9b6,
+ 0xe9c0, 0xe9c7, 0xe9d0, 0xe9d1, 0xea00, 0xea01, 0xea10, 0xea1c,
+ 0xea40, 0xea68, 0xea80, 0xea80, 0xea82, 0xeaa3, 0xeaa5, 0xeac2,
+ 0xeb80, 0xeb8f, 0xebb0, 0xebb0, 0xec00, 0xec05, 0xec08, 0xece9,
+ 0xecf0, 0xecf0, 0xf400, 0xf400, 0xf800, 0xf807,
+ ~0
+};
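+
+/*
+ * The table above is consumed as inclusive {start, end} register pairs,
+ * terminated by the ~0 sentinel, so the register dump and snapshot paths
+ * know which ranges are safe to read.
+ */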
+
+static int a5xx_pm_resume(struct msm_gpu *gpu)
+{
+ int ret;
+
+ /* Turn on the core power */
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ /* Turn on the RBCCU domain first to limit the chances of voltage droop */
+ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
+
+ /* Wait 3 usecs before polling */
+ udelay(3);
+
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret) {
+ DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
+ gpu->name,
+ gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
+ return ret;
+ }
+
+ /* Turn on the SP domain */
+ gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret)
+ DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
+ gpu->name);
+
+ return ret;
+}
+
+static int a5xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ /* Clear the VBIF pipe before shutting down */
+
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ /*
+ * Reset the VBIF before power collapse to avoid issues with FIFO
+ * entries
+ */
+
+ if (adreno_is_a530(adreno_gpu)) {
+ /* These only need to be done for A530 */
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ }
+
+ return msm_gpu_pm_suspend(gpu);
+}
+
+static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ gpu->funcs->pm_resume(gpu);
+
+ seq_printf(m, "status: %08x\n",
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ gpu->funcs->pm_suspend(gpu);
+
+ adreno_show(gpu, m);
+}
+#endif
+
+static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ return a5xx_gpu->cur_ring;
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a5xx_hw_init,
+ .pm_suspend = a5xx_pm_suspend,
+ .pm_resume = a5xx_pm_resume,
+ .recover = a5xx_recover,
+ .last_fence = adreno_last_fence,
+ .submitted_fence = adreno_submitted_fence,
+ .submit = a5xx_submit,
+ .flush = a5xx_flush,
+ .active_ring = a5xx_active_ring,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .show = a5xx_show,
+#endif
+ .snapshot = a5xx_snapshot,
+ },
+ .get_timestamp = a5xx_get_timestamp,
+};
+
+/* Read the limits management leakage from the efuses */
+static void a530_efuse_leakage(struct platform_device *pdev,
+ struct adreno_gpu *adreno_gpu, void *base,
+ size_t size)
+{
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned int row0, row2;
+ unsigned int leakage_pwr_on, coeff;
+
+ if (size < 0x148)
+ return;
+
+ /* Leakage */
+ row0 = readl_relaxed(base + 0x134);
+ row2 = readl_relaxed(base + 0x144);
+
+ /* Read barrier to get the previous two reads */
+ rmb();
+
+ /* Get the leakage coefficient from device tree */
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,base-leakage-coefficent", &coeff))
+ return;
+
+ leakage_pwr_on = ((row2 >> 2) & 0xFF) * (1 << ((row0 >> 1) & 0x03));
+ a5xx_gpu->lm_leakage = (leakage_pwr_on << 16) |
+ ((leakage_pwr_on * coeff) / 100);
+}
+
+/* Read the speed bin from the efuses */
+static void a530_efuse_bin(struct platform_device *pdev,
+ struct adreno_gpu *adreno_gpu, void *base,
+ size_t size)
+{
+ uint32_t speed_bin[3];
+ uint32_t val;
+
+ if (of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,gpu-speed-bin", speed_bin, 3))
+ return;
+
+ if (size < speed_bin[0] + 4)
+ return;
+
+ val = readl_relaxed(base + speed_bin[0]);
+
+ adreno_gpu->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
+}
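+
+/*
+ * For illustration only (hypothetical values): qcom,gpu-speed-bin is an
+ * {offset, mask, shift} triplet locating the bin inside the fuse block,
+ * e.g.:
+ *
+ *	qcom,gpu-speed-bin = <0x138 0xe0000000 29>;
+ *
+ * which would read the fuse word at offset 0x138 and extract bits [31:29].
+ */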
+
+/* Read target specific configuration from the efuses */
+static void a5xx_efuses_read(struct platform_device *pdev,
+ struct adreno_gpu *adreno_gpu)
+{
+ struct adreno_platform_config *config = pdev->dev.platform_data;
+ const struct adreno_info *info = adreno_info(config->rev);
+ struct resource *res;
+ void *base;
+
+ /*
+ * The adreno_gpu->revn mechanism isn't set up yet so we need to check
+ * it directly here
+ */
+ if (info->revn != 530)
+ return;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "qfprom_memory");
+ if (!res)
+ return;
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base)
+ return;
+
+ a530_efuse_bin(pdev, adreno_gpu, base, resource_size(res));
+ a530_efuse_leakage(pdev, adreno_gpu, base, resource_size(res));
+
+ iounmap(base);
+}
+
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct a5xx_gpu *a5xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "No A5XX device is defined\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
+ if (!a5xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a5xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ a5xx_gpu->pdev = pdev;
+ adreno_gpu->registers = a5xx_registers;
+ adreno_gpu->reg_offsets = a5xx_register_offsets;
+
+ a5xx_gpu->lm_leakage = 0x4E001A;
+
+ /* Check the efuses for some configuration */
+ a5xx_efuses_read(pdev, adreno_gpu);
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+ if (ret) {
+ a5xx_destroy(&(a5xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /* Set up the preemption specific bits and pieces for each ringbuffer */
+ a5xx_preempt_init(gpu);
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
new file mode 100644
index 000000000000..3de14fe42a1b
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -0,0 +1,187 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
+enum {
+ A5XX_ZAP_SHADER_LOADED = 1,
+};
+
+struct a5xx_gpu {
+ unsigned long flags;
+
+ struct adreno_gpu base;
+ struct platform_device *pdev;
+
+ struct drm_gem_object *pm4_bo;
+ uint64_t pm4_iova;
+
+ struct drm_gem_object *pfp_bo;
+ uint64_t pfp_iova;
+
+ struct drm_gem_object *gpmu_bo;
+ uint64_t gpmu_iova;
+ uint32_t gpmu_dwords;
+
+ uint32_t lm_leakage;
+
+ struct msm_ringbuffer *cur_ring;
+ struct msm_ringbuffer *next_ring;
+
+ struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+ struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+
+ atomic_t preempt_state;
+ struct timer_list preempt_timer;
+
+ struct a5xx_smmu_info *smmu_info;
+ struct drm_gem_object *smmu_info_bo;
+ uint64_t smmu_info_iova;
+};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+/*
+ * In order to do lockless preemption we use a simple state machine to progress
+ * through the process.
+ *
+ * PREEMPT_NONE - no preemption in progress. Next state START.
+ * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
+ * states: TRIGGERED, NONE
+ * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next
+ * states: FAULTED, PENDING
+ * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger
+ * recovery. Next state: N/A
+ * PREEMPT_PENDING: Preemption complete interrupt fired - the callback is
+ * checking the success of the operation. Next state: FAULTED, NONE.
+ */
+
+enum preempt_state {
+ PREEMPT_NONE = 0,
+ PREEMPT_START,
+ PREEMPT_TRIGGERED,
+ PREEMPT_FAULTED,
+ PREEMPT_PENDING,
+};
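+
+/*
+ * A minimal sketch (an assumed helper, not defined in this header) of how
+ * transitions between the states above can be made safely without a lock -
+ * the compare-and-swap only succeeds if the state is still what the caller
+ * expects, so racing paths cannot both claim the same transition:
+ *
+ *	static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+ *			enum preempt_state old, enum preempt_state new)
+ *	{
+ *		return atomic_cmpxchg(&a5xx_gpu->preempt_state,
+ *			old, new) == old;
+ *	}
+ */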
+
+/*
+ * struct a5xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (64k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption. When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU aware registers in the record:
+ * @magic: Must always be 0x27C4BAFC
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
+ * the CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the performance counters
+ */
+struct a5xx_preempt_record {
+ uint32_t magic;
+ uint32_t info;
+ uint32_t data;
+ uint32_t cntl;
+ uint32_t rptr;
+ uint32_t wptr;
+ uint64_t rptr_addr;
+ uint64_t rbase;
+ uint64_t counter;
+};
+
+/* Magic identifier for the preemption record */
+#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
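+
+/*
+ * Sketch of how the CPU might seed a record before first use (ring_iova and
+ * rptr_iova stand in for GPU addresses supplied by the preemption setup
+ * code; the exact init path lives in the preemption code):
+ *
+ *	record->magic = A5XX_PREEMPT_RECORD_MAGIC;
+ *	record->info = 0;
+ *	record->data = 0;
+ *	record->rptr = 0;
+ *	record->wptr = 0;
+ *	record->rptr_addr = rptr_iova;
+ *	record->rbase = ring_iova;
+ */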
+
+/*
+ * Even though the structure above is only a few bytes, we need a full 64k to
+ * store the entire preemption record from the CP
+ */
+#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it onto the end of the allocation for the preemption record.
+ */
+#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+/*
+ * This is a global structure that the preemption code uses to switch in the
+ * pagetable for the preempted process - the code switches in whatever
+ * pagetable is recorded here after preempting in a new ring.
+ */
+struct a5xx_smmu_info {
+ uint32_t magic;
+ uint32_t _pad4;
+ uint64_t ttbr0;
+ uint32_t asid;
+ uint32_t contextidr;
+};
+
+#define A5XX_SMMU_INFO_MAGIC 0x3618CDA3UL
+
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+ uint32_t reg, uint32_t mask, uint32_t value)
+{
+ while (usecs--) {
+ udelay(1);
+ if ((gpu_read(gpu, reg) & mask) == value)
+ return 0;
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
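+
+/*
+ * Usage sketch: a5xx_gpmu_init() in a5xx_power.c polls for the GPMU
+ * "alive" handshake with
+ *
+ *   spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF, 0xBABEFACE)
+ *
+ * which returns 0 as soon as the masked register value matches, or
+ * -ETIMEDOUT after ~25us of polling.
+ */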
+
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+
+void a5xx_preempt_init(struct msm_gpu *gpu);
+void a5xx_preempt_hw_init(struct msm_gpu *gpu);
+void a5xx_preempt_trigger(struct msm_gpu *gpu);
+void a5xx_preempt_irq(struct msm_gpu *gpu);
+void a5xx_preempt_fini(struct msm_gpu *gpu);
+
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+
+/* Return true if we are in a preempt state */
+static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
+{
+	return atomic_read(&a5xx_gpu->preempt_state) != PREEMPT_NONE;
+}
+
+#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
new file mode 100644
index 000000000000..e04feaadefb9
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -0,0 +1,509 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth. These "registers" are defined by convention
+ * with the GPMU firmware and are not bound to any specific hardware design
+ */
+
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
+
+/* AGC_LM_CONFIG (A540+) */
+#define AGC_LM_CONFIG (136/4)
+#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
+#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
+#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
+#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
+#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
+#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
+#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
+
+#define AGC_LEVEL_CONFIG (140/4)
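+
+/*
+ * Rough sketch of the message area layout implied by the offsets above
+ * (dword offsets relative to AGC_MSG_BASE):
+ *
+ *   +0  state
+ *   +1  command
+ *   +3  payload size (in bytes)
+ *   +5  payload[0], payload[1], ...
+ *
+ * A message is committed by writing AGC_INIT_MSG_VALUE to
+ * AGC_INIT_MSG_MAGIC, as the setup functions below do.
+ */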
+
+static struct {
+ uint32_t reg;
+ uint32_t value;
+} a5xx_sequence_regs[] = {
+ { 0xB9A1, 0x00010303 },
+ { 0xB9A2, 0x13000000 },
+ { 0xB9A3, 0x00460020 },
+ { 0xB9A4, 0x10000000 },
+ { 0xB9A5, 0x040A1707 },
+ { 0xB9A6, 0x00010000 },
+ { 0xB9A7, 0x0E000904 },
+ { 0xB9A8, 0x10000000 },
+ { 0xB9A9, 0x01165000 },
+ { 0xB9AA, 0x000E0002 },
+ { 0xB9AB, 0x03884141 },
+ { 0xB9AC, 0x10000840 },
+ { 0xB9AD, 0x572A5000 },
+ { 0xB9AE, 0x00000003 },
+ { 0xB9AF, 0x00000000 },
+ { 0xB9B0, 0x10000000 },
+ { 0xB828, 0x6C204010 },
+ { 0xB829, 0x6C204011 },
+ { 0xB82A, 0x6C204012 },
+ { 0xB82B, 0x6C204013 },
+ { 0xB82C, 0x6C204014 },
+ { 0xB90F, 0x00000004 },
+ { 0xB910, 0x00000002 },
+ { 0xB911, 0x00000002 },
+ { 0xB912, 0x00000002 },
+ { 0xB913, 0x00000002 },
+ { 0xB92F, 0x00000004 },
+ { 0xB930, 0x00000005 },
+ { 0xB931, 0x00000005 },
+ { 0xB932, 0x00000005 },
+ { 0xB933, 0x00000005 },
+ { 0xB96F, 0x00000001 },
+ { 0xB970, 0x00000003 },
+ { 0xB94F, 0x00000004 },
+ { 0xB950, 0x0000000B },
+ { 0xB951, 0x0000000B },
+ { 0xB952, 0x0000000B },
+ { 0xB953, 0x0000000B },
+ { 0xB907, 0x00000019 },
+ { 0xB927, 0x00000019 },
+ { 0xB947, 0x00000019 },
+ { 0xB967, 0x00000019 },
+ { 0xB987, 0x00000019 },
+ { 0xB906, 0x00220001 },
+ { 0xB926, 0x00220001 },
+ { 0xB946, 0x00220001 },
+ { 0xB966, 0x00220001 },
+ { 0xB986, 0x00300000 },
+ { 0xAC40, 0x0340FF41 },
+ { 0xAC41, 0x03BEFED0 },
+ { 0xAC42, 0x00331FED },
+ { 0xAC43, 0x021FFDD3 },
+ { 0xAC44, 0x5555AAAA },
+ { 0xAC45, 0x5555AAAA },
+ { 0xB9BA, 0x00000008 },
+};
+
+/*
+ * Get the actual voltage value for the operating point at the specified
+ * frequency
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+ return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+}
+
+#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
+#define LM_DCVS_LIMIT 1
+#define LEVEL_CONFIG ~(0x303)
+
+/* Setup thermal limit management for A540 */
+static void a540_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ u32 max_power = 0;
+ u32 rate = gpu->gpufreq[gpu->active_level];
+ u32 config;
+
+ /* The battery current limiter isn't enabled for A540 */
+ config = AGC_LM_CONFIG_BCL_DISABLED;
+ config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
+
+ /* For now disable GPMU side throttling */
+ config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
+
+ /* Get the max-power from the device tree */
+ of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+ gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ /*
+ * For now just write the one voltage level - we will do more when we
+ * can do scaling
+ */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
+ PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));
+
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Setup thermal limit management for A530 */
+static void a530_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ uint32_t rate = gpu->gpufreq[gpu->active_level];
+ uint32_t tsens = 0;
+ uint32_t max_power = 0;
+ unsigned int i;
+
+ /* Write the block of sequence registers */
+ for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+ gpu_write(gpu, a5xx_sequence_regs[i].reg,
+ a5xx_sequence_regs[i].value);
+
+ of_property_read_u32(GPU_OF_NODE(gpu), "qcom,gpmu-tsens", &tsens);
+
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, tsens);
+ gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+ /* Write the voltage table */
+
+ /* Get the max-power from the device tree */
+ of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+ gpu_write(gpu, AGC_MSG_STATE, 1);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ /*
+ * For now just write the one voltage level - we will do more when we
+ * can do scaling
+ */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/* Enable the GPMU microcontroller */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (!a5xx_gpu->gpmu_dwords)
+ return 0;
+
+ /* Turn off protected mode for this operation */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Kick off the IB to load the GPMU microcode */
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ gpu->funcs->flush(gpu, ring);
+
+ /* This is "fatal" because the CP is left in a bad state */
+ if (!a5xx_idle(gpu, ring)) {
+ DRM_ERROR("%s: Unable to load GPMU firmwaren",
+ gpu->name);
+ return -EINVAL;
+ }
+
+ /* Clock gating setup for A530 targets */
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+ /* Kick off the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+ /*
+	 * Wait for the GPMU to respond. It isn't fatal if it doesn't - we just
+ * won't have advanced power collapse.
+ */
+ if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+ 0xBABEFACE)) {
+ DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+ gpu->name);
+ return 0;
+ }
+
+ if (!adreno_is_a530(adreno_gpu)) {
+ u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
+
+ if (val)
+ DRM_ERROR("%s: GPMU firmare initialization failed: %d\n",
+ gpu->name, val);
+ }
+
+ /* FIXME: Clear GPMU interrupts? */
+ return 0;
+}
+
+/* Enable limits management */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ /* This init sequence only applies to A530 */
+ if (!adreno_is_a530(adreno_gpu))
+ return;
+
+ gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+ gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+ gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+ u32 lm_limit = 6000;
+
+ /*
+	 * Set up the limits management.
+	 * First, do some generic setup:
+ */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+ of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-limit", &lm_limit);
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | lm_limit);
+
+ /* Now do the target specific setup */
+ if (adreno_is_a530(adreno_gpu))
+ a530_lm_setup(gpu);
+ else
+ a540_lm_setup(gpu);
+
+	/* Set up SP/TP power collapse */
+ a5xx_pc_init(gpu);
+
+ /* Start the GPMU */
+ ret = a5xx_gpmu_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Start the limits management */
+ a5xx_lm_enable(gpu);
+
+ return 0;
+}
+
+static int _read_header(unsigned int *data, uint32_t fwsize,
+ unsigned int *major, unsigned int *minor)
+{
+ uint32_t size;
+ unsigned int i;
+
+ /* First dword of the header is the header size */
+ if (fwsize < 4)
+ return -EINVAL;
+
+ size = data[0];
+
+ /* Make sure the header isn't too big and is a multiple of two */
+	if ((size % 2) || (size > 10) || (size > (fwsize >> 2)))
+ return -EINVAL;
+
+ /* Read the values in pairs */
+ for (i = 1; i < size; i += 2) {
+ switch (data[i]) {
+ case 1:
+ *major = data[i + 1];
+ break;
+ case 2:
+ *minor = data[i + 1];
+ break;
+ default:
+ /* Invalid values are non fatal */
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Make sure cur_major and cur_minor are greater than or equal to the minimum
+ * allowable major/minor
+ */
+static inline bool _check_gpmu_version(uint32_t cur_major, uint32_t cur_minor,
+ uint32_t min_major, uint32_t min_minor)
+{
+ return ((cur_major > min_major) ||
+ ((cur_major == min_major) && (cur_minor >= min_minor)));
+}
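+
+/*
+ * For example, with a hypothetical device tree minimum of 1.21:
+ *
+ *   _check_gpmu_version(2, 0, 1, 21)  -> true  (newer major always wins)
+ *   _check_gpmu_version(1, 25, 1, 21) -> true  (same major, newer minor)
+ *   _check_gpmu_version(1, 20, 1, 21) -> false (too old)
+ */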
+
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *drm = gpu->dev;
+ const char *name;
+ const struct firmware *fw;
+ uint32_t version[2] = { 0, 0 };
+ uint32_t dwords = 0, offset = 0;
+ uint32_t major = 0, minor = 0, bosize;
+ unsigned int *data, *ptr, *cmds;
+ unsigned int cmds_size;
+
+ if (a5xx_gpu->gpmu_bo)
+ return;
+
+ /*
+ * Read the firmware name from the device tree - if it doesn't exist
+ * then don't initialize the GPMU for this target
+ */
+ if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,gpmu-firmware",
+ &name))
+ return;
+
+ /*
+ * The version isn't mandatory, but if it exists, we need to enforce
+	 * that the version of the GPMU firmware matches or is newer than that
+	 * value
+ */
+ of_property_read_u32_array(GPU_OF_NODE(gpu), "qcom,gpmu-version",
+ version, 2);
+
+ /* Get the firmware */
+ if (request_firmware(&fw, name, drm->dev)) {
+ DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
+ gpu->name);
+ return;
+ }
+
+ data = (unsigned int *) fw->data;
+
+ /*
+ * The first dword is the size of the remaining data in dwords. Use it
+ * as a checksum of sorts and make sure it matches the actual size of
+ * the firmware that we read
+ */
+
+ if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
+ goto out;
+
+ /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+ if (data[1] != 2)
+ goto out;
+
+ /* Read the header and get the major/minor of the read firmware */
+ if (_read_header(&data[2], fw->size - 8, &major, &minor))
+ goto out;
+
+ if (!_check_gpmu_version(major, minor, version[0], version[1])) {
+ DRM_ERROR("%s: Loaded GPMU version %d.%d is too old\n",
+ gpu->name, major, minor);
+ goto out;
+ }
+
+ cmds = data + data[2] + 3;
+ cmds_size = data[0] - data[2] - 2;
+
+ /*
+	 * A single type4 opcode can only have so many values attached, so
+	 * add enough opcodes to load all the commands
+ */
+ bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
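+	/*
+	 * Worked example with hypothetical numbers: if TYPE4_MAX_PAYLOAD is
+	 * 0x7F dwords and cmds_size is 300 dwords, three PKT4 headers are
+	 * needed, so bosize = (300 + 2 + 1) << 2 = 1212 bytes.
+	 */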
+
+ mutex_lock(&drm->struct_mutex);
+ a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(a5xx_gpu->gpmu_bo))
+ goto err;
+
+ if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
+ &a5xx_gpu->gpmu_iova))
+ goto err;
+
+ ptr = msm_gem_vaddr(a5xx_gpu->gpmu_bo);
+ if (!ptr)
+ goto err;
+
+ while (cmds_size > 0) {
+ int i;
+ uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+ TYPE4_MAX_PAYLOAD : cmds_size;
+
+ ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+ _size);
+
+ for (i = 0; i < _size; i++)
+ ptr[dwords++] = *cmds++;
+
+ offset += _size;
+ cmds_size -= _size;
+ }
+
+ a5xx_gpu->gpmu_dwords = dwords;
+
+ goto out;
+
+err:
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ if (a5xx_gpu->gpmu_bo)
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+
+ a5xx_gpu->gpmu_bo = NULL;
+ a5xx_gpu->gpmu_iova = 0;
+ a5xx_gpu->gpmu_dwords = 0;
+
+out:
+	/* No need to keep that firmware lying around anymore */
+ release_firmware(fw);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
new file mode 100644
index 000000000000..648494c75abc
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -0,0 +1,383 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "a5xx_gpu.h"
+
+static void *alloc_kernel_bo(struct drm_device *drm, struct msm_gpu *gpu,
+ size_t size, uint32_t flags, struct drm_gem_object **bo,
+ u64 *iova)
+{
+ struct drm_gem_object *_bo;
+ u64 _iova;
+ void *ptr;
+ int ret;
+
+ mutex_lock(&drm->struct_mutex);
+ _bo = msm_gem_new(drm, size, flags);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(_bo))
+ return _bo;
+
+ ret = msm_gem_get_iova(_bo, gpu->aspace, &_iova);
+ if (ret)
+ goto out;
+
+ ptr = msm_gem_vaddr(_bo);
+ if (!ptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (bo)
+ *bo = _bo;
+ if (iova)
+ *iova = _iova;
+
+ return ptr;
+out:
+ drm_gem_object_unreference_unlocked(_bo);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'
+ */
+static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+ enum preempt_state old, enum preempt_state new)
+{
+ enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
+ old, new);
+
+ return (cur == old);
+}
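+
+/*
+ * For example, a5xx_preempt_trigger() below opens a preemption with
+ *
+ *   try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START)
+ *
+ * so if two CPUs race to trigger at once, only the one that observes
+ * PREEMPT_NONE proceeds; the loser sees the updated state and backs off.
+ */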
+
+/*
+ * Force the preemption state to the specified state. This is used in cases
+ * where the current state is known and won't change
+ */
+static inline void set_preempt_state(struct a5xx_gpu *gpu,
+ enum preempt_state new)
+{
+ /*
+ * preempt_state may be read by other cores trying to trigger a
+ * preemption or in the interrupt handler so barriers are needed
+ * before...
+ */
+ smp_mb__before_atomic();
+ atomic_set(&gpu->preempt_state, new);
+	/* ... and after */
+ smp_mb__after_atomic();
+}
+
+/* Write the most recent wptr for the given ring into the hardware */
+static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ unsigned long flags;
+ uint32_t wptr;
+
+ if (!ring)
+ return;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+/* Return the highest priority ringbuffer with something in it */
+static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned long flags;
+ int i;
+
+ for (i = gpu->nr_rings - 1; i >= 0; i--) {
+ bool empty;
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ spin_lock_irqsave(&ring->lock, flags);
+ empty = (get_wptr(ring) == adreno_gpu->memptrs->rptr[ring->id]);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ if (!empty)
+ return ring;
+ }
+
+ return NULL;
+}
+
+static void a5xx_preempt_timer(unsigned long data)
+{
+ struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+ struct msm_gpu *gpu = &a5xx_gpu->base.base;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
+ return;
+
+ dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+/* Try to trigger a preemption switch */
+void a5xx_preempt_trigger(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned long flags;
+ struct msm_ringbuffer *ring;
+
+ if (gpu->nr_rings == 1)
+ return;
+
+ /*
+ * Try to start preemption by moving from NONE to START. If
+ * unsuccessful, a preemption is already in flight
+ */
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+ return;
+
+ /* Get the next ring to preempt to */
+ ring = get_next_ring(gpu);
+
+ /*
+ * If no ring is populated or the highest priority ring is the current
+	 * one, do nothing except update the wptr to the latest and greatest
+ */
+ if (!ring || (a5xx_gpu->cur_ring == ring)) {
+ update_wptr(gpu, ring);
+
+ /* Set the state back to NONE */
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+ return;
+ }
+
+ /* Make sure the wptr doesn't update while we're in motion */
+ spin_lock_irqsave(&ring->lock, flags);
+ a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Do read barrier to make sure we have updated pagetable info */
+ rmb();
+
+ /* Set the SMMU info for the preemption */
+ if (a5xx_gpu->smmu_info) {
+ a5xx_gpu->smmu_info->ttbr0 =
+ adreno_gpu->memptrs->ttbr0[ring->id];
+ a5xx_gpu->smmu_info->contextidr =
+ adreno_gpu->memptrs->contextidr[ring->id];
+ }
+
+ /* Set the address of the incoming preemption record */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+ REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
+ a5xx_gpu->preempt_iova[ring->id]);
+
+ a5xx_gpu->next_ring = ring;
+
+ /* Start a timer to catch a stuck preemption */
+ mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
+
+ /* Set the preemption state to triggered */
+ set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
+
+ /* Make sure everything is written before hitting the button */
+ wmb();
+
+ /* And actually start the preemption */
+ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+}
+
+void a5xx_preempt_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
+ return;
+
+ /* Delete the preemption watchdog timer */
+ del_timer(&a5xx_gpu->preempt_timer);
+
+ /*
+ * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
+	 * firing the interrupt, but there is a non-zero chance of a hardware
+ * condition or a software race that could set it again before we have a
+ * chance to finish. If that happens, log and go for recovery
+ */
+ status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
+ if (unlikely(status)) {
+ set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
+ dev_err(dev->dev, "%s: Preemption failed to complete\n",
+ gpu->name);
+ queue_work(priv->wq, &gpu->recover_work);
+ return;
+ }
+
+ a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
+ a5xx_gpu->next_ring = NULL;
+
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+}
+
+void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring;
+ int i;
+
+ if (gpu->nr_rings > 1) {
+ /* Clear the preemption records */
+ FOR_EACH_RING(gpu, ring, i) {
+ if (ring) {
+ a5xx_gpu->preempt[ring->id]->wptr = 0;
+ a5xx_gpu->preempt[ring->id]->rptr = 0;
+ a5xx_gpu->preempt[ring->id]->rbase = ring->iova;
+ }
+ }
+ }
+
+ /* Tell the CP where to find the smmu_info buffer */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+ REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+ a5xx_gpu->smmu_info_iova);
+
+ /* Reset the preemption state */
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+
+ /* Always come up on rb 0 */
+ a5xx_gpu->cur_ring = gpu->rb[0];
+}
+
+static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a5xx_preempt_record *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+
+ ptr = alloc_kernel_bo(gpu->dev, gpu,
+ A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
+ &bo, &iova);
+
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ a5xx_gpu->preempt_bo[ring->id] = bo;
+ a5xx_gpu->preempt_iova[ring->id] = iova;
+ a5xx_gpu->preempt[ring->id] = ptr;
+
+ /* Set up the defaults on the preemption record */
+
+ ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
+ ptr->info = 0;
+ ptr->data = 0;
+ ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
+ ptr->rptr_addr = rbmemptr(adreno_gpu, ring->id, rptr);
+ ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
+
+ return 0;
+}
+
+void a5xx_preempt_fini(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring;
+ int i;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring || !a5xx_gpu->preempt_bo[i])
+ continue;
+
+ if (a5xx_gpu->preempt_iova[i])
+ msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
+
+ drm_gem_object_unreference_unlocked(a5xx_gpu->preempt_bo[i]);
+
+ a5xx_gpu->preempt_bo[i] = NULL;
+ }
+
+ if (a5xx_gpu->smmu_info_bo) {
+ if (a5xx_gpu->smmu_info_iova)
+ msm_gem_put_iova(a5xx_gpu->smmu_info_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->smmu_info_bo);
+ a5xx_gpu->smmu_info_bo = NULL;
+ }
+}
+
+void a5xx_preempt_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring;
+ struct a5xx_smmu_info *ptr;
+ struct drm_gem_object *bo;
+ uint64_t iova;
+ int i;
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings <= 1)
+ return;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
+
+ if (preempt_init_ring(a5xx_gpu, ring))
+ goto fail;
+ }
+
+ if (msm_iommu_allow_dynamic(gpu->aspace->mmu)) {
+ ptr = alloc_kernel_bo(gpu->dev, gpu,
+ sizeof(struct a5xx_smmu_info),
+ MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
+ &bo, &iova);
+
+ if (IS_ERR(ptr))
+ goto fail;
+
+ ptr->magic = A5XX_SMMU_INFO_MAGIC;
+
+ a5xx_gpu->smmu_info_bo = bo;
+ a5xx_gpu->smmu_info_iova = iova;
+ a5xx_gpu->smmu_info = ptr;
+ }
+
+ setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
+ (unsigned long) a5xx_gpu);
+
+ return;
+fail:
+ /*
+ * On any failure our adventure is over. Clean up and
+ * set nr_rings to 1 to force preemption off
+ */
+ a5xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
new file mode 100644
index 000000000000..5a2edb0ea518
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
@@ -0,0 +1,796 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+#include "msm_snapshot_api.h"
+
+#define A5XX_NR_SHADER_BANKS 4
+
+/*
+ * These are a list of the registers that need to be read through the HLSQ
+ * aperture through the crashdumper. These are not nominally accessible from
+ * the CPU on a secure platform.
+ */
+static const struct {
+ u32 type;
+ u32 regoffset;
+ u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+	{ 0x35, 0xE00, 0x32 }, /* HLSQ non-context */
+ { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
+ { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
+ { 0x32, 0xE780, 0x62 }, /* HLSQ 3D context 0 */
+ { 0x34, 0xEF80, 0x62 }, /* HLSQ 3D context 1 */
+ { 0x3f, 0x0EC0, 0x40 }, /* SP non-context */
+ { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
+ { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
+ { 0x3e, 0xE580, 0x180 }, /* SP 3D context 0 */
+ { 0x3c, 0xED80, 0x180 }, /* SP 3D context 1 */
+ { 0x3a, 0x0F00, 0x1c }, /* TP non-context */
+ { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
+ { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
+ { 0x39, 0xE700, 0x80 }, /* TP 3D context 0 */
+ { 0x37, 0xEF00, 0x80 }, /* TP 3D context 1 */
+};
+
+/*
+ * The debugbus registers contain device state that presumably makes
+ * sense to the hardware designers. 'count' is the number of indexes to read;
+ * each index value is 64 bits
+ */
+static const struct {
+ enum a5xx_debugbus id;
+ u32 count;
+} a5xx_debugbus_blocks[] = {
+ { A5XX_RBBM_DBGBUS_CP, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBBM, 0x100, },
+ { A5XX_RBBM_DBGBUS_HLSQ, 0x100, },
+ { A5XX_RBBM_DBGBUS_UCHE, 0x100, },
+ { A5XX_RBBM_DBGBUS_DPM, 0x100, },
+ { A5XX_RBBM_DBGBUS_TESS, 0x100, },
+ { A5XX_RBBM_DBGBUS_PC, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFDP, 0x100, },
+ { A5XX_RBBM_DBGBUS_VPC, 0x100, },
+ { A5XX_RBBM_DBGBUS_TSE, 0x100, },
+ { A5XX_RBBM_DBGBUS_RAS, 0x100, },
+ { A5XX_RBBM_DBGBUS_VSC, 0x100, },
+ { A5XX_RBBM_DBGBUS_COM, 0x100, },
+ { A5XX_RBBM_DBGBUS_DCOM, 0x100, },
+ { A5XX_RBBM_DBGBUS_LRZ, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_DSP, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCUFCHE, 0x100, },
+ { A5XX_RBBM_DBGBUS_GPMU, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBP, 0x100, },
+ { A5XX_RBBM_DBGBUS_HM, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBBM_CFG, 0x100, },
+ { A5XX_RBBM_DBGBUS_VBIF_CX, 0x100, },
+ { A5XX_RBBM_DBGBUS_GPC, 0x100, },
+ { A5XX_RBBM_DBGBUS_LARC, 0x100, },
+ { A5XX_RBBM_DBGBUS_HLSQ_SPTP, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_3, 0x100, },
+};
+
+/*
+ * The shader blocks are read from the HLSQ aperture - each one has its own
+ * identifier for the aperture read
+ */
+static const struct {
+ enum a5xx_shader_blocks id;
+ u32 size;
+} a5xx_shader_blocks[] = {
+ {A5XX_TP_W_MEMOBJ, 0x200},
+ {A5XX_TP_W_MIPMAP_BASE, 0x3C0},
+ {A5XX_TP_W_SAMPLER_TAG, 0x40},
+ {A5XX_TP_S_3D_SAMPLER, 0x80},
+ {A5XX_TP_S_3D_SAMPLER_TAG, 0x20},
+ {A5XX_TP_S_CS_SAMPLER, 0x40},
+ {A5XX_TP_S_CS_SAMPLER_TAG, 0x10},
+ {A5XX_SP_W_CONST, 0x800},
+ {A5XX_SP_W_CB_SIZE, 0x30},
+ {A5XX_SP_W_CB_BASE, 0xF0},
+ {A5XX_SP_W_STATE, 0x1},
+ {A5XX_SP_S_3D_CONST, 0x800},
+ {A5XX_SP_S_3D_CB_SIZE, 0x28},
+ {A5XX_SP_S_3D_UAV_SIZE, 0x80},
+ {A5XX_SP_S_CS_CONST, 0x400},
+ {A5XX_SP_S_CS_CB_SIZE, 0x8},
+ {A5XX_SP_S_CS_UAV_SIZE, 0x80},
+ {A5XX_SP_S_3D_CONST_DIRTY, 0x12},
+ {A5XX_SP_S_3D_CB_SIZE_DIRTY, 0x1},
+ {A5XX_SP_S_3D_UAV_SIZE_DIRTY, 0x2},
+ {A5XX_SP_S_CS_CONST_DIRTY, 0xA},
+ {A5XX_SP_S_CS_CB_SIZE_DIRTY, 0x1},
+ {A5XX_SP_S_CS_UAV_SIZE_DIRTY, 0x2},
+ {A5XX_HLSQ_ICB_DIRTY, 0xB},
+ {A5XX_SP_POWER_RESTORE_RAM_TAG, 0xA},
+ {A5XX_TP_POWER_RESTORE_RAM_TAG, 0xA},
+ {A5XX_TP_W_SAMPLER, 0x80},
+ {A5XX_TP_W_MEMOBJ_TAG, 0x40},
+ {A5XX_TP_S_3D_MEMOBJ, 0x200},
+ {A5XX_TP_S_3D_MEMOBJ_TAG, 0x20},
+ {A5XX_TP_S_CS_MEMOBJ, 0x100},
+ {A5XX_TP_S_CS_MEMOBJ_TAG, 0x10},
+ {A5XX_SP_W_INSTR, 0x800},
+ {A5XX_SP_W_UAV_SIZE, 0x80},
+ {A5XX_SP_W_UAV_BASE, 0x80},
+ {A5XX_SP_W_INST_TAG, 0x40},
+ {A5XX_SP_S_3D_INSTR, 0x800},
+ {A5XX_SP_S_3D_CB_BASE, 0xC8},
+ {A5XX_SP_S_3D_UAV_BASE, 0x80},
+ {A5XX_SP_S_CS_INSTR, 0x400},
+ {A5XX_SP_S_CS_CB_BASE, 0x28},
+ {A5XX_SP_S_CS_UAV_BASE, 0x80},
+ {A5XX_SP_S_3D_INSTR_DIRTY, 0x1},
+ {A5XX_SP_S_3D_CB_BASE_DIRTY, 0x5},
+ {A5XX_SP_S_3D_UAV_BASE_DIRTY, 0x2},
+ {A5XX_SP_S_CS_INSTR_DIRTY, 0x1},
+ {A5XX_SP_S_CS_CB_BASE_DIRTY, 0x1},
+ {A5XX_SP_S_CS_UAV_BASE_DIRTY, 0x2},
+ {A5XX_HLSQ_ICB, 0x200},
+ {A5XX_HLSQ_ICB_CB_BASE_DIRTY, 0x4},
+ {A5XX_SP_POWER_RESTORE_RAM, 0x140},
+ {A5XX_TP_POWER_RESTORE_RAM, 0x40},
+};
+
+/*
+ * The A5XX architecture has a built-in engine to asynchronously dump
+ * registers from the GPU. It is used to accelerate the copy of hundreds
+ * (thousands) of registers and as a safe way to access registers that might
+ * have secure data in them (if the GPU is in secure mode, the crashdumper
+ * returns bogus values for those registers). On a fully secured device the
+ * CPU will be blocked from accessing those registers directly and so the
+ * crashdump is the
+ * only way that we can access context registers and the shader banks for debug
+ * purposes.
+ *
+ * The downside of the crashdump is that it requires access to GPU accessible
+ * memory (so the VBIF and the bus and the SMMU need to be up and working) and
+ * you need enough memory to write the script for the crashdumper and to store
+ * the data that you are dumping so there is a balancing act between the work to
+ * set up a crash dumper and the value we get out of it.
+ */
+
+/*
+ * The crashdump uses a pseudo-script format to read and write registers. Each
+ * operation is two 64 bit values.
+ *
+ * READ:
+ * [qword 0] [63:00] - The absolute IOVA address target for the register value
+ * [qword 1] [63:44] - the dword address of the register offset to read
+ * [15:00] - Number of dwords to read at once
+ *
+ * WRITE:
+ * [qword 0] [31:0] 32 bit value to write to the register
+ * [qword 1] [63:44] - the dword address of the register offset to write
+ * [21:21] - set 1 to write
+ * [15:00] - Number of dwords to write (usually 1)
+ *
+ * At the bottom of the script, write a pair of zero quadwords to mark the end.
+ */
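+
+/*
+ * For illustration (hypothetical values): a script entry that reads 4
+ * dwords starting at dword-register 0x800 into IOVA 0x10000000 would be
+ * encoded as
+ *
+ *   qword[0] = 0x10000000;                       destination IOVA
+ *   qword[1] = ((u64) 0x800 << 44) | 4;          register offset and count
+ *
+ * and a write of the value 0x1 to the same register as
+ *
+ *   qword[0] = 0x1;
+ *   qword[1] = ((u64) 0x800 << 44) | (1 << 21) | 1;
+ *
+ * matching CRASHDUMP_SCRIPT_READ() and CRASHDUMP_SCRIPT_WRITE() below.
+ */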
+struct crashdump {
+ struct drm_gem_object *bo;
+ void *ptr;
+ u64 iova;
+ u32 index;
+};
+
+#define CRASHDUMP_BO_SIZE (SZ_1M)
+#define CRASHDUMP_SCRIPT_SIZE (256 * SZ_1K)
+#define CRASHDUMP_DATA_SIZE (CRASHDUMP_BO_SIZE - CRASHDUMP_SCRIPT_SIZE)
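+
+/*
+ * i.e. the 1MB buffer object is split into a 256KB script area followed by
+ * a 768KB data area; CRASHDUMP_SCRIPT_READ() points each read at an offset
+ * inside the data area, past CRASHDUMP_SCRIPT_SIZE.
+ */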
+
+static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ struct drm_device *drm = gpu->dev;
+ int ret = -ENOMEM;
+
+ crashdump->bo = msm_gem_new(drm, CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED);
+ if (IS_ERR(crashdump->bo)) {
+ ret = PTR_ERR(crashdump->bo);
+ crashdump->bo = NULL;
+ return ret;
+ }
+
+ crashdump->ptr = msm_gem_vaddr_locked(crashdump->bo);
+ if (!crashdump->ptr)
+ goto out;
+
+ ret = msm_gem_get_iova_locked(crashdump->bo, gpu->aspace,
+ &crashdump->iova);
+
+out:
+ if (ret) {
+ drm_gem_object_unreference(crashdump->bo);
+ crashdump->bo = NULL;
+ }
+
+ return ret;
+}
+
+static int crashdump_run(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ if (!crashdump->ptr || !crashdump->index)
+ return -EINVAL;
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+ lower_32_bits(crashdump->iova));
+ gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_HI,
+ upper_32_bits(crashdump->iova));
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+ return spin_until(gpu_read(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL) & 0x04);
+}
+
+static void crashdump_destroy(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ if (!crashdump->bo)
+ return;
+
+ if (crashdump->iova)
+ msm_gem_put_iova(crashdump->bo, gpu->aspace);
+
+ drm_gem_object_unreference(crashdump->bo);
+
+ memset(crashdump, 0, sizeof(*crashdump));
+}
+
+static inline void CRASHDUMP_SCRIPT_WRITE(struct crashdump *crashdump,
+ u32 reg, u32 val)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ /* This is the value to write */
+ ptr[0] = (u64) val;
+
+ /*
+ * This triggers a write to the specified register. 1 is the size of
+ * the write in dwords
+ */
+ ptr[1] = (((u64) reg) << 44) | (1 << 21) | 1;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+static inline void CRASHDUMP_SCRIPT_READ(struct crashdump *crashdump,
+ u32 reg, u32 count, u32 offset)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ if (WARN_ON(offset + (count * sizeof(u32)) >= CRASHDUMP_DATA_SIZE))
+ return;
+
+ ptr[0] = (u64) crashdump->iova + CRASHDUMP_SCRIPT_SIZE + offset;
+ ptr[1] = (((u64) reg) << 44) | count;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+static inline void *CRASHDUMP_DATA_PTR(struct crashdump *crashdump, u32 offset)
+{
+ if (WARN_ON(!crashdump->ptr || offset >= CRASHDUMP_DATA_SIZE))
+ return NULL;
+
+ return crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset;
+}
+
+static inline u32 CRASHDUMP_DATA_READ(struct crashdump *crashdump, u32 offset)
+{
+ return *((u32 *) CRASHDUMP_DATA_PTR(crashdump, offset));
+}
+
+static inline void CRASHDUMP_RESET(struct crashdump *crashdump)
+{
+ crashdump->index = 0;
+}
+
+static inline void CRASHDUMP_END(struct crashdump *crashdump)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ if (WARN_ON((crashdump->index + (2 * sizeof(u64)))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ ptr[0] = 0;
+ ptr[1] = 0;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+static u32 _crashdump_read_hlsq_aperture(struct crashdump *crashdump,
+ u32 offset, u32 statetype, u32 bank,
+ u32 count)
+{
+ CRASHDUMP_SCRIPT_WRITE(crashdump, REG_A5XX_HLSQ_DBG_READ_SEL,
+ A5XX_HLSQ_DBG_READ_SEL_STATETYPE(statetype) | bank);
+
+ CRASHDUMP_SCRIPT_READ(crashdump, REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE,
+ count, offset);
+
+ return count * sizeof(u32);
+}
+
+static u32 _copy_registers(struct msm_snapshot *snapshot,
+ struct crashdump *crashdump, u32 reg, u32 count,
+ u32 offset)
+{
+ int i;
+ u32 *ptr = (u32 *) (crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset);
+ /*
+ * Write the offset of the first register of the group and the number of
+ * registers in the group
+ */
+ SNAPSHOT_WRITE_U32(snapshot, ((count << 16) | reg));
+
+ /* Followed by each register value in the group */
+ for (i = 0; i < count; i++)
+ SNAPSHOT_WRITE_U32(snapshot, ptr[i]);
+
+ return count * sizeof(u32);
+}
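+
+/*
+ * For example (hypothetical values), a group of 3 registers starting at
+ * offset 0xE00 is emitted into the snapshot as four dwords:
+ *
+ *   0x00030E00   (count << 16) | reg
+ *   <value of 0xE00>
+ *   <value of 0xE01>
+ *   <value of 0xE02>
+ */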
+
+/*
+ * Return the number of registers in a register group from the
+ * adreno_gpu->registers table
+ */
+static inline u32 REG_COUNT(const unsigned int *ptr)
+{
+ return (ptr[1] - ptr[0]) + 1;
+}
+
+/*
+ * Capture what registers we can from the CPU in case the crashdumper is
+ * unavailable or broken. This will omit the SP, TP and HLSQ registers, but
+ * you'll get everything else and that ain't bad
+ */
+static void a5xx_snapshot_registers_cpu(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_snapshot_regs header;
+ u32 regcount = 0, groups = 0;
+ int i;
+
+ /*
+ * Before we write the section we need to figure out how big our data
+ * section will be
+ */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ regcount += REG_COUNT(&(adreno_gpu->registers[i]));
+ groups++;
+ }
+
+ header.count = groups;
+
+ /*
+ * We need one dword for each group and then one dword for each register
+ * value in that group
+ */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+ regcount + groups))
+ return;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ u32 count = REG_COUNT(&(adreno_gpu->registers[i]));
+ u32 reg = adreno_gpu->registers[i];
+ int j;
+
+ /* Write the offset and count for the group */
+ SNAPSHOT_WRITE_U32(snapshot, (count << 16) | reg);
+
+ /* Write each value in the group */
+ for (j = 0; j < count; j++)
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, reg++));
+ }
+}
+
+static void a5xx_snapshot_registers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_snapshot_regs header;
+ struct crashdump *crashdump = snapshot->priv;
+ u32 offset = 0, regcount = 0, groups = 0;
+ int i;
+
+ /*
+ * First snapshot all the registers that we can from the CPU. Do this
+ * because the crashdumper has a tendency to "taint" the value of some
+ * of the registers (because the GPU implements the crashdumper) so we
+ * only want to use the crash dump facility if we have to
+ */
+ a5xx_snapshot_registers_cpu(gpu, snapshot);
+
+ if (!crashdump)
+ return;
+
+ CRASHDUMP_RESET(crashdump);
+
+ /* HLSQ and context registers behind the aperture */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 count = a5xx_hlsq_aperture_regs[i].count;
+
+ offset += _crashdump_read_hlsq_aperture(crashdump, offset,
+ a5xx_hlsq_aperture_regs[i].type, 0, count);
+ regcount += count;
+
+ groups++;
+ }
+
+ CRASHDUMP_END(crashdump);
+
+ if (crashdump_run(gpu, crashdump))
+ return;
+
+ header.count = groups;
+
+ /*
+ * The size of the data will be one dword for each "group" of registers,
+ * and then one dword for each of the registers in that group
+ */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+ groups + regcount))
+ return;
+
+ /* Copy the registers to the snapshot */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+ offset += _copy_registers(snapshot, crashdump,
+ a5xx_hlsq_aperture_regs[i].regoffset,
+ a5xx_hlsq_aperture_regs[i].count, offset);
+}
+
+static void _a5xx_snapshot_shader_bank(struct msm_snapshot *snapshot,
+ struct crashdump *crashdump, u32 block, u32 bank,
+ u32 size, u32 offset)
+{
+ void *src;
+
+ struct msm_snapshot_shader header = {
+ .type = block,
+ .index = bank,
+ .size = size,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_SHADER, size))
+ return;
+
+ src = CRASHDUMP_DATA_PTR(crashdump, offset);
+
+ if (src)
+ SNAPSHOT_MEMCPY(snapshot, src, size * sizeof(u32));
+}
+
+static void a5xx_snapshot_shader_memory(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct crashdump *crashdump = snapshot->priv;
+ u32 offset = 0;
+ int i;
+
+ /* We can only get shader memory through the crashdump */
+ if (!crashdump)
+ return;
+
+ CRASHDUMP_RESET(crashdump);
+
+ /* For each shader block */
+ for (i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+ int j;
+
+ /* For each block, dump 4 banks */
+ for (j = 0; j < A5XX_NR_SHADER_BANKS; j++)
+ offset += _crashdump_read_hlsq_aperture(crashdump,
+ offset, a5xx_shader_blocks[i].id, j,
+ a5xx_shader_blocks[i].size);
+ }
+
+ CRASHDUMP_END(crashdump);
+
+ /* If the crashdump fails we can't get shader memory any other way */
+ if (crashdump_run(gpu, crashdump))
+ return;
+
+ /* Each bank of each shader gets its own snapshot section */
+ for (offset = 0, i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+ int j;
+
+ for (j = 0; j < A5XX_NR_SHADER_BANKS; j++) {
+ _a5xx_snapshot_shader_bank(snapshot, crashdump,
+ a5xx_shader_blocks[i].id, j,
+ a5xx_shader_blocks[i].size, offset);
+ offset += a5xx_shader_blocks[i].size * sizeof(u32);
+ }
+ }
+}
+
+#define A5XX_NUM_AXI_ARB_BLOCKS 2
+#define A5XX_NUM_XIN_BLOCKS 4
+#define VBIF_DATA_SIZE ((16 * A5XX_NUM_AXI_ARB_BLOCKS) + \
+ (18 * A5XX_NUM_XIN_BLOCKS) + (12 * A5XX_NUM_XIN_BLOCKS))
+
+static void a5xx_snapshot_debugbus_vbif(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debugbus header = {
+ .id = A5XX_RBBM_DBGBUS_VBIF,
+ .count = VBIF_DATA_SIZE,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+ VBIF_DATA_SIZE))
+ return;
+
+ gpu_rmw(gpu, REG_A5XX_VBIF_CLKON, A5XX_VBIF_CLKON_FORCE_ON_TESTBUS,
+ A5XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 0);
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS_OUT_CTRL,
+ A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN);
+
+ for (i = 0; i < A5XX_NUM_AXI_ARB_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << (i + 16));
+ for (j = 0; j < 16; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+ A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+ for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
+ for (j = 0; j < 18; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+ A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+ for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
+ for (j = 0; j < 12; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL1,
+ A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+}
+
+static void a5xx_snapshot_debugbus_block(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, u32 block, u32 count)
+{
+ int i;
+ struct msm_snapshot_debugbus header = {
+ .id = block,
+ .count = count * 2, /* Each value is 2 dwords */
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+ (count * 2)))
+ return;
+
+ for (i = 0; i < count; i++) {
+ u32 reg = A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(i) |
+ A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_A, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_B, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_C, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_D, reg);
+
+ /* Each debugbus entry is a quad word */
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1));
+ }
+}
+
+static void a5xx_snapshot_debugbus(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_CNTLM,
+ A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(0xF));
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_debugbus_blocks); i++)
+ a5xx_snapshot_debugbus_block(gpu, snapshot,
+ a5xx_debugbus_blocks[i].id,
+ a5xx_debugbus_blocks[i].count);
+
+ /* VBIF is special and not in a good way */
+ a5xx_snapshot_debugbus_vbif(gpu, snapshot);
+}
+
+static void a5xx_snapshot_cp_merciu(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ unsigned int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_MERCIU,
+ .size = 64 << 1, /* Data size is 2 dwords per entry */
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64 << 1))
+ return;
+
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_DBG_ADDR, 0);
+ for (i = 0; i < 64; i++) {
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_1));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_2));
+ }
+}
+
+static void a5xx_snapshot_cp_roq(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_ROQ,
+ .size = 512,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 512))
+ return;
+
+ gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
+ for (i = 0; i < 512; i++)
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA));
+}
+
+static void a5xx_snapshot_cp_meq(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_MEQ,
+ .size = 64,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64))
+ return;
+
+ gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
+ for (i = 0; i < 64; i++)
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
+}
+
+static void a5xx_snapshot_indexed_registers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, u32 addr, u32 data,
+ u32 count)
+{
+ unsigned int i;
+ struct msm_snapshot_indexed_regs header = {
+ .index_reg = addr,
+ .data_reg = data,
+ .start = 0,
+ .count = count,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_INDEXED_REGS,
+ count))
+ return;
+
+ for (i = 0; i < count; i++) {
+ gpu_write(gpu, addr, i);
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, data));
+ }
+}
+
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ struct crashdump crashdump = { 0 };
+
+ if (!crashdump_init(gpu, &crashdump))
+ snapshot->priv = &crashdump;
+
+ /* To accurately read all registers, disable hardware clock gating */
+ a5xx_set_hwcg(gpu, false);
+
+ /* Kick it up to the generic level */
+ adreno_snapshot(gpu, snapshot);
+
+ /* Read the GPU registers */
+ a5xx_snapshot_registers(gpu, snapshot);
+
+ /* Read the shader memory banks */
+ a5xx_snapshot_shader_memory(gpu, snapshot);
+
+ /* Read the debugbus registers */
+ a5xx_snapshot_debugbus(gpu, snapshot);
+
+ /* PFP data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_PFP_STAT_ADDR, REG_A5XX_CP_PFP_STAT_DATA, 36);
+
+ /* ME data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_ME_STAT_ADDR, REG_A5XX_CP_ME_STAT_DATA, 29);
+
+ /* DRAW_STATE data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_DRAW_STATE_ADDR, REG_A5XX_CP_DRAW_STATE_DATA,
+ 256);
+
+ /* ME cache */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_ME_UCODE_DBG_ADDR, REG_A5XX_CP_ME_UCODE_DBG_DATA,
+ 0x53F);
+
+ /* PFP cache */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_PFP_UCODE_DBG_ADDR, REG_A5XX_CP_PFP_UCODE_DBG_DATA,
+ 0x53F);
+
+ /* ME queue */
+ a5xx_snapshot_cp_meq(gpu, snapshot);
+
+ /* CP ROQ */
+ a5xx_snapshot_cp_roq(gpu, snapshot);
+
+ /* CP MERCIU */
+ a5xx_snapshot_cp_merciu(gpu, snapshot);
+
+ crashdump_destroy(gpu, &crashdump);
+ snapshot->priv = NULL;
+
+ /* Re-enable HWCG */
+ a5xx_set_hwcg(gpu, true);
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index c304468cf2bd..1cf84479e447 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -119,6 +121,25 @@ enum adreno_rb_copy_control_mode {
RB_COPY_DEPTH_STENCIL = 5,
};
+enum a3xx_rop_code {
+ ROP_CLEAR = 0,
+ ROP_NOR = 1,
+ ROP_AND_INVERTED = 2,
+ ROP_COPY_INVERTED = 3,
+ ROP_AND_REVERSE = 4,
+ ROP_INVERT = 5,
+ ROP_XOR = 6,
+ ROP_NAND = 7,
+ ROP_AND = 8,
+ ROP_EQUIV = 9,
+ ROP_NOOP = 10,
+ ROP_OR_INVERTED = 11,
+ ROP_COPY = 12,
+ ROP_OR_REVERSE = 13,
+ ROP_OR = 14,
+ ROP_SET = 15,
+};
+
enum a3xx_render_mode {
RB_RENDERING_PASS = 0,
RB_TILING_PASS = 1,
@@ -154,6 +175,14 @@ enum a3xx_color_swap {
XYZW = 3,
};
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 1ea2df524fac..a498a60cd52d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -19,10 +19,6 @@
#include "adreno_gpu.h"
-#if defined(DOWNSTREAM_CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
-# include <mach/kgsl.h>
-#endif
-
#define ANY_ID 0xff
bool hang_debug = false;
@@ -31,6 +27,7 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
static const struct adreno_info gpulist[] = {
{
@@ -73,6 +70,30 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a420_pfp.fw",
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(4, 3, 0, ANY_ID),
+ .revn = 430,
+ .name = "A430",
+ .pm4fw = "a420_pm4.fw",
+ .pfpfw = "a420_pfp.fw",
+ .gmem = (SZ_1M + SZ_512K),
+ .init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 3, 0, ANY_ID),
+ .revn = 530,
+ .name = "A530",
+ .pm4fw = "a530_pm4.fw",
+ .pfpfw = "a530_pfp.fw",
+ .gmem = SZ_1M,
+ .init = a5xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 4, 0, ANY_ID),
+ .revn = 540,
+ .name = "A540",
+ .pm4fw = "a530_pm4.fw",
+ .pfpfw = "a530_pfp.fw",
+ .gmem = SZ_1M,
+ .init = a5xx_gpu_init,
},
};
@@ -82,6 +103,8 @@ MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
MODULE_FIRMWARE("a420_pm4.fw");
MODULE_FIRMWARE("a420_pfp.fw");
+MODULE_FIRMWARE("a530_fm4.fw");
+MODULE_FIRMWARE("a530_pfp.fw");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -144,12 +167,16 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_resume(gpu);
mutex_unlock(&dev->struct_mutex);
+
+ disable_irq(gpu->irq);
+
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
} else {
+ enable_irq(gpu->irq);
/* give inactive pm a chance to kick in: */
msm_gpu_retire(gpu);
}
@@ -168,12 +195,16 @@ static void set_gpu_pdev(struct drm_device *dev,
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
-#ifdef CONFIG_OF
- struct device_node *child, *node = dev->of_node;
- u32 val;
+ uint32_t val = 0;
int ret;
- ret = of_property_read_u32(node, "qcom,chipid", &val);
+ /*
+ * Read the chip ID from the device tree at bind time - we use this
+ * information to load the correct functions. All the rest of the
+ * (extensive) device tree probing should happen in the GPU specific
+ * code
+ */
+ ret = of_property_read_u32(dev->of_node, "qcom,chipid", &val);
if (ret) {
dev_err(dev, "could not find chipid: %d\n", ret);
return ret;
@@ -182,76 +213,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
config.rev = ADRENO_REV((val >> 24) & 0xff,
(val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
- /* find clock rates: */
- config.fast_rate = 0;
- config.slow_rate = ~0;
- for_each_child_of_node(node, child) {
- if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
- struct device_node *pwrlvl;
- for_each_child_of_node(child, pwrlvl) {
- ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
- if (ret) {
- dev_err(dev, "could not find gpu-freq: %d\n", ret);
- return ret;
- }
- config.fast_rate = max(config.fast_rate, val);
- config.slow_rate = min(config.slow_rate, val);
- }
- }
- }
-
- if (!config.fast_rate) {
- dev_err(dev, "could not find clk rates\n");
- return -ENXIO;
- }
-
-#else
- struct kgsl_device_platform_data *pdata = dev->platform_data;
- uint32_t version = socinfo_get_version();
- if (cpu_is_apq8064ab()) {
- config.fast_rate = 450000000;
- config.slow_rate = 27000000;
- config.bus_freq = 4;
- config.rev = ADRENO_REV(3, 2, 1, 0);
- } else if (cpu_is_apq8064()) {
- config.fast_rate = 400000000;
- config.slow_rate = 27000000;
- config.bus_freq = 4;
-
- if (SOCINFO_VERSION_MAJOR(version) == 2)
- config.rev = ADRENO_REV(3, 2, 0, 2);
- else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
- (SOCINFO_VERSION_MINOR(version) == 1))
- config.rev = ADRENO_REV(3, 2, 0, 1);
- else
- config.rev = ADRENO_REV(3, 2, 0, 0);
-
- } else if (cpu_is_msm8960ab()) {
- config.fast_rate = 400000000;
- config.slow_rate = 320000000;
- config.bus_freq = 4;
-
- if (SOCINFO_VERSION_MINOR(version) == 0)
- config.rev = ADRENO_REV(3, 2, 1, 0);
- else
- config.rev = ADRENO_REV(3, 2, 1, 1);
-
- } else if (cpu_is_msm8930()) {
- config.fast_rate = 400000000;
- config.slow_rate = 27000000;
- config.bus_freq = 3;
-
- if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
- (SOCINFO_VERSION_MINOR(version) == 2))
- config.rev = ADRENO_REV(3, 0, 5, 2);
- else
- config.rev = ADRENO_REV(3, 0, 5, 0);
-
- }
-# ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- config.bus_scale_table = pdata->bus_scale_table;
-# endif
-#endif
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a3b54cc76495..f1883825354e 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -17,12 +17,12 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/utsname.h>
#include "adreno_gpu.h"
+#include "msm_snapshot.h"
#include "msm_gem.h"
#include "msm_mmu.h"
-#define RB_SIZE SZ_32K
-#define RB_BLKSIZE 16
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
@@ -35,96 +35,147 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_GMEM_SIZE:
*value = adreno_gpu->gmem;
return 0;
+ case MSM_PARAM_GMEM_BASE:
+ *value = 0x100000;
+ return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->rev.patchid |
(adreno_gpu->rev.minor << 8) |
(adreno_gpu->rev.major << 16) |
(adreno_gpu->rev.core << 24);
return 0;
+ case MSM_PARAM_MAX_FREQ:
+ *value = gpu->gpufreq[gpu->active_level];
+ return 0;
+ case MSM_PARAM_TIMESTAMP:
+ if (adreno_gpu->funcs->get_timestamp)
+ return adreno_gpu->funcs->get_timestamp(gpu, value);
+ return -EINVAL;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
}
}
-#define rbmemptr(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- int ret;
+ int i;
DBG("%s", gpu->name);
- ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
- if (ret) {
- gpu->rb_iova = 0;
- dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
- return ret;
+ for (i = 0; i < gpu->nr_rings; i++) {
+ int ret = msm_gem_get_iova(gpu->rb[i]->bo, gpu->aspace,
+ &gpu->rb[i]->iova);
+ if (ret) {
+ gpu->rb[i]->iova = 0;
+ dev_err(gpu->dev->dev,
+ "could not map ringbuffer %d: %d\n", i, ret);
+ return ret;
+ }
}
- /* Setup REG_CP_RB_CNTL: */
+ /*
+ * Setup REG_CP_RB_CNTL. The same value is used across targets (with
+	 * the exception of A430, which disables the RPTR shadow) - the
+	 * calculation of the ringbuffer size and block size is moved to
+	 * msm_gpu.h for the pre-processor to deal with, and the A430 variant
+	 * is ORed in here
+ */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
- /* size is log2(quad-words): */
- AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
- AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
-
- /* Setup ringbuffer address: */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- rbmemptr(adreno_gpu, rptr));
+ MSM_GPU_RB_CNTL_DEFAULT |
+ (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
- /* Setup scratch/timestamp: */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
- rbmemptr(adreno_gpu, fence));
+ /* Setup ringbuffer address - use ringbuffer[0] for GPU init */
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
- adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(adreno_gpu, 0, rptr));
return 0;
}
-static uint32_t get_wptr(struct msm_ringbuffer *ring)
+/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
+ struct msm_ringbuffer *ring)
+{
+ if (adreno_is_a430(adreno_gpu)) {
+ /*
+ * If index is anything but 0 this will probably break horribly,
+ * but I think that we have enough infrastructure in place to
+ * ensure that it won't be. If not then this is why your
+ * a430 stopped working.
+ */
+ return adreno_gpu->memptrs->rptr[ring->id] = adreno_gpu_read(
+ adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+	} else {
+		return adreno_gpu->memptrs->rptr[ring->id];
+	}
+}
+
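+/* Default helper: targets without preemption support only ever use ring 0 */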
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
- return ring->cur - ring->start;
+ return gpu->rb[0];
}
-uint32_t adreno_last_fence(struct msm_gpu *gpu)
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring)
+{
+ if (!ring)
+ return 0;
+
+ return ring->submitted_fence;
+}
+
+uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- return adreno_gpu->memptrs->fence;
+
+ if (!ring)
+ return 0;
+
+ return adreno_gpu->memptrs->fence[ring->id];
}
void adreno_recover(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *dev = gpu->dev;
- int ret;
+ struct msm_ringbuffer *ring;
+ int ret, i;
gpu->funcs->pm_suspend(gpu);
- /* reset ringbuffer: */
- gpu->rb->cur = gpu->rb->start;
+ /* reset ringbuffer(s): */
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
- /* reset completed fence seqno, just discard anything pending: */
- adreno_gpu->memptrs->fence = gpu->submitted_fence;
- adreno_gpu->memptrs->rptr = 0;
- adreno_gpu->memptrs->wptr = 0;
+ /* No need for a lock here, nobody else is peeking in */
+ ring->cur = ring->start;
+ ring->next = ring->start;
+
+ /* reset completed fence seqno, discard anything pending: */
+ adreno_gpu->memptrs->fence[ring->id] =
+ adreno_submitted_fence(gpu, ring);
+ adreno_gpu->memptrs->rptr[ring->id] = 0;
+ }
gpu->funcs->pm_resume(gpu);
+
+ disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
+ enable_irq(gpu->irq);
}
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct msm_drm_private *priv = gpu->dev->dev_private;
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[submit->ring];
unsigned i, ibs = 0;
for (i = 0; i < submit->nr_cmds; i++) {
@@ -133,12 +184,11 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* ignore IB-targets */
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- /* ignore if there has not been a ctx switch: */
- if (priv->lastctx == ctx)
break;
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
- OUT_RING(ring, submit->cmd[i].iova);
+ OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
+ CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
ibs++;
break;
@@ -169,7 +219,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
- OUT_RING(ring, rbmemptr(adreno_gpu, fence));
+ OUT_RING(ring, rbmemptr(adreno_gpu, ring->id, fence));
OUT_RING(ring, submit->fence);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
@@ -196,15 +246,25 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
#endif
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
return 0;
}
-void adreno_flush(struct msm_gpu *gpu)
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr;
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+	/*
+	 * Wrap the wptr value that we calculate to fit in the HW range. This
+	 * accounts for the possibility that the last command fit exactly into
+	 * the ringbuffer, in which case ring->next hasn't wrapped back to
+	 * zero yet
+	 */
+ wptr = get_wptr(ring);
/* ensure writes to ringbuffer have hit system memory: */
mb();
@@ -212,22 +272,27 @@ void adreno_flush(struct msm_gpu *gpu)
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
-void adreno_idle(struct msm_gpu *gpu)
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr = get_wptr(ring);
/* wait for CP to drain ringbuffer: */
- if (spin_until(adreno_gpu->memptrs->rptr == wptr))
- DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
+ return true;
/* TODO maybe we need to reset GPU here to recover from hang? */
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
+ gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
+
+ return false;
}
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring;
int i;
seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
@@ -235,11 +300,18 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
- seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
- seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
- seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
+
+ seq_printf(m, "rb %d: fence: %d/%d\n", i,
+ adreno_last_fence(gpu, ring),
+ adreno_submitted_fence(gpu, ring));
+
+ seq_printf(m, " rptr: %d\n",
+ get_rptr(adreno_gpu, ring));
+ seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
+ }
gpu->funcs->pm_resume(gpu);
@@ -268,22 +340,29 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
*/
void adreno_dump_info(struct msm_gpu *gpu)
{
+ struct drm_device *dev = gpu->dev;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring;
int i;
- printk("revision: %d (%d.%d.%d.%d)\n",
+ dev_err(dev->dev, "revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
- printk("rptr: %d\n", adreno_gpu->memptrs->rptr);
- printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
- printk("rb wptr: %d\n", get_wptr(gpu->rb));
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
+
+ dev_err(dev->dev, " ring %d: fence %d/%d rptr/wptr %x/%x\n", i,
+ adreno_last_fence(gpu, ring),
+ adreno_submitted_fence(gpu, ring),
+ get_rptr(adreno_gpu, ring),
+ get_wptr(ring));
+ }
for (i = 0; i < 8; i++) {
- printk("CP_SCRATCH_REG%d: %u\n", i,
+ pr_err("CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
}
}
@@ -308,30 +387,146 @@ void adreno_dump(struct msm_gpu *gpu)
}
}
-static uint32_t ring_freewords(struct msm_gpu *gpu)
+static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t size = gpu->rb->size / 4;
- uint32_t wptr = get_wptr(gpu->rb);
- uint32_t rptr = adreno_gpu->memptrs->rptr;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
+ uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
+ /* Use ring->next to calculate free size */
+ uint32_t wptr = ring->next - ring->start;
+ uint32_t rptr = get_rptr(adreno_gpu, ring);
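+	/*
+	 * Keep one dword in reserve so that a completely full ring is never
+	 * confused with a completely empty one (both would read rptr == wptr)
+	 */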
return (rptr + (size - 1) - wptr) % size;
}
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
- if (spin_until(ring_freewords(gpu) >= ndwords))
- DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+ if (spin_until(ring_freewords(ring) >= ndwords))
+ DRM_ERROR("%s: timeout waiting for space in ringubffer %d\n",
+ ring->gpu->name, ring->id);
}
static const char *iommu_ports[] = {
- "gfx3d_user", "gfx3d_priv",
- "gfx3d1_user", "gfx3d1_priv",
+ "gfx3d_user",
+};
+
+/* Read the set of powerlevels */
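+/*
+ * Each powerlevel child node is expected to look roughly like this (the
+ * values below are illustrative only):
+ *
+ *	qcom,gpu-pwrlevel@0 {
+ *		reg = <0>;
+ *		qcom,gpu-freq = <624000000>;
+ *		qcom,bus-freq = <12>;
+ *	};
+ */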
+static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
+{
+ struct device_node *child;
+
+ gpu->active_level = 1;
+
+ /* The device tree will tell us the best clock to initialize with */
+ of_property_read_u32(node, "qcom,initial-pwrlevel", &gpu->active_level);
+
+ if (gpu->active_level >= ARRAY_SIZE(gpu->gpufreq))
+ gpu->active_level = 1;
+
+ for_each_child_of_node(node, child) {
+ unsigned int index;
+
+ if (of_property_read_u32(child, "reg", &index))
+ return -EINVAL;
+
+ if (index >= ARRAY_SIZE(gpu->gpufreq))
+ continue;
+
+ gpu->nr_pwrlevels = max(gpu->nr_pwrlevels, index + 1);
+
+ of_property_read_u32(child, "qcom,gpu-freq",
+ &gpu->gpufreq[index]);
+ of_property_read_u32(child, "qcom,bus-freq",
+ &gpu->busfreq[index]);
+ }
+
+ DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
+ gpu->gpufreq[gpu->active_level],
+ gpu->gpufreq[gpu->nr_pwrlevels - 1],
+ gpu->busfreq[gpu->active_level]);
+
+ return 0;
+}
+
+/*
+ * Escape valve for targets that don't define the binning nodes. Get the
+ * first powerlevel node and parse it
+ */
+static int adreno_get_legacy_pwrlevels(struct msm_gpu *gpu,
+ struct device_node *parent)
+{
+ struct device_node *child;
+
+ child = of_find_node_by_name(parent, "qcom,gpu-pwrlevels");
+ if (child)
+ return _adreno_get_pwrlevels(gpu, child);
+
+ dev_err(gpu->dev->dev, "Unable to parse any powerlevels\n");
+ return -EINVAL;
+}
+
+/* Get the powerlevels for the target */
+static int adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *parent)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct device_node *node, *child;
+
+ /* See if the target has defined a number of power bins */
+ node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
+ if (!node) {
+ /* If not look for the qcom,gpu-pwrlevels node */
+ return adreno_get_legacy_pwrlevels(gpu, parent);
+ }
+
+ for_each_child_of_node(node, child) {
+ unsigned int bin;
+
+ if (of_property_read_u32(child, "qcom,speed-bin", &bin))
+ continue;
+
+ /*
+ * If the bin matches the bin specified by the fuses, then we
+ * have a winner - parse it
+ */
+ if (adreno_gpu->speed_bin == bin)
+ return _adreno_get_pwrlevels(gpu, child);
+ }
+
+ return -ENODEV;
+}
+
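+/*
+ * Optional boolean device tree properties, each mapping directly to a
+ * hardware quirk flag
+ */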
+static const struct {
+ const char *str;
+ uint32_t flag;
+} quirks[] = {
+ { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
+ { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
};
+/* Parse the target configuration from the device tree */
+static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct device_node *node = pdev->dev.of_node;
+ int i, ret;
+
+ /* Probe the powerlevels */
+ ret = adreno_get_pwrlevels(gpu, node);
+ if (ret)
+ return ret;
+
+ /* Check to see if any quirks were specified in the device tree */
+ for (i = 0; i < ARRAY_SIZE(quirks); i++)
+ if (of_property_read_bool(node, quirks[i].str))
+ adreno_gpu->quirks |= quirks[i].flag;
+
+ return 0;
+}
+
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
+ struct adreno_gpu *adreno_gpu,
+ const struct adreno_gpu_funcs *funcs, int nr_rings)
{
struct adreno_platform_config *config = pdev->dev.platform_data;
+ struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
struct msm_mmu *mmu;
int ret;
@@ -342,19 +537,29 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->revn = adreno_gpu->info->revn;
adreno_gpu->rev = config->rev;
- gpu->fast_rate = config->fast_rate;
- gpu->slow_rate = config->slow_rate;
- gpu->bus_freq = config->bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- gpu->bus_scale_table = config->bus_scale_table;
-#endif
+ /* Get the rest of the target configuration from the device tree */
+	ret = adreno_of_parse(pdev, gpu);
+	if (ret)
+		return ret;
- DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
- gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+ adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
+ adreno_gpu_config.irqname = "kgsl_3d0_irq";
+ adreno_gpu_config.nr_rings = nr_rings;
+
+	if (adreno_gpu->revn >= 500) {
+		/* 5XX targets use a 64 bit region */
+		adreno_gpu_config.va_start = 0x800000000;
+		adreno_gpu_config.va_end = 0x8ffffffff;
+	} else {
+		adreno_gpu_config.va_start = 0x300000;
+		adreno_gpu_config.va_end = 0xffffffff;
+	}
+
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
- adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
- RB_SIZE);
+ adreno_gpu->info->name, &adreno_gpu_config);
if (ret)
return ret;
@@ -372,7 +577,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- mmu = gpu->mmu;
+ mmu = gpu->aspace->mmu;
if (mmu) {
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
@@ -397,7 +602,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return -ENOMEM;
}
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+ ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
&adreno_gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -409,12 +614,98 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
+ struct msm_gem_address_space *aspace = gpu->base.aspace;
+
if (gpu->memptrs_bo) {
if (gpu->memptrs_iova)
- msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+ msm_gem_put_iova(gpu->memptrs_bo, aspace);
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
release_firmware(gpu->pm4);
release_firmware(gpu->pfp);
+
msm_gpu_cleanup(&gpu->base);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
+ }
+}
+
+static void adreno_snapshot_os(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_snapshot_linux header;
+
+ memset(&header, 0, sizeof(header));
+
+ header.osid = SNAPSHOT_OS_LINUX_V3;
+ strlcpy(header.release, utsname()->release, sizeof(header.release));
+ strlcpy(header.version, utsname()->version, sizeof(header.version));
+
+ header.seconds = get_seconds();
+ header.ctxtcount = 0;
+
+ SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_OS, 0);
+}
+
+static void adreno_snapshot_ringbuffer(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_snapshot_ringbuffer header;
+ unsigned int i, end = 0;
+ unsigned int *data = ring->start;
+
+ memset(&header, 0, sizeof(header));
+
+ /*
+ * We only want to copy the active contents of each ring, so find the
+ * last valid entry in the ringbuffer
+ */
+ for (i = 0; i < MSM_GPU_RINGBUFFER_SZ >> 2; i++) {
+ if (data[i])
+ end = i;
+ }
+
+ /* The dump always starts at 0 */
+ header.start = 0;
+ header.end = end;
+
+ /* This is the number of dwords being dumped */
+ header.count = end + 1;
+
+ /* This is the size of the actual ringbuffer */
+ header.rbsize = MSM_GPU_RINGBUFFER_SZ >> 2;
+
+ header.id = ring->id;
+ header.gpuaddr = ring->iova;
+ header.rptr = get_rptr(adreno_gpu, ring);
+ header.wptr = get_wptr(ring);
+ header.timestamp_queued = adreno_submitted_fence(gpu, ring);
+ header.timestamp_retired = adreno_last_fence(gpu, ring);
+
+ /* Write the header even if the ringbuffer data is empty */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_RB_V2,
+ header.count))
+ return;
+
+ SNAPSHOT_MEMCPY(snapshot, ring->start, header.count * sizeof(u32));
+}
+
+static void adreno_snapshot_ringbuffers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_ringbuffer *ring;
+ int i;
+
+ /* Write a new section for each ringbuffer */
+ FOR_EACH_RING(gpu, ring, i)
+ adreno_snapshot_ringbuffer(gpu, snapshot, ring);
+}
+
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ adreno_snapshot_os(gpu, snapshot);
+ adreno_snapshot_ringbuffers(gpu, snapshot);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 0a312e9d3afd..30461115281c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -24,10 +24,17 @@
#include "msm_gpu.h"
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
+#define REG_SKIP ~0
+#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
+
/**
* adreno_regs: List of registers that are used in across all
* 3D devices. Each device type has different offset value for the same
@@ -35,73 +42,21 @@
* and are indexed by the enumeration values defined in this enum
*/
enum adreno_regs {
- REG_ADRENO_CP_DEBUG,
- REG_ADRENO_CP_ME_RAM_WADDR,
- REG_ADRENO_CP_ME_RAM_DATA,
- REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_ADRENO_CP_WFI_PEND_CTR,
REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI,
REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI,
REG_ADRENO_CP_RB_RPTR,
REG_ADRENO_CP_RB_WPTR,
- REG_ADRENO_CP_PROTECT_CTRL,
- REG_ADRENO_CP_ME_CNTL,
REG_ADRENO_CP_RB_CNTL,
- REG_ADRENO_CP_IB1_BASE,
- REG_ADRENO_CP_IB1_BUFSZ,
- REG_ADRENO_CP_IB2_BASE,
- REG_ADRENO_CP_IB2_BUFSZ,
- REG_ADRENO_CP_TIMESTAMP,
- REG_ADRENO_CP_ME_RAM_RADDR,
- REG_ADRENO_CP_ROQ_ADDR,
- REG_ADRENO_CP_ROQ_DATA,
- REG_ADRENO_CP_MERCIU_ADDR,
- REG_ADRENO_CP_MERCIU_DATA,
- REG_ADRENO_CP_MERCIU_DATA2,
- REG_ADRENO_CP_MEQ_ADDR,
- REG_ADRENO_CP_MEQ_DATA,
- REG_ADRENO_CP_HW_FAULT,
- REG_ADRENO_CP_PROTECT_STATUS,
- REG_ADRENO_SCRATCH_ADDR,
- REG_ADRENO_SCRATCH_UMSK,
- REG_ADRENO_SCRATCH_REG2,
- REG_ADRENO_RBBM_STATUS,
- REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_ADRENO_RBBM_INT_0_MASK,
- REG_ADRENO_RBBM_INT_0_STATUS,
- REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_ADRENO_RBBM_AHB_CMD,
- REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_ADRENO_RBBM_CLOCK_CTL,
- REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_ADRENO_VFD_CONTROL_0,
- REG_ADRENO_VFD_INDEX_MAX,
- REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_ADRENO_PA_SC_AA_CONFIG,
- REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_ADRENO_TP0_CHICKEN,
- REG_ADRENO_RBBM_RBBM_CTL,
- REG_ADRENO_UCHE_INVALIDATE0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
REG_ADRENO_REGISTER_MAX,
};
+enum adreno_quirks {
+ ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
+ ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+};
+
struct adreno_rev {
uint8_t core;
uint8_t major;
@@ -114,6 +69,7 @@ struct adreno_rev {
struct adreno_gpu_funcs {
struct msm_gpu_funcs base;
+ int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};
struct adreno_info {
@@ -127,10 +83,20 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev);
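+
+/*
+ * Each adreno_rbmemptrs member is an array with one slot per ring;
+ * rbmemptr() returns the IOVA of the given member for the given ring index
+ * so the CP can write rptr/fence updates into the shared memptrs buffer
+ */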
+#define _sizeof(member) \
+ sizeof(((struct adreno_rbmemptrs *) 0)->member[0])
+
+#define _base(adreno_gpu, member) \
+ ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
+
+#define rbmemptr(adreno_gpu, index, member) \
+ (_base((adreno_gpu), member) + ((index) * _sizeof(member)))
+
struct adreno_rbmemptrs {
- volatile uint32_t rptr;
- volatile uint32_t wptr;
- volatile uint32_t fence;
+ volatile uint32_t rptr[MSM_GPU_MAX_RINGS];
+ volatile uint32_t fence[MSM_GPU_MAX_RINGS];
+ volatile uint64_t ttbr0[MSM_GPU_MAX_RINGS];
+ volatile unsigned int contextidr[MSM_GPU_MAX_RINGS];
};
struct adreno_gpu {
@@ -152,7 +118,7 @@ struct adreno_gpu {
// different for z180..
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
- uint32_t memptrs_iova;
+ uint64_t memptrs_iova;
/*
* Register offsets are different between some GPUs.
@@ -160,16 +126,15 @@ struct adreno_gpu {
* code (a3xx_gpu.c) and stored in this common location.
*/
const unsigned int *reg_offsets;
+
+ uint32_t quirks;
+ uint32_t speed_bin;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
- uint32_t fast_rate, slow_rate, bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- struct msm_bus_scale_pdata *bus_scale_table;
-#endif
};
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -186,6 +151,9 @@ struct adreno_platform_config {
__ret; \
})
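+
+/* Resolve the GPU platform device's device tree node from a msm_gpu */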
+#define GPU_OF_NODE(_g) \
+ (((struct msm_drm_private *) \
+ ((_g)->dev->dev_private))->gpu_pdev->dev.of_node)
static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
@@ -228,32 +196,51 @@ static inline int adreno_is_a420(struct adreno_gpu *gpu)
return gpu->revn == 420;
}
+static inline int adreno_is_a430(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 430;
+}
+
+static inline int adreno_is_a530(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 530;
+}
+
+static inline int adreno_is_a540(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 540;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
-uint32_t adreno_last_fence(struct msm_gpu *gpu);
+uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring);
void adreno_recover(struct msm_gpu *gpu);
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
-void adreno_flush(struct msm_gpu *gpu);
-void adreno_idle(struct msm_gpu *gpu);
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs);
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
/* ringbuffer helpers (the parts that are adreno specific) */
static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}
@@ -261,19 +248,49 @@ OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
- adreno_wait_ring(ring->gpu, 1);
+ adreno_wait_ring(ring, 1);
OUT_RING(ring, CP_TYPE2_PKT);
}
static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
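+
+/*
+ * Single-bit parity as used in type4/type7 packet headers: XOR-fold the
+ * 32 bit value down to a nibble and look it up in the 0x9669 parity table
+ */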
+static inline u32 PM4_PARITY(u32 val)
+{
+ return (0x9669 >> (0xF & (val ^
+ (val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+		(val >> 16) ^ (val >> 20) ^ (val >> 24) ^
+ (val >> 28)))) & 1;
+}
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
+#define PKT4(_reg, _cnt) \
+ (CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+ (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, PKT4(regindx, cnt));
+}
+
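+/*
+ * Type7 packet header layout: count in bits [14:0] with its parity at
+ * bit 15, opcode in bits [22:16] with its parity at bit 23
+ */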
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+ ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
+
/*
- * adreno_checkreg_off() - Checks the validity of a register enum
+ * adreno_reg_check() - Checks the validity of a register enum
* @gpu: Pointer to struct adreno_gpu
* @offset_name: The register enum that is checked
*/
@@ -284,6 +301,16 @@ static inline bool adreno_reg_check(struct adreno_gpu *gpu,
!gpu->reg_offsets[offset_name]) {
BUG();
}
+
+ /*
+	 * REG_SKIP is a special value that tells us that the register in
+	 * question isn't implemented for this target and shouldn't trigger a
+	 * BUG(). This
+ * is used to cleanly implement adreno_gpu_write64() and
+ * adreno_gpu_read64() in a generic fashion
+ */
+ if (gpu->reg_offsets[offset_name] == REG_SKIP)
+ return false;
+
return true;
}
@@ -305,4 +332,40 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
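+
+/*
+ * Write a 64 bit value as a lo/hi register pair. Targets that don't
+ * implement the high half mark it with REG_ADRENO_SKIP(), so the second
+ * write is quietly dropped by adreno_reg_check()
+ */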
+static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
+ enum adreno_regs lo, enum adreno_regs hi, u64 data)
+{
+ adreno_gpu_write(gpu, lo, lower_32_bits(data));
+ adreno_gpu_write(gpu, hi, upper_32_bits(data));
+}
+
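+/*
+ * The write pointer is measured in dwords from the start of the ring; wrap
+ * it to the ringbuffer size so a ring that ends exactly on the last dword
+ * reports 0 instead of the full size
+ */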
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+ return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
+}
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
+ * expose registers you intended to protect!
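+ *
+ * For example, ADRENO_PROTECT_RW(0x800, 64) (an illustrative, aligned range)
+ * would block both reads and writes to registers 0x800-0x83f.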
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+ ((1 << 30) | (1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+ ((1 << 29) \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index a22fef569499..9911a181f9c2 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -57,6 +59,7 @@ enum vgt_event_type {
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
+ STAT_EVENT = 16,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
@@ -81,7 +84,6 @@ enum pc_di_primtype {
DI_PT_LINESTRIP_ADJ = 11,
DI_PT_TRI_ADJ = 12,
DI_PT_TRISTRIP_ADJ = 13,
- DI_PT_PATCHES = 34,
};
enum pc_di_src_sel {
@@ -109,11 +111,15 @@ enum adreno_pm4_packet_type {
CP_TYPE1_PKT = 0x40000000,
CP_TYPE2_PKT = 0x80000000,
CP_TYPE3_PKT = 0xc0000000,
+ CP_TYPE4_PKT = 0x40000000,
+ CP_TYPE7_PKT = 0x70000000,
};
enum adreno_pm4_type3_packets {
CP_ME_INIT = 72,
CP_NOP = 16,
+ CP_PREEMPT_ENABLE = 28,
+ CP_PREEMPT_TOKEN = 30,
CP_INDIRECT_BUFFER = 63,
CP_INDIRECT_BUFFER_PFD = 55,
CP_WAIT_FOR_IDLE = 38,
@@ -162,6 +168,7 @@ enum adreno_pm4_type3_packets {
CP_TEST_TWO_MEMS = 113,
CP_REG_WR_NO_CTXT = 120,
CP_RECORD_PFP_TIMESTAMP = 17,
+ CP_SET_SECURE_MODE = 102,
CP_WAIT_FOR_ME = 19,
CP_SET_DRAW_STATE = 67,
CP_DRAW_INDX_OFFSET = 56,
@@ -172,6 +179,26 @@ enum adreno_pm4_type3_packets {
CP_UNKNOWN_1A = 26,
CP_UNKNOWN_4E = 78,
CP_WIDE_REG_WRITE = 116,
+ CP_SCRATCH_TO_REG = 77,
+ CP_REG_TO_SCRATCH = 74,
+ CP_WAIT_MEM_WRITES = 18,
+ CP_COND_REG_EXEC = 71,
+ CP_MEM_TO_REG = 66,
+ CP_EXEC_CS = 51,
+ CP_PERFCOUNTER_ACTION = 80,
+ CP_SMMU_TABLE_UPDATE = 83,
+ CP_CONTEXT_REG_BUNCH = 92,
+ CP_YIELD_ENABLE = 28,
+ CP_SKIP_IB2_ENABLE_GLOBAL = 29,
+ CP_SKIP_IB2_ENABLE_LOCAL = 35,
+ CP_SET_SUBDRAW_SIZE = 53,
+ CP_SET_VISIBILITY_OVERRIDE = 100,
+ CP_PREEMPT_ENABLE_GLOBAL = 105,
+ CP_PREEMPT_ENABLE_LOCAL = 106,
+ CP_CONTEXT_SWITCH_YIELD = 107,
+ CP_SET_RENDER_MODE = 108,
+ CP_COMPUTE_CHECKPOINT = 110,
+ CP_MEM_TO_MEM = 115,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -190,6 +217,7 @@ enum adreno_state_block {
SB_VERT_SHADER = 4,
SB_GEOM_SHADER = 5,
SB_FRAG_SHADER = 6,
+ SB_COMPUTE_SHADER = 7,
};
enum adreno_state_type {
@@ -199,7 +227,11 @@ enum adreno_state_type {
enum adreno_state_src {
SS_DIRECT = 0,
+ SS_INVALID_ALL_IC = 2,
+ SS_INVALID_PART_IC = 3,
SS_INDIRECT = 4,
+ SS_INDIRECT_TCM = 5,
+ SS_INDIRECT_STM = 6,
};
enum a4xx_index_size {
@@ -227,7 +259,7 @@ static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
{
return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
}
-#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000
+#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0xffc00000
#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
{
@@ -379,7 +411,12 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va
{
return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
}
-#define CP_DRAW_INDX_OFFSET_0_TESSELLATE 0x00000100
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
@@ -499,5 +536,102 @@ static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
}
+#define REG_CP_REG_TO_MEM_0 0x00000000
+#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff
+#define CP_REG_TO_MEM_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_0_CNT__MASK 0x3ff80000
+#define CP_REG_TO_MEM_0_CNT__SHIFT 19
+static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_0_64B 0x40000000
+#define CP_REG_TO_MEM_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_1 0x00000001
+#define CP_REG_TO_MEM_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
+
+#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
+#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_2 0x00000002
+#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_3 0x00000003
+#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_0 0x00000000
+
+#define REG_CP_SET_RENDER_MODE_1 0x00000001
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_2 0x00000002
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_3 0x00000003
+#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010
+
+#define REG_CP_SET_RENDER_MODE_4 0x00000004
+
+#define REG_CP_SET_RENDER_MODE_5 0x00000005
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_6 0x00000006
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_7 0x00000007
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
+}
+
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index b8520aadbc0c..fb9617ce572a 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1186,7 +1186,7 @@ static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl)
{
int rc = 0;
- u32 iova = 0;
+ u64 iova = 0;
dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
SZ_4K,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 5f5a3732cdf6..4cb4764e7492 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -89,7 +89,7 @@ int msm_dsi_manager_phy_enable(int id,
u32 *clk_pre, u32 *clk_post);
void msm_dsi_manager_phy_disable(int id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
@@ -143,7 +143,7 @@ int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
- u32 iova, u32 len);
+ u64 iova, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index b2b5f3dd1b4c..4958594d5266 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4c49868efcda..4580a6e3c877 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -836,7 +836,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
int ret;
- u32 iova;
+ u64 iova;
mutex_lock(&dev->struct_mutex);
msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
@@ -974,7 +974,7 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
int ret;
- u32 iova;
+	u64 iova;
bool triggered;
ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
@@ -1750,11 +1750,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
return ret;
}
-void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u64 iova, u32 len)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+ /* FIXME: Verify that the iova < 32 bits? */
+ dsi_write(msm_host, REG_DSI_DMA_BASE, lower_32_bits(iova));
dsi_write(msm_host, REG_DSI_DMA_LEN, len);
dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 0455ff75074a..2091b748abbb 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -774,7 +774,7 @@ restore_host0:
return ret;
}
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 80ec65e47468..2d999494cdea 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 7d7662e69e11..506434fac993 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index 90bf5ed46746..f1072c18c81e 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
new file mode 100644
index 000000000000..15e2d69827e7
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -0,0 +1,1182 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "sde-hdmi:[%s] " fmt, __func__
+
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+
+#include "sde_kms.h"
+#include "msm_drv.h"
+#include "sde_hdmi.h"
+
+static DEFINE_MUTEX(sde_hdmi_list_lock);
+static LIST_HEAD(sde_hdmi_list);
+
+static const struct of_device_id sde_hdmi_dt_match[] = {
+ {.compatible = "qcom,hdmi-display"},
+ {}
+};
+
+static ssize_t _sde_hdmi_debugfs_dump_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char *buf;
+ u32 len = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(SZ_1K, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+	len += snprintf(buf, SZ_1K, "name = %s\n", display->name);
+
+ if (copy_to_user(buff, buf, len)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+ kfree(buf);
+ return len;
+}
+
+
+static const struct file_operations dump_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_dump_info_read,
+};
+
+static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
+{
+ int rc = 0;
+ struct dentry *dir, *dump_file;
+
+ dir = debugfs_create_dir(display->name, NULL);
+ if (!dir) {
+ rc = -ENOMEM;
+ SDE_ERROR("[%s]debugfs create dir failed, rc = %d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ dump_file = debugfs_create_file("dump_info",
+ 0444,
+ dir,
+ display,
+ &dump_info_fops);
+	if (IS_ERR_OR_NULL(dump_file)) {
+		rc = dump_file ? PTR_ERR(dump_file) : -ENOMEM;
+ SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ display->root = dir;
+ return rc;
+error_remove_dir:
+ debugfs_remove(dir);
+error:
+ return rc;
+}
+
+static void _sde_hdmi_debugfs_deinit(struct sde_hdmi *display)
+{
+ debugfs_remove(display->root);
+}
+
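+/*
+ * Toggle the PHY and PLL software resets. The active polarity differs
+ * between PHY revisions, so the *_LOW flags in PHY_CTRL indicate whether
+ * asserting reset means clearing or setting the corresponding bit
+ */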
+static void _sde_hdmi_phy_reset(struct hdmi *hdmi)
+{
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+}
+
+static int _sde_hdmi_gpio_config(struct hdmi *hdmi, bool on)
+{
+ const struct hdmi_platform_config *config = hdmi->config;
+ int ret;
+
+ if (on) {
+ if (config->ddc_clk_gpio != -1) {
+ ret = gpio_request(config->ddc_clk_gpio,
+ "HDMI_DDC_CLK");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_CLK", config->ddc_clk_gpio,
+ ret);
+ goto error_ddc_clk_gpio;
+ }
+ gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
+ }
+
+ if (config->ddc_data_gpio != -1) {
+ ret = gpio_request(config->ddc_data_gpio,
+ "HDMI_DDC_DATA");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_DATA", config->ddc_data_gpio,
+ ret);
+ goto error_ddc_data_gpio;
+ }
+ gpio_set_value_cansleep(config->ddc_data_gpio, 1);
+ }
+
+ ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_HPD", config->hpd_gpio, ret);
+ goto error_hpd_gpio;
+ }
+ gpio_direction_output(config->hpd_gpio, 1);
+ if (config->hpd5v_gpio != -1) {
+ ret = gpio_request(config->hpd5v_gpio, "HDMI_HPD_5V");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_HPD_5V",
+ config->hpd5v_gpio,
+ ret);
+ goto error_hpd5v_gpio;
+ }
+ gpio_set_value_cansleep(config->hpd5v_gpio, 1);
+ }
+
+ if (config->mux_en_gpio != -1) {
+ ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_EN", config->mux_en_gpio,
+ ret);
+ goto error_en_gpio;
+ }
+ gpio_set_value_cansleep(config->mux_en_gpio, 1);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ ret = gpio_request(config->mux_sel_gpio,
+ "HDMI_MUX_SEL");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_SEL", config->mux_sel_gpio,
+ ret);
+ goto error_sel_gpio;
+ }
+ gpio_set_value_cansleep(config->mux_sel_gpio, 0);
+ }
+
+ if (config->mux_lpm_gpio != -1) {
+ ret = gpio_request(config->mux_lpm_gpio,
+ "HDMI_MUX_LPM");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_LPM",
+ config->mux_lpm_gpio, ret);
+ goto error_lpm_gpio;
+ }
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
+ }
+ SDE_DEBUG("gpio on");
+ } else {
+ if (config->ddc_clk_gpio != -1)
+ gpio_free(config->ddc_clk_gpio);
+
+ if (config->ddc_data_gpio != -1)
+ gpio_free(config->ddc_data_gpio);
+
+ gpio_free(config->hpd_gpio);
+
+ if (config->hpd5v_gpio != -1)
+ gpio_free(config->hpd5v_gpio);
+
+ if (config->mux_en_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_en_gpio, 0);
+ gpio_free(config->mux_en_gpio);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_sel_gpio, 1);
+ gpio_free(config->mux_sel_gpio);
+ }
+
+ if (config->mux_lpm_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
+ gpio_free(config->mux_lpm_gpio);
+ }
+ SDE_DEBUG("gpio off");
+ }
+
+ return 0;
+
+error_lpm_gpio:
+ if (config->mux_sel_gpio != -1)
+ gpio_free(config->mux_sel_gpio);
+error_sel_gpio:
+ if (config->mux_en_gpio != -1)
+ gpio_free(config->mux_en_gpio);
+error_en_gpio:
+ if (config->hpd5v_gpio != -1)
+ gpio_free(config->hpd5v_gpio);
+error_hpd5v_gpio:
+ gpio_free(config->hpd_gpio);
+error_hpd_gpio:
+ if (config->ddc_data_gpio != -1)
+ gpio_free(config->ddc_data_gpio);
+error_ddc_data_gpio:
+ if (config->ddc_clk_gpio != -1)
+ gpio_free(config->ddc_clk_gpio);
+error_ddc_clk_gpio:
+ return ret;
+}
+
+static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
+{
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ uint32_t hpd_ctrl;
+ int i, ret;
+ unsigned long flags;
+
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->hpd_regs[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+ }
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret) {
+ SDE_ERROR("pinctrl state chg failed: %d\n", ret);
+ goto fail;
+ }
+
+ ret = _sde_hdmi_gpio_config(hdmi, true);
+ if (ret) {
+ SDE_ERROR("failed to configure GPIOs: %d\n", ret);
+ goto fail;
+ }
+
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ if (config->hpd_freq && config->hpd_freq[i]) {
+ ret = clk_set_rate(hdmi->hpd_clks[i],
+ config->hpd_freq[i]);
+ if (ret)
+ pr_warn("failed to set clk %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+
+ ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ goto fail;
+ }
+ }
+
+ sde_hdmi_set_mode(hdmi, false);
+ _sde_hdmi_phy_reset(hdmi);
+ sde_hdmi_set_mode(hdmi, true);
+
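+ /*
+ * Program the 1 usec reference timer used by the DDC and HPD logic;
+ * the value is carried over from the existing msm hdmi driver.
+ */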
+ hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
+
+ /* set timeout to 4.1ms (max) for hardware debounce */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
+
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+
+ /* enable HPD events: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ HDMI_HPD_INT_CTRL_INT_CONNECT |
+ HDMI_HPD_INT_CTRL_INT_EN);
+
+ /* Toggle HPD circuit to trigger HPD sense */
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static void _sde_hdmi_hpd_disable(struct sde_hdmi *sde_hdmi)
+{
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ int i, ret = 0;
+
+ /* Disable HPD interrupt */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
+
+ sde_hdmi_set_mode(hdmi, false);
+
+ for (i = 0; i < config->hpd_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
+
+ ret = _sde_hdmi_gpio_config(hdmi, false);
+ if (ret)
+ pr_warn("failed to unconfigure GPIOs: %d\n", ret);
+
+ ret = pinctrl_pm_select_sleep_state(dev);
+ if (ret)
+ pr_warn("pinctrl state chg failed: %d\n", ret);
+
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->hpd_regs[i]);
+ if (ret)
+ pr_warn("failed to disable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ }
+}
+
+static void _sde_hdmi_hotplug_work(struct work_struct *work)
+{
+ struct sde_hdmi *sde_hdmi =
+ container_of(work, struct sde_hdmi, hpd_work);
+ struct drm_connector *connector;
+
+ if (!sde_hdmi || !sde_hdmi->ctrl.ctrl ||
+ !sde_hdmi->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ sde_hdmi);
+ return;
+ }
+
+ connector = sde_hdmi->ctrl.ctrl->connector;
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
+static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi)
+{
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+ uint32_t hpd_int_status, hpd_int_ctrl;
+
+ /* Process HPD: */
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
+
+ if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
+ (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
+ sde_hdmi->connected = !!(hpd_int_status &
+ HDMI_HPD_INT_STATUS_CABLE_DETECTED);
+ /* ack & disable (temporarily) HPD events: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ HDMI_HPD_INT_CTRL_INT_ACK);
+
+ DRM_DEBUG("status=%04x, ctrl=%04x", hpd_int_status,
+ hpd_int_ctrl);
+
+ /* detect disconnect if we are connected, or vice versa: */
+ hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+ if (!sde_hdmi->connected)
+ hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+ queue_work(hdmi->workq, &sde_hdmi->hpd_work);
+ }
+}
+
+static irqreturn_t _sde_hdmi_irq(int irq, void *dev_id)
+{
+ struct sde_hdmi *sde_hdmi = dev_id;
+ struct hdmi *hdmi;
+
+ if (!sde_hdmi || !sde_hdmi->ctrl.ctrl) {
+ SDE_ERROR("sde_hdmi=%p or hdmi is NULL\n", sde_hdmi);
+ return IRQ_NONE;
+ }
+ hdmi = sde_hdmi->ctrl.ctrl;
+ /* Process HPD: */
+ _sde_hdmi_connector_irq(sde_hdmi);
+
+ /* Process DDC: */
+ hdmi_i2c_irq(hdmi->i2c);
+
+ /* Process HDCP: */
+ if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+ hdmi_hdcp_ctrl_irq(hdmi->hdcp_ctrl);
+
+ /* TODO audio.. */
+
+ return IRQ_HANDLED;
+}
+
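+/*
+ * Program HDMI_CTRL for controller enable and HDMI vs. DVI signalling.
+ * For DVI sinks the controller is first enabled with the HDMI bit set
+ * and the bit is then cleared with a second write, mirroring the
+ * sequence used by the existing msm hdmi driver.
+ */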
+void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+{
+ uint32_t ctrl = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ if (power_on) {
+ ctrl |= HDMI_CTRL_ENABLE;
+ if (!hdmi->hdmi_mode) {
+ ctrl |= HDMI_CTRL_HDMI;
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ ctrl &= ~HDMI_CTRL_HDMI;
+ } else {
+ ctrl |= HDMI_CTRL_HDMI;
+ }
+ } else {
+ ctrl &= ~HDMI_CTRL_HDMI;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+ DRM_DEBUG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
+ power_on ? "Enable" : "Disable", ctrl);
+}
+
+int sde_hdmi_get_info(struct msm_display_info *info,
+ void *display)
+{
+ int rc = 0;
+ struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+ struct hdmi *hdmi;
+
+ if (!display || !info) {
+ SDE_ERROR("display=%p or info=%p is NULL\n", display, info);
+ return -EINVAL;
+ }
+
+ hdmi = hdmi_display->ctrl.ctrl;
+
+ mutex_lock(&hdmi_display->display_lock);
+
+ info->intf_type = DRM_MODE_CONNECTOR_HDMIA;
+ info->num_of_h_tiles = 1;
+ info->h_tile_instance[0] = 0;
+ if (hdmi_display->non_pluggable) {
+ info->capabilities = MSM_DISPLAY_CAP_VID_MODE;
+ hdmi_display->connected = true;
+ hdmi->hdmi_mode = true;
+ } else {
+ info->capabilities = MSM_DISPLAY_CAP_HOT_PLUG |
+ MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_VID_MODE;
+ }
+ info->is_connected = hdmi_display->connected;
+ info->max_width = 1920;
+ info->max_height = 1080;
+ info->compression = MSM_DISPLAY_COMPRESS_NONE;
+
+ mutex_unlock(&hdmi_display->display_lock);
+ return rc;
+}
+
+u32 sde_hdmi_get_num_of_displays(void)
+{
+ u32 count = 0;
+ struct sde_hdmi *display;
+
+ mutex_lock(&sde_hdmi_list_lock);
+
+ list_for_each_entry(display, &sde_hdmi_list, list)
+ count++;
+
+ mutex_unlock(&sde_hdmi_list_lock);
+ return count;
+}
+
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count)
+{
+ struct sde_hdmi *display;
+ int i = 0;
+
+ SDE_DEBUG("\n");
+
+ if (!display_array || !max_display_count) {
+ SDE_ERROR("invalid params\n");
+ return 0;
+ }
+
+ mutex_lock(&sde_hdmi_list_lock);
+ list_for_each_entry(display, &sde_hdmi_list, list) {
+ if (i >= max_display_count)
+ break;
+ display_array[i++] = display;
+ }
+ mutex_unlock(&sde_hdmi_list_lock);
+
+ return i;
+}
+
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+ void *display)
+{
+ struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)display;
+
+ if (!sde_hdmi || !sde_hdmi->ctrl.ctrl) {
+ SDE_ERROR("sde_hdmi=%p or hdmi is NULL\n", sde_hdmi);
+ return -EINVAL;
+ }
+
+ _sde_hdmi_hpd_disable(sde_hdmi);
+
+ return 0;
+}
+
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ int rc = 0;
+ struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)display;
+ struct hdmi *hdmi;
+
+ if (!sde_hdmi) {
+ SDE_ERROR("sde_hdmi is NULL\n");
+ return -EINVAL;
+ }
+
+ hdmi = sde_hdmi->ctrl.ctrl;
+ if (!hdmi) {
+ SDE_ERROR("hdmi is NULL\n");
+ return -EINVAL;
+ }
+
+ if (info)
+ sde_kms_info_add_keystr(info,
+ "DISPLAY_TYPE",
+ sde_hdmi->display_type);
+
+ hdmi->connector = connector;
+ INIT_WORK(&sde_hdmi->hpd_work, _sde_hdmi_hotplug_work);
+
+ /* Enable HPD detection */
+ rc = _sde_hdmi_hpd_enable(sde_hdmi);
+ if (rc)
+ SDE_ERROR("failed to enable HPD: %d\n", rc);
+
+ return rc;
+}
+
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display)
+{
+ enum drm_connector_status status = connector_status_unknown;
+ struct msm_display_info info;
+ int rc;
+
+ if (!connector || !display) {
+ SDE_ERROR("connector=%p or display=%p is NULL\n",
+ connector, display);
+ return status;
+ }
+
+ SDE_DEBUG("\n");
+
+ /* query the display info */
+ memset(&info, 0x0, sizeof(info));
+ rc = sde_hdmi_get_info(&info, display);
+ if (rc) {
+ SDE_ERROR("failed to get display info, rc=%d\n", rc);
+ return connector_status_disconnected;
+ }
+
+ if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+ status = (info.is_connected ? connector_status_connected :
+ connector_status_disconnected);
+ else
+ status = connector_status_connected;
+
+ connector->display_info.width_mm = info.width_mm;
+ connector->display_info.height_mm = info.height_mm;
+
+ return status;
+}
+
+int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display)
+{
+ struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+ struct hdmi *hdmi;
+ struct edid *edid;
+ struct drm_display_mode *mode, *m;
+ uint32_t hdmi_ctrl;
+ int ret = 0;
+
+ if (!connector || !display) {
+ SDE_ERROR("connector=%p or display=%p is NULL\n",
+ connector, display);
+ return 0;
+ }
+
+ SDE_DEBUG("\n");
+
+ hdmi = hdmi_display->ctrl.ctrl;
+ if (hdmi_display->non_pluggable) {
+ list_for_each_entry(mode, &hdmi_display->mode_list, head) {
+ m = drm_mode_duplicate(connector->dev, mode);
+ if (!m) {
+ SDE_ERROR("failed to add hdmi mode %dx%d\n",
+ mode->hdisplay, mode->vdisplay);
+ break;
+ }
+ drm_mode_probed_add(connector, m);
+ }
+ ret = hdmi_display->num_of_modes;
+ } else {
+ /* Read EDID */
+ hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+
+ edid = drm_get_edid(connector, hdmi->i2c);
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+
+ hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+ }
+
+ return ret;
+}
+
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display)
+{
+ struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+ struct hdmi *hdmi;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ long actual, requested;
+
+ if (!connector || !display || !mode) {
+ SDE_ERROR("connector=%p or display=%p or mode=%p is NULL\n",
+ connector, display, mode);
+ return MODE_ERROR;
+ }
+
+ SDE_DEBUG("\n");
+
+ hdmi = hdmi_display->ctrl.ctrl;
+ priv = connector->dev->dev_private;
+ kms = priv->kms;
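+ /* mode->clock is in kHz; round_pixclk() expects Hz */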
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms,
+ requested, hdmi->encoder);
+
+ SDE_DEBUG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+ if (!display) {
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+ if (!display) {
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sde_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ int rc = 0;
+ struct sde_hdmi_ctrl *display_ctrl = NULL;
+ struct sde_hdmi *display = NULL;
+ struct drm_device *drm = NULL;
+ struct msm_drm_private *priv = NULL;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ SDE_ERROR("E\n");
+ if (!dev || !pdev || !master) {
+ pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+ dev, pdev, master);
+ return -EINVAL;
+ }
+
+ drm = dev_get_drvdata(master);
+ display = platform_get_drvdata(pdev);
+ if (!drm || !display) {
+ pr_err("invalid param(s), drm %pK, display %pK\n",
+ drm, display);
+ return -EINVAL;
+ }
+
+ priv = drm->dev_private;
+ mutex_lock(&display->display_lock);
+
+ rc = _sde_hdmi_debugfs_init(display);
+ if (rc) {
+ SDE_ERROR("[%s]Debugfs init failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ display_ctrl = &display->ctrl;
+ display_ctrl->ctrl = priv->hdmi;
+ SDE_ERROR("display_ctrl->ctrl=%p\n", display_ctrl->ctrl);
+ display->drm_dev = drm;
+
+error:
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+
+static void sde_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sde_hdmi *display = NULL;
+
+ if (!dev) {
+ SDE_ERROR("invalid params\n");
+ return;
+ }
+
+ display = platform_get_drvdata(to_platform_device(dev));
+ if (!display) {
+ SDE_ERROR("Invalid display device\n");
+ return;
+ }
+ mutex_lock(&display->display_lock);
+ _sde_hdmi_debugfs_deinit(display);
+ display->drm_dev = NULL;
+ mutex_unlock(&display->display_lock);
+}
+
+static const struct component_ops sde_hdmi_comp_ops = {
+ .bind = sde_hdmi_bind,
+ .unbind = sde_hdmi_unbind,
+};
+
+static int _sde_hdmi_parse_dt_modes(struct device_node *np,
+ struct list_head *head,
+ u32 *num_of_modes)
+{
+ int rc = 0;
+ struct drm_display_mode *mode;
+ u32 mode_count = 0;
+ struct device_node *node = NULL;
+ struct device_node *root_node = NULL;
+ const char *name;
+ u32 h_front_porch, h_pulse_width, h_back_porch;
+ u32 v_front_porch, v_pulse_width, v_back_porch;
+ bool h_active_high, v_active_high;
+ u32 flags = 0;
+
+ root_node = of_get_child_by_name(np, "qcom,customize-modes");
+ if (!root_node) {
+ root_node = of_parse_phandle(np, "qcom,customize-modes", 0);
+ if (!root_node) {
+ DRM_INFO("No entry present for qcom,customize-modes");
+ goto end;
+ }
+ }
+ for_each_child_of_node(root_node, node) {
+ rc = 0;
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode) {
+ SDE_ERROR("Out of memory\n");
+ rc = -ENOMEM;
+ continue;
+ }
+
+ rc = of_property_read_string(node, "qcom,mode-name",
+ &name);
+ if (rc) {
+ SDE_ERROR("failed to read qcom,mode-name, rc=%d\n", rc);
+ goto fail;
+ }
+ strlcpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+ rc = of_property_read_u32(node, "qcom,mode-h-active",
+ &mode->hdisplay);
+ if (rc) {
+ SDE_ERROR("failed to read h-active, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-h-front-porch",
+ &h_front_porch);
+ if (rc) {
+ SDE_ERROR("failed to read h-front-porch, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-h-pulse-width",
+ &h_pulse_width);
+ if (rc) {
+ SDE_ERROR("failed to read h-pulse-width, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-h-back-porch",
+ &h_back_porch);
+ if (rc) {
+ SDE_ERROR("failed to read h-back-porch, rc=%d\n", rc);
+ goto fail;
+ }
+
+ h_active_high = of_property_read_bool(node,
+ "qcom,mode-h-active-high");
+
+ rc = of_property_read_u32(node, "qcom,mode-v-active",
+ &mode->vdisplay);
+ if (rc) {
+ SDE_ERROR("failed to read v-active, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-v-front-porch",
+ &v_front_porch);
+ if (rc) {
+ SDE_ERROR("failed to read v-front-porch, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-v-pulse-width",
+ &v_pulse_width);
+ if (rc) {
+ SDE_ERROR("failed to read v-pulse-width, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-v-back-porch",
+ &v_back_porch);
+ if (rc) {
+ SDE_ERROR("failed to read v-back-porch, rc=%d\n", rc);
+ goto fail;
+ }
+
+ v_active_high = of_property_read_bool(node,
+ "qcom,mode-v-active-high");
+
+ rc = of_property_read_u32(node, "qcom,mode-refersh-rate",
+ &mode->vrefresh);
+ if (rc) {
+ SDE_ERROR("failed to read refersh-rate, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-clock-in-khz",
+ &mode->clock);
+ if (rc) {
+ SDE_ERROR("failed to read clock, rc=%d\n", rc);
+ goto fail;
+ }
+
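+ /*
+ * Derive the DRM timing fields: sync starts one front porch after
+ * the active region and lasts for the pulse width; the back porch
+ * then pads out the total.
+ */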
+ mode->hsync_start = mode->hdisplay + h_front_porch;
+ mode->hsync_end = mode->hsync_start + h_pulse_width;
+ mode->htotal = mode->hsync_end + h_back_porch;
+ mode->vsync_start = mode->vdisplay + v_front_porch;
+ mode->vsync_end = mode->vsync_start + v_pulse_width;
+ mode->vtotal = mode->vsync_end + v_back_porch;
+ /* sync polarity flags must be recomputed for each mode */
+ flags = 0;
+ if (h_active_high)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (v_active_high)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ mode->flags = flags;
+
+ if (!rc) {
+ mode_count++;
+ list_add_tail(&mode->head, head);
+ }
+
+ SDE_DEBUG("mode[%d] h[%d,%d,%d,%d] v[%d,%d,%d,%d] %d %xH %d\n",
+ mode_count - 1, mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal, mode->vdisplay,
+ mode->vsync_start, mode->vsync_end, mode->vtotal,
+ mode->vrefresh, mode->flags, mode->clock);
+fail:
+ if (rc) {
+ kfree(mode);
+ continue;
+ }
+ }
+
+ if (num_of_modes)
+ *num_of_modes = mode_count;
+
+end:
+ return rc;
+}
+
+static int _sde_hdmi_parse_dt(struct device_node *node,
+ struct sde_hdmi *display)
+{
+ int rc = 0;
+
+ display->name = of_get_property(node, "label", NULL);
+
+ display->display_type = of_get_property(node,
+ "qcom,display-type", NULL);
+ if (!display->display_type)
+ display->display_type = "unknown";
+
+ display->non_pluggable = of_property_read_bool(node,
+ "qcom,non-pluggable");
+
+ rc = _sde_hdmi_parse_dt_modes(node, &display->mode_list,
+ &display->num_of_modes);
+ if (rc)
+ SDE_ERROR("parse_dt_modes failed rc=%d\n", rc);
+
+ return rc;
+}
+
+static int _sde_hdmi_dev_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct sde_hdmi *display;
+
+ SDE_DEBUG("\n");
+
+ if (!pdev || !pdev->dev.of_node) {
+ SDE_ERROR("pdev not found\n");
+ return -ENODEV;
+ }
+
+ display = devm_kzalloc(&pdev->dev, sizeof(*display), GFP_KERNEL);
+ if (!display)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&display->mode_list);
+ rc = _sde_hdmi_parse_dt(pdev->dev.of_node, display);
+ if (rc)
+ SDE_ERROR("parse dt failed, rc=%d\n", rc);
+
+ mutex_init(&display->display_lock);
+ display->pdev = pdev;
+ platform_set_drvdata(pdev, display);
+ mutex_lock(&sde_hdmi_list_lock);
+ list_add(&display->list, &sde_hdmi_list);
+ mutex_unlock(&sde_hdmi_list_lock);
+ rc = sde_hdmi_dev_init(display);
+ if (rc) {
+ SDE_ERROR("dev init failed, rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = component_add(&pdev->dev, &sde_hdmi_comp_ops);
+ if (rc) {
+ pr_err("component add failed, rc=%d\n", rc);
+ goto out;
+ }
+ return 0;
+
+out:
+ mutex_lock(&sde_hdmi_list_lock);
+ list_del(&display->list);
+ mutex_unlock(&sde_hdmi_list_lock);
+ devm_kfree(&pdev->dev, display);
+ return rc;
+}
+
+static int _sde_hdmi_dev_remove(struct platform_device *pdev)
+{
+ struct sde_hdmi *display;
+ struct sde_hdmi *pos, *tmp;
+ struct drm_display_mode *mode, *n;
+
+ if (!pdev) {
+ SDE_ERROR("Invalid device\n");
+ return -EINVAL;
+ }
+
+ display = platform_get_drvdata(pdev);
+
+ mutex_lock(&sde_hdmi_list_lock);
+ list_for_each_entry_safe(pos, tmp, &sde_hdmi_list, list) {
+ if (pos == display) {
+ list_del(&display->list);
+ break;
+ }
+ }
+ mutex_unlock(&sde_hdmi_list_lock);
+
+ list_for_each_entry_safe(mode, n, &display->mode_list, head) {
+ list_del(&mode->head);
+ kfree(mode);
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, display);
+ return 0;
+}
+
+static struct platform_driver sde_hdmi_driver = {
+ .probe = _sde_hdmi_dev_probe,
+ .remove = _sde_hdmi_dev_remove,
+ .driver = {
+ .name = "sde_hdmi",
+ .of_match_table = sde_hdmi_dt_match,
+ },
+};
+
+int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
+{
+ int rc = 0;
+ struct msm_drm_private *priv = NULL;
+ struct hdmi *hdmi;
+ struct platform_device *pdev;
+
+ DBG("");
+ if (!display || !display->drm_dev || !enc) {
+ SDE_ERROR("display=%p or enc=%p or drm_dev is NULL\n",
+ display, enc);
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+ priv = display->drm_dev->dev_private;
+ hdmi = display->ctrl.ctrl;
+
+ if (!priv || !hdmi) {
+ SDE_ERROR("priv=%p or hdmi=%p is NULL\n",
+ priv, hdmi);
+ mutex_unlock(&display->display_lock);
+ return -EINVAL;
+ }
+
+ pdev = hdmi->pdev;
+ hdmi->dev = display->drm_dev;
+ hdmi->encoder = enc;
+
+ hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
+ hdmi->bridge = sde_hdmi_bridge_init(hdmi);
+ if (IS_ERR(hdmi->bridge)) {
+ rc = PTR_ERR(hdmi->bridge);
+ SDE_ERROR("failed to create HDMI bridge: %d\n", rc);
+ hdmi->bridge = NULL;
+ goto error;
+ }
+ hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!hdmi->irq) {
+ /* irq_of_parse_and_map() returns 0 on failure */
+ rc = -EINVAL;
+ SDE_ERROR("failed to get irq\n");
+ goto error;
+ }
+
+ rc = devm_request_irq(&pdev->dev, hdmi->irq,
+ _sde_hdmi_irq, IRQF_TRIGGER_HIGH,
+ "sde_hdmi_isr", display);
+ if (rc < 0) {
+ SDE_ERROR("failed to request IRQ%u: %d\n",
+ hdmi->irq, rc);
+ goto error;
+ }
+
+ enc->bridge = hdmi->bridge;
+ priv->bridges[priv->num_bridges++] = hdmi->bridge;
+
+ mutex_unlock(&display->display_lock);
+ return 0;
+
+error:
+ /* bridge is normally destroyed by drm: */
+ if (hdmi->bridge) {
+ hdmi_bridge_destroy(hdmi->bridge);
+ hdmi->bridge = NULL;
+ }
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+ int rc = 0;
+
+ if (!display) {
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int __init sde_hdmi_register(void)
+{
+ int rc = 0;
+
+ DBG("");
+ rc = platform_driver_register(&sde_hdmi_driver);
+ return rc;
+}
+
+static void __exit sde_hdmi_unregister(void)
+{
+ platform_driver_unregister(&sde_hdmi_driver);
+}
+
+module_init(sde_hdmi_register);
+module_exit(sde_hdmi_unregister);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
new file mode 100644
index 000000000000..1c13d9f875f2
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_HDMI_H_
+#define _SDE_HDMI_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/msm_ext_display.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "hdmi.h"
+
+/**
+ * struct sde_hdmi_info - defines hdmi display properties
+ * @display_type: Display type as defined by device tree.
+ * @is_hot_pluggable: Can panel be hot plugged.
+ * @is_connected: Is panel connected.
+ * @is_edid_supported: Does panel support reading EDID information.
+ * @width_mm: Physical width of panel in millimeters.
+ * @height_mm: Physical height of panel in millimeters.
+ */
+struct sde_hdmi_info {
+ const char *display_type;
+
+ /* HPD */
+ bool is_hot_pluggable;
+ bool is_connected;
+ bool is_edid_supported;
+
+ /* Physical properties */
+ u32 width_mm;
+ u32 height_mm;
+};
+
+/**
+ * struct sde_hdmi_ctrl - hdmi ctrl/phy information for the display
+ * @ctrl: Handle to the HDMI controller device.
+ * @ctrl_of_node: Device-tree phandle to the HDMI controller device.
+ * @hdmi_ctrl_idx: HDMI controller instance id.
+ */
+struct sde_hdmi_ctrl {
+ /* controller info */
+ struct hdmi *ctrl;
+ struct device_node *ctrl_of_node;
+ u32 hdmi_ctrl_idx;
+};
+
+/**
+ * struct sde_hdmi - hdmi display information
+ * @pdev: Pointer to platform device.
+ * @drm_dev: DRM device associated with the display.
+ * @name: Name of the display.
+ * @display_type: Display type as defined in device tree.
+ * @list: List pointer.
+ * @display_lock: Mutex for sde_hdmi interface.
+ * @ctrl: Controller information for HDMI display.
+ * @non_pluggable: If HDMI display is non pluggable
+ * @num_of_modes: Number of modes supported by display if non pluggable.
+ * @mode_list: Mode list if non pluggable.
+ * @connected: If HDMI display is connected.
+ * @is_tpg_enabled: TPG state.
+ * @hpd_work: HPD work structure.
+ * @root: Debug fs root entry.
+ */
+struct sde_hdmi {
+ struct platform_device *pdev;
+ struct drm_device *drm_dev;
+
+ const char *name;
+ const char *display_type;
+ struct list_head list;
+ struct mutex display_lock;
+
+ struct sde_hdmi_ctrl ctrl;
+
+ bool non_pluggable;
+ u32 num_of_modes;
+ struct list_head mode_list;
+ bool connected;
+ bool is_tpg_enabled;
+
+ struct work_struct hpd_work;
+
+ /* DEBUG FS */
+ struct dentry *root;
+};
+
+#ifdef CONFIG_DRM_SDE_HDMI
+/**
+ * sde_hdmi_get_num_of_displays() - returns number of display devices
+ * supported.
+ *
+ * Return: number of displays.
+ */
+u32 sde_hdmi_get_num_of_displays(void);
+
+/**
+ * sde_hdmi_get_displays() - returns the display list that's available.
+ * @display_array: Pointer to display list
+ * @max_display_count: Number of maximum displays in the list
+ *
+ * Return: number of available displays.
+ */
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count);
+
+/**
+ * sde_hdmi_connector_pre_deinit()- perform additional deinitialization steps
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+ void *display);
+
+/**
+ * sde_hdmi_connector_post_init()- perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display);
+
+/**
+ * sde_hdmi_connector_detect()- determine if connector is connected
+ * @connector: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ *
+ * Return: connector status
+ */
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display);
+
+/**
+ * sde_hdmi_connector_get_modes - add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ *
+ * Returns: Number of modes added
+ */
+int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+ void *display);
+
+/**
+ * sde_hdmi_mode_valid - determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ *
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display);
+
+/**
+ * sde_hdmi_dev_init() - Initializes the display device
+ * @display: Handle to the display.
+ *
+ * Initialization will acquire references to the resources required for the
+ * display hardware to function.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_init(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_dev_deinit() - De-initializes the display device
+ * @display: Handle to the display.
+ *
+ * All the resources acquired during device init will be released.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_drm_init() - initializes DRM objects for the display device.
+ * @display: Handle to the display.
+ * @encoder: Pointer to the encoder object which is connected to the
+ * display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_init(struct sde_hdmi *display,
+ struct drm_encoder *enc);
+
+/**
+ * sde_hdmi_drm_deinit() - destroys DRM objects associated with the display
+ * @display: Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_get_info() - returns the display properties
+ * @display: Handle to the display.
+ * @info: Pointer to the structure where info is stored.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_get_info(struct msm_display_info *info,
+ void *display);
+
+/**
+ * sde_hdmi_bridge_init() - init sde hdmi bridge
+ * @hdmi: Handle to the hdmi.
+ *
+ * Return: struct drm_bridge *.
+ */
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi);
+
+/**
+ * sde_hdmi_set_mode() - Set HDMI mode API.
+ * @hdmi: Handle to the hdmi.
+ * @power_on: Power on/off request.
+ *
+ * Return: void.
+ */
+void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+
+/**
+ * sde_hdmi_audio_on() - enable hdmi audio.
+ * @hdmi: Handle to the hdmi.
+ * @params: audio setup parameters from codec.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_audio_on(struct hdmi *hdmi,
+ struct msm_ext_disp_audio_setup_params *params);
+
+/**
+ * sde_hdmi_audio_off() - disable hdmi audio.
+ * @hdmi: Handle to the hdmi.
+ *
+ * Return: void.
+ */
+void sde_hdmi_audio_off(struct hdmi *hdmi);
+
+/**
+ * sde_hdmi_config_avmute() - mute hdmi.
+ * @hdmi: Handle to the hdmi.
+ * @set: enable/disable avmute.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set);
+#else /*#ifdef CONFIG_DRM_SDE_HDMI*/
+
+static inline u32 sde_hdmi_get_num_of_displays(void)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_get_displays(void **display_array,
+ u32 max_display_count)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+ void *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ return 0;
+}
+
+static inline enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display)
+{
+ return connector_status_disconnected;
+}
+
+static inline int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+ void *display)
+{
+ return 0;
+}
+
+static inline enum drm_mode_status sde_hdmi_mode_valid(
+ struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display)
+{
+ return MODE_OK;
+}
+
+static inline int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_drm_init(struct sde_hdmi *display,
+ struct drm_encoder *enc)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_get_info(struct msm_display_info *info,
+ void *display)
+{
+ return 0;
+}
+#endif /*#else of CONFIG_DRM_SDE_HDMI*/
+#endif /* _SDE_HDMI_H_ */
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
new file mode 100644
index 000000000000..13ea49cfa42d
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
@@ -0,0 +1,393 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/switch.h>
+#include <linux/gcd.h>
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_hdmi.h"
+#include "sde_hdmi_regs.h"
+#include "hdmi.h"
+
+#define HDMI_AUDIO_INFO_FRAME_PACKET_HEADER 0x84
+#define HDMI_AUDIO_INFO_FRAME_PACKET_VERSION 0x1
+#define HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH 0x0A
+
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_MHZ_TO_HZ 1000000
+#define HDMI_ACR_N_MULTIPLIER 128
+#define DEFAULT_AUDIO_SAMPLE_RATE_HZ 48000
+
+/* Supported HDMI Audio channels */
+enum hdmi_audio_channels {
+ AUDIO_CHANNEL_2 = 2,
+ AUDIO_CHANNEL_3,
+ AUDIO_CHANNEL_4,
+ AUDIO_CHANNEL_5,
+ AUDIO_CHANNEL_6,
+ AUDIO_CHANNEL_7,
+ AUDIO_CHANNEL_8,
+};
+
+/* parameters for clock regeneration */
+struct hdmi_audio_acr {
+ u32 n;
+ u32 cts;
+};
+
+enum hdmi_audio_sample_rates {
+ AUDIO_SAMPLE_RATE_32KHZ,
+ AUDIO_SAMPLE_RATE_44_1KHZ,
+ AUDIO_SAMPLE_RATE_48KHZ,
+ AUDIO_SAMPLE_RATE_88_2KHZ,
+ AUDIO_SAMPLE_RATE_96KHZ,
+ AUDIO_SAMPLE_RATE_176_4KHZ,
+ AUDIO_SAMPLE_RATE_192KHZ,
+ AUDIO_SAMPLE_RATE_MAX
+};
+
+struct sde_hdmi_audio {
+ struct hdmi *hdmi;
+ struct msm_ext_disp_audio_setup_params params;
+ u32 pclk;
+};
+
+static void _sde_hdmi_audio_get_audio_sample_rate(u32 *sample_rate_hz)
+{
+ u32 rate = *sample_rate_hz;
+
+ switch (rate) {
+ case 32000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_32KHZ;
+ break;
+ case 44100:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_44_1KHZ;
+ break;
+ case 48000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_48KHZ;
+ break;
+ case 88200:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_88_2KHZ;
+ break;
+ case 96000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_96KHZ;
+ break;
+ case 176400:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_176_4KHZ;
+ break;
+ case 192000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ SDE_ERROR("%d unchanged\n", rate);
+ break;
+ }
+}
+
+static void _sde_hdmi_audio_get_acr_param(u32 pclk, u32 fs,
+ struct hdmi_audio_acr *acr)
+{
+ u32 div, mul;
+
+ if (!acr) {
+ SDE_ERROR("invalid data\n");
+ return;
+ }
+
+ /*
+ * as per HDMI specification, N/CTS = (128*fs)/pclk.
+ * get the ratio using this formula.
+ */
+ acr->n = HDMI_ACR_N_MULTIPLIER * fs;
+ acr->cts = pclk;
+
+ /* get the greatest common divisor for the ratio */
+ div = gcd(acr->n, acr->cts);
+
+ /* get the n and cts values wrt N/CTS formula */
+ acr->n /= div;
+ acr->cts /= div;
+
+ /*
+ * as per HDMI specification, 300 <= 128*fs/N <= 1500
+ * with a target of 128*fs/N = 1000. To get closest
+ * value without truncating fractional values, find
+ * the corresponding multiplier
+ */
+ mul = ((HDMI_ACR_N_MULTIPLIER * fs / HDMI_KHZ_TO_HZ)
+ + (acr->n - 1)) / acr->n;
+
+ acr->n *= mul;
+ acr->cts *= mul;
+}
+
+static void _sde_hdmi_audio_acr_enable(struct sde_hdmi_audio *audio)
+{
+ struct hdmi_audio_acr acr;
+ struct msm_ext_disp_audio_setup_params *params;
+ u32 pclk, layout, multiplier = 1, sample_rate;
+ u32 acr_pkt_ctl, aud_pkt_ctl2, acr_reg_cts, acr_reg_n;
+ struct hdmi *hdmi;
+
+ hdmi = audio->hdmi;
+ params = &audio->params;
+ pclk = audio->pclk;
+ sample_rate = params->sample_rate_hz;
+
+ _sde_hdmi_audio_get_acr_param(pclk, sample_rate, &acr);
+ _sde_hdmi_audio_get_audio_sample_rate(&sample_rate);
+
+ layout = (params->num_of_channels == AUDIO_CHANNEL_2) ? 0 : 1;
+
+ SDE_DEBUG("n=%u, cts=%u, layout=%u\n", acr.n, acr.cts, layout);
+
+ /* AUDIO_PRIORITY | SOURCE */
+ acr_pkt_ctl = BIT(31) | BIT(8);
+
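+ /*
+ * Per the MSM ACR register layout, the SELECT field (bits [5:4]) of
+ * ACR_PKT_CTRL picks the 32/44.1/48 kHz N/CTS register bank, and the
+ * CTS value occupies the upper bits of the ACR_xx_0 register, hence
+ * the << 12 shifts below. Higher sample rates reuse the 44.1/48 kHz
+ * banks together with an N_MULTIPLE factor.
+ */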
+ switch (sample_rate) {
+ case AUDIO_SAMPLE_RATE_44_1KHZ:
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ case AUDIO_SAMPLE_RATE_48KHZ:
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_192KHZ:
+ multiplier = 4;
+ acr.n >>= 2;
+
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_176_4KHZ:
+ multiplier = 4;
+ acr.n >>= 2;
+
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ case AUDIO_SAMPLE_RATE_96KHZ:
+ multiplier = 2;
+ acr.n >>= 1;
+
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_88_2KHZ:
+ multiplier = 2;
+ acr.n >>= 1;
+
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ default:
+ multiplier = 1;
+
+ acr_pkt_ctl |= 0x1 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_32_0;
+ acr_reg_n = HDMI_ACR_32_1;
+ break;
+ }
+
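+ /* AUDIO_PKT_CTRL2: BIT(0) overrides the default sample layout and
+ * bit 1 selects layout 1 (multi-channel) over layout 0 (2-channel).
+ */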
+ aud_pkt_ctl2 = BIT(0) | (layout << 1);
+
+ /* N_MULTIPLE(multiplier) */
+ acr_pkt_ctl &= ~(7 << 16);
+ acr_pkt_ctl |= (multiplier & 0x7) << 16;
+
+ /* SEND | CONT */
+ acr_pkt_ctl |= BIT(0) | BIT(1);
+
+ hdmi_write(hdmi, acr_reg_cts, acr.cts);
+ hdmi_write(hdmi, acr_reg_n, acr.n);
+ hdmi_write(hdmi, HDMI_ACR_PKT_CTRL, acr_pkt_ctl);
+ hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL2, aud_pkt_ctl2);
+}
+
+static void _sde_hdmi_audio_acr_setup(struct sde_hdmi_audio *audio, bool on)
+{
+ if (on)
+ _sde_hdmi_audio_acr_enable(audio);
+ else
+ hdmi_write(audio->hdmi, HDMI_ACR_PKT_CTRL, 0);
+}
+
+static void _sde_hdmi_audio_infoframe_setup(struct sde_hdmi_audio *audio,
+ bool enabled)
+{
+ struct hdmi *hdmi = audio->hdmi;
+ u32 channels, channel_allocation, level_shift, down_mix, layout;
+ u32 hdmi_debug_reg = 0, audio_info_0_reg = 0, audio_info_1_reg = 0;
+ u32 audio_info_ctrl_reg, aud_pck_ctrl_2_reg;
+ u32 check_sum, sample_present;
+
+ audio_info_ctrl_reg = hdmi_read(hdmi, HDMI_INFOFRAME_CTRL0);
+ audio_info_ctrl_reg &= ~0xF0;
+
+ if (!enabled)
+ goto end;
+
+ channels = audio->params.num_of_channels - 1;
+ channel_allocation = audio->params.channel_allocation;
+ level_shift = audio->params.level_shift;
+ down_mix = audio->params.down_mix;
+ sample_present = audio->params.sample_present;
+
+ layout = (audio->params.num_of_channels == AUDIO_CHANNEL_2) ? 0 : 1;
+ aud_pck_ctrl_2_reg = BIT(0) | (layout << 1);
+ hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
+
+ audio_info_1_reg |= channel_allocation & 0xFF;
+ audio_info_1_reg |= ((level_shift & 0xF) << 11);
+ audio_info_1_reg |= ((down_mix & 0x1) << 15);
+
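+ /*
+ * Per CEA-861, the infoframe checksum byte makes the byte sum of the
+ * header and payload equal zero modulo 256.
+ */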
+ check_sum = 0;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_HEADER;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_VERSION;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH;
+ check_sum += channels;
+ check_sum += channel_allocation;
+ check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
+ check_sum &= 0xFF;
+ check_sum = (u8) (256 - check_sum);
+
+ audio_info_0_reg |= check_sum & 0xFF;
+ audio_info_0_reg |= ((channels & 0x7) << 8);
+
+ /* Enable Audio InfoFrame Transmission */
+ audio_info_ctrl_reg |= 0xF0;
+
+ if (layout) {
+ /* Set the Layout bit */
+ hdmi_debug_reg |= BIT(4);
+
+ /* Set the Sample Present bits */
+ hdmi_debug_reg |= sample_present & 0xF;
+ }
+end:
+ hdmi_write(hdmi, HDMI_DEBUG, hdmi_debug_reg);
+ hdmi_write(hdmi, HDMI_AUDIO_INFO0, audio_info_0_reg);
+ hdmi_write(hdmi, HDMI_AUDIO_INFO1, audio_info_1_reg);
+ hdmi_write(hdmi, HDMI_INFOFRAME_CTRL0, audio_info_ctrl_reg);
+}
+
+int sde_hdmi_audio_on(struct hdmi *hdmi,
+ struct msm_ext_disp_audio_setup_params *params)
+{
+ struct sde_hdmi_audio audio;
+ int rc = 0;
+
+ if (!hdmi) {
+ SDE_ERROR("invalid HDMI Ctrl\n");
+ rc = -ENODEV;
+ goto end;
+ }
+
+ audio.pclk = hdmi->pixclock;
+ audio.params = *params;
+ audio.hdmi = hdmi;
+
+ if (!audio.params.num_of_channels) {
+ audio.params.sample_rate_hz = DEFAULT_AUDIO_SAMPLE_RATE_HZ;
+ audio.params.num_of_channels = AUDIO_CHANNEL_2;
+ }
+
+ _sde_hdmi_audio_acr_setup(&audio, true);
+ _sde_hdmi_audio_infoframe_setup(&audio, true);
+
+ SDE_DEBUG("HDMI Audio: Enabled\n");
+end:
+ return rc;
+}
+
+void sde_hdmi_audio_off(struct hdmi *hdmi)
+{
+ struct sde_hdmi_audio audio;
+
+ if (!hdmi) {
+ SDE_ERROR("invalid HDMI Ctrl\n");
+ return;
+ }
+
+ audio.hdmi = hdmi;
+
+ _sde_hdmi_audio_infoframe_setup(&audio, false);
+ _sde_hdmi_audio_acr_setup(&audio, false);
+
+ SDE_DEBUG("HDMI Audio: Disabled\n");
+}
+
+int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set)
+{
+ u32 av_mute_status;
+ bool av_pkt_en = false;
+
+ if (!hdmi) {
+ SDE_ERROR("invalid HDMI Ctrl\n");
+ return -ENODEV;
+ }
+
+ av_mute_status = hdmi_read(hdmi, HDMI_GC);
+
+ if (set) {
+ if (!(av_mute_status & BIT(0))) {
+ hdmi_write(hdmi, HDMI_GC, av_mute_status | BIT(0));
+ av_pkt_en = true;
+ }
+ } else {
+ if (av_mute_status & BIT(0)) {
+ hdmi_write(hdmi, HDMI_GC, av_mute_status & ~BIT(0));
+ av_pkt_en = true;
+ }
+ }
+
+ /* enable transmission of the GC packet that carries AVMUTE */
+ if (av_pkt_en)
+ hdmi_write(hdmi, HDMI_VBI_PKT_CTRL,
+ hdmi_read(hdmi, HDMI_VBI_PKT_CTRL) | BIT(4) | BIT(5));
+
+ SDE_DEBUG("AVMUTE %s\n", set ? "set" : "cleared");
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
new file mode 100644
index 000000000000..ecfff7e88689
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_hdmi.h"
+#include "hdmi.h"
+
+struct sde_hdmi_bridge {
+ struct drm_bridge base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_bridge(x) container_of(x, struct sde_hdmi_bridge, base)
+
+/* for AVI program */
+#define HDMI_AVI_INFOFRAME_BUFFER_SIZE \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE)
+#define HDMI_VS_INFOFRAME_BUFFER_SIZE (HDMI_INFOFRAME_HEADER_SIZE + 6)
+#define HDMI_SPD_INFOFRAME_BUFFER_SIZE \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE)
+#define HDMI_DEFAULT_VENDOR_NAME "unknown"
+#define HDMI_DEFAULT_PRODUCT_NAME "msm"
+#define LEFT_SHIFT_BYTE(x) ((x) << 8)
+#define LEFT_SHIFT_WORD(x) ((x) << 16)
+#define LEFT_SHIFT_24BITS(x) ((x) << 24)
+#define HDMI_AVI_IFRAME_LINE_NUMBER 1
+#define HDMI_VENDOR_IFRAME_LINE_NUMBER 3
+
+void _sde_hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->pwr_regs[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+
+ if (config->pwr_clk_cnt > 0) {
+ DRM_DEBUG("pixclock: %lu", hdmi->pixclock);
+ ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
+ if (ret) {
+ SDE_ERROR("failed to set pixel clk: %s (%d)\n",
+ config->pwr_clk_names[0], ret);
+ }
+ }
+
+ for (i = 0; i < config->pwr_clk_cnt; i++) {
+ ret = clk_prepare_enable(hdmi->pwr_clks[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable pwr clk: %s (%d)\n",
+ config->pwr_clk_names[i], ret);
+ }
+ }
+}
+
+static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ /* Wait for vsync */
+ msleep(20);
+
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->pwr_clks[i]);
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->pwr_regs[i]);
+ if (ret) {
+ SDE_ERROR("failed to disable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+}
+
+static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ DRM_DEBUG("power up");
+
+ if (!hdmi->power_on) {
+ _sde_hdmi_bridge_power_on(bridge);
+ hdmi->power_on = true;
+ hdmi_audio_update(hdmi);
+ }
+
+ if (phy)
+ phy->funcs->powerup(phy, hdmi->pixclock);
+
+ sde_hdmi_set_mode(hdmi, true);
+
+ if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+ hdmi_hdcp_ctrl_on(hdmi->hdcp_ctrl);
+}
+
+static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+}
+
+static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+}
+
+static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+ hdmi_hdcp_ctrl_off(hdmi->hdcp_ctrl);
+
+ DRM_DEBUG("power down");
+ sde_hdmi_set_mode(hdmi, false);
+
+ if (phy)
+ phy->funcs->powerdown(phy);
+
+ if (hdmi->power_on) {
+ _sde_hdmi_bridge_power_off(bridge);
+ hdmi->power_on = false;
+ hdmi_audio_update(hdmi);
+ }
+}
+
+static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
+{
+ u8 avi_iframe[HDMI_AVI_INFOFRAME_BUFFER_SIZE] = {0};
+ u8 *avi_frame = &avi_iframe[HDMI_INFOFRAME_HEADER_SIZE];
+ u8 checksum;
+ u32 reg_val;
+ struct hdmi_avi_infoframe info;
+
+ drm_hdmi_avi_infoframe_from_display_mode(&info, mode);
+ hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe));
+ checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1];
+
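+ /*
+ * Pack the AVI infoframe into the 32-bit AVI_INFO registers, four
+ * payload bytes per register little-endian, with the checksum in
+ * the low byte of the first register.
+ */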
+ reg_val = checksum |
+ LEFT_SHIFT_BYTE(avi_frame[0]) |
+ LEFT_SHIFT_WORD(avi_frame[1]) |
+ LEFT_SHIFT_24BITS(avi_frame[2]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(0), reg_val);
+
+ reg_val = avi_frame[3] |
+ LEFT_SHIFT_BYTE(avi_frame[4]) |
+ LEFT_SHIFT_WORD(avi_frame[5]) |
+ LEFT_SHIFT_24BITS(avi_frame[6]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(1), reg_val);
+
+ reg_val = avi_frame[7] |
+ LEFT_SHIFT_BYTE(avi_frame[8]) |
+ LEFT_SHIFT_WORD(avi_frame[9]) |
+ LEFT_SHIFT_24BITS(avi_frame[10]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(2), reg_val);
+
+ reg_val = avi_frame[11] |
+ LEFT_SHIFT_BYTE(avi_frame[12]) |
+ LEFT_SHIFT_24BITS(avi_iframe[1]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(3), reg_val);
+
+ /* AVI InfoFrame enable (transmit every frame) */
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
+ hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0) | BIT(1) | BIT(0));
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ reg_val &= ~0x3F;
+ reg_val |= HDMI_AVI_IFRAME_LINE_NUMBER;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+static void _sde_hdmi_bridge_set_vs_infoframe(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
+{
+ u8 vs_iframe[HDMI_VS_INFOFRAME_BUFFER_SIZE] = {0};
+ u32 reg_val;
+ struct hdmi_vendor_infoframe info;
+ int rc = 0;
+
+ rc = drm_hdmi_vendor_infoframe_from_display_mode(&info, mode);
+ if (rc < 0) {
+ SDE_DEBUG("don't send vendor infoframe\n");
+ return;
+ }
+ hdmi_vendor_infoframe_pack(&info, vs_iframe, sizeof(vs_iframe));
+
+ reg_val = (info.s3d_struct << 24) | (info.vic << 16) |
+ (vs_iframe[3] << 8) | (vs_iframe[7] << 5) |
+ vs_iframe[2];
+ hdmi_write(hdmi, REG_HDMI_VENSPEC_INFO0, reg_val);
+
+ /* vendor specific info-frame enable (every frame) */
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
+ hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0) | BIT(13) | BIT(12));
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ reg_val &= ~0x3F000000;
+ reg_val |= (HDMI_VENDOR_IFRAME_LINE_NUMBER << 24);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+static void _sde_hdmi_bridge_set_spd_infoframe(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
+{
+ u8 spd_iframe[HDMI_SPD_INFOFRAME_BUFFER_SIZE] = {0};
+ u32 packet_payload, packet_control, packet_header;
+ struct hdmi_spd_infoframe info;
+ int i;
+
+ /* Need to query vendor and product name from platform setup */
+ hdmi_spd_infoframe_init(&info, HDMI_DEFAULT_VENDOR_NAME,
+ HDMI_DEFAULT_PRODUCT_NAME);
+ hdmi_spd_infoframe_pack(&info, spd_iframe, sizeof(spd_iframe));
+
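+ /*
+ * Pack the SPD bytes into the GENERIC1 packet RAM; each byte is
+ * masked to 7 bits to match the downstream register packing.
+ */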
+ packet_header = spd_iframe[0]
+ | LEFT_SHIFT_BYTE(spd_iframe[1] & 0x7f)
+ | LEFT_SHIFT_WORD(spd_iframe[2] & 0x7f);
+ hdmi_write(hdmi, REG_HDMI_GENERIC1_HDR, packet_header);
+
+ for (i = 0; i < MAX_REG_HDMI_GENERIC1_INDEX; i++) {
+ packet_payload = spd_iframe[3 + i * 4]
+ | LEFT_SHIFT_BYTE(spd_iframe[4 + i * 4] & 0x7f)
+ | LEFT_SHIFT_WORD(spd_iframe[5 + i * 4] & 0x7f)
+ | LEFT_SHIFT_24BITS(spd_iframe[6 + i * 4] & 0x7f);
+ hdmi_write(hdmi, REG_HDMI_GENERIC1(i), packet_payload);
+ }
+
+ packet_payload = (spd_iframe[27] & 0x7f)
+ | LEFT_SHIFT_BYTE(spd_iframe[28] & 0x7f);
+ hdmi_write(hdmi, REG_HDMI_GENERIC1(MAX_REG_HDMI_GENERIC1_INDEX),
+ packet_payload);
+
+ /*
+ * GENERIC1_LINE | GENERIC1_CONT | GENERIC1_SEND
+ * Setup HDMI TX generic packet control
+ * Enable this packet to transmit every frame
+ * Enable HDMI TX engine to transmit Generic packet 1
+ */
+ packet_control = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ packet_control |= ((0x1 << 24) | (1 << 5) | (1 << 4));
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, packet_control);
+}
+
+static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ int hstart, hend, vstart, vend;
+ uint32_t frame_ctrl;
+
+ mode = adjusted_mode;
+
+ hdmi->pixclock = mode->clock * 1000;
+
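+ /*
+ * Convert DRM timings into the controller's active-window
+ * coordinates, which are measured from the start of sync; the
+ * vertical values are zero-based, hence the -1.
+ */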
+ hstart = mode->htotal - mode->hsync_start;
+ hend = mode->htotal - mode->hsync_start + mode->hdisplay;
+
+ vstart = mode->vtotal - mode->vsync_start - 1;
+ vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
+
+ DRM_DEBUG(
+ "htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
+ mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
+
+ hdmi_write(hdmi, REG_HDMI_TOTAL,
+ HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
+ HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
+
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
+ HDMI_ACTIVE_HSYNC_START(hstart) |
+ HDMI_ACTIVE_HSYNC_END(hend));
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
+ HDMI_ACTIVE_VSYNC_START(vstart) |
+ HDMI_ACTIVE_VSYNC_END(vend));
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
+ HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
+ } else {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(0) |
+ HDMI_VSYNC_ACTIVE_F2_END(0));
+ }
+
+ frame_ctrl = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
+ DRM_DEBUG("frame_ctrl=%08x\n", frame_ctrl);
+ hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
+
+ /*
+ * Setup info frame
+ * Current drm_edid driver doesn't have all CEA formats defined in
+ * latest CEA-861(CTA-861) spec. So, don't check if mode is CEA mode
+ * in here. Once core framework is updated, the check needs to be
+ * added back.
+ */
+ if (hdmi->hdmi_mode) {
+ _sde_hdmi_bridge_set_avi_infoframe(hdmi, mode);
+ _sde_hdmi_bridge_set_vs_infoframe(hdmi, mode);
+ _sde_hdmi_bridge_set_spd_infoframe(hdmi, mode);
+ DRM_DEBUG("hdmi setup info frame\n");
+ }
+
+ hdmi_audio_update(hdmi);
+}
+
+static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
+ .pre_enable = _sde_hdmi_bridge_pre_enable,
+ .enable = _sde_hdmi_bridge_enable,
+ .disable = _sde_hdmi_bridge_disable,
+ .post_disable = _sde_hdmi_bridge_post_disable,
+ .mode_set = _sde_hdmi_bridge_mode_set,
+};
+
+
+/* initialize bridge */
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi)
+{
+ struct drm_bridge *bridge = NULL;
+ struct sde_hdmi_bridge *sde_hdmi_bridge;
+ int ret;
+
+ sde_hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
+ sizeof(*sde_hdmi_bridge), GFP_KERNEL);
+ if (!sde_hdmi_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sde_hdmi_bridge->hdmi = hdmi;
+
+ bridge = &sde_hdmi_bridge->base;
+ bridge->funcs = &_sde_hdmi_bridge_funcs;
+
+ ret = drm_bridge_attach(hdmi->dev, bridge);
+ if (ret)
+ goto fail;
+
+ return bridge;
+
+fail:
+ if (bridge)
+ _sde_hdmi_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h
new file mode 100644
index 000000000000..f1bff0f08051
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h
@@ -0,0 +1,300 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HDMI_REGS_H
+#define _SDE_HDMI_REGS_H
+
+/* HDMI_TX Registers */
+#define HDMI_CTRL (0x00000000)
+#define HDMI_TEST_PATTERN (0x00000010)
+#define HDMI_RANDOM_PATTERN (0x00000014)
+#define HDMI_PKT_BLK_CTRL (0x00000018)
+#define HDMI_STATUS (0x0000001C)
+#define HDMI_AUDIO_PKT_CTRL (0x00000020)
+#define HDMI_ACR_PKT_CTRL (0x00000024)
+#define HDMI_VBI_PKT_CTRL (0x00000028)
+#define HDMI_INFOFRAME_CTRL0 (0x0000002C)
+#define HDMI_INFOFRAME_CTRL1 (0x00000030)
+#define HDMI_GEN_PKT_CTRL (0x00000034)
+#define HDMI_ACP (0x0000003C)
+#define HDMI_GC (0x00000040)
+#define HDMI_AUDIO_PKT_CTRL2 (0x00000044)
+#define HDMI_ISRC1_0 (0x00000048)
+#define HDMI_ISRC1_1 (0x0000004C)
+#define HDMI_ISRC1_2 (0x00000050)
+#define HDMI_ISRC1_3 (0x00000054)
+#define HDMI_ISRC1_4 (0x00000058)
+#define HDMI_ISRC2_0 (0x0000005C)
+#define HDMI_ISRC2_1 (0x00000060)
+#define HDMI_ISRC2_2 (0x00000064)
+#define HDMI_ISRC2_3 (0x00000068)
+#define HDMI_AVI_INFO0 (0x0000006C)
+#define HDMI_AVI_INFO1 (0x00000070)
+#define HDMI_AVI_INFO2 (0x00000074)
+#define HDMI_AVI_INFO3 (0x00000078)
+#define HDMI_MPEG_INFO0 (0x0000007C)
+#define HDMI_MPEG_INFO1 (0x00000080)
+#define HDMI_GENERIC0_HDR (0x00000084)
+#define HDMI_GENERIC0_0 (0x00000088)
+#define HDMI_GENERIC0_1 (0x0000008C)
+#define HDMI_GENERIC0_2 (0x00000090)
+#define HDMI_GENERIC0_3 (0x00000094)
+#define HDMI_GENERIC0_4 (0x00000098)
+#define HDMI_GENERIC0_5 (0x0000009C)
+#define HDMI_GENERIC0_6 (0x000000A0)
+#define HDMI_GENERIC1_HDR (0x000000A4)
+#define HDMI_GENERIC1_0 (0x000000A8)
+#define HDMI_GENERIC1_1 (0x000000AC)
+#define HDMI_GENERIC1_2 (0x000000B0)
+#define HDMI_GENERIC1_3 (0x000000B4)
+#define HDMI_GENERIC1_4 (0x000000B8)
+#define HDMI_GENERIC1_5 (0x000000BC)
+#define HDMI_GENERIC1_6 (0x000000C0)
+#define HDMI_ACR_32_0 (0x000000C4)
+#define HDMI_ACR_32_1 (0x000000C8)
+#define HDMI_ACR_44_0 (0x000000CC)
+#define HDMI_ACR_44_1 (0x000000D0)
+#define HDMI_ACR_48_0 (0x000000D4)
+#define HDMI_ACR_48_1 (0x000000D8)
+#define HDMI_ACR_STATUS_0 (0x000000DC)
+#define HDMI_ACR_STATUS_1 (0x000000E0)
+#define HDMI_AUDIO_INFO0 (0x000000E4)
+#define HDMI_AUDIO_INFO1 (0x000000E8)
+#define HDMI_CS_60958_0 (0x000000EC)
+#define HDMI_CS_60958_1 (0x000000F0)
+#define HDMI_RAMP_CTRL0 (0x000000F8)
+#define HDMI_RAMP_CTRL1 (0x000000FC)
+#define HDMI_RAMP_CTRL2 (0x00000100)
+#define HDMI_RAMP_CTRL3 (0x00000104)
+#define HDMI_CS_60958_2 (0x00000108)
+#define HDMI_HDCP_CTRL2 (0x0000010C)
+#define HDMI_HDCP_CTRL (0x00000110)
+#define HDMI_HDCP_DEBUG_CTRL (0x00000114)
+#define HDMI_HDCP_INT_CTRL (0x00000118)
+#define HDMI_HDCP_LINK0_STATUS (0x0000011C)
+#define HDMI_HDCP_DDC_CTRL_0 (0x00000120)
+#define HDMI_HDCP_DDC_CTRL_1 (0x00000124)
+#define HDMI_HDCP_DDC_STATUS (0x00000128)
+#define HDMI_HDCP_ENTROPY_CTRL0 (0x0000012C)
+#define HDMI_HDCP_RESET (0x00000130)
+#define HDMI_HDCP_RCVPORT_DATA0 (0x00000134)
+#define HDMI_HDCP_RCVPORT_DATA1 (0x00000138)
+#define HDMI_HDCP_RCVPORT_DATA2_0 (0x0000013C)
+#define HDMI_HDCP_RCVPORT_DATA2_1 (0x00000140)
+#define HDMI_HDCP_RCVPORT_DATA3 (0x00000144)
+#define HDMI_HDCP_RCVPORT_DATA4 (0x00000148)
+#define HDMI_HDCP_RCVPORT_DATA5 (0x0000014C)
+#define HDMI_HDCP_RCVPORT_DATA6 (0x00000150)
+#define HDMI_HDCP_RCVPORT_DATA7 (0x00000154)
+#define HDMI_HDCP_RCVPORT_DATA8 (0x00000158)
+#define HDMI_HDCP_RCVPORT_DATA9 (0x0000015C)
+#define HDMI_HDCP_RCVPORT_DATA10 (0x00000160)
+#define HDMI_HDCP_RCVPORT_DATA11 (0x00000164)
+#define HDMI_HDCP_RCVPORT_DATA12 (0x00000168)
+#define HDMI_VENSPEC_INFO0 (0x0000016C)
+#define HDMI_VENSPEC_INFO1 (0x00000170)
+#define HDMI_VENSPEC_INFO2 (0x00000174)
+#define HDMI_VENSPEC_INFO3 (0x00000178)
+#define HDMI_VENSPEC_INFO4 (0x0000017C)
+#define HDMI_VENSPEC_INFO5 (0x00000180)
+#define HDMI_VENSPEC_INFO6 (0x00000184)
+#define HDMI_HDCP_DEBUG (0x00000194)
+#define HDMI_TMDS_CTRL_CHAR (0x0000019C)
+#define HDMI_TMDS_CTRL_SEL (0x000001A4)
+#define HDMI_TMDS_SYNCCHAR01 (0x000001A8)
+#define HDMI_TMDS_SYNCCHAR23 (0x000001AC)
+#define HDMI_TMDS_DEBUG (0x000001B4)
+#define HDMI_TMDS_CTL_BITS (0x000001B8)
+#define HDMI_TMDS_DCBAL_CTRL (0x000001BC)
+#define HDMI_TMDS_DCBAL_CHAR (0x000001C0)
+#define HDMI_TMDS_CTL01_GEN (0x000001C8)
+#define HDMI_TMDS_CTL23_GEN (0x000001CC)
+#define HDMI_AUDIO_CFG (0x000001D0)
+#define HDMI_DEBUG (0x00000204)
+#define HDMI_USEC_REFTIMER (0x00000208)
+#define HDMI_DDC_CTRL (0x0000020C)
+#define HDMI_DDC_ARBITRATION (0x00000210)
+#define HDMI_DDC_INT_CTRL (0x00000214)
+#define HDMI_DDC_SW_STATUS (0x00000218)
+#define HDMI_DDC_HW_STATUS (0x0000021C)
+#define HDMI_DDC_SPEED (0x00000220)
+#define HDMI_DDC_SETUP (0x00000224)
+#define HDMI_DDC_TRANS0 (0x00000228)
+#define HDMI_DDC_TRANS1 (0x0000022C)
+#define HDMI_DDC_TRANS2 (0x00000230)
+#define HDMI_DDC_TRANS3 (0x00000234)
+#define HDMI_DDC_DATA (0x00000238)
+#define HDMI_HDCP_SHA_CTRL (0x0000023C)
+#define HDMI_HDCP_SHA_STATUS (0x00000240)
+#define HDMI_HDCP_SHA_DATA (0x00000244)
+#define HDMI_HDCP_SHA_DBG_M0_0 (0x00000248)
+#define HDMI_HDCP_SHA_DBG_M0_1 (0x0000024C)
+#define HDMI_HPD_INT_STATUS (0x00000250)
+#define HDMI_HPD_INT_CTRL (0x00000254)
+#define HDMI_HPD_CTRL (0x00000258)
+#define HDMI_HDCP_ENTROPY_CTRL1 (0x0000025C)
+#define HDMI_HDCP_SW_UPPER_AN (0x00000260)
+#define HDMI_HDCP_SW_LOWER_AN (0x00000264)
+#define HDMI_CRC_CTRL (0x00000268)
+#define HDMI_VID_CRC (0x0000026C)
+#define HDMI_AUD_CRC (0x00000270)
+#define HDMI_VBI_CRC (0x00000274)
+#define HDMI_DDC_REF (0x0000027C)
+#define HDMI_HDCP_SW_UPPER_AKSV (0x00000284)
+#define HDMI_HDCP_SW_LOWER_AKSV (0x00000288)
+#define HDMI_CEC_CTRL (0x0000028C)
+#define HDMI_CEC_WR_DATA (0x00000290)
+#define HDMI_CEC_RETRANSMIT (0x00000294)
+#define HDMI_CEC_STATUS (0x00000298)
+#define HDMI_CEC_INT (0x0000029C)
+#define HDMI_CEC_ADDR (0x000002A0)
+#define HDMI_CEC_TIME (0x000002A4)
+#define HDMI_CEC_REFTIMER (0x000002A8)
+#define HDMI_CEC_RD_DATA (0x000002AC)
+#define HDMI_CEC_RD_FILTER (0x000002B0)
+#define HDMI_ACTIVE_H (0x000002B4)
+#define HDMI_ACTIVE_V (0x000002B8)
+#define HDMI_ACTIVE_V_F2 (0x000002BC)
+#define HDMI_TOTAL (0x000002C0)
+#define HDMI_V_TOTAL_F2 (0x000002C4)
+#define HDMI_FRAME_CTRL (0x000002C8)
+#define HDMI_AUD_INT (0x000002CC)
+#define HDMI_DEBUG_BUS_CTRL (0x000002D0)
+#define HDMI_PHY_CTRL (0x000002D4)
+#define HDMI_CEC_WR_RANGE (0x000002DC)
+#define HDMI_CEC_RD_RANGE (0x000002E0)
+#define HDMI_VERSION (0x000002E4)
+#define HDMI_BIST_ENABLE (0x000002F4)
+#define HDMI_TIMING_ENGINE_EN (0x000002F8)
+#define HDMI_INTF_CONFIG (0x000002FC)
+#define HDMI_HSYNC_CTL (0x00000300)
+#define HDMI_VSYNC_PERIOD_F0 (0x00000304)
+#define HDMI_VSYNC_PERIOD_F1 (0x00000308)
+#define HDMI_VSYNC_PULSE_WIDTH_F0 (0x0000030C)
+#define HDMI_VSYNC_PULSE_WIDTH_F1 (0x00000310)
+#define HDMI_DISPLAY_V_START_F0 (0x00000314)
+#define HDMI_DISPLAY_V_START_F1 (0x00000318)
+#define HDMI_DISPLAY_V_END_F0 (0x0000031C)
+#define HDMI_DISPLAY_V_END_F1 (0x00000320)
+#define HDMI_ACTIVE_V_START_F0 (0x00000324)
+#define HDMI_ACTIVE_V_START_F1 (0x00000328)
+#define HDMI_ACTIVE_V_END_F0 (0x0000032C)
+#define HDMI_ACTIVE_V_END_F1 (0x00000330)
+#define HDMI_DISPLAY_HCTL (0x00000334)
+#define HDMI_ACTIVE_HCTL (0x00000338)
+#define HDMI_HSYNC_SKEW (0x0000033C)
+#define HDMI_POLARITY_CTL (0x00000340)
+#define HDMI_TPG_MAIN_CONTROL (0x00000344)
+#define HDMI_TPG_VIDEO_CONFIG (0x00000348)
+#define HDMI_TPG_COMPONENT_LIMITS (0x0000034C)
+#define HDMI_TPG_RECTANGLE (0x00000350)
+#define HDMI_TPG_INITIAL_VALUE (0x00000354)
+#define HDMI_TPG_BLK_WHT_PATTERN_FRAMES (0x00000358)
+#define HDMI_TPG_RGB_MAPPING (0x0000035C)
+#define HDMI_CEC_COMPL_CTL (0x00000360)
+#define HDMI_CEC_RD_START_RANGE (0x00000364)
+#define HDMI_CEC_RD_TOTAL_RANGE (0x00000368)
+#define HDMI_CEC_RD_ERR_RESP_LO (0x0000036C)
+#define HDMI_CEC_WR_CHECK_CONFIG (0x00000370)
+#define HDMI_INTERNAL_TIMING_MODE (0x00000374)
+#define HDMI_CTRL_SW_RESET (0x00000378)
+#define HDMI_CTRL_AUDIO_RESET (0x0000037C)
+#define HDMI_SCRATCH (0x00000380)
+#define HDMI_CLK_CTRL (0x00000384)
+#define HDMI_CLK_ACTIVE (0x00000388)
+#define HDMI_VBI_CFG (0x0000038C)
+#define HDMI_DDC_INT_CTRL0 (0x00000430)
+#define HDMI_DDC_INT_CTRL1 (0x00000434)
+#define HDMI_DDC_INT_CTRL2 (0x00000438)
+#define HDMI_DDC_INT_CTRL3 (0x0000043C)
+#define HDMI_DDC_INT_CTRL4 (0x00000440)
+#define HDMI_DDC_INT_CTRL5 (0x00000444)
+#define HDMI_HDCP2P2_DDC_CTRL (0x0000044C)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL (0x00000450)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL2 (0x00000454)
+#define HDMI_HDCP2P2_DDC_STATUS (0x00000458)
+#define HDMI_SCRAMBLER_STATUS_DDC_CTRL (0x00000464)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL (0x00000468)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2 (0x0000046C)
+#define HDMI_SCRAMBLER_STATUS_DDC_STATUS (0x00000470)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS (0x00000474)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS2 (0x00000478)
+#define HDMI_HW_DDC_CTRL (0x000004CC)
+#define HDMI_HDCP2P2_DDC_SW_TRIGGER (0x000004D0)
+#define HDMI_HDCP_STATUS (0x00000500)
+#define HDMI_HDCP_INT_CTRL2 (0x00000504)
+
+/* HDMI PHY Registers */
+#define HDMI_PHY_ANA_CFG0 (0x00000000)
+#define HDMI_PHY_ANA_CFG1 (0x00000004)
+#define HDMI_PHY_PD_CTRL0 (0x00000010)
+#define HDMI_PHY_PD_CTRL1 (0x00000014)
+#define HDMI_PHY_BIST_CFG0 (0x00000034)
+#define HDMI_PHY_BIST_PATN0 (0x0000003C)
+#define HDMI_PHY_BIST_PATN1 (0x00000040)
+#define HDMI_PHY_BIST_PATN2 (0x00000044)
+#define HDMI_PHY_BIST_PATN3 (0x00000048)
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB (0x000000F8)
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB (0x000000FC)
+#define QFPROM_RAW_VERSION_4 (0x000000A8)
+#define SEC_CTRL_HW_VERSION (0x00006000)
+#define HDCP_KSV_LSB (0x000060D8)
+#define HDCP_KSV_MSB (0x000060DC)
+#define HDCP_KSV_VERSION_4_OFFSET (0x00000014)
+
+/* SEC_CTRL version that supports HDCP SEL */
+#define HDCP_SEL_MIN_SEC_VERSION (0x50010000)
+
+#define LPASS_LPAIF_RDDMA_CTL0 (0xFE152000)
+#define LPASS_LPAIF_RDDMA_PER_CNT0 (0x00000014)
+
+/* TX major version that supports scrambling */
+#define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04
+
+/* TX major versions */
+#define HDMI_TX_VERSION_4 4
+#define HDMI_TX_VERSION_3 3
+
+/* HDMI SCDC register offsets */
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_UPDATE_1 0x11
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS_0 0x40
+#define HDMI_SCDC_STATUS_FLAGS_1 0x41
+#define HDMI_SCDC_ERR_DET_0_L 0x50
+#define HDMI_SCDC_ERR_DET_0_H 0x51
+#define HDMI_SCDC_ERR_DET_1_L 0x52
+#define HDMI_SCDC_ERR_DET_1_H 0x53
+#define HDMI_SCDC_ERR_DET_2_L 0x54
+#define HDMI_SCDC_ERR_DET_2_H 0x55
+#define HDMI_SCDC_ERR_DET_CHECKSUM 0x56
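+
+/*
+ * Note: per the HDMI 2.0 SCDC definition, each ERR_DET_<n> counter is
+ * split across an L/H byte pair (one pair per TMDS channel), with bit 7
+ * of the H byte acting as the "valid" flag; ERR_DET_CHECKSUM covers the
+ * error detection block.
+ */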
+
+/*
+ * HDCP secure registers directly accessible to HLOS since HDMI
+ * controller major version 4.0
+ */
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x00000004)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x00000008)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x0000000C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x00000010)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x00000014)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x00000018)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x0000001C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x00000020)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL (0x00000024)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA (0x00000028)
+
+#endif /* _SDE_HDMI_REGS_H */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index ba5921149ac3..7915562057d6 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -234,6 +234,11 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
hdmi->hdcp_ctrl = NULL;
}
+ /*
+ * Set to false for now to avoid ifdefs; this flag will be removed
+ * once HDCP SW support is added to the HDMI DRM driver.
+ */
+ hdmi->is_hdcp_supported = false;
return hdmi;
@@ -389,7 +394,34 @@ static struct hdmi_platform_config hdmi_tx_8996_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
+/* TODO */
+static const char *pwr_reg_names_8x98[] = {"core-vdda", "core-vcc"};
+/* TODO */
+static const char *hpd_reg_names_8x98[] = {"hpd-gdsc", "hpd-5v"};
+
+static const char *pwr_clk_names_8x98[] = {"core_extp_clk",
+ "hpd_alt_iface_clk"};
+
+static const char *hpd_clk_names_8x98[] = {"hpd_iface_clk",
+ "hpd_core_clk",
+ "hpd_mdp_core_clk",
+ "mnoc_clk",
+ "hpd_misc_ahb_clk",
+ "hpd_bus_clk"};
+
+static unsigned long hpd_clk_freq_8x98[] = {0, 19200000, 0, 0, 0, 0};
+
+static struct hdmi_platform_config hdmi_tx_8998_config = {
+ .phy_init = NULL,
+ HDMI_CFG(pwr_reg, 8x98),
+ HDMI_CFG(hpd_reg, 8x98),
+ HDMI_CFG(pwr_clk, 8x98),
+ HDMI_CFG(hpd_clk, 8x98),
+ .hpd_freq = hpd_clk_freq_8x98,
+};
+
static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8998_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
@@ -425,7 +457,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
const struct of_device_id *match;
-
match = of_match_node(dt_match, of_node);
if (match && match->data) {
hdmi_cfg = (struct hdmi_platform_config *)match->data;
@@ -443,12 +474,13 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
-
+ hdmi_cfg->hpd5v_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd5v");
#else
static struct hdmi_platform_config config = {};
static const char *hpd_clk_names[] = {
"core_clk", "master_iface_clk", "slave_iface_clk",
};
+
if (cpu_is_apq8064()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index e22ddcd51248..9ce8ff513210 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -70,7 +70,7 @@ struct hdmi {
struct drm_encoder *encoder;
bool hdmi_mode; /* are we in hdmi mode? */
-
+ bool is_hdcp_supported;
int irq;
struct workqueue_struct *workq;
@@ -110,7 +110,9 @@ struct hdmi_platform_config {
int pwr_clk_cnt;
/* gpio's: */
- int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+ int ddc_clk_gpio, ddc_data_gpio;
+ int hpd_gpio, mux_en_gpio;
+ int mux_sel_gpio, hpd5v_gpio;
int mux_lpm_gpio;
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 10c45700aefe..aef20f76bf02 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -110,6 +111,8 @@ static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
+#define REG_HDMI_INFOFRAME_CTRL1 0x00000030
+
#define REG_HDMI_GEN_PKT_CTRL 0x00000034
#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
@@ -149,6 +152,7 @@ static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*
#define REG_HDMI_GENERIC1_HDR 0x000000a4
+#define MAX_REG_HDMI_GENERIC1_INDEX 6
static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index dbd9cc4daf2e..6eab7d0cf6b5 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index d5d94575fa1b..6688e79cc88e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 6ac9aa165768..caad2be87ae4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -40,7 +40,7 @@ struct mdp4_crtc {
uint32_t x, y;
/* next cursor to scan-out: */
- uint32_t next_iova;
+ uint64_t next_iova;
struct drm_gem_object *next_bo;
/* current cursor being scanned out: */
@@ -133,7 +133,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp4_crtc, unref_cursor_work);
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
- msm_gem_put_iova(val, mdp4_kms->id);
+ msm_gem_put_iova(val, mdp4_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -387,25 +387,28 @@ static void update_cursor(struct drm_crtc *crtc)
if (mdp4_crtc->cursor.stale) {
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
- uint32_t iova = mdp4_crtc->cursor.next_iova;
+ uint64_t iova = mdp4_crtc->cursor.next_iova;
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo);
- msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+ msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
+ &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
+ /* FIXME: Make sure iova < 32 bits */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+ lower_32_bits(iova));
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
} else {
/* disable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
- mdp4_kms->blank_cursor_iova);
+ lower_32_bits(mdp4_kms->blank_cursor_iova));
}
/* and drop the iova ref + obj rev when done scanning out: */
@@ -432,7 +435,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_gem_object *cursor_bo, *old_bo;
unsigned long flags;
- uint32_t iova;
+ uint64_t iova;
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -449,7 +452,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+ ret = msm_gem_get_iova(cursor_bo, mdp4_kms->aspace, &iova);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index f0f66ac4b2fb..b6cddee0cf34 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,6 +17,7 @@
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"
@@ -177,18 +178,35 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
unsigned i;
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
for (i = 0; i < priv->num_crtcs; i++)
mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_put(aspace);
+ }
}
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
+
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace);
if (mdp4_kms->blank_cursor_bo)
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_put(aspace);
+ }
+
kfree(mdp4_kms);
}
@@ -408,7 +426,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -497,26 +515,30 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->iommu);
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);
+
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ mmu, "mdp4", 0x1000, 0xffffffff);
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ mdp4_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
- mmu = NULL;
- }
-
- mdp4_kms->id = msm_register_mmu(dev, mmu);
- if (mdp4_kms->id < 0) {
- ret = mdp4_kms->id;
- dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
- goto fail;
+ aspace = NULL;
}
ret = modeset_init(mdp4_kms);
@@ -535,7 +557,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
@@ -562,6 +584,7 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
/* TODO */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(msm_iommu_get_bus(&dev->dev));
+
#else
if (cpu_is_apq8064())
config.max_clk = 266667000;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 8a7f6e1e2bca..5cf03e58a4f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -33,8 +33,6 @@ struct mdp4_kms {
int rev;
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
-
void __iomem *mmio;
struct regulator *dsi_pll_vdda;
@@ -45,12 +43,13 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
+ struct msm_gem_address_space *aspace;
struct mdp_irq error_handler;
/* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo;
- uint32_t blank_cursor_iova;
+ uint64_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 30d57e74c42f..bc1ece2a5b7e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -109,7 +109,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp4_kms->id);
+ return msm_framebuffer_prepare(fb, mdp4_kms->aspace);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -123,7 +123,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp4_kms->id);
+ msm_framebuffer_cleanup(fb, mdp4_kms->aspace);
}
@@ -172,13 +172,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index c37da9c61e29..b275ce11b24b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index bb1225aa2f75..89305ad3cde2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -547,7 +547,7 @@ fail:
if (cfg_handler)
mdp5_cfg_destroy(cfg_handler);
- return NULL;
+ return ERR_PTR(ret);
}
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 7f9f4ac88029..422a9a78f3b2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -171,7 +171,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp5_crtc, unref_cursor_work);
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
- msm_gem_put_iova(val, mdp5_kms->id);
+ msm_gem_put_iova(val, mdp5_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -509,7 +509,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
- uint32_t blendcfg, cursor_addr, stride;
+ uint32_t blendcfg, stride;
+ uint64_t cursor_addr;
int ret, bpp, lm;
unsigned int depth;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
@@ -536,7 +537,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+ ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index f7aebf5516ce..e4e69ebd116e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -18,6 +18,7 @@
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"
@@ -130,13 +131,13 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
static void mdp5_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_mmu *mmu = mdp5_kms->mmu;
+ struct msm_gem_address_space *aspace = mdp5_kms->aspace;
mdp5_irq_domain_fini(mdp5_kms);
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_put(aspace);
}
if (mdp5_kms->ctlm)
@@ -474,7 +475,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
struct mdp5_cfg *config;
struct mdp5_kms *mdp5_kms;
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
uint32_t major, minor;
int i, ret;
@@ -595,34 +596,34 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
- mmu = msm_smmu_new(&pdev->dev,
+ struct msm_mmu *mmu = msm_smmu_new(&pdev->dev,
MSM_SMMU_DOMAIN_UNSECURE);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
iommu_domain_free(config->platform.iommu);
+ goto fail;
+ }
+
+ aspace = msm_gem_smmu_address_space_create(&pdev->dev,
+ mmu, "mdp5");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+ mdp5_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
- dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
- mmu->funcs->destroy(mmu);
+ dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+ ret);
goto fail;
}
} else {
- dev_info(dev->dev, "no iommu, fallback to phys "
- "contig buffers for scanout\n");
- mmu = NULL;
- }
- mdp5_kms->mmu = mmu;
-
- mdp5_kms->id = msm_register_mmu(dev, mmu);
- if (mdp5_kms->id < 0) {
- ret = mdp5_kms->id;
- dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
- goto fail;
+ dev_info(&pdev->dev,
+ "no iommu, fallback to phys contig buffers for scanout\n");
+ aspace = NULL;
}
ret = modeset_init(mdp5_kms);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 84f65d415598..c1aa86d21416 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -36,8 +36,7 @@ struct mdp5_kms {
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 81cd49045ffc..873ab11d34d2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -260,7 +260,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp5_kms->id);
+ return msm_framebuffer_prepare(fb, mdp5_kms->aspace);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -274,7 +274,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp5_kms->id);
+ msm_framebuffer_cleanup(fb, mdp5_kms->aspace);
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
@@ -400,13 +400,13 @@ static void set_scanout_locked(struct drm_plane *plane,
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 0aec1ac1f6d0..452e3518f98b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 5d04b0384215..f821a81c53a6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -23,6 +23,8 @@
#include "sde_wb.h"
#define TEARDOWN_DEADLOCK_RETRY_MAX 5
+#include "msm_gem.h"
+#include "msm_mmu.h"
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
@@ -38,42 +40,6 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.atomic_commit = msm_atomic_commit,
};
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_mmus++;
-
- if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
- return -EINVAL;
-
- priv->mmus[idx] = mmu;
-
- return idx;
-}
-
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int idx;
-
- if (priv->num_mmus <= 0) {
- dev_err(dev->dev, "invalid num mmus %d\n", priv->num_mmus);
- return;
- }
-
- idx = priv->num_mmus - 1;
-
- /* only support reverse-order deallocation */
- if (priv->mmus[idx] != mmu) {
- dev_err(dev->dev, "unexpected mmu at idx %d\n", idx);
- return;
- }
-
- --priv->num_mmus;
- priv->mmus[idx] = 0;
-}
-
-
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -588,29 +554,65 @@ static void load_gpu(struct drm_device *dev)
}
#endif
-static int msm_open(struct drm_device *dev, struct drm_file *file)
+static struct msm_file_private *setup_pagetable(struct msm_drm_private *priv)
{
struct msm_file_private *ctx;
+ if (!priv || !priv->gpu)
+ return NULL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->aspace = msm_gem_address_space_create_instance(
+ priv->gpu->aspace->mmu, "gpu", 0x100000000, 0x1ffffffff);
+
+ if (IS_ERR(ctx->aspace)) {
+ int ret = PTR_ERR(ctx->aspace);
+
+ /*
+ * If dynamic domains are not supported, everybody uses the
+ * same pagetable
+ */
+ if (ret != -EOPNOTSUPP) {
+ kfree(ctx);
+ return ERR_PTR(ret);
+ }
+
+ ctx->aspace = priv->gpu->aspace;
+ }
+
+ ctx->aspace->mmu->funcs->attach(ctx->aspace->mmu, NULL, 0);
+ return ctx;
+}
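+
+/*
+ * With the range used above, each open file gets a private 4GB GPU
+ * virtual range (0x100000000-0x1ffffffff) when per-instance pagetables
+ * are supported; otherwise ctx->aspace aliases the shared gpu->aspace.
+ */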
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_file_private *ctx = NULL;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+
+ if (!dev || !dev->dev_private)
+ return -ENODEV;
+
+ priv = dev->dev_private;
/* For now, load gpu on open.. to avoid the requirement of having
* firmware in the initrd.
*/
load_gpu(dev);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = setup_pagetable(priv);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
file->driver_priv = ctx;
- if (dev && dev->dev_private) {
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms;
+ kms = priv->kms;
+
+ if (kms && kms->funcs && kms->funcs->postopen)
+ kms->funcs->postopen(kms, file);
- kms = priv->kms;
- if (kms && kms->funcs && kms->funcs->postopen)
- kms->funcs->postopen(kms, file);
- }
return 0;
}
@@ -633,8 +635,10 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
kms->funcs->postclose(kms, file);
mutex_lock(&dev->struct_mutex);
- if (ctx == priv->lastctx)
- priv->lastctx = NULL;
+ if (ctx && ctx->aspace && ctx->aspace != priv->gpu->aspace) {
+ ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu, NULL, 0);
+ msm_gem_address_space_put(ctx->aspace);
+ }
mutex_unlock(&dev->struct_mutex);
kfree(ctx);
@@ -833,6 +837,13 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
+static int msm_snapshot_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ return msm_snapshot_write(priv->gpu, m);
+}
+
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -897,11 +908,22 @@ static int show_locked(struct seq_file *m, void *arg)
return ret;
}
+static int show_unlocked(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int (*show)(struct drm_device *dev, struct seq_file *m) =
+ node->info_ent->data;
+
+ return show(dev, m);
+}
+
static struct drm_info_list msm_debugfs_list[] = {
{"gpu", show_locked, 0, msm_gpu_show},
{"gem", show_locked, 0, msm_gem_show},
{ "mm", show_locked, 0, msm_mm_show },
{ "fb", show_locked, 0, msm_fb_show },
+ { "snapshot", show_unlocked, 0, msm_snapshot_show },
};
static int late_init_minor(struct drm_minor *minor)
@@ -975,14 +997,23 @@ int msm_wait_fence(struct drm_device *dev, uint32_t fence,
ktime_t *timeout , bool interruptible)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int index = FENCE_RING(fence);
+ uint32_t submitted;
int ret;
- if (!priv->gpu)
- return 0;
+ if (!gpu)
+ return -ENXIO;
+
+ if (index >= MSM_GPU_MAX_RINGS || index >= gpu->nr_rings ||
+ !gpu->rb[index])
+ return -EINVAL;
+
+ submitted = gpu->funcs->submitted_fence(gpu, gpu->rb[index]);
- if (fence > priv->gpu->submitted_fence) {
+ if (fence > submitted) {
DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
- fence, priv->gpu->submitted_fence);
+ fence, submitted);
return -EINVAL;
}
@@ -1012,7 +1043,7 @@ int msm_wait_fence(struct drm_device *dev, uint32_t fence,
if (ret == 0) {
DBG("timeout waiting for fence: %u (completed: %u)",
- fence, priv->completed_fence);
+ fence, priv->completed_fence[index]);
ret = -ETIMEDOUT;
} else if (ret != -ERESTARTSYS) {
ret = 0;
@@ -1026,12 +1057,13 @@ int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
+ int index = FENCE_RING(fence);
int ret = 0;
mutex_lock(&dev->struct_mutex);
if (!list_empty(&cb->work.entry)) {
ret = -EINVAL;
- } else if (fence > priv->completed_fence) {
+ } else if (fence > priv->completed_fence[index]) {
cb->fence = fence;
list_add_tail(&cb->work.entry, &priv->fence_cbs);
} else {
@@ -1046,21 +1078,21 @@ int msm_queue_fence_cb(struct drm_device *dev,
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_fence_cb *cb, *tmp;
+ int index = FENCE_RING(fence);
- mutex_lock(&dev->struct_mutex);
- priv->completed_fence = max(fence, priv->completed_fence);
-
- while (!list_empty(&priv->fence_cbs)) {
- struct msm_fence_cb *cb;
-
- cb = list_first_entry(&priv->fence_cbs,
- struct msm_fence_cb, work.entry);
+ if (index >= MSM_GPU_MAX_RINGS)
+ return;
- if (cb->fence > priv->completed_fence)
- break;
+ mutex_lock(&dev->struct_mutex);
+ priv->completed_fence[index] = max(fence, priv->completed_fence[index]);
- list_del_init(&cb->work.entry);
- queue_work(priv->wq, &cb->work);
+ list_for_each_entry_safe(cb, tmp, &priv->fence_cbs, work.entry) {
+ if (COMPARE_FENCE_LTE(cb->fence,
+ priv->completed_fence[index])) {
+ list_del_init(&cb->work.entry);
+ queue_work(priv->wq, &cb->work);
+ }
}
mutex_unlock(&dev->struct_mutex);
@@ -1165,16 +1197,28 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
{
struct drm_msm_gem_info *args = data;
struct drm_gem_object *obj;
+ struct msm_file_private *ctx = file->driver_priv;
int ret = 0;
- if (args->pad)
+ if (args->flags & ~MSM_INFO_FLAGS)
+ return -EINVAL;
+
+ if (!ctx || !ctx->aspace)
return -EINVAL;
obj = drm_gem_object_lookup(dev, file, args->handle);
if (!obj)
return -ENOENT;
- args->offset = msm_gem_mmap_offset(obj);
+ if (args->flags & MSM_INFO_IOVA) {
+ uint64_t iova;
+
+ ret = msm_gem_get_iova(obj, ctx->aspace, &iova);
+ if (!ret)
+ args->offset = iova;
+ } else {
+ args->offset = msm_gem_mmap_offset(obj);
+ }
drm_gem_object_unreference_unlocked(obj);
@@ -1185,13 +1229,24 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_wait_fence *args = data;
- ktime_t timeout = to_ktime(args->timeout);
+ ktime_t timeout;
+
if (args->pad) {
DRM_ERROR("invalid pad: %08x\n", args->pad);
return -EINVAL;
}
+ /*
+ * Special case - if the user passes a timeout of 0.0 just return the
+ * current fence status (0 for retired, -EBUSY for active) with no
+ * accompanying kernel logs. This can be a poor man's way of
+ * determining the status of a fence.
+ */
+ if (args->timeout.tv_sec == 0 && args->timeout.tv_nsec == 0)
+ return msm_wait_fence(dev, args->fence, NULL, true);
+
+ timeout = to_ktime(args->timeout);
return msm_wait_fence(dev, args->fence, &timeout, true);
}
@@ -1777,7 +1832,13 @@ static int msm_pdev_probe(struct platform_device *pdev)
component_match_add(&pdev->dev, &match, compare_dev, dev);
}
#endif
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ /* on all devices that I am aware of, iommu's which can map
+ * any address the cpu can see are used:
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ if (ret)
+ return ret;
+
ret = msm_add_master_component(&pdev->dev, match);
return ret;
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index a2678882d57a..d8a4c34e9be0 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -32,7 +32,8 @@
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
-#include <linux/mdss_io_util.h>
+#include <linux/of_device.h>
+#include <linux/sde_io_util.h>
#include <asm/sizes.h>
#include <linux/kthread.h>
@@ -63,6 +64,8 @@ struct msm_mmu;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
+struct msm_gem_address_space;
+struct msm_gem_vma;
#define NUM_DOMAINS 4 /* one for KMS, then one per gpu core (?) */
#define MAX_CRTCS 8
@@ -72,11 +75,7 @@ struct msm_gem_submit;
#define MAX_CONNECTORS 8
struct msm_file_private {
- /* currently we don't do anything useful with this.. but when
- * per-context address spaces are supported we'd keep track of
- * the context's page-tables here.
- */
- int dummy;
+ struct msm_gem_address_space *aspace;
};
enum msm_mdp_plane_property {
@@ -248,6 +247,8 @@ struct msm_drm_commit {
struct kthread_worker worker;
};
+#define MSM_GPU_MAX_RINGS 4
+
struct msm_drm_private {
struct msm_kms *kms;
@@ -274,11 +275,12 @@ struct msm_drm_private {
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
- struct msm_file_private *lastctx;
struct drm_fb_helper *fbdev;
- uint32_t next_fence, completed_fence;
+ uint32_t next_fence[MSM_GPU_MAX_RINGS];
+ uint32_t completed_fence[MSM_GPU_MAX_RINGS];
+
wait_queue_head_t fence_event;
struct msm_rd_state *rd;
@@ -296,9 +298,13 @@ struct msm_drm_private {
uint32_t pending_crtcs;
wait_queue_head_t pending_crtcs_event;
- /* registered MMUs: */
- unsigned int num_mmus;
- struct msm_mmu *mmus[NUM_DOMAINS];
+ /* Registered address spaces: currently this is fixed per number of
+ * IOMMUs, i.e. one for the display block and one for the GPU block.
+ * Eventually, to do per-process gpu pagetables, we'll want one of
+ * these per-process.
+ */
+ unsigned int num_aspaces;
+ struct msm_gem_address_space *aspace[NUM_DOMAINS];
unsigned int num_planes;
struct drm_plane *planes[MAX_PLANES];
@@ -345,6 +351,31 @@ struct msm_format {
uint32_t pixel_format;
};
+/*
+ * Some GPU targets can support multiple ringbuffers and preempt between them.
+ * In order to do this without massive API changes we steal the two top bits
+ * of the fence and use them to identify the ringbuffer (0x00000001 for
+ * ring 0, 0x40000001 for ring 1, 0x80000001 for ring 2, etc). If you are
+ * going to do a fence comparison you have to make sure you are only comparing
+ * against fences from the same ring, but since fences within a ringbuffer are
+ * still contiguous you can use straight comparisons (i.e. 0x40000001 is
+ * older than 0x40000002). Mathematically there are 0x3FFFFFFF timestamps
+ * per ring, or ~103 days at 120 interrupts per second (two interrupts per
+ * frame at 60 FPS).
+ */
+#define FENCE_RING(_fence) (((_fence) >> 30) & 3)
+#define FENCE(_ring, _fence) ((((_ring) & 3) << 30) | ((_fence) & 0x3FFFFFFF))
+
+static inline bool COMPARE_FENCE_LTE(uint32_t a, uint32_t b)
+{
+ return ((FENCE_RING(a) == FENCE_RING(b)) && a <= b);
+}
+
+static inline bool COMPARE_FENCE_LT(uint32_t a, uint32_t b)
+{
+ return ((FENCE_RING(a) == FENCE_RING(b)) && a < b);
+}
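+
+/*
+ * Worked example: FENCE(1, 2) == 0x40000002 and
+ * FENCE_RING(0x40000002) == 1; COMPARE_FENCE_LT(0x40000001, 0x40000002)
+ * is true, while comparing against a ring-0 fence such as 0x00000002 is
+ * always false because the ring bits differ.
+ */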
+
/* callback from wq once fence has passed: */
struct msm_fence_cb {
struct work_struct work;
@@ -362,15 +393,34 @@ void __msm_fence_worker(struct work_struct *work);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async);
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-
int msm_wait_fence(struct drm_device *dev, uint32_t fence,
ktime_t *timeout, bool interruptible);
int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags);
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+
+/* For GPU and legacy display */
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct msm_mmu *mmu,
+ const char *name, uint64_t start, uint64_t end);
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+ uint64_t start, uint64_t end);
+
+/* For SDE display */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+ const char *name);
+
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -379,13 +429,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -416,9 +469,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size, struct sg_table *sgt);
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -502,7 +558,8 @@ u32 msm_readl(const void __iomem *addr);
static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
- return priv->completed_fence >= fence;
+
+ return priv->completed_fence[FENCE_RING(fence)] >= fence;
}
static inline int align_pitch(int width, int bpp)
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index dca4de382581..a3f0392c2f88 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -92,15 +92,16 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
* should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's.
*/
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
- uint32_t iova;
+ uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
- DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+ ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+ DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
}
@@ -108,21 +109,30 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
return 0;
}
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = drm_format_num_planes(fb->pixel_format);
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], id);
+ msm_gem_put_iova(msm_fb->planes[i], aspace);
}
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+/* FIXME: Leave this as a uint32_t and just return the lower 32 bits? */
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ uint64_t iova;
+
if (!msm_fb->planes[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+
+ iova = msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+
+ /* FIXME: Make sure it is < 32 bits */
+ return lower_32_bits(iova);
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
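With the id parameter gone, a display driver pins and resolves scanout addresses against an explicit address space object. A hypothetical caller sketch (kernel-style, illustrative only; hypothetical_scanout_addr is not part of this patch):

    /* Pin every plane of an fb into 'aspace', then fetch one plane's address.
     * Must be paired with msm_framebuffer_cleanup(fb, aspace) when done. */
    static uint32_t hypothetical_scanout_addr(struct drm_framebuffer *fb,
                    struct msm_gem_address_space *aspace, int plane)
    {
            if (msm_framebuffer_prepare(fb, aspace))
                    return 0;

            /* per-plane address within this aspace, truncated to 32 bits */
            return msm_framebuffer_iova(fb, aspace, plane);
    }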
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 3f6ec077b51d..cf127250c0d0 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -85,7 +85,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
- uint32_t paddr;
+ uint64_t paddr;
int ret, size;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
@@ -160,11 +160,12 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
- dev->mode_config.fb_base = paddr;
+ /* FIXME: Verify paddr < 32 bits? */
+ dev->mode_config.fb_base = lower_32_bits(paddr);
fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
- fbi->fix.smem_start = paddr;
+ fbi->fix.smem_start = lower_32_bits(paddr);
fbi->fix.smem_len = fbdev->bo->size;
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 351985327214..63128d11767e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -24,6 +24,11 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
+static void *get_dmabuf_ptr(struct drm_gem_object *obj)
+{
+ return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
+}
+
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -268,6 +273,67 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
+static void obj_remove_domain(struct msm_gem_vma *domain)
+{
+ if (domain) {
+ list_del(&domain->list);
+ kfree(domain);
+ }
+}
+
+static void
+put_iova(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain, *tmp;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+ if (iommu_present(&platform_bus_type)) {
+ msm_gem_unmap_vma(domain->aspace, domain,
+ msm_obj->sgt, get_dmabuf_ptr(obj));
+ }
+
+ obj_remove_domain(domain);
+ }
+}
+
+static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->aspace = aspace;
+
+ list_add_tail(&domain->list, &msm_obj->domains);
+
+ return domain;
+}
+
+static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
+
+ list_for_each_entry(domain, &msm_obj->domains, list) {
+ if (domain->aspace == aspace)
+ return domain;
+ }
+
+ return NULL;
+}
+
+#ifndef IOMMU_PRIV
+#define IOMMU_PRIV 0
+#endif
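An object can now be mapped into several address spaces at once: one msm_gem_vma ("domain") per aspace, kept on a per-object list that obj_get_domain() searches and obj_add_domain() extends. A tiny user-space model of that lookup-or-add pattern (plain pointers stand in for kernel list_head/kzalloc):

    #include <stdlib.h>

    struct vma {
        const void *aspace;            /* which address space owns this mapping */
        unsigned long long iova;
        struct vma *next;
    };

    /* find the mapping for 'aspace', or NULL if the object has none yet */
    static struct vma *get_domain(struct vma *head, const void *aspace)
    {
        for (; head; head = head->next)
            if (head->aspace == aspace)
                return head;
        return NULL;
    }

    /* add a new, not-yet-mapped domain for 'aspace' */
    static struct vma *add_domain(struct vma **head, const void *aspace)
    {
        struct vma *v = calloc(1, sizeof(*v));

        if (v) {
            v->aspace = aspace;
            v->next = *head;
            *head = v;
        }
        return v;
    }

    int main(void)
    {
        struct vma *head = NULL;
        int aspace_a, aspace_b;        /* two distinct stand-in aspaces */

        add_domain(&head, &aspace_a);
        return get_domain(head, &aspace_b) ? 1 : 0;   /* 0: b not mapped yet */
    }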
+
/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.
@@ -275,69 +341,64 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
* That means when I do eventually need to add support for unpinning
* the refcnt counter needs to be atomic_t.
*/
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ struct msm_gem_vma *domain;
int ret = 0;
- if (!msm_obj->domain[id].iova) {
- struct msm_drm_private *priv = obj->dev->dev_private;
- struct page **pages = get_pages(obj);
+ if (!iommu_present(&platform_bus_type)) {
+ pages = get_pages(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
- if (iommu_present(&platform_bus_type)) {
- struct msm_mmu *mmu = priv->mmus[id];
-
- if (WARN_ON(!mmu))
- return -EINVAL;
-
- if (obj->import_attach && mmu->funcs->map_dma_buf) {
- ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
- obj->import_attach->dmabuf,
- DMA_BIDIRECTIONAL);
- if (ret) {
- DRM_ERROR("Unable to map dma buf\n");
- return ret;
- }
- } else {
- ret = mmu->funcs->map_sg(mmu, msm_obj->sgt,
- DMA_BIDIRECTIONAL);
- }
-
- if (!ret)
- msm_obj->domain[id].iova =
- sg_dma_address(msm_obj->sgt->sgl);
- } else {
- WARN_ONCE(1, "physical address being used\n");
- msm_obj->domain[id].iova = physaddr(obj);
+ *iova = (uint64_t) physaddr(obj);
+ return 0;
+ }
+
+ domain = obj_get_domain(obj, aspace);
+
+ if (!domain) {
+ domain = obj_add_domain(obj, aspace);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ obj_remove_domain(domain);
+ return PTR_ERR(pages);
}
+
+ ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
+ get_dmabuf_ptr(obj), msm_obj->flags);
}
if (!ret)
- *iova = msm_obj->domain[id].iova;
+ *iova = domain->iova;
+ else
+ obj_remove_domain(domain);
return ret;
}
/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
int ret;
- /* this is safe right now because we don't unmap until the
- * bo is deleted:
- */
- if (msm_obj->domain[id].iova) {
- *iova = msm_obj->domain[id].iova;
+ domain = obj_get_domain(obj, aspace);
+ if (domain) {
+ *iova = domain->iova;
return 0;
}
mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_get_iova_locked(obj, id, iova);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
@@ -345,14 +406,18 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!msm_obj->domain[id].iova);
- return msm_obj->domain[id].iova;
+ struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
+
+ WARN_ON(!domain);
+
+ return domain ? domain->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
// XXX TODO ..
// NOTE: probably don't need a _locked() version.. we wouldn't
@@ -486,14 +551,21 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
uint64_t off = drm_vma_node_start(&obj->vma_node);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
+ seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p\t",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
msm_obj->read_fence, msm_obj->write_fence,
obj->name, obj->refcount.refcount.counter,
- off, msm_obj->vaddr, obj->size);
+ off, msm_obj->vaddr);
+
+ /* FIXME: we need to print the address space here too */
+ list_for_each_entry(domain, &msm_obj->domains, list)
+ seq_printf(m, " %08llx", domain->iova);
+
+ seq_puts(m, "\n");
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -517,9 +589,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int id;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -528,22 +598,12 @@ void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->mm_list);
- for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- struct msm_mmu *mmu = priv->mmus[id];
- if (mmu && msm_obj->domain[id].iova) {
- if (obj->import_attach && mmu->funcs->unmap_dma_buf) {
- mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt,
- obj->import_attach->dmabuf,
- DMA_BIDIRECTIONAL);
- } else
- mmu->funcs->unmap_sg(mmu, msm_obj->sgt,
- DMA_BIDIRECTIONAL);
- }
- }
+ put_iova(obj);
if (obj->import_attach) {
if (msm_obj->vaddr)
- dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+ dma_buf_vunmap(obj->import_attach->dmabuf,
+ msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -597,7 +657,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
- unsigned sz;
bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
@@ -619,16 +678,16 @@ static int msm_gem_new_impl(struct drm_device *dev,
if (WARN_ON(use_vram && !priv->vram.size))
return -EINVAL;
- sz = sizeof(*msm_obj);
- if (use_vram)
- sz += sizeof(struct drm_mm_node);
-
- msm_obj = kzalloc(sz, GFP_KERNEL);
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
- if (use_vram)
- msm_obj->vram_node = (void *)&msm_obj[1];
+ if (use_vram) {
+ struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, NULL);
+
+ if (!IS_ERR(domain))
+ msm_obj->vram_node = &domain->node;
+ }
msm_obj->flags = flags;
@@ -636,6 +695,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
reservation_object_init(msm_obj->resv);
INIT_LIST_HEAD(&msm_obj->submit_entry);
+ INIT_LIST_HEAD(&msm_obj->domains);
+
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
*obj = &msm_obj->base;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 2e4ae6b1c5d0..ac46c473791f 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -18,12 +18,38 @@
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
+#include <linux/kref.h>
#include <linux/reservation.h>
#include "msm_drv.h"
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+struct msm_gem_aspace_ops {
+ int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
+ struct sg_table *sgt, void *priv, unsigned int flags);
+
+ void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
+ struct sg_table *sgt, void *priv);
+
+ void (*destroy)(struct msm_gem_address_space *);
+};
+
+struct msm_gem_address_space {
+ const char *name;
+ struct msm_mmu *mmu;
+ const struct msm_gem_aspace_ops *ops;
+ struct kref kref;
+};
+
+struct msm_gem_vma {
+ /* Node used by the GPU address space, but not the SDE address space */
+ struct drm_mm_node node;
+ struct msm_gem_address_space *aspace;
+ uint64_t iova;
+ struct list_head list;
+};
+
struct msm_gem_object {
struct drm_gem_object base;
@@ -52,9 +78,7 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct {
- dma_addr_t iova;
- } domain[NUM_DOMAINS];
+ struct list_head domains;
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
@@ -94,24 +118,25 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
*/
struct msm_gem_submit {
struct drm_device *dev;
- struct msm_gpu *gpu;
+ struct msm_gem_address_space *aspace;
struct list_head node; /* node in gpu submit_list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
uint32_t fence;
+ int ring;
bool valid;
unsigned int nr_cmds;
unsigned int nr_bos;
struct {
uint32_t type;
uint32_t size; /* in dwords */
- uint32_t iova;
+ uint64_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
} cmd[MAX_CMDS];
struct {
uint32_t flags;
struct msm_gem_object *obj;
- uint32_t iova;
+ uint64_t iova;
} bos[0];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1847f83b1e33..0566cefaae81 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,7 +34,7 @@ static inline void __user *to_user_ptr(u64 address)
}
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, int nr)
+ struct msm_gem_address_space *aspace, int nr)
{
struct msm_gem_submit *submit;
int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
@@ -42,7 +42,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (submit) {
submit->dev = dev;
- submit->gpu = gpu;
+ submit->aspace = aspace;
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
@@ -90,7 +90,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
pagefault_disable();
}
- if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+ if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
+ !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
@@ -141,7 +142,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+ msm_gem_put_iova(&msm_obj->base, submit->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -162,7 +163,7 @@ retry:
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint32_t iova;
+ uint64_t iova;
if (slow_locked == i)
slow_locked = -1;
@@ -180,7 +181,7 @@ retry:
/* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ submit->aspace, &iova);
/* this would break the logic in the fail path.. there is no
* reason for this to happen, but just to be on the safe side
@@ -229,7 +230,7 @@ fail:
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
- struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+ struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -275,7 +276,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
struct drm_msm_gem_submit_reloc submit_reloc;
void __user *userptr =
to_user_ptr(relocs + (i * sizeof(submit_reloc)));
- uint32_t iova, off;
+ uint64_t iova;
+ uint32_t off;
bool valid;
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
@@ -347,17 +349,19 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
- if (args->pipe != MSM_PIPE_3D0)
+ if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
return -EINVAL;
gpu = priv->gpu;
+ if (!gpu)
+ return -ENXIO;
if (args->nr_cmds > MAX_CMDS)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
- submit = submit_create(dev, gpu, args->nr_bos);
+ submit = submit_create(dev, ctx->aspace, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
goto out;
@@ -376,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
void __user *userptr =
to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
- uint32_t iova;
+ uint64_t iova;
ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
if (ret) {
@@ -408,8 +412,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
- if ((submit_cmd.size + submit_cmd.submit_offset) >=
- msm_obj->base.size) {
+ if (!(submit_cmd.size) ||
+ ((submit_cmd.size + submit_cmd.submit_offset) >
+ msm_obj->base.size)) {
DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
ret = -EINVAL;
goto out;
@@ -431,7 +436,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
- ret = msm_gpu_submit(gpu, submit, ctx);
+ /* Clamp the user-submitted ring to the range of available rings */
+ submit->ring = clamp_t(uint32_t,
+ (args->flags & MSM_SUBMIT_RING_MASK) >> MSM_SUBMIT_RING_SHIFT,
+ 0, gpu->nr_rings - 1);
+
+ ret = msm_gpu_submit(gpu, submit);
args->fence = submit->fence;
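The ring index now rides in the submit flags and is clamped rather than rejected, so an out-of-range request falls back to the highest available ring. The real MSM_SUBMIT_RING_MASK/SHIFT values live in the uapi header, which is not part of this diff; the values below are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define MSM_SUBMIT_RING_SHIFT 12                    /* assumed */
    #define MSM_SUBMIT_RING_MASK  (0xF << MSM_SUBMIT_RING_SHIFT)

    static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
        uint32_t flags = 7 << MSM_SUBMIT_RING_SHIFT;    /* user asked for ring 7 */
        uint32_t nr_rings = 4;
        uint32_t ring = clamp_u32(
            (flags & MSM_SUBMIT_RING_MASK) >> MSM_SUBMIT_RING_SHIFT,
            0, nr_rings - 1);

        printf("ring=%u\n", ring);                      /* clamped to 3 */
        return 0;
    }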
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
new file mode 100644
index 000000000000..7ca96831a9b3
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+static void
+msm_gem_address_space_destroy(struct kref *kref)
+{
+ struct msm_gem_address_space *aspace = container_of(kref,
+ struct msm_gem_address_space, kref);
+
+ if (aspace->ops->destroy)
+ aspace->ops->destroy(aspace);
+
+ kfree(aspace);
+}
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+{
+ if (aspace)
+ kref_put(&aspace->kref, msm_gem_address_space_destroy);
+}
+
+/* SDE address space operations */
+static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv)
+{
+ struct dma_buf *buf = priv;
+
+ if (buf)
+ aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
+ sgt, buf, DMA_BIDIRECTIONAL);
+ else
+ aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
+ DMA_BIDIRECTIONAL);
+
+ vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
+}
+
+
+static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags)
+{
+ struct dma_buf *buf = priv;
+ int ret;
+
+ if (buf)
+ ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
+ DMA_BIDIRECTIONAL);
+ else
+ ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
+ DMA_BIDIRECTIONAL);
+
+ if (!ret)
+ vma->iova = sg_dma_address(sgt->sgl);
+
+ /* Get a reference to the aspace to keep it around */
+ kref_get(&aspace->kref);
+
+ return ret;
+}
+
+static const struct msm_gem_aspace_ops smmu_aspace_ops = {
+ .map = smmu_aspace_map_vma,
+ .unmap = smmu_aspace_unmap_vma,
+};
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+ const char *name)
+{
+ struct msm_gem_address_space *aspace;
+
+ if (!mmu)
+ return ERR_PTR(-EINVAL);
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ aspace->name = name;
+ aspace->mmu = mmu;
+ aspace->ops = &smmu_aspace_ops;
+
+ kref_init(&aspace->kref);
+
+ return aspace;
+}
+
+/* GPU address space operations */
+struct msm_iommu_aspace {
+ struct msm_gem_address_space base;
+ struct drm_mm mm;
+};
+
+#define to_iommu_aspace(aspace) \
+ ((struct msm_iommu_aspace *) \
+ container_of(aspace, struct msm_iommu_aspace, base))
+
+static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+ if (!vma->iova)
+ return;
+
+ if (aspace->mmu)
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt);
+
+ drm_mm_remove_node(&vma->node);
+
+ vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
+}
+
+static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, void *priv,
+ unsigned int flags)
+{
+ struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+ size_t size = 0;
+ struct scatterlist *sg;
+ int ret, i;
+ int iommu_flags = IOMMU_READ;
+
+ if (!(flags & MSM_BO_GPU_READONLY))
+ iommu_flags |= IOMMU_WRITE;
+
+ if (flags & MSM_BO_PRIVILEGED)
+ iommu_flags |= IOMMU_PRIV;
+
+ if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+ return 0;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ size += sg->length + sg->offset;
+
+ ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
+ 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ return ret;
+
+ vma->iova = vma->node.start << PAGE_SHIFT;
+
+ if (aspace->mmu)
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ iommu_flags);
+
+ /* Get a reference to the aspace to keep it around */
+ kref_get(&aspace->kref);
+
+ return ret;
+}
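The GPU-side map sizes its drm_mm allocation by summing each scatterlist segment's length plus its leading offset, then shifting down to pages. A standalone model of that sizing step; note that, like the code above, a sum that is not a page multiple would be truncated (GEM backing pages are page-sized in practice):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12

    struct seg { size_t offset, length; };   /* stand-in for a scatterlist entry */

    int main(void)
    {
        struct seg sgt[] = { { 0x100, 0x1000 }, { 0, 0x3000 } };
        size_t size = 0, i;

        for (i = 0; i < sizeof(sgt) / sizeof(sgt[0]); i++)
            size += sgt[i].length + sgt[i].offset;

        printf("pages = %zu\n", size >> PAGE_SHIFT);    /* 0x4100 >> 12 = 4 */
        return 0;
    }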
+
+static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
+{
+ struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+
+ drm_mm_takedown(&local->mm);
+ aspace->mmu->funcs->destroy(aspace->mmu);
+}
+
+static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
+ .map = iommu_aspace_map_vma,
+ .unmap = iommu_aspace_unmap_vma,
+ .destroy = iommu_aspace_destroy,
+};
+
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+ uint64_t start, uint64_t end)
+{
+ struct msm_iommu_aspace *local;
+
+ if (!mmu)
+ return ERR_PTR(-EINVAL);
+
+ local = kzalloc(sizeof(*local), GFP_KERNEL);
+ if (!local)
+ return ERR_PTR(-ENOMEM);
+
+ drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
+ (end >> PAGE_SHIFT) - 1);
+
+ local->base.name = name;
+ local->base.mmu = mmu;
+ local->base.ops = &msm_iommu_aspace_ops;
+
+ kref_init(&local->base.kref);
+
+ return &local->base;
+}
+
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags)
+{
+ if (aspace && aspace->ops->map)
+ return aspace->ops->map(aspace, vma, sgt, priv, flags);
+
+ return -EINVAL;
+}
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+ if (aspace && aspace->ops->unmap)
+ aspace->ops->unmap(aspace, vma, sgt, priv);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name)
+{
+ struct msm_mmu *mmu = msm_iommu_new(dev, domain);
+
+ if (IS_ERR(mmu))
+ return (struct msm_gem_address_space *) mmu;
+
+ return msm_gem_address_space_new(mmu, name,
+ domain->geometry.aperture_start,
+ domain->geometry.aperture_end);
+}
+
+/* Create a new dynamic instance */
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+ uint64_t start, uint64_t end)
+{
+ struct msm_mmu *child = msm_iommu_new_dynamic(parent);
+
+ if (IS_ERR(child))
+ return (struct msm_gem_address_space *) child;
+
+ return msm_gem_address_space_new(child, name, start, end);
+}
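These two constructors split the lifecycle: the probe path builds the GPU's base aspace from an iommu_domain, and later callers can derive dynamic instances that share the base's context bank but get their own pagetable. A hypothetical per-context helper (kernel-style sketch; the function name and VA range are made up for illustration):

    /* Give a new context its own pagetable when the hardware allows it. */
    static struct msm_gem_address_space *
    hypothetical_ctx_aspace(struct msm_gpu *gpu)
    {
            if (!msm_iommu_allow_dynamic(gpu->aspace->mmu))
                    return gpu->aspace;     /* fall back to the shared aspace */

            return msm_gem_address_space_create_instance(gpu->aspace->mmu,
                            "gpu-ctx", SZ_16M, 0xfffffffULL);
    }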
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 6b02ada6579a..3176f301e7a8 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -90,21 +90,20 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
+ uint32_t rate = gpu->gpufreq[gpu->active_level];
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
- clk_prepare(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
+ if (gpu->core_clk)
+ clk_set_rate(gpu->core_clk, rate);
- if (rate_clk && gpu->fast_rate)
- clk_set_rate(rate_clk, gpu->fast_rate);
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 19200000);
+
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
+ clk_prepare(gpu->grp_clks[i]);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_enable(gpu->grp_clks[i]);
@@ -113,24 +112,23 @@ static int enable_clk(struct msm_gpu *gpu)
static int disable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
+ uint32_t rate = gpu->gpufreq[gpu->nr_pwrlevels - 1];
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
clk_disable(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
- if (rate_clk && gpu->slow_rate)
- clk_set_rate(rate_clk, gpu->slow_rate);
-
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_unprepare(gpu->grp_clks[i]);
+ if (gpu->core_clk)
+ clk_set_rate(gpu->core_clk, rate);
+
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 0);
+
return 0;
}
@@ -138,8 +136,9 @@ static int enable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_prepare_enable(gpu->ebi1_clk);
- if (gpu->bus_freq)
- bs_set(gpu, gpu->bus_freq);
+
+ if (gpu->busfreq[gpu->active_level])
+ bs_set(gpu, gpu->busfreq[gpu->active_level]);
return 0;
}
@@ -147,7 +146,8 @@ static int disable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_disable_unprepare(gpu->ebi1_clk);
- if (gpu->bus_freq)
+
+ if (gpu->busfreq[gpu->active_level])
bs_set(gpu, 0);
return 0;
}
@@ -155,6 +155,8 @@ static int disable_axi(struct msm_gpu *gpu)
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
int ret;
DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
@@ -167,6 +169,8 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (WARN_ON(gpu->active_cnt <= 0))
return -EINVAL;
+ WARN_ON(pm_runtime_get_sync(&pdev->dev) < 0);
+
ret = enable_pwrrail(gpu);
if (ret)
return ret;
@@ -185,6 +189,8 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
int ret;
DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
@@ -209,6 +215,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
if (ret)
return ret;
+ pm_runtime_put(&pdev->dev);
return 0;
}
@@ -277,17 +284,35 @@ static void recover_worker(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
if (msm_gpu_active(gpu)) {
struct msm_gem_submit *submit;
- uint32_t fence = gpu->funcs->last_fence(gpu);
-
- /* retire completed submits, plus the one that hung: */
- retire_submits(gpu, fence + 1);
+ struct msm_ringbuffer *ring;
+ int i;
inactive_cancel(gpu);
+
+ FOR_EACH_RING(gpu, ring, i) {
+ uint32_t fence;
+
+ if (!ring)
+ continue;
+
+ fence = gpu->funcs->last_fence(gpu, ring);
+
+ /*
+ * Retire the faulting command on the active ring and
+ * make sure the other rings are cleaned up
+ */
+ if (ring == gpu->funcs->active_ring(gpu))
+ retire_submits(gpu, fence + 1);
+ else
+ retire_submits(gpu, fence);
+ }
+
+ /* Recover the GPU */
gpu->funcs->recover(gpu);
- /* replay the remaining submits after the one that hung: */
+ /* replay the remaining submits for all rings: */
list_for_each_entry(submit, &gpu->submit_list, node) {
- gpu->funcs->submit(gpu, submit, NULL);
+ gpu->funcs->submit(gpu, submit);
}
}
mutex_unlock(&dev->struct_mutex);
@@ -307,25 +332,28 @@ static void hangcheck_handler(unsigned long data)
struct msm_gpu *gpu = (struct msm_gpu *)data;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+ uint32_t fence = gpu->funcs->last_fence(gpu, ring);
+ uint32_t submitted = gpu->funcs->submitted_fence(gpu, ring);
- if (fence != gpu->hangcheck_fence) {
+ if (fence != gpu->hangcheck_fence[ring->id]) {
/* some progress has been made.. ya! */
- gpu->hangcheck_fence = fence;
- } else if (fence < gpu->submitted_fence) {
+ gpu->hangcheck_fence[ring->id] = fence;
+ } else if (fence < submitted) {
/* no progress and not done.. hung! */
- gpu->hangcheck_fence = fence;
- dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
- gpu->name);
+ gpu->hangcheck_fence[ring->id] = fence;
+ dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ gpu->name, ring->id);
dev_err(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
dev_err(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, gpu->submitted_fence);
+ gpu->name, submitted);
+
queue_work(priv->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
- if (gpu->submitted_fence > gpu->hangcheck_fence)
+ if (submitted > gpu->hangcheck_fence[ring->id])
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -434,54 +462,66 @@ out:
static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
{
struct drm_device *dev = gpu->dev;
+ struct msm_gem_submit *submit, *tmp;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- while (!list_empty(&gpu->submit_list)) {
- struct msm_gem_submit *submit;
-
- submit = list_first_entry(&gpu->submit_list,
- struct msm_gem_submit, node);
+ /*
+ * Find and retire all the submits in the same ring that are older than
+ * or equal to 'fence'
+ */
- if (submit->fence <= fence) {
+ list_for_each_entry_safe(submit, tmp, &gpu->submit_list, node) {
+ if (COMPARE_FENCE_LTE(submit->fence, fence)) {
list_del(&submit->node);
kfree(submit);
- } else {
- break;
}
}
}
-static void retire_worker(struct work_struct *work)
+static bool _fence_signaled(struct msm_gem_object *obj, uint32_t fence)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
- struct drm_device *dev = gpu->dev;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ if (obj->write_fence & 0x3FFFFFFF)
+ return COMPARE_FENCE_LTE(obj->write_fence, fence);
- msm_update_fence(gpu->dev, fence);
+ return COMPARE_FENCE_LTE(obj->read_fence, fence);
+}
- mutex_lock(&dev->struct_mutex);
+static void _retire_ring(struct msm_gpu *gpu, uint32_t fence)
+{
+ struct msm_gem_object *obj, *tmp;
retire_submits(gpu, fence);
- while (!list_empty(&gpu->active_list)) {
- struct msm_gem_object *obj;
-
- obj = list_first_entry(&gpu->active_list,
- struct msm_gem_object, mm_list);
-
- if ((obj->read_fence <= fence) &&
- (obj->write_fence <= fence)) {
- /* move to inactive: */
+ list_for_each_entry_safe(obj, tmp, &gpu->active_list, mm_list) {
+ if (_fence_signaled(obj, fence)) {
msm_gem_move_to_inactive(&obj->base);
- msm_gem_put_iova(&obj->base, gpu->id);
+ msm_gem_put_iova(&obj->base, gpu->aspace);
drm_gem_object_unreference(&obj->base);
- } else {
- break;
}
}
+}
- mutex_unlock(&dev->struct_mutex);
+static void retire_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
+ struct drm_device *dev = gpu->dev;
+ struct msm_ringbuffer *ring;
+ int i;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ uint32_t fence;
+
+ if (!ring)
+ continue;
+
+ fence = gpu->funcs->last_fence(gpu, ring);
+ msm_update_fence(gpu->dev, fence);
+
+ mutex_lock(&dev->struct_mutex);
+ _retire_ring(gpu, fence);
+ mutex_unlock(&dev->struct_mutex);
+ }
if (!msm_gpu_active(gpu))
inactive_start(gpu);
@@ -496,18 +536,16 @@ void msm_gpu_retire(struct msm_gpu *gpu)
}
/* add bo's to gpu's ring, and kick gpu: */
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->rb[submit->ring];
int i, ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- submit->fence = ++priv->next_fence;
-
- gpu->submitted_fence = submit->fence;
+ submit->fence = FENCE(submit->ring, ++priv->next_fence[submit->ring]);
inactive_cancel(gpu);
@@ -515,7 +553,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_rd_dump_submit(submit);
- gpu->submitted_fence = submit->fence;
+ ring->submitted_fence = submit->fence;
update_sw_cntrs(gpu);
@@ -528,12 +566,12 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
if (!is_active(msm_obj)) {
- uint32_t iova;
+ uint64_t iova;
/* ring takes a reference to the bo and iova: */
drm_gem_object_reference(&msm_obj->base);
msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ submit->aspace, &iova);
}
if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -543,8 +581,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
- ret = gpu->funcs->submit(gpu, submit, ctx);
- priv->lastctx = ctx;
+ ret = gpu->funcs->submit(gpu, submit);
hangcheck_timer_reset(gpu);
@@ -561,17 +598,54 @@ static irqreturn_t irq_handler(int irq, void *data)
return gpu->funcs->irq(gpu);
}
-static const char *clk_names[] = {
- "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
- "alt_mem_iface_clk",
-};
+static struct clk *get_clock(struct device *dev, const char *name)
+{
+ struct clk *clk = devm_clk_get(dev, name);
+
+ DBG("clks[%s]: %p", name, clk);
+
+ return IS_ERR(clk) ? NULL : clk;
+}
+
+static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+ struct device *dev = &pdev->dev;
+ struct property *prop;
+ const char *name;
+ int i = 0;
+
+ gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
+ if (gpu->nr_clocks < 1) {
+ gpu->nr_clocks = 0;
+ return 0;
+ }
+
+ gpu->grp_clks = devm_kcalloc(dev, gpu->nr_clocks, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!gpu->grp_clks)
+ return -ENOMEM;
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ gpu->grp_clks[i] = get_clock(dev, name);
+
+ /* Remember the key clocks that we need to control later */
+ if (!strcmp(name, "core_clk"))
+ gpu->core_clk = gpu->grp_clks[i];
+ else if (!strcmp(name, "rbbmtimer_clk"))
+ gpu->rbbmtimer_clk = gpu->grp_clks[i];
+
+ ++i;
+ }
+
+ return 0;
+}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
- const char *name, const char *ioname, const char *irqname, int ringsz)
+ const char *name, struct msm_gpu_config *config)
{
struct iommu_domain *iommu;
- int i, ret;
+ int i, ret, nr_rings;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -595,17 +669,16 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
spin_lock_init(&gpu->perf_lock);
- BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
/* Map registers: */
- gpu->mmio = msm_ioremap(pdev, ioname, name);
+ gpu->mmio = msm_ioremap(pdev, config->ioname, name);
if (IS_ERR(gpu->mmio)) {
ret = PTR_ERR(gpu->mmio);
goto fail;
}
/* Get Interrupt: */
- gpu->irq = platform_get_irq_byname(pdev, irqname);
+ gpu->irq = platform_get_irq_byname(pdev, config->irqname);
if (gpu->irq < 0) {
ret = gpu->irq;
dev_err(drm->dev, "failed to get irq: %d\n", ret);
@@ -619,13 +692,11 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- /* Acquire clocks: */
- for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
- gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
- DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
- if (IS_ERR(gpu->grp_clks[i]))
- gpu->grp_clks[i] = NULL;
- }
+ pm_runtime_enable(&pdev->dev);
+
+ ret = get_clocks(pdev, gpu);
+ if (ret)
+ goto fail;
gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
DBG("ebi1_clk: %p", gpu->ebi1_clk);
@@ -649,12 +720,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
*/
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
+ /* TODO 32b vs 64b address space.. */
+ iommu->geometry.aperture_start = config->va_start;
+ iommu->geometry.aperture_end = config->va_end;
+
dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
- if (IS_ERR(gpu->mmu)) {
- ret = PTR_ERR(gpu->mmu);
+ gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+ iommu, "gpu");
+ if (IS_ERR(gpu->aspace)) {
+ ret = PTR_ERR(gpu->aspace);
dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->mmu = NULL;
+ gpu->aspace = NULL;
iommu_domain_free(iommu);
goto fail;
}
@@ -662,42 +738,80 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_mmu(drm, gpu->mmu);
+ nr_rings = config->nr_rings;
- /* Create ringbuffer: */
- mutex_lock(&drm->struct_mutex);
- gpu->rb = msm_ringbuffer_new(gpu, ringsz);
- mutex_unlock(&drm->struct_mutex);
- if (IS_ERR(gpu->rb)) {
- ret = PTR_ERR(gpu->rb);
- gpu->rb = NULL;
- dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
- goto fail;
+ if (nr_rings > ARRAY_SIZE(gpu->rb)) {
+ WARN(1, "Only creating %lu ringbuffers\n", ARRAY_SIZE(gpu->rb));
+ nr_rings = ARRAY_SIZE(gpu->rb);
}
+ /* Create ringbuffer(s): */
+ for (i = 0; i < nr_rings; i++) {
+ mutex_lock(&drm->struct_mutex);
+ gpu->rb[i] = msm_ringbuffer_new(gpu, i);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(gpu->rb[i])) {
+ ret = PTR_ERR(gpu->rb[i]);
+ gpu->rb[i] = NULL;
+ dev_err(drm->dev,
+ "could not create ringbuffer %d: %d\n", i, ret);
+ goto fail;
+ }
+ }
+
+ gpu->nr_rings = nr_rings;
+
+#ifdef CONFIG_SMP
+ gpu->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
+ gpu->pm_qos_req_dma.irq = gpu->irq;
+#endif
+
+ pm_qos_add_request(&gpu->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
bs_init(gpu);
+ gpu->snapshot = msm_snapshot_new(gpu);
+ if (IS_ERR(gpu->snapshot))
+ gpu->snapshot = NULL;
+
return 0;
fail:
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ if (gpu->rb[i])
+ msm_ringbuffer_destroy(gpu->rb[i]);
+ }
+
+ pm_runtime_disable(&pdev->dev);
return ret;
}
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int i;
+
DBG("%s", gpu->name);
WARN_ON(!list_empty(&gpu->active_list));
bs_fini(gpu);
- if (gpu->rb) {
- if (gpu->rb_iova)
- msm_gem_put_iova(gpu->rb->bo, gpu->id);
- msm_ringbuffer_destroy(gpu->rb);
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ if (!gpu->rb[i])
+ continue;
+
+ if (gpu->rb[i]->iova)
+ msm_gem_put_iova(gpu->rb[i]->bo, gpu->aspace);
+
+ msm_ringbuffer_destroy(gpu->rb[i]);
}
- if (gpu->mmu)
- gpu->mmu->funcs->destroy(gpu->mmu);
+ msm_snapshot_destroy(gpu, gpu->snapshot);
+ pm_runtime_disable(&pdev->dev);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 2bbe85a3d6f6..06dfaabbfcfe 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -19,14 +19,24 @@
#define __MSM_GPU_H__
#include <linux/clk.h>
+#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
#include "msm_ringbuffer.h"
+#include "msm_snapshot.h"
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+struct msm_gpu_config {
+ const char *ioname;
+ const char *irqname;
+ int nr_rings;
+ uint64_t va_start;
+ uint64_t va_end;
+};
+
/* So far, with hardware that I've seen to date, we can have:
* + zero, one, or two z180 2d cores
* + a3xx or a2xx 3d core, which share a common CP (the firmware
@@ -46,18 +56,21 @@ struct msm_gpu_funcs {
int (*hw_init)(struct msm_gpu *gpu);
int (*pm_suspend)(struct msm_gpu *gpu);
int (*pm_resume)(struct msm_gpu *gpu);
- int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
- void (*flush)(struct msm_gpu *gpu);
- void (*idle)(struct msm_gpu *gpu);
+ int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+ void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
irqreturn_t (*irq)(struct msm_gpu *irq);
- uint32_t (*last_fence)(struct msm_gpu *gpu);
+ uint32_t (*last_fence)(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring);
+ uint32_t (*submitted_fence)(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring);
+ struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
/* show GPU status in debugfs: */
void (*show)(struct msm_gpu *gpu, struct seq_file *m);
#endif
+ int (*snapshot)(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
};
struct msm_gpu {
@@ -77,14 +90,12 @@ struct msm_gpu {
const struct msm_gpu_perfcntr *perfcntrs;
uint32_t num_perfcntrs;
- struct msm_ringbuffer *rb;
- uint32_t rb_iova;
+ struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
+ int nr_rings;
/* list of GEM active objects: */
struct list_head active_list;
- uint32_t submitted_fence;
-
/* is gpu powered/active? */
int active_cnt;
bool inactive;
@@ -95,13 +106,20 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
- struct msm_mmu *mmu;
- int id;
+ struct msm_gem_address_space *aspace;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
- struct clk *ebi1_clk, *grp_clks[6];
- uint32_t fast_rate, slow_rate, bus_freq;
+ struct clk **grp_clks;
+ struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
+ int nr_clocks;
+
+ uint32_t gpufreq[10];
+ uint32_t busfreq[10];
+ uint32_t nr_pwrlevels;
+ uint32_t active_level;
+
+ struct pm_qos_request pm_qos_req_dma;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
@@ -117,15 +135,44 @@ struct msm_gpu {
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
- uint32_t hangcheck_fence;
+ uint32_t hangcheck_fence[MSM_GPU_MAX_RINGS];
struct work_struct recover_work;
struct list_head submit_list;
+
+ struct msm_snapshot *snapshot;
};
+/* It turns out that all targets use the same ringbuffer size. */
+#define MSM_GPU_RINGBUFFER_SZ SZ_32K
+#define MSM_GPU_RINGBUFFER_BLKSIZE 32
+
+#define MSM_GPU_RB_CNTL_DEFAULT \
+ (AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
+ AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
+
+static inline struct msm_ringbuffer *__get_ring(struct msm_gpu *gpu, int index)
+{
+ return (index < ARRAY_SIZE(gpu->rb) ? gpu->rb[index] : NULL);
+}
+
+#define FOR_EACH_RING(gpu, ring, index) \
+ for (index = 0, ring = (gpu)->rb[0]; \
+ index < (gpu)->nr_rings && index < ARRAY_SIZE((gpu)->rb); \
+ index++, ring = __get_ring(gpu, index))
+
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
- return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+ struct msm_ringbuffer *ring;
+ int i;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (gpu->funcs->submitted_fence(gpu, ring) >
+ gpu->funcs->last_fence(gpu, ring))
+ return true;
+ }
+
+ return false;
}
/* Perf-Counters:
@@ -151,6 +198,45 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
return msm_readl(gpu->mmio + (reg << 2));
}
+static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+{
+ uint32_t val = gpu_read(gpu, reg);
+
+ val &= ~mask;
+ gpu_write(gpu, reg, val | or);
+}
+
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ /*
+ * Why not a readq here? Two reasons: 1) many of the LO registers are
+ * not quad word aligned and 2) the GPU hardware designers have a bit
+ * of a history of putting registers where they fit, especially in
+ * spins. The longer a GPU family goes the higher the chance that
+ * we'll get burned. We could do a series of validity checks if we
+ * wanted to, but really is a readq() that much better? Nah.
+ */
+
+ /*
+ * For some lo/hi registers (like perfcounters), the hi value is latched
+ * when the lo is read, so make sure to read the lo first to trigger
+ * that
+ */
+ val = (u64) msm_readl(gpu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
+static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+{
+ /* Why not a writeq here? Read the screed above */
+ msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
+ msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+}
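The lo-first rule in gpu_read64() matters because some counters latch their hi half when lo is read. A standalone model of such a latched register pair, showing the read order that keeps a value with a carry into the upper word from tearing:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t counter = 0x100000005ULL;   /* fake 64-bit perfcounter */
    static uint32_t latched_hi;

    static uint32_t read_lo(void)
    {
        latched_hi = (uint32_t)(counter >> 32); /* hw latches hi on lo read */
        return (uint32_t)counter;
    }

    static uint32_t read_hi(void)
    {
        return latched_hi;
    }

    int main(void)
    {
        uint64_t val = (uint64_t)read_lo();     /* lo first, per the comment */

        val |= (uint64_t)read_hi() << 32;
        printf("0x%llx\n", (unsigned long long)val);
        return 0;
    }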
+
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
@@ -160,12 +246,12 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
void msm_gpu_retire(struct msm_gpu *gpu);
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
- const char *name, const char *ioname, const char *irqname, int ringsz);
+ const char *name, struct msm_gpu_config *config);
+
void msm_gpu_cleanup(struct msm_gpu *gpu);
struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
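msm_gpu_active() now has to poll every ring, which is what FOR_EACH_RING iterates with a double bound on nr_rings and the array size. A compact user-space model of the macro's bounds logic (the real struct is trimmed to two fields):

    #include <stdio.h>

    #define MAX_RINGS 4

    struct gpu {
        int rb[MAX_RINGS];      /* stand-in for struct msm_ringbuffer *rb[] */
        int nr_rings;
    };

    #define FOR_EACH_RING(gpu, ring, i) \
        for ((i) = 0, (ring) = (gpu)->rb[0]; \
             (i) < (gpu)->nr_rings && (i) < MAX_RINGS; \
             (i)++, (ring) = ((i) < MAX_RINGS ? (gpu)->rb[(i)] : 0))

    int main(void)
    {
        struct gpu g = { .rb = { 10, 20, 30, 40 }, .nr_rings = 2 };
        int ring, i;

        FOR_EACH_RING(&g, ring, i)
            printf("ring[%d] = %d\n", i, ring);   /* visits rings 0 and 1 only */
        return 0;
    }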
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7ac2f1997e4a..3c16222b8890 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -15,41 +15,189 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_platform.h>
#include "msm_drv.h"
-#include "msm_mmu.h"
-
-struct msm_iommu {
- struct msm_mmu base;
- struct iommu_domain *domain;
-};
-#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+#include "msm_iommu.h"
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
unsigned long iova, int flags, void *arg)
{
- pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
+ pr_warn_ratelimited("*** fault: iova=%16llX, flags=%d\n", (u64) iova, flags);
return 0;
}
+/*
+ * Get and enable the IOMMU clocks so that we can make
+ * sure they stay on the entire duration so that we can
+ * safely change the pagetable from the GPU
+ */
+static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ struct device *dev;
+ struct property *prop;
+ const char *name;
+ int i = 0;
+
+ if (WARN_ON(!pdev))
+ return;
+
+ dev = &pdev->dev;
+
+ iommu->nr_clocks =
+ of_property_count_strings(dev->of_node, "clock-names");
+
+ if (iommu->nr_clocks < 0) {
+ iommu->nr_clocks = 0;
+ return;
+ }
+
+ if (WARN_ON(iommu->nr_clocks > ARRAY_SIZE(iommu->clocks)))
+ iommu->nr_clocks = ARRAY_SIZE(iommu->clocks);
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ if (i == iommu->nr_clocks)
+ break;
+
+ iommu->clocks[i] = clk_get(dev, name);
+ if (IS_ERR(iommu->clocks[i]))
+ iommu->clocks[i] = NULL;
+ else
+ clk_prepare_enable(iommu->clocks[i]);
+
+ i++;
+ }
+}
+
+static int _attach_iommu_device(struct msm_mmu *mmu,
+ struct iommu_domain *domain, const char **names, int cnt)
+{
+ int i;
+
+ /* See if there is an iommus member in the current device. If not, look
+ * for the names and see if there is one in there.
+ */
+
+ if (of_find_property(mmu->dev->of_node, "iommus", NULL))
+ return iommu_attach_device(domain, mmu->dev);
+
+ /* Look through the list of names for a target */
+ for (i = 0; i < cnt; i++) {
+ struct device_node *node =
+ of_find_node_by_name(mmu->dev->of_node, names[i]);
+
+ if (!node)
+ continue;
+
+ if (of_find_property(node, "iommus", NULL)) {
+ struct platform_device *pdev;
+
+ /* Get the platform device for the node */
+ of_platform_populate(node->parent, NULL, NULL,
+ mmu->dev);
+
+ pdev = of_find_device_by_node(node);
+
+ if (!pdev)
+ continue;
+
+ _get_iommu_clocks(mmu,
+ of_find_device_by_node(node->parent));
+
+ mmu->dev = &pdev->dev;
+
+ return iommu_attach_device(domain, mmu->dev);
+ }
+ }
+
+ dev_err(mmu->dev, "Couldn't find a IOMMU device\n");
+ return -ENODEV;
+}
+
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- return iommu_attach_device(iommu->domain, mmu->dev);
+ int val = 1, ret;
+
+ /* Hope springs eternal */
+ iommu->allow_dynamic = true;
+
+ /* per-instance pagetables need TTBR1 support in the IOMMU driver */
+ ret = iommu_domain_set_attr(iommu->domain,
+ DOMAIN_ATTR_ENABLE_TTBR1, &val);
+ if (ret)
+ iommu->allow_dynamic = false;
+
+ /* Attach the device to the domain */
+ ret = _attach_iommu_device(mmu, iommu->domain, names, cnt);
+ if (ret)
+ return ret;
+
+ /*
+ * Get the context bank for the base domain; this will be shared with
+ * the children.
+ */
+ iommu->cb = -1;
+ if (iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXT_BANK,
+ &iommu->cb))
+ iommu->allow_dynamic = false;
+
+ return 0;
+}
+
+static int msm_iommu_attach_dynamic(struct msm_mmu *mmu, const char **names,
+ int cnt)
+{
+ static unsigned int procid;
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int ret;
+ unsigned int id;
+
+ /* Assign a unique procid for the domain to cut down on TLB churn */
+ id = ++procid;
+
+ iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_PROCID, &id);
+
+ ret = iommu_attach_device(iommu->domain, mmu->dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Get the TTBR0 and the CONTEXTIDR - these will be used by the GPU to
+ * switch the pagetable on its own.
+ */
+ iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_TTBR0,
+ &iommu->ttbr0);
+ iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXTIDR,
+ &iommu->contextidr);
+
+ return 0;
+}
+
+static void msm_iommu_detach(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i;
+
+ iommu_detach_device(iommu->domain, mmu->dev);
+
+ for (i = 0; i < iommu->nr_clocks; i++) {
+ if (iommu->clocks[i])
+ clk_disable(iommu->clocks[i]);
+ }
}
-static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_detach_device(iommu->domain, mmu->dev);
}
-static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
unsigned int i, j;
int ret;
@@ -57,10 +205,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_phys(sg) - sg->offset;
+ phys_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ VERB("map[%d]: %016llx %pa(%zx)", i, iova, &pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -82,13 +230,13 @@ fail:
return ret;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -99,7 +247,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ VERB("unmap[%d]: %016llx(%zx)", i, iova, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
@@ -124,7 +272,16 @@ static const struct msm_mmu_funcs funcs = {
.destroy = msm_iommu_destroy,
};
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+static const struct msm_mmu_funcs dynamic_funcs = {
+ .attach = msm_iommu_attach_dynamic,
+ .detach = msm_iommu_detach_dynamic,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+};
+
+struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
+ const struct msm_mmu_funcs *funcs)
{
struct msm_iommu *iommu;
@@ -133,8 +290,54 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
return ERR_PTR(-ENOMEM);
iommu->domain = domain;
- msm_mmu_init(&iommu->base, dev, &funcs);
+ msm_mmu_init(&iommu->base, dev, funcs);
iommu_set_fault_handler(domain, msm_fault_handler, dev);
return &iommu->base;
}
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+{
+ return _msm_iommu_new(dev, domain, &funcs);
+}
+
+/*
+ * Given a base domain that is attached to an IOMMU device, try to create a
+ * dynamic domain that is also attached to the same device but allocates a new
+ * pagetable. This is used to allow multiple pagetables to be attached to the
+ * same device.
+ */
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *base)
+{
+ struct msm_iommu *base_iommu = to_msm_iommu(base);
+ struct iommu_domain *domain;
+ struct msm_mmu *mmu;
+ int ret, val = 1;
+
+ /* Don't continue if the base domain didn't have the support we need */
+ if (!base || base_iommu->allow_dynamic == false)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return ERR_PTR(-ENODEV);
+
+ mmu = _msm_iommu_new(base->dev, domain, &dynamic_funcs);
+
+ if (IS_ERR(mmu)) {
+ if (domain)
+ iommu_domain_free(domain);
+ return mmu;
+ }
+
+ ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &val);
+ if (ret) {
+ msm_iommu_destroy(mmu);
+ return ERR_PTR(ret);
+ }
+
+ /* Set the context bank to match the base domain */
+ iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK,
+ &base_iommu->cb);
+
+ return mmu;
+}
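The base/dynamic split works like this: the base domain probes for TTBR1 support at attach time and records its context bank; each dynamic child then reuses that context bank with a fresh pagetable and a unique procid. A toy user-space model of that state handoff (field names mirror the patch, behavior is simplified):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_iommu {
        bool allow_dynamic;     /* set if the base negotiated TTBR1 */
        int cb;                 /* context bank shared with children */
        int procid;             /* unique per dynamic instance */
    };

    static int next_procid;

    static bool toy_new_dynamic(const struct toy_iommu *base,
                                struct toy_iommu *child)
    {
        if (!base->allow_dynamic)
            return false;               /* -EOPNOTSUPP in the real code */

        child->allow_dynamic = false;
        child->cb = base->cb;           /* share the context bank */
        child->procid = ++next_procid;  /* cut down on TLB churn */
        return true;
    }

    int main(void)
    {
        struct toy_iommu base = { .allow_dynamic = true, .cb = 3 };
        struct toy_iommu child;

        if (toy_new_dynamic(&base, &child))
            printf("child cb=%d procid=%d\n", child.cb, child.procid);
        return 0;
    }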
diff --git a/drivers/gpu/drm/msm/msm_iommu.h b/drivers/gpu/drm/msm/msm_iommu.h
new file mode 100644
index 000000000000..d005cfb9758f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_iommu.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_IOMMU_H_
+#define _MSM_IOMMU_H_
+
+#include "msm_mmu.h"
+
+struct msm_iommu {
+ struct msm_mmu base;
+ struct iommu_domain *domain;
+ int cb;
+ phys_addr_t ttbr0;
+ uint32_t contextidr;
+ bool allow_dynamic;
+
+ struct clk *clocks[5];
+ int nr_clocks;
+};
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+static inline bool msm_iommu_allow_dynamic(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ return iommu->allow_dynamic;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index cbf0d4593522..501f12bef00d 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -21,7 +21,6 @@
#include <linux/iommu.h>
struct msm_mmu;
-struct msm_gpu;
enum msm_mmu_domain_type {
MSM_SMMU_DOMAIN_UNSECURE,
@@ -33,11 +32,10 @@ enum msm_mmu_domain_type {
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
- void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
- int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
- unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
- unsigned len);
+ void (*detach)(struct msm_mmu *mmu);
+ int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
+ int prot);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt);
int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
enum dma_data_direction dir);
void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
@@ -62,8 +60,8 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
}
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
struct msm_mmu *msm_smmu_new(struct device *dev,
enum msm_mmu_domain_type domain);
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *base);
#endif /* __MSM_MMU_H__ */
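
(Editorial sketch, not part of the patch: how a caller drives the reworked map/unmap hooks. The iova and prot values are placeholders; note the explicit length argument is gone and the iova is now 64-bit.)

	#include <linux/iommu.h>
	#include "msm_mmu.h"

	static int example_map_unmap(struct msm_mmu *mmu, struct sg_table *sgt)
	{
		uint64_t iova = 0x100000000ULL;	/* arbitrary example address */
		int ret;

		/* The mapped length is now derived from the sg_table itself */
		ret = mmu->funcs->map(mmu, iova, sgt, IOMMU_READ | IOMMU_WRITE);
		if (ret)
			return ret;

		return mmu->funcs->unmap(mmu, iova, sgt);
	}
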
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 9a78c48817c6..6dbb516cd4dc 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -307,7 +307,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_cmds; i++) {
uint32_t idx = submit->cmd[i].idx;
- uint32_t iova = submit->cmd[i].iova;
+ uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf = msm_gem_vaddr_locked(&obj->base);
@@ -315,7 +315,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
buf += iova - submit->bos[idx].iova;
rd_write_section(rd, RD_GPUADDR,
- (uint32_t[2]){ iova, szd * 4 }, 8);
+ (uint64_t[2]) { iova, szd * 4 }, 16);
rd_write_section(rd, RD_BUFFER_CONTENTS,
buf, szd * 4);
@@ -329,7 +329,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
case MSM_SUBMIT_CMD_BUF:
rd_write_section(rd, RD_CMDSTREAM_ADDR,
- (uint32_t[2]){ iova, szd }, 8);
+ (uint64_t[2]) { iova, szd }, 16);
break;
}
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 1f14b908b221..14a16c4578d9 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -18,12 +18,13 @@
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
{
struct msm_ringbuffer *ring;
int ret;
- size = ALIGN(size, 4); /* size should be dword aligned */
+ /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
+ BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
@@ -32,7 +33,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->gpu = gpu;
- ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
+ ring->id = id;
+ ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, MSM_BO_WC);
if (IS_ERR(ring->bo)) {
ret = PTR_ERR(ring->bo);
ring->bo = NULL;
@@ -40,10 +42,11 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->start = msm_gem_vaddr_locked(ring->bo);
- ring->end = ring->start + (size / 4);
+ ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
+ ring->next = ring->start;
ring->cur = ring->start;
- ring->size = size;
+ spin_lock_init(&ring->lock);
return ring;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 6e0e1049fa4f..1e84905073bf 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -22,12 +22,15 @@
struct msm_ringbuffer {
struct msm_gpu *gpu;
- int size;
+ int id;
struct drm_gem_object *bo;
- uint32_t *start, *end, *cur;
+ uint32_t *start, *end, *cur, *next;
+ uint64_t iova;
+ uint32_t submitted_fence;
+ spinlock_t lock;
};
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
@@ -35,9 +38,13 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
- if (ring->cur == ring->end)
- ring->cur = ring->start;
- *(ring->cur++) = data;
+ /*
+ * ring->next points to the current command being written - it won't be
+ * committed as ring->cur until the flush
+ */
+ if (ring->next == ring->end)
+ ring->next = ring->start;
+ *(ring->next++) = data;
}
#endif /* __MSM_RINGBUFFER_H__ */
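
(Illustrative sketch, not part of the patch: the staged-write model introduced above. OUT_RING() now writes at ring->next; nothing becomes visible to the GPU until a flush copies ring->next into ring->cur. The flush shown here is a hypothetical minimal version; a real flush would also ring the hardware wptr.)

	static void example_emit_and_flush(struct msm_ringbuffer *ring)
	{
		unsigned long flags;

		spin_lock_irqsave(&ring->lock, flags);
		OUT_RING(ring, 0x00000000);	/* staged at ring->next only */
		ring->cur = ring->next;		/* commit: visible after flush */
		spin_unlock_irqrestore(&ring->lock, flags);
	}
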
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index f29c1df46691..c99f51e09700 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -86,7 +86,7 @@ static int msm_smmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
return 0;
}
-static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_smmu_detach(struct msm_mmu *mmu)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
@@ -104,14 +104,14 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
dev_dbg(client->dev, "iommu domain detached\n");
}
-static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, int prot)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
struct iommu_domain *domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
unsigned int i, j;
int ret;
@@ -126,7 +126,7 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
u32 pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ VERB("map[%d]: %16llx %08x(%zx)", i, iova, pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -172,14 +172,14 @@ static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
}
-static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len)
+static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
struct iommu_domain *domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
int i;
if (!client)
@@ -197,7 +197,7 @@ static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ VERB("unmap[%d]: %16llx(%zx)", i, iova, bytes);
WARN_ON(!PAGE_ALIGNED(bytes));
diff --git a/drivers/gpu/drm/msm/msm_snapshot.c b/drivers/gpu/drm/msm/msm_snapshot.c
new file mode 100644
index 000000000000..30f3e5c64ebd
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot.c
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "msm_snapshot_api.h"
+
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+
+ if (!snapshot)
+ return;
+
+ dma_free_coherent(&pdev->dev, SZ_1M, snapshot->ptr,
+ snapshot->physaddr);
+
+ kfree(snapshot);
+}
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
+ if (!snapshot)
+ return ERR_PTR(-ENOMEM);
+
+ snapshot->ptr = dma_alloc_coherent(&pdev->dev, SZ_1M,
+ &snapshot->physaddr, GFP_KERNEL);
+
+ if (!snapshot->ptr) {
+ kfree(snapshot);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+ return snapshot;
+}
+
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ int ret;
+ struct msm_snapshot_header header;
+ uint64_t val;
+
+ if (!snapshot)
+ return -ENOMEM;
+
+ /*
+ * For now, blow away the snapshot and take a new one - the most
+ * interesting hang is the last one we saw
+ */
+ seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+ header.magic = SNAPSHOT_MAGIC;
+ gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
+ header.gpuid = lower_32_bits(val);
+
+ gpu->funcs->get_param(gpu, MSM_PARAM_CHIP_ID, &val);
+ header.chipid = lower_32_bits(val);
+
+ seq_buf_putmem(&snapshot->buf, &header, sizeof(header));
+
+ ret = gpu->funcs->snapshot(gpu, snapshot);
+
+ if (!ret) {
+ struct msm_snapshot_section_header end;
+
+ end.magic = SNAPSHOT_SECTION_MAGIC;
+ end.id = SNAPSHOT_SECTION_END;
+ end.size = sizeof(end);
+
+ seq_buf_putmem(&snapshot->buf, &end, sizeof(end));
+
+ dev_info(gpu->dev->dev, "GPU snapshot created [%pa (%d bytes)]\n",
+ &snapshot->physaddr, seq_buf_used(&snapshot->buf));
+ }
+
+ return ret;
+}
+
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m)
+{
+ if (gpu && gpu->snapshot)
+ seq_write(m, gpu->snapshot->ptr,
+ seq_buf_used(&gpu->snapshot->buf));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/msm_snapshot.h b/drivers/gpu/drm/msm/msm_snapshot.h
new file mode 100644
index 000000000000..247e1358c885
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_H_
+#define MSM_SNAPSHOT_H_
+
+#include <linux/string.h>
+#include <linux/seq_buf.h>
+#include "msm_snapshot_api.h"
+
+struct msm_snapshot {
+ void *ptr;
+ struct seq_buf buf;
+ phys_addr_t physaddr;
+ uint32_t index;
+ uint32_t remain;
+ unsigned long timestamp;
+ void *priv;
+};
+
+/* Write a uint32_t value to the next position in the snapshot buffer */
+static inline void SNAPSHOT_WRITE_U32(struct msm_snapshot *snapshot,
+ uint32_t value)
+{
+ seq_buf_putmem(&snapshot->buf, &value, sizeof(value));
+}
+
+/* Copy a block of memory to the next position in the snapshot buffer */
+static inline void SNAPSHOT_MEMCPY(struct msm_snapshot *snapshot, void *src,
+ uint32_t size)
+{
+ if (size)
+ seq_buf_putmem(&snapshot->buf, src, size);
+}
+
+static inline bool _snapshot_header(struct msm_snapshot *snapshot,
+ struct msm_snapshot_section_header *header,
+ u32 headsz, u32 datasz, u32 id)
+{
+ u32 size = headsz + datasz;
+
+ if (seq_buf_buffer_left(&snapshot->buf) <= size)
+ return false;
+
+ /* Fill in the section header fields */
+ header->magic = SNAPSHOT_SECTION_MAGIC;
+ header->id = id;
+ header->size = headsz + datasz;
+
+ /* Write the section header */
+ seq_buf_putmem(&snapshot->buf, header, headsz);
+
+ /* The caller will fill in the data from here */
+ return true;
+}
+
+/* SNAPSHOT_HEADER
+ * _snapshot: pointer to struct msm_snapshot
+ * _header: Local variable containing the sub-section header
+ * _id: Section ID to write
+ * _dwords: Size of the data section (in dwords)
+ */
+#define SNAPSHOT_HEADER(_snapshot, _header, _id, _dwords) \
+ _snapshot_header((_snapshot), \
+ (struct msm_snapshot_section_header *) &(_header), \
+ sizeof(_header), (_dwords) << 2, (_id))
+
+struct msm_gpu;
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu);
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m);
+
+#endif
+
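
(Hypothetical usage of the helpers above, not part of the patch: dumping a two-register section from a GPU-specific snapshot callback. The register offsets and the offset/value pair layout of the regs payload are assumptions; the SNAPSHOT_HEADER-then-SNAPSHOT_WRITE_U32 pattern is what the API expects.)

	static void example_snapshot_regs(struct msm_gpu *gpu,
			struct msm_snapshot *snapshot)
	{
		struct msm_snapshot_regs header = {
			.count = 2,	/* offset/value pairs written below */
		};

		/* Reserve 4 data dwords: (offset, value) for two registers */
		if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2, 4))
			return;

		SNAPSHOT_WRITE_U32(snapshot, 0x0000);	/* example offset */
		SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, 0x0000));
		SNAPSHOT_WRITE_U32(snapshot, 0x0004);
		SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, 0x0004));
	}
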
diff --git a/drivers/gpu/drm/msm/msm_snapshot_api.h b/drivers/gpu/drm/msm/msm_snapshot_api.h
new file mode 100644
index 000000000000..9f0adb9ee784
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot_api.h
@@ -0,0 +1,121 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_API_H_
+#define MSM_SNAPSHOT_API_H_
+
+#include <linux/types.h>
+
+/* High word is the magic, low word is the snapshot header version */
+#define SNAPSHOT_MAGIC 0x504D0002
+
+struct msm_snapshot_header {
+ __u32 magic;
+ __u32 gpuid;
+ __u32 chipid;
+} __packed;
+
+#define SNAPSHOT_SECTION_MAGIC 0xABCD
+
+struct msm_snapshot_section_header {
+ __u16 magic;
+ __u16 id;
+ __u32 size;
+} __packed;
+
+/* Section identifiers */
+#define SNAPSHOT_SECTION_OS 0x0101
+#define SNAPSHOT_SECTION_REGS_V2 0x0202
+#define SNAPSHOT_SECTION_RB_V2 0x0302
+#define SNAPSHOT_SECTION_IB_V2 0x0402
+#define SNAPSHOT_SECTION_INDEXED_REGS 0x0501
+#define SNAPSHOT_SECTION_DEBUG 0x0901
+#define SNAPSHOT_SECTION_DEBUGBUS 0x0A01
+#define SNAPSHOT_SECTION_GPU_OBJECT_V2 0x0B02
+#define SNAPSHOT_SECTION_MEMLIST_V2 0x0E02
+#define SNAPSHOT_SECTION_SHADER 0x1201
+#define SNAPSHOT_SECTION_END 0xFFFF
+
+#define SNAPSHOT_OS_LINUX_V3 0x00000202
+
+struct msm_snapshot_linux {
+ struct msm_snapshot_section_header header;
+ int osid;
+ __u32 seconds;
+ __u32 power_flags;
+ __u32 power_level;
+ __u32 power_interval_timeout;
+ __u32 grpclk;
+ __u32 busclk;
+ __u64 ptbase;
+ __u32 pid;
+ __u32 current_context;
+ __u32 ctxtcount;
+ unsigned char release[32];
+ unsigned char version[32];
+ unsigned char comm[16];
+} __packed;
+
+struct msm_snapshot_ringbuffer {
+ struct msm_snapshot_section_header header;
+ int start;
+ int end;
+ int rbsize;
+ int wptr;
+ int rptr;
+ int count;
+ __u32 timestamp_queued;
+ __u32 timestamp_retired;
+ __u64 gpuaddr;
+ __u32 id;
+} __packed;
+
+struct msm_snapshot_regs {
+ struct msm_snapshot_section_header header;
+ __u32 count;
+} __packed;
+
+struct msm_snapshot_indexed_regs {
+ struct msm_snapshot_section_header header;
+ __u32 index_reg;
+ __u32 data_reg;
+ __u32 start;
+ __u32 count;
+} __packed;
+
+#define SNAPSHOT_DEBUG_CP_MEQ 7
+#define SNAPSHOT_DEBUG_CP_PM4_RAM 8
+#define SNAPSHOT_DEBUG_CP_PFP_RAM 9
+#define SNAPSHOT_DEBUG_CP_ROQ 10
+#define SNAPSHOT_DEBUG_SHADER_MEMORY 11
+#define SNAPSHOT_DEBUG_CP_MERCIU 12
+
+struct msm_snapshot_debug {
+ struct msm_snapshot_section_header header;
+ __u32 type;
+ __u32 size;
+} __packed;
+
+struct msm_snapshot_debugbus {
+ struct msm_snapshot_section_header header;
+ __u32 id;
+ __u32 count;
+} __packed;
+
+struct msm_snapshot_shader {
+ struct msm_snapshot_section_header header;
+ __u32 type;
+ __u32 index;
+ __u32 size;
+} __packed;
+
+#endif
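
(Editorial sketch, not part of the patch: walking a snapshot blob with the layout defined above — one msm_snapshot_header, then sections that each begin with msm_snapshot_section_header, ending at SNAPSHOT_SECTION_END. Assumes the header above compiles in userspace.)

	#include <stddef.h>
	#include <stdint.h>
	#include "msm_snapshot_api.h"

	static void example_walk_snapshot(const uint8_t *buf, size_t len)
	{
		size_t pos = sizeof(struct msm_snapshot_header);

		while (pos + sizeof(struct msm_snapshot_section_header) <= len) {
			const struct msm_snapshot_section_header *sect =
				(const void *)(buf + pos);

			if (sect->magic != SNAPSHOT_SECTION_MAGIC)
				break;		/* corrupt stream */
			if (sect->id == SNAPSHOT_SECTION_END)
				break;		/* normal termination */
			if (sect->size < sizeof(*sect))
				break;		/* guard against a bad size */

			pos += sect->size;	/* size includes the header */
		}
	}
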
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index ac9997c238cd..31cf25ab5691 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -65,8 +65,12 @@ static void sde_connector_destroy(struct drm_connector *connector)
c_conn = to_sde_connector(connector);
+ if (c_conn->ops.pre_deinit)
+ c_conn->ops.pre_deinit(connector, c_conn->display);
+
if (c_conn->blob_caps)
drm_property_unreference_blob(c_conn->blob_caps);
+
msm_property_destroy(&c_conn->property_info);
drm_connector_unregister(connector);
@@ -88,8 +92,7 @@ static void _sde_connector_destroy_fb(struct sde_connector *c_conn,
return;
}
- msm_framebuffer_cleanup(c_state->out_fb,
- c_state->mmu_id);
+ msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace);
drm_framebuffer_unreference(c_state->out_fb);
c_state->out_fb = NULL;
@@ -193,7 +196,7 @@ sde_connector_atomic_duplicate_state(struct drm_connector *connector)
if (c_state->out_fb) {
drm_framebuffer_reference(c_state->out_fb);
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("failed to prepare fb, %d\n", rc);
}
@@ -241,14 +244,14 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
rc = -EFAULT;
} else {
if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE];
else
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("prep fb failed, %d\n", rc);
}
@@ -492,18 +495,17 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
c_conn->panel = panel;
c_conn->display = display;
- /* cache mmu_id's for later */
sde_kms = to_sde_kms(priv->kms);
if (sde_kms->vbif[VBIF_NRT]) {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
if (ops)
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 9580282291db..3f26ee7d5965 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,6 +44,15 @@ struct sde_connector_ops {
void *display);
/**
+ * pre_deinit - perform additional deinitialization steps
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+ int (*pre_deinit)(struct drm_connector *connector,
+ void *display);
+
+ /**
* detect - determine if connector is connected
* @connector: Pointer to drm connector structure
* @force: Force detect setting from drm framework
@@ -140,7 +149,7 @@ struct sde_connector {
struct drm_panel *panel;
void *display;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
char name[SDE_CONNECTOR_NAME_SIZE];
@@ -195,13 +204,13 @@ struct sde_connector {
* struct sde_connector_state - private connector status structure
* @base: Base drm connector structure
* @out_fb: Pointer to output frame buffer, if applicable
- * @mmu_id: MMU ID for accessing frame buffer objects, if applicable
+ * @aspace: Address space for accessing frame buffer objects, if applicable
* @property_values: Local cache of current connector property values
*/
struct sde_connector_state {
struct drm_connector_state base;
struct drm_framebuffer *out_fb;
- int mmu_id;
+ struct msm_gem_address_space *aspace;
uint64_t property_values[CONNECTOR_PROP_COUNT];
};
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index b2853e874d92..dbfc2dd11a17 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -432,6 +432,99 @@ void sde_core_irq_uninstall(struct sde_kms *sde_kms)
sde_kms->irq_obj.total_irqs = 0;
}
+static void sde_hw_irq_mask(struct irq_data *irqd)
+{
+ struct sde_kms *sde_kms;
+
+ if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+ SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+ return;
+ }
+ sde_kms = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void sde_hw_irq_unmask(struct irq_data *irqd)
+{
+ struct sde_kms *sde_kms;
+
+ if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+ SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+ return;
+ }
+ sde_kms = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip sde_hw_irq_chip = {
+ .name = "sde",
+ .irq_mask = sde_hw_irq_mask,
+ .irq_unmask = sde_hw_irq_unmask,
+};
+
+static int sde_hw_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct sde_kms *sde_kms;
+ int rc;
+
+ if (!domain || !domain->host_data) {
+ SDE_ERROR("invalid parameters domain %d\n", domain != 0);
+ return -EINVAL;
+ }
+ sde_kms = domain->host_data;
+
+ irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
+ rc = irq_set_chip_data(irq, sde_kms);
+
+ return rc;
+}
+
+static struct irq_domain_ops sde_hw_irqdomain_ops = {
+ .map = sde_hw_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+int sde_core_irq_domain_add(struct sde_kms *sde_kms)
+{
+ struct device *dev;
+ struct irq_domain *domain;
+
+ if (!sde_kms->dev || !sde_kms->dev->dev) {
+ pr_err("invalid device handles\n");
+ return -EINVAL;
+ }
+
+ dev = sde_kms->dev->dev;
+
+ domain = irq_domain_add_linear(dev->of_node, 32,
+ &sde_hw_irqdomain_ops, sde_kms);
+ if (!domain) {
+ pr_err("failed to add irq_domain\n");
+ return -EINVAL;
+ }
+
+ sde_kms->irq_controller.enabled_mask = 0;
+ sde_kms->irq_controller.domain = domain;
+
+ return 0;
+}
+
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms)
+{
+ if (sde_kms->irq_controller.domain) {
+ irq_domain_remove(sde_kms->irq_controller.domain);
+ sde_kms->irq_controller.domain = NULL;
+ }
+ return 0;
+}
+
irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
{
/*
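
(For illustration only, not part of the patch: a client mapping one of the 32 SDE hw interrupts through the domain that now lives in sde_core_irq.c. The hwirq number, handler, and name are placeholders.)

	#include <linux/interrupt.h>
	#include <linux/irqdomain.h>

	static int example_request_sde_hwirq(struct sde_kms *sde_kms,
			irq_hw_number_t hwirq, irq_handler_t handler, void *data)
	{
		unsigned int virq;

		/* Runs sde_hw_irqdomain_map(), wiring up sde_hw_irq_chip */
		virq = irq_create_mapping(sde_kms->irq_controller.domain, hwirq);
		if (!virq)
			return -EINVAL;

		/* Unmasking sets the hwirq bit in irq_controller.enabled_mask */
		return request_irq(virq, handler, 0, "sde-example", data);
	}
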
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
index 92642e73daa8..ee1b9bd1d32b 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,20 @@ int sde_core_irq_postinstall(struct sde_kms *sde_kms);
void sde_core_irq_uninstall(struct sde_kms *sde_kms);
/**
+ * sde_core_irq_domain_add - Add core IRQ domain for SDE
+ * @sde_kms: SDE handle
+ * @return: 0 if success; error code otherwise
+ */
+int sde_core_irq_domain_add(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_domain_fini - uninstall core IRQ domain
+ * @sde_kms: SDE handle
+ * @return: 0 if success; error code otherwise
+ */
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms);
+
+/**
* sde_core_irq - core IRQ handler
* @sde_kms: SDE handle
* @return: interrupt handling status
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index ed4b7be34281..2205dd98a927 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -264,7 +264,7 @@ struct sde_encoder_phys_cmd {
* @wb_fmt: Writeback pixel format
* @frame_count: Counter of completed writeback operations
* @kickoff_count: Counter of issued writeback operations
- * @mmu_id: mmu identifier for non-secure/secure domain
+ * @aspace: address space identifier for non-secure/secure domain
* @wb_dev: Pointer to writeback device
* @start_time: Start time of writeback latest request
* @end_time: End time of writeback latest request
@@ -285,7 +285,7 @@ struct sde_encoder_phys_wb {
const struct sde_format *wb_fmt;
u32 frame_count;
u32 kickoff_count;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
struct sde_wb_device *wb_dev;
ktime_t start_time;
ktime_t end_time;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 9943e3906df0..9368c4974126 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -180,7 +180,8 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
struct sde_hw_wb *hw_wb;
struct sde_hw_wb_cfg *wb_cfg;
const struct msm_format *format;
- int ret, mmu_id;
+ int ret;
+ struct msm_gem_address_space *aspace;
if (!phys_enc) {
SDE_ERROR("invalid encoder\n");
@@ -193,9 +194,9 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
- mmu_id = (wb_cfg->is_secure) ?
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] :
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ aspace = (wb_cfg->is_secure) ?
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
@@ -217,7 +218,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->roi = *wb_roi;
if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
- ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest);
+ ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
if (ret) {
SDE_DEBUG("failed to populate layout %d\n", ret);
return;
@@ -226,7 +227,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->dest.height = fb->height;
wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
} else {
- ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
+ ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
&wb_cfg->dest);
if (ret) {
/* this error should be detected during atomic_check */
@@ -1017,15 +1018,15 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
phys_enc = &wb_enc->base;
if (p->sde_kms->vbif[VBIF_NRT]) {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 41180f5dec12..42bbbdcab2c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -630,7 +630,7 @@ static int _sde_format_get_plane_sizes(
}
static int _sde_format_populate_addrs_ubwc(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -641,7 +641,7 @@ static int _sde_format_populate_addrs_ubwc(
return -EINVAL;
}
- base_addr = msm_framebuffer_iova(fb, mmu_id, 0);
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
if (!base_addr) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -711,7 +711,7 @@ static int _sde_format_populate_addrs_ubwc(
}
static int _sde_format_populate_addrs_linear(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -728,7 +728,7 @@ static int _sde_format_populate_addrs_linear(
/* Populate addresses for simple formats here */
for (i = 0; i < layout->num_planes; ++i) {
- layout->plane_addr[i] = msm_framebuffer_iova(fb, mmu_id, i);
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
if (!layout->plane_addr[i]) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -739,7 +739,7 @@ static int _sde_format_populate_addrs_linear(
}
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -770,9 +770,9 @@ int sde_format_populate_layout(
/* Populate the addresses given the fb */
if (SDE_FORMAT_IS_UBWC(layout->format))
- ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
else
- ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_linear(aspace, fb, layout);
/* check if anything changed */
if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
@@ -814,14 +814,14 @@ static void _sde_format_calc_offset_linear(struct sde_hw_fmt_layout *source,
}
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *layout)
{
int ret;
- ret = sde_format_populate_layout(mmu_id, fb, layout);
+ ret = sde_format_populate_layout(aspace, fb, layout);
if (ret || !roi)
return ret;
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 5dcdfbb653ed..0de081d619b7 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#define _SDE_FORMATS_H
#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
#include "sde_hw_mdss.h"
/**
@@ -76,7 +77,7 @@ int sde_format_check_modified_format(
/**
* sde_format_populate_layout - populate the given format layout based on
* mmu, fb, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @fmtl: format layout structure to populate
*
@@ -84,14 +85,14 @@ int sde_format_check_modified_format(
* are the same as before or 0 if new addresses were populated
*/
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *fmtl);
/**
* sde_format_populate_layout_with_roi - populate the given format layout
* based on mmu, fb, roi, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @roi: region of interest (optional)
* @fmtl: format layout structure to populate
@@ -99,7 +100,7 @@ int sde_format_populate_layout(
* Return: error code on failure, 0 on success
*/
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *fmtl);
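
(Illustrative call site for the aspace-based API above, not part of the patch; the error handling is a placeholder.)

	static int example_get_layout(struct msm_gem_address_space *aspace,
			struct drm_framebuffer *fb)
	{
		struct sde_hw_fmt_layout layout;
		int ret;

		ret = sde_format_populate_layout(aspace, fb, &layout);
		if (ret == -EAGAIN)
			return 0;	/* same addresses as the previous call */

		return ret;		/* 0 when new addresses were populated */
	}
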
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 31a6d985c38f..519288f0dda2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -1830,12 +1830,20 @@ static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
else if (!rc && !strcmp(type, "qseedv2"))
cfg->qseed_type = SDE_SSPP_SCALER_QSEED2;
+ else if (rc) {
+ SDE_DEBUG("qseed property not found\n");
+ rc = 0;
+ }
rc = of_property_read_string(np, sde_prop[CSC_TYPE].prop_name, &type);
if (!rc && !strcmp(type, "csc"))
cfg->csc_type = SDE_SSPP_CSC;
else if (!rc && !strcmp(type, "csc-10bit"))
cfg->csc_type = SDE_SSPP_CSC_10BIT;
+ else if (rc) {
+ SDE_DEBUG("CSC property not found\n");
+ rc = 0;
+ }
cfg->has_src_split = PROP_VALUE_ACCESS(prop_value, SRC_SPLIT, 0);
end:
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index 909d6df38260..eeb7a0002eab 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -49,86 +49,14 @@ irqreturn_t sde_irq(struct msm_kms *kms)
return IRQ_HANDLED;
}
-static void sde_hw_irq_mask(struct irq_data *irqd)
-{
- struct sde_kms *sde_kms;
-
- if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
- SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
- return;
- }
- sde_kms = irq_data_get_irq_chip_data(irqd);
-
- smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static void sde_hw_irq_unmask(struct irq_data *irqd)
-{
- struct sde_kms *sde_kms;
-
- if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
- SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
- return;
- }
- sde_kms = irq_data_get_irq_chip_data(irqd);
-
- smp_mb__before_atomic();
- set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static struct irq_chip sde_hw_irq_chip = {
- .name = "sde",
- .irq_mask = sde_hw_irq_mask,
- .irq_unmask = sde_hw_irq_unmask,
-};
-
-static int sde_hw_irqdomain_map(struct irq_domain *domain,
- unsigned int irq, irq_hw_number_t hwirq)
-{
- struct sde_kms *sde_kms;
- int rc;
-
- if (!domain || !domain->host_data) {
- SDE_ERROR("invalid parameters domain %d\n", domain != 0);
- return -EINVAL;
- }
- sde_kms = domain->host_data;
-
- irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
- rc = irq_set_chip_data(irq, sde_kms);
-
- return rc;
-}
-
-static struct irq_domain_ops sde_hw_irqdomain_ops = {
- .map = sde_hw_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
void sde_irq_preinstall(struct msm_kms *kms)
{
struct sde_kms *sde_kms = to_sde_kms(kms);
- struct device *dev;
- struct irq_domain *domain;
if (!sde_kms->dev || !sde_kms->dev->dev) {
pr_err("invalid device handles\n");
return;
}
- dev = sde_kms->dev->dev;
-
- domain = irq_domain_add_linear(dev->of_node, 32,
- &sde_hw_irqdomain_ops, sde_kms);
- if (!domain) {
- pr_err("failed to add irq_domain\n");
- return;
- }
-
- sde_kms->irq_controller.enabled_mask = 0;
- sde_kms->irq_controller.domain = domain;
sde_core_irq_preinstall(sde_kms);
}
@@ -158,9 +86,5 @@ void sde_irq_uninstall(struct msm_kms *kms)
}
sde_core_irq_uninstall(sde_kms);
-
- if (sde_kms->irq_controller.domain) {
- irq_domain_remove(sde_kms->irq_controller.domain);
- sde_kms->irq_controller.domain = NULL;
- }
+ sde_core_irq_domain_fini(sde_kms);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index afe90d16e31d..581918da183f 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -27,6 +27,7 @@
#include "dsi_display.h"
#include "dsi_drm.h"
#include "sde_wb.h"
+#include "sde_hdmi.h"
#include "sde_kms.h"
#include "sde_core_irq.h"
@@ -504,8 +505,30 @@ static int _sde_kms_get_displays(struct sde_kms *sde_kms)
wb_display_get_displays(sde_kms->wb_displays,
sde_kms->wb_display_count);
}
+
+ /* hdmi */
+ sde_kms->hdmi_displays = NULL;
+ sde_kms->hdmi_display_count = sde_hdmi_get_num_of_displays();
+ SDE_DEBUG("hdmi display count=%d", sde_kms->hdmi_display_count);
+ if (sde_kms->hdmi_display_count) {
+ sde_kms->hdmi_displays = kcalloc(sde_kms->hdmi_display_count,
+ sizeof(void *),
+ GFP_KERNEL);
+ if (!sde_kms->hdmi_displays) {
+ SDE_ERROR("failed to allocate hdmi displays\n");
+ goto exit_deinit_hdmi;
+ }
+ sde_kms->hdmi_display_count =
+ sde_hdmi_get_displays(sde_kms->hdmi_displays,
+ sde_kms->hdmi_display_count);
+ }
+
return 0;
+exit_deinit_hdmi:
+ sde_kms->hdmi_display_count = 0;
+ sde_kms->hdmi_displays = NULL;
+
exit_deinit_wb:
kfree(sde_kms->wb_displays);
sde_kms->wb_display_count = 0;
@@ -528,6 +551,9 @@ static void _sde_kms_release_displays(struct sde_kms *sde_kms)
SDE_ERROR("invalid sde kms\n");
return;
}
+ kfree(sde_kms->hdmi_displays);
+ sde_kms->hdmi_display_count = 0;
+ sde_kms->hdmi_displays = NULL;
kfree(sde_kms->wb_displays);
sde_kms->wb_displays = NULL;
@@ -565,18 +591,30 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.set_property = sde_wb_connector_set_property,
.get_info = sde_wb_get_info,
};
- struct msm_display_info info;
+ static const struct sde_connector_ops hdmi_ops = {
+ .pre_deinit = sde_hdmi_connector_pre_deinit,
+ .post_init = sde_hdmi_connector_post_init,
+ .detect = sde_hdmi_connector_detect,
+ .get_modes = sde_hdmi_connector_get_modes,
+ .mode_valid = sde_hdmi_mode_valid,
+ .get_info = sde_hdmi_get_info,
+ };
+ struct msm_display_info info = {0};
struct drm_encoder *encoder;
void *display, *connector;
int i, max_encoders;
int rc = 0;
+ int connector_poll;
if (!dev || !priv || !sde_kms) {
SDE_ERROR("invalid argument(s)\n");
return -EINVAL;
}
- max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count;
+ max_encoders = sde_kms->dsi_display_count +
+ sde_kms->wb_display_count +
+ sde_kms->hdmi_display_count;
+
if (max_encoders > ARRAY_SIZE(priv->encoders)) {
max_encoders = ARRAY_SIZE(priv->encoders);
SDE_ERROR("capping number of displays to %d", max_encoders);
@@ -666,6 +704,57 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
}
}
+ /* hdmi */
+ for (i = 0; i < sde_kms->hdmi_display_count &&
+ priv->num_encoders < max_encoders; ++i) {
+ display = sde_kms->hdmi_displays[i];
+ encoder = NULL;
+
+ memset(&info, 0x0, sizeof(info));
+ rc = sde_hdmi_dev_init(display);
+ if (rc) {
+ SDE_ERROR("hdmi dev_init %d failed\n", i);
+ continue;
+ }
+ rc = sde_hdmi_get_info(&info, display);
+ if (rc) {
+ SDE_ERROR("hdmi get_info %d failed\n", i);
+ continue;
+ }
+ if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+ connector_poll = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector_poll = 0;
+ encoder = sde_encoder_init(dev, &info);
+ if (IS_ERR_OR_NULL(encoder)) {
+ SDE_ERROR("encoder init failed for hdmi %d\n", i);
+ continue;
+ }
+
+ rc = sde_hdmi_drm_init(display, encoder);
+ if (rc) {
+ SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc);
+ sde_encoder_destroy(encoder);
+ continue;
+ }
+
+ connector = sde_connector_init(dev,
+ encoder,
+ 0,
+ display,
+ &hdmi_ops,
+ connector_poll,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (connector) {
+ priv->encoders[priv->num_encoders++] = encoder;
+ } else {
+ SDE_ERROR("hdmi %d connector init failed\n", i);
+ sde_hdmi_dev_deinit(display);
+ sde_hdmi_drm_deinit(display);
+ sde_encoder_destroy(encoder);
+ }
+ }
+
return 0;
}
@@ -726,6 +815,9 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
priv = dev->dev_private;
catalog = sde_kms->catalog;
+ ret = sde_core_irq_domain_add(sde_kms);
+ if (ret)
+ goto fail_irq;
/*
* Query for underlying display drivers, and create connectors,
* bridges and encoders for them.
@@ -784,6 +876,8 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
return 0;
fail:
_sde_kms_drm_obj_destroy(sde_kms);
+fail_irq:
+ sde_core_irq_domain_fini(sde_kms);
return ret;
}
@@ -940,17 +1034,16 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
struct msm_mmu *mmu;
int i;
- for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
- if (!sde_kms->mmu[i])
+ for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
+ if (!sde_kms->aspace[i])
continue;
- mmu = sde_kms->mmu[i];
- msm_unregister_mmu(sde_kms->dev, mmu);
- mmu->funcs->detach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
- sde_kms->mmu[i] = 0;
- sde_kms->mmu_id[i] = 0;
+ mmu = sde_kms->aspace[i]->mmu;
+
+ mmu->funcs->detach(mmu);
+ msm_gem_address_space_put(sde_kms->aspace[i]);
+
+ sde_kms->aspace[i] = NULL;
}
return 0;
@@ -962,6 +1055,8 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
int i, ret;
for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+ struct msm_gem_address_space *aspace;
+
mmu = msm_smmu_new(sde_kms->dev->dev, i);
if (IS_ERR(mmu)) {
/* MMU's can be optional depending on platform */
@@ -971,25 +1066,24 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
continue;
}
- ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
- if (ret) {
- SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+ aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
+ mmu, "sde");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
mmu->funcs->destroy(mmu);
goto fail;
}
- sde_kms->mmu_id[i] = msm_register_mmu(sde_kms->dev, mmu);
- if (sde_kms->mmu_id[i] < 0) {
- ret = sde_kms->mmu_id[i];
- SDE_ERROR("failed to register sde iommu %d: %d\n",
- i, ret);
- mmu->funcs->detach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ sde_kms->aspace[i] = aspace;
+
+ ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+ msm_gem_address_space_put(aspace);
goto fail;
}
- sde_kms->mmu[i] = mmu;
}
return 0;
@@ -1134,6 +1228,14 @@ static int sde_kms_hw_init(struct msm_kms *kms)
goto perf_err;
}
+ sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+ if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+ rc = PTR_ERR(sde_kms->hw_intr);
+ SDE_ERROR("hw_intr init failed: %d\n", rc);
+ sde_kms->hw_intr = NULL;
+ goto hw_intr_init_err;
+ }
+
/*
* _sde_kms_drm_obj_init should create the DRM related objects
* i.e. CRTCs, planes, encoders, connectors and so forth
@@ -1159,21 +1261,12 @@ static int sde_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
- sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
- if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
- rc = PTR_ERR(sde_kms->hw_intr);
- SDE_ERROR("hw_intr init failed: %d\n", rc);
- sde_kms->hw_intr = NULL;
- goto hw_intr_init_err;
- }
-
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
return 0;
-hw_intr_init_err:
- _sde_kms_drm_obj_destroy(sde_kms);
drm_obj_init_err:
sde_core_perf_destroy(&sde_kms->perf);
+hw_intr_init_err:
perf_err:
power_error:
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index bf127ffe9eb6..44f6be959ac9 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -22,6 +22,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_mmu.h"
+#include "msm_gem.h"
#include "sde_dbg.h"
#include "sde_hw_catalog.h"
#include "sde_hw_ctl.h"
@@ -121,8 +122,7 @@ struct sde_kms {
int core_rev;
struct sde_mdss_cfg *catalog;
- struct msm_mmu *mmu[MSM_SMMU_DOMAIN_MAX];
- int mmu_id[MSM_SMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
struct sde_power_client *core_client;
/* directory entry for debugfs */
@@ -154,8 +154,9 @@ struct sde_kms {
void **dsi_displays;
int wb_display_count;
void **wb_displays;
-
bool has_danger_ctrl;
+ void **hdmi_displays;
+ int hdmi_display_count;
};
struct vsync_info {
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index b3de45302dea..114acfd7a173 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -86,7 +86,7 @@ enum sde_plane_qos {
struct sde_plane {
struct drm_plane base;
- int mmu_id;
+ struct msm_gem_address_space *aspace;
struct mutex lock;
@@ -580,7 +580,7 @@ static inline void _sde_plane_set_scanout(struct drm_plane *plane,
return;
}
- ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
+ ret = sde_format_populate_layout(psde->aspace, fb, &pipe_cfg->layout);
if (ret == -EAGAIN)
SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
else if (ret)
@@ -1285,7 +1285,7 @@ static int sde_plane_prepare_fb(struct drm_plane *plane,
return 0;
SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
- return msm_framebuffer_prepare(fb, psde->mmu_id);
+ return msm_framebuffer_prepare(fb, psde->aspace);
}
static void sde_plane_cleanup_fb(struct drm_plane *plane,
@@ -1294,11 +1294,11 @@ static void sde_plane_cleanup_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = old_state ? old_state->fb : NULL;
struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
- if (!fb)
+ if (!fb || !psde)
return;
SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
- msm_framebuffer_cleanup(fb, psde->mmu_id);
+ msm_framebuffer_cleanup(fb, psde->aspace);
}
static void _sde_plane_atomic_check_mode_changed(struct sde_plane *psde,
@@ -2384,7 +2384,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
/* cache local stuff for later */
plane = &psde->base;
psde->pipe = pipe;
- psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
+ psde->aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
/* initialize underlying h/w driver */
psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog);
diff --git a/drivers/gpu/drm/msm/sde_io_util.c b/drivers/gpu/drm/msm/sde_io_util.c
new file mode 100644
index 000000000000..70a42254a909
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_io_util.c
@@ -0,0 +1,502 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/sde_io_util.h>
+
+#define MAX_I2C_CMDS 16
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
+{
+ u32 in_val;
+
+ if (!io || !io->base) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ if (offset > io->len) {
+ DEV_ERR("%pS->%s: offset out of range\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ writel_relaxed(value, io->base + offset);
+ if (debug) {
+ in_val = readl_relaxed(io->base + offset);
+ DEV_DBG("[%08x] => %08x [%08x]\n",
+ (u32)(unsigned long)(io->base + offset),
+ value, in_val);
+ }
+} /* dss_reg_w */
+EXPORT_SYMBOL(dss_reg_w);
+
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug)
+{
+ u32 value;
+
+ if (!io || !io->base) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return -EINVAL;
+ }
+
+ if (offset > io->len) {
+ DEV_ERR("%pS->%s: offset out of range\n",
+ __builtin_return_address(0), __func__);
+ return -EINVAL;
+ }
+
+ value = readl_relaxed(io->base + offset);
+ if (debug)
+ DEV_DBG("[%08x] <= %08x\n",
+ (u32)(unsigned long)(io->base + offset), value);
+
+ return value;
+} /* dss_reg_r */
+EXPORT_SYMBOL(dss_reg_r);
+
+void dss_reg_dump(void __iomem *base, u32 length, const char *prefix,
+ u32 debug)
+{
+ if (debug)
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+ (void *)base, length, false);
+} /* dss_reg_dump */
+EXPORT_SYMBOL(dss_reg_dump);
+
+static struct resource *msm_dss_get_res_byname(struct platform_device *pdev,
+ unsigned int type, const char *name)
+{
+ struct resource *res = NULL;
+
+ res = platform_get_resource_byname(pdev, type, name);
+ if (!res)
+ DEV_ERR("%s: '%s' resource not found\n", __func__, name);
+
+ return res;
+} /* msm_dss_get_res_byname */
+EXPORT_SYMBOL(msm_dss_get_res_byname);
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+ struct dss_io_data *io_data, const char *name)
+{
+ struct resource *res = NULL;
+
+ if (!pdev || !io_data) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return -EINVAL;
+ }
+
+ res = msm_dss_get_res_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ DEV_ERR("%pS->%s: '%s' msm_dss_get_res_byname failed\n",
+ __builtin_return_address(0), __func__, name);
+ return -ENODEV;
+ }
+
+ io_data->len = (u32)resource_size(res);
+ io_data->base = ioremap(res->start, io_data->len);
+ if (!io_data->base) {
+ DEV_ERR("%pS->%s: '%s' ioremap failed\n",
+ __builtin_return_address(0), __func__, name);
+ return -EIO;
+ }
+
+ return 0;
+} /* msm_dss_ioremap_byname */
+EXPORT_SYMBOL(msm_dss_ioremap_byname);
+
+void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+ if (!io_data) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ if (io_data->base) {
+ iounmap(io_data->base);
+ io_data->base = NULL;
+ }
+ io_data->len = 0;
+} /* msm_dss_iounmap */
+EXPORT_SYMBOL(msm_dss_iounmap);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+ int num_vreg, int config)
+{
+ int i = 0, rc = 0;
+ struct dss_vreg *curr_vreg = NULL;
+ enum dss_vreg_type type;
+
+ if (!in_vreg || !num_vreg)
+ return rc;
+
+ if (config) {
+ for (i = 0; i < num_vreg; i++) {
+ curr_vreg = &in_vreg[i];
+ curr_vreg->vreg = regulator_get(dev,
+ curr_vreg->vreg_name);
+ rc = PTR_RET(curr_vreg->vreg);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ curr_vreg->vreg_name, rc);
+ curr_vreg->vreg = NULL;
+ goto vreg_get_fail;
+ }
+ type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ if (type == DSS_REG_LDO) {
+ rc = regulator_set_voltage(
+ curr_vreg->vreg,
+ curr_vreg->min_voltage,
+ curr_vreg->max_voltage);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s set vltg fail\n",
+ __builtin_return_address(0),
+ __func__,
+ curr_vreg->vreg_name);
+ goto vreg_set_voltage_fail;
+ }
+ }
+ }
+ } else {
+ for (i = num_vreg-1; i >= 0; i--) {
+ curr_vreg = &in_vreg[i];
+ if (curr_vreg->vreg) {
+ type = (regulator_count_voltages(
+ curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ if (type == DSS_REG_LDO) {
+ regulator_set_voltage(curr_vreg->vreg,
+ 0, curr_vreg->max_voltage);
+ }
+ regulator_put(curr_vreg->vreg);
+ curr_vreg->vreg = NULL;
+ }
+ }
+ }
+ return 0;
+
+vreg_unconfig:
+ if (type == DSS_REG_LDO)
+ regulator_set_load(curr_vreg->vreg, 0);
+
+vreg_set_voltage_fail:
+ regulator_put(curr_vreg->vreg);
+ curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+ for (i--; i >= 0; i--) {
+ curr_vreg = &in_vreg[i];
+ type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ goto vreg_unconfig;
+ }
+ return rc;
+} /* msm_dss_config_vreg */
+EXPORT_SYMBOL(msm_dss_config_vreg);
+
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
+{
+ int i = 0, rc = 0;
+ bool need_sleep;
+
+ if (enable) {
+ for (i = 0; i < num_vreg; i++) {
+ rc = PTR_RET(in_vreg[i].vreg);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ in_vreg[i].vreg_name, rc);
+ goto vreg_set_opt_mode_fail;
+ }
+ need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
+ if (in_vreg[i].pre_on_sleep && need_sleep)
+ usleep_range(in_vreg[i].pre_on_sleep * 1000,
+ in_vreg[i].pre_on_sleep * 1000);
+ rc = regulator_set_load(in_vreg[i].vreg,
+ in_vreg[i].enable_load);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s set opt m fail\n",
+ __builtin_return_address(0), __func__,
+ in_vreg[i].vreg_name);
+ goto vreg_set_opt_mode_fail;
+ }
+ rc = regulator_enable(in_vreg[i].vreg);
+ if (in_vreg[i].post_on_sleep && need_sleep)
+ usleep_range(in_vreg[i].post_on_sleep * 1000,
+ in_vreg[i].post_on_sleep * 1000);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s enable failed\n",
+ __builtin_return_address(0), __func__,
+ in_vreg[i].vreg_name);
+ goto disable_vreg;
+ }
+ }
+ } else {
+ for (i = num_vreg-1; i >= 0; i--) {
+ if (in_vreg[i].pre_off_sleep)
+ usleep_range(in_vreg[i].pre_off_sleep * 1000,
+ in_vreg[i].pre_off_sleep * 1000);
+ regulator_set_load(in_vreg[i].vreg,
+ in_vreg[i].disable_load);
+ regulator_disable(in_vreg[i].vreg);
+ if (in_vreg[i].post_off_sleep)
+ usleep_range(in_vreg[i].post_off_sleep * 1000,
+ in_vreg[i].post_off_sleep * 1000);
+ }
+ }
+ return rc;
+
+disable_vreg:
+ regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);
+
+vreg_set_opt_mode_fail:
+ for (i--; i >= 0; i--) {
+ if (in_vreg[i].pre_off_sleep)
+ usleep_range(in_vreg[i].pre_off_sleep * 1000,
+ in_vreg[i].pre_off_sleep * 1000);
+ regulator_set_load(in_vreg[i].vreg,
+ in_vreg[i].disable_load);
+ regulator_disable(in_vreg[i].vreg);
+ if (in_vreg[i].post_off_sleep)
+ usleep_range(in_vreg[i].post_off_sleep * 1000,
+ in_vreg[i].post_off_sleep * 1000);
+ }
+
+ return rc;
+} /* msm_dss_enable_vreg */
+EXPORT_SYMBOL(msm_dss_enable_vreg);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable)
+{
+ int i = 0, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_gpio; i++) {
+ DEV_DBG("%pS->%s: %s enable\n",
+ __builtin_return_address(0), __func__,
+ in_gpio[i].gpio_name);
+
+ rc = gpio_request(in_gpio[i].gpio,
+ in_gpio[i].gpio_name);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s enable failed\n",
+ __builtin_return_address(0), __func__,
+ in_gpio[i].gpio_name);
+ goto disable_gpio;
+ }
+ gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
+ }
+ } else {
+ for (i = num_gpio-1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: %s disable\n",
+ __builtin_return_address(0), __func__,
+ in_gpio[i].gpio_name);
+ if (in_gpio[i].gpio)
+ gpio_free(in_gpio[i].gpio);
+ }
+ }
+ return rc;
+
+disable_gpio:
+ for (i--; i >= 0; i--)
+ if (in_gpio[i].gpio)
+ gpio_free(in_gpio[i].gpio);
+
+ return rc;
+} /* msm_dss_enable_gpio */
+EXPORT_SYMBOL(msm_dss_enable_gpio);
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+ int i;
+
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+} /* msm_dss_put_clk */
+EXPORT_SYMBOL(msm_dss_put_clk);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+ rc = PTR_RET(clk_arry[i].clk);
+ if (rc) {
+ DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name, rc);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ msm_dss_put_clk(clk_arry, num_clk);
+
+ return rc;
+} /* msm_dss_get_clk */
+EXPORT_SYMBOL(msm_dss_get_clk);
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ if (clk_arry[i].clk) {
+ if (clk_arry[i].type != DSS_CLK_AHB) {
+ DEV_DBG("%pS->%s: '%s' rate %ld\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name,
+ clk_arry[i].rate);
+ rc = clk_set_rate(clk_arry[i].clk,
+ clk_arry[i].rate);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ break;
+ }
+ }
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ return rc;
+} /* msm_dss_clk_set_rate */
+EXPORT_SYMBOL(msm_dss_clk_set_rate);
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+ int i, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ DEV_DBG("%pS->%s: enable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ if (clk_arry[i].clk) {
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ }
+
+ if (rc) {
+ msm_dss_enable_clk(&clk_arry[i],
+ i, false);
+ break;
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: disable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+
+ if (clk_arry[i].clk)
+ clk_disable_unprepare(clk_arry[i].clk);
+ else
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ }
+ }
+
+ return rc;
+} /* msm_dss_enable_clk */
+EXPORT_SYMBOL(msm_dss_enable_clk);
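
A sketch of the intended lifecycle for the clock helpers above (illustrative only; the clock names, the DSS_CLK_PCLK type, and the 200 MHz rate are assumptions):

static struct dss_clk mdp_clks[] = {
	{ .clk_name = "iface_clk", .type = DSS_CLK_AHB },
	{ .clk_name = "core_clk", .type = DSS_CLK_PCLK, .rate = 200000000 },
};

static int mdp_clks_on(struct device *dev)
{
	int rc;

	rc = msm_dss_get_clk(dev, mdp_clks, ARRAY_SIZE(mdp_clks));
	if (rc)
		return rc;

	/* msm_dss_clk_set_rate() deliberately skips DSS_CLK_AHB entries */
	rc = msm_dss_clk_set_rate(mdp_clks, ARRAY_SIZE(mdp_clks));
	if (!rc)
		rc = msm_dss_enable_clk(mdp_clks, ARRAY_SIZE(mdp_clks), 1);
	if (rc)
		msm_dss_put_clk(mdp_clks, ARRAY_SIZE(mdp_clks));
	return rc;
}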
+
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *read_buf)
+{
+ struct i2c_msg msgs[2];
+ int ret = -1;
+
+ pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+ __func__, slave_addr, reg_offset);
+
+ msgs[0].addr = slave_addr >> 1;
+ msgs[0].flags = 0;
+ msgs[0].buf = &reg_offset;
+ msgs[0].len = 1;
+
+ msgs[1].addr = slave_addr >> 1;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].buf = read_buf;
+ msgs[1].len = 1;
+
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret < 1) {
+ pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+ return -EACCES;
+ }
+ pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+ return 0;
+}
+EXPORT_SYMBOL(sde_i2c_byte_read);
+
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *value)
+{
+ struct i2c_msg msgs[1];
+ uint8_t data[2];
+ int status = -EACCES;
+
+ pr_debug("%s: writing from slave_addr=[%x] and offset=[%x]\n",
+ __func__, slave_addr, reg_offset);
+
+ data[0] = reg_offset;
+ data[1] = *value;
+
+ msgs[0].addr = slave_addr >> 1;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = data;
+
+ status = i2c_transfer(client->adapter, msgs, 1);
+ if (status < 1) {
+ pr_err("I2C WRITE FAILED=[%d]\n", status);
+ return -EACCES;
+ }
+ pr_debug("%s: I2C write status=%x\n", __func__, status);
+ return status;
+}
+EXPORT_SYMBOL(sde_i2c_byte_write);
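
A read-modify-write sketch built on the two helpers above (not part of this patch; the 0x72 slave address and 0x0A offset are made up). Note that slave_addr is the 8-bit form, which both helpers shift down to the 7-bit address the I2C core expects, and that sde_i2c_byte_write() returns the positive i2c_transfer() count on success:

static int sde_i2c_toggle_bit0(struct i2c_client *client)
{
	uint8_t val;
	int rc;

	rc = sde_i2c_byte_read(client, 0x72, 0x0A, &val);
	if (rc)
		return rc;

	val ^= 0x01;	/* flip the low bit and write it back */
	rc = sde_i2c_byte_write(client, 0x72, 0x0A, &val);
	return (rc < 0) ? rc : 0;
}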
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 3c82a261e3fb..ab65283ceafc 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -24,7 +24,7 @@
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
-#include <linux/mdss_io_util.h>
+#include <linux/sde_io_util.h>
#include "sde_power_handle.h"
#include "sde_trace.h"
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index fffe08038bcd..5306303b8d15 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -168,6 +168,7 @@ static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS};
* KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC so it is ok to cross the streams here.
*/
static const struct flag_entry context_priv[] = {
+ { KGSL_CONTEXT_PRIV_SUBMITTED, "submitted"},
{ KGSL_CONTEXT_PRIV_DETACHED, "detached"},
{ KGSL_CONTEXT_PRIV_INVALID, "invalid"},
{ KGSL_CONTEXT_PRIV_PAGEFAULT, "pagefault"},
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 0d068e9c5805..89218b62bb7d 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2327,10 +2327,6 @@ static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev,
if (adreno_drawqueue_is_empty(drawqueue))
return count;
- /* Don't update the drawqueue timeout if we are about to preempt out */
- if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
- return count;
-
/* Don't update the drawqueue timeout if it isn't active */
if (!drawqueue_is_current(drawqueue))
return count;
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 1cbd2ef4b6b4..21a6399ba38e 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -217,10 +217,12 @@ static int adreno_drawctxt_wait_rb(struct adreno_device *adreno_dev,
BUG_ON(!mutex_is_locked(&device->mutex));
/*
- * If the context is invalid then return immediately - we may end up
- * waiting for a timestamp that will never come
+	 * If the context is invalid or has not submitted any commands to the
+	 * GPU, then return immediately - we may end up waiting for a
+	 * timestamp that will never come
*/
- if (kgsl_context_invalid(context))
+ if (kgsl_context_invalid(context) ||
+ !test_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv))
goto done;
trace_adreno_drawctxt_wait_start(drawctxt->rb->id, context->id,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index fc0602a60ac1..161b718b8a38 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -959,6 +959,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
drawobj->timestamp, time);
if (!ret) {
+ set_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv);
cmdobj->global_ts = drawctxt->internal_timestamp;
/* Put the timevalues in the profiling buffer */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2b227f2c3a6c..601e7a23101b 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -4379,8 +4379,9 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr,
val = _get_svm_area(private, entry, addr, len, flags);
if (IS_ERR_VALUE(val))
KGSL_MEM_ERR(device,
- "_get_svm_area: pid %d addr %lx pgoff %lx len %ld failed error %d\n",
- private->pid, addr, pgoff, len, (int) val);
+ "_get_svm_area: pid %d mmap_base %lx addr %lx pgoff %lx len %ld failed error %d\n",
+ private->pid, current->mm->mmap_base, addr,
+ pgoff, len, (int) val);
}
put:
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index cb7ffd51460a..0a2c39b82781 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -322,6 +322,7 @@ struct kgsl_device {
/**
* enum bits for struct kgsl_context.priv
+ * @KGSL_CONTEXT_PRIV_SUBMITTED - The context has submitted commands to the gpu.
* @KGSL_CONTEXT_PRIV_DETACHED - The context has been destroyed by userspace
* and is no longer using the gpu.
* @KGSL_CONTEXT_PRIV_INVALID - The context has been destroyed by the kernel
@@ -331,7 +332,8 @@ struct kgsl_device {
* reserved for devices specific use.
*/
enum kgsl_context_priv {
- KGSL_CONTEXT_PRIV_DETACHED = 0,
+ KGSL_CONTEXT_PRIV_SUBMITTED = 0,
+ KGSL_CONTEXT_PRIV_DETACHED,
KGSL_CONTEXT_PRIV_INVALID,
KGSL_CONTEXT_PRIV_PAGEFAULT,
KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC = 16,
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ed70a980d9ac..6e72cda433db 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -136,6 +136,17 @@ config CORESIGHT_CTI
hardware component to another. It can also be used to pass
software generated events.
+config CORESIGHT_CTI_SAVE_DISABLE
+ bool "Turn off CTI save and restore"
+ depends on CORESIGHT_CTI
+ help
+ Turns off CoreSight CTI save and restore support for cpu CTIs. This
+ avoids voting for the clocks during probe as well as the associated
+ save and restore latency at the cost of breaking cpu CTI support on
+ targets where cpu CTIs have to be preserved across power collapse.
+
+ If unsure, say 'N' here to avoid breaking cpu CTI support.
+
config CORESIGHT_TPDA
bool "CoreSight Trace, Profiling & Diagnostics Aggregator driver"
help
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index f13017cd9d46..99de4002275e 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1128,17 +1128,6 @@ config TOUCHSCREEN_FT5X06_GESTURE
If unsure, say N.
-config TOUCHSCREEN_MSTAR21XX
- tristate "Mstar touchscreens"
- depends on I2C
- help
- Say Y here if you have a mstar touchscreen.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called msg21xx_ts.
-
config TOUCHSCREEN_ROHM_BU21023
tristate "ROHM BU21023/24 Dual touch support resistive touchscreens"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7606fe53fcff..a32132cffe92 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -98,6 +98,5 @@ obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o
obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o
obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023) += rohm_bu21023.o
-obj-$(CONFIG_TOUCHSCREEN_MSTAR21XX) += msg21xx_ts.o
obj-$(CONFIG_TOUCHSCREEN_GT9XX) += gt9xx/
obj-$(CONFIG_TOUCHSCREEN_ST) += st/
diff --git a/drivers/input/touchscreen/msg21xx_ts.c b/drivers/input/touchscreen/msg21xx_ts.c
deleted file mode 100644
index fe8c6e1647f8..000000000000
--- a/drivers/input/touchscreen/msg21xx_ts.c
+++ /dev/null
@@ -1,2260 +0,0 @@
-/*
- * MStar MSG21XX touchscreen driver
- *
- * Copyright (c) 2006-2012 MStar Semiconductor, Inc.
- *
- * Copyright (C) 2012 Bruce Ding <bruce.ding@mstarsemi.com>
- *
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/input.h>
-#include <linux/input/mt.h>
-#include <linux/interrupt.h>
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <linux/sysfs.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/debugfs.h>
-#include <linux/regulator/consumer.h>
-
-#if defined(CONFIG_FB)
-#include <linux/notifier.h>
-#include <linux/fb.h>
-#endif
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-#include <linux/input/vir_ps.h>
-#endif
-
-/* Macro Definition */
-
-#define TOUCH_DRIVER_DEBUG 0
-#if (TOUCH_DRIVER_DEBUG == 1)
-#define DBG(fmt, arg...) pr_info(fmt, ##arg)
-#else
-#define DBG(fmt, arg...)
-#endif
-
-/* Constant Value & Variable Definition */
-
-#define MSTAR_VTG_MIN_UV 2800000
-#define MSTAR_VTG_MAX_UV 3300000
-#define MSTAR_I2C_VTG_MIN_UV 1800000
-#define MSTAR_I2C_VTG_MAX_UV 1800000
-
-#define MAX_BUTTONS 4
-#define FT_COORDS_ARR_SIZE 4
-#define MSTAR_FW_NAME_MAX_LEN 50
-
-#define MSTAR_CHIPTOP_REGISTER_BANK 0x1E
-#define MSTAR_CHIPTOP_REGISTER_ICTYPE 0xCC
-#define MSTAR_INIT_SW_ID 0x7FF
-#define MSTAR_DEBUG_DIR_NAME "ts_debug"
-
-#define MSG_FW_FILE_MAJOR_VERSION(x) \
- (((x)->data[0x7f4f] << 8) + ((x)->data[0x7f4e]))
-
-#define MSG_FW_FILE_MINOR_VERSION(x) \
- (((x)->data[0x7f51] << 8) + ((x)->data[0x7f50]))
-
-/*
- * Note.
- * Please do not change the below setting.
- */
-#define TPD_WIDTH (2048)
-#define TPD_HEIGHT (2048)
-
-#ifdef FIRMWARE_AUTOUPDATE
-enum {
- SWID_START = 1,
- SWID_TRULY = SWID_START,
- SWID_NULL,
-};
-
-static unsigned char MSG_FIRMWARE[1][33*1024] = { {
- #include "msg21xx_truly_update_bin.h"
- }
-};
-#endif
-
-#define CONFIG_TP_HAVE_KEY
-#define PINCTRL_STATE_ACTIVE "pmx_ts_active"
-#define PINCTRL_STATE_SUSPEND "pmx_ts_suspend"
-#define PINCTRL_STATE_RELEASE "pmx_ts_release"
-
-#define SLAVE_I2C_ID_DBBUS (0xC4>>1)
-
-#define DEMO_MODE_PACKET_LENGTH (8)
-
-#define TP_PRINT
-
-static char *fw_version; /* customer firmware version */
-static unsigned short fw_version_major;
-static unsigned short fw_version_minor;
-static unsigned char fw_bin_data[94][1024];
-static unsigned int crc32_table[256];
-
-static unsigned short fw_file_major, fw_file_minor;
-static unsigned short main_sw_id = MSTAR_INIT_SW_ID;
-static unsigned short info_sw_id = MSTAR_INIT_SW_ID;
-static unsigned int bin_conf_crc32;
-
-struct msg21xx_ts_platform_data {
- const char *name;
- char fw_name[MSTAR_FW_NAME_MAX_LEN];
- u8 fw_version_major;
- u8 fw_version_minor;
- u32 irq_gpio;
- u32 irq_gpio_flags;
- u32 reset_gpio;
- u32 reset_gpio_flags;
- u32 x_max;
- u32 y_max;
- u32 x_min;
- u32 y_min;
- u32 panel_minx;
- u32 panel_miny;
- u32 panel_maxx;
- u32 panel_maxy;
- u32 num_max_touches;
- bool no_force_update;
- bool i2c_pull_up;
- bool ignore_id_check;
- int (*power_init)(bool);
- int (*power_on)(bool);
- u8 ic_type;
- u32 button_map[MAX_BUTTONS];
- u32 num_buttons;
- u32 hard_reset_delay_ms;
- u32 post_hard_reset_delay_ms;
- bool updating_fw;
-};
-
-/* Touch Data Type Definition */
-struct touchPoint_t {
- unsigned short x;
- unsigned short y;
-};
-
-struct touchInfo_t {
- struct touchPoint_t *point;
- unsigned char count;
- unsigned char keycode;
-};
-
-struct msg21xx_ts_data {
- struct i2c_client *client;
- struct input_dev *input_dev;
- struct msg21xx_ts_platform_data *pdata;
- struct regulator *vdd;
- struct regulator *vcc_i2c;
- bool suspended;
-#if defined(CONFIG_FB)
- struct notifier_block fb_notif;
-#endif
- struct pinctrl *ts_pinctrl;
- struct pinctrl_state *pinctrl_state_active;
- struct pinctrl_state *pinctrl_state_suspend;
- struct pinctrl_state *pinctrl_state_release;
- struct mutex ts_mutex;
- struct touchInfo_t info;
-};
-
-#if defined(CONFIG_FB)
-static int fb_notifier_callback(struct notifier_block *self,
- unsigned long event, void *data);
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-static unsigned char bEnableTpProximity;
-static unsigned char bFaceClosingTp;
-#endif
-
-#ifdef TP_PRINT
-static int tp_print_proc_read(struct msg21xx_ts_data *ts_data);
-static void tp_print_create_entry(struct msg21xx_ts_data *ts_data);
-#endif
-
-static void _ReadBinConfig(struct msg21xx_ts_data *ts_data);
-static unsigned int _CalMainCRC32(struct msg21xx_ts_data *ts_data);
-
-static struct mutex msg21xx_mutex;
-
-enum EMEM_TYPE_t {
- EMEM_ALL = 0,
- EMEM_MAIN,
- EMEM_INFO,
-};
-
-/* Function Definition */
-
-static unsigned int _CRC_doReflect(unsigned int ref, signed char ch)
-{
- unsigned int value = 0;
- unsigned int i = 0;
-
- for (i = 1; i < (ch + 1); i++) {
- if (ref & 1)
- value |= 1 << (ch - i);
- ref >>= 1;
- }
-
- return value;
-}
-
-static unsigned int _CRC_getValue(unsigned int text, unsigned int prevCRC)
-{
- unsigned int ulCRC = prevCRC;
-
- ulCRC = (ulCRC >> 8) ^ crc32_table[(ulCRC & 0xFF) ^ text];
-
- return ulCRC;
-}
-
-static void _CRC_initTable(void)
-{
- unsigned int magic_number = 0x04c11db7;
- unsigned int i, j;
-
- for (i = 0; i <= 0xFF; i++) {
- crc32_table[i] = _CRC_doReflect(i, 8) << 24;
- for (j = 0; j < 8; j++)
- crc32_table[i] = (crc32_table[i] << 1) ^
- (crc32_table[i] & (0x80000000L) ?
- magic_number : 0);
- crc32_table[i] = _CRC_doReflect(crc32_table[i], 32);
- }
-}
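
The three helpers above implement the standard reflected CRC-32 (generator polynomial 0x04C11DB7): the table is built once, the seed is 0xffffffff, and the callers apply the final inversion. A table-free equivalent of a single _CRC_getValue() step, shown only for reference:

static unsigned int crc32_step_bitwise(unsigned int crc, unsigned char byte)
{
	int bit;

	crc ^= byte;	/* fold the data byte into the low bits */
	for (bit = 0; bit < 8; bit++)	/* 0xEDB88320 is 0x04C11DB7 reflected */
		crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);

	return crc;
}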
-
-static void msg21xx_reset_hw(struct msg21xx_ts_platform_data *pdata)
-{
- gpio_direction_output(pdata->reset_gpio, 1);
- gpio_set_value_cansleep(pdata->reset_gpio, 0);
- /* Note that the RST must be in LOW 10ms at least */
- usleep(pdata->hard_reset_delay_ms * 1000);
- gpio_set_value_cansleep(pdata->reset_gpio, 1);
- /* Enable the interrupt service thread/routine for INT after 50ms */
- usleep(pdata->post_hard_reset_delay_ms * 1000);
-}
-
-static int read_i2c_seq(struct msg21xx_ts_data *ts_data, unsigned char addr,
- unsigned char *buf, unsigned short size)
-{
- int rc = 0;
- struct i2c_msg msgs[] = {
- {
- .addr = addr,
- .flags = I2C_M_RD, /* read flag */
- .len = size,
- .buf = buf,
- },
- };
-
- /* If everything went ok (i.e. 1 msg transmitted), return #bytes
- * transmitted, else error code.
- */
- if (ts_data->client != NULL) {
- rc = i2c_transfer(ts_data->client->adapter, msgs, 1);
- if (rc < 0)
- dev_err(&ts_data->client->dev,
- "%s error %d\n", __func__, rc);
- } else {
- dev_err(&ts_data->client->dev, "ts_data->client is NULL\n");
- }
-
- return rc;
-}
-
-static int write_i2c_seq(struct msg21xx_ts_data *ts_data, unsigned char addr,
- unsigned char *buf, unsigned short size)
-{
- int rc = 0;
- struct i2c_msg msgs[] = {
- {
- .addr = addr,
- /*
- * if read flag is undefined,
- * then it means write flag.
- */
- .flags = 0,
- .len = size,
- .buf = buf,
- },
- };
-
- /*
- * If everything went ok (i.e. 1 msg transmitted), return #bytes
- * transmitted, else error code.
- */
- if (ts_data->client != NULL) {
- rc = i2c_transfer(ts_data->client->adapter, msgs, 1);
- if (rc < 0)
- dev_err(&ts_data->client->dev,
- "%s error %d\n", __func__, rc);
- } else {
- dev_err(&ts_data->client->dev, "ts_data->client is NULL\n");
- }
-
- return rc;
-}
-
-static unsigned short read_reg(struct msg21xx_ts_data *ts_data,
- unsigned char bank, unsigned char addr)
-{
- unsigned char tx_data[3] = {0x10, bank, addr};
- unsigned char rx_data[2] = {0};
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data));
- read_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, rx_data, sizeof(rx_data));
-
- return rx_data[1] << 8 | rx_data[0];
-}
-
-static void write_reg(struct msg21xx_ts_data *ts_data, unsigned char bank,
- unsigned char addr,
- unsigned short data)
-{
- unsigned char tx_data[5] = {0x10, bank, addr, data & 0xFF, data >> 8};
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data));
-}
-
-static void write_reg_8bit(struct msg21xx_ts_data *ts_data, unsigned char bank,
- unsigned char addr,
- unsigned char data)
-{
- unsigned char tx_data[4] = {0x10, bank, addr, data};
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data));
-}
-
-static void dbbusDWIICEnterSerialDebugMode(struct msg21xx_ts_data *ts_data)
-{
- unsigned char data[5];
-
- /* Enter the Serial Debug Mode */
- data[0] = 0x53;
- data[1] = 0x45;
- data[2] = 0x52;
- data[3] = 0x44;
- data[4] = 0x42;
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static void dbbusDWIICStopMCU(struct msg21xx_ts_data *ts_data)
-{
- unsigned char data[1];
-
- /* Stop the MCU */
- data[0] = 0x37;
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static void dbbusDWIICIICUseBus(struct msg21xx_ts_data *ts_data)
-{
- unsigned char data[1];
-
- /* IIC Use Bus */
- data[0] = 0x35;
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static void dbbusDWIICIICReshape(struct msg21xx_ts_data *ts_data)
-{
- unsigned char data[1];
-
- /* IIC Re-shape */
- data[0] = 0x71;
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static unsigned char msg21xx_get_ic_type(struct msg21xx_ts_data *ts_data)
-{
- unsigned char ic_type = 0;
- unsigned char bank;
- unsigned char addr;
-
- msg21xx_reset_hw(ts_data->pdata);
- dbbusDWIICEnterSerialDebugMode(ts_data);
- dbbusDWIICStopMCU(ts_data);
- dbbusDWIICIICUseBus(ts_data);
- dbbusDWIICIICReshape(ts_data);
- msleep(300);
-
- /* stop mcu */
- write_reg_8bit(ts_data, 0x0F, 0xE6, 0x01);
- /* disable watch dog */
- write_reg(ts_data, 0x3C, 0x60, 0xAA55);
- /* get ic type */
- bank = MSTAR_CHIPTOP_REGISTER_BANK;
- addr = MSTAR_CHIPTOP_REGISTER_ICTYPE;
- ic_type = (0xff)&(read_reg(ts_data, bank, addr));
-
- if (ic_type != ts_data->pdata->ic_type)
- ic_type = 0;
-
- msg21xx_reset_hw(ts_data->pdata);
-
- return ic_type;
-}
-
-static int msg21xx_read_firmware_id(struct msg21xx_ts_data *ts_data)
-{
- unsigned char command[3] = { 0x53, 0x00, 0x2A};
- unsigned char response[4] = { 0 };
-
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data, ts_data->client->addr, command, sizeof(command));
- read_i2c_seq(ts_data, ts_data->client->addr, response,
- sizeof(response));
- mutex_unlock(&msg21xx_mutex);
- ts_data->pdata->fw_version_major = (response[1]<<8) + response[0];
- ts_data->pdata->fw_version_minor = (response[3]<<8) + response[2];
-
- dev_info(&ts_data->client->dev, "major num = %d, minor num = %d\n",
- ts_data->pdata->fw_version_major,
- ts_data->pdata->fw_version_minor);
-
- return 0;
-}
-
-static int firmware_erase_c33(struct msg21xx_ts_data *ts_data,
- enum EMEM_TYPE_t emem_type)
-{
- /* stop mcu */
- write_reg(ts_data, 0x0F, 0xE6, 0x0001);
-
- /* disable watch dog */
- write_reg_8bit(ts_data, 0x3C, 0x60, 0x55);
- write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA);
-
- /* set PROGRAM password */
- write_reg_8bit(ts_data, 0x16, 0x1A, 0xBA);
- write_reg_8bit(ts_data, 0x16, 0x1B, 0xAB);
-
- write_reg_8bit(ts_data, 0x16, 0x18, 0x80);
-
- if (emem_type == EMEM_ALL)
- write_reg_8bit(ts_data, 0x16, 0x08, 0x10);
-
- write_reg_8bit(ts_data, 0x16, 0x18, 0x40);
- msleep(20);
-
- /* clear pce */
-	write_reg_8bit(ts_data, 0x16, 0x18, 0x80);
-
- /* erase trigger */
- if (emem_type == EMEM_MAIN)
- write_reg_8bit(ts_data, 0x16, 0x0E, 0x04); /* erase main */
- else
-		write_reg_8bit(ts_data, 0x16, 0x0E, 0x08); /* erase all block */
-
-	return 0;
-}
-
-static ssize_t firmware_update_c33(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size,
- enum EMEM_TYPE_t emem_type,
- bool isForce) {
- unsigned int i, j;
- unsigned int crc_main, crc_main_tp;
- unsigned int crc_info, crc_info_tp;
- unsigned short reg_data = 0;
- int update_pass = 1;
- bool fw_upgrade = false;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- crc_main = 0xffffffff;
- crc_info = 0xffffffff;
-
- msg21xx_reset_hw(ts_data->pdata);
-
- msg21xx_read_firmware_id(ts_data);
- _ReadBinConfig(ts_data);
- if ((main_sw_id == info_sw_id) &&
- (_CalMainCRC32(ts_data) == bin_conf_crc32) &&
- (fw_file_major == ts_data->pdata->fw_version_major) &&
- (fw_file_minor > ts_data->pdata->fw_version_minor)) {
- fw_upgrade = true;
- }
-
- if (!fw_upgrade && !isForce) {
- dev_dbg(dev, "no need to update\n");
- msg21xx_reset_hw(ts_data->pdata);
- return size;
- }
- msg21xx_reset_hw(ts_data->pdata);
- msleep(300);
-
- dbbusDWIICEnterSerialDebugMode(ts_data);
- dbbusDWIICStopMCU(ts_data);
- dbbusDWIICIICUseBus(ts_data);
- dbbusDWIICIICReshape(ts_data);
- msleep(300);
-
- /* erase main */
- firmware_erase_c33(ts_data, EMEM_MAIN);
- msleep(1000);
-
- msg21xx_reset_hw(ts_data->pdata);
- dbbusDWIICEnterSerialDebugMode(ts_data);
- dbbusDWIICStopMCU(ts_data);
- dbbusDWIICIICUseBus(ts_data);
- dbbusDWIICIICReshape(ts_data);
- msleep(300);
- /*
- * Program
- */
- /* polling 0x3CE4 is 0x1C70 */
- if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0x1C70);
- }
-
- switch (emem_type) {
- case EMEM_ALL:
- write_reg(ts_data, 0x3C, 0xE4, 0xE38F); /* for all-blocks */
- break;
- case EMEM_MAIN:
- write_reg(ts_data, 0x3C, 0xE4, 0x7731); /* for main block */
- break;
- case EMEM_INFO:
- write_reg(ts_data, 0x3C, 0xE4, 0x7731); /* for info block */
-
- write_reg_8bit(ts_data, 0x0F, 0xE6, 0x01);
-
- write_reg_8bit(ts_data, 0x3C, 0xE4, 0xC5);
- write_reg_8bit(ts_data, 0x3C, 0xE5, 0x78);
-
- write_reg_8bit(ts_data, MSTAR_CHIPTOP_REGISTER_BANK,
- 0x04, 0x9F);
- write_reg_8bit(ts_data, MSTAR_CHIPTOP_REGISTER_BANK,
- 0x05, 0x82);
-
- write_reg_8bit(ts_data, 0x0F, 0xE6, 0x00);
- msleep(100);
- break;
- }
-
- /* polling 0x3CE4 is 0x2F43 */
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0x2F43);
-
- /* calculate CRC 32 */
- _CRC_initTable();
-
- /* total 32 KB : 2 byte per R/W */
- for (i = 0; i < 32; i++) {
- if (i == 31) {
- fw_bin_data[i][1014] = 0x5A;
- fw_bin_data[i][1015] = 0xA5;
-
- for (j = 0; j < 1016; j++)
- crc_main = _CRC_getValue(fw_bin_data[i][j],
- crc_main);
- } else {
- for (j = 0; j < 1024; j++)
- crc_main = _CRC_getValue(fw_bin_data[i][j],
- crc_main);
- }
-
- for (j = 0; j < 8; j++)
- write_i2c_seq(ts_data, ts_data->client->addr,
- &fw_bin_data[i][j * 128], 128);
- msleep(100);
-
- /* polling 0x3CE4 is 0xD0BC */
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0xD0BC);
-
- write_reg(ts_data, 0x3C, 0xE4, 0x2F43);
- }
-
- if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
- /* write file done and check crc */
- write_reg(ts_data, 0x3C, 0xE4, 0x1380);
- }
- msleep(20);
-
- if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
- /* polling 0x3CE4 is 0x9432 */
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0x9432);
- }
-
- crc_main = crc_main ^ 0xffffffff;
- crc_info = crc_info ^ 0xffffffff;
-
- if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
- /* CRC Main from TP */
- crc_main_tp = read_reg(ts_data, 0x3C, 0x80);
- crc_main_tp = (crc_main_tp << 16) |
- read_reg(ts_data, 0x3C, 0x82);
-
- /* CRC Info from TP */
- crc_info_tp = read_reg(ts_data, 0x3C, 0xA0);
- crc_info_tp = (crc_info_tp << 16) |
- read_reg(ts_data, 0x3C, 0xA2);
- }
-
- update_pass = 1;
- if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
- if (crc_main_tp != crc_main)
- update_pass = 0;
- }
-
- if (!update_pass) {
- dev_err(dev, "update_C33 failed\n");
- msg21xx_reset_hw(ts_data->pdata);
- return 0;
- }
-
- dev_dbg(dev, "update_C33 OK\n");
- msg21xx_reset_hw(ts_data->pdata);
- return size;
-}
-
-static unsigned int _CalMainCRC32(struct msg21xx_ts_data *ts_data)
-{
- unsigned int ret = 0;
- unsigned short reg_data = 0;
-
- msg21xx_reset_hw(ts_data->pdata);
-
- dbbusDWIICEnterSerialDebugMode(ts_data);
- dbbusDWIICStopMCU(ts_data);
- dbbusDWIICIICUseBus(ts_data);
- dbbusDWIICIICReshape(ts_data);
- msleep(100);
-
- /* Stop MCU */
- write_reg(ts_data, 0x0F, 0xE6, 0x0001);
-
- /* Stop Watchdog */
- write_reg_8bit(ts_data, 0x3C, 0x60, 0x55);
- write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA);
-
- /* cmd */
- write_reg(ts_data, 0x3C, 0xE4, 0xDF4C);
- write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x7d60);
- /* TP SW reset */
- write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x829F);
-
- /* MCU run */
- write_reg(ts_data, 0x0F, 0xE6, 0x0000);
-
- /* polling 0x3CE4 */
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0x9432);
-
- /* Cal CRC Main from TP */
- ret = read_reg(ts_data, 0x3C, 0x80);
- ret = (ret << 16) | read_reg(ts_data, 0x3C, 0x82);
-
- dev_dbg(&ts_data->client->dev,
- "[21xxA]:Current main crc32=0x%x\n", ret);
- return ret;
-}
-
-static void _ReadBinConfig(struct msg21xx_ts_data *ts_data)
-{
- unsigned char dbbus_tx_data[5] = {0};
- unsigned char dbbus_rx_data[4] = {0};
- unsigned short reg_data = 0;
-
- msg21xx_reset_hw(ts_data->pdata);
-
- dbbusDWIICEnterSerialDebugMode(ts_data);
- dbbusDWIICStopMCU(ts_data);
- dbbusDWIICIICUseBus(ts_data);
- dbbusDWIICIICReshape(ts_data);
- msleep(100);
-
- /* Stop MCU */
- write_reg(ts_data, 0x0F, 0xE6, 0x0001);
-
- /* Stop Watchdog */
- write_reg_8bit(ts_data, 0x3C, 0x60, 0x55);
- write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA);
-
- /* cmd */
- write_reg(ts_data, 0x3C, 0xE4, 0xA4AB);
- write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x7d60);
-
- /* TP SW reset */
- write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x829F);
-
- /* MCU run */
- write_reg(ts_data, 0x0F, 0xE6, 0x0000);
-
- /* polling 0x3CE4 */
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0x5B58);
-
- dbbus_tx_data[0] = 0x72;
- dbbus_tx_data[1] = 0x7F;
- dbbus_tx_data[2] = 0x55;
- dbbus_tx_data[3] = 0x00;
- dbbus_tx_data[4] = 0x04;
- write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5);
- read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
- if ((dbbus_rx_data[0] >= 0x30 && dbbus_rx_data[0] <= 0x39)
- && (dbbus_rx_data[1] >= 0x30 && dbbus_rx_data[1] <= 0x39)
- && (dbbus_rx_data[2] >= 0x31 && dbbus_rx_data[2] <= 0x39)) {
- main_sw_id = (dbbus_rx_data[0] - 0x30) * 100 +
- (dbbus_rx_data[1] - 0x30) * 10 +
- (dbbus_rx_data[2] - 0x30);
- }
-
- dbbus_tx_data[0] = 0x72;
- dbbus_tx_data[1] = 0x7F;
- dbbus_tx_data[2] = 0xFC;
- dbbus_tx_data[3] = 0x00;
- dbbus_tx_data[4] = 0x04;
- write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5);
- read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
- bin_conf_crc32 = (dbbus_rx_data[0] << 24) |
- (dbbus_rx_data[1] << 16) |
- (dbbus_rx_data[2] << 8) |
- (dbbus_rx_data[3]);
-
- dbbus_tx_data[0] = 0x72;
- dbbus_tx_data[1] = 0x83;
- dbbus_tx_data[2] = 0x00;
- dbbus_tx_data[3] = 0x00;
- dbbus_tx_data[4] = 0x04;
- write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5);
- read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
- if ((dbbus_rx_data[0] >= 0x30 && dbbus_rx_data[0] <= 0x39)
- && (dbbus_rx_data[1] >= 0x30 && dbbus_rx_data[1] <= 0x39)
- && (dbbus_rx_data[2] >= 0x31 && dbbus_rx_data[2] <= 0x39)) {
- info_sw_id = (dbbus_rx_data[0] - 0x30) * 100 +
- (dbbus_rx_data[1] - 0x30) * 10 +
- (dbbus_rx_data[2] - 0x30);
- }
-
- dev_dbg(&ts_data->client->dev,
- "[21xxA]:main_sw_id = %d, info_sw_id = %d, bin_conf_crc32 = 0x%x\n",
- main_sw_id, info_sw_id, bin_conf_crc32);
-}
-
-static ssize_t firmware_update_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- return snprintf(buf, 3, "%d\n", ts_data->pdata->updating_fw);
-}
-
-static ssize_t firmware_update_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- ts_data->pdata->updating_fw = true;
- disable_irq(ts_data->client->irq);
-
- size = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, false);
-
- enable_irq(ts_data->client->irq);
- ts_data->pdata->updating_fw = false;
-
- return size;
-}
-
-static DEVICE_ATTR(update, (S_IRUGO | S_IWUSR),
- firmware_update_show,
- firmware_update_store);
-
-static int prepare_fw_data(struct device *dev)
-{
- int count;
- int i;
- int ret;
- const struct firmware *fw = NULL;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- ret = request_firmware(&fw, ts_data->pdata->fw_name, dev);
- if (ret < 0) {
- dev_err(dev, "Request firmware failed - %s (%d)\n",
- ts_data->pdata->fw_name, ret);
- return ret;
- }
-
- count = fw->size / 1024;
-
- for (i = 0; i < count; i++)
- memcpy(fw_bin_data[i], fw->data + (i * 1024), 1024);
-
- fw_file_major = MSG_FW_FILE_MAJOR_VERSION(fw);
- fw_file_minor = MSG_FW_FILE_MINOR_VERSION(fw);
- dev_dbg(dev, "New firmware: %d.%d",
- fw_file_major, fw_file_minor);
-
- return fw->size;
-}
-
-static ssize_t firmware_update_smart_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size)
-{
- int ret;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- ret = prepare_fw_data(dev);
- if (ret < 0) {
- dev_err(dev, "Request firmware failed -(%d)\n", ret);
- return ret;
- }
- ts_data->pdata->updating_fw = true;
- disable_irq(ts_data->client->irq);
-
- ret = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, false);
- if (ret == 0)
- dev_err(dev, "firmware_update_c33 ret = %d\n", ret);
-
- enable_irq(ts_data->client->irq);
- ts_data->pdata->updating_fw = false;
-
- return ret;
-}
-
-static ssize_t firmware_force_update_smart_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size)
-{
- int ret;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- ret = prepare_fw_data(dev);
- if (ret < 0) {
- dev_err(dev, "Request firmware failed -(%d)\n", ret);
- return ret;
- }
- ts_data->pdata->updating_fw = true;
- disable_irq(ts_data->client->irq);
-
- ret = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, true);
- if (ret == 0)
- dev_err(dev, "firmware_update_c33 et = %d\n", ret);
-
- enable_irq(ts_data->client->irq);
- ts_data->pdata->updating_fw = false;
-
- return ret;
-}
-
-static DEVICE_ATTR(update_fw, (S_IRUGO | S_IWUSR),
- firmware_update_show,
- firmware_update_smart_store);
-
-static DEVICE_ATTR(force_update_fw, (S_IRUGO | S_IWUSR),
- firmware_update_show,
- firmware_force_update_smart_store);
-
-static ssize_t firmware_version_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- msg21xx_read_firmware_id(ts_data);
- return snprintf(buf, sizeof(char) * 8, "%03d%03d\n",
- ts_data->pdata->fw_version_major,
- ts_data->pdata->fw_version_minor);
-}
-
-static DEVICE_ATTR(version, S_IRUGO,
- firmware_version_show,
- NULL);
-
-
-static ssize_t msg21xx_fw_name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- return snprintf(buf, MSTAR_FW_NAME_MAX_LEN - 1,
- "%s\n", ts_data->pdata->fw_name);
-}
-
-static ssize_t msg21xx_fw_name_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- if (size > MSTAR_FW_NAME_MAX_LEN - 1)
- return -EINVAL;
-
- strlcpy(ts_data->pdata->fw_name, buf, size);
- if (ts_data->pdata->fw_name[size - 1] == '\n')
- ts_data->pdata->fw_name[size - 1] = 0;
-
- return size;
-}
-
-static DEVICE_ATTR(fw_name, (S_IRUGO | S_IWUSR),
- msg21xx_fw_name_show, msg21xx_fw_name_store);
-
-static ssize_t firmware_data_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size)
-{
- int count = size / 1024;
- int i;
-
- for (i = 0; i < count; i++)
- memcpy(fw_bin_data[i], buf + (i * 1024), 1024);
-
- if (buf != NULL)
- dev_dbg(dev, "buf[0] = %c\n", buf[0]);
-
- return size;
-}
-
-static DEVICE_ATTR(data, S_IWUSR, NULL, firmware_data_store);
-
-static ssize_t tp_print_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- tp_print_proc_read(ts_data);
-
- return snprintf(buf, 3, "%d\n", ts_data->suspended);
-}
-
-static ssize_t tp_print_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size)
-{
- return size;
-}
-
-static DEVICE_ATTR(tpp, (S_IRUGO | S_IWUSR),
- tp_print_show, tp_print_store);
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-static void _msg_enable_proximity(void)
-{
- unsigned char tx_data[4] = {0};
-
- tx_data[0] = 0x52;
- tx_data[1] = 0x00;
- tx_data[2] = 0x47;
- tx_data[3] = 0xa0;
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data->client->addr, &tx_data[0], 4);
- mutex_unlock(&msg21xx_mutex);
-
- bEnableTpProximity = 1;
-}
-
-static void _msg_disable_proximity(void)
-{
- unsigned char tx_data[4] = {0};
-
- tx_data[0] = 0x52;
- tx_data[1] = 0x00;
- tx_data[2] = 0x47;
- tx_data[3] = 0xa1;
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data->client->addr, &tx_data[0], 4);
- mutex_unlock(&msg21xx_mutex);
-
- bEnableTpProximity = 0;
- bFaceClosingTp = 0;
-}
-
-static void tsps_msg21xx_enable(int en)
-{
- if (en)
- _msg_enable_proximity();
- else
- _msg_disable_proximity();
-}
-
-static int tsps_msg21xx_data(void)
-{
- return bFaceClosingTp;
-}
-#endif
-
-static int msg21xx_pinctrl_init(struct msg21xx_ts_data *ts_data)
-{
- int retval;
-
- /* Get pinctrl if target uses pinctrl */
- ts_data->ts_pinctrl = devm_pinctrl_get(&(ts_data->client->dev));
- if (IS_ERR_OR_NULL(ts_data->ts_pinctrl)) {
- retval = PTR_ERR(ts_data->ts_pinctrl);
- dev_dbg(&ts_data->client->dev,
- "Target does not use pinctrl %d\n", retval);
- goto err_pinctrl_get;
- }
-
- ts_data->pinctrl_state_active = pinctrl_lookup_state(
- ts_data->ts_pinctrl, PINCTRL_STATE_ACTIVE);
- if (IS_ERR_OR_NULL(ts_data->pinctrl_state_active)) {
- retval = PTR_ERR(ts_data->pinctrl_state_active);
- dev_dbg(&ts_data->client->dev,
- "Can't lookup %s pinstate %d\n",
- PINCTRL_STATE_ACTIVE, retval);
- goto err_pinctrl_lookup;
- }
-
- ts_data->pinctrl_state_suspend = pinctrl_lookup_state(
- ts_data->ts_pinctrl, PINCTRL_STATE_SUSPEND);
- if (IS_ERR_OR_NULL(ts_data->pinctrl_state_suspend)) {
- retval = PTR_ERR(ts_data->pinctrl_state_suspend);
- dev_dbg(&ts_data->client->dev,
- "Can't lookup %s pinstate %d\n",
- PINCTRL_STATE_SUSPEND, retval);
- goto err_pinctrl_lookup;
- }
-
- ts_data->pinctrl_state_release = pinctrl_lookup_state(
- ts_data->ts_pinctrl, PINCTRL_STATE_RELEASE);
- if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) {
- retval = PTR_ERR(ts_data->pinctrl_state_release);
- dev_dbg(&ts_data->client->dev,
- "Can't lookup %s pinstate %d\n",
- PINCTRL_STATE_RELEASE, retval);
- }
-
- return 0;
-
-err_pinctrl_lookup:
- devm_pinctrl_put(ts_data->ts_pinctrl);
-err_pinctrl_get:
- ts_data->ts_pinctrl = NULL;
- return retval;
-}
-
-static unsigned char calculate_checksum(unsigned char *msg, int length)
-{
- int checksum = 0, i;
-
- for (i = 0; i < length; i++)
- checksum += msg[i];
-
- return (unsigned char)((-checksum) & 0xFF);
-}
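
Because calculate_checksum() returns the two's complement of the byte sum, appending it as the last byte makes a valid demo-mode packet sum to zero modulo 256; a sketch of that property (not from the driver):

static bool packet_checksum_ok(const unsigned char *pkt, int len)
{
	unsigned char sum = 0;
	int i;

	/* the sum includes the trailing checksum byte itself */
	for (i = 0; i < len; i++)
		sum += pkt[i];

	return sum == 0;
}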
-
-static int parse_info(struct msg21xx_ts_data *ts_data)
-{
- unsigned char data[DEMO_MODE_PACKET_LENGTH] = {0};
- unsigned char checksum = 0;
- unsigned int x = 0, y = 0;
- unsigned int x2 = 0, y2 = 0;
- unsigned int delta_x = 0, delta_y = 0;
-
- mutex_lock(&msg21xx_mutex);
- read_i2c_seq(ts_data, ts_data->client->addr, &data[0],
- DEMO_MODE_PACKET_LENGTH);
- mutex_unlock(&msg21xx_mutex);
- checksum = calculate_checksum(&data[0], (DEMO_MODE_PACKET_LENGTH-1));
- dev_dbg(&ts_data->client->dev, "check sum: [%x] == [%x]?\n",
- data[DEMO_MODE_PACKET_LENGTH-1], checksum);
-
- if (data[DEMO_MODE_PACKET_LENGTH-1] != checksum) {
- dev_err(&ts_data->client->dev, "WRONG CHECKSUM\n");
- return -EINVAL;
- }
-
- if (data[0] != 0x52) {
- dev_err(&ts_data->client->dev, "WRONG HEADER\n");
- return -EINVAL;
- }
-
- ts_data->info.keycode = 0xFF;
- if ((data[1] == 0xFF) && (data[2] == 0xFF) &&
- (data[3] == 0xFF) && (data[4] == 0xFF) &&
- (data[6] == 0xFF)) {
- if ((data[5] == 0xFF) || (data[5] == 0)) {
- ts_data->info.keycode = 0xFF;
- } else if ((data[5] == 1) || (data[5] == 2) ||
- (data[5] == 4) || (data[5] == 8)) {
- ts_data->info.keycode = data[5] >> 1;
-
- dev_dbg(&ts_data->client->dev,
- "ts_data->info.keycode index %d\n",
- ts_data->info.keycode);
- }
- #ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
- else if (bEnableTpProximity && ((data[5] == 0x80) ||
- (data[5] == 0x40))) {
- if (data[5] == 0x80)
- bFaceClosingTp = 1;
- else if (data[5] == 0x40)
- bFaceClosingTp = 0;
-
- return -EINVAL;
- }
- #endif
- else {
- dev_err(&ts_data->client->dev, "WRONG KEY\n");
- return -EINVAL;
- }
- } else {
- x = (((data[1] & 0xF0) << 4) | data[2]);
- y = (((data[1] & 0x0F) << 8) | data[3]);
- delta_x = (((data[4] & 0xF0) << 4) | data[5]);
- delta_y = (((data[4] & 0x0F) << 8) | data[6]);
-
- if ((delta_x == 0) && (delta_y == 0)) {
- ts_data->info.point[0].x =
- x * ts_data->pdata->x_max / TPD_WIDTH;
- ts_data->info.point[0].y =
- y * ts_data->pdata->y_max / TPD_HEIGHT;
- ts_data->info.count = 1;
- } else {
- if (delta_x > 2048)
- delta_x -= 4096;
-
- if (delta_y > 2048)
- delta_y -= 4096;
-
- x2 = (unsigned int)((signed short)x +
- (signed short)delta_x);
- y2 = (unsigned int)((signed short)y +
- (signed short)delta_y);
- ts_data->info.point[0].x =
- x * ts_data->pdata->x_max / TPD_WIDTH;
- ts_data->info.point[0].y =
- y * ts_data->pdata->y_max / TPD_HEIGHT;
- ts_data->info.point[1].x =
- x2 * ts_data->pdata->x_max / TPD_WIDTH;
- ts_data->info.point[1].y =
- y2 * ts_data->pdata->y_max / TPD_HEIGHT;
- ts_data->info.count = ts_data->pdata->num_max_touches;
- }
- }
-
- return 0;
-}
-
-static void touch_driver_touch_released(struct msg21xx_ts_data *ts_data)
-{
- int i;
-
- for (i = 0; i < ts_data->pdata->num_max_touches; i++) {
- input_mt_slot(ts_data->input_dev, i);
- input_mt_report_slot_state(ts_data->input_dev,
- MT_TOOL_FINGER, 0);
- }
-
- input_report_key(ts_data->input_dev, BTN_TOUCH, 0);
- input_report_key(ts_data->input_dev, BTN_TOOL_FINGER, 0);
- input_sync(ts_data->input_dev);
-}
-
-/* read data through I2C then report data to input
- *sub-system when interrupt occurred
- */
-static irqreturn_t msg21xx_ts_interrupt(int irq, void *dev_id)
-{
- int i = 0;
- static int last_keycode = 0xFF;
- static int last_count;
- struct msg21xx_ts_data *ts_data = dev_id;
-
- ts_data->info.count = 0;
- if (parse_info(ts_data) == 0) {
- if (ts_data->info.keycode != 0xFF) { /* key touch pressed */
- if (ts_data->info.keycode <
- ts_data->pdata->num_buttons) {
- if (ts_data->info.keycode != last_keycode) {
- dev_dbg(&ts_data->client->dev,
- "key touch pressed");
-
- input_report_key(ts_data->input_dev,
- BTN_TOUCH, 1);
- input_report_key(ts_data->input_dev,
- ts_data->pdata->button_map[
- ts_data->info.keycode], 1);
-
- last_keycode = ts_data->info.keycode;
- } else {
- /* pass duplicate key-pressing */
- dev_dbg(&ts_data->client->dev,
- "REPEATED KEY\n");
- }
- } else {
- dev_dbg(&ts_data->client->dev, "WRONG KEY\n");
- }
- } else { /* key touch released */
- if (last_keycode != 0xFF) {
- dev_dbg(&ts_data->client->dev, "key touch released");
-
- input_report_key(ts_data->input_dev,
- BTN_TOUCH, 0);
- input_report_key(ts_data->input_dev,
- ts_data->pdata->button_map[last_keycode],
- 0);
-
- last_keycode = 0xFF;
- }
- }
-
- if (ts_data->info.count > 0) { /* point touch pressed */
- for (i = 0; i < ts_data->info.count; i++) {
- input_mt_slot(ts_data->input_dev, i);
- input_mt_report_slot_state(ts_data->input_dev,
- MT_TOOL_FINGER, 1);
- input_report_abs(ts_data->input_dev,
- ABS_MT_TOUCH_MAJOR, 1);
- input_report_abs(ts_data->input_dev,
- ABS_MT_POSITION_X,
- ts_data->info.point[i].x);
- input_report_abs(ts_data->input_dev,
- ABS_MT_POSITION_Y,
- ts_data->info.point[i].y);
- }
- }
-
- if (last_count > ts_data->info.count) {
- for (i = ts_data->info.count;
- i < ts_data->pdata->num_max_touches;
- i++) {
- input_mt_slot(ts_data->input_dev, i);
- input_mt_report_slot_state(ts_data->input_dev,
- MT_TOOL_FINGER, 0);
- }
- }
- last_count = ts_data->info.count;
-
- input_report_key(ts_data->input_dev, BTN_TOUCH,
- ts_data->info.count > 0);
- input_report_key(ts_data->input_dev, BTN_TOOL_FINGER,
- ts_data->info.count > 0);
-
- input_sync(ts_data->input_dev);
- }
-
- return IRQ_HANDLED;
-}
-
-static int msg21xx_ts_power_init(struct msg21xx_ts_data *ts_data, bool init)
-{
- int rc;
-
- if (init) {
- ts_data->vdd = regulator_get(&ts_data->client->dev,
- "vdd");
- if (IS_ERR(ts_data->vdd)) {
- rc = PTR_ERR(ts_data->vdd);
- dev_err(&ts_data->client->dev,
- "Regulator get failed vdd rc=%d\n", rc);
- return rc;
- }
-
- if (regulator_count_voltages(ts_data->vdd) > 0) {
- rc = regulator_set_voltage(ts_data->vdd,
- MSTAR_VTG_MIN_UV,
- MSTAR_VTG_MAX_UV);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator set_vtg failed vdd rc=%d\n",
- rc);
- goto reg_vdd_put;
- }
- }
-
- ts_data->vcc_i2c = regulator_get(&ts_data->client->dev,
- "vcc_i2c");
- if (IS_ERR(ts_data->vcc_i2c)) {
- rc = PTR_ERR(ts_data->vcc_i2c);
- dev_err(&ts_data->client->dev,
- "Regulator get failed vcc_i2c rc=%d\n", rc);
- goto reg_vdd_set_vtg;
- }
-
- if (regulator_count_voltages(ts_data->vcc_i2c) > 0) {
- rc = regulator_set_voltage(ts_data->vcc_i2c,
- MSTAR_I2C_VTG_MIN_UV,
- MSTAR_I2C_VTG_MAX_UV);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator set_vtg failed vcc_i2c rc=%d\n", rc);
- goto reg_vcc_i2c_put;
- }
- }
- } else {
- if (regulator_count_voltages(ts_data->vdd) > 0)
- regulator_set_voltage(ts_data->vdd, 0,
- MSTAR_VTG_MAX_UV);
-
- regulator_put(ts_data->vdd);
-
- if (regulator_count_voltages(ts_data->vcc_i2c) > 0)
- regulator_set_voltage(ts_data->vcc_i2c, 0,
- MSTAR_I2C_VTG_MAX_UV);
-
- regulator_put(ts_data->vcc_i2c);
- }
-
- return 0;
-
-reg_vcc_i2c_put:
- regulator_put(ts_data->vcc_i2c);
-reg_vdd_set_vtg:
- if (regulator_count_voltages(ts_data->vdd) > 0)
- regulator_set_voltage(ts_data->vdd, 0, MSTAR_VTG_MAX_UV);
-reg_vdd_put:
- regulator_put(ts_data->vdd);
- return rc;
-}
-
-static int msg21xx_ts_power_on(struct msg21xx_ts_data *ts_data, bool on)
-{
- int rc;
-
- if (!on)
- goto power_off;
-
- rc = regulator_enable(ts_data->vdd);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator vdd enable failed rc=%d\n", rc);
- return rc;
- }
-
- rc = regulator_enable(ts_data->vcc_i2c);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator vcc_i2c enable failed rc=%d\n", rc);
- regulator_disable(ts_data->vdd);
- }
-
- return rc;
-
- DBG("*** %s ***\n", __func__);
- rc = regulator_disable(vdd);
-power_off:
- rc = regulator_disable(ts_data->vdd);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator vdd disable failed rc=%d\n", rc);
- return rc;
- }
-
- rc = regulator_disable(ts_data->vcc_i2c);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator vcc_i2c disable failed rc=%d\n", rc);
- rc = regulator_enable(ts_data->vdd);
- }
-
- return rc;
-}
-
-static int msg21xx_ts_gpio_configure(struct msg21xx_ts_data *ts_data, bool on)
-{
- int ret = 0;
-
- if (!on)
- goto pwr_deinit;
-
- if (gpio_is_valid(ts_data->pdata->irq_gpio)) {
- ret = gpio_request(ts_data->pdata->irq_gpio,
- "msg21xx_irq_gpio");
- if (ret) {
- dev_err(&ts_data->client->dev,
- "Failed to request GPIO[%d], %d\n",
- ts_data->pdata->irq_gpio, ret);
- goto err_irq_gpio_req;
- }
- ret = gpio_direction_input(ts_data->pdata->irq_gpio);
- if (ret) {
- dev_err(&ts_data->client->dev,
- "Failed to set direction for gpio[%d], %d\n",
- ts_data->pdata->irq_gpio, ret);
- goto err_irq_gpio_dir;
- }
- gpio_set_value_cansleep(ts_data->pdata->irq_gpio, 1);
- } else {
- dev_err(&ts_data->client->dev, "irq gpio not provided\n");
- goto err_irq_gpio_req;
- }
-
- if (gpio_is_valid(ts_data->pdata->reset_gpio)) {
- ret = gpio_request(ts_data->pdata->reset_gpio,
- "msg21xx_reset_gpio");
- if (ret) {
- dev_err(&ts_data->client->dev,
- "Failed to request GPIO[%d], %d\n",
- ts_data->pdata->reset_gpio, ret);
- goto err_reset_gpio_req;
- }
-
- /* power on TP */
- ret = gpio_direction_output(
- ts_data->pdata->reset_gpio, 1);
- if (ret) {
- dev_err(&ts_data->client->dev,
- "Failed to set direction for GPIO[%d], %d\n",
- ts_data->pdata->reset_gpio, ret);
- goto err_reset_gpio_dir;
- }
- msleep(100);
- gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 0);
- msleep(20);
- gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 1);
- msleep(200);
- } else {
- dev_err(&ts_data->client->dev, "reset gpio not provided\n");
- goto err_reset_gpio_req;
- }
-
- return 0;
-
-err_reset_gpio_dir:
- if (gpio_is_valid(ts_data->pdata->reset_gpio))
-		gpio_free(ts_data->pdata->reset_gpio);
-err_reset_gpio_req:
-err_irq_gpio_dir:
- if (gpio_is_valid(ts_data->pdata->irq_gpio))
- gpio_free(ts_data->pdata->irq_gpio);
-err_irq_gpio_req:
- return ret;
-
-pwr_deinit:
- if (gpio_is_valid(ts_data->pdata->irq_gpio))
- gpio_free(ts_data->pdata->irq_gpio);
- if (gpio_is_valid(ts_data->pdata->reset_gpio)) {
- gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 0);
- ret = gpio_direction_input(ts_data->pdata->reset_gpio);
- if (ret)
- dev_err(&ts_data->client->dev,
- "Unable to set direction for gpio [%d]\n",
- ts_data->pdata->reset_gpio);
- gpio_free(ts_data->pdata->reset_gpio);
- }
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int msg21xx_ts_resume(struct device *dev)
-{
- int retval;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- if (!ts_data->suspended) {
- dev_info(dev, "msg21xx_ts already in resume\n");
- return 0;
- }
-
- mutex_lock(&ts_data->ts_mutex);
-
- retval = msg21xx_ts_power_on(ts_data, true);
- if (retval) {
- dev_err(dev, "msg21xx_ts power on failed");
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
-
- if (ts_data->ts_pinctrl) {
- retval = pinctrl_select_state(ts_data->ts_pinctrl,
- ts_data->pinctrl_state_active);
- if (retval < 0) {
- dev_err(dev, "Cannot get active pinctrl state\n");
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
- }
-
- retval = msg21xx_ts_gpio_configure(ts_data, true);
- if (retval) {
- dev_err(dev, "Failed to put gpios in active state %d",
- retval);
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
-
- enable_irq(ts_data->client->irq);
- ts_data->suspended = false;
-
- mutex_unlock(&ts_data->ts_mutex);
-
- return 0;
-}
-
-static int msg21xx_ts_suspend(struct device *dev)
-{
- int retval;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- if (ts_data->pdata->updating_fw) {
- dev_info(dev, "Firmware loading in progress\n");
- return 0;
- }
-
- if (ts_data->suspended) {
- dev_info(dev, "msg21xx_ts already in suspend\n");
- return 0;
- }
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
- if (bEnableTpProximity) {
- dev_dbg(dev, "suspend bEnableTpProximity=%d\n",
- bEnableTpProximity);
- return 0;
- }
-#endif
-
- mutex_lock(&ts_data->ts_mutex);
-
- disable_irq(ts_data->client->irq);
-
- touch_driver_touch_released(ts_data);
-
- if (ts_data->ts_pinctrl) {
- retval = pinctrl_select_state(ts_data->ts_pinctrl,
- ts_data->pinctrl_state_suspend);
- if (retval < 0) {
- dev_err(dev, "Cannot get idle pinctrl state %d\n",
- retval);
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
- }
-
- retval = msg21xx_ts_gpio_configure(ts_data, false);
- if (retval) {
- dev_err(dev, "Failed to put gpios in idle state %d",
- retval);
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
-
- retval = msg21xx_ts_power_on(ts_data, false);
- if (retval) {
- dev_err(dev, "msg21xx_ts power off failed");
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
-
- ts_data->suspended = true;
-
- mutex_unlock(&ts_data->ts_mutex);
-
- return 0;
-}
-#else
-static int msg21xx_ts_resume(struct device *dev)
-{
- return 0;
-}
-static int msg21xx_ts_suspend(struct device *dev)
-{
- return 0;
-}
-#endif
-
-static int msg21xx_debug_suspend_set(void *_data, u64 val)
-{
- struct msg21xx_ts_data *data = _data;
-
- mutex_lock(&data->input_dev->mutex);
-
- if (val)
- msg21xx_ts_suspend(&data->client->dev);
- else
- msg21xx_ts_resume(&data->client->dev);
-
- mutex_unlock(&data->input_dev->mutex);
-
- return 0;
-}
-
-static int msg21xx_debug_suspend_get(void *_data, u64 *val)
-{
- struct msg21xx_ts_data *data = _data;
-
- mutex_lock(&data->input_dev->mutex);
- *val = data->suspended;
- mutex_unlock(&data->input_dev->mutex);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(debug_suspend_fops, msg21xx_debug_suspend_get,
- msg21xx_debug_suspend_set, "%lld\n");
-
-
-#if defined(CONFIG_FB)
-static int fb_notifier_callback(struct notifier_block *self,
- unsigned long event, void *data)
-{
- struct fb_event *evdata = data;
- int *blank;
- struct msg21xx_ts_data *ts_data =
- container_of(self, struct msg21xx_ts_data, fb_notif);
-
- if (evdata && evdata->data && event == FB_EVENT_BLANK) {
- blank = evdata->data;
- if (*blank == FB_BLANK_UNBLANK)
- msg21xx_ts_resume(&ts_data->client->dev);
- else if (*blank == FB_BLANK_POWERDOWN)
- msg21xx_ts_suspend(&ts_data->client->dev);
- }
-
- return 0;
-}
-#endif
-
-static int msg21xx_get_dt_coords(struct device *dev, char *name,
- struct msg21xx_ts_platform_data *pdata)
-{
- u32 coords[FT_COORDS_ARR_SIZE];
- struct property *prop;
- struct device_node *np = dev->of_node;
- int coords_size, rc;
-
- prop = of_find_property(np, name, NULL);
- if (!prop)
- return -EINVAL;
- if (!prop->value)
- return -ENODATA;
-
- coords_size = prop->length / sizeof(u32);
- if (coords_size != FT_COORDS_ARR_SIZE) {
- dev_err(dev, "invalid %s\n", name);
- return -EINVAL;
- }
-
- rc = of_property_read_u32_array(np, name, coords, coords_size);
- if (rc && (rc != -EINVAL)) {
- dev_err(dev, "Unable to read %s\n", name);
- return rc;
- }
-
- if (!strcmp(name, "mstar,panel-coords")) {
- pdata->panel_minx = coords[0];
- pdata->panel_miny = coords[1];
- pdata->panel_maxx = coords[2];
- pdata->panel_maxy = coords[3];
- } else if (!strcmp(name, "mstar,display-coords")) {
- pdata->x_min = coords[0];
- pdata->y_min = coords[1];
- pdata->x_max = coords[2];
- pdata->y_max = coords[3];
- } else {
- dev_err(dev, "unsupported property %s\n", name);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int msg21xx_parse_dt(struct device *dev,
- struct msg21xx_ts_platform_data *pdata)
-{
- int rc;
- struct device_node *np = dev->of_node;
- struct property *prop;
- u32 temp_val;
-
- rc = msg21xx_get_dt_coords(dev, "mstar,panel-coords", pdata);
- if (rc && (rc != -EINVAL))
- return rc;
-
- rc = msg21xx_get_dt_coords(dev, "mstar,display-coords", pdata);
- if (rc)
- return rc;
-
- rc = of_property_read_u32(np, "mstar,hard-reset-delay-ms",
- &temp_val);
- if (!rc)
- pdata->hard_reset_delay_ms = temp_val;
- else
- return rc;
-
- rc = of_property_read_u32(np, "mstar,post-hard-reset-delay-ms",
- &temp_val);
- if (!rc)
- pdata->post_hard_reset_delay_ms = temp_val;
- else
- return rc;
-
- /* reset, irq gpio info */
- pdata->reset_gpio = of_get_named_gpio_flags(np, "mstar,reset-gpio",
- 0, &pdata->reset_gpio_flags);
- if (pdata->reset_gpio < 0)
- return pdata->reset_gpio;
-
- pdata->irq_gpio = of_get_named_gpio_flags(np, "mstar,irq-gpio",
- 0, &pdata->irq_gpio_flags);
- if (pdata->irq_gpio < 0)
- return pdata->irq_gpio;
-
- rc = of_property_read_u32(np, "mstar,ic-type", &temp_val);
- if (rc && (rc != -EINVAL))
- return rc;
-
- pdata->ic_type = temp_val;
-
- rc = of_property_read_u32(np, "mstar,num-max-touches", &temp_val);
- if (!rc)
- pdata->num_max_touches = temp_val;
- else
- return rc;
-
- prop = of_find_property(np, "mstar,button-map", NULL);
- if (prop) {
- pdata->num_buttons = prop->length / sizeof(temp_val);
- if (pdata->num_buttons > MAX_BUTTONS)
- return -EINVAL;
-
- rc = of_property_read_u32_array(np,
- "mstar,button-map", pdata->button_map,
- pdata->num_buttons);
- if (rc) {
- dev_err(dev, "Unable to read key codes\n");
- return rc;
- }
- }
-
- return 0;
-}
-
-/* probe function is used for matching and initializing input device */
-static int msg21xx_ts_probe(struct i2c_client *client,
- const struct i2c_device_id *id) {
-
- int ret = 0, i;
- struct dentry *temp, *dir;
- struct input_dev *input_dev;
- struct msg21xx_ts_data *ts_data;
- struct msg21xx_ts_platform_data *pdata;
-
- if (client->dev.of_node) {
- pdata = devm_kzalloc(&client->dev,
- sizeof(struct msg21xx_ts_platform_data), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- ret = msg21xx_parse_dt(&client->dev, pdata);
- if (ret) {
- dev_err(&client->dev, "DT parsing failed\n");
- return ret;
- }
- } else
- pdata = client->dev.platform_data;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "I2C not supported\n");
- return -ENODEV;
- }
-
- ts_data = devm_kzalloc(&client->dev,
- sizeof(struct msg21xx_ts_data), GFP_KERNEL);
- if (!ts_data)
- return -ENOMEM;
-
- ts_data->client = client;
- ts_data->info.point = devm_kzalloc(&client->dev,
- sizeof(struct touchPoint_t) * pdata->num_max_touches,
- GFP_KERNEL);
- if (!ts_data->info.point) {
- dev_err(&client->dev, "Not enough memory\n");
- return -ENOMEM;
- }
-
- /* allocate an input device */
- input_dev = input_allocate_device();
- if (!input_dev) {
- ret = -ENOMEM;
- dev_err(&client->dev, "input device allocation failed\n");
- goto err_input_allocate_dev;
- }
-
- input_dev->name = client->name;
- input_dev->phys = "I2C";
- input_dev->dev.parent = &client->dev;
- input_dev->id.bustype = BUS_I2C;
-
- ts_data->input_dev = input_dev;
- ts_data->client = client;
- ts_data->pdata = pdata;
-
- input_set_drvdata(input_dev, ts_data);
- i2c_set_clientdata(client, ts_data);
-
- ret = msg21xx_ts_power_init(ts_data, true);
- if (ret) {
- dev_err(&client->dev, "Mstar power init failed\n");
- return ret;
- }
-
- ret = msg21xx_ts_power_on(ts_data, true);
- if (ret) {
- dev_err(&client->dev, "Mstar power on failed\n");
- goto exit_deinit_power;
- }
-
- ret = msg21xx_pinctrl_init(ts_data);
- if (!ret && ts_data->ts_pinctrl) {
-		/*
-		 * The pinctrl handle is optional. If a handle is found,
-		 * configure the pins in the active state; if none is
-		 * found, continue without error.
-		 */
- ret = pinctrl_select_state(ts_data->ts_pinctrl,
- ts_data->pinctrl_state_active);
- if (ret < 0)
- dev_err(&client->dev,
-				"Failed to select %s pin state %d\n",
- PINCTRL_STATE_ACTIVE, ret);
- }
-
- ret = msg21xx_ts_gpio_configure(ts_data, true);
- if (ret) {
- dev_err(&client->dev, "Failed to configure gpio %d\n", ret);
- goto exit_gpio_config;
- }
-
- if (msg21xx_get_ic_type(ts_data) == 0) {
- dev_err(&client->dev, "The current IC is not Mstar\n");
- ret = -1;
- goto err_wrong_ic_type;
- }
-
- mutex_init(&msg21xx_mutex);
- mutex_init(&ts_data->ts_mutex);
-
- /* set the supported event type for input device */
- set_bit(EV_ABS, input_dev->evbit);
- set_bit(EV_SYN, input_dev->evbit);
- set_bit(EV_KEY, input_dev->evbit);
- set_bit(BTN_TOUCH, input_dev->keybit);
- set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
-
- for (i = 0; i < pdata->num_buttons; i++)
- input_set_capability(input_dev, EV_KEY, pdata->button_map[i]);
-
- input_set_drvdata(input_dev, ts_data);
- i2c_set_clientdata(client, ts_data);
-
-#ifdef CONFIG_TP_HAVE_KEY
- {
- int i;
-
- for (i = 0; i < num_buttons; i++)
- input_set_capability(input_dev, EV_KEY, button_map[i]);
- }
-#endif
-
-	input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 2, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, pdata->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, pdata->y_max, 0, 0);
- ret = input_mt_init_slots(input_dev, pdata->num_max_touches, 0);
- if (ret) {
- dev_err(&client->dev,
- "Error %d initialising slots\n", ret);
- goto err_free_mem;
- }
-
- /* register the input device to input sub-system */
- ret = input_register_device(input_dev);
- if (ret < 0) {
- dev_err(&client->dev,
- "Unable to register ms-touchscreen input device\n");
- goto err_input_reg_dev;
- }
-
- /* version */
- if (device_create_file(&client->dev, &dev_attr_version) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_version.attr.name);
- goto err_create_fw_ver_file;
- }
- /* update */
- if (device_create_file(&client->dev, &dev_attr_update) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_update.attr.name);
- goto err_create_fw_update_file;
- }
- /* data */
- if (device_create_file(&client->dev, &dev_attr_data) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_data.attr.name);
- goto err_create_fw_data_file;
- }
- /* fw name */
- if (device_create_file(&client->dev, &dev_attr_fw_name) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_fw_name.attr.name);
- goto err_create_fw_name_file;
- }
- /* smart fw update */
- if (device_create_file(&client->dev, &dev_attr_update_fw) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_update_fw.attr.name);
- goto err_create_update_fw_file;
- }
- /* smart fw force update */
- if (device_create_file(&client->dev,
- &dev_attr_force_update_fw) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_force_update_fw.attr.name);
- goto err_create_force_update_fw_file;
- }
- dir = debugfs_create_dir(MSTAR_DEBUG_DIR_NAME, NULL);
- temp = debugfs_create_file("suspend", S_IRUSR | S_IWUSR, dir,
- ts_data, &debug_suspend_fops);
- if (temp == NULL || IS_ERR(temp)) {
- dev_err(&client->dev,
- "debugfs_create_file failed: rc=%ld\n", PTR_ERR(temp));
- goto free_debug_dir;
- }
-
-#ifdef TP_PRINT
- tp_print_create_entry(ts_data);
-#endif
-
- ret = request_threaded_irq(client->irq, NULL,
- msg21xx_ts_interrupt,
- pdata->irq_gpio_flags | IRQF_ONESHOT,
- "msg21xx", ts_data);
- if (ret)
- goto err_req_irq;
-
- disable_irq(client->irq);
-
-#if defined(CONFIG_FB)
- ts_data->fb_notif.notifier_call = fb_notifier_callback;
- ret = fb_register_client(&ts_data->fb_notif);
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
- tsps_assist_register_callback("msg21xx", &tsps_msg21xx_enable,
- &tsps_msg21xx_data);
-#endif
-
- dev_dbg(&client->dev, "mstar touch screen registered\n");
- enable_irq(client->irq);
- return 0;
-
-err_req_irq:
- free_irq(client->irq, ts_data);
- device_remove_file(&client->dev, &dev_attr_data);
-free_debug_dir:
- debugfs_remove_recursive(dir);
-err_create_fw_data_file:
- device_remove_file(&client->dev, &dev_attr_update);
-err_create_fw_update_file:
- device_remove_file(&client->dev, &dev_attr_version);
-err_create_fw_name_file:
- device_remove_file(&client->dev, &dev_attr_fw_name);
-err_create_update_fw_file:
- device_remove_file(&client->dev, &dev_attr_update_fw);
-err_create_force_update_fw_file:
- device_remove_file(&client->dev, &dev_attr_force_update_fw);
-err_create_fw_ver_file:
- input_unregister_device(input_dev);
-
-err_input_reg_dev:
- input_free_device(input_dev);
- input_dev = NULL;
-err_input_allocate_dev:
- mutex_destroy(&msg21xx_mutex);
- mutex_destroy(&ts_data->ts_mutex);
-
-err_wrong_ic_type:
- msg21xx_ts_gpio_configure(ts_data, false);
-exit_gpio_config:
- if (ts_data->ts_pinctrl) {
- if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) {
- devm_pinctrl_put(ts_data->ts_pinctrl);
- ts_data->ts_pinctrl = NULL;
- } else {
- ret = pinctrl_select_state(ts_data->ts_pinctrl,
- ts_data->pinctrl_state_release);
- if (ret < 0)
- dev_err(&ts_data->client->dev,
- "Cannot get release pinctrl state\n");
- }
- }
- msg21xx_ts_power_on(ts_data, false);
-exit_deinit_power:
- msg21xx_ts_power_init(ts_data, false);
-err_free_mem:
- input_free_device(input_dev);
-
- return ret;
-}
-
-/* The remove function is triggered when the input device is removed
- * from the input subsystem.
- */
-static int touch_driver_remove(struct i2c_client *client)
-{
- int retval = 0;
- struct msg21xx_ts_data *ts_data = i2c_get_clientdata(client);
-
- free_irq(ts_data->client->irq, ts_data);
- gpio_free(ts_data->pdata->irq_gpio);
- gpio_free(ts_data->pdata->reset_gpio);
-
- if (ts_data->ts_pinctrl) {
- if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) {
- devm_pinctrl_put(ts_data->ts_pinctrl);
- ts_data->ts_pinctrl = NULL;
- } else {
- retval = pinctrl_select_state(ts_data->ts_pinctrl,
- ts_data->pinctrl_state_release);
- if (retval < 0)
- dev_err(&ts_data->client->dev,
- "Cannot get release pinctrl state\n");
- }
- }
-
- input_unregister_device(ts_data->input_dev);
- mutex_destroy(&msg21xx_mutex);
- mutex_destroy(&ts_data->ts_mutex);
-
- return retval;
-}
-
-/* The I2C device ID table is used to match the I2C device
- * with the I2C device driver.
- */
-static const struct i2c_device_id touch_device_id[] = {
- {"msg21xx", 0},
-	{}, /* must not be omitted */
-};
-
-static const struct of_device_id msg21xx_match_table[] = {
- { .compatible = "mstar,msg21xx", },
- { },
-};
-
-MODULE_DEVICE_TABLE(i2c, touch_device_id);
-
-static struct i2c_driver touch_device_driver = {
- .driver = {
- .name = "ms-msg21xx",
- .owner = THIS_MODULE,
- .of_match_table = msg21xx_match_table,
- },
- .probe = msg21xx_ts_probe,
- .remove = touch_driver_remove,
- .id_table = touch_device_id,
-};
-
-module_i2c_driver(touch_device_driver);
-
-#ifdef TP_PRINT
-#include <linux/proc_fs.h>
-
-static unsigned short InfoAddr = 0x0F, PoolAddr = 0x10, TransLen = 256;
-static unsigned char row, units, cnt;
-
-static int tp_print_proc_read(struct msg21xx_ts_data *ts_data)
-{
- unsigned short i, j;
- unsigned short left, offset = 0;
- unsigned char dbbus_tx_data[3] = {0};
- unsigned char u8Data;
- signed short s16Data;
- int s32Data;
- char *buf = NULL;
-
- left = cnt*row*units;
- if ((ts_data->suspended == 0) &&
- (InfoAddr != 0x0F) &&
- (PoolAddr != 0x10) &&
- (left > 0)) {
- buf = kmalloc(left, GFP_KERNEL);
- if (buf != NULL) {
-
- while (left > 0) {
- dbbus_tx_data[0] = 0x53;
- dbbus_tx_data[1] = ((PoolAddr + offset) >> 8)
- & 0xFF;
- dbbus_tx_data[2] = (PoolAddr + offset) & 0xFF;
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data, ts_data->client->addr,
- &dbbus_tx_data[0], 3);
- read_i2c_seq(ts_data, ts_data->client->addr,
- &buf[offset],
- left > TransLen ? TransLen : left);
- mutex_unlock(&msg21xx_mutex);
-
- if (left > TransLen) {
- left -= TransLen;
- offset += TransLen;
- } else {
- left = 0;
- }
- }
-
- for (i = 0; i < cnt; i++) {
- for (j = 0; j < row; j++) {
- if (units == 1) {
- u8Data = buf[i * row * units +
- j * units];
- } else if (units == 2) {
- s16Data = buf[i * row * units +
- j * units] +
- (buf[i * row * units +
- j * units + 1] << 8);
- } else if (units == 4) {
- s32Data = buf[i * row * units +
- j * units] +
- (buf[i * row * units +
- j * units + 1] << 8) +
- (buf[i * row * units +
- j * units + 2] << 16) +
- (buf[i * row * units +
- j * units + 3] << 24);
- }
- }
- }
-
- kfree(buf);
- }
- }
-
- return 0;
-}
-
-static void tp_print_create_entry(struct msg21xx_ts_data *ts_data)
-{
- unsigned char dbbus_tx_data[3] = {0};
- unsigned char dbbus_rx_data[8] = {0};
-
- dbbus_tx_data[0] = 0x53;
- dbbus_tx_data[1] = 0x00;
- dbbus_tx_data[2] = 0x58;
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 3);
- read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
- mutex_unlock(&msg21xx_mutex);
- InfoAddr = (dbbus_rx_data[1]<<8) + dbbus_rx_data[0];
- PoolAddr = (dbbus_rx_data[3]<<8) + dbbus_rx_data[2];
-
- if ((InfoAddr != 0x0F) && (PoolAddr != 0x10)) {
- msleep(20);
- dbbus_tx_data[0] = 0x53;
- dbbus_tx_data[1] = (InfoAddr >> 8) & 0xFF;
- dbbus_tx_data[2] = InfoAddr & 0xFF;
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data, ts_data->client->addr,
- &dbbus_tx_data[0], 3);
- read_i2c_seq(ts_data, ts_data->client->addr,
- &dbbus_rx_data[0], 8);
- mutex_unlock(&msg21xx_mutex);
-
- units = dbbus_rx_data[0];
- row = dbbus_rx_data[1];
- cnt = dbbus_rx_data[2];
- TransLen = (dbbus_rx_data[7]<<8) + dbbus_rx_data[6];
-
- if (device_create_file(&ts_data->client->dev,
- &dev_attr_tpp) < 0)
- dev_err(&ts_data->client->dev, "Failed to create device file(%s)!\n",
- dev_attr_tpp.attr.name);
- }
-}
-#endif
-
-MODULE_AUTHOR("MStar Semiconductor, Inc.");
-MODULE_LICENSE("GPL v2");
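The probe path removed above is a long goto-unwind ladder, and it shows why such ladders are fragile: err_req_irq calls free_irq() even though request_threaded_irq() has just failed, and several of the device_create_file() labels fall through and remove attributes that were never created. A minimal sketch of the idiom done strictly in reverse order, with hypothetical foo_* helpers standing in for the driver's real setup/teardown pairs:

/* Hedged sketch of the goto-unwind idiom; foo_* are hypothetical. */
static int foo_probe(struct i2c_client *client)
{
	int ret;

	ret = foo_power_init(client);		/* step 1 */
	if (ret)
		return ret;

	ret = foo_power_on(client);		/* step 2 */
	if (ret)
		goto err_power_init;		/* undo step 1 only */

	ret = foo_request_irq(client);		/* step 3 */
	if (ret)
		goto err_power_on;		/* undo step 2, then 1 */

	return 0;

err_power_on:
	foo_power_off(client);
err_power_init:
	foo_power_deinit(client);
	return ret;
}

Each label releases exactly one resource and falls through to the labels for the steps before it, so a failure at step N never touches anything step N itself did not acquire.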
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index ce15e150277e..ce1eb562be36 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -249,17 +249,6 @@
#define RESUME_RETRY (0 << 0)
#define RESUME_TERMINATE (1 << 0)
-#define TTBCR2_SEP_SHIFT 15
-#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
-
-#define TTBCR2_SEP_31 0
-#define TTBCR2_SEP_35 1
-#define TTBCR2_SEP_39 2
-#define TTBCR2_SEP_41 3
-#define TTBCR2_SEP_43 4
-#define TTBCR2_SEP_47 5
-#define TTBCR2_SEP_NOSIGN 7
-
#define TTBRn_ASID_SHIFT 48
#define FSR_MULTI (1 << 31)
@@ -1614,7 +1603,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
if (smmu->version > ARM_SMMU_V1) {
reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
- reg |= TTBCR2_SEP_UPSTREAM;
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
}
} else {
@@ -1745,7 +1733,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
- unsigned long quirks = 0;
+ unsigned long quirks =
+ smmu_domain->attributes & (1 << DOMAIN_ATTR_ENABLE_TTBR1) ?
+ IO_PGTABLE_QUIRK_ARM_TTBR1 : 0;
if (smmu_domain->smmu)
goto out;
@@ -1837,6 +1827,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
};
fmt = ARM_MSM_SECURE;
} else {
+
smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
.quirks = quirks,
.pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
@@ -3140,6 +3131,12 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
& (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
ret = 0;
break;
+ case DOMAIN_ATTR_ENABLE_TTBR1:
+ *((int *)data) = !!(smmu_domain->attributes
+ & (1 << DOMAIN_ATTR_ENABLE_TTBR1));
+ ret = 0;
+ break;
+
default:
ret = -ENODEV;
break;
@@ -3283,6 +3280,12 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
ret = 0;
break;
}
+ case DOMAIN_ATTR_ENABLE_TTBR1:
+ if (*((int *)data))
+ smmu_domain->attributes |=
+ 1 << DOMAIN_ATTR_ENABLE_TTBR1;
+ ret = 0;
+ break;
default:
ret = -ENODEV;
break;
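With the new attribute in place, a client that wants the split TTBR0/TTBR1 layout sets DOMAIN_ATTR_ENABLE_TTBR1 before attaching, so arm_smmu_init_domain_context() sees it when it derives the IO_PGTABLE_QUIRK_ARM_TTBR1 quirk. A hedged sketch of that call order (error handling trimmed, helper name hypothetical):

/* Sketch only: request the TTBR1 split before the domain is attached. */
static int example_enable_ttbr1(struct iommu_domain *domain,
				struct device *dev)
{
	int enable = 1;
	int ret;

	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_ENABLE_TTBR1,
				    &enable);
	if (ret)
		return ret;

	/* The quirk is sampled in init_domain_context() during attach. */
	return iommu_attach_device(domain, dev);
}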
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 0d057ca92972..5f2b66286c0c 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -131,14 +131,21 @@
#define ARM_LPAE_TCR_TG0_64K (1 << 14)
#define ARM_LPAE_TCR_TG0_16K (2 << 14)
+#define ARM_LPAE_TCR_TG1_16K 1ULL
+#define ARM_LPAE_TCR_TG1_4K 2ULL
+#define ARM_LPAE_TCR_TG1_64K 3ULL
+
#define ARM_LPAE_TCR_SH0_SHIFT 12
#define ARM_LPAE_TCR_SH0_MASK 0x3
+#define ARM_LPAE_TCR_SH1_SHIFT 28
#define ARM_LPAE_TCR_SH_NS 0
#define ARM_LPAE_TCR_SH_OS 2
#define ARM_LPAE_TCR_SH_IS 3
#define ARM_LPAE_TCR_ORGN0_SHIFT 10
+#define ARM_LPAE_TCR_ORGN1_SHIFT 26
#define ARM_LPAE_TCR_IRGN0_SHIFT 8
+#define ARM_LPAE_TCR_IRGN1_SHIFT 24
#define ARM_LPAE_TCR_RGN_MASK 0x3
#define ARM_LPAE_TCR_RGN_NC 0
#define ARM_LPAE_TCR_RGN_WBWA 1
@@ -151,6 +158,9 @@
#define ARM_LPAE_TCR_T0SZ_SHIFT 0
#define ARM_LPAE_TCR_SZ_MASK 0xf
+#define ARM_LPAE_TCR_T1SZ_SHIFT 16
+#define ARM_LPAE_TCR_T1SZ_MASK 0x3f
+
#define ARM_LPAE_TCR_PS_SHIFT 16
#define ARM_LPAE_TCR_PS_MASK 0x7
@@ -167,6 +177,16 @@
#define ARM_LPAE_TCR_EPD1_SHIFT 23
#define ARM_LPAE_TCR_EPD1_FAULT 1
+#define ARM_LPAE_TCR_SEP_SHIFT (15 + 32)
+
+#define ARM_LPAE_TCR_SEP_31 0ULL
+#define ARM_LPAE_TCR_SEP_35 1ULL
+#define ARM_LPAE_TCR_SEP_39 2ULL
+#define ARM_LPAE_TCR_SEP_41 3ULL
+#define ARM_LPAE_TCR_SEP_43 4ULL
+#define ARM_LPAE_TCR_SEP_47 5ULL
+#define ARM_LPAE_TCR_SEP_UPSTREAM 7ULL
+
#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
@@ -206,7 +226,7 @@ struct arm_lpae_io_pgtable {
unsigned long pg_shift;
unsigned long bits_per_level;
- void *pgd;
+ void *pgd[2];
};
typedef u64 arm_lpae_iopte;
@@ -524,14 +544,26 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
return pte;
}
+static inline arm_lpae_iopte *arm_lpae_get_table(
+ struct arm_lpae_io_pgtable *data, unsigned long iova)
+{
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+
+ return ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) &&
+ (iova & (1UL << (cfg->ias - 1)))) ?
+ data->pgd[1] : data->pgd[0];
+}
+
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int iommu_prot)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
- arm_lpae_iopte *ptep = data->pgd;
+ arm_lpae_iopte *ptep;
int ret, lvl = ARM_LPAE_START_LVL(data);
arm_lpae_iopte prot;
+ ptep = arm_lpae_get_table(data, iova);
+
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
@@ -554,7 +586,7 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
- arm_lpae_iopte *ptep = data->pgd;
+ arm_lpae_iopte *ptep;
int lvl = ARM_LPAE_START_LVL(data);
arm_lpae_iopte prot;
struct scatterlist *s;
@@ -563,6 +595,8 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
unsigned int min_pagesz;
struct map_state ms;
+ ptep = arm_lpae_get_table(data, iova);
+
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
goto out_err;
@@ -672,7 +706,10 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
- __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd[0]);
+ if (data->pgd[1])
+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data),
+ data->pgd[1]);
kfree(data);
}
@@ -800,9 +837,11 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t unmapped = 0;
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable *iop = &data->iop;
- arm_lpae_iopte *ptep = data->pgd;
+ arm_lpae_iopte *ptep;
int lvl = ARM_LPAE_START_LVL(data);
+ ptep = arm_lpae_get_table(data, iova);
+
while (unmapped < size) {
size_t ret, size_to_unmap, remaining;
@@ -828,7 +867,10 @@ static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data,
unsigned long iova, int *plvl_ret,
arm_lpae_iopte *ptep_ret)
{
- arm_lpae_iopte pte, *ptep = data->pgd;
+ arm_lpae_iopte pte, *ptep;
+
+ ptep = arm_lpae_get_table(data, iova);
+
*plvl_ret = ARM_LPAE_START_LVL(data);
*ptep_ret = 0;
@@ -994,6 +1036,71 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
return data;
}
+static u64 arm64_lpae_setup_ttbr1(struct io_pgtable_cfg *cfg,
+ struct arm_lpae_io_pgtable *data)
+
+{
+ u64 reg;
+
+	/* If TTBR1 is disabled, also disable speculative walks through TTBR1 */
+ if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)) {
+ reg = ARM_LPAE_TCR_EPD1;
+ reg |= (ARM_LPAE_TCR_SEP_UPSTREAM << ARM_LPAE_TCR_SEP_SHIFT);
+ return reg;
+ }
+
+ if (cfg->iommu_dev && cfg->iommu_dev->archdata.dma_coherent)
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN1_SHIFT);
+ else
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN1_SHIFT);
+
+ switch (1 << data->pg_shift) {
+ case SZ_4K:
+ reg |= (ARM_LPAE_TCR_TG1_4K << 30);
+ break;
+ case SZ_16K:
+ reg |= (ARM_LPAE_TCR_TG1_16K << 30);
+ break;
+ case SZ_64K:
+ reg |= (ARM_LPAE_TCR_TG1_64K << 30);
+ break;
+ }
+
+ /* Set T1SZ */
+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T1SZ_SHIFT;
+
+	/* Set the SEP field based on the IAS */
+ switch (cfg->ias) {
+ case 32:
+ reg |= (ARM_LPAE_TCR_SEP_31 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 36:
+ reg |= (ARM_LPAE_TCR_SEP_35 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 40:
+ reg |= (ARM_LPAE_TCR_SEP_39 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 42:
+ reg |= (ARM_LPAE_TCR_SEP_41 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 44:
+ reg |= (ARM_LPAE_TCR_SEP_43 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 48:
+ reg |= (ARM_LPAE_TCR_SEP_47 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ default:
+ reg |= (ARM_LPAE_TCR_SEP_UPSTREAM << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ }
+
+ return reg;
+}
+
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
@@ -1050,8 +1157,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
- /* Disable speculative walks through TTBR1 */
- reg |= ARM_LPAE_TCR_EPD1;
+ /* Bring in the TTBR1 configuration */
+ reg |= arm64_lpae_setup_ttbr1(cfg, data);
+
cfg->arm_lpae_s1_cfg.tcr = reg;
/* MAIRs */
@@ -1066,16 +1174,33 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s1_cfg.mair[1] = 0;
/* Looking good; allocate a pgd */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg, cookie);
- if (!data->pgd)
+ data->pgd[0] = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+ cookie);
+ if (!data->pgd[0])
goto out_free_data;
+
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
+ data->pgd[1] = __arm_lpae_alloc_pages(data->pgd_size,
+ GFP_KERNEL, cfg, cookie);
+ if (!data->pgd[1]) {
+ __arm_lpae_free_pages(data->pgd[0], data->pgd_size, cfg,
+ cookie);
+ goto out_free_data;
+ }
+ } else {
+ data->pgd[1] = NULL;
+ }
+
/* Ensure the empty pgd is visible before any actual TTBR write */
wmb();
/* TTBRs */
- cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
- cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd[0]);
+
+ if (data->pgd[1])
+ cfg->arm_lpae_s1_cfg.ttbr[1] = virt_to_phys(data->pgd[1]);
+
return &data->iop;
out_free_data:
@@ -1155,15 +1280,16 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s2_cfg.vtcr = reg;
/* Allocate pgd pages */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg, cookie);
- if (!data->pgd)
+ data->pgd[0] = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+ cookie);
+ if (!data->pgd[0])
goto out_free_data;
/* Ensure the empty pgd is visible before any actual TTBR write */
wmb();
/* VTTBR */
- cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
+ cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd[0]);
return &data->iop;
out_free_data:
@@ -1261,7 +1387,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
cfg->pgsize_bitmap, cfg->ias);
pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
data->levels, data->pgd_size, data->pg_shift,
- data->bits_per_level, data->pgd);
+ data->bits_per_level, data->pgd[0]);
}
#define __FAIL(ops, i) ({ \
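The pgd selection added in arm_lpae_get_table() keys off the top bit of the input address space: with the TTBR1 quirk set, an IOVA whose bit (ias - 1) is set walks pgd[1], everything else walks pgd[0]. A small standalone illustration of that predicate, assuming ias = 48 purely for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int ias = 48;
	uint64_t lo = 0x0000123456789000ULL;	/* bit 47 clear -> pgd[0] */
	uint64_t hi = 0x0000923456789000ULL;	/* bit 47 set   -> pgd[1] */

	printf("lo -> pgd[%d]\n", !!(lo & (1ULL << (ias - 1))));
	printf("hi -> pgd[%d]\n", !!(hi & (1ULL << (ias - 1))));
	return 0;
}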
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index a3f366f559a7..f4533040806f 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -62,6 +62,7 @@ struct io_pgtable_cfg {
*/
#define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
#define IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT (1 << 1)
+ #define IO_PGTABLE_QUIRK_ARM_TTBR1 (1 << 2) /* Allocate TTBR1 PT */
int quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
index 1c0a10e2fbef..a0f1d5148c94 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
@@ -878,6 +878,8 @@ static int cam_smmu_detach_device(int idx)
static int cam_smmu_attach_sec_cpp(int idx)
{
+ int32_t rc = 0;
+
/*
* When switching to secure, detach CPP NS, do scm call
* with CPP SID and no need of attach again, because
@@ -889,8 +891,12 @@ static int cam_smmu_attach_sec_cpp(int idx)
return -EINVAL;
}
- msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_SECURE,
+ rc = msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_SECURE,
MSM_CAMERA_TZ_HW_BLOCK_CPP);
+ if (rc != 0) {
+		pr_err("failed to set secure mode for cpp, rc %d", rc);
+ return rc;
+ }
iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
@@ -899,8 +905,14 @@ static int cam_smmu_attach_sec_cpp(int idx)
static int cam_smmu_detach_sec_cpp(int idx)
{
- msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_NON_SECURE,
+ int32_t rc = 0;
+
+ rc = msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_NON_SECURE,
MSM_CAMERA_TZ_HW_BLOCK_CPP);
+ if (rc != 0) {
+		pr_err("failed to switch to non-secure mode for cpp, rc %d", rc);
+ return rc;
+ }
iommu_cb_set.cb_info[idx].state = CAM_SMMU_DETACH;
@@ -917,6 +929,8 @@ static int cam_smmu_detach_sec_cpp(int idx)
static int cam_smmu_attach_sec_vfe_ns_stats(int idx)
{
+ int32_t rc = 0;
+
/*
*When switching to secure, for secure pixel and non-secure stats
*localizing scm/attach of non-secure SID's in attach secure
@@ -933,16 +947,26 @@ static int cam_smmu_attach_sec_vfe_ns_stats(int idx)
}
}
- msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_SECURE,
+ rc = msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_SECURE,
MSM_CAMERA_TZ_HW_BLOCK_ISP);
+ if (rc != 0) {
+		pr_err("failed to set secure mode for vfe, rc %d", rc);
+ return rc;
+ }
return 0;
}
static int cam_smmu_detach_sec_vfe_ns_stats(int idx)
{
- msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_NON_SECURE,
+ int32_t rc = 0;
+
+ rc = msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_NON_SECURE,
MSM_CAMERA_TZ_HW_BLOCK_ISP);
+ if (rc != 0) {
+		pr_err("failed to switch to non-secure mode for vfe, rc %d", rc);
+ return rc;
+ }
/*
*While exiting from secure mode for secure pixel and non-secure stats,
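The cam_smmu changes above all follow one pattern: the TZ mode switch used to be fire-and-forget, and now its return code gates the state transition. A hedged sketch of the shape these helpers converge on (function name hypothetical; the calls and state field are the ones the patch touches):

/* Sketch: only commit the state change once the TZ call succeeded. */
static int example_attach_secure(int idx)
{
	int rc;

	rc = msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_SECURE,
				    MSM_CAMERA_TZ_HW_BLOCK_CPP);
	if (rc) {
		pr_err("failed to set secure mode, rc %d\n", rc);
		return rc;	/* state is left untouched on failure */
	}

	iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
	return 0;
}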
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index de29692414d2..f7eb0f8ac5a8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1179,26 +1179,6 @@ int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
rc = msm_isp_buf_get_scratch(buf_mgr);
if (rc)
goto err2;
- } else {
- if (buf_mgr->attach_ref_cnt > 0)
- buf_mgr->attach_ref_cnt--;
- else
- pr_err("%s: Error! Invalid ref_cnt %d\n",
- __func__, buf_mgr->attach_ref_cnt);
-
- if (buf_mgr->attach_ref_cnt == 0) {
- rc = msm_isp_buf_put_scratch(buf_mgr);
- if (buf_mgr->secure_enable == SECURE_MODE)
- rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
- CAM_SMMU_DETACH_SEC_VFE_NS_STATS);
- else
- rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
- CAM_SMMU_DETACH);
- if (rc < 0) {
- pr_err("%s: img/stats smmu detach error, rc :%d\n",
- __func__, rc);
- }
- }
}
mutex_unlock(&buf_mgr->lock);
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index c6dc4b75e479..5cf5582b55ab 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -53,6 +53,8 @@ spinlock_t msm_eventq_lock;
static struct pid *msm_pid;
spinlock_t msm_pid_lock;
+static uint32_t gpu_limit;
+
/*
* It takes 20 bytes + NULL character to write the
* largest decimal value of an uint64_t
@@ -442,6 +444,14 @@ int msm_create_session(unsigned int session_id, struct video_device *vdev)
mutex_init(&session->lock);
mutex_init(&session->lock_q);
mutex_init(&session->close_lock);
+
+ if (gpu_limit) {
+ session->sysfs_pwr_limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0);
+ if (session->sysfs_pwr_limit)
+ kgsl_pwr_limits_set_freq(session->sysfs_pwr_limit,
+ gpu_limit);
+ }
+
return 0;
}
EXPORT_SYMBOL(msm_create_session);
@@ -607,6 +617,11 @@ int msm_destroy_session(unsigned int session_id)
if (!session)
return -EINVAL;
+ if (gpu_limit && session->sysfs_pwr_limit) {
+ kgsl_pwr_limits_set_default(session->sysfs_pwr_limit);
+ kgsl_pwr_limits_del(session->sysfs_pwr_limit);
+ }
+
msm_destroy_session_streams(session);
msm_remove_session_cmd_ack_q(session);
mutex_destroy(&session->lock);
@@ -1290,6 +1305,9 @@ static int msm_probe(struct platform_device *pdev)
goto v4l2_fail;
}
+ of_property_read_u32(pdev->dev.of_node,
+ "qcom,gpu-limit", &gpu_limit);
+
goto probe_end;
v4l2_fail:
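The gpu_limit plumbing added above follows the kgsl power-limit lifecycle: add a limit handle when the camera session opens, pin the GPU frequency, then restore the default and delete the handle on teardown. A hedged sketch of that pairing, using the same kgsl_pwr_limits_* calls the patch introduces (the helper names are hypothetical):

/* Sketch of the per-session GPU cap lifecycle. */
static struct kgsl_pwr_limit *limit;

static void example_session_open(u32 gpu_limit)
{
	if (!gpu_limit)
		return;
	limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0);
	if (limit)
		kgsl_pwr_limits_set_freq(limit, gpu_limit);
}

static void example_session_close(void)
{
	if (!limit)
		return;
	kgsl_pwr_limits_set_default(limit);
	kgsl_pwr_limits_del(limit);
	limit = NULL;
}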
diff --git a/drivers/media/platform/msm/camera_v2/msm.h b/drivers/media/platform/msm/camera_v2/msm.h
index 2b3576b8edd2..7474cb119147 100644
--- a/drivers/media/platform/msm/camera_v2/msm.h
+++ b/drivers/media/platform/msm/camera_v2/msm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
#include <linux/pm_qos.h>
#include <linux/msm_ion.h>
#include <linux/iommu.h>
+#include <linux/msm_kgsl.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
@@ -110,6 +111,7 @@ struct msm_session {
struct mutex lock;
struct mutex lock_q;
struct mutex close_lock;
+ struct kgsl_pwr_limit *sysfs_pwr_limit;
};
static inline bool msm_is_daemon_present(void)
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 7885149440f9..064c1e8c5bab 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -834,8 +834,10 @@ static int cpp_init_mem(struct cpp_device *cpp_dev)
else
rc = cam_smmu_get_handle("cpp", &iommu_hdl);
- if (rc < 0)
+ if (rc < 0) {
+ pr_err("smmu get handle failed\n");
return -ENODEV;
+ }
cpp_dev->iommu_hdl = iommu_hdl;
cam_smmu_reg_client_page_fault_handler(
@@ -1466,10 +1468,16 @@ static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
msm_cpp_clear_timer(cpp_dev);
cpp_release_hardware(cpp_dev);
if (cpp_dev->iommu_state == CPP_IOMMU_STATE_ATTACHED) {
- cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
- rc = cam_smmu_ops(cpp_dev->iommu_hdl, CAM_SMMU_DETACH);
+ if (cpp_dev->security_mode == SECURE_MODE)
+ rc = cam_smmu_ops(cpp_dev->iommu_hdl,
+ CAM_SMMU_DETACH_SEC_CPP);
+ else
+ rc = cam_smmu_ops(cpp_dev->iommu_hdl,
+ CAM_SMMU_DETACH);
+
if (rc < 0)
pr_err("Error: Detach fail in release\n");
+ cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
}
cam_smmu_destroy_handle(cpp_dev->iommu_hdl);
msm_cpp_empty_list(processing_q, list_frame);
@@ -3450,6 +3458,7 @@ STREAM_BUFF_END:
rc = msm_cpp_copy_from_ioctl_ptr(&cpp_attach_info,
ioctl_ptr);
if (rc < 0) {
+			pr_err("CPP_IOMMU_ATTACH copy from user failed");
ERR_COPY_FROM_USER();
return -EINVAL;
}
@@ -3487,6 +3496,7 @@ STREAM_BUFF_END:
rc = msm_cpp_copy_from_ioctl_ptr(&cpp_attach_info,
ioctl_ptr);
if (rc < 0) {
+			pr_err("CPP_IOMMU_DETACH copy from user failed");
ERR_COPY_FROM_USER();
return -EINVAL;
}
@@ -3825,6 +3835,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
struct msm_cpp_frame_info32_t k32_frame_info;
struct msm_cpp_frame_info_t k64_frame_info;
uint32_t identity_k = 0;
+ bool is_copytouser_req = true;
void __user *up = (void __user *)arg;
if (sd == NULL) {
@@ -3959,9 +3970,8 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
break;
}
}
- if (copy_to_user(
- (void __user *)kp_ioctl.ioctl_ptr, &inst_info,
- sizeof(struct msm_cpp_frame_info32_t))) {
+ if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
+ &inst_info, sizeof(struct msm_cpp_frame_info32_t))) {
mutex_unlock(&cpp_dev->mutex);
return -EFAULT;
}
@@ -3997,6 +4007,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
sizeof(struct msm_cpp_stream_buff_info_t);
}
}
+ is_copytouser_req = false;
if (cmd == VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO32)
cmd = VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO;
else if (cmd == VIDIOC_MSM_CPP_DELETE_STREAM_BUFF32)
@@ -4011,6 +4022,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
get_user(identity_k, identity_u);
kp_ioctl.ioctl_ptr = (void *)&identity_k;
kp_ioctl.len = sizeof(uint32_t);
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO;
break;
}
@@ -4069,6 +4081,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
sizeof(struct msm_cpp_clock_settings_t);
}
}
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_SET_CLOCK;
break;
}
@@ -4094,6 +4107,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
kp_ioctl.ioctl_ptr = (void *)&k_queue_buf;
kp_ioctl.len = sizeof(struct msm_pproc_queue_buf_info);
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_QUEUE_BUF;
break;
}
@@ -4118,6 +4132,8 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
k64_frame_info.frame_id = k32_frame_info.frame_id;
kp_ioctl.ioctl_ptr = (void *)&k64_frame_info;
+
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_POP_STREAM_BUFFER;
break;
}
@@ -4171,13 +4187,16 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
break;
}
- up32_ioctl.id = kp_ioctl.id;
- up32_ioctl.len = kp_ioctl.len;
- up32_ioctl.trans_code = kp_ioctl.trans_code;
- up32_ioctl.ioctl_ptr = ptr_to_compat(kp_ioctl.ioctl_ptr);
+ if (is_copytouser_req) {
+ up32_ioctl.id = kp_ioctl.id;
+ up32_ioctl.len = kp_ioctl.len;
+ up32_ioctl.trans_code = kp_ioctl.trans_code;
+ up32_ioctl.ioctl_ptr = ptr_to_compat(kp_ioctl.ioctl_ptr);
- if (copy_to_user((void __user *)up, &up32_ioctl, sizeof(up32_ioctl)))
- return -EFAULT;
+ if (copy_to_user((void __user *)up, &up32_ioctl,
+ sizeof(up32_ioctl)))
+ return -EFAULT;
+ }
return rc;
}
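The is_copytouser_req flag added above exists because several compat commands repoint kp_ioctl.ioctl_ptr at an on-stack kernel object; translating that pointer back with ptr_to_compat() and copying the header to userspace would both corrupt the user's view of the header and leak a kernel address. A condensed, hedged sketch of the rule the handler now follows (not the full handler):

/* Sketch: write the translated 32-bit header back only while
 * ioctl_ptr still refers to user memory. */
if (is_copytouser_req) {
	up32_ioctl.ioctl_ptr = ptr_to_compat(kp_ioctl.ioctl_ptr);
	if (copy_to_user(up, &up32_ioctl, sizeof(up32_ioctl)))
		return -EFAULT;
}
/* else: ioctl_ptr points at an on-stack kernel object already
 * consumed by the 64-bit handler; there is nothing to hand back. */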
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 5cbdc03ced9d..9ba0b7d93616 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -122,6 +122,7 @@ struct sde_smmu_client {
struct sde_module_power mp;
struct reg_bus_client *reg_bus_clt;
bool domain_attached;
+ bool domain_reattach;
int domain;
};
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 89fde987d4c1..2c79ad7e45be 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -1599,6 +1599,7 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
u32 danger_lut = 0; /* applicable for realtime client only */
u32 safe_lut = 0; /* applicable for realtime client only */
u32 flags = 0;
+ u32 rststs = 0;
struct sde_rotation_item *item;
if (!hw || !entry) {
@@ -1616,10 +1617,46 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
return -EINVAL;
}
+ /*
+	 * If the rotator HW was reset but the PM event notification was
+	 * missed, the SW timestamp needs to be initialized here.
+ */
+ rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
+ if (!rot->reset_hw_ts && rststs) {
+ u32 l_ts, h_ts, swts;
+
+ swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
+ h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
+ l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
+ SDEROT_EVTLOG(0xbad0, rststs, swts, h_ts, l_ts);
+
+ if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY)
+ h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
+ else
+ l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
+
+		/* construct the combined timestamp */
+ swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
+ ((l_ts & SDE_REGDMA_SWTS_MASK) <<
+ SDE_REGDMA_SWTS_SHIFT);
+
+ SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts;0x%x\n",
+ swts, h_ts, l_ts);
+ SDEROT_EVTLOG(0x900d, swts, h_ts, l_ts);
+ rot->last_hw_ts = swts;
+
+ SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
+ rot->last_hw_ts);
+ SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
+ /* ensure write is issued to the rotator HW */
+ wmb();
+ }
+
if (rot->reset_hw_ts) {
SDEROT_EVTLOG(rot->last_hw_ts);
SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
rot->last_hw_ts);
+ SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
/* ensure write is issued to the rotator HW */
wmb();
rot->reset_hw_ts = false;
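The recovery block above rebuilds the combined software timestamp from the two per-queue counters with a mask-and-shift pack. A standalone illustration of the packing, assuming a 16-bit mask and shift purely for the example (the real SDE_REGDMA_SWTS_MASK/SHIFT come from the rotator headers):

#include <stdint.h>
#include <stdio.h>

#define SWTS_MASK	0xffffu		/* assumed width */
#define SWTS_SHIFT	16		/* assumed shift */

int main(void)
{
	uint32_t h_ts = 0x1234, l_ts = 0x0042;
	uint32_t swts = (h_ts & SWTS_MASK) |
			((l_ts & SWTS_MASK) << SWTS_SHIFT);

	printf("swts = 0x%08x\n", swts);	/* 0x00421234 */
	return 0;
}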
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index fedade122b88..b721ec54229d 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -277,5 +277,6 @@
#define REGDMA_INT_LOW_MASK 0x00000700
#define REGDMA_INT_ERR_MASK 0x000F0000
#define REGDMA_TIMESTAMP_REG ROT_SSPP_TPG_PATTERN_GEN_INIT_VAL
+#define REGDMA_RESET_STATUS_REG ROT_SSPP_TPG_RGB_MAPPING
#endif /*_SDE_ROTATOR_R3_HWIO_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index 43636b43eeb8..58cb160f118e 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -45,6 +45,13 @@ struct sde_smmu_domain {
unsigned long size;
};
+#ifndef CONFIG_FB_MSM_MDSS
+int mdss_smmu_request_mappings(msm_smmu_handler_t callback)
+{
+ return 0;
+}
+#endif
+
int sde_smmu_set_dma_direction(int dir)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
@@ -247,6 +254,14 @@ int sde_smmu_attach(struct sde_rot_data_type *mdata)
goto err;
}
sde_smmu->domain_attached = true;
+ if (sde_smmu->domain_reattach) {
+ SDEROT_DBG(
+ "domain[%i] re-attach\n",
+ i);
+ /* remove extra vote */
+ sde_smmu_enable_power(sde_smmu, false);
+ sde_smmu->domain_reattach = false;
+ }
SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
}
} else {
@@ -292,6 +307,12 @@ int sde_smmu_detach(struct sde_rot_data_type *mdata)
arm_iommu_detach_device(sde_smmu->dev);
SDEROT_DBG("iommu domain[%i] detached\n", i);
sde_smmu->domain_attached = false;
+
+ /*
+			 * Since we are leaving the clock vote in place,
+			 * do not vote for the clocks again on re-attach.
+ */
+ sde_smmu->domain_reattach = true;
}
else {
sde_smmu_enable_power(sde_smmu, false);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 52e3e9b5b778..60b02f28a8ff 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -3482,15 +3482,23 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
/* RED error - Fatal: requires reset */
if (mrq->cmdq_req->resp_err) {
err = mrq->cmdq_req->resp_err;
+ goto reset;
+ }
+
+ /*
+	 * TIMEOUT errors can happen because of an execution error in
+	 * the last command, so send CMD13 to get the device status.
+ */
+ if ((mrq->cmd && (mrq->cmd->error == -ETIMEDOUT)) ||
+ (mrq->data && (mrq->data->error == -ETIMEDOUT))) {
if (mmc_host_halt(host) || mmc_host_cq_disable(host)) {
ret = get_card_status(host->card, &status, 0);
if (ret)
pr_err("%s: CMD13 failed with err %d\n",
mmc_hostname(host), ret);
}
- pr_err("%s: Response error detected with device status 0x%08x\n",
+ pr_err("%s: Timeout error detected with device status 0x%08x\n",
mmc_hostname(host), status);
- goto reset;
}
/*
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 9f5c67e850d4..26e57f3c5228 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3095,7 +3095,6 @@ int mmc_resume_bus(struct mmc_host *host)
pr_debug("%s: Starting deferred resume\n", mmc_hostname(host));
spin_lock_irqsave(&host->lock, flags);
host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
- host->rescan_disable = 0;
spin_unlock_irqrestore(&host->lock, flags);
mmc_bus_get(host);
@@ -4041,6 +4040,7 @@ EXPORT_SYMBOL(mmc_detect_card_removed);
void mmc_rescan(struct work_struct *work)
{
+ unsigned long flags;
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
@@ -4049,8 +4049,12 @@ void mmc_rescan(struct work_struct *work)
host->trigger_card_event = false;
}
- if (host->rescan_disable)
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->rescan_disable) {
+ spin_unlock_irqrestore(&host->lock, flags);
return;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
/* If there is a non-removable card registered, only scan once */
if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
@@ -4297,10 +4301,6 @@ int mmc_pm_notify(struct notifier_block *notify_block,
case PM_SUSPEND_PREPARE:
case PM_RESTORE_PREPARE:
spin_lock_irqsave(&host->lock, flags);
- if (mmc_bus_needs_resume(host)) {
- spin_unlock_irqrestore(&host->lock, flags);
- break;
- }
host->rescan_disable = 1;
spin_unlock_irqrestore(&host->lock, flags);
cancel_delayed_work_sync(&host->detect);
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index a7546471e780..96d4fbf1a823 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -33,8 +33,8 @@
#define DCMD_SLOT 31
#define NUM_SLOTS 32
-/* 1 sec */
-#define HALT_TIMEOUT_MS 1000
+/* 10 sec */
+#define HALT_TIMEOUT_MS 10000
static int cmdq_halt_poll(struct mmc_host *mmc, bool halt);
static int cmdq_halt(struct mmc_host *mmc, bool halt);
@@ -197,6 +197,10 @@ static void cmdq_dump_adma_mem(struct cmdq_host *cq_host)
static void cmdq_dumpregs(struct cmdq_host *cq_host)
{
struct mmc_host *mmc = cq_host->mmc;
+ int offset = 0;
+
+ if (cq_host->offset_changed)
+ offset = CQ_V5_VENDOR_CFG;
MMC_TRACE(mmc,
"%s: 0x0C=0x%08x 0x10=0x%08x 0x14=0x%08x 0x18=0x%08x 0x28=0x%08x 0x2C=0x%08x 0x30=0x%08x 0x34=0x%08x 0x54=0x%08x 0x58=0x%08x 0x5C=0x%08x 0x48=0x%08x\n",
@@ -243,7 +247,7 @@ static void cmdq_dumpregs(struct cmdq_host *cq_host)
cmdq_readl(cq_host, CQCRI),
cmdq_readl(cq_host, CQCRA));
pr_err(DRV_NAME": Vendor cfg 0x%08x\n",
- cmdq_readl(cq_host, CQ_VENDOR_CFG));
+ cmdq_readl(cq_host, CQ_VENDOR_CFG + offset));
pr_err(DRV_NAME ": ===========================================\n");
cmdq_dump_task_history(cq_host);
@@ -384,6 +388,12 @@ static int cmdq_enable(struct mmc_host *mmc)
cq_host->caps |= CMDQ_CAP_CRYPTO_SUPPORT |
CMDQ_TASK_DESC_SZ_128;
cqcfg |= CQ_ICE_ENABLE;
+ /*
+		 * For SDHC v5.0 onwards, ICE 3.0 specific registers are
+		 * added in the CQ register space, which shifts a few CQ
+		 * registers. Set offset_changed so the updated addresses
+		 * are used.
+ */
+ cq_host->offset_changed = true;
}
cmdq_writel(cq_host, cqcfg, CQCFG);
@@ -818,14 +828,18 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
{
struct mmc_request *mrq;
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ int offset = 0;
+ if (cq_host->offset_changed)
+ offset = CQ_V5_VENDOR_CFG;
mrq = get_req_by_tag(cq_host, tag);
if (tag == cq_host->dcmd_slot)
mrq->cmd->resp[0] = cmdq_readl(cq_host, CQCRDCT);
if (mrq->cmdq_req->cmdq_req_flags & DCMD)
- cmdq_writel(cq_host, cmdq_readl(cq_host, CQ_VENDOR_CFG) |
- CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG);
+ cmdq_writel(cq_host,
+ cmdq_readl(cq_host, CQ_VENDOR_CFG + offset) |
+ CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG + offset);
cmdq_runtime_pm_put(cq_host);
if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
@@ -845,7 +859,6 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
u32 dbr_set = 0;
status = cmdq_readl(cq_host, CQIS);
- cmdq_writel(cq_host, status, CQIS);
if (!status && !err)
return IRQ_NONE;
@@ -868,6 +881,17 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
if (ret)
pr_err("%s: %s: halt failed ret=%d\n",
mmc_hostname(mmc), __func__, ret);
+
+ /*
+	 * Clear CQIS only after halting in the error case. If CQIS were
+	 * cleared before halting, the CQ would keep issuing commands for
+	 * the remaining doorbell-rung requests and overwrite the Resp Arg
+	 * register. So the CQ must be halted first and CQIS cleared
+	 * afterwards.
+ */
+ cmdq_writel(cq_host, status, CQIS);
+
cmdq_dumpregs(cq_host);
if (!err_info) {
@@ -956,13 +980,16 @@ skip_cqterri:
mrq->cmdq_req->resp_err = true;
pr_err("%s: Response error (0x%08x) from card !!!",
- mmc_hostname(mmc), status);
+ mmc_hostname(mmc), cmdq_readl(cq_host, CQCRA));
+
} else {
mrq->cmdq_req->resp_idx = cmdq_readl(cq_host, CQCRI);
mrq->cmdq_req->resp_arg = cmdq_readl(cq_host, CQCRA);
}
cmdq_finish_data(mmc, tag);
+ } else {
+ cmdq_writel(cq_host, status, CQIS);
}
if (status & CQIS_TCC) {
@@ -995,6 +1022,9 @@ skip_cqterri:
if (status & CQIS_HAC) {
if (cq_host->ops->post_cqe_halt)
cq_host->ops->post_cqe_halt(mmc);
+ /* halt done: re-enable legacy interrupts */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, false);
/* halt is completed, wakeup waiting thread */
complete(&cq_host->halt_comp);
}
@@ -1052,6 +1082,7 @@ static int cmdq_halt(struct mmc_host *mmc, bool halt)
{
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
u32 ret = 0;
+ u32 config = 0;
int retries = 3;
cmdq_runtime_pm_get(cq_host);
@@ -1061,16 +1092,31 @@ static int cmdq_halt(struct mmc_host *mmc, bool halt)
CQCTL);
ret = wait_for_completion_timeout(&cq_host->halt_comp,
msecs_to_jiffies(HALT_TIMEOUT_MS));
- if (!ret && !(cmdq_readl(cq_host, CQCTL) & HALT)) {
- retries--;
- continue;
+ if (!ret) {
+ pr_warn("%s: %s: HAC int timeout\n",
+ mmc_hostname(mmc), __func__);
+ if ((cmdq_readl(cq_host, CQCTL) & HALT)) {
+ /*
+				 * Don't retry if the CQE halted but the
+				 * irq did not fire within the timeout.
+				 * Since we are returning an error,
+				 * un-halt the CQE; as the irq never
+				 * fired, no other state needs updating.
+ */
+ retries = 0;
+ config = cmdq_readl(cq_host, CQCTL);
+ config &= ~HALT;
+ cmdq_writel(cq_host, config, CQCTL);
+ } else {
+				pr_warn("%s: %s: retrying halt (%d)\n",
+ mmc_hostname(mmc), __func__,
+ retries);
+ retries--;
+ continue;
+ }
} else {
MMC_TRACE(mmc, "%s: halt done , retries: %d\n",
__func__, retries);
- /* halt done: re-enable legacy interrupts */
- if (cq_host->ops->clear_set_irqs)
- cq_host->ops->clear_set_irqs(mmc,
- false);
break;
}
}
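Both vendor-config users above now compute the same conditional offset by hand. A hedged alternative is to resolve the register address once in a helper, so the SDHC v5 relocation lives in a single place (helper name hypothetical; the macros and field are the ones the patch adds):

/* Sketch: centralize the SDHC v5 vendor-config relocation. */
static inline u32 cq_vendor_cfg(struct cmdq_host *cq_host)
{
	return CQ_VENDOR_CFG +
	       (cq_host->offset_changed ? CQ_V5_VENDOR_CFG : 0);
}

/* usage: cmdq_readl(cq_host, cq_vendor_cfg(cq_host)); */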
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
index 05c924ae0935..6c10ab3859d1 100644
--- a/drivers/mmc/host/cmdq_hci.h
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -143,6 +143,11 @@
#define DAT_ADDR_LO(x) ((x & 0xFFFFFFFF) << 32)
#define DAT_ADDR_HI(x) ((x & 0xFFFFFFFF) << 0)
+/*
+ * Add new macro for updated CQ vendor specific
+ * register address for SDHC v5.0 onwards.
+ */
+#define CQ_V5_VENDOR_CFG 0x900
#define CQ_VENDOR_CFG 0x100
#define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
@@ -175,6 +180,7 @@ struct cmdq_host {
bool enabled;
bool halted;
bool init_done;
+ bool offset_changed;
u8 *desc_base;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 127a2602aa81..15d1eface2d4 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -310,6 +310,9 @@ void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
writel_relaxed(val, base_addr + offset);
}
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
static const u32 tuning_block_64[] = {
0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
@@ -2779,8 +2782,10 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
*/
if (done)
init_completion(&msm_host->pwr_irq_completion);
- else
- wait_for_completion(&msm_host->pwr_irq_completion);
+ else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
+ msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
+ __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
+ mmc_hostname(host->mmc), req_type);
pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
__func__, req_type);
@@ -3284,6 +3289,8 @@ static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
/* registers offset changed starting from 4.2.0 */
int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
+ if (cq_host->offset_changed)
+ offset += CQ_V5_VENDOR_CFG;
pr_err("---- Debug RAM dump ----\n");
pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
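The pwr_irq change above swaps an unbounded wait_for_completion() for wait_for_completion_timeout(), which returns the remaining jiffies on success and zero on timeout. A hedged sketch of the resulting pattern, with pr_warn() standing in for the patch's __WARN_printf():

/* Sketch: bound the wait and surface a timeout instead of hanging. */
if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
				 msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
	pr_warn("%s: request %d timed out waiting for pwr_irq\n",
		mmc_hostname(host->mmc), req_type);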
diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
index a55a26be4273..50d8e72a96c8 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_mhi.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
@@ -625,6 +625,9 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
tx_ring_full_count[rmnet_mhi_ptr->dev_index]++;
netif_stop_queue(dev);
rmnet_log(MSG_VERBOSE, "Stopping Queue\n");
+ write_unlock_irqrestore(
+ &rmnet_mhi_ptr->out_chan_full_lock,
+ flags);
goto rmnet_mhi_xmit_error_cleanup;
} else {
retry = 1;
@@ -652,7 +655,6 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
rmnet_mhi_xmit_error_cleanup:
rmnet_log(MSG_VERBOSE, "Ring full\n");
- write_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, flags);
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fa76ca128e1b..e5bb870b5461 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2390,8 +2390,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
- put_net(pch->chan_net);
- pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index e5213de8a686..f1ead7c28823 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -90,6 +90,20 @@ static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
return ar->bus_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}
+static inline void ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ar->bus_write32(ar, shadow_sr_wr_ind_addr(ar, ce_ctrl_addr), n);
+}
+
+static inline void ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ar->bus_write32(ar, shadow_dst_wr_ind_addr(ar, ce_ctrl_addr), n);
+}
+
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int addr)
@@ -259,6 +273,72 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
ar->bus_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}
+u32 shadow_sr_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr)
+{
+ u32 addr = 0;
+ u32 ce = COPY_ENGINE_ID(ctrl_addr);
+
+ switch (ce) {
+ case 0:
+ addr = SHADOW_VALUE0;
+ break;
+ case 3:
+ addr = SHADOW_VALUE3;
+ break;
+ case 4:
+ addr = SHADOW_VALUE4;
+ break;
+ case 5:
+ addr = SHADOW_VALUE5;
+ break;
+ case 7:
+ addr = SHADOW_VALUE7;
+ break;
+ default:
+ ath10k_err(ar, "invalid CE ctrl_addr (CE=%d)", ce);
+ WARN_ON(1);
+ }
+ return addr;
+}
+
+u32 shadow_dst_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr)
+{
+ u32 addr = 0;
+ u32 ce = COPY_ENGINE_ID(ctrl_addr);
+
+ switch (ce) {
+ case 1:
+ addr = SHADOW_VALUE13;
+ break;
+ case 2:
+ addr = SHADOW_VALUE14;
+ break;
+ case 5:
+ addr = SHADOW_VALUE17;
+ break;
+ case 7:
+ addr = SHADOW_VALUE19;
+ break;
+ case 8:
+ addr = SHADOW_VALUE20;
+ break;
+ case 9:
+ addr = SHADOW_VALUE21;
+ break;
+ case 10:
+ addr = SHADOW_VALUE22;
+ break;
+ case 11:
+ addr = SHADOW_VALUE23;
+ break;
+ default:
+ ath10k_err(ar, "invalid CE ctrl_addr (CE=%d)", ce);
+ WARN_ON(1);
+ }
+
+ return addr;
+}
+
/*
* Guts of ath10k_ce_send, used by both ath10k_ce_send and
* ath10k_ce_sendlist_send.
@@ -325,8 +405,14 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
/* WORKAROUND */
- if (!(flags & CE_SEND_FLAG_GATHER))
- ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+ if (!(flags & CE_SEND_FLAG_GATHER)) {
+ if (QCA_REV_WCN3990(ar))
+ ath10k_ce_shadow_src_ring_write_index_set(ar, ctrl_addr,
+ write_index);
+ else
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
+ write_index);
+ }
src_ring->write_index = write_index;
exit:
@@ -957,6 +1043,24 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
src_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
+ src_ring->shadow_base_unaligned = kzalloc(
+ nentries * sizeof(struct ce_desc),
+ GFP_KERNEL);
+
+ if (!src_ring->shadow_base_unaligned) {
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space_unaligned,
+ base_addr);
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ src_ring->shadow_base = (struct ce_desc *)PTR_ALIGN(
+ src_ring->shadow_base_unaligned,
+ CE_DESC_RING_ALIGN);
+
return src_ring;
}
@@ -1135,6 +1239,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
((struct ath10k_ce_pipe *)ar->ce_states + ce_id);
if (ce_state->src_ring) {
+ kfree(ce_state->src_ring->shadow_base_unaligned);
dma_free_coherent(ar->dev,
(ce_state->src_ring->nentries *
sizeof(struct ce_desc) +
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 1b49db14d387..160a13e681df 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -120,6 +120,9 @@ struct ath10k_ce_ring {
/* CE address space */
u32 base_addr_ce_space;
+ char *shadow_base_unaligned;
+ struct ce_desc *shadow_base;
+
/* keep last */
void *per_transfer_context[0];
};
@@ -143,6 +146,61 @@ struct ath10k_ce_pipe {
/* Copy Engine settable attributes */
struct ce_attr;
+#define SHADOW_VALUE0 (ar->shadow_reg_value->shadow_reg_value_0)
+#define SHADOW_VALUE1 (ar->shadow_reg_value->shadow_reg_value_1)
+#define SHADOW_VALUE2 (ar->shadow_reg_value->shadow_reg_value_2)
+#define SHADOW_VALUE3 (ar->shadow_reg_value->shadow_reg_value_3)
+#define SHADOW_VALUE4 (ar->shadow_reg_value->shadow_reg_value_4)
+#define SHADOW_VALUE5 (ar->shadow_reg_value->shadow_reg_value_5)
+#define SHADOW_VALUE6 (ar->shadow_reg_value->shadow_reg_value_6)
+#define SHADOW_VALUE7 (ar->shadow_reg_value->shadow_reg_value_7)
+#define SHADOW_VALUE8 (ar->shadow_reg_value->shadow_reg_value_8)
+#define SHADOW_VALUE9 (ar->shadow_reg_value->shadow_reg_value_9)
+#define SHADOW_VALUE10 (ar->shadow_reg_value->shadow_reg_value_10)
+#define SHADOW_VALUE11 (ar->shadow_reg_value->shadow_reg_value_11)
+#define SHADOW_VALUE12 (ar->shadow_reg_value->shadow_reg_value_12)
+#define SHADOW_VALUE13 (ar->shadow_reg_value->shadow_reg_value_13)
+#define SHADOW_VALUE14 (ar->shadow_reg_value->shadow_reg_value_14)
+#define SHADOW_VALUE15 (ar->shadow_reg_value->shadow_reg_value_15)
+#define SHADOW_VALUE16 (ar->shadow_reg_value->shadow_reg_value_16)
+#define SHADOW_VALUE17 (ar->shadow_reg_value->shadow_reg_value_17)
+#define SHADOW_VALUE18 (ar->shadow_reg_value->shadow_reg_value_18)
+#define SHADOW_VALUE19 (ar->shadow_reg_value->shadow_reg_value_19)
+#define SHADOW_VALUE20 (ar->shadow_reg_value->shadow_reg_value_20)
+#define SHADOW_VALUE21 (ar->shadow_reg_value->shadow_reg_value_21)
+#define SHADOW_VALUE22 (ar->shadow_reg_value->shadow_reg_value_22)
+#define SHADOW_VALUE23 (ar->shadow_reg_value->shadow_reg_value_23)
+#define SHADOW_ADDRESS0 (ar->shadow_reg_address->shadow_reg_address_0)
+#define SHADOW_ADDRESS1 (ar->shadow_reg_address->shadow_reg_address_1)
+#define SHADOW_ADDRESS2 (ar->shadow_reg_address->shadow_reg_address_2)
+#define SHADOW_ADDRESS3 (ar->shadow_reg_address->shadow_reg_address_3)
+#define SHADOW_ADDRESS4 (ar->shadow_reg_address->shadow_reg_address_4)
+#define SHADOW_ADDRESS5 (ar->shadow_reg_address->shadow_reg_address_5)
+#define SHADOW_ADDRESS6 (ar->shadow_reg_address->shadow_reg_address_6)
+#define SHADOW_ADDRESS7 (ar->shadow_reg_address->shadow_reg_address_7)
+#define SHADOW_ADDRESS8 (ar->shadow_reg_address->shadow_reg_address_8)
+#define SHADOW_ADDRESS9 (ar->shadow_reg_address->shadow_reg_address_9)
+#define SHADOW_ADDRESS10 (ar->shadow_reg_address->shadow_reg_address_10)
+#define SHADOW_ADDRESS11 (ar->shadow_reg_address->shadow_reg_address_11)
+#define SHADOW_ADDRESS12 (ar->shadow_reg_address->shadow_reg_address_12)
+#define SHADOW_ADDRESS13 (ar->shadow_reg_address->shadow_reg_address_13)
+#define SHADOW_ADDRESS14 (ar->shadow_reg_address->shadow_reg_address_14)
+#define SHADOW_ADDRESS15 (ar->shadow_reg_address->shadow_reg_address_15)
+#define SHADOW_ADDRESS16 (ar->shadow_reg_address->shadow_reg_address_16)
+#define SHADOW_ADDRESS17 (ar->shadow_reg_address->shadow_reg_address_17)
+#define SHADOW_ADDRESS18 (ar->shadow_reg_address->shadow_reg_address_18)
+#define SHADOW_ADDRESS19 (ar->shadow_reg_address->shadow_reg_address_19)
+#define SHADOW_ADDRESS20 (ar->shadow_reg_address->shadow_reg_address_20)
+#define SHADOW_ADDRESS21 (ar->shadow_reg_address->shadow_reg_address_21)
+#define SHADOW_ADDRESS22 (ar->shadow_reg_address->shadow_reg_address_22)
+#define SHADOW_ADDRESS23 (ar->shadow_reg_address->shadow_reg_address_23)
+
+#define SHADOW_ADDRESS(i) (SHADOW_ADDRESS0 + \
+ i * (SHADOW_ADDRESS1 - SHADOW_ADDRESS0))
+
+u32 shadow_sr_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr);
+u32 shadow_dst_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr);
+
/*==================Send====================*/
/* ath10k_ce_send flags */
@@ -591,6 +649,9 @@ struct ce_attr {
& (uint64_t)(0xF00000000)) >> 32))
#endif
+#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) ((COPY_ENGINE_BASE_ADDRESS \
+ - CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS))
+
static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
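COPY_ENGINE_ID() above is the exact inverse of ath10k_ce_base_address(): it divides the ctrl_addr offset from CE0 by the per-engine stride to recover the engine number. A standalone worked example with hypothetical base addresses (the real CE0/CE1 addresses come from the hw register headers):

#include <stdint.h>
#include <stdio.h>

#define CE0_BASE	0x4a000u	/* hypothetical */
#define CE_STRIDE	0x1000u		/* hypothetical CE1 - CE0 */

int main(void)
{
	uint32_t ctrl_addr = CE0_BASE + 3 * CE_STRIDE;
	uint32_t ce = (ctrl_addr - CE0_BASE) / CE_STRIDE;

	printf("ctrl_addr 0x%x -> CE%u\n", ctrl_addr, ce);	/* CE3 */
	return 0;
}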
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 0803a963da3c..052ebd7dd26b 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -2350,6 +2350,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
/* WCN3990 chip set is non bmi based */
ar->is_bmi = false;
ar->fw_flags = &wcn3990_fw_flags;
+ ar->shadow_reg_value = &wcn3990_shadow_reg_value;
+ ar->shadow_reg_address = &wcn3990_shadow_reg_address;
break;
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index f2f0338696e0..bb2c5fb9a125 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -741,6 +741,8 @@ struct ath10k {
const struct ath10k_hw_regs *regs;
const struct ath10k_hw_values *hw_values;
+ struct ath10k_shadow_reg_value *shadow_reg_value;
+ struct ath10k_shadow_reg_address *shadow_reg_address;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;
struct ath10k_htc htc;
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 0cd1068b0beb..1a8f3a388ce2 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -220,6 +220,60 @@ struct fw_flag wcn3990_fw_flags = {
.flags = 0x82E,
};
+struct ath10k_shadow_reg_value wcn3990_shadow_reg_value = {
+ .shadow_reg_value_0 = 0x00032000,
+ .shadow_reg_value_1 = 0x00032004,
+ .shadow_reg_value_2 = 0x00032008,
+ .shadow_reg_value_3 = 0x0003200C,
+ .shadow_reg_value_4 = 0x00032010,
+ .shadow_reg_value_5 = 0x00032014,
+ .shadow_reg_value_6 = 0x00032018,
+ .shadow_reg_value_7 = 0x0003201C,
+ .shadow_reg_value_8 = 0x00032020,
+ .shadow_reg_value_9 = 0x00032024,
+ .shadow_reg_value_10 = 0x00032028,
+ .shadow_reg_value_11 = 0x0003202C,
+ .shadow_reg_value_12 = 0x00032030,
+ .shadow_reg_value_13 = 0x00032034,
+ .shadow_reg_value_14 = 0x00032038,
+ .shadow_reg_value_15 = 0x0003203C,
+ .shadow_reg_value_16 = 0x00032040,
+ .shadow_reg_value_17 = 0x00032044,
+ .shadow_reg_value_18 = 0x00032048,
+ .shadow_reg_value_19 = 0x0003204C,
+ .shadow_reg_value_20 = 0x00032050,
+ .shadow_reg_value_21 = 0x00032054,
+ .shadow_reg_value_22 = 0x00032058,
+ .shadow_reg_value_23 = 0x0003205C
+};
+
+struct ath10k_shadow_reg_address wcn3990_shadow_reg_address = {
+ .shadow_reg_address_0 = 0x00030020,
+ .shadow_reg_address_1 = 0x00030024,
+ .shadow_reg_address_2 = 0x00030028,
+ .shadow_reg_address_3 = 0x0003002C,
+ .shadow_reg_address_4 = 0x00030030,
+ .shadow_reg_address_5 = 0x00030034,
+ .shadow_reg_address_6 = 0x00030038,
+ .shadow_reg_address_7 = 0x0003003C,
+ .shadow_reg_address_8 = 0x00030040,
+ .shadow_reg_address_9 = 0x00030044,
+ .shadow_reg_address_10 = 0x00030048,
+ .shadow_reg_address_11 = 0x0003004C,
+ .shadow_reg_address_12 = 0x00030050,
+ .shadow_reg_address_13 = 0x00030054,
+ .shadow_reg_address_14 = 0x00030058,
+ .shadow_reg_address_15 = 0x0003005C,
+ .shadow_reg_address_16 = 0x00030060,
+ .shadow_reg_address_17 = 0x00030064,
+ .shadow_reg_address_18 = 0x00030068,
+ .shadow_reg_address_19 = 0x0003006C,
+ .shadow_reg_address_20 = 0x00030070,
+ .shadow_reg_address_21 = 0x00030074,
+ .shadow_reg_address_22 = 0x00030078,
+ .shadow_reg_address_23 = 0x0003007C
+};
+
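Both tables advance by exactly 4 bytes per entry (0x00032000..0x0003205C and 0x00030020..0x0003007C), i.e. one 32-bit register apart, which is the stride SHADOW_ADDRESS(i) relies on. A hedged sanity-check sketch (illustrative only; it treats the struct as a flat u32 array, assuming no padding between consecutive u32 fields):

	/* Sketch only: verify the address table really steps by sizeof(u32). */
	static void wcn3990_check_shadow_stride(void)
	{
		const u32 *a = (const u32 *)&wcn3990_shadow_reg_address;
		int i;

		for (i = 0; i < 23; i++)
			WARN_ON(a[i + 1] - a[i] != sizeof(u32));
	}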
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
{
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index b59bde40714c..ce87f8112928 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -835,4 +835,61 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
+struct ath10k_shadow_reg_value {
+ u32 shadow_reg_value_0;
+ u32 shadow_reg_value_1;
+ u32 shadow_reg_value_2;
+ u32 shadow_reg_value_3;
+ u32 shadow_reg_value_4;
+ u32 shadow_reg_value_5;
+ u32 shadow_reg_value_6;
+ u32 shadow_reg_value_7;
+ u32 shadow_reg_value_8;
+ u32 shadow_reg_value_9;
+ u32 shadow_reg_value_10;
+ u32 shadow_reg_value_11;
+ u32 shadow_reg_value_12;
+ u32 shadow_reg_value_13;
+ u32 shadow_reg_value_14;
+ u32 shadow_reg_value_15;
+ u32 shadow_reg_value_16;
+ u32 shadow_reg_value_17;
+ u32 shadow_reg_value_18;
+ u32 shadow_reg_value_19;
+ u32 shadow_reg_value_20;
+ u32 shadow_reg_value_21;
+ u32 shadow_reg_value_22;
+ u32 shadow_reg_value_23;
+};
+
+struct ath10k_shadow_reg_address {
+ u32 shadow_reg_address_0;
+ u32 shadow_reg_address_1;
+ u32 shadow_reg_address_2;
+ u32 shadow_reg_address_3;
+ u32 shadow_reg_address_4;
+ u32 shadow_reg_address_5;
+ u32 shadow_reg_address_6;
+ u32 shadow_reg_address_7;
+ u32 shadow_reg_address_8;
+ u32 shadow_reg_address_9;
+ u32 shadow_reg_address_10;
+ u32 shadow_reg_address_11;
+ u32 shadow_reg_address_12;
+ u32 shadow_reg_address_13;
+ u32 shadow_reg_address_14;
+ u32 shadow_reg_address_15;
+ u32 shadow_reg_address_16;
+ u32 shadow_reg_address_17;
+ u32 shadow_reg_address_18;
+ u32 shadow_reg_address_19;
+ u32 shadow_reg_address_20;
+ u32 shadow_reg_address_21;
+ u32 shadow_reg_address_22;
+ u32 shadow_reg_address_23;
+};
+
+extern struct ath10k_shadow_reg_value wcn3990_shadow_reg_value;
+extern struct ath10k_shadow_reg_address wcn3990_shadow_reg_address;
+
#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index be8ec97574f0..6c8797d5e5fc 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -395,10 +395,23 @@ static struct service_to_pipe target_service_to_ce_map_wlan[] = {
},
};
-#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
-#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
-
-static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = { };
+#define WCN3990_SRC_WR_INDEX_OFFSET 0x3C
+#define WCN3990_DST_WR_INDEX_OFFSET 0x40
+
+static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
+ { 0, WCN3990_SRC_WR_INDEX_OFFSET},
+ { 3, WCN3990_SRC_WR_INDEX_OFFSET},
+ { 4, WCN3990_SRC_WR_INDEX_OFFSET},
+ { 5, WCN3990_SRC_WR_INDEX_OFFSET},
+ { 7, WCN3990_SRC_WR_INDEX_OFFSET},
+ { 1, WCN3990_DST_WR_INDEX_OFFSET},
+ { 2, WCN3990_DST_WR_INDEX_OFFSET},
+ { 7, WCN3990_DST_WR_INDEX_OFFSET},
+ { 8, WCN3990_DST_WR_INDEX_OFFSET},
+ { 9, WCN3990_DST_WR_INDEX_OFFSET},
+ { 10, WCN3990_DST_WR_INDEX_OFFSET},
+ { 11, WCN3990_DST_WR_INDEX_OFFSET},
+};
void ath10k_snoc_write32(void *ar, u32 offset, u32 value)
{
@@ -1048,7 +1061,8 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar)
sizeof(struct ce_svc_pipe_cfg);
cfg.ce_svc_cfg = (struct ce_svc_pipe_cfg *)
&target_service_to_ce_map_wlan;
- cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map);
+ cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
+ sizeof(struct icnss_shadow_reg_cfg);
cfg.shadow_reg_cfg = (struct icnss_shadow_reg_cfg *)
&target_shadow_reg_cfg_map;
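The fix above turns a byte count into an element count by dividing by the element size; note the array is declared as struct ath10k_shadow_reg_cfg but divided by sizeof(struct icnss_shadow_reg_cfg), which presumes the two layouts match. The kernel's ARRAY_SIZE() macro expresses the same count without naming a type at all (hedged alternative, not what the patch does):

	cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);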
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index a0f76581e6eb..4874c5ba1e61 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -5,6 +5,7 @@ wil6210-y += netdev.o
wil6210-y += cfg80211.o
wil6210-y += pcie_bus.o
wil6210-y += debugfs.o
+wil6210-y += sysfs.o
wil6210-y += wmi.o
wil6210-y += interrupt.o
wil6210-y += txrx.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index ca06394c48bb..210ea654c83f 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -22,7 +22,7 @@
#define WIL_MAX_ROC_DURATION_MS 5000
bool disable_ap_sme;
-module_param(disable_ap_sme, bool, S_IRUGO);
+module_param(disable_ap_sme, bool, 0444);
MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
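This and the following hunks replace symbolic mode macros with octal literals, the form checkpatch prefers. For reference, the standard <linux/stat.h> equivalences behind the conversions:

	/* S_IRUGO           = S_IRUSR | S_IRGRP | S_IROTH = 0444 (world-readable)
	 * S_IWUSR           = 0200 (owner-writable)
	 * S_IRUGO | S_IWUSR = 0644
	 */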
#define CHAN60G(_channel, _flags) { \
@@ -290,7 +290,7 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy,
int cid = wil_find_cid(wil, mac);
- wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+ wil_dbg_misc(wil, "get_station: %pM CID %d\n", mac, cid);
if (cid < 0)
return cid;
@@ -329,7 +329,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
return -ENOENT;
ether_addr_copy(mac, wil->sta[cid].addr);
- wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+ wil_dbg_misc(wil, "dump_station: %pM CID %d\n", mac, cid);
rc = wil_cid_fill_sinfo(wil, cid, sinfo);
@@ -346,16 +346,15 @@ wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *p2p_wdev;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "add_iface\n");
if (type != NL80211_IFTYPE_P2P_DEVICE) {
- wil_err(wil, "%s: unsupported iftype %d\n", __func__, type);
+ wil_err(wil, "unsupported iftype %d\n", type);
return ERR_PTR(-EINVAL);
}
if (wil->p2p_wdev) {
- wil_err(wil, "%s: P2P_DEVICE interface already created\n",
- __func__);
+ wil_err(wil, "P2P_DEVICE interface already created\n");
return ERR_PTR(-EINVAL);
}
@@ -378,11 +377,10 @@ static int wil_cfg80211_del_iface(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "del_iface\n");
if (wdev != wil->p2p_wdev) {
- wil_err(wil, "%s: delete of incorrect interface 0x%p\n",
- __func__, wdev);
+ wil_err(wil, "delete of incorrect interface 0x%p\n", wdev);
return -EINVAL;
}
@@ -400,7 +398,7 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy,
struct wireless_dev *wdev = wil_to_wdev(wil);
int rc;
- wil_dbg_misc(wil, "%s() type=%d\n", __func__, type);
+ wil_dbg_misc(wil, "change_iface: type=%d\n", type);
if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) {
wil_dbg_misc(wil, "interface is up. resetting...\n");
@@ -447,8 +445,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
uint i, n;
int rc;
- wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
- __func__, wdev, wdev->iftype);
+ wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
/* check we are client side */
switch (wdev->iftype) {
@@ -653,7 +650,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
int rc = 0;
enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "connect\n");
wil_print_connect_params(wil, sme);
if (test_bit(wil_status_fwconnecting, wil->status) ||
@@ -689,6 +686,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
goto out;
}
wil->privacy = sme->privacy;
+ wil->pbss = sme->pbss;
if (wil->privacy) {
/* For secure assoc, remove old keys */
@@ -786,12 +784,11 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
+ wil_dbg_misc(wil, "disconnect: reason=%d\n", reason_code);
if (!(test_bit(wil_status_fwconnecting, wil->status) ||
test_bit(wil_status_fwconnected, wil->status))) {
- wil_err(wil, "%s: Disconnect was called while disconnected\n",
- __func__);
+ wil_err(wil, "Disconnect was called while disconnected\n");
return 0;
}
@@ -799,7 +796,7 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
WMI_DISCONNECT_EVENTID, NULL, 0,
WIL6210_DISCONNECT_TO_MS);
if (rc)
- wil_err(wil, "%s: disconnect error %d\n", __func__, rc);
+ wil_err(wil, "disconnect error %d\n", rc);
return rc;
}
@@ -847,7 +844,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
* different from currently "listened" channel and fail if it is.
*/
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "mgmt_tx\n");
print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
@@ -908,7 +905,7 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
break;
}
}
- wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]);
+ wil_dbg_misc(wil, "detect_key_usage: -> %s\n", key_usage_str[rc]);
return rc;
}
@@ -1013,13 +1010,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
return -EINVAL;
}
- wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
+ wil_dbg_misc(wil, "add_key: %pM %s[%d] PN %*phN\n",
mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
if (IS_ERR(cs)) {
- wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
- __func__, mac_addr, key_usage_str[key_usage], key_index,
+ wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n",
+ mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
}
@@ -1028,8 +1025,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
- "Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n",
- params->seq_len, __func__, mac_addr,
+ "Wrong PN len %d, %pM %s[%d] PN %*phN\n",
+ params->seq_len, mac_addr,
key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
@@ -1053,11 +1050,11 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
mac_addr);
- wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
+ wil_dbg_misc(wil, "del_key: %pM %s[%d]\n", mac_addr,
key_usage_str[key_usage], key_index);
if (IS_ERR(cs))
- wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
+ wil_info(wil, "Not connected, %pM %s[%d]\n",
mac_addr, key_usage_str[key_usage], key_index);
if (!IS_ERR_OR_NULL(cs))
@@ -1074,7 +1071,7 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s: entered\n", __func__);
+ wil_dbg_misc(wil, "set_default_key: entered\n");
return 0;
}
@@ -1087,8 +1084,9 @@ static int wil_remain_on_channel(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
- wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
- __func__, chan->center_freq, duration, wdev->iftype);
+ wil_dbg_misc(wil,
+ "remain_on_channel: center_freq=%d, duration=%d iftype=%d\n",
+ chan->center_freq, duration, wdev->iftype);
rc = wil_p2p_listen(wil, wdev, duration, chan, cookie);
return rc;
@@ -1100,7 +1098,7 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "cancel_remain_on_channel\n");
return wil_p2p_cancel_listen(wil, cookie);
}
@@ -1256,9 +1254,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
if (pbss)
wmi_nettype = WMI_NETTYPE_P2P;
- wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go);
+ wil_dbg_misc(wil, "start_ap: is_go=%d\n", is_go);
if (is_go && !pbss) {
- wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__);
+ wil_err(wil, "P2P GO must be in PBSS\n");
return -ENOTSUPP;
}
@@ -1315,7 +1313,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
int rc;
u32 privacy = 0;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "change_beacon\n");
wil_print_bcon_data(bcon);
if (bcon->tail &&
@@ -1354,7 +1352,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct cfg80211_crypto_settings *crypto = &info->crypto;
u8 hidden_ssid;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "start_ap\n");
if (!channel) {
wil_err(wil, "AP: No channel???\n");
@@ -1405,7 +1403,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "stop_ap\n");
netif_carrier_off(ndev);
wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
@@ -1450,7 +1448,7 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac,
+ wil_dbg_misc(wil, "del_station: %pM, reason=%d\n", params->mac,
params->reason_code);
mutex_lock(&wil->mutex);
@@ -1555,7 +1553,7 @@ void wil_probe_client_flush(struct wil6210_priv *wil)
{
struct wil_probe_client_req *req, *t;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "probe_client_flush\n");
mutex_lock(&wil->probe_client_mutex);
@@ -1575,7 +1573,7 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy,
struct wil_probe_client_req *req;
int cid = wil_find_cid(wil, peer);
- wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid);
+ wil_dbg_misc(wil, "probe_client: %pM => CID %d\n", peer, cid);
if (cid < 0)
return -ENOLINK;
@@ -1603,7 +1601,7 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
if (params->ap_isolate >= 0) {
- wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__,
+ wil_dbg_misc(wil, "change_bss: ap_isolate %d => %d\n",
wil->ap_isolate, params->ap_isolate);
wil->ap_isolate = params->ap_isolate;
}
@@ -1616,7 +1614,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s: entered\n", __func__);
+ wil_dbg_misc(wil, "start_p2p_device: entered\n");
wil->p2p.p2p_dev_started = 1;
return 0;
}
@@ -1630,7 +1628,7 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
if (!p2p->p2p_dev_started)
return;
- wil_dbg_misc(wil, "%s: entered\n", __func__);
+ wil_dbg_misc(wil, "stop_p2p_device: entered\n");
mutex_lock(&wil->mutex);
mutex_lock(&wil->p2p_wdev_mutex);
wil_p2p_stop_radio_operations(wil);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 865bb11e2a07..dc22a29349cb 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -364,13 +364,13 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
}
static const struct dbg_off isr_off[] = {
- {"ICC", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICC), doff_io32},
- {"ICR", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICR), doff_io32},
- {"ICM", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICM), doff_io32},
- {"ICS", S_IWUSR, offsetof(struct RGF_ICR, ICS), doff_io32},
- {"IMV", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, IMV), doff_io32},
- {"IMS", S_IWUSR, offsetof(struct RGF_ICR, IMS), doff_io32},
- {"IMC", S_IWUSR, offsetof(struct RGF_ICR, IMC), doff_io32},
+ {"ICC", 0644, offsetof(struct RGF_ICR, ICC), doff_io32},
+ {"ICR", 0644, offsetof(struct RGF_ICR, ICR), doff_io32},
+ {"ICM", 0644, offsetof(struct RGF_ICR, ICM), doff_io32},
+ {"ICS", 0244, offsetof(struct RGF_ICR, ICS), doff_io32},
+ {"IMV", 0644, offsetof(struct RGF_ICR, IMV), doff_io32},
+ {"IMS", 0244, offsetof(struct RGF_ICR, IMS), doff_io32},
+ {"IMC", 0244, offsetof(struct RGF_ICR, IMC), doff_io32},
{},
};
@@ -390,9 +390,9 @@ static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
}
static const struct dbg_off pseudo_isr_off[] = {
- {"CAUSE", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32},
- {"MASK_SW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32},
- {"MASK_FW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32},
+ {"CAUSE", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32},
+ {"MASK_SW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32},
+ {"MASK_FW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32},
{},
};
@@ -411,40 +411,40 @@ static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
}
static const struct dbg_off lgc_itr_cnt_off[] = {
- {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
- {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
- {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
{},
};
static const struct dbg_off tx_itr_cnt_off[] = {
- {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
doff_io32},
- {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
doff_io32},
- {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
doff_io32},
- {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
+ {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
doff_io32},
- {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
+ {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
doff_io32},
- {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
+ {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
doff_io32},
{},
};
static const struct dbg_off rx_itr_cnt_off[] = {
- {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
doff_io32},
- {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
doff_io32},
- {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
doff_io32},
- {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
+ {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
doff_io32},
- {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
+ {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
doff_io32},
- {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
+ {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
doff_io32},
{},
};
@@ -819,7 +819,7 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
kfree(frame);
- wil_info(wil, "%s() -> %d\n", __func__, rc);
+ wil_info(wil, "-> %d\n", rc);
return len;
}
@@ -861,7 +861,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
rc1 = wmi_send(wil, cmdid, cmd, cmdlen);
kfree(wmi);
- wil_info(wil, "%s(0x%04x[%d]) -> %d\n", __func__, cmdid, cmdlen, rc1);
+ wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
return rc;
}
@@ -1385,6 +1385,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
+ u8 aid = 0;
switch (p->status) {
case wil_sta_unused:
@@ -1395,9 +1396,10 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
break;
case wil_sta_connected:
status = "connected";
+ aid = p->aid;
break;
}
- seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
+ seq_printf(s, "[%d] %pM %s AID %d\n", i, p->addr, status, aid);
if (p->status == wil_sta_connected) {
spin_lock_bh(&p->tid_rx_lock);
@@ -1628,7 +1630,7 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
blob->size = map->to - map->from;
snprintf(name, sizeof(name), "blob_%s", map->name);
- wil_debugfs_create_ioblob(name, S_IRUGO, dbg, wil_blob);
+ wil_debugfs_create_ioblob(name, 0444, dbg, wil_blob);
}
}
@@ -1638,29 +1640,29 @@ static const struct {
umode_t mode;
const struct file_operations *fops;
} dbg_files[] = {
- {"mbox", S_IRUGO, &fops_mbox},
- {"vrings", S_IRUGO, &fops_vring},
- {"stations", S_IRUGO, &fops_sta},
- {"desc", S_IRUGO, &fops_txdesc},
- {"bf", S_IRUGO, &fops_bf},
- {"ssid", S_IRUGO | S_IWUSR, &fops_ssid},
- {"mem_val", S_IRUGO, &fops_memread},
- {"reset", S_IWUSR, &fops_reset},
- {"rxon", S_IWUSR, &fops_rxon},
- {"tx_mgmt", S_IWUSR, &fops_txmgmt},
- {"wmi_send", S_IWUSR, &fops_wmi},
- {"back", S_IRUGO | S_IWUSR, &fops_back},
- {"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg},
- {"pmcdata", S_IRUGO, &fops_pmcdata},
- {"temp", S_IRUGO, &fops_temp},
- {"freq", S_IRUGO, &fops_freq},
- {"link", S_IRUGO, &fops_link},
- {"info", S_IRUGO, &fops_info},
- {"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
- {"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg},
- {"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time},
- {"fw_capabilities", S_IRUGO, &fops_fw_capabilities},
- {"fw_version", S_IRUGO, &fops_fw_version},
+ {"mbox", 0444, &fops_mbox},
+ {"vrings", 0444, &fops_vring},
+ {"stations", 0444, &fops_sta},
+ {"desc", 0444, &fops_txdesc},
+ {"bf", 0444, &fops_bf},
+ {"ssid", 0644, &fops_ssid},
+ {"mem_val", 0644, &fops_memread},
+ {"reset", 0244, &fops_reset},
+ {"rxon", 0244, &fops_rxon},
+ {"tx_mgmt", 0244, &fops_txmgmt},
+ {"wmi_send", 0244, &fops_wmi},
+ {"back", 0644, &fops_back},
+ {"pmccfg", 0644, &fops_pmccfg},
+ {"pmcdata", 0444, &fops_pmcdata},
+ {"temp", 0444, &fops_temp},
+ {"freq", 0444, &fops_freq},
+ {"link", 0444, &fops_link},
+ {"info", 0444, &fops_info},
+ {"recovery", 0644, &fops_recovery},
+ {"led_cfg", 0644, &fops_led_cfg},
+ {"led_blink_time", 0644, &fops_led_blink_time},
+ {"fw_capabilities", 0444, &fops_fw_capabilities},
+ {"fw_version", 0444, &fops_fw_version},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1699,31 +1701,32 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
/* fields in struct wil6210_priv */
static const struct dbg_off dbg_wil_off[] = {
- WIL_FIELD(privacy, S_IRUGO, doff_u32),
- WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
- WIL_FIELD(hw_version, S_IRUGO, doff_x32),
- WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
- WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
- WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8),
- WIL_FIELD(chip_revision, S_IRUGO, doff_u8),
+ WIL_FIELD(privacy, 0444, doff_u32),
+ WIL_FIELD(status[0], 0644, doff_ulong),
+ WIL_FIELD(hw_version, 0444, doff_x32),
+ WIL_FIELD(recovery_count, 0444, doff_u32),
+ WIL_FIELD(ap_isolate, 0444, doff_u32),
+ WIL_FIELD(discovery_mode, 0644, doff_u8),
+ WIL_FIELD(chip_revision, 0444, doff_u8),
+ WIL_FIELD(abft_len, 0644, doff_u8),
{},
};
static const struct dbg_off dbg_wil_regs[] = {
- {"RGF_MAC_MTRL_COUNTER_0", S_IRUGO, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
+ {"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
doff_io32},
- {"RGF_USER_USAGE_1", S_IRUGO, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
+ {"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
{},
};
/* static parameters */
static const struct dbg_off dbg_statics[] = {
- {"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32},
- {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32},
- {"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
- {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
+ {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32},
+ {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32},
+ {"mem_addr", 0644, (ulong)&mem_addr, doff_u32},
+ {"vring_idle_trsh", 0644, (ulong)&vring_idle_trsh,
doff_u32},
- {"led_polarity", S_IRUGO | S_IWUSR, (ulong)&led_polarity, doff_u8},
+ {"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
{},
};
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index 7053b62ca8d3..adcfef4dabf7 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,7 +27,7 @@ static int wil_ethtoolops_begin(struct net_device *ndev)
mutex_lock(&wil->mutex);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "ethtoolops_begin\n");
return 0;
}
@@ -36,7 +36,7 @@ static void wil_ethtoolops_complete(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "ethtoolops_complete\n");
mutex_unlock(&wil->mutex);
}
@@ -48,7 +48,7 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
u32 tx_itr_en, tx_itr_val = 0;
u32 rx_itr_en, rx_itr_val = 0;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
@@ -68,7 +68,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s(rx %d usec, tx %d usec)\n", __func__,
+ wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 64046e0bd0a2..cab1e5c0e374 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -94,7 +94,7 @@ static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
- wil_dbg_irq(wil, "%s: mask_halp(%s)\n", __func__,
+ wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
mask_halp ? "true" : "false");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
@@ -103,7 +103,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
void wil6210_mask_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "mask_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -111,7 +111,7 @@ void wil6210_mask_halp(struct wil6210_priv *wil)
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "mask_irq_pseudo\n");
wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
@@ -134,7 +134,7 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
- wil_dbg_irq(wil, "%s: unmask_halp(%s)\n", __func__,
+ wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
unmask_halp ? "true" : "false");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
@@ -143,7 +143,7 @@ static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
static void wil6210_unmask_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "unmask_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -151,7 +151,7 @@ static void wil6210_unmask_halp(struct wil6210_priv *wil)
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "unmask_irq_pseudo\n");
set_bit(wil_status_irqen, wil->status);
@@ -160,7 +160,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
void wil_mask_irq(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "mask_irq\n");
wil6210_mask_irq_tx(wil);
wil6210_mask_irq_rx(wil);
@@ -170,7 +170,7 @@ void wil_mask_irq(struct wil6210_priv *wil)
void wil_unmask_irq(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "unmask_irq\n");
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
@@ -187,7 +187,7 @@ void wil_unmask_irq(struct wil6210_priv *wil)
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "configure_interrupt_moderation\n");
/* disable interrupt moderation for monitor
* to get better timestamp precision
@@ -400,7 +400,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
}
if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
- wil_dbg_irq(wil, "%s: HALP IRQ invoked\n", __func__);
+ wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
wil6210_mask_halp(wil);
isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
complete(&wil->halp.comp);
@@ -599,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
void wil6210_set_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "set_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -607,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil)
void wil6210_clear_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "clear_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -618,7 +618,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
{
int rc;
- wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
+ wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx");
rc = request_threaded_irq(irq, wil6210_hardirq,
wil6210_thread_irq,
@@ -629,7 +629,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "fini_irq:\n");
wil_mask_irq(wil);
free_irq(irq, wil);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 32e217e7159e..75ae474367f9 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -27,23 +27,23 @@
#define WAIT_FOR_SCAN_ABORT_MS 1000
bool debug_fw; /* = false; */
-module_param(debug_fw, bool, S_IRUGO);
+module_param(debug_fw, bool, 0444);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
static bool oob_mode;
-module_param(oob_mode, bool, S_IRUGO);
+module_param(oob_mode, bool, 0444);
MODULE_PARM_DESC(oob_mode,
" enable out of the box (OOB) mode in FW, for diagnostics and certification");
bool no_fw_recovery;
-module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
+module_param(no_fw_recovery, bool, 0644);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
/* if not set via modparam, will be set to default value of 1/8 of
* rx ring size during init flow
*/
unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT;
-module_param(rx_ring_overflow_thrsh, ushort, S_IRUGO);
+module_param(rx_ring_overflow_thrsh, ushort, 0444);
MODULE_PARM_DESC(rx_ring_overflow_thrsh,
" RX ring overflow threshold in descriptors.");
@@ -73,7 +73,7 @@ static const struct kernel_param_ops mtu_max_ops = {
.get = param_get_uint,
};
-module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, S_IRUGO);
+module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444);
MODULE_PARM_DESC(mtu_max, " Max MTU value.");
static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
@@ -102,11 +102,11 @@ static const struct kernel_param_ops ring_order_ops = {
.get = param_get_uint,
};
-module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, S_IRUGO);
+module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, 0444);
MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order");
-module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO);
+module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, 0444);
MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
-module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, S_IRUGO);
+module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
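As the parameter descriptions say, the ring orders encode sizes as powers of two. A one-line sketch of the relationship (hypothetical helper name, for illustration only):

	/* Sketch: order 10 -> 1 << 10 = 1024 descriptors. */
	static inline unsigned int ring_size_from_order(uint order)
	{
		return 1U << order;
	}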
#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
@@ -172,8 +172,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
struct wil_sta_info *sta = &wil->sta[cid];
might_sleep();
- wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
- sta->status);
+ wil_dbg_misc(wil, "disconnect_cid: CID %d, status %d\n",
+ cid, sta->status);
/* inform upper/lower layers */
if (sta->status != wil_sta_unused) {
if (!from_event) {
@@ -241,7 +241,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
return;
might_sleep();
- wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
+ wil_info(wil, "bssid=%pM, reason=%d, ev%s\n", bssid,
reason_code, from_event ? "+" : "-");
/* Cases are:
@@ -352,7 +352,7 @@ static int wil_wait_for_recovery(struct wil6210_priv *wil)
void wil_set_recovery_state(struct wil6210_priv *wil, int state)
{
- wil_dbg_misc(wil, "%s(%d -> %d)\n", __func__,
+ wil_dbg_misc(wil, "set_recovery_state: %d -> %d\n",
wil->recovery_state, state);
wil->recovery_state = state;
@@ -494,7 +494,7 @@ int wil_priv_init(struct wil6210_priv *wil)
{
uint i;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "priv_init\n");
memset(wil->sta, 0, sizeof(wil->sta));
for (i = 0; i < WIL6210_MAX_CID; i++)
@@ -577,7 +577,7 @@ void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "disconnect\n");
del_timer_sync(&wil->connect_timer);
_wil6210_disconnect(wil, bssid, reason_code, from_event);
@@ -585,7 +585,7 @@ void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
void wil_priv_deinit(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "priv_deinit\n");
wil_ftm_deinit(wil);
wil_set_recovery_state(wil, fw_recovery_idle);
@@ -619,7 +619,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
{
- wil_info(wil, "%s: enable=%d\n", __func__, enable);
+ wil_info(wil, "enable=%d\n", enable);
if (enable)
wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
else
@@ -875,7 +875,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
{
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "reset\n");
WARN_ON(!mutex_is_locked(&wil->mutex));
WARN_ON(test_bit(wil_status_napi_en, wil->status));
@@ -898,9 +898,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_PRE_RESET);
if (rc)
- wil_err(wil,
- "%s: PRE_RESET platform notify failed, rc %d\n",
- __func__, rc);
+ wil_err(wil, "PRE_RESET platform notify failed, rc %d\n",
+ rc);
}
set_bit(wil_status_resetting, wil->status);
@@ -993,8 +992,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* check FW is responsive */
rc = wmi_echo(wil);
if (rc) {
- wil_err(wil, "%s: wmi_echo failed, rc %d\n",
- __func__, rc);
+ wil_err(wil, "wmi_echo failed, rc %d\n", rc);
return rc;
}
@@ -1004,9 +1002,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_RDY);
if (rc) {
- wil_err(wil,
- "%s: FW_RDY notify failed, rc %d\n",
- __func__, rc);
+ wil_err(wil, "FW_RDY notify failed, rc %d\n",
+ rc);
rc = 0;
}
}
@@ -1088,7 +1085,7 @@ int wil_up(struct wil6210_priv *wil)
{
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "up\n");
mutex_lock(&wil->mutex);
rc = __wil_up(wil);
@@ -1129,7 +1126,7 @@ int wil_down(struct wil6210_priv *wil)
{
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "down\n");
wil_set_recovery_state(wil, fw_recovery_idle);
mutex_lock(&wil->mutex);
@@ -1162,25 +1159,24 @@ void wil_halp_vote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_vote: start, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
if (++wil->halp.ref_cnt == 1) {
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
if (!rc) {
- wil_err(wil, "%s: HALP vote timed out\n", __func__);
+ wil_err(wil, "HALP vote timed out\n");
/* Mask HALP as done in case the interrupt is raised */
wil6210_mask_halp(wil);
} else {
wil_dbg_irq(wil,
- "%s: HALP vote completed after %d ms\n",
- __func__,
+ "halp_vote: HALP vote completed after %d ms\n",
jiffies_to_msecs(to_jiffies - rc));
}
}
- wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_vote: end, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
@@ -1192,15 +1188,15 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_unvote: start, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
if (--wil->halp.ref_cnt == 0) {
wil6210_clear_halp(wil);
- wil_dbg_irq(wil, "%s: HALP unvote\n", __func__);
+ wil_dbg_irq(wil, "HALP unvote\n");
}
- wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_unvote:end, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 05ecf3a00f57..7bc6149480f2 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,10 +27,11 @@ static int wil_open(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "open\n");
- if (debug_fw) {
- wil_err(wil, "%s() while in debug_fw mode\n", __func__);
+ if (debug_fw ||
+ test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) {
+ wil_err(wil, "while in debug_fw or wmi_only mode\n");
return -EINVAL;
}
@@ -41,7 +42,7 @@ static int wil_stop(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "stop\n");
return wil_down(wil);
}
@@ -153,7 +154,7 @@ void *wil_if_alloc(struct device *dev)
wil->wdev = wdev;
wil->radio_wdev = wdev;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_alloc\n");
rc = wil_priv_init(wil);
if (rc) {
@@ -200,7 +201,7 @@ void wil_if_free(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_free\n");
if (!ndev)
return;
@@ -255,7 +256,7 @@ void wil_if_remove(struct wil6210_priv *wil)
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil_to_wdev(wil);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_remove\n");
unregister_netdev(ndev);
wiphy_unregister(wdev->wiphy);
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index 24184cfc5da4..ccd98487ea09 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -69,7 +69,7 @@ void wil_p2p_discovery_timer_fn(ulong x)
{
struct wil6210_priv *wil = (void *)x;
- wil_dbg_misc(wil, "%s\n", __func__);
+ wil_dbg_misc(wil, "p2p_discovery_timer_fn\n");
schedule_work(&wil->p2p.discovery_expired_work);
}
@@ -80,27 +80,25 @@ int wil_p2p_search(struct wil6210_priv *wil,
int rc;
struct wil_p2p_info *p2p = &wil->p2p;
- wil_dbg_misc(wil, "%s: channel %d\n",
- __func__, P2P_DMG_SOCIAL_CHANNEL);
+ wil_dbg_misc(wil, "p2p_search: channel %d\n", P2P_DMG_SOCIAL_CHANNEL);
lockdep_assert_held(&wil->mutex);
if (p2p->discovery_started) {
- wil_err(wil, "%s: search failed. discovery already ongoing\n",
- __func__);
+ wil_err(wil, "search failed. discovery already ongoing\n");
rc = -EBUSY;
goto out;
}
rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI);
if (rc) {
- wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+ wil_err(wil, "wmi_p2p_cfg failed\n");
goto out;
}
rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
if (rc) {
- wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+ wil_err(wil, "wmi_set_ssid failed\n");
goto out_stop;
}
@@ -108,8 +106,7 @@ int wil_p2p_search(struct wil6210_priv *wil,
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ,
request->ie_len, request->ie);
if (rc) {
- wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n",
- __func__);
+ wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n");
goto out_stop;
}
@@ -119,14 +116,13 @@ int wil_p2p_search(struct wil6210_priv *wil,
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
request->ie_len, request->ie);
if (rc) {
- wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n",
- __func__);
+ wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n");
goto out_stop;
}
rc = wmi_start_search(wil);
if (rc) {
- wil_err(wil, "%s: wmi_start_search failed\n", __func__);
+ wil_err(wil, "wmi_start_search failed\n");
goto out_stop;
}
@@ -153,12 +149,12 @@ int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
if (!chan)
return -EINVAL;
- wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
+ wil_dbg_misc(wil, "p2p_listen: duration %d\n", duration);
mutex_lock(&wil->mutex);
if (p2p->discovery_started) {
- wil_err(wil, "%s: discovery already ongoing\n", __func__);
+ wil_err(wil, "discovery already ongoing\n");
rc = -EBUSY;
goto out;
}
@@ -220,8 +216,8 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
mutex_lock(&wil->mutex);
if (cookie != p2p->cookie) {
- wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
- __func__, p2p->cookie, cookie);
+ wil_info(wil, "Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
+ p2p->cookie, cookie);
mutex_unlock(&wil->mutex);
return -ENOENT;
}
@@ -231,7 +227,7 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
mutex_unlock(&wil->mutex);
if (!started) {
- wil_err(wil, "%s: listen not started\n", __func__);
+ wil_err(wil, "listen not started\n");
return -ENOENT;
}
@@ -253,7 +249,7 @@ void wil_p2p_listen_expired(struct work_struct *work)
struct wil6210_priv, p2p);
u8 started;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "p2p_listen_expired\n");
mutex_lock(&wil->mutex);
started = wil_p2p_stop_discovery(wil);
@@ -279,7 +275,7 @@ void wil_p2p_search_expired(struct work_struct *work)
struct wil6210_priv, p2p);
u8 started;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "p2p_search_expired\n");
mutex_lock(&wil->mutex);
started = wil_p2p_stop_discovery(wil);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index c97263b45cbd..d472e13fb9d9 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -23,7 +23,7 @@
#include <linux/rtnetlink.h>
static bool use_msi = true;
-module_param(use_msi, bool, S_IRUGO);
+module_param(use_msi, bool, 0444);
MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
#ifdef CONFIG_PM
@@ -99,8 +99,10 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
*/
int msi_only = pdev->msi_enabled;
bool _use_msi = use_msi;
+ bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+ wil->fw_capabilities);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only);
pdev->msi_enabled = 0;
@@ -123,9 +125,11 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
if (rc)
goto stop_master;
- /* need reset here to obtain MAC */
+ /* need reset here to obtain MAC; in case of WMI-only FW, a full
+ * reset with fw loading takes place here
+ */
mutex_lock(&wil->mutex);
- rc = wil_reset(wil, false);
+ rc = wil_reset(wil, wmi_only);
mutex_unlock(&wil->mutex);
if (rc)
goto release_irq;
@@ -145,7 +149,7 @@ static int wil_if_pcie_disable(struct wil6210_priv *wil)
{
struct pci_dev *pdev = wil->pdev;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_pcie_disable\n");
pci_clear_master(pdev);
/* disable and release IRQ */
@@ -208,21 +212,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
}
- /* device supports 48 bit addresses */
- rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (rc) {
- dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
- rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(dev,
- "dma_set_mask_and_coherent(32) failed: %d\n",
- rc);
- goto if_free;
- }
- } else {
- wil->use_extended_dma_addr = 1;
- }
-
wil->pdev = pdev;
pci_set_drvdata(pdev, wil);
/* rollback to if_free */
@@ -236,6 +225,21 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* rollback to err_plat */
+ /* device supports 48bit addresses */
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (rc) {
+ dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(dev,
+ "dma_set_mask_and_coherent(32) failed: %d\n",
+ rc);
+ goto err_plat;
+ }
+ } else {
+ wil->use_extended_dma_addr = 1;
+ }
+
rc = pci_enable_device(pdev);
if (rc) {
wil_err(wil,
@@ -299,7 +303,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
#endif /* CONFIG_PM */
wil6210_debugfs_init(wil);
-
+ wil6210_sysfs_init(wil);
return 0;
@@ -325,7 +329,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
struct wil6210_priv *wil = pci_get_drvdata(pdev);
void __iomem *csr = wil->csr;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "pcie_remove\n");
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
@@ -333,6 +337,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */
+ wil6210_sysfs_remove(wil);
wil6210_debugfs_remove(wil);
rtnl_lock();
wil_p2p_wdev_free(wil);
@@ -363,8 +368,7 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
rc = wil_can_suspend(wil, is_runtime);
if (rc)
@@ -390,8 +394,7 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
/* allow master */
pci_set_master(pdev);
@@ -411,7 +414,7 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
int rc = 0;
enum wil_platform_event evt;
- wil_dbg_pm(wil, "%s: mode (%ld)\n", __func__, mode);
+ wil_dbg_pm(wil, "pm_notify: mode (%ld)\n", mode);
switch (mode) {
case PM_HIBERNATION_PREPARE:
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 11ee24d509e5..a0acb2d0cb79 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -21,8 +21,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
int rc = 0;
struct wireless_dev *wdev = wil->wdev;
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
if (!netif_running(wil_to_ndev(wil))) {
/* can always sleep when down */
@@ -59,7 +58,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
}
out:
- wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
+ wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
return rc;
@@ -70,8 +69,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
@@ -86,7 +84,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
rc = wil->platform_ops.suspend(wil->platform_handle);
out:
- wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+ wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
return rc;
}
@@ -96,8 +94,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
if (wil->platform_ops.resume) {
rc = wil->platform_ops.resume(wil->platform_handle);
@@ -115,7 +112,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
rc = wil_up(wil);
out:
- wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+ wil_dbg_pm(wil, "resume: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index b6765d4a4602..b067fdf086d4 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -60,7 +60,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
if (wil_is_pmc_allocated(pmc)) {
/* sanity check */
- wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
+ wil_err(wil, "ERROR pmc is already allocated\n");
goto no_release_err;
}
if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
@@ -90,21 +90,20 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
pmc->num_descriptors = num_descriptors;
pmc->descriptor_size = descriptor_size;
- wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
- __func__, num_descriptors, descriptor_size);
+ wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
+ num_descriptors, descriptor_size);
/* allocate descriptors info list in pmc context*/
pmc->descriptors = kcalloc(num_descriptors,
sizeof(struct desc_alloc_info),
GFP_KERNEL);
if (!pmc->descriptors) {
- wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__);
+ wil_err(wil, "ERROR allocating pmc skb list\n");
goto no_release_err;
}
- wil_dbg_misc(wil,
- "%s: allocated descriptors info list %p\n",
- __func__, pmc->descriptors);
+ wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
+ pmc->descriptors);
/* Allocate pring buffer and descriptors.
* vring->va should be aligned on its size rounded up to power of 2
@@ -131,15 +130,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
wil_dbg_misc(wil,
- "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
- __func__,
+ "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
pmc->pring_va, &pmc->pring_pa,
sizeof(struct vring_tx_desc),
num_descriptors,
sizeof(struct vring_tx_desc) * num_descriptors);
if (!pmc->pring_va) {
- wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
+ wil_err(wil, "ERROR allocating pmc pring\n");
goto release_pmc_skb_list;
}
@@ -158,9 +156,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
GFP_KERNEL);
if (unlikely(!pmc->descriptors[i].va)) {
- wil_err(wil,
- "%s: ERROR allocating pmc descriptor %d",
- __func__, i);
+ wil_err(wil, "ERROR allocating pmc descriptor %d", i);
goto release_pmc_skbs;
}
@@ -180,21 +176,21 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
*_d = *d;
}
- wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);
+ wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");
pmc_cmd.op = WMI_PMC_ALLOCATE;
pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
- wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
+ wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
pmc->last_cmd_status = wmi_send(wil,
WMI_PMC_CMDID,
&pmc_cmd,
sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
- "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
- __func__, pmc->last_cmd_status);
+ "WMI_PMC_CMD with ALLOCATE op failed with status %d",
+ pmc->last_cmd_status);
goto release_pmc_skbs;
}
@@ -203,7 +199,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
return;
release_pmc_skbs:
- wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__);
+ wil_err(wil, "exit on error: Releasing skbs...\n");
for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) {
dma_free_coherent(dev,
descriptor_size,
@@ -212,7 +208,7 @@ release_pmc_skbs:
pmc->descriptors[i].va = NULL;
}
- wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);
+ wil_err(wil, "exit on error: Releasing pring...\n");
dma_free_coherent(dev,
sizeof(struct vring_tx_desc) * num_descriptors,
@@ -222,8 +218,7 @@ release_pmc_skbs:
pmc->pring_va = NULL;
release_pmc_skb_list:
- wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
- __func__);
+ wil_err(wil, "exit on error: Releasing descriptors info list...\n");
kfree(pmc->descriptors);
pmc->descriptors = NULL;
@@ -247,24 +242,23 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
pmc->last_cmd_status = 0;
if (!wil_is_pmc_allocated(pmc)) {
- wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
- __func__);
+ wil_dbg_misc(wil,
+ "pmc_free: Error, can't free - not allocated\n");
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return;
}
if (send_pmc_cmd) {
- wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
- __func__);
+ wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
pmc_cmd.op = WMI_PMC_RELEASE;
pmc->last_cmd_status =
wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
- "%s WMI_PMC_CMD with RELEASE op failed, status %d",
- __func__, pmc->last_cmd_status);
+ "WMI_PMC_CMD with RELEASE op failed, status %d",
+ pmc->last_cmd_status);
/* There's nothing we can do with this error.
* Normally, it should never occur.
* Continue to freeing all memory allocated for pmc.
@@ -276,8 +270,8 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
size_t buf_size = sizeof(struct vring_tx_desc) *
pmc->num_descriptors;
- wil_dbg_misc(wil, "%s: free pring va %p\n",
- __func__, pmc->pring_va);
+ wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
+ pmc->pring_va);
dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
pmc->pring_va = NULL;
@@ -296,11 +290,11 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
pmc->descriptors[i].pa);
pmc->descriptors[i].va = NULL;
}
- wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
- __func__, i, pmc->num_descriptors);
+ wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
+ pmc->num_descriptors);
wil_dbg_misc(wil,
- "%s: free pmc descriptors info list %p\n",
- __func__, pmc->descriptors);
+ "pmc_free: free pmc descriptors info list %p\n",
+ pmc->descriptors);
kfree(pmc->descriptors);
pmc->descriptors = NULL;
} else {
@@ -316,7 +310,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
*/
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s: status %d\n", __func__,
+ wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
wil->pmc.last_cmd_status);
return wil->pmc.last_cmd_status;
@@ -339,7 +333,7 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
mutex_lock(&pmc->lock);
if (!wil_is_pmc_allocated(pmc)) {
- wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
+ wil_err(wil, "error, pmc is not allocated!\n");
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return -EPERM;
@@ -348,8 +342,8 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
pmc_size = pmc->descriptor_size * pmc->num_descriptors;
wil_dbg_misc(wil,
- "%s: size %u, pos %lld\n",
- __func__, (unsigned)count, *f_pos);
+ "pmc_read: size %u, pos %lld\n",
+ (u32)count, *f_pos);
pmc->last_cmd_status = 0;
@@ -358,15 +352,16 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
offset = *f_pos - (idx * pmc->descriptor_size);
if (*f_pos >= pmc_size) {
- wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
- __func__, *f_pos, (unsigned)pmc_size);
+ wil_dbg_misc(wil,
+ "pmc_read: reached end of pmc buf: %lld >= %u\n",
+ *f_pos, (u32)pmc_size);
pmc->last_cmd_status = -ERANGE;
goto out;
}
wil_dbg_misc(wil,
- "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
- __func__, *f_pos, idx, offset, count);
+ "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
+ *f_pos, idx, offset, count);
/* if no errors, return the copied byte count */
retval = simple_read_from_buffer(buf,
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 19ed127d4d05..7404b6f39c6a 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -349,8 +349,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
agg_amsdu, agg_wsize, agg_timeout);
if (rc || (status != WLAN_STATUS_SUCCESS)) {
- wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n",
- __func__, rc, status);
+ wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
+ status);
goto out;
}
@@ -387,7 +387,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
txdata->addba_in_progress = true;
rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout);
if (rc) {
- wil_err(wil, "%s: wmi_addba failed, rc (%d)", __func__, rc);
+ wil_err(wil, "wmi_addba failed, rc (%d)", rc);
txdata->addba_in_progress = false;
}
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
new file mode 100644
index 000000000000..0faa26ca41c9
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+static ssize_t
+wil_ftm_txrx_offset_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_tof_get_tx_rx_offset_event evt;
+ } __packed reply;
+ int rc;
+ ssize_t len;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_TOF_GET_TX_RX_OFFSET_CMDID, NULL, 0,
+ WMI_TOF_GET_TX_RX_OFFSET_EVENTID,
+ &reply, sizeof(reply), 100);
+ if (rc < 0)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "get_tof_tx_rx_offset failed, error %d\n",
+ reply.evt.status);
+ return -EIO;
+ }
+ len = snprintf(buf, PAGE_SIZE, "%u %u\n",
+ le32_to_cpu(reply.evt.tx_offset),
+ le32_to_cpu(reply.evt.rx_offset));
+ return len;
+}
+
+static ssize_t
+wil_ftm_txrx_offset_sysfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ struct wmi_tof_set_tx_rx_offset_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_tof_set_tx_rx_offset_event evt;
+ } __packed reply;
+ unsigned int tx_offset, rx_offset;
+ int rc;
+
+ if (sscanf(buf, "%u %u", &tx_offset, &rx_offset) != 2)
+ return -EINVAL;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.tx_offset = cpu_to_le32(tx_offset);
+ cmd.rx_offset = cpu_to_le32(rx_offset);
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_TOF_SET_TX_RX_OFFSET_CMDID, &cmd, sizeof(cmd),
+ WMI_TOF_SET_TX_RX_OFFSET_EVENTID,
+ &reply, sizeof(reply), 100);
+ if (rc < 0)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "set_tof_tx_rx_offset failed, error %d\n",
+ reply.evt.status);
+ return -EIO;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(ftm_txrx_offset, 0644,
+ wil_ftm_txrx_offset_sysfs_show,
+ wil_ftm_txrx_offset_sysfs_store);
+
+static struct attribute *wil6210_sysfs_entries[] = {
+ &dev_attr_ftm_txrx_offset.attr,
+ NULL
+};
+
+static struct attribute_group wil6210_attribute_group = {
+ .name = "wil6210",
+ .attrs = wil6210_sysfs_entries,
+};
+
+int wil6210_sysfs_init(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+ int err;
+
+ err = sysfs_create_group(&dev->kobj, &wil6210_attribute_group);
+ if (err) {
+ wil_err(wil, "failed to create sysfs group: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+void wil6210_sysfs_remove(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ sysfs_remove_group(&dev->kobj, &wil6210_attribute_group);
+}
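
The new node can be exercised from user space with plain file I/O. A minimal sketch follows; the PCI device path is an assumption (the "wil6210" group is created under whatever device the driver is bound to), and the offsets are placeholders:

    /* Hypothetical user-space exerciser for ftm_txrx_offset; not part of
     * the patch. The path prefix is an assumption - adjust to the actual
     * PCI address of the wil6210 device.
     */
    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/sys/bus/pci/devices/0000:01:00.0/wil6210/ftm_txrx_offset";
        unsigned int tx, rx;
        FILE *f = fopen(path, "r");

        if (!f)
            return 1;
        if (fscanf(f, "%u %u", &tx, &rx) != 2) { /* show emits "%u %u\n" */
            fclose(f);
            return 1;
        }
        fclose(f);
        printf("tx_offset=%u rx_offset=%u\n", tx, rx);

        f = fopen(path, "w");
        if (!f)
            return 1;
        fprintf(f, "%u %u", tx, rx); /* store parses "%u %u" via sscanf() */
        fclose(f);
        return 0;
    }

Note that both the show and store paths return -EOPNOTSUPP when the firmware lacks WMI_FW_CAPABILITY_FTM, so the read-failure case above is reachable in practice.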
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index b0166d224508..8b5411e4dc34 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -29,12 +29,12 @@
#include "trace.h"
static bool rtap_include_phy_info;
-module_param(rtap_include_phy_info, bool, S_IRUGO);
+module_param(rtap_include_phy_info, bool, 0444);
MODULE_PARM_DESC(rtap_include_phy_info,
" Include PHY info in the radiotap header, default - no");
bool rx_align_2;
-module_param(rx_align_2, bool, S_IRUGO);
+module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
static inline uint wil_rx_snaplen(void)
@@ -112,7 +112,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
size_t sz = vring->size * sizeof(vring->va[0]);
uint i;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "vring_alloc:\n");
BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
@@ -762,7 +762,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
return;
}
- wil_dbg_txrx(wil, "%s()\n", __func__);
+ wil_dbg_txrx(wil, "rx_handle\n");
while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
(*quota)--;
@@ -785,7 +785,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
struct vring *vring = &wil->vring_rx;
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "rx_init\n");
if (vring->va) {
wil_err(wil, "Rx ring already allocated\n");
@@ -816,7 +816,7 @@ void wil_rx_fini(struct wil6210_priv *wil)
{
struct vring *vring = &wil->vring_rx;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "rx_fini\n");
if (vring->va)
wil_vring_free(wil, vring, 0);
@@ -868,7 +868,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
struct vring *vring = &wil->vring_tx[id];
struct vring_tx_data *txdata = &wil->vring_tx_data[id];
- wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+ wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
lockdep_assert_held(&wil->mutex);
@@ -948,7 +948,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
struct vring *vring = &wil->vring_tx[id];
struct vring_tx_data *txdata = &wil->vring_tx_data[id];
- wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+ wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
lockdep_assert_held(&wil->mutex);
@@ -1010,7 +1010,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
if (!vring->va)
return;
- wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
+ wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
@@ -1049,12 +1049,14 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
struct vring *v = &wil->vring_tx[i];
struct vring_tx_data *txdata = &wil->vring_tx_data[i];
- wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
- __func__, eth->h_dest, i);
+ wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
+ eth->h_dest, i);
if (v->va && txdata->enabled) {
return v;
} else {
- wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
+ wil_dbg_txrx(wil,
+ "find_tx_ucast: vring[%d] not valid\n",
+ i);
return NULL;
}
}
@@ -1210,17 +1212,6 @@ found:
return v;
}
-static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
- struct sk_buff *skb)
-{
- struct wireless_dev *wdev = wil->wdev;
-
- if (wdev->iftype != NL80211_IFTYPE_AP)
- return wil_find_tx_bcast_2(wil, skb);
-
- return wil_find_tx_bcast_1(wil, skb);
-}
-
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
int vring_index)
{
@@ -1390,8 +1381,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
int gso_type;
int rc = -EINVAL;
- wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
- __func__, skb->len, vring_index);
+ wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
+ vring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
@@ -1660,8 +1651,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
bool mcast = (vring_index == wil->bcast_vring);
uint len = skb_headlen(skb);
- wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
- __func__, skb->len, vring_index);
+ wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
+ vring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
@@ -1901,7 +1892,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static bool pr_once_fw;
int rc;
- wil_dbg_txrx(wil, "%s()\n", __func__);
+ wil_dbg_txrx(wil, "start_xmit\n");
if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
if (!pr_once_fw) {
wil_err(wil, "FW not ready\n");
@@ -1920,12 +1911,26 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
pr_once_fw = false;
/* find vring */
- if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
- /* in STA mode (ESS), all to same VRING */
+ if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) {
+ /* in STA mode (ESS), all to same VRING (to AP) */
vring = wil_find_tx_vring_sta(wil, skb);
- } else { /* direct communication, find matching VRING */
- vring = bcast ? wil_find_tx_bcast(wil, skb) :
- wil_find_tx_ucast(wil, skb);
+ } else if (bcast) {
+ if (wil->pbss)
+ /* in pbss, no bcast VRING - duplicate skb in
+ * all stations VRINGs
+ */
+ vring = wil_find_tx_bcast_2(wil, skb);
+ else if (wil->wdev->iftype == NL80211_IFTYPE_AP)
+ /* AP has a dedicated bcast VRING */
+ vring = wil_find_tx_bcast_1(wil, skb);
+ else
+ /* unexpected combination, fallback to duplicating
+ * the skb in all stations VRINGs
+ */
+ vring = wil_find_tx_bcast_2(wil, skb);
+ } else {
+ /* unicast, find specific VRING by dest. address */
+ vring = wil_find_tx_ucast(wil, skb);
}
if (unlikely(!vring)) {
wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
@@ -1999,7 +2004,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
return 0;
}
- wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
+ wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
used_before_complete = wil_vring_used_tx(vring);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 19de0ebb48f7..fcfbcf7fbd7d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -528,6 +528,7 @@ struct wil_sta_info {
unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM];
struct wil_tid_crypto_rx group_crypto_rx;
+ u8 aid; /* 1-254; 0 if unknown/not reported */
};
enum {
@@ -672,6 +673,7 @@ struct wil6210_priv {
struct dentry *debug;
struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
u8 discovery_mode;
+ u8 abft_len;
void *platform_handle;
struct wil_platform_ops platform_ops;
@@ -891,6 +893,8 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
int wil6210_debugfs_init(struct wil6210_priv *wil);
void wil6210_debugfs_remove(struct wil6210_priv *wil);
+int wil6210_sysfs_init(struct wil6210_priv *wil);
+void wil6210_sysfs_remove(struct wil6210_priv *wil);
int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
struct station_info *sinfo);
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index d051eea47a54..e53cf0cf7031 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -62,13 +62,13 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
u32 host_min, dump_size, offset, len;
if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) {
- wil_err(wil, "%s: fail to obtain crash dump size\n", __func__);
+ wil_err(wil, "fail to obtain crash dump size\n");
return -EINVAL;
}
if (dump_size > size) {
- wil_err(wil, "%s: not enough space for dump. Need %d have %d\n",
- __func__, dump_size, size);
+ wil_err(wil, "not enough space for dump. Need %d have %d\n",
+ dump_size, size);
return -EINVAL;
}
@@ -83,8 +83,9 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
len = map->to - map->from;
offset = map->host - host_min;
- wil_dbg_misc(wil, "%s() - dump %s, size %d, offset %d\n",
- __func__, fw_mapping[i].name, len, offset);
+ wil_dbg_misc(wil,
+ "fw_copy_crash_dump: - dump %s, size %d, offset %d\n",
+ fw_mapping[i].name, len, offset);
wil_memcpy_fromio_32((void * __force)(dest + offset),
(const void __iomem * __force)data, len);
@@ -99,7 +100,7 @@ void wil_fw_core_dump(struct wil6210_priv *wil)
u32 fw_dump_size;
if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) {
- wil_err(wil, "%s: fail to get fw dump size\n", __func__);
+ wil_err(wil, "fail to get fw dump size\n");
return;
}
@@ -115,6 +116,5 @@ void wil_fw_core_dump(struct wil6210_priv *wil)
* after 5 min
*/
dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL);
- wil_info(wil, "%s: fw core dumped, size %d bytes\n", __func__,
- fw_dump_size);
+ wil_info(wil, "fw core dumped, size %d bytes\n", fw_dump_size);
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index ef6472fb1f7e..bccd27dcb42b 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -25,16 +25,16 @@
#include "ftm.h"
static uint max_assoc_sta = WIL6210_MAX_CID;
-module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR);
+module_param(max_assoc_sta, uint, 0644);
MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP");
int agg_wsize; /* = 0; */
-module_param(agg_wsize, int, S_IRUGO | S_IWUSR);
+module_param(agg_wsize, int, 0644);
MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
" 0 - use default; < 0 - don't auto-establish");
u8 led_id = WIL_LED_INVALID_ID;
-module_param(led_id, byte, S_IRUGO);
+module_param(led_id, byte, 0444);
MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
@@ -495,8 +495,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
}
ch = evt->channel + 1;
- wil_info(wil, "Connect %pM channel [%d] cid %d\n",
- evt->bssid, ch, evt->cid);
+ wil_info(wil, "Connect %pM channel [%d] cid %d aid %d\n",
+ evt->bssid, ch, evt->cid, evt->aid);
wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
evt->assoc_info, len - sizeof(*evt), true);
@@ -539,8 +539,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
(wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
if (wil->sta[evt->cid].status != wil_sta_unused) {
- wil_err(wil, "%s: AP: Invalid status %d for CID %d\n",
- __func__, wil->sta[evt->cid].status, evt->cid);
+ wil_err(wil, "AP: Invalid status %d for CID %d\n",
+ wil->sta[evt->cid].status, evt->cid);
mutex_unlock(&wil->mutex);
return;
}
@@ -553,13 +553,12 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
rc = wil_tx_init(wil, evt->cid);
if (rc) {
- wil_err(wil, "%s: config tx vring failed for CID %d, rc (%d)\n",
- __func__, evt->cid, rc);
+ wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
+ evt->cid, rc);
wmi_disconnect_sta(wil, wil->sta[evt->cid].addr,
WLAN_REASON_UNSPECIFIED, false, false);
} else {
- wil_info(wil, "%s: successful connection to CID %d\n",
- __func__, evt->cid);
+ wil_info(wil, "successful connection to CID %d\n", evt->cid);
}
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
@@ -567,9 +566,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
if (rc) {
netif_carrier_off(ndev);
wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
- wil_err(wil,
- "%s: cfg80211_connect_result with failure\n",
- __func__);
+ wil_err(wil, "cfg80211_connect_result with failure\n");
cfg80211_connect_result(ndev, evt->bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
@@ -602,12 +599,13 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
} else {
- wil_err(wil, "%s: unhandled iftype %d for CID %d\n",
- __func__, wdev->iftype, evt->cid);
+ wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype,
+ evt->cid);
goto out;
}
wil->sta[evt->cid].status = wil_sta_connected;
+ wil->sta[evt->cid].aid = evt->aid;
set_bit(wil_status_fwconnected, wil->status);
wil_update_net_queues_bh(wil, NULL, false);
@@ -961,8 +959,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
if (immed_reply) {
- wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n",
- __func__, wil->reply_id);
+ wil_dbg_wmi(wil, "recv_cmd: Complete WMI 0x%04x\n",
+ wil->reply_id);
kfree(evt);
num_immed_reply++;
complete(&wil->wmi_call);
@@ -976,7 +974,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
}
}
/* normally, 1 event per IRQ should be processed */
- wil_dbg_wmi(wil, "%s -> %d events queued, %d completed\n", __func__,
+ wil_dbg_wmi(wil, "recv_cmd: -> %d events queued, %d completed\n",
n - num_immed_reply, num_immed_reply);
}
@@ -1113,6 +1111,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
.hidden_ssid = hidden_ssid,
.is_go = is_go,
.disable_ap_sme = disable_ap_sme,
+ .abft_len = wil->abft_len,
};
struct {
struct wmi_cmd_hdr wmi;
@@ -1403,7 +1402,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
struct wmi_listen_started_event evt;
} __packed reply;
- wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off");
+ wil_info(wil, "(%s)\n", on ? "on" : "off");
if (on) {
rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
@@ -1523,7 +1522,7 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac,
struct wmi_disconnect_event evt;
} __packed reply;
- wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
+ wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason);
if (del_sta) {
ether_addr_copy(del_sta_cmd.dst_mac, mac);
@@ -1568,8 +1567,8 @@ int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
.amsdu = 0,
};
- wil_dbg_wmi(wil, "%s(ring %d size %d timeout %d)\n", __func__,
- ringid, size, timeout);
+ wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size,
+ timeout);
return wmi_send(wil, WMI_VRING_BA_EN_CMDID, &cmd, sizeof(cmd));
}
@@ -1581,8 +1580,7 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason)
.reason = cpu_to_le16(reason),
};
- wil_dbg_wmi(wil, "%s(ring %d reason %d)\n", __func__,
- ringid, reason);
+ wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, &cmd, sizeof(cmd));
}
@@ -1594,8 +1592,8 @@ int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason)
.reason = cpu_to_le16(reason),
};
- wil_dbg_wmi(wil, "%s(CID %d TID %d reason %d)\n", __func__,
- cidxtid & 0xf, (cidxtid >> 4) & 0xf, reason);
+ wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf,
+ (cidxtid >> 4) & 0xf, reason);
return wmi_send(wil, WMI_RCP_DELBA_CMDID, &cmd, sizeof(cmd));
}
@@ -1770,7 +1768,7 @@ void wmi_event_flush(struct wil6210_priv *wil)
ulong flags;
struct pending_wmi_event *evt, *t;
- wil_dbg_wmi(wil, "%s()\n", __func__);
+ wil_dbg_wmi(wil, "event_flush\n");
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
@@ -1815,8 +1813,8 @@ static void wmi_event_handle(struct wil6210_priv *wil,
WARN_ON(wil->reply_buf);
wmi_evt_call_handler(wil, id, evt_data,
len - sizeof(*wmi));
- wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n",
- __func__, id);
+ wil_dbg_wmi(wil, "event_handle: Complete WMI 0x%04x\n",
+ id);
complete(&wil->wmi_call);
return;
}
@@ -1863,11 +1861,11 @@ void wmi_event_worker(struct work_struct *work)
struct pending_wmi_event *evt;
struct list_head *lh;
- wil_dbg_wmi(wil, "Start %s\n", __func__);
+ wil_dbg_wmi(wil, "event_worker: Start\n");
while ((lh = next_wmi_ev(wil)) != NULL) {
evt = list_entry(lh, struct pending_wmi_event, list);
wmi_event_handle(wil, &evt->event.hdr);
kfree(evt);
}
- wil_dbg_wmi(wil, "Finished %s\n", __func__);
+ wil_dbg_wmi(wil, "event_worker: Finished\n");
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 9c4a0bdc51f3..7c9fee57aa91 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -57,6 +57,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_RF_SECTORS = 2,
WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3,
WMI_FW_CAPABILITY_DISABLE_AP_SME = 4,
+ WMI_FW_CAPABILITY_WMI_ONLY = 5,
WMI_FW_CAPABILITY_MAX,
};
@@ -186,6 +187,7 @@ enum wmi_command_id {
WMI_RS_CFG_CMDID = 0x921,
WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
WMI_AOA_MEAS_CMDID = 0x923,
+ WMI_BRP_SET_ANT_LIMIT_CMDID = 0x924,
WMI_SET_MGMT_RETRY_LIMIT_CMDID = 0x930,
WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931,
WMI_NEW_STA_CMDID = 0x935,
@@ -546,7 +548,9 @@ struct wmi_pcp_start_cmd {
u8 pcp_max_assoc_sta;
u8 hidden_ssid;
u8 is_go;
- u8 reserved0[6];
+ u8 reserved0[5];
+ /* abft_len override if non-0 */
+ u8 abft_len;
u8 disable_ap_sme;
u8 network_type;
u8 channel;
@@ -1083,6 +1087,7 @@ enum wmi_event_id {
WMI_RS_CFG_DONE_EVENTID = 0x1921,
WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
WMI_AOA_MEAS_EVENTID = 0x1923,
+ WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924,
WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930,
WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931,
WMI_TOF_SESSION_END_EVENTID = 0x1991,
@@ -1303,7 +1308,8 @@ struct wmi_connect_event {
u8 assoc_req_len;
u8 assoc_resp_len;
u8 cid;
- u8 reserved2[3];
+ u8 aid;
+ u8 reserved2[2];
/* not in use */
u8 assoc_info[0];
} __packed;
@@ -1776,6 +1782,42 @@ struct wmi_get_detailed_rs_res_event {
u8 reserved[3];
} __packed;
+/* BRP antenna limit mode */
+enum wmi_brp_ant_limit_mode {
+ /* Disable BRP force antenna limit */
+ WMI_BRP_ANT_LIMIT_MODE_DISABLE = 0x00,
+ /* Define maximal antennas limit. Only effective antennas will be
+ * actually used
+ */
+ WMI_BRP_ANT_LIMIT_MODE_EFFECTIVE = 0x01,
+ /* Force a specific number of antennas */
+ WMI_BRP_ANT_LIMIT_MODE_FORCE = 0x02,
+ /* number of BRP antenna limit modes */
+ WMI_BRP_ANT_LIMIT_MODES_NUM = 0x03,
+};
+
+/* WMI_BRP_SET_ANT_LIMIT_CMDID */
+struct wmi_brp_set_ant_limit_cmd {
+ /* connection id */
+ u8 cid;
+ /* enum wmi_brp_ant_limit_mode */
+ u8 limit_mode;
+ /* antenna limit count, 1-27
+ * disable_mode - ignored
+ * effective_mode - upper limit to number of antennas to be used
+ * force_mode - exact number of antennas to be used
+ */
+ u8 ant_limit;
+ u8 reserved;
+} __packed;
+
+/* WMI_BRP_SET_ANT_LIMIT_EVENTID */
+struct wmi_brp_set_ant_limit_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* broadcast connection ID */
#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
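
A caller for the new BRP command would presumably follow the wmi_call() pattern used elsewhere in this series; a sketch (wil_brp_set_ant_limit() is a hypothetical helper, not part of this patch, and the 100 ms timeout mirrors the TOF calls above):

    /* Hypothetical helper, not part of this patch. */
    static int wil_brp_set_ant_limit(struct wil6210_priv *wil, u8 cid,
                                     u8 limit_mode, u8 ant_limit)
    {
        struct wmi_brp_set_ant_limit_cmd cmd = {
            .cid = cid,
            .limit_mode = limit_mode, /* enum wmi_brp_ant_limit_mode */
            .ant_limit = ant_limit,   /* 1-27; ignored in DISABLE mode */
        };
        struct {
            struct wmi_cmd_hdr wmi;
            struct wmi_brp_set_ant_limit_event evt;
        } __packed reply;
        int rc;

        if (limit_mode >= WMI_BRP_ANT_LIMIT_MODES_NUM)
            return -EINVAL;

        rc = wmi_call(wil, WMI_BRP_SET_ANT_LIMIT_CMDID, &cmd, sizeof(cmd),
                      WMI_BRP_SET_ANT_LIMIT_EVENTID, &reply, sizeof(reply),
                      100);
        if (rc)
            return rc;

        return reply.evt.status ? -EIO : 0;
    }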
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 7c8b5e3e57a1..cd105a0bf5d1 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2414,8 +2414,16 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
dev->res[base_sel - 1].base,
wr_offset, wr_mask, wr_value);
- msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
- wr_offset, wr_mask, wr_value);
+ base_sel_size = resource_size(dev->res[base_sel - 1].resource);
+
+ if (wr_offset > base_sel_size - 4 ||
+ msm_pcie_check_align(dev, wr_offset))
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
+ dev->rc_idx, wr_offset, base_sel_size - 4);
+ else
+ msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
+ wr_offset, wr_mask, wr_value);
break;
case 13: /* dump all registers of base_sel */
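
The added guard keeps a 4-byte register write inside the selected BAR; a worked example with illustrative values:

    /* Illustrative only. With a 4 KB register space:
     *   base_sel_size     = 0x1000
     *   base_sel_size - 4 = 0x0ffc   last valid 32-bit write offset
     * so wr_offset = 0x1000 is rejected, as is any offset that fails
     * msm_pcie_check_align(), instead of being written blindly as before.
     */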
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 6e06fef81849..c3cb57ed6083 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -267,6 +267,14 @@ static int __ufs_qcom_phy_init_vreg(struct phy *phy,
char prop_name[MAX_PROP_NAME];
+ if (dev->of_node) {
+ snprintf(prop_name, MAX_PROP_NAME, "%s-supply", name);
+ if (!of_parse_phandle(dev->of_node, prop_name, 0)) {
+ dev_dbg(dev, "No vreg data found for %s\n", prop_name);
+ return optional ? err : -ENODATA;
+ }
+ }
+
vreg->name = kstrdup(name, GFP_KERNEL);
if (!vreg->name) {
err = -ENOMEM;
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 188388bc97a0..23b0428bcf34 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -264,6 +264,11 @@ static void gsi_handle_glob_err(uint32_t err)
}
}
+static void gsi_handle_gp_int1(void)
+{
+ complete(&gsi_ctx->gen_ee_cmd_compl);
+}
+
static void gsi_handle_glob_ee(int ee)
{
uint32_t val;
@@ -288,8 +293,7 @@ static void gsi_handle_glob_ee(int ee)
}
if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK) {
- notify.evt_id = GSI_PER_EVT_GLOB_GP1;
- gsi_ctx->per.notify_cb(&notify);
+ gsi_handle_gp_int1();
}
if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) {
@@ -2745,6 +2749,67 @@ void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
}
EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
+int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
+{
+ enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
+ uint32_t val;
+ int res;
+
+ if (!gsi_ctx) {
+ pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+ return -GSI_STATUS_NODEV;
+ }
+
+ if (chan_idx >= gsi_ctx->max_ch || !code) {
+ GSIERR("bad params chan_idx=%d\n", chan_idx);
+ return -GSI_STATUS_INVALID_PARAMS;
+ }
+
+ mutex_lock(&gsi_ctx->mlock);
+ reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
+
+ /* invalidate the response */
+ gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+ GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+ gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
+ gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
+ GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+
+ gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
+ val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
+ GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
+ ((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
+ GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
+ ((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
+ GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
+ gsi_writel(val, gsi_ctx->base +
+ GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));
+
+ res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
+ msecs_to_jiffies(GSI_CMD_TIMEOUT));
+ if (res == 0) {
+ GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
+ res = -GSI_STATUS_TIMED_OUT;
+ goto free_lock;
+ }
+
+ gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+ GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+ if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
+ GSIERR("No response received\n");
+ res = -GSI_STATUS_ERROR;
+ goto free_lock;
+ }
+
+ res = GSI_STATUS_SUCCESS;
+ *code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
+free_lock:
+ mutex_unlock(&gsi_ctx->mlock);
+
+ return res;
+}
+EXPORT_SYMBOL(gsi_halt_channel_ee);
+
static int msm_gsi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -2757,6 +2822,7 @@ static int msm_gsi_probe(struct platform_device *pdev)
}
gsi_ctx->dev = dev;
+ init_completion(&gsi_ctx->gen_ee_cmd_compl);
gsi_debugfs_init();
return 0;
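
For reference, a minimal caller of the new export, decoding both the transport status and the firmware return code (channel and EE numbers are placeholders; the Q6 shutdown path later in this patch does the real lookup via ipa3_get_gsi_ep_info()):

    /* Illustrative caller, not part of the patch. */
    static void example_halt(void)
    {
        int code = 0;
        int ret = gsi_halt_channel_ee(5 /* chan_idx */, 0 /* ee */, &code);

        if (ret != GSI_STATUS_SUCCESS)
            pr_err("halt transport failure: %d\n", ret);
        else if (code != GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS)
            pr_err("fw rejected halt, return code %d\n", code);
    }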
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 750ae2b329d3..d0eb162c49cf 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -115,9 +115,12 @@ struct gsi_evt_ctx {
struct gsi_ee_scratch {
union __packed {
struct {
- uint32_t resvd1:15;
+ uint32_t inter_ee_cmd_return_code:3;
+ uint32_t resvd1:2;
+ uint32_t generic_ee_cmd_return_code:3;
+ uint32_t resvd2:7;
uint32_t max_usb_pkt_size:1;
- uint32_t resvd2:8;
+ uint32_t resvd3:8;
uint32_t mhi_base_chan_idx:8;
} s;
uint32_t val;
@@ -135,6 +138,10 @@ struct ch_debug_stats {
unsigned long cmd_completed;
};
+struct gsi_generic_ee_cmd_debug_stats {
+ unsigned long halt_channel;
+};
+
struct gsi_ctx {
void __iomem *base;
struct device *dev;
@@ -143,6 +150,7 @@ struct gsi_ctx {
struct gsi_chan_ctx chan[GSI_CHAN_MAX];
struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
+ struct gsi_generic_ee_cmd_debug_stats gen_ee_cmd_dbg;
struct mutex mlock;
spinlock_t slock;
unsigned long evt_bmap;
@@ -154,6 +162,7 @@ struct gsi_ctx {
struct workqueue_struct *dp_stat_wq;
u32 max_ch;
u32 max_ev;
+ struct completion gen_ee_cmd_compl;
};
enum gsi_re_type {
@@ -227,6 +236,18 @@ enum gsi_evt_ch_cmd_opcode {
GSI_EVT_DE_ALLOC = 0xa,
};
+enum gsi_generic_ee_cmd_opcode {
+ GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
+};
+
+enum gsi_generic_ee_cmd_return_code {
+ GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS = 0x1,
+ GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING = 0x2,
+ GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_DIRECTION = 0x3,
+ GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4,
+ GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5,
+};
+
extern struct gsi_ctx *gsi_ctx;
void gsi_debugfs_init(void);
uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr);
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
index 1acaf74a0968..d0462aad72d2 100644
--- a/drivers/platform/msm/gsi/gsi_reg.h
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -1365,8 +1365,12 @@
(GSI_GSI_REG_BASE_OFFS + 0x0001f018 + 0x4000 * (n))
#define GSI_EE_n_GSI_EE_GENERIC_CMD_RMSK 0xffffffff
#define GSI_EE_n_GSI_EE_GENERIC_CMD_MAXn 3
-#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0xffffffff
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0x1f
#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK 0x3e0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT 0x5
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK 0x3c00
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT 0xa
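
Composed with these masks, a HALT_CHANNEL generic command for virtual channel 5 on EE 0 encodes as (worked example):

    /* op   = GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1 -> bits [4:0]   = 0x001
     * chan = 5                                 -> bits [9:5]   = 5 << 5  = 0x0a0
     * ee   = 0                                 -> bits [13:10] = 0 << 10 = 0x000
     * val  = 0x001 | 0x0a0 | 0x000 = 0x0a1
     */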
/* v1.0 */
#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
index 66e329a03df7..74e7394a80b1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -95,6 +95,46 @@ int ipa_disable_data_path(u32 clnt_hdl)
return res;
}
+int ipa2_enable_force_clear(u32 request_id, bool throttle_source,
+ u32 source_pipe_bitmask)
+{
+ struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+ int result;
+
+ memset(&req, 0, sizeof(req));
+ req.request_id = request_id;
+ req.source_pipe_bitmask = source_pipe_bitmask;
+ if (throttle_source) {
+ req.throttle_source_valid = 1;
+ req.throttle_source = 1;
+ }
+ result = qmi_enable_force_clear_datapath_send(&req);
+ if (result) {
+ IPAERR("qmi_enable_force_clear_datapath_send failed %d\n",
+ result);
+ return result;
+ }
+
+ return 0;
+}
+
+int ipa2_disable_force_clear(u32 request_id)
+{
+ struct ipa_disable_force_clear_datapath_req_msg_v01 req;
+ int result;
+
+ memset(&req, 0, sizeof(req));
+ req.request_id = request_id;
+ result = qmi_disable_force_clear_datapath_send(&req);
+ if (result) {
+ IPAERR("qmi_disable_force_clear_datapath_send failed %d\n",
+ result);
+ return result;
+ }
+
+ return 0;
+}
+
static int ipa2_smmu_map_peer_bam(unsigned long dev)
{
phys_addr_t base;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index b45e748b66a6..94d76db6f993 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -1803,6 +1803,9 @@ void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx);
int ipa_enable_data_path(u32 clnt_hdl);
int ipa_disable_data_path(u32 clnt_hdl);
+int ipa2_enable_force_clear(u32 request_id, bool throttle_source,
+ u32 source_pipe_bitmask);
+int ipa2_disable_force_clear(u32 request_id);
int ipa_id_alloc(void *ptr);
void *ipa_id_find(u32 id);
void ipa_id_remove(u32 id);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index d88e5a6d3d73..e2ac9bfceed7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -39,6 +39,8 @@
#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000
#define QMI_SEND_REQ_TIMEOUT_MS 60000
+#define QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS 1000
+
static struct qmi_handle *ipa_svc_handle;
static void ipa_a5_svc_recv_msg(struct work_struct *work);
static DECLARE_DELAYED_WORK(work_recv_msg, ipa_a5_svc_recv_msg);
@@ -583,7 +585,8 @@ int qmi_enable_force_clear_datapath_send(
&req_desc,
req,
sizeof(*req),
- &resp_desc, &resp, sizeof(resp), 0);
+ &resp_desc, &resp, sizeof(resp),
+ QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS);
if (rc < 0) {
IPAWANERR("send req failed %d\n", rc);
return rc;
@@ -628,7 +631,8 @@ int qmi_disable_force_clear_datapath_send(
&req_desc,
req,
sizeof(*req),
- &resp_desc, &resp, sizeof(resp), 0);
+ &resp_desc, &resp, sizeof(resp),
+ QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS);
if (rc < 0) {
IPAWANERR("send req failed %d\n", rc);
return rc;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 5bda4cb9ed2b..f60669132865 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -1404,7 +1404,6 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
union IpaHwWdiCommonChCmdData_t disable;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
u32 prod_hdl;
- int i;
if (unlikely(!ipa_ctx)) {
IPAERR("IPA driver was not initialized\n");
@@ -1421,28 +1420,6 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
if (result)
return result;
- /* checking rdy_ring_rp_pa matches the rdy_comp_ring_wp_pa on WDI2.0 */
- if (ipa_ctx->ipa_wdi2) {
- for (i = 0; i < IPA_UC_FINISH_MAX; i++) {
- IPADBG("(%d) rp_value(%u), comp_wp_value(%u)\n",
- i,
- *ipa_ctx->uc_ctx.rdy_ring_rp_va,
- *ipa_ctx->uc_ctx.rdy_comp_ring_wp_va);
- if (*ipa_ctx->uc_ctx.rdy_ring_rp_va !=
- *ipa_ctx->uc_ctx.rdy_comp_ring_wp_va) {
- usleep_range(IPA_UC_WAIT_MIN_SLEEP,
- IPA_UC_WAII_MAX_SLEEP);
- } else {
- break;
- }
- }
- /* In case ipa_uc still haven't processed all
- * pending descriptors, we have to assert
- */
- if (i == IPA_UC_FINISH_MAX)
- BUG();
- }
-
IPADBG("ep=%d\n", clnt_hdl);
ep = &ipa_ctx->ep[clnt_hdl];
@@ -1468,6 +1445,11 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
* holb on IPA Producer pipe
*/
if (IPA_CLIENT_IS_PROD(ep->client)) {
+
+ IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+
+ /* remove delay on wlan-prod pipe*/
memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
@@ -1594,6 +1576,8 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
struct ipa_ep_context *ep;
union IpaHwWdiCommonChCmdData_t suspend;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+ u32 source_pipe_bitmask = 0;
+ bool disable_force_clear = false;
if (unlikely(!ipa_ctx)) {
IPAERR("IPA driver was not initialized\n");
@@ -1623,6 +1607,31 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
suspend.params.ipa_pipe_number = clnt_hdl;
if (IPA_CLIENT_IS_PROD(ep->client)) {
+ /*
+ * For WDI 2.0 need to ensure pipe will be empty before suspend
+ * as IPA uC will fail to suspend the pipe otherwise.
+ */
+ if (ipa_ctx->ipa_wdi2) {
+ source_pipe_bitmask = 1 <<
+ ipa_get_ep_mapping(ep->client);
+ result = ipa2_enable_force_clear(clnt_hdl,
+ false, source_pipe_bitmask);
+ if (result) {
+ /*
+ * assuming here modem SSR, AP can remove
+ * the delay in this case
+ */
+ IPAERR("failed to force clear %d\n", result);
+ IPAERR("remove delay from SCND reg\n");
+ memset(&ep_cfg_ctrl, 0,
+ sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = false;
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ } else {
+ disable_force_clear = true;
+ }
+ }
IPADBG("Post suspend event first for IPA Producer\n");
IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl);
result = ipa_uc_send_cmd(suspend.raw32b,
@@ -1667,6 +1676,9 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
}
}
+ if (disable_force_clear)
+ ipa2_disable_force_clear(clnt_hdl);
+
ipa_ctx->tag_process_before_gating = true;
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
ep->uc_offload_state &= ~IPA_WDI_RESUMED;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index e90f69b466e1..aa681d3eacaa 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1878,6 +1878,45 @@ static void ipa3_q6_avoid_holb(void)
}
}
+static void ipa3_halt_q6_cons_gsi_channels(void)
+{
+ int ep_idx;
+ int client_idx;
+ struct ipa_gsi_ep_config *gsi_ep_cfg;
+ int ret;
+ int code = 0;
+
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
+ ep_idx = ipa3_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+
+ gsi_ep_cfg = ipa3_get_gsi_ep_info(ep_idx);
+ if (!gsi_ep_cfg) {
+ IPAERR("failed to get GSI config\n");
+ ipa_assert();
+ return;
+ }
+
+ ret = gsi_halt_channel_ee(
+ gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
+ &code);
+ if (ret == GSI_STATUS_SUCCESS)
+ IPADBG("halted gsi ch %d ee %d with code %d\n",
+ gsi_ep_cfg->ipa_gsi_chan_num,
+ gsi_ep_cfg->ee,
+ code);
+ else
+ IPAERR("failed to halt ch %d ee %d code %d\n",
+ gsi_ep_cfg->ipa_gsi_chan_num,
+ gsi_ep_cfg->ee,
+ code);
+ }
+ }
+}
+
+
static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
enum ipa_rule_type rlt)
{
@@ -2312,6 +2351,7 @@ void ipa3_q6_post_shutdown_cleanup(void)
/* Handle the issue where SUSPEND was removed for some reason */
ipa3_q6_avoid_holb();
+ ipa3_halt_q6_cons_gsi_channels();
for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
@@ -4116,7 +4156,7 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
if (result) {
IPAERR("FW loading process has failed\n");
- BUG();
+ return result;
} else
ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
}
diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h
index c6fa39d6e0e2..3d40d114437a 100644
--- a/drivers/platform/msm/mhi/mhi.h
+++ b/drivers/platform/msm/mhi/mhi.h
@@ -20,6 +20,7 @@
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/msm_pcie.h>
@@ -29,6 +30,7 @@
#include <linux/dma-mapping.h>
extern struct mhi_pcie_devices mhi_devices;
+struct mhi_device_ctxt;
enum MHI_DEBUG_LEVEL {
MHI_MSG_RAW = 0x1,
@@ -125,6 +127,31 @@ enum MHI_STATE {
MHI_STATE_reserved = 0x80000000
};
+enum MHI_BRSTMODE {
+ /* BRST Mode Enable for HW Channels, SW Channel Disabled */
+ MHI_BRSTMODE_DEFAULT = 0x0,
+ MHI_BRSTMODE_RESERVED = 0x1,
+ MHI_BRSTMODE_DISABLE = 0x2,
+ MHI_BRSTMODE_ENABLE = 0x3
+};
+
+enum MHI_PM_STATE {
+ MHI_PM_DISABLE = 0x0, /* MHI is not enabled */
+ MHI_PM_POR = 0x1, /* Power On Reset State */
+ MHI_PM_M0 = 0x2,
+ MHI_PM_M1 = 0x4,
+ MHI_PM_M1_M2_TRANSITION = 0x8, /* Register access not allowed */
+ MHI_PM_M2 = 0x10,
+ MHI_PM_M3_ENTER = 0x20,
+ MHI_PM_M3 = 0x40,
+ MHI_PM_M3_EXIT = 0x80,
+};
+
+#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | MHI_PM_M1))
+#define MHI_WAKE_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+ MHI_PM_M1 | MHI_PM_M2))
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state > MHI_PM_DISABLE) && \
+ (pm_state < MHI_PM_M3_EXIT))
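
Because the PM states are bit flags, each validity check is a single mask test. The guard pattern these macros serve (used by the bhi_write() hunk later in this patch) looks like this in sketch form:

    /* Sketch of the register-access guard; not an additional hunk. */
    read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
    if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
        read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
        return -EIO; /* no MMIO in DISABLE or M3_EXIT-and-later states */
    }
    /* ... MMIO access is safe here ... */
    read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);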
struct __packed mhi_event_ctxt {
u32 mhi_intmodt;
u32 mhi_event_er_type;
@@ -136,8 +163,11 @@ struct __packed mhi_event_ctxt {
};
struct __packed mhi_chan_ctxt {
- enum MHI_CHAN_STATE mhi_chan_state;
- enum MHI_CHAN_DIR mhi_chan_type;
+ u32 chstate : 8;
+ u32 brstmode : 2;
+ u32 pollcfg : 6;
+ u32 reserved : 16;
+ u32 chtype;
u32 mhi_event_ring_index;
u64 mhi_trb_ring_base_addr;
u64 mhi_trb_ring_len;
@@ -172,11 +202,11 @@ enum MHI_PKT_TYPE {
MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
- MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD = 0x1F,
MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
MHI_PKT_TYPE_TX_EVENT = 0x22,
MHI_PKT_TYPE_EE_EVENT = 0x40,
+ MHI_PKT_TYPE_STALE_EVENT, /* Internal event */
MHI_PKT_TYPE_SYS_ERR_EVENT = 0xFF,
};
@@ -261,6 +291,17 @@ enum MHI_EVENT_CCS {
MHI_EVENT_CC_BAD_TRE = 0x11,
};
+struct db_mode {
+ /* if set do not reset DB_Mode during M0 resume */
+ u32 preserve_db_state : 1;
+ u32 db_mode : 1;
+ enum MHI_BRSTMODE brstmode;
+ void (*process_db)(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val);
+};
+
struct mhi_ring {
void *base;
void *wp;
@@ -270,6 +311,11 @@ struct mhi_ring {
uintptr_t el_size;
u32 overwrite_en;
enum MHI_CHAN_DIR dir;
+ enum MHI_CHAN_STATE ch_state;
+ struct db_mode db_mode;
+ u32 msi_disable_cntr;
+ u32 msi_enable_cntr;
+ spinlock_t ring_lock;
};
enum MHI_CMD_STATUS {
@@ -327,12 +373,19 @@ struct mhi_chan_info {
u32 flags;
};
+struct mhi_chan_cfg {
+ enum MHI_COMMAND current_cmd;
+ struct mutex chan_lock;
+ spinlock_t event_lock; /* completion event lock */
+ struct completion cmd_complete;
+ struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+ union mhi_cmd_pkt cmd_pkt;
+};
+
struct mhi_client_handle {
struct mhi_chan_info chan_info;
struct mhi_device_ctxt *mhi_dev_ctxt;
struct mhi_client_info_t client_info;
- struct completion chan_reset_complete;
- struct completion chan_open_complete;
void *user_data;
struct mhi_result result;
u32 device_index;
@@ -344,12 +397,6 @@ struct mhi_client_handle {
int event_ring_index;
};
-enum MHI_EVENT_POLLING {
- MHI_EVENT_POLLING_DISABLED = 0x0,
- MHI_EVENT_POLLING_ENABLED = 0x1,
- MHI_EVENT_POLLING_reserved = 0x80000000
-};
-
enum MHI_TYPE_EVENT_RING {
MHI_ER_DATA_TYPE = 0x1,
MHI_ER_CTRL_TYPE = 0x2,
@@ -375,46 +422,27 @@ struct mhi_buf_info {
struct mhi_counters {
u32 m0_m1;
- u32 m1_m0;
u32 m1_m2;
u32 m2_m0;
u32 m0_m3;
- u32 m3_m0;
u32 m1_m3;
- u32 mhi_reset_cntr;
- u32 mhi_ready_cntr;
- u32 m3_event_timeouts;
- u32 m0_event_timeouts;
- u32 m2_event_timeouts;
- u32 msi_disable_cntr;
- u32 msi_enable_cntr;
- u32 nr_irq_migrations;
- u32 *msi_counter;
- u32 *ev_counter;
- atomic_t outbound_acks;
+ u32 m3_m0;
u32 chan_pkts_xferd[MHI_MAX_CHANNELS];
u32 bb_used[MHI_MAX_CHANNELS];
+ atomic_t device_wake;
+ atomic_t outbound_acks;
+ atomic_t events_pending;
+ u32 *msi_counter;
+ u32 mhi_reset_cntr;
};
struct mhi_flags {
u32 mhi_initialized;
- u32 pending_M3;
- u32 pending_M0;
u32 link_up;
- u32 kill_threads;
- atomic_t data_pending;
- atomic_t events_pending;
- atomic_t pending_resume;
- atomic_t pending_ssr;
- atomic_t pending_powerup;
- atomic_t m2_transition;
int stop_threads;
- atomic_t device_wake;
- u32 ssr;
+ u32 kill_threads;
u32 ev_thread_stopped;
u32 st_thread_stopped;
- u32 uldl_enabled;
- u32 db_mode[MHI_MAX_CHANNELS];
};
struct mhi_wait_queues {
@@ -458,44 +486,35 @@ struct mhi_dev_space {
};
struct mhi_device_ctxt {
- enum MHI_STATE mhi_state;
+ enum MHI_PM_STATE mhi_pm_state; /* Host driver state */
+ enum MHI_STATE mhi_state; /* protocol state */
enum MHI_EXEC_ENV dev_exec_env;
struct mhi_dev_space dev_space;
struct mhi_pcie_dev_info *dev_info;
struct pcie_core_info *dev_props;
struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS];
-
struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
struct mhi_ring *mhi_local_event_ctxt;
struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];
+ struct mhi_chan_cfg mhi_chan_cfg[MHI_MAX_CHANNELS];
+
- struct mutex *mhi_chan_mutex;
- struct mutex mhi_link_state;
- spinlock_t *mhi_ev_spinlock_list;
- struct mutex *mhi_cmd_mutex_list;
struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
struct mhi_event_ring_cfg *ev_ring_props;
struct task_struct *event_thread_handle;
struct task_struct *st_thread_handle;
+ struct tasklet_struct ev_task; /* Process control Events */
+ struct work_struct process_m1_worker;
struct mhi_wait_queues mhi_ev_wq;
struct dev_mmio_info mmio_info;
- u32 mhi_chan_db_order[MHI_MAX_CHANNELS];
- u32 mhi_ev_db_order[MHI_MAX_CHANNELS];
- spinlock_t *db_write_lock;
-
struct mhi_state_work_queue state_change_work_item_list;
- enum MHI_CMD_STATUS mhi_chan_pend_cmd_ack[MHI_MAX_CHANNELS];
- u32 cmd_ring_order;
struct mhi_counters counters;
struct mhi_flags flags;
- u32 device_wake_asserted;
-
- rwlock_t xfer_lock;
struct hrtimer m1_timer;
ktime_t m1_timeout;
@@ -508,11 +527,12 @@ struct mhi_device_ctxt {
unsigned long esoc_notif;
enum STATE_TRANSITION base_state;
- atomic_t outbound_acks;
+
+ rwlock_t pm_xfer_lock; /* lock to control PM State */
+ spinlock_t dev_wake_lock; /* lock to set wake bit */
struct mutex pm_lock;
struct wakeup_source w_lock;
- int enable_lpm;
char *chan_info;
struct dentry *mhi_parent_folder;
};
@@ -577,7 +597,9 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
enum MHI_CHAN_DIR chan_type,
u32 event_ring,
struct mhi_ring *ring,
- enum MHI_CHAN_STATE chan_state);
+ enum MHI_CHAN_STATE chan_state,
+ bool preserve_db_state,
+ enum MHI_BRSTMODE brstmode);
int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan);
@@ -622,8 +644,9 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_CB_REASON reason);
void mhi_notify_client(struct mhi_client_handle *client_handle,
enum MHI_CB_REASON reason);
-int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool force_set);
int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
void *hcpu);
@@ -635,6 +658,14 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
int index);
int start_chan_sync(struct mhi_client_handle *client_handle);
+void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val);
+void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val);
void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt, void __iomem *io_addr,
uintptr_t io_offset, u32 val);
void mhi_reg_write_field(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -651,12 +682,19 @@ int mhi_runtime_suspend(struct device *dev);
int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
struct mhi_chan_info *chan_info);
int mhi_runtime_resume(struct device *dev);
-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_runtime_idle(struct device *dev);
int init_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_TYPE_EVENT_RING type);
void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
int index);
void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt);
+void process_m1_transition(struct work_struct *work);
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev);
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+ enum MHI_STATE new_state);
+const char *state_transition_str(enum STATE_TRANSITION state);
+void mhi_ctrl_ev_task(unsigned long data);
#endif
diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c
index 4b63e880a44f..113791a62c38 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.c
+++ b/drivers/platform/msm/mhi/mhi_bhi.c
@@ -41,6 +41,9 @@ static ssize_t bhi_write(struct file *file,
size_t amount_copied = 0;
uintptr_t align_len = 0x1000;
u32 tx_db_val = 0;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+ const long bhi_timeout_ms = 1000;
+ long timeout;
if (buf == NULL || 0 == count)
return -EIO;
@@ -48,8 +51,12 @@ static ssize_t bhi_write(struct file *file,
if (count > BHI_MAX_IMAGE_SIZE)
return -ENOMEM;
- wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
+ timeout = wait_event_interruptible_timeout(
+ *mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
+ msecs_to_jiffies(bhi_timeout_ms));
+ if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI)
+ return -EIO;
mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%zx\n", count);
@@ -95,6 +102,11 @@ static ssize_t bhi_write(struct file *file,
bhi_ctxt->image_size = count;
/* Write the image size */
+ read_lock_bh(pm_xfer_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_bh(pm_xfer_lock);
+ goto bhi_copy_error;
+ }
pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
BHI_IMGADDR_HIGH,
@@ -119,10 +131,15 @@ static ssize_t bhi_write(struct file *file,
BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);
mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);
-
+ read_unlock_bh(pm_xfer_lock);
for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
u32 err = 0, errdbg1 = 0, errdbg2 = 0, errdbg3 = 0;
+ read_lock_bh(pm_xfer_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_bh(pm_xfer_lock);
+ goto bhi_copy_error;
+ }
err = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRCODE);
errdbg1 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG1);
errdbg2 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG2);
@@ -131,6 +148,7 @@ static ssize_t bhi_write(struct file *file,
BHI_STATUS,
BHI_STATUS_MASK,
BHI_STATUS_SHIFT);
+ read_unlock_bh(pm_xfer_lock);
mhi_log(MHI_MSG_CRITICAL,
"BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
tx_db_val, err, errdbg1, errdbg2, errdbg3);
@@ -176,9 +194,6 @@ int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
|| 0 == mhi_pcie_device->core.bar0_end)
return -EIO;
- mhi_log(MHI_MSG_INFO,
- "Successfully registered char dev. bhi base is: 0x%p.\n",
- bhi_ctxt->bhi_base);
ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi");
if (IS_ERR_VALUE(ret_val)) {
mhi_log(MHI_MSG_CRITICAL,
diff --git a/drivers/platform/msm/mhi/mhi_event.c b/drivers/platform/msm/mhi/mhi_event.c
index aa8500d277cd..fe163f3895a5 100644
--- a/drivers/platform/msm/mhi/mhi_event.c
+++ b/drivers/platform/msm/mhi/mhi_event.c
@@ -89,32 +89,31 @@ dt_error:
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r = 0;
+ int i;
mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring)*
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
-
if (!mhi_dev_ctxt->mhi_local_event_ctxt)
return -ENOMEM;
- mhi_dev_ctxt->counters.ev_counter = kzalloc(sizeof(u32) *
- mhi_dev_ctxt->mmio_info.nr_event_rings,
- GFP_KERNEL);
- if (!mhi_dev_ctxt->counters.ev_counter) {
- r = -ENOMEM;
- goto free_local_ec_list;
- }
mhi_dev_ctxt->counters.msi_counter = kzalloc(sizeof(u32) *
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
if (!mhi_dev_ctxt->counters.msi_counter) {
r = -ENOMEM;
- goto free_ev_counter;
+ goto free_local_ec_list;
}
+
+ for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[i];
+
+ spin_lock_init(&mhi_ring->ring_lock);
+ }
+
return r;
-free_ev_counter:
- kfree(mhi_dev_ctxt->counters.ev_counter);
free_local_ec_list:
kfree(mhi_dev_ctxt->mhi_local_event_ctxt);
return r;
@@ -123,19 +122,27 @@ void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
{
struct mhi_ring *event_ctxt = NULL;
u64 db_value = 0;
+ unsigned long flags;
event_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
+ spin_lock_irqsave(&event_ctxt->ring_lock, flags);
db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
event_ring_index,
(uintptr_t) event_ctxt->wp);
- mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.event_db_addr,
- event_ring_index, db_value);
+ event_ctxt->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.event_db_addr,
+ event_ring_index,
+ db_value);
+ spin_unlock_irqrestore(&event_ctxt->ring_lock, flags);
}
static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
- struct mhi_ring *ring, u32 el_per_ring,
- u32 intmodt_val, u32 msi_vec)
+ struct mhi_ring *ring,
+ u32 el_per_ring,
+ u32 intmodt_val,
+ u32 msi_vec,
+ enum MHI_BRSTMODE brstmode)
{
ev_list->mhi_event_er_type = MHI_EVENT_RING_TYPE_VALID;
ev_list->mhi_msi_vector = msi_vec;
@@ -144,6 +151,20 @@ static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
ring->len = ((size_t)(el_per_ring)*sizeof(union mhi_event_pkt));
ring->el_size = sizeof(union mhi_event_pkt);
ring->overwrite_en = 0;
+
+ ring->db_mode.db_mode = 1;
+ ring->db_mode.brstmode = brstmode;
+ switch (ring->db_mode.brstmode) {
+ case MHI_BRSTMODE_ENABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode;
+ break;
+ case MHI_BRSTMODE_DISABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+ break;
+ default:
+ ring->db_mode.process_db = mhi_process_db;
+ }
+
/* Flush writes to MMIO */
wmb();
return 0;
@@ -159,9 +180,12 @@ void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt,
- mhi_dev_ctxt->ev_ring_props[i].nr_desc,
- mhi_dev_ctxt->ev_ring_props[i].intmod,
- mhi_dev_ctxt->ev_ring_props[i].msi_vec);
+ mhi_dev_ctxt->ev_ring_props[i].nr_desc,
+ mhi_dev_ctxt->ev_ring_props[i].intmod,
+ mhi_dev_ctxt->ev_ring_props[i].msi_vec,
+ GET_EV_PROPS(EV_BRSTMODE,
+ mhi_dev_ctxt->
+ ev_ring_props[i].flags));
}
}
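/*
 * Illustrative sketch, not part of the patch: the burst-mode dispatch that
 * mhi_event_ring_init() sets up above.  Selecting the process_db callback
 * once at init time keeps the doorbell hot path to a single indirect call
 * instead of a per-doorbell switch.  All types and names below are reduced
 * stand-ins, compiled as a standalone program.
 */
#include <stdio.h>

enum brstmode { BRST_ENABLE, BRST_DISABLE, BRST_DEFAULT };

struct demo_ring {
	void (*process_db)(unsigned int idx, unsigned long long val);
};

static void db_burst(unsigned int idx, unsigned long long val)
{
	printf("burst-mode db: ring %u val 0x%llx\n", idx, val);
}

static void db_direct(unsigned int idx, unsigned long long val)
{
	printf("direct db: ring %u val 0x%llx\n", idx, val);
}

static void demo_ring_init(struct demo_ring *ring, enum brstmode mode)
{
	switch (mode) {
	case BRST_ENABLE:
		ring->process_db = db_burst;
		break;
	case BRST_DISABLE:
	default:
		ring->process_db = db_direct;
		break;
	}
}

int main(void)
{
	struct demo_ring ring;

	demo_ring_init(&ring, BRST_ENABLE);
	ring.process_db(0, 0x1000);	/* one indirect call per doorbell */
	return 0;
}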
@@ -219,10 +243,9 @@ int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 i = 0;
unsigned long flags = 0;
int ret_val = 0;
- spinlock_t *lock =
- &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
struct mhi_ring *event_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[ring_index];
+ spinlock_t *lock = &event_ctxt->ring_lock;
if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
mhi_log(MHI_MSG_ERROR, "Bad Input data, quitting\n");
diff --git a/drivers/platform/msm/mhi/mhi_hwio.h b/drivers/platform/msm/mhi/mhi_hwio.h
index 370ee0ebab7e..88a6a74566e6 100644
--- a/drivers/platform/msm/mhi/mhi_hwio.h
+++ b/drivers/platform/msm/mhi/mhi_hwio.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,14 +23,14 @@
#define MHICFG (0x10)
-#define MHICFG_RESERVED_BITS31_24_MASK 0xff000000
-#define MHICFG_RESERVED_BITS31_24_SHIFT 0x18
-#define MHICFG_NER_MASK 0xff0000
-#define MHICFG_NER_SHIFT 0x10
-#define MHICFG_RESERVED_BITS15_8_MASK 0xff00
-#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8
-#define MHICFG_NCH_MASK 0xff
-#define MHICFG_NCH_SHIFT 0x0
+#define MHICFG_NHWER_MASK (0xff000000)
+#define MHICFG_NHWER_SHIFT (24)
+#define MHICFG_NER_MASK (0xff0000)
+#define MHICFG_NER_SHIFT (16)
+#define MHICFG_NHWCH_MASK (0xff00)
+#define MHICFG_NHWCH_SHIFT (8)
+#define MHICFG_NCH_MASK (0xff)
+#define MHICFG_NCH_SHIFT (0)
#define CHDBOFF (0x18)
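/*
 * Illustrative sketch, not part of the patch: decoding MHICFG with the
 * renamed field macros above.  The register value is hypothetical; the
 * masks and shifts are the ones defined in this hunk.
 */
#include <stdio.h>

#define MHICFG_NHWER_MASK (0xff000000)
#define MHICFG_NHWER_SHIFT (24)
#define MHICFG_NER_MASK (0xff0000)
#define MHICFG_NER_SHIFT (16)

int main(void)
{
	unsigned int mhicfg = 0x02050080;	/* hypothetical readback */
	unsigned int ner = (mhicfg & MHICFG_NER_MASK) >> MHICFG_NER_SHIFT;
	unsigned int nhwer = (mhicfg & MHICFG_NHWER_MASK) >> MHICFG_NHWER_SHIFT;

	/* prints "event rings: 5, hw event rings: 2" */
	printf("event rings: %u, hw event rings: %u\n", ner, nhwer);
	return 0;
}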
diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c
index b3fff19d8fa4..395e19c91f35 100644
--- a/drivers/platform/msm/mhi/mhi_iface.c
+++ b/drivers/platform/msm/mhi/mhi_iface.c
@@ -96,22 +96,6 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
"Failed to register with esoc ret %d.\n",
ret_val);
}
- mhi_pcie_dev->mhi_ctxt.bus_scale_table =
- msm_bus_cl_get_pdata(mhi_pcie_dev->plat_dev);
- mhi_pcie_dev->mhi_ctxt.bus_client =
- msm_bus_scale_register_client(
- mhi_pcie_dev->mhi_ctxt.bus_scale_table);
- if (!mhi_pcie_dev->mhi_ctxt.bus_client) {
- mhi_log(MHI_MSG_CRITICAL,
- "Could not register for bus control ret: %d.\n",
- mhi_pcie_dev->mhi_ctxt.bus_client);
- } else {
- ret_val = mhi_set_bus_request(&mhi_pcie_dev->mhi_ctxt, 1);
- if (ret_val)
- mhi_log(MHI_MSG_CRITICAL,
- "Could not set bus frequency ret: %d\n",
- ret_val);
- }
device_disable_async_suspend(&pcie_device->dev);
ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number);
@@ -188,9 +172,7 @@ mhi_state_transition_error:
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
- kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
- kfree(mhi_dev_ctxt->mhi_chan_mutex);
- kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
+
kfree(mhi_dev_ctxt->ev_ring_props);
mhi_rem_pm_sysfs(&pcie_device->dev);
sysfs_config_err:
@@ -203,7 +185,9 @@ msi_config_err:
}
static const struct dev_pm_ops pm_ops = {
- SET_RUNTIME_PM_OPS(mhi_runtime_suspend, mhi_runtime_resume, NULL)
+ SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
+ mhi_runtime_resume,
+ mhi_runtime_idle)
SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
};
@@ -217,14 +201,15 @@ static struct pci_driver mhi_pcie_driver = {
};
static int mhi_pci_probe(struct pci_dev *pcie_device,
- const struct pci_device_id *mhi_device_id)
+ const struct pci_device_id *mhi_device_id)
{
int ret_val = 0;
struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
struct platform_device *plat_dev;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
u32 nr_dev = mhi_devices.nr_of_devices;
- mhi_log(MHI_MSG_INFO, "Entering.\n");
+ mhi_log(MHI_MSG_INFO, "Entering\n");
mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
if (mhi_devices.nr_of_devices + 1 > MHI_MAX_SUPPORTED_DEVICES) {
mhi_log(MHI_MSG_ERROR, "Error: Too many devices\n");
@@ -234,29 +219,120 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
mhi_devices.nr_of_devices++;
plat_dev = mhi_devices.device_list[nr_dev].plat_dev;
pcie_device->dev.of_node = plat_dev->dev.of_node;
- pm_runtime_put_noidle(&pcie_device->dev);
+ mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
+ INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
+ mutex_init(&mhi_dev_ctxt->pm_lock);
+ rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
+ spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
+ tasklet_init(&mhi_dev_ctxt->ev_task,
+ mhi_ctrl_ev_task,
+ (unsigned long)mhi_dev_ctxt);
+
+ mhi_dev_ctxt->flags.link_up = 1;
+ ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
mhi_pcie_dev->pcie_device = pcie_device;
mhi_pcie_dev->mhi_pcie_driver = &mhi_pcie_driver;
mhi_pcie_dev->mhi_pci_link_event.events =
- (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_LINKUP |
- MSM_PCIE_EVENT_WAKEUP);
+ (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP);
mhi_pcie_dev->mhi_pci_link_event.user = pcie_device;
mhi_pcie_dev->mhi_pci_link_event.callback = mhi_link_state_cb;
mhi_pcie_dev->mhi_pci_link_event.notify.data = mhi_pcie_dev;
ret_val = msm_pcie_register_event(&mhi_pcie_dev->mhi_pci_link_event);
- if (ret_val)
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
"Failed to register for link notifications %d.\n",
ret_val);
+ return ret_val;
+ }
+
+ /* Initialize MHI CNTXT */
+ ret_val = mhi_ctxt_init(mhi_pcie_dev);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "MHI Initialization failed, ret %d\n",
+ ret_val);
+ goto deregister_pcie;
+ }
+ pci_set_master(mhi_pcie_dev->pcie_device);
+
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_POR;
+ ret_val = set_mhi_base_state(mhi_pcie_dev);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error Setting MHI Base State %d\n", ret_val);
+ goto unlock_pm_lock;
+ }
+
+ if (mhi_dev_ctxt->base_state == STATE_TRANSITION_BHI) {
+ ret_val = bhi_probe(mhi_pcie_dev);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error with bhi_probe ret:%d", ret_val);
+ goto unlock_pm_lock;
+ }
+ }
+
+ init_mhi_base_state(mhi_dev_ctxt);
+
+ pm_runtime_set_autosuspend_delay(&pcie_device->dev,
+ MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
+ pm_runtime_use_autosuspend(&pcie_device->dev);
+ pm_suspend_ignore_children(&pcie_device->dev, true);
+
+ /*
+ * The PCI framework increments the usage count twice before
+ * calling the local device driver's probe function:
+ * 1st pci.c pci_pm_init() calls pm_runtime_forbid
+ * 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
+ * The framework expects the PCI device driver to call
+ * pm_runtime_put_noidle to drop the usage count after a
+ * successful probe and pm_runtime_allow to enable runtime
+ * suspend.  MHI will allow runtime suspend after entering the
+ * AMSS state.
+ */
+ pm_runtime_mark_last_busy(&pcie_device->dev);
+ pm_runtime_put_noidle(&pcie_device->dev);
+
+ /*
+ * Keep MHI in the active (M0) state until AMSS, because the EP
+ * raises an error fatal if we try to enter M1 before it reaches
+ * the AMSS state.
+ */
+ read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+
+ return 0;
+
+unlock_pm_lock:
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+deregister_pcie:
+ msm_pcie_deregister_event(&mhi_pcie_dev->mhi_pci_link_event);
return ret_val;
}
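/*
 * Illustrative sketch, not part of the patch: the runtime-PM hand-off
 * described in the comment above, reduced to the calls the probe path
 * makes.  The helper name is hypothetical; dev is the PCIe function's
 * struct device, and the driver later calls pm_runtime_allow() once the
 * device reaches AMSS.
 */
#include <linux/pm_runtime.h>

static void example_enable_rpm(struct device *dev, int autosuspend_ms)
{
	pm_runtime_set_autosuspend_delay(dev, autosuspend_ms);
	pm_runtime_use_autosuspend(dev);
	pm_suspend_ignore_children(dev, true);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_noidle(dev);	/* balance local_pci_probe()'s get */
}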
static int mhi_plat_probe(struct platform_device *pdev)
{
u32 nr_dev = mhi_devices.nr_of_devices;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
int r = 0;
mhi_log(MHI_MSG_INFO, "Entered\n");
+ mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+
+ mhi_dev_ctxt->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (!mhi_dev_ctxt->bus_scale_table)
+ return -ENODATA;
+ mhi_dev_ctxt->bus_client = msm_bus_scale_register_client
+ (mhi_dev_ctxt->bus_scale_table);
+ if (!mhi_dev_ctxt->bus_client)
+ return -EINVAL;
+
mhi_devices.device_list[nr_dev].plat_dev = pdev;
r = dma_set_mask(&pdev->dev, MHI_DMA_MASK);
if (r)
diff --git a/drivers/platform/msm/mhi/mhi_init.c b/drivers/platform/msm/mhi/mhi_init.c
index 93c3de35473b..a496c81239bf 100644
--- a/drivers/platform/msm/mhi/mhi_init.c
+++ b/drivers/platform/msm/mhi/mhi_init.c
@@ -27,46 +27,21 @@ static int mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int i;
- mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
- mhi_dev_ctxt->mmio_info.nr_event_rings,
- GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->mhi_ev_spinlock_list)
- goto ev_mutex_free;
- mhi_dev_ctxt->mhi_chan_mutex = kmalloc(sizeof(struct mutex) *
- MHI_MAX_CHANNELS, GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->mhi_chan_mutex)
- goto chan_mutex_free;
- mhi_dev_ctxt->mhi_cmd_mutex_list = kmalloc(sizeof(struct mutex) *
- NR_OF_CMD_RINGS, GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->mhi_cmd_mutex_list)
- goto cmd_mutex_free;
-
- mhi_dev_ctxt->db_write_lock = kmalloc(sizeof(spinlock_t) *
- MHI_MAX_CHANNELS, GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->db_write_lock)
- goto db_write_lock_free;
- for (i = 0; i < MHI_MAX_CHANNELS; ++i)
- mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
- for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
- spin_lock_init(&mhi_dev_ctxt->mhi_ev_spinlock_list[i]);
- for (i = 0; i < NR_OF_CMD_RINGS; ++i)
- mutex_init(&mhi_dev_ctxt->mhi_cmd_mutex_list[i]);
- for (i = 0; i < MHI_MAX_CHANNELS; ++i)
- spin_lock_init(&mhi_dev_ctxt->db_write_lock[i]);
- rwlock_init(&mhi_dev_ctxt->xfer_lock);
- mutex_init(&mhi_dev_ctxt->mhi_link_state);
- mutex_init(&mhi_dev_ctxt->pm_lock);
- atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
- return 0;
+ for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
+ struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
-db_write_lock_free:
- kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-cmd_mutex_free:
- kfree(mhi_dev_ctxt->mhi_chan_mutex);
-chan_mutex_free:
- kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
-ev_mutex_free:
- return -ENOMEM;
+ mutex_init(&mhi_dev_ctxt->mhi_chan_cfg[i].chan_lock);
+ spin_lock_init(&mhi_dev_ctxt->mhi_chan_cfg[i].event_lock);
+ spin_lock_init(&ring->ring_lock);
+ }
+
+ for (i = 0; i < NR_OF_CMD_RINGS; i++) {
+ struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_cmd_ctxt[i];
+
+ spin_lock_init(&ring->ring_lock);
+ }
+
+ return 0;
}
size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -115,7 +90,7 @@ void init_dev_chan_ctxt(struct mhi_chan_ctxt *chan_ctxt,
chan_ctxt->mhi_trb_write_ptr = p_base_addr;
chan_ctxt->mhi_trb_ring_len = len;
/* Prepopulate the channel ctxt */
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
chan_ctxt->mhi_event_ring_index = ev_index;
}
@@ -173,6 +148,8 @@ static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
ring[PRIMARY_CMD_RING].len = ring_size;
ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt);
ring[PRIMARY_CMD_RING].overwrite_en = 0;
+ ring[PRIMARY_CMD_RING].db_mode.process_db =
+ mhi_process_db_brstmode_disable;
return 0;
}
@@ -547,7 +524,6 @@ int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
}
init_event_ctxt_array(mhi_dev_ctxt);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- mhi_dev_ctxt->enable_lpm = 1;
r = mhi_spawn_threads(mhi_dev_ctxt);
if (r) {
@@ -573,9 +549,6 @@ error_wq_init:
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
error_during_dev_mem_init:
error_during_local_ev_ctxt:
- kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
- kfree(mhi_dev_ctxt->mhi_chan_mutex);
- kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
error_during_sync:
kfree(mhi_dev_ctxt->ev_ring_props);
error_during_props:
@@ -585,24 +558,28 @@ error_during_props:
/**
* @brief Initialize the channel context and shadow context
*
- * @cc_list: Context to initialize
- * @trb_list_phy: Physical base address for the TRE ring
- * @trb_list_virt: Virtual base address for the TRE ring
- * @el_per_ring: Number of TREs this ring will contain
- * @chan_type: Type of channel IN/OUT
- * @event_ring: Event ring to be mapped to this channel context
- * @ring: Shadow context to be initialized alongside
- *
+ * @cc_list: Context to initialize
+ * @trb_list_phy: Physical base address for the TRE ring
+ * @trb_list_virt: Virtual base address for the TRE ring
+ * @el_per_ring: Number of TREs this ring will contain
+ * @chan_type: Type of channel IN/OUT
+ * @event_ring: Event ring to be mapped to this channel context
+ * @ring: Shadow context to be initialized alongside
+ * @chan_state: Channel state
+ * @preserve_db_state: Do not reset DB state during resume
* @Return errno
*/
int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
- uintptr_t trb_list_phy, uintptr_t trb_list_virt,
- u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
- u32 event_ring, struct mhi_ring *ring,
- enum MHI_CHAN_STATE chan_state)
+ uintptr_t trb_list_phy, uintptr_t trb_list_virt,
+ u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
+ u32 event_ring, struct mhi_ring *ring,
+ enum MHI_CHAN_STATE chan_state,
+ bool preserve_db_state,
+ enum MHI_BRSTMODE brstmode)
{
- cc_list->mhi_chan_state = chan_state;
- cc_list->mhi_chan_type = chan_type;
+ cc_list->brstmode = brstmode;
+ cc_list->chstate = chan_state;
+ cc_list->chtype = chan_type;
cc_list->mhi_event_ring_index = event_ring;
cc_list->mhi_trb_ring_base_addr = trb_list_phy;
cc_list->mhi_trb_ring_len =
@@ -617,6 +594,22 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
ring->el_size = sizeof(struct mhi_tx_pkt);
ring->overwrite_en = 0;
ring->dir = chan_type;
+ ring->ch_state = MHI_CHAN_STATE_DISABLED;
+ ring->db_mode.db_mode = 1;
+ ring->db_mode.preserve_db_state = (preserve_db_state) ? 1 : 0;
+ ring->db_mode.brstmode = brstmode;
+
+ switch (ring->db_mode.brstmode) {
+ case MHI_BRSTMODE_ENABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode;
+ break;
+ case MHI_BRSTMODE_DISABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+ break;
+ default:
+ ring->db_mode.process_db = mhi_process_db;
+ }
+
/* Flush writes to MMIO */
wmb();
return 0;
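/*
 * Illustrative sketch, not part of the patch: the per-ring state that
 * replaces the old global mutex/spinlock arrays freed in this file.  Each
 * ring now owns its lock and its doorbell policy, so init reduces to one
 * loop over the rings.  The struct layout below is an assumed reduction.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_db_cfg {
	unsigned int db_mode:1;		   /* ring doorbell on next queue */
	unsigned int preserve_db_state:1;  /* keep DB setting across resume */
	unsigned int brstmode:2;	   /* burst-mode selector */
};

struct demo_mhi_ring {
	spinlock_t ring_lock;		/* protects rp/wp and the doorbell */
	struct demo_db_cfg db_cfg;
};

static void demo_ring_setup(struct demo_mhi_ring *ring, bool preserve)
{
	spin_lock_init(&ring->ring_lock);
	ring->db_cfg.db_mode = 1;
	ring->db_cfg.preserve_db_state = preserve ? 1 : 0;
}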
diff --git a/drivers/platform/msm/mhi/mhi_isr.c b/drivers/platform/msm/mhi/mhi_isr.c
index 5336b9ff0c11..d7c604419593 100644
--- a/drivers/platform/msm/mhi/mhi_isr.c
+++ b/drivers/platform/msm/mhi/mhi_isr.c
@@ -15,57 +15,6 @@
#include "mhi_sys.h"
#include "mhi_trace.h"
-irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
-{
- struct device *mhi_device = dev_id;
- struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
-
- if (!mhi_dev_ctxt) {
- mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
- return IRQ_HANDLED;
- }
- mhi_dev_ctxt->counters.msi_counter[
- IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
- mhi_log(MHI_MSG_VERBOSE,
- "Got MSI 0x%x\n", IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
- trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
- atomic_inc(&mhi_dev_ctxt->flags.events_pending);
- wake_up_interruptible(
- mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
- return IRQ_HANDLED;
-}
-
-irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
-{
- struct device *mhi_device = dev_id;
- u32 client_index;
- struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
- struct mhi_client_handle *client_handle;
- struct mhi_client_info_t *client_info;
- struct mhi_cb_info cb_info;
- int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
-
- mhi_dev_ctxt->counters.msi_counter[msi_num]++;
- mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
- trace_mhi_msi(msi_num);
- client_index = MHI_MAX_CHANNELS -
- (mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
- client_handle = mhi_dev_ctxt->client_handle_list[client_index];
- client_info = &client_handle->client_info;
- if (likely(NULL != client_handle)) {
- client_handle->result.user_data =
- client_handle->user_data;
- if (likely(NULL != &client_info->mhi_client_cb)) {
- cb_info.result = &client_handle->result;
- cb_info.cb_reason = MHI_CB_XFER;
- cb_info.chan = client_handle->chan_info.chan_nr;
- cb_info.result->transaction_status = 0;
- client_info->mhi_client_cb(&cb_info);
- }
- }
- return IRQ_HANDLED;
-}
-
static int mhi_process_event_ring(
struct mhi_device_ctxt *mhi_dev_ctxt,
u32 ev_index,
@@ -76,14 +25,21 @@ static int mhi_process_event_ring(
union mhi_event_pkt event_to_process;
int ret_val = 0;
struct mhi_event_ctxt *ev_ctxt = NULL;
- union mhi_cmd_pkt *cmd_pkt = NULL;
- union mhi_event_pkt *ev_ptr = NULL;
+ unsigned long flags;
struct mhi_ring *local_ev_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
- u32 event_code;
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
+ mhi_log(MHI_MSG_ERROR, "Invalid MHI PM State\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ return -EIO;
+ }
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ev_index];
+ spin_lock_irqsave(&local_ev_ctxt->ring_lock, flags);
device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
mhi_dev_ctxt,
MHI_RING_TYPE_EVENT_RING,
@@ -91,64 +47,105 @@ static int mhi_process_event_ring(
ev_ctxt->mhi_event_read_ptr);
local_rp = (union mhi_event_pkt *)local_ev_ctxt->rp;
-
+ spin_unlock_irqrestore(&local_ev_ctxt->ring_lock, flags);
BUG_ON(validate_ev_el_addr(local_ev_ctxt, (uintptr_t)device_rp));
while ((local_rp != device_rp) && (event_quota > 0) &&
(device_rp != NULL) && (local_rp != NULL)) {
+
+ spin_lock_irqsave(&local_ev_ctxt->ring_lock, flags);
event_to_process = *local_rp;
- ev_ptr = &event_to_process;
- event_code = get_cmd_pkt(mhi_dev_ctxt,
- ev_ptr, &cmd_pkt, ev_index);
- if (((MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process)) ==
- MHI_PKT_TYPE_CMD_COMPLETION_EVENT)) &&
- (event_code == MHI_EVENT_CC_SUCCESS)) {
- mhi_log(MHI_MSG_INFO, "Command Completion event\n");
- if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt) ==
- MHI_PKT_TYPE_RESET_CHAN_CMD)) {
- mhi_log(MHI_MSG_INFO, "First Reset CC event\n");
- MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
- MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD);
- ret_val = -EINPROGRESS;
- break;
- } else if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)
- == MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD)) {
- MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
- MHI_PKT_TYPE_RESET_CHAN_CMD);
- mhi_log(MHI_MSG_INFO,
- "Processing Reset CC event\n");
- }
- }
- if (unlikely(0 != recycle_trb_and_ring(mhi_dev_ctxt,
- local_ev_ctxt,
- MHI_RING_TYPE_EVENT_RING,
- ev_index)))
- mhi_log(MHI_MSG_ERROR, "Failed to recycle ev pkt\n");
- switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process))) {
+ recycle_trb_and_ring(mhi_dev_ctxt,
+ local_ev_ctxt,
+ MHI_RING_TYPE_EVENT_RING,
+ ev_index);
+ spin_unlock_irqrestore(&local_ev_ctxt->ring_lock, flags);
+
+ switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, &event_to_process)) {
case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
- mhi_log(MHI_MSG_INFO,
- "MHI CCE received ring 0x%x\n",
- ev_index);
+ {
+ union mhi_cmd_pkt *cmd_pkt;
+ u32 chan;
+ struct mhi_chan_cfg *cfg;
+ unsigned long flags;
+ struct mhi_ring *cmd_ring = &mhi_dev_ctxt->
+ mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
- ret_val = parse_cmd_event(mhi_dev_ctxt,
- &event_to_process, ev_index);
+ get_cmd_pkt(mhi_dev_ctxt,
+ &event_to_process,
+ &cmd_pkt, ev_index);
+ MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
+ mhi_log(MHI_MSG_INFO,
+ "MHI CCE received ring 0x%x chan:%u\n",
+ ev_index,
+ chan);
+ spin_lock_irqsave(&cfg->event_lock, flags);
+ cfg->cmd_pkt = *cmd_pkt;
+ cfg->cmd_event_pkt =
+ event_to_process.cmd_complete_event_pkt;
+ complete(&cfg->cmd_complete);
+ spin_unlock_irqrestore(&cfg->event_lock, flags);
+ spin_lock_irqsave(&cmd_ring->ring_lock,
+ flags);
+ ctxt_del_element(cmd_ring, NULL);
+ spin_unlock_irqrestore(&cmd_ring->ring_lock,
+ flags);
break;
+ }
case MHI_PKT_TYPE_TX_EVENT:
+ {
+ u32 chan;
+ struct mhi_ring *ring;
+
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
- parse_xfer_event(mhi_dev_ctxt,
- &event_to_process, ev_index);
+ chan = MHI_EV_READ_CHID(EV_CHID, &event_to_process);
+ if (unlikely(!VALID_CHAN_NR(chan))) {
+ mhi_log(MHI_MSG_ERROR,
+ "Invalid chan:%d\n",
+ chan);
+ break;
+ }
+ ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ spin_lock_bh(&ring->ring_lock);
+ if (ring->ch_state == MHI_CHAN_STATE_ENABLED)
+ parse_xfer_event(mhi_dev_ctxt,
+ &event_to_process,
+ ev_index);
+ spin_unlock_bh(&ring->ring_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
break;
+ }
case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
{
enum STATE_TRANSITION new_state;
-
+ unsigned long flags;
new_state = MHI_READ_STATE(&event_to_process);
mhi_log(MHI_MSG_INFO,
- "MHI STE received ring 0x%x\n",
- ev_index);
- mhi_init_state_transition(mhi_dev_ctxt, new_state);
+ "MHI STE received ring 0x%x State:%s\n",
+ ev_index,
+ state_transition_str(new_state));
+
+ /* If transitioning to M1 schedule worker thread */
+ if (new_state == STATE_TRANSITION_M1) {
+ write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock,
+ flags);
+ mhi_dev_ctxt->mhi_state =
+ mhi_get_m_state(mhi_dev_ctxt);
+ if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1;
+ mhi_dev_ctxt->counters.m0_m1++;
+ schedule_work(&mhi_dev_ctxt->
+ process_m1_worker);
+ }
+ write_unlock_irqrestore(&mhi_dev_ctxt->
+ pm_xfer_lock,
+ flags);
+ } else {
+ mhi_init_state_transition(mhi_dev_ctxt,
+ new_state);
+ }
break;
}
case MHI_PKT_TYPE_EE_EVENT:
@@ -174,14 +171,16 @@ static int mhi_process_event_ring(
}
break;
}
+ case MHI_PKT_TYPE_STALE_EVENT:
+ mhi_log(MHI_MSG_INFO,
+ "Stale Event received for chan:%u\n",
+ MHI_EV_READ_CHID(EV_CHID, local_rp));
+ break;
case MHI_PKT_TYPE_SYS_ERR_EVENT:
mhi_log(MHI_MSG_INFO,
"MHI System Error Detected. Triggering Reset\n");
BUG();
- if (!mhi_trigger_reset(mhi_dev_ctxt))
- mhi_log(MHI_MSG_ERROR,
- "Failed to reset for SYSERR recovery\n");
- break;
+ break;
default:
mhi_log(MHI_MSG_ERROR,
"Unsupported packet type code 0x%x\n",
@@ -189,15 +188,20 @@ static int mhi_process_event_ring(
&event_to_process));
break;
}
+ spin_lock_irqsave(&local_ev_ctxt->ring_lock, flags);
local_rp = (union mhi_event_pkt *)local_ev_ctxt->rp;
device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
mhi_dev_ctxt,
MHI_RING_TYPE_EVENT_RING,
ev_index,
ev_ctxt->mhi_event_read_ptr);
+ spin_unlock_irqrestore(&local_ev_ctxt->ring_lock, flags);
ret_val = 0;
--event_quota;
}
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
return ret_val;
}
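/*
 * Illustrative sketch, not part of the patch: the vote counting behind the
 * mhi_assert_device_wake()/mhi_deassert_device_wake() pair that brackets
 * the event processing above.  This is a reduced model; the real code also
 * rings the device-wake doorbell under pm_xfer_lock.
 */
#include <linux/atomic.h>
#include <linux/printk.h>

struct demo_wake {
	atomic_t votes;		/* outstanding reasons to stay in M0 */
};

static void demo_wake_get(struct demo_wake *w)
{
	/* First voter pulls the link up to the active state. */
	if (atomic_inc_return(&w->votes) == 1)
		pr_debug("assert device wake\n");
}

static void demo_wake_put(struct demo_wake *w)
{
	/* Last voter allows the link to drop to a low-power state. */
	if (atomic_dec_return(&w->votes) == 0)
		pr_debug("deassert device wake\n");
}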
@@ -207,7 +211,7 @@ int parse_event_thread(void *ctxt)
u32 i = 0;
int ret_val = 0;
int ret_val_process_event = 0;
- atomic_t *ev_pen_ptr = &mhi_dev_ctxt->flags.events_pending;
+ atomic_t *ev_pen_ptr = &mhi_dev_ctxt->counters.events_pending;
/* Go through all event rings */
for (;;) {
@@ -215,7 +219,7 @@ int parse_event_thread(void *ctxt)
wait_event_interruptible(
*mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq,
((atomic_read(
- &mhi_dev_ctxt->flags.events_pending) > 0) &&
+ &mhi_dev_ctxt->counters.events_pending) > 0) &&
!mhi_dev_ctxt->flags.stop_threads) ||
mhi_dev_ctxt->flags.kill_threads ||
(mhi_dev_ctxt->flags.stop_threads &&
@@ -237,27 +241,45 @@ int parse_event_thread(void *ctxt)
break;
}
mhi_dev_ctxt->flags.ev_thread_stopped = 0;
- atomic_dec(&mhi_dev_ctxt->flags.events_pending);
- for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
+ atomic_dec(&mhi_dev_ctxt->counters.events_pending);
+ for (i = 1; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
if (mhi_dev_ctxt->mhi_state == MHI_STATE_SYS_ERR) {
mhi_log(MHI_MSG_INFO,
- "SYS_ERR detected, not processing events\n");
- atomic_set(&mhi_dev_ctxt->flags.events_pending,
+ "SYS_ERR detected, not processing events\n");
+ atomic_set(&mhi_dev_ctxt->
+ counters.events_pending,
0);
break;
}
if (GET_EV_PROPS(EV_MANAGED,
- mhi_dev_ctxt->ev_ring_props[i].flags)){
+ mhi_dev_ctxt->ev_ring_props[i].flags)) {
ret_val_process_event =
- mhi_process_event_ring(mhi_dev_ctxt, i,
- mhi_dev_ctxt->ev_ring_props[i].nr_desc);
- if (ret_val_process_event ==
- -EINPROGRESS)
+ mhi_process_event_ring(mhi_dev_ctxt,
+ i,
+ mhi_dev_ctxt->
+ ev_ring_props[i].nr_desc);
+ if (ret_val_process_event == -EINPROGRESS)
atomic_inc(ev_pen_ptr);
}
}
}
- return ret_val;
+ return ret_val;
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ (struct mhi_device_ctxt *)data;
+ const unsigned CTRL_EV_RING = 0;
+ struct mhi_event_ring_cfg *ring_props =
+ &mhi_dev_ctxt->ev_ring_props[CTRL_EV_RING];
+
+ mhi_log(MHI_MSG_VERBOSE, "Enter\n");
+ /* Process control event ring */
+ mhi_process_event_ring(mhi_dev_ctxt,
+ CTRL_EV_RING,
+ ring_props->nr_desc);
+ enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, CTRL_EV_RING));
+ mhi_log(MHI_MSG_VERBOSE, "Exit\n");
}
struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
@@ -268,8 +290,8 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
client_handle->result.bytes_xferd = 0;
client_handle->result.transaction_status = 0;
ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt,
- client_handle->event_ring_index,
- 1);
+ client_handle->event_ring_index,
+ 1);
if (ret_val)
mhi_log(MHI_MSG_INFO, "NAPI failed to process event ring\n");
return &(client_handle->result);
@@ -277,20 +299,79 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
void mhi_mask_irq(struct mhi_client_handle *client_handle)
{
- disable_irq_nosync(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
- client_handle->msi_vec));
- client_handle->mhi_dev_ctxt->counters.msi_disable_cntr++;
- if (client_handle->mhi_dev_ctxt->counters.msi_disable_cntr >
- (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr + 1))
- mhi_log(MHI_MSG_INFO, "No nested IRQ disable Allowed\n");
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ client_handle->mhi_dev_ctxt;
+ struct mhi_ring *ev_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[client_handle->event_ring_index];
+
+ disable_irq_nosync(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+ ev_ring->msi_disable_cntr++;
}
void mhi_unmask_irq(struct mhi_client_handle *client_handle)
{
- client_handle->mhi_dev_ctxt->counters.msi_enable_cntr++;
- enable_irq(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
- client_handle->msi_vec));
- if (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr >
- client_handle->mhi_dev_ctxt->counters.msi_disable_cntr)
- mhi_log(MHI_MSG_INFO, "No nested IRQ enable Allowed\n");
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ client_handle->mhi_dev_ctxt;
+ struct mhi_ring *ev_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[client_handle->event_ring_index];
+
+ ev_ring->msi_enable_cntr++;
+ enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+}
+
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
+{
+ struct device *mhi_device = dev_id;
+ struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+ int msi;
+
+ if (!mhi_dev_ctxt) {
+ mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
+ return IRQ_HANDLED;
+ }
+ msi = IRQ_TO_MSI(mhi_dev_ctxt, irq_number);
+ mhi_dev_ctxt->counters.msi_counter[msi]++;
+ mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi);
+ trace_mhi_msi(msi);
+
+ if (msi) {
+ atomic_inc(&mhi_dev_ctxt->counters.events_pending);
+ wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
+ } else {
+ disable_irq_nosync(irq_number);
+ tasklet_schedule(&mhi_dev_ctxt->ev_task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
+{
+ struct device *mhi_device = dev_id;
+ u32 client_index;
+ struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+ struct mhi_client_handle *client_handle;
+ struct mhi_client_info_t *client_info;
+ struct mhi_cb_info cb_info;
+ int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
+
+ mhi_dev_ctxt->counters.msi_counter[msi_num]++;
+ mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
+ trace_mhi_msi(msi_num);
+ client_index = MHI_MAX_CHANNELS -
+ (mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
+ client_handle = mhi_dev_ctxt->client_handle_list[client_index];
+ client_info = &client_handle->client_info;
+ if (likely(client_handle)) {
+ client_handle->result.user_data =
+ client_handle->user_data;
+ if (likely(client_info->mhi_client_cb)) {
+ cb_info.result = &client_handle->result;
+ cb_info.cb_reason = MHI_CB_XFER;
+ cb_info.chan = client_handle->chan_info.chan_nr;
+ cb_info.result->transaction_status = 0;
+ client_info->mhi_client_cb(&cb_info);
+ }
+ }
+ return IRQ_HANDLED;
}
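/*
 * Illustrative sketch, not part of the patch: the MSI fan-out that
 * mhi_msi_handlr() implements above.  Vector 0 (the control event ring) is
 * masked and deferred to a tasklet, which re-enables the IRQ when it
 * finishes; data vectors only bump a counter and wake the parser thread.
 * The explicit "msi" parameter and all names are assumptions for brevity.
 */
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/atomic.h>

struct demo_irq_ctxt {
	struct tasklet_struct ev_task;
	atomic_t events_pending;
	wait_queue_head_t event_wq;
};

static irqreturn_t demo_msi_dispatch(int irq, struct demo_irq_ctxt *ctxt,
				     int msi)
{
	if (msi) {
		/* Data event ring: account and wake the parser thread. */
		atomic_inc(&ctxt->events_pending);
		wake_up_interruptible(&ctxt->event_wq);
	} else {
		/* Control ring: mask the vector, defer to the tasklet. */
		disable_irq_nosync(irq);
		tasklet_schedule(&ctxt->ev_task);
	}
	return IRQ_HANDLED;
}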
diff --git a/drivers/platform/msm/mhi/mhi_macros.h b/drivers/platform/msm/mhi/mhi_macros.h
index bb47f53e244d..133c0eeb034e 100644
--- a/drivers/platform/msm/mhi/mhi_macros.h
+++ b/drivers/platform/msm/mhi/mhi_macros.h
@@ -96,7 +96,6 @@
((_mhi_dev_ctxt)->mmio_info.nr_event_rings - \
((_mhi_dev_ctxt)->mmio_info.nr_hw_event_rings)))
-
/* MHI Transfer Ring Elements 7.4.1 */
#define TX_TRB_LEN
#define MHI_TX_TRB_LEN__SHIFT (0)
@@ -244,9 +243,21 @@
#define MHI_CHAN_TYPE__MASK (3)
#define MHI_CHAN_TYPE__SHIFT (6)
+#define PRESERVE_DB_STATE
+#define MHI_PRESERVE_DB_STATE__MASK (1)
+#define MHI_PRESERVE_DB_STATE__SHIFT (8)
+
+#define BRSTMODE
+#define MHI_BRSTMODE__MASK (3)
+#define MHI_BRSTMODE__SHIFT (9)
+
#define GET_CHAN_PROPS(_FIELD, _VAL) \
(((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)
+#define EV_BRSTMODE
+#define MHI_EV_BRSTMODE__MASK (3)
+#define MHI_EV_BRSTMODE__SHIFT (5)
+
#define EV_TYPE
#define MHI_EV_TYPE__MASK (3)
#define MHI_EV_TYPE__SHIFT (3)
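/*
 * Illustrative sketch, not part of the patch: how GET_CHAN_PROPS() pastes
 * tokens to decode the new PRESERVE_DB_STATE and BRSTMODE fields defined
 * above.  The flags value is hypothetical.
 */
#include <stdio.h>

#define MHI_PRESERVE_DB_STATE__MASK (1)
#define MHI_PRESERVE_DB_STATE__SHIFT (8)
#define MHI_BRSTMODE__MASK (3)
#define MHI_BRSTMODE__SHIFT (9)

#define GET_CHAN_PROPS(_FIELD, _VAL) \
	(((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)

int main(void)
{
	unsigned int flags = (1u << 8) | (2u << 9);	/* hypothetical */

	/* prints "preserve_db_state=1 brstmode=2" */
	printf("preserve_db_state=%u brstmode=%u\n",
	       GET_CHAN_PROPS(PRESERVE_DB_STATE, flags),
	       GET_CHAN_PROPS(BRSTMODE, flags));
	return 0;
}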
diff --git a/drivers/platform/msm/mhi/mhi_main.c b/drivers/platform/msm/mhi/mhi_main.c
index c949745f5af7..430dc918af7e 100644
--- a/drivers/platform/msm/mhi/mhi_main.c
+++ b/drivers/platform/msm/mhi/mhi_main.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,9 @@
#include "mhi_macros.h"
#include "mhi_trace.h"
+static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
+ union mhi_cmd_pkt *cmd_pkt);
+
static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
{
bb_ctxt->el_size = sizeof(struct mhi_buf_info);
@@ -212,7 +215,9 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
ring->len, ring->base,
cc_list->mhi_trb_ring_base_addr);
mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
- MHI_CHAN_STATE_DISABLED);
+ MHI_CHAN_STATE_DISABLED,
+ false,
+ MHI_BRSTMODE_DEFAULT);
return 0;
}
@@ -259,7 +264,11 @@ static int populate_tre_ring(struct mhi_client_handle *client_handle)
client_handle->chan_info.flags),
client_handle->chan_info.ev_ring,
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
- MHI_CHAN_STATE_ENABLED);
+ MHI_CHAN_STATE_ENABLED,
+ GET_CHAN_PROPS(PRESERVE_DB_STATE,
+ client_handle->chan_info.flags),
+ GET_CHAN_PROPS(BRSTMODE,
+ client_handle->chan_info.flags));
mhi_log(MHI_MSG_INFO, "Exited\n");
return 0;
}
@@ -268,48 +277,60 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
{
int ret_val = 0;
struct mhi_device_ctxt *mhi_dev_ctxt;
- int r = 0;
+ struct mhi_ring *chan_ring;
int chan;
+ struct mhi_chan_cfg *cfg;
+ struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+ union mhi_cmd_pkt cmd_pkt;
+ enum MHI_EVENT_CCS ev_code;
- if (NULL == client_handle ||
- client_handle->magic != MHI_HANDLE_MAGIC)
+ if (!client_handle || client_handle->magic != MHI_HANDLE_MAGIC)
return -EINVAL;
mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- r = get_chan_props(mhi_dev_ctxt,
- client_handle->chan_info.chan_nr,
- &client_handle->chan_info);
- if (r)
- return r;
+ ret_val = get_chan_props(mhi_dev_ctxt,
+ client_handle->chan_info.chan_nr,
+ &client_handle->chan_info);
+ if (ret_val)
+ return ret_val;
chan = client_handle->chan_info.chan_nr;
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
+ chan_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ mutex_lock(&cfg->chan_lock);
mhi_log(MHI_MSG_INFO,
"Entered: Client opening chan 0x%x\n", chan);
if (mhi_dev_ctxt->dev_exec_env <
GET_CHAN_PROPS(CHAN_BRINGUP_STAGE,
- client_handle->chan_info.flags)) {
+ client_handle->chan_info.flags)) {
mhi_log(MHI_MSG_INFO,
"Chan %d, MHI exec_env %d, not ready!\n",
- chan, mhi_dev_ctxt->dev_exec_env);
+ chan,
+ mhi_dev_ctxt->dev_exec_env);
+ mutex_unlock(&cfg->chan_lock);
return -ENOTCONN;
}
- r = populate_tre_ring(client_handle);
- if (r) {
+ ret_val = populate_tre_ring(client_handle);
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
"Failed to initialize tre ring chan %d ret %d\n",
- chan, r);
- return r;
+ chan,
+ ret_val);
+ mutex_unlock(&cfg->chan_lock);
+ return ret_val;
}
client_handle->event_ring_index =
- mhi_dev_ctxt->dev_space.ring_ctxt.
- cc_list[chan].mhi_event_ring_index;
- r = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
- client_handle->chan_info.max_desc);
- if (r) {
+ mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].
+ mhi_event_ring_index;
+ ret_val = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
+ client_handle->chan_info.max_desc);
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
"Failed to initialize bb ctxt chan %d ret %d\n",
- chan, r);
- return r;
+ chan,
+ ret_val);
+ mutex_unlock(&cfg->chan_lock);
+ return ret_val;
}
client_handle->msi_vec =
@@ -319,16 +340,70 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
client_handle->event_ring_index].mhi_intmodt;
- init_completion(&client_handle->chan_open_complete);
- ret_val = start_chan_sync(client_handle);
+ init_completion(&cfg->cmd_complete);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
+ mhi_log(MHI_MSG_ERROR,
+ "MHI State is disabled\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&cfg->chan_lock);
+ return -EIO;
+ }
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- if (0 != ret_val)
+ spin_lock_irq(&chan_ring->ring_lock);
+ chan_ring->ch_state = MHI_CHAN_STATE_ENABLED;
+ spin_unlock_irq(&chan_ring->ring_lock);
+ ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+ MHI_COMMAND_START_CHAN,
+ chan);
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
- "Failed to start chan 0x%x, ret %d\n", chan, ret_val);
- BUG_ON(ret_val);
+ "Failed to send start cmd for chan %d ret %d\n",
+ chan, ret_val);
+ goto error_completion;
+ }
+ ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
+ msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
+ if (!ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to receive cmd completion for %d\n",
+ chan);
+ goto error_completion;
+ } else {
+ ret_val = 0;
+ }
+
+ spin_lock(&cfg->event_lock);
+ cmd_event_pkt = cfg->cmd_event_pkt;
+ cmd_pkt = cfg->cmd_pkt;
+ spin_unlock(&cfg->event_lock);
+
+ ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
+ ((union mhi_event_pkt *)&cmd_event_pkt));
+ if (ev_code != MHI_EVENT_CC_SUCCESS) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error to receive event completion ev_code:0x%x\n",
+ ev_code);
+ ret_val = -EIO;
+ goto error_completion;
+ }
+
client_handle->chan_status = 1;
+
+error_completion:
+
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mutex_unlock(&cfg->chan_lock);
+
mhi_log(MHI_MSG_INFO,
- "Exited chan 0x%x\n", chan);
+ "Exited chan 0x%x ret:%d\n", chan, ret_val);
return ret_val;
}
EXPORT_SYMBOL(mhi_open_channel);
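/*
 * Illustrative sketch, not part of the patch: the synchronous command
 * handshake mhi_open_channel() now uses.  The caller arms a per-channel
 * completion, sends the command, and blocks until the event-ring parser
 * signals it or the timeout fires.  The helper and callback names are
 * reduced stand-ins.
 */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_cmd_sync(struct completion *done,
			 int (*send_cmd)(u32 chan), u32 chan,
			 unsigned long timeout_ms)
{
	int ret;

	reinit_completion(done);	/* arm before sending */
	ret = send_cmd(chan);
	if (ret)
		return ret;
	if (!wait_for_completion_timeout(done,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;	/* device never acked the command */
	return 0;
}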
@@ -387,46 +462,86 @@ EXPORT_SYMBOL(mhi_register_channel);
void mhi_close_channel(struct mhi_client_handle *client_handle)
{
u32 chan;
- int r = 0;
int ret_val = 0;
+ struct mhi_chan_cfg *cfg;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+ struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+ union mhi_cmd_pkt cmd_pkt;
+ struct mhi_ring *chan_ring;
+ enum MHI_EVENT_CCS ev_code;
if (!client_handle ||
client_handle->magic != MHI_HANDLE_MAGIC ||
!client_handle->chan_status)
return;
+ mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
chan = client_handle->chan_info.chan_nr;
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
mhi_log(MHI_MSG_INFO, "Client attempting to close chan 0x%x\n", chan);
- init_completion(&client_handle->chan_reset_complete);
- if (!atomic_read(&client_handle->mhi_dev_ctxt->flags.pending_ssr)) {
- ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
- MHI_COMMAND_RESET_CHAN, chan);
- if (ret_val != 0) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to send reset cmd for chan %d ret %d\n",
- chan, ret_val);
- }
- r = wait_for_completion_timeout(
- &client_handle->chan_reset_complete,
+ chan_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ mutex_lock(&cfg->chan_lock);
+
+ /* Stop processing events for this channel */
+ spin_lock_irq(&chan_ring->ring_lock);
+ chan_ring->ch_state = MHI_CHAN_STATE_DISABLED;
+ spin_unlock_irq(&chan_ring->ring_lock);
+ init_completion(&cfg->cmd_complete);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+ MHI_COMMAND_RESET_CHAN,
+ chan);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to send reset cmd for chan %d ret %d\n",
+ chan,
+ ret_val);
+ goto error_completion;
+ }
+ ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
- if (!r)
- mhi_log(MHI_MSG_ERROR,
- "Failed to reset chan %d ret %d\n",
- chan, r);
- } else {
- /*
- * Assumption: Device is not playing with our
- * buffers after BEFORE_SHUTDOWN
- */
- mhi_log(MHI_MSG_INFO,
- "Pending SSR local free only chan %d.\n", chan);
+ if (!ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to receive cmd completion for %d\n",
+ chan);
+ goto error_completion;
}
+ spin_lock_irq(&cfg->event_lock);
+ cmd_event_pkt = cfg->cmd_event_pkt;
+ cmd_pkt = cfg->cmd_pkt;
+ spin_unlock_irq(&cfg->event_lock);
+ ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
+ ((union mhi_event_pkt *)&cmd_event_pkt));
+ if (ev_code != MHI_EVENT_CC_SUCCESS) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error to receive event completion ev_cod:0x%x\n",
+ ev_code);
+ goto error_completion;
+ }
+
+ ret_val = reset_chan_cmd(mhi_dev_ctxt, &cmd_pkt);
+ if (ret_val)
+ mhi_log(MHI_MSG_ERROR,
+ "Error resetting cmd ret:%d\n",
+ ret_val);
+
+error_completion:
+
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
mhi_log(MHI_MSG_INFO, "Freeing ring for chan 0x%x\n", chan);
free_tre_ring(client_handle);
mhi_log(MHI_MSG_INFO, "Chan 0x%x confirmed closed.\n", chan);
client_handle->chan_status = 0;
+ mutex_unlock(&cfg->chan_lock);
}
EXPORT_SYMBOL(mhi_close_channel);
@@ -439,93 +554,47 @@ void mhi_update_chan_db(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_XFER_RING, chan,
(uintptr_t) chan_ctxt->wp);
- mhi_dev_ctxt->mhi_chan_db_order[chan]++;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.chan_db_addr,
- chan, db_value);
-}
-
-int mhi_check_m2_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- int ret_val = 0;
+ chan_ctxt->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ chan,
+ db_value);
- if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
- mhi_log(MHI_MSG_INFO, "M2 Transition flag value = %d\n",
- (atomic_read(&mhi_dev_ctxt->flags.m2_transition)));
- if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
- if (mhi_dev_ctxt->flags.link_up) {
- mhi_assert_device_wake(mhi_dev_ctxt);
- ret_val = -ENOTCONN;
- }
- } else{
- mhi_log(MHI_MSG_INFO, "M2 transition flag is set\n");
- ret_val = -ENOTCONN;
- }
- } else {
- ret_val = 0;
- }
-
- return ret_val;
}
static inline int mhi_queue_tre(struct mhi_device_ctxt
- *mhi_dev_ctxt,
- u32 chan,
- enum MHI_RING_TYPE type)
+ *mhi_dev_ctxt,
+ u32 chan,
+ enum MHI_RING_TYPE type)
{
struct mhi_chan_ctxt *chan_ctxt;
unsigned long flags = 0;
- int ret_val = 0;
u64 db_value = 0;
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
- mhi_dev_ctxt->counters.m1_m0++;
- if (type == MHI_RING_TYPE_CMD_RING)
- atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
+ if (!MHI_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ return -EACCES;
- ret_val = mhi_check_m2_transition(mhi_dev_ctxt);
- if (likely(((ret_val == 0) &&
- (((mhi_dev_ctxt->mhi_state == MHI_STATE_M0) ||
- (mhi_dev_ctxt->mhi_state == MHI_STATE_M1))) &&
- (chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR)) &&
- (!mhi_dev_ctxt->flags.pending_M3))) {
- if (likely(type == MHI_RING_TYPE_XFER_RING)) {
- spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan],
- flags);
- db_value =
- mhi_v2p_addr(
- mhi_dev_ctxt,
- MHI_RING_TYPE_XFER_RING,
- chan,
- (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
- mhi_dev_ctxt->mhi_chan_db_order[chan]++;
- mhi_update_chan_db(mhi_dev_ctxt, chan);
- spin_unlock_irqrestore(
- &mhi_dev_ctxt->db_write_lock[chan], flags);
- } else if (type == MHI_RING_TYPE_CMD_RING) {
- db_value = mhi_v2p_addr(mhi_dev_ctxt,
- MHI_RING_TYPE_CMD_RING,
- PRIMARY_CMD_RING,
- (uintptr_t)
- mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING].wp);
- mhi_dev_ctxt->cmd_ring_order++;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.cmd_db_addr,
- 0, db_value);
- } else {
- mhi_log(MHI_MSG_VERBOSE,
- "Wrong type of packet = %d\n", type);
- ret_val = -EPROTO;
- }
+ if (likely(type == MHI_RING_TYPE_XFER_RING)) {
+ struct mhi_ring *mhi_ring =
+ &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+ mhi_update_chan_db(mhi_dev_ctxt, chan);
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
} else {
- mhi_log(MHI_MSG_VERBOSE,
- "Wakeup, pending data state %d chan state %d\n",
- mhi_dev_ctxt->mhi_state,
- chan_ctxt->mhi_chan_state);
- ret_val = 0;
+ struct mhi_ring *cmd_ring = &mhi_dev_ctxt->
+ mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_CMD_RING,
+ PRIMARY_CMD_RING,
+ (uintptr_t)cmd_ring->wp);
+ cmd_ring->db_mode.process_db
+ (mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.cmd_db_addr,
+ 0,
+ db_value);
}
- return ret_val;
+ return 0;
}
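/*
 * Illustrative sketch, not part of the patch: the gate pattern
 * mhi_queue_tre() relies on.  The PM-state check and the doorbell write
 * must sit under the same pm_xfer_lock read side so a state transition
 * (which takes the write side) cannot land between them.  The states and
 * names below are assumed stand-ins.
 */
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/types.h>

enum demo_pm_state { DEMO_PM_DISABLE, DEMO_PM_M0, DEMO_PM_M1 };

struct demo_pm {
	rwlock_t pm_xfer_lock;
	enum demo_pm_state state;
};

static bool demo_db_access_valid(enum demo_pm_state s)
{
	return s == DEMO_PM_M0 || s == DEMO_PM_M1;	/* assumption */
}

static int demo_ring_doorbell(struct demo_pm *pm, void (*write_db)(void))
{
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&pm->pm_xfer_lock, flags);
	if (!demo_db_access_valid(pm->state))
		ret = -EACCES;
	else
		write_db();	/* MMIO write, still under the lock */
	read_unlock_irqrestore(&pm->pm_xfer_lock, flags);
	return ret;
}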
static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
int chan, void *buf, size_t buf_len,
@@ -645,17 +714,11 @@ static int mhi_queue_dma_xfer(
int ret_val;
enum MHI_CLIENT_CHANNEL chan;
struct mhi_device_ctxt *mhi_dev_ctxt;
- unsigned long flags;
-
- if (!client_handle || !buf || !buf_len)
- return -EINVAL;
mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
"Client buffer is of invalid length\n");
chan = client_handle->chan_info.chan_nr;
- mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
pkt_loc->data_tx_pkt.buffer_ptr = buf;
@@ -682,24 +745,14 @@ static int mhi_queue_dma_xfer(
(void *)&pkt_loc);
if (unlikely(0 != ret_val)) {
mhi_log(MHI_MSG_VERBOSE,
- "Failed to insert trb in xfer ring\n");
- goto error;
+ "Failed to insert trb in xfer ring\n");
+ return ret_val;
}
- read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
+
if (MHI_OUT ==
GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
- ret_val = mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
- if (unlikely(ret_val))
- mhi_log(MHI_MSG_VERBOSE, "Failed queue TRE.\n");
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-error:
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return ret_val;
}
@@ -709,10 +762,28 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
int r;
enum dma_data_direction dma_dir;
struct mhi_buf_info *bb;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+ u32 chan;
+ unsigned long flags;
if (!client_handle || !buf || !buf_len)
return -EINVAL;
+ mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+ chan = client_handle->chan_info.chan_nr;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE) {
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ mhi_log(MHI_MSG_ERROR,
+ "MHI is not in active state\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
if (MHI_OUT == GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
dma_dir = DMA_TO_DEVICE;
else
@@ -723,8 +794,16 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
buf, buf_len, dma_dir, &bb);
if (r) {
mhi_log(MHI_MSG_VERBOSE,
- "Failed to create BB, chan %d ret %d\n",
- client_handle->chan_info.chan_nr, r);
+ "Failed to create BB, chan %d ret %d\n",
+ chan,
+ r);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->
+ pcie_device->dev);
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
return r;
}
@@ -733,9 +812,9 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
buf, buf_len, (u64)bb->bb_p_addr,
client_handle->chan_info.chan_nr);
r = mhi_queue_dma_xfer(client_handle,
- bb->bb_p_addr,
- bb->buf_len,
- mhi_flags);
+ bb->bb_p_addr,
+ bb->buf_len,
+ mhi_flags);
/*
* Assumption: If create_bounce_buffer did not fail, we do not
@@ -743,103 +822,76 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
* out of sync with the descriptor list which is problematic.
*/
BUG_ON(r);
- return r;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
+ if (dma_dir == DMA_FROM_DEVICE) {
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ }
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ return 0;
}
EXPORT_SYMBOL(mhi_queue_xfer);
int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_COMMAND cmd, u32 chan)
{
- unsigned long flags = 0;
union mhi_cmd_pkt *cmd_pkt = NULL;
- enum MHI_CHAN_STATE from_state = MHI_CHAN_STATE_DISABLED;
- enum MHI_CHAN_STATE to_state = MHI_CHAN_STATE_DISABLED;
enum MHI_PKT_TYPE ring_el_type = MHI_PKT_TYPE_NOOP_CMD;
- struct mutex *chan_mutex = NULL;
int ret_val = 0;
+ unsigned long flags, flags2;
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+ mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
- if (chan >= MHI_MAX_CHANNELS ||
- cmd >= MHI_COMMAND_MAX_NR || mhi_dev_ctxt == NULL) {
+ if (chan >= MHI_MAX_CHANNELS || cmd >= MHI_COMMAND_MAX_NR) {
mhi_log(MHI_MSG_ERROR,
- "Invalid channel id, received id: 0x%x", chan);
+ "Invalid channel id, received id: 0x%x",
+ chan);
return -EINVAL;
}
mhi_log(MHI_MSG_INFO,
- "Entered, MHI state %d dev_exec_env %d chan %d cmd %d\n",
- mhi_dev_ctxt->mhi_state,
- mhi_dev_ctxt->dev_exec_env,
- chan, cmd);
- mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- /*
- * If there is a cmd pending a device confirmation,
- * do not send anymore for this channel
- */
- if (MHI_CMD_PENDING == mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan]) {
- mhi_log(MHI_MSG_ERROR, "Cmd Pending on chan %d", chan);
- ret_val = -EALREADY;
- goto error_invalid;
- }
-
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- from_state =
- mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].mhi_chan_state;
+ "Entered, MHI state %s dev_exec_env %d chan %d cmd %d\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+ mhi_dev_ctxt->dev_exec_env, chan, cmd);
switch (cmd) {
break;
case MHI_COMMAND_RESET_CHAN:
- to_state = MHI_CHAN_STATE_DISABLED;
ring_el_type = MHI_PKT_TYPE_RESET_CHAN_CMD;
break;
case MHI_COMMAND_START_CHAN:
- switch (from_state) {
- case MHI_CHAN_STATE_DISABLED:
- case MHI_CHAN_STATE_ENABLED:
- case MHI_CHAN_STATE_STOP:
- to_state = MHI_CHAN_STATE_RUNNING;
- break;
- default:
- mhi_log(MHI_MSG_ERROR,
- "Invalid stt cmd 0x%x, from_state 0x%x\n",
- cmd, from_state);
- ret_val = -EPERM;
- goto error_invalid;
- }
ring_el_type = MHI_PKT_TYPE_START_CHAN_CMD;
break;
default:
mhi_log(MHI_MSG_ERROR, "Bad command received\n");
+ return -EINVAL;
}
- mutex_lock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
- ret_val = ctxt_add_element(mhi_dev_ctxt->mhi_local_cmd_ctxt,
- (void *)&cmd_pkt);
+ spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+ ret_val = ctxt_add_element(mhi_ring, (void *)&cmd_pkt);
if (ret_val) {
mhi_log(MHI_MSG_ERROR, "Failed to insert element\n");
- goto error_general;
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
+ return ret_val;
}
- chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
- mutex_lock(chan_mutex);
+
MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt, ring_el_type);
MHI_TRB_SET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
- mutex_unlock(chan_mutex);
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_PENDING;
-
- read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags2);
mhi_queue_tre(mhi_dev_ctxt, 0, MHI_RING_TYPE_CMD_RING);
- read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags2);
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
- mhi_log(MHI_MSG_VERBOSE, "Sent command 0x%x for chan %d\n",
- cmd, chan);
-error_general:
- mutex_unlock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
-error_invalid:
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_log(MHI_MSG_VERBOSE,
+ "Sent command 0x%x for chan %d\n",
+ cmd,
+ chan);
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
mhi_log(MHI_MSG_INFO, "Exited ret %d.\n", ret_val);
return ret_val;
}
@@ -870,7 +922,6 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
result->buf_addr = bb->client_buf;
result->bytes_xferd = bb->filled_size;
- result->transaction_status = 0;
/* At this point the bounce buffer is no longer necessary
* Whatever was received from the device was copied back to the
@@ -922,6 +973,9 @@ static int parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_log(MHI_MSG_RAW, "Removing BB from head, chan %d\n", chan);
atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
ret_val = ctxt_del_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
NULL);
BUG_ON(ret_val);
@@ -1018,16 +1072,8 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
ctxt_index_rp, ctxt_index_wp, chan);
BUG_ON(bb_index != ctxt_index_rp);
} else {
- /* Hardware Channel, no client registerered,
- drop data */
- recycle_trb_and_ring(mhi_dev_ctxt,
- &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
- MHI_RING_TYPE_XFER_RING,
- chan);
BUG();
- /* No bounce buffer to recycle as no user request
- * can be present.
- */
+
}
}
return 0;
@@ -1068,13 +1114,11 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
u32 nr_trb_to_parse;
u32 i = 0;
u32 ev_code;
+ struct mhi_ring *local_chan_ctxt;
trace_mhi_ev(event);
chan = MHI_EV_READ_CHID(EV_CHID, event);
- if (unlikely(!VALID_CHAN_NR(chan))) {
- mhi_log(MHI_MSG_ERROR, "Bad ring id.\n");
- return -EINVAL;
- }
+ local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
ev_code = MHI_EV_READ_CODE(EV_TRB_CODE, event);
client_handle = mhi_dev_ctxt->client_handle_list[chan];
client_handle->pkt_count++;
@@ -1095,10 +1139,7 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
dma_addr_t trb_data_loc;
u32 ieot_flag;
int ret_val;
- struct mhi_ring *local_chan_ctxt;
- local_chan_ctxt =
- &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
phy_ev_trb_loc = MHI_EV_READ_PTR(EV_PTR, event);
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
@@ -1120,6 +1161,7 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
mhi_log(MHI_MSG_CRITICAL,
"Failed to get nr available trbs ret: %d.\n",
ret_val);
+ panic("MHI: unable to determine available TRBs");
return ret_val;
}
do {
@@ -1164,26 +1206,22 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
case MHI_EVENT_CC_OOB:
case MHI_EVENT_CC_DB_MODE:
{
- struct mhi_ring *chan_ctxt = NULL;
u64 db_value = 0;
- mhi_dev_ctxt->flags.uldl_enabled = 1;
- chan = MHI_EV_READ_CHID(EV_CHID, event);
- mhi_dev_ctxt->flags.db_mode[chan] = 1;
- chan_ctxt =
- &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
- if (chan_ctxt->wp != chan_ctxt->rp) {
+
+ local_chan_ctxt->db_mode.db_mode = 1;
+ if (local_chan_ctxt->wp != local_chan_ctxt->rp) {
db_value = mhi_v2p_addr(mhi_dev_ctxt,
- MHI_RING_TYPE_XFER_RING, chan,
- (uintptr_t) chan_ctxt->wp);
- mhi_process_db(mhi_dev_ctxt,
+ MHI_RING_TYPE_XFER_RING, chan,
+ (uintptr_t) local_chan_ctxt->wp);
+ local_chan_ctxt->db_mode.process_db(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.chan_db_addr, chan,
db_value);
}
client_handle = mhi_dev_ctxt->client_handle_list[chan];
- if (NULL != client_handle)
- result->transaction_status = -ENOTCONN;
+ if (client_handle)
+ result->transaction_status = -ENOTCONN;
break;
}
case MHI_EVENT_CC_BAD_TRE:
@@ -1216,6 +1254,9 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
u64 db_value = 0;
void *removed_element = NULL;
void *added_element = NULL;
+ unsigned long flags;
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[ring_index];
ret_val = ctxt_del_element(ring, &removed_element);
@@ -1224,102 +1265,26 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
return ret_val;
}
ret_val = ctxt_add_element(ring, &added_element);
- if (0 != ret_val)
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n");
- db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index,
- (uintptr_t) ring->wp);
- if (0 != ret_val)
return ret_val;
- if (MHI_RING_TYPE_XFER_RING == ring_type) {
- union mhi_xfer_pkt *removed_xfer_pkt =
- (union mhi_xfer_pkt *)removed_element;
- union mhi_xfer_pkt *added_xfer_pkt =
- (union mhi_xfer_pkt *)added_element;
- added_xfer_pkt->data_tx_pkt =
- *(struct mhi_tx_pkt *)removed_xfer_pkt;
- } else if (MHI_RING_TYPE_EVENT_RING == ring_type) {
-
- spinlock_t *lock;
- unsigned long flags;
-
- if (ring_index >= mhi_dev_ctxt->mmio_info.nr_event_rings)
- return -ERANGE;
- lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
- spin_lock_irqsave(lock, flags);
- db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index,
- (uintptr_t) ring->wp);
- mhi_log(MHI_MSG_INFO,
- "Updating ctxt, ring index %d\n", ring_index);
- mhi_update_ctxt(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.event_db_addr,
- ring_index, db_value);
- mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
- mhi_dev_ctxt->counters.ev_counter[ring_index]++;
- spin_unlock_irqrestore(lock, flags);
}
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- /* Asserting Device Wake here, will imediately wake mdm */
- if ((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
- MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) &&
- mhi_dev_ctxt->flags.link_up) {
- switch (ring_type) {
- case MHI_RING_TYPE_CMD_RING:
- {
- struct mutex *cmd_mutex = NULL;
-
- cmd_mutex =
- &mhi_dev_ctxt->
- mhi_cmd_mutex_list[PRIMARY_CMD_RING];
- mutex_lock(cmd_mutex);
- mhi_dev_ctxt->cmd_ring_order = 1;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.cmd_db_addr,
- ring_index, db_value);
- mutex_unlock(cmd_mutex);
- break;
- }
- case MHI_RING_TYPE_EVENT_RING:
- {
- spinlock_t *lock = NULL;
- unsigned long flags = 0;
-
- lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
- spin_lock_irqsave(lock, flags);
- mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
- if ((mhi_dev_ctxt->counters.ev_counter[ring_index] %
- MHI_EV_DB_INTERVAL) == 0) {
- db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type,
- ring_index,
- (uintptr_t) ring->wp);
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.event_db_addr,
- ring_index, db_value);
- }
- spin_unlock_irqrestore(lock, flags);
- break;
- }
- case MHI_RING_TYPE_XFER_RING:
- {
- unsigned long flags = 0;
-
- spin_lock_irqsave(
- &mhi_dev_ctxt->db_write_lock[ring_index],
- flags);
- mhi_dev_ctxt->mhi_chan_db_order[ring_index] = 1;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.chan_db_addr,
- ring_index, db_value);
- spin_unlock_irqrestore(
- &mhi_dev_ctxt->db_write_lock[ring_index],
- flags);
- break;
- }
- default:
- mhi_log(MHI_MSG_ERROR, "Bad ring type\n");
- }
- }
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- return ret_val;
+
+ if (!MHI_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ return -EACCES;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ ring_type,
+ ring_index,
+ (uintptr_t) ring->wp);
+ mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.event_db_addr,
+ ring_index, db_value);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
+ return 0;
}
static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -1328,15 +1293,18 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan = 0;
int ret_val = 0;
struct mhi_ring *local_chan_ctxt;
+ struct mhi_ring *ev_ring;
struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_event_ctxt *ev_ctxt = NULL;
struct mhi_client_handle *client_handle = NULL;
- struct mutex *chan_mutex;
- int pending_el = 0;
+ int pending_el = 0, i;
struct mhi_ring *bb_ctxt;
+ unsigned long flags;
+ union mhi_event_pkt *local_rp = NULL;
+ union mhi_event_pkt *device_rp = NULL;
MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-
if (!VALID_CHAN_NR(chan)) {
mhi_log(MHI_MSG_ERROR,
"Bad channel number for CCE\n");
@@ -1344,13 +1312,41 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
}
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
- chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
- mutex_lock(chan_mutex);
client_handle = mhi_dev_ctxt->client_handle_list[chan];
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
+ ev_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[chan_ctxt->mhi_event_ring_index];
+ ev_ctxt = &mhi_dev_ctxt->
+ dev_space.ring_ctxt.ec_list[chan_ctxt->mhi_event_ring_index];
mhi_log(MHI_MSG_INFO, "Processed cmd reset event\n");
+ /* Clear all stale events related to Channel */
+ spin_lock_irqsave(&ev_ring->ring_lock, flags);
+ device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
+ mhi_dev_ctxt,
+ MHI_RING_TYPE_EVENT_RING,
+ chan_ctxt->mhi_event_ring_index,
+ ev_ctxt->mhi_event_read_ptr);
+ local_rp = (union mhi_event_pkt *)ev_ring->rp;
+ while (device_rp != local_rp) {
+ if (MHI_TRB_READ_INFO(EV_TRB_TYPE, local_rp) ==
+ MHI_PKT_TYPE_TX_EVENT) {
+ u32 ev_chan = MHI_EV_READ_CHID(EV_CHID, local_rp);
+
+ /* Mark as stale event */
+ if (ev_chan == chan)
+ MHI_TRB_SET_INFO(EV_TRB_TYPE,
+ local_rp,
+ MHI_PKT_TYPE_STALE_EVENT);
+ }
+
+ local_rp++;
+ if (local_rp == (ev_ring->base + ev_ring->len))
+ local_rp = ev_ring->base;
+ }
+ spin_unlock_irqrestore(&ev_ring->ring_lock, flags);
+
/*
* If outbound elements are pending, they must be cleared since
* they will never be acked after a channel reset.
@@ -1365,6 +1361,18 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
chan, pending_el);
atomic_sub(pending_el, &mhi_dev_ctxt->counters.outbound_acks);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ for (i = 0; i < pending_el; i++)
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ for (i = 0; i < pending_el; i++) {
+ pm_runtime_put_noidle(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ }
/* Reset the local channel context */
local_chan_ctxt->rp = local_chan_ctxt->base;
@@ -1372,37 +1380,17 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
local_chan_ctxt->ack_rp = local_chan_ctxt->base;
/* Reset the mhi channel context */
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
mhi_log(MHI_MSG_INFO, "Cleaning up BB list\n");
reset_bb_ctxt(mhi_dev_ctxt, bb_ctxt);
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
- mutex_unlock(chan_mutex);
mhi_log(MHI_MSG_INFO, "Reset complete.\n");
- if (NULL != client_handle)
- complete(&client_handle->chan_reset_complete);
return ret_val;
}
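
The stale-event scrub in reset_chan_cmd() walks the event ring from the host's local read pointer up to the device's read pointer and retags TX completions for the channel being reset, so the event parser can skip them afterwards. A compilable sketch of that walk, using simplified types in place of the MHI packet macros (names here are illustrative, not the driver's):

    #include <stddef.h>
    #include <stdint.h>

    enum pkt_type { PKT_TX_EVENT = 1, PKT_STALE_EVENT = 2 };

    struct ev_pkt {
        enum pkt_type type;
        uint32_t chan;
    };

    /* Retag unprocessed TX events for 'chan' between rp (host read
     * pointer) and dev_rp (device read pointer), wrapping at ring end. */
    static void scrub_stale_events(struct ev_pkt *base, size_t n_el,
                                   struct ev_pkt *rp, struct ev_pkt *dev_rp,
                                   uint32_t chan)
    {
        while (rp != dev_rp) {
            if (rp->type == PKT_TX_EVENT && rp->chan == chan)
                rp->type = PKT_STALE_EVENT;
            if (++rp == base + n_el)
                rp = base;          /* wrap at the end of the ring */
        }
    }
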
-static int start_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
- union mhi_cmd_pkt *cmd_pkt)
-{
- u32 chan;
-
- MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
- if (!VALID_CHAN_NR(chan))
- mhi_log(MHI_MSG_ERROR, "Bad chan: 0x%x\n", chan);
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] =
- MHI_CMD_NOT_PENDING;
- mhi_log(MHI_MSG_INFO, "Processed START CMD chan %d\n", chan);
- if (NULL != mhi_dev_ctxt->client_handle_list[chan])
- complete(
- &mhi_dev_ctxt->client_handle_list[chan]->chan_open_complete);
- return 0;
-}
enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_event_pkt *ev_pkt,
union mhi_cmd_pkt **cmd_pkt,
@@ -1421,68 +1409,13 @@ enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
return MHI_EV_READ_CODE(EV_TRB_CODE, ev_pkt);
}
-int parse_cmd_event(struct mhi_device_ctxt *mhi_dev_ctxt,
- union mhi_event_pkt *ev_pkt, u32 event_index)
-{
- int ret_val = 0;
- union mhi_cmd_pkt *cmd_pkt = NULL;
- u32 event_code = 0;
-
- event_code = get_cmd_pkt(mhi_dev_ctxt, ev_pkt, &cmd_pkt, event_index);
- switch (event_code) {
- case MHI_EVENT_CC_SUCCESS:
- {
- u32 chan = 0;
-
- MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
- switch (MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)) {
- mhi_log(MHI_MSG_INFO, "CCE chan %d cmd %d\n", chan,
- MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt));
- case MHI_PKT_TYPE_RESET_CHAN_CMD:
- ret_val = reset_chan_cmd(mhi_dev_ctxt, cmd_pkt);
- if (ret_val)
- mhi_log(MHI_MSG_INFO,
- "Failed to process reset cmd ret %d\n",
- ret_val);
- break;
- case MHI_PKT_TYPE_STOP_CHAN_CMD:
- if (ret_val) {
- mhi_log(MHI_MSG_INFO,
- "Failed to set chan state\n");
- return ret_val;
- }
- break;
- case MHI_PKT_TYPE_START_CHAN_CMD:
- ret_val = start_chan_cmd(mhi_dev_ctxt, cmd_pkt);
- if (ret_val)
- mhi_log(MHI_MSG_INFO,
- "Failed to process reset cmd\n");
- break;
- default:
- mhi_log(MHI_MSG_INFO,
- "Bad cmd type 0x%x\n",
- MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt));
- break;
- }
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
- atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
- break;
- }
- default:
- mhi_log(MHI_MSG_INFO, "Unhandled mhi completion code\n");
- break;
- }
- ctxt_del_element(mhi_dev_ctxt->mhi_local_cmd_ctxt, NULL);
- return 0;
-}
-
int mhi_poll_inbound(struct mhi_client_handle *client_handle,
struct mhi_result *result)
{
struct mhi_tx_pkt *pending_trb = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
struct mhi_ring *local_chan_ctxt = NULL;
- struct mutex *chan_mutex = NULL;
+ struct mhi_chan_cfg *cfg;
struct mhi_ring *bb_ctxt = NULL;
struct mhi_buf_info *bb = NULL;
int chan = 0, r = 0;
@@ -1495,10 +1428,10 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
chan = client_handle->chan_info.chan_nr;
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
- chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
- mutex_lock(chan_mutex);
+ mutex_lock(&cfg->chan_lock);
if (bb_ctxt->rp != bb_ctxt->ack_rp) {
pending_trb = (struct mhi_tx_pkt *)(local_chan_ctxt->ack_rp);
result->flags = pending_trb->info;
@@ -1524,7 +1457,7 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
result->bytes_xferd = 0;
r = -ENODATA;
}
- mutex_unlock(chan_mutex);
+ mutex_unlock(&cfg->chan_lock);
mhi_log(MHI_MSG_VERBOSE,
"Exited Result: Buf addr: 0x%p Bytes xfed 0x%zx chan %d\n",
result->buf_addr, result->bytes_xferd, chan);
@@ -1581,49 +1514,80 @@ int mhi_get_epid(struct mhi_client_handle *client_handle)
return MHI_EPID;
}
-int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
+/*
+ * mhi_assert_device_wake - Vote to keep the device awake by setting WAKE_DB
+ * @force_set: if true, ring the wake doorbell regardless of the vote count
+ */
+void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool force_set)
{
- if ((mhi_dev_ctxt->mmio_info.chan_db_addr) &&
- (mhi_dev_ctxt->flags.link_up)) {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d\n",
- mhi_dev_ctxt->enable_lpm);
- atomic_set(&mhi_dev_ctxt->flags.device_wake, 1);
+ unsigned long flags;
+
+ if (unlikely(force_set)) {
+ spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+ atomic_inc(&mhi_dev_ctxt->counters.device_wake);
+ mhi_write_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ MHI_DEV_WAKE_DB, 1);
+ spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
+ } else {
+ if (likely(atomic_add_unless(&mhi_dev_ctxt->
+ counters.device_wake,
+ 1,
+ 0)))
+ return;
+
+ spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+ if ((atomic_inc_return(&mhi_dev_ctxt->counters.device_wake)
+ == 1) &&
+ MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
mhi_write_db(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.chan_db_addr,
- MHI_DEV_WAKE_DB, 1);
- mhi_dev_ctxt->device_wake_asserted = 1;
- } else {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
+ MHI_DEV_WAKE_DB,
+ 1);
+ }
+ spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
}
- return 0;
}
-inline int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
+void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
{
- if ((mhi_dev_ctxt->enable_lpm) &&
- (atomic_read(&mhi_dev_ctxt->flags.device_wake)) &&
- (mhi_dev_ctxt->mmio_info.chan_db_addr != NULL) &&
- (mhi_dev_ctxt->flags.link_up)) {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
- atomic_set(&mhi_dev_ctxt->flags.device_wake, 0);
- mhi_write_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.chan_db_addr,
- MHI_DEV_WAKE_DB, 0);
- mhi_dev_ctxt->device_wake_asserted = 0;
- } else {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d DEV_WAKE %d link %d\n",
- mhi_dev_ctxt->enable_lpm,
- atomic_read(&mhi_dev_ctxt->flags.device_wake),
- mhi_dev_ctxt->flags.link_up);
- }
- return 0;
+ unsigned long flags;
+
+ WARN_ON(atomic_read(&mhi_dev_ctxt->counters.device_wake) == 0);
+
+ if (likely(atomic_add_unless
+ (&mhi_dev_ctxt->counters.device_wake, -1, 1)))
+ return;
+
+ spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+ if ((atomic_dec_return(&mhi_dev_ctxt->counters.device_wake) == 0) &&
+ MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ mhi_write_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ MHI_DEV_WAKE_DB,
+ 0);
+ spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
}
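
mhi_assert_device_wake() and mhi_deassert_device_wake() now form a voting scheme: the WAKE doorbell is written only on the 0->1 and 1->0 edges of the vote count, and atomic_add_unless() keeps the common case lock-free. A compilable userspace sketch of the same pattern, with a printf standing in for the MMIO write (all helper names below are illustrative, not part of the driver):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int device_wake = 0;      /* number of wake votes */
    static pthread_mutex_t wake_lock = PTHREAD_MUTEX_INITIALIZER;

    static void write_wake_db(int val)      /* stand-in for the MMIO doorbell */
    {
        printf("WAKE_DB <- %d\n", val);
    }

    /* Mirror of atomic_add_unless(): add delta unless *v equals 'unless';
     * returns true if the add happened. */
    static bool add_unless(atomic_int *v, int delta, int unless)
    {
        int cur = atomic_load(v);
        while (cur != unless) {
            if (atomic_compare_exchange_weak(v, &cur, cur + delta))
                return true;
        }
        return false;
    }

    static void assert_wake(void)
    {
        if (add_unless(&device_wake, 1, 0))     /* fast path: already held */
            return;
        pthread_mutex_lock(&wake_lock);
        if (atomic_fetch_add(&device_wake, 1) + 1 == 1)
            write_wake_db(1);                   /* 0 -> 1 edge rings the DB */
        pthread_mutex_unlock(&wake_lock);
    }

    static void deassert_wake(void)             /* caller must hold a vote */
    {
        if (add_unless(&device_wake, -1, 1))    /* fast path: not last vote */
            return;
        pthread_mutex_lock(&wake_lock);
        if (atomic_fetch_sub(&device_wake, 1) - 1 == 0)
            write_wake_db(0);                   /* 1 -> 0 edge clears the DB */
        pthread_mutex_unlock(&wake_lock);
    }
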
-int mhi_set_lpm(struct mhi_client_handle *client_handle, int enable_lpm)
+int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm)
{
- mhi_log(MHI_MSG_VERBOSE, "LPM Set %d\n", enable_lpm);
- client_handle->mhi_dev_ctxt->enable_lpm = enable_lpm ? 1 : 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+ unsigned long flags;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
+ /* Disable low power mode by asserting Wake */
+ if (!enable_lpm)
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ else
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
return 0;
}
+EXPORT_SYMBOL(mhi_set_lpm);
int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
int index)
@@ -1648,10 +1612,57 @@ int mhi_deregister_channel(struct mhi_client_handle
}
EXPORT_SYMBOL(mhi_deregister_channel);
+void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val)
+{
+ struct mhi_ring *ring_ctxt =
+ &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+
+ if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr)
+ ring_ctxt = &mhi_dev_ctxt->
+ mhi_local_chan_ctxt[chan];
+ else
+ ring_ctxt = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[chan];
+
+ mhi_log(MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
+
+ mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
+
+ if (ring_ctxt->db_mode.db_mode) {
+ mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
+ ring_ctxt->db_mode.db_mode = 0;
+ } else {
+ mhi_log(MHI_MSG_INFO,
+ "Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+ chan,
+ ring_ctxt->db_mode.brstmode,
+ ring_ctxt->db_mode.db_mode);
+ }
+}
+
+void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val)
+{
+ mhi_log(MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
+
+ mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
+ mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
+}
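
mhi_process_db_brstmode() and mhi_process_db_brstmode_disable() are installed per ring as db_mode.process_db, so callers such as recycle_trb_and_ring() simply invoke the ring's function pointer and the burst-mode policy is decided once at ring setup. A minimal sketch of that dispatch, assuming a simplified ring type:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ring;
    typedef void (*process_db_fn)(struct ring *r, uint32_t val);

    struct ring {
        bool db_mode;             /* device asked for an explicit doorbell */
        bool brstmode;            /* hardware burst mode enabled */
        process_db_fn process_db; /* policy chosen once at ring init */
    };

    static void write_db(struct ring *r, uint32_t val)
    {
        printf("DB <- %u (db_mode=%d)\n", val, r->db_mode);
    }

    /* Burst mode: only ring when the device explicitly requested it
     * (OOB/DB_MODE event), then let the hardware poll again. */
    static void process_db_brstmode(struct ring *r, uint32_t val)
    {
        if (r->db_mode) {
            write_db(r, val);
            r->db_mode = false;
        }
    }

    /* Burst mode disabled: every update rings the doorbell. */
    static void process_db_brstmode_disable(struct ring *r, uint32_t val)
    {
        write_db(r, val);
    }

    static void ring_init(struct ring *r, bool brstmode)
    {
        r->brstmode = brstmode;
        r->db_mode = true;
        r->process_db = brstmode ? process_db_brstmode
                                 : process_db_brstmode_disable;
    }
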
+
void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
uintptr_t chan, u32 val)
{
+
mhi_log(MHI_MSG_VERBOSE,
"db.set addr: %p io_offset 0x%lx val:0x%x\n",
io_addr, chan, val);
@@ -1660,28 +1671,29 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
/* Channel Doorbell and Polling Mode Disabled or Software Channel*/
if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr) {
+ struct mhi_ring *chan_ctxt =
+ &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
if (!(IS_HARDWARE_CHANNEL(chan) &&
- mhi_dev_ctxt->flags.uldl_enabled &&
- !mhi_dev_ctxt->flags.db_mode[chan])) {
+ !chan_ctxt->db_mode.db_mode)) {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
- mhi_dev_ctxt->flags.db_mode[chan] = 0;
+ chan_ctxt->db_mode.db_mode = 0;
} else {
mhi_log(MHI_MSG_INFO,
- "Not ringing xfer db, chan %ld, ul_dl %d db_mode %d\n",
- chan, mhi_dev_ctxt->flags.uldl_enabled,
- mhi_dev_ctxt->flags.db_mode[chan]);
+ "Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+ chan, chan_ctxt->db_mode.brstmode,
+ chan_ctxt->db_mode.db_mode);
}
/* Event Doorbell and Polling mode Disabled */
} else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
- /* Only ring for software channel */
- if (IS_SW_EV_RING(mhi_dev_ctxt, chan) ||
- !mhi_dev_ctxt->flags.uldl_enabled) {
+ struct mhi_ring *ev_ctxt =
+ &mhi_dev_ctxt->mhi_local_event_ctxt[chan];
+ /* Only ring for software channels or when in DB mode */
+ if (!(IS_HW_EV_RING(mhi_dev_ctxt, chan) &&
+ !ev_ctxt->db_mode.db_mode)) {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
- mhi_dev_ctxt->flags.db_mode[chan] = 0;
}
} else {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
- mhi_dev_ctxt->flags.db_mode[chan] = 0;
}
}
diff --git a/drivers/platform/msm/mhi/mhi_mmio_ops.c b/drivers/platform/msm/mhi/mhi_mmio_ops.c
index ddf18e466cd3..b4447378683e 100644
--- a/drivers/platform/msm/mhi/mhi_mmio_ops.c
+++ b/drivers/platform/msm/mhi/mhi_mmio_ops.c
@@ -9,6 +9,19 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <linux/completion.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/cpu.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
#include "mhi_sys.h"
#include "mhi_hwio.h"
#include "mhi.h"
@@ -17,25 +30,40 @@ int mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
u32 expiry_counter = 0;
+ unsigned long flags;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
mhi_log(MHI_MSG_INFO, "Waiting for MMIO RESET bit to be cleared.\n");
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
- MHISTATUS);
+ MHISTATUS);
MHI_READ_FIELD(pcie_word_val,
MHICTRL_RESET_MASK,
MHICTRL_RESET_SHIFT);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
if (pcie_word_val == 0xFFFFFFFF)
return -ENOTCONN;
+
while (MHI_STATE_RESET != pcie_word_val && expiry_counter < 100) {
expiry_counter++;
mhi_log(MHI_MSG_ERROR,
"Device is not RESET, sleeping and retrying.\n");
msleep(MHI_READY_STATUS_TIMEOUT_MS);
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
MHICTRL);
MHI_READ_FIELD(pcie_word_val,
MHICTRL_RESET_MASK,
MHICTRL_RESET_SHIFT);
+ read_unlock_irqrestore(pm_xfer_lock, flags);
}
if (MHI_STATE_READY != pcie_word_val)
@@ -47,15 +75,23 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
u32 expiry_counter = 0;
+ unsigned long flags;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
/* Read MMIO and poll for READY bit to be set */
pcie_word_val = mhi_reg_read(
mhi_dev_ctxt->mmio_info.mmio_addr, MHISTATUS);
MHI_READ_FIELD(pcie_word_val,
MHISTATUS_READY_MASK,
MHISTATUS_READY_SHIFT);
+ read_unlock_irqrestore(pm_xfer_lock, flags);
if (pcie_word_val == 0xFFFFFFFF)
return -ENOTCONN;
@@ -65,10 +101,16 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_log(MHI_MSG_ERROR,
"Device is not ready, sleeping and retrying.\n");
msleep(MHI_READY_STATUS_TIMEOUT_MS);
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
MHISTATUS);
MHI_READ_FIELD(pcie_word_val,
MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT);
+ read_unlock_irqrestore(pm_xfer_lock, flags);
}
if (pcie_word_val != MHI_STATE_READY)
@@ -102,21 +144,20 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read(
mhi_dev_ctxt->mmio_info.mmio_addr, MHIVER);
if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
- mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n",
- mhi_dev_ctxt->dev_props->mhi_ver);
- if (mhi_dev_ctxt->dev_props->mhi_ver == 0xFFFFFFFF)
- ret_val = mhi_wait_for_mdm(mhi_dev_ctxt);
- if (ret_val)
+ mhi_log(MHI_MSG_CRITICAL,
+ "Bad MMIO version, 0x%x\n",
+ mhi_dev_ctxt->dev_props->mhi_ver);
return ret_val;
}
+
/* Enable the channels */
for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
struct mhi_chan_ctxt *chan_ctxt =
&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i];
if (VALID_CHAN_NR(i))
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
else
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
}
mhi_log(MHI_MSG_INFO,
"Read back MMIO Ready bit successfully. Moving on..\n");
@@ -144,6 +185,11 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
MHICFG,
MHICFG_NER_MASK, MHICFG_NER_SHIFT,
mhi_dev_ctxt->mmio_info.nr_event_rings);
+ mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr,
+ MHICFG,
+ MHICFG_NHWER_MASK,
+ MHICFG_NHWER_SHIFT,
+ mhi_dev_ctxt->mmio_info.nr_hw_event_rings);
pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list;
pcie_word_val = HIGH_WORD(pcie_dword_val);
diff --git a/drivers/platform/msm/mhi/mhi_pm.c b/drivers/platform/msm/mhi/mhi_pm.c
index a928c05b805a..2f44601e225e 100644
--- a/drivers/platform/msm/mhi/mhi_pm.c
+++ b/drivers/platform/msm/mhi/mhi_pm.c
@@ -22,16 +22,12 @@
#include "mhi_hwio.h"
/* Write only sysfs attributes */
-static DEVICE_ATTR(MHI_M3, S_IWUSR, NULL, sysfs_init_m3);
static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
-static DEVICE_ATTR(MHI_RESET, S_IWUSR, NULL, sysfs_init_mhi_reset);
/* Read only sysfs attributes */
static struct attribute *mhi_attributes[] = {
- &dev_attr_MHI_M3.attr,
&dev_attr_MHI_M0.attr,
- &dev_attr_MHI_RESET.attr,
NULL,
};
@@ -42,21 +38,20 @@ static struct attribute_group mhi_attribute_group = {
int mhi_pci_suspend(struct device *dev)
{
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- if (NULL == mhi_dev_ctxt)
- return -EINVAL;
- mhi_log(MHI_MSG_INFO, "Entered, MHI state %d\n",
- mhi_dev_ctxt->mhi_state);
- atomic_set(&mhi_dev_ctxt->flags.pending_resume, 1);
+ mhi_log(MHI_MSG_INFO, "Entered\n");
- r = mhi_initiate_m3(mhi_dev_ctxt);
+ /* If runtime PM status is still active, force a suspend */
+ if (!pm_runtime_status_suspended(dev)) {
+ r = mhi_runtime_suspend(dev);
+ if (r)
+ return r;
+ }
- if (!r)
- return r;
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
- atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
- mhi_log(MHI_MSG_INFO, "Exited, ret %d\n", r);
+ mhi_log(MHI_MSG_INFO, "Exit\n");
return r;
}
@@ -65,61 +60,150 @@ int mhi_runtime_suspend(struct device *dev)
int r = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- mhi_log(MHI_MSG_INFO, "Entered\n");
- r = mhi_initiate_m3(mhi_dev_ctxt);
- if (r)
- mhi_log(MHI_MSG_ERROR, "Init M3 failed ret %d\n", r);
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ mhi_log(MHI_MSG_INFO, "Entered with State:0x%x %s\n",
+ mhi_dev_ctxt->mhi_pm_state,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ /* Link already disabled, or device already suspended in M3 */
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE ||
+ mhi_dev_ctxt->mhi_pm_state == MHI_PM_M3) {
+ mhi_log(MHI_MSG_INFO, "Already in suspended state, exiting\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return 0;
+ }
+
+ if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+ mhi_log(MHI_MSG_INFO, "Busy, Aborting Runtime Suspend\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return -EBUSY;
+ }
+
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+ msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+ if (!r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to get M0||M1 event, timeout, current state:%s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ r = -EIO;
+ goto rpm_suspend_exit;
+ }
+
+ mhi_log(MHI_MSG_INFO, "Allowing M3 State\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_ENTER;
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_log(MHI_MSG_INFO,
+ "Waiting for M3 completion.\n");
+ r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
+ msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
+ if (!r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to get M3 event, timeout, current state:%s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ r = -EIO;
+ goto rpm_suspend_exit;
+ }
+
+ r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
+ if (r) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to Turn off link ret:%d\n", r);
+ }
- pm_runtime_mark_last_busy(dev);
+rpm_suspend_exit:
mhi_log(MHI_MSG_INFO, "Exited\n");
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
return r;
}
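
The suspend path above is a handshake built on wait_event_timeout(): vote wake, wait for M0/M1, publish M3_ENTER under the writer lock, then wait for the M3 event before turning the link off. Each wait-with-timeout step maps onto a plain condition-variable wait; a userspace sketch, assuming one mutex guards the shared state:

    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    enum mhi_state { M0, M1, M2, M3 };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static enum mhi_state  state = M0;

    /* Block until pred(state) holds or timeout_ms expires; mirrors the
     * wait_event_timeout() calls above. Returns false on timeout. */
    static bool wait_for_state(bool (*pred)(enum mhi_state), int timeout_ms)
    {
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec  += timeout_ms / 1000;
        ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (ts.tv_nsec >= 1000000000L) {
            ts.tv_sec++;
            ts.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&lock);
        while (!pred(state)) {
            if (pthread_cond_timedwait(&cond, &lock, &ts) != 0) {
                pthread_mutex_unlock(&lock);
                return false;       /* timed out: bail like -EIO above */
            }
        }
        pthread_mutex_unlock(&lock);
        return true;
    }

    /* The interrupt path does the matching wake_up(): */
    static void set_state(enum mhi_state s)
    {
        pthread_mutex_lock(&lock);
        state = s;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
    }
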
+int mhi_runtime_idle(struct device *dev)
+{
+ mhi_log(MHI_MSG_INFO, "Entered returning -EBUSY\n");
+
+ /*
+ * After a runtime resume, the runtime PM framework calls the
+ * rpm_idle callback whenever dev.power usage_count drops to 0,
+ * to ask whether the device is ready to suspend. If the callback
+ * returns 0, or no callback is defined, the framework assumes the
+ * driver is ready and schedules a runtime suspend.
+ * The MHI host must enter runtime suspend only after reaching MHI
+ * state M2, even when the usage count is 0, so return -EBUSY to
+ * disable automatic suspend.
+ */
+ return -EBUSY;
+}
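
Because mhi_runtime_idle() always returns -EBUSY, the framework never auto-suspends on its own; runtime suspend is reached only through the pm_request_autosuspend() issued after the M2 debounce. These callbacks would plug into dev_pm_ops in the usual way; the registration site is not part of this hunk, so the struct below is only a sketch using the standard kernel macros:

    #include <linux/pm.h>

    /* Callbacks defined in this file */
    int mhi_runtime_suspend(struct device *dev);
    int mhi_runtime_resume(struct device *dev);
    int mhi_runtime_idle(struct device *dev);
    int mhi_pci_suspend(struct device *dev);
    int mhi_pci_resume(struct device *dev);

    static const struct dev_pm_ops mhi_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
        SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
                           mhi_runtime_resume,
                           mhi_runtime_idle)
    };
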
+
int mhi_runtime_resume(struct device *dev)
{
int r = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- mhi_log(MHI_MSG_INFO, "Entered\n");
- r = mhi_initiate_m0(mhi_dev_ctxt);
- if (r)
- mhi_log(MHI_MSG_ERROR, "Init M0 failed ret %d\n", r);
- pm_runtime_mark_last_busy(dev);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ WARN_ON(mhi_dev_ctxt->mhi_pm_state != MHI_PM_M3);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ /* turn on link */
+ r = mhi_turn_on_pcie_link(mhi_dev_ctxt);
+ if (r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to resume link\n");
+ goto rpm_resume_exit;
+ }
+
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+ /* Set and wait for M0 Event */
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+ msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+ if (!r) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to get M0 event, timeout\n");
+ r = -EIO;
+ goto rpm_resume_exit;
+ }
+ r = 0; /* no errors */
+
+rpm_resume_exit:
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ mhi_log(MHI_MSG_INFO, "Exited with :%d\n", r);
return r;
}
int mhi_pci_resume(struct device *dev)
{
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- r = mhi_initiate_m0(mhi_dev_ctxt);
- if (r)
- goto exit;
- r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
- switch (r) {
- case 0:
- mhi_log(MHI_MSG_CRITICAL,
- "Timeout: No M0 event after %d ms\n",
- MHI_MAX_SUSPEND_TIMEOUT);
- mhi_dev_ctxt->counters.m0_event_timeouts++;
- r = -ETIME;
- break;
- case -ERESTARTSYS:
+ r = mhi_runtime_resume(dev);
+ if (r) {
mhi_log(MHI_MSG_CRITICAL,
- "Going Down...\n");
- break;
- default:
- mhi_log(MHI_MSG_INFO,
- "Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
- r = 0;
+ "Failed to resume link\n");
+ } else {
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
}
-exit:
- atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
+
return r;
}
@@ -133,57 +217,15 @@ void mhi_rem_pm_sysfs(struct device *dev)
return sysfs_remove_group(&dev->kobj, &mhi_attribute_group);
}
-ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
- r = mhi_initiate_m3(mhi_dev_ctxt);
- if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to suspend %d\n", r);
- return r;
- }
- r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
- if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to turn off link ret %d\n", r);
-
- return count;
-}
-ssize_t sysfs_init_mhi_reset(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
- int r = 0;
-
- mhi_log(MHI_MSG_INFO, "Triggering MHI Reset.\n");
- r = mhi_trigger_reset(mhi_dev_ctxt);
- if (r != 0)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to trigger MHI RESET ret %d\n",
- r);
- else
- mhi_log(MHI_MSG_INFO, "Triggered! MHI RESET\n");
- return count;
-}
ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct mhi_device_ctxt *mhi_dev_ctxt =
&mhi_devices.device_list[0].mhi_ctxt;
- if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to resume link\n");
- return count;
- }
- mhi_initiate_m0(mhi_dev_ctxt);
- mhi_log(MHI_MSG_CRITICAL,
- "Current mhi_state = 0x%x\n",
- mhi_dev_ctxt->mhi_state);
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return count;
}
@@ -194,35 +236,42 @@ int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_log(MHI_MSG_INFO, "Entered...\n");
pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
- mutex_lock(&mhi_dev_ctxt->mhi_link_state);
+
if (0 == mhi_dev_ctxt->flags.link_up) {
mhi_log(MHI_MSG_CRITICAL,
"Link already marked as down, nothing to do\n");
goto exit;
}
- /* Disable shadow to avoid restoring D3 hot struct device */
- r = msm_pcie_shadow_control(mhi_dev_ctxt->dev_info->pcie_device, 0);
- if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to stop shadow config space: %d\n", r);
- r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device, PCI_D3hot);
+ r = pci_save_state(pcie_dev);
if (r) {
mhi_log(MHI_MSG_CRITICAL,
- "Failed to set pcie power state to D3 hotret: %x\n", r);
- goto exit;
+ "Failed to save pcie state ret: %d\n",
+ r);
}
+ mhi_dev_ctxt->dev_props->pcie_state = pci_store_saved_state(pcie_dev);
+ pci_disable_device(pcie_dev);
+ r = pci_set_power_state(pcie_dev, PCI_D3hot);
+ if (r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to set pcie power state to D3 hot ret: %d\n",
+ r);
+ }
+
r = msm_pcie_pm_control(MSM_PCIE_SUSPEND,
- mhi_dev_ctxt->dev_info->pcie_device->bus->number,
- mhi_dev_ctxt->dev_info->pcie_device,
+ pcie_dev->bus->number,
+ pcie_dev,
NULL,
0);
if (r)
mhi_log(MHI_MSG_CRITICAL,
- "Failed to suspend pcie bus ret 0x%x\n", r);
+ "Failed to suspend pcie bus ret 0x%x\n", r);
+
+ r = mhi_set_bus_request(mhi_dev_ctxt, 0);
+ if (r)
+ mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
mhi_dev_ctxt->flags.link_up = 0;
exit:
- mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
mhi_log(MHI_MSG_INFO, "Exited...\n");
return 0;
}
@@ -234,37 +283,40 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
- mutex_lock(&mhi_dev_ctxt->mhi_link_state);
mhi_log(MHI_MSG_INFO, "Entered...\n");
if (mhi_dev_ctxt->flags.link_up)
goto exit;
+
+ r = mhi_set_bus_request(mhi_dev_ctxt, 1);
+ if (r)
+ mhi_log(MHI_MSG_CRITICAL,
+ "Could not set bus frequency ret: %d\n",
+ r);
+
r = msm_pcie_pm_control(MSM_PCIE_RESUME,
- mhi_dev_ctxt->dev_info->pcie_device->bus->number,
- mhi_dev_ctxt->dev_info->pcie_device,
- NULL, 0);
+ pcie_dev->bus->number,
+ pcie_dev,
+ NULL,
+ 0);
if (r) {
mhi_log(MHI_MSG_CRITICAL,
"Failed to resume pcie bus ret %d\n", r);
goto exit;
}
- r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device,
- PCI_D0);
- if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to load stored state %d\n", r);
- goto exit;
- }
- r = msm_pcie_recover_config(mhi_dev_ctxt->dev_info->pcie_device);
- if (r) {
+ r = pci_enable_device(pcie_dev);
+ if (r)
mhi_log(MHI_MSG_CRITICAL,
- "Failed to Recover config space ret: %d\n", r);
- goto exit;
- }
+ "Failed to enable device ret:%d\n",
+ r);
+
+ pci_load_and_free_saved_state(pcie_dev,
+ &mhi_dev_ctxt->dev_props->pcie_state);
+ pci_restore_state(pcie_dev);
+ pci_set_master(pcie_dev);
+
mhi_dev_ctxt->flags.link_up = 1;
exit:
- mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
mhi_log(MHI_MSG_INFO, "Exited...\n");
return r;
}
-
diff --git a/drivers/platform/msm/mhi/mhi_ring_ops.c b/drivers/platform/msm/mhi/mhi_ring_ops.c
index e5a6dd51f51a..07d0098a1b61 100644
--- a/drivers/platform/msm/mhi/mhi_ring_ops.c
+++ b/drivers/platform/msm/mhi/mhi_ring_ops.c
@@ -48,6 +48,9 @@ static int add_element(struct mhi_ring *ring, void **rp,
*assigned_addr = (char *)ring->wp;
*wp = (void *)(((d_wp + 1) % ring_size) * ring->el_size +
(uintptr_t)ring->base);
+
+ /* make the ring pointer update visible to other cores */
+ smp_wmb();
return 0;
}
@@ -101,6 +104,9 @@ int delete_element(struct mhi_ring *ring, void **rp,
*rp = (void *)(((d_rp + 1) % ring_size) * ring->el_size +
(uintptr_t)ring->base);
+
+ /* make the ring pointer update visible to other cores */
+ smp_wmb();
return 0;
}
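
add_element() and delete_element() advance their pointers with the same modular arithmetic: convert the pointer to an element index, step and wrap it, then convert back, with smp_wmb() publishing the store before the caller proceeds. The pointer math on its own, as a compilable sketch:

    #include <stddef.h>

    struct ring {
        char  *base;      /* start of ring storage */
        size_t len;       /* total size in bytes */
        size_t el_size;   /* size of one element */
    };

    /* Advance p to the next element, wrapping at the end of the ring.
     * Mirrors: *wp = ((d_wp + 1) % ring_size) * el_size + base */
    static void *ring_next(const struct ring *r, void *p)
    {
        size_t ring_size = r->len / r->el_size;
        size_t idx = (size_t)((char *)p - r->base) / r->el_size;

        return r->base + ((idx + 1) % ring_size) * r->el_size;
    }
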
@@ -108,6 +114,7 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
{
u32 chan;
struct mhi_device_ctxt *ctxt;
+ int bb_ring, ch_ring;
if (!client_handle || MHI_HANDLE_MAGIC != client_handle->magic ||
!client_handle->mhi_dev_ctxt)
@@ -115,7 +122,10 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
ctxt = client_handle->mhi_dev_ctxt;
chan = client_handle->chan_info.chan_nr;
- return get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+ bb_ring = get_nr_avail_ring_elements(&ctxt->chan_bb_list[chan]);
+ ch_ring = get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+
+ return min(bb_ring, ch_ring);
}
EXPORT_SYMBOL(mhi_get_free_desc);
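
With bounce buffering, a queued transfer consumes one element in the bounce-buffer ring and one in the channel's transfer ring, so the usable descriptor count is the smaller of the two. get_nr_avail_ring_elements() itself is not shown in this hunk, but the classic circular-ring formula (one slot reserved to distinguish full from empty) gives the idea; a small sketch with index-based pointers:

    #include <stddef.h>

    /* Free elements in a circular ring of 'size' slots, reserving one
     * slot so a full ring is distinguishable from an empty one. */
    static size_t nr_avail(size_t rp, size_t wp, size_t size)
    {
        return (rp + size - wp - 1) % size;
    }

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    /* mhi_get_free_desc() analogue: capacity is bounded by both rings. */
    static size_t free_desc(size_t bb_rp, size_t bb_wp,
                            size_t ch_rp, size_t ch_wp, size_t size)
    {
        return min_sz(nr_avail(bb_rp, bb_wp, size),
                      nr_avail(ch_rp, ch_wp, size));
    }
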
diff --git a/drivers/platform/msm/mhi/mhi_ssr.c b/drivers/platform/msm/mhi/mhi_ssr.c
index 8ee3deddbb95..defd6f4fd137 100644
--- a/drivers/platform/msm/mhi/mhi_ssr.c
+++ b/drivers/platform/msm/mhi/mhi_ssr.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/pm_runtime.h>
#include <mhi_sys.h>
#include <mhi.h>
#include <mhi_bhi.h>
@@ -23,24 +24,11 @@
static int mhi_ssr_notify_cb(struct notifier_block *nb,
unsigned long action, void *data)
{
- int ret_val = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
- struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
- mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
- if (NULL != mhi_dev_ctxt)
- mhi_dev_ctxt->esoc_notif = action;
switch (action) {
case SUBSYS_BEFORE_POWERUP:
mhi_log(MHI_MSG_INFO,
"Received Subsystem event BEFORE_POWERUP\n");
- atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 1);
- ret_val = init_mhi_base_state(mhi_dev_ctxt);
- if (0 != ret_val)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to transition to base state %d.\n",
- ret_val);
break;
case SUBSYS_AFTER_POWERUP:
mhi_log(MHI_MSG_INFO,
@@ -148,7 +136,7 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
}
}
-static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
{
u32 pcie_word_val = 0;
int r = 0;
@@ -159,13 +147,11 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
mhi_pcie_dev->bhi_ctxt.bhi_base += pcie_word_val;
pcie_word_val = mhi_reg_read(mhi_pcie_dev->bhi_ctxt.bhi_base,
BHI_EXECENV);
+ mhi_dev_ctxt->dev_exec_env = pcie_word_val;
if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
mhi_dev_ctxt->base_state = STATE_TRANSITION_RESET;
} else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
- r = bhi_probe(mhi_pcie_dev);
- if (r)
- mhi_log(MHI_MSG_ERROR, "Failed to initialize BHI.\n");
} else {
mhi_log(MHI_MSG_ERROR, "Invalid EXEC_ENV: 0x%x\n",
pcie_word_val);
@@ -178,10 +164,9 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
void mhi_link_state_cb(struct msm_pcie_notify *notify)
{
- int ret_val = 0;
+
struct mhi_pcie_dev_info *mhi_pcie_dev;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
- int r = 0;
if (NULL == notify || NULL == notify->data) {
mhi_log(MHI_MSG_CRITICAL,
@@ -198,32 +183,6 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
case MSM_PCIE_EVENT_LINKUP:
mhi_log(MHI_MSG_INFO,
"Received MSM_PCIE_EVENT_LINKUP\n");
- if (0 == mhi_pcie_dev->link_up_cntr) {
- mhi_log(MHI_MSG_INFO,
- "Initializing MHI for the first time\n");
- r = mhi_ctxt_init(mhi_pcie_dev);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "MHI initialization failed, ret %d.\n",
- r);
- r = msm_pcie_register_event(
- &mhi_pcie_dev->mhi_pci_link_event);
- mhi_log(MHI_MSG_ERROR,
- "Deregistered from PCIe notif r %d.\n",
- r);
- return;
- }
- mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
- mhi_pcie_dev->mhi_ctxt.flags.link_up = 1;
- pci_set_master(mhi_pcie_dev->pcie_device);
- r = set_mhi_base_state(mhi_pcie_dev);
- if (r)
- return;
- init_mhi_base_state(mhi_dev_ctxt);
- } else {
- mhi_log(MHI_MSG_INFO,
- "Received Link Up Callback\n");
- }
mhi_pcie_dev->link_up_cntr++;
break;
case MSM_PCIE_EVENT_WAKEUP:
@@ -231,17 +190,14 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
"Received MSM_PCIE_EVENT_WAKE\n");
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
- if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) {
- mhi_log(MHI_MSG_INFO,
- "There is a pending resume, doing nothing.\n");
- return;
- }
- ret_val = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_WAKE);
- if (0 != ret_val) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to init state transition, to %d\n",
- STATE_TRANSITION_WAKE);
+
+ if (mhi_dev_ctxt->flags.mhi_initialized) {
+ pm_runtime_get(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
}
break;
default:
@@ -255,12 +211,6 @@ int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r = 0;
- mhi_assert_device_wake(mhi_dev_ctxt);
- mhi_dev_ctxt->flags.link_up = 1;
- r = mhi_set_bus_request(mhi_dev_ctxt, 1);
- if (r)
- mhi_log(MHI_MSG_INFO,
- "Failed to scale bus request to active set.\n");
r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
if (r) {
mhi_log(MHI_MSG_CRITICAL,
diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c
index ca4520a3c57d..1021a56d1b3d 100644
--- a/drivers/platform/msm/mhi/mhi_states.c
+++ b/drivers/platform/msm/mhi/mhi_states.c
@@ -17,7 +17,40 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-static inline void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+const char *state_transition_str(enum STATE_TRANSITION state)
+{
+ static const char * const mhi_states_transition_str[] = {
+ "RESET",
+ "READY",
+ "M0",
+ "M1",
+ "M2",
+ "M3",
+ "BHI",
+ "SBL",
+ "AMSS",
+ "LINK_DOWN",
+ "WAKE"
+ };
+
+ if (state == STATE_TRANSITION_SYS_ERR)
+ return "SYS_ERR";
+
+ return (state <= STATE_TRANSITION_WAKE) ?
+ mhi_states_transition_str[state] : "Invalid";
+}
+
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ u32 state = mhi_reg_read_field(mhi_dev_ctxt->mmio_info.mmio_addr,
+ MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT);
+
+ return (state >= MHI_STATE_LIMIT) ? MHI_STATE_LIMIT : state;
+}
+
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_STATE new_state)
{
if (MHI_STATE_RESET == new_state) {
@@ -41,23 +74,22 @@ static void conditional_chan_db_write(
{
u64 db_value;
unsigned long flags;
-
- mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
- spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
- if (0 == mhi_dev_ctxt->mhi_chan_db_order[chan]) {
- db_value =
- mhi_v2p_addr(mhi_dev_ctxt,
- MHI_RING_TYPE_XFER_RING, chan,
- (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.chan_db_addr,
- chan, db_value);
- }
- mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
- spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+
+ spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_XFER_RING,
+ chan,
+ (uintptr_t)mhi_ring->wp);
+ mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ chan,
+ db_value);
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
}
-static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
+static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool reset_db_mode)
{
u32 i = 0;
struct mhi_ring *local_ctxt = NULL;
@@ -66,40 +98,38 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
if (VALID_CHAN_NR(i)) {
local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
- if (IS_HARDWARE_CHANNEL(i))
- mhi_dev_ctxt->flags.db_mode[i] = 1;
- if ((local_ctxt->wp != local_ctxt->rp) ||
- ((local_ctxt->wp != local_ctxt->rp) &&
- (local_ctxt->dir == MHI_IN)))
+
+ /* Reset the ring back to doorbell mode unless it preserves DB state */
+ if (local_ctxt->db_mode.preserve_db_state == 0
+ && reset_db_mode)
+ local_ctxt->db_mode.db_mode = 1;
+
+ if (local_ctxt->wp != local_ctxt->rp)
conditional_chan_db_write(mhi_dev_ctxt, i);
}
}
static void ring_all_cmd_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
{
- struct mutex *cmd_mutex = NULL;
u64 db_value;
u64 rp = 0;
struct mhi_ring *local_ctxt = NULL;
mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
- cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
- mhi_dev_ctxt->cmd_ring_order = 0;
- mutex_lock(cmd_mutex);
+
local_ctxt = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
rp = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
PRIMARY_CMD_RING,
(uintptr_t)local_ctxt->rp);
- db_value =
- mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
- PRIMARY_CMD_RING,
- (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[0].wp);
- if (0 == mhi_dev_ctxt->cmd_ring_order && rp != db_value)
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.cmd_db_addr,
- 0, db_value);
- mhi_dev_ctxt->cmd_ring_order = 0;
- mutex_unlock(cmd_mutex);
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_CMD_RING,
+ PRIMARY_CMD_RING,
+ (uintptr_t)local_ctxt->wp);
+ if (rp != db_value)
+ local_ctxt->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.cmd_db_addr,
+ 0,
+ db_value);
}
static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -107,24 +137,23 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
u32 i;
u64 db_value = 0;
struct mhi_event_ctxt *event_ctxt = NULL;
+ struct mhi_ring *mhi_ring;
spinlock_t *lock = NULL;
unsigned long flags;
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
- lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[i];
- mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+ mhi_ring = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
+ lock = &mhi_ring->ring_lock;
spin_lock_irqsave(lock, flags);
event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
- db_value =
- mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
- i,
- (uintptr_t)mhi_dev_ctxt->mhi_local_event_ctxt[i].wp);
- if (0 == mhi_dev_ctxt->mhi_ev_db_order[i]) {
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.event_db_addr,
- i, db_value);
- }
- mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_EVENT_RING,
+ i,
+ (uintptr_t)mhi_ring->wp);
+ mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.event_db_addr,
+ i,
+ db_value);
spin_unlock_irqrestore(lock, flags);
}
}
@@ -133,168 +162,121 @@ static int process_m0_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- unsigned long flags;
- int r = 0;
- mhi_log(MHI_MSG_INFO, "Entered\n");
+ mhi_log(MHI_MSG_INFO, "Entered With State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
- if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
+ switch (mhi_dev_ctxt->mhi_state) {
+ case MHI_STATE_M2:
mhi_dev_ctxt->counters.m2_m0++;
- } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
- mhi_dev_ctxt->counters.m3_m0++;
- } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
- mhi_log(MHI_MSG_INFO,
- "Transitioning from READY.\n");
- } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
- mhi_log(MHI_MSG_INFO,
- "Transitioning from M1.\n");
- } else {
- mhi_log(MHI_MSG_INFO,
- "MHI State %d link state %d. Quitting\n",
- mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
+ break;
+ case MHI_STATE_M3:
+ mhi_dev_ctxt->counters.m3_m0++;
+ break;
+ default:
+ break;
}
- read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- mhi_assert_device_wake(mhi_dev_ctxt);
- read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M0;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_assert_device_wake(mhi_dev_ctxt, true);
if (mhi_dev_ctxt->flags.mhi_initialized) {
ring_all_ev_dbs(mhi_dev_ctxt);
- ring_all_chan_dbs(mhi_dev_ctxt);
+ ring_all_chan_dbs(mhi_dev_ctxt, true);
ring_all_cmd_dbs(mhi_dev_ctxt);
}
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- r = mhi_set_bus_request(mhi_dev_ctxt, 1);
- if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Could not set bus frequency ret: %d\n",
- r);
- mhi_dev_ctxt->flags.pending_M0 = 0;
- if (atomic_read(&mhi_dev_ctxt->flags.pending_powerup)) {
- atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 0);
- atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 0);
- }
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m0_event);
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- if (!mhi_dev_ctxt->flags.pending_M3 &&
- mhi_dev_ctxt->flags.link_up &&
- mhi_dev_ctxt->flags.mhi_initialized)
- mhi_deassert_device_wake(mhi_dev_ctxt);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ wake_up(mhi_dev_ctxt->mhi_ev_wq.m0_event);
mhi_log(MHI_MSG_INFO, "Exited\n");
return 0;
}
-static int process_m1_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
+void process_m1_transition(struct work_struct *work)
{
- unsigned long flags = 0;
- int r = 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+
+ mhi_dev_ctxt = container_of(work,
+ struct mhi_device_ctxt,
+ process_m1_worker);
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(MHI_MSG_INFO,
- "Processing M1 state transition from state %d\n",
- mhi_dev_ctxt->mhi_state);
-
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- if (!mhi_dev_ctxt->flags.pending_M3) {
- mhi_log(MHI_MSG_INFO, "Setting M2 Transition flag\n");
- atomic_inc(&mhi_dev_ctxt->flags.m2_transition);
- mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
- mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
- mhi_dev_ctxt->counters.m1_m2++;
+ "Processing M1 state transition from state %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ /* We either entered M3 or already completed an M3->M0 exit */
+ if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_M1) {
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return;
}
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO, "Failed to update bus request\n");
- mhi_log(MHI_MSG_INFO, "Debouncing M2\n");
+ mhi_log(MHI_MSG_INFO, "Transitioning to M2 Transition\n");
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1_M2_TRANSITION;
+ mhi_dev_ctxt->counters.m1_m2++;
+ mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
msleep(MHI_M2_DEBOUNCE_TMR_MS);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_log(MHI_MSG_INFO, "Pending acks %d\n",
- atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
- if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks) ||
- mhi_dev_ctxt->flags.pending_M3) {
- mhi_assert_device_wake(mhi_dev_ctxt);
- } else {
- pm_runtime_mark_last_busy(
- &mhi_dev_ctxt->dev_info->pcie_device->dev);
- r = pm_request_autosuspend(
- &mhi_dev_ctxt->dev_info->pcie_device->dev);
- if (r && r != -EAGAIN) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to remove counter ret %d\n", r);
- BUG_ON(mhi_dev_ctxt->dev_info->
- pcie_device->dev.power.runtime_error);
- }
+ /* An M0 event may have arrived during the debounce window */
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_M1_M2_TRANSITION) {
+ mhi_log(MHI_MSG_INFO, "Entered M2 State\n");
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M2;
}
- atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
- mhi_log(MHI_MSG_INFO, "M2 transition complete.\n");
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- BUG_ON(atomic_read(&mhi_dev_ctxt->outbound_acks) < 0);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- return 0;
+ if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+ mhi_log(MHI_MSG_INFO, "Exiting M2 Immediately, count:%d\n",
+ atomic_read(&mhi_dev_ctxt->counters.device_wake));
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_assert_device_wake(mhi_dev_ctxt, true);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ } else {
+ mhi_log(MHI_MSG_INFO, "Schedule RPM suspend");
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_request_autosuspend(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ }
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
}
static int process_m3_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- unsigned long flags;
mhi_log(MHI_MSG_INFO,
- "Processing M3 state transition\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
- mhi_dev_ctxt->flags.pending_M3 = 0;
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m3_event);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->counters.m0_m3++;
- return 0;
-}
-
-static int mhi_process_link_down(
- struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- unsigned long flags;
- int r;
-
- mhi_log(MHI_MSG_INFO, "Entered.\n");
- if (NULL == mhi_dev_ctxt)
- return -EINVAL;
-
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->flags.mhi_initialized = 0;
- mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- mhi_deassert_device_wake(mhi_dev_ctxt);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-
- mhi_dev_ctxt->flags.stop_threads = 1;
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
- while (!mhi_dev_ctxt->flags.ev_thread_stopped) {
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
- mhi_log(MHI_MSG_INFO,
- "Waiting for threads to SUSPEND EVT: %d, STT: %d\n",
- mhi_dev_ctxt->flags.st_thread_stopped,
- mhi_dev_ctxt->flags.ev_thread_stopped);
- msleep(20);
+ switch (mhi_dev_ctxt->mhi_state) {
+ case MHI_STATE_M1:
+ mhi_dev_ctxt->counters.m1_m3++;
+ break;
+ case MHI_STATE_M0:
+ mhi_dev_ctxt->counters.m0_m3++;
+ break;
+ default:
+ break;
}
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO,
- "Failed to scale bus request to sleep set.\n");
- mhi_turn_off_pcie_link(mhi_dev_ctxt);
- mhi_dev_ctxt->dev_info->link_down_cntr++;
- atomic_set(&mhi_dev_ctxt->flags.data_pending, 0);
- mhi_log(MHI_MSG_INFO, "Exited.\n");
-
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
return 0;
}
@@ -302,51 +284,20 @@ static int process_link_down_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO, "Entered\n");
- if (0 !=
- mhi_process_link_down(mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to process link down\n");
- }
- mhi_log(MHI_MSG_INFO, "Exited.\n");
- return 0;
+ mhi_log(MHI_MSG_INFO,
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ return -EIO;
}
static int process_wake_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0;
-
- mhi_log(MHI_MSG_INFO, "Entered\n");
- __pm_stay_awake(&mhi_dev_ctxt->w_lock);
-
- if (atomic_read(&mhi_dev_ctxt->flags.pending_ssr)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Pending SSR, Ignoring.\n");
- goto exit;
- }
- if (mhi_dev_ctxt->flags.mhi_initialized) {
- r = pm_request_resume(
- &mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_VERBOSE,
- "MHI is initialized, transitioning to M0, ret %d\n", r);
- }
-
- if (!mhi_dev_ctxt->flags.mhi_initialized) {
- mhi_log(MHI_MSG_INFO,
- "MHI is not initialized transitioning to base.\n");
- r = init_mhi_base_state(mhi_dev_ctxt);
- if (0 != r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to transition to base state %d.\n",
- r);
- }
-
-exit:
- __pm_relax(&mhi_dev_ctxt->w_lock);
- mhi_log(MHI_MSG_INFO, "Exited.\n");
- return r;
+ mhi_log(MHI_MSG_INFO,
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ return -EIO;
}
@@ -354,9 +305,10 @@ static int process_bhi_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_turn_on_pcie_link(mhi_dev_ctxt);
mhi_log(MHI_MSG_INFO, "Entered\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
mhi_log(MHI_MSG_INFO, "Exited\n");
return 0;
@@ -369,36 +321,42 @@ static int process_ready_transition(
int r = 0;
mhi_log(MHI_MSG_INFO, "Processing READY state transition\n");
- mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
-
- if (r)
+ if (r) {
mhi_log(MHI_MSG_ERROR,
"Failed to reset thread queues\n");
+ return r;
+ }
+
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
r = mhi_init_mmio(mhi_dev_ctxt);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
/* Initialize MMIO */
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failure during MMIO initialization\n");
return r;
}
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
cur_work_item);
-
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failure during event ring init\n");
return r;
}
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->flags.stop_threads = 0;
- mhi_assert_device_wake(mhi_dev_ctxt);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
MHICTRL_MHISTATE_MASK,
MHICTRL_MHISTATE_SHIFT,
MHI_STATE_M0);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
return r;
}
@@ -421,37 +379,22 @@ static int process_reset_transition(
enum STATE_TRANSITION cur_work_item)
{
int r = 0, i = 0;
- unsigned long flags = 0;
-
mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
mhi_dev_ctxt->counters.mhi_reset_cntr++;
- mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_PBL;
r = mhi_test_for_device_reset(mhi_dev_ctxt);
if (r)
mhi_log(MHI_MSG_INFO, "Device not RESET ret %d\n", r);
r = mhi_test_for_device_ready(mhi_dev_ctxt);
- switch (r) {
- case 0:
- break;
- case -ENOTCONN:
- mhi_log(MHI_MSG_CRITICAL, "Link down detected\n");
- break;
- case -ETIMEDOUT:
- r = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_RESET);
- if (0 != r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to initiate 0x%x state trans\n",
- STATE_TRANSITION_RESET);
- break;
- default:
- mhi_log(MHI_MSG_CRITICAL,
- "Unexpected ret code detected for\n");
- break;
+ if (r) {
+ mhi_log(MHI_MSG_ERROR, "timed out waiting for ready ret:%d\n",
+ r);
+ return r;
}
+
for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
@@ -475,8 +418,8 @@ static int process_reset_transition(
STATE_TRANSITION_READY);
if (0 != r)
mhi_log(MHI_MSG_CRITICAL,
- "Failed to initiate 0x%x state trans\n",
- STATE_TRANSITION_READY);
+ "Failed to initiate %s state trans\n",
+ state_transition_str(STATE_TRANSITION_READY));
return r;
}
@@ -484,45 +427,10 @@ static int process_syserr_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0;
-
- mhi_log(MHI_MSG_CRITICAL, "Received SYS ERROR. Resetting MHI\n");
- mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- r = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_RESET);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to init state transition to RESET ret %d\n", r);
- mhi_log(MHI_MSG_CRITICAL, "Failed to reset mhi\n");
- }
- return r;
-}
-
-int start_chan_sync(struct mhi_client_handle *client_handle)
-{
- int r = 0;
- int chan = client_handle->chan_info.chan_nr;
-
- init_completion(&client_handle->chan_open_complete);
- r = mhi_send_cmd(client_handle->mhi_dev_ctxt,
- MHI_COMMAND_START_CHAN,
- chan);
- if (r != 0) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to send start command for chan %d ret %d\n",
- chan, r);
- return r;
- }
- r = wait_for_completion_timeout(
- &client_handle->chan_open_complete,
- msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
- if (!r) {
- mhi_log(MHI_MSG_ERROR,
- "Timed out waiting for chan %d start completion\n",
- chan);
- r = -ETIME;
- }
- return 0;
+ mhi_log(MHI_MSG_INFO,
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ return -EIO;
}
static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -546,8 +454,7 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_info.flags))
mhi_notify_client(client_handle, MHI_CB_MHI_ENABLED);
}
- if (exec_env == MHI_EXEC_ENV_AMSS)
- mhi_deassert_device_wake(mhi_dev_ctxt);
+
mhi_log(MHI_MSG_INFO, "Done.\n");
}
@@ -555,36 +462,25 @@ static int process_sbl_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0;
- pm_runtime_set_autosuspend_delay(
- &mhi_dev_ctxt->dev_info->pcie_device->dev,
- MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
- pm_runtime_use_autosuspend(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- r = pm_runtime_set_active(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to activate runtime pm ret %d\n", r);
- }
- pm_runtime_enable(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Enabled runtime pm autosuspend\n");
+	mhi_log(MHI_MSG_INFO, "Processing SBL state transition\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return 0;
-
}
static int process_amss_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0, i = 0;
- struct mhi_client_handle *client_handle = NULL;
+ int r = 0;
mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_AMSS;
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- mhi_assert_device_wake(mhi_dev_ctxt);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
if (!mhi_dev_ctxt->flags.mhi_initialized) {
r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
cur_work_item);
@@ -592,54 +488,40 @@ static int process_amss_transition(
if (r) {
mhi_log(MHI_MSG_CRITICAL,
"Failed to set local chan state ret %d\n", r);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
return r;
}
- ring_all_chan_dbs(mhi_dev_ctxt);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ ring_all_chan_dbs(mhi_dev_ctxt, true);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(MHI_MSG_INFO,
"Notifying clients that MHI is enabled\n");
enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
} else {
mhi_log(MHI_MSG_INFO, "MHI is initialized\n");
- for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
- client_handle = mhi_dev_ctxt->client_handle_list[i];
- if (client_handle && client_handle->chan_status)
- r = start_chan_sync(client_handle);
- WARN(r, "Failed to start chan %d ret %d\n",
- i, r);
- return r;
- }
- ring_all_chan_dbs(mhi_dev_ctxt);
}
+
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
ring_all_ev_dbs(mhi_dev_ctxt);
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- if (!mhi_dev_ctxt->flags.pending_M3 &&
- mhi_dev_ctxt->flags.link_up)
- mhi_deassert_device_wake(mhi_dev_ctxt);
- mhi_log(MHI_MSG_INFO, "Exited\n");
- return 0;
-}
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- int r = 0;
- unsigned long flags = 0;
+ /*
+	 * pm_runtime_allow() decrements usage_count; the counts were
+	 * incremented by the PCI core in pci_pm_init() or by the MHI
+	 * shutdown/SSR APIs.
+ */
+ mhi_log(MHI_MSG_INFO, "Allow runtime suspend\n");
- mhi_log(MHI_MSG_INFO, "Entered\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->mhi_state = MHI_STATE_SYS_ERR;
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_allow(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Setting RESET to MDM.\n");
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_RESET);
- mhi_log(MHI_MSG_INFO, "Transitioning state to RESET\n");
- r = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_RESET);
- if (0 != r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to initiate 0x%x state trans ret %d\n",
- STATE_TRANSITION_RESET, r);
- mhi_log(MHI_MSG_INFO, "Exiting\n");
- return r;
+	/* Release the device-wake reference taken during probe */
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ mhi_log(MHI_MSG_INFO, "Exited\n");
+ return 0;
}
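
process_amss_transition() now ends by handing the link to runtime PM: pm_runtime_mark_last_busy() restarts the autosuspend timer, pm_runtime_allow() drops the usage count that the PCI core took in pci_pm_init() (or that the shutdown/SSR paths took), and the device-wake reference from probe is then released. A hedged sketch of the pairing, assuming a plain struct device:

    #include <linux/pm_runtime.h>

    static void demo_enable_rpm(struct device *dev)
    {
    	/* restart the autosuspend timer before permitting suspend */
    	pm_runtime_mark_last_busy(dev);
    	/* matches the pm_runtime_forbid() the PCI core did at enumeration */
    	pm_runtime_allow(dev);
    }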
static int process_stt_work_item(
@@ -648,8 +530,8 @@ static int process_stt_work_item(
{
int r = 0;
- mhi_log(MHI_MSG_INFO, "Transitioning to %d\n",
- (int)cur_work_item);
+ mhi_log(MHI_MSG_INFO, "Transitioning to %s\n",
+ state_transition_str(cur_work_item));
trace_mhi_state(cur_work_item);
switch (cur_work_item) {
case STATE_TRANSITION_BHI:
@@ -670,9 +552,6 @@ static int process_stt_work_item(
case STATE_TRANSITION_M0:
r = process_m0_transition(mhi_dev_ctxt, cur_work_item);
break;
- case STATE_TRANSITION_M1:
- r = process_m1_transition(mhi_dev_ctxt, cur_work_item);
- break;
case STATE_TRANSITION_M3:
r = process_m3_transition(mhi_dev_ctxt, cur_work_item);
break;
@@ -689,7 +568,8 @@ static int process_stt_work_item(
break;
default:
mhi_log(MHI_MSG_ERROR,
- "Unrecongized state: %d\n", cur_work_item);
+			"Unrecognized state: %s\n",
+ state_transition_str(cur_work_item));
break;
}
return r;
@@ -762,8 +642,8 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
BUG_ON(nr_avail_work_items <= 0);
mhi_log(MHI_MSG_VERBOSE,
- "Processing state transition %x\n",
- new_state);
+ "Processing state transition %s\n",
+ state_transition_str(new_state));
*(enum STATE_TRANSITION *)stt_ring->wp = new_state;
r = ctxt_add_element(stt_ring, (void **)&cur_work_item);
BUG_ON(r);
@@ -771,216 +651,3 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
return r;
}
-
-int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- int r = 0;
- unsigned long flags;
-
- mhi_log(MHI_MSG_INFO,
- "Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
- mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
- mhi_dev_ctxt->flags.pending_M3);
- mutex_lock(&mhi_dev_ctxt->pm_lock);
- mhi_log(MHI_MSG_INFO,
- "Waiting for M0 M1 or M3. Currently %d...\n",
- mhi_dev_ctxt->mhi_state);
-
- r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
- switch (r) {
- case 0:
- mhi_log(MHI_MSG_CRITICAL,
- "Timeout: State %d after %d ms\n",
- mhi_dev_ctxt->mhi_state,
- MHI_MAX_SUSPEND_TIMEOUT);
- mhi_dev_ctxt->counters.m0_event_timeouts++;
- r = -ETIME;
- goto exit;
- case -ERESTARTSYS:
- mhi_log(MHI_MSG_CRITICAL,
- "Going Down...\n");
- goto exit;
- default:
- mhi_log(MHI_MSG_INFO,
- "Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
- r = 0;
- break;
- }
- if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
- mhi_assert_device_wake(mhi_dev_ctxt);
- mhi_log(MHI_MSG_INFO,
- "MHI state %d, done\n",
- mhi_dev_ctxt->mhi_state);
- goto exit;
- } else {
- if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to resume link\n");
- r = -EIO;
- goto exit;
- }
-
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_log(MHI_MSG_VERBOSE, "Setting M0 ...\n");
- if (mhi_dev_ctxt->flags.pending_M3) {
- mhi_log(MHI_MSG_INFO,
- "Pending M3 detected, aborting M0 procedure\n");
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock,
- flags);
- r = -EPERM;
- goto exit;
- }
- if (mhi_dev_ctxt->flags.link_up) {
- mhi_dev_ctxt->flags.pending_M0 = 1;
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
- }
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = wait_event_interruptible_timeout(
- *mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
- WARN_ON(!r || -ERESTARTSYS == r);
- if (!r || -ERESTARTSYS == r)
- mhi_log(MHI_MSG_ERROR,
- "Failed to get M0 event ret %d\n", r);
- r = 0;
- }
-exit:
- mutex_unlock(&mhi_dev_ctxt->pm_lock);
- mhi_log(MHI_MSG_INFO, "Exited...\n");
- return r;
-}
-
-int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-
- unsigned long flags;
- int r = 0, abort_m3 = 0;
-
- mhi_log(MHI_MSG_INFO,
- "Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
- mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
- mhi_dev_ctxt->flags.pending_M3);
- mutex_lock(&mhi_dev_ctxt->pm_lock);
- switch (mhi_dev_ctxt->mhi_state) {
- case MHI_STATE_RESET:
- mhi_log(MHI_MSG_INFO,
- "MHI in RESET turning link off and quitting\n");
- mhi_turn_off_pcie_link(mhi_dev_ctxt);
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO,
- "Failed to set bus freq ret %d\n", r);
- goto exit;
- case MHI_STATE_M0:
- case MHI_STATE_M1:
- case MHI_STATE_M2:
- mhi_log(MHI_MSG_INFO,
- "Triggering wake out of M2\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->flags.pending_M3 = 1;
- if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
- mhi_log(MHI_MSG_INFO,
- "M2 transition not set\n");
- mhi_assert_device_wake(mhi_dev_ctxt);
- }
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = wait_event_interruptible_timeout(
- *mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
- if (0 == r || -ERESTARTSYS == r) {
- mhi_log(MHI_MSG_CRITICAL,
- "MDM failed to come out of M2.\n");
- mhi_dev_ctxt->counters.m2_event_timeouts++;
- r = -EAGAIN;
- goto exit;
- }
- break;
- case MHI_STATE_M3:
- mhi_log(MHI_MSG_INFO,
- "MHI state %d, link state %d.\n",
- mhi_dev_ctxt->mhi_state,
- mhi_dev_ctxt->flags.link_up);
- if (mhi_dev_ctxt->flags.link_up)
- r = -EAGAIN;
- else
- r = 0;
- goto exit;
- default:
- mhi_log(MHI_MSG_INFO,
- "MHI state %d, link state %d.\n",
- mhi_dev_ctxt->mhi_state,
- mhi_dev_ctxt->flags.link_up);
- break;
- }
- while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
- mhi_log(MHI_MSG_INFO,
- "There are still %d acks pending from device\n",
- atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
- __pm_stay_awake(&mhi_dev_ctxt->w_lock);
- __pm_relax(&mhi_dev_ctxt->w_lock);
- abort_m3 = 1;
- r = -EAGAIN;
- goto exit;
- }
-
- if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
- abort_m3 = 1;
- r = -EAGAIN;
- goto exit;
- }
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- if (mhi_dev_ctxt->flags.pending_M0) {
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = -EAGAIN;
- goto exit;
- }
- mhi_dev_ctxt->flags.pending_M3 = 1;
-
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-
- mhi_log(MHI_MSG_INFO,
- "Waiting for M3 completion.\n");
- r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
- msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
- switch (r) {
- case 0:
- mhi_log(MHI_MSG_CRITICAL,
- "MDM failed to suspend after %d ms\n",
- MHI_MAX_SUSPEND_TIMEOUT);
- mhi_dev_ctxt->counters.m3_event_timeouts++;
- mhi_dev_ctxt->flags.pending_M3 = 0;
- goto exit;
- default:
- mhi_log(MHI_MSG_INFO,
- "M3 completion received\n");
- break;
- }
- mhi_turn_off_pcie_link(mhi_dev_ctxt);
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
-exit:
- if (abort_m3) {
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- ring_all_chan_dbs(mhi_dev_ctxt);
- ring_all_cmd_dbs(mhi_dev_ctxt);
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- mhi_deassert_device_wake(mhi_dev_ctxt);
- }
- mhi_dev_ctxt->flags.pending_M3 = 0;
- mutex_unlock(&mhi_dev_ctxt->pm_lock);
- return r;
-}
diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c
index b8652770275e..c5c025b8585a 100644
--- a/drivers/platform/msm/mhi/mhi_sys.c
+++ b/drivers/platform/msm/mhi/mhi_sys.c
@@ -21,9 +21,9 @@
enum MHI_DEBUG_LEVEL mhi_msg_lvl = MHI_MSG_ERROR;
#ifdef CONFIG_MSM_MHI_DEBUG
- enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE;
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE;
#else
- enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR;
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR;
#endif
unsigned int mhi_log_override;
@@ -34,6 +34,18 @@ MODULE_PARM_DESC(mhi_msg_lvl, "dbg lvl");
module_param(mhi_ipc_log_lvl, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mhi_ipc_log_lvl, "dbg lvl");
+const char * const mhi_states_str[MHI_STATE_LIMIT] = {
+ "RESET",
+ "READY",
+ "M0",
+ "M1",
+ "M2",
+ "M3",
+ "Reserved: 0x06",
+ "BHI",
+ "SYS_ERR",
+};
+
static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
size_t count, loff_t *offp)
{
@@ -46,6 +58,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
int valid_chan = 0;
struct mhi_chan_ctxt *cc_list;
struct mhi_client_handle *client_handle;
+ int pkts_queued;
if (NULL == mhi_dev_ctxt)
return -EIO;
@@ -74,35 +87,37 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
&v_wp_index);
+ pkts_queued = client_handle->chan_info.max_desc -
+ get_nr_avail_ring_elements(&mhi_dev_ctxt->
+ mhi_local_chan_ctxt[*offp]) - 1;
amnt_copied =
scnprintf(mhi_dev_ctxt->chan_info,
- MHI_LOG_SIZE,
- "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n",
- "chan:",
- (unsigned int)*offp,
- "pkts from dev:",
- mhi_dev_ctxt->counters.chan_pkts_xferd[*offp],
- "state:",
- chan_ctxt->mhi_chan_state,
- "p_base:",
- chan_ctxt->mhi_trb_ring_base_addr,
- "v_base:",
- mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base,
- "v_wp:",
- mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
- "index:",
- v_wp_index,
- "v_rp:",
- mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp,
- "index:",
- v_rp_index,
- "pkts_queued",
- get_nr_avail_ring_elements(
- &mhi_dev_ctxt->mhi_local_chan_ctxt[*offp]),
- "/",
- client_handle->chan_info.max_desc,
- "bb_used:",
- mhi_dev_ctxt->counters.bb_used[*offp]);
+ MHI_LOG_SIZE,
+ "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n",
+ "chan:",
+ (unsigned int)*offp,
+ "pkts from dev:",
+ mhi_dev_ctxt->counters.chan_pkts_xferd[*offp],
+ "state:",
+ chan_ctxt->chstate,
+ "p_base:",
+ chan_ctxt->mhi_trb_ring_base_addr,
+ "v_base:",
+ mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base,
+ "v_wp:",
+ mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
+ "index:",
+ v_wp_index,
+ "v_rp:",
+ mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp,
+ "index:",
+ v_rp_index,
+ "pkts_queued",
+ pkts_queued,
+ "/",
+ client_handle->chan_info.max_desc,
+ "bb_used:",
+ mhi_dev_ctxt->counters.bb_used[*offp]);
*offp += 1;
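
The pkts_queued value printed above is derived from the free-slot count: one ring slot is kept permanently unused so a full ring can be told apart from an empty one, hence queued = max_desc - avail - 1. A standalone check of that arithmetic (names illustrative):

    #include <assert.h>

    static int pkts_queued(int max_desc, int nr_avail)
    {
    	/* one slot is sacrificed, so capacity is max_desc - 1 */
    	return max_desc - nr_avail - 1;
    }

    int main(void)
    {
    	assert(pkts_queued(128, 127) == 0);	/* empty ring */
    	assert(pkts_queued(128, 0) == 127);	/* full ring  */
    	return 0;
    }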
@@ -224,39 +239,37 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
msleep(100);
amnt_copied =
scnprintf(mhi_dev_ctxt->chan_info,
- MHI_LOG_SIZE,
- "%s %u %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d, %s, %d, %s %d\n",
- "Our State:",
- mhi_dev_ctxt->mhi_state,
- "M0->M1:",
- mhi_dev_ctxt->counters.m0_m1,
- "M0<-M1:",
- mhi_dev_ctxt->counters.m1_m0,
- "M1->M2:",
- mhi_dev_ctxt->counters.m1_m2,
- "M0<-M2:",
- mhi_dev_ctxt->counters.m2_m0,
- "M0->M3:",
- mhi_dev_ctxt->counters.m0_m3,
- "M0<-M3:",
- mhi_dev_ctxt->counters.m3_m0,
- "M3_ev_TO:",
- mhi_dev_ctxt->counters.m3_event_timeouts,
- "M0_ev_TO:",
- mhi_dev_ctxt->counters.m0_event_timeouts,
- "MSI_d:",
- mhi_dev_ctxt->counters.msi_disable_cntr,
- "MSI_e:",
- mhi_dev_ctxt->counters.msi_enable_cntr,
- "outstanding_acks:",
- atomic_read(&mhi_dev_ctxt->counters.outbound_acks),
- "LPM:",
- mhi_dev_ctxt->enable_lpm);
+ MHI_LOG_SIZE,
+ "%s %s %s 0x%02x %s %u %s %u %s %u %s %u %s %u %s %u %s %d %s %d %s %d\n",
+ "MHI State:",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+ "PM State:",
+ mhi_dev_ctxt->mhi_pm_state,
+ "M0->M1:",
+ mhi_dev_ctxt->counters.m0_m1,
+ "M1->M2:",
+ mhi_dev_ctxt->counters.m1_m2,
+ "M2->M0:",
+ mhi_dev_ctxt->counters.m2_m0,
+ "M0->M3:",
+ mhi_dev_ctxt->counters.m0_m3,
+ "M1->M3:",
+ mhi_dev_ctxt->counters.m1_m3,
+ "M3->M0:",
+ mhi_dev_ctxt->counters.m3_m0,
+ "device_wake:",
+ atomic_read(&mhi_dev_ctxt->counters.device_wake),
+ "usage_count:",
+ atomic_read(&mhi_dev_ctxt->dev_info->pcie_device->dev.
+ power.usage_count),
+ "outbound_acks:",
+ atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
if (amnt_copied < count)
return amnt_copied - copy_to_user(buf,
mhi_dev_ctxt->chan_info, amnt_copied);
else
return -ENOMEM;
}
static const struct file_operations mhi_dbgfs_state_fops = {
diff --git a/drivers/platform/msm/mhi/mhi_sys.h b/drivers/platform/msm/mhi/mhi_sys.h
index 765fd46ef2dd..a948a2354de7 100644
--- a/drivers/platform/msm/mhi/mhi_sys.h
+++ b/drivers/platform/msm/mhi/mhi_sys.h
@@ -46,6 +46,10 @@ extern void *mhi_ipc_log;
"[%s] " _msg, __func__, ##__VA_ARGS__); \
} while (0)
+extern const char * const mhi_states_str[MHI_STATE_LIMIT];
+#define TO_MHI_STATE_STR(state) (((state) >= MHI_STATE_LIMIT) ? \
+ "INVALID_STATE" : mhi_states_str[state])
+
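
TO_MHI_STATE_STR() bounds-checks the state before indexing, so a corrupt or future value degrades to a printable marker instead of an out-of-bounds read. A trimmed sketch of the same pattern:

    enum demo_state { DEMO_RESET, DEMO_READY, DEMO_STATE_LIMIT };

    static const char * const demo_state_str[DEMO_STATE_LIMIT] = {
    	"RESET", "READY",
    };

    #define TO_DEMO_STATE_STR(s) \
    	(((s) >= DEMO_STATE_LIMIT) ? "INVALID_STATE" : demo_state_str[s])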
irqreturn_t mhi_msi_handlr(int msi_number, void *dev_id);
struct mhi_meminfo {
diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c
index d6eb96a3e89e..88a213b1b241 100644
--- a/drivers/platform/msm/mhi_uci/mhi_uci.c
+++ b/drivers/platform/msm/mhi_uci/mhi_uci.c
@@ -895,6 +895,8 @@ static int uci_init_client_attributes(struct mhi_uci_ctxt_t
case MHI_CLIENT_BL_IN:
case MHI_CLIENT_DUN_OUT:
case MHI_CLIENT_DUN_IN:
+ case MHI_CLIENT_TF_OUT:
+ case MHI_CLIENT_TF_IN:
chan_attrib->uci_ownership = 1;
break;
default:
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 35dc3842017b..4973f91c8af1 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -45,6 +45,7 @@ struct pl_data {
struct votable *fv_votable;
struct votable *pl_disable_votable;
struct votable *pl_awake_votable;
+ struct votable *hvdcp_hw_inov_dis_votable;
struct work_struct status_change_work;
struct work_struct pl_disable_forever_work;
struct delayed_work pl_taper_work;
@@ -96,7 +97,8 @@ static void split_settled(struct pl_data *chip)
* for desired split
*/
- if (chip->pl_mode != POWER_SUPPLY_PARALLEL_USBIN_USBIN)
+ if ((chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN)
+ && (chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN_EXT))
return;
if (!chip->main_psy)
@@ -262,7 +264,7 @@ static void split_fcc(struct pl_data *chip, int total_ua,
hw_cc_delta_ua = pval.intval;
bcl_ua = INT_MAX;
- if (chip->pl_mode == POWER_SUPPLY_PARALLEL_MID_MID) {
+ if (chip->pl_mode == POWER_SUPPLY_PL_USBMID_USBMID) {
rc = power_supply_get_property(chip->main_psy,
POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
if (rc < 0) {
@@ -287,7 +289,15 @@ static void split_fcc(struct pl_data *chip, int total_ua,
slave_limited_ua = min(effective_total_ua, bcl_ua);
*slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
*slave_ua = (*slave_ua * chip->taper_pct) / 100;
- *master_ua = max(0, total_ua - *slave_ua);
+ /*
+	 * In the USBIN_USBIN configuration with internal rsense, the
+	 * parallel charger's current flows through the main charger's
+	 * BATFET, so keep the main charger's FCC at the full votable result.
+ */
+ if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ *master_ua = max(0, total_ua);
+ else
+ *master_ua = max(0, total_ua - *slave_ua);
}
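
Worked numbers for the new master_ua rule, assuming total_ua = 3000000, slave_pct = 50 and taper_pct = 100: slave_ua is 1500000 either way, but in USBIN_USBIN the master keeps the full 3000000 because the slave's current is re-sensed through its BATFET, while in the other modes the master is left with 1500000. A hedged sketch of just the split (illustrative enum, not the driver's):

    enum demo_pl_mode { DEMO_USBIN_USBIN, DEMO_USBMID_USBMID };

    static void demo_split(enum demo_pl_mode mode, int total_ua,
    		       int slave_pct, int *master_ua, int *slave_ua)
    {
    	*slave_ua = (total_ua * slave_pct) / 100;
    	/* internal rsense: slave current already flows through the
    	 * master's BATFET, so the master FCC stays at the full vote */
    	*master_ua = (mode == DEMO_USBIN_USBIN) ?
    			total_ua : total_ua - *slave_ua;
    }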
static int pl_fcc_vote_callback(struct votable *votable, void *data,
@@ -316,7 +326,7 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
total_fcc_ua = pval.intval;
}
- if (chip->pl_mode == POWER_SUPPLY_PARALLEL_NONE
+ if (chip->pl_mode == POWER_SUPPLY_PL_NONE
|| get_effective_result_locked(chip->pl_disable_votable)) {
pval.intval = total_fcc_ua;
rc = power_supply_set_property(chip->main_psy,
@@ -391,7 +401,7 @@ static int pl_fv_vote_callback(struct votable *votable, void *data,
return rc;
}
- if (chip->pl_mode != POWER_SUPPLY_PARALLEL_NONE) {
+ if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
pval.intval += PARALLEL_FLOAT_VOLTAGE_DELTA_UV;
rc = power_supply_set_property(chip->pl_psy,
POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
@@ -411,6 +421,10 @@ static void pl_disable_forever_work(struct work_struct *work)
/* Disable Parallel charger forever */
vote(chip->pl_disable_votable, PL_HW_ABSENT_VOTER, true, 0);
+
+ /* Re-enable autonomous mode */
+ if (chip->hvdcp_hw_inov_dis_votable)
+ vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
}
static int pl_disable_vote_callback(struct votable *votable,
@@ -451,7 +465,8 @@ static int pl_disable_vote_callback(struct votable *votable,
pr_err("Couldn't change slave suspend state rc=%d\n",
rc);
- if (chip->pl_mode == POWER_SUPPLY_PARALLEL_USBIN_USBIN)
+ if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
split_settled(chip);
/*
* we could have been enabled while in taper mode,
@@ -469,7 +484,8 @@ static int pl_disable_vote_callback(struct votable *votable,
}
}
} else {
- if (chip->pl_mode == POWER_SUPPLY_PARALLEL_USBIN_USBIN)
+ if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
split_settled(chip);
/* pl_psy may be NULL while in the disable branch */
@@ -552,6 +568,21 @@ static bool is_parallel_available(struct pl_data *chip)
* pl_psy is present and valid.
*/
chip->pl_mode = pval.intval;
+
+	/* Disable autonomous voltage increments for USBIN-USBIN */
+ if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+ if (!chip->hvdcp_hw_inov_dis_votable)
+ chip->hvdcp_hw_inov_dis_votable =
+ find_votable("HVDCP_HW_INOV_DIS");
+ if (chip->hvdcp_hw_inov_dis_votable)
+			/* Disable INOV; its vote callback caches the pulse count */
+ vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER,
+ true, 0);
+ else
+ return false;
+ }
+
vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0);
return true;
@@ -604,13 +635,16 @@ static void handle_main_charge_type(struct pl_data *chip)
}
#define MIN_ICL_CHANGE_DELTA_UA 300000
-static void handle_settled_aicl_split(struct pl_data *chip)
+static void handle_settled_icl_change(struct pl_data *chip)
{
union power_supply_propval pval = {0, };
int rc;
- if (!get_effective_result(chip->pl_disable_votable)
- && chip->pl_mode == POWER_SUPPLY_PARALLEL_USBIN_USBIN) {
+ if (get_effective_result(chip->pl_disable_votable))
+ return;
+
+ if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN
+ || chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
/*
* call aicl split only when USBIN_USBIN and enabled
* and if aicl changed
@@ -627,6 +661,8 @@ static void handle_settled_aicl_split(struct pl_data *chip)
if (abs((chip->main_settled_ua - chip->pl_settled_ua)
- pval.intval) > MIN_ICL_CHANGE_DELTA_UA)
split_settled(chip);
+ } else {
+ rerun_election(chip->fcc_votable);
}
}
@@ -673,7 +709,7 @@ static void status_change_work(struct work_struct *work)
is_parallel_available(chip);
handle_main_charge_type(chip);
- handle_settled_aicl_split(chip);
+ handle_settled_icl_change(chip);
handle_parallel_in_taper(chip);
}
@@ -687,7 +723,8 @@ static int pl_notifier_call(struct notifier_block *nb,
return NOTIFY_OK;
if ((strcmp(psy->desc->name, "parallel") == 0)
- || (strcmp(psy->desc->name, "battery") == 0))
+ || (strcmp(psy->desc->name, "battery") == 0)
+ || (strcmp(psy->desc->name, "main") == 0))
schedule_work(&chip->status_change_work);
return NOTIFY_OK;
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index b51ce758a16b..64f4d46df9a0 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -848,6 +848,8 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_PARALLEL_DISABLE,
POWER_SUPPLY_PROP_SET_SHIP_MODE,
POWER_SUPPLY_PROP_DIE_HEALTH,
+ POWER_SUPPLY_PROP_RERUN_AICL,
+ POWER_SUPPLY_PROP_DP_DM,
};
static int smb2_batt_get_prop(struct power_supply *psy,
@@ -933,6 +935,12 @@ static int smb2_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_DIE_HEALTH:
rc = smblib_get_prop_die_health(chg, val);
break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ val->intval = chg->pulse_cnt;
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ val->intval = 0;
+ break;
default:
pr_err("batt power supply prop %d not supported\n", psp);
return -EINVAL;
@@ -984,8 +992,17 @@ static int smb2_batt_set_prop(struct power_supply *psy,
/* Not in ship mode as long as the device is active */
if (!val->intval)
break;
+ if (chg->pl.psy)
+ power_supply_set_property(chg->pl.psy,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE, val);
rc = smblib_set_prop_ship_mode(chg, val);
break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ rc = smblib_rerun_aicl(chg);
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ rc = smblib_dp_dm(chg, val->intval);
+ break;
default:
rc = -EINVAL;
}
@@ -1001,6 +1018,8 @@ static int smb2_batt_prop_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
case POWER_SUPPLY_PROP_CAPACITY:
case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ case POWER_SUPPLY_PROP_DP_DM:
+ case POWER_SUPPLY_PROP_RERUN_AICL:
return 1;
default:
break;
@@ -1615,7 +1634,7 @@ static int smb2_chg_config_init(struct smb2 *chip)
switch (pmic_rev_id->pmic_subtype) {
case PMI8998_SUBTYPE:
chip->chg.smb_version = PMI8998_SUBTYPE;
- chip->chg.wa_flags |= BOOST_BACK_WA;
+ chip->chg.wa_flags |= BOOST_BACK_WA | QC_AUTH_INTERRUPT_WA_BIT;
if (pmic_rev_id->rev4 == PMI8998_V1P1_REV4) /* PMI rev 1.1 */
chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT;
if (pmic_rev_id->rev4 == PMI8998_V2P0_REV4) /* PMI rev 2.0 */
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 33339588599e..f9784630c327 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -58,6 +58,12 @@ int smblib_read(struct smb_charger *chg, u16 addr, u8 *val)
return rc;
}
+int smblib_multibyte_read(struct smb_charger *chg, u16 addr, u8 *val,
+ int count)
+{
+ return regmap_bulk_read(chg->regmap, addr, val, count);
+}
+
int smblib_masked_write(struct smb_charger *chg, u16 addr, u8 mask, u8 val)
{
int rc = 0;
@@ -633,11 +639,15 @@ static void smblib_uusb_removal(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
- /* reset AUTH_IRQ_EN_CFG_BIT */
- rc = smblib_masked_write(chg, USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
- AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable QC auth setting rc=%d\n", rc);
+ if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+ /* re-enable AUTH_IRQ_EN_CFG_BIT */
+ rc = smblib_masked_write(chg,
+ USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+ AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable QC auth setting rc=%d\n", rc);
+ }
/* reconfigure allowed voltage for HVDCP */
rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG,
@@ -648,6 +658,8 @@ static void smblib_uusb_removal(struct smb_charger *chg)
chg->voltage_min_uv = MICRO_5V;
chg->voltage_max_uv = MICRO_5V;
+ chg->usb_icl_delta_ua = 0;
+ chg->pulse_cnt = 0;
/* clear USB ICL vote for USB_PSY_VOTER */
rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
@@ -737,6 +749,40 @@ int smblib_rerun_apsd_if_required(struct smb_charger *chg)
return 0;
}
+static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count)
+{
+ int rc;
+ u8 val[2];
+
+ switch (chg->smb_version) {
+ case PMI8998_SUBTYPE:
+ rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, val);
+ if (rc) {
+ pr_err("failed to read QC_PULSE_COUNT_STATUS_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ *count = val[0] & QC_PULSE_COUNT_MASK;
+ break;
+ case PM660_SUBTYPE:
+ rc = smblib_multibyte_read(chg,
+ QC_PULSE_COUNT_STATUS_1_REG, val, 2);
+ if (rc) {
+ pr_err("failed to read QC_PULSE_COUNT_STATUS_1_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ *count = (val[1] << 8) | val[0];
+ break;
+ default:
+ smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
+ chg->smb_version);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*********************
* VOTABLE CALLBACKS *
*********************/
@@ -875,7 +921,6 @@ override_suspend_config:
enable_icl_changed_interrupt:
enable_irq(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
-
return rc;
}
@@ -971,8 +1016,10 @@ static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
{
struct smb_charger *chg = data;
int rc;
- u8 val = HVDCP_AUTH_ALG_EN_CFG_BIT
- | HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT | HVDCP_EN_BIT;
+ u8 val = HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT;
+
+ /* vote to enable/disable HW autonomous INOV */
+ vote(chg->hvdcp_hw_inov_dis_votable, client, !hvdcp_enable, 0);
/*
* Disable the autonomous bit and auth bit for disabling hvdcp.
@@ -983,9 +1030,7 @@ static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
val = HVDCP_EN_BIT;
rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
- HVDCP_EN_BIT
- | HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT
- | HVDCP_AUTH_ALG_EN_CFG_BIT,
+ HVDCP_EN_BIT | HVDCP_AUTH_ALG_EN_CFG_BIT,
val);
if (rc < 0) {
smblib_err(chg, "Couldn't %s hvdcp rc=%d\n",
@@ -1054,6 +1099,37 @@ static int smblib_apsd_disable_vote_callback(struct votable *votable,
return 0;
}
+static int smblib_hvdcp_hw_inov_dis_vote_callback(struct votable *votable,
+ void *data, int disable, const char *client)
+{
+ struct smb_charger *chg = data;
+ int rc;
+
+ if (disable) {
+ /*
+		 * The pulse count register gets zeroed when autonomous mode is
+		 * disabled. Cache the count before disabling.
+ */
+ rc = smblib_get_pulse_cnt(chg, &chg->pulse_cnt);
+ if (rc < 0) {
+			pr_err("failed to read pulse count rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+ HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT,
+ disable ? 0 : HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't %s hvdcp rc=%d\n",
+ disable ? "disable" : "enable", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
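
HVDCP_HW_INOV_DIS is created below with VOTE_SET_ANY semantics: the callback runs with disable = 1 while any client votes true, and with disable = 0 only once every client has withdrawn. A sketch of a client interaction, leaning on the vote() API used throughout this patch (the voter name is illustrative):

    static void demo_hold_inov_off(struct smb_charger *chg, bool hold)
    {
    	/* true: disable HW-autonomous INOV (callback caches pulse_cnt);
    	 * false: withdraw; INOV returns once no other voter holds it */
    	vote(chg->hvdcp_hw_inov_dis_votable, "DEMO_VOTER", hold, 0);
    }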
/*******************
* VCONN REGULATOR *
* *****************/
@@ -1641,6 +1717,119 @@ int smblib_set_prop_system_temp_level(struct smb_charger *chg,
return 0;
}
+int smblib_rerun_aicl(struct smb_charger *chg)
+{
+ int rc, settled_icl_ua;
+ u8 stat;
+
+ rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* USB is suspended so skip re-running AICL */
+ if (stat & USBIN_SUSPEND_STS_BIT)
+ return rc;
+
+ smblib_dbg(chg, PR_MISC, "re-running AICL\n");
+ switch (chg->smb_version) {
+ case PMI8998_SUBTYPE:
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+ &settled_icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+ return rc;
+ }
+
+ vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
+ max(settled_icl_ua - chg->param.usb_icl.step_u,
+ chg->param.usb_icl.step_u));
+ vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
+ break;
+ case PM660_SUBTYPE:
+ /*
+ * Use restart_AICL instead of trigger_AICL as it runs the
+ * complete AICL instead of starting from the last settled
+ * value.
+ */
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
+ RESTART_AICL_BIT, RESTART_AICL_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+ break;
+ default:
+ smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
+ chg->smb_version);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
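
On PMI8998 the rerun above is forced in software: the ICL is voted one step below the settled value and the vote is immediately released, which makes the hardware re-run AICL from scratch. With a settled 1500000 uA and a 25000 uA step, the transient vote is 1475000 uA, clamped so it never drops below one step. A sketch of that clamp:

    static int demo_aicl_kick_ua(int settled_ua, int step_ua)
    {
    	int ua = settled_ua - step_ua;

    	/* mirror the max() in the patch: never vote below one step */
    	return ua > step_ua ? ua : step_ua;
    }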
+static int smblib_dp_pulse(struct smb_charger *chg)
+{
+ int rc;
+
+ /* QC 3.0 increment */
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_INCREMENT_BIT,
+ SINGLE_INCREMENT_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static int smblib_dm_pulse(struct smb_charger *chg)
+{
+ int rc;
+
+ /* QC 3.0 decrement */
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_DECREMENT_BIT,
+ SINGLE_DECREMENT_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+int smblib_dp_dm(struct smb_charger *chg, int val)
+{
+ int target_icl_ua, rc = 0;
+
+ switch (val) {
+ case POWER_SUPPLY_DP_DM_DP_PULSE:
+ rc = smblib_dp_pulse(chg);
+ if (!rc)
+ chg->pulse_cnt++;
+ smblib_dbg(chg, PR_PARALLEL, "DP_DM_DP_PULSE rc=%d cnt=%d\n",
+ rc, chg->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_DM_PULSE:
+ rc = smblib_dm_pulse(chg);
+ if (!rc && chg->pulse_cnt)
+ chg->pulse_cnt--;
+ smblib_dbg(chg, PR_PARALLEL, "DP_DM_DM_PULSE rc=%d cnt=%d\n",
+ rc, chg->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_DOWN:
+ chg->usb_icl_delta_ua -= 100000;
+ target_icl_ua = get_effective_result(chg->usb_icl_votable);
+ vote(chg->usb_icl_votable, SW_QC3_VOTER, true,
+ target_icl_ua + chg->usb_icl_delta_ua);
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_UP:
+ default:
+ break;
+ }
+
+ return rc;
+}
+
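
smblib_dp_dm() is reached through the writable POWER_SUPPLY_PROP_DP_DM battery property added in qpnp-smb2.c; each DP pulse bumps the cached pulse_cnt and each DM pulse lowers it. A hedged sketch of a caller (psy is assumed to be the "battery" supply; error handling trimmed):

    #include <linux/power_supply.h>

    static void demo_dp_pulse(struct power_supply *psy)
    {
    	union power_supply_propval pval = {
    		.intval = POWER_SUPPLY_DP_DM_DP_PULSE,	/* one QC3 step */
    	};

    	/* routed by smb2_batt_set_prop() to smblib_dp_dm() */
    	power_supply_set_property(psy, POWER_SUPPLY_PROP_DP_DM, &pval);
    }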
/*******************
* DC PSY GETTERS *
*******************/
@@ -2234,6 +2423,10 @@ int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
}
chg->voltage_max_uv = max_uv;
+ rc = smblib_rerun_aicl(chg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't re-run AICL rc=%d\n", rc);
+
return rc;
}
@@ -2660,7 +2853,6 @@ int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua)
int current_ua, rc;
if (reduction_ua == 0) {
- chg->icl_reduction_ua = 0;
vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
} else {
/*
@@ -2678,6 +2870,8 @@ int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua)
}
}
+ chg->icl_reduction_ua = reduction_ua;
+
return rerun_election(chg->usb_icl_votable);
}
@@ -2910,26 +3104,38 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
}
#define USB_WEAK_INPUT_UA 1400000
+#define ICL_CHANGE_DELAY_MS 1000
irqreturn_t smblib_handle_icl_change(int irq, void *data)
{
+ u8 stat;
+ int rc, settled_ua, delay = ICL_CHANGE_DELAY_MS;
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
- int rc, settled_ua;
-
- rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &settled_ua);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
- return IRQ_HANDLED;
- }
if (chg->mode == PARALLEL_MASTER) {
- power_supply_changed(chg->usb_main_psy);
- vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
- settled_ua >= USB_WEAK_INPUT_UA, 0);
+ rc = smblib_read(chg, AICL_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+ &settled_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ /* If AICL settled then schedule work now */
+ if ((settled_ua == get_effective_result(chg->usb_icl_votable))
+ || (stat & AICL_DONE_BIT))
+ delay = 0;
+
+ schedule_delayed_work(&chg->icl_change_work,
+ msecs_to_jiffies(delay));
}
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s icl_settled=%d\n",
- irq_data->name, settled_ua);
return IRQ_HANDLED;
}
@@ -3019,14 +3225,18 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg,
if (!rising)
return;
- /*
- * Disable AUTH_IRQ_EN_CFG_BIT to receive adapter voltage
- * change interrupt.
- */
- rc = smblib_masked_write(chg, USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
- AUTH_IRQ_EN_CFG_BIT, 0);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable QC auth setting rc=%d\n", rc);
+ if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+ /*
+ * Disable AUTH_IRQ_EN_CFG_BIT to receive adapter voltage
+ * change interrupt.
+ */
+ rc = smblib_masked_write(chg,
+ USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+ AUTH_IRQ_EN_CFG_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg,
+				"Couldn't disable QC auth setting rc=%d\n", rc);
+ }
if (chg->mode == PARALLEL_MASTER)
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
@@ -3184,11 +3394,15 @@ static void typec_source_removal(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
- /* reset AUTH_IRQ_EN_CFG_BIT */
- rc = smblib_masked_write(chg, USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
- AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable QC auth setting rc=%d\n", rc);
+ if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+ /* re-enable AUTH_IRQ_EN_CFG_BIT */
+ rc = smblib_masked_write(chg,
+ USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+ AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable QC auth setting rc=%d\n", rc);
+ }
/* reconfigure allowed voltage for HVDCP */
rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG,
@@ -3266,6 +3480,8 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
chg->vconn_attempts = 0;
chg->otg_attempts = 0;
+ chg->pulse_cnt = 0;
+ chg->usb_icl_delta_ua = 0;
chg->usb_ever_removed = true;
@@ -3788,6 +4004,25 @@ static void smblib_otg_ss_done_work(struct work_struct *work)
mutex_unlock(&chg->otg_oc_lock);
}
+static void smblib_icl_change_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ icl_change_work.work);
+ int rc, settled_ua;
+
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &settled_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+ return;
+ }
+
+ power_supply_changed(chg->usb_main_psy);
+ vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
+ settled_ua >= USB_WEAK_INPUT_UA, 0);
+
+ smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
+}
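
Together with the interrupt handler above, this work function debounces ICL reporting: the IRQ schedules the work immediately when AICL has settled (AICL_DONE_BIT set, or the settled value matches the effective vote) and otherwise waits ICL_CHANGE_DELAY_MS, so transient readings do not flap the parallel-enable vote. The decision reduces to:

    /* delay in ms before the ICL-change work should run */
    static int demo_icl_delay_ms(bool aicl_done, int settled_ua, int voted_ua)
    {
    	if (aicl_done || settled_ua == voted_ua)
    		return 0;	/* settled: report immediately */
    	return 1000;		/* ICL_CHANGE_DELAY_MS: debounce */
    }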
+
static int smblib_create_votables(struct smb_charger *chg)
{
int rc = 0;
@@ -3904,6 +4139,15 @@ static int smblib_create_votables(struct smb_charger *chg)
return rc;
}
+ chg->hvdcp_hw_inov_dis_votable = create_votable("HVDCP_HW_INOV_DIS",
+ VOTE_SET_ANY,
+ smblib_hvdcp_hw_inov_dis_vote_callback,
+ chg);
+ if (IS_ERR(chg->hvdcp_hw_inov_dis_votable)) {
+ rc = PTR_ERR(chg->hvdcp_hw_inov_dis_votable);
+ return rc;
+ }
+
return rc;
}
@@ -3927,6 +4171,8 @@ static void smblib_destroy_votables(struct smb_charger *chg)
destroy_votable(chg->pl_enable_votable_indirect);
if (chg->apsd_disable_votable)
destroy_votable(chg->apsd_disable_votable);
+ if (chg->hvdcp_hw_inov_dis_votable)
+ destroy_votable(chg->hvdcp_hw_inov_dis_votable);
}
static void smblib_iio_deinit(struct smb_charger *chg)
@@ -3957,6 +4203,7 @@ int smblib_init(struct smb_charger *chg)
INIT_WORK(&chg->otg_oc_work, smblib_otg_oc_work);
INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
INIT_DELAYED_WORK(&chg->otg_ss_done_work, smblib_otg_ss_done_work);
+ INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
chg->fake_capacity = -EINVAL;
switch (chg->mode) {
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index b2cfeeda5792..21ccd3ce57c7 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -56,6 +56,8 @@ enum print_reason {
#define PD_SUSPEND_SUPPORTED_VOTER "PD_SUSPEND_SUPPORTED_VOTER"
#define PL_DELAY_HVDCP_VOTER "PL_DELAY_HVDCP_VOTER"
#define CTM_VOTER "CTM_VOTER"
+#define SW_QC3_VOTER "SW_QC3_VOTER"
+#define AICL_RERUN_VOTER "AICL_RERUN_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
@@ -77,6 +79,7 @@ enum {
QC_CHARGER_DETECTION_WA_BIT = BIT(0),
BOOST_BACK_WA = BIT(1),
TYPEC_CC2_REMOVAL_WA_BIT = BIT(2),
+ QC_AUTH_INTERRUPT_WA_BIT = BIT(3),
};
enum smb_irq_index {
@@ -201,6 +204,7 @@ struct smb_iio {
struct iio_channel *usbin_i_chan;
struct iio_channel *usbin_v_chan;
struct iio_channel *batt_i_chan;
+ struct iio_channel *connector_temp_chan;
struct iio_channel *connector_temp_thr1_chan;
struct iio_channel *connector_temp_thr2_chan;
struct iio_channel *connector_temp_thr3_chan;
@@ -266,6 +270,7 @@ struct smb_charger {
struct votable *hvdcp_disable_votable_indirect;
struct votable *hvdcp_enable_votable;
struct votable *apsd_disable_votable;
+ struct votable *hvdcp_hw_inov_dis_votable;
/* work */
struct work_struct bms_update_work;
@@ -277,6 +282,7 @@ struct smb_charger {
struct work_struct otg_oc_work;
struct work_struct vconn_oc_work;
struct delayed_work otg_ss_done_work;
+ struct delayed_work icl_change_work;
/* cached status */
int voltage_min_uv;
@@ -314,6 +320,8 @@ struct smb_charger {
/* qnovo */
int qnovo_fcc_ua;
int qnovo_fv_uv;
+ int usb_icl_delta_ua;
+ int pulse_cnt;
};
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -471,6 +479,8 @@ int smblib_get_prop_fcc_delta(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_icl_override(struct smb_charger *chg, bool override);
int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua);
+int smblib_dp_dm(struct smb_charger *chg, int val);
+int smblib_rerun_aicl(struct smb_charger *chg);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index e005a27e8dd9..54b6b38d134b 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -535,6 +535,8 @@ enum {
#define USBIN_LT_3P6V_RT_STS_BIT BIT(1)
#define USBIN_COLLAPSE_RT_STS_BIT BIT(0)
+#define QC_PULSE_COUNT_STATUS_1_REG (USBIN_BASE + 0x30)
+
#define USBIN_CMD_IL_REG (USBIN_BASE + 0x40)
#define BAT_2_SYS_FET_DIS_BIT BIT(1)
#define USBIN_SUSPEND_BIT BIT(0)
@@ -544,6 +546,7 @@ enum {
#define APSD_RERUN_BIT BIT(0)
#define CMD_HVDCP_2_REG (USBIN_BASE + 0x43)
+#define RESTART_AICL_BIT BIT(7)
#define TRIGGER_AICL_BIT BIT(6)
#define FORCE_12V_BIT BIT(5)
#define FORCE_9V_BIT BIT(4)
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index d0cf37b0613a..8467d167512f 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -460,6 +460,7 @@ struct smb1351_charger {
int workaround_flags;
int parallel_pin_polarity_setting;
+ int parallel_mode;
bool parallel_charger;
bool parallel_charger_suspended;
bool bms_controlled_charging;
@@ -1699,7 +1700,7 @@ static int smb1351_parallel_get_property(struct power_supply *psy,
val->intval = 0;
break;
case POWER_SUPPLY_PROP_PARALLEL_MODE:
- val->intval = POWER_SUPPLY_PARALLEL_USBIN_USBIN;
+ val->intval = chip->parallel_mode;
break;
default:
return -EINVAL;
@@ -3191,6 +3192,12 @@ static int smb1351_parallel_charger_probe(struct i2c_client *client,
chip->parallel_pin_polarity_setting ?
EN_BY_PIN_HIGH_ENABLE : EN_BY_PIN_LOW_ENABLE;
+ if (of_property_read_bool(node,
+ "qcom,parallel-external-current-sense"))
+ chip->parallel_mode = POWER_SUPPLY_PL_USBIN_USBIN_EXT;
+ else
+ chip->parallel_mode = POWER_SUPPLY_PL_USBIN_USBIN;
+
i2c_set_clientdata(client, chip);
chip->parallel_psy_d.name = "parallel";
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 4180edc89a4c..37dc15494761 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -312,6 +312,7 @@ static enum power_supply_property smb138x_batt_props[] = {
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE,
};
static int smb138x_batt_get_prop(struct power_supply *psy,
@@ -347,6 +348,10 @@ static int smb138x_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
rc = smblib_get_prop_charger_temp_max(chg, val);
break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as device is active */
+ val->intval = 0;
+ break;
default:
pr_err("batt power supply get prop %d not supported\n", prop);
return -EINVAL;
@@ -375,6 +380,12 @@ static int smb138x_batt_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
rc = smblib_set_prop_batt_capacity(chg, val);
break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as the device is active */
+ if (!val->intval)
+ break;
+ rc = smblib_set_prop_ship_mode(chg, val);
+ break;
default:
pr_err("batt power supply set prop %d not supported\n", prop);
return -EINVAL;
@@ -430,6 +441,57 @@ static int smb138x_init_batt_psy(struct smb138x *chip)
* PARALLEL PSY REGISTRATION *
*****************************/
+static int smb138x_get_prop_connector_health(struct smb138x *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ int rc, lb_mdegc, ub_mdegc, rst_mdegc, connector_mdegc;
+
+ if (!chg->iio.connector_temp_chan ||
+ PTR_ERR(chg->iio.connector_temp_chan) == -EPROBE_DEFER)
+ chg->iio.connector_temp_chan = iio_channel_get(chg->dev,
+ "connector_temp");
+
+ if (IS_ERR(chg->iio.connector_temp_chan))
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+
+ rc = iio_read_channel_processed(chg->iio.connector_temp_thr1_chan,
+ &lb_mdegc);
+ if (rc < 0) {
+ pr_err("Couldn't read connector lower bound rc=%d\n", rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ rc = iio_read_channel_processed(chg->iio.connector_temp_thr2_chan,
+ &ub_mdegc);
+ if (rc < 0) {
+ pr_err("Couldn't read connector upper bound rc=%d\n", rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ rc = iio_read_channel_processed(chg->iio.connector_temp_thr3_chan,
+ &rst_mdegc);
+ if (rc < 0) {
+ pr_err("Couldn't read connector reset bound rc=%d\n", rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ rc = iio_read_channel_processed(chg->iio.connector_temp_chan,
+ &connector_mdegc);
+ if (rc < 0) {
+ pr_err("Couldn't read connector temperature rc=%d\n", rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ if (connector_mdegc < lb_mdegc)
+ return POWER_SUPPLY_HEALTH_COOL;
+ else if (connector_mdegc < ub_mdegc)
+ return POWER_SUPPLY_HEALTH_WARM;
+ else if (connector_mdegc < rst_mdegc)
+ return POWER_SUPPLY_HEALTH_HOT;
+
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+}
+
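
smb138x_get_prop_connector_health() classifies the connector thermistor against three ascending thresholds (thr1 < thr2 < thr3) into four bands: below thr1 is COOL, up to thr2 is WARM, up to thr3 is HOT, and anything above is OVERHEAT. A standalone restatement of the banding:

    enum demo_health { DEMO_COOL, DEMO_WARM, DEMO_HOT, DEMO_OVERHEAT };

    static enum demo_health demo_band(int t_mdegc, int thr1, int thr2, int thr3)
    {
    	if (t_mdegc < thr1)
    		return DEMO_COOL;
    	if (t_mdegc < thr2)
    		return DEMO_WARM;
    	if (t_mdegc < thr3)
    		return DEMO_HOT;
    	return DEMO_OVERHEAT;
    }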
static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_CHARGING_ENABLED,
@@ -443,6 +505,7 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_PARALLEL_MODE,
POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE,
};
static int smb138x_parallel_get_prop(struct power_supply *psy,
@@ -493,10 +556,14 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
val->strval = "smb138x";
break;
case POWER_SUPPLY_PROP_PARALLEL_MODE:
- val->intval = POWER_SUPPLY_PARALLEL_MID_MID;
+ val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
break;
case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
- rc = smblib_get_prop_die_health(chg, val);
+ val->intval = smb138x_get_prop_connector_health(chip);
+ break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as device is active */
+ val->intval = 0;
break;
default:
pr_err("parallel power supply get prop %d not supported\n",
@@ -558,6 +625,12 @@ static int smb138x_parallel_set_prop(struct power_supply *psy,
rc = smblib_set_charge_param(chg, &chg->param.freq_buck,
val->intval);
break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as the device is active */
+ if (!val->intval)
+ break;
+ rc = smblib_set_prop_ship_mode(chg, val);
+ break;
default:
pr_err("parallel power supply set prop %d not supported\n",
prop);
diff --git a/drivers/regulator/cpr4-mmss-ldo-regulator.c b/drivers/regulator/cpr4-mmss-ldo-regulator.c
index 525f9d6fcf75..6c6d112d2a6a 100644
--- a/drivers/regulator/cpr4-mmss-ldo-regulator.c
+++ b/drivers/regulator/cpr4-mmss-ldo-regulator.c
@@ -135,9 +135,15 @@ static const int sdm660_mmss_fuse_ref_volt[SDM660_MMSS_FUSE_CORNERS] = {
#define SDM660_MMSS_VOLTAGE_FUSE_SIZE 5
#define SDM660_MMSS_CPR_SENSOR_COUNT 11
+#define SDM630_MMSS_CPR_SENSOR_COUNT 7
#define SDM660_MMSS_CPR_CLOCK_RATE 19200000
+enum {
+ SDM660_SOC_ID,
+ SDM630_SOC_ID,
+};
+
/**
* cpr4_sdm660_mmss_read_fuse_data() - load MMSS specific fuse parameter
* values
@@ -594,7 +600,10 @@ static int cpr4_mmss_init_controller(struct cpr3_controller *ctrl)
return rc;
}
- ctrl->sensor_count = SDM660_MMSS_CPR_SENSOR_COUNT;
+ if (ctrl->soc_revision == SDM660_SOC_ID)
+ ctrl->sensor_count = SDM660_MMSS_CPR_SENSOR_COUNT;
+ else if (ctrl->soc_revision == SDM630_SOC_ID)
+ ctrl->sensor_count = SDM630_MMSS_CPR_SENSOR_COUNT;
/*
* MMSS only has one thread (0) so the zeroed array does not need
@@ -632,9 +641,23 @@ static int cpr4_mmss_init_controller(struct cpr3_controller *ctrl)
return 0;
}
+/* Data corresponds to the SoC revision */
+static const struct of_device_id cpr4_mmss_regulator_match_table[] = {
+ {
+ .compatible = "qcom,cpr4-sdm660-mmss-ldo-regulator",
+ .data = (void *)(uintptr_t)SDM660_SOC_ID,
+ },
+ {
+ .compatible = "qcom,cpr4-sdm630-mmss-ldo-regulator",
+ .data = (void *)(uintptr_t)SDM630_SOC_ID,
+ },
+ { },
+};
+
static int cpr4_mmss_regulator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
struct cpr3_controller *ctrl;
int rc;
@@ -659,6 +682,12 @@ static int cpr4_mmss_regulator_probe(struct platform_device *pdev)
return rc;
}
+ match = of_match_node(cpr4_mmss_regulator_match_table, dev->of_node);
+ if (match)
+ ctrl->soc_revision = (uintptr_t)match->data;
+ else
+ cpr3_err(ctrl, "could not find compatible string match\n");
+
rc = cpr3_map_fuse_base(ctrl, pdev);
if (rc) {
cpr3_err(ctrl, "could not map fuse base address\n");
@@ -731,19 +760,6 @@ static int cpr4_mmss_regulator_resume(struct platform_device *pdev)
return cpr3_regulator_resume(ctrl);
}
-/* Data corresponds to the SoC revision */
-static const struct of_device_id cpr4_mmss_regulator_match_table[] = {
- {
- .compatible = "qcom,cpr4-sdm660-mmss-ldo-regulator",
- .data = (void *)NULL,
- },
- {
- .compatible = "qcom,cpr4-sdm630-mmss-ldo-regulator",
- .data = (void *)NULL,
- },
- { },
-};
-
static struct platform_driver cpr4_mmss_regulator_driver = {
.driver = {
.name = "qcom,cpr4-mmss-ldo-regulator",
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 9be8bb94b257..2307da9497dc 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -37,7 +37,8 @@
#define MSM8998_KBSS_FUSE_CORNERS 4
#define SDM660_KBSS_FUSE_CORNERS 5
-#define SDM630_KBSS_FUSE_CORNERS 4
+#define SDM630_POWER_KBSS_FUSE_CORNERS 3
+#define SDM630_PERF_KBSS_FUSE_CORNERS 5
/**
* struct cprh_kbss_fuses - KBSS specific fuse data
@@ -131,18 +132,32 @@ static const char * const cprh_sdm660_perf_kbss_fuse_corner_name[] = {
[CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO_L2] = "TURBO_L2",
};
-enum cprh_sdm630_kbss_fuse_corner {
- CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS = 0,
- CPRH_SDM630_KBSS_FUSE_CORNER_SVSPLUS = 1,
- CPRH_SDM630_KBSS_FUSE_CORNER_NOM = 2,
- CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1 = 3,
+enum cprh_sdm630_power_kbss_fuse_corner {
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS = 0,
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_SVSPLUS = 1,
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1 = 2,
};
-static const char * const cprh_sdm630_kbss_fuse_corner_name[] = {
- [CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS] = "LowSVS",
- [CPRH_SDM630_KBSS_FUSE_CORNER_SVSPLUS] = "SVSPLUS",
- [CPRH_SDM630_KBSS_FUSE_CORNER_NOM] = "NOM",
- [CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1] = "TURBO_L1",
+static const char * const cprh_sdm630_power_kbss_fuse_corner_name[] = {
+ [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS] = "LowSVS",
+ [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_SVSPLUS] = "SVSPLUS",
+ [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1] = "TURBO_L1",
+};
+
+enum cprh_sdm630_perf_kbss_fuse_corner {
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS = 0,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_SVSPLUS = 1,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_NOM = 2,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO = 3,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2 = 4,
+};
+
+static const char * const cprh_sdm630_perf_kbss_fuse_corner_name[] = {
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS] = "LowSVS",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_SVSPLUS] = "SVSPLUS",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_NOM] = "NOM",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO] = "TURBO",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2] = "TURBO_L2",
};
/* KBSS cluster IDs */
@@ -202,17 +217,17 @@ sdm660_kbss_ro_sel_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_ro_sel_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_ro_sel_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{67, 12, 15}, {} },
{{65, 56, 59}, {} },
- {{67, 4, 7}, {} },
{{67, 0, 3}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{68, 61, 63}, {69, 0, 0} },
{{69, 1, 4}, {} },
{{68, 57, 60}, {} },
+ {{68, 53, 56}, {} },
{{66, 14, 17}, {} },
},
};
@@ -252,17 +267,17 @@ sdm660_kbss_init_voltage_param[2][SDM660_KBSS_FUSE_CORNERS][2] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_init_voltage_param[2][SDM630_KBSS_FUSE_CORNERS][2] = {
+sdm630_kbss_init_voltage_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][2] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{67, 34, 39}, {} },
{{71, 3, 8}, {} },
- {{67, 22, 27}, {} },
{{67, 16, 21}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{69, 17, 22}, {} },
{{69, 23, 28}, {} },
{{69, 11, 16}, {} },
+ {{69, 5, 10}, {} },
{{70, 42, 47}, {} },
},
};
@@ -302,17 +317,17 @@ sdm660_kbss_target_quot_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_target_quot_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_target_quot_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{68, 12, 23}, {} },
{{71, 9, 20}, {} },
- {{67, 52, 63}, {} },
{{67, 40, 51}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{69, 53, 63}, {70, 0, 0}, {} },
{{70, 1, 12}, {} },
{{69, 41, 52}, {} },
+ {{69, 29, 40}, {} },
{{70, 48, 59}, {} },
},
};
@@ -352,17 +367,17 @@ sdm660_kbss_quot_offset_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_quot_offset_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_quot_offset_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{} },
{{71, 21, 27}, {} },
- {{68, 31, 37}, {} },
{{68, 24, 30}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{} },
{{70, 27, 33}, {} },
{{70, 20, 26}, {} },
+ {{70, 13, 19}, {} },
{{70, 60, 63}, {71, 0, 2}, {} },
},
};
@@ -484,18 +499,27 @@ sdm660_kbss_fuse_ref_volt[2][SDM660_KBSS_FUSE_CORNERS] = {
* Open loop voltage fuse reference voltages in microvolts for SDM630
*/
static const int
-sdm630_kbss_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
- 644000,
- 788000,
- 868000,
- 1068000,
+sdm630_kbss_fuse_ref_volt[2][SDM630_PERF_KBSS_FUSE_CORNERS] = {
+ [CPRH_KBSS_POWER_CLUSTER_ID] = {
+ 644000,
+ 788000,
+ 1068000,
+ },
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ 644000,
+ 788000,
+ 868000,
+ 988000,
+ 1068000,
+ },
};
static const int
-sdm630_kbss_speed_bin_2_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
+sdm630_perf_kbss_speed_bin_2_fuse_ref_volt[SDM630_PERF_KBSS_FUSE_CORNERS] = {
644000,
788000,
868000,
+ 988000,
1140000,
};
@@ -552,7 +576,7 @@ sdm630_kbss_speed_bin_2_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
* sdm630 configuration
*/
#define SDM630_KBSS_POWER_CPR_SENSOR_COUNT 6
-#define SDM630_KBSS_PERFORMANCE_CPR_SENSOR_COUNT 9
+#define SDM630_KBSS_PERFORMANCE_CPR_SENSOR_COUNT 6
/*
* SOC IDs
@@ -760,7 +784,7 @@ static int cprh_sdm630_kbss_read_fuse_data(struct cpr3_regulator *vreg,
struct cprh_kbss_fuses *fuse)
{
void __iomem *base = vreg->thread->ctrl->fuse_base;
- int i, id, rc;
+ int i, id, rc, fuse_corners;
rc = cpr3_read_fuse_param(base, sdm630_cpr_fusing_rev_param,
&fuse->cpr_fusing_rev);
@@ -772,7 +796,12 @@ static int cprh_sdm630_kbss_read_fuse_data(struct cpr3_regulator *vreg,
cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
id = vreg->thread->ctrl->ctrl_id;
- for (i = 0; i < SDM630_KBSS_FUSE_CORNERS; i++) {
+ if (id == CPRH_KBSS_POWER_CLUSTER_ID)
+ fuse_corners = SDM630_POWER_KBSS_FUSE_CORNERS;
+ else
+ fuse_corners = SDM630_PERF_KBSS_FUSE_CORNERS;
+
+ for (i = 0; i < fuse_corners; i++) {
rc = cpr3_read_fuse_param(base,
sdm630_kbss_init_voltage_param[id][i],
&fuse->init_voltage[i]);
@@ -856,7 +885,10 @@ static int cprh_kbss_read_fuse_data(struct cpr3_regulator *vreg)
fuse_corners = SDM660_KBSS_FUSE_CORNERS;
break;
case SDM630_SOC_ID:
- fuse_corners = SDM630_KBSS_FUSE_CORNERS;
+ if (vreg->thread->ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID)
+ fuse_corners = SDM630_POWER_KBSS_FUSE_CORNERS;
+ else
+ fuse_corners = SDM630_PERF_KBSS_FUSE_CORNERS;
break;
case MSM8998_V1_SOC_ID:
case MSM8998_V2_SOC_ID:
@@ -1008,10 +1040,15 @@ static int cprh_kbss_calculate_open_loop_voltages(struct cpr3_regulator *vreg)
corner_name = cprh_sdm660_perf_kbss_fuse_corner_name;
break;
case SDM630_SOC_ID:
- ref_volt = sdm630_kbss_fuse_ref_volt;
- if (vreg->speed_bin_fuse == 2)
- ref_volt = sdm630_kbss_speed_bin_2_fuse_ref_volt;
- corner_name = cprh_sdm630_kbss_fuse_corner_name;
+ ref_volt = sdm630_kbss_fuse_ref_volt[id];
+ if (id == CPRH_KBSS_PERFORMANCE_CLUSTER_ID
+ && vreg->speed_bin_fuse == 2)
+ ref_volt = sdm630_perf_kbss_speed_bin_2_fuse_ref_volt;
+
+ if (id == CPRH_KBSS_POWER_CLUSTER_ID)
+ corner_name = cprh_sdm630_power_kbss_fuse_corner_name;
+ else
+ corner_name = cprh_sdm630_perf_kbss_fuse_corner_name;
break;
case MSM8998_V1_SOC_ID:
ref_volt = msm8998_v1_kbss_fuse_ref_volt;
@@ -1581,11 +1618,19 @@ static int cprh_kbss_calculate_target_quotients(struct cpr3_regulator *vreg)
}
break;
case SDM630_SOC_ID:
- corner_name = cprh_sdm630_kbss_fuse_corner_name;
- lowest_fuse_corner =
- CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS;
- highest_fuse_corner =
- CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1;
+ if (vreg->thread->ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID) {
+ corner_name = cprh_sdm630_power_kbss_fuse_corner_name;
+ lowest_fuse_corner =
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS;
+ highest_fuse_corner =
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1;
+ } else {
+ corner_name = cprh_sdm630_perf_kbss_fuse_corner_name;
+ lowest_fuse_corner =
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS;
+ highest_fuse_corner =
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2;
+ }
break;
case MSM8998_V1_SOC_ID:
case MSM8998_V2_SOC_ID:
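
The SDM630 tables above are indexed by {row, LSB, MSB} triples: each cpr3_fuse_param entry names a QFPROM fuse row and the bit range holding the value, and multi-entry parameters such as {{68, 61, 63}, {69, 0, 0}} concatenate fields that straddle a row boundary. A minimal sketch of the single-field decode, assuming fuse rows are exposed as consecutive 64-bit words at fuse_base (the driver's real decoder is cpr3_read_fuse_param(), which also handles the multi-triple case):

	static u64 fuse_field(void __iomem *fuse_base, u32 row, u32 lsb, u32 msb)
	{
		u64 raw = readq_relaxed(fuse_base + row * sizeof(u64));

		return (raw >> lsb) & GENMASK_ULL(msb - lsb, 0);
	}

Read this way, the change re-slices the performance cluster's fuse map into SDM630_PERF_KBSS_FUSE_CORNERS corners (adding the SVSPLUS and TURBO_L2 bit ranges) while the power cluster keeps its own shorter SDM630_POWER_KBSS_FUSE_CORNERS table.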
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index cf8f00085a0c..dbe2a08f1776 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -19,16 +19,19 @@
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/spmi.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/qpnp-labibb-regulator.h>
#define QPNP_LABIBB_REGULATOR_DRIVER_NAME "qcom,qpnp-labibb-regulator"
@@ -594,6 +597,7 @@ struct qpnp_labibb {
const struct lab_ver_ops *lab_ver_ops;
struct mutex bus_mutex;
enum qpnp_labibb_mode mode;
+ struct work_struct lab_vreg_ok_work;
bool standalone;
bool ttw_en;
bool in_ttw_mode;
@@ -603,10 +607,13 @@ struct qpnp_labibb {
bool ttw_force_lab_on;
bool skip_2nd_swire_cmd;
bool pfm_enable;
+ bool notify_lab_vreg_ok_sts;
u32 swire_2nd_cmd_delay;
u32 swire_ibb_ps_enable_delay;
};
+static RAW_NOTIFIER_HEAD(labibb_notifier);
+
struct ibb_ver_ops {
int (*set_default_voltage)(struct qpnp_labibb *labibb,
bool use_default);
@@ -2124,6 +2131,36 @@ static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb)
return rc;
}
+static void qpnp_lab_vreg_notifier_work(struct work_struct *work)
+{
+ int rc = 0;
+ u16 retries = 1000, dly = 5000;
+ u8 val;
+ struct qpnp_labibb *labibb = container_of(work, struct qpnp_labibb,
+ lab_vreg_ok_work);
+
+ while (retries--) {
+ rc = qpnp_labibb_read(labibb, labibb->lab_base +
+ REG_LAB_STATUS1, &val, 1);
+ if (rc < 0) {
+ pr_err("read register %x failed rc = %d\n",
+ REG_LAB_STATUS1, rc);
+ return;
+ }
+
+ if (val & LAB_STATUS1_VREG_OK) {
+ raw_notifier_call_chain(&labibb_notifier,
+ LAB_VREG_OK, NULL);
+ return;
+ }
+
+ usleep_range(dly, dly + 100);
+ }
+
+ /* all retries exhausted without seeing LAB_VREG_OK */
+ pr_err("LAB_VREG_OK not set, failed to notify\n");
+}
+
static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
{
int rc;
@@ -2326,6 +2363,9 @@ static int qpnp_lab_regulator_enable(struct regulator_dev *rdev)
labibb->lab_vreg.vreg_enabled = 1;
}
+ if (labibb->notify_lab_vreg_ok_sts)
+ schedule_work(&labibb->lab_vreg_ok_work);
+
return 0;
}
@@ -2578,6 +2618,9 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
return rc;
}
+ labibb->notify_lab_vreg_ok_sts = of_property_read_bool(of_node,
+ "qcom,notify-lab-vreg-ok-sts");
+
rc = of_property_read_u32(of_node, "qcom,qpnp-lab-soft-start",
&(labibb->lab_vreg.soft_start));
if (!rc) {
@@ -3817,6 +3860,8 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
goto fail_registration;
}
}
+
+ INIT_WORK(&labibb->lab_vreg_ok_work, qpnp_lab_vreg_notifier_work);
dev_set_drvdata(&pdev->dev, labibb);
pr_info("LAB/IBB registered successfully, lab_vreg enable=%d ibb_vreg enable=%d swire_control=%d\n",
labibb->lab_vreg.vreg_enabled,
@@ -3834,6 +3879,18 @@ fail_registration:
return rc;
}
+int qpnp_labibb_notifier_register(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_register);
+
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb)
+{
+ return raw_notifier_chain_unregister(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_unregister);
+
static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
{
struct qpnp_labibb *labibb = dev_get_drvdata(&pdev->dev);
@@ -3843,6 +3900,8 @@ static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
regulator_unregister(labibb->lab_vreg.rdev);
if (labibb->ibb_vreg.rdev)
regulator_unregister(labibb->ibb_vreg.rdev);
+
+ cancel_work_sync(&labibb->lab_vreg_ok_work);
}
return 0;
}
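
A display driver that must sequence panel initialization against LAB regulation can subscribe to the new chain. A hypothetical client, assuming the added <linux/regulator/qpnp-labibb-regulator.h> header exposes the LAB_VREG_OK event value alongside the register/unregister prototypes:

	static int panel_labibb_event(struct notifier_block *nb,
				      unsigned long event, void *data)
	{
		if (event == LAB_VREG_OK) {
			/* LAB output is in regulation; safe to continue init */
		}
		return NOTIFY_OK;
	}

	static struct notifier_block panel_labibb_nb = {
		.notifier_call = panel_labibb_event,
	};

	/* in the client's probe path: */
	rc = qpnp_labibb_notifier_register(&panel_labibb_nb);

Since this is a RAW_NOTIFIER_HEAD, the chain provides no locking of its own; that is safe here because the only publisher is the single lab_vreg_ok_work item scheduled from qpnp_lab_regulator_enable().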
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index a08ade61e057..19b1d319ef45 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -190,6 +190,9 @@ struct qpnp_lcdb {
bool ttw_enable;
bool ttw_mode_sw;
+ /* top level DT params */
+ bool force_module_reenable;
+
/* status parameters */
bool lcdb_enabled;
bool settings_saved;
@@ -588,6 +591,23 @@ static int qpnp_lcdb_enable(struct qpnp_lcdb *lcdb)
goto fail_enable;
}
+ if (lcdb->force_module_reenable) {
+ val = 0;
+ rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to enable lcdb rc= %d\n", rc);
+ goto fail_enable;
+ }
+ val = MODULE_EN_BIT;
+ rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to disable lcdb rc= %d\n", rc);
+ goto fail_enable;
+ }
+ }
+
/* poll for vreg_ok */
timeout = 10;
delay = lcdb->bst.soft_start_us + lcdb->ldo.soft_start_us +
@@ -1590,6 +1610,9 @@ static int qpnp_lcdb_parse_dt(struct qpnp_lcdb *lcdb)
}
}
+ lcdb->force_module_reenable = of_property_read_bool(node,
+ "qcom,force-module-reenable");
+
if (of_property_read_bool(node, "qcom,ttw-enable")) {
rc = qpnp_lcdb_parse_ttw(lcdb);
if (rc < 0) {
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 7369478a8c5d..bc00a7e9572f 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -35,6 +35,9 @@
#include "ufs-qcom-debugfs.h"
#include <linux/clk/msm-clk.h>
+#define MAX_PROP_SIZE 32
+#define VDDP_REF_CLK_MIN_UV 1200000
+#define VDDP_REF_CLK_MAX_UV 1200000
/* TODO: further tuning for this parameter may be required */
#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US (10000) /* microseconds */
@@ -709,40 +712,105 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
return err;
}
-static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+
+static int ufs_qcom_config_vreg(struct device *dev,
+ struct ufs_vreg *vreg, bool on)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
int ret = 0;
+ struct regulator *reg;
+ int min_uV, uA_load;
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
- ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
- ret = ufs_qcom_ice_suspend(host);
+ if (!vreg) {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ reg = vreg->reg;
+ if (regulator_count_voltages(reg) > 0) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ __func__, vreg->name, ret);
+ goto out;
+ }
+
+ uA_load = on ? vreg->max_uA : 0;
+ ret = regulator_set_load(vreg->reg, uA_load);
if (ret)
- dev_err(hba->dev, "%s: failed ufs_qcom_ice_suspend %d\n",
- __func__, ret);
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (vreg->enabled)
+ return ret;
- /* Assert PHY soft reset */
- ufs_qcom_assert_reset(hba);
+ ret = ufs_qcom_config_vreg(dev, vreg, true);
+ if (ret)
+ goto out;
+
+ ret = regulator_enable(vreg->reg);
+ if (ret)
+ goto out;
+
+ vreg->enabled = true;
+out:
+ return ret;
+}
+
+static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg->enabled)
+ return ret;
+
+ ret = regulator_disable(vreg->reg);
+ if (ret)
+ goto out;
+
+ ret = ufs_qcom_config_vreg(dev, vreg, false);
+ if (ret)
goto out;
- }
+
+ vreg->enabled = false;
+out:
+ return ret;
+}
+
+static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
+ int ret = 0;
/*
- * If UniPro link is not active, PHY ref_clk, main PHY analog power
- * rail and low noise analog power rail for PLL can be switched off.
+ * If the UniPro link is not active (i.e. in hibern8 or OFF), the PHY
+ * ref_clk, the main PHY analog power rail and the low noise analog
+ * power rail for the PLL can be switched off.
*/
if (!ufs_qcom_is_link_active(hba)) {
ufs_qcom_disable_lane_clks(host);
phy_power_off(phy);
+
+ if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
+ ret = ufs_qcom_disable_vreg(hba->dev,
+ host->vddp_ref_clk);
ufs_qcom_ice_suspend(host);
- }
+ if (ufs_qcom_is_link_off(hba)) {
+ /* Assert PHY soft reset */
+ ufs_qcom_assert_reset(hba);
+ goto out;
+ }
+ }
/* Unvote PM QoS */
ufs_qcom_pm_qos_suspend(host);
@@ -763,6 +831,11 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto out;
}
+ if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
+ hba->spm_lvl > UFS_PM_LVL_3))
+ ufs_qcom_enable_vreg(hba->dev,
+ host->vddp_ref_clk);
+
err = ufs_qcom_enable_lane_clks(host);
if (err)
goto out;
@@ -1951,6 +2024,57 @@ static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
pr_info("%s: will disable all LPM modes\n", __func__);
}
+static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
+ struct ufs_vreg **out_vreg)
+{
+ int ret = 0;
+ char prop_name[MAX_PROP_SIZE];
+ struct ufs_vreg *vreg = NULL;
+ struct device *dev = host->hba->dev;
+ struct device_node *np = dev->of_node;
+
+ if (!np) {
+ dev_err(dev, "%s: non DT initialization\n", __func__);
+ goto out;
+ }
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+ if (!of_parse_phandle(np, prop_name, 0)) {
+ dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+ __func__, prop_name);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ vreg->name = name;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+ ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
+ if (ret) {
+ dev_err(dev, "%s: unable to find %s err %d\n",
+ __func__, prop_name, ret);
+ goto out;
+ }
+
+ vreg->reg = devm_regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ ret = PTR_ERR(vreg->reg);
+ dev_err(dev, "%s: %s get failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+ vreg->min_uV = VDDP_REF_CLK_MIN_UV;
+ vreg->max_uV = VDDP_REF_CLK_MAX_UV;
+
+out:
+ if (!ret)
+ *out_vreg = vreg;
+ return ret;
+}
+
/**
* ufs_qcom_init - bind phy with controller
* @hba: host controller instance
@@ -2068,14 +2192,24 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_phy_save_controller_version(host->generic_phy,
host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
+ err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
+ &host->vddp_ref_clk);
phy_init(host->generic_phy);
err = phy_power_on(host->generic_phy);
if (err)
goto out_unregister_bus;
+ if (host->vddp_ref_clk) {
+ err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
+ if (err) {
+ dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
+ __func__, err);
+ goto out_disable_phy;
+ }
+ }
err = ufs_qcom_init_lane_clks(host);
if (err)
- goto out_disable_phy;
+ goto out_disable_vddp;
ufs_qcom_parse_lpm(host);
if (host->disable_lpm)
@@ -2100,6 +2234,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
goto out;
+out_disable_vddp:
+ if (host->vddp_ref_clk)
+ ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
out_disable_phy:
phy_power_off(host->generic_phy);
out_unregister_bus:
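
The new supply is optional by design: ufs_qcom_parse_reg_info() returns -ENODEV when the node carries no "qcom,vddp-ref-clk-supply" phandle and leaves the output pointer NULL, which is why ufs_qcom_init() does not fail on the parse result and every later user NULL-checks host->vddp_ref_clk. A sketch of the stricter caller pattern, for contrast (hypothetical; the driver above deliberately tolerates a missing supply):

	err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
				      &host->vddp_ref_clk);
	if (err && err != -ENODEV)
		return err;	/* malformed property or regulator_get failure */
	if (host->vddp_ref_clk)
		err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);

Note that the suspend path only gates the supply when the link is fully OFF, presumably because hibern8 still needs the PHY reference rails.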
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 74d8a7a30ad6..12154b268132 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -373,6 +373,7 @@ struct ufs_qcom_host {
spinlock_t ice_work_lock;
struct work_struct ice_cfg_work;
struct request *req_pending;
+ struct ufs_vreg *vddp_ref_clk;
};
static inline u32
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f5d7cd0f4701..c891fc8d34a3 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -8603,9 +8603,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto vendor_suspend;
}
} else if (ufshcd_is_link_off(hba)) {
- ret = ufshcd_host_reset_and_restore(hba);
/*
- * ufshcd_host_reset_and_restore() should have already
+ * A full initialization of the host and the device is required
+ * since the link was put to off during suspend.
+ */
+ ret = ufshcd_reset_and_restore(hba);
+ /*
+ * ufshcd_reset_and_restore() should have already
* set the link state as active
*/
if (ret || !ufshcd_is_link_active(hba))
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 33cf48454414..09c50a81a943 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -2048,7 +2048,7 @@ static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
uint16_t *best_id)
{
struct glink_core_xprt_ctx *xprt;
- struct glink_core_xprt_ctx *best_xprt;
+ struct glink_core_xprt_ctx *best_xprt = NULL;
struct glink_core_xprt_ctx *ret;
bool first = true;
@@ -5502,7 +5502,7 @@ static void tx_func(struct kthread_work *work)
{
struct channel_ctx *ch_ptr;
uint32_t prio;
- uint32_t tx_ready_head_prio;
+ uint32_t tx_ready_head_prio = 0;
int ret;
struct channel_ctx *tx_ready_head = NULL;
bool transmitted_successfully = true;
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index d3f44ab75f67..a94f741c2056 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -1157,7 +1157,7 @@ static void smem_module_init_notify(uint32_t state, void *data)
static void smem_init_security_partition(struct smem_toc_entry *entry,
uint32_t num)
{
- uint16_t remote_host;
+ uint16_t remote_host = 0;
struct smem_partition_header *hdr;
bool is_comm_partition = false;
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 4d9767b6f8a3..21e9f17e6a7e 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -595,6 +595,22 @@ static void pil_release_mmap(struct pil_desc *desc)
struct pil_priv *priv = desc->priv;
struct pil_seg *p, *tmp;
u64 zero = 0ULL;
+
+ if (priv->info) {
+ __iowrite32_copy(&priv->info->start, &zero,
+ sizeof(zero) / 4);
+ writel_relaxed(0, &priv->info->size);
+ }
+
+ list_for_each_entry_safe(p, tmp, &priv->segs, list) {
+ list_del(&p->list);
+ kfree(p);
+ }
+}
+
+static void pil_clear_segment(struct pil_desc *desc)
+{
+ struct pil_priv *priv = desc->priv;
u8 __iomem *buf;
struct pil_map_fw_info map_fw_info = {
@@ -613,16 +629,6 @@ static void pil_release_mmap(struct pil_desc *desc)
desc->unmap_fw_mem(buf, (priv->region_end - priv->region_start),
map_data);
- if (priv->info) {
- __iowrite32_copy(&priv->info->start, &zero,
- sizeof(zero) / 4);
- writel_relaxed(0, &priv->info->size);
- }
-
- list_for_each_entry_safe(p, tmp, &priv->segs, list) {
- list_del(&p->list);
- kfree(p);
- }
}
#define IOMAP_SIZE SZ_1M
@@ -914,6 +920,7 @@ out:
&desc->attrs);
priv->region = NULL;
}
+ pil_clear_segment(desc);
pil_release_mmap(desc);
}
return ret;
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index bbd77d8e0650..53bddc5987df 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -332,6 +332,7 @@ int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
struct modem_data *drv = dev_get_drvdata(pil->dev);
struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
int ret = 0;
+ struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
s32 status;
u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
@@ -360,7 +361,7 @@ int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
if (pil->subsys_vmid > 0)
pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
drv->q6->mba_dp_size);
- dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+ dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
&drv->attrs_dma);
drv->q6->mba_dp_virt = NULL;
@@ -552,6 +553,7 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
dma_addr_t mba_dp_phys, mba_dp_phys_end;
int ret, count;
const u8 *data;
+ struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
ret = request_firmware(&fw, fw_name_p, pil->dev);
@@ -570,11 +572,12 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
drv->mba_dp_size = SZ_1M;
- arch_setup_dma_ops(&md->mba_mem_dev, 0, 0, NULL, 0);
+ arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0);
+
+ dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
- md->mba_mem_dev.coherent_dma_mask =
- DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
init_dma_attrs(&md->attrs_dma);
+ dma_set_attr(DMA_ATTR_SKIP_ZEROING, &md->attrs_dma);
dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &md->attrs_dma);
ret = request_firmware(&dp_fw, dp_name, pil->dev);
@@ -591,10 +594,10 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
drv->mba_dp_size += drv->dp_size;
}
- mba_dp_virt = dma_alloc_attrs(&md->mba_mem_dev, drv->mba_dp_size,
- &mba_dp_phys, GFP_KERNEL, &md->attrs_dma);
+ mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
+ GFP_KERNEL, &md->attrs_dma);
if (!mba_dp_virt) {
- dev_err(pil->dev, "%s MBA metadata buffer allocation %zx bytes failed\n",
+ dev_err(pil->dev, "%s MBA/DP buffer allocation %zx bytes failed\n",
__func__, drv->mba_dp_size);
ret = -ENOMEM;
goto err_invalid_fw;
@@ -651,7 +654,7 @@ err_mss_reset:
pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
drv->mba_dp_size);
err_mba_data:
- dma_free_attrs(&md->mba_mem_dev, drv->mba_dp_size, drv->mba_dp_virt,
+ dma_free_attrs(dma_dev, drv->mba_dp_size, drv->mba_dp_virt,
drv->mba_dp_phys, &md->attrs_dma);
err_invalid_fw:
if (dp_fw)
@@ -670,14 +673,16 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
s32 status;
int ret;
u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+ struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
DEFINE_DMA_ATTRS(attrs);
- drv->mba_mem_dev.coherent_dma_mask =
- DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
+ dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+ dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
/* Make metadata physically contiguous and 4K aligned. */
- mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
- GFP_KERNEL, &attrs);
+ mdata_virt = dma_alloc_attrs(dma_dev, size, &mdata_phys, GFP_KERNEL,
+ &attrs);
if (!mdata_virt) {
dev_err(pil->dev, "%s MBA metadata buffer allocation %zx bytes failed\n",
__func__, size);
@@ -694,8 +699,8 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
if (ret) {
pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
ret);
- dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt,
- mdata_phys, &attrs);
+ dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys,
+ &attrs);
goto fail;
}
}
@@ -721,7 +726,7 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
if (pil->subsys_vmid > 0)
pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));
- dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, &attrs);
+ dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys, &attrs);
if (!ret)
return ret;
@@ -733,7 +738,7 @@ fail:
if (pil->subsys_vmid > 0)
pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
drv->q6->mba_dp_size);
- dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+ dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
&drv->attrs_dma);
drv->q6->mba_dp_virt = NULL;
@@ -785,6 +790,7 @@ static int pil_msa_mba_auth(struct pil_desc *pil)
struct modem_data *drv = dev_get_drvdata(pil->dev);
struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
int ret;
+ struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
s32 status;
u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
@@ -806,9 +812,9 @@ static int pil_msa_mba_auth(struct pil_desc *pil)
pil_assign_mem_to_linux(pil,
drv->q6->mba_dp_phys,
drv->q6->mba_dp_size);
- dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
- drv->q6->mba_dp_virt,
- drv->q6->mba_dp_phys, &drv->attrs_dma);
+ dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+ drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+ &drv->attrs_dma);
drv->q6->mba_dp_virt = NULL;
}
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
index fea8e1f9db37..896f0c7c232b 100644
--- a/drivers/soc/qcom/pil-msa.h
+++ b/drivers/soc/qcom/pil-msa.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,6 +32,7 @@ struct modem_data {
struct clk *xo;
struct pil_desc desc;
struct device mba_mem_dev;
+ struct device *mba_mem_dev_fixed;
struct dma_attrs attrs_dma;
};
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 5f01d30de8d9..0e023a019280 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
@@ -394,6 +395,11 @@ static int pil_mss_driver_probe(struct platform_device *pdev)
}
init_completion(&drv->stop_ack);
+ /* Probe the MBA mem device if present */
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret)
+ return ret;
+
return pil_subsys_init(drv, pdev);
}
@@ -407,6 +413,33 @@ static int pil_mss_driver_exit(struct platform_device *pdev)
return 0;
}
+static int pil_mba_mem_driver_probe(struct platform_device *pdev)
+{
+ struct modem_data *drv;
+
+ if (!pdev->dev.parent) {
+ pr_err("No parent found.\n");
+ return -EINVAL;
+ }
+ drv = dev_get_drvdata(pdev->dev.parent);
+ drv->mba_mem_dev_fixed = &pdev->dev;
+ return 0;
+}
+
+static const struct of_device_id mba_mem_match_table[] = {
+ { .compatible = "qcom,pil-mba-mem" },
+ {}
+};
+
+static struct platform_driver pil_mba_mem_driver = {
+ .probe = pil_mba_mem_driver_probe,
+ .driver = {
+ .name = "pil-mba-mem",
+ .of_match_table = mba_mem_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
static struct of_device_id mss_match_table[] = {
{ .compatible = "qcom,pil-q6v5-mss" },
{ .compatible = "qcom,pil-q6v55-mss" },
@@ -426,7 +459,12 @@ static struct platform_driver pil_mss_driver = {
static int __init pil_mss_init(void)
{
- return platform_driver_register(&pil_mss_driver);
+ int ret;
+
+ ret = platform_driver_register(&pil_mba_mem_driver);
+ if (!ret)
+ ret = platform_driver_register(&pil_mss_driver);
+ return ret;
}
module_init(pil_mss_init);
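
The of_platform_populate() call added to pil_mss_driver_probe() turns child nodes of the MSS device into platform devices, which is how a "qcom,pil-mba-mem" child reaches pil_mba_mem_driver_probe() and gets recorded as mba_mem_dev_fixed; pil-msa.c then routes every MBA/DP DMA allocation through drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev, preferring a device that can carry its own DMA configuration (for example a reserved-memory region). Registering pil_mba_mem_driver before pil_mss_driver in pil_mss_init() ensures the child can bind during the parent's probe. A hypothetical DT fragment this expects (everything beyond the compatible string is illustrative):

	/*
	 *	modem@4080000 {
	 *		compatible = "qcom,pil-q6v5-mss";
	 *		...
	 *		mba-mem {
	 *			compatible = "qcom,pil-mba-mem";
	 *			memory-region = <&mba_region>;
	 *		};
	 *	};
	 */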
diff --git a/drivers/soc/qcom/qdsp6v2/audio_notifier.c b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
index 47adc3bb3f40..b120883afbb0 100644
--- a/drivers/soc/qcom/qdsp6v2/audio_notifier.c
+++ b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -394,8 +394,8 @@ static void audio_notifer_reg_all_clients(void)
int ret;
list_for_each_safe(ptr, next, &client_list) {
- client_data = list_entry(ptr,
- struct client_data, list);
+ client_data = list_entry(ptr, struct client_data, list);
+
ret = audio_notifer_reg_client(client_data);
if (IS_ERR_VALUE(ret))
pr_err("%s: audio_notifer_reg_client failed for client %s, ret %d\n",
@@ -518,9 +518,8 @@ int audio_notifier_deregister(char *client_name)
goto done;
}
mutex_lock(&notifier_mutex);
- list_for_each_safe(ptr, next, &client_data->list) {
- client_data = list_entry(ptr, struct client_data,
- list);
+ list_for_each_safe(ptr, next, &client_list) {
+ client_data = list_entry(ptr, struct client_data, list);
if (!strcmp(client_name, client_data->client_name)) {
ret2 = audio_notifer_dereg_client(client_data);
if (ret2 < 0) {
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
index 08499c549b47..c712ed392b0b 100644
--- a/drivers/soc/qcom/ramdump.c
+++ b/drivers/soc/qcom/ramdump.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -326,13 +326,7 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
if (rd_dev->complete_ramdump) {
for (i = 0; i < nsegments-1; i++)
segments[i].size =
- PAGE_ALIGN(segments[i+1].address - segments[i].address);
-
- segments[nsegments-1].size =
- PAGE_ALIGN(segments[nsegments-1].size);
- } else {
- for (i = 0; i < nsegments; i++)
- segments[i].size = PAGE_ALIGN(segments[i].size);
+ segments[i + 1].address - segments[i].address;
}
rd_dev->segments = segments;
diff --git a/drivers/soc/qcom/rpm_rail_stats.c b/drivers/soc/qcom/rpm_rail_stats.c
index d80a9fc5146a..9ef96dc54eb6 100644
--- a/drivers/soc/qcom/rpm_rail_stats.c
+++ b/drivers/soc/qcom/rpm_rail_stats.c
@@ -27,7 +27,7 @@
#include "rpm_stats.h"
-#define RPM_RAIL_BUF_LEN 600
+#define RPM_RAIL_BUF_LEN 1300
#define SNPRINTF(buf, size, format, ...) \
{ \
@@ -118,7 +118,7 @@ static int msm_rpm_rail_type_copy(void __iomem **base, char **buf, int count)
rail[NAMELEN - 1] = '\0';
memcpy(rail, &rt.rail, NAMELEN - 1);
SNPRINTF(*buf, count,
- "\trail:%-2s num_corners:%-2u current_corner:%-2u last_entered:%-8u\n",
+ "\trail:%-2s \tnum_corners:%-2u current_corner:%-2u last_entered:%-8u\n",
rail, rt.num_corners, rt.current_corner, rt.last_entered);
*base += sizeof(rt);
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index d0fef9d31755..99ae24735f05 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -207,12 +207,11 @@ static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len,
ret = qseecom_process_listener_from_smcinvoke(&desc);
*smcinvoke_result = (int32_t)desc.ret[1];
- if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0]) {
+ if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0])
pr_err("SCM call failed with ret val = %d %d %d %d\n",
ret, (int)desc.ret[0],
(int)desc.ret[1], (int)desc.ret[2]);
- ret = ret | desc.ret[0] | desc.ret[1] | desc.ret[2];
- }
+
dmac_inv_range(in_buf, in_buf + inbuf_flush_size);
dmac_inv_range(out_buf, out_buf + outbuf_flush_size);
return ret;
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 2d52fcaf954a..6f75eee8f921 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -1442,9 +1442,9 @@ static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
u32 spi_ioc_orig;
int rc;
- rc = pm_runtime_get_sync(dd->dev);
- if (rc < 0) {
- dev_err(dd->dev, "Failure during runtime get");
+ if (dd->suspended) {
+ dev_err(dd->dev, "%s: SPI operational state not valid %d\n",
+ __func__, dd->suspended);
return;
}
@@ -1470,8 +1470,6 @@ static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
if (dd->pdata->is_shared)
put_local_resources(dd);
- pm_runtime_mark_last_busy(dd->dev);
- pm_runtime_put_autosuspend(dd->dev);
}
static void reset_core(struct msm_spi *dd)
@@ -1655,23 +1653,16 @@ static int msm_spi_prepare_transfer_hardware(struct spi_master *master)
struct msm_spi *dd = spi_master_get_devdata(master);
int resume_state = 0;
- resume_state = pm_runtime_get_sync(dd->dev);
- if (resume_state < 0)
+ if (!pm_runtime_enabled(dd->dev)) {
+ dev_err(dd->dev, "Runtime PM not available\n");
+ resume_state = -EBUSY;
goto spi_finalize;
+ }
- /*
- * Counter-part of system-suspend when runtime-pm is not enabled.
- * This way, resume can be left empty and device will be put in
- * active mode only if client requests anything on the bus
- */
- if (!pm_runtime_enabled(dd->dev))
- resume_state = msm_spi_pm_resume_runtime(dd->dev);
+ resume_state = pm_runtime_get_sync(dd->dev);
if (resume_state < 0)
goto spi_finalize;
- if (dd->suspended) {
- resume_state = -EBUSY;
- goto spi_finalize;
- }
+
return 0;
spi_finalize:
@@ -1683,6 +1674,11 @@ static int msm_spi_unprepare_transfer_hardware(struct spi_master *master)
{
struct msm_spi *dd = spi_master_get_devdata(master);
+ if (!pm_runtime_enabled(dd->dev)) {
+ dev_err(dd->dev, "Runtime PM not available\n");
+ return -EBUSY;
+ }
+
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
return 0;
@@ -2621,27 +2617,10 @@ resume_exit:
#ifdef CONFIG_PM_SLEEP
static int msm_spi_suspend(struct device *device)
{
- if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
- struct platform_device *pdev = to_platform_device(device);
- struct spi_master *master = platform_get_drvdata(pdev);
- struct msm_spi *dd;
-
- dev_dbg(device, "system suspend");
- if (!master)
- goto suspend_exit;
- dd = spi_master_get_devdata(master);
- if (!dd)
- goto suspend_exit;
- msm_spi_pm_suspend_runtime(device);
-
- /*
- * set the device's runtime PM status to 'suspended'
- */
- pm_runtime_disable(device);
- pm_runtime_set_suspended(device);
- pm_runtime_enable(device);
+ if (!pm_runtime_status_suspended(device)) {
+ dev_err(device, "Runtime not suspended, deny sys suspend");
+ return -EBUSY;
}
-suspend_exit:
return 0;
}
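
The SPI rework reduces prepare/unprepare_transfer_hardware to a strict runtime-PM reference pair and makes system suspend refuse with -EBUSY unless runtime PM has already idled the controller, rather than force-suspending it behind runtime PM's back. The general idiom the hooks now follow (sketch, not a verbatim copy; pm_runtime_put_noidle() after a failed get is the documented cleanup, which the driver's own error path still skips):

	static int prepare_hw(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);	/* resume and take a usage ref */

		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* a failed get still took the ref */
			return ret;
		}
		return 0;
	}

	static void unprepare_hw(struct device *dev)
	{
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);	/* drop ref; suspend after the autosuspend delay */
	}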
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 6481ee96f3e4..2f08a6c9d476 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -268,6 +268,7 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
struct ipa_req_chan_out_params ipa_in_channel_out_params;
struct ipa_req_chan_out_params ipa_out_channel_out_params;
+ log_event_dbg("%s: USB GSI IN OPS", __func__);
usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
GSI_EP_OP_PREPARE_TRBS);
usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
@@ -279,6 +280,8 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
gsi_channel_info.ch_req = &d_port->in_request;
usb_gsi_ep_op(d_port->in_ep, (void *)&gsi_channel_info,
GSI_EP_OP_GET_CH_INFO);
+
+ log_event_dbg("%s: USB GSI IN OPS Completed", __func__);
in_params->client =
(gsi->prot_id != IPA_USB_DIAG) ? IPA_CLIENT_USB_CONS :
IPA_CLIENT_USB_DPL_CONS;
@@ -307,6 +310,7 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
gsi_channel_info.depcmd_hi_addr;
if (d_port->out_ep) {
+ log_event_dbg("%s: USB GSI OUT OPS", __func__);
usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
GSI_EP_OP_PREPARE_TRBS);
usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
@@ -318,7 +322,7 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
gsi_channel_info.ch_req = &d_port->out_request;
usb_gsi_ep_op(d_port->out_ep, (void *)&gsi_channel_info,
GSI_EP_OP_GET_CH_INFO);
-
+ log_event_dbg("%s: USB GSI OUT OPS Completed", __func__);
out_params->client = IPA_CLIENT_USB_PROD;
out_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
out_params->teth_prot = gsi->prot_id;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 7244dc9419f3..81ce22e91883 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -889,7 +889,7 @@ static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
if (len < 18)
return -EINVAL;
- snprintf(str, len, "%pKM", dev_addr);
+ snprintf(str, len, "%pM", dev_addr);
return 18;
}
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 087cd9fecfe9..ea278781440c 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -1931,6 +1931,11 @@ static void usbpd_sm(struct work_struct *w)
case PE_SNK_SELECT_CAPABILITY:
if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
+ u32 pdo = pd->received_pdos[pd->requested_pdo - 1];
+ bool same_pps = (pd->selected_pdo == pd->requested_pdo)
+ && (PD_SRC_PDO_TYPE(pdo) ==
+ PD_SRC_PDO_TYPE_AUGMENTED);
+
usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);
/* prepare for voltage increase/decrease */
@@ -1942,11 +1947,12 @@ static void usbpd_sm(struct work_struct *w)
&val);
/*
- * if we are changing voltages, we must lower input
- * current to pSnkStdby (2.5W). Calculate it and set
- * PD_CURRENT_MAX accordingly.
+ * if changing voltages (not within the same PPS PDO),
+ * we must lower input current to pSnkStdby (2.5W).
+ * Calculate it and set PD_CURRENT_MAX accordingly.
*/
- if (pd->requested_voltage != pd->current_voltage) {
+ if (!same_pps &&
+ pd->requested_voltage != pd->current_voltage) {
int mv = max(pd->requested_voltage,
pd->current_voltage) / 1000;
val.intval = (2500000 / mv) * 1000;
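
USB-PD caps sink draw at pSnkStdby (2.5 W) while a voltage transition is in flight; the code converts that power budget into a current limit at the higher of the old and new voltages, and the new same_pps test skips the clamp for adjustments within one augmented (PPS) PDO, on the reasoning that such a step is not a voltage transition in the pSnkStdby sense. A worked example of the arithmetic, with voltages in µV as the driver keeps them:

	int mv = max(9000000, 5000000) / 1000;	/* 5 V -> 9 V: clamp at 9000 mV */
	int ua = (2500000 / mv) * 1000;		/* 2500000 uW / 9000 mV = 277 mA */
	/* ua == 277000 uA; 9 V * 277 mA ~= 2.5 W = pSnkStdby */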
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 6430e419fc9c..5867c6c204c9 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -72,6 +72,7 @@
#define QUSB2PHY_PORT_TUNE2 0x84
#define QUSB2PHY_PORT_TUNE3 0x88
#define QUSB2PHY_PORT_TUNE4 0x8C
+#define QUSB2PHY_PORT_TUNE5 0x90
/* In case Efuse register shows zero, use this value */
#define TUNE2_DEFAULT_HIGH_NIBBLE 0xB
@@ -102,10 +103,27 @@
#define QUSB2PHY_REFCLK_ENABLE BIT(0)
+unsigned int tune1;
+module_param(tune1, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune1, "QUSB PHY TUNE1");
+
unsigned int tune2;
module_param(tune2, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2");
+unsigned int tune3;
+module_param(tune3, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune3, "QUSB PHY TUNE3");
+
+unsigned int tune4;
+module_param(tune4, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune4, "QUSB PHY TUNE4");
+
+unsigned int tune5;
+module_param(tune5, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune5, "QUSB PHY TUNE5");
+
+
struct qusb_phy {
struct usb_phy phy;
void __iomem *base;
@@ -562,13 +580,29 @@ static int qusb_phy_init(struct usb_phy *phy)
qphy->base + QUSB2PHY_PORT_TUNE2);
}
- /* If tune2 modparam set, override tune2 value */
- if (tune2) {
- pr_debug("%s(): (modparam) TUNE2 val:0x%02x\n",
- __func__, tune2);
+ /* If any tune modparam is set, override that TUNE register */
+
+ pr_debug("%s(): user-specified modparam TUNE1-5 vals: 0x%x %x %x %x %x\n",
+ __func__, tune1, tune2, tune3, tune4, tune5);
+ if (tune1)
+ writel_relaxed(tune1,
+ qphy->base + QUSB2PHY_PORT_TUNE1);
+
+ if (tune2)
writel_relaxed(tune2,
qphy->base + QUSB2PHY_PORT_TUNE2);
- }
+
+ if (tune3)
+ writel_relaxed(tune3,
+ qphy->base + QUSB2PHY_PORT_TUNE3);
+
+ if (tune4)
+ writel_relaxed(tune4,
+ qphy->base + QUSB2PHY_PORT_TUNE4);
+
+ if (tune5)
+ writel_relaxed(tune5,
+ qphy->base + QUSB2PHY_PORT_TUNE5);
/* ensure above writes are completed before re-enabling PHY */
wmb();
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 6182aa2c626e..bf4dc39f57ee 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -222,6 +222,7 @@ struct mdss_smmu_client {
struct dss_module_power mp;
struct reg_bus_client *reg_bus_clt;
bool domain_attached;
+ bool domain_reattach;
bool handoff_pending;
void __iomem *mmu_base;
struct list_head _client;
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index a183fd7cd247..f4c4c509410a 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -4276,8 +4276,6 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
goto buf_sync_err_2;
}
- sync_fence_install(rel_fence, rel_fen_fd);
-
ret = copy_to_user(buf_sync->rel_fen_fd, &rel_fen_fd, sizeof(int));
if (ret) {
pr_err("%s: copy_to_user failed\n", sync_pt_data->fence_name);
@@ -4314,8 +4312,6 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
goto buf_sync_err_3;
}
- sync_fence_install(retire_fence, retire_fen_fd);
-
ret = copy_to_user(buf_sync->retire_fen_fd, &retire_fen_fd,
sizeof(int));
if (ret) {
@@ -4326,6 +4322,9 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
goto buf_sync_err_3;
}
+ sync_fence_install(rel_fence, rel_fen_fd);
+ sync_fence_install(retire_fence, retire_fen_fd);
+
skip_retire_fence:
mutex_unlock(&sync_pt_data->sync_mutex);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index ec37cd1d5bb0..e258f258aeca 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -742,7 +742,8 @@ int mdss_mdp_get_pipe_overlap_bw(struct mdss_mdp_pipe *pipe,
*quota = fps * src.w * src_h;
- if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
+ if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420 &&
+ pipe->src_fmt->format != MDP_Y_CBCR_H2V2_TP10_UBWC)
/*
* with decimation, chroma is not downsampled, this means we
* need to allocate bw for extra lines that will be fetched
@@ -4769,6 +4770,8 @@ static void __mdss_mdp_mixer_get_offsets(u32 mixer_num,
static inline int __mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
/*
* mapping to hardware expectation of actual mixer programming to
* happen on following registers:
@@ -4776,6 +4779,11 @@ static inline int __mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
* WB: 3, 4
* With some exceptions on certain revisions
*/
+
+ if (mdata->mdp_rev == MDSS_MDP_HW_REV_330
+ && mixer->num == MDSS_MDP_INTF_LAYERMIXER1)
+ return MDSS_MDP_INTF_LAYERMIXER2;
+
if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) {
u32 wb_offset;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index c97a58c86c29..dfd6226ce602 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -2659,10 +2659,11 @@ static void mdss_mdp_cmd_wait4_autorefresh_done(struct mdss_mdp_ctl *ctl)
struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
unsigned long flags;
unsigned long autorefresh_timeout;
+ u32 timeout_us = 20000;
line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
- MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
+ MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h, 0x111);
reinit_completion(&ctx->autorefresh_done);
@@ -2721,6 +2722,26 @@ static void mdss_mdp_cmd_wait4_autorefresh_done(struct mdss_mdp_ctl *ctl)
"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
"dbg_bus", "vbif_dbg_bus", "panic");
}
+
+ /* once autorefresh is done, wait for write pointer */
+ line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
+ MDSS_XLOG(ctl->num, line_out, 0x222);
+
+ /* wait until the first line is out to make sure transfer is on-going */
+ rc = readl_poll_timeout(pp_base +
+ MDSS_MDP_REG_PP_LINE_COUNT, val,
+ (val & 0xffff) >= 1, 10, timeout_us);
+
+ if (rc) {
+ pr_err("timed out waiting for line out ctl:%d val:0x%x\n",
+ ctl->num, val);
+ MDSS_XLOG(0xbad5, val);
+ MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+ "dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
+ "dbg_bus", "vbif_dbg_bus", "panic");
+ }
+
+ MDSS_XLOG(val, 0x333);
}
/* caller needs to hold autorefresh_lock before calling this function */
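
The added wait polls the ping-pong line counter with readl_poll_timeout() from <linux/iopoll.h> until the first line is out, so the autorefresh teardown only proceeds once the final kickoff is demonstrably transferring. A rough open-coded equivalent of that call (sketch; the real macro also re-tests the condition once after the deadline):

	ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);

	for (;;) {
		val = readl(pp_base + MDSS_MDP_REG_PP_LINE_COUNT);
		if ((val & 0xffff) >= 1) {
			rc = 0;
			break;
		}
		if (ktime_after(ktime_get(), deadline)) {
			rc = -ETIMEDOUT;
			break;
		}
		usleep_range(5, 10);	/* ~10 us poll interval, as requested above */
	}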
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 1a9b09ca6988..da08917d334b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -2765,6 +2765,7 @@ static int mdss_mdp_overlay_get_fb_pipe(struct msm_fb_data_type *mfd,
bpp = fbi->var.bits_per_pixel / 8;
req->id = MSMFB_NEW_REQUEST;
+ req->flags |= MDP_OV_PIPE_FORCE_DMA;
req->src.format = mfd->fb_imgType;
req->src.height = fbi->var.yres;
req->src.width = fbi->fix.line_length / bpp;
@@ -2878,7 +2879,7 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
MDSS_MDP_MIXER_MUX_LEFT, &l_pipe_allocated);
if (ret) {
pr_err("unable to allocate base pipe\n");
- goto iommu_disable;
+ goto pipe_release;
}
if (mdss_mdp_pipe_map(l_pipe)) {
@@ -2889,20 +2890,20 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
ret = mdss_mdp_overlay_start(mfd);
if (ret) {
pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
- goto clk_disable;
+ goto pipe_release;
}
ret = mdss_iommu_ctrl(1);
if (IS_ERR_VALUE(ret)) {
pr_err("IOMMU attach failed\n");
- goto clk_disable;
+ goto iommu_disable;
}
buf_l = __mdp_overlay_buf_alloc(mfd, l_pipe);
if (!buf_l) {
pr_err("unable to allocate memory for fb buffer\n");
mdss_mdp_pipe_unmap(l_pipe);
- goto pipe_release;
+ goto iommu_disable;
}
buf_l->p[0].srcp_table = mfd->fb_table;
@@ -2925,19 +2926,19 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
MDSS_MDP_MIXER_MUX_RIGHT, &r_pipe_allocated);
if (ret) {
pr_err("unable to allocate right base pipe\n");
- goto pipe_release;
+ goto iommu_disable;
}
if (mdss_mdp_pipe_map(r_pipe)) {
pr_err("unable to map right base pipe\n");
- goto pipe_release;
+ goto iommu_disable;
}
buf_r = __mdp_overlay_buf_alloc(mfd, r_pipe);
if (!buf_r) {
pr_err("unable to allocate memory for fb buffer\n");
mdss_mdp_pipe_unmap(r_pipe);
- goto pipe_release;
+ goto iommu_disable;
}
buf_r->p[0] = buf_l->p[0];
@@ -2955,6 +2956,9 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
return;
+iommu_disable:
+ mdss_iommu_ctrl(0);
+
pipe_release:
if (r_pipe_allocated)
mdss_mdp_overlay_release(mfd, r_pipe->ndx);
@@ -2962,8 +2966,6 @@ pipe_release:
__mdp_overlay_buf_free(mfd, buf_l);
if (l_pipe_allocated)
mdss_mdp_overlay_release(mfd, l_pipe->ndx);
-iommu_disable:
- mdss_iommu_ctrl(0);
clk_disable:
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
mutex_unlock(&mdp5_data->ov_lock);
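
The relabelled error paths restore strict reverse-order teardown: a failure after the mdss_iommu_ctrl(1) vote now jumps to the new iommu_disable label, which drops the vote and falls through to pipe_release, whereas the old layout could release pipes without ever undoing the IOMMU vote. The canonical shape being followed, with hypothetical names:

	ret = grab_a();
	if (ret)
		goto out;
	ret = grab_b();
	if (ret)
		goto undo_a;
	ret = grab_c();
	if (ret)
		goto undo_b;
	return 0;

	undo_b:
		drop_b();
	undo_a:
		drop_a();
	out:
		return ret;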
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 38bff6b8edee..1fe8fe6f7be8 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -5319,8 +5319,8 @@ static int pp_hist_collect(struct mdp_histogram_data *hist,
pr_err("failed to get the hist data, sum = %d\n", sum);
ret = sum;
} else if (expect_sum && sum != expect_sum) {
- pr_err("hist error: bin sum incorrect! (%d/%d)\n",
- sum, expect_sum);
+ pr_err_ratelimited("hist error: bin sum incorrect! (%d/%d)\n",
+ sum, expect_sum);
ret = -EINVAL;
}
hist_collect_exit:
@@ -5390,8 +5390,8 @@ int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
ret = pp_hist_collect(hist, hists[i], ctl_base,
exp_sum, DSPP);
if (ret)
- pr_err("hist error: dspp[%d] collect %d\n",
- dspp_num, ret);
+ pr_err_ratelimited("hist error: dspp[%d] collect %d\n",
+ dspp_num, ret);
}
/* state of dspp histogram blocks attached to logical display
* should be changed atomically to idle. This will ensure that
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index 6930444118b6..62e25500060e 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -299,7 +299,6 @@ static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
if (!mdss_smmu_is_valid_domain_type(mdata, i))
continue;
-
mdss_smmu = mdss_smmu_get_cb(i);
if (mdss_smmu && mdss_smmu->base.dev) {
if (!mdss_smmu->handoff_pending) {
@@ -326,6 +325,14 @@ static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
goto err;
}
mdss_smmu->domain_attached = true;
+ if (mdss_smmu->domain_reattach) {
+ pr_debug("iommu v2 domain[%i] remove extra vote\n",
+ i);
+ /* remove extra power vote */
+ mdss_smmu_enable_power(mdss_smmu,
+ false);
+ mdss_smmu->domain_reattach = false;
+ }
pr_debug("iommu v2 domain[%i] attached\n", i);
}
} else {
@@ -379,6 +386,11 @@ static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
*/
arm_iommu_detach_device(mdss_smmu->base.dev);
mdss_smmu->domain_attached = false;
+ /*
+ * since we are leaving clocks on, on
+ * re-attach do not vote for clocks
+ */
+ mdss_smmu->domain_reattach = true;
pr_debug("iommu v2 domain[%i] detached\n", i);
} else {
mdss_smmu_enable_power(mdss_smmu, false);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 56f7e2521202..8959b320d472 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -30,6 +30,7 @@
#include <linux/balloon_compaction.h>
#include <linux/oom.h>
#include <linux/wait.h>
+#include <linux/mount.h>
/*
* Balloon device works in 4K page units. So each page is pointed to by
@@ -45,6 +46,10 @@ static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
module_param(oom_pages, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(oom_pages, "pages to free on OOM");
+#ifdef CONFIG_BALLOON_COMPACTION
+static struct vfsmount *balloon_mnt;
+#endif
+
struct virtio_balloon {
struct virtio_device *vdev;
struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
@@ -486,6 +491,24 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
return MIGRATEPAGE_SUCCESS;
}
+
+static struct dentry *balloon_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ static const struct dentry_operations ops = {
+ .d_dname = simple_dname,
+ };
+
+ return mount_pseudo(fs_type, "balloon-kvm:", NULL, &ops,
+ BALLOON_KVM_MAGIC);
+}
+
+static struct file_system_type balloon_fs = {
+ .name = "balloon-kvm",
+ .mount = balloon_mount,
+ .kill_sb = kill_anon_super,
+};
+
#endif /* CONFIG_BALLOON_COMPACTION */
static int virtballoon_probe(struct virtio_device *vdev)
@@ -513,9 +536,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->need_stats_update = 0;
balloon_devinfo_init(&vb->vb_dev_info);
-#ifdef CONFIG_BALLOON_COMPACTION
- vb->vb_dev_info.migratepage = virtballoon_migratepage;
-#endif
err = init_vqs(vb);
if (err)
@@ -525,7 +545,27 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
err = register_oom_notifier(&vb->nb);
if (err < 0)
- goto out_oom_notify;
+ goto out_del_vqs;
+
+#ifdef CONFIG_BALLOON_COMPACTION
+ balloon_mnt = kern_mount(&balloon_fs);
+ if (IS_ERR(balloon_mnt)) {
+ err = PTR_ERR(balloon_mnt);
+ unregister_oom_notifier(&vb->nb);
+ goto out_del_vqs;
+ }
+
+ vb->vb_dev_info.migratepage = virtballoon_migratepage;
+ vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
+ if (IS_ERR(vb->vb_dev_info.inode)) {
+ err = PTR_ERR(vb->vb_dev_info.inode);
+ kern_unmount(balloon_mnt);
+ unregister_oom_notifier(&vb->nb);
+ vb->vb_dev_info.inode = NULL;
+ goto out_del_vqs;
+ }
+ vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
+#endif
virtio_device_ready(vdev);
@@ -539,7 +579,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
out_del_vqs:
unregister_oom_notifier(&vb->nb);
-out_oom_notify:
vdev->config->del_vqs(vdev);
out_free_vb:
kfree(vb);
@@ -567,6 +606,8 @@ static void virtballoon_remove(struct virtio_device *vdev)
unregister_oom_notifier(&vb->nb);
kthread_stop(vb->thread);
remove_common(vb);
+ if (vb->vb_dev_info.inode)
+ iput(vb->vb_dev_info.inode);
kfree(vb);
}
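
Balloon page migration is keyed off page->mapping, so the probe path now gives ballooned pages a real address_space: a pseudo filesystem is kern_mount()ed once and an anonymous inode on it carries balloon_aops. The object graph the code above builds, as a sketch:

	/*
	 * balloon_mnt = kern_mount(&balloon_fs)   one pseudo-fs superblock
	 *   vb->vb_dev_info.inode                 alloc_anon_inode(balloon_mnt->mnt_sb)
	 *     inode->i_mapping->a_ops = &balloon_aops
	 *
	 * The balloon_compaction core associates each ballooned page with
	 * that i_mapping, which is how compaction locates balloon_aops and
	 * reaches virtballoon_migratepage(); remove() drops the inode with
	 * iput() once the device goes away.
	 */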