author      Jordan Crouse <jcrouse@codeaurora.org>   2017-06-12 09:16:49 -0600
committer   Jordan Crouse <jcrouse@codeaurora.org>   2017-06-19 15:50:34 -0600
commit      180cb9a578d1333651912f9539ba8c2db91b3d04 (patch)
tree        7d05f032053a622197d734cb1080bc0ad6eb717b /drivers
parent      ae2cb03114bd664101ff72709a534ed4ddafd012 (diff)
drm/msm: Move memptrs to msm_gpu and reorganize
Since most of the heavy lifting for managing submits lives in the msm_gpu domain, it makes sense to move the memptrs there so that they are globally visible and we can use them without relying on function pointers.

Additionally, instead of having a single struct full of per-ring arrays, reorganize the structure and assign a sub-allocation to each ring. This simplifies all of the various macros and other bits, and allows us to make the size of the allocation dependent on the actual number of rings for the implementation.

Change-Id: Ic0dedbadc18ba1dc786c82b082c5030e13ff8012
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
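To make the reorganization concrete before the diff: rather than adreno_gpu->memptrs holding per-ring arrays indexed by ring->id, one buffer now holds nr_rings consecutive struct msm_memptrs records, and each ring keeps a CPU pointer plus a GPU iova to its own record. The sketch below is not part of the patch; struct msm_memptrs and rbmemptr() mirror the new msm_ringbuffer.h, while the trimmed-down msm_ringbuffer, the standalone main() and the placeholder iova value are illustrative assumptions only.

/*
 * Minimal standalone sketch (not part of the patch) of the new layout:
 * one allocation holds NR_RINGS consecutive msm_memptrs records, and
 * each ring keeps a CPU pointer and a GPU iova to its own record.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NR_RINGS 4

struct msm_memptrs {
	volatile uint32_t rptr;
	volatile uint32_t fence;
	volatile uint64_t ttbr0;
	volatile unsigned int contextidr;
};

/* As in the new msm_ringbuffer.h: GPU address of one member of a ring's record */
#define rbmemptr(ring, member) \
	((ring)->memptrs_iova + offsetof(struct msm_memptrs, member))

/* Trimmed-down stand-in for struct msm_ringbuffer */
struct msm_ringbuffer {
	struct msm_memptrs *memptrs;	/* CPU view of this ring's record */
	uint64_t memptrs_iova;		/* GPU view of the same record */
};

int main(void)
{
	static struct msm_memptrs buf[NR_RINGS];	/* stand-in for the GEM buffer */
	uint64_t iova = 0x100000;			/* pretend GPU address of buf */
	struct msm_ringbuffer rings[NR_RINGS];
	int i;

	/* msm_gpu_init() hands each ring the next record, as in the patch */
	for (i = 0; i < NR_RINGS; i++) {
		rings[i].memptrs = &buf[i];
		rings[i].memptrs_iova = iova + i * sizeof(struct msm_memptrs);
	}

	/* e.g. the address the CP would use for ring 2's fence write */
	printf("ring 2 fence iova: 0x%llx\n",
	       (unsigned long long)rbmemptr(&rings[2], fence));
	return 0;
}

Because the macro now only needs the ring, the old _sizeof()/_base() helpers and the ring->id indexing go away, which is what shrinks the call sites in the hunks below.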
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c      | 15
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c  | 11
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c    | 43
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h    | 23
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c              | 27
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h              |  2
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c       |  7
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.h       | 16
8 files changed, 69 insertions, 75 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 3fc3065147b2..f8dbc843f852 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -46,7 +46,6 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
struct msm_gem_address_space *aspace)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_mmu *mmu = aspace->mmu;
struct msm_iommu *iommu = to_msm_iommu(mmu);
@@ -75,17 +74,15 @@ static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
* reload the pagetable if the current ring gets preempted out.
*/
OUT_PKT7(ring, CP_MEM_WRITE, 4);
- OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
- OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, ttbr0)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, ttbr0)));
OUT_RING(ring, lower_32_bits(iommu->ttbr0));
OUT_RING(ring, upper_32_bits(iommu->ttbr0));
/* Also write the current contextidr (ASID) */
OUT_PKT7(ring, CP_MEM_WRITE, 3);
- OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id,
- contextidr)));
- OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id,
- contextidr)));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, contextidr)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, contextidr)));
OUT_RING(ring, iommu->contextidr);
/* Invalidate the draw state so we start off fresh */
@@ -217,8 +214,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
- OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
- OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->fence);
if (submit->secure) {
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index dacdb4752393..57ef366cf82c 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -65,7 +65,6 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Return the highest priority ringbuffer with something in it */
static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
unsigned long flags;
int i;
@@ -74,7 +73,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
struct msm_ringbuffer *ring = gpu->rb[i];
spin_lock_irqsave(&ring->lock, flags);
- empty = (get_wptr(ring) == adreno_gpu->memptrs->rptr[ring->id]);
+ empty = (get_wptr(ring) == ring->memptrs->rptr);
spin_unlock_irqrestore(&ring->lock, flags);
if (!empty)
@@ -141,10 +140,8 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
/* Set the SMMU info for the preemption */
if (a5xx_gpu->smmu_info) {
- a5xx_gpu->smmu_info->ttbr0 =
- adreno_gpu->memptrs->ttbr0[ring->id];
- a5xx_gpu->smmu_info->contextidr =
- adreno_gpu->memptrs->contextidr[ring->id];
+ a5xx_gpu->smmu_info->ttbr0 = ring->memptrs->ttbr0;
+ a5xx_gpu->smmu_info->contextidr = ring->memptrs->contextidr;
}
/* Set the address of the incoming preemption record */
@@ -261,7 +258,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr->info = 0;
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
- ptr->rptr_addr = rbmemptr(adreno_gpu, ring->id, rptr);
+ ptr->rptr_addr = rbmemptr(ring, rptr);
ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
return 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 0de73f61bfcb..9f3d957499d3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -90,7 +90,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(adreno_gpu, 0, rptr));
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu->rb[0], rptr));
return 0;
}
@@ -106,10 +106,11 @@ static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
* ensure that it won't be. If not then this is why your
* a430 stopped working.
*/
- return adreno_gpu->memptrs->rptr[ring->id] = adreno_gpu_read(
- adreno_gpu, REG_ADRENO_CP_RB_RPTR);
- } else
- return adreno_gpu->memptrs->rptr[ring->id];
+ return ring->memptrs->rptr =
+ adreno_gpu_read(adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+ }
+
+ return ring->memptrs->rptr;
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
@@ -128,17 +129,11 @@ uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-
- if (!ring)
- return 0;
-
- return adreno_gpu->memptrs->fence[ring->id];
+ return ring ? ring->memptrs->fence : 0;
}
void adreno_recover(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *dev = gpu->dev;
struct msm_ringbuffer *ring;
int ret, i;
@@ -156,9 +151,8 @@ void adreno_recover(struct msm_gpu *gpu)
ring->next = ring->start;
/* reset completed fence seqno, discard anything pending: */
- adreno_gpu->memptrs->fence[ring->id] =
- adreno_submitted_fence(gpu, ring);
- adreno_gpu->memptrs->rptr[ring->id] = 0;
+ ring->memptrs->fence = adreno_submitted_fence(gpu, ring);
+ ring->memptrs->rptr = 0;
}
gpu->funcs->pm_resume(gpu);
@@ -213,7 +207,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
- OUT_RING(ring, rbmemptr(adreno_gpu, ring->id, fence));
+ OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->fence);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
@@ -540,30 +534,15 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
}
ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
- if (ret) {
+ if (ret)
dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
adreno_gpu->info->pfpfw, ret);
- return ret;
- }
-
- adreno_gpu->memptrs = msm_gem_kernel_new(drm,
- sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
- &adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
-
- if (IS_ERR(adreno_gpu->memptrs)) {
- ret = PTR_ERR(adreno_gpu->memptrs);
- adreno_gpu->memptrs = NULL;
- }
return ret;
}
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
- if (gpu->memptrs_bo) {
- msm_gem_put_iova(gpu->memptrs_bo, gpu->base.aspace);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
- }
release_firmware(gpu->pm4);
release_firmware(gpu->pfp);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 9e622fa06ce4..c894956fb5e8 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -83,22 +83,6 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev);
-#define _sizeof(member) \
- sizeof(((struct adreno_rbmemptrs *) 0)->member[0])
-
-#define _base(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
-#define rbmemptr(adreno_gpu, index, member) \
- (_base((adreno_gpu), member) + ((index) * _sizeof(member)))
-
-struct adreno_rbmemptrs {
- volatile uint32_t rptr[MSM_GPU_MAX_RINGS];
- volatile uint32_t fence[MSM_GPU_MAX_RINGS];
- volatile uint64_t ttbr0[MSM_GPU_MAX_RINGS];
- volatile unsigned int contextidr[MSM_GPU_MAX_RINGS];
-};
-
struct adreno_counter {
u32 lo;
u32 hi;
@@ -137,13 +121,6 @@ struct adreno_gpu {
/* firmware: */
const struct firmware *pm4, *pfp;
- /* ringbuffer rptr/wptr: */
- // TODO should this be in msm_ringbuffer? I think it would be
- // different for z180..
- struct adreno_rbmemptrs *memptrs;
- struct drm_gem_object *memptrs_bo;
- uint64_t memptrs_iova;
-
/*
* Register offsets are different between some GPUs.
* GPU specific offsets will be exported by GPU specific
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 159dc6d75cbb..16049e830b7a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -841,6 +841,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
const char *name, struct msm_gpu_config *config)
{
int i, ret, nr_rings;
+ void *memptrs;
+ uint64_t memptrs_iova;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -923,10 +925,18 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
nr_rings = ARRAY_SIZE(gpu->rb);
}
+ /* Allocate one buffer to hold all the memptr records for the rings */
+ memptrs = msm_gem_kernel_new(drm, sizeof(struct msm_memptrs) * nr_rings,
+ MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo, &memptrs_iova);
+
+ if (IS_ERR(memptrs)) {
+ ret = PTR_ERR(memptrs);
+ goto fail;
+ }
+
/* Create ringbuffer(s): */
for (i = 0; i < nr_rings; i++) {
-
- gpu->rb[i] = msm_ringbuffer_new(gpu, i);
+ gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
if (IS_ERR(gpu->rb[i])) {
ret = PTR_ERR(gpu->rb[i]);
gpu->rb[i] = NULL;
@@ -934,6 +944,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
"could not create ringbuffer %d: %d\n", i, ret);
goto fail;
}
+
+ memptrs += sizeof(struct msm_memptrs);
+ memptrs_iova += sizeof(struct msm_memptrs);
}
gpu->nr_rings = nr_rings;
@@ -958,6 +971,11 @@ fail:
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
msm_ringbuffer_destroy(gpu->rb[i]);
+ if (gpu->memptrs_bo) {
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ }
+
msm_gpu_destroy_address_space(gpu->aspace);
msm_gpu_destroy_address_space(gpu->secure_aspace);
@@ -981,6 +999,11 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
msm_ringbuffer_destroy(gpu->rb[i]);
+ if (gpu->memptrs_bo) {
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ }
+
msm_snapshot_destroy(gpu, gpu->snapshot);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index cddbcbbb8557..306139bcd103 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -131,6 +131,8 @@ struct msm_gpu {
struct pm_qos_request pm_qos_req_dma;
+ struct drm_gem_object *memptrs_bo;
+
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
uint32_t bsc;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 9b8e76da449f..2a5843e6f81b 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -18,7 +18,8 @@
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ struct msm_memptrs *memptrs, uint64_t memptrs_iova)
{
struct msm_ringbuffer *ring;
int ret;
@@ -42,6 +43,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
goto fail;
}
+ ring->memptrs = memptrs;
+ ring->memptrs_iova = memptrs_iova;
+
+
ring->start = msm_gem_vaddr(ring->bo);
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 70643a7b4cc4..3eb9a86b2a2e 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -20,6 +20,16 @@
#include "msm_drv.h"
+#define rbmemptr(ring, member) \
+ ((ring)->memptrs_iova + offsetof(struct msm_memptrs, member))
+
+struct msm_memptrs {
+ volatile uint32_t rptr;
+ volatile uint32_t fence;
+ volatile uint64_t ttbr0;
+ volatile unsigned int contextidr;
+};
+
struct msm_ringbuffer {
struct msm_gpu *gpu;
int id;
@@ -29,9 +39,13 @@ struct msm_ringbuffer {
uint32_t submitted_fence;
spinlock_t lock;
struct list_head submits;
+
+ struct msm_memptrs *memptrs;
+ uint64_t memptrs_iova;
};
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id);
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ struct msm_memptrs *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */