author     Tarun Karra <tkarra@codeaurora.org>  2016-07-10 13:22:57 -0700
committer  Tarun Karra <tkarra@codeaurora.org>  2016-10-05 16:57:21 -0700
commit     2811b6d610686d5c91b48741d6f66eaf63f29e8c (patch)
tree       decab82cc6b94a203d8b1759b865ddddb50b1287 /drivers/gpu
parent     e4e58248efa8d03b39982c26566d846e2baa2687 (diff)
msm: kgsl: Rename cmdbatch to drawobj
Rename all cmdbatch to drawobj. This forms a platform for future
changes where cmdbatch is split into different types of drawobjs.

CRs-Fixed: 1054353
Change-Id: Ib84bee679e859db34e0d1f8a0ac70319eabddf53
Signed-off-by: Tarun Karra <tkarra@codeaurora.org>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/msm/Makefile                  2
-rw-r--r--  drivers/gpu/msm/adreno.c                 18
-rw-r--r--  drivers/gpu/msm/adreno.h                 42
-rw-r--r--  drivers/gpu/msm/adreno_a5xx_preempt.c     2
-rw-r--r--  drivers/gpu/msm/adreno_debugfs.c         48
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c       749
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.h        34
-rw-r--r--  drivers/gpu/msm/adreno_drawctxt.c        46
-rw-r--r--  drivers/gpu/msm/adreno_drawctxt.h        25
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c     114
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.h       6
-rw-r--r--  drivers/gpu/msm/adreno_trace.h           60
-rw-r--r--  drivers/gpu/msm/kgsl.c                  114
-rw-r--r--  drivers/gpu/msm/kgsl.h                   19
-rw-r--r--  drivers/gpu/msm/kgsl_cffdump.c            4
-rw-r--r--  drivers/gpu/msm/kgsl_cffdump.h            6
-rw-r--r--  drivers/gpu/msm/kgsl_cmdbatch.h         168
-rw-r--r--  drivers/gpu/msm/kgsl_compat.h             8
-rw-r--r--  drivers/gpu/msm/kgsl_device.h            10
-rw-r--r--  drivers/gpu/msm/kgsl_drawobj.c (renamed from drivers/gpu/msm/kgsl_cmdbatch.c)  418
-rw-r--r--  drivers/gpu/msm/kgsl_drawobj.h          168
-rw-r--r--  drivers/gpu/msm/kgsl_trace.h             42
22 files changed, 1061 insertions, 1042 deletions
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 90aee3cad5ad..625a2640b4c4 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -3,7 +3,7 @@ ccflags-y := -Idrivers/staging/android
msm_kgsl_core-y = \
kgsl.o \
kgsl_trace.o \
- kgsl_cmdbatch.o \
+ kgsl_drawobj.o \
kgsl_ioctl.o \
kgsl_sharedmem.o \
kgsl_pwrctrl.o \
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 498386903936..3900da6af7da 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1015,8 +1015,8 @@ static void _adreno_free_memories(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv))
- kgsl_free_global(device, &adreno_dev->cmdbatch_profile_buffer);
+ if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv))
+ kgsl_free_global(device, &adreno_dev->profile_buffer);
/* Free local copies of firmware and other command streams */
kfree(adreno_dev->pfp_fw);
@@ -1187,22 +1187,22 @@ static int adreno_init(struct kgsl_device *device)
}
/*
- * Allocate a small chunk of memory for precise cmdbatch profiling for
+ * Allocate a small chunk of memory for precise drawobj profiling for
* those targets that have the always on timer
*/
if (!adreno_is_a3xx(adreno_dev)) {
int r = kgsl_allocate_global(device,
- &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE,
+ &adreno_dev->profile_buffer, PAGE_SIZE,
0, 0, "alwayson");
- adreno_dev->cmdbatch_profile_index = 0;
+ adreno_dev->profile_index = 0;
if (r == 0) {
- set_bit(ADRENO_DEVICE_CMDBATCH_PROFILE,
+ set_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE,
&adreno_dev->priv);
kgsl_sharedmem_set(device,
- &adreno_dev->cmdbatch_profile_buffer, 0, 0,
+ &adreno_dev->profile_buffer, 0, 0,
PAGE_SIZE);
}
@@ -2340,12 +2340,12 @@ int adreno_idle(struct kgsl_device *device)
* adreno_drain() - Drain the dispatch queue
* @device: Pointer to the KGSL device structure for the GPU
*
- * Drain the dispatcher of existing command batches. This halts
+ * Drain the dispatcher of existing drawobjs. This halts
* additional commands from being issued until the gate is completed.
*/
static int adreno_drain(struct kgsl_device *device)
{
- reinit_completion(&device->cmdbatch_gate);
+ reinit_completion(&device->halt_gate);
return 0;
}
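device->halt_gate (renamed here from cmdbatch_gate) is a kernel completion: adreno_drain() re-initializes it to block new submissions, and submitters wait on it until it is completed again on resume. A minimal user-space sketch of that gate pattern, with a pthread condition variable standing in for the completion (gate_close/gate_open/gate_wait mirror reinit_completion/complete_all/wait_for_completion; all names here are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct gate {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool open;
};

static void gate_close(struct gate *g)	/* ~ reinit_completion() */
{
	pthread_mutex_lock(&g->lock);
	g->open = false;
	pthread_mutex_unlock(&g->lock);
}

static void gate_open(struct gate *g)	/* ~ complete_all() */
{
	pthread_mutex_lock(&g->lock);
	g->open = true;
	pthread_cond_broadcast(&g->cond);
	pthread_mutex_unlock(&g->lock);
}

static void gate_wait(struct gate *g)	/* ~ wait_for_completion() */
{
	pthread_mutex_lock(&g->lock);
	while (!g->open)
		pthread_cond_wait(&g->cond, &g->lock);
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct gate g = { PTHREAD_MUTEX_INITIALIZER,
			  PTHREAD_COND_INITIALIZER, true };

	gate_close(&g);	/* adreno_drain(): hold back new submissions */
	gate_open(&g);	/* resume: release all waiting submitters */
	gate_wait(&g);	/* a submitter now passes immediately */
	printf("gate passed\n");
	return 0;
}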
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 286f7d63c8fe..d037d8248ba5 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -76,13 +76,13 @@
KGSL_CONTEXT_PREEMPT_STYLE_SHIFT)
/*
- * return the dispatcher cmdqueue in which the given cmdbatch should
+ * return the dispatcher drawqueue in which the given drawobj should
* be submitted
*/
-#define ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(c) \
+#define ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(c) \
(&((ADRENO_CONTEXT(c->context))->rb->dispatch_q))
-#define ADRENO_CMDBATCH_RB(c) \
+#define ADRENO_DRAWOBJ_RB(c) \
((ADRENO_CONTEXT(c->context))->rb)
/* Adreno core features */
@@ -346,8 +346,8 @@ struct adreno_gpu_core {
* @halt: Atomic variable to check whether the GPU is currently halted
* @ctx_d_debugfs: Context debugfs node
* @pwrctrl_flag: Flag to hold adreno specific power attributes
- * @cmdbatch_profile_buffer: Memdesc holding the cmdbatch profiling buffer
- * @cmdbatch_profile_index: Index to store the start/stop ticks in the profiling
+ * @profile_buffer: Memdesc holding the drawobj profiling buffer
+ * @profile_index: Index to store the start/stop ticks in the profiling
* buffer
* @sp_local_gpuaddr: Base GPU virtual address for SP local memory
* @sp_pvt_gpuaddr: Base GPU virtual address for SP private memory
@@ -404,8 +404,8 @@ struct adreno_device {
struct dentry *ctx_d_debugfs;
unsigned long pwrctrl_flag;
- struct kgsl_memdesc cmdbatch_profile_buffer;
- unsigned int cmdbatch_profile_index;
+ struct kgsl_memdesc profile_buffer;
+ unsigned int profile_index;
uint64_t sp_local_gpuaddr;
uint64_t sp_pvt_gpuaddr;
const struct firmware *lm_fw;
@@ -441,7 +441,7 @@ struct adreno_device {
* @ADRENO_DEVICE_STARTED - Set if the device start sequence is in progress
* @ADRENO_DEVICE_FAULT - Set if the device is currently in fault (and shouldn't
* send any more commands to the ringbuffer)
- * @ADRENO_DEVICE_CMDBATCH_PROFILE - Set if the device supports command batch
+ * @ADRENO_DEVICE_DRAWOBJ_PROFILE - Set if the device supports drawobj
* profiling via the ALWAYSON counter
* @ADRENO_DEVICE_PREEMPTION - Turn on/off preemption
* @ADRENO_DEVICE_SOFT_FAULT_DETECT - Set if soft fault detect is enabled
@@ -459,7 +459,7 @@ enum adreno_device_flags {
ADRENO_DEVICE_HANG_INTR = 4,
ADRENO_DEVICE_STARTED = 5,
ADRENO_DEVICE_FAULT = 6,
- ADRENO_DEVICE_CMDBATCH_PROFILE = 7,
+ ADRENO_DEVICE_DRAWOBJ_PROFILE = 7,
ADRENO_DEVICE_GPU_REGULATOR_ENABLED = 8,
ADRENO_DEVICE_PREEMPTION = 9,
ADRENO_DEVICE_SOFT_FAULT_DETECT = 10,
@@ -469,22 +469,22 @@ enum adreno_device_flags {
};
/**
- * struct adreno_cmdbatch_profile_entry - a single command batch entry in the
+ * struct adreno_drawobj_profile_entry - a single drawobj entry in the
* kernel profiling buffer
- * @started: Number of GPU ticks at start of the command batch
- * @retired: Number of GPU ticks at the end of the command batch
+ * @started: Number of GPU ticks at start of the drawobj
+ * @retired: Number of GPU ticks at the end of the drawobj
*/
-struct adreno_cmdbatch_profile_entry {
+struct adreno_drawobj_profile_entry {
uint64_t started;
uint64_t retired;
};
-#define ADRENO_CMDBATCH_PROFILE_COUNT \
- (PAGE_SIZE / sizeof(struct adreno_cmdbatch_profile_entry))
+#define ADRENO_DRAWOBJ_PROFILE_COUNT \
+ (PAGE_SIZE / sizeof(struct adreno_drawobj_profile_entry))
-#define ADRENO_CMDBATCH_PROFILE_OFFSET(_index, _member) \
- ((_index) * sizeof(struct adreno_cmdbatch_profile_entry) \
- + offsetof(struct adreno_cmdbatch_profile_entry, _member))
+#define ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, _member) \
+ ((_index) * sizeof(struct adreno_drawobj_profile_entry) \
+ + offsetof(struct adreno_drawobj_profile_entry, _member))
/**
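Taken stand-alone, the two profiling macros are plain array arithmetic over the single page allocated in adreno_init(). A compilable sketch (PAGE_SIZE assumed to be 4096 for illustration) showing the resulting layout: 256 sixteen-byte entries, with the started/retired ticks of entry i at byte offsets 16*i and 16*i + 8:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096	/* assumed for illustration */

struct adreno_drawobj_profile_entry {
	uint64_t started;
	uint64_t retired;
};

#define ADRENO_DRAWOBJ_PROFILE_COUNT \
	(PAGE_SIZE / sizeof(struct adreno_drawobj_profile_entry))

#define ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, _member) \
	((_index) * sizeof(struct adreno_drawobj_profile_entry) \
	+ offsetof(struct adreno_drawobj_profile_entry, _member))

int main(void)
{
	/* 4096 / 16 = 256 entries per page */
	printf("entries per page: %zu\n", ADRENO_DRAWOBJ_PROFILE_COUNT);
	/* entry 10: started at byte 160, retired at byte 168 */
	printf("offset(10, started) = %zu\n",
	       ADRENO_DRAWOBJ_PROFILE_OFFSET(10, started));
	printf("offset(10, retired) = %zu\n",
	       ADRENO_DRAWOBJ_PROFILE_OFFSET(10, retired));
	return 0;
}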
@@ -776,7 +776,7 @@ struct adreno_gpudev {
* @KGSL_FT_REPLAY: Replay the faulting command
* @KGSL_FT_SKIPIB: Skip the faulting indirect buffer
* @KGSL_FT_SKIPFRAME: Skip the frame containing the faulting IB
- * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command batch
+ * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the drawobj
* @KGSL_FT_TEMP_DISABLE: Disables FT for all commands
* @KGSL_FT_THROTTLE: Disable the context if it faults too often
* @KGSL_FT_SKIPCMD: Skip the command containing the faulting IB
@@ -793,7 +793,7 @@ enum kgsl_ft_policy_bits {
/* KGSL_FT_MAX_BITS is used to calculate the mask */
KGSL_FT_MAX_BITS,
/* Internal bits - set during GFT */
- /* Skip the PM dump on replayed command batches */
+ /* Skip the PM dump on replayed drawobjs */
KGSL_FT_SKIP_PMDUMP = 31,
};
@@ -882,7 +882,7 @@ int adreno_reset(struct kgsl_device *device, int fault);
void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch);
+ struct kgsl_drawobj *drawobj);
int adreno_coresight_init(struct adreno_device *adreno_dev);
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 4baee4a5c0b1..09c550c9f58c 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -37,7 +37,7 @@ static void _update_wptr(struct adreno_device *adreno_dev)
rb->wptr);
rb->dispatch_q.expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ msecs_to_jiffies(adreno_drawobj_timeout);
}
spin_unlock_irqrestore(&rb->preempt_lock, flags);
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 680827e5b848..edf5c634079e 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -129,7 +129,7 @@ typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
static void sync_event_print(struct seq_file *s,
- struct kgsl_cmdbatch_sync_event *sync_event)
+ struct kgsl_drawobj_sync_event *sync_event)
{
switch (sync_event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
@@ -153,12 +153,12 @@ struct flag_entry {
const char *str;
};
-static const struct flag_entry cmdbatch_flags[] = {KGSL_CMDBATCH_FLAGS};
+static const struct flag_entry drawobj_flags[] = {KGSL_DRAWOBJ_FLAGS};
-static const struct flag_entry cmdbatch_priv[] = {
- { CMDBATCH_FLAG_SKIP, "skip"},
- { CMDBATCH_FLAG_FORCE_PREAMBLE, "force_preamble"},
- { CMDBATCH_FLAG_WFI, "wait_for_idle" },
+static const struct flag_entry drawobj_priv[] = {
+ { DRAWOBJ_FLAG_SKIP, "skip"},
+ { DRAWOBJ_FLAG_FORCE_PREAMBLE, "force_preamble"},
+ { DRAWOBJ_FLAG_WFI, "wait_for_idle" },
};
static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS};
@@ -199,17 +199,17 @@ static void print_flags(struct seq_file *s, const struct flag_entry *table,
seq_puts(s, "None");
}
-static void cmdbatch_print(struct seq_file *s, struct kgsl_cmdbatch *cmdbatch)
+static void drawobj_print(struct seq_file *s, struct kgsl_drawobj *drawobj)
{
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- /* print fences first, since they block this cmdbatch */
+ /* print fences first, since they block this drawobj */
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < drawobj->numsyncs; i++) {
+ event = &drawobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(drawobj, i))
continue;
/*
@@ -217,24 +217,24 @@ static void cmdbatch_print(struct seq_file *s, struct kgsl_cmdbatch *cmdbatch)
* so that it is clear if the fence was a separate submit
* or part of an IB submit.
*/
- seq_printf(s, "\t%d ", cmdbatch->timestamp);
+ seq_printf(s, "\t%d ", drawobj->timestamp);
sync_event_print(s, event);
seq_puts(s, "\n");
}
/* if this flag is set, there won't be an IB */
- if (cmdbatch->flags & KGSL_CONTEXT_SYNC)
+ if (drawobj->flags & KGSL_CONTEXT_SYNC)
return;
- seq_printf(s, "\t%d: ", cmdbatch->timestamp);
+ seq_printf(s, "\t%d: ", drawobj->timestamp);
seq_puts(s, " flags: ");
- print_flags(s, cmdbatch_flags, ARRAY_SIZE(cmdbatch_flags),
- cmdbatch->flags);
+ print_flags(s, drawobj_flags, ARRAY_SIZE(drawobj_flags),
+ drawobj->flags);
seq_puts(s, " priv: ");
- print_flags(s, cmdbatch_priv, ARRAY_SIZE(cmdbatch_priv),
- cmdbatch->priv);
+ print_flags(s, drawobj_priv, ARRAY_SIZE(drawobj_priv),
+ drawobj->priv);
seq_puts(s, "\n");
}
@@ -285,13 +285,13 @@ static int ctx_print(struct seq_file *s, void *unused)
queued, consumed, retired,
drawctxt->internal_timestamp);
- seq_puts(s, "cmdqueue:\n");
+ seq_puts(s, "drawqueue:\n");
spin_lock(&drawctxt->lock);
- for (i = drawctxt->cmdqueue_head;
- i != drawctxt->cmdqueue_tail;
- i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE))
- cmdbatch_print(s, drawctxt->cmdqueue[i]);
+ for (i = drawctxt->drawqueue_head;
+ i != drawctxt->drawqueue_tail;
+ i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE))
+ drawobj_print(s, drawctxt->drawqueue[i]);
spin_unlock(&drawctxt->lock);
seq_puts(s, "events:\n");
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 522c32743d3d..e42588004c50 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -25,7 +25,7 @@
#include "adreno_trace.h"
#include "kgsl_sharedmem.h"
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
/* Time in ms after which the dispatcher tries to schedule an unscheduled RB */
unsigned int adreno_dispatch_starvation_time = 2000;
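DRAWQUEUE_NEXT is a modulo increment over a fixed-size ring indexed by head and tail. A stand-alone sketch (QUEUE_SIZE stands in for ADRENO_CONTEXT_DRAWQUEUE_SIZE) of the convention the dispatcher uses: head == tail means empty, so one slot is always left free; this is the invariant the requeue path further down depends on:

#include <stdio.h>

#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
#define QUEUE_SIZE 8	/* stand-in for ADRENO_CONTEXT_DRAWQUEUE_SIZE */

int main(void)
{
	unsigned int head = 0, tail = 0;

	/* the queue is empty when head == tail, so only SIZE - 1 slots
	 * are usable: advancing tail onto head would look empty again */
	while (DRAWQUEUE_NEXT(tail, QUEUE_SIZE) != head)
		tail = DRAWQUEUE_NEXT(tail, QUEUE_SIZE);

	printf("tail after filling: %u (7 of 8 slots used)\n", tail);

	/* retiring one entry advances head and frees a slot */
	head = DRAWQUEUE_NEXT(head, QUEUE_SIZE);
	printf("head after one retire: %u\n", head);
	return 0;
}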
@@ -43,13 +43,13 @@ unsigned int adreno_dispatch_time_slice = 25;
unsigned int adreno_disp_preempt_fair_sched;
/* Number of commands that can be queued in a context before it sleeps */
-static unsigned int _context_cmdqueue_size = 50;
+static unsigned int _context_drawqueue_size = 50;
/* Number of milliseconds to wait for the context queue to clear */
static unsigned int _context_queue_wait = 10000;
-/* Number of command batches sent at a time from a single context */
-static unsigned int _context_cmdbatch_burst = 5;
+/* Number of drawobjs sent at a time from a single context */
+static unsigned int _context_drawobj_burst = 5;
/*
* GFT throttle parameters. If GFT recovered more than
@@ -73,24 +73,25 @@ static unsigned int _dispatcher_q_inflight_hi = 15;
static unsigned int _dispatcher_q_inflight_lo = 4;
/* Command batch timeout (in milliseconds) */
-unsigned int adreno_cmdbatch_timeout = 2000;
+unsigned int adreno_drawobj_timeout = 2000;
/* Interval for reading and comparing fault detection registers */
static unsigned int _fault_timer_interval = 200;
-#define CMDQUEUE_RB(_cmdqueue) \
+#define DRAWQUEUE_RB(_drawqueue) \
((struct adreno_ringbuffer *) \
- container_of((_cmdqueue), struct adreno_ringbuffer, dispatch_q))
+ container_of((_drawqueue),\
+ struct adreno_ringbuffer, dispatch_q))
-#define CMDQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
+#define DRAWQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
-static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue);
+static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue);
-static inline bool cmdqueue_is_current(
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static inline bool drawqueue_is_current(
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- struct adreno_ringbuffer *rb = CMDQUEUE_RB(cmdqueue);
+ struct adreno_ringbuffer *rb = DRAWQUEUE_RB(drawqueue);
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
return (adreno_dev->cur_rb == rb);
@@ -114,7 +115,8 @@ static int __count_context(struct adreno_context *drawctxt, void *data)
return time_after(jiffies, expires) ? 0 : 1;
}
-static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data)
+static int __count_drawqueue_context(struct adreno_context *drawctxt,
+ void *data)
{
unsigned long expires = drawctxt->active_time + msecs_to_jiffies(100);
@@ -122,7 +124,7 @@ static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data)
return 0;
return (&drawctxt->rb->dispatch_q ==
- (struct adreno_dispatcher_cmdqueue *) data) ? 1 : 0;
+ (struct adreno_dispatcher_drawqueue *) data) ? 1 : 0;
}
static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
@@ -142,7 +144,7 @@ static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
}
static void _track_context(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue,
+ struct adreno_dispatcher_drawqueue *drawqueue,
struct adreno_context *drawctxt)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -154,9 +156,9 @@ static void _track_context(struct adreno_device *adreno_dev,
device->active_context_count =
_adreno_count_active_contexts(adreno_dev,
__count_context, NULL);
- cmdqueue->active_context_count =
+ drawqueue->active_context_count =
_adreno_count_active_contexts(adreno_dev,
- __count_cmdqueue_context, cmdqueue);
+ __count_drawqueue_context, drawqueue);
spin_unlock(&adreno_dev->active_list_lock);
}
@@ -169,9 +171,9 @@ static void _track_context(struct adreno_device *adreno_dev,
*/
static inline int
-_cmdqueue_inflight(struct adreno_dispatcher_cmdqueue *cmdqueue)
+_drawqueue_inflight(struct adreno_dispatcher_drawqueue *drawqueue)
{
- return (cmdqueue->active_context_count > 1)
+ return (drawqueue->active_context_count > 1)
? _dispatcher_q_inflight_lo : _dispatcher_q_inflight_hi;
}
@@ -271,18 +273,18 @@ static void start_fault_timer(struct adreno_device *adreno_dev)
}
/**
- * _retire_marker() - Retire a marker command batch without sending it to the
+ * _retire_marker() - Retire a marker drawobj without sending it to the
* hardware
- * @cmdbatch: Pointer to the cmdbatch to retire
+ * @drawobj: Pointer to the drawobj to retire
*
* In some cases marker commands can be retired by the software without going to
* the GPU. In those cases, update the memstore from the CPU, kick off the
- * event engine to handle expired events and destroy the command batch.
+ * event engine to handle expired events and destroy the drawobj.
*/
-static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
+static void _retire_marker(struct kgsl_drawobj *drawobj)
{
- struct kgsl_context *context = cmdbatch->context;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_context *context = drawobj->context;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
struct kgsl_device *device = context->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -292,11 +294,11 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
*/
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
- cmdbatch->timestamp);
+ drawobj->timestamp);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
- cmdbatch->timestamp);
+ drawobj->timestamp);
/* Retire pending GPU events for the object */
@@ -308,12 +310,12 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
* So avoid reading GPU register directly for A3xx.
*/
if (adreno_is_a3xx(adreno_dev))
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
+ trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
0);
else
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
+ trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
adreno_get_rptr(drawctxt->rb));
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
}
static int _check_context_queue(struct adreno_context *drawctxt)
@@ -330,7 +332,7 @@ static int _check_context_queue(struct adreno_context *drawctxt)
if (kgsl_context_invalid(&drawctxt->base))
ret = 1;
else
- ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
+ ret = drawctxt->queued < _context_drawqueue_size ? 1 : 0;
spin_unlock(&drawctxt->lock);
@@ -341,54 +343,54 @@ static int _check_context_queue(struct adreno_context *drawctxt)
* return true if this is a marker command and the dependent timestamp has
* retired
*/
-static bool _marker_expired(struct kgsl_cmdbatch *cmdbatch)
+static bool _marker_expired(struct kgsl_drawobj *drawobj)
{
- return (cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- kgsl_check_timestamp(cmdbatch->device, cmdbatch->context,
- cmdbatch->marker_timestamp);
+ return (drawobj->flags & KGSL_DRAWOBJ_MARKER) &&
+ kgsl_check_timestamp(drawobj->device, drawobj->context,
+ drawobj->marker_timestamp);
}
-static inline void _pop_cmdbatch(struct adreno_context *drawctxt)
+static inline void _pop_drawobj(struct adreno_context *drawctxt)
{
- drawctxt->cmdqueue_head = CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
- ADRENO_CONTEXT_CMDQUEUE_SIZE);
+ drawctxt->drawqueue_head = DRAWQUEUE_NEXT(drawctxt->drawqueue_head,
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE);
drawctxt->queued--;
}
/**
- * Removes all expired marker and sync cmdbatches from
+ * Removes all expired marker and sync objects from
* the context queue when marker command and dependent
* timestamp are retired. This function is recursive.
- * returns cmdbatch if context has command, NULL otherwise.
+ * returns drawobj if context has command, NULL otherwise.
*/
-static struct kgsl_cmdbatch *_expire_markers(struct adreno_context *drawctxt)
+static struct kgsl_drawobj *_expire_markers(struct adreno_context *drawctxt)
{
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
- if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail)
+ if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail)
return NULL;
- cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ drawobj = drawctxt->drawqueue[drawctxt->drawqueue_head];
- if (cmdbatch == NULL)
+ if (drawobj == NULL)
return NULL;
/* Check to see if this is a marker we can skip over */
- if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- _marker_expired(cmdbatch)) {
- _pop_cmdbatch(drawctxt);
- _retire_marker(cmdbatch);
+ if ((drawobj->flags & KGSL_DRAWOBJ_MARKER) &&
+ _marker_expired(drawobj)) {
+ _pop_drawobj(drawctxt);
+ _retire_marker(drawobj);
return _expire_markers(drawctxt);
}
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- if (!kgsl_cmdbatch_events_pending(cmdbatch)) {
- _pop_cmdbatch(drawctxt);
- kgsl_cmdbatch_destroy(cmdbatch);
+ if (drawobj->flags & KGSL_DRAWOBJ_SYNC) {
+ if (!kgsl_drawobj_events_pending(drawobj)) {
+ _pop_drawobj(drawctxt);
+ kgsl_drawobj_destroy(drawobj);
return _expire_markers(drawctxt);
}
}
- return cmdbatch;
+ return drawobj;
}
static void expire_markers(struct adreno_context *drawctxt)
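Since _expire_markers() only recurses after popping the head, its shape is an iterative scan of retirable head entries. A stand-alone model with the two kgsl checks (expired marker, sync object with no pending events) reduced to a single 'expired' flag; the types and names here are hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define QSIZE 8
#define NEXT(i) (((i) + 1) % QSIZE)

struct drawobj { bool expired; int id; };

/* loop form of the recursive _expire_markers(): keep popping the head
 * while it is retirable, then return whatever is left (or NULL) */
static struct drawobj *expire_markers(struct drawobj **q,
		unsigned int *head, unsigned int tail)
{
	while (*head != tail) {
		struct drawobj *obj = q[*head];

		if (!obj || !obj->expired)
			return obj;
		*head = NEXT(*head);	/* _pop_drawobj() */
		printf("retired drawobj %d\n", obj->id);
	}
	return NULL;
}

int main(void)
{
	struct drawobj a = { true, 1 }, b = { true, 2 }, c = { false, 3 };
	struct drawobj *q[QSIZE] = { &a, &b, &c };
	unsigned int head = 0, tail = 3;
	struct drawobj *next = expire_markers(q, &head, tail);

	printf("next: %d\n", next ? next->id : -1);	/* 3 */
	return 0;
}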
@@ -398,14 +400,14 @@ static void expire_markers(struct adreno_context *drawctxt)
spin_unlock(&drawctxt->lock);
}
-static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
+static struct kgsl_drawobj *_get_drawobj(struct adreno_context *drawctxt)
{
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
bool pending = false;
- cmdbatch = _expire_markers(drawctxt);
+ drawobj = _expire_markers(drawctxt);
- if (cmdbatch == NULL)
+ if (drawobj == NULL)
return NULL;
/*
@@ -416,11 +418,11 @@ static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
* correct. If skip isn't set then we block this queue
* until the dependent timestamp expires
*/
- if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)))
+ if ((drawobj->flags & KGSL_DRAWOBJ_MARKER) &&
+ (!test_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv)))
pending = true;
- if (kgsl_cmdbatch_events_pending(cmdbatch))
+ if (kgsl_drawobj_events_pending(drawobj))
pending = true;
/*
@@ -432,32 +434,32 @@ static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
* If syncpoints are pending start the canary timer if
* it hasn't already been started
*/
- if (!cmdbatch->timeout_jiffies) {
- cmdbatch->timeout_jiffies =
+ if (!drawobj->timeout_jiffies) {
+ drawobj->timeout_jiffies =
jiffies + msecs_to_jiffies(5000);
- mod_timer(&cmdbatch->timer, cmdbatch->timeout_jiffies);
+ mod_timer(&drawobj->timer, drawobj->timeout_jiffies);
}
return ERR_PTR(-EAGAIN);
}
- _pop_cmdbatch(drawctxt);
- return cmdbatch;
+ _pop_drawobj(drawctxt);
+ return drawobj;
}
/**
- * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
+ * adreno_dispatcher_get_drawobj() - Get a new command from a context queue
* @drawctxt: Pointer to the adreno draw context
*
- * Dequeue a new command batch from the context list
+ * Dequeue a new drawobj from the context list
*/
-static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
+static struct kgsl_drawobj *adreno_dispatcher_get_drawobj(
struct adreno_context *drawctxt)
{
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
spin_lock(&drawctxt->lock);
- cmdbatch = _get_cmdbatch(drawctxt);
+ drawobj = _get_drawobj(drawctxt);
spin_unlock(&drawctxt->lock);
/*
@@ -465,24 +467,24 @@ static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
* on another core before queueing the buffer. We must do this
* without holding any spin lock that the timer handler might be using
*/
- if (!IS_ERR_OR_NULL(cmdbatch))
- del_timer_sync(&cmdbatch->timer);
+ if (!IS_ERR_OR_NULL(drawobj))
+ del_timer_sync(&drawobj->timer);
- return cmdbatch;
+ return drawobj;
}
/**
- * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context
+ * adreno_dispatcher_requeue_drawobj() - Put a command back on the context
* queue
* @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
+ * @drawobj: Pointer to the KGSL drawobj to requeue
*
* Failure to submit a command to the ringbuffer isn't the fault of the command
* being submitted so if a failure happens, push it back on the head of the
* context queue to be reconsidered again unless the context got detached.
*/
-static inline int adreno_dispatcher_requeue_cmdbatch(
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
+static inline int adreno_dispatcher_requeue_drawobj(
+ struct adreno_context *drawctxt, struct kgsl_drawobj *drawobj)
{
unsigned int prev;
spin_lock(&drawctxt->lock);
@@ -490,27 +492,27 @@ static inline int adreno_dispatcher_requeue_cmdbatch(
if (kgsl_context_detached(&drawctxt->base) ||
kgsl_context_invalid(&drawctxt->base)) {
spin_unlock(&drawctxt->lock);
- /* get rid of this cmdbatch since the context is bad */
- kgsl_cmdbatch_destroy(cmdbatch);
+ /* get rid of this drawobj since the context is bad */
+ kgsl_drawobj_destroy(drawobj);
return -ENOENT;
}
- prev = drawctxt->cmdqueue_head == 0 ?
- (ADRENO_CONTEXT_CMDQUEUE_SIZE - 1) :
- (drawctxt->cmdqueue_head - 1);
+ prev = drawctxt->drawqueue_head == 0 ?
+ (ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1) :
+ (drawctxt->drawqueue_head - 1);
/*
* The maximum queue size always needs to be one less than the size of
- * the ringbuffer queue so there is "room" to put the cmdbatch back in
+ * the ringbuffer queue so there is "room" to put the drawobj back in
*/
- BUG_ON(prev == drawctxt->cmdqueue_tail);
+ WARN_ON(prev == drawctxt->drawqueue_tail);
- drawctxt->cmdqueue[prev] = cmdbatch;
+ drawctxt->drawqueue[prev] = drawobj;
drawctxt->queued++;
/* Reset the command queue head to reflect the newly requeued change */
- drawctxt->cmdqueue_head = prev;
+ drawctxt->drawqueue_head = prev;
spin_unlock(&drawctxt->lock);
return 0;
}
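The wraparound decrement used above to reinsert at the head, in isolation (a sketch; size corresponds to ADRENO_CONTEXT_DRAWQUEUE_SIZE):

#include <stdio.h>

/* step the head index back one slot, wrapping at zero */
static unsigned int drawqueue_prev(unsigned int head, unsigned int size)
{
	return (head == 0) ? (size - 1) : (head - 1);
}

int main(void)
{
	printf("%u\n", drawqueue_prev(0, 8));	/* wraps to 7 */
	printf("%u\n", drawqueue_prev(3, 8));	/* 2 */
	return 0;
}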
@@ -545,21 +547,21 @@ static void dispatcher_queue_context(struct adreno_device *adreno_dev,
}
/**
- * sendcmd() - Send a command batch to the GPU hardware
+ * sendcmd() - Send a drawobj to the GPU hardware
* @dispatcher: Pointer to the adreno dispatcher struct
- * @cmdbatch: Pointer to the KGSL cmdbatch being sent
+ * @drawobj: Pointer to the KGSL drawobj being sent
*
- * Send a KGSL command batch to the GPU hardware
+ * Send a KGSL drawobj to the GPU hardware
*/
static int sendcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
+ struct adreno_dispatcher_drawqueue *dispatch_q =
+ ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj);
struct adreno_submit_time time;
uint64_t secs = 0;
unsigned long nsecs = 0;
@@ -588,15 +590,15 @@ static int sendcmd(struct adreno_device *adreno_dev,
set_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
}
- if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv)) {
- set_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv);
- cmdbatch->profile_index = adreno_dev->cmdbatch_profile_index;
- adreno_dev->cmdbatch_profile_index =
- (adreno_dev->cmdbatch_profile_index + 1) %
- ADRENO_CMDBATCH_PROFILE_COUNT;
+ if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv)) {
+ set_bit(DRAWOBJ_FLAG_PROFILE, &drawobj->priv);
+ drawobj->profile_index = adreno_dev->profile_index;
+ adreno_dev->profile_index =
+ (adreno_dev->profile_index + 1) %
+ ADRENO_DRAWOBJ_PROFILE_COUNT;
}
- ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch, &time);
+ ret = adreno_ringbuffer_submitcmd(adreno_dev, drawobj, &time);
/*
* On the first command, if the submission was successful, then read the
@@ -649,17 +651,17 @@ static int sendcmd(struct adreno_device *adreno_dev,
secs = time.ktime;
nsecs = do_div(secs, 1000000000);
- trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight,
+ trace_adreno_cmdbatch_submitted(drawobj, (int) dispatcher->inflight,
time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
adreno_get_rptr(drawctxt->rb));
mutex_unlock(&device->mutex);
- cmdbatch->submit_ticks = time.ticks;
+ drawobj->submit_ticks = time.ticks;
- dispatch_q->cmd_q[dispatch_q->tail] = cmdbatch;
+ dispatch_q->cmd_q[dispatch_q->tail] = drawobj;
dispatch_q->tail = (dispatch_q->tail + 1) %
- ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE;
/*
* For the first submission in any given command queue update the
@@ -670,7 +672,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
if (dispatch_q->inflight == 1)
dispatch_q->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ msecs_to_jiffies(adreno_drawobj_timeout);
/*
* If we believe ourselves to be current and preemption isn't a thing,
@@ -678,7 +680,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
* thing and the timer will be set up in due time
*/
if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- if (cmdqueue_is_current(dispatch_q))
+ if (drawqueue_is_current(dispatch_q))
mod_timer(&dispatcher->timer, dispatch_q->expires);
}
@@ -704,11 +706,11 @@ static int sendcmd(struct adreno_device *adreno_dev,
static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt)
{
- struct adreno_dispatcher_cmdqueue *dispatch_q =
+ struct adreno_dispatcher_drawqueue *dispatch_q =
&(drawctxt->rb->dispatch_q);
int count = 0;
int ret = 0;
- int inflight = _cmdqueue_inflight(dispatch_q);
+ int inflight = _drawqueue_inflight(dispatch_q);
unsigned int timestamp;
if (dispatch_q->inflight >= inflight) {
@@ -717,27 +719,27 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
}
/*
- * Each context can send a specific number of command batches per cycle
+ * Each context can send a specific number of drawobjs per cycle
*/
- while ((count < _context_cmdbatch_burst) &&
+ while ((count < _context_drawobj_burst) &&
(dispatch_q->inflight < inflight)) {
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
if (adreno_gpu_fault(adreno_dev) != 0)
break;
- cmdbatch = adreno_dispatcher_get_cmdbatch(drawctxt);
+ drawobj = adreno_dispatcher_get_drawobj(drawctxt);
/*
- * adreno_context_get_cmdbatch returns -EAGAIN if the current
- * cmdbatch has pending sync points so no more to do here.
+ * adreno_context_get_drawobj returns -EAGAIN if the current
+ * drawobj has pending sync points so no more to do here.
* When the sync points are satisfied then the context will get
* requeued
*/
- if (IS_ERR_OR_NULL(cmdbatch)) {
- if (IS_ERR(cmdbatch))
- ret = PTR_ERR(cmdbatch);
+ if (IS_ERR_OR_NULL(drawobj)) {
+ if (IS_ERR(drawobj))
+ ret = PTR_ERR(drawobj);
break;
}
@@ -748,31 +750,31 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
* against the burst for the context
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- kgsl_cmdbatch_destroy(cmdbatch);
+ if (drawobj->flags & KGSL_DRAWOBJ_SYNC) {
+ kgsl_drawobj_destroy(drawobj);
continue;
}
- timestamp = cmdbatch->timestamp;
+ timestamp = drawobj->timestamp;
- ret = sendcmd(adreno_dev, cmdbatch);
+ ret = sendcmd(adreno_dev, drawobj);
/*
- * On error from sendcmd() try to requeue the command batch
+ * On error from sendcmd() try to requeue the drawobj
* unless we got back -ENOENT which means that the context has
* been detached and there will be no more deliveries from here
*/
if (ret != 0) {
- /* Destroy the cmdbatch on -ENOENT */
+ /* Destroy the drawobj on -ENOENT */
if (ret == -ENOENT)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
else {
/*
* If the requeue returns an error, return that
* instead of whatever sendcmd() sent us
*/
- int r = adreno_dispatcher_requeue_cmdbatch(
- drawctxt, cmdbatch);
+ int r = adreno_dispatcher_requeue_drawobj(
+ drawctxt, drawobj);
if (r)
ret = r;
}
@@ -934,17 +936,17 @@ static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
/**
* get_timestamp() - Return the next timestamp for the context
* @drawctxt - Pointer to an adreno draw context struct
- * @cmdbatch - Pointer to a command batch
+ * @drawobj - Pointer to a drawobj
* @timestamp - Pointer to a timestamp value possibly passed from the user
*
* Assign a timestamp based on the settings of the draw context and the command
* batch.
*/
static int get_timestamp(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp)
+ struct kgsl_drawobj *drawobj, unsigned int *timestamp)
{
/* Synchronization commands don't get a timestamp */
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
+ if (drawobj->flags & KGSL_DRAWOBJ_SYNC) {
*timestamp = 0;
return 0;
}
@@ -969,18 +971,18 @@ static int get_timestamp(struct adreno_context *drawctxt,
* adreno_dispactcher_queue_cmd() - Queue a new command in the context
* @adreno_dev: Pointer to the adreno device struct
* @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the command batch being submitted
+ * @drawobj: Pointer to the drawobj being submitted
* @timestamp: Pointer to the requested timestamp
*
* Queue a command in the context - if there isn't any room in the queue, then
* block until there is
*/
int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
+ struct adreno_context *drawctxt, struct kgsl_drawobj *drawobj,
uint32_t *timestamp)
{
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
+ struct adreno_dispatcher_drawqueue *dispatch_q =
+ ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj);
int ret;
spin_lock(&drawctxt->lock);
@@ -997,36 +999,36 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
if (test_and_clear_bit(ADRENO_CONTEXT_FORCE_PREAMBLE,
&drawctxt->base.priv))
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &drawobj->priv);
/*
- * Force the premable if set from userspace in the context or cmdbatch
+ * Force the preamble if set from userspace in the context or drawobj
* flags
*/
if ((drawctxt->base.flags & KGSL_CONTEXT_CTX_SWITCH) ||
- (cmdbatch->flags & KGSL_CMDBATCH_CTX_SWITCH))
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ (drawobj->flags & KGSL_DRAWOBJ_CTX_SWITCH))
+ set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &drawobj->priv);
- /* Skip this cmdbatch commands if IFH_NOP is enabled */
+ /* Skip this drawobj commands if IFH_NOP is enabled */
if (drawctxt->base.flags & KGSL_CONTEXT_IFH_NOP)
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+ set_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv);
/*
* If we are waiting for the end of frame and it hasn't appeared yet,
- * then mark the command batch as skipped. It will still progress
+ * then mark the drawobj as skipped. It will still progress
* through the pipeline but it won't actually send any commands
*/
if (test_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv)) {
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+ set_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv);
/*
- * If this command batch represents the EOF then clear the way
+ * If this drawobj represents the EOF then clear the way
* for the dispatcher to continue submitting
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_END_OF_FRAME) {
+ if (drawobj->flags & KGSL_DRAWOBJ_END_OF_FRAME) {
clear_bit(ADRENO_CONTEXT_SKIP_EOF,
&drawctxt->base.priv);
@@ -1041,7 +1043,7 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
/* Wait for room in the context queue */
- while (drawctxt->queued >= _context_cmdqueue_size) {
+ while (drawctxt->queued >= _context_drawqueue_size) {
trace_adreno_drawctxt_sleep(drawctxt);
spin_unlock(&drawctxt->lock);
@@ -1071,27 +1073,27 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
return -ENOENT;
}
- ret = get_timestamp(drawctxt, cmdbatch, timestamp);
+ ret = get_timestamp(drawctxt, drawobj, timestamp);
if (ret) {
spin_unlock(&drawctxt->lock);
return ret;
}
- cmdbatch->timestamp = *timestamp;
+ drawobj->timestamp = *timestamp;
- if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) {
+ if (drawobj->flags & KGSL_DRAWOBJ_MARKER) {
/*
* See if we can fastpath this thing - if nothing is queued
* and nothing is inflight retire without bothering the GPU
*/
- if (!drawctxt->queued && kgsl_check_timestamp(cmdbatch->device,
- cmdbatch->context, drawctxt->queued_timestamp)) {
- trace_adreno_cmdbatch_queued(cmdbatch,
+ if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device,
+ drawobj->context, drawctxt->queued_timestamp)) {
+ trace_adreno_cmdbatch_queued(drawobj,
drawctxt->queued);
- _retire_marker(cmdbatch);
+ _retire_marker(drawobj);
spin_unlock(&drawctxt->lock);
return 0;
}
@@ -1102,27 +1104,27 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
* comes along and forces the marker to execute)
*/
- cmdbatch->marker_timestamp = drawctxt->queued_timestamp;
+ drawobj->marker_timestamp = drawctxt->queued_timestamp;
}
/* SYNC commands have timestamp 0 and will get optimized out anyway */
- if (!(cmdbatch->flags & KGSL_CONTEXT_SYNC))
+ if (!(drawobj->flags & KGSL_CONTEXT_SYNC))
drawctxt->queued_timestamp = *timestamp;
/*
- * Set the fault tolerance policy for the command batch - assuming the
+ * Set the fault tolerance policy for the drawobj - assuming the
* context hasn't disabled FT use the current device policy
*/
if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
- set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy);
+ set_bit(KGSL_FT_DISABLE, &drawobj->fault_policy);
else
- cmdbatch->fault_policy = adreno_dev->ft_policy;
+ drawobj->fault_policy = adreno_dev->ft_policy;
/* Put the command into the queue */
- drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
- drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
+ drawctxt->drawqueue[drawctxt->drawqueue_tail] = drawobj;
+ drawctxt->drawqueue_tail = (drawctxt->drawqueue_tail + 1) %
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE;
/*
* If this is a real command then we need to force any markers queued
@@ -1130,20 +1132,20 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
* the commands get NOPed.
*/
- if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)) {
- unsigned int i = drawctxt->cmdqueue_head;
+ if (!(drawobj->flags & KGSL_DRAWOBJ_MARKER)) {
+ unsigned int i = drawctxt->drawqueue_head;
- while (i != drawctxt->cmdqueue_tail) {
- if (drawctxt->cmdqueue[i]->flags & KGSL_CMDBATCH_MARKER)
- set_bit(CMDBATCH_FLAG_SKIP,
- &drawctxt->cmdqueue[i]->priv);
+ while (i != drawctxt->drawqueue_tail) {
+ if (drawctxt->drawqueue[i]->flags & KGSL_DRAWOBJ_MARKER)
+ set_bit(DRAWOBJ_FLAG_SKIP,
+ &drawctxt->drawqueue[i]->priv);
- i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE);
+ i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE);
}
}
drawctxt->queued++;
- trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
+ trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
_track_context(adreno_dev, dispatch_q, drawctxt);
@@ -1163,7 +1165,7 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
* queue will try to schedule new commands anyway.
*/
- if (dispatch_q->inflight < _context_cmdbatch_burst)
+ if (dispatch_q->inflight < _context_drawobj_burst)
adreno_dispatcher_issuecmds(adreno_dev);
return 0;
@@ -1208,15 +1210,15 @@ static void mark_guilty_context(struct kgsl_device *device, unsigned int id)
}
/*
- * If an IB inside of the command batch has a gpuaddr that matches the base
+ * If an IB inside of the drawobj has a gpuaddr that matches the base
* passed in then zero the size which effectively skips it when it is submitted
* in the ringbuffer.
*/
-static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base)
+static void drawobj_skip_ib(struct kgsl_drawobj *drawobj, uint64_t base)
{
struct kgsl_memobj_node *ib;
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &drawobj->cmdlist, node) {
if (ib->gpuaddr == base) {
ib->priv |= MEMOBJ_SKIP;
if (base)
@@ -1225,10 +1227,10 @@ static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base)
}
}
-static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
+static void drawobj_skip_cmd(struct kgsl_drawobj *drawobj,
+ struct kgsl_drawobj **replay, int count)
{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
int i;
/*
@@ -1243,9 +1245,9 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
* b) force preamble for next commandbatch
*/
for (i = 1; i < count; i++) {
- if (replay[i]->context->id == cmdbatch->context->id) {
+ if (replay[i]->context->id == drawobj->context->id) {
replay[i]->fault_policy = replay[0]->fault_policy;
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
set_bit(KGSL_FT_SKIPCMD, &replay[i]->fault_recovery);
break;
}
@@ -1262,41 +1264,41 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
drawctxt->fault_policy = replay[0]->fault_policy;
}
- /* set the flags to skip this cmdbatch */
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
- cmdbatch->fault_recovery = 0;
+ /* set the flags to skip this drawobj */
+ set_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv);
+ drawobj->fault_recovery = 0;
}
-static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
+static void drawobj_skip_frame(struct kgsl_drawobj *drawobj,
+ struct kgsl_drawobj **replay, int count)
{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
int skip = 1;
int i;
for (i = 0; i < count; i++) {
/*
- * Only operate on command batches that belong to the
+ * Only operate on drawobjs that belong to the
* faulting context
*/
- if (replay[i]->context->id != cmdbatch->context->id)
+ if (replay[i]->context->id != drawobj->context->id)
continue;
/*
- * Skip all the command batches in this context until
+ * Skip all the drawobjs in this context until
* the EOF flag is seen. If the EOF flag is seen then
* force the preamble for the next command.
*/
if (skip) {
- set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv);
+ set_bit(DRAWOBJ_FLAG_SKIP, &replay[i]->priv);
- if (replay[i]->flags & KGSL_CMDBATCH_END_OF_FRAME)
+ if (replay[i]->flags & KGSL_DRAWOBJ_END_OF_FRAME)
skip = 0;
} else {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
return;
}
}
@@ -1318,13 +1320,13 @@ static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
set_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv);
}
-static void remove_invalidated_cmdbatches(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count)
+static void remove_invalidated_drawobjs(struct kgsl_device *device,
+ struct kgsl_drawobj **replay, int count)
{
int i;
for (i = 0; i < count; i++) {
- struct kgsl_cmdbatch *cmd = replay[i];
+ struct kgsl_drawobj *cmd = replay[i];
if (cmd == NULL)
continue;
@@ -1337,7 +1339,7 @@ static void remove_invalidated_cmdbatches(struct kgsl_device *device,
&cmd->context->events, cmd->timestamp);
mutex_unlock(&device->mutex);
- kgsl_cmdbatch_destroy(cmd);
+ kgsl_drawobj_destroy(cmd);
}
}
}
@@ -1361,7 +1363,7 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context)
static void adreno_fault_header(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+ struct adreno_ringbuffer *rb, struct kgsl_drawobj *drawobj)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
unsigned int status, rptr, wptr, ib1sz, ib2sz;
@@ -1377,22 +1379,22 @@ static void adreno_fault_header(struct kgsl_device *device,
ADRENO_REG_CP_IB2_BASE_HI, &ib2base);
adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ib2sz);
- if (cmdbatch != NULL) {
+ if (drawobj != NULL) {
struct adreno_context *drawctxt =
- ADRENO_CONTEXT(cmdbatch->context);
+ ADRENO_CONTEXT(drawobj->context);
- trace_adreno_gpu_fault(cmdbatch->context->id,
- cmdbatch->timestamp,
+ trace_adreno_gpu_fault(drawobj->context->id,
+ drawobj->timestamp,
status, rptr, wptr, ib1base, ib1sz,
ib2base, ib2sz, drawctxt->rb->id);
- pr_fault(device, cmdbatch,
+ pr_fault(device, drawobj,
"gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- cmdbatch->context->id, cmdbatch->timestamp, status,
+ drawobj->context->id, drawobj->timestamp, status,
rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
if (rb != NULL)
- pr_fault(device, cmdbatch,
+ pr_fault(device, drawobj,
"gpu fault rb %d rb sw r/w %4.4x/%4.4x\n",
rb->id, rptr, rb->wptr);
} else {
@@ -1411,33 +1413,33 @@ static void adreno_fault_header(struct kgsl_device *device,
void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
kgsl_context_detached(&drawctxt->base)) {
- pr_context(KGSL_DEVICE(adreno_dev), cmdbatch->context,
- "gpu detached context %d\n", cmdbatch->context->id);
+ pr_context(KGSL_DEVICE(adreno_dev), drawobj->context,
+ "gpu detached context %d\n", drawobj->context->id);
clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
}
}
/**
- * process_cmdbatch_fault() - Process a cmdbatch for fault policies
- * @device: Device on which the cmdbatch caused a fault
- * @replay: List of cmdbatches that are to be replayed on the device. The
- * faulting cmdbatch is the first command in the replay list and the remaining
- * cmdbatches in the list are commands that were submitted to the same queue
+ * process_drawobj_fault() - Process a drawobj for fault policies
+ * @device: Device on which the drawobj caused a fault
+ * @replay: List of drawobjs that are to be replayed on the device. The
+ * faulting drawobj is the first command in the replay list and the remaining
+ * drawobjs in the list are commands that were submitted to the same queue
* as the faulting one.
- * @count: Number of cmdbatches in replay
+ * @count: Number of drawobjs in replay
* @base: The IB1 base at the time of fault
* @fault: The fault type
*/
-static void process_cmdbatch_fault(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count,
+static void process_drawobj_fault(struct kgsl_device *device,
+ struct kgsl_drawobj **replay, int count,
unsigned int base,
int fault)
{
- struct kgsl_cmdbatch *cmdbatch = replay[0];
+ struct kgsl_drawobj *drawobj = replay[0];
int i;
char *state = "failed";
@@ -1451,18 +1453,18 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
* where 1st and 4th gpu hang are more than 3 seconds apart we
* won't disable GFT and invalidate the context.
*/
- if (test_bit(KGSL_FT_THROTTLE, &cmdbatch->fault_policy)) {
- if (time_after(jiffies, (cmdbatch->context->fault_time
+ if (test_bit(KGSL_FT_THROTTLE, &drawobj->fault_policy)) {
+ if (time_after(jiffies, (drawobj->context->fault_time
+ msecs_to_jiffies(_fault_throttle_time)))) {
- cmdbatch->context->fault_time = jiffies;
- cmdbatch->context->fault_count = 1;
+ drawobj->context->fault_time = jiffies;
+ drawobj->context->fault_count = 1;
} else {
- cmdbatch->context->fault_count++;
- if (cmdbatch->context->fault_count >
+ drawobj->context->fault_count++;
+ if (drawobj->context->fault_count >
_fault_throttle_burst) {
set_bit(KGSL_FT_DISABLE,
- &cmdbatch->fault_policy);
- pr_context(device, cmdbatch->context,
+ &drawobj->fault_policy);
+ pr_context(device, drawobj->context,
"gpu fault threshold exceeded %d faults in %d msecs\n",
_fault_throttle_burst,
_fault_throttle_time);
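The throttle above is a sliding-window counter keyed off jiffies: faults landing inside the window accumulate, and exceeding _fault_throttle_burst disables fault tolerance for the context. The same logic in stand-alone C, with jiffies replaced by a plain millisecond counter (a sketch, not the kernel code; the window and burst values are illustrative):

#include <stdbool.h>
#include <stdio.h>

static unsigned long fault_throttle_time = 3000;	/* ms window */
static unsigned long fault_throttle_burst = 3;		/* faults allowed */

struct ctx { unsigned long fault_time; unsigned long fault_count; };

/* returns true when fault tolerance should be disabled for the context */
static bool fault_throttled(struct ctx *c, unsigned long now_ms)
{
	if (now_ms > c->fault_time + fault_throttle_time) {
		/* window expired: restart the count */
		c->fault_time = now_ms;
		c->fault_count = 1;
		return false;
	}
	return ++c->fault_count > fault_throttle_burst;
}

int main(void)
{
	struct ctx c = { 0, 0 };
	unsigned long t;

	/* five faults 500 ms apart: the fourth exceeds the burst */
	for (t = 0; t <= 2000; t += 500)
		printf("t=%lums throttled=%d\n", t, fault_throttled(&c, t));
	return 0;
}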
@@ -1471,45 +1473,45 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
}
/*
- * If FT is disabled for this cmdbatch invalidate immediately
+ * If FT is disabled for this drawobj invalidate immediately
*/
- if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) ||
- test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) {
+ if (test_bit(KGSL_FT_DISABLE, &drawobj->fault_policy) ||
+ test_bit(KGSL_FT_TEMP_DISABLE, &drawobj->fault_policy)) {
state = "skipped";
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&drawobj->fault_policy, BITS_PER_LONG);
}
/* If the context is detached do not run FT on context */
- if (kgsl_context_detached(cmdbatch->context)) {
+ if (kgsl_context_detached(drawobj->context)) {
state = "detached";
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&drawobj->fault_policy, BITS_PER_LONG);
}
/*
- * Set a flag so we don't print another PM dump if the cmdbatch fails
+ * Set a flag so we don't print another PM dump if the drawobj fails
* again on replay
*/
- set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
+ set_bit(KGSL_FT_SKIP_PMDUMP, &drawobj->fault_policy);
/*
* A hardware fault generally means something was deterministically
- * wrong with the command batch - no point in trying to replay it
+ * wrong with the drawobj - no point in trying to replay it
* Clear the replay bit and move on to the next policy level
*/
if (fault & ADRENO_HARD_FAULT)
- clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy));
+ clear_bit(KGSL_FT_REPLAY, &(drawobj->fault_policy));
/*
* A timeout fault means the IB timed out - clear the policy and
* invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay
- * because we won't see this cmdbatch again
+ * because we won't see this drawobj again
*/
if (fault & ADRENO_TIMEOUT_FAULT)
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&drawobj->fault_policy, BITS_PER_LONG);
/*
* If the context had a GPU page fault then it is likely it would fault
@@ -1517,83 +1519,83 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
*/
if (test_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv)) {
+ &drawobj->context->priv)) {
/* we'll need to resume the mmu later... */
- clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy);
+ clear_bit(KGSL_FT_REPLAY, &drawobj->fault_policy);
clear_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv);
+ &drawobj->context->priv);
}
/*
- * Execute the fault tolerance policy. Each command batch stores the
+ * Execute the fault tolerance policy. Each drawobj stores the
* current fault policy that was set when it was queued.
* As the options are tried in descending priority
* (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared
- * from the cmdbatch policy so the next thing can be tried if the
+ * from the drawobj policy so the next thing can be tried if the
* change comes around again
*/
- /* Replay the hanging command batch again */
- if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY));
- set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery);
+ /* Replay the hanging drawobj again */
+ if (test_and_clear_bit(KGSL_FT_REPLAY, &drawobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(drawobj, BIT(KGSL_FT_REPLAY));
+ set_bit(KGSL_FT_REPLAY, &drawobj->fault_recovery);
return;
}
/*
* Skip the last IB1 that was played but replay everything else.
- * Note that the last IB1 might not be in the "hung" command batch
+ * Note that the last IB1 might not be in the "hung" drawobj
* because the CP may have caused a page-fault while it was prefetching
* the next IB1/IB2. walk all outstanding commands and zap the
* supposedly bad IB1 where ever it lurks.
*/
- if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB));
- set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery);
+ if (test_and_clear_bit(KGSL_FT_SKIPIB, &drawobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(drawobj, BIT(KGSL_FT_SKIPIB));
+ set_bit(KGSL_FT_SKIPIB, &drawobj->fault_recovery);
for (i = 0; i < count; i++) {
if (replay[i] != NULL &&
- replay[i]->context->id == cmdbatch->context->id)
- cmdbatch_skip_ib(replay[i], base);
+ replay[i]->context->id == drawobj->context->id)
+ drawobj_skip_ib(replay[i], base);
}
return;
}
- /* Skip the faulted command batch submission */
- if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPCMD));
+ /* Skip the faulted drawobj submission */
+ if (test_and_clear_bit(KGSL_FT_SKIPCMD, &drawobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(drawobj, BIT(KGSL_FT_SKIPCMD));
- /* Skip faulting command batch */
- cmdbatch_skip_cmd(cmdbatch, replay, count);
+ /* Skip faulting drawobj */
+ drawobj_skip_cmd(drawobj, replay, count);
return;
}
- if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch,
+ if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &drawobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(drawobj,
BIT(KGSL_FT_SKIPFRAME));
- set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery);
+ set_bit(KGSL_FT_SKIPFRAME, &drawobj->fault_recovery);
/*
- * Skip all the pending command batches for this context until
+ * Skip all the pending drawobjs for this context until
* the EOF frame is seen
*/
- cmdbatch_skip_frame(cmdbatch, replay, count);
+ drawobj_skip_frame(drawobj, replay, count);
return;
}
/* If we get here then all the policies failed */
- pr_context(device, cmdbatch->context, "gpu %s ctx %d ts %d\n",
- state, cmdbatch->context->id, cmdbatch->timestamp);
+ pr_context(device, drawobj->context, "gpu %s ctx %d ts %d\n",
+ state, drawobj->context->id, drawobj->timestamp);
/* Mark the context as failed */
- mark_guilty_context(device, cmdbatch->context->id);
+ mark_guilty_context(device, drawobj->context->id);
/* Invalidate the context */
- adreno_drawctxt_invalidate(device, cmdbatch->context);
+ adreno_drawctxt_invalidate(device, drawobj->context);
}
/**
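The body of process_drawobj_fault() consumes the fault_policy bitmap in descending priority: each fault test-and-clears the strongest remaining option (REPLAY -> SKIPIB -> SKIPCMD -> SKIPFRAME) and applies it, and an empty bitmap invalidates the context. A stand-alone model of that walk; test_and_clear() is a user-space stand-in for the kernel's test_and_clear_bit(), and the enum values are illustrative, not the uapi KGSL_FT_* bit positions:

#include <stdio.h>

enum { FT_REPLAY, FT_SKIPIB, FT_SKIPCMD, FT_SKIPFRAME };

/* user-space stand-in for test_and_clear_bit() */
static int test_and_clear(unsigned long *mask, int bit)
{
	int was_set = !!(*mask & (1UL << bit));

	*mask &= ~(1UL << bit);
	return was_set;
}

int main(void)
{
	/* policy starts with every recovery option enabled */
	unsigned long policy = (1UL << FT_REPLAY) | (1UL << FT_SKIPIB) |
			       (1UL << FT_SKIPCMD) | (1UL << FT_SKIPFRAME);
	int fault;

	/* each fault consumes the highest-priority remaining option, so
	 * repeated faults walk REPLAY -> SKIPIB -> SKIPCMD -> SKIPFRAME */
	for (fault = 1; policy; fault++) {
		if (test_and_clear(&policy, FT_REPLAY))
			printf("fault %d: replay\n", fault);
		else if (test_and_clear(&policy, FT_SKIPIB))
			printf("fault %d: skip IB\n", fault);
		else if (test_and_clear(&policy, FT_SKIPCMD))
			printf("fault %d: skip command\n", fault);
		else if (test_and_clear(&policy, FT_SKIPFRAME))
			printf("fault %d: skip frame\n", fault);
	}
	printf("fault %d: all policies exhausted, invalidate context\n",
	       fault);
	return 0;
}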
@@ -1605,12 +1607,12 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
* @base: The IB1 base during the fault
*/
static void recover_dispatch_q(struct kgsl_device *device,
- struct adreno_dispatcher_cmdqueue *dispatch_q,
+ struct adreno_dispatcher_drawqueue *dispatch_q,
int fault,
unsigned int base)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_cmdbatch **replay = NULL;
+ struct kgsl_drawobj **replay = NULL;
unsigned int ptr;
int first = 0;
int count = 0;
@@ -1629,9 +1631,10 @@ static void recover_dispatch_q(struct kgsl_device *device,
mark_guilty_context(device, context->id);
adreno_drawctxt_invalidate(device, context);
- kgsl_cmdbatch_destroy(dispatch_q->cmd_q[ptr]);
+ kgsl_drawobj_destroy(dispatch_q->cmd_q[ptr]);
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ ptr = DRAWQUEUE_NEXT(ptr,
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE);
}
/*
@@ -1643,22 +1646,22 @@ static void recover_dispatch_q(struct kgsl_device *device,
goto replay;
}
- /* Copy the inflight command batches into the temporary storage */
+ /* Copy the inflight drawobjs into the temporary storage */
ptr = dispatch_q->head;
while (ptr != dispatch_q->tail) {
replay[count++] = dispatch_q->cmd_q[ptr];
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ ptr = DRAWQUEUE_NEXT(ptr, ADRENO_DISPATCH_DRAWQUEUE_SIZE);
}
if (fault && count)
- process_cmdbatch_fault(device, replay,
+ process_drawobj_fault(device, replay,
count, base, fault);
replay:
dispatch_q->inflight = 0;
dispatch_q->head = dispatch_q->tail = 0;
- /* Remove any pending command batches that have been invalidated */
- remove_invalidated_cmdbatches(device, replay, count);
+ /* Remove any pending drawobjs that have been invalidated */
+ remove_invalidated_drawobjs(device, replay, count);
/* Replay the pending command buffers */
for (i = 0; i < count; i++) {
@@ -1674,16 +1677,16 @@ replay:
*/
if (first == 0) {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
first = 1;
}
/*
- * Force each command batch to wait for idle - this avoids weird
+ * Force each drawobj to wait for idle - this avoids weird
* CP parse issues
*/
- set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv);
+ set_bit(DRAWOBJ_FLAG_WFI, &replay[i]->priv);
ret = sendcmd(adreno_dev, replay[i]);
@@ -1701,7 +1704,7 @@ replay:
mark_guilty_context(device, replay[i]->context->id);
adreno_drawctxt_invalidate(device, replay[i]->context);
- remove_invalidated_cmdbatches(device, &replay[i],
+ remove_invalidated_drawobjs(device, &replay[i],
count - i);
}
}
@@ -1713,36 +1716,36 @@ replay:
}
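
recover_dispatch_q() follows a drain-and-replay shape: the inflight ring is copied into a linear replay[] array, the ring is reset, and the survivors are resubmitted, with any context whose resubmission fails being invalidated. A compressed sketch of that shape, where sendcmd() and the queue layout are stand-ins rather than the driver's types:

    #include <stdio.h>

    #define QSIZE 8
    #define QNEXT(i) (((i) + 1) % QSIZE)

    struct dispatch_q {
        int cmd_q[QSIZE];
        unsigned int head, tail, inflight;
    };

    /* pretend resubmission: odd ids fail */
    static int sendcmd(int id)
    {
        return (id & 1) ? -1 : 0;
    }

    static void recover(struct dispatch_q *q)
    {
        int replay[QSIZE];
        unsigned int ptr = q->head;
        int i, count = 0;

        /* copy the inflight entries into temporary storage */
        while (ptr != q->tail) {
            replay[count++] = q->cmd_q[ptr];
            ptr = QNEXT(ptr);
        }

        /* reset the ring before replaying anything */
        q->head = q->tail = 0;
        q->inflight = 0;

        for (i = 0; i < count; i++) {
            if (sendcmd(replay[i]) == 0) {
                q->inflight++;
                continue;
            }
            printf("invalidating submission %d\n", replay[i]);
        }
    }

    int main(void)
    {
        struct dispatch_q q = { .cmd_q = { 10, 11, 12 }, .tail = 3 };

        recover(&q);    /* 10 and 12 replay; 11 is dropped */
        return 0;
    }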
static void do_header_and_snapshot(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+ struct adreno_ringbuffer *rb, struct kgsl_drawobj *drawobj)
{
- /* Always dump the snapshot on a non-cmdbatch failure */
- if (cmdbatch == NULL) {
+ /* Always dump the snapshot on a non-drawobj failure */
+ if (drawobj == NULL) {
adreno_fault_header(device, rb, NULL);
kgsl_device_snapshot(device, NULL);
return;
}
/* Skip everything if the PMDUMP flag is set */
- if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy))
+ if (test_bit(KGSL_FT_SKIP_PMDUMP, &drawobj->fault_policy))
return;
/* Print the fault header */
- adreno_fault_header(device, rb, cmdbatch);
+ adreno_fault_header(device, rb, drawobj);
- if (!(cmdbatch->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
- kgsl_device_snapshot(device, cmdbatch->context);
+ if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
+ kgsl_device_snapshot(device, drawobj->context);
}
static int dispatcher_do_fault(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_dispatcher_cmdqueue *dispatch_q = NULL, *dispatch_q_temp;
+ struct adreno_dispatcher_drawqueue *dispatch_q = NULL, *dispatch_q_temp;
struct adreno_ringbuffer *rb;
struct adreno_ringbuffer *hung_rb = NULL;
unsigned int reg;
uint64_t base;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj *drawobj = NULL;
int ret, i;
int fault;
int halt;
@@ -1792,10 +1795,10 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
}
/*
- * retire cmdbatches from all the dispatch_q's before starting recovery
+ * retire drawobjs from all the dispatch_q's before starting recovery
*/
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- adreno_dispatch_retire_cmdqueue(adreno_dev,
+ adreno_dispatch_retire_drawqueue(adreno_dev,
&(rb->dispatch_q));
/* Select the active dispatch_q */
if (base == rb->buffer_desc.gpuaddr) {
@@ -1814,15 +1817,15 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
}
}
- if (dispatch_q && !adreno_cmdqueue_is_empty(dispatch_q)) {
- cmdbatch = dispatch_q->cmd_q[dispatch_q->head];
- trace_adreno_cmdbatch_fault(cmdbatch, fault);
+ if (dispatch_q && !adreno_drawqueue_is_empty(dispatch_q)) {
+ drawobj = dispatch_q->cmd_q[dispatch_q->head];
+ trace_adreno_cmdbatch_fault(drawobj, fault);
}
adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
ADRENO_REG_CP_IB1_BASE_HI, &base);
- do_header_and_snapshot(device, hung_rb, cmdbatch);
+ do_header_and_snapshot(device, hung_rb, drawobj);
/* Terminate the stalled transaction and resume the IOMMU */
if (fault & ADRENO_IOMMU_PAGE_FAULT)
@@ -1876,22 +1879,22 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
return 1;
}
-static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
+static inline int drawobj_consumed(struct kgsl_drawobj *drawobj,
unsigned int consumed, unsigned int retired)
{
- return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) &&
- (timestamp_cmp(retired, cmdbatch->timestamp) < 0));
+ return ((timestamp_cmp(drawobj->timestamp, consumed) >= 0) &&
+ (timestamp_cmp(retired, drawobj->timestamp) < 0));
}
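
drawobj_consumed() brackets a timestamp between the last-consumed and last-retired markers, and timestamp_cmp() has to stay correct across 32-bit wraparound. The standard trick, the same one behind the kernel's time_after(), is to subtract in unsigned arithmetic and read the difference as signed; a sketch, assuming two timestamps never diverge by 2^31 or more:

    #include <stdint.h>
    #include <stdio.h>

    /* <0 if a precedes b, 0 if equal, >0 if a follows b; wrap-safe */
    static int timestamp_cmp(uint32_t a, uint32_t b)
    {
        int32_t delta = (int32_t)(a - b);

        return (delta > 0) - (delta < 0);
    }

    /* mirrors the bracketing test above: consumed but not yet retired */
    static int obj_consumed(uint32_t ts, uint32_t consumed, uint32_t retired)
    {
        return timestamp_cmp(ts, consumed) >= 0 &&
               timestamp_cmp(retired, ts) < 0;
    }

    int main(void)
    {
        /* the ordering survives the 32-bit wrap */
        printf("%d\n", timestamp_cmp(2, 0xfffffffeu) > 0);  /* 1 */
        printf("%d\n", obj_consumed(5, 5, 3));              /* 1 */
        return 0;
    }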
static void _print_recovery(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
static struct {
unsigned int mask;
const char *str;
} flags[] = { ADRENO_FT_TYPES };
- int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG);
+ int i, nr = find_first_bit(&drawobj->fault_recovery, BITS_PER_LONG);
char *result = "unknown";
for (i = 0; i < ARRAY_SIZE(flags); i++) {
@@ -1901,40 +1904,40 @@ static void _print_recovery(struct kgsl_device *device,
}
}
- pr_context(device, cmdbatch->context,
+ pr_context(device, drawobj->context,
"gpu %s ctx %d ts %d policy %lX\n",
- result, cmdbatch->context->id, cmdbatch->timestamp,
- cmdbatch->fault_recovery);
+ result, drawobj->context->id, drawobj->timestamp,
+ drawobj->fault_recovery);
}
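
_print_recovery() resolves the first set bit of fault_recovery to a human-readable name through the { mask, string } table expanded from ADRENO_FT_TYPES. The same lookup in isolation, with made-up flag names:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    struct flag_name {
        unsigned int mask;
        const char *str;
    };

    static const struct flag_name flags[] = {
        { 1u << 0, "replay" },
        { 1u << 1, "skipcmd" },
        { 1u << 2, "skipframe" },
    };

    static const char *recovery_name(unsigned int recovery)
    {
        const char *result = "unknown";
        int nr = ffs((int)recovery);    /* 1-based index of lowest set bit */
        size_t i;

        for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
            if (nr > 0 && flags[i].mask == (1u << (nr - 1))) {
                result = flags[i].str;
                break;
            }
        }
        return result;
    }

    int main(void)
    {
        printf("%s\n", recovery_name(1u << 1));    /* skipcmd */
        return 0;
    }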
-static void cmdbatch_profile_ticks(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch, uint64_t *start, uint64_t *retire)
+static void drawobj_profile_ticks(struct adreno_device *adreno_dev,
+ struct kgsl_drawobj *drawobj, uint64_t *start, uint64_t *retire)
{
- void *ptr = adreno_dev->cmdbatch_profile_buffer.hostptr;
- struct adreno_cmdbatch_profile_entry *entry;
+ void *ptr = adreno_dev->profile_buffer.hostptr;
+ struct adreno_drawobj_profile_entry *entry;
- entry = (struct adreno_cmdbatch_profile_entry *)
- (ptr + (cmdbatch->profile_index * sizeof(*entry)));
+ entry = (struct adreno_drawobj_profile_entry *)
+ (ptr + (drawobj->profile_index * sizeof(*entry)));
rmb();
*start = entry->started;
*retire = entry->retired;
}
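
The rmb() above keeps the CPU from reading the GPU-written started/retired ticks before the earlier loads that established the entry is valid. In portable C the equivalent ordering comes from an acquire load pairing with the producer's release store; a self-contained analogue, not the driver's actual mechanism:

    #include <stdatomic.h>
    #include <stdint.h>

    struct profile_entry {
        uint64_t started;
        uint64_t retired;
    };

    static struct profile_entry entry;
    static _Atomic int entry_ready;    /* set once 'entry' is fully written */

    /* producer: fill the entry, then publish it with release semantics */
    static void write_ticks(uint64_t start, uint64_t retire)
    {
        entry.started = start;
        entry.retired = retire;
        atomic_store_explicit(&entry_ready, 1, memory_order_release);
    }

    /* consumer: the acquire load plays the role rmb() plays above */
    static int read_ticks(uint64_t *start, uint64_t *retire)
    {
        if (!atomic_load_explicit(&entry_ready, memory_order_acquire))
            return -1;
        *start = entry.started;
        *retire = entry.retired;
        return 0;
    }

    int main(void)
    {
        uint64_t s, r;

        write_ticks(100, 164);
        return read_ticks(&s, &r);    /* 0: both ticks are visible */
    }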
-static void retire_cmdbatch(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
+static void retire_drawobj(struct adreno_device *adreno_dev,
+ struct kgsl_drawobj *drawobj)
{
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
uint64_t start = 0, end = 0;
- if (cmdbatch->fault_recovery != 0) {
- set_bit(ADRENO_CONTEXT_FAULT, &cmdbatch->context->priv);
- _print_recovery(KGSL_DEVICE(adreno_dev), cmdbatch);
+ if (drawobj->fault_recovery != 0) {
+ set_bit(ADRENO_CONTEXT_FAULT, &drawobj->context->priv);
+ _print_recovery(KGSL_DEVICE(adreno_dev), drawobj);
}
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv))
- cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end);
+ if (test_bit(DRAWOBJ_FLAG_PROFILE, &drawobj->priv))
+ drawobj_profile_ticks(adreno_dev, drawobj, &start, &end);
/*
* For A3xx we still get the rptr from the CP_RB_RPTR instead of
@@ -1942,48 +1945,48 @@ static void retire_cmdbatch(struct adreno_device *adreno_dev,
* So avoid reading GPU register directly for A3xx.
*/
if (adreno_is_a3xx(adreno_dev))
- trace_adreno_cmdbatch_retired(cmdbatch,
+ trace_adreno_cmdbatch_retired(drawobj,
(int) dispatcher->inflight, start, end,
- ADRENO_CMDBATCH_RB(cmdbatch), 0);
+ ADRENO_DRAWOBJ_RB(drawobj), 0);
else
- trace_adreno_cmdbatch_retired(cmdbatch,
+ trace_adreno_cmdbatch_retired(drawobj,
(int) dispatcher->inflight, start, end,
- ADRENO_CMDBATCH_RB(cmdbatch),
+ ADRENO_DRAWOBJ_RB(drawobj),
adreno_get_rptr(drawctxt->rb));
drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
- end - cmdbatch->submit_ticks;
+ end - drawobj->submit_ticks;
drawctxt->ticks_index = (drawctxt->ticks_index + 1) %
SUBMIT_RETIRE_TICKS_SIZE;
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
}
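
retire_drawobj() finishes by logging end - submit_ticks into the context's submit_retire_ticks[] array, advancing ticks_index modulo SUBMIT_RETIRE_TICKS_SIZE so only the most recent seven samples survive. The bookkeeping on its own:

    #include <stdint.h>
    #include <stdio.h>

    #define TICKS_SIZE 7    /* matches SUBMIT_RETIRE_TICKS_SIZE */

    struct latency_ring {
        uint64_t samples[TICKS_SIZE];
        unsigned int index;
    };

    /* record one submit-to-retire delta, overwriting the oldest sample */
    static void record(struct latency_ring *r, uint64_t submit, uint64_t retire)
    {
        r->samples[r->index] = retire - submit;
        r->index = (r->index + 1) % TICKS_SIZE;
    }

    int main(void)
    {
        struct latency_ring r = { { 0 }, 0 };
        int i;

        for (i = 0; i < 10; i++)    /* ten records wrap the ring */
            record(&r, i * 100, i * 100 + 40 + i);

        for (i = 0; i < TICKS_SIZE; i++)
            printf("sample[%d] = %llu\n", i,
                   (unsigned long long)r.samples[i]);
        return 0;
    }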
-static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
int count = 0;
- while (!adreno_cmdqueue_is_empty(cmdqueue)) {
- struct kgsl_cmdbatch *cmdbatch =
- cmdqueue->cmd_q[cmdqueue->head];
+ while (!adreno_drawqueue_is_empty(drawqueue)) {
+ struct kgsl_drawobj *drawobj =
+ drawqueue->cmd_q[drawqueue->head];
- if (!kgsl_check_timestamp(device, cmdbatch->context,
- cmdbatch->timestamp))
+ if (!kgsl_check_timestamp(device, drawobj->context,
+ drawobj->timestamp))
break;
- retire_cmdbatch(adreno_dev, cmdbatch);
+ retire_drawobj(adreno_dev, drawobj);
dispatcher->inflight--;
- cmdqueue->inflight--;
+ drawqueue->inflight--;
- cmdqueue->cmd_q[cmdqueue->head] = NULL;
+ drawqueue->cmd_q[drawqueue->head] = NULL;
- cmdqueue->head = CMDQUEUE_NEXT(cmdqueue->head,
- ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ drawqueue->head = DRAWQUEUE_NEXT(drawqueue->head,
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE);
count++;
}
@@ -1992,13 +1995,13 @@ static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
}
static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_cmdbatch *cmdbatch = cmdqueue->cmd_q[cmdqueue->head];
+ struct kgsl_drawobj *drawobj = drawqueue->cmd_q[drawqueue->head];
/* Don't timeout if the timer hasn't expired yet (duh) */
- if (time_is_after_jiffies(cmdqueue->expires))
+ if (time_is_after_jiffies(drawqueue->expires))
return;
/* Don't timeout if the IB timeout is disabled globally */
@@ -2006,30 +2009,30 @@ static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
return;
/* Don't time out if the context has disabled it */
- if (cmdbatch->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ if (drawobj->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
return;
- pr_context(device, cmdbatch->context, "gpu timeout ctx %d ts %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ pr_context(device, drawobj->context, "gpu timeout ctx %d ts %d\n",
+ drawobj->context->id, drawobj->timestamp);
adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
}
-static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- int count = adreno_dispatch_retire_cmdqueue(adreno_dev, cmdqueue);
+ int count = adreno_dispatch_retire_drawqueue(adreno_dev, drawqueue);
/* Nothing to do if there are no pending commands */
- if (adreno_cmdqueue_is_empty(cmdqueue))
+ if (adreno_drawqueue_is_empty(drawqueue))
return count;
- /* Don't update the cmdqueue timeout if we are about to preempt out */
+ /* Don't update the drawqueue timeout if we are about to preempt out */
if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
return count;
- /* Don't update the cmdqueue timeout if it isn't active */
- if (!cmdqueue_is_current(cmdqueue))
+ /* Don't update the drawqueue timeout if it isn't active */
+ if (!drawqueue_is_current(drawqueue))
return count;
/*
@@ -2038,17 +2041,17 @@ static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
*/
if (count) {
- cmdqueue->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ drawqueue->expires = jiffies +
+ msecs_to_jiffies(adreno_drawobj_timeout);
return count;
}
/*
* If we get here then 1) the ringbuffer is current and 2) we haven't
* retired anything. Check to see if the timeout is valid for the
- * current cmdbatch and fault if it has expired
+ * current drawobj and fault if it has expired
*/
- _adreno_dispatch_check_timeout(adreno_dev, cmdqueue);
+ _adreno_dispatch_check_timeout(adreno_dev, drawqueue);
return 0;
}
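
The timeout logic above only faults when the drawqueue made no progress for a full period: any retirement pushes expires forward, and the expiry test itself must be wrap-safe in the jiffies style. A sketch of the pattern with a stand-in tick counter:

    #include <stdint.h>

    static uint32_t jiffies;    /* stand-in for the kernel tick counter */

    /* wrap-safe: nonzero while the deadline is still in the future */
    static int time_is_after(uint32_t deadline, uint32_t now)
    {
        return (int32_t)(deadline - now) > 0;
    }

    struct drawqueue {
        uint32_t expires;
        int retired_this_pass;
    };

    /* returns 1 when the queue should be declared hung */
    static int check_timeout(struct drawqueue *q, uint32_t timeout_ticks)
    {
        if (q->retired_this_pass) {
            q->expires = jiffies + timeout_ticks;    /* progress: rearm */
            return 0;
        }
        if (time_is_after(q->expires, jiffies))
            return 0;    /* not expired yet */
        return 1;        /* no progress for a full timeout */
    }

    int main(void)
    {
        struct drawqueue q = { .expires = 100, .retired_this_pass = 0 };

        jiffies = 101;    /* deadline passed with no retirements */
        return check_timeout(&q, 50) ? 0 : 1;
    }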
@@ -2067,11 +2070,11 @@ static void _dispatcher_update_timers(struct adreno_device *adreno_dev)
/* Check to see if we need to update the command timer */
if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(adreno_dev->cur_rb);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(adreno_dev->cur_rb);
- if (!adreno_cmdqueue_is_empty(cmdqueue))
- mod_timer(&dispatcher->timer, cmdqueue->expires);
+ if (!adreno_drawqueue_is_empty(drawqueue))
+ mod_timer(&dispatcher->timer, drawqueue->expires);
}
}
@@ -2111,14 +2114,14 @@ static void adreno_dispatcher_work(struct work_struct *work)
/*
* As long as there are inflight commands, process retired commands from
- * all cmdqueues
+ * all drawqueues
*/
for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(&adreno_dev->ringbuffers[i]);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(&adreno_dev->ringbuffers[i]);
- count += adreno_dispatch_process_cmdqueue(adreno_dev,
- cmdqueue);
+ count += adreno_dispatch_process_drawqueue(adreno_dev,
+ drawqueue);
if (dispatcher->inflight == 0)
break;
}
@@ -2178,7 +2181,7 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
}
/*
- * This is called on a regular basis while command batches are inflight. Fault
+ * This is called on a regular basis while drawobjs are inflight. Fault
* detection registers are read and compared to the existing values - if they
* changed then the GPU is still running. If they are the same between
* subsequent calls then the GPU may have faulted
@@ -2230,7 +2233,7 @@ static void adreno_dispatcher_timer(unsigned long data)
*/
void adreno_dispatcher_start(struct kgsl_device *device)
{
- complete_all(&device->cmdbatch_gate);
+ complete_all(&device->halt_gate);
/* Schedule the work loop to get things going */
adreno_dispatcher_schedule(device);
@@ -2267,13 +2270,13 @@ void adreno_dispatcher_close(struct adreno_device *adreno_dev)
del_timer_sync(&dispatcher->fault_timer);
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- struct adreno_dispatcher_cmdqueue *dispatch_q =
+ struct adreno_dispatcher_drawqueue *dispatch_q =
&(rb->dispatch_q);
- while (!adreno_cmdqueue_is_empty(dispatch_q)) {
- kgsl_cmdbatch_destroy(
+ while (!adreno_drawqueue_is_empty(dispatch_q)) {
+ kgsl_drawobj_destroy(
dispatch_q->cmd_q[dispatch_q->head]);
dispatch_q->head = (dispatch_q->head + 1)
- % ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ % ADRENO_DISPATCH_DRAWQUEUE_SIZE;
}
}
@@ -2332,23 +2335,23 @@ static ssize_t _show_uint(struct adreno_dispatcher *dispatcher,
*((unsigned int *) attr->value));
}
-static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE,
+static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_DRAWQUEUE_SIZE,
_dispatcher_q_inflight_hi);
static DISPATCHER_UINT_ATTR(inflight_low_latency, 0644,
- ADRENO_DISPATCH_CMDQUEUE_SIZE, _dispatcher_q_inflight_lo);
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE, _dispatcher_q_inflight_lo);
/*
* Our code that "puts back" a command from the context is much cleaner
* if we are sure that there will always be enough room in the
* ringbuffer, so restrict the maximum size of the context queue to
- * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1
+ * ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1
*/
-static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644,
- ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size);
+static DISPATCHER_UINT_ATTR(context_drawqueue_size, 0644,
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1, _context_drawqueue_size);
static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0,
- _context_cmdbatch_burst);
-static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0,
- adreno_cmdbatch_timeout);
+ _context_drawobj_burst);
+static DISPATCHER_UINT_ATTR(drawobj_timeout, 0644, 0,
+ adreno_drawobj_timeout);
static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0,
_fault_timer_interval);
@@ -2366,9 +2369,9 @@ static DISPATCHER_UINT_ATTR(dispatch_starvation_time, 0644, 0,
static struct attribute *dispatcher_attrs[] = {
&dispatcher_attr_inflight.attr,
&dispatcher_attr_inflight_low_latency.attr,
- &dispatcher_attr_context_cmdqueue_size.attr,
+ &dispatcher_attr_context_drawqueue_size.attr,
&dispatcher_attr_context_burst_count.attr,
- &dispatcher_attr_cmdbatch_timeout.attr,
+ &dispatcher_attr_drawobj_timeout.attr,
&dispatcher_attr_context_queue_wait.attr,
&dispatcher_attr_fault_detect_interval.attr,
&dispatcher_attr_fault_throttle_time.attr,
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 699c3e4adb27..8386129bc143 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -15,7 +15,7 @@
#define ____ADRENO_DISPATCHER_H
extern unsigned int adreno_disp_preempt_fair_sched;
-extern unsigned int adreno_cmdbatch_timeout;
+extern unsigned int adreno_drawobj_timeout;
extern unsigned int adreno_dispatch_starvation_time;
extern unsigned int adreno_dispatch_time_slice;
@@ -44,21 +44,21 @@ enum adreno_dispatcher_starve_timer_states {
* sizes that can be chosen at runtime
*/
-#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
+#define ADRENO_DISPATCH_DRAWQUEUE_SIZE 128
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
/**
- * struct adreno_dispatcher_cmdqueue - List of commands for a RB level
- * @cmd_q: List of command batches submitted to dispatcher
+ * struct adreno_dispatcher_drawqueue - List of commands for a RB level
+ * @cmd_q: List of drawobjs submitted to dispatcher
* @inflight: Number of commands inflight in this queue
* @head: Head pointer to the queue
* @tail: Tail pointer to the queue
- * @active_context_count: Number of active contexts seen in this rb cmdqueue
- * @expires: The jiffies value at which this cmdqueue has run too long
+ * @active_context_count: Number of active contexts seen in this rb drawqueue
+ * @expires: The jiffies value at which this drawqueue has run too long
*/
-struct adreno_dispatcher_cmdqueue {
- struct kgsl_cmdbatch *cmd_q[ADRENO_DISPATCH_CMDQUEUE_SIZE];
+struct adreno_dispatcher_drawqueue {
+ struct kgsl_drawobj *cmd_q[ADRENO_DISPATCH_DRAWQUEUE_SIZE];
unsigned int inflight;
unsigned int head;
unsigned int tail;
@@ -70,10 +70,10 @@ struct adreno_dispatcher_cmdqueue {
* struct adreno_dispatcher - container for the adreno GPU dispatcher
* @mutex: Mutex to protect the structure
* @state: Current state of the dispatcher (active or paused)
- * @timer: Timer to monitor the progress of the command batches
- * @inflight: Number of command batch operations pending in the ringbuffer
+ * @timer: Timer to monitor the progress of the drawobjs
+ * @inflight: Number of drawobj operations pending in the ringbuffer
* @fault: Non-zero if a fault was detected.
- * @pending: Priority list of contexts waiting to submit command batches
+ * @pending: Priority list of contexts waiting to submit drawobjs
* @plist_lock: Spin lock to protect the pending queue
* @work: work_struct to put the dispatcher in a work queue
* @kobj: kobject for the dispatcher directory in the device sysfs node
@@ -110,7 +110,7 @@ void adreno_dispatcher_irq_fault(struct adreno_device *adreno_dev);
void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
+ struct adreno_context *drawctxt, struct kgsl_drawobj *drawobj,
uint32_t *timestamp);
void adreno_dispatcher_schedule(struct kgsl_device *device);
@@ -120,11 +120,11 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
void adreno_dispatcher_preempt_callback(struct adreno_device *adreno_dev,
int bit);
void adreno_preempt_process_dispatch_queue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *dispatch_q);
+ struct adreno_dispatcher_drawqueue *dispatch_q);
-static inline bool adreno_cmdqueue_is_empty(
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static inline bool adreno_drawqueue_is_empty(
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- return (cmdqueue != NULL && cmdqueue->head == cmdqueue->tail);
+ return (drawqueue != NULL && drawqueue->head == drawqueue->tail);
}
#endif /* __ADRENO_DISPATCHER_H */
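
Throughout this header head == tail means empty, which is also why the sysfs attribute earlier caps a context queue at ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1: leaving one slot permanently free lets a full ring (NEXT(tail) == head) be told apart from an empty one without a separate element count. A generic sketch of that ring discipline; the push/pop helpers are illustrative, not driver API:

    #include <stddef.h>

    #define QSIZE 128
    #define QNEXT(i) (((i) + 1) % QSIZE)

    struct drawqueue {
        void *cmd_q[QSIZE];
        unsigned int head, tail;
    };

    /* same test as adreno_drawqueue_is_empty() */
    static int q_empty(const struct drawqueue *q)
    {
        return q->head == q->tail;
    }

    /* one slot is sacrificed so "full" and "empty" differ */
    static int q_full(const struct drawqueue *q)
    {
        return QNEXT(q->tail) == q->head;
    }

    static int q_push(struct drawqueue *q, void *obj)
    {
        if (q_full(q))
            return -1;
        q->cmd_q[q->tail] = obj;
        q->tail = QNEXT(q->tail);
        return 0;
    }

    static void *q_pop(struct drawqueue *q)
    {
        void *obj;

        if (q_empty(q))
            return NULL;
        obj = q->cmd_q[q->head];
        q->cmd_q[q->head] = NULL;
        q->head = QNEXT(q->head);
        return obj;
    }

    int main(void)
    {
        struct drawqueue q = { .head = 0, .tail = 0 };
        int obj = 42;

        q_push(&q, &obj);
        return q_pop(&q) == &obj ? 0 : 1;
    }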
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index fb95f6108fb8..c3709efb3c67 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -59,14 +59,14 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire);
/*
- * We may have cmdbatch timer running, which also uses same
+ * We may have a drawobj timer running, which also uses the same
* lock, so take the lock with software interrupts disabled (bh)
* to avoid spin lock recursion.
*
* Use Spin trylock because dispatcher can acquire drawctxt->lock
* if context is pending and the fence it is waiting on just got
* signalled. Dispatcher acquires drawctxt->lock and tries to
- * delete the cmdbatch timer using del_timer_sync().
+ * delete the drawobj timer using del_timer_sync().
* del_timer_sync() waits till timer and its pending handlers
* are deleted. But if the timer expires at the same time,
* timer handler could be waiting on drawctxt->lock leading to a
@@ -83,23 +83,23 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
context->id, queue, drawctxt->submitted_timestamp,
start, retire);
- if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ if (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
+ struct kgsl_drawobj *drawobj =
+ drawctxt->drawqueue[drawctxt->drawqueue_head];
- if (test_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv)) {
+ if (test_bit(DRAWOBJ_FLAG_FENCE_LOG, &drawobj->priv)) {
dev_err(device->dev,
" possible deadlock. Context %d might be blocked for itself\n",
context->id);
goto stats;
}
- if (kgsl_cmdbatch_events_pending(cmdbatch)) {
+ if (kgsl_drawobj_events_pending(drawobj)) {
dev_err(device->dev,
" context[%d] (ts=%d) Active sync points:\n",
- context->id, cmdbatch->timestamp);
+ context->id, drawobj->timestamp);
- kgsl_dump_syncpoints(device, cmdbatch);
+ kgsl_dump_syncpoints(device, drawobj);
}
}
@@ -229,19 +229,19 @@ done:
return ret;
}
-static int drawctxt_detach_cmdbatches(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch **list)
+static int drawctxt_detach_drawobjs(struct adreno_context *drawctxt,
+ struct kgsl_drawobj **list)
{
int count = 0;
- while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ while (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
+ struct kgsl_drawobj *drawobj =
+ drawctxt->drawqueue[drawctxt->drawqueue_head];
- drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
+ drawctxt->drawqueue_head = (drawctxt->drawqueue_head + 1) %
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE;
- list[count++] = cmdbatch;
+ list[count++] = drawobj;
}
return count;
@@ -259,7 +259,7 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
struct kgsl_context *context)
{
struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
int i, count;
trace_adreno_drawctxt_invalidate(drawctxt);
@@ -280,13 +280,13 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
drawctxt->timestamp);
/* Get rid of commands still waiting in the queue */
- count = drawctxt_detach_cmdbatches(drawctxt, list);
+ count = drawctxt_detach_drawobjs(drawctxt, list);
spin_unlock(&drawctxt->lock);
for (i = 0; i < count; i++) {
kgsl_cancel_events_timestamp(device, &context->events,
list[i]->timestamp);
- kgsl_cmdbatch_destroy(list[i]);
+ kgsl_drawobj_destroy(list[i]);
}
/* Make sure all pending events are processed or cancelled */
@@ -453,7 +453,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
struct adreno_context *drawctxt;
struct adreno_ringbuffer *rb;
int ret, count, i;
- struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
if (context == NULL)
return;
@@ -468,7 +468,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
spin_unlock(&adreno_dev->active_list_lock);
spin_lock(&drawctxt->lock);
- count = drawctxt_detach_cmdbatches(drawctxt, list);
+ count = drawctxt_detach_drawobjs(drawctxt, list);
spin_unlock(&drawctxt->lock);
for (i = 0; i < count; i++) {
@@ -478,7 +478,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
* detached status here.
*/
adreno_fault_skipcmd_detached(adreno_dev, drawctxt, list[i]);
- kgsl_cmdbatch_destroy(list[i]);
+ kgsl_drawobj_destroy(list[i]);
}
/*
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 5ea911954991..76b4e8ce63c3 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -18,7 +18,7 @@ struct adreno_context_type {
const char *str;
};
-#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128
+#define ADRENO_CONTEXT_DRAWQUEUE_SIZE 128
#define SUBMIT_RETIRE_TICKS_SIZE 7
struct kgsl_device;
@@ -32,20 +32,21 @@ struct kgsl_context;
* @internal_timestamp: Global timestamp of the last issued command
* NOTE: guarded by device->mutex, not drawctxt->mutex!
* @type: Context type (GL, CL, RS)
- * @mutex: Mutex to protect the cmdqueue
- * @cmdqueue: Queue of command batches waiting to be dispatched for this context
- * @cmdqueue_head: Head of the cmdqueue queue
- * @cmdqueue_tail: Tail of the cmdqueue queue
+ * @mutex: Mutex to protect the drawqueue
+ * @drawqueue: Queue of drawobjs waiting to be dispatched for this
+ * context
+ * @drawqueue_head: Head of the drawqueue
+ * @drawqueue_tail: Tail of the drawqueue
* @pending: Priority list node for the dispatcher list of pending contexts
* @wq: Workqueue structure for contexts to sleep pending room in the queue
* @waiting: Workqueue structure for contexts waiting for a timestamp or event
- * @queued: Number of commands queued in the cmdqueue
- * @fault_policy: GFT fault policy set in cmdbatch_skip_cmd();
+ * @queued: Number of commands queued in the drawqueue
+ * @fault_policy: GFT fault policy set in drawobj_skip_cmd();
* @debug_root: debugfs entry for this context.
* @queued_timestamp: The last timestamp that was queued on this context
* @rb: The ringbuffer in which this context submits commands.
* @submitted_timestamp: The last timestamp that was submitted for this context
- * @submit_retire_ticks: Array to hold cmdbatch execution times from submit
+ * @submit_retire_ticks: Array to hold drawobj execution times from submit
* to retire
* @ticks_index: The index into submit_retire_ticks[] where the new delta will
* be written.
@@ -60,9 +61,9 @@ struct adreno_context {
spinlock_t lock;
/* Dispatcher */
- struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE];
- unsigned int cmdqueue_head;
- unsigned int cmdqueue_tail;
+ struct kgsl_drawobj *drawqueue[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
+ unsigned int drawqueue_head;
+ unsigned int drawqueue_tail;
struct plist_node pending;
wait_queue_head_t wq;
@@ -92,7 +93,7 @@ struct adreno_context {
* @ADRENO_CONTEXT_SKIP_EOF - Context skip IBs until the next end of frame
* marker.
* @ADRENO_CONTEXT_FORCE_PREAMBLE - Force the preamble for the next submission.
- * @ADRENO_CONTEXT_SKIP_CMD - Context's command batch is skipped during
+ * @ADRENO_CONTEXT_SKIP_CMD - Context's drawobj is skipped during
fault tolerance.
*/
enum adreno_context_priv {
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 07ef09034d7c..de84aad4668c 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -702,7 +702,7 @@ static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv,
int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj,
uint32_t *timestamp)
{
struct kgsl_device *device = dev_priv->device;
@@ -715,29 +715,29 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
return -EDEADLK;
/* Verify the IBs before they get queued */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node)
+ list_for_each_entry(ib, &drawobj->cmdlist, node)
if (_ringbuffer_verify_ib(dev_priv, context, ib) == false)
return -EINVAL;
/* wait for the suspend gate */
- wait_for_completion(&device->cmdbatch_gate);
+ wait_for_completion(&device->halt_gate);
/*
* Clear the wake on touch bit to indicate an IB has been
* submitted since the last time we set it. But only clear
* it when we have rendering commands.
*/
- if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)
- && !(cmdbatch->flags & KGSL_CMDBATCH_SYNC))
+ if (!(drawobj->flags & KGSL_DRAWOBJ_MARKER)
+ && !(drawobj->flags & KGSL_DRAWOBJ_SYNC))
device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
- /* A3XX does not have support for command batch profiling */
+ /* A3XX does not have support for drawobj profiling */
if (adreno_is_a3xx(adreno_dev) &&
- (cmdbatch->flags & KGSL_CMDBATCH_PROFILING))
+ (drawobj->flags & KGSL_DRAWOBJ_PROFILING))
return -EOPNOTSUPP;
/* Queue the command in the ringbuffer */
- ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
+ ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, drawobj,
timestamp);
/*
@@ -751,16 +751,16 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
}
static void adreno_ringbuffer_set_constraint(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
- struct kgsl_context *context = cmdbatch->context;
+ struct kgsl_context *context = drawobj->context;
/*
* Check if the context has a constraint and constraint flags are
* set.
*/
if (context->pwr_constraint.type &&
((context->flags & KGSL_CONTEXT_PWR_CONSTRAINT) ||
- (cmdbatch->flags & KGSL_CONTEXT_PWR_CONSTRAINT)))
+ (drawobj->flags & KGSL_CONTEXT_PWR_CONSTRAINT)))
kgsl_pwrctrl_set_constraint(device, &context->pwr_constraint,
context->id);
}
@@ -792,7 +792,7 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
/* adreno_ringbuffer_submitcmd - submit userspace IBs to the GPU */
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time)
+ struct kgsl_drawobj *drawobj, struct adreno_submit_time *time)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
@@ -803,25 +803,25 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
struct kgsl_context *context;
struct adreno_context *drawctxt;
bool use_preamble = true;
- bool cmdbatch_user_profiling = false;
- bool cmdbatch_kernel_profiling = false;
+ bool drawobj_user_profiling = false;
+ bool drawobj_kernel_profiling = false;
int flags = KGSL_CMD_FLAGS_NONE;
int ret;
struct adreno_ringbuffer *rb;
- struct kgsl_cmdbatch_profiling_buffer *profile_buffer = NULL;
+ struct kgsl_drawobj_profiling_buffer *profile_buffer = NULL;
unsigned int dwords = 0;
struct adreno_submit_time local;
- struct kgsl_mem_entry *entry = cmdbatch->profiling_buf_entry;
+ struct kgsl_mem_entry *entry = drawobj->profiling_buf_entry;
if (entry)
profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
- cmdbatch->profiling_buffer_gpuaddr);
+ drawobj->profiling_buffer_gpuaddr);
- context = cmdbatch->context;
+ context = drawobj->context;
drawctxt = ADRENO_CONTEXT(context);
/* Get the total IBs in the list */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node)
+ list_for_each_entry(ib, &drawobj->cmdlist, node)
numibs++;
rb = drawctxt->rb;
@@ -838,14 +838,14 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
* c) force preamble for the command batch
*/
if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
- (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))) {
+ (!test_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv))) {
- set_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_recovery);
- cmdbatch->fault_policy = drawctxt->fault_policy;
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ set_bit(KGSL_FT_SKIPCMD, &drawobj->fault_recovery);
+ drawobj->fault_policy = drawctxt->fault_policy;
+ set_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &drawobj->priv);
/* if context is detached print fault recovery */
- adreno_fault_skipcmd_detached(adreno_dev, drawctxt, cmdbatch);
+ adreno_fault_skipcmd_detached(adreno_dev, drawctxt, drawobj);
/* clear the drawctxt flags */
clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
@@ -857,7 +857,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
if a context switch hasn't occurred */
if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) &&
- !test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) &&
+ !test_bit(DRAWOBJ_FLAG_FORCE_PREAMBLE, &drawobj->priv) &&
(rb->drawctxt_active == drawctxt))
use_preamble = false;
@@ -867,7 +867,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
* the accounting sane. Set start_index and numibs to 0 to just
* generate the start and end markers and skip everything else
*/
- if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) {
+ if (test_bit(DRAWOBJ_FLAG_SKIP, &drawobj->priv)) {
use_preamble = false;
numibs = 0;
}
@@ -884,9 +884,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
/* Each IB takes up 30 dwords in worst case */
dwords += (numibs * 30);
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING &&
+ if (drawobj->flags & KGSL_DRAWOBJ_PROFILING &&
!adreno_is_a3xx(adreno_dev) && profile_buffer) {
- cmdbatch_user_profiling = true;
+ drawobj_user_profiling = true;
dwords += 6;
/*
@@ -907,8 +907,8 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
time = &local;
}
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv)) {
- cmdbatch_kernel_profiling = true;
+ if (test_bit(DRAWOBJ_FLAG_PROFILE, &drawobj->priv)) {
+ drawobj_kernel_profiling = true;
dwords += 6;
if (adreno_is_a5xx(adreno_dev))
dwords += 2;
@@ -929,26 +929,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
*cmds++ = cp_packet(adreno_dev, CP_NOP, 1);
*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
- if (cmdbatch_kernel_profiling) {
+ if (drawobj_kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- adreno_dev->cmdbatch_profile_buffer.gpuaddr +
- ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index,
+ adreno_dev->profile_buffer.gpuaddr +
+ ADRENO_DRAWOBJ_PROFILE_OFFSET(drawobj->profile_index,
started));
}
/*
- * Add cmds to read the GPU ticks at the start of the cmdbatch and
- * write it into the appropriate cmdbatch profiling buffer offset
+ * Add cmds to read the GPU ticks at the start of the drawobj and
+ * write it into the appropriate drawobj profiling buffer offset
*/
- if (cmdbatch_user_profiling) {
+ if (drawobj_user_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- cmdbatch->profiling_buffer_gpuaddr +
- offsetof(struct kgsl_cmdbatch_profiling_buffer,
+ drawobj->profiling_buffer_gpuaddr +
+ offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_submitted));
}
if (numibs) {
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &drawobj->cmdlist, node) {
/*
* Skip 0 sized IBs - these are presumed to have been
* removed from consideration by the FT policy
@@ -972,21 +972,21 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
adreno_is_preemption_enabled(adreno_dev))
cmds += gpudev->preemption_yield_enable(cmds);
- if (cmdbatch_kernel_profiling) {
+ if (drawobj_kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- adreno_dev->cmdbatch_profile_buffer.gpuaddr +
- ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index,
+ adreno_dev->profile_buffer.gpuaddr +
+ ADRENO_DRAWOBJ_PROFILE_OFFSET(drawobj->profile_index,
retired));
}
/*
- * Add cmds to read the GPU ticks at the end of the cmdbatch and
- * write it into the appropriate cmdbatch profiling buffer offset
+ * Add cmds to read the GPU ticks at the end of the drawobj and
+ * write it into the appropriate drawobj profiling buffer offset
*/
- if (cmdbatch_user_profiling) {
+ if (drawobj_user_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- cmdbatch->profiling_buffer_gpuaddr +
- offsetof(struct kgsl_cmdbatch_profiling_buffer,
+ drawobj->profiling_buffer_gpuaddr +
+ offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_retired));
}
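
Both profiling hooks turn "which slot, which field" into a GPU-visible byte address: the kernel path scales profile_index by the entry size via ADRENO_DRAWOBJ_PROFILE_OFFSET(), while the user path adds an offsetof() into the shared kgsl_drawobj_profiling_buffer. The arithmetic in miniature, with an illustrative struct layout rather than the exact UAPI one:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct profiling_entry {
        uint64_t wall_clock_ns;
        uint64_t gpu_ticks_submitted;
        uint64_t gpu_ticks_retired;
    };

    /* byte offset of field 'f' in entry 'i' of an array of entries */
    #define ENTRY_OFFSET(i, f) \
        ((i) * sizeof(struct profiling_entry) + \
         offsetof(struct profiling_entry, f))

    int main(void)
    {
        uint64_t base = 0x100000;    /* hypothetical GPU virtual address */

        printf("entry 3 submitted ticks at 0x%llx\n",
               (unsigned long long)(base +
                   ENTRY_OFFSET(3, gpu_ticks_submitted)));
        return 0;
    }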
@@ -1012,7 +1012,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
goto done;
}
- if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv))
+ if (test_bit(DRAWOBJ_FLAG_WFI, &drawobj->priv))
flags = KGSL_CMD_FLAGS_WFI;
/*
@@ -1025,26 +1025,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
flags |= KGSL_CMD_FLAGS_PWRON_FIXUP;
/* Set the constraints before adding to ringbuffer */
- adreno_ringbuffer_set_constraint(device, cmdbatch);
+ adreno_ringbuffer_set_constraint(device, drawobj);
/* CFF stuff executed only if CFF is enabled */
- kgsl_cffdump_capture_ib_desc(device, context, cmdbatch);
+ kgsl_cffdump_capture_ib_desc(device, context, drawobj);
ret = adreno_ringbuffer_addcmds(rb, flags,
&link[0], (cmds - link),
- cmdbatch->timestamp, time);
+ drawobj->timestamp, time);
if (!ret) {
- cmdbatch->global_ts = drawctxt->internal_timestamp;
+ drawobj->global_ts = drawctxt->internal_timestamp;
/* Put the timevalues in the profiling buffer */
- if (cmdbatch_user_profiling) {
+ if (drawobj_user_profiling) {
/*
* Return kernel clock time to the client
* if requested
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING_KTIME) {
+ if (drawobj->flags & KGSL_DRAWOBJ_PROFILING_KTIME) {
uint64_t secs = time->ktime;
profile_buffer->wall_clock_ns =
@@ -1069,9 +1069,9 @@ done:
kgsl_memdesc_unmap(&entry->memdesc);
- trace_kgsl_issueibcmds(device, context->id, cmdbatch,
- numibs, cmdbatch->timestamp,
- cmdbatch->flags, ret, drawctxt->type);
+ trace_kgsl_issueibcmds(device, context->id, drawobj,
+ numibs, drawobj->timestamp,
+ drawobj->flags, ret, drawctxt->type);
kfree(link);
return ret;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index b126f710b5e6..3cf4c23bd4c8 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -119,7 +119,7 @@ struct adreno_ringbuffer {
struct adreno_context *drawctxt_active;
struct kgsl_memdesc preemption_desc;
struct kgsl_memdesc pagetable_desc;
- struct adreno_dispatcher_cmdqueue dispatch_q;
+ struct adreno_dispatcher_drawqueue dispatch_q;
wait_queue_head_t ts_expire_waitq;
unsigned int wptr_preempt_end;
unsigned int gpr11;
@@ -136,11 +136,11 @@ int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds, int set);
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj,
uint32_t *timestamp);
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj,
struct adreno_submit_time *time);
int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt);
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index f52ddfa894d5..e021efcd2676 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -27,8 +27,8 @@
#include "adreno_a5xx.h"
TRACE_EVENT(adreno_cmdbatch_queued,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int queued),
- TP_ARGS(cmdbatch, queued),
+ TP_PROTO(struct kgsl_drawobj *drawobj, unsigned int queued),
+ TP_ARGS(drawobj, queued),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -37,26 +37,26 @@ TRACE_EVENT(adreno_cmdbatch_queued,
__field(unsigned int, prio)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->queued = queued;
- __entry->flags = cmdbatch->flags;
- __entry->prio = cmdbatch->context->priority;
+ __entry->flags = drawobj->flags;
+ __entry->prio = drawobj->context->priority;
),
TP_printk(
"ctx=%u ctx_prio=%u ts=%u queued=%u flags=%s",
__entry->id, __entry->prio,
__entry->timestamp, __entry->queued,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none"
+ KGSL_DRAWOBJ_FLAGS) : "none"
)
);
TRACE_EVENT(adreno_cmdbatch_submitted,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight, uint64_t ticks,
+ TP_PROTO(struct kgsl_drawobj *drawobj, int inflight, uint64_t ticks,
unsigned long secs, unsigned long usecs,
struct adreno_ringbuffer *rb, unsigned int rptr),
- TP_ARGS(cmdbatch, inflight, ticks, secs, usecs, rb, rptr),
+ TP_ARGS(drawobj, inflight, ticks, secs, usecs, rb, rptr),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -72,14 +72,14 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__field(int, q_inflight)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->inflight = inflight;
- __entry->flags = cmdbatch->flags;
+ __entry->flags = drawobj->flags;
__entry->ticks = ticks;
__entry->secs = secs;
__entry->usecs = usecs;
- __entry->prio = cmdbatch->context->priority;
+ __entry->prio = drawobj->context->priority;
__entry->rb_id = rb->id;
__entry->rptr = rptr;
__entry->wptr = rb->wptr;
@@ -90,7 +90,7 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__entry->id, __entry->prio, __entry->timestamp,
__entry->inflight,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none",
+ KGSL_DRAWOBJ_FLAGS) : "none",
__entry->ticks, __entry->secs, __entry->usecs,
__entry->rb_id, __entry->rptr, __entry->wptr,
__entry->q_inflight
@@ -98,10 +98,10 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
);
TRACE_EVENT(adreno_cmdbatch_retired,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight,
+ TP_PROTO(struct kgsl_drawobj *drawobj, int inflight,
uint64_t start, uint64_t retire,
struct adreno_ringbuffer *rb, unsigned int rptr),
- TP_ARGS(cmdbatch, inflight, start, retire, rb, rptr),
+ TP_ARGS(drawobj, inflight, start, retire, rb, rptr),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -117,14 +117,14 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__field(int, q_inflight)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->inflight = inflight;
- __entry->recovery = cmdbatch->fault_recovery;
- __entry->flags = cmdbatch->flags;
+ __entry->recovery = drawobj->fault_recovery;
+ __entry->flags = drawobj->flags;
__entry->start = start;
__entry->retire = retire;
- __entry->prio = cmdbatch->context->priority;
+ __entry->prio = drawobj->context->priority;
__entry->rb_id = rb->id;
__entry->rptr = rptr;
__entry->wptr = rb->wptr;
@@ -138,7 +138,7 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__print_flags(__entry->recovery, "|",
ADRENO_FT_TYPES) : "none",
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none",
+ KGSL_DRAWOBJ_FLAGS) : "none",
__entry->start,
__entry->retire,
__entry->rb_id, __entry->rptr, __entry->wptr,
@@ -147,16 +147,16 @@ TRACE_EVENT(adreno_cmdbatch_retired,
);
TRACE_EVENT(adreno_cmdbatch_fault,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int fault),
- TP_ARGS(cmdbatch, fault),
+ TP_PROTO(struct kgsl_drawobj *drawobj, unsigned int fault),
+ TP_ARGS(drawobj, fault),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
__field(unsigned int, fault)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->fault = fault;
),
TP_printk(
@@ -171,16 +171,16 @@ TRACE_EVENT(adreno_cmdbatch_fault,
);
TRACE_EVENT(adreno_cmdbatch_recovery,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int action),
- TP_ARGS(cmdbatch, action),
+ TP_PROTO(struct kgsl_drawobj *drawobj, unsigned int action),
+ TP_ARGS(drawobj, action),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
__field(unsigned int, action)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->action = action;
),
TP_printk(
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 88581b079246..70eae9f27991 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -36,7 +36,7 @@
#include "kgsl_cffdump.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_sync.h"
@@ -1497,11 +1497,16 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_ringbuffer_issueibcmds *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj *drawobj = NULL;
long result = -EINVAL;
/* The legacy functions don't support synchronization commands */
- if ((param->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER)))
+ if ((param->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER)))
+ return -EINVAL;
+
+ /* Sanity check the number of IBs */
+ if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST &&
+ (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS))
return -EINVAL;
/* Get the context */
@@ -1509,23 +1514,18 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
- /* Create a command batch */
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ /* Create a drawobj */
+ drawobj = kgsl_drawobj_create(device, context, param->flags);
+ if (IS_ERR(drawobj)) {
+ kgsl_context_put(context);
+ return PTR_ERR(drawobj);
}
- if (param->flags & KGSL_CMDBATCH_SUBMIT_IB_LIST) {
- /* Sanity check the number of IBs */
- if (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS) {
- result = -EINVAL;
- goto done;
- }
- result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch,
+ if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST)
+ result = kgsl_drawobj_add_ibdesc_list(device, drawobj,
(void __user *) param->ibdesc_addr,
param->numibs);
- } else {
+ else {
struct kgsl_ibdesc ibdesc;
/* Ultra legacy path */
@@ -1533,22 +1533,19 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
ibdesc.sizedwords = param->numibs;
ibdesc.ctrl = 0;
- result = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ result = kgsl_drawobj_add_ibdesc(device, drawobj, &ibdesc);
}
- if (result)
- goto done;
-
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ if (result == 0)
+ result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
+ drawobj, &param->timestamp);
-done:
/*
* -EPROTO is a "success" error - it just tells the user that the
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
kgsl_context_put(context);
return result;
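
The reworked ioctl validates numibs before anything is allocated and funnels every failure through a single teardown, honoring the convention spelled out in the comment: -EPROTO means the submission was accepted on a previously faulted context, so the object must not be destroyed. The same shape in a standalone sketch, with calloc()/free() standing in for drawobj create/destroy:

    #include <errno.h>
    #include <stdlib.h>

    #define MAX_NUMIBS 100000    /* stand-in for KGSL_MAX_NUMIBS */

    struct drawobj { unsigned int numibs; };

    static long submit_ioctl(unsigned int numibs)
    {
        struct drawobj *obj;
        long result;

        /* validate cheap invariants before creating anything */
        if (numibs == 0 || numibs > MAX_NUMIBS)
            return -EINVAL;

        obj = calloc(1, sizeof(*obj));
        if (!obj)
            return -ENOMEM;
        obj->numibs = numibs;

        result = 0;    /* ... add the IB list and issue; may yield -EPROTO ... */

        /*
         * Mirror the -EPROTO convention: that "error" still hands the
         * object to the dispatcher, so only a real failure frees it.
         */
        if (result && result != -EPROTO)
            free(obj);
        return result;
    }

    int main(void)
    {
        return (int)submit_ioctl(1);    /* 0 on the happy path */
    }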
@@ -1560,56 +1557,53 @@ long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
struct kgsl_submit_commands *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj *drawobj = NULL;
long result = -EINVAL;
/*
* The SYNC bit is supposed to identify a dummy sync object so warn the
* user if they specified any IBs with it. A MARKER command can either
* have IBs or not but if the command has 0 IBs it is automatically
- * assumed to be a marker. If none of the above make sure that the user
- * specified a sane number of IBs
+ * assumed to be a marker.
*/
- if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
+ if ((param->flags & KGSL_DRAWOBJ_SYNC) && param->numcmds)
KGSL_DEV_ERR_ONCE(device,
"Commands specified with the SYNC flag. They will be ignored\n");
- else if (param->numcmds > KGSL_MAX_NUMIBS)
- return -EINVAL;
- else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
- param->flags |= KGSL_CMDBATCH_MARKER;
+ else if (!(param->flags & KGSL_DRAWOBJ_SYNC) && param->numcmds == 0)
+ param->flags |= KGSL_DRAWOBJ_MARKER;
- /* Make sure that we don't have too many syncpoints */
- if (param->numsyncs > KGSL_MAX_SYNCPOINTS)
+ if (param->numcmds > KGSL_MAX_NUMIBS ||
+ param->numsyncs > KGSL_MAX_SYNCPOINTS)
return -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context == NULL)
return -EINVAL;
- /* Create a command batch */
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
+ /* Create a drawobj */
+ drawobj = kgsl_drawobj_create(device, context, param->flags);
+ if (IS_ERR(drawobj)) {
+ result = PTR_ERR(drawobj);
goto done;
}
- result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch,
+ result = kgsl_drawobj_add_ibdesc_list(device, drawobj,
param->cmdlist, param->numcmds);
if (result)
goto done;
- result = kgsl_cmdbatch_add_syncpoints(device, cmdbatch,
+ result = kgsl_drawobj_add_syncpoints(device, drawobj,
param->synclist, param->numsyncs);
if (result)
goto done;
/* If no profiling buffer was specified, clear the flag */
- if (cmdbatch->profiling_buf_entry == NULL)
- cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING;
+ if (drawobj->profiling_buf_entry == NULL)
+ drawobj->flags &= ~KGSL_DRAWOBJ_PROFILING;
result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ drawobj, &param->timestamp);
done:
/*
@@ -1617,7 +1611,7 @@ done:
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
kgsl_context_put(context);
return result;
@@ -1629,7 +1623,7 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
struct kgsl_gpu_command *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj *drawobj = NULL;
long result = -EINVAL;
@@ -1640,11 +1634,11 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
* assumed to be a marker. If none of the above, make sure that the user
* specified a sane number of IBs
*/
- if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
+ if ((param->flags & KGSL_DRAWOBJ_SYNC) && param->numcmds)
KGSL_DEV_ERR_ONCE(device,
"Commands specified with the SYNC flag. They will be ignored\n");
- else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
- param->flags |= KGSL_CMDBATCH_MARKER;
+ else if (!(param->flags & KGSL_DRAWOBJ_SYNC) && param->numcmds == 0)
+ param->flags |= KGSL_DRAWOBJ_MARKER;
/* Make sure that the memobj and syncpoint count isn't too big */
if (param->numcmds > KGSL_MAX_NUMIBS ||
@@ -1656,36 +1650,36 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
+ drawobj = kgsl_drawobj_create(device, context, param->flags);
+ if (IS_ERR(drawobj)) {
+ result = PTR_ERR(drawobj);
goto done;
}
- result = kgsl_cmdbatch_add_cmdlist(device, cmdbatch,
+ result = kgsl_drawobj_add_cmdlist(device, drawobj,
to_user_ptr(param->cmdlist),
param->cmdsize, param->numcmds);
if (result)
goto done;
- result = kgsl_cmdbatch_add_memlist(device, cmdbatch,
+ result = kgsl_drawobj_add_memlist(device, drawobj,
to_user_ptr(param->objlist),
param->objsize, param->numobjs);
if (result)
goto done;
- result = kgsl_cmdbatch_add_synclist(device, cmdbatch,
+ result = kgsl_drawobj_add_synclist(device, drawobj,
to_user_ptr(param->synclist),
param->syncsize, param->numsyncs);
if (result)
goto done;
/* If no profiling buffer was specified, clear the flag */
- if (cmdbatch->profiling_buf_entry == NULL)
- cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING;
+ if (drawobj->profiling_buf_entry == NULL)
+ drawobj->flags &= ~KGSL_DRAWOBJ_PROFILING;
result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ drawobj, &param->timestamp);
done:
/*
@@ -1693,7 +1687,7 @@ done:
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
kgsl_context_put(context);
return result;
@@ -4600,7 +4594,7 @@ static void kgsl_core_exit(void)
kgsl_driver.class = NULL;
}
- kgsl_cmdbatch_exit();
+ kgsl_drawobj_exit();
kgsl_memfree_exit();
unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
@@ -4676,7 +4670,7 @@ static int __init kgsl_core_init(void)
kgsl_events_init();
- result = kgsl_cmdbatch_init();
+ result = kgsl_drawobj_init();
if (result)
goto err;
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 25f5de6ce645..c4fc699f184c 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -28,6 +28,25 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
+/*
+ * --- kgsl drawobj flags ---
+ * These flags are the same as the --- cmdbatch flags ---
+ * but renamed to reflect that cmdbatch has been renamed to drawobj.
+ */
+#define KGSL_DRAWOBJ_MEMLIST KGSL_CMDBATCH_MEMLIST
+#define KGSL_DRAWOBJ_MARKER KGSL_CMDBATCH_MARKER
+#define KGSL_DRAWOBJ_SUBMIT_IB_LIST KGSL_CMDBATCH_SUBMIT_IB_LIST
+#define KGSL_DRAWOBJ_CTX_SWITCH KGSL_CMDBATCH_CTX_SWITCH
+#define KGSL_DRAWOBJ_PROFILING KGSL_CMDBATCH_PROFILING
+#define KGSL_DRAWOBJ_PROFILING_KTIME KGSL_CMDBATCH_PROFILING_KTIME
+#define KGSL_DRAWOBJ_END_OF_FRAME KGSL_CMDBATCH_END_OF_FRAME
+#define KGSL_DRAWOBJ_SYNC KGSL_CMDBATCH_SYNC
+#define KGSL_DRAWOBJ_PWR_CONSTRAINT KGSL_CMDBATCH_PWR_CONSTRAINT
+#define KGSL_DRAWOBJ_SPARSE KGSL_CMDBATCH_SPARSE
+
+#define kgsl_drawobj_profiling_buffer kgsl_cmdbatch_profiling_buffer
+
+
/* The number of memstore arrays limits the number of contexts allowed.
* If more contexts are needed, update multiple for MEMSTORE_SIZE
*/
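
The block above is pure aliasing: each KGSL_DRAWOBJ_* name expands to the existing KGSL_CMDBATCH_* flag from the UAPI header, so the kernel-internal rename costs nothing at the ABI boundary. The same pattern in miniature, with illustrative values rather than the real UAPI ones:

    #include <stdio.h>

    /* published names whose values are frozen by the ABI */
    #define KGSL_CMDBATCH_MARKER    0x00000002
    #define KGSL_CMDBATCH_SYNC      0x00000400

    /* new spellings alias the old values bit for bit */
    #define KGSL_DRAWOBJ_MARKER     KGSL_CMDBATCH_MARKER
    #define KGSL_DRAWOBJ_SYNC       KGSL_CMDBATCH_SYNC

    int main(void)
    {
        printf("%d\n", KGSL_DRAWOBJ_MARKER == KGSL_CMDBATCH_MARKER);  /* 1 */
        return 0;
    }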
diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c
index 8e783f8ce017..62bb0146ba2e 100644
--- a/drivers/gpu/msm/kgsl_cffdump.c
+++ b/drivers/gpu/msm/kgsl_cffdump.c
@@ -705,7 +705,7 @@ static int kgsl_cffdump_capture_adreno_ib_cff(struct kgsl_device *device,
*/
int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
int ret = 0;
struct kgsl_memobj_node *ib;
@@ -713,7 +713,7 @@ int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
if (!device->cff_dump_enable)
return 0;
/* Dump CFF for IB and all objects in it */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &drawobj->cmdlist, node) {
ret = kgsl_cffdump_capture_adreno_ib_cff(
device, context->proc_priv, ib->gpuaddr,
ib->size >> 2);
diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h
index 315a097ba817..928cde1c8771 100644
--- a/drivers/gpu/msm/kgsl_cffdump.h
+++ b/drivers/gpu/msm/kgsl_cffdump.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011,2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2011,2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -58,7 +58,7 @@ int kgsl_cff_dump_enable_set(void *data, u64 val);
int kgsl_cff_dump_enable_get(void *data, u64 *val);
int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch);
+ struct kgsl_drawobj *drawobj);
void kgsl_cffdump_printline(int id, uint opcode, uint op1, uint op2,
uint op3, uint op4, uint op5);
@@ -164,7 +164,7 @@ static inline void kgsl_cffdump_user_event(struct kgsl_device *device,
static inline int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.h b/drivers/gpu/msm/kgsl_cmdbatch.h
deleted file mode 100644
index d5cbf375b5d3..000000000000
--- a/drivers/gpu/msm/kgsl_cmdbatch.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __KGSL_CMDBATCH_H
-#define __KGSL_CMDBATCH_H
-
-#define KGSL_CMDBATCH_FLAGS \
- { KGSL_CMDBATCH_MARKER, "MARKER" }, \
- { KGSL_CMDBATCH_CTX_SWITCH, "CTX_SWITCH" }, \
- { KGSL_CMDBATCH_SYNC, "SYNC" }, \
- { KGSL_CMDBATCH_END_OF_FRAME, "EOF" }, \
- { KGSL_CMDBATCH_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \
- { KGSL_CMDBATCH_SUBMIT_IB_LIST, "IB_LIST" }
-
-/**
- * struct kgsl_cmdbatch - KGSl command descriptor
- * @device: KGSL GPU device that the command was created for
- * @context: KGSL context that created the command
- * @timestamp: Timestamp assigned to the command
- * @flags: flags
- * @priv: Internal flags
- * @fault_policy: Internal policy describing how to handle this command in case
- * of a fault
- * @fault_recovery: recovery actions actually tried for this batch
- * @refcount: kref structure to maintain the reference count
- * @cmdlist: List of IBs to issue
- * @memlist: List of all memory used in this command batch
- * @synclist: Array of context/timestamp tuples to wait for before issuing
- * @numsyncs: Number of sync entries in the array
- * @pending: Bitmask of sync events that are active
- * @timer: a timer used to track possible sync timeouts for this cmdbatch
- * @marker_timestamp: For markers, the timestamp of the last "real" command that
- * was queued
- * @profiling_buf_entry: Mem entry containing the profiling buffer
- * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here
- * for easy access
- * @profile_index: Index to store the start/stop ticks in the kernel profiling
- * buffer
- * @submit_ticks: Variable to hold ticks at the time of cmdbatch submit.
- * @global_ts: The ringbuffer timestamp corresponding to this cmdbatch
- * @timeout_jiffies: For a syncpoint cmdbatch the jiffies at which the
- * timer will expire
- * This structure defines an atomic batch of command buffers issued from
- * userspace.
- */
-struct kgsl_cmdbatch {
- struct kgsl_device *device;
- struct kgsl_context *context;
- uint32_t timestamp;
- uint32_t flags;
- unsigned long priv;
- unsigned long fault_policy;
- unsigned long fault_recovery;
- struct kref refcount;
- struct list_head cmdlist;
- struct list_head memlist;
- struct kgsl_cmdbatch_sync_event *synclist;
- unsigned int numsyncs;
- unsigned long pending;
- struct timer_list timer;
- unsigned int marker_timestamp;
- struct kgsl_mem_entry *profiling_buf_entry;
- uint64_t profiling_buffer_gpuaddr;
- unsigned int profile_index;
- uint64_t submit_ticks;
- unsigned int global_ts;
- unsigned long timeout_jiffies;
-};
-
-/**
- * struct kgsl_cmdbatch_sync_event
- * @id: identifer (positiion within the pending bitmap)
- * @type: Syncpoint type
- * @cmdbatch: Pointer to the cmdbatch that owns the sync event
- * @context: Pointer to the KGSL context that owns the cmdbatch
- * @timestamp: Pending timestamp for the event
- * @handle: Pointer to a sync fence handle
- * @device: Pointer to the KGSL device
- */
-struct kgsl_cmdbatch_sync_event {
- unsigned int id;
- int type;
- struct kgsl_cmdbatch *cmdbatch;
- struct kgsl_context *context;
- unsigned int timestamp;
- struct kgsl_sync_fence_waiter *handle;
- struct kgsl_device *device;
-};
-
-/**
- * enum kgsl_cmdbatch_priv - Internal cmdbatch flags
- * @CMDBATCH_FLAG_SKIP - skip the entire command batch
- * @CMDBATCH_FLAG_FORCE_PREAMBLE - Force the preamble on for the cmdbatch
- * @CMDBATCH_FLAG_WFI - Force wait-for-idle for the submission
- * @CMDBATCH_FLAG_PROFILE - store the start / retire ticks for the command batch
- * in the profiling buffer
- * @CMDBATCH_FLAG_FENCE_LOG - Set if the cmdbatch is dumping fence logs via the
- * cmdbatch timer - this is used to avoid recursion
- */
-
-enum kgsl_cmdbatch_priv {
- CMDBATCH_FLAG_SKIP = 0,
- CMDBATCH_FLAG_FORCE_PREAMBLE,
- CMDBATCH_FLAG_WFI,
- CMDBATCH_FLAG_PROFILE,
- CMDBATCH_FLAG_FENCE_LOG,
-};
-
-
-int kgsl_cmdbatch_add_memobj(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_ibdesc *ibdesc);
-
-int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmd_syncpoint *sync);
-
-struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int flags);
-int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc);
-int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count);
-int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count);
-int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-
-int kgsl_cmdbatch_init(void);
-void kgsl_cmdbatch_exit(void);
-
-void kgsl_dump_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch);
-
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch);
-
-void kgsl_cmdbatch_destroy_object(struct kref *kref);
-
-static inline bool kgsl_cmdbatch_events_pending(struct kgsl_cmdbatch *cmdbatch)
-{
- return !bitmap_empty(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
-}
-
-static inline bool kgsl_cmdbatch_event_pending(struct kgsl_cmdbatch *cmdbatch,
- unsigned int bit)
-{
- if (bit >= KGSL_MAX_SYNCPOINTS)
- return false;
-
- return test_bit(bit, &cmdbatch->pending);
-}
-
-#endif /* __KGSL_CMDBATCH_H */
diff --git a/drivers/gpu/msm/kgsl_compat.h b/drivers/gpu/msm/kgsl_compat.h
index ca1685e5fcf5..7681d74fb108 100644
--- a/drivers/gpu/msm/kgsl_compat.h
+++ b/drivers/gpu/msm/kgsl_compat.h
@@ -236,8 +236,8 @@ static inline compat_size_t sizet_to_compat(size_t size)
return (compat_size_t)size;
}
-int kgsl_cmdbatch_create_compat(struct kgsl_device *device, unsigned int flags,
- struct kgsl_cmdbatch *cmdbatch, void __user *cmdlist,
+int kgsl_drawobj_create_compat(struct kgsl_device *device, unsigned int flags,
+ struct kgsl_drawobj *drawobj, void __user *cmdlist,
unsigned int numcmds, void __user *synclist,
unsigned int numsyncs);
@@ -245,8 +245,8 @@ long kgsl_compat_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg);
#else
-static inline int kgsl_cmdbatch_create_compat(struct kgsl_device *device,
- unsigned int flags, struct kgsl_cmdbatch *cmdbatch,
+static inline int kgsl_drawobj_create_compat(struct kgsl_device *device,
+ unsigned int flags, struct kgsl_drawobj *drawobj,
void __user *cmdlist, unsigned int numcmds,
void __user *synclist, unsigned int numsyncs)
{
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 0df6dd8628a5..ef07a8594870 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -25,7 +25,7 @@
#include "kgsl_pwrscale.h"
#include "kgsl_snapshot.h"
#include "kgsl_sharedmem.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#define KGSL_IOCTL_FUNC(_cmd, _func) \
[_IOC_NR((_cmd))] = \
@@ -128,7 +128,7 @@ struct kgsl_functable {
int (*readtimestamp) (struct kgsl_device *device, void *priv,
enum kgsl_timestamp_type type, unsigned int *timestamp);
int (*issueibcmds) (struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj,
uint32_t *timestamps);
void (*power_stats)(struct kgsl_device *device,
struct kgsl_power_stats *stats);
@@ -184,7 +184,7 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd, unsigned long arg,
/**
* struct kgsl_memobj_node - Memory object descriptor
- * @node: Local list node for the cmdbatch
+ * @node: Local list node for the drawobj
* @id: GPU memory ID for the object
* @offset: Offset within the object
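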
* @gpuaddr: GPU address for the object
@@ -233,7 +233,7 @@ struct kgsl_device {
struct kgsl_mmu mmu;
struct completion hwaccess_gate;
- struct completion cmdbatch_gate;
+ struct completion halt_gate;
const struct kgsl_functable *ftbl;
struct work_struct idle_check_ws;
struct timer_list idle_timer;
@@ -288,7 +288,7 @@ struct kgsl_device {
#define KGSL_DEVICE_COMMON_INIT(_dev) \
.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
- .cmdbatch_gate = COMPLETION_INITIALIZER((_dev).cmdbatch_gate),\
+ .halt_gate = COMPLETION_INITIALIZER((_dev).halt_gate),\
.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
kgsl_idle_check),\
.context_idr = IDR_INIT((_dev).context_idr),\
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.c b/drivers/gpu/msm/kgsl_drawobj.c
index 6272410ce544..e19859945fa8 100644
--- a/drivers/gpu/msm/kgsl_cmdbatch.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,17 +11,17 @@
*/
/*
- * KGSL command batch management
- * A command batch is a single submission from userland. The cmdbatch
+ * KGSL drawobj management
+ * A drawobj is a single submission from userland. The drawobj
* encapsulates everything about the submission : command buffers, flags and
* sync points.
*
* Sync points are events that need to expire before the
- * cmdbatch can be queued to the hardware. All synpoints are contained in an
- * array of kgsl_cmdbatch_sync_event structs in the command batch. There can be
+ * drawobj can be queued to the hardware. All syncpoints are contained in an
+ * array of kgsl_drawobj_sync_event structs in the drawobj. There can be
* multiple types of events both internal ones (GPU events) and external
* triggers. As the events expire bits are cleared in a pending bitmap stored
- * in the command batch. The GPU will submit the command as soon as the bitmap
+ * in the drawobj. The GPU will submit the command as soon as the bitmap
* goes to zero indicating no more pending events.
*/
@@ -31,7 +31,7 @@
#include "kgsl.h"
#include "kgsl_device.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#include "kgsl_sync.h"
#include "kgsl_trace.h"
#include "kgsl_compat.h"
@@ -43,25 +43,25 @@
static struct kmem_cache *memobjs_cache;
/**
- * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object
- * @cmdbatch: Pointer to the command batch object
+ * kgsl_drawobj_put() - Decrement the refcount for a drawobj object
+ * @drawobj: Pointer to the drawobj object
*/
-static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch)
+static inline void kgsl_drawobj_put(struct kgsl_drawobj *drawobj)
{
- if (cmdbatch)
- kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object);
+ if (drawobj)
+ kref_put(&drawobj->refcount, kgsl_drawobj_destroy_object);
}
void kgsl_dump_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < drawobj->numsyncs; i++) {
+ event = &drawobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(drawobj, i))
continue;
switch (event->type) {
@@ -90,32 +90,32 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
}
}
-static void _kgsl_cmdbatch_timer(unsigned long data)
+static void _kgsl_drawobj_timer(unsigned long data)
{
struct kgsl_device *device;
- struct kgsl_cmdbatch *cmdbatch = (struct kgsl_cmdbatch *) data;
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj *drawobj = (struct kgsl_drawobj *) data;
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- if (cmdbatch == NULL || cmdbatch->context == NULL)
+ if (drawobj == NULL || drawobj->context == NULL)
return;
- device = cmdbatch->context->device;
+ device = drawobj->context->device;
dev_err(device->dev,
"kgsl: possible gpu syncpoint deadlock for context %d timestamp %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ drawobj->context->id, drawobj->timestamp);
- set_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv);
- kgsl_context_dump(cmdbatch->context);
- clear_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv);
+ set_bit(DRAWOBJ_FLAG_FENCE_LOG, &drawobj->priv);
+ kgsl_context_dump(drawobj->context);
+ clear_bit(DRAWOBJ_FLAG_FENCE_LOG, &drawobj->priv);
dev_err(device->dev, " pending events:\n");
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < drawobj->numsyncs; i++) {
+ event = &drawobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(drawobj, i))
continue;
switch (event->type) {
@@ -138,47 +138,47 @@ static void _kgsl_cmdbatch_timer(unsigned long data)
}
/**
- * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object
+ * kgsl_drawobj_destroy_object() - Destroy a drawobj object
* @kref: Pointer to the kref structure for this object
*
- * Actually destroy a command batch object. Called from kgsl_cmdbatch_put
+ * Actually destroy a drawobj object. Called from kgsl_drawobj_put
*/
-void kgsl_cmdbatch_destroy_object(struct kref *kref)
+void kgsl_drawobj_destroy_object(struct kref *kref)
{
- struct kgsl_cmdbatch *cmdbatch = container_of(kref,
- struct kgsl_cmdbatch, refcount);
+ struct kgsl_drawobj *drawobj = container_of(kref,
+ struct kgsl_drawobj, refcount);
- kgsl_context_put(cmdbatch->context);
+ kgsl_context_put(drawobj->context);
- kfree(cmdbatch->synclist);
- kfree(cmdbatch);
+ kfree(drawobj->synclist);
+ kfree(drawobj);
}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object);
+EXPORT_SYMBOL(kgsl_drawobj_destroy_object);
/*
* a generic function to retire a pending sync event and (possibly)
* kick the dispatcher
*/
-static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
- struct kgsl_cmdbatch_sync_event *event)
+static void kgsl_drawobj_sync_expire(struct kgsl_device *device,
+ struct kgsl_drawobj_sync_event *event)
{
/*
* Clear the event from the pending mask - if it is already clear, then
* leave without doing anything useful
*/
- if (!test_and_clear_bit(event->id, &event->cmdbatch->pending))
+ if (!test_and_clear_bit(event->id, &event->drawobj->pending))
return;
/*
* If no more pending events, delete the timer and schedule the command
* for dispatch
*/
- if (!kgsl_cmdbatch_events_pending(event->cmdbatch)) {
- del_timer_sync(&event->cmdbatch->timer);
+ if (!kgsl_drawobj_events_pending(event->drawobj)) {
+ del_timer_sync(&event->drawobj->timer);
if (device->ftbl->drawctxt_sched)
device->ftbl->drawctxt_sched(device,
- event->cmdbatch->context);
+ event->drawobj->context);
}
}
@@ -186,17 +186,17 @@ static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
* This function is called by the GPU event when the sync event timestamp
* expires
*/
-static void kgsl_cmdbatch_sync_func(struct kgsl_device *device,
+static void kgsl_drawobj_sync_func(struct kgsl_device *device,
struct kgsl_event_group *group, void *priv, int result)
{
- struct kgsl_cmdbatch_sync_event *event = priv;
+ struct kgsl_drawobj_sync_event *event = priv;
- trace_syncpoint_timestamp_expire(event->cmdbatch,
+ trace_syncpoint_timestamp_expire(event->drawobj,
event->context, event->timestamp);
- kgsl_cmdbatch_sync_expire(device, event);
+ kgsl_drawobj_sync_expire(device, event);
kgsl_context_put(event->context);
- kgsl_cmdbatch_put(event->cmdbatch);
+ kgsl_drawobj_put(event->drawobj);
}
static inline void _free_memobj_list(struct list_head *list)
@@ -211,38 +211,38 @@ static inline void _free_memobj_list(struct list_head *list)
}
/**
- * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure
- * @cmdbatch: Pointer to the command batch object to destroy
+ * kgsl_drawobj_destroy() - Destroy a drawobj structure
+ * @drawobj: Pointer to the drawobj object to destroy
*
- * Start the process of destroying a command batch. Cancel any pending events
+ * Start the process of destroying a drawobj. Cancel any pending events
* and decrement the refcount. Asynchronous events can still signal after
- * kgsl_cmdbatch_destroy has returned.
+ * kgsl_drawobj_destroy has returned.
*/
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
+void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj)
{
unsigned int i;
unsigned long pending;
- if (IS_ERR_OR_NULL(cmdbatch))
+ if (IS_ERR_OR_NULL(drawobj))
return;
/* Zap the canary timer */
- del_timer_sync(&cmdbatch->timer);
+ del_timer_sync(&drawobj->timer);
/*
* Copy off the pending list and clear all pending events - this will
* render any subsequent asynchronous callback harmless
*/
- bitmap_copy(&pending, &cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
- bitmap_zero(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
+ bitmap_copy(&pending, &drawobj->pending, KGSL_MAX_SYNCPOINTS);
+ bitmap_zero(&drawobj->pending, KGSL_MAX_SYNCPOINTS);
/*
* Clear all pending events - this will render any subsequent async
* callbacks harmless
*/
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- struct kgsl_cmdbatch_sync_event *event = &cmdbatch->synclist[i];
+ for (i = 0; i < drawobj->numsyncs; i++) {
+ struct kgsl_drawobj_sync_event *event = &drawobj->synclist[i];
/* Don't do anything if the event has already expired */
if (!test_bit(i, &pending))
@@ -250,127 +250,127 @@ void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
switch (event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
- kgsl_cancel_event(cmdbatch->device,
+ kgsl_cancel_event(drawobj->device,
&event->context->events, event->timestamp,
- kgsl_cmdbatch_sync_func, event);
+ kgsl_drawobj_sync_func, event);
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
if (kgsl_sync_fence_async_cancel(event->handle))
- kgsl_cmdbatch_put(cmdbatch);
+ kgsl_drawobj_put(drawobj);
break;
}
}
/*
* Release the refcount on the mem entry associated with the
- * cmdbatch profiling buffer
+ * drawobj profiling buffer
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING)
- kgsl_mem_entry_put(cmdbatch->profiling_buf_entry);
+ if (drawobj->flags & KGSL_DRAWOBJ_PROFILING)
+ kgsl_mem_entry_put(drawobj->profiling_buf_entry);
/* Destroy the cmdlist we created */
- _free_memobj_list(&cmdbatch->cmdlist);
+ _free_memobj_list(&drawobj->cmdlist);
/* Destroy the memlist we created */
- _free_memobj_list(&cmdbatch->memlist);
+ _free_memobj_list(&drawobj->memlist);
/*
* If we cancelled an event, there's a good chance that the context is
* on a dispatcher queue, so schedule to get it removed.
*/
if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
- cmdbatch->device->ftbl->drawctxt_sched)
- cmdbatch->device->ftbl->drawctxt_sched(cmdbatch->device,
- cmdbatch->context);
+ drawobj->device->ftbl->drawctxt_sched)
+ drawobj->device->ftbl->drawctxt_sched(drawobj->device,
+ drawobj->context);
- kgsl_cmdbatch_put(cmdbatch);
+ kgsl_drawobj_put(drawobj);
}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy);
+EXPORT_SYMBOL(kgsl_drawobj_destroy);
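The snapshot-then-zero step in kgsl_drawobj_destroy() deserves a note: copying the pending mask and clearing the shared one up front means any asynchronous callback that fires afterwards sees an empty mask and bails out, while teardown still knows exactly which events it must cancel. A user-space model of the idea, with the real locking and timer handling elided:

#include <stdio.h>

static unsigned long shared_pending = 0x5;      /* events 0 and 2 still live */

/* What a late callback does: check-and-clear, so an already-zero mask
 * makes it a no-op (mirrors kgsl_drawobj_sync_expire). */
static void late_callback(unsigned int id)
{
        unsigned long mask = 1UL << id;

        if (!(shared_pending & mask))
                return;                 /* destroy got here first */
        shared_pending &= ~mask;
        printf("callback retired event %u\n", id);
}

int main(void)
{
        /* destroy path: snapshot, then neutralize future callbacks */
        unsigned long snapshot = shared_pending;
        shared_pending = 0;

        late_callback(0);               /* harmless no-op now */

        for (unsigned int i = 0; i < 8 * sizeof(snapshot); i++)
                if (snapshot & (1UL << i))
                        printf("destroy cancels event %u\n", i);
        return 0;
}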
/*
* A callback that gets registered with kgsl_sync_fence_async_wait and is fired
* when a fence is expired
*/
-static void kgsl_cmdbatch_sync_fence_func(void *priv)
+static void kgsl_drawobj_sync_fence_func(void *priv)
{
- struct kgsl_cmdbatch_sync_event *event = priv;
+ struct kgsl_drawobj_sync_event *event = priv;
- trace_syncpoint_fence_expire(event->cmdbatch,
+ trace_syncpoint_fence_expire(event->drawobj,
event->handle ? event->handle->name : "unknown");
- kgsl_cmdbatch_sync_expire(event->device, event);
+ kgsl_drawobj_sync_expire(event->device, event);
- kgsl_cmdbatch_put(event->cmdbatch);
+ kgsl_drawobj_put(event->drawobj);
}
-/* kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint
+/* kgsl_drawobj_add_sync_fence() - Add a new sync fence syncpoint
* @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
+ * @drawobj: KGSL drawobj to add the sync point to
+ * @priv: Private structure passed by the user
*
- * Add a new fence sync syncpoint to the cmdbatch.
+ * Add a new fence sync syncpoint to the drawobj.
*/
-static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
+static int kgsl_drawobj_add_sync_fence(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void *priv)
{
struct kgsl_cmd_syncpoint_fence *sync = priv;
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
unsigned int id;
- kref_get(&cmdbatch->refcount);
+ kref_get(&drawobj->refcount);
- id = cmdbatch->numsyncs++;
+ id = drawobj->numsyncs++;
- event = &cmdbatch->synclist[id];
+ event = &drawobj->synclist[id];
event->id = id;
event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
- event->cmdbatch = cmdbatch;
+ event->drawobj = drawobj;
event->device = device;
event->context = NULL;
- set_bit(event->id, &cmdbatch->pending);
+ set_bit(event->id, &drawobj->pending);
event->handle = kgsl_sync_fence_async_wait(sync->fd,
- kgsl_cmdbatch_sync_fence_func, event);
+ kgsl_drawobj_sync_fence_func, event);
if (IS_ERR_OR_NULL(event->handle)) {
int ret = PTR_ERR(event->handle);
- clear_bit(event->id, &cmdbatch->pending);
+ clear_bit(event->id, &drawobj->pending);
event->handle = NULL;
- kgsl_cmdbatch_put(cmdbatch);
+ kgsl_drawobj_put(drawobj);
/*
* If ret == 0 the fence was already signaled - print a trace
* message so we can track that
*/
if (ret == 0)
- trace_syncpoint_fence_expire(cmdbatch, "signaled");
+ trace_syncpoint_fence_expire(drawobj, "signaled");
return ret;
}
- trace_syncpoint_fence(cmdbatch, event->handle->name);
+ trace_syncpoint_fence(drawobj, event->handle->name);
return 0;
}
-/* kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch
+/* kgsl_drawobj_add_sync_timestamp() - Add a new sync point for a drawobj
* @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
+ * @drawobj: KGSL drawobj to add the sync point to
+ * @priv: Private structure passed by the user
*
- * Add a new sync point timestamp event to the cmdbatch.
+ * Add a new sync point timestamp event to the drawobj.
*/
-static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
+static int kgsl_drawobj_add_sync_timestamp(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void *priv)
{
struct kgsl_cmd_syncpoint_timestamp *sync = priv;
- struct kgsl_context *context = kgsl_context_get(cmdbatch->device,
+ struct kgsl_context *context = kgsl_context_get(drawobj->device,
sync->context_id);
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
int ret = -EINVAL;
unsigned int id;
@@ -384,8 +384,9 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
* create a sync point on a future timestamp.
*/
- if (context == cmdbatch->context) {
+ if (context == drawobj->context) {
unsigned int queued;
+
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
&queued);
@@ -397,29 +398,29 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
}
}
- kref_get(&cmdbatch->refcount);
+ kref_get(&drawobj->refcount);
- id = cmdbatch->numsyncs++;
+ id = drawobj->numsyncs++;
- event = &cmdbatch->synclist[id];
+ event = &drawobj->synclist[id];
event->id = id;
event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
- event->cmdbatch = cmdbatch;
+ event->drawobj = drawobj;
event->context = context;
event->timestamp = sync->timestamp;
event->device = device;
- set_bit(event->id, &cmdbatch->pending);
+ set_bit(event->id, &drawobj->pending);
ret = kgsl_add_event(device, &context->events, sync->timestamp,
- kgsl_cmdbatch_sync_func, event);
+ kgsl_drawobj_sync_func, event);
if (ret) {
- clear_bit(event->id, &cmdbatch->pending);
- kgsl_cmdbatch_put(cmdbatch);
+ clear_bit(event->id, &drawobj->pending);
+ kgsl_drawobj_put(drawobj);
} else {
- trace_syncpoint_timestamp(cmdbatch, context, sync->timestamp);
+ trace_syncpoint_timestamp(drawobj, context, sync->timestamp);
}
done:
@@ -430,43 +431,43 @@ done:
}
/**
- * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch
+ * kgsl_drawobj_add_sync() - Add a sync point to a drawobj
* @device: Pointer to the KGSL device struct for the GPU
- * @cmdbatch: Pointer to the cmdbatch
+ * @drawobj: Pointer to the drawobj
* @sync: Pointer to the user-specified struct defining the syncpoint
*
- * Create a new sync point in the cmdbatch based on the user specified
+ * Create a new sync point in the drawobj based on the user specified
* parameters
*/
-int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch,
+int kgsl_drawobj_add_sync(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj,
struct kgsl_cmd_syncpoint *sync)
{
void *priv;
int ret, psize;
- int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch,
+ int (*func)(struct kgsl_device *device, struct kgsl_drawobj *drawobj,
void *priv);
switch (sync->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
psize = sizeof(struct kgsl_cmd_syncpoint_timestamp);
- func = kgsl_cmdbatch_add_sync_timestamp;
+ func = kgsl_drawobj_add_sync_timestamp;
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
psize = sizeof(struct kgsl_cmd_syncpoint_fence);
- func = kgsl_cmdbatch_add_sync_fence;
+ func = kgsl_drawobj_add_sync_fence;
break;
default:
KGSL_DRV_ERR(device,
"bad syncpoint type ctxt %d type 0x%x size %zu\n",
- cmdbatch->context->id, sync->type, sync->size);
+ drawobj->context->id, sync->type, sync->size);
return -EINVAL;
}
if (sync->size != psize) {
KGSL_DRV_ERR(device,
"bad syncpoint size ctxt %d type 0x%x size %zu\n",
- cmdbatch->context->id, sync->type, sync->size);
+ drawobj->context->id, sync->type, sync->size);
return -EINVAL;
}
@@ -479,30 +480,30 @@ int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
return -EFAULT;
}
- ret = func(device, cmdbatch, priv);
+ ret = func(device, drawobj, priv);
kfree(priv);
return ret;
}
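The add_sync path is a small type dispatch: the syncpoint type selects both the handler and the exact payload size, and a size mismatch is rejected before any user copy happens. A standalone model of that validate-then-dispatch shape (the types, sizes and handlers here are invented for illustration):

#include <stddef.h>
#include <stdio.h>

enum { SYNC_TIMESTAMP, SYNC_FENCE };

struct sync_ts { unsigned int context_id, timestamp; };
struct sync_fd { int fd; };

static int add_ts(const void *priv)
{
        const struct sync_ts *ts = priv;

        printf("timestamp sync: ctx %u ts %u\n", ts->context_id,
               ts->timestamp);
        return 0;
}

static int add_fd(const void *priv)
{
        const struct sync_fd *fd = priv;

        printf("fence sync: fd %d\n", fd->fd);
        return 0;
}

static int add_sync(int type, const void *payload, size_t size)
{
        int (*func)(const void *priv);
        size_t psize;

        switch (type) {
        case SYNC_TIMESTAMP:
                psize = sizeof(struct sync_ts);
                func = add_ts;
                break;
        case SYNC_FENCE:
                psize = sizeof(struct sync_fd);
                func = add_fd;
                break;
        default:
                return -1;              /* -EINVAL in the driver */
        }

        if (size != psize)              /* reject before copying the payload */
                return -1;

        return func(payload);
}

int main(void)
{
        struct sync_ts ts = { .context_id = 3, .timestamp = 42 };

        return add_sync(SYNC_TIMESTAMP, &ts, sizeof(ts));
}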
static void add_profiling_buffer(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, uint64_t gpuaddr, uint64_t size,
+ struct kgsl_drawobj *drawobj, uint64_t gpuaddr, uint64_t size,
unsigned int id, uint64_t offset)
{
struct kgsl_mem_entry *entry;
- if (!(cmdbatch->flags & KGSL_CMDBATCH_PROFILING))
+ if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING))
return;
/* Only the first buffer entry counts - ignore the rest */
- if (cmdbatch->profiling_buf_entry != NULL)
+ if (drawobj->profiling_buf_entry != NULL)
return;
if (id != 0)
- entry = kgsl_sharedmem_find_id(cmdbatch->context->proc_priv,
+ entry = kgsl_sharedmem_find_id(drawobj->context->proc_priv,
id);
else
- entry = kgsl_sharedmem_find(cmdbatch->context->proc_priv,
+ entry = kgsl_sharedmem_find(drawobj->context->proc_priv,
gpuaddr);
if (entry != NULL) {
@@ -515,29 +516,29 @@ static void add_profiling_buffer(struct kgsl_device *device,
if (entry == NULL) {
KGSL_DRV_ERR(device,
"ignore bad profile buffer ctxt %d id %d offset %lld gpuaddr %llx size %lld\n",
- cmdbatch->context->id, id, offset, gpuaddr, size);
+ drawobj->context->id, id, offset, gpuaddr, size);
return;
}
- cmdbatch->profiling_buf_entry = entry;
+ drawobj->profiling_buf_entry = entry;
if (id != 0)
- cmdbatch->profiling_buffer_gpuaddr =
+ drawobj->profiling_buffer_gpuaddr =
entry->memdesc.gpuaddr + offset;
else
- cmdbatch->profiling_buffer_gpuaddr = gpuaddr;
+ drawobj->profiling_buffer_gpuaddr = gpuaddr;
}
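The address resolution above reduces to: a non-zero id selects a mem entry and applies the caller's offset to its base address, while id == 0 means the caller passed a raw GPU address to use directly. Modeled with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t entry_base = 0x100000; /* hypothetical memdesc.gpuaddr */
        uint64_t offset = 0x40;
        uint64_t raw_gpuaddr = 0x200000;
        unsigned int id = 7;            /* 0 would select the raw address */

        uint64_t resolved = id ? entry_base + offset : raw_gpuaddr;

        printf("profiling buffer gpuaddr = %#llx\n",
               (unsigned long long)resolved);
        return 0;
}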
/**
- * kgsl_cmdbatch_add_ibdesc() - Add a legacy ibdesc to a command batch
- * @cmdbatch: Pointer to the cmdbatch
+ * kgsl_drawobj_add_ibdesc() - Add a legacy ibdesc to a drawobj
+ * @drawobj: Pointer to the drawobj
* @ibdesc: Pointer to the user-specified struct defining the memory or IB
*
- * Create a new memory entry in the cmdbatch based on the user specified
+ * Create a new memory entry in the drawobj based on the user specified
* parameters
*/
-int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc)
+int kgsl_drawobj_add_ibdesc(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, struct kgsl_ibdesc *ibdesc)
{
uint64_t gpuaddr = (uint64_t) ibdesc->gpuaddr;
uint64_t size = (uint64_t) ibdesc->sizedwords << 2;
@@ -546,16 +547,16 @@ int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
/* sanitize the ibdesc ctrl flags */
ibdesc->ctrl &= KGSL_IBDESC_MEMLIST | KGSL_IBDESC_PROFILING_BUFFER;
- if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST &&
+ if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
if (ibdesc->ctrl & KGSL_IBDESC_PROFILING_BUFFER) {
- add_profiling_buffer(device, cmdbatch,
+ add_profiling_buffer(device, drawobj,
gpuaddr, size, 0, 0);
return 0;
}
}
- if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))
+ if (drawobj->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER))
return 0;
mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL);
@@ -569,74 +570,75 @@ int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
mem->offset = 0;
mem->flags = 0;
- if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST &&
+ if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
/* add to the memlist */
- list_add_tail(&mem->node, &cmdbatch->memlist);
+ list_add_tail(&mem->node, &drawobj->memlist);
} else {
/* set the preamble flag if directed to */
- if (cmdbatch->context->flags & KGSL_CONTEXT_PREAMBLE &&
- list_empty(&cmdbatch->cmdlist))
+ if (drawobj->context->flags & KGSL_CONTEXT_PREAMBLE &&
+ list_empty(&drawobj->cmdlist))
mem->flags = KGSL_CMDLIST_CTXTSWITCH_PREAMBLE;
/* add to the cmd list */
- list_add_tail(&mem->node, &cmdbatch->cmdlist);
+ list_add_tail(&mem->node, &drawobj->cmdlist);
}
return 0;
}
/**
- * kgsl_cmdbatch_create() - Create a new cmdbatch structure
+ * kgsl_drawobj_create() - Create a new drawobj structure
* @device: Pointer to a KGSL device struct
* @context: Pointer to a KGSL context struct
- * @flags: Flags for the cmdbatch
+ * @flags: Flags for the drawobj
*
- * Allocate an new cmdbatch structure
+ * Allocate a new drawobj structure
*/
-struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
+struct kgsl_drawobj *kgsl_drawobj_create(struct kgsl_device *device,
struct kgsl_context *context, unsigned int flags)
{
- struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);
- if (cmdbatch == NULL)
+ struct kgsl_drawobj *drawobj = kzalloc(sizeof(*drawobj), GFP_KERNEL);
+
+ if (drawobj == NULL)
return ERR_PTR(-ENOMEM);
/*
* Increase the reference count on the context so it doesn't disappear
- * during the lifetime of this command batch
+ * during the lifetime of this drawobj
*/
if (!_kgsl_context_get(context)) {
- kfree(cmdbatch);
+ kfree(drawobj);
return ERR_PTR(-ENOENT);
}
- kref_init(&cmdbatch->refcount);
- INIT_LIST_HEAD(&cmdbatch->cmdlist);
- INIT_LIST_HEAD(&cmdbatch->memlist);
-
- cmdbatch->device = device;
- cmdbatch->context = context;
- /* sanitize our flags for cmdbatches */
- cmdbatch->flags = flags & (KGSL_CMDBATCH_CTX_SWITCH
- | KGSL_CMDBATCH_MARKER
- | KGSL_CMDBATCH_END_OF_FRAME
- | KGSL_CMDBATCH_SYNC
- | KGSL_CMDBATCH_PWR_CONSTRAINT
- | KGSL_CMDBATCH_MEMLIST
- | KGSL_CMDBATCH_PROFILING
- | KGSL_CMDBATCH_PROFILING_KTIME);
+ kref_init(&drawobj->refcount);
+ INIT_LIST_HEAD(&drawobj->cmdlist);
+ INIT_LIST_HEAD(&drawobj->memlist);
+
+ drawobj->device = device;
+ drawobj->context = context;
+	/* sanitize our flags for drawobjs */
+ drawobj->flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
+ | KGSL_DRAWOBJ_MARKER
+ | KGSL_DRAWOBJ_END_OF_FRAME
+ | KGSL_DRAWOBJ_SYNC
+ | KGSL_DRAWOBJ_PWR_CONSTRAINT
+ | KGSL_DRAWOBJ_MEMLIST
+ | KGSL_DRAWOBJ_PROFILING
+ | KGSL_DRAWOBJ_PROFILING_KTIME);
/* Add a timer to help debug sync deadlocks */
- setup_timer(&cmdbatch->timer, _kgsl_cmdbatch_timer,
- (unsigned long) cmdbatch);
+ setup_timer(&drawobj->timer, _kgsl_drawobj_timer,
+ (unsigned long) drawobj);
- return cmdbatch;
+ return drawobj;
}
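For context, a plausible caller sequence for the renamed API (a sketch only; the real IOCTL plumbing, timestamp handling and dispatcher hand-off via device->ftbl->issueibcmds are assumed and not shown):

/* Sketch of a caller, using only functions introduced or renamed by this
 * patch; it would compile in-tree against kgsl_drawobj.h, not standalone. */
static long submit_sketch(struct kgsl_device *device,
		struct kgsl_context *context, unsigned int flags,
		void __user *cmdlist, unsigned int numcmds)
{
	struct kgsl_drawobj *drawobj;
	long ret;

	drawobj = kgsl_drawobj_create(device, context, flags);
	if (IS_ERR(drawobj))
		return PTR_ERR(drawobj);

	ret = kgsl_drawobj_add_cmdlist(device, drawobj, cmdlist,
			sizeof(struct kgsl_command_object), numcmds);

	/* On failure (and eventually after retirement) the drawobj is
	 * released with kgsl_drawobj_destroy(), which also cancels any
	 * pending syncpoints. */
	if (ret)
		kgsl_drawobj_destroy(drawobj);

	return ret;
}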
#ifdef CONFIG_COMPAT
static int add_ibdesc_list_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj *drawobj, void __user *ptr, int count)
{
int i, ret = 0;
struct kgsl_ibdesc_compat ibdesc32;
@@ -654,7 +656,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device,
ibdesc.sizedwords = (size_t) ibdesc32.sizedwords;
ibdesc.ctrl = (unsigned int) ibdesc32.ctrl;
- ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ ret = kgsl_drawobj_add_ibdesc(device, drawobj, &ibdesc);
if (ret)
break;
@@ -665,7 +667,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device,
}
static int add_syncpoints_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj *drawobj, void __user *ptr, int count)
{
struct kgsl_cmd_syncpoint_compat sync32;
struct kgsl_cmd_syncpoint sync;
@@ -683,7 +685,7 @@ static int add_syncpoints_compat(struct kgsl_device *device,
sync.priv = compat_ptr(sync32.priv);
sync.size = (size_t) sync32.size;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_add_sync(device, drawobj, &sync);
if (ret)
break;
@@ -694,26 +696,26 @@ static int add_syncpoints_compat(struct kgsl_device *device,
}
#else
static int add_ibdesc_list_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj *drawobj, void __user *ptr, int count)
{
return -EINVAL;
}
static int add_syncpoints_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj *drawobj, void __user *ptr, int count)
{
return -EINVAL;
}
#endif
-int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+int kgsl_drawobj_add_ibdesc_list(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void __user *ptr, int count)
{
struct kgsl_ibdesc ibdesc;
int i, ret;
if (is_compat_task())
- return add_ibdesc_list_compat(device, cmdbatch, ptr, count);
+ return add_ibdesc_list_compat(device, drawobj, ptr, count);
for (i = 0; i < count; i++) {
memset(&ibdesc, 0, sizeof(ibdesc));
@@ -721,7 +723,7 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
if (copy_from_user(&ibdesc, ptr, sizeof(ibdesc)))
return -EFAULT;
- ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ ret = kgsl_drawobj_add_ibdesc(device, drawobj, &ibdesc);
if (ret)
return ret;
@@ -731,8 +733,8 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+int kgsl_drawobj_add_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void __user *ptr, int count)
{
struct kgsl_cmd_syncpoint sync;
int i, ret;
@@ -743,14 +745,14 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
if (count > KGSL_MAX_SYNCPOINTS)
return -EINVAL;
- cmdbatch->synclist = kcalloc(count,
- sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL);
+ drawobj->synclist = kcalloc(count,
+ sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);
- if (cmdbatch->synclist == NULL)
+ if (drawobj->synclist == NULL)
return -ENOMEM;
if (is_compat_task())
- return add_syncpoints_compat(device, cmdbatch, ptr, count);
+ return add_syncpoints_compat(device, drawobj, ptr, count);
for (i = 0; i < count; i++) {
memset(&sync, 0, sizeof(sync));
@@ -758,7 +760,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
if (copy_from_user(&sync, ptr, sizeof(sync)))
return -EFAULT;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_add_sync(device, drawobj, &sync);
if (ret)
return ret;
@@ -768,7 +770,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
return 0;
}
-static int kgsl_cmdbatch_add_object(struct list_head *head,
+static int kgsl_drawobj_add_object(struct list_head *head,
struct kgsl_command_object *obj)
{
struct kgsl_memobj_node *mem;
@@ -793,8 +795,8 @@ static int kgsl_cmdbatch_add_object(struct list_head *head,
KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \
KGSL_CMDLIST_IB_PREAMBLE)
-int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+int kgsl_drawobj_add_cmdlist(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_object obj;
@@ -809,7 +811,7 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
return -EINVAL;
/* Ignore all if SYNC or MARKER is specified */
- if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))
+ if (drawobj->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER))
return 0;
for (i = 0; i < count; i++) {
@@ -823,12 +825,12 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
if (!(obj.flags & CMDLIST_FLAGS)) {
KGSL_DRV_ERR(device,
"invalid cmdobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
- cmdbatch->context->id, obj.flags, obj.id,
+ drawobj->context->id, obj.flags, obj.id,
obj.offset, obj.gpuaddr, obj.size);
return -EINVAL;
}
- ret = kgsl_cmdbatch_add_object(&cmdbatch->cmdlist, &obj);
+ ret = kgsl_drawobj_add_object(&drawobj->cmdlist, &obj);
if (ret)
return ret;
@@ -838,8 +840,8 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+int kgsl_drawobj_add_memlist(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_object obj;
@@ -863,16 +865,16 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
if (!(obj.flags & KGSL_OBJLIST_MEMOBJ)) {
KGSL_DRV_ERR(device,
"invalid memobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
- cmdbatch->context->id, obj.flags, obj.id,
+ drawobj->context->id, obj.flags, obj.id,
obj.offset, obj.gpuaddr, obj.size);
return -EINVAL;
}
if (obj.flags & KGSL_OBJLIST_PROFILE)
- add_profiling_buffer(device, cmdbatch, obj.gpuaddr,
+ add_profiling_buffer(device, drawobj, obj.gpuaddr,
obj.size, obj.id, obj.offset);
else {
- ret = kgsl_cmdbatch_add_object(&cmdbatch->memlist,
+ ret = kgsl_drawobj_add_object(&drawobj->memlist,
&obj);
if (ret)
return ret;
@@ -884,8 +886,8 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+int kgsl_drawobj_add_synclist(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_syncpoint syncpoint;
@@ -903,10 +905,10 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
if (count > KGSL_MAX_SYNCPOINTS)
return -EINVAL;
- cmdbatch->synclist = kcalloc(count,
- sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL);
+ drawobj->synclist = kcalloc(count,
+ sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);
- if (cmdbatch->synclist == NULL)
+ if (drawobj->synclist == NULL)
return -ENOMEM;
for (i = 0; i < count; i++) {
@@ -920,7 +922,7 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
sync.priv = to_user_ptr(syncpoint.priv);
sync.size = syncpoint.size;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_add_sync(device, drawobj, &sync);
if (ret)
return ret;
@@ -930,13 +932,13 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
return 0;
}
-void kgsl_cmdbatch_exit(void)
+void kgsl_drawobj_exit(void)
{
if (memobjs_cache != NULL)
kmem_cache_destroy(memobjs_cache);
}
-int kgsl_cmdbatch_init(void)
+int kgsl_drawobj_init(void)
{
memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
if (memobjs_cache == NULL) {
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
new file mode 100644
index 000000000000..48aa7af24029
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __KGSL_DRAWOBJ_H
+#define __KGSL_DRAWOBJ_H
+
+#define KGSL_DRAWOBJ_FLAGS \
+ { KGSL_DRAWOBJ_MARKER, "MARKER" }, \
+ { KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \
+ { KGSL_DRAWOBJ_SYNC, "SYNC" }, \
+ { KGSL_DRAWOBJ_END_OF_FRAME, "EOF" }, \
+ { KGSL_DRAWOBJ_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \
+ { KGSL_DRAWOBJ_SUBMIT_IB_LIST, "IB_LIST" }
+
+/**
+ * struct kgsl_drawobj - KGSL command descriptor
+ * @device: KGSL GPU device that the command was created for
+ * @context: KGSL context that created the command
+ * @timestamp: Timestamp assigned to the command
+ * @flags: flags
+ * @priv: Internal flags
+ * @fault_policy: Internal policy describing how to handle this command in case
+ * of a fault
+ * @fault_recovery: recovery actions actually tried for this batch
+ * @refcount: kref structure to maintain the reference count
+ * @cmdlist: List of IBs to issue
+ * @memlist: List of all memory used in this drawobj
+ * @synclist: Array of context/timestamp tuples to wait for before issuing
+ * @numsyncs: Number of sync entries in the array
+ * @pending: Bitmask of sync events that are active
+ * @timer: a timer used to track possible sync timeouts for this drawobj
+ * @marker_timestamp: For markers, the timestamp of the last "real" command that
+ * was queued
+ * @profiling_buf_entry: Mem entry containing the profiling buffer
+ * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here
+ * for easy access
+ * @profile_index: Index to store the start/stop ticks in the kernel profiling
+ * buffer
+ * @submit_ticks: Variable to hold ticks at the time of drawobj submit.
+ * @global_ts: The ringbuffer timestamp corresponding to this drawobj
+ * @timeout_jiffies: For a syncpoint drawobj, the jiffies at which the
+ * timer will expire
+ * This structure defines an atomic batch of command buffers issued from
+ * userspace.
+ */
+struct kgsl_drawobj {
+ struct kgsl_device *device;
+ struct kgsl_context *context;
+ uint32_t timestamp;
+ uint32_t flags;
+ unsigned long priv;
+ unsigned long fault_policy;
+ unsigned long fault_recovery;
+ struct kref refcount;
+ struct list_head cmdlist;
+ struct list_head memlist;
+ struct kgsl_drawobj_sync_event *synclist;
+ unsigned int numsyncs;
+ unsigned long pending;
+ struct timer_list timer;
+ unsigned int marker_timestamp;
+ struct kgsl_mem_entry *profiling_buf_entry;
+ uint64_t profiling_buffer_gpuaddr;
+ unsigned int profile_index;
+ uint64_t submit_ticks;
+ unsigned int global_ts;
+ unsigned long timeout_jiffies;
+};
+
+/**
+ * struct kgsl_drawobj_sync_event
+ * @id: identifier (position within the pending bitmap)
+ * @type: Syncpoint type
+ * @drawobj: Pointer to the drawobj that owns the sync event
+ * @context: Pointer to the KGSL context that owns the drawobj
+ * @timestamp: Pending timestamp for the event
+ * @handle: Pointer to a sync fence handle
+ * @device: Pointer to the KGSL device
+ */
+struct kgsl_drawobj_sync_event {
+ unsigned int id;
+ int type;
+ struct kgsl_drawobj *drawobj;
+ struct kgsl_context *context;
+ unsigned int timestamp;
+ struct kgsl_sync_fence_waiter *handle;
+ struct kgsl_device *device;
+};
+
+/**
+ * enum kgsl_drawobj_priv - Internal drawobj flags
+ * @DRAWOBJ_FLAG_SKIP - skip the entire drawobj
+ * @DRAWOBJ_FLAG_FORCE_PREAMBLE - Force the preamble on for the drawobj
+ * @DRAWOBJ_FLAG_WFI - Force wait-for-idle for the submission
+ * @DRAWOBJ_FLAG_PROFILE - store the start / retire ticks for the drawobj
+ * in the profiling buffer
+ * @DRAWOBJ_FLAG_FENCE_LOG - Set if the drawobj is dumping fence logs via the
+ * drawobj timer - this is used to avoid recursion
+ */
+
+enum kgsl_drawobj_priv {
+ DRAWOBJ_FLAG_SKIP = 0,
+ DRAWOBJ_FLAG_FORCE_PREAMBLE,
+ DRAWOBJ_FLAG_WFI,
+ DRAWOBJ_FLAG_PROFILE,
+ DRAWOBJ_FLAG_FENCE_LOG,
+};
+
+
+int kgsl_drawobj_add_memobj(struct kgsl_drawobj *drawobj,
+ struct kgsl_ibdesc *ibdesc);
+
+int kgsl_drawobj_add_sync(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj,
+ struct kgsl_cmd_syncpoint *sync);
+
+struct kgsl_drawobj *kgsl_drawobj_create(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags);
+int kgsl_drawobj_add_ibdesc(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, struct kgsl_ibdesc *ibdesc);
+int kgsl_drawobj_add_ibdesc_list(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void __user *ptr, int count);
+int kgsl_drawobj_add_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void __user *ptr, int count);
+int kgsl_drawobj_add_cmdlist(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+int kgsl_drawobj_add_memlist(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+int kgsl_drawobj_add_synclist(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+
+int kgsl_drawobj_init(void);
+void kgsl_drawobj_exit(void);
+
+void kgsl_dump_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj *drawobj);
+
+void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj);
+
+void kgsl_drawobj_destroy_object(struct kref *kref);
+
+static inline bool kgsl_drawobj_events_pending(struct kgsl_drawobj *drawobj)
+{
+ return !bitmap_empty(&drawobj->pending, KGSL_MAX_SYNCPOINTS);
+}
+
+static inline bool kgsl_drawobj_event_pending(struct kgsl_drawobj *drawobj,
+ unsigned int bit)
+{
+ if (bit >= KGSL_MAX_SYNCPOINTS)
+ return false;
+
+ return test_bit(bit, &drawobj->pending);
+}
+
+#endif /* __KGSL_DRAWOBJ_H */
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 4ef9f80177d6..2dd462454565 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -36,14 +36,14 @@ TRACE_EVENT(kgsl_issueibcmds,
TP_PROTO(struct kgsl_device *device,
int drawctxt_id,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj,
unsigned int numibs,
int timestamp,
int flags,
int result,
unsigned int type),
- TP_ARGS(device, drawctxt_id, cmdbatch, numibs, timestamp,
+ TP_ARGS(device, drawctxt_id, drawobj, numibs, timestamp,
flags, result, type),
TP_STRUCT__entry(
@@ -74,7 +74,7 @@ TRACE_EVENT(kgsl_issueibcmds,
__entry->numibs,
__entry->timestamp,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "None",
+ KGSL_DRAWOBJ_FLAGS) : "None",
__entry->result,
__print_symbolic(__entry->drawctxt_type, KGSL_CONTEXT_TYPES)
)
@@ -1028,59 +1028,59 @@ TRACE_EVENT(kgsl_pagetable_destroy,
);
DECLARE_EVENT_CLASS(syncpoint_timestamp_template,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj *drawobj, struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp),
+ TP_ARGS(drawobj, context, timestamp),
TP_STRUCT__entry(
- __field(unsigned int, cmdbatch_context_id)
+ __field(unsigned int, drawobj_context_id)
__field(unsigned int, context_id)
__field(unsigned int, timestamp)
),
TP_fast_assign(
- __entry->cmdbatch_context_id = cmdbatch->context->id;
+ __entry->drawobj_context_id = drawobj->context->id;
__entry->context_id = context->id;
__entry->timestamp = timestamp;
),
TP_printk("ctx=%d sync ctx=%d ts=%d",
- __entry->cmdbatch_context_id, __entry->context_id,
+ __entry->drawobj_context_id, __entry->context_id,
__entry->timestamp)
);
DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj *drawobj, struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp)
+ TP_ARGS(drawobj, context, timestamp)
);
DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp_expire,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj *drawobj, struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp)
+ TP_ARGS(drawobj, context, timestamp)
);
DECLARE_EVENT_CLASS(syncpoint_fence_template,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name),
+ TP_PROTO(struct kgsl_drawobj *drawobj, char *name),
+ TP_ARGS(drawobj, name),
TP_STRUCT__entry(
__string(fence_name, name)
- __field(unsigned int, cmdbatch_context_id)
+ __field(unsigned int, drawobj_context_id)
),
TP_fast_assign(
- __entry->cmdbatch_context_id = cmdbatch->context->id;
+ __entry->drawobj_context_id = drawobj->context->id;
__assign_str(fence_name, name);
),
TP_printk("ctx=%d fence=%s",
- __entry->cmdbatch_context_id, __get_str(fence_name))
+ __entry->drawobj_context_id, __get_str(fence_name))
);
DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name)
+ TP_PROTO(struct kgsl_drawobj *drawobj, char *name),
+ TP_ARGS(drawobj, name)
);
DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence_expire,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name)
+ TP_PROTO(struct kgsl_drawobj *drawobj, char *name),
+ TP_ARGS(drawobj, name)
);
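Both fence events above share one event class, so the trace struct layout, assignment and printk format are written exactly once. A hypothetical third event (not part of this patch, shown only to illustrate the pattern) would need just the DEFINE_EVENT stanza:

/* Hypothetical example only - syncpoint_fence_cancel does not exist in
 * this patch; it shows how the shared class keeps new events one-liners. */
DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence_cancel,
	TP_PROTO(struct kgsl_drawobj *drawobj, char *name),
	TP_ARGS(drawobj, name)
);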
TRACE_EVENT(kgsl_msg,