summaryrefslogtreecommitdiff
path: root/drivers/gpu
diff options
context:
space:
mode:
authorHareesh Gundu <hareeshg@codeaurora.org>2017-06-07 14:50:15 +0530
committerHareesh Gundu <hareeshg@codeaurora.org>2017-06-13 12:03:27 +0530
commit71c3b2e17c6fb4d1b040d3c8c75d76b78a050cce (patch)
tree50e555ec334bfa860d3a3a6f2798358e7ca29e87 /drivers/gpu
parent85baaeb2e2d0e7c67bf4e5cc22d15e173d01b209 (diff)
msm: kgsl: Defer issue commands to worker thread
Currently the submit ioctl is blocked until the commands are added to the ringbuffer, in case the inflight count is less than the context burst count. If the submit command happens while the GPU is in the slumber state, the GPU wakeup time is added to the submit IOCTL, which adds latency to preparing the next frame on the CPU side. Defer command submission to the dispatcher worker if the GPU is in the slumber state. CRs-Fixed: 2055107 Change-Id: I099ba721e02bbcd8ccadb1bc518c7c1ef4fb7e21 Signed-off-by: Hareesh Gundu <hareeshg@codeaurora.org>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/msm/adreno_dispatch.c25
-rw-r--r--drivers/gpu/msm/kgsl.c1
-rw-r--r--drivers/gpu/msm/kgsl_device.h5
-rw-r--r--drivers/gpu/msm/kgsl_pwrctrl.c24
4 files changed, 52 insertions, 3 deletions
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 55f906c9cb90..b4d0656d062b 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -979,6 +979,13 @@ static void _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
spin_unlock(&dispatcher->plist_lock);
}
+static inline void _decrement_submit_now(struct kgsl_device *device)
+{
+ spin_lock(&device->submit_lock);
+ device->submit_now--;
+ spin_unlock(&device->submit_lock);
+}
+
/**
* adreno_dispatcher_issuecmds() - Issue commmands from pending contexts
* @adreno_dev: Pointer to the adreno device struct
@@ -988,15 +995,29 @@ static void _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
{
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ spin_lock(&device->submit_lock);
+ /* If state transition to SLUMBER, schedule the work for later */
+ if (device->slumber == true) {
+ spin_unlock(&device->submit_lock);
+ goto done;
+ }
+ device->submit_now++;
+ spin_unlock(&device->submit_lock);
/* If the dispatcher is busy then schedule the work for later */
if (!mutex_trylock(&dispatcher->mutex)) {
- adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
- return;
+ _decrement_submit_now(device);
+ goto done;
}
_adreno_dispatcher_issuecmds(adreno_dev);
mutex_unlock(&dispatcher->mutex);
+ _decrement_submit_now(device);
+ return;
+done:
+ adreno_dispatcher_schedule(device);
}
/**
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index b2def8dea954..7584811f388a 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -4719,6 +4719,7 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
device->id, device->reg_phys, device->reg_len);
rwlock_init(&device->context_lock);
+ spin_lock_init(&device->submit_lock);
setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index d93fd9bfbcd0..64dd45a30612 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -256,6 +256,11 @@ struct kgsl_device {
struct kgsl_pwrctrl pwrctrl;
int open_count;
+ /* For GPU inline submission */
+ uint32_t submit_now;
+ spinlock_t submit_lock;
+ bool slumber;
+
struct mutex mutex;
uint32_t state;
uint32_t requested_state;
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 0150d50c925b..ead436c68cbb 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -2347,9 +2347,24 @@ void kgsl_idle_check(struct work_struct *work)
|| device->state == KGSL_STATE_NAP) {
if (!atomic_read(&device->active_cnt)) {
+ spin_lock(&device->submit_lock);
+ if (device->submit_now) {
+ spin_unlock(&device->submit_lock);
+ goto done;
+ }
+ /* Don't allow GPU inline submission in SLUMBER */
+ if (requested_state == KGSL_STATE_SLUMBER)
+ device->slumber = true;
+ spin_unlock(&device->submit_lock);
+
ret = kgsl_pwrctrl_change_state(device,
device->requested_state);
if (ret == -EBUSY) {
+ if (requested_state == KGSL_STATE_SLUMBER) {
+ spin_lock(&device->submit_lock);
+ device->slumber = false;
+ spin_unlock(&device->submit_lock);
+ }
/*
* If the GPU is currently busy, restore
* the requested state and reschedule
@@ -2360,7 +2375,7 @@ void kgsl_idle_check(struct work_struct *work)
kgsl_schedule_work(&device->idle_check_ws);
}
}
-
+done:
if (!ret)
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
@@ -2789,6 +2804,13 @@ static void kgsl_pwrctrl_set_state(struct kgsl_device *device,
trace_kgsl_pwr_set_state(device, state);
device->state = state;
device->requested_state = KGSL_STATE_NONE;
+
+ spin_lock(&device->submit_lock);
+ if (state == KGSL_STATE_SLUMBER || state == KGSL_STATE_SUSPEND)
+ device->slumber = true;
+ else
+ device->slumber = false;
+ spin_unlock(&device->submit_lock);
}
static void kgsl_pwrctrl_request_state(struct kgsl_device *device,