author	Chris Wilson <chris@chris-wilson.co.uk>	2014-12-04 21:03:25 +0000
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2015-01-21 14:57:03 +0100
commit	cdd1cf799bd24ac0a4184549601ae302267301c5 (patch)
tree	bf666cc69457b61505a8c3528f8dd1a77de80979 /drivers
parent	01934c2a691882185b3021d437df13bcba07711d (diff)
drm: Make drm_read() more robust against multithreaded races
The current implementation of drm_read() faces a number of issues:

1. Upon an error, it consumes the event, which may lead to the client blocking.
2. Upon an error, it forgets about events already copied.
3. If it fails to copy a single event with O_NONBLOCK set, it falls into an infinite loop of reporting EAGAIN.
4. There is a race between multiple waiters and blocking reads of the events list.

Here, we inline drm_dequeue_event() into drm_read() so that we can take the spinlock around the list walking and event copying, and, importantly, reorder the error handling to avoid the issues above.

Cc: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Takashi Iwai <tiwai@suse.de>
Testcase: igt/drm_read
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
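In outline, the fix holds dev->event_lock across the entire list walk and copy, drops it only while sleeping, and unlinks an event only after it has been copied successfully. A condensed, annotated sketch of the resulting control flow (comments are editorial; the full patch follows below):

	spin_lock_irq(&dev->event_lock);
	for (;;) {
		if (list_empty(&file_priv->event_list)) {
			if (ret)
				break;		/* report the events already copied */
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;	/* nothing copied and we cannot block */
				break;
			}
			/* Sleep without the lock, then retake it before
			 * walking the list again, so multiple waiters
			 * cannot race against the walk. */
			spin_unlock_irq(&dev->event_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			spin_lock_irq(&dev->event_lock);
			if (ret < 0)
				break;		/* interrupted; no event consumed */
			ret = 0;
		} else {
			/* Peek at the first event, copy it with
			 * __copy_to_user_inatomic(), and only on success
			 * unlink and destroy it, so a failed copy no
			 * longer consumes the event. */
		}
	}
	spin_unlock_irq(&dev->event_lock);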
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/drm_fops.c	89
1 file changed, 42 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 0b9514b6cd64..076dd606b580 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -478,64 +478,59 @@ int drm_release(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(drm_release);
-static bool
-drm_dequeue_event(struct drm_file *file_priv,
-		  size_t total, size_t max, struct drm_pending_event **out)
+ssize_t drm_read(struct file *filp, char __user *buffer,
+		 size_t count, loff_t *offset)
 {
+	struct drm_file *file_priv = filp->private_data;
 	struct drm_device *dev = file_priv->minor->dev;
-	struct drm_pending_event *e;
-	unsigned long flags;
-	bool ret = false;
-
-	spin_lock_irqsave(&dev->event_lock, flags);
+	ssize_t ret = 0;
 
-	*out = NULL;
-	if (list_empty(&file_priv->event_list))
-		goto out;
-	e = list_first_entry(&file_priv->event_list,
-			     struct drm_pending_event, link);
-	if (e->event->length + total > max)
-		goto out;
+	if (!access_ok(VERIFY_WRITE, buffer, count))
+		return -EFAULT;
 
-	file_priv->event_space += e->event->length;
-	list_del(&e->link);
-	*out = e;
-	ret = true;
+	spin_lock_irq(&dev->event_lock);
+	for (;;) {
+		if (list_empty(&file_priv->event_list)) {
+			if (ret)
+				break;
 
-out:
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-	return ret;
-}
-
-ssize_t drm_read(struct file *filp, char __user *buffer,
-		 size_t count, loff_t *offset)
-{
-	struct drm_file *file_priv = filp->private_data;
-	struct drm_pending_event *e;
-	size_t total;
-	ssize_t ret;
+			if (filp->f_flags & O_NONBLOCK) {
+				ret = -EAGAIN;
+				break;
+			}
 
-	if ((filp->f_flags & O_NONBLOCK) == 0) {
-		ret = wait_event_interruptible(file_priv->event_wait,
-					       !list_empty(&file_priv->event_list));
-		if (ret < 0)
-			return ret;
-	}
+			spin_unlock_irq(&dev->event_lock);
+			ret = wait_event_interruptible(file_priv->event_wait,
+						       !list_empty(&file_priv->event_list));
+			spin_lock_irq(&dev->event_lock);
+			if (ret < 0)
+				break;
+
+			ret = 0;
+		} else {
+			struct drm_pending_event *e;
+
+			e = list_first_entry(&file_priv->event_list,
+					     struct drm_pending_event, link);
+			if (e->event->length + ret > count)
+				break;
+
+			if (__copy_to_user_inatomic(buffer + ret,
+						    e->event, e->event->length)) {
+				if (ret == 0)
+					ret = -EFAULT;
+				break;
+			}
 
-	total = 0;
-	while (drm_dequeue_event(file_priv, total, count, &e)) {
-		if (copy_to_user(buffer + total,
-				 e->event, e->event->length)) {
-			total = -EFAULT;
+			file_priv->event_space += e->event->length;
+			ret += e->event->length;
+			list_del(&e->link);
 			e->destroy(e);
-			break;
 		}
-
-		total += e->event->length;
-		e->destroy(e);
 	}
+	spin_unlock_irq(&dev->event_lock);
 
-	return total ?: -EAGAIN;
+	return ret;
 }
 EXPORT_SYMBOL(drm_read);
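For context, userspace consumes these events by read()ing whole struct drm_event records from the DRM file descriptor, which is why drm_read() must copy each event completely or not at all, and must report events already copied before surfacing an error. A minimal sketch of such a consumer (illustrative only, assuming an already-open DRM fd; the commit's actual testcase is igt/drm_read):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <drm/drm.h>

	static void drain_events(int drm_fd)
	{
		char buf[4096];
		ssize_t len;
		size_t off = 0;

		/* A single read() returns zero or more complete events. */
		len = read(drm_fd, buf, sizeof(buf));
		if (len <= 0)
			return;

		while (off < (size_t)len) {
			struct drm_event ev;

			/* Every event begins with a type/length header. */
			memcpy(&ev, buf + off, sizeof(ev));
			if (ev.type == DRM_EVENT_VBLANK)
				printf("vblank event, %u bytes\n", ev.length);
			off += ev.length;
		}
	}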