path: root/fs/eventpoll.c
author    Takashi Iwai <tiwai@suse.de>    2008-12-20 23:42:55 +0100
committer Takashi Iwai <tiwai@suse.de>    2008-12-20 23:42:55 +0100
commit    6a8436419d1b77b24d82bd90354adc4baa393566 (patch)
tree      d20e17b148f37dc299658a60303e6a2e4e6ce63f /fs/eventpoll.c
parent    55fa518867978e1f5fd8353098f80d125ac734d7 (diff)
parent    ff75427a7f641c4468610fbda2ccb69218174cd1 (diff)
Merge branch 'topic/ca0106-capture-no-44khz' into topic/ca0106
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--    fs/eventpoll.c    85
1 file changed, 77 insertions, 8 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index aec5c13f6341..96355d505347 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -102,6 +102,8 @@
#define EP_UNACTIVE_PTR ((void *) -1L)
+#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
+
struct epoll_filefd {
struct file *file;
int fd;
@@ -200,6 +202,9 @@ struct eventpoll {
* holding ->lock.
*/
struct epitem *ovflist;
+
+ /* The user that created the eventpoll descriptor */
+ struct user_struct *user;
};
/* Wait structure used by the poll hooks */
@@ -227,9 +232,17 @@ struct ep_pqueue {
};
/*
+ * Configuration options available inside /proc/sys/fs/epoll/
+ */
+/* Maximum number of epoll devices, per user */
+static int max_user_instances __read_mostly;
+/* Maximum number of epoll watched descriptors, per user */
+static int max_user_watches __read_mostly;
+
+/*
* This mutex is used to serialize ep_free() and eventpoll_release_file().
*/
-static struct mutex epmutex;
+static DEFINE_MUTEX(epmutex);
/* Safe wake up implementation */
static struct poll_safewake psw;
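
Note that the hunk above also replaces the run-time mutex_init(&epmutex) call (dropped from eventpoll_init() at the end of this patch) with static initialization via DEFINE_MUTEX, so epmutex no longer depends on init ordering. A minimal, hypothetical module contrasting the two styles (all names here are illustrative, not part of the patch):

#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(static_lock);	/* fully initialized at compile time */
static struct mutex runtime_lock;	/* must be set up before first use */

static int __init mutex_demo_init(void)
{
	mutex_init(&runtime_lock);	/* the style the patch removes for epmutex */

	mutex_lock(&static_lock);	/* safe even if no init code has run yet */
	mutex_unlock(&static_lock);
	return 0;
}

static void __exit mutex_demo_exit(void)
{
}

module_init(mutex_demo_init);
module_exit(mutex_demo_exit);
MODULE_LICENSE("GPL");
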
@@ -240,6 +253,33 @@ static struct kmem_cache *epi_cache __read_mostly;
/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;
+#ifdef CONFIG_SYSCTL
+
+#include <linux/sysctl.h>
+
+static int zero;
+
+ctl_table epoll_table[] = {
+ {
+ .procname = "max_user_instances",
+ .data = &max_user_instances,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
+ {
+ .procname = "max_user_watches",
+ .data = &max_user_watches,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
+ { .ctl_name = 0 }
+};
+#endif /* CONFIG_SYSCTL */
+
/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
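
The table above exposes both limits as writable files under /proc/sys/fs/epoll/ (the paths follow directly from the .procname fields). A small userspace sketch, assuming a kernel with this patch applied, that prints the current values:

#include <stdio.h>

/* Read a single integer from a /proc sysctl file; returns -1 on error. */
static long read_limit(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("max_user_instances = %ld\n",
	       read_limit("/proc/sys/fs/epoll/max_user_instances"));
	printf("max_user_watches   = %ld\n",
	       read_limit("/proc/sys/fs/epoll/max_user_watches"));
	return 0;
}
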
@@ -402,6 +442,8 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
/* At this point it is safe to free the eventpoll item */
kmem_cache_free(epi_cache, epi);
+ atomic_dec(&ep->user->epoll_watches);
+
DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
current, ep, file));
@@ -449,6 +491,8 @@ static void ep_free(struct eventpoll *ep)
mutex_unlock(&epmutex);
mutex_destroy(&ep->mtx);
+ atomic_dec(&ep->user->epoll_devs);
+ free_uid(ep->user);
kfree(ep);
}
@@ -532,10 +576,19 @@ void eventpoll_release_file(struct file *file)
static int ep_alloc(struct eventpoll **pep)
{
- struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ int error;
+ struct user_struct *user;
+ struct eventpoll *ep;
- if (!ep)
- return -ENOMEM;
+ user = get_current_user();
+ error = -EMFILE;
+ if (unlikely(atomic_read(&user->epoll_devs) >=
+ max_user_instances))
+ goto free_uid;
+ error = -ENOMEM;
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (unlikely(!ep))
+ goto free_uid;
spin_lock_init(&ep->lock);
mutex_init(&ep->mtx);
@@ -544,12 +597,17 @@ static int ep_alloc(struct eventpoll **pep)
INIT_LIST_HEAD(&ep->rdllist);
ep->rbr = RB_ROOT;
ep->ovflist = EP_UNACTIVE_PTR;
+ ep->user = user;
*pep = ep;
DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
current, ep));
return 0;
+
+free_uid:
+ free_uid(user);
+ return error;
}
/*
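
With the check in ep_alloc() above, epoll_create() starts failing with EMFILE once a user already owns max_user_instances epoll descriptors. A hedged userspace probe (assumes a kernel with this patch; it does not distinguish the per-user epoll limit from the ordinary open-file limit, which reports the same errno):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>

int main(void)
{
	int n = 0;

	/* Open epoll instances until some limit is hit. */
	for (;;) {
		if (epoll_create(1) < 0) {
			printf("stopped after %d instances: %s\n",
			       n, strerror(errno));
			break;
		}
		n++;
	}
	return 0;
}
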
@@ -703,9 +761,11 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
struct epitem *epi;
struct ep_pqueue epq;
- error = -ENOMEM;
+ if (unlikely(atomic_read(&ep->user->epoll_watches) >=
+ max_user_watches))
+ return -ENOSPC;
if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
- goto error_return;
+ return -ENOMEM;
/* Item initialization follow here ... */
INIT_LIST_HEAD(&epi->rdllink);
@@ -735,6 +795,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
* install process. Namely an allocation for a wait queue failed due
* high memory pressure.
*/
+ error = -ENOMEM;
if (epi->nwait < 0)
goto error_unregister;
@@ -765,6 +826,8 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
spin_unlock_irqrestore(&ep->lock, flags);
+ atomic_inc(&ep->user->epoll_watches);
+
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&psw, &ep->poll_wait);
@@ -789,7 +852,7 @@ error_unregister:
spin_unlock_irqrestore(&ep->lock, flags);
kmem_cache_free(epi_cache, epi);
-error_return:
+
return error;
}
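
On the watch side, EPOLL_CTL_ADD now fails with ENOSPC once the calling user's total watch count reaches max_user_watches. A minimal sketch of the corresponding userspace error handling (the pipe is just a convenient descriptor to watch; nothing here is specific to this patch beyond the ENOSPC case):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	struct epoll_event ev = { .events = EPOLLIN };
	int epfd = epoll_create(1);

	if (epfd < 0 || pipe(pipefd) < 0) {
		perror("setup");
		return 1;
	}

	ev.data.fd = pipefd[0];
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0) {
		/* With this patch, ENOSPC means the per-user watch
		 * budget (max_user_watches) is exhausted. */
		fprintf(stderr, "EPOLL_CTL_ADD: %s\n", strerror(errno));
		return errno == ENOSPC ? 2 : 1;
	}

	printf("watch registered\n");
	close(epfd);
	close(pipefd[0]);
	close(pipefd[1]);
	return 0;
}
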
@@ -1078,6 +1141,7 @@ asmlinkage long sys_epoll_create1(int flags)
flags & O_CLOEXEC);
if (fd < 0)
ep_free(ep);
+ atomic_inc(&ep->user->epoll_devs);
error_return:
DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
@@ -1299,7 +1363,12 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
static int __init eventpoll_init(void)
{
- mutex_init(&epmutex);
+ struct sysinfo si;
+
+ si_meminfo(&si);
+ max_user_instances = 128;
+ max_user_watches = (((si.totalram - si.totalhigh) / 32) << PAGE_SHIFT) /
+ EP_ITEM_COST;
/* Initialize the structure used to perform safe poll wait head wake ups */
ep_poll_safewake_init(&psw);
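
The initialization above sizes the default watch budget at roughly 1/32 of low memory (si.totalram and si.totalhigh are in pages, so the shift by PAGE_SHIFT converts to bytes), divided by the per-watch cost EP_ITEM_COST. A standalone sketch of that arithmetic with assumed numbers: 4 KiB pages, 1 GiB of low memory, and EP_ITEM_COST taken as 128 bytes purely for illustration (the real value is sizeof(struct epitem) + sizeof(struct eppoll_entry) for the particular build):

#include <stdio.h>

int main(void)
{
	unsigned long lowmem_pages = 262144;	/* assumed: 1 GiB of low memory */
	unsigned long page_shift   = 12;	/* assumed: 4 KiB pages */
	unsigned long ep_item_cost = 128;	/* assumed per-watch cost in bytes */

	/* Mirrors: (((totalram - totalhigh) / 32) << PAGE_SHIFT) / EP_ITEM_COST */
	unsigned long budget  = (lowmem_pages / 32) << page_shift;
	unsigned long watches = budget / ep_item_cost;

	printf("budget = %lu bytes, max_user_watches ~= %lu\n", budget, watches);
	return 0;
}
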