author | Michael Bestas <mkbestas@lineageos.org> | 2020-10-23 18:21:25 +0300 |
---|---|---|
committer | Michael Bestas <mkbestas@lineageos.org> | 2020-10-23 18:21:25 +0300 |
commit | 794b42a9a5fd60bd14413abedafdd2a9b07b1308 (patch) | |
tree | 2c00d2a954de42f9dc54b3a7894c1ad99a2e8a8b /fs/eventpoll.c | |
parent | 24b3bdcf71522f4d711958b01e8a8f234fe2450d (diff) | |
parent | 7a9986e91f90994623e4c5de1effce14729dba96 (diff) | |
Merge branch 'android-4.4-p' of https://android.googlesource.com/kernel/common into lineage-17.1-caf-msm8998
This brings LA.UM.8.4.r1-06000-8x98.0 up to date with
https://android.googlesource.com/kernel/common/ android-4.4-p at commit:
7a9986e91f909 UPSTREAM: binder: fix UAF when releasing todo list
Conflicts:
fs/eventpoll.c
Change-Id: I77260d03cb539d7e7eefcea360aee2d59bb9e0cb
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r-- | fs/eventpoll.c | 73 |
1 file changed, 31 insertions, 42 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index cbb37284a5d6..b484d4500687 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -218,8 +218,7 @@ struct eventpoll {
         struct file *file;
 
         /* used to optimize loop detection check */
-        int visited;
-        struct list_head visited_list_link;
+        u64 gen;
 };
 
 /* Wait structure used by the poll hooks */
@@ -263,6 +262,8 @@ static long max_user_watches __read_mostly;
  */
 static DEFINE_MUTEX(epmutex);
 
+static u64 loop_check_gen = 0;
+
 /* Used to check for epoll file descriptor inclusion loops */
 static struct nested_calls poll_loop_ncalls;
 
@@ -278,9 +279,6 @@ static struct kmem_cache *epi_cache __read_mostly;
 /* Slab cache used to allocate "struct eppoll_entry" */
 static struct kmem_cache *pwq_cache __read_mostly;
 
-/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
-static LIST_HEAD(visited_list);
-
 /*
  * List of files with newly added links, where we may need to limit the number
  * of emanating paths. Protected by the epmutex.
@@ -1235,7 +1233,7 @@ static int reverse_path_check(void)
 
 static int ep_create_wakeup_source(struct epitem *epi)
 {
-        const char *name;
+        struct name_snapshot n;
         struct wakeup_source *ws;
         char task_comm_buf[TASK_COMM_LEN];
         char buf[64];
@@ -1250,10 +1248,11 @@ static int ep_create_wakeup_source(struct epitem *epi)
                         return -ENOMEM;
         }
 
-        name = epi->ffd.file->f_path.dentry->d_name.name;
+        take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
         snprintf(buf, sizeof(buf), "epoll_%.*s_file:%s",
-                 (int)sizeof(task_comm_buf), task_comm_buf, name);
-        ws = wakeup_source_register(buf);
+                 (int)sizeof(task_comm_buf), task_comm_buf, n.name);
+        ws = wakeup_source_register(n.name);
+        release_dentry_name_snapshot(&n);
 
         if (!ws)
                 return -ENOMEM;
@@ -1313,6 +1312,22 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
                 RCU_INIT_POINTER(epi->ws, NULL);
         }
 
+        /* Add the current item to the list of active epoll hook for this file */
+        spin_lock(&tfile->f_lock);
+        list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+        spin_unlock(&tfile->f_lock);
+
+        /*
+         * Add the current item to the RB tree. All RB tree operations are
+         * protected by "mtx", and ep_insert() is called with "mtx" held.
+         */
+        ep_rbtree_insert(ep, epi);
+
+        /* now check if we've created too many backpaths */
+        error = -EINVAL;
+        if (full_check && reverse_path_check())
+                goto error_remove_epi;
+
         /* Initialize the poll table using the queue callback */
         epq.epi = epi;
         init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
@@ -1335,22 +1350,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
         if (epi->nwait < 0)
                 goto error_unregister;
 
-        /* Add the current item to the list of active epoll hook for this file */
-        spin_lock(&tfile->f_lock);
-        list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
-        spin_unlock(&tfile->f_lock);
-
-        /*
-         * Add the current item to the RB tree. All RB tree operations are
-         * protected by "mtx", and ep_insert() is called with "mtx" held.
-         */
-        ep_rbtree_insert(ep, epi);
-
-        /* now check if we've created too many backpaths */
-        error = -EINVAL;
-        if (full_check && reverse_path_check())
-                goto error_remove_epi;
-
         /* We have to drop the new item inside our item list to keep track of it */
         spin_lock_irqsave(&ep->lock, flags);
 
@@ -1376,6 +1375,8 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
         return 0;
 
+error_unregister:
+        ep_unregister_pollwait(ep, epi);
 error_remove_epi:
         spin_lock(&tfile->f_lock);
         list_del_rcu(&epi->fllink);
@@ -1383,9 +1384,6 @@ error_remove_epi:
 
         rb_erase(&epi->rbn, &ep->rbr);
 
-error_unregister:
-        ep_unregister_pollwait(ep, epi);
-
         /*
          * We need to do this because an event could have been arrived on some
          * allocated wait queue. Note that we don't care about the ep->ovflist
@@ -1707,13 +1705,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
         struct epitem *epi;
 
         mutex_lock_nested(&ep->mtx, call_nests + 1);
-        ep->visited = 1;
-        list_add(&ep->visited_list_link, &visited_list);
+        ep->gen = loop_check_gen;
         for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
                 epi = rb_entry(rbp, struct epitem, rbn);
                 if (unlikely(is_file_epoll(epi->ffd.file))) {
                         ep_tovisit = epi->ffd.file->private_data;
-                        if (ep_tovisit->visited)
+                        if (ep_tovisit->gen == loop_check_gen)
                                 continue;
                         error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
                                         ep_loop_check_proc, epi->ffd.file,
@@ -1754,18 +1751,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
  */
 static int ep_loop_check(struct eventpoll *ep, struct file *file)
 {
-        int ret;
-        struct eventpoll *ep_cur, *ep_next;
-
-        ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+        return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
                               ep_loop_check_proc, file, ep, current);
-        /* clear visited list */
-        list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
-                                                        visited_list_link) {
-                ep_cur->visited = 0;
-                list_del(&ep_cur->visited_list_link);
-        }
-        return ret;
 }
 
 static void clear_tfile_check_list(void)
@@ -1909,6 +1896,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
         mutex_lock_nested(&ep->mtx, 0);
         if (op == EPOLL_CTL_ADD) {
                 if (!list_empty(&f.file->f_ep_links) ||
+                                ep->gen == loop_check_gen ||
                                                 is_file_epoll(tf.file)) {
                         full_check = 1;
                         mutex_unlock(&ep->mtx);
@@ -1967,6 +1955,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 error_tgt_fput:
         if (full_check) {
                 clear_tfile_check_list();
+                loop_check_gen++;
                 mutex_unlock(&epmutex);
         }
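The substance of the merged epoll change is easiest to see in the ep_loop_check_proc() and ep_loop_check() hunks: the per-instance visited flag and the global visited_list are replaced by a u64 gen stamp compared against a global loop_check_gen, which epoll_ctl() bumps once a full check has finished. Below is a minimal, userspace-only C sketch of that bookkeeping pattern; struct node, visit() and run_check() are hypothetical names standing in for the kernel's epoll-specific types, and in the kernel the loop detection itself still comes from the bounded nesting depth of ep_call_nested(), with the generation stamp only preventing the same eventpoll from being re-walked within one pass.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CHILDREN 4

struct node {
        uint64_t gen;                      /* generation this node was last visited in */
        struct node *child[MAX_CHILDREN];  /* outgoing edges; unused slots stay NULL */
};

static uint64_t check_gen;                 /* plays the role of loop_check_gen */

/* Count nodes reachable from n, visiting each node at most once per pass. */
static unsigned int visit(struct node *n)
{
        unsigned int count = 1;

        n->gen = check_gen;                /* mark as seen in the current pass */
        for (size_t i = 0; i < MAX_CHILDREN; i++) {
                struct node *ch = n->child[i];

                if (ch && ch->gen != check_gen)  /* skip nodes already seen this pass */
                        count += visit(ch);
        }
        return count;
}

static unsigned int run_check(struct node *root)
{
        /*
         * Bumping the counter invalidates every mark from earlier passes in
         * O(1); there is no equivalent of the removed "clear visited list"
         * walk at the end of ep_loop_check().
         */
        check_gen++;
        return visit(root);
}

int main(void)
{
        struct node a = {0}, b = {0}, c = {0};

        /* A small graph with a shared node and a back edge: a -> b -> c -> a. */
        a.child[0] = &b;
        a.child[1] = &c;
        b.child[0] = &c;
        c.child[0] = &a;

        printf("first pass:  %u nodes\n", run_check(&a));
        printf("second pass: %u nodes\n", run_check(&a)); /* old marks are stale, not cleared */
        return 0;
}
```

The payoff mirrors the diff: finishing a pass costs a single counter increment, so the list_for_each_entry_safe() cleanup that ep_loop_check() used to run, and the visited_list itself, can be dropped.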