author	Sahitya Tummala <stummala@codeaurora.org>	2017-06-05 16:10:31 +0530
committer	Sahitya Tummala <stummala@codeaurora.org>	2017-06-12 13:11:12 +0530
commit	33b30b124fde308d6192a55df6f176f1c36a79ce (patch)
tree	bdd4e8bdfda235d6e4c9ce20f14eadf2fe215083 /fs
parent	c1a2472056c800ff46e0ac21a4b67c179a570ad0 (diff)
fs/mbcache: fix mb_cache_lru_list corruption
With the recent commit d07d314e7d1d ("fs/mbcache: fix use after free issue in mb_cache_shrink_scan()"), the ce entry is deleted from the mbcache list after ce->e_refcnt has been incremented under the global spinlock mb_cache_spinlock. If __mb_cache_entry_release() is waiting for mb_cache_spinlock at the same time in order to add the same ce to mb_cache_lru_list, and it acquires the lock after mb_cache_entry_get() has deleted the entry, it corrupts the list, because that element is freed immediately after mb_cache_entry_get(). The next time the list is accessed to delete or add another ce, the corruption shows up.

Fix this by synchronizing the two contexts with mb_cache_spinlock and evaluating the conditions (ce->e_used, ce->e_queued, ce->e_refcnt) in __mb_cache_entry_release() under the global lock before adding ce to mb_cache_lru_list.

Change-Id: I3e20fb4fa163755126e30be7aeca747d74215ed2
Signed-off-by: Sahitya Tummala <stummala@codeaurora.org>
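The core of the fix is that the release path must re-check whether the entry is still unused inside the same critical section that re-inserts it into the LRU, rather than relying on state read before the lock is taken. Below is a minimal userspace sketch of that pattern, not the mbcache code itself: a pthread mutex stands in for mb_cache_spinlock, and the cache_entry, lru_get() and lru_release() names are hypothetical.

/*
 * Minimal userspace sketch of the locking pattern, not the kernel code:
 * a pthread mutex stands in for mb_cache_spinlock, and cache_entry,
 * lru_get() and lru_release() are hypothetical names. The point is that
 * the "is this entry still unused?" check happens inside the same
 * critical section as the list insertion.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

struct cache_entry {
	struct list_node lru;	/* linked on the LRU while unused */
	atomic_int refcnt;	/* taken by lru_get()             */
	int used;		/* simplified stand-in for e_used */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_node lru_list = { &lru_list, &lru_list };

static int list_empty(const struct list_node *n) { return n->next == n; }

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Analogue of mb_cache_entry_get(): take a reference and unlink the
 * entry from the LRU so a shrinker cannot free it behind our back. */
static void lru_get(struct cache_entry *ce)
{
	pthread_mutex_lock(&lru_lock);
	atomic_fetch_add(&ce->refcnt, 1);
	if (!list_empty(&ce->lru))
		list_del_init(&ce->lru);
	pthread_mutex_unlock(&lru_lock);
}

/* Analogue of __mb_cache_entry_release(): park the entry back on the
 * LRU only if nobody holds it, and decide that under the lock.  If the
 * check were done before taking the lock, a concurrent lru_get() could
 * already own (and later free) the entry, and the insertion would
 * corrupt the list. */
static void lru_release(struct cache_entry *ce)
{
	pthread_mutex_lock(&lru_lock);
	if (!ce->used && atomic_load(&ce->refcnt) == 0) {
		if (list_empty(&ce->lru))
			list_add_tail(&ce->lru, &lru_list);
	}
	pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
	struct cache_entry ce = { .lru = { &ce.lru, &ce.lru } };

	lru_get(&ce);		/* refcnt == 1, entry is off the LRU */
	lru_release(&ce);	/* reference still held: stays off the LRU */
	printf("on lru after release: %d\n", !list_empty(&ce.lru));
	return 0;
}

In the buggy sequence described above, the release path decided to re-add the entry based on state observed outside the critical section, so it could insert an entry that mb_cache_entry_get() had just taken and that was about to be freed; moving the check under the global lock closes that window.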
Diffstat (limited to 'fs')
-rw-r--r--	fs/mbcache.c	15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index ab1da987d1ae..de509271d031 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -222,8 +222,19 @@ __mb_cache_entry_release(struct mb_cache_entry *ce)
 		 * then reacquire the lock in the proper order.
 		 */
 		spin_lock(&mb_cache_spinlock);
-		if (list_empty(&ce->e_lru_list))
-			list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
+		/*
+		 * Evaluate the conditions under global lock mb_cache_spinlock,
+		 * to check if mb_cache_entry_get() is running now
+		 * and has already deleted the entry from mb_cache_lru_list
+		 * and incremented ce->e_refcnt to prevent further additions
+		 * to mb_cache_lru_list.
+		 */
+		if (!(ce->e_used || ce->e_queued ||
+			atomic_read(&ce->e_refcnt))) {
+			if (list_empty(&ce->e_lru_list))
+				list_add_tail(&ce->e_lru_list,
+					&mb_cache_lru_list);
+		}
 		spin_unlock(&mb_cache_spinlock);
 	}
 	__spin_unlock_mb_cache_entry(ce);