path: root/mm/zcache.c
author     Shiraz Hashim <shashim@codeaurora.org>   2016-03-05 13:17:40 +0530
committer  David Keitel <dkeitel@codeaurora.org>    2016-03-25 16:03:08 -0700
commit     4c2cff20a2fbbff1199a7e44114197c406ab3add (patch)
tree       37db58425de12ec028083b1431d2b62522b67963 /mm/zcache.c
parent     563339135f4176f6891806615eec8107e3a126ba (diff)
mm: zcache: fix locking sequence
A deadlock is observed in the zcache reclaim paths because the paths
take the same pair of locks in different orders:

Core#0:                              Core#1:
|spin_bug()                          |do_raw_write_lock()
|do_raw_spin_lock()                  |_raw_write_lock_irqsave()
|_raw_spin_lock_irqsave()            |zcache_rbnode_isolate()
|zcache_flush_inode()                |zcache_load_delete_zaddr()
|__cleancache_invalidate_inode()     |zcache_evict_zpage()
|truncate_inode_pages_range()        |zbud_reclaim_page()
|truncate_inode_pages()              |zcache_scan()
|truncate_inode_pages_final()        |shrink_slab_node()
|ext4_evict_inode()                  |shrink_slab()
|evict()                             |try_to_free_pages()
|dispose_list()                      |__alloc_pages_nodemask()
|prune_icache_sb()                   |alloc_kmem_pages_node()
|super_cache_scan()                  |copy_process.part.52()
|shrink_slab_node()                  |do_fork()
|shrink_slab()                       |sys_clone()
|kswapd_shrink_zone.constprop        |el0_svc()
|balance_pgdat()
|kswapd()
|kthread()
|ret_from_fork()

The deadlock happens because alternate sequences are followed when
taking zpool->rb_lock (which protects the zpool rb tree) and
rbnode->ra_lock (which protects the radix tree maintained by the
rbtree node).

Fix the sequence in which the locks are taken to avoid the deadlock.

Change-Id: I32db23268f63eb8eb5aee30e4462c190e2e02f48
Signed-off-by: Shiraz Hashim <shashim@codeaurora.org>
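The two stacks form a classic AB-BA inversion: Core#0
(zcache_flush_inode) appears to hold zpool->rb_lock while spinning on
rbnode->ra_lock, and Core#1 (the pre-patch zcache_load_delete_zaddr)
holds ra_lock while blocking on rb_lock inside zcache_rbnode_isolate().
Below is a minimal userspace sketch of that inversion, not the zcache
code: pthread mutexes stand in for the kernel rwlock/spinlock pair and
the thread bodies are illustrative only. Build with: gcc abba.c -pthread

/* Userspace sketch of the AB-BA inversion above; illustrative only. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rb_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for zpool->rb_lock */
static pthread_mutex_t ra_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for rbnode->ra_lock */

/* Core#0 analogue (zcache_flush_inode): rb_lock, then ra_lock. */
static void *flush_path(void *unused)
{
	pthread_mutex_lock(&rb_lock);
	pthread_mutex_lock(&ra_lock);   /* spins if delete_path holds ra_lock */
	pthread_mutex_unlock(&ra_lock);
	pthread_mutex_unlock(&rb_lock);
	return NULL;
}

/* Core#1 analogue (pre-patch zcache_load_delete_zaddr): ra_lock,
 * then rb_lock inside the isolate step -- the opposite order. */
static void *delete_path(void *unused)
{
	pthread_mutex_lock(&ra_lock);
	pthread_mutex_lock(&rb_lock);   /* blocks if flush_path holds rb_lock: deadlock */
	pthread_mutex_unlock(&rb_lock);
	pthread_mutex_unlock(&ra_lock);
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, flush_path, NULL);
	pthread_create(&t1, NULL, delete_path, NULL);
	pthread_join(t0, NULL);   /* may never return, depending on interleaving */
	pthread_join(t1, NULL);
	puts("lucky interleaving, no deadlock this run");
	return 0;
}

Whether this program hangs depends on scheduling, which is exactly why
such inversions tend to surface only under memory pressure, as in the
reclaim paths traced above.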
Diffstat (limited to 'mm/zcache.c')
-rw-r--r--   mm/zcache.c   23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/mm/zcache.c b/mm/zcache.c
index a8b0e64de7ed..28c070b99e06 100644
--- a/mm/zcache.c
+++ b/mm/zcache.c
@@ -568,10 +568,17 @@ static int zcache_store_zaddr(struct zcache_pool *zpool,
/* Insert zcache_ra_handle to ratree */
ret = radix_tree_insert(&rbnode->ratree, ra_index,
(void *)zaddr);
- if (unlikely(ret))
- if (zcache_rbnode_empty(rbnode))
- zcache_rbnode_isolate(zpool, rbnode, 0);
spin_unlock_irqrestore(&rbnode->ra_lock, flags);
+ if (unlikely(ret)) {
+ write_lock_irqsave(&zpool->rb_lock, flags);
+ spin_lock(&rbnode->ra_lock);
+
+ if (zcache_rbnode_empty(rbnode))
+ zcache_rbnode_isolate(zpool, rbnode, 1);
+
+ spin_unlock(&rbnode->ra_lock);
+ write_unlock_irqrestore(&zpool->rb_lock, flags);
+ }

	kref_put(&rbnode->refcount, zcache_rbnode_release);
return ret;
@@ -597,10 +604,16 @@ static void *zcache_load_delete_zaddr(struct zcache_pool *zpool,
spin_lock_irqsave(&rbnode->ra_lock, flags);
zaddr = radix_tree_delete(&rbnode->ratree, ra_index);
- if (zcache_rbnode_empty(rbnode))
- zcache_rbnode_isolate(zpool, rbnode, 0);
	spin_unlock_irqrestore(&rbnode->ra_lock, flags);

+ /* rb_lock and ra_lock must be taken again in the given sequence */
+ write_lock_irqsave(&zpool->rb_lock, flags);
+ spin_lock(&rbnode->ra_lock);
+ if (zcache_rbnode_empty(rbnode))
+ zcache_rbnode_isolate(zpool, rbnode, 1);
+ spin_unlock(&rbnode->ra_lock);
+ write_unlock_irqrestore(&zpool->rb_lock, flags);
+
kref_put(&rbnode->refcount, zcache_rbnode_release);
out:
return zaddr;
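Both hunks apply the same pattern: the radix-tree operation completes
under ra_lock alone, ra_lock is dropped, and only then are rb_lock and
ra_lock taken in the one agreed order before zcache_rbnode_empty() and
zcache_rbnode_isolate() run. The emptiness check is repeated because
another CPU may have touched the tree in the unlocked window. A minimal
userspace sketch of that shape follows; node_is_empty() and
isolate_node() are hypothetical stand-ins, not zcache.c functions.

/* Sketch of the patched ordering; illustrative only.
 * Build with: gcc fixed.c -pthread */
#include <pthread.h>

static pthread_mutex_t rb_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ra_lock = PTHREAD_MUTEX_INITIALIZER;

static int node_is_empty(void) { return 1; }  /* stand-in for zcache_rbnode_empty() */
static void isolate_node(void) { }            /* stand-in for zcache_rbnode_isolate() */

static void delete_path_fixed(void)
{
	pthread_mutex_lock(&ra_lock);
	/* ... the radix_tree_delete() step happens here ... */
	pthread_mutex_unlock(&ra_lock);

	/* rb_lock and ra_lock are (re)taken in one fixed order ... */
	pthread_mutex_lock(&rb_lock);
	pthread_mutex_lock(&ra_lock);

	/* ... and emptiness is re-checked, since the node may have
	 * gained an entry while no lock was held. */
	if (node_is_empty())
		isolate_node();

	pthread_mutex_unlock(&ra_lock);
	pthread_mutex_unlock(&rb_lock);
}

int main(void)
{
	delete_path_fixed();
	return 0;
}

Note that the last argument of zcache_rbnode_isolate() also changes
from 0 to 1 in both hunks; since the callers now hold zpool->rb_lock
themselves, the flag presumably tells the function that rb_lock is
already taken.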