author    Vinayak Menon <vinmenon@codeaurora.org>   2016-03-08 15:08:27 +0530
committer David Keitel <dkeitel@codeaurora.org>     2016-03-25 16:03:17 -0700
commit    30781083ea60e69950381596660b8c3b4a169be4 (patch)
tree      a554d029ae09cae2933437a9ba45a7ce67717731 /mm
parent    43bddb5b6c46563d32bae15d28a00240606e8ff0 (diff)
mm: zbud: prevent softirq during zbud alloc, free and reclaim
The following deadlock is observed.

Core 2 is waiting on mapping->tree_lock, which is held by core 6:

do_raw_spin_lock
raw_spin_lock_irq
atomic_cmpxchg
page_freeze_refs
__remove_mapping
shrink_page_list
shrink_inactive_list
shrink_list
shrink_lruvec
shrink_zone
shrink_zones
do_try_to_free_pages
try_to_free_pages(?, ?, ?, ?)
__perform_reclaim
__alloc_pages_direct_reclaim
__alloc_pages_slowpath
__alloc_pages_nodemask
alloc_kmem_pages_node
alloc_thread_info_node
dup_task_struct
copy_process.part.56
do_fork
sys_clone
el0_svc_naked

Core 6, after taking mapping->tree_lock, is waiting on the zbud pool
lock, which is held by core 5:

zbud_alloc
zcache_store_page
__cleancache_put_page
cleancache_put_page
__delete_from_page_cache
spin_unlock_irq
__remove_mapping
shrink_page_list
shrink_inactive_list
shrink_list
shrink_lruvec
shrink_zone
bitmap_zero
__nodes_clear
kswapd_shrink_zone.constprop.58
balance_pgdat
kswapd_try_to_sleep
kswapd
kthread
ret_from_fork

Core 5, after taking the zbud pool lock in zbud_free, received an IRQ.
On IRQ exit, softirqs were run, and end_page_writeback tried to take
mapping->tree_lock, which is already held by core 6. Deadlock:

do_raw_spin_lock
raw_spin_lock_irqsave
test_clear_page_writeba
end_page_writeback
ext4_finish_bio
ext4_end_bio
bio_endio
blk_update_request
end_clone_bio
bio_endio
blk_update_request
blk_update_bidi_request
blk_end_bidi_request
blk_end_request
mmc_blk_cmdq_complete_r
mmc_cmdq_softirq_done
blk_done_softirq
static_key_count
static_key_false
trace_softirq_exit
__do_softirq()
tick_irq_exit
irq_exit()
set_irq_regs
__handle_domain_irq
gic_handle_irq
el1_irq
exception
__list_del_entry
list_del
zbud_free
zcache_load_page
__cleancache_get_page(?

So protect zbud_alloc/free/reclaim with spin_lock_bh.

CRs-Fixed: 986783
Change-Id: Ib0605b38e7371c29316ed81e43549a0b9503d531
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
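For illustration only (this sketch is not part of the commit; pool_lock
and pool_alloc_sketch are hypothetical names), the pattern the fix
adopts looks like this: a spinlock whose holder can end up blocking a
softirq, directly or through a cross-CPU lock chain as in the traces
above, must be taken with the _bh variants so that softirqs are kept
off the holder's CPU for the duration of the critical section.

/*
 * Minimal sketch, assuming a pool_lock standing in for the zbud
 * pool->lock. Hypothetical names; not the actual zbud code.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(pool_lock);

/* Process-context path, e.g. reclaim calling into the allocator. */
static void pool_alloc_sketch(void)
{
	/*
	 * With plain spin_lock(), an IRQ can arrive while the lock is
	 * held, and softirqs run on IRQ exit. If a softirq handler
	 * then waits on a lock whose holder (possibly on another CPU,
	 * as in the traces above) in turn needs pool_lock, this CPU
	 * spins forever. spin_lock_bh() defers local softirq
	 * execution until the lock is released, breaking the cycle.
	 */
	spin_lock_bh(&pool_lock);
	/* ... carve the allocation out of the pool ... */
	spin_unlock_bh(&pool_lock);	/* pending softirqs run here */
}

spin_lock_irqsave() would also close this race, but it masks hardirqs
as well; since the conflicting path here runs at softirq level
(blk_done_softirq), disabling bottom halves is sufficient and cheaper.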
Diffstat (limited to 'mm')
-rw-r--r--  mm/zbud.c | 26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/zbud.c b/mm/zbud.c
index 011e42bbf056..04359b845ead 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -364,7 +364,7 @@ int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
return -ENOSPC;
chunks = size_to_chunks(size);
- spin_lock(&pool->lock);
+ spin_lock_bh(&pool->lock);
/* First, try to find an unbuddied zbud page. */
zhdr = NULL;
@@ -383,11 +383,11 @@ int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
}
/* Couldn't find unbuddied zbud page, create new one */
- spin_unlock(&pool->lock);
+ spin_unlock_bh(&pool->lock);
page = alloc_page(gfp);
if (!page)
return -ENOMEM;
- spin_lock(&pool->lock);
+ spin_lock_bh(&pool->lock);
pool->pages_nr++;
zhdr = init_zbud_page(page);
bud = FIRST;
@@ -415,7 +415,7 @@ found:
*handle = encode_handle(zhdr, bud);
if ((gfp & __GFP_ZERO) && found)
memset((void *)*handle, 0, size);
- spin_unlock(&pool->lock);
+ spin_unlock_bh(&pool->lock);
return 0;
}
@@ -435,7 +435,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
struct zbud_header *zhdr;
int freechunks;
- spin_lock(&pool->lock);
+ spin_lock_bh(&pool->lock);
zhdr = handle_to_zbud_header(handle);
/* If first buddy, handle will be page aligned */
@@ -446,7 +446,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
if (zhdr->under_reclaim) {
/* zbud page is under reclaim, reclaim will free */
- spin_unlock(&pool->lock);
+ spin_unlock_bh(&pool->lock);
return;
}
@@ -464,7 +464,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle)
list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
}
- spin_unlock(&pool->lock);
+ spin_unlock_bh(&pool->lock);
}
#define list_tail_entry(ptr, type, member) \
@@ -511,10 +511,10 @@ int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
struct zbud_header *zhdr;
unsigned long first_handle = 0, last_handle = 0;
- spin_lock(&pool->lock);
+ spin_lock_bh(&pool->lock);
if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
retries == 0) {
- spin_unlock(&pool->lock);
+ spin_unlock_bh(&pool->lock);
return -EINVAL;
}
for (i = 0; i < retries; i++) {
@@ -533,7 +533,7 @@ int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
first_handle = encode_handle(zhdr, FIRST);
if (zhdr->last_chunks)
last_handle = encode_handle(zhdr, LAST);
- spin_unlock(&pool->lock);
+ spin_unlock_bh(&pool->lock);
/* Issue the eviction callback(s) */
if (first_handle) {
@@ -547,7 +547,7 @@ int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
goto next;
}
next:
- spin_lock(&pool->lock);
+ spin_lock_bh(&pool->lock);
zhdr->under_reclaim = false;
if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
/*
@@ -556,7 +556,7 @@ next:
*/
free_zbud_page(zhdr);
pool->pages_nr--;
- spin_unlock(&pool->lock);
+ spin_unlock_bh(&pool->lock);
return 0;
} else if (zhdr->first_chunks == 0 ||
zhdr->last_chunks == 0) {
@@ -571,7 +571,7 @@ next:
/* add to beginning of LRU */
list_add(&zhdr->lru, &pool->lru);
}
- spin_unlock(&pool->lock);
+ spin_unlock_bh(&pool->lock);
return -EAGAIN;
}