 mm/zcache.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/mm/zcache.c b/mm/zcache.c
index 8667fcfb971e..49b0aac126e6 100644
--- a/mm/zcache.c
+++ b/mm/zcache.c
@@ -70,6 +70,8 @@
 static u64 zcache_evict_filepages;
 static u64 zcache_reclaim_fail;
 static atomic_t zcache_stored_pages = ATOMIC_INIT(0);
+#define GFP_ZCACHE \
+	(__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
 /*
  * Zcache receives pages for compression through the Cleancache API and is able
  * to evict pages from its own compressed pool on an LRU basis in the case that
@@ -405,7 +407,8 @@ static int zcache_store_zaddr(struct zcache_pool *zpool,
 	rbnode = zcache_find_get_rbnode(zpool, zhandle->rb_index);
 	if (!rbnode) {
 		/* alloc and init a new rbnode */
-		rbnode = kmem_cache_alloc(zcache_rbnode_cache, GFP_KERNEL);
+		rbnode = kmem_cache_alloc(zcache_rbnode_cache,
+					  GFP_ZCACHE);
 		if (!rbnode)
 			return -ENOMEM;
 
@@ -523,7 +526,7 @@ static void zcache_store_page(int pool_id, struct cleancache_filekey key,
 
 	/* store zcache handle together with compressed page data */
 	ret = zbud_alloc(zpool->pool, zlen + sizeof(struct zcache_ra_handle),
-			 __GFP_NORETRY | __GFP_NOWARN, &zaddr);
+			 GFP_ZCACHE, &zaddr);
 	if (ret) {
 		zcache_zbud_alloc_fail++;
 		put_cpu_var(zcache_dstmem);
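
For readers unfamiliar with the flag combination, below is a minimal,
self-contained sketch of the allocate-and-degrade pattern both hunks
follow. It is not part of the patch: demo_node, demo_cache, demo_store,
and demo_init are hypothetical names invented for illustration; only the
GFP_ZCACHE definition and the kmem_cache_* calls mirror the commit.

/* Hypothetical sketch, not from the patch: a slab allocation using
 * GFP_ZCACHE that fails fast instead of pressuring the allocator.
 */
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define GFP_ZCACHE \
	(__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)

struct demo_node {
	unsigned long key;
};

static struct kmem_cache *demo_cache;	/* hypothetical slab cache */

static int demo_store(unsigned long key)
{
	struct demo_node *node;

	/*
	 * __GFP_NORETRY: give up rather than retry the allocation
	 * repeatedly; __GFP_NOWARN: suppress the allocation-failure
	 * warning; __GFP_NOMEMALLOC: never dip into the emergency
	 * reserves; __GFP_FS: filesystem activity is still permitted
	 * during any reclaim the allocator performs.
	 */
	node = kmem_cache_alloc(demo_cache, GFP_ZCACHE);
	if (!node)
		return -ENOMEM;	/* drop the store; do not stall the caller */

	node->key = key;
	/* ... insert node into the lookup structure ... */
	return 0;
}

static int __init demo_init(void)
{
	demo_cache = kmem_cache_create("demo_node",
				       sizeof(struct demo_node), 0, 0, NULL);
	return demo_cache ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Failing fast is acceptable here because zcache is only a cache sitting
behind the cleancache API: a store that returns -ENOMEM (or bumps
zcache_zbud_alloc_fail, as in the second hunk) simply means the page is
not cached and must be re-read from backing storage later, which is
preferable to an allocation that retries aggressively or consumes
emergency reserves.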