summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJoonsoo Kim <iamjoonsoo.kim@lge.com>2016-07-26 15:23:58 -0700
committerPrakash Gupta <guptap@codeaurora.org>2017-07-07 15:39:34 +0530
commite448e4e545c589d8ab670e73da35e1d7d1ef8226 (patch)
tree8db43e82548197f869f51bfbeb1660fae874af0a
parent0db2683327f6f9ab1ecded2d1128e74748bbd095 (diff)
mm/page_alloc: introduce post allocation processing on page allocator
This patch is motivated by Hugh and Vlastimil's concern [1]. There are two ways to get a freepage from the allocator. One is using the normal memory allocation API and the other is __isolate_free_page(), which is internally used for compaction and pageblock isolation. The latter usage is rather tricky since it doesn't do the whole post allocation processing done by the normal API. One problematic thing I already know is that a poisoned page would not be checked if it is allocated by __isolate_free_page(). Perhaps, there would be more. We could add more debug logic for allocated pages in the future and this separation would cause more problems. I'd like to fix this situation at this time. The solution is simple. This patch commonizes some logic for newly allocated pages and uses it on all sites. This will solve the problem. [1] http://marc.info/?i=alpine.LSU.2.11.1604270029350.7066%40eggly.anvils%3E Change-Id: I601ec8ce8ee4ab76cd408ff2148dd8c73b959fc2 [iamjoonsoo.kim@lge.com: mm-page_alloc-introduce-post-allocation-processing-on-page-allocator-v3] Link: http://lkml.kernel.org/r/1464230275-25791-7-git-send-email-iamjoonsoo.kim@lge.com Link: http://lkml.kernel.org/r/1466150259-27727-9-git-send-email-iamjoonsoo.kim@lge.com Link: http://lkml.kernel.org/r/1464230275-25791-7-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Minchan Kim <minchan@kernel.org> Cc: Alexander Potapenko <glider@google.com> Cc: Hugh Dickins <hughd@google.com> Cc: Michal Hocko <mhocko@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Git-commit: 46f24fd857b37bb86ddd5d0ac3d194e984dfdf1c Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git [guptap@codeaurora.org: resolve trivial merge conflicts] Signed-off-by: Prakash Gupta <guptap@codeaurora.org>
-rw-r--r--mm/compaction.c8
-rw-r--r--mm/internal.h2
-rw-r--r--mm/page_alloc.c25
-rw-r--r--mm/page_isolation.c6
4 files changed, 19 insertions, 22 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 1af10625f5d5..f96a58e1843a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -69,14 +69,8 @@ static void map_pages(struct list_head *list)
order = page_private(page);
nr_pages = 1 << order;
- set_page_private(page, 0);
- set_page_refcounted(page);
- kasan_alloc_pages(page, order);
- arch_alloc_page(page, order);
- kernel_map_pages(page, nr_pages, 1);
-
- set_page_owner(page, order, __GFP_MOVABLE);
+ post_alloc_hook(page, order, __GFP_MOVABLE);
if (order)
split_page(page, order);
diff --git a/mm/internal.h b/mm/internal.h
index 7b9e313d9dea..46d27f378885 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -182,6 +182,8 @@ extern void prep_compound_page(struct page *page, unsigned int order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif
+extern void post_alloc_hook(struct page *page, unsigned int order,
+ gfp_t gfp_flags);
extern int user_min_free_kbytes;
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 98c19d45ae4f..6759192e69de 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1443,8 +1443,21 @@ static inline bool free_pages_prezeroed(void)
page_poisoning_enabled();
}
+inline void post_alloc_hook(struct page *page, unsigned int order,
+ gfp_t gfp_flags)
+{
+ set_page_private(page, 0);
+ set_page_refcounted(page);
+
+ kasan_alloc_pages(page, order);
+ arch_alloc_page(page, order);
+ kernel_map_pages(page, 1 << order, 1);
+ kernel_poison_pages(page, 1 << order, 1);
+ set_page_owner(page, order, gfp_flags);
+}
+
static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
- int alloc_flags)
+ int alloc_flags)
{
int i;
@@ -1454,13 +1467,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
return 1;
}
- set_page_private(page, 0);
- set_page_refcounted(page);
-
- kasan_alloc_pages(page, order);
- arch_alloc_page(page, order);
- kernel_map_pages(page, 1 << order, 1);
- kernel_poison_pages(page, 1 << order, 1);
+ post_alloc_hook(page, order, gfp_flags);
if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
for (i = 0; i < (1 << order); i++)
@@ -1469,8 +1476,6 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
- set_page_owner(page, order, gfp_flags);
-
/*
* page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
* allocate the page. The expectation is that the caller is taking
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 978b955ef470..efb6c3c38c01 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -126,11 +126,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
out:
spin_unlock_irqrestore(&zone->lock, flags);
if (isolated_page) {
- kasan_alloc_pages(page, order);
- arch_alloc_page(page, order);
- kernel_map_pages(page, (1 << order), 1);
- set_page_refcounted(page);
- set_page_owner(page, order, __GFP_MOVABLE);
+ post_alloc_hook(page, order, __GFP_MOVABLE);
__free_pages(isolated_page, order);
}
}