Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 98c19d45ae4f..6759192e69de 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1443,8 +1443,21 @@ static inline bool free_pages_prezeroed(void)
 	page_poisoning_enabled();
 }
 
+inline void post_alloc_hook(struct page *page, unsigned int order,
+				gfp_t gfp_flags)
+{
+	set_page_private(page, 0);
+	set_page_refcounted(page);
+
+	kasan_alloc_pages(page, order);
+	arch_alloc_page(page, order);
+	kernel_map_pages(page, 1 << order, 1);
+	kernel_poison_pages(page, 1 << order, 1);
+	set_page_owner(page, order, gfp_flags);
+}
+
 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
-							int alloc_flags)
+								int alloc_flags)
 {
 	int i;
 
@@ -1454,13 +1467,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 		return 1;
 	}
 
-	set_page_private(page, 0);
-	set_page_refcounted(page);
-
-	kasan_alloc_pages(page, order);
-	arch_alloc_page(page, order);
-	kernel_map_pages(page, 1 << order, 1);
-	kernel_poison_pages(page, 1 << order, 1);
+	post_alloc_hook(page, order, gfp_flags);
 
 	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
 		for (i = 0; i < (1 << order); i++)
@@ -1469,8 +1476,6 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
 
-	set_page_owner(page, order, gfp_flags);
-
 	/*
 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
 	 * allocate the page. The expectation is that the caller is taking
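
For readers following the three hunks above: the patch factors the post-allocation initialization sequence out of prep_new_page() into a new post_alloc_hook() helper, and moves the set_page_owner() call into that helper as well, presumably so that other paths that hand out free pages can run the same initialization without duplicating prep_new_page(). Below is a sketch of how the two functions read once the patch is applied, reconstructed only from the lines visible in this diff; elided code is marked in comments, and this is a reading aid, not the verbatim file:

inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	set_page_private(page, 0);
	set_page_refcounted(page);

	kasan_alloc_pages(page, order);
	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kernel_poison_pages(page, 1 << order, 1);
	set_page_owner(page, order, gfp_flags);	/* moved here from the tail of prep_new_page() */
}

static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
								int alloc_flags)
{
	int i;

	/* ... bad-page checks elided (the "return 1" path in the second hunk) ... */

	post_alloc_hook(page, order, gfp_flags);

	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
		for (i = 0; i < (1 << order); i++)
			/* ... per-page zeroing, not shown in this diff ... */;

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/* ... pfmemalloc bookkeeping elided (see the comment in the last hunk) ... */
}

Note that the change is behavior-preserving for prep_new_page() itself: the same calls run in the same order on the allocation path, with set_page_owner() simply executing earlier relative to the __GFP_ZERO and __GFP_COMP handling.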