 mm/page_alloc.c | 37 ++++++++++---------------------------
 1 file changed, 10 insertions(+), 27 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a8f2c87792c3..8193809f3de0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2378,15 +2378,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
-/* Returns true if the allocation is likely for THP */
-static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
-{
-	if (order == pageblock_order &&
-	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
-		return true;
-	return false;
-}
-
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -2425,9 +2416,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	/* The decision whether to wake kswapd for THP is made later */
-	if (!is_thp_alloc(gfp_mask, order))
-		wake_all_kswapd(order, zonelist, high_zoneidx,
+	wake_all_kswapd(order, zonelist, high_zoneidx,
 					zone_idx(preferred_zone));
 
 	/*
@@ -2498,21 +2487,15 @@ rebalance:
 		goto got_pg;
 	sync_migration = true;
 
-	if (is_thp_alloc(gfp_mask, order)) {
-		/*
-		 * If compaction is deferred for high-order allocations, it is
-		 * because sync compaction recently failed. If this is the case
-		 * and the caller requested a movable allocation that does not
-		 * heavily disrupt the system then fail the allocation instead
-		 * of entering direct reclaim.
-		 */
-		if (deferred_compaction || contended_compaction)
-			goto nopage;
-
-		/* If process is willing to reclaim/compact then wake kswapd */
-		wake_all_kswapd(order, zonelist, high_zoneidx,
-					zone_idx(preferred_zone));
-	}
+	/*
+	 * If compaction is deferred for high-order allocations, it is because
+	 * sync compaction recently failed. If this is the case and the caller
+	 * requested a movable allocation that does not heavily disrupt the
+	 * system then fail the allocation instead of entering direct reclaim.
+	 */
+	if ((deferred_compaction || contended_compaction) &&
+	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		goto nopage;
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
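
In short, the patch drops the THP-specific is_thp_alloc() helper: kswapd is again woken unconditionally at the top of the slow path, and the gfp-mask test is folded directly into the bailout that skips direct reclaim when compaction was deferred or contended. Below is a minimal user-space sketch of the resulting predicate; the flag values and the helper name should_fail_instead_of_reclaim() are invented for illustration and are not the kernel's definitions.

/*
 * Standalone illustration of the check this patch leaves in the slow
 * path (simplified; the flag values below are made up for the example
 * and are NOT the real kernel definitions).
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_MOVABLE	0x08u	/* illustrative value only */
#define __GFP_REPEAT	0x400u	/* illustrative value only */

/*
 * After the patch, the slow path jumps to "nopage" (instead of entering
 * direct reclaim) when compaction was deferred or contended AND the
 * caller asked for a movable allocation without __GFP_REPEAT, i.e. the
 * pattern typical of THP allocations.
 */
static bool should_fail_instead_of_reclaim(gfp_t gfp_mask,
					   bool deferred_compaction,
					   bool contended_compaction)
{
	return (deferred_compaction || contended_compaction) &&
	       (gfp_mask & (__GFP_MOVABLE | __GFP_REPEAT)) == __GFP_MOVABLE;
}

int main(void)
{
	/* THP-like request, compaction recently deferred: fail early. */
	printf("%d\n", should_fail_instead_of_reclaim(__GFP_MOVABLE,
						      true, false));
	/* Caller willing to retry (__GFP_REPEAT set): keep reclaiming. */
	printf("%d\n", should_fail_instead_of_reclaim(__GFP_MOVABLE | __GFP_REPEAT,
						      true, false));
	return 0;
}

Note that, unlike the removed is_thp_alloc(), the inlined check no longer requires order == pageblock_order, so it applies to any allocation that reaches this point with a movable, non-__GFP_REPEAT mask.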