Diffstat (limited to 'mm')
-rw-r--r--	mm/kmemleak.c	2
-rw-r--r--	mm/page_alloc.c	8
2 files changed, 5 insertions, 5 deletions
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index d4f13525e42e..42eb5e580b3c 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1393,7 +1393,7 @@ static void kmemleak_scan(void)
 			if (page_count(page) == 0)
 				continue;
 			scan_block(page, page + 1, NULL);
-			if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
+			if (!(pfn & 63))
 				cond_resched();
 		}
 	}
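
The kmemleak hunk above replaces the modulo-based throttle with a bitmask: because 64 is a power of two, (pfn & 63) is zero exactly once every 64 page frames, so cond_resched() still runs periodically while the check itself is a single AND. A minimal sketch of the same idiom in plain C follows; the 256-iteration loop and the do_work() stub are illustrative stand-ins, not kernel code.

	#include <stdio.h>

	/* Illustrative stand-in for the per-page scanning work. */
	static void do_work(unsigned long pfn)
	{
		(void)pfn;
	}

	int main(void)
	{
		unsigned long pfn;

		for (pfn = 0; pfn < 256; pfn++) {
			do_work(pfn);
			/* Power-of-two rate limit: (pfn & 63) == 0 on every 64th
			 * iteration, equivalent to pfn % 64 == 0 but computed
			 * with a single bitwise AND. */
			if (!(pfn & 63))
				printf("yield point at pfn %lu\n", pfn);
		}
		return 0;
	}
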
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2db537e1da49..3348d88b2b71 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -712,7 +712,7 @@ static inline void __free_one_page(struct page *page,
 	struct page *buddy;
 	unsigned int max_order;
 
-	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
+	max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
 
 	VM_BUG_ON(!zone_is_initialized(zone));
 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
@@ -727,7 +727,7 @@ static inline void __free_one_page(struct page *page,
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 
 continue_merging:
-	while (order < max_order - 1) {
+	while (order < max_order) {
 		buddy_idx = __find_buddy_index(page_idx, order);
 		buddy = page + (buddy_idx - page_idx);
 		if (!page_is_buddy(page, buddy, order))
@@ -748,7 +748,7 @@ continue_merging:
 		page_idx = combined_idx;
 		order++;
 	}
-	if (max_order < MAX_ORDER) {
+	if (order < MAX_ORDER - 1) {
 		/* If we are here, it means order is >= pageblock_order.
 		 * We want to prevent merge between freepages on isolate
 		 * pageblock and normal pageblock. Without this, pageblock
@@ -769,7 +769,7 @@ continue_merging:
 						is_migrate_isolate(buddy_mt)))
 				goto done_merging;
 		}
-		max_order++;
+		max_order = order + 1;
 		goto continue_merging;
 	}
 
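
In the page_alloc hunks, max_order is redefined as the highest order the merge loop may reach (MAX_ORDER - 1 / pageblock_order instead of one past it), so the loop can test order < max_order directly and the fallback path can set max_order = order + 1; the set of pages that end up merged is intended to be unchanged. A stripped-down sketch of the buddy-index arithmetic this loop relies on follows; find_buddy_index() here is a hypothetical stand-in modelled on the kernel's __find_buddy_index(), and the starting index 12 is just an example.

	#include <stdio.h>

	/* The buddy of a block at 'order' differs from it only in bit
	 * 'order' of the page index, hence the XOR. */
	static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
	{
		return page_idx ^ (1UL << order);
	}

	int main(void)
	{
		unsigned long page_idx = 12;	/* example index within a zone */
		unsigned int order;

		for (order = 0; order < 4; order++) {
			unsigned long buddy_idx = find_buddy_index(page_idx, order);
			/* A merged pair starts at the lower of the two indices,
			 * which is buddy_idx & page_idx. */
			unsigned long combined_idx = buddy_idx & page_idx;

			printf("order %u: page %lu, buddy %lu, merged block at %lu\n",
			       order, page_idx, buddy_idx, combined_idx);
			page_idx = combined_idx;
		}
		return 0;
	}
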