author | Cody P Schafer <cody@linux.vnet.ibm.com> | 2013-07-03 15:01:32 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-03 16:07:27 -0700
commit | 998d39cb236fe464af86a3492a24d2f67ee1efc2 |
tree | c093fb89e8d4a4b62e656f5becf92876ce13f958 |
parent | 8d7a8fa97abeb4fd6b3975d32c9f859875157770 |
mm/page_alloc: protect pcp->batch accesses with ACCESS_ONCE
pcp->batch could change at any point; avoid relying on it being a stable
value.
Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/page_alloc.c | 11 |
1 file changed, 7 insertions(+), 4 deletions(-)
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eaaef2a09424..97b8f861e63d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1182,10 +1182,12 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
 	unsigned long flags;
 	int to_drain;
+	unsigned long batch;
 
 	local_irq_save(flags);
-	if (pcp->count >= pcp->batch)
-		to_drain = pcp->batch;
+	batch = ACCESS_ONCE(pcp->batch);
+	if (pcp->count >= batch)
+		to_drain = batch;
 	else
 		to_drain = pcp->count;
 	if (to_drain > 0) {
@@ -1353,8 +1355,9 @@ void free_hot_cold_page(struct page *page, int cold)
 		list_add(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
-		free_pcppages_bulk(zone, pcp->batch, pcp);
-		pcp->count -= pcp->batch;
+		unsigned long batch = ACCESS_ONCE(pcp->batch);
+		free_pcppages_bulk(zone, batch, pcp);
+		pcp->count -= batch;
 	}
 
 out:
```
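Why the snapshot matters: without ACCESS_ONCE the compiler may legally load pcp->batch more than once, once for the comparison and again when the value is used, and the two loads can observe different values if another CPU rewrites the field in between. Below is a minimal illustrative sketch of the same pattern outside the kernel; struct pcp_example and pick_to_drain are hypothetical names, and the ACCESS_ONCE macro shown is a simplified version of the kernel's definition, not a copy of it.

```c
/* Simplified stand-in for the kernel's ACCESS_ONCE(): a volatile access
 * forces exactly one load, so the compiler cannot silently re-read the
 * field later in the function. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* Hypothetical cut-down per-cpu page structure, for illustration only. */
struct pcp_example {
	int count;
	int batch;	/* may be rewritten at any time by another CPU */
};

/* Snapshot ->batch once and reuse the local copy, mirroring the pattern
 * the patch introduces: the comparison and the returned amount are
 * guaranteed to agree even if ->batch changes in between. */
static int pick_to_drain(struct pcp_example *pcp)
{
	int batch = ACCESS_ONCE(pcp->batch);

	return (pcp->count >= batch) ? batch : pcp->count;
}
```

The design choice is the same in both hunks of the patch: read the potentially racing field exactly once into a local variable, then make every decision in that critical section against the local copy.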