path: root/mm
author     Blagovest Kolenichev <bkolenichev@codeaurora.org>  2017-06-28 09:45:12 -0700
committer  Blagovest Kolenichev <bkolenichev@codeaurora.org>  2017-06-28 10:03:23 -0700
commit     0fc26d942395c95a08159685aa0f873737bf675f (patch)
tree       e88030dec8e11b3781e6ac09005d913b239a7e7f /mm
parent     35589846a02122d17cd8703ed6a642cdc4918621 (diff)
parent     77ddb509298785b46400c2944550be38623948b6 (diff)
Merge branch 'android-4.4@77ddb50' (v4.4.74) into 'msm-4.4'
* refs/heads/tmp-77ddb50:
  UPSTREAM: usb: gadget: f_fs: avoid out of bounds access on comp_desc
  Linux 4.4.74
  mm: fix new crash in unmapped_area_topdown()
  Allow stack to grow up to address space limit
  mm: larger stack guard gap, between vmas
  alarmtimer: Rate limit periodic intervals
  MIPS: Fix bnezc/jialc return address calculation
  usb: dwc3: exynos fix axius clock error path to do cleanup
  alarmtimer: Prevent overflow of relative timers
  genirq: Release resources in __setup_irq() error path
  swap: cond_resched in swap_cgroup_prepare()
  mm/memory-failure.c: use compound_head() flags for huge pages
  USB: gadgetfs, dummy-hcd, net2280: fix locking for callbacks
  usb: xhci: ASMedia ASM1042A chipset need shorts TX quirk
  drivers/misc/c2port/c2port-duramar2150.c: checking for NULL instead of IS_ERR()
  usb: r8a66597-hcd: decrease timeout
  usb: r8a66597-hcd: select a different endpoint on timeout
  USB: gadget: dummy_hcd: fix hub-descriptor removable fields
  pvrusb2: reduce stack usage pvr2_eeprom_analyze()
  usb: core: fix potential memory leak in error path during hcd creation
  USB: hub: fix SS max number of ports
  iio: proximity: as3935: recalibrate RCO after resume
  staging: rtl8188eu: prevent an underflow in rtw_check_beacon_data()
  mfd: omap-usb-tll: Fix inverted bit use for USB TLL mode
  x86/mm/32: Set the '__vmalloc_start_set' flag in initmem_init()
  serial: efm32: Fix parity management in 'efm32_uart_console_get_options()'
  mac80211: fix IBSS presp allocation size
  mac80211: fix CSA in IBSS mode
  mac80211/wpa: use constant time memory comparison for MACs
  mac80211: don't look at the PM bit of BAR frames
  vb2: Fix an off by one error in 'vb2_plane_vaddr'
  cpufreq: conservative: Allow down_threshold to take values from 1 to 10
  can: gs_usb: fix memory leak in gs_cmd_reset()
  configfs: Fix race between create_link and configfs_rmdir
  UPSTREAM: bpf: don't let ldimm64 leak map addresses on unprivileged
  BACKPORT: ext4: fix data exposure after a crash
  ANDROID: sdcardfs: remove dead function open_flags_to_access_mode()
  ANDROID: android-base.cfg: split out arm64-specific configs
  Linux 4.4.73
  sparc64: make string buffers large enough
  s390/kvm: do not rely on the ILC on kvm host protection fauls
  xtensa: don't use linux IRQ #0
  tipc: ignore requests when the connection state is not CONNECTED
  proc: add a schedule point in proc_pid_readdir()
  romfs: use different way to generate fsid for BLOCK or MTD
  sctp: sctp_addr_id2transport should verify the addr before looking up assoc
  r8152: avoid start_xmit to schedule napi when napi is disabled
  r8152: fix rtl8152_post_reset function
  r8152: re-schedule napi for tx
  nfs: Fix "Don't increment lock sequence ID after NFS4ERR_MOVED"
  ravb: unmap descriptors when freeing rings
  drm/ast: Fixed system hanged if disable P2A
  drm/nouveau: Don't enabling polling twice on runtime resume
  parisc, parport_gsc: Fixes for printk continuation lines
  net: adaptec: starfire: add checks for dma mapping errors
  pinctrl: berlin-bg4ct: fix the value for "sd1a" of pin SCRD0_CRD_PRES
  gianfar: synchronize DMA API usage by free_skb_rx_queue w/ gfar_new_page
  net/mlx4_core: Avoid command timeouts during VF driver device shutdown
  drm/nouveau/fence/g84-: protect against concurrent access to semaphore buffers
  drm/nouveau: prevent userspace from deleting client object
  ipv6: fix flow labels when the traffic class is non-0
  FS-Cache: Initialise stores_lock in netfs cookie
  fscache: Clear outstanding writes when disabling a cookie
  fscache: Fix dead object requeue
  ethtool: do not vzalloc(0) on registers dump
  log2: make order_base_2() behave correctly on const input value zero
  kasan: respect /proc/sys/kernel/traceoff_on_warning
  jump label: pass kbuild_cflags when checking for asm goto support
  PM / runtime: Avoid false-positive warnings from might_sleep_if()
  ipv6: Fix IPv6 packet loss in scenarios involving roaming + snooping switches
  i2c: piix4: Fix request_region size
  sierra_net: Add support for IPv6 and Dual-Stack Link Sense Indications
  sierra_net: Skip validating irrelevant fields for IDLE LSIs
  net: hns: Fix the device being used for dma mapping during TX
  NET: mkiss: Fix panic
  NET: Fix /proc/net/arp for AX.25
  ipv6: Inhibit IPv4-mapped src address on the wire.
  ipv6: Handle IPv4-mapped src to in6addr_any dst.
  net: xilinx_emaclite: fix receive buffer overflow
  net: xilinx_emaclite: fix freezes due to unordered I/O
  Call echo service immediately after socket reconnect
  staging: rtl8192e: rtl92e_fill_tx_desc fix write to mapped out memory.
  ARM: dts: imx6dl: Fix the VDD_ARM_CAP voltage for 396MHz operation
  partitions/msdos: FreeBSD UFS2 file systems are not recognized
  s390/vmem: fix identity mapping
  usb: gadget: f_fs: Fix possibe deadlock

Conflicts:
	drivers/usb/gadget/function/f_fs.c

Change-Id: I23106e9fc2c4f2d0b06acce59b781f6c36487fcc
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/gup.c              5
-rw-r--r--  mm/kasan/report.c     3
-rw-r--r--  mm/memory-failure.c   5
-rw-r--r--  mm/memory.c          38
-rw-r--r--  mm/mmap.c           160
-rw-r--r--  mm/swap_cgroup.c      3
6 files changed, 107 insertions, 107 deletions
diff --git a/mm/gup.c b/mm/gup.c
index 4b0b7e7d1136..b599526db9f7 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -312,11 +312,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
- /* For mm_populate(), just skip the stack guard page. */
- if ((*flags & FOLL_POPULATE) &&
- (stack_guard_page_start(vma, address) ||
- stack_guard_page_end(vma, address + PAGE_SIZE)))
- return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 12f222d0224b..b4e31f78ae69 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
@@ -251,6 +252,8 @@ void kasan_report(unsigned long addr, size_t size,
if (likely(!kasan_report_enabled()))
return;
+ disable_trace_on_warning();
+
info.access_addr = (void *)addr;
info.access_size = size;
info.is_write = is_write;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 44443a5b5a59..14c1c5cb3ccf 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1208,7 +1208,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* page_remove_rmap() in try_to_unmap_one(). So to determine page status
* correctly, we save a copy of the page flags at this time.
*/
- page_flags = p->flags;
+ if (PageHuge(p))
+ page_flags = hpage->flags;
+ else
+ page_flags = p->flags;
/*
* unpoison always clear PG_hwpoison inside page lock
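This hunk is the backport of "mm/memory-failure.c: use compound_head() flags for huge pages". Earlier in memory_failure() the head page has already been looked up (roughly hpage = compound_head(p)); for hugetlb pages the flags that matter to the hwpoison logic, PG_hwpoison included, are tracked on that head page, so the snapshot taken here has to come from hpage->flags rather than from the tail page that actually took the error.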
diff --git a/mm/memory.c b/mm/memory.c
index b536e3d60fc7..8689df9b09d5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2663,40 +2663,6 @@ out_release:
}
/*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
- address &= PAGE_MASK;
- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
- struct vm_area_struct *prev = vma->vm_prev;
-
- /*
- * Is there a mapping abutting this one below?
- *
- * That's only ok if it's the same stack mapping
- * that has gotten split..
- */
- if (prev && prev->vm_end == address)
- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
- return expand_downwards(vma, address - PAGE_SIZE);
- }
- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
- struct vm_area_struct *next = vma->vm_next;
-
- /* As VM_GROWSDOWN but s/below/above/ */
- if (next && next->vm_start == address + PAGE_SIZE)
- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
- return expand_upwards(vma, address + PAGE_SIZE);
- }
- return 0;
-}
-
-/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2716,10 +2682,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
- /* Check if we need to add a guard page to the stack */
- if (check_stack_guard_page(vma, address) < 0)
- return VM_FAULT_SIGSEGV;
-
/* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
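Together with the gup.c hunk above, this removes the old per-fault stack guard page machinery: do_anonymous_page() no longer needs to special-case the page just below (or above) a stack VMA, because the guard is now enforced as a gap at VMA-expansion time by expand_upwards()/expand_downwards() in the mmap.c changes that follow (see the sketch after the mmap.c diff).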
diff --git a/mm/mmap.c b/mm/mmap.c
index ebc9d1923c3d..092729c2cb72 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -304,6 +304,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
unsigned long retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *next;
unsigned long min_brk;
bool populate;
@@ -348,7 +349,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
}
/* Check against existing mmap mappings. */
- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+ next = find_vma(mm, oldbrk);
+ if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
@@ -371,10 +373,22 @@ out:
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
- unsigned long max, subtree_gap;
- max = vma->vm_start;
- if (vma->vm_prev)
- max -= vma->vm_prev->vm_end;
+ unsigned long max, prev_end, subtree_gap;
+
+ /*
+ * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+ * allow two stack_guard_gaps between them here, and when choosing
+ * an unmapped area; whereas when expanding we only require one.
+ * That's a little inconsistent, but keeps the code here simpler.
+ */
+ max = vm_start_gap(vma);
+ if (vma->vm_prev) {
+ prev_end = vm_end_gap(vma->vm_prev);
+ if (max > prev_end)
+ max -= prev_end;
+ else
+ max = 0;
+ }
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -467,7 +481,7 @@ static void validate_mm(struct mm_struct *mm)
anon_vma_unlock_read(anon_vma);
}
- highest_address = vma->vm_end;
+ highest_address = vm_end_gap(vma);
vma = vma->vm_next;
i++;
}
@@ -636,7 +650,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = vma->vm_end;
+ mm->highest_vm_end = vm_end_gap(vma);
/*
* vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -882,7 +896,7 @@ again: remove_next = 1 + (end > next->vm_end);
vma_gap_update(vma);
if (end_changed) {
if (!next)
- mm->highest_vm_end = end;
+ mm->highest_vm_end = vm_end_gap(vma);
else if (!adjust_next)
vma_gap_update(next);
}
@@ -925,7 +939,7 @@ again: remove_next = 1 + (end > next->vm_end);
else if (next)
vma_gap_update(next);
else
- mm->highest_vm_end = end;
+ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
}
if (insert && file)
uprobe_mmap(insert);
@@ -1771,7 +1785,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
while (true) {
/* Visit left subtree if it looks promising */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end >= low_limit && vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
@@ -1782,12 +1796,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
}
}
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
return -ENOMEM;
- if (gap_end >= low_limit && gap_end - gap_start >= length)
+ if (gap_end >= low_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit right subtree if it looks promising */
@@ -1809,8 +1824,8 @@ check_current:
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
- gap_start = vma->vm_prev->vm_end;
- gap_end = vma->vm_start;
+ gap_start = vm_end_gap(vma->vm_prev);
+ gap_end = vm_start_gap(vma);
goto check_current;
}
}
@@ -1874,7 +1889,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
while (true) {
/* Visit right subtree if it looks promising */
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
@@ -1887,10 +1902,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
check_current:
/* Check if current node has a suitable gap */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end < low_limit)
return -ENOMEM;
- if (gap_start <= high_limit && gap_end - gap_start >= length)
+ if (gap_start <= high_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit left subtree if it looks promising */
@@ -1913,7 +1929,7 @@ check_current:
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
- vma->vm_prev->vm_end : 0;
+ vm_end_gap(vma->vm_prev) : 0;
goto check_current;
}
}
@@ -1951,7 +1967,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
if (len > TASK_SIZE - mmap_min_addr)
@@ -1962,9 +1978,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -1987,7 +2004,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -2002,9 +2019,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -2129,21 +2147,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+ unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start, actual_size;
+ unsigned long new_start;
/* address space limit tests */
if (!may_expand_vm(mm, grow))
return -ENOMEM;
/* Stack limit test */
- actual_size = size;
- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
- actual_size -= PAGE_SIZE;
- if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
@@ -2181,16 +2197,32 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *next;
+ unsigned long gap_addr;
int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
- /* Guard against wrapping around to address 0. */
- if (address < PAGE_ALIGN(address+4))
- address = PAGE_ALIGN(address+4);
- else
+ /* Guard against exceeding limits of the address space. */
+ address &= PAGE_MASK;
+ if (address >= TASK_SIZE)
return -ENOMEM;
+ address += PAGE_SIZE;
+
+ /* Enforce stack_guard_gap */
+ gap_addr = address + stack_guard_gap;
+
+ /* Guard against overflow */
+ if (gap_addr < address || gap_addr > TASK_SIZE)
+ gap_addr = TASK_SIZE;
+
+ next = vma->vm_next;
+ if (next && next->vm_start < gap_addr) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
@@ -2236,7 +2268,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = address;
+ mm->highest_vm_end = vm_end_gap(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
@@ -2257,6 +2289,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *prev;
+ unsigned long gap_addr;
int error;
address &= PAGE_MASK;
@@ -2264,6 +2298,17 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
+ /* Enforce stack_guard_gap */
+ gap_addr = address - stack_guard_gap;
+ if (gap_addr > address)
+ return -ENOMEM;
+ prev = vma->vm_prev;
+ if (prev && prev->vm_end > gap_addr) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
@@ -2319,28 +2364,25 @@ int expand_downwards(struct vm_area_struct *vma,
return error;
}
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+ unsigned long val;
+ char *endptr;
+
+ val = simple_strtoul(p, &endptr, 10);
+ if (!*endptr)
+ stack_guard_gap = val << PAGE_SHIFT;
+
+ return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *next;
-
- address &= PAGE_MASK;
- next = vma->vm_next;
- if (next && next->vm_start == address + PAGE_SIZE) {
- if (!(next->vm_flags & VM_GROWSUP))
- return -ENOMEM;
- }
return expand_upwards(vma, address);
}
@@ -2362,14 +2404,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *prev;
-
- address &= PAGE_MASK;
- prev = vma->vm_prev;
- if (prev && prev->vm_end == address) {
- if (!(prev->vm_flags & VM_GROWSDOWN))
- return -ENOMEM;
- }
return expand_downwards(vma, address);
}
@@ -2467,7 +2501,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
vma->vm_prev = prev;
vma_gap_update(vma);
} else
- mm->highest_vm_end = prev ? prev->vm_end : 0;
+ mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;
/* Kill the cache */
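The bulk of the mmap.c changes above backports "mm: larger stack guard gap, between vmas" together with the follow-ups "Allow stack to grow up to address space limit" and "mm: fix new crash in unmapped_area_topdown()". Instead of a single guard page faulted in below the stack, the VM now keeps stack_guard_gap (256 pages by default, tunable via the stack_guard_gap= boot parameter parsed above) free between an expanding stack and its neighbouring mapping, and all the gap-aware bookkeeping goes through vm_start_gap()/vm_end_gap(). Those two helpers live in include/linux/mm.h and therefore do not show up in this 'mm'-limited diffstat; the sketch below shows their expected shape, going by the upstream patch rather than anything on this page.

	/* include/linux/mm.h (sketch, not part of the mm/ diff shown here) */
	static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
	{
		unsigned long vm_start = vma->vm_start;

		/* A grows-down stack reserves stack_guard_gap below its start. */
		if (vma->vm_flags & VM_GROWSDOWN) {
			vm_start -= stack_guard_gap;
			if (vm_start > vma->vm_start)	/* clamp on underflow */
				vm_start = 0;
		}
		return vm_start;
	}

	static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
	{
		unsigned long vm_end = vma->vm_end;

		/* A grows-up stack reserves stack_guard_gap above its end. */
		if (vma->vm_flags & VM_GROWSUP) {
			vm_end += stack_guard_gap;
			if (vm_end < vma->vm_end)	/* clamp on overflow */
				vm_end = -PAGE_SIZE;
		}
		return vm_end;
	}

If the larger gap breaks a legacy application that deliberately maps right next to its stack, booting with stack_guard_gap=1 restores the old one-page spacing.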
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index b5f7f24b8dd1..40dd0f9b00d6 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type)
if (!page)
goto not_enough_page;
ctrl->map[idx] = page;
+
+ if (!(idx % SWAP_CLUSTER_MAX))
+ cond_resched();
}
return 0;
not_enough_page:
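The swap_cgroup.c hunk is the backport of "swap: cond_resched in swap_cgroup_prepare()": the swap-cgroup map for a very large swap device is allocated one page per loop iteration, and without a scheduling point the loop can monopolize the CPU for a noticeable time, so it now yields every SWAP_CLUSTER_MAX pages. A minimal, hypothetical illustration of the same pattern (the loop body and count below are invented for the example):

	/* Yield periodically inside a long setup loop so other tasks can run. */
	for (idx = 0; idx < nr_pages; idx++) {
		prepare_one_page(idx);			/* hypothetical per-item work */
		if (!(idx % SWAP_CLUSTER_MAX))		/* every SWAP_CLUSTER_MAX items */
			cond_resched();
	}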