Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--  fs/proc/task_mmu.c  |  72
1 file changed, 65 insertions(+), 7 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9b5721f29f67..ee79fadfc6e7 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1435,15 +1435,17 @@ const struct file_operations proc_pagemap_operations = {
static int reclaim_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
- struct vm_area_struct *vma = walk->private;
+ struct reclaim_param *rp = walk->private;
+ struct vm_area_struct *vma = rp->vma;
pte_t *pte, ptent;
spinlock_t *ptl;
struct page *page;
LIST_HEAD(page_list);
int isolated;
+ int reclaimed;
split_huge_page_pmd(vma, addr, pmd);
- if (pmd_trans_unstable(pmd))
+ if (pmd_trans_unstable(pmd) || !rp->nr_to_reclaim)
return 0;
cont:
isolated = 0;
@@ -1464,12 +1466,18 @@ cont:
inc_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
isolated++;
- if (isolated >= SWAP_CLUSTER_MAX)
+ rp->nr_scanned++;
+ if ((isolated >= SWAP_CLUSTER_MAX) || !rp->nr_to_reclaim)
break;
}
pte_unmap_unlock(pte - 1, ptl);
- reclaim_pages_from_list(&page_list, vma);
- if (addr != end)
+ reclaimed = reclaim_pages_from_list(&page_list, vma);
+ rp->nr_reclaimed += reclaimed;
+ rp->nr_to_reclaim -= reclaimed;
+ if (rp->nr_to_reclaim < 0)
+ rp->nr_to_reclaim = 0;
+
+ if (rp->nr_to_reclaim && (addr != end))
goto cont;
cond_resched();
@@ -1483,6 +1491,51 @@ enum reclaim_type {
RECLAIM_RANGE,
};
+struct reclaim_param reclaim_task_anon(struct task_struct *task,
+ int nr_to_reclaim)
+{
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ struct mm_walk reclaim_walk = {};
+ struct reclaim_param rp;
+
+ rp.nr_reclaimed = 0;
+ rp.nr_scanned = 0;
+ get_task_struct(task);
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out;
+
+ reclaim_walk.mm = mm;
+ reclaim_walk.pmd_entry = reclaim_pte_range;
+
+ rp.nr_to_reclaim = nr_to_reclaim;
+ reclaim_walk.private = &rp;
+
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (is_vm_hugetlb_page(vma))
+ continue;
+
+ if (vma->vm_file)
+ continue;
+
+ if (!rp.nr_to_reclaim)
+ break;
+
+ rp.vma = vma;
+ walk_page_range(vma->vm_start, vma->vm_end,
+ &reclaim_walk);
+ }
+
+ flush_tlb_mm(mm);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+out:
+ put_task_struct(task);
+ return rp;
+}
+
static ssize_t reclaim_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -1495,6 +1548,7 @@ static ssize_t reclaim_write(struct file *file, const char __user *buf,
struct mm_walk reclaim_walk = {};
unsigned long start = 0;
unsigned long end = 0;
+ struct reclaim_param rp;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
@@ -1556,6 +1610,10 @@ static ssize_t reclaim_write(struct file *file, const char __user *buf,
reclaim_walk.mm = mm;
reclaim_walk.pmd_entry = reclaim_pte_range;
+ rp.nr_to_reclaim = ~0;
+ rp.nr_reclaimed = 0;
+ reclaim_walk.private = &rp;
+
down_read(&mm->mmap_sem);
if (type == RECLAIM_RANGE) {
vma = find_vma(mm, start);
@@ -1565,7 +1623,7 @@ static ssize_t reclaim_write(struct file *file, const char __user *buf,
if (is_vm_hugetlb_page(vma))
continue;
- reclaim_walk.private = vma;
+ rp.vma = vma;
walk_page_range(max(vma->vm_start, start),
min(vma->vm_end, end),
&reclaim_walk);
@@ -1582,7 +1640,7 @@ static ssize_t reclaim_write(struct file *file, const char __user *buf,
if (type == RECLAIM_FILE && !vma->vm_file)
continue;
- reclaim_walk.private = vma;
+ rp.vma = vma;
walk_page_range(vma->vm_start, vma->vm_end,
&reclaim_walk);
}
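
The struct reclaim_param passed through walk->private is defined elsewhere in the same patch and does not appear in this file's hunks. A minimal sketch, inferred only from the fields the hunks above read and write (field names match the diff; ordering and comments are assumptions):

struct reclaim_param {
	struct vm_area_struct *vma;	/* VMA currently being walked; set before each walk_page_range() */
	int nr_scanned;			/* pages isolated from the PTE walk so far */
	int nr_to_reclaim;		/* remaining reclaim budget; reaching 0 stops the walk early */
	int nr_reclaimed;		/* pages actually freed by reclaim_pages_from_list() */
};

As the diff shows, reclaim_write() initializes nr_to_reclaim to ~0 (effectively unlimited) for writes to the proc file, while reclaim_task_anon() lets an in-kernel caller pass an explicit page budget and read nr_reclaimed/nr_scanned back from the returned structure.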