author		Vinayak Menon <vinmenon@codeaurora.org>	2015-03-20 14:15:39 +0530
committer	Kyle Yan <kyan@codeaurora.org>	2016-06-29 15:11:26 -0700
commit		9caa3b38bbead2fc0e28554641880c02e0f6d649 (patch)
tree		4f17773f9b8d8361ddd4fba9076c8f6ef3d0615c /fs/proc/task_mmu.c
parent		82b3ab03808f5934c784ed89bbe4625183525ad8 (diff)
mm: process reclaim: vmpressure based process reclaim
With this patch, anon pages of inactive tasks can be reclaimed, depending on memory pressure. Memory pressure is detected using vmpressure events. The 'N' best tasks in terms of anon size are selected, and a number of pages proportional to their task size is reclaimed from each. The total number of pages reclaimed at each run of the swap work can be tuned from userspace; the default is SWAP_CLUSTER_MAX * 32. The patch also adds tracepoints to debug and tune the feature.

echo 1 > /sys/module/process_reclaim/parameters/enable_process_reclaim
	enables the feature.
echo <pages> > /sys/module/process_reclaim/parameters/per_swap_size
	sets the number of pages reclaimed in each scan.
/sys/module/process_reclaim/parameters/reclaim_avg_efficiency
	provides the average efficiency (scan-to-reclaim ratio) of the algorithm.
/sys/module/process_reclaim/parameters/swap_eff_win
	sets the window period (in units of the number of times reclaim is triggered) used to detect low-efficiency runs.
/sys/module/process_reclaim/parameters/swap_opt_eff
	sets the optimal efficiency threshold for low-efficiency detection.

Change-Id: I895986f10c997d1715761eaaadc4bbbee60db9d2
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--	fs/proc/task_mmu.c	72
1 file changed, 65 insertions(+), 7 deletions(-)
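
The hunks below thread a struct reclaim_param through mm_walk->private; its definition comes from a header that is not part of this diff. A minimal sketch reconstructed from the fields the hunks actually use (the real declaration in the patch series may differ):

	/* Sketch only: fields inferred from this diff, not the
	 * authoritative declaration from the patch series.
	 */
	struct reclaim_param {
		struct vm_area_struct *vma;	/* VMA currently being walked */
		int nr_to_reclaim;		/* remaining reclaim budget */
		int nr_reclaimed;		/* pages reclaimed so far */
		int nr_scanned;			/* PTEs scanned so far */
	};

reclaim_pte_range() counts every scanned PTE, decrements nr_to_reclaim as pages are reclaimed, and stops the walk once the budget reaches zero.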
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9b5721f29f67..ee79fadfc6e7 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1435,15 +1435,17 @@ const struct file_operations proc_pagemap_operations = {
static int reclaim_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
- struct vm_area_struct *vma = walk->private;
+ struct reclaim_param *rp = walk->private;
+ struct vm_area_struct *vma = rp->vma;
pte_t *pte, ptent;
spinlock_t *ptl;
struct page *page;
LIST_HEAD(page_list);
int isolated;
+ int reclaimed;
split_huge_page_pmd(vma, addr, pmd);
- if (pmd_trans_unstable(pmd))
+ if (pmd_trans_unstable(pmd) || !rp->nr_to_reclaim)
return 0;
cont:
isolated = 0;
@@ -1464,12 +1466,18 @@ cont:
inc_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
isolated++;
- if (isolated >= SWAP_CLUSTER_MAX)
+ rp->nr_scanned++;
+ if ((isolated >= SWAP_CLUSTER_MAX) || !rp->nr_to_reclaim)
break;
}
pte_unmap_unlock(pte - 1, ptl);
- reclaim_pages_from_list(&page_list, vma);
- if (addr != end)
+ reclaimed = reclaim_pages_from_list(&page_list, vma);
+ rp->nr_reclaimed += reclaimed;
+ rp->nr_to_reclaim -= reclaimed;
+ if (rp->nr_to_reclaim < 0)
+ rp->nr_to_reclaim = 0;
+
+ if (rp->nr_to_reclaim && (addr != end))
goto cont;
cond_resched();
@@ -1483,6 +1491,51 @@ enum reclaim_type {
RECLAIM_RANGE,
};
+struct reclaim_param reclaim_task_anon(struct task_struct *task,
+ int nr_to_reclaim)
+{
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ struct mm_walk reclaim_walk = {};
+ struct reclaim_param rp;
+
+ rp.nr_reclaimed = 0;
+ rp.nr_scanned = 0;
+ get_task_struct(task);
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out;
+
+ reclaim_walk.mm = mm;
+ reclaim_walk.pmd_entry = reclaim_pte_range;
+
+ rp.nr_to_reclaim = nr_to_reclaim;
+ reclaim_walk.private = &rp;
+
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (is_vm_hugetlb_page(vma))
+ continue;
+
+ if (vma->vm_file)
+ continue;
+
+ if (!rp.nr_to_reclaim)
+ break;
+
+ rp.vma = vma;
+ walk_page_range(vma->vm_start, vma->vm_end,
+ &reclaim_walk);
+ }
+
+ flush_tlb_mm(mm);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+out:
+ put_task_struct(task);
+ return rp;
+}
+
static ssize_t reclaim_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -1495,6 +1548,7 @@ static ssize_t reclaim_write(struct file *file, const char __user *buf,
struct mm_walk reclaim_walk = {};
unsigned long start = 0;
unsigned long end = 0;
+ struct reclaim_param rp;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
@@ -1556,6 +1610,10 @@ static ssize_t reclaim_write(struct file *file, const char __user *buf,
reclaim_walk.mm = mm;
reclaim_walk.pmd_entry = reclaim_pte_range;
+ rp.nr_to_reclaim = ~0;
+ rp.nr_reclaimed = 0;
+ reclaim_walk.private = &rp;
+
down_read(&mm->mmap_sem);
if (type == RECLAIM_RANGE) {
vma = find_vma(mm, start);
@@ -1565,7 +1623,7 @@ static ssize_t reclaim_write(struct file *file, const char __user *buf,
if (is_vm_hugetlb_page(vma))
continue;
- reclaim_walk.private = vma;
+ rp.vma = vma;
walk_page_range(max(vma->vm_start, start),
min(vma->vm_end, end),
&reclaim_walk);
@@ -1582,7 +1640,7 @@ static ssize_t reclaim_write(struct file *file, const char __user *buf,
if (type == RECLAIM_FILE && !vma->vm_file)
continue;
- reclaim_walk.private = vma;
+ rp.vma = vma;
walk_page_range(vma->vm_start, vma->vm_end,
&reclaim_walk);
}
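
reclaim_task_anon() returns the populated reclaim_param by value, which gives the vmpressure-driven swap work described in the commit message the scan and reclaim counts it needs for the efficiency tracking (reclaim_avg_efficiency, swap_eff_win, swap_opt_eff). A hedged caller sketch follows; swap_fn and pick_task_by_anon_size() are hypothetical names, and the real process_reclaim worker is outside this diff:

	#include <linux/workqueue.h>
	#include <linux/sched.h>

	/* Hypothetical worker: the actual swap work of the process_reclaim
	 * module is not part of this diff.
	 */
	static void swap_fn(struct work_struct *work)
	{
		/* pick_task_by_anon_size() is an assumed helper standing in
		 * for the module's selection of the 'N' largest-anon tasks.
		 */
		struct task_struct *task = pick_task_by_anon_size();
		struct reclaim_param rp;

		if (!task)
			return;

		/* per_swap_size is the module parameter named in the
		 * commit message.
		 */
		rp = reclaim_task_anon(task, per_swap_size);

		/* Efficiency as described in the commit message: the ratio of
		 * scanned to reclaimed pages, fed into the swap_eff_win /
		 * swap_opt_eff low-efficiency detection.
		 */
		if (rp.nr_scanned)
			trace_printk("process_reclaim: reclaimed %d of %d scanned\n",
				     rp.nr_reclaimed, rp.nr_scanned);
	}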