summaryrefslogtreecommitdiff
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorMinchan Kim <minchan@kernel.org>2013-05-09 16:21:24 +0900
committerKyle Yan <kyan@codeaurora.org>2016-06-22 14:43:06 -0700
commitecc070a294b046153cf0bdb1191c0db18fdf5ed9 (patch)
treefd0afaf0974de6139aa32b0de52f7c79a30d90b2 /mm/vmscan.c
parentb3d3ad2c8f2fb7a432a2bab78a5c660d26a6bf85 (diff)
mm: Per process reclaim
These days, there are many platforms available in the embedded market, and they are smarter than the kernel — which has very limited information about the working set — so they want to be involved in memory management more heavily, as with Android's low memory killer and ashmem, or the many recent low-memory notifiers. One simple imaginable scenario of userspace intelligence is that the platform can manage tasks as foreground and background, so it is better to reclaim a background task's pages for the end-user's *responsiveness*, even though that task has frequently referenced pages. This patch adds a new knob, "reclaim", under /proc/<pid>/ so a task manager can reclaim from any target process anytime, anywhere. It gives the platform another method for using memory efficiently. It can avoid killing a process just to get free memory, which was a really terrible experience: I once lost my best game score ever after I switched to a phone call while enjoying the game. Reclaim file-backed pages only. echo file > /proc/PID/reclaim Reclaim anonymous pages only. echo anon > /proc/PID/reclaim Reclaim all pages echo all > /proc/PID/reclaim Change-Id: Iabdb7bc2ef3dc4d94e3ea005fbe18f4cd06739ab Signed-off-by: Minchan Kim <minchan@kernel.org> Patch-mainline: linux-mm @ 9 May 2013 16:21:24 [vinmenon@codeaurora.org: trivial merge conflict fixes, and minor tweak of the commit msg] Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c60
1 file changed, 60 insertions, 0 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a6caebdf8140..5aae621bb534 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1314,6 +1314,66 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
return ret;
}
+#ifdef CONFIG_PROCESS_RECLAIM
+/*
+ * shrink_page() - attempt to reclaim a single isolated page.
+ *
+ * The page is placed on a private one-entry list and handed to
+ * shrink_page_list(); the ret_nr_dirty/ret_nr_writeback counters and
+ * force_reclaim flag are forwarded unchanged.  If the page could not be
+ * freed it is spliced onto @ret_pages so the caller can put it back on
+ * the LRU; on success the page is already gone.
+ *
+ * Returns the number of pages reclaimed (0 or 1).
+ */
+static unsigned long shrink_page(struct page *page,
+ struct zone *zone,
+ struct scan_control *sc,
+ enum ttu_flags ttu_flags,
+ unsigned long *ret_nr_dirty,
+ unsigned long *ret_nr_writeback,
+ bool force_reclaim,
+ struct list_head *ret_pages)
+{
+ int reclaimed;
+ LIST_HEAD(page_list);
+ list_add(&page->lru, &page_list);
+
+ reclaimed = shrink_page_list(&page_list, zone, sc, ttu_flags,
+ ret_nr_dirty, ret_nr_writeback,
+ force_reclaim);
+ /* Page survived reclaim: hand it back for LRU restoration. */
+ if (!reclaimed)
+ list_splice(&page_list, ret_pages);
+
+ return reclaimed;
+}
+
+/*
+ * reclaim_pages_from_list() - forcibly reclaim the isolated pages on
+ * @page_list (backend for the /proc/<pid>/reclaim knob).
+ *
+ * Each page is fed individually through shrink_page().  Pages that
+ * could not be freed accumulate on ret_pages and are then returned to
+ * the LRU, with their NR_ISOLATED_{ANON,FILE} counters corrected.
+ *
+ * Returns the total number of pages reclaimed.
+ */
+unsigned long reclaim_pages_from_list(struct list_head *page_list)
+{
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+		.priority = DEF_PRIORITY,
+		.may_writepage = 1,
+		.may_unmap = 1,
+		.may_swap = 1,
+	};
+
+	LIST_HEAD(ret_pages);
+	struct page *page;
+	unsigned long nr_dirty, nr_writeback;
+	unsigned long nr_reclaimed = 0;
+
+	while (!list_empty(page_list)) {
+		page = lru_to_page(page_list);
+		list_del(&page->lru);
+
+		/* Drop PG_active so shrink_page_list() will consider it. */
+		ClearPageActive(page);
+		/*
+		 * Argument list must match shrink_page()'s 8-parameter
+		 * prototype: two counter outputs, force_reclaim, then the
+		 * failure list.  (The previous call passed five dummy
+		 * counters and omitted ret_pages entirely, so it could not
+		 * compile and unreclaimed pages would never have been put
+		 * back on the LRU.)
+		 */
+		nr_reclaimed += shrink_page(page, page_zone(page), &sc,
+				TTU_UNMAP|TTU_IGNORE_ACCESS,
+				&nr_dirty, &nr_writeback, true, &ret_pages);
+	}
+
+	/* Put back anything shrink_page() failed to free. */
+	while (!list_empty(&ret_pages)) {
+		page = lru_to_page(&ret_pages);
+		list_del(&page->lru);
+		dec_zone_page_state(page, NR_ISOLATED_ANON +
+				page_is_file_cache(page));
+		putback_lru_page(page);
+	}
+
+	return nr_reclaimed;
+}
+#endif
+
/*
* Attempt to remove the specified page from its LRU. Only take this page
* if it is of the appropriate PageActive status. Pages which are being