Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 18
1 file changed, 12 insertions, 6 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 91cee9dfc501..f87702a376d0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1795,6 +1795,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	enum lru_list l;
 	int noswap = 0;
 	int force_scan = 0;
+	unsigned long nr_force_scan[2];
 
 	anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
@@ -1817,6 +1818,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 		fraction[0] = 0;
 		fraction[1] = 1;
 		denominator = 1;
+		nr_force_scan[0] = 0;
+		nr_force_scan[1] = SWAP_CLUSTER_MAX;
 		goto out;
 	}
@@ -1828,6 +1831,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 			fraction[0] = 1;
 			fraction[1] = 0;
 			denominator = 1;
+			nr_force_scan[0] = SWAP_CLUSTER_MAX;
+			nr_force_scan[1] = 0;
 			goto out;
 		}
 	}
@@ -1876,6 +1881,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	fraction[0] = ap;
 	fraction[1] = fp;
 	denominator = ap + fp + 1;
+	if (force_scan) {
+		unsigned long scan = SWAP_CLUSTER_MAX;
+		nr_force_scan[0] = div64_u64(scan * ap, denominator);
+		nr_force_scan[1] = div64_u64(scan * fp, denominator);
+	}
 out:
 	for_each_evictable_lru(l) {
 		int file = is_file_lru(l);
@@ -1896,12 +1906,8 @@ out:
 		 * memcg, priority drop can cause big latency. So, it's better
 		 * to scan small amount. See may_noscan above.
 		 */
-		if (!scan && force_scan) {
-			if (file)
-				scan = SWAP_CLUSTER_MAX;
-			else if (!noswap)
-				scan = SWAP_CLUSTER_MAX;
-		}
+		if (!scan && force_scan)
+			scan = nr_force_scan[file];
 		nr[l] = scan;
 	}
 }
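
For readers skimming the hunks above: the patch stops forcing a flat SWAP_CLUSTER_MAX scan on every LRU list when force_scan is set and instead precomputes nr_force_scan[], so the minimum batch is split between anon and file in the same ap:fp ratio used for fraction[]. The standalone sketch below reproduces just that arithmetic in plain userspace C, not kernel code; SWAP_CLUSTER_MAX is hard-coded to the kernel's usual value of 32, and the ap/fp values are made-up stand-ins for the reclaim-pressure numbers get_scan_count() computes (the kernel uses div64_u64() for the division).

/*
 * Minimal userspace sketch of the proportional force-scan split
 * introduced by the patch.  ap and fp below are hypothetical values
 * chosen only to illustrate the arithmetic.
 */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

int main(void)
{
	unsigned long ap = 300;		/* hypothetical anon pressure */
	unsigned long fp = 100;		/* hypothetical file pressure */
	unsigned long denominator = ap + fp + 1;
	unsigned long scan = SWAP_CLUSTER_MAX;
	unsigned long nr_force_scan[2];

	/*
	 * Same arithmetic as the patch: divide the minimum batch in the
	 * anon:file ratio instead of forcing SWAP_CLUSTER_MAX on both.
	 */
	nr_force_scan[0] = scan * ap / denominator;	/* anon share */
	nr_force_scan[1] = scan * fp / denominator;	/* file share */

	printf("anon: %lu pages, file: %lu pages\n",
	       nr_force_scan[0], nr_force_scan[1]);
	return 0;
}

With these example numbers the 32-page batch splits as 23 anon and 7 file pages; the +1 in the denominator and the integer division leave a small remainder unscanned, just as in the patch.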