-rw-r--r--  drivers/staging/android/lowmemorykiller.c   3
-rw-r--r--  include/linux/shrinker.h                     1
-rw-r--r--  mm/vmscan.c                                 54
3 files changed, 55 insertions(+), 3 deletions(-)
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index bec687853c5d..9bd60de8380f 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -565,7 +565,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
 static struct shrinker lowmem_shrinker = {
 	.scan_objects = lowmem_scan,
 	.count_objects = lowmem_count,
-	.seeks = DEFAULT_SEEKS * 16
+	.seeks = DEFAULT_SEEKS * 16,
+	.flags = SHRINKER_LMK
 };

 static int __init lowmem_init(void)
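
On the lowmemorykiller side the patch only tags the driver's shrinker with the new SHRINKER_LMK flag (the trailing comma added to the .seeks line is just what C needs before a second initializer can follow). The count/scan contract that struct shrinker encodes can be modeled in a few lines of plain C; the sketch below is a userspace toy with invented names (toy_shrinker, toy_count, toy_scan), not the kernel's actual types:

/* Userspace model of the count/scan shrinker contract (toy code,
 * not the kernel's struct shrinker; all names here are invented). */
#include <stdio.h>

#define DEFAULT_SEEKS 2
#define SHRINKER_LMK  (1 << 2)

struct toy_shrinker {
	unsigned long (*count_objects)(void);
	unsigned long (*scan_objects)(unsigned long nr_to_scan);
	int seeks;
	unsigned int flags;
};

static unsigned long toy_count(void) { return 128; }

static unsigned long toy_scan(unsigned long nr_to_scan)
{
	/* Pretend we freed everything we were asked to scan. */
	return nr_to_scan;
}

static struct toy_shrinker lmk_model = {
	.count_objects = toy_count,
	.scan_objects = toy_scan,
	.seeks = DEFAULT_SEEKS * 16,
	.flags = SHRINKER_LMK,	/* the tag this patch introduces */
};

int main(void)
{
	unsigned long freeable = lmk_model.count_objects();

	printf("freed %lu of %lu\n",
	       lmk_model.scan_objects(freeable), freeable);
	return 0;
}
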
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 4fcacd915d45..e77f648f9662 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -66,6 +66,7 @@ struct shrinker {
 /* Flags */
 #define SHRINKER_NUMA_AWARE	(1 << 0)
 #define SHRINKER_MEMCG_AWARE	(1 << 1)
+#define SHRINKER_LMK		(1 << 2)

 extern int register_shrinker(struct shrinker *);
 extern void unregister_shrinker(struct shrinker *);
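
SHRINKER_LMK simply claims the next free bit after the two existing flags, and every caller tests membership with a bitwise AND. A minimal standalone check of that bit arithmetic (plain C, same defines as above):

#include <assert.h>

#define SHRINKER_NUMA_AWARE	(1 << 0)
#define SHRINKER_MEMCG_AWARE	(1 << 1)
#define SHRINKER_LMK		(1 << 2)

int main(void)
{
	unsigned int flags = SHRINKER_NUMA_AWARE | SHRINKER_LMK;

	assert(flags & SHRINKER_LMK);		/* picked up by shrink_slab_lmk() */
	assert(!(flags & SHRINKER_MEMCG_AWARE));	/* other bits unaffected */
	return 0;
}
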
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5e9e74955bd1..94fecacf0ddc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -399,6 +399,35 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	return freed;
 }

+static void shrink_slab_lmk(gfp_t gfp_mask, int nid,
+			    struct mem_cgroup *memcg,
+			    unsigned long nr_scanned,
+			    unsigned long nr_eligible)
+{
+	struct shrinker *shrinker;
+
+	if (nr_scanned == 0)
+		nr_scanned = SWAP_CLUSTER_MAX;
+
+	if (!down_read_trylock(&shrinker_rwsem))
+		goto out;
+
+	list_for_each_entry(shrinker, &shrinker_list, list) {
+		struct shrink_control sc = {
+			.gfp_mask = gfp_mask,
+		};
+
+		if (!(shrinker->flags & SHRINKER_LMK))
+			continue;
+
+		do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
+	}
+
+	up_read(&shrinker_rwsem);
+out:
+	cond_resched();
+}
+
 /**
  * shrink_slab - shrink slab caches
  * @gfp_mask: allocation context
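
shrink_slab_lmk() is a stripped-down twin of shrink_slab(): it takes shrinker_rwsem with a read trylock (backing off entirely if the lock is contended), walks shrinker_list, and calls do_shrink_slab() only for entries tagged SHRINKER_LMK, substituting SWAP_CLUSTER_MAX when nr_scanned is zero so the pressure ratio handed down is never degenerate. Note that the shrink_control only sets gfp_mask; the nid and memcg arguments are accepted but unused, which fits a shrinker that is neither NUMA- nor memcg-aware. A userspace sketch of the filtered walk (toy array in place of the kernel's linked list, invented names):

/* Userspace model of the filtered list walk in shrink_slab_lmk():
 * only entries carrying the LMK flag are scanned.  Toy code. */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL
#define SHRINKER_LMK	 (1 << 2)

struct entry { const char *name; unsigned int flags; };

static void walk(struct entry *list, int n, unsigned long nr_scanned)
{
	if (nr_scanned == 0)		/* never feed a zero ratio downstream */
		nr_scanned = SWAP_CLUSTER_MAX;

	for (int i = 0; i < n; i++) {
		if (!(list[i].flags & SHRINKER_LMK))
			continue;	/* ordinary shrinkers are skipped here */
		printf("scan %s with pressure %lu\n", list[i].name, nr_scanned);
	}
}

int main(void)
{
	struct entry list[] = {
		{ "dentry cache", 0 },
		{ "lowmemorykiller", SHRINKER_LMK },
	};

	walk(list, 2, 0);	/* only "lowmemorykiller" is scanned */
	return 0;
}
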
@@ -460,6 +489,9 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 			.memcg = memcg,
 		};

+		if (shrinker->flags & SHRINKER_LMK)
+			continue;
+
 		if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE))
 			continue;
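
With this skip in place the two walks partition the shrinker list: shrink_slab() now drives every shrinker except those tagged SHRINKER_LMK, and shrink_slab_lmk() drives only the tagged ones, so no shrinker can be invoked twice in a single reclaim pass.
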
@@ -2626,6 +2658,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 	gfp_t orig_mask;
 	enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
 	bool reclaimable = false;
+	unsigned long lru_pages = 0;

 	/*
 	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2653,6 +2686,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		 * to global LRU.
 		 */
 		if (global_reclaim(sc)) {
+			lru_pages += zone_reclaimable_pages(zone);
 			if (!cpuset_zone_allowed(zone,
 						 GFP_KERNEL | __GFP_HARDWALL))
 				continue;
@@ -2703,6 +2737,9 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			reclaimable = true;
 	}

+	if (global_reclaim(sc))
+		shrink_slab_lmk(sc->gfp_mask, 0, NULL,
+				sc->nr_scanned, lru_pages);
 	/*
 	 * Restore to original mask to avoid the impact on the caller if we
 	 * promoted it to __GFP_HIGHMEM.
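
In shrink_zones() the new lru_pages counter accumulates zone_reclaimable_pages() over every zone visited under global reclaim and is then handed to shrink_slab_lmk() as nr_eligible, with sc->nr_scanned as the numerator of the pressure ratio. Roughly, do_shrink_slab() in this kernel generation turns those two numbers into a per-shrinker scan target as sketched below (simplified userspace arithmetic under that assumption; the real function also clamps, batches, and honors count_objects()):

/* Simplified model of the pressure arithmetic inside do_shrink_slab():
 * the scan target scales with scanned/eligible.  Userspace math only. */
#include <stdio.h>

static unsigned long scan_target(unsigned long freeable, int seeks,
				 unsigned long nr_scanned,
				 unsigned long nr_eligible)
{
	unsigned long long delta = (4ULL * nr_scanned) / seeks;

	delta *= freeable;
	delta /= nr_eligible + 1;	/* +1 guards against divide-by-zero */
	return (unsigned long)delta;
}

int main(void)
{
	/* 2 GiB of eligible LRU pages, ~1% scanned this pass,
	 * seeks = 32 (DEFAULT_SEEKS * 16, as the lowmem shrinker sets). */
	printf("scan %lu objects\n", scan_target(1000, 32, 5242, 524288));
	return 0;
}
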
@@ -3181,7 +3218,8 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
  */
 static bool kswapd_shrink_zone(struct zone *zone,
 			       int classzone_idx,
-			       struct scan_control *sc)
+			       struct scan_control *sc,
+			       unsigned long lru_pages)
 {
 	unsigned long balance_gap;
 	bool lowmem_pressure;
@@ -3208,6 +3246,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
 		return true;

 	shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
+	shrink_slab_lmk(sc->gfp_mask, zone_to_nid(zone), NULL,
+			sc->nr_scanned, lru_pages);

 	clear_bit(ZONE_WRITEBACK, &zone->flags);
@@ -3265,6 +3305,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 	do {
 		bool raise_priority = true;
+		unsigned long lru_pages = 0;

 		sc.nr_reclaimed = 0;
@@ -3322,6 +3363,15 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		if (sc.priority < DEF_PRIORITY - 2)
 			sc.may_writepage = 1;

+		for (i = 0; i <= end_zone; i++) {
+			struct zone *zone = pgdat->node_zones + i;
+
+			if (!populated_zone(zone))
+				continue;
+
+			lru_pages += zone_reclaimable_pages(zone);
+		}
+
 		/*
 		 * Now scan the zone in the dma->highmem direction, stopping
 		 * at the last zone which needs scanning.
@@ -3358,7 +3408,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 			 * that that high watermark would be met at 100%
 			 * efficiency.
 			 */
-			if (kswapd_shrink_zone(zone, end_zone, &sc))
+			if (kswapd_shrink_zone(zone, end_zone, &sc, lru_pages))
 				raise_priority = false;
 		}
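
Taken together, the kswapd side now works like this: once per priority round, balance_pgdat() sums zone_reclaimable_pages() over the populated zones up to end_zone, and every kswapd_shrink_zone() call forwards that total so shrink_slab_lmk() can run right after shrink_zone(). A condensed userspace model of that flow (zone sizes and all helper names invented):

/* Condensed model of the kswapd-side flow this patch creates: sum
 * reclaimable LRU pages once per priority round, then hand the total
 * to each per-zone shrink step.  Toy code, nothing kernel-real here. */
#include <stdio.h>

#define MAX_ZONES 4

static unsigned long zone_reclaimable[MAX_ZONES] = { 1024, 8192, 65536, 0 };

static void shrink_slab_lmk_model(unsigned long nr_scanned,
				  unsigned long lru_pages)
{
	printf("lmk shrink: scanned=%lu eligible=%lu\n", nr_scanned, lru_pages);
}

static void kswapd_shrink_zone_model(int zone, unsigned long nr_scanned,
				     unsigned long lru_pages)
{
	/* shrink_zone(zone, ...) would reclaim LRU pages here, then: */
	shrink_slab_lmk_model(nr_scanned, lru_pages);
}

int main(void)
{
	int end_zone = 2;
	unsigned long lru_pages = 0, nr_scanned = 3000;

	/* Snapshot of the per-round accumulation loop in balance_pgdat(). */
	for (int i = 0; i <= end_zone; i++)
		if (zone_reclaimable[i])	/* stands in for populated_zone() */
			lru_pages += zone_reclaimable[i];

	/* Each zone's shrink step sees the same per-round total. */
	for (int i = 0; i <= end_zone; i++)
		kswapd_shrink_zone_model(i, nr_scanned, lru_pages);
	return 0;
}
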