summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVinayak Menon <vinmenon@codeaurora.org>2015-12-21 13:00:58 +0530
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 21:19:04 -0700
commitad6f4862845c0d6462a9ad7ba2d2e67132449eb1 (patch)
tree8e86c13fc0d0e0a5badda10a4012123795ad4394
parent1506b57673ab9393f54d584c9a1144f5152fa18f (diff)
mm: swap: swap ratio support
Add support to receive a static ratio from userspace to divide the swap pages between ZRAM and disk based swap devices. The existing infrastructure allows keeping the same priority for multiple swap devices, which results in round-robin distribution of pages. With this patch, the ratio can be defined. CRs-fixed: 968416 Change-Id: I54f54489db84cabb206569dd62d61a8a7a898991 Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
-rw-r--r--include/linux/plist.h3
-rw-r--r--include/linux/swap.h5
-rw-r--r--include/linux/swapfile.h5
-rw-r--r--kernel/sysctl.c16
-rw-r--r--mm/Makefile2
-rw-r--r--mm/swap_ratio.c189
-rw-r--r--mm/swapfile.c30
7 files changed, 245 insertions, 5 deletions
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 97883604a3c5..0ea3e1bc7ccc 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -266,6 +266,9 @@ static inline int plist_node_empty(const struct plist_node *node)
#define plist_next(pos) \
list_next_entry(pos, node_list)
+#define plist_next_entry(pos, type, member) \
+ container_of(plist_next(pos), type, member)
+
/**
* plist_prev - get the prev entry in list
* @pos: the type * to cursor
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b142d27b3edb..a93eb9262eb6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -157,6 +157,7 @@ enum {
#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
+#define SWAPFILE_CLUSTER 256
/*
* Ratio between zone->managed_pages and the "gap" that above the per-zone
@@ -247,6 +248,8 @@ struct swap_info_struct {
struct work_struct discard_work; /* discard worker */
struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
+ unsigned int write_pending;
+ unsigned int max_writes;
};
/* linux/mm/workingset.c */
@@ -330,6 +333,8 @@ extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
+extern int sysctl_swap_ratio;
+extern int sysctl_swap_ratio_enable;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 388293a91e8c..ed2a9c9c2b43 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -7,7 +7,12 @@
*/
extern spinlock_t swap_lock;
extern struct plist_head swap_active_head;
+extern spinlock_t swap_avail_lock;
+extern struct plist_head swap_avail_head;
extern struct swap_info_struct *swap_info[];
extern int try_to_unuse(unsigned int, bool, unsigned long);
+extern int swap_ratio(struct swap_info_struct **si);
+extern void setup_swap_ratio(struct swap_info_struct *p, int prio);
+extern bool is_swap_ratio_group(int prio);
#endif /* _LINUX_SWAPFILE_H */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f657173f5c15..dcb852652bc8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1827,6 +1827,22 @@ static struct ctl_table vm_table[] = {
.extra2 = (void *)&mmap_rnd_compat_bits_max,
},
#endif
+#ifdef CONFIG_SWAP
+ {
+ .procname = "swap_ratio",
+ .data = &sysctl_swap_ratio,
+ .maxlen = sizeof(sysctl_swap_ratio),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ },
+ {
+ .procname = "swap_ratio_enable",
+ .data = &sysctl_swap_ratio_enable,
+ .maxlen = sizeof(sysctl_swap_ratio_enable),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ },
+#endif
{ }
};
diff --git a/mm/Makefile b/mm/Makefile
index 523d7679d205..11a79895a04f 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -37,7 +37,7 @@ ifdef CONFIG_MMU
endif
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
-obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
+obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_ratio.o
obj-$(CONFIG_FRONTSWAP) += frontswap.o
obj-$(CONFIG_ZSWAP) += zswap.o
obj-$(CONFIG_ZCACHE) += zcache.o
diff --git a/mm/swap_ratio.c b/mm/swap_ratio.c
new file mode 100644
index 000000000000..3b4cf29c3777
--- /dev/null
+++ b/mm/swap_ratio.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm_types.h>
+#include <linux/swapfile.h>
+#include <linux/swap.h>
+
+#define SWAP_RATIO_GROUP_START (SWAP_FLAG_PRIO_MASK - 9) /* 32758 */
+#define SWAP_RATIO_GROUP_END (SWAP_FLAG_PRIO_MASK) /* 32767 */
+#define SWAP_FAST_WRITES (SWAPFILE_CLUSTER * (SWAP_CLUSTER_MAX / 8))
+#define SWAP_SLOW_WRITES SWAPFILE_CLUSTER
+
+/*
+ * The fast/slow swap write ratio.
+ * 100 indicates that all writes should
+ * go to fast swap device.
+ */
+int sysctl_swap_ratio = 100;
+
+/* Enable the swap ratio feature */
+int sysctl_swap_ratio_enable;
+
+/*
+ * is_same_group - check whether two swap devices share one ratio group.
+ * @a: device whose priority defines the group being tested.
+ * @b: device compared against @a.
+ *
+ * Returns true only when the swap-ratio feature is enabled via the
+ * sysctl, @a's priority lies inside the reserved ratio-group range
+ * (see is_swap_ratio_group()), and @b has the same priority as @a.
+ */
+static bool is_same_group(struct swap_info_struct *a,
+ struct swap_info_struct *b)
+{
+ /* Feature is gated by the swap_ratio_enable sysctl. */
+ if (!sysctl_swap_ratio_enable)
+ return false;
+
+ /* Only priorities in the reserved group range participate. */
+ if (!is_swap_ratio_group(a->prio))
+ return false;
+
+ if (a->prio == b->prio)
+ return true;
+
+ return false;
+}
+
+/*
+ * calculate_write_pending - recompute both devices' write budgets.
+ * @si: fast swap device (must have SWP_FAST set; WARNed otherwise).
+ * @n: slow swap device expected to be in the same ratio group as @si.
+ *
+ * Derives max_writes/write_pending for the pair from the swap_ratio
+ * sysctl so the fast device receives "ratio" percent of the combined
+ * writes. Returns 0 on success, -EINVAL when the ratio is outside
+ * [0, 100], or -ENODEV when @si/@n is not a fast/slow pair in the
+ * same group.
+ *
+ * Caller must hold swap_avail_lock.
+ */
+static int calculate_write_pending(struct swap_info_struct *si,
+ struct swap_info_struct *n)
+{
+ int ratio = sysctl_swap_ratio;
+
+ if ((ratio < 0) || (ratio > 100))
+ return -EINVAL;
+
+ if (WARN_ON(!(si->flags & SWP_FAST)))
+ return -ENODEV;
+
+ if ((n->flags & SWP_FAST) || !is_same_group(si, n))
+ return -ENODEV;
+
+ /*
+ * For ratio r: fast gets SWAP_FAST_WRITES and slow gets
+ * SWAP_FAST_WRITES * (100 - r) / r, so fast/(fast + slow) == r/100.
+ * r == 0 gives the fast device no budget at all.
+ */
+ si->max_writes = ratio ? SWAP_FAST_WRITES : 0;
+ n->max_writes = ratio ? (SWAP_FAST_WRITES * 100) /
+ ratio - SWAP_FAST_WRITES : SWAP_SLOW_WRITES;
+
+ si->write_pending = si->max_writes;
+ n->write_pending = n->max_writes;
+
+ trace_printk("%u, %u\n", si->max_writes, n->max_writes);
+
+ return 0;
+}
+
+/*
+ * swap_ratio_slow - select the swap device honouring the fast:slow ratio.
+ * @si: in/out; on entry the candidate (head-of-list) device, on exit the
+ * device that should receive the next swap write.
+ *
+ * Looks at *si and its successor @n on swap_avail_head, decrements the
+ * chosen device's write_pending budget, requeues devices within their
+ * priority group, and refreshes budgets via calculate_write_pending()
+ * once both run dry. Returns 0 on success or -ENODEV when ratio
+ * accounting does not apply (caller falls back to normal selection).
+ *
+ * Locking: takes (*si)->lock, then swap_avail_lock; to also take
+ * n->lock it drops swap_avail_lock first and reacquires it after
+ * (device lock before avail lock). On the paths that switch devices,
+ * the old *si's lock is dropped and *si is set to @n so the "skip"
+ * exit unlocks the device actually being returned.
+ */
+static int swap_ratio_slow(struct swap_info_struct **si)
+{
+ struct swap_info_struct *n = NULL;
+ int ret = 0;
+
+ spin_lock(&(*si)->lock);
+ spin_lock(&swap_avail_lock);
+ /* *si is the last avail entry: no partner device to ratio against. */
+ if (&(*si)->avail_list == plist_last(&swap_avail_head)) {
+ /* just to make skip work */
+ n = *si;
+ ret = -ENODEV;
+ goto skip;
+ }
+ n = plist_next_entry(&(*si)->avail_list,
+ struct swap_info_struct,
+ avail_list);
+ spin_unlock(&swap_avail_lock);
+ spin_lock(&n->lock);
+ spin_lock(&swap_avail_lock);
+
+ /* Fast device currently at the head of the avail list. */
+ if ((*si)->flags & SWP_FAST) {
+ if ((*si)->write_pending) {
+ (*si)->write_pending--;
+ goto exit;
+ } else {
+ if ((n->flags & SWP_FAST) || !is_same_group(*si, n)) {
+ /* Should never happen */
+ ret = -ENODEV;
+ } else if (n->write_pending) {
+ /*
+ * Requeue fast device, since there are pending
+ * writes for slow device.
+ */
+ plist_requeue(&(*si)->avail_list,
+ &swap_avail_head);
+ n->write_pending--;
+ spin_unlock(&(*si)->lock);
+ *si = n;
+ goto skip;
+ } else {
+ /* Both budgets exhausted: start a fresh cycle. */
+ if (0 > calculate_write_pending(*si, n)) {
+ ret = -ENODEV;
+ goto exit;
+ }
+ /* Restart from fast device */
+ (*si)->write_pending--;
+ }
+ }
+ } else {
+ /* Slow device at the head; @n should be the fast partner. */
+ if (!(n->flags & SWP_FAST) || !is_same_group(*si, n)) {
+ /* Should never happen */
+ ret = -ENODEV;
+ } else if (n->write_pending) {
+ /*
+ * Pending writes for fast device.
+ * We reach here when slow device is swapped on first,
+ * before fast device.
+ */
+ /* requeue slow device to the end */
+ plist_requeue(&(*si)->avail_list, &swap_avail_head);
+ n->write_pending--;
+ spin_unlock(&(*si)->lock);
+ *si = n;
+ goto skip;
+ } else {
+ if ((*si)->write_pending) {
+ (*si)->write_pending--;
+ } else {
+ /* Both dry: recompute with @n as the fast side. */
+ if (0 > calculate_write_pending(n, *si)) {
+ ret = -ENODEV;
+ goto exit;
+ }
+ n->write_pending--;
+ plist_requeue(&(*si)->avail_list,
+ &swap_avail_head);
+ spin_unlock(&(*si)->lock);
+ *si = n;
+ goto skip;
+ }
+ }
+ }
+exit:
+ spin_unlock(&(*si)->lock);
+skip:
+ spin_unlock(&swap_avail_lock);
+ /* n and si would have got interchanged */
+ spin_unlock(&n->lock);
+ return ret;
+}
+
+/*
+ * is_swap_ratio_group - test whether @prio falls in the priority range
+ * [SWAP_RATIO_GROUP_START, SWAP_RATIO_GROUP_END] reserved for devices
+ * that take part in ratio-based swap distribution.
+ */
+bool is_swap_ratio_group(int prio)
+{
+ return ((prio >= SWAP_RATIO_GROUP_START) &&
+ (prio <= SWAP_RATIO_GROUP_END)) ? true : false;
+}
+
+/*
+ * setup_swap_ratio - seed a device's write budget at swapon time.
+ * @p: swap device being enabled.
+ * @prio: priority requested by userspace (SWAP_FLAG_PREFER).
+ *
+ * Gives a fast (SWP_FAST) device the larger initial budget so that the
+ * first selection cycle starts on the fast side. No-op for priorities
+ * outside the ratio-group range.
+ */
+void setup_swap_ratio(struct swap_info_struct *p, int prio)
+{
+ /* Used only if sysctl_swap_ratio_enable is set */
+ if (is_swap_ratio_group(prio)) {
+ if (p->flags & SWP_FAST)
+ p->write_pending = SWAP_FAST_WRITES;
+ else
+ p->write_pending = SWAP_SLOW_WRITES;
+ p->max_writes = p->write_pending;
+ }
+}
+
+/*
+ * swap_ratio - ratio-aware device selection entry point.
+ * @si: in/out candidate swap device (see swap_ratio_slow()).
+ *
+ * Applies ratio accounting only to devices whose priority is in the
+ * reserved group range; returns -ENODEV otherwise so the caller
+ * (get_swap_page()) uses its normal round-robin path.
+ */
+int swap_ratio(struct swap_info_struct **si)
+{
+ if (is_swap_ratio_group((*si)->prio))
+ return swap_ratio_slow(si);
+ else
+ return -ENODEV;
+}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c86c3fd4fc00..3ae1225084c9 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -75,8 +75,8 @@ PLIST_HEAD(swap_active_head);
* is held and the locking order requires swap_lock to be taken
* before any swap_info_struct->lock.
*/
-static PLIST_HEAD(swap_avail_head);
-static DEFINE_SPINLOCK(swap_avail_lock);
+PLIST_HEAD(swap_avail_head);
+DEFINE_SPINLOCK(swap_avail_lock);
struct swap_info_struct *swap_info[MAX_SWAPFILES];
@@ -213,7 +213,6 @@ static void discard_swap_cluster(struct swap_info_struct *si,
}
}
-#define SWAPFILE_CLUSTER 256
#define LATENCY_LIMIT 256
static inline void cluster_set_flag(struct swap_cluster_info *info,
@@ -662,18 +661,39 @@ swp_entry_t get_swap_page(void)
{
struct swap_info_struct *si, *next;
pgoff_t offset;
+ int swap_ratio_off = 0;
if (atomic_long_read(&nr_swap_pages) <= 0)
goto noswap;
atomic_long_dec(&nr_swap_pages);
+lock_and_start:
spin_lock(&swap_avail_lock);
start_over:
plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
+
+ if (sysctl_swap_ratio && !swap_ratio_off) {
+ int ret;
+
+ spin_unlock(&swap_avail_lock);
+ ret = swap_ratio(&si);
+ if (0 > ret) {
+ /*
+ * Error. Start again with swap
+ * ratio disabled.
+ */
+ swap_ratio_off = 1;
+ goto lock_and_start;
+ } else {
+ goto start;
+ }
+ }
+
/* requeue si to after same-priority siblings */
plist_requeue(&si->avail_list, &swap_avail_head);
spin_unlock(&swap_avail_lock);
+start:
spin_lock(&si->lock);
if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
spin_lock(&swap_avail_lock);
@@ -2561,9 +2581,11 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
mutex_lock(&swapon_mutex);
prio = -1;
- if (swap_flags & SWAP_FLAG_PREFER)
+ if (swap_flags & SWAP_FLAG_PREFER) {
prio =
(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
+ setup_swap_ratio(p, prio);
+ }
enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
pr_info("Adding %uk swap on %s. "