author    Srivatsa Vaddagiri <vatsa@codeaurora.org>    2014-11-04 15:25:50 +0530
committer David Keitel <dkeitel@codeaurora.org>    2016-03-23 20:00:59 -0700
commit    b2e57842c0c566fe2a45d32eacc3ba41f64c3e2a
tree      86d6d83b70dd90cf8178210d32de6325e9889c63 /drivers
parent    33af11b6f4238f34860e305cf7610021ea8036b4
sched: per-cpu mostly_idle threshold
The sched_mostly_idle_load and sched_mostly_idle_nr_run knobs help pack tasks on cpus to some extent. In some cases, it may be desirable to have different packing limits for different cpus. For example, pack to a higher limit on high-performance cpus compared to power-efficient cpus.

This patch removes the global mostly_idle tunables and makes them per-cpu, thus letting task packing behavior be controlled in a fine-grained manner.

Change-Id: Ifc254cda34b928eae9d6c342ce4c0f64e531e6c2
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
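With this change the knobs become per-cpu sysfs attributes, so each core can be tuned independently. A minimal userspace sketch follows; the /sys/devices/system/cpu/cpuN/ paths are an assumption based on the attributes being attached to each cpu device, and the cpu number and values are illustrative, not recommendations:

/* Hypothetical userspace helper: write one per-cpu packing knob. */
#include <stdio.h>

static int write_cpu_knob(int cpu, const char *knob, int val)
{
	char path[128];
	FILE *fp;

	/* Assumed sysfs location of the per-cpu device attributes. */
	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/%s", cpu, knob);
	fp = fopen(path, "w");
	if (!fp)
		return -1;
	fprintf(fp, "%d\n", val);
	return fclose(fp);
}

int main(void)
{
	/* Pack more aggressively on cpu4 (illustrative values only). */
	write_cpu_knob(4, "sched_mostly_idle_load", 30);
	write_cpu_knob(4, "sched_mostly_idle_nr_run", 5);
	return 0;
}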
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/base/cpu.c    95
1 file changed, 95 insertions, 0 deletions
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 91bbb1959d8d..763fd00c697b 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -180,10 +180,102 @@ static struct attribute_group crash_note_cpu_attr_group = {
};
#endif
+#ifdef CONFIG_SCHED_HMP
+static ssize_t show_sched_mostly_idle_load(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ ssize_t rc;
+ int cpunum;
+ int mostly_idle_pct;
+
+ cpunum = cpu->dev.id;
+
+ mostly_idle_pct = sched_get_cpu_mostly_idle_load(cpunum);
+
+ rc = snprintf(buf, PAGE_SIZE-2, "%d\n", mostly_idle_pct);
+
+ return rc;
+}
+
+static ssize_t __ref store_sched_mostly_idle_load(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int cpuid = cpu->dev.id;
+ int mostly_idle_load, err;
+
+ err = kstrtoint(strstrip((char *)buf), 0, &mostly_idle_load);
+ if (err)
+ return err;
+
+ err = sched_set_cpu_mostly_idle_load(cpuid, mostly_idle_load);
+ if (err >= 0)
+ err = count;
+
+ return err;
+}
+
+static ssize_t show_sched_mostly_idle_nr_run(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ ssize_t rc;
+ int cpunum;
+ int mostly_idle_nr_run;
+
+ cpunum = cpu->dev.id;
+
+ mostly_idle_nr_run = sched_get_cpu_mostly_idle_nr_run(cpunum);
+
+ rc = snprintf(buf, PAGE_SIZE-2, "%d\n", mostly_idle_nr_run);
+
+ return rc;
+}
+
+static ssize_t __ref store_sched_mostly_idle_nr_run(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int cpuid = cpu->dev.id;
+ int mostly_idle_nr_run, err;
+
+ err = kstrtoint(strstrip((char *)buf), 0, &mostly_idle_nr_run);
+ if (err)
+ return err;
+
+ err = sched_set_cpu_mostly_idle_nr_run(cpuid, mostly_idle_nr_run);
+ if (err >= 0)
+ err = count;
+
+ return err;
+}
+
+static DEVICE_ATTR(sched_mostly_idle_load, 0664, show_sched_mostly_idle_load,
+ store_sched_mostly_idle_load);
+static DEVICE_ATTR(sched_mostly_idle_nr_run, 0664,
+ show_sched_mostly_idle_nr_run, store_sched_mostly_idle_nr_run);
+
+static struct attribute *hmp_sched_cpu_attrs[] = {
+ &dev_attr_sched_mostly_idle_load.attr,
+ &dev_attr_sched_mostly_idle_nr_run.attr,
+ NULL
+};
+
+static struct attribute_group sched_hmp_cpu_attr_group = {
+ .attrs = hmp_sched_cpu_attrs,
+};
+
+#endif /* CONFIG_SCHED_HMP */
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
+#ifdef CONFIG_SCHED_HMP
+ &sched_hmp_cpu_attr_group,
+#endif
NULL
};
@@ -191,6 +283,9 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
+#ifdef CONFIG_SCHED_HMP
+ &sched_hmp_cpu_attr_group,
+#endif
NULL
};
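One detail of the store paths worth noting: the handlers above cast away const on buf in order to run strstrip() before parsing. Since kstrtoint() already accepts and consumes a single trailing newline, which is the usual shape of a sysfs write, the strip is redundant. A minimal alternative sketch of the load handler, mirroring the patch's logic under that assumption (illustrative only, not part of the commit):

static ssize_t store_sched_mostly_idle_load(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int mostly_idle_load, err;

	/*
	 * kstrtoint() tolerates a trailing newline itself, so no
	 * strstrip() and no const-discarding cast are needed here.
	 */
	err = kstrtoint(buf, 0, &mostly_idle_load);
	if (err)
		return err;

	err = sched_set_cpu_mostly_idle_load(cpu->dev.id, mostly_idle_load);
	if (err < 0)
		return err;

	return count;
}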