author    Srivatsa Vaddagiri <vatsa@codeaurora.org>    2014-11-21 18:25:11 +0530
committer David Keitel <dkeitel@codeaurora.org>        2016-03-23 20:01:03 -0700
commit    8e3aa6790ca4ff4744e30720e97c458375a35237 (patch)
tree      b850a59db6aaa036957f304fb6ee6edae1c3a8c4 /kernel
parent    2365b0cbd64fe7a00ec2cfd3b7d8a20df640e095 (diff)
sched: Packing support until a frequency threshold
Add another dimension for task packing based on frequency. This patch
adds a per-cpu tunable, rq->mostly_idle_freq, which when set will
result in tasks being packed on a single cpu in a cluster as long as
the cluster frequency is less than the set threshold.

Change-Id: I318e9af6c8788ddf5dfcda407d621449ea5343c0
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
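For illustration only, the sketch below mirrors the decision flow that select_packing_target() introduces, rewritten as plain user-space C: pack onto the cheapest allowed CPU only while the cluster runs below the mostly_idle_freq threshold, and skip packing when the cluster's max frequency is itself capped below the threshold (i.e. it is throttled). The struct fields, helper names, and cost values here are stand-ins, not the scheduler's actual data structures.

    /* Illustrative sketch, not kernel code. */
    #include <limits.h>
    #include <stdio.h>

    struct cpu_info {
            unsigned int cur_freq;          /* current cluster frequency (kHz) */
            unsigned int max_freq;          /* current max; may be throttled */
            unsigned int mostly_idle_freq;  /* packing threshold, 0 = disabled */
            int power_cost;                 /* relative cost of running here */
    };

    /* Rough analogue of the select_packing_target() flow in the patch. */
    static int pick_packing_target(const struct cpu_info *cpus, int ncpus,
                                   int best_cpu)
    {
            const struct cpu_info *rq = &cpus[best_cpu];
            int min_cost = INT_MAX;
            int target = best_cpu;
            int i;

            if (!rq->mostly_idle_freq)                 /* feature disabled */
                    return best_cpu;
            if (rq->cur_freq >= rq->mostly_idle_freq)  /* cluster already busy */
                    return best_cpu;
            if (rq->max_freq <= rq->mostly_idle_freq)  /* low freq due to throttling */
                    return best_cpu;

            for (i = 0; i < ncpus; i++) {              /* cheapest CPU wins */
                    if (cpus[i].power_cost < min_cost) {
                            min_cost = cpus[i].power_cost;
                            target = i;
                    }
            }
            return target;
    }

    int main(void)
    {
            struct cpu_info cluster[] = {
                    { 600000, 1500000, 800000, 20 },
                    { 600000, 1500000, 800000, 10 },
            };

            /* Below the threshold and not throttled, so the cheaper cpu 1 is chosen. */
            printf("pack on cpu %d\n", pick_packing_target(cluster, 2, 0));
            return 0;
    }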
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  |  1
-rw-r--r--  kernel/sched/fair.c  | 62
-rw-r--r--  kernel/sched/sched.h |  1
3 files changed, 64 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a7324abaeb3f..d43925c6e560 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9134,6 +9134,7 @@ void __init sched_init(void)
rq->hmp_flags = 0;
rq->mostly_idle_load = pct_to_real(20);
rq->mostly_idle_nr_run = 3;
+ rq->mostly_idle_freq = 0;
#ifdef CONFIG_SCHED_FREQ_INPUT
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
rq->old_busy_time = 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 087f5e072e35..1238a6825e7f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2703,6 +2703,25 @@ int sched_set_cpu_mostly_idle_load(int cpu, int mostly_idle_pct)
return 0;
}
+int sched_set_cpu_mostly_idle_freq(int cpu, unsigned int mostly_idle_freq)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (mostly_idle_freq > rq->max_possible_freq)
+ return -EINVAL;
+
+ rq->mostly_idle_freq = mostly_idle_freq;
+
+ return 0;
+}
+
+unsigned int sched_get_cpu_mostly_idle_freq(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ return rq->mostly_idle_freq;
+}
+
int sched_get_cpu_mostly_idle_load(int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -3109,6 +3128,42 @@ static int skip_cpu(struct task_struct *p, int cpu, int reason)
return skip;
}
+/*
+ * Select a single cpu in cluster as target for packing, iff cluster frequency
+ * is less than a threshold level
+ */
+static int select_packing_target(struct task_struct *p, int best_cpu)
+{
+ struct rq *rq = cpu_rq(best_cpu);
+ struct cpumask search_cpus;
+ int i;
+ int min_cost = INT_MAX;
+ int target = best_cpu;
+
+ if (rq->cur_freq >= rq->mostly_idle_freq)
+ return best_cpu;
+
+ /* Don't pack if current freq is low because of throttling */
+ if (rq->max_freq <= rq->mostly_idle_freq)
+ return best_cpu;
+
+ cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
+ cpumask_and(&search_cpus, &search_cpus, &rq->freq_domain_cpumask);
+
+ /* Pick the first lowest power cpu as target */
+ for_each_cpu(i, &search_cpus) {
+ int cost = power_cost(p, i);
+
+ if (cost < min_cost) {
+ target = i;
+ min_cost = cost;
+ }
+ }
+
+ return target;
+}
+
+
/* return cheapest cpu that can fit this task */
static int select_best_cpu(struct task_struct *p, int target, int reason)
{
@@ -3220,6 +3275,9 @@ done:
best_cpu = fallback_idle_cpu;
}
+ if (cpu_rq(best_cpu)->mostly_idle_freq)
+ best_cpu = select_packing_target(p, best_cpu);
+
return best_cpu;
}
@@ -9286,6 +9344,10 @@ static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
struct sched_domain *sd;
int i;
+ if (rq->mostly_idle_freq && rq->cur_freq < rq->mostly_idle_freq
+ && rq->max_freq > rq->mostly_idle_freq)
+ return 0;
+
if (rq->nr_running >= 2 && (rq->nr_running - rq->nr_small_tasks >= 2 ||
rq->nr_running > rq->mostly_idle_nr_run ||
cpu_load(cpu) > rq->mostly_idle_load)) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d0193932354f..fcdf4063ac11 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -660,6 +660,7 @@ struct rq {
u64 window_start;
u32 mostly_idle_load;
int mostly_idle_nr_run;
+ int mostly_idle_freq;
#ifdef CONFIG_SCHED_FREQ_INPUT
unsigned int old_busy_time;