author    Dietmar Eggemann <dietmar.eggemann@arm.com>    2015-01-26 19:47:28 +0000
committer Leo Yan <leo.yan@linaro.org>                   2016-05-10 16:49:49 +0800
commit    cda2bd333be6e5e8c38fa072c9c4c296312dfd8d (patch)
tree      8707e66be01c969ea77d0281b95bce399821234e /kernel/sched/fair.c
parent    8a5c0339bb7b7eedd2e75b62ebe4144a195f7ef3 (diff)
sched: Enable idle balance to pull single task towards cpu with higher capacity
We do not want to miss out on the ability to pull a single remaining task from a potential source cpu towards an idle destination cpu. Add an extra criterion to need_active_balance() to kick off active load balance if the source cpu is over-utilized and has lower capacity than the destination cpu.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
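The over-utilization test added here compares utilization against capacity with a fixed margin: a cpu counts as over-utilized once cpu_util(cpu) * capacity_margin exceeds capacity_of(cpu) * 1024. A minimal standalone sketch of that arithmetic follows; the margin value of 1280 (which puts the threshold at 1024/1280 = 80% of capacity) and the example capacity of 430 are assumptions for illustration, not values defined in this patch.

#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

/* Assumed margin; 1280 puts the over-utilization threshold at 80% of capacity. */
static unsigned long capacity_margin = 1280;

/* Same inequality as cpu_overutilized(), with capacity and utilization passed in. */
static bool overutilized(unsigned long capacity, unsigned long util)
{
	return (capacity * SCHED_CAPACITY_SCALE) < (util * capacity_margin);
}

int main(void)
{
	/* A cpu of capacity 430 crosses the threshold once util exceeds 344. */
	printf("%d\n", overutilized(430, 340));	/* 0: still fits     */
	printf("%d\n", overutilized(430, 350));	/* 1: over-utilized  */
	return 0;
}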
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  12
1 file changed, 12 insertions, 0 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bebc8367edee..fe40cdb8e640 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4779,6 +4779,11 @@ static inline bool task_fits_spare(struct task_struct *p, int cpu)
 	return __task_fits(p, cpu, cpu_util(cpu));
 }
 
+static bool cpu_overutilized(int cpu)
+{
+	return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+}
+
 /*
  * find_idlest_group finds and returns the least busy CPU group within the
  * domain.
@@ -6995,6 +7000,13 @@ static int need_active_balance(struct lb_env *env)
 			return 1;
 	}
 
+	if ((capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
+				env->src_rq->cfs.h_nr_running == 1 &&
+				cpu_overutilized(env->src_cpu) &&
+				!cpu_overutilized(env->dst_cpu)) {
+		return 1;
+	}
+
 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
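Taken together, the new need_active_balance() trigger fires only when the source cpu has lower capacity than the destination, has exactly one runnable CFS task left, and is over-utilized while the destination is not. The following standalone model restates that decision for illustration only; the struct, the helper names and the big/little example capacities are assumptions, not code from the patch.

#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	unsigned long capacity;		/* stands in for capacity_of(cpu) */
	unsigned long util;		/* stands in for cpu_util(cpu)    */
	unsigned int nr_cfs_tasks;	/* stands in for cfs.h_nr_running */
};

/* Same check as in the first sketch, with an assumed margin of 1280. */
static bool overutilized(const struct cpu_state *c)
{
	return (c->capacity * 1024UL) < (c->util * 1280UL);
}

/* Model of the extra criterion added to need_active_balance(). */
static bool pull_single_task(const struct cpu_state *src, const struct cpu_state *dst)
{
	return src->capacity < dst->capacity &&
	       src->nr_cfs_tasks == 1 &&
	       overutilized(src) &&
	       !overutilized(dst);
}

int main(void)
{
	/* Assumed big.LITTLE-style capacities: little 430, big 1024. */
	struct cpu_state little = { .capacity = 430,  .util = 400, .nr_cfs_tasks = 1 };
	struct cpu_state big    = { .capacity = 1024, .util = 50,  .nr_cfs_tasks = 0 };

	/* Prints 1: the lone task on the busy little cpu is worth pulling. */
	printf("%d\n", pull_single_task(&little, &big));
	return 0;
}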