author    Syed Rameez Mustafa <rameezmustafa@codeaurora.org>    2015-02-10 14:55:24 -0800
committer David Keitel <dkeitel@codeaurora.org>    2016-03-23 20:01:38 -0700
commit    fffa33d56acad1c91ae27673c7af5f5415e2151f (patch)
tree      3bf1fac923dbd0bdaa5dad0f5a4e809054885929 /kernel
parent    dbd548aed7e2657b1ec16e2b1b6adb43642ea8b3 (diff)
sched: Avoid pulling big tasks to the little cluster during load balance
When a lower capacity CPU attempts to pull work from a higher capacity
CPU during load balance, it does not distinguish between tasks that
will fit on the destination CPU and tasks that will not. This causes
suboptimal load balancing decisions whereby big tasks end up on the
lower capacity CPUs and little tasks remain on the higher capacity
CPUs. Avoid this behavior by first restricting the search to tasks
that fit on the destination CPU. If no such task can be found, remove
the restriction so that any task can be pulled over to the destination
CPU. This behavior does not apply during sched_boost, however, as none
of the tasks will fit on a lower capacity CPU.

Change-Id: I1093420a629a0886fc3375849372ab7cf42e928e
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed minor conflict in can_migrate_task().]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
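As an illustration of the two-pass search described above, here is a
minimal standalone C sketch of the same pattern. The task_t struct and
the fits_on_dst() and pick_task() helpers are hypothetical stand-ins,
not the scheduler's actual types or API.

/*
 * Hypothetical sketch: the first pass pulls only tasks that fit on the
 * destination CPU; if nothing was found, retry without the filter.
 */
#include <stdbool.h>
#include <stdio.h>

#define IGNORE_BIG_TASKS 0x1

typedef struct { const char *name; int demand; } task_t;	/* assumed shape */

static bool fits_on_dst(const task_t *t, int dst_capacity)
{
	return t->demand <= dst_capacity;
}

static const task_t *pick_task(const task_t *tasks, int n,
			       int dst_capacity, unsigned int flags)
{
	for (int i = 0; i < n; i++) {
		/* mirrors the early "return 0" in can_migrate_task() */
		if ((flags & IGNORE_BIG_TASKS) &&
		    !fits_on_dst(&tasks[i], dst_capacity))
			continue;
		return &tasks[i];
	}
	return NULL;
}

int main(void)
{
	task_t src_rq[] = { { "big", 900 }, { "bigger", 1000 } };
	int dst_capacity = 400;	/* a little CPU */

	/* Pass 1: restrict the search to tasks that fit. */
	const task_t *p = pick_task(src_rq, 2, dst_capacity, IGNORE_BIG_TASKS);
	/* Pass 2: nothing fit, so drop the restriction and redo. */
	if (!p)
		p = pick_task(src_rq, 2, dst_capacity, 0);

	printf("pulled: %s\n", p ? p->name : "none");	/* prints "pulled: big" */
	return 0;
}

Here neither task fits on the 400-capacity destination, so the first
pass comes up empty and the second, unrestricted pass pulls a task
anyway, which is exactly the fallback the patch implements.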
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c | 28 ++++++++++++++++++++++++----
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2eebdd524a52..696b52673436 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7395,6 +7395,7 @@ enum fbq_type { regular, remote, all };
 #define LBF_IGNORE_SMALL_TASKS 0x10
 #define LBF_PWR_ACTIVE_BALANCE 0x20
 #define LBF_SCHED_BOOST 0x40
+#define LBF_IGNORE_BIG_TASKS 0x80
 
 struct lb_env {
 	struct sched_domain	*sd;
@@ -7522,6 +7523,7 @@ static
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
 	int tsk_cache_hot;
+	int twf;
 
 	lockdep_assert_held(&env->src_rq->lock);
 
@@ -7543,8 +7545,22 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	if (env->flags & LBF_IGNORE_SMALL_TASKS && is_small_task(p))
 		return 0;
 
-	if (!task_will_fit(p, env->dst_cpu) &&
-			env->busiest_nr_running <= env->busiest_grp_capacity)
+	twf = task_will_fit(p, env->dst_cpu);
+
+	/*
+	 * Attempt to not pull tasks that don't fit. We may get lucky and find
+	 * one that actually fits.
+	 */
+	if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
+		return 0;
+
+	/*
+	 * Group imbalance can sometimes cause work to be pulled across groups
+	 * even though the group could have managed the imbalance on its own.
+	 * Prevent inter-cluster migrations for big tasks when the number of
+	 * tasks is lower than the capacity of the group.
+	 */
+	if (!twf && env->busiest_nr_running <= env->busiest_grp_capacity)
 		return 0;
 
 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
@@ -7677,6 +7693,9 @@ static int detach_tasks(struct lb_env *env)
 
 	if (capacity(env->dst_rq) > capacity(env->src_rq))
 		env->flags |= LBF_IGNORE_SMALL_TASKS;
+	else if (capacity(env->dst_rq) < capacity(env->src_rq) &&
+							!sched_boost())
+		env->flags |= LBF_IGNORE_BIG_TASKS;
 
 redo:
 	while (!list_empty(tasks)) {
@@ -7741,9 +7760,10 @@ next:
 		list_move_tail(&p->se.group_node, tasks);
 	}
 
-	if (env->flags & LBF_IGNORE_SMALL_TASKS && !detached) {
+	if (env->flags & (LBF_IGNORE_SMALL_TASKS | LBF_IGNORE_BIG_TASKS)
+								&& !detached) {
 		tasks = &env->src_rq->cfs_tasks;
-		env->flags &= ~LBF_IGNORE_SMALL_TASKS;
+		env->flags &= ~(LBF_IGNORE_SMALL_TASKS | LBF_IGNORE_BIG_TASKS);
 		env->loop = orig_loop;
 		goto redo;
 	}
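The detach_tasks() hunks above gate the new filter on relative CPU
capacity and on sched_boost. A minimal sketch of that flag selection
follows; the sched_boost_active() helper and the capacity numbers are
assumptions for illustration, not the kernel's API.

/*
 * Hypothetical sketch of the filter selection in detach_tasks().
 */
#include <stdbool.h>
#include <stdio.h>

#define LBF_IGNORE_SMALL_TASKS	0x10
#define LBF_IGNORE_BIG_TASKS	0x80

static bool sched_boost_active(void) { return false; }	/* assumption */

static unsigned int pick_filter(int dst_capacity, int src_capacity)
{
	unsigned int flags = 0;

	if (dst_capacity > src_capacity)
		/* big CPU pulling from little: skip small tasks first */
		flags |= LBF_IGNORE_SMALL_TASKS;
	else if (dst_capacity < src_capacity && !sched_boost_active())
		/* little CPU pulling from big: skip big tasks first */
		flags |= LBF_IGNORE_BIG_TASKS;

	/* During sched_boost a lower capacity destination sets no filter. */
	return flags;
}

int main(void)
{
	printf("little pulling from big: 0x%x\n", pick_filter(400, 1024));
	printf("big pulling from little: 0x%x\n", pick_filter(1024, 400));
	return 0;
}

When the filtered scan detaches nothing, the patch clears both flags,
rewinds env->loop to orig_loop and jumps back to redo, so load balance
never fails outright merely because the filter was too strict.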