author     Syed Rameez Mustafa <rameezmustafa@codeaurora.org>   2015-02-10 15:30:55 -0800
committer  David Keitel <dkeitel@codeaurora.org>                2016-03-23 20:01:40 -0700
commit     b3f9e5ac265af4a30ee9f9c3760e5c98e588e4b1 (patch)
tree       3f644d024e3130fbf736c8ed3c64862f6319a682 /kernel
parent     fffa33d56acad1c91ae27673c7af5f5415e2151f (diff)
sched: Avoid pulling all tasks from a CPU during load balance
When running load balance, the destination CPU checks the number of running tasks on the busiest CPU without holding the busiest CPU's runqueue lock. This opens the load balancer to a race: a third CPU running load balance at the same time, having found the same busiest group and queue, may already have pulled one of the waiting tasks from the busiest CPU. In the scenario where the source CPU is running the idle task and only a single task remains waiting on the busiest runqueue (nr_running = 1), the destination CPU ends up pulling the only enqueued task from that CPU, leaving the source CPU with nothing left to run.

Fix this race by reconfirming nr_running for the busiest CPU after its runqueue lock has been obtained.

Change-Id: I42e132b15f96d9d5d7b32ef4de3fb92d2f837e63
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
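The fix applies a check-then-revalidate pattern: the unlocked read of nr_running is only a hint, and any decision based on it must be reconfirmed once the busiest runqueue's lock is actually held. As a rough illustration of that pattern, here is a minimal user-space C sketch (the struct runqueue and try_pull_task names are hypothetical, with a pthread mutex standing in for the runqueue lock; this is not the kernel code):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for a per-CPU runqueue: a lock plus a task count. */
struct runqueue {
	pthread_mutex_t lock;
	int nr_running;
};

/*
 * Decide whether to pull a task from @busiest. The first read of
 * nr_running is done without the lock and is only a hint; it must be
 * reconfirmed after the lock is taken, because another "CPU" (thread)
 * may have pulled the last waiting task in the meantime.
 */
static int try_pull_task(struct runqueue *busiest)
{
	int pulled = 0;

	if (busiest->nr_running <= 1)	/* unlocked hint: nothing worth pulling */
		return 0;

	pthread_mutex_lock(&busiest->lock);
	/* The world might have changed. Validate assumptions. */
	if (busiest->nr_running > 1) {
		busiest->nr_running--;	/* "detach" one waiting task */
		pulled = 1;
	}
	pthread_mutex_unlock(&busiest->lock);

	return pulled;
}

int main(void)
{
	struct runqueue rq = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_running = 2 };
	int pulled = try_pull_task(&rq);

	printf("pulled: %d, nr_running now %d\n", pulled, rq.nr_running);
	return 0;
}

Compiled with something like gcc -pthread sketch.c, the program reports that one task was pulled and one remains; had another thread already dropped nr_running to 1, try_pull_task() would return 0 rather than emptying the queue, which is exactly the behavior the patch adds under the runqueue lock.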
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c | 8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 696b52673436..46782a66b3b5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9093,6 +9093,13 @@ redo:
more_balance:
raw_spin_lock_irqsave(&busiest->lock, flags);
+ /* The world might have changed. Validate assumptions */
+ if (busiest->nr_running <= 1) {
+ raw_spin_unlock_irqrestore(&busiest->lock, flags);
+ env.flags &= ~LBF_ALL_PINNED;
+ goto no_move;
+ }
+
/*
* cur_ld_moved - load moved in current iteration
* ld_moved - cumulative load moved across iterations
@@ -9180,6 +9187,7 @@ more_balance:
}
}
+no_move:
if (!ld_moved) {
if (!(env.flags & (LBF_PWR_ACTIVE_BALANCE | LBF_SCHED_BOOST)))
schedstat_inc(sd, lb_failed[idle]);