summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorPavankumar Kondeti <pkondeti@codeaurora.org>2017-07-14 19:26:34 +0530
committerPavankumar Kondeti <pkondeti@codeaurora.org>2017-07-14 20:57:48 +0530
commitf261bf42cc8e5c111891650010aead9be58cefbc (patch)
tree654dac657712fa2275d4a62c78bb4b0492fd49e1 /kernel
parentd9fbe4b9210bcf0b7553d224f1c7390c15b91022 (diff)
sched: avoid RT tasks contention during sched boost
When placement boost is active, we are currently considering only the highest capacity cluster. If all of the active CPUs in this cluster are busy with RT tasks, the waking task is placed on it's previous CPU, which may be running a RT task. This results in suboptimal performance. Fix this by expanding the search to the other clusters, when there is no eligible CPU found in the highest capacity cluster. Change-Id: Iaab2e397b994c2b219dc086c7a6fa91ca26a5128 Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/rt.c15
1 file changed, 15 insertions, 0 deletions
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c03d51a017bf..ee095f4e7230 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1823,6 +1823,7 @@ static int find_lowest_rq_hmp(struct task_struct *task)
* the best one based on our affinity and topology.
*/
+retry:
for_each_sched_cluster(cluster) {
if (boost_on_big && cluster->capacity != max_possible_capacity)
continue;
@@ -1830,6 +1831,15 @@ static int find_lowest_rq_hmp(struct task_struct *task)
cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask);
cpumask_andnot(&candidate_mask, &candidate_mask,
cpu_isolated_mask);
+ /*
+ * When placement boost is active, if there is no eligible CPU
+ * in the highest capacity cluster, we fallback to the other
+ * clusters. So clear the CPUs of the traversed cluster from
+ * the lowest_mask.
+ */
+ if (unlikely(boost_on_big))
+ cpumask_andnot(lowest_mask, lowest_mask,
+ &cluster->cpus);
if (cpumask_empty(&candidate_mask))
continue;
@@ -1869,6 +1879,11 @@ static int find_lowest_rq_hmp(struct task_struct *task)
break;
}
+ if (unlikely(boost_on_big && best_cpu == -1)) {
+ boost_on_big = 0;
+ goto retry;
+ }
+
return best_cpu;
}
#endif /* CONFIG_SCHED_HMP */