author     Joonwoo Park <joonwoop@codeaurora.org>    2015-08-11 15:35:02 -0700
committer  David Keitel <dkeitel@codeaurora.org>     2016-03-23 20:02:23 -0700
commit     44af3b5e0308c9a7061b18344893d2b07a91b1c9 (patch)
tree       3d0c822ca8e9689801167f0d59055fcab6299f1f /kernel
parent     8623286277c31443d38a024de85adab973498664 (diff)
sched: add preference for prev and sibling CPU in HMP task placement
At present the HMP task placement algorithm places a waking task on any
lowest-power-cost CPU in the system, even when the task's previous CPU
is itself one of the lowest-power-cost CPUs.  Placing the task back on
its previous CPU can reduce cache bouncing.  Add a bias towards the
task's previous CPU and towards CPUs in the same cache domain as the
previous CPU.

Change-Id: Ieab3840432e277048058da76764b3a3f16e20c56
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
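For illustration, the selection bias can be sketched in plain C.
Everything below is a hypothetical stand-in: cpu_cost[], cpu_load[],
shares_cache(), the two-CPU cache domains and select_cpu() model the
kernel's power_cost(), per-CPU load tracking, cpus_share_cache() and
select_best_cpu(), and the real function additionally handles idle
CPUs, sched_boost and spare capacity, which this sketch omits.

    /*
     * Minimal userspace model of the placement bias this patch adds.
     * All names and the topology are illustrative, not the kernel API.
     */
    #include <limits.h>
    #include <stdio.h>

    #define NR_CPUS 4

    /* Assumed topology: CPUs 0-1 share one cache, CPUs 2-3 another. */
    static int shares_cache(int a, int b)
    {
            return (a / 2) == (b / 2);
    }

    static int select_cpu(const int cpu_cost[],
                          const unsigned long long cpu_load[], int prev_cpu)
    {
            int i, best_cpu = -1, best_sibling_cpu = -1;
            int min_cost = INT_MAX, best_sibling_cpu_cost = INT_MAX;
            unsigned long long min_load = ULLONG_MAX;
            unsigned long long best_sibling_cpu_load = ULLONG_MAX;

            for (i = 0; i < NR_CPUS; i++) {
                    /* Never consider a CPU costlier than the current best. */
                    if (cpu_cost[i] > min_cost)
                            continue;

                    /* Track the cheapest CPU sharing a cache with prev_cpu. */
                    if (i != prev_cpu && shares_cache(prev_cpu, i) &&
                        (best_sibling_cpu_cost > cpu_cost[i] ||
                         (best_sibling_cpu_cost == cpu_cost[i] &&
                          best_sibling_cpu_load > cpu_load[i]))) {
                            best_sibling_cpu_cost = cpu_cost[i];
                            best_sibling_cpu_load = cpu_load[i];
                            best_sibling_cpu = i;
                    }

                    /*
                     * Take a strictly cheaper CPU unconditionally; at equal
                     * cost prefer prev_cpu, otherwise the least-loaded CPU.
                     */
                    if (cpu_cost[i] < min_cost || i == prev_cpu ||
                        (best_cpu != prev_cpu && cpu_load[i] < min_load)) {
                            min_cost = cpu_cost[i];
                            min_load = cpu_load[i];
                            best_cpu = i;
                    }
            }

            /* Fall back to a cache sibling of prev_cpu at equal power cost. */
            if (best_cpu != prev_cpu && best_sibling_cpu >= 0 &&
                min_cost == best_sibling_cpu_cost)
                    best_cpu = best_sibling_cpu;

            return best_cpu;
    }

    int main(void)
    {
            const int cost1[NR_CPUS] = { 10, 10, 10, 10 };
            const unsigned long long load1[NR_CPUS] = { 5, 7, 6, 4 };
            const int cost2[NR_CPUS] = { 10, 10, 12, 10 };
            const unsigned long long load2[NR_CPUS] = { 5, 7, 6, 9 };

            /* Equal costs: prev_cpu (2) wins although CPU 3 is less loaded. */
            printf("best_cpu = %d\n", select_cpu(cost1, load1, 2));

            /* prev_cpu too costly: its cache sibling (CPU 3) is preferred
             * over the less-loaded CPU 0 at equal cost. */
            printf("best_cpu = %d\n", select_cpu(cost2, load2, 2));
            return 0;
    }

In this model, equal power costs keep the task on its previous CPU; when
the previous CPU itself costs too much, a CPU sharing its cache beats an
otherwise less-loaded CPU at the same cost, which is the patch's intent.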
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c  30
1 file changed, 22 insertions, 8 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5923011cf92c..fc0ff96a1fd8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3182,9 +3182,9 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
                            int sync)
 {
         int i, best_cpu = -1, best_idle_cpu = -1, best_capacity_cpu = -1;
-        int prev_cpu = task_cpu(p);
-        int cpu_cost, min_cost = INT_MAX;
-        u64 tload, cpu_load;
+        int prev_cpu = task_cpu(p), best_sibling_cpu = -1;
+        int cpu_cost, min_cost = INT_MAX, best_sibling_cpu_cost = INT_MAX;
+        u64 tload, cpu_load, best_sibling_cpu_load = ULLONG_MAX;
         u64 min_load = ULLONG_MAX;
         s64 spare_capacity, highest_spare_capacity = 0;
         int boost = sched_boost();
@@ -3238,21 +3238,32 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
                 cpu_cost = power_cost(tload + cpu_load, i);
+                if (cpu_cost > min_cost)
+                        continue;
+
                 /*
                  * If the task fits in a CPU in a lower power band, that
                  * overrides all other considerations.
                  */
                 if (power_delta_exceeded(cpu_cost, min_cost)) {
-                        if (cpu_cost > min_cost)
-                                continue;
-
                         min_cost = cpu_cost;
                         min_load = ULLONG_MAX;
                         best_cpu = -1;
                 }
 
-                if (cpu_cost < min_cost ||
-                    (cpu_cost == min_cost && cpu_load < min_load)) {
+                if (i != prev_cpu && cpus_share_cache(prev_cpu, i)) {
+                        if (best_sibling_cpu_cost > cpu_cost ||
+                            (best_sibling_cpu_cost == cpu_cost &&
+                             best_sibling_cpu_load > cpu_load)) {
+                                best_sibling_cpu_cost = cpu_cost;
+                                best_sibling_cpu_load = cpu_load;
+                                best_sibling_cpu = i;
+                        }
+                }
+
+                if ((cpu_cost < min_cost) ||
+                    ((best_cpu != prev_cpu && min_load > cpu_load) ||
+                     i == prev_cpu)) {
                         if (need_idle) {
                                 if (idle_cpu(i)) {
                                         min_cost = cpu_cost;
@@ -3274,6 +3285,9 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
                         best_cpu = prev_cpu;
                 else
                         best_cpu = best_capacity_cpu;
+        } else {
+                if (best_cpu != prev_cpu && min_cost == best_sibling_cpu_cost)
+                        best_cpu = best_sibling_cpu;
         }
 
         trace_sched_task_load(p, boost, reason, sync, need_idle);