author    Joonwoo Park <joonwoop@codeaurora.org>    2016-04-18 11:44:32 -0700
committer Jeevan Shriram <jshriram@codeaurora.org>  2016-04-22 15:05:34 -0700
commit    16c433e4c56d10c55d9e5cf2a773f9996e5dc241 (patch)
tree      294c61075bc5eed8121c2835c3550617a6bb996f
parent    2e0ebb0155f275b24a5feedfc8f28c5225fb8db6 (diff)
sched: fix excessive task packing where CONFIG_SCHED_HMP_CSTATE_AWARE=y
At present, among CPUs with the same power cost and c-state, the scheduler
places a newly waking task on the most loaded CPU, which can cause excessive
task packing on a single CPU.  Place the task on the most loaded CPU only
when the best CPU is in an idle c-state; otherwise, spread tasks out by
placing it on the least loaded CPU.

CRs-fixed: 1006303
Change-Id: I8ae7332971b3293d912b1582f75e33fd81407d86
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
-rw-r--r--  kernel/sched/fair.c  14
1 file changed, 8 insertions(+), 6 deletions(-)
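For illustration, here is a minimal user-space sketch of the placement policy
this patch introduces; struct cpu_cand, pick_cpu(), and the sample loads are
hypothetical names and data, not kernel code.  The sketch assumes, as the
patched branch does, that the candidate CPUs already tie on power cost and
c-state, and it omits the prev_cpu guard and the cost/c-state bookkeeping that
the real __update_cluster_stats() performs:

#include <stdio.h>

struct cpu_cand {
	int id;
	unsigned long long load;
};

/*
 * Pick among candidates that already tie on power cost and c-state.
 * cstate == 0 means the candidates are busy: spread out by taking the
 * least loaded one.  cstate > 0 means the candidates are idle: take
 * the most loaded one instead, mirroring the patched comparison.
 */
static int pick_cpu(const struct cpu_cand *c, int n, int cstate)
{
	int best = 0;

	for (int i = 1; i < n; i++) {
		if ((cstate == 0 && c[i].load < c[best].load) ||
		    (cstate > 0 && c[i].load > c[best].load))
			best = i;
	}
	return c[best].id;
}

int main(void)
{
	struct cpu_cand cands[] = {
		{ .id = 0, .load = 300 },
		{ .id = 1, .load = 500 },
		{ .id = 2, .load = 100 },
	};

	/* Busy candidates (c-state 0): least loaded wins -> CPU 2. */
	printf("busy: CPU %d\n", pick_cpu(cands, 3, 0));
	/* Idle candidates (c-state > 0): most loaded wins -> CPU 1. */
	printf("idle: CPU %d\n", pick_cpu(cands, 3, 2));
	return 0;
}

Before the patch, the tie-break always took the most loaded candidate
regardless of c-state, which is the packing behavior the commit message
describes.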
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c0e35bc86b92..7e25cce86c94 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3138,7 +3138,7 @@ struct cluster_cpu_stats {
 	int best_capacity_cpu, best_cpu, best_sibling_cpu;
 	int min_cost, best_sibling_cpu_cost;
 	int best_cpu_cstate;
-	u64 min_load, max_load, best_sibling_cpu_load;
+	u64 min_load, best_load, best_sibling_cpu_load;
 	s64 highest_spare_capacity;
 };
 
@@ -3409,7 +3409,7 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
 	if (cpu_cost < stats->min_cost) {
 		stats->min_cost = cpu_cost;
 		stats->best_cpu_cstate = cpu_cstate;
-		stats->max_load = env->cpu_load;
+		stats->best_load = env->cpu_load;
 		stats->best_cpu = cpu;
 		return;
 	}
@@ -3421,7 +3421,7 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
 
 	if (cpu_cstate < stats->best_cpu_cstate) {
 		stats->best_cpu_cstate = cpu_cstate;
-		stats->max_load = env->cpu_load;
+		stats->best_load = env->cpu_load;
 		stats->best_cpu = cpu;
 		return;
 	}
@@ -3432,8 +3432,10 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
 		return;
 	}
 
-	if (stats->best_cpu != prev_cpu && env->cpu_load > stats->max_load) {
-		stats->max_load = env->cpu_load;
+	if (stats->best_cpu != prev_cpu &&
+	    ((cpu_cstate == 0 && env->cpu_load < stats->best_load) ||
+	     (cpu_cstate > 0 && env->cpu_load > stats->best_load))) {
+		stats->best_load = env->cpu_load;
 		stats->best_cpu = cpu;
 	}
 }
@@ -3520,10 +3522,10 @@ static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
 	stats->best_capacity_cpu = stats->best_sibling_cpu = -1;
 	stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX;
 	stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
-	stats->max_load = 0;
 	stats->highest_spare_capacity = 0;
 	stats->least_loaded_cpu = -1;
 	stats->best_cpu_cstate = INT_MAX;
+	/* No need to initialize stats->best_load */
 }
 
 /*
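The removed stats->max_load = 0 line needs no best_load counterpart because,
as the added comment hints, min_cost is initialized to INT_MAX: the first
candidate evaluated in __update_cluster_stats() always takes the
cpu_cost < stats->min_cost branch, which writes stats->best_load before any
later comparison reads it.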