From 9d128dbca33500c066648db5d9054263272d6e83 Mon Sep 17 00:00:00 2001
From: Pavankumar Kondeti
Date: Wed, 13 Apr 2016 15:13:56 +0530
Subject: sched: don't assume higher capacity means higher power in lb

The load balancer restrictions are in place to control the migration of
tasks from the lower capacity cluster to the higher capacity cluster in
order to save power. The assumption here is that the higher capacity
cluster has a higher power cost, which is not necessarily true for all
platforms. Use power cost based checks instead of capacity based checks
when applying the inter-cluster migration restrictions.

Change-Id: Id9519eb8f7b183a2e9fca87a23cf95e951aa4005
Signed-off-by: Pavankumar Kondeti
---
 kernel/sched/fair.c  | 10 ++--------
 kernel/sched/hmp.c   |  7 +++++++
 kernel/sched/sched.h |  1 +
 3 files changed, 10 insertions(+), 8 deletions(-)

(limited to 'kernel/sched')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e893b0fcac6b..07f83f2f53a8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7226,9 +7226,7 @@ bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
 	local_pwr_cost = cpu_max_power_cost(local_cpu);
 	busiest_pwr_cost = cpu_max_power_cost(busiest_cpu);
 
-	if (local_capacity < busiest_capacity ||
-		(local_capacity == busiest_capacity &&
-		local_pwr_cost <= busiest_pwr_cost))
+	if (local_pwr_cost <= busiest_pwr_cost)
 		return 0;
 
 	if (local_capacity > busiest_capacity &&
@@ -8843,9 +8841,6 @@ static inline int find_new_hmp_ilb(int type)
 		for_each_cpu_and(ilb, nohz.idle_cpus_mask,
 						sched_domain_span(sd)) {
 			if (idle_cpu(ilb) && (type != NOHZ_KICK_RESTRICT ||
-					(hmp_capable() &&
-					cpu_max_possible_capacity(ilb) <=
-					cpu_max_possible_capacity(call_cpu)) ||
 					cpu_max_power_cost(ilb) <=
 					cpu_max_power_cost(call_cpu))) {
 				rcu_read_unlock();
@@ -9188,8 +9183,7 @@ static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
 	if (!sysctl_sched_restrict_cluster_spill || sched_boost())
 		return 1;
 
-	if (hmp_capable() && cpu_max_possible_capacity(cpu) ==
-			max_possible_capacity)
+	if (cpu_max_power_cost(cpu) == max_power_cost)
 		return 1;
 
 	rcu_read_lock();
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 5002619961ce..53fb246cb87d 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -356,6 +356,8 @@ DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
 struct sched_cluster *sched_cluster[NR_CPUS];
 int num_clusters;
 
+unsigned int max_power_cost = 1;
+
 struct sched_cluster init_cluster = {
 	.list = LIST_HEAD_INIT(init_cluster.list),
 	.id = 0,
@@ -465,6 +467,7 @@ static void sort_clusters(void)
 {
 	struct sched_cluster *cluster;
 	struct list_head new_head;
+	unsigned int tmp_max = 1;
 
 	INIT_LIST_HEAD(&new_head);
 
@@ -473,7 +476,11 @@ static void sort_clusters(void)
 						max_task_load());
 		cluster->min_power_cost = power_cost(cluster_first_cpu(cluster),
 						0);
+
+		if (cluster->max_power_cost > tmp_max)
+			tmp_max = cluster->max_power_cost;
 	}
+	max_power_cost = tmp_max;
 
 	move_list(&new_head, &cluster_head, true);
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ec7721112b05..dddd9b4183ad 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1039,6 +1039,7 @@ extern unsigned int min_capacity;
 extern unsigned int max_load_scale_factor;
 extern unsigned int max_possible_capacity;
 extern unsigned int min_max_possible_capacity;
+extern unsigned int max_power_cost;
 extern unsigned int sched_upmigrate;
 extern unsigned int sched_downmigrate;
 extern unsigned int sched_init_task_load_windows;
--
cgit v1.2.3
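
For context, below is a minimal user-space sketch (not kernel code) of why the old and new bail-out checks in bail_inter_cluster_balance() can disagree. The cluster numbers and the cpu_info struct are invented for illustration; only the two conditions are taken from the diff above, with 0 read as "do not restrict the inter-cluster pull".

/* Standalone illustration with hypothetical numbers: compare the old
 * capacity-based bail-out condition with the new power-cost-based one.
 */
#include <stdio.h>

struct cpu_info {
	unsigned int max_possible_capacity;	/* relative CPU capacity */
	unsigned int max_power_cost;		/* power cost at max load */
};

/* Hypothetical platform where the higher-capacity cluster is NOT the
 * more power-hungry one (e.g. built on a more efficient process node).
 */
static const struct cpu_info local   = { .max_possible_capacity = 1024,
					 .max_power_cost        = 300 };
static const struct cpu_info busiest = { .max_possible_capacity = 512,
					 .max_power_cost        = 450 };

static int bail_old(const struct cpu_info *l, const struct cpu_info *b)
{
	/* Old check: allow the pull (return 0) when local capacity is
	 * lower, or capacities are equal and local power cost is not higher.
	 */
	if (l->max_possible_capacity < b->max_possible_capacity ||
	    (l->max_possible_capacity == b->max_possible_capacity &&
	     l->max_power_cost <= b->max_power_cost))
		return 0;
	return 1;	/* restrict the migration */
}

static int bail_new(const struct cpu_info *l, const struct cpu_info *b)
{
	/* New check: only the power cost matters. */
	if (l->max_power_cost <= b->max_power_cost)
		return 0;
	return 1;	/* restrict the migration */
}

int main(void)
{
	printf("capacity-based check restricts pull:   %d\n",
	       bail_old(&local, &busiest));
	printf("power-cost-based check restricts pull: %d\n",
	       bail_new(&local, &busiest));
	return 0;
}

With these made-up numbers the capacity-based check restricts the higher-capacity cluster from pulling load even though it is the cheaper one to run, while the power-cost-based check permits it, which is the behaviour the patch argues for.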