author		Dietmar Eggemann <dietmar.eggemann@arm.com>	2017-01-26 16:04:34 +0000
committer	Andres Oportus <andresoportus@google.com>	2017-06-02 08:01:52 -0700
commit		3b6ba235bcf3827d4911f3eb27bae5ac0f4dbbc8 (patch)
tree		f2e99501708d12d2456c85f93aef6b1068d8f52d /kernel
parent		168228463cacf92ee12563ddf612b7489dc3d3fa (diff)
sched/fair: Decommission energy_aware_wake_cpu()
The EAS functionality in the wakeup path will be brought back by the following patch ("sched/fair: Energy-aware wake-up task placement") providing the function select_energy_cpu_brute().

Change-Id: I927fb9e8261cfacfe404695f853941c7959aa146
[ Trivial merge conflicts resolved. ]
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
(cherry picked from commit 80aee424fb7765a777267e144037642625a71304)
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
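The decision this function encoded, and which the follow-up select_energy_cpu_brute() is expected to re-introduce, is: pick a candidate CPU, ask the energy model for the cost delta of moving the waking task there, and keep the previous CPU unless the move pays off. The standalone sketch below mirrors the struct energy_env usage visible in the removed code; energy_model_diff() and pick_wake_cpu() are hypothetical stand-ins for illustration only, not kernel APIs.

/*
 * Standalone sketch of the accept/reject step that energy_aware_wake_cpu()
 * performed before returning. struct energy_env mirrors a subset of the
 * fields used in the removed code; energy_model_diff() is a toy stand-in
 * for the kernel's energy_diff() and does not reflect the real energy model.
 */
#include <stdio.h>

struct energy_env {
	unsigned long util_delta;	/* utilization the task would add */
	int src_cpu;			/* CPU the task last ran on */
	int dst_cpu;			/* candidate CPU for this wakeup */
};

/* Toy model: pretend lower-numbered CPUs are the more efficient ones. */
static int energy_model_diff(const struct energy_env *eenv)
{
	return eenv->dst_cpu - eenv->src_cpu;
}

/* Keep prev_cpu unless the model predicts the migration saves energy. */
static int pick_wake_cpu(int prev_cpu, int candidate_cpu, unsigned long task_util)
{
	struct energy_env eenv = {
		.util_delta = task_util,
		.src_cpu = prev_cpu,
		.dst_cpu = candidate_cpu,
	};

	if (candidate_cpu != prev_cpu && energy_model_diff(&eenv) >= 0)
		return prev_cpu;

	return candidate_cpu;
}

int main(void)
{
	/* A move from CPU 4 to CPU 1 is accepted; 1 to 4 is rejected. */
	printf("wake on CPU %d\n", pick_wake_cpu(4, 1, 200));
	printf("wake on CPU %d\n", pick_wake_cpu(1, 4, 200));
	return 0;
}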
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	120
1 file changed, 1 insertion(+), 119 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 976e7457b4fa..ef16d4d3cc00 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5836,122 +5836,6 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool pre
 	return target_cpu;
 }
 
-static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
-{
-	struct sched_domain *sd;
-	struct sched_group *sg, *sg_target;
-	int target_max_cap = INT_MAX;
-	int target_cpu = task_cpu(p);
-	unsigned long task_util_boosted, new_util;
-	int i;
-
-	if (sysctl_sched_sync_hint_enable && sync) {
-		int cpu = smp_processor_id();
-		cpumask_t search_cpus;
-		cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
-		if (cpumask_test_cpu(cpu, &search_cpus))
-			return cpu;
-	}
-
-	sd = rcu_dereference(per_cpu(sd_ea, task_cpu(p)));
-
-	if (!sd)
-		return target;
-
-	sg = sd->groups;
-	sg_target = sg;
-
-	if (sysctl_sched_is_big_little) {
-
-		/*
-		 * Find group with sufficient capacity. We only get here if no cpu is
-		 * overutilized. We may end up overutilizing a cpu by adding the task,
-		 * but that should not be any worse than select_idle_sibling().
-		 * load_balance() should sort it out later as we get above the tipping
-		 * point.
-		 */
-		do {
-			/* Assuming all cpus are the same in group */
-			int max_cap_cpu = group_first_cpu(sg);
-
-			/*
-			 * Assume smaller max capacity means more energy-efficient.
-			 * Ideally we should query the energy model for the right
-			 * answer but it easily ends up in an exhaustive search.
-			 */
-			if (capacity_of(max_cap_cpu) < target_max_cap &&
-			    task_fits_max(p, max_cap_cpu)) {
-				sg_target = sg;
-				target_max_cap = capacity_of(max_cap_cpu);
-			}
-		} while (sg = sg->next, sg != sd->groups);
-
-		task_util_boosted = boosted_task_util(p);
-		/* Find cpu with sufficient capacity */
-		for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg_target)) {
-			/*
-			 * p's blocked utilization is still accounted for on prev_cpu
-			 * so prev_cpu will receive a negative bias due to the double
-			 * accounting. However, the blocked utilization may be zero.
-			 */
-			new_util = cpu_util(i) + task_util_boosted;
-
-			/*
-			 * Ensure minimum capacity to grant the required boost.
-			 * The target CPU can be already at a capacity level higher
-			 * than the one required to boost the task.
-			 */
-			if (new_util > capacity_orig_of(i))
-				continue;
-
-			if (new_util < capacity_curr_of(i)) {
-				target_cpu = i;
-				if (cpu_rq(i)->nr_running)
-					break;
-			}
-
-			/* cpu has capacity at higher OPP, keep it as fallback */
-			if (target_cpu == task_cpu(p))
-				target_cpu = i;
-		}
-	} else {
-		/*
-		 * Find a cpu with sufficient capacity
-		 */
-#ifdef CONFIG_CGROUP_SCHEDTUNE
-		bool boosted = schedtune_task_boost(p) > 0;
-		bool prefer_idle = schedtune_prefer_idle(p) > 0;
-#else
-		bool boosted = 0;
-		bool prefer_idle = 0;
-#endif
-		int tmp_target = find_best_target(p, boosted, prefer_idle);
-		if (tmp_target >= 0) {
-			target_cpu = tmp_target;
-			if ((boosted || prefer_idle) && idle_cpu(target_cpu))
-				return target_cpu;
-		}
-	}
-
-	if (target_cpu != task_cpu(p)) {
-		struct energy_env eenv = {
-			.util_delta = task_util(p),
-			.src_cpu = task_cpu(p),
-			.dst_cpu = target_cpu,
-			.task = p,
-		};
-
-		/* Not enough spare capacity on previous cpu */
-		if (cpu_overutilized(task_cpu(p)))
-			return target_cpu;
-
-		if (energy_diff(&eenv) >= 0)
-			return task_cpu(p);
-	}
-
-	return target_cpu;
-}
-
 /*
  * cpu_util_wake: Compute cpu utilization with any contributions from
  * the waking task p removed.
@@ -6047,9 +5931,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 	}
 
 	if (!sd) {
-		if (energy_aware() && !cpu_rq(cpu)->rd->overutilized)
-			new_cpu = energy_aware_wake_cpu(p, prev_cpu, sync);
-		else if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
+		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
 			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 
 	} else while (sd) {