author    Olav Haugan <ohaugan@codeaurora.org>   2014-10-07 17:27:44 -0700
committer David Keitel <dkeitel@codeaurora.org>  2016-03-23 20:00:52 -0700
commit    604c41065b982ee94b45233d2da876d873284544 (patch)
tree      2442322efa6abd32ecb24c9063b2a64a6a83eb8e /kernel
parent    c12a2b5ab9163585020f5cb81d2a0276275ce111 (diff)
sched: Add checks for frequency change
We need to check for frequency change when a task is migrated due to
affinity change and during active balance.

Change-Id: I96676db04d34b5b91edd83431c236a1c28166985
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
[rameezmustafa@codeaurora.org: port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed minor conflict in core.c]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
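The pattern applied at both call sites can be summarized in a short sketch. The wrapper function below is hypothetical (not part of the patch); same_freq_domain() and check_for_freq_change() are the helpers the patch actually calls:

/*
 * Hypothetical helper summarizing the pattern this patch applies at
 * both call sites: after a task has moved between CPUs, ask the
 * governor to re-evaluate frequency on both ends, but only when the
 * two CPUs are clocked independently.
 */
static void check_freq_after_migration(struct rq *src_rq, int src_cpu,
				       int dest_cpu)
{
	/*
	 * CPUs in the same frequency domain share a clock, so moving
	 * load between them cannot change the domain's total demand.
	 */
	if (same_freq_domain(src_cpu, dest_cpu))
		return;

	/*
	 * Demand moved across domains: the source may be able to slow
	 * down and the destination may need to ramp up.
	 */
	check_for_freq_change(src_rq);
	check_for_freq_change(cpu_rq(dest_cpu));
}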
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c |  6
-rw-r--r--  kernel/sched/fair.c | 13
2 files changed, 17 insertions, 2 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 45030994b33e..13ae48336c92 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2659,6 +2659,12 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_
 	src_cpu = cpu_of(rq);
 	rq = move_queued_task(rq, p, dest_cpu);
+
+	if (!same_freq_domain(src_cpu, dest_cpu)) {
+		check_for_freq_change(rq);
+		check_for_freq_change(cpu_rq(dest_cpu));
+	}
+
 	if (task_notify_on_migrate(p)) {
 		struct migration_notify_data mnd;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e3ed01a638df..ff9c396dce5b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8721,6 +8721,7 @@ static int active_load_balance_cpu_stop(void *data)
 		.flags = 0,
 		.loop = 0,
 	};
+	bool moved = false;

 	raw_spin_lock_irq(&busiest_rq->lock);
@@ -8751,6 +8752,7 @@ static int active_load_balance_cpu_stop(void *data)
 		    cpu_online(target_cpu)) {
 			detach_task(push_task, &env);
 			push_task_detached = 1;
+			moved = true;
 		}
 		goto out_unlock;
 	}
@@ -8768,10 +8770,12 @@ static int active_load_balance_cpu_stop(void *data)
 		schedstat_inc(sd, alb_count);

 		p = detach_one_task(&env);
-		if (p)
+		if (p) {
 			schedstat_inc(sd, alb_pushed);
-		else
+			moved = true;
+		} else {
 			schedstat_inc(sd, alb_failed);
+		}
 	}
 	rcu_read_unlock();
 out_unlock:
@@ -8796,6 +8800,11 @@ out_unlock:
 	local_irq_enable();

+	if (moved && !same_freq_domain(busiest_cpu, target_cpu)) {
+		check_for_freq_change(busiest_rq);
+		check_for_freq_change(target_rq);
+	}
+
 	if (per_cpu(dbs_boost_needed, target_cpu)) {
 		struct migration_notify_data mnd;
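For context, same_freq_domain() in this tree is expected to reduce to a cpumask test. The sketch below is an assumption about the msm HMP scheduler's implementation, not code taken from this diff; in particular the field name freq_domain_cpumask is a guess:

/*
 * Sketch of the helper the patch relies on, assuming each rq carries
 * a cpumask of the CPUs that share its clock. Both the structure and
 * the field name freq_domain_cpumask are assumptions.
 */
static inline int same_freq_domain(int src_cpu, int dst_cpu)
{
	struct rq *rq = cpu_rq(src_cpu);

	if (src_cpu == dst_cpu)
		return 1;

	/* CPUs sharing a frequency domain appear in each other's mask. */
	return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
}

Under this reading, the moved flag in active_load_balance_cpu_stop() simply gates the re-evaluation so that a failed push (alb_failed) does not trigger a spurious frequency check.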