author    Srivatsa Vaddagiri <vatsa@codeaurora.org>    2014-07-25 08:04:27 -0700
committer David Keitel <dkeitel@codeaurora.org>        2016-03-23 20:00:10 -0700
commit    8e526b1ab439c3a013d4e27b7e0dc0fae031599d (patch)
tree      859c2b2ca41132f1c3323e352a7b3830c61717df
parent    c820f1c5f27c54278d8f124dd4fc8b7b507ef579 (diff)
sched: Fix herding issue
check_for_migration() could run concurrently on multiple cpus,
resulting in multiple tasks wanting to migrate to the same cpu. This
could cause cpus to be underutilized and lead to increased scheduling
latencies for tasks.

Fix this by serializing select_best_cpu() calls from cpus running
check_for_migration() and marking the selected cpu as reserved, so
that a subsequent call to select_best_cpu() from check_for_migration()
will skip reserved cpus.

Change-Id: I73a22cacab32dee3c14267a98b700f572aa3900c
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
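A minimal userspace sketch of the serialize-and-reserve pattern the
patch introduces, for illustration only: the kernel code takes
migration_lock around select_best_cpu() and sets the CPU_RESERVED bit
in rq->hmp_flags via mark_reserved()/clear_reserved(), whereas the
names here (NR_CPUS, select_best_cpu_sketch, pick_and_reserve,
migration_done) are hypothetical stand-ins using a mutex and atomics.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define NR_CPUS 8

/* One reservation flag per cpu, standing in for CPU_RESERVED in
 * rq->hmp_flags. Static atomics are zero-initialized (unreserved). */
static atomic_bool cpu_reserved[NR_CPUS];

/* Global lock serializing destination selection, standing in for
 * the patch's raw spinlock migration_lock. */
static pthread_mutex_t migration_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for select_best_cpu(): skip reserved cpus.
 * The real function also weighs capacity, load and power cost. */
static int select_best_cpu_sketch(int src_cpu)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu == src_cpu)
			continue;
		if (atomic_load(&cpu_reserved[cpu]))
			continue;	/* another migration already targets it */
		return cpu;
	}
	return src_cpu;			/* nothing better; stay put */
}

/* Mirrors check_for_migration(): choose a target under the lock and
 * reserve it before dropping the lock, so concurrent callers on other
 * cpus cannot all herd toward the same destination. */
static int pick_and_reserve(int src_cpu)
{
	int new_cpu;

	pthread_mutex_lock(&migration_lock);
	new_cpu = select_best_cpu_sketch(src_cpu);
	if (new_cpu != src_cpu)
		atomic_store(&cpu_reserved[new_cpu], true);
	pthread_mutex_unlock(&migration_lock);
	return new_cpu;
}

/* Mirrors clear_reserved(): drop the reservation once the migration
 * completes or the pending push request is cancelled. */
static void migration_done(int cpu)
{
	atomic_store(&cpu_reserved[cpu], false);
}

int main(void)
{
	int dst = pick_and_reserve(0);	/* cpu 0 looks for a better home */
	if (dst != 0)
		migration_done(dst);	/* migration finished; unreserve */
	return 0;
}

Note the ordering this buys: the reservation is taken while still
holding the lock, so by the time the next cpu runs its selection pass
the chosen destination is already excluded.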
-rw-r--r--  kernel/sched/core.c   |  2
-rw-r--r--  kernel/sched/fair.c   | 54
-rw-r--r--  kernel/sched/sched.h  | 25
3 files changed, 65 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bb949ae0fdb9..ff911672e88f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1133,9 +1133,11 @@ static inline void clear_hmp_request(int cpu)
unsigned long flags;
clear_boost_kick(cpu);
+ clear_reserved(cpu);
if (rq->push_task) {
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->push_task) {
+ clear_reserved(rq->push_cpu);
put_task_struct(rq->push_task);
rq->push_task = NULL;
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8aba1a21779..e24a58e3a98e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3040,6 +3040,9 @@ static int skip_cpu(struct task_struct *p, int cpu, int reason)
if (!reason)
return 0;
+ if (is_reserved(cpu))
+ return 1;
+
switch (reason) {
case MOVE_TO_BIG_CPU:
skip = (rq->capacity <= task_rq->capacity);
@@ -3396,23 +3399,13 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
return 0;
}
-/*
- * Check if currently running task should be migrated to a better cpu.
- *
- * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
- */
-void check_for_migration(struct rq *rq, struct task_struct *p)
+static DEFINE_RAW_SPINLOCK(migration_lock);
+
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
{
- int cpu = cpu_of(rq), new_cpu = cpu;
unsigned long flags;
- int active_balance = 0, rc;
-
- rc = migration_needed(rq, p);
- if (rc)
- new_cpu = select_best_cpu(p, cpu, rc);
-
- if (new_cpu == cpu)
- return;
+ int rc = 0;
/* Invoke active balance to force migrate currently running task */
raw_spin_lock_irqsave(&rq->lock, flags);
@@ -3421,10 +3414,38 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
rq->push_cpu = new_cpu;
get_task_struct(p);
rq->push_task = p;
- active_balance = 1;
+ rc = 1;
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
+ return rc;
+}
+
+/*
+ * Check if currently running task should be migrated to a better cpu.
+ *
+ * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
+ */
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+ int cpu = cpu_of(rq), new_cpu;
+ int active_balance = 0, reason;
+
+ reason = migration_needed(rq, p);
+ if (!reason)
+ return;
+
+ raw_spin_lock(&migration_lock);
+ new_cpu = select_best_cpu(p, cpu, reason);
+
+ if (new_cpu != cpu) {
+ active_balance = kick_active_balance(rq, p, new_cpu);
+ if (active_balance)
+ mark_reserved(new_cpu);
+ }
+
+ raw_spin_unlock(&migration_lock);
+
if (active_balance)
stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
&rq->active_balance_work);
@@ -8702,6 +8723,7 @@ out_unlock:
if (push_task_detached)
attach_one_task(target_rq, push_task);
put_task_struct(push_task);
+ clear_reserved(target_cpu);
}
if (p)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 39f710719aac..805b8ebd7d7f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1018,6 +1018,29 @@ static inline unsigned long capacity_scale_cpu_freq(int cpu)
#ifdef CONFIG_SCHED_HMP
#define BOOST_KICK 0
+#define CPU_RESERVED 1
+
+static inline int is_reserved(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ return test_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline int mark_reserved(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ /* Name boost_flags as hmp_flags? */
+ return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline void clear_reserved(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ clear_bit(CPU_RESERVED, &rq->hmp_flags);
+}
extern unsigned int sched_enable_hmp;
extern unsigned int sched_enable_power_aware;
@@ -1051,6 +1074,8 @@ static inline void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
{
}
+static inline void clear_reserved(int cpu) { }
+
#define power_cost_at_freq(...) 0
#define trace_sched_cpu_load(...)