author     Syed Rameez Mustafa <rameezmustafa@codeaurora.org>    2014-12-03 10:18:12 -0800
committer  David Keitel <dkeitel@codeaurora.org>                 2016-03-23 20:01:05 -0700
commit     57ee8ef06eb2ac56c9c227e687e9df8d02bd691e
tree       a498be874ac73a63d4dccef81b177cbee608c4db
parent     dc66ef50f5572f811e068a78ef99c92782fbfc0d
sched: Make RT tasks eligible for boost
During sched boost, RT tasks currently end up on the lowest-power
cluster. This can be a performance bottleneck, especially if the
frequency and IPC differences between clusters are high. Furthermore,
when RT tasks go over to the little cluster during boost, the load
balancer keeps attempting to pull work over to the big cluster. This
results in preemption of the executing RT task, causing further delays.
Finally, containing more work on a single cluster during boost might
help save some power if the little cluster can then enter deeper low
power modes.

Change-Id: I177b2e81be5657c23e7ac43889472561ce9993a9
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
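The change below boils down to one extra check in the RT CPU selection
loop. As a rough standalone illustration only (not part of the patch):
a minimal user-space sketch of that rule, where cpu_info,
pick_lowest_rq() and the capacity/cost numbers are made up and stand in
for rq->capacity, power_cost_at_freq() and the real runqueue scan in
find_lowest_rq_hmp().

#include <limits.h>
#include <stdio.h>

struct cpu_info {
        int capacity;           /* stands in for rq->capacity */
        int power_cost;         /* stands in for power_cost_at_freq() */
};

static int pick_lowest_rq(const struct cpu_info *cpus, int nr_cpus,
                          int max_capacity, int boost)
{
        int best_cpu = -1;
        int min_cost = INT_MAX;
        int i;

        for (i = 0; i < nr_cpus; i++) {
                /*
                 * The rule this patch adds: while boost is active, only
                 * max-capacity (big cluster) CPUs are eligible.
                 */
                if (boost && cpus[i].capacity != max_capacity)
                        continue;

                /* Otherwise keep the cheapest CPU by power cost. */
                if (cpus[i].power_cost < min_cost) {
                        min_cost = cpus[i].power_cost;
                        best_cpu = i;
                }
        }
        return best_cpu;
}

int main(void)
{
        /* Two little CPUs (cheap, low capacity) and two big CPUs. */
        struct cpu_info cpus[] = {
                { .capacity =  512, .power_cost = 10 },
                { .capacity =  512, .power_cost = 12 },
                { .capacity = 1024, .power_cost = 40 },
                { .capacity = 1024, .power_cost = 45 },
        };

        printf("no boost: CPU %d\n", pick_lowest_rq(cpus, 4, 1024, 0)); /* 0 */
        printf("boost:    CPU %d\n", pick_lowest_rq(cpus, 4, 1024, 1)); /* 2 */
        return 0;
}

Without boost the cheapest little CPU wins; with boost the scan skips
the little cluster and falls through to the cheapest big CPU, which is
what the continue added to find_lowest_rq_hmp() in rt.c achieves.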
-rw-r--r--   kernel/sched/fair.c    12
-rw-r--r--   kernel/sched/rt.c       4
-rw-r--r--   kernel/sched/sched.h   11
3 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3614bb03cab5..6c7d2e4d5e90 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2831,7 +2831,7 @@ static void boost_kick_cpus(void)
         }
 }
 
-static inline int sched_boost(void)
+int sched_boost(void)
 {
         return boost_refcount > 0;
 }
@@ -3622,11 +3622,6 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
                         &rq->active_balance_work);
 }
 
-static inline int capacity(struct rq *rq)
-{
-        return rq->capacity;
-}
-
 static inline int nr_big_tasks(struct rq *rq)
 {
         return rq->nr_big_tasks;
@@ -3687,11 +3682,6 @@ static inline int nr_big_tasks(struct rq *rq)
         return 0;
 }
 
-static inline int capacity(struct rq *rq)
-{
-        return SCHED_LOAD_SCALE;
-}
-
 #endif /* CONFIG_SCHED_HMP */
 
 #ifdef CONFIG_SCHED_HMP
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 280b9a8da5f8..f2f9b92f75cb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1659,6 +1659,10 @@ static int find_lowest_rq_hmp(struct task_struct *task)
                 cpu_cost = power_cost_at_freq(i, ACCESS_ONCE(rq->min_freq));
                 trace_sched_cpu_load(rq, idle_cpu(i),
                                      mostly_idle_cpu(i), cpu_cost);
+
+                if (sched_boost() && capacity(rq) != max_capacity)
+                        continue;
+
                 if (cpu_cost < min_cost) {
                         min_cost = cpu_cost;
                         best_cpu = i;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fcdf4063ac11..117578626e8f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -995,6 +995,11 @@ unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
                                   u64 delta, u64 wallclock);
 
+static inline int capacity(struct rq *rq)
+{
+        return rq->capacity;
+}
+
 static inline void
 inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 {
@@ -1026,6 +1031,11 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 static inline int pct_task_load(struct task_struct *p) { return 0; }
 
+static inline int capacity(struct rq *rq)
+{
+        return SCHED_LOAD_SCALE;
+}
+
 static inline void
 inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 {
@@ -1120,6 +1130,7 @@ extern void set_hmp_defaults(void);
 extern unsigned int power_cost_at_freq(int cpu, unsigned int freq);
 extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
 extern void boost_kick(int cpu);
+extern int sched_boost(void);
 
 #else /* CONFIG_SCHED_HMP */