summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorSyed Rameez Mustafa <rameezmustafa@codeaurora.org>2017-01-04 15:56:51 -0800
committerSyed Rameez Mustafa <rameezmustafa@codeaurora.org>2017-01-10 11:01:52 -0800
commit47f7e0415af9fd1078b51a45a7b18701eb7d5177 (patch)
tree3cc1dc68321f14a7b2e520ec02bbacc9a6452383 /kernel
parenta6d83d2e8ed916c5a097c7c047dc39cb9183b0e9 (diff)
sched: Convert the global wake_up_idle flag to a per cluster flag
Since clusters can vary significantly in their power and performance characteristics, there may be a need to have different CPU selection policies based on which cluster a task is being placed on. For example, the placement policy can be more aggressive in using idle CPUs on clusters that are power efficient and less aggressive on clusters that are geared towards performance. Add support for a per cluster wake_up_idle flag to allow greater flexibility in placement policies.

Change-Id: I18cd3d907cd965db03a13f4655870dc10c07acfe
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/fair.c45
-rw-r--r--kernel/sched/hmp.c14
-rw-r--r--kernel/sched/sched.h1
-rw-r--r--kernel/sysctl.c7
4 files changed, 34 insertions, 33 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 87538f7d495a..e1c8ec0458b3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -86,14 +86,6 @@ static unsigned int sched_nr_latency = 8;
unsigned int sysctl_sched_child_runs_first __read_mostly;
/*
- * Controls whether, when SD_SHARE_PKG_RESOURCES is on, if all
- * tasks go to idle CPUs when woken. If this is off, note that the
- * per-task flag PF_WAKE_UP_IDLE can still cause a task to go to an
- * idle CPU upon being woken.
- */
-unsigned int __read_mostly sysctl_sched_wake_to_idle;
-
-/*
* SCHED_OTHER wake-up granularity.
* (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
@@ -2649,6 +2641,21 @@ struct cluster_cpu_stats {
s64 highest_spare_capacity;
};
+/*
+ * Should task be woken to any available idle cpu?
+ *
+ * Waking tasks to idle cpu has mixed implications on both performance and
+ * power. In many cases, scheduler can't estimate correctly impact of using idle
+ * cpus on either performance or power. PF_WAKE_UP_IDLE allows external kernel
+ * module to pass a strong hint to scheduler that the task in question should be
+ * woken to idle cpu, generally to improve performance.
+ */
+static inline int wake_to_idle(struct task_struct *p)
+{
+ return (current->flags & PF_WAKE_UP_IDLE) ||
+ (p->flags & PF_WAKE_UP_IDLE);
+}
+
static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq)
{
u64 total_load;
@@ -3009,6 +3016,8 @@ static void find_best_cpu_in_cluster(struct sched_cluster *c,
if (env->ignore_prev_cpu)
cpumask_clear_cpu(env->prev_cpu, &search_cpus);
+ env->need_idle = wake_to_idle(env->p) || c->wake_up_idle;
+
for_each_cpu(i, &search_cpus) {
env->cpu_load = cpu_load_sync(i, env->sync);
@@ -3052,21 +3061,6 @@ static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
/* No need to initialize stats->best_load */
}
-/*
- * Should task be woken to any available idle cpu?
- *
- * Waking tasks to idle cpu has mixed implications on both performance and
- * power. In many cases, scheduler can't estimate correctly impact of using idle
- * cpus on either performance or power. PF_WAKE_UP_IDLE allows external kernel
- * module to pass a strong hint to scheduler that the task in question should be
- * woken to idle cpu, generally to improve performance.
- */
-static inline int wake_to_idle(struct task_struct *p)
-{
- return (current->flags & PF_WAKE_UP_IDLE) ||
- (p->flags & PF_WAKE_UP_IDLE) || sysctl_sched_wake_to_idle;
-}
-
static inline bool env_has_special_flags(struct cpu_select_env *env)
{
if (env->need_idle || env->boost_policy != SCHED_BOOST_NONE ||
@@ -6755,9 +6749,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
return i;
}
- if (!sysctl_sched_wake_to_idle &&
- !(current->flags & PF_WAKE_UP_IDLE) &&
- !(p->flags & PF_WAKE_UP_IDLE))
+ if (!(current->flags & PF_WAKE_UP_IDLE) &&
+ !(p->flags & PF_WAKE_UP_IDLE))
return target;
/*
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 180e2fcf785b..12fa618a8135 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -377,6 +377,7 @@ struct sched_cluster init_cluster = {
.dstate_wakeup_latency = 0,
.exec_scale_factor = 1024,
.notifier_sent = 0,
+ .wake_up_idle = 0,
};
static void update_all_clusters_stats(void)
@@ -677,6 +678,19 @@ unsigned int sched_get_static_cluster_pwr_cost(int cpu)
return cpu_rq(cpu)->cluster->static_cluster_pwr_cost;
}
+int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle)
+{
+ struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
+
+ cluster->wake_up_idle = !!wake_idle;
+ return 0;
+}
+
+unsigned int sched_get_cluster_wake_idle(int cpu)
+{
+ return cpu_rq(cpu)->cluster->wake_up_idle;
+}
+
/*
* sched_window_stats_policy and sched_ravg_hist_size have a 'sysctl' copy
* associated with them. This is required for atomic update of those variables
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a3abdf19ff4c..c110c4aaf2be 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -389,6 +389,7 @@ struct sched_cluster {
int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
unsigned int static_cluster_pwr_cost;
int notifier_sent;
+ bool wake_up_idle;
};
extern unsigned long all_cluster_ids[];
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 12ea4f09c04b..eced92aa492a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -289,13 +289,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
- .procname = "sched_wake_to_idle",
- .data = &sysctl_sched_wake_to_idle,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
#ifdef CONFIG_SCHED_HMP
{
.procname = "sched_freq_reporting_policy",