summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--include/linux/sched.h4
-rw-r--r--init/Kconfig9
-rw-r--r--kernel/sched/core.c11
-rw-r--r--kernel/sched/fair.c6
-rw-r--r--kernel/sched/sched.h14
-rw-r--r--kernel/sysctl.c2
6 files changed, 28 insertions, 18 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4a464ae84a38..5cdba398f367 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1203,7 +1203,7 @@ struct sched_avg {
u64 last_update_time, load_sum;
u32 util_sum, period_contrib;
unsigned long load_avg, util_avg;
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
u32 runnable_avg_sum_scaled;
#endif
};
@@ -1427,7 +1427,7 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
struct ravg ravg;
#endif
#ifdef CONFIG_CGROUP_SCHED
diff --git a/init/Kconfig b/init/Kconfig
index 235c7a2c0d20..ad08a40a304b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1153,6 +1153,15 @@ config CGROUP_WRITEBACK
endif # CGROUPS
+config SCHED_HMP
+ bool "Scheduler support for heterogeneous multi-processor systems"
+ depends on SMP && FAIR_GROUP_SCHED
+ help
+ This feature will let the scheduler optimize task placement on
+ systems made of heterogeneous CPUs, i.e. CPUs that differ either
+ in their instructions per-cycle capability or the maximum
+ frequency they can attain.
+
config CHECKPOINT_RESTORE
bool "Checkpoint/restore support" if EXPERT
select PROC_CHILDREN
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 35ecc5ed1f13..611dec66c978 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1746,7 +1746,7 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
wq_worker_waking_up(p, cpu_of(rq));
}
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
/* Window size (in ns) */
__read_mostly unsigned int sched_ravg_window = 10000000;
@@ -1893,7 +1893,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
p->ravg.mark_start = wallclock;
}
-#endif /* CONFIG_SCHED_FREQ_INPUT */
+#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
/*
* Mark the task runnable and perform wakeup-preemption.
@@ -7573,7 +7573,8 @@ void __init sched_init_smp(void)
#endif /* CONFIG_SMP */
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
+
/*
* Maximum possible frequency across all cpus. Task demand and cpu
* capacity (cpu_power) metrics are scaled in reference to it.
@@ -7660,7 +7661,7 @@ static int register_sched_callback(void)
*/
core_initcall(register_sched_callback);
-#endif /* CONFIG_SCHED_FREQ_INPUT */
+#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
int in_sched_functions(unsigned long addr)
{
@@ -7799,7 +7800,7 @@ void __init sched_init(void)
rq->online = 0;
rq->idle_stamp = 0;
rq->avg_idle = 2*sysctl_sched_migration_cost;
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
rq->cur_freq = 1;
rq->max_freq = 1;
rq->min_freq = 1;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8a510a2187b..f0a37a7a7060 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2911,7 +2911,7 @@ static inline int idle_balance(struct rq *rq)
#endif /* CONFIG_SMP */
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
static inline unsigned int task_load(struct task_struct *p)
{
@@ -2974,7 +2974,7 @@ static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
periods);
}
-#else /* CONFIG_SCHED_FREQ_INPUT */
+#else /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
static inline void
add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta)
@@ -2985,7 +2985,7 @@ static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
{
}
-#endif /* CONFIG_SCHED_FREQ_INPUT */
+#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index bb7f283b6dac..0267497dd821 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -643,7 +643,7 @@ struct rq {
u64 max_idle_balance_cost;
#endif
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
/*
* max_freq = user or thermal defined maximum
* max_possible_freq = maximum supported by hardware
@@ -917,7 +917,7 @@ static inline void sched_ttwu_pending(void) { }
#include "stats.h"
#include "auto_group.h"
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
extern unsigned int sched_ravg_window;
extern unsigned int max_possible_freq;
@@ -938,7 +938,7 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
BUG_ON((s64)rq->cumulative_runnable_avg < 0);
}
-#else /* CONFIG_SCHED_FREQ_INPUT */
+#else /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
static inline int pct_task_load(struct task_struct *p) { return 0; }
@@ -954,7 +954,7 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
static inline void init_new_task_load(struct task_struct *p) { }
-#endif /* CONFIG_SCHED_FREQ_INPUT */
+#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
#ifdef CONFIG_CGROUP_SCHED
@@ -1289,15 +1289,15 @@ struct sched_class {
#endif
};
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
extern void
update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum);
-#else /* CONFIG_SCHED_FREQ_INPUT */
+#else /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
static inline void
update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
{
}
-#endif /* CONFIG_SCHED_FREQ_INPUT */
+#endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 25f96e3a83a9..f0192827164d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -292,7 +292,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
-#ifdef CONFIG_SCHED_FREQ_INPUT
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
{
.procname = "sched_window_stats_policy",
.data = &sysctl_sched_window_stats_policy,