author     Syed Rameez Mustafa <rameezmustafa@codeaurora.org>    2016-07-28 10:53:01 -0700
committer  Syed Rameez Mustafa <rameezmustafa@codeaurora.org>    2016-08-22 11:32:57 -0700
commit     e97839440656ab1cae408d36cda646221165c934
tree       4c052f8c2bd9860e11917a906390b538b6942e7d
parent     ef1e55638d7d37fdee298ff8821b359052ef17ce
sched: Remove unused PELT extensions for HMP scheduling
PELT extensions for HMP have never been used since the early days of the HMP scheduler. Furthermore, changes to PELT itself in newer kernel versions render some of the code redundant or incorrect. These extensions have not been tested for a long time and are practically dead code. Remove them so that future upgrades become easier.

Change-Id: I029f327406ca00b2370c93134158b61dda3b81e3
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
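For context, the minimal user-space sketch below (not the kernel code itself) illustrates the behavioural change this patch makes to set_sched_ravg_window(): an out-of-range boot-time window size is now rejected with a warning and -EINVAL, rather than silently switching the scheduler to PELT-based load statistics. MIN_WINDOW_NS and MAX_WINDOW_NS are illustrative stand-ins for MIN_SCHED_RAVG_WINDOW and MAX_SCHED_RAVG_WINDOW, whose actual values are not shown in this diff.

    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_WINDOW_NS 10000000U   /* stand-in for MIN_SCHED_RAVG_WINDOW */
    #define MAX_WINDOW_NS 1000000000U /* stand-in for MAX_SCHED_RAVG_WINDOW */

    static unsigned int sched_ravg_window = 20000000U; /* illustrative default */

    /*
     * Mirrors the patched set_sched_ravg_window(): an out-of-range value is
     * rejected and the current window is kept, instead of flipping the
     * (now removed) sched_use_pelt switch.
     */
    static int set_window(const char *str)
    {
    	unsigned int window_size = (unsigned int)strtoul(str, NULL, 10);

    	if (window_size < MIN_WINDOW_NS || window_size > MAX_WINDOW_NS) {
    		fprintf(stderr, "window %u out of range, rejected\n", window_size);
    		return -1; /* the kernel code warns and returns -EINVAL */
    	}

    	sched_ravg_window = window_size;
    	return 0;
    }

    int main(void)
    {
    	set_window("5000000");   /* too small: rejected, default is kept */
    	set_window("30000000");  /* in range: accepted */
    	printf("sched_ravg_window = %u ns\n", sched_ravg_window);
    	return 0;
    }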
-rw-r--r--  include/linux/sched.h    3
-rw-r--r--  kernel/sched/core.c     22
-rw-r--r--  kernel/sched/debug.c     3
-rw-r--r--  kernel/sched/fair.c     75
-rw-r--r--  kernel/sched/sched.h    13
5 files changed, 15 insertions(+), 101 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 06dd540192c7..5e644bdd107c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1232,9 +1232,6 @@ struct sched_avg {
u64 last_update_time, load_sum;
u32 util_sum, period_contrib;
unsigned long load_avg, util_avg;
-#ifdef CONFIG_SCHED_HMP
- u32 runnable_avg_sum_scaled;
-#endif
};
#ifdef CONFIG_SCHEDSTATS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 645db3ef9cbb..b599a01c4f53 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1801,9 +1801,6 @@ __read_mostly unsigned int sysctl_sched_pred_alert_freq = 10 * 1024 * 1024;
#endif /* CONFIG_SCHED_FREQ_INPUT */
-/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
-unsigned int __read_mostly sched_use_pelt;
-
/*
* Maximum possible frequency across all cpus. Task demand and cpu
* capacity (cpu_power) metrics are scaled in reference to it.
@@ -1911,11 +1908,17 @@ static inline int exiting_task(struct task_struct *p)
static int __init set_sched_ravg_window(char *str)
{
- get_option(&str, &sched_ravg_window);
+ unsigned int window_size;
- sched_use_pelt = (sched_ravg_window < MIN_SCHED_RAVG_WINDOW ||
- sched_ravg_window > MAX_SCHED_RAVG_WINDOW);
+ get_option(&str, &window_size);
+
+ if (window_size < MIN_SCHED_RAVG_WINDOW ||
+ window_size > MAX_SCHED_RAVG_WINDOW) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ sched_ravg_window = window_size;
return 0;
}
@@ -2949,7 +2952,7 @@ static void
update_task_ravg(struct task_struct *p, struct rq *rq, int event,
u64 wallclock, u64 irqtime)
{
- if (sched_use_pelt || !rq->window_start || sched_disable_window_stats)
+ if (!rq->window_start || sched_disable_window_stats)
return;
lockdep_assert_held(&rq->lock);
@@ -3430,9 +3433,8 @@ int sched_set_window(u64 window_start, unsigned int window_size)
s64 ws;
unsigned long flags;
- if (sched_use_pelt ||
- (window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW))
- return -EINVAL;
+ if (window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW)
+ return -EINVAL;
mutex_lock(&policy_mutex);
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index d1c0ef4bf07d..fc9878eee5df 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -417,10 +417,8 @@ static void sched_debug_header(struct seq_file *m)
P(sched_upmigrate);
P(sched_downmigrate);
P(sched_init_task_load_windows);
- P(sched_init_task_load_pelt);
P(min_capacity);
P(max_capacity);
- P(sched_use_pelt);
P(sched_ravg_window);
#endif
#undef PN
@@ -644,7 +642,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
__P(load_avg);
#ifdef CONFIG_SCHED_HMP
P(ravg.demand);
- P(se.avg.runnable_avg_sum_scaled);
#endif
#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 18b859c44574..034c9414cabd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2577,9 +2577,6 @@ static u32 __compute_runnable_contrib(u64 n)
return contrib + runnable_avg_yN_sum[n];
}
-static void add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta);
-static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods);
-
struct cpu_pwr_stats __weak *get_cpu_pwr_stats(void)
{
return NULL;
@@ -2594,15 +2591,11 @@ enum sched_boost_type {
#ifdef CONFIG_SCHED_HMP
/* Initial task load. Newly created tasks are assigned this load. */
-unsigned int __read_mostly sched_init_task_load_pelt;
unsigned int __read_mostly sched_init_task_load_windows;
unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;
unsigned int max_task_load(void)
{
- if (sched_use_pelt)
- return LOAD_AVG_MAX;
-
return sched_ravg_window;
}
@@ -2723,11 +2716,6 @@ void set_hmp_defaults(void)
sched_major_task_runtime =
mult_frac(sched_ravg_window, MAJOR_TASK_PCT, 100);
#endif
-
- sched_init_task_load_pelt =
- div64_u64((u64)sysctl_sched_init_task_load_pct *
- (u64)LOAD_AVG_MAX, 100);
-
sched_init_task_load_windows =
div64_u64((u64)sysctl_sched_init_task_load_pct *
(u64)sched_ravg_window, 100);
@@ -4323,7 +4311,6 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
u32 contrib;
unsigned int delta_w, scaled_delta_w, decayed = 0;
unsigned long scale_freq, scale_cpu;
- struct sched_entity *se = NULL;
delta = now - sa->last_update_time;
/*
@@ -4344,12 +4331,6 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
return 0;
sa->last_update_time = now;
- if (sched_use_pelt && cfs_rq && weight) {
- se = container_of(sa, struct sched_entity, avg);
- if (entity_is_task(se) && se->on_rq)
- dec_hmp_sched_stats_fair(rq_of(cfs_rq), task_of(se));
- }
-
scale_freq = arch_scale_freq_capacity(NULL, cpu);
scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
@@ -4370,7 +4351,6 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
scaled_delta_w = cap_scale(delta_w, scale_freq);
if (weight) {
sa->load_sum += weight * scaled_delta_w;
- add_to_scaled_stat(cpu, sa, delta_w);
if (cfs_rq) {
cfs_rq->runnable_load_sum +=
weight * scaled_delta_w;
@@ -4397,7 +4377,6 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
contrib = cap_scale(contrib, scale_freq);
if (weight) {
sa->load_sum += weight * contrib;
- add_to_scaled_stat(cpu, sa, contrib);
if (cfs_rq)
cfs_rq->runnable_load_sum += weight * contrib;
}
@@ -4409,14 +4388,10 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
scaled_delta = cap_scale(delta, scale_freq);
if (weight) {
sa->load_sum += weight * scaled_delta;
- add_to_scaled_stat(cpu, sa, delta);
if (cfs_rq)
cfs_rq->runnable_load_sum += weight * scaled_delta;
}
- if (se && entity_is_task(se) && se->on_rq)
- inc_hmp_sched_stats_fair(rq_of(cfs_rq), task_of(se));
-
if (running)
sa->util_sum += scaled_delta * scale_cpu;
@@ -4676,7 +4651,6 @@ void init_new_task_load(struct task_struct *p)
{
int i;
u32 init_load_windows = sched_init_task_load_windows;
- u32 init_load_pelt = sched_init_task_load_pelt;
u32 init_load_pct = current->init_load_pct;
p->init_load_pct = 0;
@@ -4685,18 +4659,14 @@ void init_new_task_load(struct task_struct *p)
memset(&p->ravg, 0, sizeof(struct ravg));
p->cpu_cycles = 0;
- if (init_load_pct) {
- init_load_pelt = div64_u64((u64)init_load_pct *
- (u64)LOAD_AVG_MAX, 100);
+ if (init_load_pct)
init_load_windows = div64_u64((u64)init_load_pct *
(u64)sched_ravg_window, 100);
- }
p->ravg.demand = init_load_windows;
clear_ravg_pred_demand();
for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
p->ravg.sum_history[i] = init_load_windows;
- p->se.avg.runnable_avg_sum_scaled = init_load_pelt;
}
#else /* CONFIG_SCHED_HMP */
@@ -4719,40 +4689,6 @@ unsigned int pct_task_load(struct task_struct *p)
return load;
}
-/*
- * Add scaled version of 'delta' to runnable_avg_sum_scaled
- * 'delta' is scaled in reference to "best" cpu
- */
-static inline void
-add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta)
-{
- int cur_freq = cpu_cur_freq(cpu);
- u64 scaled_delta;
- int sf;
-
- if (!sched_enable_hmp)
- return;
-
- if (unlikely(cur_freq > max_possible_freq))
- cur_freq = max_possible_freq;
-
- scaled_delta = div64_u64(delta * cur_freq, max_possible_freq);
- sf = (cpu_efficiency(cpu) * 1024) / max_possible_efficiency;
- scaled_delta *= sf;
- scaled_delta >>= 10;
- sa->runnable_avg_sum_scaled += scaled_delta;
-}
-
-static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
-{
- if (!sched_enable_hmp)
- return;
-
- sa->runnable_avg_sum_scaled =
- decay_load(sa->runnable_avg_sum_scaled,
- periods);
-}
-
#ifdef CONFIG_CFS_BANDWIDTH
static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
@@ -4814,15 +4750,6 @@ static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
#else /* CONFIG_SCHED_HMP */
-static inline void
-add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta)
-{
-}
-
-static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
-{
-}
-
static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { }
static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3209b13207ea..28e2cb22e313 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1047,7 +1047,6 @@ extern void init_new_task_load(struct task_struct *p);
extern struct mutex policy_mutex;
extern unsigned int sched_ravg_window;
-extern unsigned int sched_use_pelt;
extern unsigned int sched_disable_window_stats;
extern unsigned int sched_enable_hmp;
extern unsigned int max_possible_freq;
@@ -1062,7 +1061,6 @@ extern unsigned int max_possible_capacity;
extern unsigned int min_max_possible_capacity;
extern unsigned int sched_upmigrate;
extern unsigned int sched_downmigrate;
-extern unsigned int sched_init_task_load_pelt;
extern unsigned int sched_init_task_load_windows;
extern unsigned int up_down_migrate_scale_factor;
extern unsigned int sysctl_sched_restrict_cluster_spill;
@@ -1179,9 +1177,6 @@ static inline u64 scale_load_to_cpu(u64 task_load, int cpu)
static inline unsigned int task_load(struct task_struct *p)
{
- if (sched_use_pelt)
- return p->se.avg.runnable_avg_sum_scaled;
-
return p->ravg.demand;
}
@@ -1202,8 +1197,7 @@ inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
if (!sched_enable_hmp || sched_disable_window_stats)
return;
- task_load = sched_use_pelt ? p->se.avg.runnable_avg_sum_scaled :
- (sched_disable_window_stats ? 0 : p->ravg.demand);
+ task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
stats->cumulative_runnable_avg += task_load;
set_pred_demands_sum(stats, stats->pred_demands_sum +
@@ -1219,8 +1213,7 @@ dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
if (!sched_enable_hmp || sched_disable_window_stats)
return;
- task_load = sched_use_pelt ? p->se.avg.runnable_avg_sum_scaled :
- (sched_disable_window_stats ? 0 : p->ravg.demand);
+ task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
stats->cumulative_runnable_avg -= task_load;
@@ -1287,8 +1280,6 @@ struct related_thread_group *task_related_thread_group(struct task_struct *p)
#else /* CONFIG_SCHED_HMP */
-#define sched_use_pelt 0
-
struct hmp_sched_stats;
struct related_thread_group;