author | Ingo Molnar <mingo@elte.hu> | 2007-10-15 17:00:03 +0200
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2007-10-15 17:00:03 +0200
commit | a25707f3aef9cf68c341eba5960d580f364e4e6f (patch) |
tree | 77f13a0d32f68217cf6be32b1ab755bf7c1c0665 |
parent | 8ebc91d93669af39dbed50914d7daf457eeb43be (diff) |
sched: remove precise CPU load
CPU load calculations are statistical anyway, and there's little benefit
in computing the load precisely on every scheduling event. So remove this
code; doing so gets rid of a divide in the scheduler wakeup and
context-switch fastpath.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | kernel/sched.c | 42
-rw-r--r-- | kernel/sched_debug.c | 2
-rw-r--r-- | kernel/sched_fair.c | 6
3 files changed, 9 insertions, 41 deletions
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index d4dabfcc776c..25cc9b2a8c15 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1972,42 +1972,11 @@ unsigned long nr_active(void)
  */
 static void update_cpu_load(struct rq *this_rq)
 {
-	u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
 	unsigned long total_load = this_rq->ls.load.weight;
 	unsigned long this_load = total_load;
-	struct load_stat *ls = &this_rq->ls;
 	int i, scale;
 
 	this_rq->nr_load_updates++;
-	if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
-		goto do_avg;
-
-	/* Update delta_fair/delta_exec fields first */
-	update_curr_load(this_rq);
-
-	fair_delta64 = ls->delta_fair + 1;
-	ls->delta_fair = 0;
-
-	exec_delta64 = ls->delta_exec + 1;
-	ls->delta_exec = 0;
-
-	sample_interval64 = this_rq->clock - ls->load_update_last;
-	ls->load_update_last = this_rq->clock;
-
-	if ((s64)sample_interval64 < (s64)TICK_NSEC)
-		sample_interval64 = TICK_NSEC;
-
-	if (exec_delta64 > sample_interval64)
-		exec_delta64 = sample_interval64;
-
-	idle_delta64 = sample_interval64 - exec_delta64;
-
-	tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
-	tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);
-
-	this_load = (unsigned long)tmp64;
-
-do_avg:
 
 	/* Update our load: */
 	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
@@ -2017,7 +1986,13 @@ do_avg:
 
 		old_load = this_rq->cpu_load[i];
 		new_load = this_load;
-
+		/*
+		 * Round up the averaging division if load is increasing. This
+		 * prevents us from getting stuck on 9 if the load is 10, for
+		 * example.
+		 */
+		if (new_load > old_load)
+			new_load += scale-1;
 		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
 	}
 }
@@ -6484,7 +6459,6 @@ static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 
 void __init sched_init(void)
 {
-	u64 now = sched_clock();
 	int highest_cpu = 0;
 	int i, j;
 
@@ -6509,8 +6483,6 @@ void __init sched_init(void)
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 		list_add(&rq->cfs.leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
 #endif
-		rq->ls.load_update_last = now;
-		rq->ls.load_update_start = now;
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
 			rq->cpu_load[j] = 0;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index fd080f686f18..6b789dae7fdf 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -145,8 +145,6 @@ static void print_cpu(struct seq_file *m, int cpu)
 	P(nr_running);
 	SEQ_printf(m, "  .%-30s: %lu\n", "load",
 		   rq->ls.load.weight);
-	P(ls.delta_fair);
-	P(ls.delta_exec);
 	P(nr_switches);
 	P(nr_load_updates);
 	P(nr_uninterruptible);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2138c40f4836..105d57b41aa2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -94,16 +94,14 @@
 enum {
 	SCHED_FEAT_FAIR_SLEEPERS	= 1,
 	SCHED_FEAT_SLEEPER_AVG		= 2,
 	SCHED_FEAT_SLEEPER_LOAD_AVG	= 4,
-	SCHED_FEAT_PRECISE_CPU_LOAD	= 8,
-	SCHED_FEAT_START_DEBIT		= 16,
-	SCHED_FEAT_SKIP_INITIAL		= 32,
+	SCHED_FEAT_START_DEBIT		= 8,
+	SCHED_FEAT_SKIP_INITIAL		= 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_FAIR_SLEEPERS	*1 |
 		SCHED_FEAT_SLEEPER_AVG		*0 |
 		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
-		SCHED_FEAT_PRECISE_CPU_LOAD	*1 |
 		SCHED_FEAT_START_DEBIT		*1 |
 		SCHED_FEAT_SKIP_INITIAL		*0;
```
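After this patch, the sampled, decaying `cpu_load[]` averages are the only per-runqueue CPU load estimate, and the one functional change beyond the removal is the round-up of the averaging division. Below is a minimal userspace sketch of that update rule: the formula, the round-up, and `CPU_LOAD_IDX_MAX` mirror the diff above, while the function name `update_cpu_load_sketch` and the driver loop in `main()` are illustrative inventions, not kernel code.

```c
/*
 * Userspace sketch of the decaying cpu_load[] averages kept in
 * kernel/sched.c after this patch. Formula and round-up taken from
 * the diff above; the driver in main() is purely illustrative.
 */
#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

static unsigned long cpu_load[CPU_LOAD_IDX_MAX];

static void update_cpu_load_sketch(unsigned long this_load)
{
	unsigned long scale;
	int i;

	/* scale doubles per index: 1, 2, 4, 8, 16 */
	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load = cpu_load[i];
		unsigned long new_load = this_load;

		/*
		 * Round up the averaging division if load is increasing;
		 * the truncating >> i would otherwise get stuck just
		 * below the new load (e.g. on 9 when the load is 10).
		 */
		if (new_load > old_load)
			new_load += scale - 1;

		cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
	}
}

int main(void)
{
	int tick;

	for (tick = 0; tick < 8; tick++)	/* settle at a load of 9 */
		update_cpu_load_sketch(9);

	for (tick = 0; tick < 3; tick++) {	/* load jumps to 10 */
		update_cpu_load_sketch(10);
		printf("tick %d: cpu_load[1] = %lu\n", tick, cpu_load[1]);
	}
	return 0;
}
```

Working through the numbers for index 1 (`scale == 2`): without the round-up, a jump from a settled 9 to 10 computes `(9*1 + 10) >> 1 == 9` every tick, so the average never reaches 10; with it, one tick gives `(9 + 11) >> 1 == 10`. Higher indexes weight the old value by `(scale-1)/scale`, so they converge more slowly but need, and get, the same fix.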