author     Srivatsa Vaddagiri <vatsa@codeaurora.org>   2015-01-30 11:52:37 +0530
committer  David Keitel <dkeitel@codeaurora.org>       2016-03-23 20:01:42 -0700
commit     c41a54cb8dfbfa7e363b2d1df530e819b7624b7d
tree       0f6de9c3fac88af94c232d37628cca2c7bb90e52
parent     44d892787efab3795cf8c708b0cb5534cd90cdeb
sched: Keep track of average nr_big_tasks
Extend the sched_get_nr_running_avg() API to return the average
nr_big_tasks count, in addition to the average nr_running and average
nr_iowait task counts. Also add a new tracepoint to record the values
returned by sched_get_nr_running_avg().
Change-Id: Id3591e6d04da8db484b4d1cb9d95dba075f5ab9a
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Resolve trivial merge conflicts]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
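
For orientation, here is a minimal sketch of how a consumer might poll the
extended API; the sampling function and the threshold below are hypothetical
and not part of this patch. All three outputs are scaled by 100, so a
big_avg of 150 means roughly 1.5 big tasks were runnable on average since
the previous poll.

/*
 * Hypothetical consumer (not part of this patch): poll the extended
 * API from a periodic worker. Note the argument order matches the new
 * prototype: avg, iowait_avg, big_avg.
 */
static void sample_task_load(void)
{
	int avg, iowait_avg, big_avg;

	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);

	if (big_avg >= 100)	/* at least one big task, on average */
		pr_debug("avg=%d big_avg=%d iowait_avg=%d\n",
			 avg, big_avg, iowait_avg);
}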
-rw-r--r--   include/linux/sched.h        |  2
-rw-r--r--   include/trace/events/sched.h | 22
-rw-r--r--   kernel/sched/fair.c          | 24
-rw-r--r--   kernel/sched/sched.h         |  6
-rw-r--r--   kernel/sched/sched_avg.c     | 32
5 files changed, 78 insertions, 8 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 82d7dd8300a4..ccfd15287c91 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -175,7 +175,7 @@ extern unsigned long nr_iowait_cpu(int cpu);
 
 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 extern void sched_update_nr_prod(int cpu, long delta, bool inc);
-extern void sched_get_nr_running_avg(int *avg, int *iowait_avg);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
 
 extern void calc_global_load(unsigned long ticks);
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 1ac6edf6f8e4..2b59817d6560 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1057,6 +1057,28 @@ TRACE_EVENT(sched_wake_idle_without_ipi,
 	TP_printk("cpu=%d", __entry->cpu)
 );
+
+TRACE_EVENT(sched_get_nr_running_avg,
+
+	TP_PROTO(int avg, int big_avg, int iowait_avg),
+
+	TP_ARGS(avg, big_avg, iowait_avg),
+
+	TP_STRUCT__entry(
+		__field( int, avg )
+		__field( int, big_avg )
+		__field( int, iowait_avg )
+	),
+
+	TP_fast_assign(
+		__entry->avg = avg;
+		__entry->big_avg = big_avg;
+		__entry->iowait_avg = iowait_avg;
+	),
+
+	TP_printk("avg=%d big_avg=%d iowait_avg=%d",
+		__entry->avg, __entry->big_avg, __entry->iowait_avg)
+);
 
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 46782a66b3b5..b757bfb94222 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3558,6 +3558,30 @@ static inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
 #endif /* CONFIG_CFS_BANDWIDTH */
 
 /*
+ * Return total number of tasks "eligible" to run on highest capacity cpu
+ *
+ * This is simply nr_big_tasks for cpus which are not of max_capacity and
+ * (nr_running - nr_small_tasks) for cpus of max_capacity
+ */
+unsigned int nr_eligible_big_tasks(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	int nr_big = rq->hmp_stats.nr_big_tasks;
+	int nr = rq->nr_running;
+	int nr_small = rq->hmp_stats.nr_small_tasks;
+
+	if (rq->capacity != max_capacity)
+		return nr_big;
+
+	/* Consider all (except small) tasks on max_capacity cpu as big tasks */
+	nr_big = nr - nr_small;
+	if (nr_big < 0)
+		nr_big = 0;
+
+	return nr_big;
+}
+
+/*
  * reset_cpu_hmp_stats - reset HMP stats for a cpu
  *	nr_big_tasks, nr_small_tasks
  *	cumulative_runnable_avg (iff reset_cra is true)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 57190adbe5c6..eae7973b37b9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1003,6 +1003,7 @@ extern unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
 				 u64 delta, u64 wallclock);
 unsigned int cpu_temp(int cpu);
+extern unsigned int nr_eligible_big_tasks(int cpu);
 
 static inline int capacity(struct rq *rq)
 {
@@ -1085,6 +1086,11 @@ static inline u64 scale_load_to_cpu(u64 load, int cpu)
 	return load;
 }
 
+static inline unsigned int nr_eligible_big_tasks(int cpu)
+{
+	return 0;
+}
+
 static inline int pct_task_load(struct task_struct *p) { return 0; }
 
 static inline int capacity(struct rq *rq)
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 4f5a67ec4dd6..cdb1d7c53849 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -18,32 +18,38 @@
 #include <linux/hrtimer.h>
 #include <linux/sched.h>
 #include <linux/math64.h>
+#include <trace/events/sched.h>
+
+#include "sched.h"
 
 static DEFINE_PER_CPU(u64, nr_prod_sum);
 static DEFINE_PER_CPU(u64, last_time);
+static DEFINE_PER_CPU(u64, nr_big_prod_sum);
 static DEFINE_PER_CPU(u64, nr);
+
 static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
 static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
 static s64 last_get_time;
 
 /**
  * sched_get_nr_running_avg
- * @return: Average nr_running and iowait value since last poll.
+ * @return: Average nr_running, iowait and nr_big_tasks value since last poll.
  *	    Returns the avg * 100 to return up to two decimal points
  *	    of accuracy.
  *
  * Obtains the average nr_running value since the last poll.
  * This function may not be called concurrently with itself
  */
-void sched_get_nr_running_avg(int *avg, int *iowait_avg)
+void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
 {
 	int cpu;
 	u64 curr_time = sched_clock();
 	u64 diff = curr_time - last_get_time;
-	u64 tmp_avg = 0, tmp_iowait = 0;
+	u64 tmp_avg = 0, tmp_iowait = 0, tmp_big_avg = 0;
 
 	*avg = 0;
 	*iowait_avg = 0;
+	*big_avg = 0;
 
 	if (!diff)
 		return;
@@ -57,12 +63,21 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg)
 		tmp_avg += per_cpu(nr_prod_sum, cpu);
 		tmp_avg += per_cpu(nr, cpu) *
 				(curr_time - per_cpu(last_time, cpu));
+
+		tmp_big_avg += per_cpu(nr_big_prod_sum, cpu);
+		tmp_big_avg += nr_eligible_big_tasks(cpu) *
+				(curr_time - per_cpu(last_time, cpu));
+
 		tmp_iowait += per_cpu(iowait_prod_sum, cpu);
 		tmp_iowait += nr_iowait_cpu(cpu) *
 				(curr_time - per_cpu(last_time, cpu));
+
 		per_cpu(last_time, cpu) = curr_time;
+
 		per_cpu(nr_prod_sum, cpu) = 0;
+		per_cpu(nr_big_prod_sum, cpu) = 0;
 		per_cpu(iowait_prod_sum, cpu) = 0;
+
 		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
 	}
 
@@ -70,12 +85,14 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg)
 
 	last_get_time = curr_time;
 
 	*avg = (int)div64_u64(tmp_avg * 100, diff);
+	*big_avg = (int)div64_u64(tmp_big_avg * 100, diff);
 	*iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);
 
-	BUG_ON(*avg < 0);
-	pr_debug("%s - avg:%d\n", __func__, *avg);
-	BUG_ON(*iowait_avg < 0);
-	pr_debug("%s - iowait_avg:%d\n", __func__, *iowait_avg);
+	trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg);
+
+	BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
+	pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
+		 __func__, *avg, *big_avg, *iowait_avg);
 }
 EXPORT_SYMBOL(sched_get_nr_running_avg);
 
@@ -104,6 +121,7 @@ void sched_update_nr_prod(int cpu, long delta, bool inc)
 	BUG_ON((s64)per_cpu(nr, cpu) < 0);
 
 	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
+	per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
 	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
 	spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
 }
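
To make the window-average bookkeeping in sched_avg.c concrete, below is a
minimal single-CPU userspace model of the same scheme; every name and
timestamp in it is invented for illustration. The kernel keeps one such
product sum per CPU (plus the big-task and iowait variants) under nr_lock,
but the arithmetic is identical: each change folds count * elapsed-time
into a running sum, and each poll divides by the window length and scales
by 100.

#include <stdio.h>
#include <stdint.h>

static uint64_t prod_sum;	/* sum of nr * dt over the window */
static uint64_t last_time;	/* timestamp of last update */
static uint64_t window_start;	/* timestamp of last poll */
static unsigned int nr;		/* current task count */

/* Fold the elapsed interval into the product sum, then apply delta. */
static void update_nr(uint64_t now, int delta)
{
	prod_sum += (uint64_t)nr * (now - last_time);
	last_time = now;
	nr += delta;
}

/* Close the window: average = prod_sum / window length, scaled by 100. */
static int poll_avg(uint64_t now)
{
	uint64_t diff = now - window_start;
	int avg;

	update_nr(now, 0);
	window_start = now;
	if (!diff)
		return 0;
	avg = (int)(prod_sum * 100 / diff);
	prod_sum = 0;
	return avg;
}

int main(void)
{
	update_nr(0, 2);	/* two tasks runnable from t=0 */
	update_nr(600, 1);	/* a third arrives at t=600 */
	/* (2*600 + 3*400) / 1000 = 2.4 average tasks -> prints 240 */
	printf("avg*100 = %d\n", poll_avg(1000));
	return 0;
}

Scaling by 100 lets callers keep two decimal places of precision in plain
integers, which is why the BUG_ON sanity checks and the new tracepoint in
the patch operate on the already-scaled values.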