diff options
author | Pavankumar Kondeti <pkondeti@codeaurora.org> | 2017-02-27 09:25:59 +0530 |
---|---|---|
committer | Pavankumar Kondeti <pkondeti@codeaurora.org> | 2017-02-27 10:00:51 +0530 |
commit | 73f527b67c02b5273d71bfcc25be7693e3cbad86 (patch) | |
tree | 2e3298f7a59ef7d3ee654f8192e81cc623dd86aa | |
parent | 98094e2149dad0658346d504edde8aac9a921500 (diff) |
sched: Print aggregation status in sched_get_busy trace event
Aggregation for frequency is not enabled all the time. The aggregated
load is attached to the most busy CPU only when the group load is above
a certain threshold. Print the aggregation status in sched_get_busy
trace event to make debugging and testing easier.
Change-Id: Icb916f362ea0fa8b5dc7d23cb384168d86159687
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
-rw-r--r-- | include/trace/events/sched.h | 10 | ||||
-rw-r--r-- | kernel/sched/hmp.c | 4 |
2 files changed, 9 insertions, 5 deletions
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 0cd236442864..0b92317f6263 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -631,9 +631,9 @@ TRACE_EVENT(sched_migration_update_sum,
 
 TRACE_EVENT(sched_get_busy,
 
-	TP_PROTO(int cpu, u64 load, u64 nload, u64 pload, int early),
+	TP_PROTO(int cpu, u64 load, u64 nload, u64 pload, int early, bool aggregated),
 
-	TP_ARGS(cpu, load, nload, pload, early),
+	TP_ARGS(cpu, load, nload, pload, early, aggregated),
 
 	TP_STRUCT__entry(
 		__field(int,	cpu			)
@@ -641,6 +641,7 @@ TRACE_EVENT(sched_get_busy,
 		__field(u64,	nload			)
 		__field(u64,	pload			)
 		__field(int,	early			)
+		__field(bool,	aggregated		)
 	),
 
 	TP_fast_assign(
@@ -649,11 +650,12 @@ TRACE_EVENT(sched_get_busy,
 		__entry->nload		= nload;
 		__entry->pload		= pload;
 		__entry->early		= early;
+		__entry->aggregated	= aggregated;
 	),
 
-	TP_printk("cpu %d load %lld new_task_load %lld predicted_load %lld early %d",
+	TP_printk("cpu %d load %lld new_task_load %lld predicted_load %lld early %d aggregated %d",
 		__entry->cpu, __entry->load, __entry->nload,
-		__entry->pload, __entry->early)
+		__entry->pload, __entry->early, __entry->aggregated)
 );
 
 TRACE_EVENT(sched_freq_alert,
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 744c60dfb4fb..df47c26ab6d2 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -3274,7 +3274,9 @@ exit_early:
 		trace_sched_get_busy(cpu, busy[i].prev_load,
 				     busy[i].new_task_load,
 				     busy[i].predicted_load,
-				     early_detection[i]);
+				     early_detection[i],
+				     aggregate_load &&
+				     cpu == max_busy_cpu);
 		i++;
 	}
 }