diff options
author | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2014-08-21 15:57:22 +0530 |
---|---|---|
committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:00:49 -0700 |
commit | 977dc392f77522559697cbedfb2d48ad7be96aec (patch) | |
tree | 240b406af1f27cf0d8bffb16b5c736ef861fafb2 /kernel | |
parent | c9d0953c31f18744f29044f041e87033d044b0d7 (diff) |
sched: window-stats: ftrace event improvements
Add two new ftrace events:
* trace_sched_freq_alert, to log notifications sent
to the governor requesting a change in frequency.
* trace_sched_get_busy, to log cpu busytime information returned by
scheduler
Extend existing ftrace events as follows:
* sched_update_task_ravg() event to log irqtime parameter
* sched_migration_update_sum() to log the id of the thread being
migrated (and thus responsible for the update of the
curr_runnable_sum and prev_runnable_sum counters)
Change-Id: Ia68ce0953a2d21d319a1db7f916c51ff6a91557c
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/core.c | 10 |
1 files changed, 7 insertions, 3 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a23148908738..def99b4ba90a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1348,6 +1348,8 @@ void check_for_freq_change(struct rq *rq) if (!send_notification(rq, freq_required)) return; + trace_sched_freq_alert(max_demand_cpu, rq->cur_freq, freq_required); + atomic_notifier_call_chain( &load_alert_notifier_head, 0, (void *)(long)max_demand_cpu); @@ -1685,7 +1687,7 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq, } done: - trace_sched_update_task_ravg(p, rq, event, wallclock); + trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime); p->ravg.mark_start = wallclock; } @@ -1954,6 +1956,8 @@ unsigned long sched_get_busy(int cpu) load = div64_u64(load * (u64)rq->max_freq, (u64)rq->max_possible_freq); load = div64_u64(load, NSEC_PER_USEC); + trace_sched_get_busy(cpu, load); + return load; } @@ -2296,8 +2300,8 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu) BUG_ON((s64)src_rq->prev_runnable_sum < 0); BUG_ON((s64)src_rq->curr_runnable_sum < 0); - trace_sched_migration_update_sum(src_rq); - trace_sched_migration_update_sum(dest_rq); + trace_sched_migration_update_sum(src_rq, p); + trace_sched_migration_update_sum(dest_rq, p); done: if (p->state == TASK_WAKING) |