diff options
author | Pavankumar Kondeti <pkondeti@codeaurora.org> | 2018-01-09 11:02:30 +0530 |
---|---|---|
committer | Lingutla Chandrasekhar <clingutla@codeaurora.org> | 2018-04-24 11:33:10 +0530 |
commit | 10e91c5e578fb378b53b8f8e7a3e8d1e6cf6e1f5 (patch) | |
tree | 64bf964c901bc085d12c32d1b008bfa4b4402655 /kernel | |
parent | 893b274e18b341e8c998282b60385db54101eb8f (diff) |
sched: Add trace point to track preemption disable callers
Add a trace point to track preemption-disable callers, in order to
isolate issues unrelated to the scheduler and improve debug
turnaround time.
Change-Id: If9303b7165167e8f79cd339929daf4afc31a61c4
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/core.c | 38 | ||||
-rw-r--r-- | kernel/sysctl.c | 10 |
2 files changed, 47 insertions, 1 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fffc50b0191f..c1ecb07de762 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3268,9 +3268,24 @@ notrace unsigned long get_parent_ip(unsigned long addr) #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ defined(CONFIG_PREEMPT_TRACER)) +/* + * preemptoff stack tracing threshold in ns. + * default: 1ms + */ +unsigned int sysctl_preemptoff_tracing_threshold_ns = 1000000UL; + +struct preempt_store { + u64 ts; + unsigned long caddr[4]; + bool irqs_disabled; +}; + +static DEFINE_PER_CPU(struct preempt_store, the_ps); void preempt_count_add(int val) { + struct preempt_store *ps = &per_cpu(the_ps, raw_smp_processor_id()); + #ifdef CONFIG_DEBUG_PREEMPT /* * Underflow? @@ -3291,6 +3306,13 @@ void preempt_count_add(int val) #ifdef CONFIG_DEBUG_PREEMPT current->preempt_disable_ip = ip; #endif + ps->ts = sched_clock(); + ps->caddr[0] = CALLER_ADDR0; + ps->caddr[1] = CALLER_ADDR1; + ps->caddr[2] = CALLER_ADDR2; + ps->caddr[3] = CALLER_ADDR3; + ps->irqs_disabled = irqs_disabled(); + trace_preempt_off(CALLER_ADDR0, ip); } } @@ -3313,8 +3335,22 @@ void preempt_count_sub(int val) return; #endif - if (preempt_count() == val) + if (preempt_count() == val) { + struct preempt_store *ps = &per_cpu(the_ps, + raw_smp_processor_id()); + u64 delta = sched_clock() - ps->ts; + + /* + * Trace preempt disable stack if preemption + * is disabled for more than the threshold. 
+ */ + if (delta > sysctl_preemptoff_tracing_threshold_ns) + trace_sched_preempt_disable(delta, ps->irqs_disabled, + ps->caddr[0], ps->caddr[1], + ps->caddr[2], ps->caddr[3]); + trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); + } __preempt_count_sub(val); } EXPORT_SYMBOL(preempt_count_sub); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index bc4ca30ddc21..5ada14880707 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -291,6 +291,16 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, +#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_IRQSOFF_TRACER) + { + .procname = "preemptoff_tracing_threshold_ns", + .data = &sysctl_preemptoff_tracing_threshold_ns, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + +#endif #ifdef CONFIG_SCHED_HMP { .procname = "sched_freq_reporting_policy", |