From f92056088e7de9dcda610aec7527ea03857e05ac Mon Sep 17 00:00:00 2001 From: Lingutla Chandrasekhar Date: Thu, 11 Jun 2020 09:49:14 +0530 Subject: [PATCH] trace: increase caller depth for preempt/irqs disable events preempt/irq disable events track and print 4 callers (3 actual callers and current trace event caller), which disable irqs and preemption for longer duration. It may not be sufficient for all scenarios, so extend these events to track 5 callers and exclude current trace event caller. Change-Id: I464dd738a78d3a89fea62fcf4aaee9897bb03c37 Signed-off-by: Lingutla Chandrasekhar --- include/trace/events/preemptirq.h | 29 ++++++++++++++++++----------- kernel/trace/trace_irqsoff.c | 27 +++++++++++++++------------ 2 files changed, 33 insertions(+), 23 deletions(-) diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h index 752da323a87a..eb646c4f3e8a 100644 --- a/include/trace/events/preemptirq.h +++ b/include/trace/events/preemptirq.h @@ -65,9 +65,10 @@ DEFINE_EVENT(preemptirq_template, preempt_enable, TRACE_EVENT(irqs_disable, TP_PROTO(u64 delta, unsigned long caddr0, unsigned long caddr1, - unsigned long caddr2, unsigned long caddr3), + unsigned long caddr2, unsigned long caddr3, + unsigned long caddr4), - TP_ARGS(delta, caddr0, caddr1, caddr2, caddr3), + TP_ARGS(delta, caddr0, caddr1, caddr2, caddr3, caddr4), TP_STRUCT__entry( __field(u64, delta) @@ -75,6 +76,7 @@ TRACE_EVENT(irqs_disable, __field(void*, caddr1) __field(void*, caddr2) __field(void*, caddr3) + __field(void*, caddr4) ), TP_fast_assign( @@ -83,20 +85,22 @@ TRACE_EVENT(irqs_disable, __entry->caddr1 = (void *)caddr1; __entry->caddr2 = (void *)caddr2; __entry->caddr3 = (void *)caddr3; + __entry->caddr4 = (void *)caddr4; ), - TP_printk("delta=%llu(ns) Callers:(%ps<-%ps<-%ps<-%ps)", __entry->delta, - __entry->caddr0, __entry->caddr1, - __entry->caddr2, __entry->caddr3) + TP_printk("delta=%llu(ns) Callers:(%ps<-%ps<-%ps<-%ps<-%ps)", + __entry->delta, __entry->caddr0, + 
__entry->caddr1, __entry->caddr2, + __entry->caddr3, __entry->caddr4) ); TRACE_EVENT(sched_preempt_disable, - TP_PROTO(u64 delta, bool irqs_disabled, - unsigned long caddr0, unsigned long caddr1, - unsigned long caddr2, unsigned long caddr3), + TP_PROTO(u64 delta, bool irqs_disabled, unsigned long caddr0, + unsigned long caddr1, unsigned long caddr2, + unsigned long caddr3, unsigned long caddr4), - TP_ARGS(delta, irqs_disabled, caddr0, caddr1, caddr2, caddr3), + TP_ARGS(delta, irqs_disabled, caddr0, caddr1, caddr2, caddr3, caddr4), TP_STRUCT__entry( __field(u64, delta) @@ -105,6 +109,7 @@ TRACE_EVENT(sched_preempt_disable, __field(void*, caddr1) __field(void*, caddr2) __field(void*, caddr3) + __field(void*, caddr4) ), TP_fast_assign( @@ -114,12 +119,14 @@ TRACE_EVENT(sched_preempt_disable, __entry->caddr1 = (void *)caddr1; __entry->caddr2 = (void *)caddr2; __entry->caddr3 = (void *)caddr3; + __entry->caddr4 = (void *)caddr4; ), - TP_printk("delta=%llu(ns) irqs_d=%d Callers:(%ps<-%ps<-%ps<-%ps)", + TP_printk("delta=%llu(ns) irqs_d=%d Callers:(%ps<-%ps<-%ps<-%ps<-%ps)", __entry->delta, __entry->irqs_disabled, __entry->caddr0, __entry->caddr1, - __entry->caddr2, __entry->caddr3) + __entry->caddr2, __entry->caddr3, + __entry->caddr4) ); #endif /* _TRACE_PREEMPTIRQ_H */ diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 64d63a5b836d..c606f6a07bd0 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -617,7 +617,7 @@ unsigned int sysctl_irqsoff_tracing_threshold_ns = 1000000UL; struct irqsoff_store { u64 ts; - unsigned long caddr[4]; + unsigned long caddr[5]; }; static DEFINE_PER_CPU(struct irqsoff_store, the_irqsoff); @@ -640,7 +640,8 @@ void tracer_hardirqs_on(unsigned long a0, unsigned long a1) if (!is_idle_task(current) && delta > sysctl_irqsoff_tracing_threshold_ns) trace_irqs_disable(delta, is->caddr[0], is->caddr[1], - is->caddr[2], is->caddr[3]); + is->caddr[2], is->caddr[3], + is->caddr[4]); is->ts = 0; 
lockdep_on(); #endif /* CONFIG_PREEMPTIRQ_EVENTS */ @@ -658,10 +659,11 @@ void tracer_hardirqs_off(unsigned long a0, unsigned long a1) lockdep_off(); is = &per_cpu(the_irqsoff, raw_smp_processor_id()); is->ts = sched_clock(); - is->caddr[0] = CALLER_ADDR0; - is->caddr[1] = CALLER_ADDR1; - is->caddr[2] = CALLER_ADDR2; - is->caddr[3] = CALLER_ADDR3; + is->caddr[0] = CALLER_ADDR1; + is->caddr[1] = CALLER_ADDR2; + is->caddr[2] = CALLER_ADDR3; + is->caddr[3] = CALLER_ADDR4; + is->caddr[4] = CALLER_ADDR5; lockdep_on(); #endif /* CONFIG_PREEMPTIRQ_EVENTS */ @@ -712,7 +714,7 @@ unsigned int sysctl_preemptoff_tracing_threshold_ns = 1000000UL; struct preempt_store { u64 ts; - unsigned long caddr[4]; + unsigned long caddr[5]; bool irqs_disabled; int pid; unsigned long ncsw; @@ -750,7 +752,7 @@ void tracer_preempt_on(unsigned long a0, unsigned long a1) if (delta > sysctl_preemptoff_tracing_threshold_ns) trace_sched_preempt_disable(delta, ps->irqs_disabled, ps->caddr[0], ps->caddr[1], - ps->caddr[2], ps->caddr[3]); + ps->caddr[2], ps->caddr[3], ps->caddr[4]); ps->ts = 0; lockdep_on(); #endif /* CONFIG_PREEMPTIRQ_EVENTS */ @@ -768,10 +770,11 @@ void tracer_preempt_off(unsigned long a0, unsigned long a1) lockdep_off(); ps = &per_cpu(the_ps, raw_smp_processor_id()); ps->ts = sched_clock(); - ps->caddr[0] = CALLER_ADDR0; - ps->caddr[1] = CALLER_ADDR1; - ps->caddr[2] = CALLER_ADDR2; - ps->caddr[3] = CALLER_ADDR3; + ps->caddr[0] = CALLER_ADDR1; + ps->caddr[1] = CALLER_ADDR2; + ps->caddr[2] = CALLER_ADDR3; + ps->caddr[3] = CALLER_ADDR4; + ps->caddr[4] = CALLER_ADDR5; ps->irqs_disabled = irqs_disabled(); ps->pid = current->pid; ps->ncsw = current->nvcsw + current->nivcsw;