Merge branch 'tip/perf/urgent-3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/urgent
commit a89d4bd055
3 changed files with 36 additions and 5 deletions
@@ -824,27 +824,27 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 #define __PV_IS_CALLEE_SAVE(func)			\
 	((struct paravirt_callee_save) { func })
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
 	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }
 
-static inline void arch_local_irq_restore(unsigned long f)
+static inline notrace void arch_local_irq_restore(unsigned long f)
 {
 	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
 	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
 	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
 	unsigned long f;
 
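
The hunk above only adds the notrace annotation to the paravirt irq-flag helpers, so tracing code can call them without recursing into the tracer. For illustration only (not part of this diff), notrace in the kernel is shorthand for GCC's no-instrument-function attribute, roughly:

/* Illustration, not from this commit: the usual definition of notrace
 * (see include/linux/compiler.h).  A function carrying this attribute
 * gets no mcount call inserted by the compiler, so ftrace never sees
 * it and cannot recurse through it.
 */
#define notrace __attribute__((no_instrument_function))

/* hypothetical example of applying it */
static inline notrace unsigned long example_save_flags(void)
{
        return 0;       /* safe to call from tracing code itself */
}
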
@@ -2326,6 +2326,18 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
 			   mod->num_trace_events, GFP_KERNEL);
 #endif
+#ifdef CONFIG_TRACING
+	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
+					 sizeof(*mod->trace_bprintk_fmt_start),
+					 &mod->num_trace_bprintk_fmt);
+	/*
+	 * This section contains pointers to allocated objects in the trace
+	 * code and not scanning it leads to false positives.
+	 */
+	kmemleak_scan_area(mod->trace_bprintk_fmt_start,
+			   sizeof(*mod->trace_bprintk_fmt_start) *
+			   mod->num_trace_bprintk_fmt, GFP_KERNEL);
+#endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 	/* sechdrs[0].sh_size is always zero */
 	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
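
This hunk registers the module's __trace_printk_fmt section with kmemleak: per the comment in the diff, the section holds pointers to objects allocated by the trace code, and without scanning it those objects have no visible reference and are reported as leaks. A minimal sketch of the same pattern, with hypothetical names (my_fmts, my_init, __my_fmt_section) and assuming the region is not otherwise scanned by kmemleak:

#include <linux/slab.h>
#include <linux/kmemleak.h>

/* hypothetical: pointer table living outside the normally-scanned areas,
 * e.g. in a dedicated ELF section like __trace_printk_fmt */
static const char *my_fmts[16] __attribute__((section("__my_fmt_section")));

static int my_init(void)
{
        my_fmts[0] = kstrdup("example fmt\n", GFP_KERNEL);
        if (!my_fmts[0])
                return -ENOMEM;

        /*
         * If kmemleak does not scan this section on its own, the
         * kstrdup()'d string above looks unreferenced and would be
         * flagged as a leak; registering a scan area avoids the
         * false positive (same call as in find_module_sections()).
         */
        kmemleak_scan_area(my_fmts, sizeof(my_fmts), GFP_KERNEL);
        return 0;
}
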
@@ -1284,6 +1284,8 @@ void trace_dump_stack(void)
 	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1302,6 +1304,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (unlikely(in_nmi()))
 		return;
 
+	/*
+	 * prevent recursion, since the user stack tracing may
+	 * trigger other kernel events.
+	 */
+	preempt_disable();
+	if (__this_cpu_read(user_stack_count))
+		goto out;
+
+	__this_cpu_inc(user_stack_count);
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1319,6 +1333,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
+	__this_cpu_dec(user_stack_count);
+
+ out:
+	preempt_enable();
 }
 
 #ifdef UNUSED
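
The trace.c hunks wrap the user-stack capture in a per-CPU recursion guard: preemption is disabled, a per-CPU counter is checked and incremented before the ring buffer is reserved, and decremented on the way out, so a user stack trace that itself triggers another trace event cannot recurse on the same CPU. Stripped of the tracing details, the pattern looks roughly like this (guarded_work and do_work are hypothetical names, not from the diff):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, guard_count);

static void do_work(void)
{
        /* ... may indirectly call guarded_work() again ... */
}

static void guarded_work(void)
{
        /*
         * Disable preemption so we stay on one CPU while touching the
         * per-CPU counter, then bail out if this CPU is already inside
         * guarded_work() (i.e. do_work() re-entered us).
         */
        preempt_disable();
        if (__this_cpu_read(guard_count))
                goto out;

        __this_cpu_inc(guard_count);

        do_work();

        __this_cpu_dec(guard_count);
 out:
        preempt_enable();
}
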