Merge branch 'tracing/ftrace' into tracing/urgent
commit debfcaf93e
40 changed files with 113 additions and 68 deletions
Makefile | 2
@@ -536,7 +536,7 @@ KBUILD_CFLAGS += -g
 KBUILD_AFLAGS += -gdwarf-2
 endif
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 KBUILD_CFLAGS += -pg
 endif
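Note: the -pg flag kept under CONFIG_FUNCTION_TRACER above is what makes the compiler emit a profiling call (mcount, or _mcount on some architectures) at every function entry; that call is the hook the function tracer attaches to. A minimal user-space sketch of the idea, not kernel code (the mcount stub here is only assumed for illustration):

  /* demo.c -- illustration only; compile with: gcc -pg -c demo.c */
  void mcount(void);            /* normally supplied by the profiling runtime */

  int add(int a, int b)
  {
          /* with -pg, the compiler effectively inserts a call to mcount()
           * at this point, before the function body runs */
          return a + b;
  }

The various CFLAGS_REMOVE_*.o = -pg lines further down strip that same flag from individual objects, which is how early-boot and tracer-internal code is kept free of the hook.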
@@ -16,8 +16,8 @@ config ARM
 select HAVE_ARCH_KGDB
 select HAVE_KPROBES if (!XIP_KERNEL)
 select HAVE_KRETPROBES if (HAVE_KPROBES)
-select HAVE_FTRACE if (!XIP_KERNEL)
-select HAVE_DYNAMIC_FTRACE if (HAVE_FTRACE)
+select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
+select HAVE_DYNAMIC_FTRACE if (HAVE_FUNCTION_TRACER)
 select HAVE_GENERIC_DMA_COHERENT
 help
 The ARM series is a line of low-power-consumption RISC chip designs

@@ -70,7 +70,7 @@ SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
 targets := vmlinux vmlinux.lds piggy.gz piggy.o font.o font.c \
 head.o misc.o $(OBJS)
 
-ifeq ($(CONFIG_FTRACE),y)
+ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
@@ -1,7 +1,7 @@
 #ifndef _ASM_ARM_FTRACE
 #define _ASM_ARM_FTRACE
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR ((long)(mcount))
 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
 
@@ -183,6 +183,6 @@ EXPORT_SYMBOL(_find_next_bit_be);
 
 EXPORT_SYMBOL(copy_page);
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(mcount);
 #endif
@@ -101,7 +101,7 @@ ENDPROC(ret_from_fork)
 #undef CALL
 #define CALL(x) .long x
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
 stmdb sp!, {r0-r3, lr}

@@ -149,7 +149,7 @@ trace:
 ftrace_stub:
 mov pc, lr
 
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 /*=============================================================================
 * SWI handler
@@ -112,7 +112,7 @@ config PPC
 bool
 default y
 select HAVE_DYNAMIC_FTRACE
-select HAVE_FTRACE
+select HAVE_FUNCTION_TRACER
 select ARCH_WANT_OPTIONAL_GPIOLIB
 select HAVE_IDE
 select HAVE_IOREMAP_PROT

@@ -122,7 +122,7 @@ KBUILD_CFLAGS += -mcpu=powerpc
 endif
 
 # Work around a gcc code-gen bug with -fno-omit-frame-pointer.
-ifeq ($(CONFIG_FTRACE),y)
+ifeq ($(CONFIG_FUNCTION_TRACER),y)
 KBUILD_CFLAGS += -mno-sched-epilog
 endif
@@ -1,7 +1,7 @@
 #ifndef _ASM_POWERPC_FTRACE
 #define _ASM_POWERPC_FTRACE
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR ((long)(_mcount))
 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
 
@@ -12,7 +12,7 @@ CFLAGS_prom_init.o += -fPIC
 CFLAGS_btext.o += -fPIC
 endif
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
 CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
@@ -1158,7 +1158,7 @@ machine_check_in_rtas:
 
 #endif /* CONFIG_PPC_RTAS */
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(_mcount)

@@ -884,7 +884,7 @@ _GLOBAL(enter_prom)
 mtlr r0
 blr
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(_mcount)
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(single_step_exception);
 EXPORT_SYMBOL(sys_sigreturn);
 #endif
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(_mcount);
 #endif
 
@@ -1,6 +1,6 @@
 CFLAGS_bootx_init.o += -fPIC
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog
 endif
@@ -12,7 +12,7 @@ config SPARC64
 bool
 default y
 select HAVE_DYNAMIC_FTRACE
-select HAVE_FTRACE
+select HAVE_FUNCTION_TRACER
 select HAVE_IDE
 select HAVE_LMB
 select HAVE_ARCH_KGDB

@@ -33,7 +33,7 @@ config DEBUG_PAGEALLOC
 
 config MCOUNT
 bool
-depends on STACK_DEBUG || FTRACE
+depends on STACK_DEBUG || FUNCTION_TRACER
 default y
 
 config FRAME_POINTER
@@ -93,7 +93,7 @@ mcount:
 nop
 1:
 #endif
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 mov %o7, %o0
 .globl mcount_call

@@ -119,7 +119,7 @@ mcount_call:
 .size _mcount,.-_mcount
 .size mcount,.-mcount
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 .globl ftrace_stub
 .type ftrace_stub,#function
 ftrace_stub:
@@ -28,7 +28,7 @@ config X86
 select HAVE_KRETPROBES
 select HAVE_FTRACE_MCOUNT_RECORD
 select HAVE_DYNAMIC_FTRACE
-select HAVE_FTRACE
+select HAVE_FUNCTION_TRACER
 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 select HAVE_ARCH_KGDB if !X86_VOYAGER
 select HAVE_ARCH_TRACEHOOK

@@ -6,7 +6,7 @@ extra-y := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinu
 
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
@@ -1149,7 +1149,7 @@ ENDPROC(xen_failsafe_callback)
 
 #endif /* CONFIG_XEN */
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 ENTRY(mcount)

@@ -1204,7 +1204,7 @@ trace:
 jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 .section .rodata,"a"
 #include "syscall_table_32.S"
@@ -61,7 +61,7 @@
 
 .code64
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
 retq

@@ -138,7 +138,7 @@ trace:
 jmp ftrace_stub
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
@@ -62,6 +62,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 unsigned char *new_code)
 {
 unsigned char replaced[MCOUNT_INSN_SIZE];
+int ret;
 
 /*
 * Note: Due to modules and __init, code can

@@ -77,8 +78,9 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
 return 2;
 
-WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code,
-MCOUNT_INSN_SIZE));
+ret = __copy_to_user_inatomic((char __user *)ip, new_code,
+MCOUNT_INSN_SIZE);
+WARN_ON_ONCE(ret);
 
 sync_core();
@@ -5,7 +5,7 @@
 #include <asm/desc.h>
 #include <asm/ftrace.h>
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /* mcount is defined in assembly */
 EXPORT_SYMBOL(mcount);
 #endif

@@ -12,7 +12,7 @@
 #include <asm/desc.h>
 #include <asm/ftrace.h>
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /* mcount is defined in assembly */
 EXPORT_SYMBOL(mcount);
 #endif

@@ -1,4 +1,4 @@
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_spinlock.o = -pg
 CFLAGS_REMOVE_time.o = -pg
@@ -1,7 +1,7 @@
 #ifndef ASM_X86__FTRACE_H
 #define ASM_X86__FTRACE_H
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR ((long)(mcount))
 #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
 
@@ -19,6 +19,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 }
 #endif
 
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #endif /* ASM_X86__FTRACE_H */
@@ -8,7 +8,7 @@
 #include <linux/types.h>
 #include <linux/kallsyms.h>
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 
 extern int ftrace_enabled;
 extern int

@@ -36,12 +36,12 @@ void clear_ftrace_function(void);
 
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
-#else /* !CONFIG_FTRACE */
+#else /* !CONFIG_FUNCTION_TRACER */
 # define register_ftrace_function(ops) do { } while (0)
 # define unregister_ftrace_function(ops) do { } while (0)
 # define clear_ftrace_function(ops) do { } while (0)
 static inline void ftrace_kill_atomic(void) { }
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 # define FTRACE_HASHBITS 10

@@ -101,7 +101,7 @@ void ftrace_kill_atomic(void);
 
 static inline void tracer_disable(void)
 {
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 ftrace_enabled = 0;
 #endif
 }

@@ -113,7 +113,7 @@ static inline void tracer_disable(void)
 */
 static inline int __ftrace_enabled_save(void)
 {
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 int saved_ftrace_enabled = ftrace_enabled;
 ftrace_enabled = 0;
 return saved_ftrace_enabled;

@@ -124,7 +124,7 @@ static inline int __ftrace_enabled_save(void)
 
 static inline void __ftrace_enabled_restore(int enabled)
 {
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 ftrace_enabled = enabled;
 #endif
 }
@@ -13,7 +13,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
 
 CFLAGS_REMOVE_sched.o = -mno-spe
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
 CFLAGS_REMOVE_lockdep.o = -pg
 CFLAGS_REMOVE_lockdep_proc.o = -pg

@@ -88,7 +88,7 @@ obj-$(CONFIG_MARKERS) += marker.o
 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_FTRACE) += trace/
+obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 
@@ -464,7 +464,7 @@ static struct ctl_table kern_table[] = {
 .mode = 0644,
 .proc_handler = &proc_dointvec,
 },
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 {
 .ctl_name = CTL_UNNUMBERED,
 .procname = "ftrace_enabled",
@@ -1,11 +1,12 @@
 #
-# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
+# Architectures that offer an FUNCTION_TRACER implementation should
+# select HAVE_FUNCTION_TRACER:
 #
 
 config NOP_TRACER
 bool
 
-config HAVE_FTRACE
+config HAVE_FUNCTION_TRACER
 bool
 select NOP_TRACER

@@ -28,9 +29,9 @@ config TRACING
 select STACKTRACE
 select TRACEPOINTS
 
-config FTRACE
+config FUNCTION_TRACER
 bool "Kernel Function Tracer"
-depends on HAVE_FTRACE
+depends on HAVE_FUNCTION_TRACER
 depends on DEBUG_KERNEL
 select FRAME_POINTER
 select TRACING

@@ -49,7 +50,6 @@ config IRQSOFF_TRACER
 default n
 depends on TRACE_IRQFLAGS_SUPPORT
 depends on GENERIC_TIME
-depends on HAVE_FTRACE
 depends on DEBUG_KERNEL
 select TRACE_IRQFLAGS
 select TRACING

@@ -73,7 +73,6 @@ config PREEMPT_TRACER
 default n
 depends on GENERIC_TIME
 depends on PREEMPT
-depends on HAVE_FTRACE
 depends on DEBUG_KERNEL
 select TRACING
 select TRACER_MAX_TRACE

@@ -101,7 +100,6 @@ config SYSPROF_TRACER
 
 config SCHED_TRACER
 bool "Scheduling Latency Tracer"
-depends on HAVE_FTRACE
 depends on DEBUG_KERNEL
 select TRACING
 select CONTEXT_SWITCH_TRACER

@@ -112,7 +110,6 @@ config SCHED_TRACER
 
 config CONTEXT_SWITCH_TRACER
 bool "Trace process context switches"
-depends on HAVE_FTRACE
 depends on DEBUG_KERNEL
 select TRACING
 select MARKERS

@@ -122,7 +119,6 @@ config CONTEXT_SWITCH_TRACER
 
 config BOOT_TRACER
 bool "Trace boot initcalls"
-depends on HAVE_FTRACE
 depends on DEBUG_KERNEL
 select TRACING
 help

@@ -141,9 +137,9 @@ config BOOT_TRACER
 
 config STACK_TRACER
 bool "Trace max stack"
-depends on HAVE_FTRACE
+depends on HAVE_FUNCTION_TRACER
 depends on DEBUG_KERNEL
-select FTRACE
+select FUNCTION_TRACER
 select STACKTRACE
 help
 This special tracer records the maximum stack footprint of the

@@ -160,7 +156,7 @@ config STACK_TRACER
 
 config DYNAMIC_FTRACE
 bool "enable/disable ftrace tracepoints dynamically"
-depends on FTRACE
+depends on FUNCTION_TRACER
 depends on HAVE_DYNAMIC_FTRACE
 depends on DEBUG_KERNEL
 default y

@@ -170,7 +166,7 @@ config DYNAMIC_FTRACE
 with a No-Op instruction) as they are called. A table is
 created to dynamically enable them again.
 
-This way a CONFIG_FTRACE kernel is slightly larger, but otherwise
+This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise
 has native performance as long as no tracing is active.
 
 The changes to the code are done by a kernel thread that
@@ -1,7 +1,7 @@
 # Do not instrument the tracer itself:
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
 
@@ -10,13 +10,13 @@ CFLAGS_trace_selftest_dynamic.o = -pg
 obj-y += trace_selftest_dynamic.o
 endif
 
-obj-$(CONFIG_FTRACE) += libftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
-obj-$(CONFIG_FTRACE) += trace_functions.o
+obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
@@ -164,10 +164,14 @@ static DEFINE_SPINLOCK(ftrace_hash_lock);
 #define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags)
 #define ftrace_hash_unlock(flags) \
 spin_unlock_irqrestore(&ftrace_hash_lock, flags)
+static void ftrace_release_hash(unsigned long start, unsigned long end);
 #else
 /* This is protected via the ftrace_lock with MCOUNT_RECORD. */
 #define ftrace_hash_lock(flags) do { (void)(flags); } while (0)
 #define ftrace_hash_unlock(flags) do { } while(0)
+static inline void ftrace_release_hash(unsigned long start, unsigned long end)
+{
+}
 #endif
 
 /*

@@ -347,6 +351,7 @@ void ftrace_release(void *start, unsigned long size)
 }
 spin_unlock(&ftrace_lock);
 
+ftrace_release_hash(s, e);
 }
 
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)

@@ -1659,6 +1664,44 @@ void __init ftrace_init(void)
 ftrace_disabled = 1;
 }
 #else /* CONFIG_FTRACE_MCOUNT_RECORD */
+
+static void ftrace_release_hash(unsigned long start, unsigned long end)
+{
+struct dyn_ftrace *rec;
+struct hlist_node *t, *n;
+struct hlist_head *head, temp_list;
+unsigned long flags;
+int i, cpu;
+
+preempt_disable_notrace();
+
+/* disable incase we call something that calls mcount */
+cpu = raw_smp_processor_id();
+per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
+
+ftrace_hash_lock(flags);
+
+for (i = 0; i < FTRACE_HASHSIZE; i++) {
+INIT_HLIST_HEAD(&temp_list);
+head = &ftrace_hash[i];
+
+/* all CPUS are stopped, we are safe to modify code */
+hlist_for_each_entry_safe(rec, t, n, head, node) {
+if (rec->flags & FTRACE_FL_FREE)
+continue;
+
+if ((rec->ip >= start) && (rec->ip < end))
+ftrace_free_rec(rec);
+}
+}
+
+ftrace_hash_unlock(flags);
+
+per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
+preempt_enable_notrace();
+
+}
+
 static int ftraced(void *ignore)
 {
 unsigned long usecs;
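The ftrace_release_hash() function added above raises a per-CPU counter before walking the hash, as its own comment notes, so that anything it calls which happens to hit mcount is ignored rather than recursing back into the tracer. A rough, stand-alone sketch of that guard pattern (hypothetical names, a plain counter standing in for the per-CPU variable, not the kernel's implementation):

  #include <stdio.h>

  static int in_tracer;                    /* stand-in for the per-CPU counter */

  static void trace_hook(const char *what)
  {
          if (in_tracer)                   /* re-entered from our own work */
                  return;
          printf("trace: %s\n", what);
  }

  static void cleanup(void)
  {
          trace_hook("cleanup");           /* ignored while the guard is held */
  }

  static void run_untraced(void (*work)(void))
  {
          in_tracer++;
          work();
          in_tracer--;
  }

  int main(void)
  {
          trace_hook("normal call");       /* recorded */
          run_untraced(cleanup);           /* the nested hook bails out */
          return 0;
  }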
@@ -851,7 +851,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 preempt_enable_notrace();
 }
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
 {

@@ -335,7 +335,7 @@ void update_max_tr_single(struct trace_array *tr,
 
 extern cycle_t ftrace_now(int cpu);
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 void tracing_start_function_trace(void);
 void tracing_stop_function_trace(void);
 #else

@@ -64,7 +64,7 @@ static void function_trace_ctrl_update(struct trace_array *tr)
 
 static struct tracer function_trace __read_mostly =
 {
-.name = "ftrace",
+.name = "function",
 .init = function_trace_init,
 .reset = function_trace_reset,
 .ctrl_update = function_trace_ctrl_update,
@@ -63,7 +63,7 @@ irq_trace(void)
 */
 static __cacheline_aligned_in_smp unsigned long max_sequence;
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /*
 * irqsoff uses its own tracer function to keep the overhead down:
 */

@@ -104,7 +104,7 @@ static struct ftrace_ops trace_ops __read_mostly =
 {
 .func = irqsoff_tracer_call,
 };
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 /*
 * Should this new latency be reported/recorded?

@@ -31,7 +31,7 @@ static raw_spinlock_t wakeup_lock =
 
 static void __wakeup_reset(struct trace_array *tr);
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /*
 * irqsoff uses its own tracer function to keep the overhead down:
 */

@@ -96,7 +96,7 @@ static struct ftrace_ops trace_ops __read_mostly =
 {
 .func = wakeup_tracer_call,
 };
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 /*
 * Should this new latency be reported/recorded?
@@ -70,7 +70,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 return ret;
 }
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 

@@ -226,7 +226,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 return ret;
 }
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_IRQSOFF_TRACER
 int
@@ -44,6 +44,10 @@ static inline void check_stack(void)
 if (this_size <= max_stack_size)
 return;
 
+/* we do not handle interrupt stacks yet */
+if (!object_is_on_stack(&this_size))
+return;
+
 raw_local_irq_save(flags);
 __raw_spin_lock(&max_stack_lock);
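For context on the check added above: object_is_on_stack() amounts to testing whether an address falls inside the current task's stack area, so samples taken on a separate interrupt stack are skipped, matching the "we do not handle interrupt stacks yet" comment. A minimal sketch of that range test (assumed names and a fixed size, not the kernel helper):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define STACK_AREA_SIZE (8 * 1024)        /* assumed per-thread stack size */

  /* True if 'obj' lies inside [base, base + STACK_AREA_SIZE), the same kind
   * of range test object_is_on_stack() performs against the task's stack. */
  static bool on_stack_area(const void *base, const void *obj)
  {
          uintptr_t b = (uintptr_t)base;
          uintptr_t p = (uintptr_t)obj;

          return p >= b && p < b + STACK_AREA_SIZE;
  }

  int main(void)
  {
          char fake_stack[STACK_AREA_SIZE];  /* stand-in for a task's stack */
          static int elsewhere;              /* lives outside that area */

          printf("%d\n", on_stack_area(fake_stack, &fake_stack[64]));  /* 1 */
          printf("%d\n", on_stack_area(fake_stack, &elsewhere));       /* 0 */
          return 0;
  }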
@@ -2,7 +2,7 @@
 # Makefile for some libs needed in the kernel.
 #
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
 endif