ftrace: Add default recursion protection for function tracing
As more users of the function tracer utility are being added, they do not always add the necessary recursion protection. To protect against function recursion due to tracing, if a callback's ftrace_ops does not explicitly declare that it provides its own recursion protection (by setting the FTRACE_OPS_FL_RECURSION_SAFE flag), the mcount trampoline will invoke the list operation, which adds recursion protection. If the flag is set, the function is called directly with no extra protection. Note: the list operation is also used when more than one function callback is registered, or when the arch does not support all of the function tracer features. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
parent
5767cfeaa9
commit
4740974a68
8 changed files with 24 additions and 8 deletions
|
@ -85,6 +85,10 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
|
|||
* passing regs to the handler.
|
||||
* Note, if this flag is set, the SAVE_REGS flag will automatically
|
||||
* get set upon registering the ftrace_ops, if the arch supports it.
|
||||
* RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
|
||||
* that the call back has its own recursion protection. If it does
|
||||
* not set this, then the ftrace infrastructure will add recursion
|
||||
* protection for the caller.
|
||||
*/
|
||||
enum {
|
||||
FTRACE_OPS_FL_ENABLED = 1 << 0,
|
||||
|
@ -93,6 +97,7 @@ enum {
|
|||
FTRACE_OPS_FL_CONTROL = 1 << 3,
|
||||
FTRACE_OPS_FL_SAVE_REGS = 1 << 4,
|
||||
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
|
||||
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
|
||||
};
|
||||
|
||||
struct ftrace_ops {
|
||||
|
|
|
@ -66,6 +66,7 @@
|
|||
|
||||
static struct ftrace_ops ftrace_list_end __read_mostly = {
|
||||
.func = ftrace_stub,
|
||||
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
/* ftrace_enabled is a method to turn ftrace on or off */
|
||||
|
@ -221,12 +222,13 @@ static void update_ftrace_function(void)
|
|||
|
||||
/*
|
||||
* If we are at the end of the list and this ops is
|
||||
* not dynamic and the arch supports passing ops, then have the
|
||||
* mcount trampoline call the function directly.
|
||||
* recursion safe and not dynamic and the arch supports passing ops,
|
||||
* then have the mcount trampoline call the function directly.
|
||||
*/
|
||||
if (ftrace_ops_list == &ftrace_list_end ||
|
||||
(ftrace_ops_list->next == &ftrace_list_end &&
|
||||
!(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
|
||||
(ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
|
||||
!FTRACE_FORCE_LIST_FUNC)) {
|
||||
/* Set the ftrace_ops that the arch callback uses */
|
||||
if (ftrace_ops_list == &global_ops)
|
||||
|
@ -867,6 +869,7 @@ static void unregister_ftrace_profiler(void)
|
|||
#else
|
||||
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
|
||||
.func = function_profile_call,
|
||||
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
static int register_ftrace_profiler(void)
|
||||
|
@ -1049,6 +1052,7 @@ static struct ftrace_ops global_ops = {
|
|||
.func = ftrace_stub,
|
||||
.notrace_hash = EMPTY_HASH,
|
||||
.filter_hash = EMPTY_HASH,
|
||||
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
static DEFINE_MUTEX(ftrace_regex_lock);
|
||||
|
@ -3967,6 +3971,7 @@ void __init ftrace_init(void)
|
|||
|
||||
static struct ftrace_ops global_ops = {
|
||||
.func = ftrace_stub,
|
||||
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
static int __init ftrace_nodyn_init(void)
|
||||
|
@ -4023,6 +4028,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
|
|||
|
||||
static struct ftrace_ops control_ops = {
|
||||
.func = ftrace_ops_control_func,
|
||||
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
static inline void
|
||||
|
|
|
@ -1721,6 +1721,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
|
|||
static struct ftrace_ops trace_ops __initdata =
|
||||
{
|
||||
.func = function_test_events_call,
|
||||
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
static __init void event_trace_self_test_with_function(void)
|
||||
|
|
|
@ -153,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
|
|||
static struct ftrace_ops trace_ops __read_mostly =
|
||||
{
|
||||
.func = function_trace_call,
|
||||
.flags = FTRACE_OPS_FL_GLOBAL,
|
||||
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
static struct ftrace_ops trace_stack_ops __read_mostly =
|
||||
{
|
||||
.func = function_stack_trace_call,
|
||||
.flags = FTRACE_OPS_FL_GLOBAL,
|
||||
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
/* Our two options */
|
||||
|
|
|
@ -154,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
|
|||
static struct ftrace_ops trace_ops __read_mostly =
|
||||
{
|
||||
.func = irqsoff_tracer_call,
|
||||
.flags = FTRACE_OPS_FL_GLOBAL,
|
||||
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
#endif /* CONFIG_FUNCTION_TRACER */
|
||||
|
||||
|
|
|
@ -130,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
|
|||
static struct ftrace_ops trace_ops __read_mostly =
|
||||
{
|
||||
.func = wakeup_tracer_call,
|
||||
.flags = FTRACE_OPS_FL_GLOBAL,
|
||||
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
#endif /* CONFIG_FUNCTION_TRACER */
|
||||
|
||||
|
|
|
@ -148,19 +148,22 @@ static void trace_selftest_test_dyn_func(unsigned long ip,
|
|||
|
||||
static struct ftrace_ops test_probe1 = {
|
||||
.func = trace_selftest_test_probe1_func,
|
||||
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
static struct ftrace_ops test_probe2 = {
|
||||
.func = trace_selftest_test_probe2_func,
|
||||
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
static struct ftrace_ops test_probe3 = {
|
||||
.func = trace_selftest_test_probe3_func,
|
||||
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
static struct ftrace_ops test_global = {
|
||||
.func = trace_selftest_test_global_func,
|
||||
.flags = FTRACE_OPS_FL_GLOBAL,
|
||||
.func = trace_selftest_test_global_func,
|
||||
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
static void print_counts(void)
|
||||
|
|
|
@ -137,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
|
|||
static struct ftrace_ops trace_ops __read_mostly =
|
||||
{
|
||||
.func = stack_trace_call,
|
||||
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
|
|
Loading…
Reference in a new issue