tracing: Move a printk out of ftrace_raw_reg_event_foo()
Move the printk from each ftrace_raw_reg_event_foo() to its caller,
ftrace_event_enable_disable(). This way each regfunc trace event callback
no longer has to carry the same error report; the caller handles it once
for all of them.

See how much space this saves:

   text    data     bss      dec    hex filename
5345151 1961864 7103260 14410275 dbe223 vmlinux.o.old
5331487 1961864 7103260 14396611 dbacc3 vmlinux.o

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <4B1DC4AC.802@cn.fujitsu.com>
[start cmdline record before calling regfunc to avoid lost window of pid to comm resolution]
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
parent 614a71a26b
commit 3b8e427381
3 changed files with 19 additions and 27 deletions
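To make the pattern easier to see outside the kernel tree, here is a minimal, self-contained C sketch of what the patch does: the registration callback only returns an error code, and the single caller prints the report, mirroring what ftrace_event_enable_disable() does after this change. The names struct event, demo_regfunc() and event_enable() are hypothetical stand-ins for illustration, not the kernel symbols touched by the diff.

#include <stdio.h>

/* Hypothetical event descriptor; stands in for struct ftrace_event_call. */
struct event {
	const char *name;
	int enabled;
	int (*regfunc)(struct event *ev);	/* returns 0 on success */
};

static int demo_regfunc(struct event *ev)
{
	(void)ev;
	return -1;	/* pretend the tracepoint could not be registered */
}

/* Error reporting lives here, once, instead of in every regfunc. */
static int event_enable(struct event *ev)
{
	int ret = 0;

	if (!ev->enabled) {
		ret = ev->regfunc(ev);
		if (ret) {
			printf("event trace: Could not enable event %s\n",
			       ev->name);
			return ret;
		}
		ev->enabled = 1;
	}
	return ret;
}

int main(void)
{
	struct event ev = { .name = "demo", .regfunc = demo_regfunc };

	return event_enable(&ev) ? 1 : 0;
}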
include/trace/ftrace.h
@@ -555,13 +555,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
  *
  * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
  * {
- *	int ret;
- *
- *	ret = register_trace_<call>(ftrace_event_<call>);
- *	if (!ret)
- *		pr_info("event trace: Could not activate trace point "
- *			"probe to <call>");
- *	return ret;
+ *	return register_trace_<call>(ftrace_event_<call>);
  * }
  *
  * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
@@ -710,13 +704,7 @@ static void ftrace_raw_event_##call(proto) \
 									\
 static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
 {									\
-	int ret;							\
-									\
-	ret = register_trace_##call(ftrace_raw_event_##call);		\
-	if (ret)							\
-		pr_info("event trace: Could not activate trace point "	\
-			"probe to %s\n", #call);			\
-	return ret;							\
+	return register_trace_##call(ftrace_raw_event_##call);		\
 }									\
 									\
 static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
kernel/trace/trace_events.c
@@ -118,9 +118,11 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 }
 EXPORT_SYMBOL_GPL(trace_event_raw_init);
 
-static void ftrace_event_enable_disable(struct ftrace_event_call *call,
+static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 					int enable)
 {
+	int ret = 0;
+
 	switch (enable) {
 	case 0:
 		if (call->enabled) {
@@ -131,12 +133,20 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call,
 		break;
 	case 1:
 		if (!call->enabled) {
-			call->enabled = 1;
 			tracing_start_cmdline_record();
-			call->regfunc(call);
+			ret = call->regfunc(call);
+			if (ret) {
+				tracing_stop_cmdline_record();
+				pr_info("event trace: Could not enable event "
+					"%s\n", call->name);
+				break;
+			}
+			call->enabled = 1;
 		}
 		break;
 	}
+
+	return ret;
 }
 
 static void ftrace_clear_events(void)
@@ -415,7 +425,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	case 0:
 	case 1:
 		mutex_lock(&event_mutex);
-		ftrace_event_enable_disable(call, val);
+		ret = ftrace_event_enable_disable(call, val);
 		mutex_unlock(&event_mutex);
 		break;
@@ -425,7 +435,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	*ppos += cnt;
 
-	return cnt;
+	return ret ? ret : cnt;
 }
 
 static ssize_t
kernel/trace/trace_syscalls.c
@@ -325,10 +325,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
 	mutex_lock(&syscall_trace_lock);
 	if (!sys_refcount_enter)
 		ret = register_trace_sys_enter(ftrace_syscall_enter);
-	if (ret) {
-		pr_info("event trace: Could not activate"
-				"syscall entry trace point");
-	} else {
+	if (!ret) {
 		set_bit(num, enabled_enter_syscalls);
 		sys_refcount_enter++;
 	}
@@ -362,10 +359,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
 	mutex_lock(&syscall_trace_lock);
 	if (!sys_refcount_exit)
 		ret = register_trace_sys_exit(ftrace_syscall_exit);
-	if (ret) {
-		pr_info("event trace: Could not activate"
-				"syscall exit trace point");
-	} else {
+	if (!ret) {
 		set_bit(num, enabled_exit_syscalls);
 		sys_refcount_exit++;
 	}