tracing: Lock event_mutex before synth_event_mutex
[ Upstream commit fc800a10be26017f8f338bc8e500d48e3e6429d9 ] The synthetic event code uses synth_event_mutex to protect synth_event_list, and the event_trigger_write() path acquires locks in the order below. event_trigger_write(event_mutex) ->trigger_process_regex(trigger_cmd_mutex) ->event_hist_trigger_func(synth_event_mutex) On the other hand, the synthetic event creation and deletion paths call trace_add_event_call() and trace_remove_event_call(), which acquire event_mutex. In that case, if we keep synth_event_mutex locked while registering/unregistering synthetic events, the lock dependency will be inverted. To avoid this issue, the current synthetic event code uses a 2-phase process to create/delete events. For example, it searches existing events under synth_event_mutex to check for event-name conflicts, unlocks synth_event_mutex, then registers a new event with event_mutex locked. Finally, it locks synth_event_mutex and tries to add the new event to the list. But this introduces complexity and a window for name conflicts. To solve this more simply, this patch introduces trace_add_event_call_nolock() and trace_remove_event_call_nolock(), which don't acquire event_mutex internally. The synthetic event code can then lock event_mutex before synth_event_mutex, resolving the lock dependency issue in a simpler way. Link: http://lkml.kernel.org/r/154140844377.17322.13781091165954002713.stgit@devbox Reviewed-by: Tom Zanussi <tom.zanussi@linux.intel.com> Tested-by: Tom Zanussi <tom.zanussi@linux.intel.com> Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org> Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
67547b9b46
commit
dee3f77032
3 changed files with 42 additions and 22 deletions
|
@ -529,6 +529,8 @@ extern int trace_event_raw_init(struct trace_event_call *call);
|
|||
extern int trace_define_field(struct trace_event_call *call, const char *type,
|
||||
const char *name, int offset, int size,
|
||||
int is_signed, int filter_type);
|
||||
extern int trace_add_event_call_nolock(struct trace_event_call *call);
|
||||
extern int trace_remove_event_call_nolock(struct trace_event_call *call);
|
||||
extern int trace_add_event_call(struct trace_event_call *call);
|
||||
extern int trace_remove_event_call(struct trace_event_call *call);
|
||||
extern int trace_event_get_offsets(struct trace_event_call *call);
|
||||
|
|
|
@ -2302,11 +2302,11 @@ __trace_early_add_new_event(struct trace_event_call *call,
|
|||
struct ftrace_module_file_ops;
|
||||
static void __add_event_to_tracers(struct trace_event_call *call);
|
||||
|
||||
/* Add an additional event_call dynamically */
|
||||
int trace_add_event_call(struct trace_event_call *call)
|
||||
int trace_add_event_call_nolock(struct trace_event_call *call)
|
||||
{
|
||||
int ret;
|
||||
mutex_lock(&event_mutex);
|
||||
lockdep_assert_held(&event_mutex);
|
||||
|
||||
mutex_lock(&trace_types_lock);
|
||||
|
||||
ret = __register_event(call, NULL);
|
||||
|
@ -2314,6 +2314,16 @@ int trace_add_event_call(struct trace_event_call *call)
|
|||
__add_event_to_tracers(call);
|
||||
|
||||
mutex_unlock(&trace_types_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Add an additional event_call dynamically */
|
||||
int trace_add_event_call(struct trace_event_call *call)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&event_mutex);
|
||||
ret = trace_add_event_call_nolock(call);
|
||||
mutex_unlock(&event_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
@ -2363,17 +2373,29 @@ static int probe_remove_event_call(struct trace_event_call *call)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* no event_mutex version */
|
||||
int trace_remove_event_call_nolock(struct trace_event_call *call)
|
||||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&event_mutex);
|
||||
|
||||
mutex_lock(&trace_types_lock);
|
||||
down_write(&trace_event_sem);
|
||||
ret = probe_remove_event_call(call);
|
||||
up_write(&trace_event_sem);
|
||||
mutex_unlock(&trace_types_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Remove an event_call */
|
||||
int trace_remove_event_call(struct trace_event_call *call)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&event_mutex);
|
||||
mutex_lock(&trace_types_lock);
|
||||
down_write(&trace_event_sem);
|
||||
ret = probe_remove_event_call(call);
|
||||
up_write(&trace_event_sem);
|
||||
mutex_unlock(&trace_types_lock);
|
||||
ret = trace_remove_event_call_nolock(call);
|
||||
mutex_unlock(&event_mutex);
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -914,7 +914,7 @@ static int register_synth_event(struct synth_event *event)
|
|||
call->data = event;
|
||||
call->tp = event->tp;
|
||||
|
||||
ret = trace_add_event_call(call);
|
||||
ret = trace_add_event_call_nolock(call);
|
||||
if (ret) {
|
||||
pr_warn("Failed to register synthetic event: %s\n",
|
||||
trace_event_name(call));
|
||||
|
@ -938,7 +938,7 @@ static int unregister_synth_event(struct synth_event *event)
|
|||
struct trace_event_call *call = &event->call;
|
||||
int ret;
|
||||
|
||||
ret = trace_remove_event_call(call);
|
||||
ret = trace_remove_event_call_nolock(call);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -1015,12 +1015,10 @@ static void add_or_delete_synth_event(struct synth_event *event, int delete)
|
|||
if (delete)
|
||||
free_synth_event(event);
|
||||
else {
|
||||
mutex_lock(&synth_event_mutex);
|
||||
if (!find_synth_event(event->name))
|
||||
list_add(&event->list, &synth_event_list);
|
||||
else
|
||||
free_synth_event(event);
|
||||
mutex_unlock(&synth_event_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1032,6 +1030,7 @@ static int create_synth_event(int argc, char **argv)
|
|||
int i, consumed = 0, n_fields = 0, ret = 0;
|
||||
char *name;
|
||||
|
||||
mutex_lock(&event_mutex);
|
||||
mutex_lock(&synth_event_mutex);
|
||||
|
||||
/*
|
||||
|
@ -1104,8 +1103,6 @@ static int create_synth_event(int argc, char **argv)
|
|||
goto err;
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&synth_event_mutex);
|
||||
|
||||
if (event) {
|
||||
if (delete_event) {
|
||||
ret = unregister_synth_event(event);
|
||||
|
@ -1115,10 +1112,13 @@ static int create_synth_event(int argc, char **argv)
|
|||
add_or_delete_synth_event(event, ret);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&synth_event_mutex);
|
||||
mutex_unlock(&event_mutex);
|
||||
|
||||
return ret;
|
||||
err:
|
||||
mutex_unlock(&synth_event_mutex);
|
||||
mutex_unlock(&event_mutex);
|
||||
|
||||
for (i = 0; i < n_fields; i++)
|
||||
free_synth_field(fields[i]);
|
||||
|
@ -1129,12 +1129,10 @@ static int create_synth_event(int argc, char **argv)
|
|||
|
||||
static int release_all_synth_events(void)
|
||||
{
|
||||
struct list_head release_events;
|
||||
struct synth_event *event, *e;
|
||||
int ret = 0;
|
||||
|
||||
INIT_LIST_HEAD(&release_events);
|
||||
|
||||
mutex_lock(&event_mutex);
|
||||
mutex_lock(&synth_event_mutex);
|
||||
|
||||
list_for_each_entry(event, &synth_event_list, list) {
|
||||
|
@ -1144,16 +1142,14 @@ static int release_all_synth_events(void)
|
|||
}
|
||||
}
|
||||
|
||||
list_splice_init(&event->list, &release_events);
|
||||
|
||||
mutex_unlock(&synth_event_mutex);
|
||||
|
||||
list_for_each_entry_safe(event, e, &release_events, list) {
|
||||
list_for_each_entry_safe(event, e, &synth_event_list, list) {
|
||||
list_del(&event->list);
|
||||
|
||||
ret = unregister_synth_event(event);
|
||||
add_or_delete_synth_event(event, !ret);
|
||||
}
|
||||
mutex_unlock(&synth_event_mutex);
|
||||
mutex_unlock(&event_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue