tracing/ftrace: fix a race condition in sched_switch tracer
Impact: fix race condition in sched_switch tracer This patch fixes a race condition in the sched_switch tracer. If several tasks (i.e. concurrent initcalls) are playing with tracing_start_cmdline_record() and tracing_stop_cmdline_record(), the following situation could happen: _ Task A and B are using the same tracepoint probe. Task A holds it. Task B is sleeping and doesn't hold it. _ Task A frees the sched tracer, then sched_ref is decremented to 0. _ Task A is preempted and hadn't yet unregistered its tracepoint probe, then B runs. _ B increments sched_ref, sees it's 1 and then guesses it has to register its probe. But the probe has not yet been unregistered by task A. _ A lot of bad things can happen after that... Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
71566a0d16
commit
07695fa04e
1 changed file with 5 additions and 0 deletions
|
@ -17,6 +17,7 @@
|
|||
/* Trace array the sched_switch probes write their events into. */
static struct trace_array *ctx_trace;
/* Non-zero while tracing is active; presumably gates probe output — TODO confirm against probe_sched_switch(). */
static int __read_mostly tracer_enabled;
/* Reference count of sched_switch tracepoint users (start/stop pairs). */
static atomic_t sched_ref;
/* Serializes sched_ref transitions with probe (un)registration — the fix for the start/stop race described in the changelog. */
static DEFINE_MUTEX(tracepoint_mutex);
|
||||
|
||||
static void
|
||||
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
|
||||
|
@ -125,18 +126,22 @@ static void tracing_start_sched_switch(void)
|
|||
{
|
||||
long ref;
|
||||
|
||||
mutex_lock(&tracepoint_mutex);
|
||||
ref = atomic_inc_return(&sched_ref);
|
||||
if (ref == 1)
|
||||
tracing_sched_register();
|
||||
mutex_unlock(&tracepoint_mutex);
|
||||
}
|
||||
|
||||
static void tracing_stop_sched_switch(void)
|
||||
{
|
||||
long ref;
|
||||
|
||||
mutex_lock(&tracepoint_mutex);
|
||||
ref = atomic_dec_and_test(&sched_ref);
|
||||
if (ref)
|
||||
tracing_sched_unregister();
|
||||
mutex_unlock(&tracepoint_mutex);
|
||||
}
|
||||
|
||||
void tracing_start_cmdline_record(void)
|
||||
|
|
Loading…
Reference in a new issue