Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Thomas Gleixner:

 - Fix for a task exit cleanup race caused by a missing preempt disable

 - Cleanup of the event notification functions with a massive reduction
   of duplicated code

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Factor out auxiliary events notification
  perf: Fix EXIT event notification
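The duplicated-code reduction in the diff below works by replacing three nearly identical RCU walks (task, comm and mmap notification) with one generic iterator, perf_event_aux(), driven by a match callback and an output callback. The following self-contained userspace C sketch illustrates just that callback pattern; the names (fake_event, event_aux, comm_match, comm_output) are invented for illustration and are not the kernel API:

	#include <stdio.h>

	/* Illustrative stand-in for struct perf_event; not the kernel type. */
	struct fake_event {
		int state;	/* negative means inactive, mirroring PERF_EVENT_STATE_INACTIVE */
		int wants_comm;
		int wants_mmap;
	};

	/* Same shape as the new perf_event_aux_match_cb/perf_event_aux_output_cb. */
	typedef int (match_cb)(struct fake_event *event, void *data);
	typedef void (output_cb)(struct fake_event *event, void *data);

	/* Generic walk: the common state filtering lives here once instead of
	 * being duplicated in every per-event-type helper. */
	static void event_aux(struct fake_event *events, int nr,
			      match_cb match, output_cb output, void *data)
	{
		for (int i = 0; i < nr; i++) {
			if (events[i].state < 0)
				continue;		/* skip inactive events */
			if (match(&events[i], data))
				output(&events[i], data);
		}
	}

	/* Per-event-type code reduces to a one-line predicate plus an emitter. */
	static int comm_match(struct fake_event *event, void *data)
	{
		(void)data;
		return event->wants_comm;
	}

	static void comm_output(struct fake_event *event, void *data)
	{
		printf("COMM notification for event %p: %s\n",
		       (void *)event, (const char *)data);
	}

	int main(void)
	{
		struct fake_event events[] = {
			{ .state = 0,  .wants_comm = 1 },
			{ .state = -1, .wants_comm = 1 },	/* inactive: filtered out */
			{ .state = 0,  .wants_mmap = 1 },	/* no comm interest */
		};

		event_aux(events, 3, comm_match, comm_output, "task renamed");
		return 0;
	}

Each event type then shrinks to a one-line predicate, which is exactly the shape perf_event_task_match(), perf_event_comm_match() and perf_event_mmap_match() take in the diff.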
commit 652df602f8
1 changed file with 89 additions and 151 deletions

kernel/events/core.c
@@ -4394,6 +4394,64 @@ perf_event_read_event(struct perf_event *event,
 	perf_output_end(&handle);
 }
 
+typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data);
+typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
+
+static void
+perf_event_aux_ctx(struct perf_event_context *ctx,
+		   perf_event_aux_match_cb match,
+		   perf_event_aux_output_cb output,
+		   void *data)
+{
+	struct perf_event *event;
+
+	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+		if (event->state < PERF_EVENT_STATE_INACTIVE)
+			continue;
+		if (!event_filter_match(event))
+			continue;
+		if (match(event, data))
+			output(event, data);
+	}
+}
+
+static void
+perf_event_aux(perf_event_aux_match_cb match,
+	       perf_event_aux_output_cb output,
+	       void *data,
+	       struct perf_event_context *task_ctx)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_event_context *ctx;
+	struct pmu *pmu;
+	int ctxn;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+		if (cpuctx->unique_pmu != pmu)
+			goto next;
+		perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
+		if (task_ctx)
+			goto next;
+		ctxn = pmu->task_ctx_nr;
+		if (ctxn < 0)
+			goto next;
+		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+		if (ctx)
+			perf_event_aux_ctx(ctx, match, output, data);
+next:
+		put_cpu_ptr(pmu->pmu_cpu_context);
+	}
+
+	if (task_ctx) {
+		preempt_disable();
+		perf_event_aux_ctx(task_ctx, match, output, data);
+		preempt_enable();
+	}
+	rcu_read_unlock();
+}
+
 /*
  * task tracking -- fork/exit
  *
@@ -4416,8 +4474,9 @@ struct perf_task_event {
 };
 
 static void perf_event_task_output(struct perf_event *event,
-				   struct perf_task_event *task_event)
+				   void *data)
 {
+	struct perf_task_event *task_event = data;
 	struct perf_output_handle handle;
 	struct perf_sample_data sample;
 	struct task_struct *task = task_event->task;
@@ -4445,62 +4504,11 @@ static void perf_event_task_output(struct perf_event *event,
 	task_event->event_id.header.size = size;
 }
 
-static int perf_event_task_match(struct perf_event *event)
+static int perf_event_task_match(struct perf_event *event,
+				 void *data __maybe_unused)
 {
-	if (event->state < PERF_EVENT_STATE_INACTIVE)
-		return 0;
-
-	if (!event_filter_match(event))
-		return 0;
-
-	if (event->attr.comm || event->attr.mmap ||
-	    event->attr.mmap_data || event->attr.task)
-		return 1;
-
-	return 0;
-}
-
-static void perf_event_task_ctx(struct perf_event_context *ctx,
-				struct perf_task_event *task_event)
-{
-	struct perf_event *event;
-
-	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-		if (perf_event_task_match(event))
-			perf_event_task_output(event, task_event);
-	}
-}
-
-static void perf_event_task_event(struct perf_task_event *task_event)
-{
-	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
-	struct pmu *pmu;
-	int ctxn;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->unique_pmu != pmu)
-			goto next;
-		perf_event_task_ctx(&cpuctx->ctx, task_event);
-
-		ctx = task_event->task_ctx;
-		if (!ctx) {
-			ctxn = pmu->task_ctx_nr;
-			if (ctxn < 0)
-				goto next;
-			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-			if (ctx)
-				perf_event_task_ctx(ctx, task_event);
-		}
-next:
-		put_cpu_ptr(pmu->pmu_cpu_context);
-	}
-	if (task_event->task_ctx)
-		perf_event_task_ctx(task_event->task_ctx, task_event);
-
-	rcu_read_unlock();
+	return event->attr.comm || event->attr.mmap ||
+	       event->attr.mmap_data || event->attr.task;
 }
 
 static void perf_event_task(struct task_struct *task,
@@ -4531,7 +4539,10 @@ static void perf_event_task(struct task_struct *task,
 		},
 	};
 
-	perf_event_task_event(&task_event);
+	perf_event_aux(perf_event_task_match,
+		       perf_event_task_output,
+		       &task_event,
+		       task_ctx);
 }
 
 void perf_event_fork(struct task_struct *task)
@@ -4557,8 +4568,9 @@ struct perf_comm_event {
 };
 
 static void perf_event_comm_output(struct perf_event *event,
-				   struct perf_comm_event *comm_event)
+				   void *data)
 {
+	struct perf_comm_event *comm_event = data;
 	struct perf_output_handle handle;
 	struct perf_sample_data sample;
 	int size = comm_event->event_id.header.size;
@@ -4585,39 +4597,16 @@ static void perf_event_comm_output(struct perf_event *event,
 	comm_event->event_id.header.size = size;
 }
 
-static int perf_event_comm_match(struct perf_event *event)
+static int perf_event_comm_match(struct perf_event *event,
+				 void *data __maybe_unused)
 {
-	if (event->state < PERF_EVENT_STATE_INACTIVE)
-		return 0;
-
-	if (!event_filter_match(event))
-		return 0;
-
-	if (event->attr.comm)
-		return 1;
-
-	return 0;
-}
-
-static void perf_event_comm_ctx(struct perf_event_context *ctx,
-				struct perf_comm_event *comm_event)
-{
-	struct perf_event *event;
-
-	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-		if (perf_event_comm_match(event))
-			perf_event_comm_output(event, comm_event);
-	}
+	return event->attr.comm;
 }
 
 static void perf_event_comm_event(struct perf_comm_event *comm_event)
 {
-	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
 	char comm[TASK_COMM_LEN];
 	unsigned int size;
-	struct pmu *pmu;
-	int ctxn;
 
 	memset(comm, 0, sizeof(comm));
 	strlcpy(comm, comm_event->task->comm, sizeof(comm));
@@ -4627,24 +4616,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	comm_event->comm_size = size;
 
 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
-	rcu_read_lock();
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->unique_pmu != pmu)
-			goto next;
-		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
-
-		ctxn = pmu->task_ctx_nr;
-		if (ctxn < 0)
-			goto next;
-
-		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-		if (ctx)
-			perf_event_comm_ctx(ctx, comm_event);
-next:
-		put_cpu_ptr(pmu->pmu_cpu_context);
-	}
-	rcu_read_unlock();
+
+	perf_event_aux(perf_event_comm_match,
+		       perf_event_comm_output,
+		       comm_event,
+		       NULL);
 }
 
 void perf_event_comm(struct task_struct *task)
@@ -4706,8 +4682,9 @@ struct perf_mmap_event {
 };
 
 static void perf_event_mmap_output(struct perf_event *event,
-				   struct perf_mmap_event *mmap_event)
+				   void *data)
 {
+	struct perf_mmap_event *mmap_event = data;
 	struct perf_output_handle handle;
 	struct perf_sample_data sample;
 	int size = mmap_event->event_id.header.size;
@@ -4734,46 +4711,24 @@ static void perf_event_mmap_output(struct perf_event *event,
 }
 
 static int perf_event_mmap_match(struct perf_event *event,
-				 struct perf_mmap_event *mmap_event,
-				 int executable)
+				 void *data)
 {
-	if (event->state < PERF_EVENT_STATE_INACTIVE)
-		return 0;
+	struct perf_mmap_event *mmap_event = data;
+	struct vm_area_struct *vma = mmap_event->vma;
+	int executable = vma->vm_flags & VM_EXEC;
 
-	if (!event_filter_match(event))
-		return 0;
-
-	if ((!executable && event->attr.mmap_data) ||
-	    (executable && event->attr.mmap))
-		return 1;
-
-	return 0;
-}
-
-static void perf_event_mmap_ctx(struct perf_event_context *ctx,
-				struct perf_mmap_event *mmap_event,
-				int executable)
-{
-	struct perf_event *event;
-
-	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-		if (perf_event_mmap_match(event, mmap_event, executable))
-			perf_event_mmap_output(event, mmap_event);
-	}
+	return (!executable && event->attr.mmap_data) ||
+	       (executable && event->attr.mmap);
 }
 
 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 {
-	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
 	struct vm_area_struct *vma = mmap_event->vma;
 	struct file *file = vma->vm_file;
 	unsigned int size;
 	char tmp[16];
 	char *buf = NULL;
 	const char *name;
-	struct pmu *pmu;
-	int ctxn;
 
 	memset(tmp, 0, sizeof(tmp));
 
@@ -4829,27 +4784,10 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 
 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->unique_pmu != pmu)
-			goto next;
-		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
-					vma->vm_flags & VM_EXEC);
-
-		ctxn = pmu->task_ctx_nr;
-		if (ctxn < 0)
-			goto next;
-
-		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-		if (ctx) {
-			perf_event_mmap_ctx(ctx, mmap_event,
-					vma->vm_flags & VM_EXEC);
-		}
-next:
-		put_cpu_ptr(pmu->pmu_cpu_context);
-	}
-	rcu_read_unlock();
+	perf_event_aux(perf_event_mmap_match,
+		       perf_event_mmap_output,
+		       mmap_event,
+		       NULL);
 
 	kfree(buf);
 }
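Per the pull message above, the EXIT fix is the small behavioral change folded into the refactoring: when perf_event_aux() is handed an explicit task_ctx (the task-exit path), the pmu loop skips dereferencing current's context, and the exiting task's context is walked once at the end with preemption disabled for the duration of the walk. The relevant lines appear in the first hunk:

	if (task_ctx) {
		preempt_disable();
		perf_event_aux_ctx(task_ctx, match, output, data);
		preempt_enable();
	}

Without that bracket, the walk over the dying task's context could be preempted mid-notification, which is the task exit cleanup race the pull message refers to.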