perf: Humanize the number of contexts

Instead of hardcoding the number of contexts for the recursion
barriers, define a C preprocessor constant to make the code more
self-explanatory.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
Author: Frederic Weisbecker
Date:   2010-08-14 20:45:13 +02:00
parent 927c7a9e92
commit 7ae07ea3a4
3 changed files with 14 additions and 12 deletions
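For readers new to this code, a minimal sketch of the recursion barrier these arrays implement, modeled on the kernel's get_recursion_context()/put_recursion_context() helpers; treat exact names and slot indices as illustrative:

#include <linux/hardirq.h>
#include <linux/compiler.h>

#define PERF_NR_CONTEXTS	4	/* task, softirq, hardirq, nmi */

/* Claim the slot for the current context; -1 if already inside it. */
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	/* Map the current execution context onto one of the four slots. */
	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;		/* re-entered the same context: refuse */

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

A task can only be interrupted by a softirq, a softirq by a hardirq, and a hardirq by an NMI, so at most four nesting levels can be live at once on a CPU; that bound is what PERF_NR_CONTEXTS captures.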

include/linux/perf_event.h

@@ -808,6 +808,12 @@ struct perf_event_context {
 	struct rcu_head			rcu_head;
 };
 
+/*
+ * Number of contexts where an event can trigger:
+ *	task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS	4
+
 /**
  * struct perf_event_cpu_context - per cpu event context structure
  */
@@ -821,12 +827,8 @@ struct perf_cpu_context {
 	struct mutex			hlist_mutex;
 	int				hlist_refcount;
 
-	/*
-	 * Recursion avoidance:
-	 *
-	 * task, softirq, irq, nmi context
-	 */
-	int				recursion[4];
+	/* Recursion avoidance in each contexts */
+	int				recursion[PERF_NR_CONTEXTS];
 };
 
 struct perf_output_handle {

kernel/perf_event.c

@@ -1772,7 +1772,7 @@ struct callchain_cpus_entries {
 	struct perf_callchain_entry	*cpu_entries[0];
 };
 
-static DEFINE_PER_CPU(int, callchain_recursion[4]);
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
 static atomic_t nr_callchain_events;
 static DEFINE_MUTEX(callchain_mutex);
 struct callchain_cpus_entries *callchain_cpus_entries;
@@ -1828,7 +1828,7 @@ static int alloc_callchain_buffers(void)
 	if (!entries)
 		return -ENOMEM;
 
-	size = sizeof(struct perf_callchain_entry) * 4;
+	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
 
 	for_each_possible_cpu(cpu) {
 		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
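For orientation, a hedged sketch of how a consumer picks its per-context entry out of that per-CPU block, along the lines of the kernel's get_callchain_entry(); details are illustrative, not verbatim:

/*
 * Each CPU owns a block of PERF_NR_CONTEXTS callchain entries; the
 * recursion context index (rctx) selects the slot for the current
 * nesting level, so nested events never share an entry.
 */
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}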

kernel/trace/trace_event_perf.c

@@ -9,7 +9,7 @@
 #include <linux/kprobes.h>
 #include "trace.h"
 
-static char *perf_trace_buf[4];
+static char *perf_trace_buf[PERF_NR_CONTEXTS];
 
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -45,7 +45,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 		char *buf;
 		int i;
 
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			buf = (char *)alloc_percpu(perf_trace_t);
 			if (!buf)
 				goto fail;
@@ -65,7 +65,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 	if (!total_ref_count) {
 		int i;
 
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}
@@ -140,7 +140,7 @@ void perf_trace_destroy(struct perf_event *p_event)
 	tp_event->perf_events = NULL;
 
 	if (!--total_ref_count) {
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 			free_percpu(perf_trace_buf[i]);
 			perf_trace_buf[i] = NULL;
 		}
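The trace side follows the same pattern: one per-CPU buffer per context level, selected by the recursion context. A hedged sketch along the lines of the perf_trace_buf_prepare() helpers; the wrapper name below is hypothetical:

/*
 * Hypothetical wrapper for illustration, not the exact kernel API.
 * Because each context level has its own buffer, a hardirq that fires
 * in the middle of a softirq handler never scribbles over the
 * softirq's in-flight trace data.
 */
static void *trace_buf_for_current_context(int *rctxp)
{
	int rctx = perf_swevent_get_recursion_context();

	if (rctx < 0)
		return NULL;	/* already active in this context: drop the event */

	*rctxp = rctx;
	return per_cpu_ptr(perf_trace_buf[rctx], smp_processor_id());
}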