perf_counter: Log irq_period changes
For the dynamic irq_period code, log whenever we change the period so that
analyzing code can normalize the event flow.

[ Impact: add new feature to allow more precise profiling ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090520102553.298769743@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d7b629a34f
commit 26b119bc81

2 changed files with 47 additions and 1 deletion
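For context on how analysis code can use these records (this sketch is not part of the commit): each overflow sample stands for roughly irq_period events, so a consumer can remember the most recent PERF_EVENT_PERIOD value per counter and weight every subsequent sample by it. The record layout assumed here matches what perf_log_period() below writes; the function and variable names are hypothetical.

/*
 * Hypothetical consumer-side normalization sketch, not part of this commit.
 * Each overflow sample represents roughly irq_period events, so weighting
 * every sample by the period in effect keeps the running total comparable
 * even while the kernel re-tunes the period.
 */
#include <stdint.h>

static uint64_t current_period = 1;	/* last PERF_EVENT_PERIOD value seen */
static uint64_t estimated_events;	/* normalized running estimate */

/* Call for every PERF_EVENT_PERIOD record read from the mmap buffer. */
static void on_period_change(uint64_t time, uint64_t period)
{
	(void)time;		/* timestamp, useful for ordering records */
	current_period = period;
}

/* Call for every overflow sample from the same counter. */
static void on_sample(void)
{
	estimated_events += current_period;
}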
include/linux/perf_counter.h

@@ -257,6 +257,14 @@ enum perf_event_type {
 	 */
 	PERF_EVENT_COMM		= 3,
 
+	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u64				irq_period;
+	 * };
+	 */
+	PERF_EVENT_PERIOD	= 4,
+
 	/*
 	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
 	 * will be PERF_RECORD_*
kernel/perf_counter.c

@@ -1046,7 +1046,9 @@ int perf_counter_task_enable(void)
 	return 0;
 }
 
-void perf_adjust_freq(struct perf_counter_context *ctx)
+static void perf_log_period(struct perf_counter *counter, u64 period);
+
+static void perf_adjust_freq(struct perf_counter_context *ctx)
 {
 	struct perf_counter *counter;
 	u64 irq_period;

@@ -1072,6 +1074,8 @@ void perf_adjust_freq(struct perf_counter_context *ctx)
 		if (!irq_period)
 			irq_period = 1;
 
+		perf_log_period(counter, irq_period);
+
 		counter->hw.irq_period = irq_period;
 		counter->hw.interrupts = 0;
 	}
@@ -2406,6 +2410,40 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 	perf_counter_mmap_event(&mmap_event);
 }
 
+/*
+ *
+ */
+
+static void perf_log_period(struct perf_counter *counter, u64 period)
+{
+	struct perf_output_handle handle;
+	int ret;
+
+	struct {
+		struct perf_event_header	header;
+		u64				time;
+		u64				period;
+	} freq_event = {
+		.header = {
+			.type = PERF_EVENT_PERIOD,
+			.misc = 0,
+			.size = sizeof(freq_event),
+		},
+		.time = sched_clock(),
+		.period = period,
+	};
+
+	if (counter->hw.irq_period == period)
+		return;
+
+	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
+	if (ret)
+		return;
+
+	perf_output_put(&handle, freq_event);
+	perf_output_end(&handle);
+}
+
 /*
  * Generic counter overflow handling.
  */
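A minimal reader-side sketch, also not part of the commit, showing how a tool might recognize these records when walking the counter's mmap ring buffer. The period_event struct mirrors what perf_log_period() writes (header, time, period); note that it carries a time field which the struct comment added to perf_counter.h above does not list. Types are re-declared locally so the example stands alone, and handle_event() is a hypothetical dispatch helper.

/*
 * Hypothetical user-space reader sketch, not part of this commit.
 * Types are re-declared locally to keep the example self-contained;
 * a real tool would take them from the perf_counter header.
 */
#include <stdio.h>
#include <stdint.h>

struct perf_event_header {
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

#define PERF_EVENT_PERIOD	4	/* from the enum added above */

/* Layout of the record that perf_log_period() writes. */
struct period_event {
	struct perf_event_header header;
	uint64_t time;
	uint64_t period;
};

/* Dispatch one record pulled out of the counter's mmap ring buffer. */
static void handle_event(struct perf_event_header *hdr)
{
	if (hdr->type == PERF_EVENT_PERIOD) {
		struct period_event *pe = (struct period_event *)hdr;

		printf("period change at %llu: new period %llu\n",
		       (unsigned long long)pe->time,
		       (unsigned long long)pe->period);
	}
	/* other record types (mmap, comm, overflow samples) elided */
}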