perf_counter, x86: Add mmap counter read support
Update the mmap control page with the needed information to use the
userspace RDPMC instruction for self monitoring.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
7f8b4e4e09
commit
194002b274
4 changed files with 20 additions and 1 deletions
|
@ -61,6 +61,8 @@ struct pt_regs;
|
|||
extern unsigned long perf_misc_flags(struct pt_regs *regs);
|
||||
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
|
||||
|
||||
#define PERF_COUNTER_INDEX_OFFSET 1
|
||||
|
||||
/*
|
||||
* Only override the default definitions in include/linux/perf_counter.h
|
||||
* if we have hardware PMU support.
|
||||
|
|
|
@ -87,6 +87,9 @@ union cpuid10_edx {
|
|||
#ifdef CONFIG_PERF_COUNTERS
|
||||
extern void init_hw_perf_counters(void);
|
||||
extern void perf_counters_lapic_init(void);
|
||||
|
||||
#define PERF_COUNTER_INDEX_OFFSET 0
|
||||
|
||||
#else
|
||||
static inline void init_hw_perf_counters(void) { }
|
||||
static inline void perf_counters_lapic_init(void) { }
|
||||
|
|
|
@ -912,6 +912,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
|
|||
err = checking_wrmsrl(hwc->counter_base + idx,
|
||||
(u64)(-left) & x86_pmu.counter_mask);
|
||||
|
||||
perf_counter_update_userpage(counter);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1034,6 +1036,8 @@ static int x86_pmu_enable(struct perf_counter *counter)
|
|||
x86_perf_counter_set_period(counter, hwc, idx);
|
||||
x86_pmu.enable(hwc, idx);
|
||||
|
||||
perf_counter_update_userpage(counter);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1126,6 +1130,8 @@ static void x86_pmu_disable(struct perf_counter *counter)
|
|||
x86_perf_counter_update(counter, hwc, idx);
|
||||
cpuc->counters[idx] = NULL;
|
||||
clear_bit(idx, cpuc->used_mask);
|
||||
|
||||
perf_counter_update_userpage(counter);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -1753,6 +1753,14 @@ int perf_counter_task_disable(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int perf_counter_index(struct perf_counter *counter)
|
||||
{
|
||||
if (counter->state != PERF_COUNTER_STATE_ACTIVE)
|
||||
return 0;
|
||||
|
||||
return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
|
||||
}
|
||||
|
||||
/*
|
||||
* Callers need to ensure there can be no nesting of this function, otherwise
|
||||
* the seqlock logic goes bad. We can not serialize this because the arch
|
||||
|
@ -1777,7 +1785,7 @@ void perf_counter_update_userpage(struct perf_counter *counter)
|
|||
preempt_disable();
|
||||
++userpg->lock;
|
||||
barrier();
|
||||
userpg->index = counter->hw.idx;
|
||||
userpg->index = perf_counter_index(counter);
|
||||
userpg->offset = atomic64_read(&counter->count);
|
||||
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
|
||||
userpg->offset -= atomic64_read(&counter->hw.prev_count);
|
||||
|
|
Loading…
Reference in a new issue