perf_events, x86: Fix event constraint masks
Since constraints are specified on the event number, not on the event number plus unit mask, shorten the constraint masks so that we'll actually match something.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <20100127221121.967610372@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2e8418736d
commit ed8777fc13

2 changed files with 10 additions and 5 deletions
@@ -49,7 +49,7 @@
 	INTEL_ARCH_INV_MASK| \
 	INTEL_ARCH_EDGE_MASK|\
 	INTEL_ARCH_UNIT_MASK|\
-	INTEL_ARCH_EVENT_MASK)
+	INTEL_ARCH_EVTSEL_MASK)
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
@@ -100,12 +100,17 @@ struct cpu_hw_events {
 		.weight = HWEIGHT64((u64)(n)),		\
 	}
 
-#define INTEL_EVENT_CONSTRAINT(c, n)	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
-#define FIXED_EVENT_CONSTRAINT(c, n)	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+#define INTEL_EVENT_CONSTRAINT(c, n)	\
+	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
 
-#define EVENT_CONSTRAINT_END EVENT_CONSTRAINT(0, 0, 0)
+#define FIXED_EVENT_CONSTRAINT(c, n)	\
+	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
 
-#define for_each_event_constraint(e, c) for ((e) = (c); (e)->cmask; (e)++)
+#define EVENT_CONSTRAINT_END		\
+	EVENT_CONSTRAINT(0, 0, 0)
+
+#define for_each_event_constraint(e, c)	\
+	for ((e) = (c); (e)->cmask; (e)++)
 
 /*
  * struct x86_pmu - generic x86 pmu
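To illustrate the reasoning in the changelog, the sketch below (not the kernel code itself; the helper name event_matches_constraint and the example event number and unit mask are made up for this note) shows how a constraint's cmask is used: the masked event config is compared against the constraint's code, which holds only an event number. If the cmask also covered the unit-mask bits, any event programmed with a non-zero umask would fail the comparison, so the constraint would never match.

#include <stdint.h>
#include <stdio.h>

/* Architectural EVTSEL layout: event select in bits 0-7, unit mask in
 * bits 8-15. The mask values mirror the ones used in the patch. */
#define INTEL_ARCH_EVTSEL_MASK	0x000000FFULL
#define INTEL_ARCH_UNIT_MASK	0x0000FF00ULL

struct event_constraint {
	uint64_t code;	/* event number the constraint applies to */
	uint64_t cmask;	/* which config bits participate in the match */
};

/* A constraint matches when the selected config bits equal its code. */
static int event_matches_constraint(uint64_t config,
				     const struct event_constraint *c)
{
	return (config & c->cmask) == c->code;
}

int main(void)
{
	/* Example event 0x48 programmed with a non-zero unit mask. */
	uint64_t config = 0x48 | (0x01 << 8);

	struct event_constraint narrow = { 0x48, INTEL_ARCH_EVTSEL_MASK };
	struct event_constraint wide   = { 0x48, INTEL_ARCH_EVTSEL_MASK |
						 INTEL_ARCH_UNIT_MASK };

	/* Masking with only the event-select bits matches as intended... */
	printf("narrow mask matches: %d\n",
	       event_matches_constraint(config, &narrow));
	/* ...while a mask that also covers the umask bits never does,
	 * because the umask bits keep the comparison from being equal. */
	printf("wide mask matches:   %d\n",
	       event_matches_constraint(config, &wide));
	return 0;
}

This is the effect of replacing INTEL_ARCH_EVENT_MASK (event select plus unit mask) with INTEL_ARCH_EVTSEL_MASK (event select only) in the constraint macros above.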