perf: Disable PERF_SAMPLE_BRANCH_* when not supported
PERF_SAMPLE_BRANCH_* is disabled for:
- SW events (sw counters, tracepoints)
- HW breakpoints
- ALL but Intel x86 architecture
- AMD64 processors

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1328826068-11713-10-git-send-email-eranian@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3e702ff6d1
commit 2481c5fa6d
9 changed files with 57 additions and 0 deletions
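For context, the user-visible effect of the checks added below is that a perf_event_open() request asking for branch-stack sampling on an event type that cannot provide it now fails cleanly with EOPNOTSUPP. The following is a minimal user-space sketch, not part of this patch; it assumes headers that already define PERF_SAMPLE_BRANCH_STACK and PERF_SAMPLE_BRANCH_ANY (introduced earlier in this series), and the sys_perf_event_open() wrapper is local to the example.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* thin local wrapper; glibc provides no perf_event_open() stub */
static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;		/* sw events: no branch sampling */
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.sample_period = 100000;
	/* request taken-branch sampling anyway */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0)
		printf("rejected: %s\n", strerror(errno));	/* EOPNOTSUPP expected */
	else
		close(fd);
	return 0;
}

With a hardware event on a PMU that does implement branch sampling (Intel x86 with the rest of this series applied), the same attributes would be expected to be accepted instead.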
@@ -685,6 +685,10 @@ static int alpha_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	switch (event->attr.type) {
 	case PERF_TYPE_RAW:
 	case PERF_TYPE_HARDWARE:
@@ -539,6 +539,10 @@ static int armpmu_event_init(struct perf_event *event)
 	int err = 0;
 	atomic_t *active_events = &armpmu->active_events;
 
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	if (armpmu->map_event(event) == -ENOENT)
 		return -ENOENT;
 
@@ -606,6 +606,10 @@ static int mipspmu_event_init(struct perf_event *event)
 {
 	int err = 0;
 
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	switch (event->attr.type) {
 	case PERF_TYPE_RAW:
 	case PERF_TYPE_HARDWARE:
@@ -1084,6 +1084,10 @@ static int power_pmu_event_init(struct perf_event *event)
 	if (!ppmu)
 		return -ENOENT;
 
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
 		ev = event->attr.config;
@@ -310,6 +310,10 @@ static int sh_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	switch (event->attr.type) {
 	case PERF_TYPE_RAW:
 	case PERF_TYPE_HW_CACHE:
@@ -1105,6 +1105,10 @@ static int sparc_pmu_event_init(struct perf_event *event)
 	if (atomic_read(&nmi_active) < 0)
 		return -ENODEV;
 
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	switch (attr->type) {
 	case PERF_TYPE_HARDWARE:
 		if (attr->config >= sparc_pmu->max_events)
@@ -139,6 +139,9 @@ static int amd_pmu_hw_config(struct perf_event *event)
 	if (ret)
 		return ret;
 
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	if (event->attr.exclude_host && event->attr.exclude_guest)
 		/*
 		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
@@ -5044,6 +5044,12 @@ static int perf_swevent_init(struct perf_event *event)
 	if (event->attr.type != PERF_TYPE_SOFTWARE)
 		return -ENOENT;
 
+	/*
+	 * no branch sampling for software events
+	 */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	switch (event_id) {
 	case PERF_COUNT_SW_CPU_CLOCK:
 	case PERF_COUNT_SW_TASK_CLOCK:
@@ -5154,6 +5160,12 @@ static int perf_tp_event_init(struct perf_event *event)
 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
 		return -ENOENT;
 
+	/*
+	 * no branch sampling for tracepoint events
+	 */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	err = perf_trace_init(event);
 	if (err)
 		return err;
@@ -5379,6 +5391,12 @@ static int cpu_clock_event_init(struct perf_event *event)
 	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
 		return -ENOENT;
 
+	/*
+	 * no branch sampling for software events
+	 */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	perf_swevent_init_hrtimer(event);
 
 	return 0;
@@ -5453,6 +5471,12 @@ static int task_clock_event_init(struct perf_event *event)
 	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
 		return -ENOENT;
 
+	/*
+	 * no branch sampling for software events
+	 */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	perf_swevent_init_hrtimer(event);
 
 	return 0;
@@ -581,6 +581,12 @@ static int hw_breakpoint_event_init(struct perf_event *bp)
 	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
 		return -ENOENT;
 
+	/*
+	 * no branch sampling for breakpoint events
+	 */
+	if (has_branch_stack(bp))
+		return -EOPNOTSUPP;
+
 	err = register_perf_hw_breakpoint(bp);
 	if (err)
 		return err;