perf_counter: Rename perf_counter_limit sysctl
Rename perf_counter_limit to perf_counter_max_sample_rate and prohibit
creation of counters with a known higher sample frequency.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0764771dab
commit df58ab24bf

3 changed files with 23 additions and 12 deletions
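As a rough userspace illustration of that second point (this program is not part of the commit): after the change, opening a frequency-based counter whose sample_freq already exceeds the sysctl fails with EINVAL at creation time instead of only being throttled later. The syscall wrapper, event selection and the 100000 default below are assumptions based on the 2.6.31-era perf_counter ABI and may differ on other kernels.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>		/* struct perf_counter_attr, PERF_TYPE_* */

/* thin wrapper; __NR_perf_counter_open is assumed to exist on this kernel */
static int sys_perf_counter_open(struct perf_counter_attr *attr, pid_t pid,
				 int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size        = sizeof(attr);
	attr.type        = PERF_TYPE_HARDWARE;
	attr.config      = PERF_COUNT_HW_CPU_CYCLES;	/* illustrative event choice */
	attr.freq        = 1;				/* sample_freq is a frequency in Hz */
	attr.sample_freq = 1000000;			/* well above the 100000 default */

	/* after this patch the open itself is rejected with EINVAL */
	if (sys_perf_counter_open(&attr, 0, -1, -1, 0) < 0)
		perror("perf_counter_open");

	return 0;
}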
include/linux/perf_counter.h
@@ -650,7 +650,7 @@ extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 
 extern int sysctl_perf_counter_paranoid;
 extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_limit;
+extern int sysctl_perf_counter_sample_rate;
 
 extern void perf_counter_init(void);
 
kernel/perf_counter.c
@@ -44,11 +44,12 @@ static atomic_t nr_mmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
 
 /*
- * 0 - not paranoid
- * 1 - disallow cpu counters to unpriv
- * 2 - disallow kernel profiling to unpriv
+ * perf counter paranoia level:
+ *  0 - not paranoid
+ *  1 - disallow cpu counters to unpriv
+ *  2 - disallow kernel profiling to unpriv
  */
-int sysctl_perf_counter_paranoid __read_mostly; /* do we need to be privileged */
+int sysctl_perf_counter_paranoid __read_mostly;
 
 static inline bool perf_paranoid_cpu(void)
 {
@@ -61,7 +62,11 @@ static inline bool perf_paranoid_kernel(void)
 }
 
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
-int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
+
+/*
+ * max perf counter sample rate
+ */
+int sysctl_perf_counter_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_counter_id;
 
@@ -1244,7 +1249,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
 		if (interrupts == MAX_INTERRUPTS) {
 			perf_log_throttle(counter, 1);
 			counter->pmu->unthrottle(counter);
-			interrupts = 2*sysctl_perf_counter_limit/HZ;
+			interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
 		}
 
 		if (!counter->attr.freq || !counter->attr.sample_freq)
@@ -1682,7 +1687,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
 	spin_lock_irq(&ctx->lock);
 	if (counter->attr.freq) {
-		if (value > sysctl_perf_counter_limit) {
+		if (value > sysctl_perf_counter_sample_rate) {
 			ret = -EINVAL;
 			goto unlock;
 		}
@@ -2979,7 +2984,8 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
 	} else {
 		if (hwc->interrupts != MAX_INTERRUPTS) {
 			hwc->interrupts++;
-			if (HZ * hwc->interrupts > (u64)sysctl_perf_counter_limit) {
+			if (HZ * hwc->interrupts >
+					(u64)sysctl_perf_counter_sample_rate) {
 				hwc->interrupts = MAX_INTERRUPTS;
 				perf_log_throttle(counter, 0);
 				ret = 1;
@@ -3639,6 +3645,11 @@ SYSCALL_DEFINE5(perf_counter_open,
 			return -EACCES;
 	}
 
+	if (attr.freq) {
+		if (attr.sample_freq > sysctl_perf_counter_sample_rate)
+			return -EINVAL;
+	}
+
 	/*
 	 * Get the target context (task or percpu):
 	 */
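A small standalone sketch of the budget the perf_ctx_adjust_freq() and perf_counter_overflow() hunks above work with; HZ = 1000 and the 100000 default are assumed, illustrative values, and the comments give my reading of the surrounding kernel code rather than anything stated by the patch itself.

#include <stdio.h>

#define HZ 1000						/* assumed kernel tick rate */
static int sysctl_perf_counter_sample_rate = 100000;	/* default from this patch */

int main(void)
{
	/*
	 * perf_counter_overflow() counts overflow interrupts per tick and
	 * throttles once HZ * hwc->interrupts exceeds the sysctl, i.e. after
	 * roughly sample_rate / HZ interrupts within a single tick.
	 */
	printf("per-tick interrupt budget: %d\n",
	       sysctl_perf_counter_sample_rate / HZ);		/* 100 */

	/*
	 * On the next tick perf_ctx_adjust_freq() unthrottles the counter and
	 * seeds its local interrupt estimate with twice that budget, biasing
	 * the following frequency adjustment towards a longer sample period.
	 */
	printf("unthrottle estimate: 2*%d/HZ = %d interrupts\n",
	       sysctl_perf_counter_sample_rate,
	       2 * sysctl_perf_counter_sample_rate / HZ);	/* 200 */

	return 0;
}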
kernel/sysctl.c
@@ -932,9 +932,9 @@ static struct ctl_table kern_table[] = {
 	},
 	{
 		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "perf_counter_int_limit",
-		.data		= &sysctl_perf_counter_limit,
-		.maxlen		= sizeof(sysctl_perf_counter_limit),
+		.procname	= "perf_counter_max_sample_rate",
+		.data		= &sysctl_perf_counter_sample_rate,
+		.maxlen		= sizeof(sysctl_perf_counter_sample_rate),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
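Finally, a minimal sketch of reaching the renamed knob from userspace: with mode 0644 and proc_dointvec it behaves as a single integer under /proc/sys/kernel (the procfs mount point is assumed; writes require root).

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_counter_max_sample_rate", "r");
	int max_rate;

	if (f && fscanf(f, "%d", &max_rate) == 1)
		printf("perf_counter_max_sample_rate = %d\n", max_rate);	/* 100000 by default */
	if (f)
		fclose(f);

	return 0;
}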