perf/x86: Warn to early_printk() in case irq_work is too slow
On Mon, Feb 10, 2014 at 08:45:16AM -0800, Dave Hansen wrote:
> The reason I coded this up was that NMIs were firing off so fast that
> nothing else was getting a chance to run. With this patch, at least the
> printk() would come out and I'd have some idea what was going on.
It will start spewing to early_printk() (which is a lot nicer to use
from NMI context too) when it fails to queue the IRQ-work because it's
already enqueued.
It can produce a false positive when two CPUs trigger the warning
concurrently, but that should be rare, and some extra clutter in the
early printk output shouldn't be a problem.
Cc: hpa@zytor.com
Cc: tglx@linutronix.de
Cc: dzickus@redhat.com
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: mingo@kernel.org
Fixes: 6a02ad66b2 ("perf/x86: Push the duration-logging printk() to IRQ context")
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140211150116.GO27965@twins.programming.kicks-ass.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit cd578abb24
parent 90ed5b0fa5
3 changed files with 12 additions and 5 deletions
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -32,7 +32,7 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 
 #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
 
-void irq_work_queue(struct irq_work *work);
+bool irq_work_queue(struct irq_work *work);
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -243,7 +243,7 @@ static void perf_duration_warn(struct irq_work *w)
 	printk_ratelimited(KERN_WARNING
 			"perf interrupt took too long (%lld > %lld), lowering "
 			"kernel.perf_event_max_sample_rate to %d\n",
-			avg_local_sample_len, allowed_ns,
+			avg_local_sample_len, allowed_ns >> 1,
 			sysctl_perf_event_sample_rate);
 }
 
@@ -283,7 +283,12 @@ void perf_sample_event_took(u64 sample_len_ns)
 
 	update_perf_cpu_limits();
 
-	irq_work_queue(&perf_duration_work);
+	if (!irq_work_queue(&perf_duration_work)) {
+		early_printk("perf interrupt took too long (%lld > %lld), lowering "
+			     "kernel.perf_event_max_sample_rate to %d\n",
+			     avg_local_sample_len, allowed_ns >> 1,
+			     sysctl_perf_event_sample_rate);
+	}
 }
 
 static atomic64_t perf_event_id;
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -61,11 +61,11 @@ void __weak arch_irq_work_raise(void)
  *
  * Can be re-enqueued while the callback is still in progress.
  */
-void irq_work_queue(struct irq_work *work)
+bool irq_work_queue(struct irq_work *work)
 {
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
-		return;
+		return false;
 
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
@@ -83,6 +83,8 @@ void irq_work_queue(struct irq_work *work)
 	}
 
 	preempt_enable();
+
+	return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
 
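For illustration, a minimal sketch (not part of the commit) of how a caller
might use the new bool return value of irq_work_queue() to fall back to
early_printk() when the work is already pending; the names my_work,
my_work_func and report_from_nmi are invented for this example.

/*
 * Minimal sketch, assuming CONFIG_EARLY_PRINTK: defer reporting to IRQ
 * context via irq_work, and print directly when the work could not be
 * queued because it was already pending.
 */
#include <linux/irq_work.h>
#include <linux/printk.h>

static void my_work_func(struct irq_work *work)
{
	/* Runs later in IRQ context, where printk() is safe. */
	pr_warn("deferred warning from irq_work\n");
}

static DEFINE_IRQ_WORK(my_work, my_work_func);

static void report_from_nmi(void)
{
	/*
	 * irq_work_queue() now returns false when the work was already
	 * pending; in that case print directly via early_printk(), which
	 * is usable from NMI context.
	 */
	if (!irq_work_queue(&my_work))
		early_printk("irq_work already pending, reporting directly\n");
}

The fallback mirrors what perf_sample_event_took() does in the diff above:
printk() is not safe from NMI context, so the direct path goes through
early_printk() instead.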