hrtimer: reduce calls to hrtimer_get_softirq_time()
It seems that hrtimer_run_queues() is calling hrtimer_get_softirq_time() more often than it needs to. This can cause frequent contention on systems with large numbers of processors/cores. With this patch, hrtimer_run_queues only calls hrtimer_get_softirq_time() if there is a pending timer in one of the hrtimer bases, and only once. This also combines hrtimer_run_queues() and the inline run_hrtimer_queue() into one function. [ tglx@linutronix.de: coding style ] Signed-off-by: Dimitri Sivanich <sivanich@sgi.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
833df317f9
commit
833883d9ac
1 changed file with 37 additions and 37 deletions
|
@ -1238,51 +1238,51 @@ void hrtimer_run_pending(void)
|
||||||
/*
|
/*
|
||||||
* Called from hardirq context every jiffy
|
* Called from hardirq context every jiffy
|
||||||
*/
|
*/
|
||||||
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
|
|
||||||
int index)
|
|
||||||
{
|
|
||||||
struct rb_node *node;
|
|
||||||
struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
|
|
||||||
|
|
||||||
if (!base->first)
|
|
||||||
return;
|
|
||||||
|
|
||||||
if (base->get_softirq_time)
|
|
||||||
base->softirq_time = base->get_softirq_time();
|
|
||||||
|
|
||||||
spin_lock(&cpu_base->lock);
|
|
||||||
|
|
||||||
while ((node = base->first)) {
|
|
||||||
struct hrtimer *timer;
|
|
||||||
|
|
||||||
timer = rb_entry(node, struct hrtimer, node);
|
|
||||||
if (base->softirq_time.tv64 <= timer->expires.tv64)
|
|
||||||
break;
|
|
||||||
|
|
||||||
if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
|
|
||||||
__remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
|
|
||||||
list_add_tail(&timer->cb_entry,
|
|
||||||
&base->cpu_base->cb_pending);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
__run_hrtimer(timer);
|
|
||||||
}
|
|
||||||
spin_unlock(&cpu_base->lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
void hrtimer_run_queues(void)
|
void hrtimer_run_queues(void)
|
||||||
{
|
{
|
||||||
|
struct rb_node *node;
|
||||||
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
|
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
|
||||||
int i;
|
struct hrtimer_clock_base *base;
|
||||||
|
int index, gettime = 1;
|
||||||
|
|
||||||
if (hrtimer_hres_active())
|
if (hrtimer_hres_active())
|
||||||
return;
|
return;
|
||||||
|
|
||||||
hrtimer_get_softirq_time(cpu_base);
|
for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
|
||||||
|
base = &cpu_base->clock_base[index];
|
||||||
|
|
||||||
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
|
if (!base->first)
|
||||||
run_hrtimer_queue(cpu_base, i);
|
continue;
|
||||||
|
|
||||||
|
if (gettime) {
|
||||||
|
hrtimer_get_softirq_time(cpu_base);
|
||||||
|
gettime = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (base->get_softirq_time)
|
||||||
|
base->softirq_time = base->get_softirq_time();
|
||||||
|
|
||||||
|
spin_lock(&cpu_base->lock);
|
||||||
|
|
||||||
|
while ((node = base->first)) {
|
||||||
|
struct hrtimer *timer;
|
||||||
|
|
||||||
|
timer = rb_entry(node, struct hrtimer, node);
|
||||||
|
if (base->softirq_time.tv64 <= timer->expires.tv64)
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
|
||||||
|
__remove_hrtimer(timer, base,
|
||||||
|
HRTIMER_STATE_PENDING, 0);
|
||||||
|
list_add_tail(&timer->cb_entry,
|
||||||
|
&base->cpu_base->cb_pending);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
__run_hrtimer(timer);
|
||||||
|
}
|
||||||
|
spin_unlock(&cpu_base->lock);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
Loading…
Reference in a new issue