ia64: Switch do_timer() to xtime_update()
local_cpu_data->itm_next = new_itm; does not need to be protected by xtime_lock.
xtime_update() takes the lock itself.

Signed-off-by: Torben Hohn <torbenh@gmx.de>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: johnstul@us.ibm.com
Cc: hch@infradead.org
Cc: yong.zhang0@gmail.com
LKML-Reference: <20110127145956.23248.49107.stgit@localhost>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
daad8b581e
commit
1aabd67d2e
2 changed files with 10 additions and 22 deletions
|
@ -190,19 +190,10 @@ timer_interrupt (int irq, void *dev_id)
|
|||
|
||||
new_itm += local_cpu_data->itm_delta;
|
||||
|
||||
if (smp_processor_id() == time_keeper_id) {
|
||||
/*
|
||||
* Here we are in the timer irq handler. We have irqs locally
|
||||
* disabled, but we don't know if the timer_bh is running on
|
||||
* another CPU. We need to avoid to SMP race by acquiring the
|
||||
* xtime_lock.
|
||||
*/
|
||||
write_seqlock(&xtime_lock);
|
||||
do_timer(1);
|
||||
local_cpu_data->itm_next = new_itm;
|
||||
write_sequnlock(&xtime_lock);
|
||||
} else
|
||||
local_cpu_data->itm_next = new_itm;
|
||||
if (smp_processor_id() == time_keeper_id)
|
||||
xtime_update(1);
|
||||
|
||||
local_cpu_data->itm_next = new_itm;
|
||||
|
||||
if (time_after(new_itm, ia64_get_itc()))
|
||||
break;
|
||||
|
@ -222,7 +213,7 @@ timer_interrupt (int irq, void *dev_id)
|
|||
* comfort, we increase the safety margin by
|
||||
* intentionally dropping the next tick(s). We do NOT
|
||||
* update itm.next because that would force us to call
|
||||
* do_timer() which in turn would let our clock run
|
||||
* xtime_update() which in turn would let our clock run
|
||||
* too fast (with the potentially devastating effect
|
||||
* of losing monotony of time).
|
||||
*/
|
||||
|
|
|
@ -139,14 +139,11 @@ consider_steal_time(unsigned long new_itm)
|
|||
run_posix_cpu_timers(p);
|
||||
delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
|
||||
|
||||
if (cpu == time_keeper_id) {
|
||||
write_seqlock(&xtime_lock);
|
||||
do_timer(stolen + blocked);
|
||||
local_cpu_data->itm_next = delta_itm + new_itm;
|
||||
write_sequnlock(&xtime_lock);
|
||||
} else {
|
||||
local_cpu_data->itm_next = delta_itm + new_itm;
|
||||
}
|
||||
if (cpu == time_keeper_id)
|
||||
xtime_update(stolen + blocked);
|
||||
|
||||
local_cpu_data->itm_next = delta_itm + new_itm;
|
||||
|
||||
per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
|
||||
per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue