tick/xen: Provide and use tick_suspend_local() and tick_resume_local()
Xen calls on every cpu into tick_resume() which is just wrong. tick_resume()
is for the syscore global suspend/resume invocation. What XEN really wants is
a per cpu local resume function.

Provide a tick_resume_local() function and use it in XEN.

Also provide a complementary tick_suspend_local() and modify
tick_unfreeze() and tick_freeze(), respectively, to use the new local tick
resume/suspend functions.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[ Combined two patches, rebased, modified subject/changelog. ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1698741.eezk9tnXtG@vostro.rjw.lan
[ Merged to latest timers/core. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 080873ce2d
commit f46481d0a7
5 changed files with 67 additions and 32 deletions
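To make the intent of the change concrete, here is a rough sketch of how the freeze/unfreeze paths look once the per-CPU helpers exist. The tick_freeze() hunk below matches this shape; the tick_unfreeze() hunk is not included in the diff shown here, so its body in this sketch is an assumption based on the changelog, not a copy of the patch.

/*
 * Sketch only: how tick_freeze()/tick_unfreeze() use the new per-CPU
 * helpers. tick_unfreeze() is reconstructed from the changelog rather
 * than taken from the hunks below.
 */
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;

void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus()) {
		/* Last CPU to freeze suspends timekeeping for the system. */
		timekeeping_suspend();
	} else {
		/* Every other CPU only shuts down its own tick device. */
		tick_suspend_local();
	}

	raw_spin_unlock(&tick_freeze_lock);
}

void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus())
		/* First CPU to thaw resumes timekeeping for the system. */
		timekeeping_resume();
	else
		/* The rest only bring their local tick device back. */
		tick_resume_local();

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}

The same per-CPU helper is what the XEN resume path calls on each vCPU, as the first hunk below shows.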
arch/x86/xen/suspend.c
@@ -85,7 +85,7 @@ static void xen_vcpu_notify_restore(void *data)
 	if (smp_processor_id() == 0)
 		return;

-	tick_resume();
+	tick_resume_local();
 }

 void xen_arch_resume(void)
include/linux/tick.h
@@ -29,13 +29,13 @@ extern struct tick_device *tick_get_device(int cpu);
 extern void __init tick_init(void);
 extern void tick_freeze(void);
 extern void tick_unfreeze(void);
-/* Should be core only, but XEN resume magic abuses this interface */
-extern void tick_resume(void);
+/* Should be core only, but XEN resume magic requires this */
+extern void tick_resume_local(void);
 #else /* CONFIG_GENERIC_CLOCKEVENTS */
 static inline void tick_init(void) { }
 static inline void tick_freeze(void) { }
 static inline void tick_unfreeze(void) { }
-static inline void tick_resume(void) { }
+static inline void tick_resume_local(void) { }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */

 #ifdef CONFIG_TICK_ONESHOT
kernel/time/tick-broadcast.c
@@ -455,11 +455,26 @@ void tick_suspend_broadcast(void)
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }

-int tick_resume_broadcast(void)
+/*
+ * This is called from tick_resume_local() on a resuming CPU. That's
+ * called from the core resume function, tick_unfreeze() and the magic XEN
+ * resume hackery.
+ *
+ * In none of these cases the broadcast device mode can change and the
+ * bit of the resuming CPU in the broadcast mask is safe as well.
+ */
+bool tick_resume_check_broadcast(void)
+{
+	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
+		return false;
+	else
+		return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
+}
+
+void tick_resume_broadcast(void)
 {
 	struct clock_event_device *bc;
 	unsigned long flags;
-	int broadcast = 0;

 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

@@ -472,8 +487,6 @@ int tick_resume_broadcast(void)
 		case TICKDEV_MODE_PERIODIC:
 			if (!cpumask_empty(tick_broadcast_mask))
 				tick_broadcast_start_periodic(bc);
-			broadcast = cpumask_test_cpu(smp_processor_id(),
-						     tick_broadcast_mask);
 			break;
 		case TICKDEV_MODE_ONESHOT:
 			if (!cpumask_empty(tick_broadcast_mask))
@@ -482,11 +495,8 @@ int tick_resume_broadcast(void)
 		}
 	}
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
-
-	return broadcast;
 }
-

 #ifdef CONFIG_TICK_ONESHOT

 static cpumask_var_t tick_broadcast_oneshot_mask;
kernel/time/tick-common.c
@@ -373,6 +373,41 @@ void tick_shutdown(unsigned int *cpup)
 	}
 }

+/**
+ * tick_suspend_local - Suspend the local tick device
+ *
+ * Called from the local cpu for freeze with interrupts disabled.
+ *
+ * No locks required. Nothing can change the per cpu device.
+ */
+static void tick_suspend_local(void)
+{
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+
+	clockevents_shutdown(td->evtdev);
+}
+
+/**
+ * tick_resume_local - Resume the local tick device
+ *
+ * Called from the local CPU for unfreeze or XEN resume magic.
+ *
+ * No locks required. Nothing can change the per cpu device.
+ */
+void tick_resume_local(void)
+{
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+	bool broadcast = tick_resume_check_broadcast();
+
+	clockevents_tick_resume(td->evtdev);
+	if (!broadcast) {
+		if (td->mode == TICKDEV_MODE_PERIODIC)
+			tick_setup_periodic(td->evtdev, 0);
+		else
+			tick_resume_oneshot();
+	}
+}
+
 /**
  * tick_suspend - Suspend the tick and the broadcast device
  *
@@ -384,9 +419,7 @@ void tick_shutdown(unsigned int *cpup)
  */
 void tick_suspend(void)
 {
-	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
-
-	clockevents_shutdown(td->evtdev);
+	tick_suspend_local();
 	tick_suspend_broadcast();
 }

@@ -394,26 +427,14 @@ void tick_suspend(void)
  * tick_resume - Resume the tick and the broadcast device
  *
  * Called from syscore_resume() via timekeeping_resume with only one
- * CPU online and interrupts disabled or from tick_unfreeze() under
- * tick_freeze_lock.
+ * CPU online and interrupts disabled.
  *
  * No locks required. Nothing can change the per cpu device.
  */
 void tick_resume(void)
 {
-	struct tick_device *td;
-	int broadcast;
-
-	broadcast = tick_resume_broadcast();
-	td = this_cpu_ptr(&tick_cpu_device);
-	clockevents_tick_resume(td->evtdev);
-
-	if (!broadcast) {
-		if (td->mode == TICKDEV_MODE_PERIODIC)
-			tick_setup_periodic(td->evtdev, 0);
-		else
-			tick_resume_oneshot();
-	}
+	tick_resume_broadcast();
+	tick_resume_local();
 }

 static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
@@ -436,7 +457,7 @@ void tick_freeze(void)
 	if (tick_freeze_depth == num_online_cpus()) {
 		timekeeping_suspend();
 	} else {
-		tick_suspend();
+		tick_suspend_local();
 	}

 	raw_spin_unlock(&tick_freeze_lock);
kernel/time/tick-internal.h
@@ -23,6 +23,7 @@ extern void tick_check_new_device(struct clock_event_device *dev);
 extern void tick_handover_do_timer(int *cpup);
 extern void tick_shutdown(unsigned int *cpup);
 extern void tick_suspend(void);
+extern void tick_resume(void);
 extern bool tick_check_replacement(struct clock_event_device *curdev,
 				   struct clock_event_device *newdev);
 extern void tick_install_replacement(struct clock_event_device *dev);
@@ -43,6 +44,7 @@ extern int __clockevents_update_freq(struct clock_event_device *dev, u32 freq);
 extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
 #else
 static inline void tick_suspend(void) { }
+static inline void tick_resume(void) { }
 #endif /* GENERIC_CLOCKEVENTS */

 /* Oneshot related functions */
@@ -81,7 +83,8 @@ extern int tick_is_broadcast_device(struct clock_event_device *dev);
 extern void tick_broadcast_on_off(unsigned long reason, int *oncpu);
 extern void tick_shutdown_broadcast(unsigned int *cpup);
 extern void tick_suspend_broadcast(void);
-extern int tick_resume_broadcast(void);
+extern void tick_resume_broadcast(void);
+extern bool tick_resume_check_broadcast(void);
 extern void tick_broadcast_init(void);
 extern void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
 extern int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq);
@@ -95,7 +98,8 @@ static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
 static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { }
 static inline void tick_shutdown_broadcast(unsigned int *cpup) { }
 static inline void tick_suspend_broadcast(void) { }
-static inline int tick_resume_broadcast(void) { return 0; }
+static inline void tick_resume_broadcast(void) { }
+static inline bool tick_resume_check_broadcast(void) { return false; }
 static inline void tick_broadcast_init(void) { }
 static inline int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq) { return -ENODEV; }
