clockevents: Simplify locking
Now that the notifier chain is gone there are no other users, so it is
pointless to nest tick_device_lock inside of clockevents_lock.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130425143436.162888472@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 7126cac426
parent 7172a286ce

1 changed file with 5 additions and 17 deletions
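For context on why the inner lock can simply go away: after the parent change removed the clockevents notifier chain, tick_check_new_device() and tick_notify() are only reached from the clockevents core with clockevents_lock already held and interrupts disabled, so tick_device_lock no longer serializes anything that clockevents_lock does not. Below is a rough locking skeleton of the registration path, assuming the caller in kernel/time/clockevents.c at this point in the series; it is a simplified sketch for illustration, not part of this diff.

/* Sketch only: simplified locking context of the caller (assumed, not from this diff). */
static DEFINE_RAW_SPINLOCK(clockevents_lock);

void clockevents_register_device(struct clock_event_device *dev)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        list_add(&dev->list, &clockevent_devices);
        /*
         * tick_check_new_device() runs with clockevents_lock held and
         * interrupts disabled - the serialization the removed
         * tick_device_lock used to provide redundantly.
         */
        tick_check_new_device(dev);
        clockevents_notify_released();

        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}

The same reasoning covers tick_shutdown(), tick_suspend() and tick_resume(): they are invoked via tick_notify(), which, as the comment added in the last hunk states, is called with clockevents_lock held and interrupts disabled.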
@@ -33,7 +33,6 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
 ktime_t tick_next_period;
 ktime_t tick_period;
 int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
-static DEFINE_RAW_SPINLOCK(tick_device_lock);
 
 /*
  * Debugging: see timer_list.c
@@ -206,16 +205,14 @@ static void tick_setup_device(struct tick_device *td,
 }
 
 /*
- * Check, if the new registered device should be used.
+ * Check, if the new registered device should be used. Called with
+ * clockevents_lock held and interrupts disabled.
  */
 void tick_check_new_device(struct clock_event_device *newdev)
 {
         struct clock_event_device *curdev;
         struct tick_device *td;
         int cpu;
-        unsigned long flags;
-
-        raw_spin_lock_irqsave(&tick_device_lock, flags);
 
         cpu = smp_processor_id();
         if (!cpumask_test_cpu(cpu, newdev->cpumask))
@@ -273,8 +270,6 @@ void tick_check_new_device(struct clock_event_device *newdev)
         tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
         if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
                 tick_oneshot_notify();
-
-        raw_spin_unlock_irqrestore(&tick_device_lock, flags);
         return;
 
 out_bc:
@@ -282,7 +277,6 @@ void tick_check_new_device(struct clock_event_device *newdev)
          * Can the new device be used as a broadcast device ?
          */
         tick_install_broadcast_device(newdev);
-        raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 /*
@@ -311,9 +305,7 @@ static void tick_shutdown(unsigned int *cpup)
 {
         struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
         struct clock_event_device *dev = td->evtdev;
-        unsigned long flags;
 
-        raw_spin_lock_irqsave(&tick_device_lock, flags);
         td->mode = TICKDEV_MODE_PERIODIC;
         if (dev) {
                 /*
@@ -325,26 +317,20 @@ static void tick_shutdown(unsigned int *cpup)
                 dev->event_handler = clockevents_handle_noop;
                 td->evtdev = NULL;
         }
-        raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 static void tick_suspend(void)
 {
         struct tick_device *td = &__get_cpu_var(tick_cpu_device);
-        unsigned long flags;
 
-        raw_spin_lock_irqsave(&tick_device_lock, flags);
         clockevents_shutdown(td->evtdev);
-        raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 static void tick_resume(void)
 {
         struct tick_device *td = &__get_cpu_var(tick_cpu_device);
-        unsigned long flags;
         int broadcast = tick_resume_broadcast();
 
-        raw_spin_lock_irqsave(&tick_device_lock, flags);
         clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
 
         if (!broadcast) {
@@ -353,9 +339,11 @@ static void tick_resume(void)
                 else
                         tick_resume_oneshot();
         }
-        raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
+/*
+ * Called with clockevents_lock held and interrupts disabled
+ */
 void tick_notify(unsigned long reason, void *dev)
 {
         switch (reason) {