arm/bL_switcher: Kill tick suspend hackery
Use the new tick_suspend/resume_local() and get rid of the home-brewed
implementation of these in the ARM bL switcher. The check for the
cpumask is completely pointless: there is no harm in suspending a
per-CPU tick device unconditionally. If that were a real issue, it
would have to be fixed properly at the core level, not with completely
undocumented hacks in some random code.

Move the tick internals to the core code, now that this nuisance is
gone.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[ rjw: Rebase, changelog ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Link: http://lkml.kernel.org/r/1655112.Ws17YsMfN7@vostro.rjw.lan
Signed-off-by: Ingo Molnar <mingo@kernel.org>
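Net effect at the call site, as a minimal sketch (the full hunks follow
below; error handling and the rest of bL_switch_to() are elided, and the
comments here are editorial, not from the patch):

	/* bL_switch_to(): the hand-rolled save/shutdown/restore of the
	 * per-CPU clock event device is replaced by the core helpers. */
	tick_suspend_local();		/* shut down this CPU's tick device */
	ret = cpu_pm_enter();
	/* ... switch from the outbound to the inbound cluster ... */
	ret = cpu_pm_exit();
	tick_resume_local();		/* restore periodic/oneshot operation */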
parent f46481d0a7
commit 7270d11c56

6 changed files with 22 additions and 36 deletions
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -151,8 +151,6 @@ static int bL_switch_to(unsigned int new_cluster_id)
 	unsigned int mpidr, this_cpu, that_cpu;
 	unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
 	struct completion inbound_alive;
-	struct tick_device *tdev;
-	enum clock_event_state tdev_state;
 	long volatile *handshake_ptr;
 	int ipi_nr, ret;
 
@@ -219,13 +217,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
 	/* redirect GIC's SGIs to our counterpart */
 	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
 
-	tdev = tick_get_device(this_cpu);
-	if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
-		tdev = NULL;
-	if (tdev) {
-		tdev_state = tdev->evtdev->state;
-		clockevents_set_state(tdev->evtdev, CLOCK_EVT_STATE_SHUTDOWN);
-	}
+	tick_suspend_local();
 
 	ret = cpu_pm_enter();
 
@@ -251,11 +243,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
 
 	ret = cpu_pm_exit();
 
-	if (tdev) {
-		clockevents_set_state(tdev->evtdev, tdev_state);
-		clockevents_program_event(tdev->evtdev,
-					  tdev->evtdev->next_event, 1);
-	}
+	tick_resume_local();
 
 	trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
 	local_fiq_enable();
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -198,12 +198,6 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
 				      freq, minsec);
 }
 
-/* Should be core only, but is abused by arm bl_switcher */
-extern void clockevents_set_state(struct clock_event_device *dev,
-				  enum clock_event_state state);
-extern int clockevents_program_event(struct clock_event_device *dev,
-				      ktime_t expires, bool force);
-
 extern void clockevents_suspend(void);
 extern void clockevents_resume(void);
 
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -11,30 +11,19 @@
 #include <linux/cpumask.h>
 #include <linux/sched.h>
 
-/* ARM BL switcher abuse support */
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-enum tick_device_mode {
-	TICKDEV_MODE_PERIODIC,
-	TICKDEV_MODE_ONESHOT,
-};
-
-struct tick_device {
-	struct clock_event_device *evtdev;
-	enum tick_device_mode mode;
-};
-extern struct tick_device *tick_get_device(int cpu);
-#endif
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void __init tick_init(void);
 extern void tick_freeze(void);
 extern void tick_unfreeze(void);
-/* Should be core only, but XEN resume magic requires this */
+/* Should be core only, but ARM BL switcher requires it */
+extern void tick_suspend_local(void);
+/* Should be core only, but XEN resume magic and ARM BL switcher require it */
 extern void tick_resume_local(void);
 #else /* CONFIG_GENERIC_CLOCKEVENTS */
 static inline void tick_init(void) { }
 static inline void tick_freeze(void) { }
 static inline void tick_unfreeze(void) { }
+static inline void tick_suspend_local(void) { }
 static inline void tick_resume_local(void) { }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
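tick_freeze() and tick_unfreeze() above are the regular in-tree users of
this machinery; for context, tick_freeze() at this point in the series
looks roughly like the sketch below (paraphrased from
kernel/time/tick-common.c, not part of this patch):

	/* Paraphrased sketch: the last CPU entering freeze suspends
	 * timekeeping as a whole; every other CPU merely suspends its
	 * local tick device. */
	void tick_freeze(void)
	{
		raw_spin_lock(&tick_freeze_lock);

		tick_freeze_depth++;
		if (tick_freeze_depth == num_online_cpus())
			timekeeping_suspend();
		else
			tick_suspend_local();

		raw_spin_unlock(&tick_freeze_lock);
	}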
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -380,7 +380,7 @@ void tick_shutdown(unsigned int *cpup)
  *
  * No locks required. Nothing can change the per cpu device.
  */
-static void tick_suspend_local(void)
+void tick_suspend_local(void)
 {
 	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 
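The function being made non-static here is tiny; roughly (paraphrased
from the same file):

	void tick_suspend_local(void)
	{
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/* Unconditionally shut down this CPU's tick device. */
		clockevents_shutdown(td->evtdev);
	}

Since it always dereferences this_cpu_ptr(&tick_cpu_device), it can only
ever touch the calling CPU's own device, which is why the switcher's
cpumask check served no purpose.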
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -28,6 +28,7 @@ extern bool tick_check_replacement(struct clock_event_device *curdev,
 				   struct clock_event_device *newdev);
 extern void tick_install_replacement(struct clock_event_device *dev);
 extern int tick_is_oneshot_available(void);
+extern struct tick_device *tick_get_device(int cpu);
 
 extern int clockevents_tick_resume(struct clock_event_device *dev);
 /* Check, if the device is functional or a dummy for broadcast */
@@ -39,6 +40,10 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 extern void clockevents_shutdown(struct clock_event_device *dev);
 extern void clockevents_exchange_device(struct clock_event_device *old,
 					struct clock_event_device *new);
+extern void clockevents_set_state(struct clock_event_device *dev,
+				  enum clock_event_state state);
+extern int clockevents_program_event(struct clock_event_device *dev,
+				      ktime_t expires, bool force);
 extern void clockevents_handle_noop(struct clock_event_device *dev);
 extern int __clockevents_update_freq(struct clock_event_device *dev, u32 freq);
 extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -3,6 +3,16 @@
 
 #include <linux/hrtimer.h>
 
+enum tick_device_mode {
+	TICKDEV_MODE_PERIODIC,
+	TICKDEV_MODE_ONESHOT,
+};
+
+struct tick_device {
+	struct clock_event_device *evtdev;
+	enum tick_device_mode mode;
+};
+
 enum tick_nohz_mode {
 	NOHZ_MODE_INACTIVE,
 	NOHZ_MODE_LOWRES,
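With the definition moved here, struct tick_device is visible only
within kernel/time/. The tick_get_device() accessor declared in
tick-internal.h above is presumably just the per-CPU lookup (sketch,
assuming the tick_cpu_device per-CPU variable that tick-common.c
already uses):

	struct tick_device *tick_get_device(int cpu)
	{
		/* Return the given CPU's tick device; core-internal only. */
		return &per_cpu(tick_cpu_device, cpu);
	}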