[PATCH] x86-64: Fix vgetcpu when CONFIG_HOTPLUG_CPU is disabled
The vgetcpu per-CPU initialization previously relied on CPU hotplug
events for all CPUs to initialize the per-CPU state. That only worked
on kernels with CONFIG_HOTPLUG_CPU enabled; on the others, some CPUs
never had their state initialized and vgetcpu didn't work.

Change the initialization sequence to instead run in a normal initcall
(which runs after normal CPU bootup) and initialize all running CPUs
there. CPUs hotplugged later are still handled with a hotplug notifier.

This actually simplifies the code somewhat.

Signed-off-by: Andi Kleen <ak@suse.de>
parent fa18f477d0
commit 8c131af1db

4 changed files with 26 additions and 35 deletions
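In short: cpu_vsyscall_init() initializes the CPU it runs on, the
__initcall triggers it on every CPU already online, and a notifier
catches CPUs onlined later. Condensed from this patch's own additions
to arch/x86_64/kernel/vsyscall.c (the hunks below show them in context):

static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}

static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;
	if (action == CPU_ONLINE)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
	return NOTIFY_DONE;
}

and the two lines added to vsyscall_init():

	/* all CPUs are online by the time an initcall runs */
	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
	/* CPUs that come online afterwards */
	hotcpu_notifier(cpu_vsyscall_notifier, 0);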
arch/x86_64/kernel/smp.c
@@ -376,9 +376,8 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
 	/* prevent preemption and reschedule on another processor */
 	int me = get_cpu();
 	if (cpu == me) {
-		WARN_ON(1);
 		put_cpu();
-		return -EBUSY;
+		return 0;
 	}
 	spin_lock_bh(&call_lock);
 	__smp_call_function_single(cpu, func, info, nonatomic, wait);
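One consequence worth noting in this hunk: targeting the current CPU is
now a quiet success instead of a warned -EBUSY, and, as the code reads,
func is not invoked in that case. A hypothetical caller, illustrative
only and not part of this patch (the 5-argument signature matches this
kernel):

#include <linux/kernel.h>
#include <linux/smp.h>

static void hello(void *info)
{
	printk(KERN_INFO "hello from CPU %d\n", raw_smp_processor_id());
}

static void run_hello_on(int cpu)
{
	/* After this patch: returns 0 when cpu is the current CPU
	 * (note: without running hello() there); previously WARN_ON(1)
	 * fired and -EBUSY came back. */
	if (smp_call_function_single(cpu, hello, NULL, 0, 1) != 0)
		printk(KERN_ERR "IPI to CPU %d failed\n", cpu);
}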
arch/x86_64/kernel/time.c
@@ -876,15 +876,6 @@ static struct irqaction irq0 = {
 	timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL
 };
 
-static int __cpuinit
-time_cpu_notifier(struct notifier_block *nb, unsigned long action, void *hcpu)
-{
-	unsigned cpu = (unsigned long) hcpu;
-	if (action == CPU_ONLINE)
-		vsyscall_set_cpu(cpu);
-	return NOTIFY_DONE;
-}
-
 void __init time_init(void)
 {
 	if (nohpet)
@@ -925,8 +916,6 @@ void __init time_init(void)
 	vxtime.last_tsc = get_cycles_sync();
 	set_cyc2ns_scale(cpu_khz);
 	setup_irq(0, &irq0);
-	hotcpu_notifier(time_cpu_notifier, 0);
-	time_cpu_notifier(NULL, CPU_ONLINE, (void *)(long)smp_processor_id());
 
 #ifndef CONFIG_SMP
 	time_init_gtod();
arch/x86_64/kernel/vsyscall.c
@@ -27,6 +27,9 @@
 #include <linux/jiffies.h>
 #include <linux/sysctl.h>
 #include <linux/getcpu.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/notifier.h>
 
 #include <asm/vsyscall.h>
 #include <asm/pgtable.h>
@@ -243,32 +246,17 @@ static ctl_table kernel_root_table2[] = {
 
 #endif
 
-static void __cpuinit write_rdtscp_cb(void *info)
-{
-	write_rdtscp_aux((unsigned long)info);
-}
-
-void __cpuinit vsyscall_set_cpu(int cpu)
+/* Assume __initcall executes before all user space. Hopefully kmod
+   doesn't violate that. We'll find out if it does. */
+static void __cpuinit vsyscall_set_cpu(int cpu)
 {
 	unsigned long *d;
 	unsigned long node = 0;
 #ifdef CONFIG_NUMA
 	node = cpu_to_node[cpu];
 #endif
-	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) {
-		void *info = (void *)((node << 12) | cpu);
-		/* Can happen on preemptive kernel */
-		if (get_cpu() == cpu)
-			write_rdtscp_cb(info);
-#ifdef CONFIG_SMP
-		else {
-			/* the notifier is unfortunately not executed on the
-			   target CPU */
-			smp_call_function_single(cpu,write_rdtscp_cb,info,0,1);
-		}
-#endif
-		put_cpu();
-	}
+	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
+		write_rdtscp_aux((node << 12) | cpu);
 
 	/* Store cpu number in limit so that it can be loaded quickly
 	   in user space in vgetcpu.
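The simplification works because vsyscall_set_cpu() is now always
called on the CPU being initialized, so write_rdtscp_aux() can program
the MSR directly. That helper is per-CPU by nature: it writes the
TSC_AUX MSR (0xc0000103), which RDTSCP later returns in ECX. A minimal
sketch of what it amounts to, assuming the 2.6.19-era definition in
include/asm-x86_64/msr.h (illustrative, not part of this diff):

/* WRMSR of TSC_AUX; the real kernel defines
 * write_rdtscp_aux(val) as wrmsr(0xc0000103, val, 0). */
static inline void write_tsc_aux(unsigned int val)
{
	__asm__ __volatile__("wrmsr"
			     : /* no outputs */
			     : "c" (0xc0000103), "a" (val), "d" (0));
}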
@@ -280,6 +268,21 @@ void __cpuinit vsyscall_set_cpu(int cpu)
 	*d |= (node >> 4) << 48;
 }
 
+static void __cpuinit cpu_vsyscall_init(void *arg)
+{
+	/* preemption should be already off */
+	vsyscall_set_cpu(raw_smp_processor_id());
+}
+
+static int __cpuinit
+cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
+{
+	long cpu = (long)arg;
+	if (action == CPU_ONLINE)
+		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+	return NOTIFY_DONE;
+}
+
 static void __init map_vsyscall(void)
 {
 	extern char __vsyscall_0;
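Once each CPU's TSC_AUX holds (node << 12) | cpu, a single RDTSCP
recovers both values. A user-space sketch of the decode (illustrative;
it assumes the CPU supports RDTSCP, i.e. X86_FEATURE_RDTSCP):

#include <stdio.h>

/* RDTSCP loads the TSC into EDX:EAX and TSC_AUX into ECX. */
static inline unsigned int rdtscp_aux(void)
{
	unsigned int lo, hi, aux;
	__asm__ __volatile__("rdtscp" : "=a" (lo), "=d" (hi), "=c" (aux));
	return aux;
}

int main(void)
{
	unsigned int aux = rdtscp_aux();
	/* Encoding written by vsyscall_set_cpu(): low 12 bits are the
	 * CPU number, the bits above are the NUMA node. */
	printf("cpu=%u node=%u\n", aux & 0xfff, aux >> 12);
	return 0;
}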
@@ -299,6 +302,8 @@ static int __init vsyscall_init(void)
 #ifdef CONFIG_SYSCTL
 	register_sysctl_table(kernel_root_table2, 0);
 #endif
+	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
+	hotcpu_notifier(cpu_vsyscall_notifier, 0);
 	return 0;
 }
 
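And the consumer this patch fixes: user space calling the fixed-address
vgetcpu entry. A sketch assuming the 2.6.19-era x86-64 vsyscall ABI,
where getcpu is slot 2 of the vsyscall page (0xffffffffff600800); on
current kernels the legacy vsyscall page is emulated or disabled, so
treat this purely as an illustration:

#include <stdio.h>

struct getcpu_cache;	/* opaque cache argument; NULL is fine */
typedef long (*vgetcpu_t)(unsigned *cpu, unsigned *node,
			  struct getcpu_cache *tcache);

int main(void)
{
	vgetcpu_t vgetcpu = (vgetcpu_t)0xffffffffff600800UL;
	unsigned cpu, node;

	if (vgetcpu(&cpu, &node, NULL) == 0)
		printf("cpu=%u node=%u\n", cpu, node);
	return 0;
}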
include/asm-x86_64/vsyscall.h
@@ -59,8 +59,6 @@ extern seqlock_t xtime_lock;
 
 extern int sysctl_vsyscall;
 
-extern void vsyscall_set_cpu(int cpu);
-
 #define ARCH_HAVE_XTIME_LOCK 1
 
 #endif /* __KERNEL__ */