Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: simplify "make ARCH=x86" and fix kconfig all.config
  x86: reboot fixup for wrap2c board
  x86: check boundary in count setup resource
  x86: fix reboot with no keyboard attached
  x86: add hpet sanity checks
  x86: on x86_64, correct reading of PC RTC when update in progress in time_64.c
  x86: fix freeze in x86_64 RTC update code in time_64.c
  ntp: fix typo that makes sync_cmos_clock erratic
  Remove x86 merge artifact from top Makefile
  x86: fixup cpu_info array conversion
  x86: show cpuinfo only for online CPUs
  x86: fix cpu-hotplug regression
  x86: ignore the sys_getcpu() tcache parameter
  x86: voyager use correct header file name
  x86: fix smp init sections
  x86: fix voyager_cat_init section
  x86: fix bogus memcpy in es7000_check_dsdt()
commit 2ffbb8377c
16 changed files with 81 additions and 82 deletions
Makefile | 7
@@ -1332,12 +1332,7 @@ else
 ALLINCLUDE_ARCHS := $(ALLSOURCE_ARCHS)
 endif
 
-# Take care of arch/x86
-ifeq ($(ARCH), $(SRCARCH))
-ALLSOURCE_ARCHS := $(ARCH)
-else
-ALLSOURCE_ARCHS := $(ARCH) $(SRCARCH)
-endif
+ALLSOURCE_ARCHS := $(SRCARCH)
 
 define find-sources
 	( for arch in $(ALLSOURCE_ARCHS) ; do \

@@ -637,6 +637,38 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
 	}
 
 	hpet_address = hpet_tbl->address.address;
+
+	/*
+	 * Some broken BIOSes advertise HPET at 0x0. We really do not
+	 * want to allocate a resource there.
+	 */
+	if (!hpet_address) {
+		printk(KERN_WARNING PREFIX
+		       "HPET id: %#x base: %#lx is invalid\n",
+		       hpet_tbl->id, hpet_address);
+		return 0;
+	}
+#ifdef CONFIG_X86_64
+	/*
+	 * Some even more broken BIOSes advertise HPET at
+	 * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add
+	 * some noise:
+	 */
+	if (hpet_address == 0xfed0000000000000UL) {
+		if (!hpet_force_user) {
+			printk(KERN_WARNING PREFIX "HPET id: %#x "
+			       "base: 0xfed0000000000000 is bogus\n "
+			       "try hpet=force on the kernel command line to "
+			       "fix it up to 0xfed00000.\n", hpet_tbl->id);
+			hpet_address = 0;
+			return 0;
+		}
+		printk(KERN_WARNING PREFIX
+		       "HPET id: %#x base: 0xfed0000000000000 fixed up "
+		       "to 0xfed00000.\n", hpet_tbl->id);
+		hpet_address >>= 32;
+	}
+#endif
 	printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
 	       hpet_tbl->id, hpet_address);

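A quick way to see why the ">>= 32" fixup above is enough: the bogus BIOS value is exactly the intended 32-bit base shifted up by 32 bits. Stand-alone user-space sketch (not part of the patch, assumes a 64-bit unsigned long):

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		/* bogus BIOS-reported base vs. the intended one */
		unsigned long bogus = 0xfed0000000000000UL;

		assert(bogus >> 32 == 0xfed00000UL);
		printf("fixed up base: %#lx\n", bogus >> 32);
		return 0;
	}
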
@@ -810,7 +810,7 @@ static __cpuinit int mce_create_device(unsigned int cpu)
 	int err;
 	int i;
 
-	if (!mce_available(&cpu_data(cpu)))
+	if (!mce_available(&boot_cpu_data))
 		return -EIO;
 
 	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));

@@ -89,8 +89,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	int fpu_exception;
 
 #ifdef CONFIG_SMP
-	if (!cpu_online(n))
-		return 0;
 	n = c->cpu_index;
 #endif
 	seq_printf(m, "processor\t: %d\n"

@@ -177,14 +175,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
 	if (*pos == 0)	/* just in case, cpu 0 is not the first */
-		*pos = first_cpu(cpu_possible_map);
-	if ((*pos) < NR_CPUS && cpu_possible(*pos))
+		*pos = first_cpu(cpu_online_map);
+	if ((*pos) < NR_CPUS && cpu_online(*pos))
 		return &cpu_data(*pos);
 	return NULL;
 }
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	*pos = next_cpu(*pos, cpu_possible_map);
+	*pos = next_cpu(*pos, cpu_online_map);
 	return c_start(m, pos);
 }
 static void c_stop(struct seq_file *m, void *v)

@@ -39,6 +39,7 @@ struct device_fixup {
 static struct device_fixup fixups_table[] = {
 { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset },
+{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset },
 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset },
 };
 
 /*

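Adding one table entry is the whole wrap2c fix because the reboot path walks fixups_table and calls the handler of the first matching PCI device. Rough sketch of that consumer loop, from memory and with assumed field names, not the verbatim code in this file:

	/* somewhere in the reboot fixup routine (sketch) */
	for (i = 0; i < ARRAY_SIZE(fixups_table); i++) {
		cur = &fixups_table[i];
		dev = pci_get_device(cur->vendor, cur->device, NULL);
		if (!dev)
			continue;		/* that chipset is not present */

		cur->reboot_fixup(dev);		/* e.g. cs5536_warm_reset() */
	}
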
@@ -892,7 +892,6 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 
 #ifdef CONFIG_SMP
 	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-	c->cpu_index = 0;
 #endif
 }
 

@@ -1078,8 +1077,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 
 #ifdef CONFIG_SMP
-	if (!cpu_online(c->cpu_index))
-		return 0;
 	cpu = c->cpu_index;
 #endif
 

@@ -1171,15 +1168,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
 	if (*pos == 0)	/* just in case, cpu 0 is not the first */
-		*pos = first_cpu(cpu_possible_map);
-	if ((*pos) < NR_CPUS && cpu_possible(*pos))
+		*pos = first_cpu(cpu_online_map);
+	if ((*pos) < NR_CPUS && cpu_online(*pos))
 		return &cpu_data(*pos);
 	return NULL;
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	*pos = next_cpu(*pos, cpu_possible_map);
+	*pos = next_cpu(*pos, cpu_online_map);
 	return c_start(m, pos);
 }
 

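Both the 32-bit and the 64-bit copies of c_start()/c_next() above are the seq_file iterator behind /proc/cpuinfo, which is why switching them to cpu_online_map lets show_cpuinfo() drop its own online check. For orientation only, the wiring in these files looks roughly like this (a sketch following the seq_file API, not copied from the patch):

	struct seq_operations cpuinfo_op = {
		.start	= c_start,	/* now walks cpu_online_map */
		.next	= c_next,
		.stop	= c_stop,
		.show	= show_cpuinfo,
	};
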
@@ -82,18 +82,15 @@ static int set_rtc_mmss(unsigned long nowtime)
 	int retval = 0;
 	int real_seconds, real_minutes, cmos_minutes;
 	unsigned char control, freq_select;
+	unsigned long flags;
 
 /*
- * IRQs are disabled when we're called from the timer interrupt,
- * no need for spin_lock_irqsave()
+ * set_rtc_mmss is called when irqs are enabled, so disable irqs here
  */
 
-	spin_lock(&rtc_lock);
-
+	spin_lock_irqsave(&rtc_lock, flags);
 /*
  * Tell the clock it's being set and stop it.
  */
-
 	control = CMOS_READ(RTC_CONTROL);
 	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

@@ -138,7 +135,7 @@ static int set_rtc_mmss(unsigned long nowtime)
 	CMOS_WRITE(control, RTC_CONTROL);
 	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);
 
-	spin_unlock(&rtc_lock);
+	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	return retval;
 }

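The two set_rtc_mmss() hunks swap the plain lock for the IRQ-saving variant because, as the new comment says, the function can now be entered with interrupts enabled; if an interrupt handler that also takes rtc_lock fired on the same CPU inside the critical section, a plain spin_lock() could deadlock. Minimal sketch of the pattern (illustrative fragment, not buildable on its own):

	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);		/* save IRQ state, disable IRQs */
	/* ... CMOS_READ()/CMOS_WRITE() critical section ... */
	spin_unlock_irqrestore(&rtc_lock, flags);	/* restore the saved IRQ state */
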
@@ -164,8 +161,16 @@ unsigned long read_persistent_clock(void)
 	unsigned century = 0;
 
 	spin_lock_irqsave(&rtc_lock, flags);
-
-	do {
+	/*
+	 * if UIP is clear, then we have >= 244 microseconds before RTC
+	 * registers will be updated. Spec sheet says that this is the
+	 * reliable way to read RTC - registers invalid (off bus) during update
+	 */
+	while ((CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+		cpu_relax();
+
+
+	/* now read all RTC registers while stable with interrupts disabled */
 	sec = CMOS_READ(RTC_SECONDS);
 	min = CMOS_READ(RTC_MINUTES);
 	hour = CMOS_READ(RTC_HOURS);

@@ -177,8 +182,6 @@ unsigned long read_persistent_clock(void)
 				acpi_gbl_FADT.century)
 		century = CMOS_READ(acpi_gbl_FADT.century);
 #endif
-	} while (sec != CMOS_READ(RTC_SECONDS));
-
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	/*

@@ -568,7 +568,7 @@ static voyager_module_t *voyager_initial_module;
  * boot cpu *after* all memory initialisation has been done (so we can
  * use kmalloc) but before smp initialisation, so we can probe the SMP
  * configuration and pick up necessary information. */
-void
+void __init
 voyager_cat_init(void)
 {
 	voyager_module_t **modpp = &voyager_initial_module;

@@ -1900,7 +1900,7 @@ voyager_smp_prepare_cpus(unsigned int max_cpus)
 	smp_boot_cpus();
 }
 
-static void __devinit voyager_smp_prepare_boot_cpu(void)
+static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 {
 	init_gdt(smp_processor_id());
 	switch_to_new_gdt();

@@ -1911,7 +1911,7 @@ static void __devinit voyager_smp_prepare_boot_cpu(void)
 	cpu_set(smp_processor_id(), cpu_present_map);
 }
 
-static int __devinit
+static int __cpuinit
 voyager_cpu_up(unsigned int cpu)
 {
 	/* This only works at boot for x86. See "rewrite" above. */

@@ -77,6 +77,9 @@ count_resource(struct acpi_resource *acpi_res, void *data)
 	struct acpi_resource_address64 addr;
 	acpi_status status;
 
+	if (info->res_num >= PCI_BUS_NUM_RESOURCES)
+		return AE_OK;
+
 	status = resource_to_addr(acpi_res, &addr);
 	if (ACPI_SUCCESS(status))
 		info->res_num++;

@@ -93,6 +96,9 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 	unsigned long flags;
 	struct resource *root;
 
+	if (info->res_num >= PCI_BUS_NUM_RESOURCES)
+		return AE_OK;
+
 	status = resource_to_addr(acpi_res, &addr);
 	if (!ACPI_SUCCESS(status))
 		return AE_OK;

@@ -13,32 +13,17 @@
 #include <asm/vgtod.h>
 #include "vextern.h"
 
-long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
 {
 	unsigned int dummy, p;
-	unsigned long j = 0;
 
-	/* Fast cache - only recompute value once per jiffies and avoid
-	   relatively costly rdtscp/cpuid otherwise.
-	   This works because the scheduler usually keeps the process
-	   on the same CPU and this syscall doesn't guarantee its
-	   results anyways.
-	   We do this here because otherwise user space would do it on
-	   its own in a likely inferior way (no access to jiffies).
-	   If you don't like it pass NULL. */
-	if (tcache && tcache->blob[0] == (j = *vdso_jiffies)) {
-		p = tcache->blob[1];
-	} else if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
+	if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
 		/* Load per CPU data from RDTSCP */
 		rdtscp(dummy, dummy, p);
 	} else {
 		/* Load per CPU data from GDT */
 		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 	}
-	if (tcache) {
-		tcache->blob[0] = j;
-		tcache->blob[1] = p;
-	}
 	if (cpu)
 		*cpu = p & 0xfff;
 	if (node)

@@ -49,7 +49,7 @@ static inline void mach_reboot(void)
 	udelay(50);
 	kb_wait();
 	udelay(50);
-	outb(cmd | 0x04, 0x60);	/* set "System flag" */
+	outb(cmd | 0x14, 0x60);	/* set "System flag" and "Keyboard Disabled" */
 	udelay(50);
 	kb_wait();
 	udelay(50);

@@ -29,9 +29,9 @@ extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
 static inline int es7000_check_dsdt(void)
 {
 	struct acpi_table_header header;
-	memcpy(&header, 0, sizeof(struct acpi_table_header));
-	acpi_get_table_header(ACPI_SIG_DSDT, 0, &header);
-	if (!strncmp(header.oem_id, "UNISYS", 6))
+
+	if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
+	    !strncmp(header.oem_id, "UNISYS", 6))
 		return 1;
 	return 0;
 }

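The removed memcpy() read sizeof(struct acpi_table_header) bytes from address 0, which is undefined behaviour; a zero-fill was presumably intended. After the fix no clearing is needed, since the header is only examined when acpi_get_table_header() reports success, but for comparison the intended form would have been:

	memset(&header, 0, sizeof(struct acpi_table_header));
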
@@ -1,5 +1,5 @@
 #include <asm/voyager.h>
-#include <asm/setup_32.h>
+#include <asm/setup.h>
 #define VOYAGER_BIOS_INFO ((struct voyager_bios_info *) \
 			(&boot_params.apm_bios_info))
 

kernel/sys.c | 20
@@ -1750,7 +1750,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 }
 
 asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
-			   struct getcpu_cache __user *cache)
+			   struct getcpu_cache __user *unused)
 {
 	int err = 0;
 	int cpu = raw_smp_processor_id();

@@ -1758,24 +1758,6 @@ asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
 		err |= put_user(cpu, cpup);
 	if (nodep)
 		err |= put_user(cpu_to_node(cpu), nodep);
-	if (cache) {
-		/*
-		 * The cache is not needed for this implementation,
-		 * but make sure user programs pass something
-		 * valid. vsyscall implementations can instead make
-		 * good use of the cache. Only use t0 and t1 because
-		 * these are available in both 32bit and 64bit ABI (no
-		 * need for a compat_getcpu). 32bit has enough
-		 * padding
-		 */
-		unsigned long t0, t1;
-		get_user(t0, &cache->blob[0]);
-		get_user(t1, &cache->blob[1]);
-		t0++;
-		t1++;
-		put_user(t0, &cache->blob[0]);
-		put_user(t1, &cache->blob[1]);
-	}
 	return err ? -EFAULT : 0;
 }

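With the cache-touching block gone, the third argument of getcpu(2) is simply ignored by the kernel. A small user-space sketch of a caller (it goes through syscall(2) since a glibc wrapper may not be available; passing NULL for the cache is fine either way):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		unsigned cpu, node;

		if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
			printf("cpu %u, node %u\n", cpu, node);
		else
			perror("getcpu");
		return 0;
	}
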
@@ -205,7 +205,7 @@ static void sync_cmos_clock(unsigned long dummy)
 		return;
 
 	getnstimeofday(&now);
-	if (abs(xtime.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
+	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
 		fail = update_persistent_clock(now);
 
 	next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec;

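The one-character change matters because this test decides whether the freshly read time is close enough to the middle of a second, which is when the CMOS clock is conventionally written; checking xtime instead of now is what made sync_cmos_clock erratic. Stand-alone illustration of the window test (user-space C; the tick_nsec value is an assumption for HZ=1000):

	#include <stdio.h>
	#include <stdlib.h>

	#define NSEC_PER_SEC 1000000000L

	int main(void)
	{
		long tick_nsec = 1000000;	/* ~1 ms tick, HZ=1000 assumed */
		long now_nsec = 499700000;	/* example value just read */

		if (labs(now_nsec - NSEC_PER_SEC / 2) <= tick_nsec / 2)
			printf("inside the window: write the CMOS clock now\n");
		else
			printf("outside the window: retry nearer the half second\n");
		return 0;
	}
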