/*
 * Routines to identify additional cpu features that are scattered in
 * cpuid space.
 */
#include <linux/cpu.h>

#include <asm/pat.h>
#include <asm/processor.h>

#include <mach_apic.h>

struct cpuid_bit {
        u16 feature;    /* X86_FEATURE_* flag to set */
        u8 reg;         /* cpuid output register, as a CR_* index */
        u8 bit;         /* bit number within that register */
        u32 level;      /* cpuid leaf to query */
};

/* Indices into the regs[] array filled by cpuid() below. */
enum cpuid_regs {
        CR_EAX = 0,
        CR_ECX,
        CR_EDX,
        CR_EBX
};

void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{
        u32 max_level;
        u32 regs[4];
        const struct cpuid_bit *cb;

        static const struct cpuid_bit cpuid_bits[] = {
                { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
                { 0, 0, 0, 0 }
        };

        for (cb = cpuid_bits; cb->feature; cb++) {

                /*
                 * Verify that the level is valid: query the maximum
                 * supported leaf of this cpuid range (0x00000000 or
                 * 0x80000000) and make sure cb->level lies within it.
                 * A maximum outside the range's own 16-bit window is
                 * treated as bogus.
                 */
                max_level = cpuid_eax(cb->level & 0xffff0000);
                if (max_level < cb->level ||
                    max_level > (cb->level | 0xffff))
                        continue;

                cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
                      &regs[CR_ECX], &regs[CR_EDX]);

                if (regs[cb->reg] & (1 << cb->bit))
                        set_cpu_cap(c, cb->feature);
        }
}
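
/*
 * New scattered feature bits are added by extending cpuid_bits[] above.
 * A purely illustrative (hypothetical) entry such as
 *
 *      { X86_FEATURE_FOO, CR_EDX, 3, 0x80000007 },
 *
 * would set X86_FEATURE_FOO on CPUs where bit 3 of EDX from cpuid leaf
 * 0x80000007 is set; X86_FEATURE_FOO stands in for a real flag defined
 * in cpufeature.h.
 */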

/* leaf 0xb SMT level */
#define SMT_LEVEL       0

/* leaf 0xb sub-leaf types */
#define INVALID_TYPE    0
#define SMT_TYPE        1
#define CORE_TYPE       2

/* ECX[15:8] of a sub-leaf encodes its level type */
#define LEAFB_SUBTYPE(ecx)              (((ecx) >> 8) & 0xff)
/* EAX[4:0] is the number of APIC ID bits to shift out to reach the next level */
#define BITS_SHIFT_NEXT_LEVEL(eax)      ((eax) & 0x1f)
/* EBX[15:0] is the number of logical processors at this level */
#define LEVEL_MAX_SIBLINGS(ebx)         ((ebx) & 0xffff)

/*
 * Check for extended topology enumeration cpuid leaf 0xb and if it
 * exists, use it for populating initial_apicid and cpu topology
 * detection.
 */
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_SMP
        unsigned int eax, ebx, ecx, edx, sub_index;
        unsigned int ht_mask_width, core_plus_mask_width;
        unsigned int core_select_mask, core_level_siblings;

        if (c->cpuid_level < 0xb)
                return;

        cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);

        /*
         * Check whether cpuid leaf 0xb is actually implemented:
         * unimplemented leaves report EBX == 0, and sub-leaf 0 must
         * be the SMT level.
         */
        if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
                return;

        set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);

        /*
         * EDX holds the initial APIC id, which also represents the
         * 32-bit extended x2apic id.
         */
        c->initial_apicid = edx;

        /*
         * Populate HT related information from sub-leaf level 0.
         */
        core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
        core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
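
        /*
         * Illustration with hypothetical numbers for a package of four
         * 2-thread cores: sub-leaf 0 (SMT level) might report
         * EAX[4:0] = 1 and EBX[15:0] = 2, i.e. one APIC ID bit
         * distinguishes the threads of a core; sub-leaf 1 (core level)
         * might report EAX[4:0] = 5 and EBX[15:0] = 8, i.e. APIC ID
         * bits 1..4 select the core and eight logical processors sit
         * at the core level.
         */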

        sub_index = 1;
        do {
                cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);

                /*
                 * Check for the Core type in the implemented sub leaves.
                 */
                if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
                        core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
                        core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
                        break;
                }

                sub_index++;
        } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);

        /*
         * core_select_mask isolates the APIC ID bits between the SMT
         * level and the core level, i.e. the bits that select a core
         * within the package once the thread bits are shifted out.
         */
        core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
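
        /*
         * Worked example with the hypothetical widths above
         * (ht_mask_width = 1, core_plus_mask_width = 5):
         * core_select_mask = (~(-1 << 5)) >> 1 = 0x1f >> 1 = 0xf,
         * so the core id below is, in effect, (apicid >> 1) & 0xf.
         */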

#ifdef CONFIG_X86_32
        c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
                                                 & core_select_mask;
        c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
#else
        /*
         * The 64-bit phys_pkg_id() determines the APIC ID itself and
         * only takes the shift width, hence the two variants.
         */
        c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
        c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
#endif
        /*
         * core_level_siblings counts the logical processors at the core
         * level, so dividing by the threads per core yields the number
         * of cores in the package.
         */
        c->x86_max_cores = (core_level_siblings / smp_num_siblings);

        printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
               c->phys_proc_id);
        if (c->x86_max_cores > 1)
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        return;
#endif
}
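
/*
 * Continuing the hypothetical example: an initial APIC ID of 0x5
 * (binary 00101) would decode as thread 1 of core (0x5 >> 1) & 0xf = 2
 * in package 0x5 >> 5 = 0, with x86_max_cores = 8 / 2 = 4.
 */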

#ifdef CONFIG_X86_PAT
void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
{
        if (!cpu_has_pat)
                pat_disable("PAT not supported by CPU.");

        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                /*
                 * There is a known erratum on Pentium III, Core Solo
                 * and Core Duo CPUs:
                 * "Page with PAT set to WC while associated MTRR is UC
                 * may consolidate to UC."
                 * Because of this erratum, it is better to stick with
                 * setting WC in MTRR rather than using PAT on these CPUs.
                 *
                 * Enable PAT WC only on P4, Core 2 or later CPUs, i.e.
                 * family > 6, or family 6 with model >= 15.
                 */
                if (c->x86 > 0x6 || (c->x86 == 0x6 && c->x86_model >= 15))
                        return;

                pat_disable("PAT WC disabled due to known CPU erratum.");
                return;
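
        /*
         * For illustration: a Core Duo (family 6, model 14) takes the
         * pat_disable() path above, while a Core 2 (family 6, model 15)
         * or a Pentium 4 (family 0xf) passes the check and keeps PAT.
         */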

        case X86_VENDOR_AMD:
        case X86_VENDOR_CENTAUR:
        case X86_VENDOR_TRANSMETA:
                return;
        }

        pat_disable("PAT disabled. Not yet verified on this CPU type.");
}
#endif