x86/speculation/l1tf: Add sysfs reporting for l1tf
L1TF core kernel workarounds are cheap and normally always enabled. However,
they still should be reported in sysfs if the system is vulnerable or
mitigated. Add the necessary CPU feature/bug bits.

- Extend the existing checks for Meltdown to determine if the system is
  vulnerable. All CPUs which are not vulnerable to Meltdown are also not
  vulnerable to L1TF.

- Check for 32bit non PAE and emit a warning, as there is no practical way
  to mitigate due to the limited physical address bits.

- If the system has more than MAX_PA/2 physical memory, the inverted page
  workarounds don't protect the system against the L1TF attack anymore,
  because an inverted physical address will also point to valid memory.
  Print a warning in this case and report that the system is vulnerable.

Add a function which returns the PFN limit for the L1TF mitigation, which
will be used in follow-up patches for sanity and range checks.

[ tglx: Renamed the CPU feature bit to L1TF_PTEINV ]

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Dave Hansen <dave.hansen@intel.com>
commit 17dbca1193
parent 10a70416e1
6 changed files with 77 additions and 0 deletions
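As an illustration of the MAX_PA/2 cut-off and the l1tf_pfn_limit() helper
described in the commit message above, here is a minimal standalone sketch of
the arithmetic (user-space example, not kernel code; the 46-bit physical
address width and the 4 KiB page size are assumptions chosen for the
calculation, not values taken from this patch):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                   /* assumed 4 KiB pages */

int main(void)
{
        int x86_phys_bits = 46;         /* assumed physical address width */

        /* Highest page frame number that still lies below MAX_PA/2,
         * mirroring the l1tf_pfn_limit() helper added by this patch. */
        uint64_t pfn_limit = (1ULL << (x86_phys_bits - 1 - PAGE_SHIFT)) - 1;

        /* Byte address used as the start of the e820 RAM check,
         * i.e. just under MAX_PA/2. */
        uint64_t half_pa = pfn_limit << PAGE_SHIFT;

        printf("pfn_limit = %#llx\n", (unsigned long long)pfn_limit);
        printf("half_pa   = %llu GiB\n",
               (unsigned long long)(half_pa >> 30));
        return 0;
}

With 46 physical address bits the cut-off comes out just under 32 TiB of RAM,
which is why the in-kernel comment below calls such systems extremely
unlikely.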
arch/x86/include/asm/cpufeatures.h

@@ -219,6 +219,7 @@
 #define X86_FEATURE_IBPB                ( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP               ( 7*32+27) /* Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ZEN                 ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_L1TF_PTEINV         ( 7*32+29) /* "" L1TF workaround PTE inversion */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW          ( 8*32+ 0) /* Intel TPR Shadow */
@@ -373,5 +374,6 @@
 #define X86_BUG_SPECTRE_V1              X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
 #define X86_BUG_SPECTRE_V2              X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
 #define X86_BUG_SPEC_STORE_BYPASS       X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+#define X86_BUG_L1TF                    X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
arch/x86/include/asm/processor.h

@@ -181,6 +181,11 @@ extern const struct seq_operations cpuinfo_op;
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
 
+static inline unsigned long l1tf_pfn_limit(void)
+{
+        return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
+}
+
 extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
arch/x86/kernel/cpu/bugs.c

@@ -27,9 +27,11 @@
 #include <asm/pgtable.h>
 #include <asm/set_memory.h>
 #include <asm/intel-family.h>
+#include <asm/e820/api.h>
 
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
+static void __init l1tf_select_mitigation(void);
 
 /*
  * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -81,6 +83,8 @@ void __init check_bugs(void)
          */
         ssb_select_mitigation();
 
+        l1tf_select_mitigation();
+
 #ifdef CONFIG_X86_32
         /*
          * Check whether we are able to run this kernel safely on SMP.
@@ -205,6 +209,32 @@ static void x86_amd_ssb_disable(void)
         wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
+static void __init l1tf_select_mitigation(void)
+{
+        u64 half_pa;
+
+        if (!boot_cpu_has_bug(X86_BUG_L1TF))
+                return;
+
+#if CONFIG_PGTABLE_LEVELS == 2
+        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+        return;
+#endif
+
+        /*
+         * This is extremely unlikely to happen because almost all
+         * systems have far more MAX_PA/2 than RAM can be fit into
+         * DIMM slots.
+         */
+        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+        if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+                return;
+        }
+
+        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
+}
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
 
@@ -678,6 +708,11 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
         case X86_BUG_SPEC_STORE_BYPASS:
                 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
 
+        case X86_BUG_L1TF:
+                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+                        return sprintf(buf, "Mitigation: Page Table Inversion\n");
+                break;
+
         default:
                 break;
         }
@@ -704,4 +739,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
 {
         return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
 }
+
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
+}
 #endif
arch/x86/kernel/cpu/common.c

@@ -984,6 +984,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
         {}
 };
 
+static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+        /* in addition to cpu_no_speculation */
+        { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT1     },
+        { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT2     },
+        { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
+        { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_MERRIFIELD      },
+        { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_MOOREFIELD      },
+        { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GOLDMONT        },
+        { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_DENVERTON       },
+        { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_GEMINI_LAKE     },
+        { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
+        { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
+        {}
+};
+
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
         u64 ia32_cap = 0;
@@ -1010,6 +1025,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                 return;
 
         setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+        if (x86_match_cpu(cpu_no_l1tf))
+                return;
+
+        setup_force_cpu_bug(X86_BUG_L1TF);
 }
 
 /*
drivers/base/cpu.c

@@ -540,16 +540,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
         return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_l1tf(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+        return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
 static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
         &dev_attr_meltdown.attr,
         &dev_attr_spectre_v1.attr,
         &dev_attr_spectre_v2.attr,
         &dev_attr_spec_store_bypass.attr,
+        &dev_attr_l1tf.attr,
         NULL
 };
 
include/linux/cpu.h

@@ -55,6 +55,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev,
                                    struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
                                           struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_l1tf(struct device *dev,
+                             struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
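For completeness, a minimal user-space sketch (not part of this commit) that
reads the vulnerability report exposed by the new attribute; the sysfs path
is the standard vulnerabilities directory used by the other attributes above:

/* Build with: cc -o l1tf_status l1tf_status.c */
#include <stdio.h>

int main(void)
{
        /* Attribute created by the DEVICE_ATTR(l1tf, ...) entry above. */
        const char *path = "/sys/devices/system/cpu/vulnerabilities/l1tf";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        /* Prints "Not affected", "Mitigation: Page Table Inversion", or
         * "Vulnerable", depending on what cpu_show_l1tf() reports. */
        if (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}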