Merge "KVM: arm/arm64: Don't invoke defacto-CnP on first run"
commit 6b72630f13
10 changed files with 153 additions and 11 deletions
@@ -59,6 +59,7 @@ stable kernels.
 | ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921    |
 | ARM            | Cortex-A55      | #1024718        | ARM64_ERRATUM_1024718   |
 | ARM            | Cortex-A76      | #1463225        | ARM64_ERRATUM_1463225   |
+| ARM            | Cortex-A77      | #1542418        | ARM64_ERRATUM_1542418   |
 | ARM            | MMU-500         | #841119,#826419 | N/A                     |
 |                |                 |                 |                         |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
@@ -366,6 +366,11 @@ static inline int hyp_map_aux_data(void)
 
 #define kvm_phys_to_vttbr(addr)        (addr)
 
+static inline void kvm_workaround_1542418_vmid_rollover(void)
+{
+        /* not affected */
+}
+
 #endif  /* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
@@ -524,6 +524,22 @@ config ARM64_ERRATUM_1463225
 
           If unsure, say Y.
 
+config ARM64_ERRATUM_1542418
+        bool "Cortex-A77: The core might fetch a stale instruction, violating the ordering of instruction fetches"
+        default y
+        help
+          This option adds a workaround for Arm Cortex-A77 erratum 1542418.
+
+          On the affected Cortex-A77 cores (r0p0 and r1p0), software relying
+          on the prefetch-speculation-protection instead of explicit
+          synchronisation may fetch a stale instruction from a CPU-specific
+          cache. This violates the ordering rules for instruction fetches.
+
+          Work around the erratum by ensuring that 60 ASIDs are selected
+          before any ASID is reused.
+
+          If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
         bool "Cavium erratum 22375, 24313"
         default y
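
The help text above names Cortex-A77 r0p0 and r1p0 as the affected parts. For reference, a small user-space sketch (not part of this patch; the sysfs path is the standard arm64 location for the per-CPU MIDR, and the decode macros are written out here rather than taken from kernel headers) that checks whether CPU0 falls in that range:

#include <stdio.h>

#define MIDR_IMPLEMENTER(m)     (((m) >> 24) & 0xff)    /* 0x41 == Arm */
#define MIDR_VARIANT(m)         (((m) >> 20) & 0xf)     /* major revision, rN */
#define MIDR_PARTNUM(m)         (((m) >> 4) & 0xfff)    /* 0xd0d == Cortex-A77 */
#define MIDR_REVISION(m)        ((m) & 0xf)             /* minor revision, pN */

int main(void)
{
        unsigned long long midr;
        FILE *f = fopen("/sys/devices/system/cpu/cpu0/regs/identification/midr_el1", "r");

        if (!f || fscanf(f, "%llx", &midr) != 1) {
                perror("midr_el1");
                return 1;
        }
        fclose(f);

        /* Matches the "r0p0 and r1p0" wording of the Kconfig help text. */
        if (MIDR_IMPLEMENTER(midr) == 0x41 && MIDR_PARTNUM(midr) == 0xd0d &&
            MIDR_VARIANT(midr) <= 1 && MIDR_REVISION(midr) == 0)
                printf("CPU0 is an affected Cortex-A77 (erratum 1542418)\n");
        else
                printf("CPU0 is not in the affected Cortex-A77 range\n");
        return 0;
}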
@@ -54,7 +54,7 @@
 #define ARM64_WORKAROUND_1463225        33
 #define ARM64_SSBS                      34
 #define ARM64_WORKAROUND_1188873        35
 
-#define ARM64_NCAPS                     36
+#define ARM64_WORKAROUND_1542418        36
+#define ARM64_NCAPS                     37
 
 #endif /* __ASM_CPUCAPS_H */
@@ -509,6 +509,11 @@ static inline bool system_supports_sve(void)
                 cpus_have_const_cap(ARM64_SVE);
 }
 
+static inline bool system_supports_cnp(void)
+{
+        return false;
+}
+
 #define ARM64_SSBD_UNKNOWN              -1
 #define ARM64_SSBD_FORCE_DISABLE        0
 #define ARM64_SSBD_KERNEL               1
@@ -20,6 +20,7 @@
 
 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/mmu_context.h>
 #include <asm/cpufeature.h>
 
 /*
@@ -528,5 +529,19 @@ static inline int hyp_map_aux_data(void)
 
 #define kvm_phys_to_vttbr(addr)         phys_to_ttbr(addr)
 
+static inline void kvm_workaround_1542418_vmid_rollover(void)
+{
+        unsigned long flags;
+
+        if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1542418) ||
+            !cpus_have_const_cap(ARM64_WORKAROUND_1542418))
+                return;
+
+        local_irq_save(flags);
+        arm64_workaround_1542418_asid_rollover();
+        local_irq_restore(flags);
+
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
@@ -246,6 +246,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 void verify_cpu_asid_bits(void);
 void post_ttbr_update_workaround(void);
+void arm64_workaround_1542418_asid_rollover(void);
 
 #endif /* !__ASSEMBLY__ */
 
@@ -23,6 +23,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/mmu_context.h>
 #include <asm/smp_plat.h>
 
 static bool __maybe_unused
@@ -643,6 +644,18 @@ needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
         return false;
 }
 
+#ifdef CONFIG_ARM64_ERRATUM_1542418
+static void run_workaround_1542418_asid_rollover(const struct arm64_cpu_capabilities *c)
+{
+        /*
+         * If this CPU is affected by the erratum, run the workaround
+         * to protect us in case we are running on a kexec'ed kernel.
+         */
+        if (c->matches(c, SCOPE_LOCAL_CPU))
+                arm64_workaround_1542418_asid_rollover();
+}
+#endif
+
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 
 static const struct midr_range arm64_harden_el2_vectors[] = {
@@ -875,6 +888,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                 ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
                 .matches = needs_tx2_tvm_workaround,
         },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1542418
+        {
+                .desc = "ARM erratum 1542418",
+                .capability = ARM64_WORKAROUND_1542418,
+                ERRATA_MIDR_RANGE(MIDR_CORTEX_A77, 0, 0, 1, 0),
+                .cpu_enable = run_workaround_1542418_asid_rollover,
+        },
+#endif
         {
         }
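
The entry above matches affected parts via ERRATA_MIDR_RANGE(MIDR_CORTEX_A77, 0, 0, 1, 0), i.e. a (variant, revision) window from r0p0 to r1p0. As a stand-alone illustration of that range semantics (a sketch only; the helper and macro names below are made up, not code from this tree, and the model-number check is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Combine variant (MIDR bits [23:20]) and revision (bits [3:0]) into one value. */
#define MIDR_CPU_VAR_REV(var, rev)      (((uint32_t)(var) << 20) | (uint32_t)(rev))

static bool midr_var_rev_in_range(uint32_t midr, uint32_t var_min, uint32_t rev_min,
                                  uint32_t var_max, uint32_t rev_max)
{
        uint32_t var_rev = MIDR_CPU_VAR_REV((midr >> 20) & 0xf, midr & 0xf);

        return var_rev >= MIDR_CPU_VAR_REV(var_min, rev_min) &&
               var_rev <= MIDR_CPU_VAR_REV(var_max, rev_max);
}

int main(void)
{
        /* 0x410fd0d0 is a Cortex-A77 r0p0 MIDR: inside the (0,0)..(1,0) window. */
        printf("%d\n", midr_var_rev_in_range(0x410fd0d0, 0, 0, 1, 0));
        return 0;
}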
@@ -88,6 +88,75 @@ void verify_cpu_asid_bits(void)
         }
 }
 
+
+/*
+ * When the CnP is active, the caller must have set the ttbr0 to reserved
+ * before calling this function.
+ * Upon completion, the caller must ensure to:
+ * - restore the ttbr0
+ * - execute isb() to synchronize the change.
+ */
+static void __arm64_workaround_1542418_asid_rollover(void)
+{
+        phys_addr_t ttbr1_baddr;
+        u64 idx, ttbr1; /* ASID is in ttbr1 due to TCR_EL1.A1 */
+
+        if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1542418) ||
+            !cpus_have_const_cap(ARM64_WORKAROUND_1542418) ||
+            !this_cpu_has_cap(ARM64_WORKAROUND_1542418))
+                return;
+
+        /*
+         * We're about to use an arbitrary set of ASIDs, which may have
+         * live entries in the TLB (and on other CPUs with CnP). Ensure
+         * that we can't allocate conflicting entries using this task's
+         * TTBR0.
+         */
+        if (!system_supports_cnp())
+                cpu_set_reserved_ttbr0();
+        /* else: the caller must have already set this */
+
+        ttbr1 = read_sysreg(ttbr1_el1);
+        ttbr1_baddr = ttbr1 & ~TTBR_ASID_MASK;
+
+        /*
+         * Select 60 asids to invalidate the branch history for this generation.
+         * If kpti is in use we avoid selecting a user asid as
+         * __sdei_asm_entry_trampoline() uses USER_ASID_FLAG to determine if
+         * the NMI interrupted the kpti trampoline. Avoid using the reserved
+         * asid 0.
+         */
+        for (idx = 1; idx <= 61; idx++) {
+                write_sysreg((idx2asid(idx) << 48) | ttbr1_baddr, ttbr1_el1);
+                isb();
+        }
+
+        /* restore the current ASID */
+        write_sysreg(ttbr1, ttbr1_el1);
+
+        /*
+         * Rely on local_flush_tlb_all()'s isb to complete the ASID restore.
+         * check_and_switch_context() will call cpu_switch_mm() to (re)set ttbr0_el1.
+         */
+}
+
+void arm64_workaround_1542418_asid_rollover(void)
+{
+        u64 ttbr0 = read_sysreg(ttbr0_el1);
+
+        lockdep_assert_irqs_disabled();
+
+        /* Mirror check_and_switch_context() */
+        if (system_supports_cnp())
+                cpu_set_reserved_ttbr0();
+
+        __arm64_workaround_1542418_asid_rollover();
+        isb();
+
+        write_sysreg(ttbr0, ttbr0_el1);
+        isb();
+}
+
 static void flush_context(unsigned int cpu)
 {
         int i;
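
The rollover loop above rotates through ASIDs by rewriting the ASID field of TTBR1_EL1 (bits [63:48]; the ASID lives in TTBR1 here because TCR_EL1.A1 is set, as the comment on "u64 idx, ttbr1" notes). A small stand-alone sketch of how an ASID is folded into a TTBR value (illustrative values only, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define TTBR_ASID_SHIFT 48
#define TTBR_ASID_MASK  (0xffffULL << TTBR_ASID_SHIFT)

/* Replace the ASID field (bits [63:48]) of a TTBR value, keeping the base address. */
static uint64_t ttbr_with_asid(uint64_t ttbr, uint16_t asid)
{
        return (ttbr & ~TTBR_ASID_MASK) | ((uint64_t)asid << TTBR_ASID_SHIFT);
}

int main(void)
{
        uint64_t ttbr1 = 0x81234000ULL; /* made-up translation table base */
        unsigned int idx;

        /* Mirror the shape of the workaround loop: walk ASID values 1..61. */
        for (idx = 1; idx <= 61; idx++)
                printf("idx %2u -> ttbr1 = %#018llx\n", idx,
                       (unsigned long long)ttbr_with_asid(ttbr1, (uint16_t)idx));
        return 0;
}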
@@ -227,8 +296,10 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
                 atomic64_set(&mm->context.id, asid);
         }
 
-        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+                __arm64_workaround_1542418_asid_rollover();
                 local_flush_tlb_all();
+        }
 
         atomic64_set(&per_cpu(active_asids, cpu), asid);
         raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
@@ -382,10 +382,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
          * We might get preempted before the vCPU actually runs, but
          * over-invalidation doesn't affect correctness.
          */
-        if (*last_ran != vcpu->vcpu_id) {
+        if (*last_ran != -1 && *last_ran != vcpu->vcpu_id) {
                 kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
-                *last_ran = vcpu->vcpu_id;
+
+                /*
+                 * 'last_ran' and this vcpu may share an ASID and hit the
+                 * conditions for Cortex-A77 erratum 1542418.
+                 */
+                kvm_workaround_1542418_vmid_rollover();
         }
+        *last_ran = vcpu->vcpu_id;
 
         vcpu->cpu = cpu;
         vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
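
The new "*last_ran != -1" test is what gives the merge its subject: the local TLB flush (and the erratum rollover now paired with it) is skipped the first time any vCPU runs on a physical CPU, since nothing can yet have polluted the TLB for that VMID. This relies on the per-CPU tracker starting out at -1; in mainline-derived trees the VM-creation path sets that up roughly as in the sketch below (recalled for reference from kvm_arch_init_vm(), not part of this diff):

        /* Allocate the per-CPU "last vCPU that ran here" tracker and mark
         * every CPU as "no vCPU has run yet" so the first run is not treated
         * as a switch between vCPUs. */
        kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
        if (!kvm->arch.last_vcpu_ran)
                return -ENOMEM;

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;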
@@ -470,15 +476,16 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
         return vcpu_mode_priv(vcpu);
 }
 
-/* Just ensure a guest exit from a particular CPU */
-static void exit_vm_noop(void *info)
+static void exit_vmid_rollover(void *info)
 {
+        kvm_workaround_1542418_vmid_rollover();
 }
 
-void force_vm_exit(const cpumask_t *mask)
+static void force_vmid_rollover_exit(const cpumask_t *mask)
 {
         preempt_disable();
-        smp_call_function_many(mask, exit_vm_noop, NULL, true);
+        smp_call_function_many(mask, exit_vmid_rollover, NULL, true);
+        kvm_workaround_1542418_vmid_rollover();
         preempt_enable();
 }
 
@@ -536,10 +543,10 @@ static void update_vttbr(struct kvm *kvm)
 
         /*
          * On SMP we know no other CPUs can use this CPU's or each
-         * other's VMID after force_vm_exit returns since the
+         * other's VMID after force_vmid_rollover_exit returns since the
          * kvm_vmid_lock blocks them from reentry to the guest.
          */
-        force_vm_exit(cpu_all_mask);
+        force_vmid_rollover_exit(cpu_all_mask);
         /*
          * Now broadcast TLB + ICACHE invalidation over the inner
          * shareable domain to make sure all data structures are