kvm: x86: fix stale mmio cache bug
The following events can lead to an incorrect KVM_EXIT_MMIO bubbling up to
userspace:

(1) Guest accesses gpa X without a memory slot. The gfn is cached in
    struct kvm_vcpu_arch (mmio_gfn). On Intel EPT-enabled hosts, KVM sets
    the SPTE write-execute-noread so that future accesses cause
    EPT_MISCONFIGs.

(2) Host userspace creates a memory slot via KVM_SET_USER_MEMORY_REGION
    covering the page just accessed.

(3) Guest attempts to read or write to gpa X again. On Intel, this
    generates an EPT_MISCONFIG. The memory slot generation number that was
    incremented in (2) would normally take care of this but we fast path
    mmio faults through quickly_check_mmio_pf(), which only checks the
    per-vcpu mmio cache. Since we hit the cache, KVM passes a
    KVM_EXIT_MMIO up to userspace.

This patch fixes the issue by using the memslot generation number to
validate the mmio cache.

Cc: stable@vger.kernel.org
Signed-off-by: David Matlack <dmatlack@google.com>
[xiaoguangrong: adjust the code to make it simpler for stable-tree fix.]
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Tested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 56f17dd3fb
parent ee3d1570b5
3 changed files with 17 additions and 6 deletions
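The fix is an instance of a generation-stamped cache: each cache fill records the memslot generation in force at fill time, and a lookup only counts as a hit while that stamp is still current. Before the diff, here is a minimal, self-contained C sketch of the pattern; all names in it (mmio_cache, slots_generation, cache_fill, cache_hit) are hypothetical and not taken from this patch.

	#include <stdbool.h>
	#include <stdint.h>

	struct mmio_cache {
		uint64_t gfn;	/* cached MMIO guest frame number */
		uint64_t gen;	/* slots_generation at fill time */
	};

	/* Bumped on every memslot update. */
	static uint64_t slots_generation;

	static void cache_fill(struct mmio_cache *c, uint64_t gfn)
	{
		c->gfn = gfn;
		c->gen = slots_generation;	/* stamp the entry */
	}

	/*
	 * A stale stamp means the memslots changed after the entry was
	 * filled, so the "hit" is rejected and the caller falls back to
	 * re-walking the (possibly new) memslots -- the role played by
	 * vcpu_match_mmio_gen() in the patch below.
	 */
	static bool cache_hit(struct mmio_cache *c, uint64_t gfn)
	{
		return c->gen == slots_generation && c->gfn == gfn;
	}

KVM already increments the memslot generation on every KVM_SET_USER_MEMORY_REGION, which is why step (2) above invalidates the cached translation without ever touching the per-vcpu cache itself.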
arch/x86/include/asm/kvm_host.h
@@ -477,6 +477,7 @@ struct kvm_vcpu_arch {
 	u64 mmio_gva;
 	unsigned access;
 	gfn_t mmio_gfn;
+	u64 mmio_gen;
 
 	struct kvm_pmu pmu;
 
arch/x86/kvm/mmu.c
@@ -3162,7 +3162,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 
-	vcpu_clear_mmio_info(vcpu, ~0ul);
+	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
 	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
arch/x86/kvm/x86.h
@@ -88,15 +88,23 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
 	vcpu->arch.mmio_gva = gva & PAGE_MASK;
 	vcpu->arch.access = access;
 	vcpu->arch.mmio_gfn = gfn;
+	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
+}
+
+static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
 }
 
 /*
- * Clear the mmio cache info for the given gva,
- * specially, if gva is ~0ul, we clear all mmio cache info.
+ * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
+ * clear all mmio cache info.
  */
+#define MMIO_GVA_ANY (~(gva_t)0)
+
 static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
 		return;
 
 	vcpu->arch.mmio_gva = 0;
@@ -104,7 +112,8 @@ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
 
 static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
 {
-	if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
+	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
 		return true;
 
 	return false;
@@ -112,7 +121,8 @@ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
 
 static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
-	if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
+	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
 		return true;
 
 	return false;
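For context (this function is not part of the diff), the fast path named in the commit message looked roughly like the following in arch/x86/kvm/mmu.c of the same era; it is quoted from memory, so treat it as a sketch rather than an exact excerpt.

	static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr,
					  bool direct)
	{
		/*
		 * With this patch applied, both matchers also require a
		 * current memslot generation, so a stale cache entry no
		 * longer counts as a hit here.
		 */
		if (direct)
			return vcpu_match_mmio_gpa(vcpu, addr);

		return vcpu_match_mmio_gva(vcpu, addr);
	}

With the generation check folded into vcpu_match_mmio_gpa()/vcpu_match_mmio_gva(), a stale entry now misses in this fast path and the fault takes the slow path, which walks the updated memslots instead of bubbling a spurious KVM_EXIT_MMIO up to userspace.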