KVM: MMU: handle large host sptes on invlpg/resync
The invlpg and sync walkers lack knowledge of large host sptes, descending to a non-existent pagetable level. Stop at the directory level in such case.

Fixes SMP Windows XP with hugepages.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit 8791723920
parent 3f353858c9
2 changed files with 8 additions and 3 deletions
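Both changed sites rely on the same predicate: a spte with the page-size (PSE) bit set maps a large page directly, so there is no lower-level page table beneath it to descend into. A minimal sketch of that helper, assuming the definition used by KVM's MMU code in this period (the typedef and mask value are stand-ins for the kernel's own):

/* Sketch only: the is_large_pte() predicate assumed by the hunks below.
 * PT_PAGE_SIZE_MASK is taken to be bit 7, the x86 PSE bit. */
typedef unsigned long long u64;        /* stand-in for the kernel type */

#define PT_PAGE_SIZE_MASK (1ULL << 7)

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}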
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
@@ -1007,7 +1007,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 	for_each_unsync_children(sp->unsync_child_bitmap, i) {
 		u64 ent = sp->spt[i];
 
-		if (is_shadow_present_pte(ent)) {
+		if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
 			struct kvm_mmu_page *child;
 			child = page_header(ent & PT64_BASE_ADDR_MASK);
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
@@ -472,14 +472,19 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
 	struct shadow_walker *sw =
 		container_of(_sw, struct shadow_walker, walker);
 
-	if (level == PT_PAGE_TABLE_LEVEL) {
+	/* FIXME: properly handle invlpg on large guest pages */
+	if (level == PT_PAGE_TABLE_LEVEL ||
+	    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
 		struct kvm_mmu_page *sp = page_header(__pa(sptep));
 
 		sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
 		sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-		if (is_shadow_present_pte(*sptep))
+		if (is_shadow_present_pte(*sptep)) {
 			rmap_remove(vcpu->kvm, sptep);
+			if (is_large_pte(*sptep))
+				--vcpu->kvm->stat.lpages;
+		}
 		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
 		return 1;
 	}
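As a standalone illustration of the rule the patch enforces ("stop at directory level in such case"), here is a small user-space C sketch. The level constants, the is_large() helper, and the example pte values are simplified stand-ins, not the kernel's shadow-walker interface:

#include <stdio.h>
#include <stdint.h>

#define PSE_BIT             (1ULL << 7)   /* "large page" bit, as on x86 */
#define PT_PAGE_TABLE_LEVEL 1
#define PT_DIRECTORY_LEVEL  2

static int is_large(uint64_t pte)
{
	return (pte & PSE_BIT) != 0;
}

/* Return 1 when the walk must terminate at this level: either we reached
 * the last level, or the directory entry maps a large page and therefore
 * has no lower-level page table to descend into. */
static int walk_terminates(int level, uint64_t pte)
{
	return level == PT_PAGE_TABLE_LEVEL ||
	       (level == PT_DIRECTORY_LEVEL && is_large(pte));
}

int main(void)
{
	uint64_t small = 0x1000;              /* normal 4K mapping */
	uint64_t large = 0x200000 | PSE_BIT;  /* 2M mapping at the directory */

	printf("dir level, small pte: stop=%d\n",
	       walk_terminates(PT_DIRECTORY_LEVEL, small));   /* 0: descend */
	printf("dir level, large pte: stop=%d\n",
	       walk_terminates(PT_DIRECTORY_LEVEL, large));   /* 1: stop here */
	printf("pt  level, small pte: stop=%d\n",
	       walk_terminates(PT_PAGE_TABLE_LEVEL, small));  /* 1: last level */
	return 0;
}

Any C compiler will build this; it shows a large directory entry terminating the walk at the directory level, while a small one lets the walker descend.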