KVM: MMU: cleanup locking in mmu_free_roots()
Take the lock around each case separately instead of having one lock with two unlock paths, and move the root_hpa assignment out of the lock.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent 0061d53daf
commit 35af577aac

1 changed file with 5 additions and 2 deletions
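Before the hunk itself, a minimal, self-contained C sketch of the before/after locking shape may help. Everything in it is illustrative: free_roots_old, free_roots_new, single_root, and the long root_hpa are made-up stand-ins for the kernel's MMU state, and a pthread mutex stands in for vcpu->kvm->mmu_lock. It models only the pattern described in the commit message, not the real kernel function.

/* Illustrative model of the locking change, not kernel code.
 * Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static long root_hpa = 0x1000;   /* made-up stand-in for vcpu->arch.mmu.root_hpa */
static int root_count = 1;
static bool single_root = true;  /* made-up stand-in for the PT64_ROOT_LEVEL case */

/* Before the patch: one lock taken up front, released on two different
 * paths, with root_hpa cleared while the lock is still held. */
static void free_roots_old(void)
{
        pthread_mutex_lock(&mmu_lock);
        if (single_root) {
                --root_count;
                root_hpa = -1;
                pthread_mutex_unlock(&mmu_lock);
                return;
        }
        /* ... walk and free the four PAE roots ... */
        root_hpa = -1;
        pthread_mutex_unlock(&mmu_lock);
}

/* After the patch: each case takes and drops the lock itself, and the
 * root_hpa assignment is done after the lock is released. */
static void free_roots_new(void)
{
        if (single_root) {
                pthread_mutex_lock(&mmu_lock);
                --root_count;
                pthread_mutex_unlock(&mmu_lock);
                root_hpa = -1;
                return;
        }
        pthread_mutex_lock(&mmu_lock);
        /* ... walk and free the four PAE roots ... */
        pthread_mutex_unlock(&mmu_lock);
        root_hpa = -1;
}

int main(void)
{
        free_roots_old();   /* old shape, kept only for contrast */
        free_roots_new();
        printf("root_hpa = %ld, root_count = %d\n", root_hpa, root_count);
        return 0;
}

The point of the restructuring is visible in free_roots_new(): each branch owns its own lock/unlock pair, and root_hpa is assigned after the lock is dropped, which mirrors what the hunk below does for the single-root case.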
@@ -2869,22 +2869,25 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                 return;
-        spin_lock(&vcpu->kvm->mmu_lock);
+
         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
             (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
              vcpu->arch.mmu.direct_map)) {
                 hpa_t root = vcpu->arch.mmu.root_hpa;
 
+                spin_lock(&vcpu->kvm->mmu_lock);
                 sp = page_header(root);
                 --sp->root_count;
                 if (!sp->root_count && sp->role.invalid) {
                         kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
                         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
                 }
-                vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                 spin_unlock(&vcpu->kvm->mmu_lock);
+                vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                 return;
         }
+
+        spin_lock(&vcpu->kvm->mmu_lock);
         for (i = 0; i < 4; ++i) {
                 hpa_t root = vcpu->arch.mmu.pae_root[i];