KVM: Allow browsing memslots with mmu_lock
This allows reading memslots while holding only the mmu_lock, for mmu notifiers that run in atomic context with the mmu_lock held.

Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
commit 604b38ac03
parent a1708ce8a3
2 changed files with 30 additions and 11 deletions
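Before the diff, a minimal sketch of the reader side this change enables, assuming simplified structures and a hypothetical helper name (sketch_hva_to_memslot is not a real KVM function; the real lookup is the hva-to-rmap path referenced in the hunks below). An mmu notifier that already holds kvm->mmu_lock can browse the memslot array and match a host virtual address, ignoring slots whose userspace_addr has not been published yet:

/*
 * Sketch only: simplified types and a made-up helper name, illustrating
 * the locking rule rather than the exact KVM data layout.
 */
struct sketch_memslot {
	unsigned long userspace_addr;	/* published under mmu_lock */
	unsigned long npages;
	int user_alloc;
};

struct sketch_kvm {
	spinlock_t mmu_lock;
	int nmemslots;
	struct sketch_memslot memslots[8];
};

/* Caller (e.g. an mmu notifier running in atomic context) holds kvm->mmu_lock. */
static struct sketch_memslot *sketch_hva_to_memslot(struct sketch_kvm *kvm,
						    unsigned long hva)
{
	int i;

	for (i = 0; i < kvm->nmemslots; i++) {
		struct sketch_memslot *slot = &kvm->memslots[i];

		/* Slot not fully set up yet; the diff's comment says to ignore it. */
		if (!slot->user_alloc && !slot->userspace_addr)
			continue;

		if (hva >= slot->userspace_addr &&
		    hva < slot->userspace_addr + slot->npages * PAGE_SIZE)
			return slot;
	}

	return NULL;
}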
@@ -3974,16 +3974,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	 */
 	if (!user_alloc) {
 		if (npages && !old.rmap) {
+			unsigned long userspace_addr;
+
 			down_write(&current->mm->mmap_sem);
-			memslot->userspace_addr = do_mmap(NULL, 0,
-						     npages * PAGE_SIZE,
-						     PROT_READ | PROT_WRITE,
-						     MAP_SHARED | MAP_ANONYMOUS,
-						     0);
+			userspace_addr = do_mmap(NULL, 0,
+						 npages * PAGE_SIZE,
+						 PROT_READ | PROT_WRITE,
+						 MAP_SHARED | MAP_ANONYMOUS,
+						 0);
 			up_write(&current->mm->mmap_sem);
 
-			if (IS_ERR((void *)memslot->userspace_addr))
-				return PTR_ERR((void *)memslot->userspace_addr);
+			if (IS_ERR((void *)userspace_addr))
+				return PTR_ERR((void *)userspace_addr);
+
+			/* set userspace_addr atomically for kvm_hva_to_rmapp */
+			spin_lock(&kvm->mmu_lock);
+			memslot->userspace_addr = userspace_addr;
+			spin_unlock(&kvm->mmu_lock);
 		} else {
 			if (!old.user_alloc && old.rmap) {
 				int ret;
@@ -375,7 +375,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		memset(new.rmap, 0, npages * sizeof(*new.rmap));
 
 		new.user_alloc = user_alloc;
-		new.userspace_addr = mem->userspace_addr;
+		/*
+		 * hva_to_rmmap() serializes with the mmu_lock and to be
+		 * safe it has to ignore memslots with !user_alloc &&
+		 * !userspace_addr.
+		 */
+		if (user_alloc)
+			new.userspace_addr = mem->userspace_addr;
+		else
+			new.userspace_addr = 0;
 	}
 	if (npages && !new.lpage_info) {
 		int largepages = npages / KVM_PAGES_PER_HPAGE;
@@ -408,17 +416,21 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	}
 #endif /* not defined CONFIG_S390 */
 
-	if (mem->slot >= kvm->nmemslots)
-		kvm->nmemslots = mem->slot + 1;
-
 	if (!npages)
 		kvm_arch_flush_shadow(kvm);
 
+	spin_lock(&kvm->mmu_lock);
+	if (mem->slot >= kvm->nmemslots)
+		kvm->nmemslots = mem->slot + 1;
+
 	*memslot = new;
+	spin_unlock(&kvm->mmu_lock);
 
 	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
 	if (r) {
+		spin_lock(&kvm->mmu_lock);
 		*memslot = old;
+		spin_unlock(&kvm->mmu_lock);
 		goto out_free;
 	}
 
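The writer side above boils down to a small publish pattern, sketched here with a placeholder name (expensive_setup_that_may_sleep stands in for the do_mmap() call done under mmap_sem): the sleeping work happens outside the spinlock, and only the single word store that readers care about is done under kvm->mmu_lock, so a notifier browsing memslots sees either the old value or the fully set up one, never a half-initialized slot.

	/* Sketch of the update pattern, not the literal kernel code. */
	unsigned long addr;

	addr = expensive_setup_that_may_sleep();	/* e.g. do_mmap() under mmap_sem */
	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	spin_lock(&kvm->mmu_lock);	/* readers browse memslots under this lock */
	memslot->userspace_addr = addr;
	spin_unlock(&kvm->mmu_lock);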