kvm: change memslot sorting rule from size to GFN

This allows binary search to be used for GFN -> memslot lookups,
reducing the lookup cost when a large number of memslots is in use.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 0e60b0799f
parent d4ae84a02b
Author:    Igor Mammedov
Date:      2014-12-01 17:29:26 +0000
Committer: Paolo Bonzini

@@ -666,10 +666,10 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 }
 
 /*
- * Insert memslot and re-sort memslots based on their size,
- * so the larger slots will get better fit. Sorting algorithm
- * takes advantage of having initially sorted array and
- * known changed memslot position.
+ * Insert memslot and re-sort memslots based on their GFN,
+ * so binary search could be used to lookup GFN.
+ * Sorting algorithm takes advantage of having initially
+ * sorted array and known changed memslot position.
  */
 static void update_memslots(struct kvm_memslots *slots,
 			    struct kvm_memory_slot *new)
@@ -679,14 +679,19 @@ static void update_memslots(struct kvm_memslots *slots,
 	struct kvm_memory_slot *mslots = slots->memslots;
 
 	WARN_ON(mslots[i].id != id);
+	if (!new->npages)
+		new->base_gfn = 0;
+
 	while (i < KVM_MEM_SLOTS_NUM - 1 &&
-	       new->npages < mslots[i + 1].npages) {
+	       new->base_gfn <= mslots[i + 1].base_gfn) {
+		if (!mslots[i + 1].npages)
+			break;
 		mslots[i] = mslots[i + 1];
 		slots->id_to_index[mslots[i].id] = i;
 		i++;
 	}
 	while (i > 0 &&
-	       new->npages > mslots[i - 1].npages) {
+	       new->base_gfn > mslots[i - 1].base_gfn) {
 		mslots[i] = mslots[i - 1];
 		slots->id_to_index[mslots[i].id] = i;
 		i--;
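
To illustrate what the new ordering buys, below is a minimal user-space
sketch of a GFN -> memslot binary search over an array kept in descending
base_gfn order, as update_memslots() now maintains it. This is an
illustration under stated assumptions, not KVM's actual lookup code:
struct memslot, gfn_to_memslot() and the used_slots parameter here are
simplified stand-ins invented for the example.

/* Hypothetical user-space sketch, NOT kernel code: simplified stand-ins
 * for KVM's memslot types, just enough to demonstrate the lookup. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct memslot {
	gfn_t base_gfn;
	unsigned long npages;
};

/*
 * Slots are sorted by base_gfn in descending order with empty slots at
 * the tail, so the first index whose base_gfn <= gfn can be found by
 * bisection; that slot either contains gfn or gfn falls in a hole.
 */
static struct memslot *gfn_to_memslot(struct memslot *slots, int used_slots,
				      gfn_t gfn)
{
	int start = 0, end = used_slots;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (gfn >= slots[mid].base_gfn)
			end = mid;	/* match is at mid or earlier */
		else
			start = mid + 1;
	}

	if (start < used_slots &&
	    gfn >= slots[start].base_gfn &&
	    gfn < slots[start].base_gfn + slots[start].npages)
		return &slots[start];
	return NULL;	/* gfn falls into a hole between slots */
}

int main(void)
{
	/* Descending base_gfn, as the new sorting rule maintains. */
	struct memslot slots[] = {
		{ .base_gfn = 0x100000, .npages = 0x800 },
		{ .base_gfn = 0x1000,   .npages = 0x100 },
		{ .base_gfn = 0x0,      .npages = 0x10  },
	};
	struct memslot *s = gfn_to_memslot(slots, 3, 0x1042);

	printf("0x1042 -> %s\n",
	       s ? "found slot at base_gfn 0x1000" : "no slot");
	return 0;
}

Two empty-slot details in the patch make this shape of lookup viable:
an empty slot has its base_gfn forced to 0 so it sorts past every live
slot, and the first loop breaks at an empty neighbour, so deleted slots
stay grouped at the tail and a lookup can bisect just the populated
prefix. That turns the previous linear scan into an O(log n) search in
the number of used slots.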