kvm: search_memslots: add simple LRU memslot caching
In a typical guest boot workload only 2-3 memslots are used extensively, and it is mostly the same memslot that is looked up over and over. Adding an LRU cache improves the average lookup time from 46 to 28 cycles (~40%) for this workload.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7f379cff11
commit d4ae84a02b
1 changed file with 10 additions and 2 deletions
include/linux/kvm_host.h
@@ -353,6 +353,7 @@ struct kvm_memslots {
 	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
 	/* The mapping table from slot id to the index in memslots[]. */
 	short id_to_index[KVM_MEM_SLOTS_NUM];
+	atomic_t lru_slot;
 };
 
 struct kvm {
@@ -790,13 +791,20 @@ static inline void kvm_guest_exit(void)
 static inline struct kvm_memory_slot *
 search_memslots(struct kvm_memslots *slots, gfn_t gfn)
 {
-	struct kvm_memory_slot *memslot;
+	int slot = atomic_read(&slots->lru_slot);
+	struct kvm_memory_slot *memslot = &slots->memslots[slot];
+
+	if (gfn >= memslot->base_gfn &&
+	    gfn < memslot->base_gfn + memslot->npages)
+		return memslot;
 
 	kvm_for_each_memslot(memslot, slots)
 		if (gfn >= memslot->base_gfn &&
-		      gfn < memslot->base_gfn + memslot->npages)
+		      gfn < memslot->base_gfn + memslot->npages) {
+			atomic_set(&slots->lru_slot, memslot - slots->memslots);
 			return memslot;
+		}
 
 	return NULL;
 }
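For illustration, a minimal userspace sketch of the same pattern follows; the names here (slot, slot_table, search_slots) are invented stand-ins for the kernel's kvm_memory_slot machinery, not the kernel code itself. One point worth noting: the cache needs no ordering guarantees, because a stale or racing lru_slot value only sends a lookup down the slow path. That is why the kernel's plain atomic_read()/atomic_set(), mirrored below by relaxed loads and stores, are sufficient.

/* lru_slot.c - illustrative sketch of an LRU-cached slot lookup */
#include <stdatomic.h>
#include <stdio.h>

#define NSLOTS 4

struct slot {
	unsigned long base;	/* first frame number covered by the slot */
	unsigned long npages;	/* number of frames in the slot */
};

struct slot_table {
	struct slot slots[NSLOTS];
	atomic_int lru_slot;	/* index of the most recent hit; a hint only */
};

static struct slot *search_slots(struct slot_table *t, unsigned long gfn)
{
	int hint = atomic_load_explicit(&t->lru_slot, memory_order_relaxed);
	struct slot *s = &t->slots[hint];

	/* Fast path: most lookups hit the same slot as the previous one. */
	if (gfn >= s->base && gfn < s->base + s->npages)
		return s;

	/* Slow path: linear scan; remember the hit for next time. */
	for (int i = 0; i < NSLOTS; i++) {
		s = &t->slots[i];
		if (gfn >= s->base && gfn < s->base + s->npages) {
			atomic_store_explicit(&t->lru_slot, i,
					      memory_order_relaxed);
			return s;
		}
	}
	return NULL;
}

int main(void)
{
	struct slot_table t = {
		.slots = {
			{ .base = 0x0,    .npages = 256  },
			{ .base = 0x100,  .npages = 256  },
			{ .base = 0x1000, .npages = 4096 },
			{ .base = 0x8000, .npages = 1024 },
		},
		.lru_slot = 0,
	};

	/* Repeated lookups in the same region take the fast path. */
	for (unsigned long gfn = 0x1000; gfn < 0x1010; gfn++)
		if (!search_slots(&t, gfn))
			printf("gfn %#lx unmapped\n", gfn);

	printf("cached slot index: %d\n", atomic_load(&t.lru_slot));
	return 0;
}

Built with cc -std=c11 lru_slot.c, this prints "cached slot index: 2": the first lookup misses the cached slot 0, scans to slot 2, and every later lookup in that range returns from the fast path.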