KVM: X86: Let kvm-clock report the right tsc frequency

This patch changes the kvm_guest_time_update function to use the
TSC frequency the guest actually has when updating its clock.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
Joerg Roedel 2011-03-25 09:44:47 +01:00 committed by Avi Kivity
parent fbc0db76b7
commit 1e993611d0
2 changed files with 18 additions and 13 deletions

View file

@@ -396,7 +396,10 @@ struct kvm_vcpu_arch {
 	u64 last_kernel_ns;
 	u64 last_tsc_nsec;
 	u64 last_tsc_write;
+	u32 virtual_tsc_khz;
 	bool tsc_catchup;
+	u32  tsc_catchup_mult;
+	s8   tsc_catchup_shift;
 
 	bool nmi_pending;
 	bool nmi_injected;
@@ -466,9 +469,6 @@ struct kvm_arch {
 	u64 last_tsc_nsec;
 	u64 last_tsc_offset;
 	u64 last_tsc_write;
-	u32 virtual_tsc_khz;
-	u32 virtual_tsc_mult;
-	s8 virtual_tsc_shift;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 

View file

@@ -969,6 +969,14 @@ static inline int kvm_tsc_changes_freq(void)
 	return ret;
 }
 
+static u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.virtual_tsc_khz)
+		return vcpu->arch.virtual_tsc_khz;
+	else
+		return __this_cpu_read(cpu_tsc_khz);
+}
+
 static inline u64 nsec_to_cycles(u64 nsec)
 {
 	u64 ret;
@@ -982,20 +990,19 @@ static inline u64 nsec_to_cycles(u64 nsec)
 	return ret;
 }
 
-static void kvm_arch_set_tsc_khz(struct kvm *kvm, u32 this_tsc_khz)
+static void kvm_init_tsc_catchup(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 {
 	/* Compute a scale to convert nanoseconds in TSC cycles */
 	kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
-			   &kvm->arch.virtual_tsc_shift,
-			   &kvm->arch.virtual_tsc_mult);
-	kvm->arch.virtual_tsc_khz = this_tsc_khz;
+			   &vcpu->arch.tsc_catchup_shift,
+			   &vcpu->arch.tsc_catchup_mult);
 }
 
 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 {
 	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec,
-				      vcpu->kvm->arch.virtual_tsc_mult,
-				      vcpu->kvm->arch.virtual_tsc_shift);
+				      vcpu->arch.tsc_catchup_mult,
+				      vcpu->arch.tsc_catchup_shift);
 	tsc += vcpu->arch.last_tsc_write;
 	return tsc;
 }
@@ -1062,8 +1069,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	local_irq_save(flags);
 	kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
 	kernel_ns = get_kernel_ns();
-	this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
-
+	this_tsc_khz = vcpu_tsc_khz(v);
 	if (unlikely(this_tsc_khz == 0)) {
 		local_irq_restore(flags);
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
@@ -6060,8 +6066,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	}
 	vcpu->arch.pio_data = page_address(page);
 
-	if (!kvm->arch.virtual_tsc_khz)
-		kvm_arch_set_tsc_khz(kvm, max_tsc_khz);
+	kvm_init_tsc_catchup(vcpu, max_tsc_khz);
 
 	r = kvm_mmu_create(vcpu);
 	if (r < 0)