kvm: Prepare to add generic guest entry/exit callbacks
Do some ground preparatory work before adding guest_enter() and guest_exit()
context tracking callbacks. Those will later be used to read the guest cputime
safely when we run in full dynticks mode.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Namhyung Kim <namhyung.kim@lge.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
commit c11f11fcbd
parent 6fac4829ce

5 changed files with 32 additions and 21 deletions
@@ -136,6 +136,7 @@ void vtime_account_system(struct task_struct *tsk)
 
 	account_system_time(tsk, 0, delta, delta);
 }
+EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {

@@ -347,6 +347,7 @@ void vtime_account_system(struct task_struct *tsk)
 	if (stolen)
 		account_steal_time(stolen);
 }
+EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {

@@ -22,6 +22,7 @@
 #include <linux/rcupdate.h>
 #include <linux/ratelimit.h>
 #include <linux/err.h>
+#include <linux/irqflags.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>

@@ -740,15 +741,36 @@ static inline int kvm_deassign_device(struct kvm *kvm,
 }
 #endif /* CONFIG_IOMMU_API */
 
-static inline void kvm_guest_enter(void)
+static inline void guest_enter(void)
 {
-	BUG_ON(preemptible());
 	/*
 	 * This is running in ioctl context so we can avoid
 	 * the call to vtime_account() with its unnecessary idle check.
 	 */
-	vtime_account_system_irqsafe(current);
+	vtime_account_system(current);
 	current->flags |= PF_VCPU;
+}
+
+static inline void guest_exit(void)
+{
+	/*
+	 * This is running in ioctl context so we can avoid
+	 * the call to vtime_account() with its unnecessary idle check.
+	 */
+	vtime_account_system(current);
+	current->flags &= ~PF_VCPU;
+}
+
+static inline void kvm_guest_enter(void)
+{
+	unsigned long flags;
+
+	BUG_ON(preemptible());
+
+	local_irq_save(flags);
+	guest_enter();
+	local_irq_restore(flags);
+
 	/* KVM does not hold any references to rcu protected data when it
 	 * switches CPU into a guest mode. In fact switching to a guest mode
 	 * is very similar to exiting to userspase from rcu point of view. In
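
For orientation, the sketch below (not part of this patch) shows where these helpers sit: the vcpu ioctl path brackets the actual guest run with kvm_guest_enter()/kvm_guest_exit(), so the accounting calls above fire once per guest entry and exit. It also explains the two EXPORT_SYMBOL_GPL(vtime_account_system) additions earlier in this diff: these inline helpers live in a header included by modular KVM code and now call vtime_account_system() directly instead of the previously exported irqsafe wrapper. The names vcpu_run_sketch() and arch_run_guest() are illustrative, not kernel APIs.

/* Illustrative sketch only -- vcpu_run_sketch() and arch_run_guest() are
 * made-up names, not kernel code. It shows the intended call pattern for
 * the wrappers defined above.
 */
static int vcpu_run_sketch(struct kvm_vcpu *vcpu)
{
	int ret;

	preempt_disable();		/* kvm_guest_enter() checks !preemptible() */
	kvm_guest_enter();		/* account host system time so far, set PF_VCPU */

	ret = arch_run_guest(vcpu);	/* hypothetical hook that enters the guest */

	kvm_guest_exit();		/* account the guest run, clear PF_VCPU */
	preempt_enable();

	return ret;
}

Note that the interrupt disabling now lives in the kvm_guest_enter()/kvm_guest_exit() wrappers rather than in the accounting call itself, which keeps the inner guest_enter()/guest_exit() cheap for the later context tracking users mentioned in the changelog.
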
@@ -761,12 +783,11 @@ static inline void kvm_guest_enter(void)
 
 static inline void kvm_guest_exit(void)
 {
-	/*
-	 * This is running in ioctl context so we can avoid
-	 * the call to vtime_account() with its unnecessary idle check.
-	 */
-	vtime_account_system_irqsafe(current);
-	current->flags &= ~PF_VCPU;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	guest_exit();
+	local_irq_restore(flags);
 }
 
 /*

@@ -6,7 +6,6 @@ struct task_struct;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void vtime_task_switch(struct task_struct *prev);
 extern void vtime_account_system(struct task_struct *tsk);
-extern void vtime_account_system_irqsafe(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
 extern void vtime_account_user(struct task_struct *tsk);
 extern void vtime_account(struct task_struct *tsk);

@@ -20,7 +19,6 @@ static inline bool vtime_accounting_enabled(void) { return true; }
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_system(struct task_struct *tsk) { }
-static inline void vtime_account_system_irqsafe(struct task_struct *tsk) { }
 static inline void vtime_account_user(struct task_struct *tsk) { }
 static inline void vtime_account(struct task_struct *tsk) { }
 static inline bool vtime_accounting_enabled(void) { return false; }
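
A side effect of the stub layout above (already present before this patch, shown here only for illustration): callers never need an #ifdef around the accounting call. A minimal sketch, with guest_enter_sketch() as a hypothetical name:

/* Hypothetical caller: when CONFIG_VIRT_CPU_ACCOUNTING is off, the empty
 * inline stub makes the accounting call compile away and only the flag
 * update remains.
 */
static inline void guest_enter_sketch(void)
{
	vtime_account_system(current);	/* no-op without CONFIG_VIRT_CPU_ACCOUNTING */
	current->flags |= PF_VCPU;
}
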
@@ -465,16 +465,6 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
 	*st = cputime.stime;
 }
 
-void vtime_account_system_irqsafe(struct task_struct *tsk)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	vtime_account_system(tsk);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(vtime_account_system_irqsafe);
-
 #ifndef __ARCH_HAS_VTIME_TASK_SWITCH
 void vtime_task_switch(struct task_struct *prev)
 {
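
With vtime_account_system_irqsafe() removed, a caller that cannot guarantee interrupts are already disabled is expected to open-code the protection, exactly as the reworked kvm_guest_enter()/kvm_guest_exit() wrappers do. A minimal sketch (the function name is illustrative, not kernel code):

/* Open-coded replacement for the removed helper: disable local interrupts
 * around the plain vtime_account_system() call.
 */
static void account_system_irqsafe_sketch(struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	vtime_account_system(tsk);
	local_irq_restore(flags);
}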