45eacc6927
While computing the cputime delta of dynticks CPUs, we are mixing up clocks of different natures: * local_clock() which takes care of unstable clock sources and fixes these if needed. * sched_clock() which is the weaker version of local_clock(). It doesn't compute any fixup in case of unstable source. If the clock source is stable, those two clocks are the same and we can safely compute the difference between two random points. Otherwise it results in random deltas as sched_clock() can randomly drift away, back or forward, from local_clock(). As a consequence, some strange behaviour with unstable tsc has been observed such as non-progressing constant zero cputime. (The 'top' command showing no load.) Fix this by only using local_clock(), or its irq safe/remote equivalent, in vtime code. Reported-by: Mike Galbraith <efault@gmx.de> Suggested-by: Mike Galbraith <efault@gmx.de> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Li Zhong <zhong@linux.vnet.ibm.com> Cc: Mike Galbraith <efault@gmx.de> Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@kernel.org>
69 lines
2.4 KiB
C
69 lines
2.4 KiB
C
#ifndef _LINUX_KERNEL_VTIME_H
|
|
#define _LINUX_KERNEL_VTIME_H
|
|
|
|
struct task_struct;
|
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
extern void vtime_task_switch(struct task_struct *prev);
|
|
extern void vtime_account_system(struct task_struct *tsk);
|
|
extern void vtime_account_idle(struct task_struct *tsk);
|
|
extern void vtime_account_user(struct task_struct *tsk);
|
|
extern void vtime_account_irq_enter(struct task_struct *tsk);
|
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
|
|
/*
 * With native vtime accounting (CONFIG_VIRT_CPU_ACCOUNTING_NATIVE),
 * the accounting hooks are wired in unconditionally, so this is
 * always true.
 */
static inline bool vtime_accounting_enabled(void)
{
	return true;
}
|
|
#endif
|
|
|
|
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
|
|
|
|
/*
 * !CONFIG_VIRT_CPU_ACCOUNTING: virtual cputime accounting is compiled
 * out entirely. All hooks collapse to empty inline stubs so that call
 * sites need no #ifdefs, and vtime_accounting_enabled() reports false.
 */
static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
static inline void vtime_account_user(struct task_struct *tsk) { }
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
static inline bool vtime_accounting_enabled(void) { return false; }
|
|
#endif
|
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
|
|
extern void arch_vtime_task_switch(struct task_struct *tsk);
|
|
extern void vtime_account_irq_exit(struct task_struct *tsk);
|
|
extern bool vtime_accounting_enabled(void);
|
|
extern void vtime_user_enter(struct task_struct *tsk);
|
|
/*
 * vtime_user_exit - leave userspace accounting mode for @tsk.
 *
 * Flushes the pending delta as user cputime via vtime_account_user()
 * before the kernel resumes accounting system time.
 */
static inline void vtime_user_exit(struct task_struct *tsk)
{
	vtime_account_user(tsk);
}
|
|
extern void vtime_guest_enter(struct task_struct *tsk);
|
|
extern void vtime_guest_exit(struct task_struct *tsk);
|
|
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
|
|
#else
|
|
/*
 * Without CONFIG_VIRT_CPU_ACCOUNTING_GEN there is no dedicated irq-exit
 * hook; fall back to plain system time accounting.
 */
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	/* On hard|softirq exit we always account to hard|softirq cputime */
	vtime_account_system(tsk);
}
|
|
/*
 * !CONFIG_VIRT_CPU_ACCOUNTING_GEN: the user/guest transition hooks and
 * idle-task initialization are no-ops.
 */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
|
|
#endif
|
|
|
|
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
|
|
extern void irqtime_account_irq(struct task_struct *tsk);
|
|
#else
|
|
/* IRQ time accounting disabled: nothing to record. */
static inline void irqtime_account_irq(struct task_struct *tsk)
{
}
|
|
#endif
|
|
|
|
/*
 * account_irq_enter_time - irq entry accounting hook.
 *
 * Dispatches to both the vtime and the irqtime backends; each call
 * compiles to an empty stub when its config option is disabled, so
 * callers never need #ifdefs.
 */
static inline void account_irq_enter_time(struct task_struct *tsk)
{
	vtime_account_irq_enter(tsk);
	irqtime_account_irq(tsk);
}
|
|
|
|
/*
 * account_irq_exit_time - irq exit accounting hook.
 *
 * Mirror of account_irq_enter_time(): notifies both the vtime and the
 * irqtime backends, each of which may be a no-op depending on config.
 */
static inline void account_irq_exit_time(struct task_struct *tsk)
{
	vtime_account_irq_exit(tsk);
	irqtime_account_irq(tsk);
}
|
|
|
|
#endif /* _LINUX_KERNEL_VTIME_H */
|