cputime: Rename thread_group_times to thread_group_cputime_adjusted
We have thread_group_cputime() and thread_group_times(). The naming doesn't provide enough information about the difference between these two APIs.

To lower the confusion, rename thread_group_times() to thread_group_cputime_adjusted(). This name better suggests that it's a version of thread_group_cputime() that performs some stabilization on the raw cputime values: namely, it scales them on top of the CFS runtime stats and bounds the lower value for monotonicity.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
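As a rough illustration of the "adjustment" described above, the standalone userspace sketch below mimics the two steps the changelog names: split the precise CFS runtime in the raw utime:stime ratio, then bound the results so that repeated reads never go backwards. This is not the kernel code; identifiers such as adj_state, prev_utime and cputime_adjusted are made up for the example, and the kernel's scale_utime() additionally guards the multiplication against overflow, which is skipped here.

/* Illustrative only: plain C stand-in for the cputime "adjustment". */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct adj_state {                      /* stand-in for the task's prev_utime/prev_stime */
        uint64_t prev_utime;
        uint64_t prev_stime;
};

static uint64_t max_u64(uint64_t a, uint64_t b)
{
        return a > b ? a : b;
}

/* Scale raw tick-based utime/stime against the precise runtime rtime,
 * then keep the reported values monotonic across successive calls. */
static void cputime_adjusted(struct adj_state *s, uint64_t utime,
                             uint64_t stime, uint64_t rtime,
                             uint64_t *ut, uint64_t *st)
{
        uint64_t total = utime + stime;
        uint64_t scaled_utime;

        if (total)
                scaled_utime = rtime * utime / total;   /* overflow handling omitted */
        else
                scaled_utime = rtime;   /* no samples yet: attribute all runtime to user */

        s->prev_utime = max_u64(s->prev_utime, scaled_utime);
        s->prev_stime = max_u64(s->prev_stime,
                                rtime > s->prev_utime ? rtime - s->prev_utime : 0);

        *ut = s->prev_utime;
        *st = s->prev_stime;
}

int main(void)
{
        struct adj_state s = { 0, 0 };
        uint64_t ut, st;

        /* Raw ticks say 30:10 user:system, but CFS measured 48 units of runtime. */
        cputime_adjusted(&s, 30, 10, 48, &ut, &st);
        printf("utime=%" PRIu64 " stime=%" PRIu64 "\n", ut, st);       /* utime=36 stime=12 */
        return 0;
}

Read this way, the new names are easier to parse: thread_group_cputime() returns the raw sums, while the _adjusted variants return these stabilized values.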
parent a634f93335
commit e80d0a1ae8
5 changed files with 13 additions and 13 deletions
@@ -438,7 +438,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
                         min_flt += sig->min_flt;
                         maj_flt += sig->maj_flt;
-                        thread_group_times(task, &utime, &stime);
+                        thread_group_cputime_adjusted(task, &utime, &stime);
                         gtime += sig->gtime;
                 }
 
@@ -454,7 +454,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
         if (!whole) {
                 min_flt = task->min_flt;
                 maj_flt = task->maj_flt;
-                task_times(task, &utime, &stime);
+                task_cputime_adjusted(task, &utime, &stime);
                 gtime = task->gtime;
         }
 
@@ -1751,8 +1751,8 @@ static inline void put_task_struct(struct task_struct *t)
         __put_task_struct(t);
 }
 
-extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
-extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
 /*
  * Per process flags
@@ -1186,11 +1186,11 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                  * as other threads in the parent group can be right
                  * here reaping other children at the same time.
                  *
-                 * We use thread_group_times() to get times for the thread
+                 * We use thread_group_cputime_adjusted() to get times for the thread
                  * group, which consolidates times for all threads in the
                  * group including the group leader.
                  */
-                thread_group_times(p, &tgutime, &tgstime);
+                thread_group_cputime_adjusted(p, &tgutime, &tgstime);
                 spin_lock_irq(&p->real_parent->sighand->siglock);
                 psig = p->real_parent->signal;
                 sig = p->signal;
@@ -445,13 +445,13 @@ void account_idle_ticks(unsigned long ticks)
  * Use precise platform statistics if available:
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
         *ut = p->utime;
         *st = p->stime;
 }
 
-void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
         struct task_cputime cputime;
 
@@ -516,7 +516,7 @@ static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
         return (__force cputime_t) temp;
 }
 
-void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
         cputime_t rtime, utime = p->utime, total = utime + p->stime;
 
@@ -543,7 +543,7 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 /*
  * Must be called with siglock held.
  */
-void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
         struct signal_struct *sig = p->signal;
         struct task_cputime cputime;
@@ -1046,7 +1046,7 @@ void do_sys_times(struct tms *tms)
         cputime_t tgutime, tgstime, cutime, cstime;
 
         spin_lock_irq(&current->sighand->siglock);
-        thread_group_times(current, &tgutime, &tgstime);
+        thread_group_cputime_adjusted(current, &tgutime, &tgstime);
         cutime = current->signal->cutime;
         cstime = current->signal->cstime;
         spin_unlock_irq(&current->sighand->siglock);
@@ -1704,7 +1704,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
         utime = stime = 0;
 
         if (who == RUSAGE_THREAD) {
-                task_times(current, &utime, &stime);
+                task_cputime_adjusted(current, &utime, &stime);
                 accumulate_thread_rusage(p, r);
                 maxrss = p->signal->maxrss;
                 goto out;
@@ -1730,7 +1730,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
                         break;
 
         case RUSAGE_SELF:
-                thread_group_times(p, &tgutime, &tgstime);
+                thread_group_cputime_adjusted(p, &tgutime, &tgstime);
                 utime += tgutime;
                 stime += tgstime;
                 r->ru_nvcsw += p->signal->nvcsw;