posix-cpu-timers: Optimize run_posix_cpu_timers()
We can optimize and simplify things by taking into account that signal->cputimer is always running when any process-wide CPU timer is configured. In check_process_timers() we do not have to check whether the newly updated value of signal->cputime_expires is smaller, since we maintain the new first expiration time ({prof,virt,sched}_expires) in the code flow, and all other writes to the expiration cache are protected by sighand->siglock.

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent dbb6be6d5e
commit 29f87b793d
1 changed file with 29 additions and 51 deletions
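Before the diff itself, here is a minimal, self-contained C model of the invariant the patch exploits (a sketch only; struct group_cputimer, arm_process_timer(), scan_process_timers(), run_tick() and the main() scenario are illustrative names, not the kernel's): arming any process-wide timer also marks the shared group cputimer as running, so the per-tick path can use that single flag as a cheap gate and skip the process-wide scan entirely whenever it is clear.

/*
 * Simplified model, not kernel code: a process-wide timer can only be
 * armed through arm_process_timer(), which also marks the shared
 * cputimer as running.  The per-tick path therefore only needs to look
 * at the "running" flag before doing the expensive process-wide scan.
 */
#include <stdbool.h>
#include <stdio.h>

struct group_cputimer {
        bool running;                 /* set while any process-wide timer is armed */
        unsigned long long expires;   /* earliest expiration, 0 == none armed */
};

static struct group_cputimer cputimer;

/* Arming a process-wide timer always starts the group cputimer. */
static void arm_process_timer(unsigned long long expires)
{
        cputimer.running = true;
        if (cputimer.expires == 0 || expires < cputimer.expires)
                cputimer.expires = expires;
}

/* Expensive scan, analogous in spirit to check_process_timers(). */
static void scan_process_timers(unsigned long long now)
{
        if (cputimer.expires && now >= cputimer.expires) {
                printf("process-wide timer fired at %llu\n", now);
                cputimer.expires = 0;
                cputimer.running = false;   /* analogous to stop_process_timers() */
        }
}

/* Per-tick fast path, analogous in spirit to run_posix_cpu_timers(). */
static void run_tick(unsigned long long now)
{
        /* running is clear => no process-wide timers, skip the scan entirely */
        if (cputimer.running)
                scan_process_timers(now);
}

int main(void)
{
        run_tick(1);                  /* nothing armed, scan skipped */
        arm_process_timer(5);
        run_tick(3);                  /* armed but not yet expired */
        run_tick(7);                  /* fires, cputimer stops again */
        run_tick(9);                  /* scan skipped again */
        return 0;
}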
kernel/posix-cpu-timers.c
@@ -1002,16 +1002,9 @@ static void stop_process_timers(struct signal_struct *sig)
         struct thread_group_cputimer *cputimer = &sig->cputimer;
         unsigned long flags;
 
-        if (!cputimer->running)
-                return;
-
         spin_lock_irqsave(&cputimer->lock, flags);
         cputimer->running = 0;
         spin_unlock_irqrestore(&cputimer->lock, flags);
-
-        sig->cputime_expires.prof_exp = cputime_zero;
-        sig->cputime_expires.virt_exp = cputime_zero;
-        sig->cputime_expires.sched_exp = 0;
 }
 
 static u32 onecputick;
@@ -1048,6 +1041,23 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
         }
 }
 
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime:    The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero.  Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+        if (cputime_eq(cputime->utime, cputime_zero) &&
+            cputime_eq(cputime->stime, cputime_zero) &&
+            cputime->sum_exec_runtime == 0)
+                return 1;
+        return 0;
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list.  Per-thread timers
@@ -1064,19 +1074,6 @@ static void check_process_timers(struct task_struct *tsk,
         struct task_cputime cputime;
         unsigned long soft;
 
-        /*
-         * Don't sample the current process CPU clocks if there are no timers.
-         */
-        if (list_empty(&timers[CPUCLOCK_PROF]) &&
-            cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
-            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
-            list_empty(&timers[CPUCLOCK_VIRT]) &&
-            cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
-            list_empty(&timers[CPUCLOCK_SCHED])) {
-                stop_process_timers(sig);
-                return;
-        }
-
         /*
          * Collect the current process totals.
          */
@@ -1166,18 +1163,11 @@ static void check_process_timers(struct task_struct *tsk,
                 }
         }
 
-        if (!cputime_eq(prof_expires, cputime_zero) &&
-            (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
-             cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
-                sig->cputime_expires.prof_exp = prof_expires;
-        if (!cputime_eq(virt_expires, cputime_zero) &&
-            (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
-             cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
-                sig->cputime_expires.virt_exp = virt_expires;
-        if (sched_expires != 0 &&
-            (sig->cputime_expires.sched_exp == 0 ||
-             sig->cputime_expires.sched_exp > sched_expires))
-                sig->cputime_expires.sched_exp = sched_expires;
+        sig->cputime_expires.prof_exp = prof_expires;
+        sig->cputime_expires.virt_exp = virt_expires;
+        sig->cputime_expires.sched_exp = sched_expires;
+        if (task_cputime_zero(&sig->cputime_expires))
+                stop_process_timers(sig);
 }
 
 /*
@@ -1249,23 +1239,6 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
         ++timer->it_requeue_pending;
 }
 
-/**
- * task_cputime_zero - Check a task_cputime struct for all zero fields.
- *
- * @cputime:    The struct to compare.
- *
- * Checks @cputime to see if all fields are zero.  Returns true if all fields
- * are zero, false if any field is nonzero.
- */
-static inline int task_cputime_zero(const struct task_cputime *cputime)
-{
-        if (cputime_eq(cputime->utime, cputime_zero) &&
-            cputime_eq(cputime->stime, cputime_zero) &&
-            cputime->sum_exec_runtime == 0)
-                return 1;
-        return 0;
-}
-
 /**
  * task_cputime_expired - Compare two task_cputime entities.
  *
@@ -1322,7 +1295,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
         }
 
         sig = tsk->signal;
-        if (!task_cputime_zero(&sig->cputime_expires)) {
+        if (sig->cputimer.running) {
                 struct task_cputime group_sample;
 
                 thread_group_cputimer(tsk, &group_sample);
@@ -1359,7 +1332,12 @@ void run_posix_cpu_timers(struct task_struct *tsk)
          * put them on the firing list.
          */
         check_thread_timers(tsk, &firing);
-        check_process_timers(tsk, &firing);
+        /*
+         * If there are any active process wide timers (POSIX 1.b, itimers,
+         * RLIMIT_CPU) cputimer must be running.
+         */
+        if (tsk->signal->cputimer.running)
+                check_process_timers(tsk, &firing);
 
         /*
          * We must release these locks before taking any timer's lock.