sched: optimize vruntime based scheduling
optimize vruntime based scheduling.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
parent bf5c91ba8c
commit 6cb5819514

2 changed files with 6 additions and 2 deletions
@@ -732,13 +732,14 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
 	lw->weight += inc;
-	lw->inv_weight = WMULT_CONST / lw->weight;
+	if (sched_feat(FAIR_SLEEPERS))
+		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 {
 	lw->weight -= dec;
-	if (likely(lw->weight))
+	if (sched_feat(FAIR_SLEEPERS) && likely(lw->weight))
 		lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
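The point of this hunk is that inv_weight, the cached reciprocal WMULT_CONST / weight, is only consumed by the sleeper-fairness math, so the division can be skipped whenever FAIR_SLEEPERS is off. The standalone C sketch below (not kernel code) illustrates the reciprocal-multiply trick; WMULT_CONST, WMULT_SHIFT, NICE_0_LOAD and calc_delta_weighted are assumed, illustrative values and names rather than the scheduler's exact definitions.

#include <stdio.h>
#include <stdint.h>

#define WMULT_SHIFT	32
#define WMULT_CONST	(1ULL << WMULT_SHIFT)	/* assumed value for the sketch */
#define NICE_0_LOAD	1024			/* weight of a nice-0 task */

struct load_weight {
	uint64_t weight;	/* the kernel uses unsigned long here */
	uint64_t inv_weight;	/* cached WMULT_CONST / weight */
};

/* delta_exec * (NICE_0_LOAD / weight), with the division replaced by a
 * multiply by the cached reciprocal and a shift. */
static uint64_t calc_delta_weighted(uint64_t delta_exec, const struct load_weight *lw)
{
	return (delta_exec * NICE_0_LOAD * lw->inv_weight) >> WMULT_SHIFT;
}

int main(void)
{
	struct load_weight lw = { .weight = 3121 };	/* weight of a nice -5 task */
	uint64_t delta_exec = 4000000;			/* 4 ms of CPU time, in ns */

	/* This is the division the patch makes conditional on FAIR_SLEEPERS. */
	lw.inv_weight = WMULT_CONST / lw.weight;

	printf("reciprocal: %llu ns, exact division: %llu ns\n",
	       (unsigned long long)calc_delta_weighted(delta_exec, &lw),
	       (unsigned long long)(delta_exec * NICE_0_LOAD / lw.weight));
	return 0;
}

With vruntime-based scheduling the reciprocal is not needed on this path, so update_load_add() and update_load_sub() no longer pay for the division on every enqueue/dequeue unless the sleeper-fairness feature actually uses it.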
@@ -336,6 +336,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	}
 	curr->vruntime += delta_exec_weighted;
 
+	if (!sched_feat(FAIR_SLEEPERS))
+		return;
+
 	if (unlikely(!load))
 		return;
 
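This hunk keeps the cheap part of __update_curr() unconditional (vruntime always advances by the weighted execution time) and returns early before the sleeper-fairness bookkeeping when the feature bit is clear. Below is a userspace sketch of that control flow; fair_sleepers_enabled, fair_clock and update_curr_sketch are placeholder names standing in for the real scheduler state, not the kernel's code.

#include <stdio.h>
#include <stdint.h>

static int fair_sleepers_enabled;	/* stand-in for sched_feat(FAIR_SLEEPERS) */

struct cfs_rq_sketch {
	unsigned long load;	/* total queue load; 0 means nothing queued */
	uint64_t fair_clock;	/* bookkeeping only needed for sleeper fairness */
};

struct entity_sketch {
	uint64_t vruntime;
};

static void update_curr_sketch(struct cfs_rq_sketch *cfs_rq,
			       struct entity_sketch *curr,
			       uint64_t delta_exec_weighted)
{
	/* The fast path always runs: virtual runtime keeps advancing. */
	curr->vruntime += delta_exec_weighted;

	/* Mirrors the added "if (!sched_feat(FAIR_SLEEPERS)) return;". */
	if (!fair_sleepers_enabled)
		return;

	/* Mirrors the existing "if (unlikely(!load)) return;". */
	if (!cfs_rq->load)
		return;

	/* Placeholder for the sleeper-fairness accounting that follows in
	 * the real function. */
	cfs_rq->fair_clock += delta_exec_weighted / cfs_rq->load;
}

int main(void)
{
	struct cfs_rq_sketch rq = { .load = 2048 };
	struct entity_sketch se = { .vruntime = 0 };

	update_curr_sketch(&rq, &se, 1312400);	/* feature off: only vruntime moves */

	fair_sleepers_enabled = 1;
	update_curr_sketch(&rq, &se, 1312400);	/* feature on: fair_clock moves too */

	printf("vruntime=%llu fair_clock=%llu\n",
	       (unsigned long long)se.vruntime,
	       (unsigned long long)rq.fair_clock);
	return 0;
}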