sched/fair: Remove rq's runnable avg
The current rq->avg is not used at all since its merge into the kernel,
and the code is in the scheduler's hot path, so remove it.

Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Yuyang Du <yuyang.du@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: arjan@linux.intel.com
Cc: bsegall@google.com
Cc: fengguang.wu@intel.com
Cc: len.brown@intel.com
Cc: morten.rasmussen@arm.com
Cc: pjt@google.com
Cc: rafael.j.wysocki@intel.com
Cc: umgwanakikbuti@gmail.com
Cc: vincent.guittot@linaro.org
Link: http://lkml.kernel.org/r/1436918682-4971-2-git-send-email-yuyang.du@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit cd126afe83
parent d308b9f1e4

3 changed files with 5 additions and 29 deletions
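For orientation before the hunks: the patch deletes the update_rq_runnable_avg() helper, its five hot-path call sites (enqueue, dequeue, idle enter/exit, and the scheduler tick), and the rq->avg field itself. The standalone C program below is a minimal sketch of that shape, not kernel code: the struct layouts and the update arithmetic are simplified placeholders, and only the names and the call-site pattern are taken from the hunks that follow.

/* Condensed sketch of what this patch removes (simplified stand-in types). */
#include <stdio.h>

struct sched_avg { unsigned long runnable_avg_sum, avg_period; };

struct rq {
	unsigned int nr_running;
	struct sched_avg avg;	/* the field deleted from struct rq below */
};

/* Stand-in for the helper deleted from kernel/sched/fair.c: every call
 * below sat on a scheduler hot path, updating an average nothing read.
 * The arithmetic here is a placeholder, not the real decaying average. */
static void update_rq_runnable_avg(struct rq *rq, int runnable)
{
	rq->avg.runnable_avg_sum += runnable;
	rq->avg.avg_period += 1;
}

static void enqueue_task_fair(struct rq *rq) { update_rq_runnable_avg(rq, rq->nr_running); rq->nr_running++; }
static void dequeue_task_fair(struct rq *rq) { rq->nr_running--; update_rq_runnable_avg(rq, 1); }
static void idle_enter_fair(struct rq *rq)   { update_rq_runnable_avg(rq, 1); }
static void idle_exit_fair(struct rq *rq)    { update_rq_runnable_avg(rq, 0); }
static void task_tick_fair(struct rq *rq)    { update_rq_runnable_avg(rq, 1); }

int main(void)
{
	struct rq rq = { 0 };

	/* The five hot-path call sites the hunks below delete: */
	enqueue_task_fair(&rq);
	task_tick_fair(&rq);
	dequeue_task_fair(&rq);
	idle_enter_fair(&rq);
	idle_exit_fair(&rq);

	/* rq.avg is written on every path above but never consumed. */
	printf("runnable_avg_sum=%lu avg_period=%lu (never read)\n",
	       rq.avg.runnable_avg_sum, rq.avg.avg_period);
	return 0;
}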
kernel/sched/debug.c
@@ -68,13 +68,8 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 #define PN(F) \
 	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
 
-	if (!se) {
-		struct sched_avg *avg = &cpu_rq(cpu)->avg;
-		P(avg->runnable_avg_sum);
-		P(avg->avg_period);
+	if (!se)
 		return;
-	}
-
 
 	PN(se->exec_start);
 	PN(se->vruntime);
kernel/sched/fair.c
@@ -2724,19 +2724,12 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 	}
 }
 
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
-{
-	__update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg,
-			runnable, runnable);
-	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
-}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 						 int force_update) {}
 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
 						  struct cfs_rq *cfs_rq) {}
 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline void __update_task_entity_contrib(struct sched_entity *se)
@@ -2940,7 +2933,6 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
  */
 void idle_enter_fair(struct rq *this_rq)
 {
-	update_rq_runnable_avg(this_rq, 1);
 }
 
 /*
@@ -2950,7 +2942,6 @@ void idle_enter_fair(struct rq *this_rq)
  */
 void idle_exit_fair(struct rq *this_rq)
 {
-	update_rq_runnable_avg(this_rq, 0);
 }
 
 static int idle_balance(struct rq *this_rq);
@@ -2959,7 +2950,6 @@ static int idle_balance(struct rq *this_rq);
 
 static inline void update_entity_load_avg(struct sched_entity *se,
 					  int update_cfs_rq) {}
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
 					   struct sched_entity *se,
 					   int wakeup) {}
@@ -4258,10 +4248,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		update_entity_load_avg(se, 1);
 	}
 
-	if (!se) {
-		update_rq_runnable_avg(rq, rq->nr_running);
+	if (!se)
 		add_nr_running(rq, 1);
-	}
+
 	hrtick_update(rq);
 }
 
@@ -4319,10 +4308,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		update_entity_load_avg(se, 1);
 	}
 
-	if (!se) {
+	if (!se)
 		sub_nr_running(rq, 1);
-		update_rq_runnable_avg(rq, 1);
-	}
+
 	hrtick_update(rq);
 }
 
@@ -6005,9 +5993,6 @@ static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
 		 */
 		if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
 			list_del_leaf_cfs_rq(cfs_rq);
-	} else {
-		struct rq *rq = rq_of(cfs_rq);
-		update_rq_runnable_avg(rq, rq->nr_running);
 	}
 }
 
@@ -7988,8 +7973,6 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 
 	if (numabalancing_enabled)
 		task_tick_numa(rq, curr);
-
-	update_rq_runnable_avg(rq, 1);
 }
 
 /*
kernel/sched/sched.h
@@ -595,8 +595,6 @@ struct rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
 	struct list_head leaf_cfs_rq_list;
-
-	struct sched_avg avg;
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 	/*