sched: Maintain per-rq runnable averages

Since runqueues do not have a corresponding sched_entity, we instead embed a
sched_avg structure directly into struct rq.
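
The rq-level average reuses the machinery the previous patch in this series
(9d85f21c94) introduced for entities: a sched_avg accumulates decayed
time-runnable against decayed time-observed, and the rq now simply embeds one
of its own. As a rough sketch (fields abridged from that patch):

        struct sched_avg {
                u32 runnable_avg_sum, runnable_avg_period;
                u64 last_runnable_update;
        };

        /* entity case, as before */
        __update_entity_runnable_avg(clock_task, &se->avg, se->on_rq);

        /* rq case, this patch: the same helper, pointed at rq->avg */
        __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);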

Signed-off-by: Ben Segall <bsegall@google.com>
Reviewed-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20120823141506.442637130@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 18bf2805d9
parent 9d85f21c94
Ben Segall, 2012-10-04 12:51:20 +02:00, committed by Ingo Molnar
3 changed files with 26 additions and 4 deletions

--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c

@@ -61,14 +61,20 @@ static unsigned long nsec_low(unsigned long long nsec)
 static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
 {
         struct sched_entity *se = tg->se[cpu];
-        if (!se)
-                return;
 
 #define P(F) \
         SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
 #define PN(F) \
         SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
 
+        if (!se) {
+                struct sched_avg *avg = &cpu_rq(cpu)->avg;
+                P(avg->runnable_avg_sum);
+                P(avg->runnable_avg_period);
+                return;
+        }
+
+
         PN(se->exec_start);
         PN(se->vruntime);
         PN(se->sum_exec_runtime);
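
Given the P() format above, a task_group with no per-cpu entity on this cpu
would now report the rq-wide figures in /proc/sched_debug roughly as follows
(values purely illustrative):

  .avg->runnable_avg_sum         : 34720
  .avg->runnable_avg_period      : 47742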

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c

@@ -1087,8 +1087,14 @@ static inline void update_entity_load_avg(struct sched_entity *se)
         __update_entity_runnable_avg(rq_of(cfs_rq_of(se))->clock_task, &se->avg,
                                      se->on_rq);
 }
+
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
+{
+        __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
+}
 #else
 static inline void update_entity_load_avg(struct sched_entity *se) {}
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
 #endif
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -2340,8 +2346,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 update_cfs_shares(cfs_rq);
         }
 
-        if (!se)
+        if (!se) {
+                update_rq_runnable_avg(rq, rq->nr_running);
                 inc_nr_running(rq);
+        }
         hrtick_update(rq);
 }
 
@@ -2399,8 +2407,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 update_cfs_shares(cfs_rq);
         }
 
-        if (!se)
+        if (!se) {
                 dec_nr_running(rq);
+                update_rq_runnable_avg(rq, 1);
+        }
         hrtick_update(rq);
 }
 
@@ -4586,6 +4596,8 @@ void idle_balance(int this_cpu, struct rq *this_rq)
         if (this_rq->avg_idle < sysctl_sched_migration_cost)
                 return;
 
+        update_rq_runnable_avg(this_rq, 1);
+
         /*
          * Drop the rq->lock, but keep IRQ/preempt disabled.
          */
@@ -5083,6 +5095,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
                 cfs_rq = cfs_rq_of(se);
                 entity_tick(cfs_rq, se, queued);
         }
+
+        update_rq_runnable_avg(rq, 1);
 }
 
 /*

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h

@@ -467,6 +467,8 @@ struct rq {
 #ifdef CONFIG_SMP
         struct llist_head wake_list;
 #endif
+
+        struct sched_avg avg;
 };
 
 static inline int cpu_of(struct rq *rq)
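
For intuition, a small userspace model of how the two counters evolve
(illustrative only: it consumes whole 1024us periods with per-period decay y
where y^32 ~= 1/2, approximated here as 1002/1024; the kernel code also
carries sub-period remainders and uses precomputed decay tables):

        #include <stdint.h>
        #include <stdio.h>

        struct avg_model {
                uint32_t runnable_avg_sum;    /* decayed us spent runnable   */
                uint32_t runnable_avg_period; /* decayed us observed overall */
                uint64_t last_update_us;
        };

        static void update_avg(struct avg_model *a, uint64_t now_us, int runnable)
        {
                uint64_t delta = now_us - a->last_update_us;

                a->last_update_us = now_us;
                while (delta >= 1024) {        /* consume whole periods */
                        a->runnable_avg_sum =
                                (uint64_t)a->runnable_avg_sum * 1002 / 1024;
                        a->runnable_avg_period =
                                (uint64_t)a->runnable_avg_period * 1002 / 1024;
                        if (runnable)
                                a->runnable_avg_sum += 1024;
                        a->runnable_avg_period += 1024;
                        delta -= 1024;
                }
        }

        int main(void)
        {
                struct avg_model a = { 0 };
                uint64_t t = 0;

                update_avg(&a, t += 100000, 1); /* ~100ms busy */
                update_avg(&a, t += 100000, 0); /* ~100ms idle */
                printf("sum=%u period=%u (~%u%% runnable)\n",
                       a.runnable_avg_sum, a.runnable_avg_period,
                       100 * a.runnable_avg_sum / a.runnable_avg_period);
                return 0;
        }

Note that both sums saturate near 1024/(1 - y), about 47,000 in this model,
which is why a fully loaded rq's runnable_avg_sum approaches its
runnable_avg_period rather than growing without bound.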