Merge branch 'sched' into sched-devel

Conflicts:

	kernel/sched_rt.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar 2008-06-19 09:09:15 +02:00
commit 1cdad71537
2 changed files with 40 additions and 30 deletions

kernel/sched.c

@@ -7697,7 +7697,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
@@ -8420,7 +8419,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {

kernel/sched_rt.c

@@ -571,14 +571,20 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	if (group_rq && rt_rq_throttled(group_rq))
+	/*
+	 * Don't enqueue the group if its throttled, or when empty.
+	 * The latter is a consequence of the former when a child group
+	 * get throttled and the current group doesn't have any other
+	 * active members.
+	 */
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
 	if (rt_se->nr_cpus_allowed == 1)
@@ -591,7 +597,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -607,11 +613,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-	struct sched_rt_entity *rt_se, *back = NULL;
+	struct sched_rt_entity *back = NULL;
 
-	rt_se = &p->rt;
 	for_each_sched_rt_entity(rt_se) {
 		rt_se->back = back;
 		back = rt_se;
@@ -619,7 +624,26 @@ static void dequeue_rt_stack(struct task_struct *p)
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se);
+	}
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+	for_each_sched_rt_entity(rt_se)
+		__enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+
+	for_each_sched_rt_entity(rt_se) {
+		struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+		if (rt_rq && rt_rq->rt_nr_running)
+			__enqueue_rt_entity(rt_se);
 	}
 }
@@ -633,32 +657,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	dequeue_rt_stack(p);
-
-	/*
-	 * enqueue everybody, bottom - up.
-	 */
-	for_each_sched_rt_entity(rt_se)
-		enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq;
 
 	update_curr_rt(rq);
-
-	dequeue_rt_stack(p);
-
-	/*
-	 * re-enqueue all non-empty rt_rq entities.
-	 */
-	for_each_sched_rt_entity(rt_se) {
-		rt_rq = group_rt_rq(rt_se);
-		if (rt_rq && rt_rq->rt_nr_running)
-			enqueue_rt_entity(rt_se);
-	}
+	dequeue_rt_entity(rt_se);
 }
 
 /*
@@ -669,9 +676,13 @@ static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
 	struct rt_prio_array *array = &rt_rq->active;
-	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	list_del_init(&rt_se->run_list);
-	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+	if (on_rt_rq(rt_se)) {
+		list_del_init(&rt_se->run_list);
+		list_add_tail(&rt_se->run_list,
+			      array->queue + rt_se_prio(rt_se));
+	}
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
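
Reader's note on the resulting structure (not part of the commit itself): after this merge, the task-level enqueue_task_rt()/dequeue_task_rt() paths call a single enqueue_rt_entity()/dequeue_rt_entity(), and those functions own the hierarchy walk: first tear the entity stack down top-down via dequeue_rt_stack(), then rebuild the levels bottom-up, re-adding only group levels that still have runnable members. A minimal standalone sketch of that walk follows, using simplified stand-in types rather than the real struct sched_rt_entity/struct rt_rq:

#include <stddef.h>

/* Simplified stand-in for struct sched_rt_entity; the real kernel type
 * carries run lists, priorities and a per-group runqueue. */
struct entity {
        struct entity *parent;          /* like rt_se->parent */
        struct entity *back;            /* scratch pointer, like rt_se->back */
        int on_rq;                      /* like on_rt_rq(rt_se) */
        int group_nr_running;           /* like group_rq->rt_nr_running */
};

/* Walk from a task's entity up to the root, like for_each_sched_rt_entity(). */
#define for_each_entity(e)      for (; (e); (e) = (e)->parent)

/* Drop the whole stack top-down: record the path bottom-up first,
 * then walk it back and dequeue every level that is actually queued. */
static void dequeue_stack(struct entity *e)
{
        struct entity *back = NULL;

        for_each_entity(e) {
                e->back = back;
                back = e;
        }
        for (e = back; e; e = e->back) {
                if (e->on_rq)
                        e->on_rq = 0;   /* __dequeue_rt_entity() in the commit */
        }
}

/* Enqueue: clear the stack, then re-add every level bottom-up. */
static void enqueue_entity(struct entity *e)
{
        dequeue_stack(e);
        for_each_entity(e)
                e->on_rq = 1;           /* __enqueue_rt_entity() in the commit */
}

/* Dequeue: clear the stack, then re-add only the levels whose group
 * still has runnable members, mirroring the new dequeue_rt_entity(). */
static void dequeue_entity(struct entity *e)
{
        dequeue_stack(e);
        for_each_entity(e) {
                if (e->group_nr_running)
                        e->on_rq = 1;
        }
}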