sched: clean up this_rq use in kernel/sched_rt.c

"this_rq" is normally used to denote the RQ on the current cpu
(i.e. "cpu_rq(this_cpu)").  So clean up the usage of this_rq to be
more consistent with the rest of the code.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author:    Gregory Haskins <ghaskins@novell.com>
Date:      2008-01-25 21:08:09 +01:00
Committer: Ingo Molnar
parent 73fe6aae84
commit 697f0a487f

@@ -328,21 +328,21 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task,
  * running task can migrate over to a CPU that is running a task
  * of lesser priority.
  */
-static int push_rt_task(struct rq *this_rq)
+static int push_rt_task(struct rq *rq)
 {
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
 	int ret = 0;
 	int paranoid = RT_MAX_TRIES;
 
-	assert_spin_locked(&this_rq->lock);
+	assert_spin_locked(&rq->lock);
 
-	next_task = pick_next_highest_task_rt(this_rq, -1);
+	next_task = pick_next_highest_task_rt(rq, -1);
 	if (!next_task)
 		return 0;
 
 retry:
-	if (unlikely(next_task == this_rq->curr)) {
+	if (unlikely(next_task == rq->curr)) {
 		WARN_ON(1);
 		return 0;
 	}
@@ -352,24 +352,24 @@ static int push_rt_task(struct rq *rq)
 	 * higher priority than current. If that's the case
 	 * just reschedule current.
 	 */
-	if (unlikely(next_task->prio < this_rq->curr->prio)) {
-		resched_task(this_rq->curr);
+	if (unlikely(next_task->prio < rq->curr->prio)) {
+		resched_task(rq->curr);
 		return 0;
 	}
 
-	/* We might release this_rq lock */
+	/* We might release rq lock */
 	get_task_struct(next_task);
 
 	/* find_lock_lowest_rq locks the rq if found */
-	lowest_rq = find_lock_lowest_rq(next_task, this_rq);
+	lowest_rq = find_lock_lowest_rq(next_task, rq);
 	if (!lowest_rq) {
 		struct task_struct *task;
 
 		/*
-		 * find lock_lowest_rq releases this_rq->lock
+		 * find lock_lowest_rq releases rq->lock
 		 * so it is possible that next_task has changed.
 		 * If it has, then try again.
 		 */
-		task = pick_next_highest_task_rt(this_rq, -1);
+		task = pick_next_highest_task_rt(rq, -1);
 		if (unlikely(task != next_task) && task && paranoid--) {
 			put_task_struct(next_task);
 			next_task = task;
@@ -380,7 +380,7 @@ static int push_rt_task(struct rq *rq)
 	assert_spin_locked(&lowest_rq->lock);
 
-	deactivate_task(this_rq, next_task, 0);
+	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
 	activate_task(lowest_rq, next_task, 0);