sched: Remove some #ifdeffery
Remove a few gratuitous #ifdefs in pick_next_task*().

Cc: Ingo Molnar <mingo@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Juri Lelli <juri.lelli@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-nnzddp5c4fijyzzxxrwlxghf@git.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 3f1d2a3181
commit dc87734106
4 changed files with 60 additions and 21 deletions
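The recurring move in every hunk below is the same: the SMP-only logic at each call site is pulled into a small static inline helper, and the !CONFIG_SMP side gains a stub that returns a constant or does nothing, so the callers compile without any conditional blocks and the optimizer discards the dead branches on UP builds. A minimal standalone sketch of that idiom (MY_SMP and every identifier here are invented for illustration, not kernel symbols):

/*
 * Toy illustration of the stub-helper idiom used in this commit.
 * MY_SMP and all names are made up for this sketch; none of them
 * are kernel symbols.
 */
#include <stdbool.h>
#include <stdio.h>

struct runqueue {
	int nr_pushable;	/* tasks that could migrate elsewhere */
};

#ifdef MY_SMP
static inline bool need_pull(struct runqueue *rq)
{
	return rq->nr_pushable > 0;	/* the real decision, SMP only */
}
static inline void pull_tasks(struct runqueue *rq)
{
	printf("pulling tasks (%d pushable)\n", rq->nr_pushable);
}
#else
static inline bool need_pull(struct runqueue *rq)
{
	return false;			/* constant: the branch folds away on UP */
}
static inline void pull_tasks(struct runqueue *rq) { }
#endif

/* The call site now needs no #ifdef at all. */
static void pick_next(struct runqueue *rq)
{
	if (need_pull(rq))
		pull_tasks(rq);
}

int main(void)
{
	struct runqueue rq = { .nr_pushable = 1 };
	pick_next(&rq);
	return 0;
}

Compiling the sketch twice makes the effect visible: `cc -O2 -S toy.c` emits no trace of the pull path, while `cc -DMY_SMP -O2 -S toy.c` keeps it, and the caller is byte-for-byte identical source either way.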
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -214,6 +214,16 @@ static inline int has_pushable_dl_tasks(struct rq *rq)
 
 static int push_dl_task(struct rq *rq);
 
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+	return dl_task(prev);
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+	rq->post_schedule = has_pushable_dl_tasks(rq);
+}
+
 #else
 
 static inline
@@ -236,6 +246,19 @@ void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 }
 
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+	return false;
+}
+
+static inline int pull_dl_task(struct rq *rq)
+{
+	return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
+
 #endif /* CONFIG_SMP */
 
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
@@ -1000,10 +1023,8 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 
 	dl_rq = &rq->dl;
 
-#ifdef CONFIG_SMP
-	if (dl_task(prev))
+	if (need_pull_dl_task(rq, prev))
 		pull_dl_task(rq);
-#endif
 
 	if (unlikely(!dl_rq->dl_nr_running))
 		return NULL;
@@ -1024,9 +1045,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 		start_hrtick_dl(rq, p);
 #endif
 
-#ifdef CONFIG_SMP
-	rq->post_schedule = has_pushable_dl_tasks(rq);
-#endif /* CONFIG_SMP */
+	set_post_schedule(rq);
 
 	return p;
 }
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -29,9 +29,7 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev)
 	put_prev_task(rq, prev);
 
 	schedstat_inc(rq, sched_goidle);
-#ifdef CONFIG_SMP
 	idle_enter_fair(rq);
-#endif
 	return rq->idle;
 }
 
@@ -50,10 +48,8 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
-#ifdef CONFIG_SMP
 	idle_exit_fair(rq);
 	rq_last_tick_reset(rq);
-#endif
 }
 
 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
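The two idle_task.c hunks above can drop their #ifdefs only because the sched.h hunk at the end of this commit supplies empty static inline versions of idle_enter_fair() and idle_exit_fair() for !CONFIG_SMP (rq_last_tick_reset() appears to need no new stub because it already has a definition that is safe without NO_HZ_FULL). A compressed single-file sketch of that header-stub idiom, with made-up names; in real code the declarations would live in a shared header:

#include <stdio.h>

/* --- what would live in the shared header --- */
#ifdef MY_SMP
void idle_enter(void);			/* real version, defined below */
#else
static inline void idle_enter(void) { }	/* UP stub: expands to nothing */
#endif

/* --- the "real" SMP implementation, folded in for the sketch --- */
#ifdef MY_SMP
void idle_enter(void)
{
	puts("updating idle load tracking");
}
#endif

/* --- a caller, now free of conditional compilation --- */
static void go_idle(void)
{
	idle_enter();
	puts("cpu is idle");
}

int main(void)
{
	go_idle();
	return 0;
}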
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -231,6 +231,12 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 static int pull_rt_task(struct rq *this_rq);
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+	/* Try to pull RT tasks here if we lower this rq's prio */
+	return rq->rt.highest_prio.curr > prev->prio;
+}
+
 static inline int rt_overloaded(struct rq *rq)
 {
 	return atomic_read(&rq->rd->rto_count);
@@ -317,6 +323,15 @@ static inline int has_pushable_tasks(struct rq *rq)
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
+static inline void set_post_schedule(struct rq *rq)
+{
+	/*
+	 * We detect this state here so that we can avoid taking the RQ
+	 * lock again later if there is no need to push
+	 */
+	rq->post_schedule = has_pushable_tasks(rq);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -361,6 +376,19 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 }
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+	return false;
+}
+
+static inline int pull_rt_task(struct rq *this_rq)
+{
+	return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
+
 #endif /* CONFIG_SMP */
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
@@ -1332,11 +1360,8 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
 
-#ifdef CONFIG_SMP
-	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (rq->rt.highest_prio.curr > prev->prio)
+	if (need_pull_rt_task(rq, prev))
 		pull_rt_task(rq);
-#endif
 
 	if (!rt_rq->rt_nr_running)
 		return NULL;
@@ -1352,13 +1377,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	if (p)
 		dequeue_pushable_task(rq, p);
 
-#ifdef CONFIG_SMP
-	/*
-	 * We detect this state here so that we can avoid taking the RQ
-	 * lock again later if there is no need to push
-	 */
-	rq->post_schedule = has_pushable_tasks(rq);
-#endif
+	set_post_schedule(rq);
 
 	return p;
 }
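Beyond removing the #ifdefs, the rt.c helpers keep their explanatory comments in one place: need_pull_rt_task() now owns the "pull if we lowered this rq's prio" test, and set_post_schedule() latches, while the runqueue lock is still held, whether the post-schedule path will need to push tasks, so that path can skip retaking the lock when there is nothing to do. A generic, pthread-based sketch of that latch-a-decision-under-the-lock pattern (all names invented for illustration; build with `cc -pthread`):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
	int pending;		/* work items that might need a follow-up */
	bool post_work;		/* latched decision, read without the lock */
};

/* Called with q->lock held: record whether follow-up work is needed. */
static void set_post_work(struct queue *q)
{
	q->post_work = q->pending > 0;
}

static void pick(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	/* ... choose the next item ... */
	set_post_work(q);	/* cheap check while we already hold the lock */
	pthread_mutex_unlock(&q->lock);
}

static void post_schedule(struct queue *q)
{
	if (!q->post_work)	/* fast path: the lock is never retaken */
		return;
	pthread_mutex_lock(&q->lock);
	/* ... push the pending work ... */
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, 1, false };
	pick(&q);
	post_schedule(&q);
	printf("post_work=%d\n", q.post_work);
	return 0;
}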
|
@ -1172,6 +1172,11 @@ extern void trigger_load_balance(struct rq *rq);
|
||||||
extern void idle_enter_fair(struct rq *this_rq);
|
extern void idle_enter_fair(struct rq *this_rq);
|
||||||
extern void idle_exit_fair(struct rq *this_rq);
|
extern void idle_exit_fair(struct rq *this_rq);
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
static inline void idle_enter_fair(struct rq *rq) { }
|
||||||
|
static inline void idle_exit_fair(struct rq *rq) { }
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
extern void sysrq_sched_debug_show(void);
|
extern void sysrq_sched_debug_show(void);
|
||||||
|
|