sched: wakeup granularity increase

increase wakeup granularity - we were overscheduling a bit.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Author: Ingo Molnar <mingo@elte.hu>
Date:   2007-10-15 17:00:05 +02:00
parent 5c6b5964a0
commit 2e09bf556f

--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c

@@ -74,7 +74,7 @@ const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;
 
 unsigned int sysctl_sched_runtime_limit __read_mostly;
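
Note: both wake-up granularity sysctls are in nanoseconds, so this change
doubles the SCHED_OTHER wakeup granularity from 1 ms to 2 ms: a freshly
woken task must now be further ahead of the running task before it is
allowed to preempt. A quick unit check (NSEC_PER_MSEC as in the kernel's
<linux/time.h>):

	#define NSEC_PER_MSEC	1000000UL
	/* old wakeup granularity:  1 * NSEC_PER_MSEC =  1 ms */
	/* new wakeup granularity:  2 * NSEC_PER_MSEC =  2 ms */
	/* batch granularity (unchanged): 25 * NSEC_PER_MSEC = 25 ms */
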
@@ -582,7 +582,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
  * Preempt the current task with a newly woken task if needed:
  */
 static void
-__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 	unsigned long ideal_runtime, delta_exec;
@@ -646,8 +646,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
-	struct sched_entity *next;
-
 	/*
 	 * Dequeue and enqueue the task to update its
 	 * position within the tree:
@@ -655,14 +653,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	dequeue_entity(cfs_rq, curr, 0);
 	enqueue_entity(cfs_rq, curr, 0);
 
-	/*
-	 * Reschedule if another task tops the current one.
-	 */
-	next = __pick_next_entity(cfs_rq);
-	if (next == curr)
-		return;
-
-	__check_preempt_curr_fair(cfs_rq, curr);
+	if (cfs_rq->nr_running > 1)
+		check_preempt_tick(cfs_rq, curr);
 }
 
 /**************************************************
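
Note: entity_tick() no longer picks the leftmost entity just to compare it
against curr; with more than one runnable task it defers straight to the
renamed slice check. A sketch of check_preempt_tick(), reconstructed from
the ideal_runtime/delta_exec context visible above (the exact body in the
tree at this commit may differ):

	static void
	check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
	{
		unsigned long ideal_runtime, delta_exec;

		/* compare curr's fair slice of the period with how long it
		 * has actually run since it was last picked */
		ideal_runtime = sched_slice(cfs_rq, curr);
		delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
		if (delta_exec > ideal_runtime)
			resched_task(rq_of(cfs_rq)->curr);
	}
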
@@ -852,7 +844,7 @@ static void yield_task_fair(struct rq *rq, struct task_struct *p)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
+static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 {
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -863,9 +855,12 @@ static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
 		resched_task(curr);
 		return;
 	}
-
-	if (is_same_group(curr, p))
-		__check_preempt_curr_fair(cfs_rq, &curr->se);
+	if (is_same_group(curr, p)) {
+		s64 delta = curr->se.vruntime - p->se.vruntime;
+
+		if (delta > (s64)sysctl_sched_wakeup_granularity)
+			resched_task(curr);
+	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
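
Note: the wakeup path is where the larger granularity takes effect: instead
of re-running the tick-time slice check, the woken task now preempts only
when its vruntime is smaller than curr's by more than
sysctl_sched_wakeup_granularity. A standalone userspace sketch of the test,
with made-up vruntime values (names and numbers are illustrative only):

	#include <stdio.h>

	typedef long long s64;

	/* the new default: 2000000 ns = 2 ms */
	static const s64 wakeup_granularity_ns = 2000000LL;

	/* preempt curr only if the wakee trails it in vruntime by more
	 * than the granularity */
	static int wakeup_preempts(s64 curr_vruntime, s64 wakee_vruntime)
	{
		s64 delta = curr_vruntime - wakee_vruntime;

		return delta > wakeup_granularity_ns;
	}

	int main(void)
	{
		/* wakee 1.5 ms behind: preempted under the old 1 ms setting,
		 * no longer under the new 2 ms one */
		printf("%d\n", wakeup_preempts(10000000LL, 8500000LL)); /* 0 */
		/* wakee 3 ms behind: still preempts */
		printf("%d\n", wakeup_preempts(10000000LL, 7000000LL)); /* 1 */
		return 0;
	}
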
@@ -1095,7 +1090,7 @@ struct sched_class fair_sched_class __read_mostly = {
 	.dequeue_task		= dequeue_task_fair,
 	.yield_task		= yield_task_fair,
-	.check_preempt_curr	= check_preempt_curr_fair,
+	.check_preempt_curr	= check_preempt_wakeup,
 	.pick_next_task		= pick_next_task_fair,
 	.put_prev_task		= put_prev_task_fair,
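
Note: the core scheduler reaches this hook only through the sched_class
table, which is why the rename ends at the one table entry above. A minimal
sketch of that indirection (simplified structs, not the kernel's real
layout):

	struct rq;
	struct task_struct;

	struct sched_class {
		void (*check_preempt_curr)(struct rq *rq, struct task_struct *p);
	};

	struct task_struct {
		const struct sched_class *sched_class;
	};

	struct rq {
		struct task_struct *curr;
	};

	/* callers never name the fair-class function directly */
	static void check_preempt_curr(struct rq *rq, struct task_struct *p)
	{
		rq->curr->sched_class->check_preempt_curr(rq, p);
	}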