sched: stop wake_affine from causing serious imbalance
Prevent short-running wakers of short-running threads from overloading
a single cpu via wakeup affinity, and wire up disconnected debug option.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent a381759d6a
commit b3137bc8e7
1 changed file with 19 additions and 16 deletions
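The whole patch pivots on one balance predicate, so before the diff, a minimal sketch of it in C with toy numbers of my own choosing (the weight of 1024 loosely mirrors a nice-0 task; the imbalance percentage and load values are illustrative, not the kernel's defaults):

/* Hedged sketch: the balance predicate this patch introduces.
 * tl is the waking CPU's load, load is the load of the CPU the wakee
 * last ran on, and imbalance is a percentage (e.g. 125 would allow the
 * waking CPU to end up 25% busier). Illustrative only. */
static int balanced(unsigned long tl, unsigned long task_weight,
		    unsigned long load, unsigned long imbalance)
{
	return 100 * (tl + task_weight) <= imbalance * load;
}

/* Worked example (toy numbers): the waking CPU already runs two
 * nice-0-weight tasks (tl = 2048), the wakee weighs 1024, the previous
 * CPU's load is 2048, imbalance = 125:
 *   100 * (2048 + 1024) = 307200 > 125 * 2048 = 256000
 * -> not balanced, so the wakee stays where it was. */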
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -996,24 +996,11 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	struct task_struct *curr = this_rq->curr;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
+	int balanced;
 
-	if (!(this_sd->flags & SD_WAKE_AFFINE))
+	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
-	/*
-	 * If the currently running task will sleep within
-	 * a reasonable amount of time then attract this newly
-	 * woken task:
-	 */
-	if (sync && curr->sched_class == &fair_sched_class) {
-		if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
-		    p->se.avg_overlap < sysctl_sched_migration_cost)
-			return 1;
-	}
-
-	schedstat_inc(p, se.nr_wakeups_affine_attempts);
-	tl_per_task = cpu_avg_load_per_task(this_cpu);
-
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
@@ -1022,8 +1009,24 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	if (sync)
 		tl -= current->se.load.weight;
 
+	balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
+
+	/*
+	 * If the currently running task will sleep within
+	 * a reasonable amount of time then attract this newly
+	 * woken task:
+	 */
+	if (sync && balanced && curr->sched_class == &fair_sched_class) {
+		if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
+		    p->se.avg_overlap < sysctl_sched_migration_cost)
+			return 1;
+	}
+
+	schedstat_inc(p, se.nr_wakeups_affine_attempts);
+	tl_per_task = cpu_avg_load_per_task(this_cpu);
+
 	if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
-			100*(tl + p->se.load.weight) <= imbalance*load) {
+			balanced) {
 		/*
 		 * This domain has SD_WAKE_AFFINE and
 		 * p is cache cold in this domain, and
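To make the reordered control flow easier to follow, here is a hedged, self-contained sketch of wake_affine()'s decision order after this patch. All names here (fake_task, affine_wakeups, migration_cost and the rest) are simplified stand-ins of my own, not the kernel's API, and the slow path omits the tl_per_task/target_load condition for brevity:

/*
 * Hedged sketch of the decision order in wake_affine() after this
 * patch. Simplified stand-ins only; not the real implementation.
 */
struct fake_task {
	unsigned long weight;      /* load weight, cf. p->se.load.weight */
	unsigned long avg_overlap; /* how long it tends to run alongside its waker */
};

static int wake_affine_sketch(int affine_wakeups, int sync,
			      const struct fake_task *curr,
			      const struct fake_task *p,
			      unsigned long this_load, unsigned long load,
			      unsigned long imbalance,
			      unsigned long migration_cost)
{
	unsigned long tl = this_load;
	int balanced;

	/* The debug option is now actually wired up: it can veto
	 * affine wakeups entirely. */
	if (!affine_wakeups)
		return 0;

	/* On a sync wakeup the waker is about to sleep, so discount
	 * its (maximum possible) contribution to this CPU's load. */
	if (sync)
		tl -= curr->weight;

	/* Would pulling p here still satisfy the domain's imbalance
	 * limit? Computed once, up front. */
	balanced = 100 * (tl + p->weight) <= imbalance * load;

	/* Fast path: a short-running waker waking a short-running
	 * wakee. Before this patch it returned 1 unconditionally and
	 * could pile such pairs onto one CPU; now it also requires
	 * the balance test to pass. */
	if (sync && balanced &&
	    curr->avg_overlap < migration_cost &&
	    p->avg_overlap < migration_cost)
		return 1;

	/* Slow path: the plain balance criterion. (The kernel also
	 * accepts a lightly-loaded per-task condition here, omitted
	 * for brevity.) */
	return balanced;
}

The reordering is the point: "balanced" is computed before the overlap fast path runs, so a sync wakeup of a short-running thread can no longer bypass the balance check that the slow path has always applied.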