sched: Fix select_idle_sibling() logic in select_task_rq_fair()

Issues in the current select_idle_sibling() logic in select_task_rq_fair()
in the context of a task wake-up:

a) Once we select the idle sibling, we use that domain (spanning the cpu that
   the task is currently woken-up on and the idle sibling we found) in our
   wake_affine() decisions. This domain is completely different from the
   domain we are supposed to use: the one spanning the cpu the task is
   currently woken-up on and the cpu where the task previously ran.

b) We do the select_idle_sibling() check only for the cpu that the task is
   currently woken-up on. If select_task_rq_fair() selects the previously-run
   cpu for waking the task, a select_idle_sibling() check for that cpu would
   help as well, and we don't do that currently.

c) In scenarios where the cpu that the task is woken-up on is busy but its
   HT siblings are idle, we select the idle HT sibling for the wake-up
   instead of the core where the task previously ran, even when that core is
   now completely idle. That is, instead of basing the decision on
   wake_affine(), we directly select an idle sibling, which can cause an
   imbalance at the SMT/MC level that is only corrected later by the
   periodic load balancer.

Fix this by first going through the load-imbalance calculations in
wake_affine() and, once we have decided between the woken-up cpu and the
previously-ran cpu, then choosing a possible idle sibling of that cpu to
wake the task on.
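
The resulting flow, condensed into a minimal sketch (the helpers named here
exist in the kernel, but this standalone function is illustrative rather than
the literal patch):

/*
 * Sketch of the reordered wake-up decision: wake_affine() arbitrates
 * between the waking cpu and prev_cpu first, and only then do we look
 * for an idle sibling near whichever cpu won.
 */
static int sketch_wakeup_target(struct task_struct *p, int cpu, int prev_cpu,
                                struct sched_domain *affine_sd, int sync)
{
        if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
                return select_idle_sibling(p, cpu);

        return select_idle_sibling(p, prev_cpu);
}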

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1270079265.7835.8.camel@sbs-t61.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

@@ -1375,29 +1375,48 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 /*
  * Try and locate an idle CPU in the sched_domain.
  */
-static int
-select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
+static int select_idle_sibling(struct task_struct *p, int target)
 {
         int cpu = smp_processor_id();
         int prev_cpu = task_cpu(p);
+        struct sched_domain *sd;
         int i;
 
         /*
-         * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
-         * test in select_task_rq_fair) and the prev_cpu is idle then that's
-         * always a better target than the current cpu.
+         * If the task is going to be woken-up on this cpu and if it is
+         * already idle, then it is the right target.
+         */
+        if (target == cpu && idle_cpu(cpu))
+                return cpu;
+
+        /*
+         * If the task is going to be woken-up on the cpu where it previously
+         * ran and if it is currently idle, then it is the right target.
          */
-        if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running)
+        if (target == prev_cpu && idle_cpu(prev_cpu))
                 return prev_cpu;
 
         /*
-         * Otherwise, iterate the domain and find an eligible idle cpu.
+         * Otherwise, iterate the domains and find an eligible idle cpu.
          */
-        for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
-                if (!cpu_rq(i)->cfs.nr_running) {
-                        target = i;
+        for_each_domain(target, sd) {
+                if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
                         break;
+
+                for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+                        if (idle_cpu(i)) {
+                                target = i;
+                                break;
+                        }
                 }
+
+                /*
+                 * Let's stop looking for an idle sibling when we reach
+                 * the domain that spans the current cpu and prev_cpu.
+                 */
+                if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
+                    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
+                        break;
         }
 
         return target;
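
A self-contained toy model may help make the new walk concrete. The topology,
masks, and helper names below are invented stand-ins for illustration; they
are not kernel API:

/*
 * Toy model of the new select_idle_sibling() walk, for illustration only.
 * Domains are ordered smallest (SMT) to largest (whole machine), as the
 * kernel's for_each_domain() iteration is.
 */
#include <stdio.h>

#define NR_CPUS     8
#define NR_DOMAINS  3

struct toy_domain {
        unsigned long span;     /* bitmask of cpus covered by this domain */
        int shares_cache;       /* stands in for SD_SHARE_PKG_RESOURCES   */
};

/* Per-cpu idle flags: cpu 0 is busy, its HT sibling cpu 1 is idle, etc. */
static const int toy_idle[NR_CPUS] = { 0, 1, 0, 0, 1, 1, 1, 1 };

/* Domains around cpu 0 of an imaginary 8-cpu, 2-package box. */
static const struct toy_domain toy_domains[NR_DOMAINS] = {
        { 0x03, 1 },    /* SMT: cpus 0-1 share a core           */
        { 0x0f, 1 },    /* MC:  cpus 0-3 share a cache          */
        { 0xff, 0 },    /* ALL: whole machine, no shared cache  */
};

static int toy_select_idle_sibling(int target, int cpu, int prev_cpu)
{
        int d, i;

        for (d = 0; d < NR_DOMAINS; d++) {
                const struct toy_domain *sd = &toy_domains[d];

                /* Stop once we leave the cache-sharing levels. */
                if (!sd->shares_cache)
                        break;

                /* Take the first idle cpu found at this level. */
                for (i = 0; i < NR_CPUS; i++) {
                        if ((sd->span & (1UL << i)) && toy_idle[i]) {
                                target = i;
                                break;
                        }
                }

                /* Stop at the domain that spans both cpu and prev_cpu. */
                if ((sd->span & (1UL << cpu)) && (sd->span & (1UL << prev_cpu)))
                        break;
        }

        return target;
}

int main(void)
{
        /* Waking on busy cpu 0 while the task last ran on cpu 4. */
        printf("target: %d\n", toy_select_idle_sibling(0, 0, 4));
        return 0;
}

With these made-up masks the walk returns cpu 1: the search widens through
the cache-sharing levels and stops either at the first level that does not
share cache or at the domain spanning both cpu and prev_cpu, just as the
patch's loop does.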
@@ -1421,7 +1440,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
         int cpu = smp_processor_id();
         int prev_cpu = task_cpu(p);
         int new_cpu = cpu;
-        int want_affine = 0, cpu_idle = !current->pid;
+        int want_affine = 0;
         int want_sd = 1;
         int sync = wake_flags & WF_SYNC;
@@ -1460,36 +1479,13 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                 }
 
                 /*
-                 * While iterating the domains looking for a spanning
-                 * WAKE_AFFINE domain, adjust the affine target to any idle cpu
-                 * in cache sharing domains along the way.
+                 * If both cpu and prev_cpu are part of this domain,
+                 * cpu is a valid SD_WAKE_AFFINE target.
                  */
-                if (want_affine) {
-                        int target = -1;
-
-                        /*
-                         * If both cpu and prev_cpu are part of this domain,
-                         * cpu is a valid SD_WAKE_AFFINE target.
-                         */
-                        if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
-                                target = cpu;
-
-                        /*
-                         * If there's an idle sibling in this domain, make that
-                         * the wake_affine target instead of the current cpu.
-                         */
-                        if (!cpu_idle && tmp->flags & SD_SHARE_PKG_RESOURCES)
-                                target = select_idle_sibling(p, tmp, target);
-
-                        if (target >= 0) {
-                                if (tmp->flags & SD_WAKE_AFFINE) {
-                                        affine_sd = tmp;
-                                        want_affine = 0;
-                                        if (target != cpu)
-                                                cpu_idle = 1;
-                                }
-                                cpu = target;
-                        }
+                if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
+                    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
+                        affine_sd = tmp;
+                        want_affine = 0;
                 }
 
                 if (!want_sd && !want_affine)
@@ -1520,8 +1516,10 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
 #endif
 
         if (affine_sd) {
-                if (cpu_idle || cpu == prev_cpu || wake_affine(affine_sd, p, sync))
-                        return cpu;
+                if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+                        return select_idle_sibling(p, cpu);
+                else
+                        return select_idle_sibling(p, prev_cpu);
         }
 
         while (sd) {
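
With this ordering, the idle-sibling search only refines a target that
wake_affine() has already chosen, rather than influencing or bypassing the
wake_affine() decision itself, and the walk is bounded by the domain that
spans both cpu and prev_cpu. That addresses all three issues above: the
affine domain used for the balance calculation is the correct one (a), the
previously-ran cpu also gets an idle-sibling check (b), and an idle sibling
can no longer be selected in preference to the wake_affine() verdict, so no
SMT/MC imbalance is left for the periodic load balancer to undo (c).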