[PATCH] sched: less aggressive idle balancing
Remove the special casing for idle CPU balancing. Things like this hurt on SMT, for example, where a single sibling being idle doesn't really warrant an aggressive pull over the NUMA domain.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent db935dbd43
commit 99b61ccf0b
1 changed file with 0 additions and 6 deletions
@@ -1877,15 +1877,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	/* Get rid of the scaling factor, rounding down as we divide */
 	*imbalance = *imbalance / SCHED_LOAD_SCALE;
 
 	return busiest;
 
 out_balanced:
-	if (busiest && (idle == NEWLY_IDLE ||
-			(idle == SCHED_IDLE && max_load > SCHED_LOAD_SCALE)) ) {
-		*imbalance = 1;
-		return busiest;
-	}
-
 	*imbalance = 0;
 	return NULL;
 }
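For readers who don't live in the scheduler: below is a minimal, self-contained sketch of what the deleted branch did, paraphrased from the 2.6-era find_busiest_group() tail. NEWLY_IDLE, SCHED_IDLE and SCHED_LOAD_SCALE mirror the kernel names; the wrapper functions, the stand-in struct and main() are hypothetical, for illustration only.

/*
 * Sketch of the behaviour this patch removes. The enum values and
 * SCHED_LOAD_SCALE mirror the 2.6-era kernel; everything else here
 * is illustrative scaffolding, not kernel code.
 */
#include <stdio.h>

enum idle_type { NOT_IDLE, NEWLY_IDLE, SCHED_IDLE };

#define SCHED_LOAD_SCALE 128UL	/* fixed-point "one CPU of load" */

struct sched_group { int id; };	/* stand-in for the kernel struct */

/* Pre-patch out_balanced path: an idle CPU is still told to pull. */
static struct sched_group *out_balanced_old(struct sched_group *busiest,
					    enum idle_type idle,
					    unsigned long max_load,
					    unsigned long *imbalance)
{
	if (busiest && (idle == NEWLY_IDLE ||
			(idle == SCHED_IDLE && max_load > SCHED_LOAD_SCALE))) {
		*imbalance = 1;	/* force a pull of a single task */
		return busiest;
	}
	*imbalance = 0;
	return NULL;
}

/* Post-patch out_balanced path: balanced means no pull at all. */
static struct sched_group *out_balanced_new(unsigned long *imbalance)
{
	*imbalance = 0;
	return NULL;
}

int main(void)
{
	struct sched_group busiest = { 1 };
	unsigned long imb;

	/* A newly idle CPU in a domain that otherwise looks balanced: */
	struct sched_group *g = out_balanced_old(&busiest, NEWLY_IDLE, 0, &imb);
	printf("old: pull=%s imbalance=%lu\n", g ? "yes" : "no", imb);

	g = out_balanced_new(&imb);
	printf("new: pull=%s imbalance=%lu\n", g ? "yes" : "no", imb);
	return 0;
}

Running this prints pull=yes imbalance=1 for the old path and pull=no imbalance=0 for the new one, which is exactly the behavioural change: a single idle SMT sibling no longer forces load_balance() to drag a task across an expensive domain such as NUMA.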