sched: fix MC/HT scheduler optimization, without breaking the FUZZ logic.
First fix the check

	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task)

with this

	if (*imbalance < busiest_load_per_task)

as the current check is always false for nice-0 tasks (SCHED_LOAD_SCALE_FUZZ is the same as busiest_load_per_task for nice-0 tasks).

With the above change, imbalance was getting reset to 0 in the corner-case condition, making the FUZZ logic fail. Fix that by not corrupting the imbalance: change the imbalance only when it is found that the HT/MC optimization is needed.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
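For illustration only, here is a minimal user-space sketch (not kernel code) of why the old test could never fire for nice-0 tasks. It assumes the constants of that era, where SCHED_LOAD_SCALE_FUZZ is defined as SCHED_LOAD_SCALE (1024) and a nice-0 busiest_load_per_task equals SCHED_LOAD_SCALE; with unsigned arithmetic, imbalance + FUZZ < FUZZ can never hold, while the fixed check fires whenever the imbalance is small.

/* Standalone demo comparing the old and new checks; values are assumptions. */
#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL		/* assumed nice-0 task weight */
#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE

int main(void)
{
	unsigned long busiest_load_per_task = SCHED_LOAD_SCALE;	/* nice-0 */
	unsigned long imbalance;

	for (imbalance = 0; imbalance <= 2; imbalance++) {
		/* Old check: never true, since imbalance is unsigned. */
		int old_check = imbalance + SCHED_LOAD_SCALE_FUZZ
					< busiest_load_per_task;
		/* New check: true whenever the imbalance is small. */
		int new_check = imbalance < busiest_load_per_task;

		printf("imbalance=%lu old=%d new=%d\n",
		       imbalance, old_check, new_check);
	}
	return 0;
}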
commit 7fd0d2dde9
parent b21010ed64

1 changed file with 3 additions and 5 deletions
@@ -2512,7 +2512,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
-	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
+	if (*imbalance < busiest_load_per_task) {
 		unsigned long tmp, pwr_now, pwr_move;
 		unsigned int imbn;
 
@@ -2564,10 +2564,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		pwr_move /= SCHED_LOAD_SCALE;
 
 		/* Move if we gain throughput */
-		if (pwr_move <= pwr_now)
-			goto out_balanced;
-
-		*imbalance = busiest_load_per_task;
+		if (pwr_move > pwr_now)
+			*imbalance = busiest_load_per_task;
 	}
 
 	return busiest;
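After the second hunk, the tail of the small-imbalance path reduces to the shape below. This is a sketch with a hypothetical helper name, not the kernel function itself (the kernel keeps this logic inline in find_busiest_group). The point is that *imbalance is only overwritten when the HT/MC power comparison says moving a task gains throughput; otherwise the previously computed imbalance survives, which is what keeps the FUZZ-less check above able to move a task.

/*
 * Hypothetical helper illustrating the fixed control flow: overwrite
 * *imbalance only when moving gains throughput, instead of jumping to
 * out_balanced and resetting the imbalance as the old code did.
 */
static void adjust_small_imbalance(unsigned long pwr_now, unsigned long pwr_move,
				   unsigned long busiest_load_per_task,
				   unsigned long *imbalance)
{
	/* Move if we gain throughput */
	if (pwr_move > pwr_now)
		*imbalance = busiest_load_per_task;
	/* else: leave the earlier *imbalance value untouched */
}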