sched: Remove double calculation in fix_small_imbalance()
The tmp value has already been calculated in:

    scaled_busy_load_per_task =
        (busiest->load_per_task * SCHED_POWER_SCALE) /
        busiest->group_power;

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394555166-22894-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit a2cd42601b
parent 383afd0971
1 changed file with 2 additions and 4 deletions
@@ -6061,12 +6061,10 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	pwr_now /= SCHED_POWER_SCALE;
 
 	/* Amount of load we'd subtract */
-	tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
-		busiest->group_power;
-	if (busiest->avg_load > tmp) {
+	if (busiest->avg_load > scaled_busy_load_per_task) {
 		pwr_move += busiest->group_power *
 			    min(busiest->load_per_task,
-				busiest->avg_load - tmp);
+				busiest->avg_load - scaled_busy_load_per_task);
 	}
 
 	/* Amount of load we'd add */
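For reference, a minimal sketch of why the removed lines were redundant, based only on the expression quoted in the commit message; the surrounding code of fix_small_imbalance() is abridged and the declarations shown are assumptions, not an excerpt:

	unsigned long tmp, scaled_busy_load_per_task;

	/* Computed once, earlier in fix_small_imbalance(): */
	scaled_busy_load_per_task =
		(busiest->load_per_task * SCHED_POWER_SCALE) /
		busiest->group_power;

	/* The deleted lines recomputed the identical quotient: */
	tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
		busiest->group_power;

	/* After this patch, the comparison and the min() argument use
	 * scaled_busy_load_per_task directly, avoiding a second
	 * multiply/divide per invocation. */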