diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a171138a9402..aa16cf1eb8fe 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3031,7 +3031,14 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
 	if (!ld_moved) {
 		schedstat_inc(sd, lb_failed[idle]);
-		sd->nr_balance_failed++;
+		/*
+		 * Increment the failure counter only on periodic balance.
+		 * We do not want newidle balance, which can be very
+		 * frequent, to pollute the failure counter, causing
+		 * excessive cache_hot migrations and active balances.
+		 */
+		if (idle != CPU_NEWLY_IDLE)
+			sd->nr_balance_failed++;
 
 		if (need_active_balance(sd, sd_idle, idle,
 					cpu_of(busiest), this_cpu)) {
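
For context on why an inflated counter is harmful: nr_balance_failed is compared
against sd->cache_nice_tries when deciding whether a cache-hot task may be migrated
anyway (in can_migrate_task()), and against cache_nice_tries + 2 when deciding
whether to kick active balance (in need_active_balance()). The following is a
minimal userspace sketch of that consumption logic, assuming the 2.6.36-era
thresholds; struct sched_domain_model and both helper names are simplified
stand-ins for illustration, not the kernel's actual scaffolding.

/*
 * Userspace sketch (not kernel code) of how nr_balance_failed is
 * consumed by the fair-class load balancer. Names below are
 * hypothetical; only the two threshold comparisons mirror the kernel.
 */
#include <stdbool.h>
#include <stdio.h>

struct sched_domain_model {
	unsigned int nr_balance_failed;	/* the counter this patch gates */
	unsigned int cache_nice_tries;	/* tolerance before forcing moves */
};

/*
 * Mirrors the check in can_migrate_task(): a cache-hot task may still
 * be migrated once the domain has failed to balance often enough.
 */
static bool may_migrate_cache_hot(const struct sched_domain_model *sd)
{
	return sd->nr_balance_failed > sd->cache_nice_tries;
}

/*
 * Mirrors the fallback in need_active_balance(): after further
 * failures, push migration via active balance rather than waiting
 * for pull-based balancing to succeed.
 */
static bool should_active_balance(const struct sched_domain_model *sd)
{
	return sd->nr_balance_failed > sd->cache_nice_tries + 2;
}

int main(void)
{
	struct sched_domain_model sd = { .cache_nice_tries = 1 };

	/*
	 * A burst of failed newidle balances would cross both
	 * thresholds within a few iterations if every failure
	 * incremented the counter -- exactly what the patch avoids
	 * by counting only periodic-balance failures.
	 */
	for (unsigned int fails = 0; fails < 5; fails++) {
		sd.nr_balance_failed = fails;
		printf("failed=%u cache_hot_ok=%d active_balance=%d\n",
		       fails, may_migrate_cache_hot(&sd),
		       should_active_balance(&sd));
	}
	return 0;
}

Since newidle balance can fire on every transition to idle, letting it bump the
counter reaches these thresholds far faster than the periodic tick would, which
is the "excessive cache_hot migrations and active balances" the comment refers to.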