sched: Remove irq time from available CPU power
The idea was suggested by Peter Zijlstra here:
http://marc.info/?l=linux-kernel&m=127476934517534&w=2

irq time is technically not available to the tasks running on the CPU. This patch removes irq time from CPU power, piggybacking on sched_rt_avg_update().

Tested this by keeping CPU X busy with a network intensive task having 75% of a single CPU's worth of irq processing (hard+soft) on a 4-way system, and then starting seven cycle soakers on the system. Without this change, there are two tasks on each CPU. With this change, there is a single task on the irq-busy CPU X and the remaining seven tasks are spread among the other three CPUs.

Signed-off-by: Venkatesh Pallipadi <venki@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1286237003-12406-8-git-send-email-venki@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 305e6835e0
commit aa48380851
3 changed files with 30 additions and 1 deletion
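Not part of the commit, but as orientation before the diff: scale_rt_power() in the fair-scheduler load-balancing path turns rq->rt_avg (which this patch now also feeds with irq time) into a fraction of SCHED_LOAD_SCALE, and that fraction scales the CPU power seen by the load balancer. A minimal userspace sketch of that arithmetic, assuming SCHED_LOAD_SCALE is 1024 and ignoring the kernel's div_u64()/shift details:

#include <stdio.h>
#include <stdint.h>

#define SCHED_LOAD_SCALE 1024ULL        /* assumed default, 1 << 10 */

/*
 * Simplified mirror of scale_rt_power(): what fraction of the averaging
 * window is left for fair tasks once rt + irq time is subtracted.
 */
static uint64_t scale_rt_power_sketch(uint64_t total, uint64_t rt_avg)
{
        uint64_t available = (total < rt_avg) ? 0 : total - rt_avg;

        if (total < SCHED_LOAD_SCALE)
                total = SCHED_LOAD_SCALE;

        return (available * SCHED_LOAD_SCALE) / total;
}

int main(void)
{
        uint64_t window = 4000000000ULL;        /* pretend 4s averaging window */

        /* ~75% of the window eaten by irq processing -> ~256/1024 power */
        printf("irq-busy CPU: %llu\n",
               (unsigned long long)scale_rt_power_sketch(window, 3000000000ULL));
        /* no rt/irq load -> full 1024 */
        printf("quiet CPU:    %llu\n",
               (unsigned long long)scale_rt_power_sketch(window, 0));
        return 0;
}

With the numbers from the test above, a CPU whose recent window is ~75% irq time ends up advertising roughly a quarter of the default power, so the balancer parks one cycle soaker there and spreads the other seven across the remaining three CPUs.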
kernel/sched.c

@@ -519,6 +519,10 @@ struct rq {
 	u64 avg_idle;
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+	u64 prev_irq_time;
+#endif
+
 	/* calc_load related fields */
 	unsigned long calc_load_update;
 	long calc_load_active;
@@ -643,6 +647,7 @@ static inline struct task_group *task_group(struct task_struct *p)
 #endif /* CONFIG_CGROUP_SCHED */
 
 static u64 irq_time_cpu(int cpu);
+static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
 
 inline void update_rq_clock(struct rq *rq)
 {
@@ -654,6 +659,8 @@ inline void update_rq_clock(struct rq *rq)
 		irq_time = irq_time_cpu(cpu);
 		if (rq->clock - irq_time > rq->clock_task)
 			rq->clock_task = rq->clock - irq_time;
+
+		sched_irq_time_avg_update(rq, irq_time);
 	}
 }
 
@@ -1985,6 +1992,15 @@ void account_system_vtime(struct task_struct *curr)
 	local_irq_restore(flags);
 }
 
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
+{
+	if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
+		u64 delta_irq = curr_irq_time - rq->prev_irq_time;
+		rq->prev_irq_time = curr_irq_time;
+		sched_rt_avg_update(rq, delta_irq);
+	}
+}
+
 #else
 
 static u64 irq_time_cpu(int cpu)
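sched_rt_avg_update() itself is not touched by this patch; in this kernel it adds the delta to rq->rt_avg and lets sched_avg_update() halve that sum once per averaging period, so rt_avg tracks recent rt + irq activity rather than growing without bound. A standalone sketch of that accumulate-and-decay shape, with hypothetical names and a made-up 1-second period:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the rq fields and sched_avg_period(). */
struct avg {
        uint64_t clock;      /* current time, ns */
        uint64_t age_stamp;  /* start of the current averaging window */
        uint64_t rt_avg;     /* decaying sum of rt + irq time */
};

#define AVG_PERIOD 1000000000ULL        /* pretend sched_avg_period() == 1s */

/* Halve the accumulated time once per elapsed period (shape of sched_avg_update()). */
static void avg_decay(struct avg *a)
{
        while (a->clock - a->age_stamp > AVG_PERIOD) {
                a->age_stamp += AVG_PERIOD;
                a->rt_avg /= 2;
        }
}

/* Shape of sched_rt_avg_update(): accumulate a delta, then decay. */
static void avg_add(struct avg *a, uint64_t delta)
{
        a->rt_avg += delta;
        avg_decay(a);
}

int main(void)
{
        struct avg a = { .clock = 0, .age_stamp = 0, .rt_avg = 0 };

        /* Repeated 750ms irq bursts keep rt_avg bounded near 750ms
         * instead of summing to 3s over four windows. */
        for (int i = 0; i < 4; i++) {
                a.clock += AVG_PERIOD;
                avg_add(&a, 750000000ULL);
                printf("window %d: rt_avg=%llums\n", i,
                       (unsigned long long)(a.rt_avg / 1000000));
        }
        return 0;
}

Feeding the per-update irq delta (curr_irq_time - rq->prev_irq_time) into such an accumulator is what lets scale_rt_power() later treat irq time exactly like rt task time.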
@@ -1992,6 +2008,8 @@ static u64 irq_time_cpu(int cpu)
 	return 0;
 }
 
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
+
 #endif
 
 #include "sched_idletask.c"
kernel/sched_fair.c

@@ -2275,7 +2275,13 @@ unsigned long scale_rt_power(int cpu)
 	u64 total, available;
 
 	total = sched_avg_period() + (rq->clock - rq->age_stamp);
-	available = total - rq->rt_avg;
+
+	if (unlikely(total < rq->rt_avg)) {
+		/* Ensures that power won't end up being negative */
+		available = 0;
+	} else {
+		available = total - rq->rt_avg;
+	}
 
 	if (unlikely((s64)total < SCHED_LOAD_SCALE))
 		total = SCHED_LOAD_SCALE;
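The reason for the new clamp: total and rt_avg are both u64, and once irq time is folded into rq->rt_avg it can transiently exceed the sampled window, so the old unconditional subtraction would wrap around to a huge "available" value instead of going negative. A tiny demonstration of that wrap-around, independent of any kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t total  = 10;
        uint64_t rt_avg = 20;

        /* Unsigned subtraction wraps: this is the "negative power" the clamp prevents. */
        printf("total - rt_avg    = %llu\n",
               (unsigned long long)(total - rt_avg));      /* 18446744073709551606 */

        uint64_t available = (total < rt_avg) ? 0 : total - rt_avg;
        printf("clamped available = %llu\n",
               (unsigned long long)available);             /* 0 */
        return 0;
}

With the clamp, such a transient simply reports zero available power for that window rather than an absurdly inflated one.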
kernel/sched_features.h

@@ -61,3 +61,8 @@ SCHED_FEAT(ASYM_EFF_LOAD, 1)
  * release the lock. Decreases scheduling overhead.
  */
 SCHED_FEAT(OWNER_SPIN, 1)
+
+/*
+ * Decrement CPU power based on irq activity
+ */
+SCHED_FEAT(NONIRQ_POWER, 1)
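For context (not part of the diff): SCHED_FEAT() entries in sched_features.h are expanded in kernel/sched.c into an enum of bit positions plus a default feature mask, and sched_feat(NONIRQ_POWER) in sched_irq_time_avg_update() above just tests that bit; with CONFIG_SCHED_DEBUG the mask can be changed at runtime via /sys/kernel/debug/sched_features. A compressed, self-contained sketch of that X-macro pattern, using a local mask in place of the kernel's sysctl_sched_features:

#include <stdio.h>

/* Hypothetical miniature of the SCHED_FEAT() pattern: one list, expanded twice. */
#define FEATURES(F)                     \
        F(OWNER_SPIN, 1)                \
        F(NONIRQ_POWER, 1)

#define F_ENUM(name, enabled)   __FEAT_##name,
enum { FEATURES(F_ENUM) __FEAT_NR };

#define F_DEFAULT(name, enabled)        ((enabled) << __FEAT_##name) |
static unsigned long features = FEATURES(F_DEFAULT) 0;

#define feat(name)      (features & (1UL << __FEAT_##name))

int main(void)
{
        printf("NONIRQ_POWER enabled: %s\n", feat(NONIRQ_POWER) ? "yes" : "no");

        /* Clearing the bit is roughly what writing NO_NONIRQ_POWER to the
         * debugfs sched_features file does. */
        features &= ~(1UL << __FEAT_NONIRQ_POWER);
        printf("NONIRQ_POWER enabled: %s\n", feat(NONIRQ_POWER) ? "yes" : "no");
        return 0;
}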