sched/stat: Simplify the sched_info accounting dependency
Both CONFIG_SCHEDSTATS=y and CONFIG_TASK_DELAY_ACCT=y track task sched_info, which results in ugly #if clauses. Simplify the code by introducing a synthetic CONFIG_SCHED_INFO switch, selected by both.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: a.p.zijlstra@chello.nl
Cc: ricklind@us.ibm.com
Link: http://lkml.kernel.org/r/8d19eef800811a94b0f91bcbeb27430a884d7433.1435255405.git.naveen.n.rao@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit f6db834799
parent 407a2c7205

5 changed files with 12 additions and 6 deletions
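Condensed from the hunks below, the resulting dependency structure looks roughly like this (a sketch, not a verbatim copy of the tree; the existing prompts, dependencies and help text are omitted). A Kconfig bool with no prompt string never appears in menuconfig, so SCHED_INFO can only become =y when another option selects it:

	# lib/Kconfig.debug: invisible helper symbol, enabled only via "select"
	config SCHED_INFO
		bool

	config SCHEDSTATS
		bool "Collect scheduler statistics"
		select SCHED_INFO

	# init/Kconfig
	config TASK_DELAY_ACCT
		bool "Enable per-task delay accounting"
		select SCHED_INFO

Code that needs struct sched_info then guards on the single CONFIG_SCHED_INFO symbol instead of spelling out both options, which is exactly what the hunks below do.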
include/linux/sched.h

@@ -849,7 +849,7 @@ extern struct user_struct root_user;
 struct backing_dev_info;
 struct reclaim_state;
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
 struct sched_info {
 	/* cumulative counters */
 	unsigned long pcount;	/* # of times run on this cpu */
@@ -859,7 +859,7 @@ struct sched_info {
 	unsigned long long last_arrival,/* when we last ran on a cpu */
 			   last_queued;	/* when we were last queued to run */
 };
-#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
+#endif /* CONFIG_SCHED_INFO */
 
 #ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info {
@@ -1408,7 +1408,7 @@ struct task_struct {
 	int rcu_tasks_idle_cpu;
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
 	struct sched_info sched_info;
 #endif
 
init/Kconfig

@@ -435,6 +435,7 @@ config TASKSTATS
 config TASK_DELAY_ACCT
 	bool "Enable per-task delay accounting"
 	depends on TASKSTATS
+	select SCHED_INFO
 	help
 	  Collect information on time spent by a task waiting for system
 	  resources like cpu, synchronous block I/O completion and swapping
kernel/sched/core.c

@@ -1975,7 +1975,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	set_task_cpu(p, cpu);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
kernel/sched/stats.h

@@ -47,7 +47,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 # define schedstat_set(var, val)	do { } while (0)
 #endif
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
 static inline void sched_info_reset_dequeued(struct task_struct *t)
 {
 	t->sched_info.last_queued = 0;
@@ -156,7 +156,7 @@ sched_info_switch(struct rq *rq,
 #define sched_info_depart(rq, t)	do { } while (0)
 #define sched_info_arrive(rq, next)	do { } while (0)
 #define sched_info_switch(rq, t, next)	do { } while (0)
-#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
+#endif /* CONFIG_SCHED_INFO */
 
 /*
  * The following are functions that support scheduler-internal time accounting.
lib/Kconfig.debug

@@ -841,9 +841,14 @@ config SCHED_DEBUG
 	  that can help debug the scheduler. The runtime overhead of this
 	  option is minimal.
 
+config SCHED_INFO
+	bool
+	default n
+
 config SCHEDSTATS
 	bool "Collect scheduler statistics"
 	depends on DEBUG_KERNEL && PROC_FS
+	select SCHED_INFO
 	help
 	  If you say Y here, additional code will be inserted into the
 	  scheduler and related routines to collect statistics about
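A quick way to sanity-check the new wiring after this patch is to enable delay accounting alone: the helper symbol should still be pulled in via select even with schedstats disabled. A hypothetical .config excerpt (exact ordering depends on the rest of the configuration):

	CONFIG_TASK_DELAY_ACCT=y
	# CONFIG_SCHEDSTATS is not set
	CONFIG_SCHED_INFO=y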