/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
|
|
|
|
|
|
|
|
#include <linux/proc_fs.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/seq_file.h>
|
|
|
|
#include <linux/kallsyms.h>
|
|
|
|
#include <linux/utsname.h>
|
|
|
|
|
2011-10-25 02:00:11 -06:00
|
|
|
#include "sched.h"
|
|
|
|
|
/* Serializes the whole-runqueue dump done by print_cpu(). */
static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console (when m is NULL, e.g. from sysrq).
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
|
/*
 * Ease the printing of nsec fields:
 *
 * nsec_high() returns the whole-millisecond part of a (possibly
 * negative) nanosecond count; pairs with nsec_low() via SPLIT_NS().
 * Uses do_div() because 64-bit division isn't available natively
 * on all 32-bit architectures.
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		/* do_div() requires a non-negative dividend */
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}
|
/*
 * nsec_low() returns the sub-millisecond remainder (in nanoseconds)
 * of a (possibly negative) nanosecond count; do_div() yields the
 * remainder as its return value.
 */
static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

/* Expands to the two arguments expected by a "%Ld.%06ld" format. */
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
|
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Dump per-group scheduling statistics for @tg's sched_entity on @cpu.
 * For the root group (which has no sched_entity, tg->se[cpu] == NULL)
 * only the runqueue-wide load-tracking averages are printed.
 */
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->runnable_avg_period);
		return;
	}


	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.runnable_avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif
|
#ifdef CONFIG_CGROUP_SCHED
/*
 * Scratch buffer for task_group_path(); callers must consume the
 * returned string before the next call (protected in practice by
 * sched_debug_lock around the dump paths).
 */
static char group_path[PATH_MAX];

/* Return a printable path for @tg: autogroup name, cgroup path, or "". */
static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	/*
	 * May be NULL if the underlying cgroup isn't fully-created yet
	 */
	if (!tg->css.cgroup) {
		group_path[0] = '\0';
		return group_path;
	}
	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif
|
2007-07-09 10:52:00 -06:00
|
|
|
static void
|
2007-08-09 03:16:51 -06:00
|
|
|
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
|
2007-07-09 10:52:00 -06:00
|
|
|
{
|
|
|
|
if (rq->curr == p)
|
|
|
|
SEQ_printf(m, "R");
|
|
|
|
else
|
|
|
|
SEQ_printf(m, " ");
|
|
|
|
|
2007-10-15 09:00:08 -06:00
|
|
|
SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
|
2007-07-09 10:52:00 -06:00
|
|
|
p->comm, p->pid,
|
2007-10-15 09:00:08 -06:00
|
|
|
SPLIT_NS(p->se.vruntime),
|
2007-07-09 10:52:00 -06:00
|
|
|
(long long)(p->nvcsw + p->nivcsw),
|
2007-08-05 21:26:59 -06:00
|
|
|
p->prio);
|
2007-08-02 09:41:40 -06:00
|
|
|
#ifdef CONFIG_SCHEDSTATS
|
2008-04-19 11:45:00 -06:00
|
|
|
SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
|
2007-10-15 09:00:08 -06:00
|
|
|
SPLIT_NS(p->se.vruntime),
|
|
|
|
SPLIT_NS(p->se.sum_exec_runtime),
|
2010-03-10 19:37:45 -07:00
|
|
|
SPLIT_NS(p->se.statistics.sum_sleep_runtime));
|
2007-08-02 09:41:40 -06:00
|
|
|
#else
|
2008-04-19 11:45:00 -06:00
|
|
|
SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
|
2007-10-15 09:00:08 -06:00
|
|
|
0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
|
2007-08-02 09:41:40 -06:00
|
|
|
#endif
|
2011-01-11 03:11:54 -07:00
|
|
|
#ifdef CONFIG_CGROUP_SCHED
|
|
|
|
SEQ_printf(m, " %s", task_group_path(task_group(p)));
|
|
|
|
#endif
|
2008-04-19 11:45:00 -06:00
|
|
|
|
|
|
|
SEQ_printf(m, "\n");
|
2007-07-09 10:52:00 -06:00
|
|
|
}
|
|
|
|
|
2007-08-09 03:16:51 -06:00
|
|
|
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
|
2007-07-09 10:52:00 -06:00
|
|
|
{
|
|
|
|
struct task_struct *g, *p;
|
2007-10-25 06:02:45 -06:00
|
|
|
unsigned long flags;
|
2007-07-09 10:52:00 -06:00
|
|
|
|
|
|
|
SEQ_printf(m,
|
|
|
|
"\nrunnable tasks:\n"
|
2007-10-15 09:00:08 -06:00
|
|
|
" task PID tree-key switches prio"
|
|
|
|
" exec-runtime sum-exec sum-sleep\n"
|
2007-10-15 09:00:08 -06:00
|
|
|
"------------------------------------------------------"
|
2007-10-15 09:00:08 -06:00
|
|
|
"----------------------------------------------------\n");
|
2007-07-09 10:52:00 -06:00
|
|
|
|
2007-10-25 06:02:45 -06:00
|
|
|
read_lock_irqsave(&tasklist_lock, flags);
|
2007-07-09 10:52:00 -06:00
|
|
|
|
|
|
|
do_each_thread(g, p) {
|
2011-04-05 09:23:44 -06:00
|
|
|
if (!p->on_rq || task_cpu(p) != rq_cpu)
|
2007-07-09 10:52:00 -06:00
|
|
|
continue;
|
|
|
|
|
2007-08-09 03:16:51 -06:00
|
|
|
print_task(m, rq, p);
|
2007-07-09 10:52:00 -06:00
|
|
|
} while_each_thread(g, p);
|
|
|
|
|
2007-10-25 06:02:45 -06:00
|
|
|
read_unlock_irqrestore(&tasklist_lock, flags);
|
2007-07-09 10:52:00 -06:00
|
|
|
}
|
|
|
|
|
2007-08-09 03:16:47 -06:00
|
|
|
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
|
2007-07-09 10:52:00 -06:00
|
|
|
{
|
2007-10-15 09:00:06 -06:00
|
|
|
s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
|
|
|
|
spread, rq0_min_vruntime, spread0;
|
2009-06-17 07:20:55 -06:00
|
|
|
struct rq *rq = cpu_rq(cpu);
|
2007-10-15 09:00:05 -06:00
|
|
|
struct sched_entity *last;
|
|
|
|
unsigned long flags;
|
|
|
|
|
2011-01-11 03:11:54 -07:00
|
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
|
|
SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
|
|
|
|
#else
|
2008-06-19 06:22:24 -06:00
|
|
|
SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
|
2011-01-11 03:11:54 -07:00
|
|
|
#endif
|
2007-10-15 09:00:08 -06:00
|
|
|
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
|
|
|
|
SPLIT_NS(cfs_rq->exec_clock));
|
2007-10-15 09:00:05 -06:00
|
|
|
|
2009-11-17 06:28:38 -07:00
|
|
|
raw_spin_lock_irqsave(&rq->lock, flags);
|
2007-10-15 09:00:05 -06:00
|
|
|
if (cfs_rq->rb_leftmost)
|
2011-02-01 07:51:03 -07:00
|
|
|
MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
|
2007-10-15 09:00:05 -06:00
|
|
|
last = __pick_last_entity(cfs_rq);
|
|
|
|
if (last)
|
|
|
|
max_vruntime = last->vruntime;
|
2008-11-10 02:46:32 -07:00
|
|
|
min_vruntime = cfs_rq->min_vruntime;
|
2009-06-17 07:20:55 -06:00
|
|
|
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
|
2009-11-17 06:28:38 -07:00
|
|
|
raw_spin_unlock_irqrestore(&rq->lock, flags);
|
2007-10-15 09:00:08 -06:00
|
|
|
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
|
|
|
|
SPLIT_NS(MIN_vruntime));
|
|
|
|
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
|
|
|
|
SPLIT_NS(min_vruntime));
|
|
|
|
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
|
|
|
|
SPLIT_NS(max_vruntime));
|
2007-10-15 09:00:05 -06:00
|
|
|
spread = max_vruntime - MIN_vruntime;
|
2007-10-15 09:00:08 -06:00
|
|
|
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
|
|
|
|
SPLIT_NS(spread));
|
2007-10-15 09:00:06 -06:00
|
|
|
spread0 = min_vruntime - rq0_min_vruntime;
|
2007-10-15 09:00:08 -06:00
|
|
|
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
|
|
|
|
SPLIT_NS(spread0));
|
2008-11-10 02:46:32 -07:00
|
|
|
SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
|
2007-10-15 09:00:10 -06:00
|
|
|
cfs_rq->nr_spread_over);
|
2012-04-26 05:12:27 -06:00
|
|
|
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
|
2010-11-15 16:47:00 -07:00
|
|
|
SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
|
2008-06-27 05:41:14 -06:00
|
|
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
|
|
|
#ifdef CONFIG_SMP
|
2010-11-15 16:47:00 -07:00
|
|
|
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "load_avg",
|
|
|
|
SPLIT_NS(cfs_rq->load_avg));
|
|
|
|
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "load_period",
|
|
|
|
SPLIT_NS(cfs_rq->load_period));
|
|
|
|
SEQ_printf(m, " .%-30s: %ld\n", "load_contrib",
|
|
|
|
cfs_rq->load_contribution);
|
|
|
|
SEQ_printf(m, " .%-30s: %d\n", "load_tg",
|
sched: Add 'autogroup' scheduling feature: automated per session task groups
A recurring complaint from CFS users is that parallel kbuild has
a negative impact on desktop interactivity. This patch
implements an idea from Linus, to automatically create task
groups. Currently, only per session autogroups are implemented,
but the patch leaves the way open for enhancement.
Implementation: each task's signal struct contains an inherited
pointer to a refcounted autogroup struct containing a task group
pointer, the default for all tasks pointing to the
init_task_group. When a task calls setsid(), a new task group
is created, the process is moved into the new task group, and a
reference to the preveious task group is dropped. Child
processes inherit this task group thereafter, and increase it's
refcount. When the last thread of a process exits, the
process's reference is dropped, such that when the last process
referencing an autogroup exits, the autogroup is destroyed.
At runqueue selection time, IFF a task has no cgroup assignment,
its current autogroup is used.
Autogroup bandwidth is controllable via setting it's nice level
through the proc filesystem:
cat /proc/<pid>/autogroup
Displays the task's group and the group's nice level.
echo <nice level> > /proc/<pid>/autogroup
Sets the task group's shares to the weight of nice <level> task.
Setting nice level is rate limited for !admin users due to the
abuse risk of task group locking.
The feature is enabled from boot by default if
CONFIG_SCHED_AUTOGROUP=y is selected, but can be disabled via
the boot option noautogroup, and can also be turned on/off on
the fly via:
echo [01] > /proc/sys/kernel/sched_autogroup_enabled
... which will automatically move tasks to/from the root task group.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Markus Trippelsdorf <markus@trippelsdorf.de>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Paul Turner <pjt@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
[ Removed the task_group_path() debug code, and fixed !EVENTFD build failure. ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
LKML-Reference: <1290281700.28711.9.camel@maggy.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2010-11-30 06:18:03 -07:00
|
|
|
atomic_read(&cfs_rq->tg->load_weight));
|
2012-10-04 05:18:30 -06:00
|
|
|
SEQ_printf(m, " .%-30s: %lld\n", "runnable_load_avg",
|
|
|
|
cfs_rq->runnable_load_avg);
|
2012-10-04 05:18:30 -06:00
|
|
|
SEQ_printf(m, " .%-30s: %lld\n", "blocked_load_avg",
|
|
|
|
cfs_rq->blocked_load_avg);
|
2008-06-27 05:41:14 -06:00
|
|
|
#endif
|
2010-11-15 16:47:00 -07:00
|
|
|
|
2008-11-10 09:04:09 -07:00
|
|
|
print_cfs_group_stats(m, cpu, cfs_rq->tg);
|
2008-06-27 05:41:14 -06:00
|
|
|
#endif
|
2007-07-09 10:52:00 -06:00
|
|
|
}
|
|
|
|
|
2008-06-19 06:22:24 -06:00
|
|
|
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
|
|
|
|
{
|
2011-01-11 03:11:54 -07:00
|
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
|
|
SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
|
|
|
|
#else
|
2008-06-19 06:22:24 -06:00
|
|
|
SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
|
2011-01-11 03:11:54 -07:00
|
|
|
#endif
|
2008-06-19 06:22:24 -06:00
|
|
|
|
|
|
|
#define P(x) \
|
|
|
|
SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
|
|
|
|
#define PN(x) \
|
|
|
|
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
|
|
|
|
|
|
|
|
P(rt_nr_running);
|
|
|
|
P(rt_throttled);
|
|
|
|
PN(rt_time);
|
|
|
|
PN(rt_runtime);
|
|
|
|
|
|
|
|
#undef PN
|
|
|
|
#undef P
|
|
|
|
}
|
|
|
|
|
2010-11-19 13:11:09 -07:00
|
|
|
extern __read_mostly int sched_clock_running;
|
|
|
|
|
2007-08-09 03:16:51 -06:00
|
|
|
static void print_cpu(struct seq_file *m, int cpu)
|
2007-07-09 10:52:00 -06:00
|
|
|
{
|
2009-06-17 07:20:55 -06:00
|
|
|
struct rq *rq = cpu_rq(cpu);
|
2011-01-11 03:11:54 -07:00
|
|
|
unsigned long flags;
|
2007-07-09 10:52:00 -06:00
|
|
|
|
|
|
|
#ifdef CONFIG_X86
|
|
|
|
{
|
|
|
|
unsigned int freq = cpu_khz ? : 1;
|
|
|
|
|
|
|
|
SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
|
|
|
|
cpu, freq / 1000, (freq % 1000));
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
SEQ_printf(m, "\ncpu#%d\n", cpu);
|
|
|
|
#endif
|
|
|
|
|
2012-05-14 06:34:00 -06:00
|
|
|
#define P(x) \
|
|
|
|
do { \
|
|
|
|
if (sizeof(rq->x) == 4) \
|
|
|
|
SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x)); \
|
|
|
|
else \
|
|
|
|
SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\
|
|
|
|
} while (0)
|
|
|
|
|
2007-10-15 09:00:08 -06:00
|
|
|
#define PN(x) \
|
|
|
|
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
|
2007-07-09 10:52:00 -06:00
|
|
|
|
|
|
|
P(nr_running);
|
|
|
|
SEQ_printf(m, " .%-30s: %lu\n", "load",
|
2007-10-15 09:00:06 -06:00
|
|
|
rq->load.weight);
|
2007-07-09 10:52:00 -06:00
|
|
|
P(nr_switches);
|
|
|
|
P(nr_load_updates);
|
|
|
|
P(nr_uninterruptible);
|
2007-10-15 09:00:08 -06:00
|
|
|
PN(next_balance);
|
2007-07-09 10:52:00 -06:00
|
|
|
P(curr->pid);
|
2007-10-15 09:00:08 -06:00
|
|
|
PN(clock);
|
2007-07-09 10:52:00 -06:00
|
|
|
P(cpu_load[0]);
|
|
|
|
P(cpu_load[1]);
|
|
|
|
P(cpu_load[2]);
|
|
|
|
P(cpu_load[3]);
|
|
|
|
P(cpu_load[4]);
|
|
|
|
#undef P
|
2007-10-15 09:00:08 -06:00
|
|
|
#undef PN
|
2007-07-09 10:52:00 -06:00
|
|
|
|
2008-11-10 02:46:32 -07:00
|
|
|
#ifdef CONFIG_SCHEDSTATS
|
|
|
|
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
|
2009-11-04 09:53:50 -07:00
|
|
|
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
|
2008-11-10 02:46:32 -07:00
|
|
|
|
|
|
|
P(yld_count);
|
|
|
|
|
|
|
|
P(sched_count);
|
|
|
|
P(sched_goidle);
|
2009-11-04 09:53:50 -07:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
P64(avg_idle);
|
|
|
|
#endif
|
2008-11-10 02:46:32 -07:00
|
|
|
|
|
|
|
P(ttwu_count);
|
|
|
|
P(ttwu_local);
|
|
|
|
|
|
|
|
#undef P
|
2011-01-14 00:57:39 -07:00
|
|
|
#undef P64
|
2008-11-10 02:46:32 -07:00
|
|
|
#endif
|
2011-01-11 03:11:54 -07:00
|
|
|
spin_lock_irqsave(&sched_debug_lock, flags);
|
2007-08-09 03:16:47 -06:00
|
|
|
print_cfs_stats(m, cpu);
|
2008-06-19 06:22:24 -06:00
|
|
|
print_rt_stats(m, cpu);
|
2007-07-09 10:52:00 -06:00
|
|
|
|
2011-01-11 03:11:54 -07:00
|
|
|
rcu_read_lock();
|
2007-08-09 03:16:51 -06:00
|
|
|
print_rq(m, rq, cpu);
|
2011-01-11 03:11:54 -07:00
|
|
|
rcu_read_unlock();
|
|
|
|
spin_unlock_irqrestore(&sched_debug_lock, flags);
|
2007-07-09 10:52:00 -06:00
|
|
|
}
|
|
|
|
|
/*
 * Human-readable names for sysctl_sched_tunable_scaling values,
 * indexed by the sysctl's integer value.
 * Fix: "logaritmic" was misspelled in the user-visible output.
 */
static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};
|
2007-07-09 10:52:00 -06:00
|
|
|
static int sched_debug_show(struct seq_file *m, void *v)
|
|
|
|
{
|
2010-11-19 13:11:09 -07:00
|
|
|
u64 ktime, sched_clk, cpu_clk;
|
|
|
|
unsigned long flags;
|
2007-07-09 10:52:00 -06:00
|
|
|
int cpu;
|
|
|
|
|
2010-11-19 13:11:09 -07:00
|
|
|
local_irq_save(flags);
|
|
|
|
ktime = ktime_to_ns(ktime_get());
|
|
|
|
sched_clk = sched_clock();
|
|
|
|
cpu_clk = local_clock();
|
|
|
|
local_irq_restore(flags);
|
|
|
|
|
|
|
|
SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
|
2007-07-09 10:52:00 -06:00
|
|
|
init_utsname()->release,
|
|
|
|
(int)strcspn(init_utsname()->version, " "),
|
|
|
|
init_utsname()->version);
|
|
|
|
|
2010-11-19 13:11:09 -07:00
|
|
|
#define P(x) \
|
|
|
|
SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
|
|
|
|
#define PN(x) \
|
|
|
|
SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
|
|
|
|
PN(ktime);
|
|
|
|
PN(sched_clk);
|
|
|
|
PN(cpu_clk);
|
|
|
|
P(jiffies);
|
|
|
|
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
|
|
|
|
P(sched_clock_stable);
|
|
|
|
#endif
|
|
|
|
#undef PN
|
|
|
|
#undef P
|
|
|
|
|
|
|
|
SEQ_printf(m, "\n");
|
|
|
|
SEQ_printf(m, "sysctl_sched\n");
|
2007-07-09 10:52:00 -06:00
|
|
|
|
2007-10-15 09:00:10 -06:00
|
|
|
#define P(x) \
|
2007-10-15 09:00:10 -06:00
|
|
|
SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
|
2007-10-15 09:00:10 -06:00
|
|
|
#define PN(x) \
|
2007-10-15 09:00:10 -06:00
|
|
|
SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
|
2007-10-15 09:00:10 -06:00
|
|
|
PN(sysctl_sched_latency);
|
2007-11-09 14:39:37 -07:00
|
|
|
PN(sysctl_sched_min_granularity);
|
2007-10-15 09:00:10 -06:00
|
|
|
PN(sysctl_sched_wakeup_granularity);
|
2010-07-19 13:31:16 -06:00
|
|
|
P(sysctl_sched_child_runs_first);
|
2007-10-15 09:00:10 -06:00
|
|
|
P(sysctl_sched_features);
|
|
|
|
#undef PN
|
|
|
|
#undef P
|
|
|
|
|
2009-11-30 04:16:47 -07:00
|
|
|
SEQ_printf(m, " .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
|
|
|
|
sysctl_sched_tunable_scaling,
|
|
|
|
sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
|
|
|
|
|
2007-07-09 10:52:00 -06:00
|
|
|
for_each_online_cpu(cpu)
|
2007-08-09 03:16:51 -06:00
|
|
|
print_cpu(m, cpu);
|
2007-07-09 10:52:00 -06:00
|
|
|
|
|
|
|
SEQ_printf(m, "\n");
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-10-25 02:00:11 -06:00
|
|
|
void sysrq_sched_debug_show(void)
|
2007-07-09 10:52:00 -06:00
|
|
|
{
|
|
|
|
sched_debug_show(NULL, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int sched_debug_open(struct inode *inode, struct file *filp)
|
|
|
|
{
|
|
|
|
return single_open(filp, sched_debug_show, NULL);
|
|
|
|
}
|
|
|
|
|
2007-10-15 09:00:19 -06:00
|
|
|
static const struct file_operations sched_debug_fops = {
|
2007-07-09 10:52:00 -06:00
|
|
|
.open = sched_debug_open,
|
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
2007-07-31 01:38:50 -06:00
|
|
|
.release = single_release,
|
2007-07-09 10:52:00 -06:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Register /proc/sched_debug (mode 0444) at boot.
 * Returns -ENOMEM if the proc entry cannot be created.
 */
static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
|
|
|
|
|
|
|
|
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
|
|
|
|
{
|
2007-10-15 09:00:18 -06:00
|
|
|
unsigned long nr_switches;
|
2007-07-09 10:52:00 -06:00
|
|
|
|
2010-05-26 15:43:22 -06:00
|
|
|
SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
|
|
|
|
get_nr_threads(p));
|
2007-10-15 09:00:18 -06:00
|
|
|
SEQ_printf(m,
|
|
|
|
"---------------------------------------------------------\n");
|
2007-10-15 09:00:18 -06:00
|
|
|
#define __P(F) \
|
|
|
|
SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
|
2007-07-09 10:52:00 -06:00
|
|
|
#define P(F) \
|
2007-10-15 09:00:18 -06:00
|
|
|
SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
|
2007-10-15 09:00:18 -06:00
|
|
|
#define __PN(F) \
|
|
|
|
SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
|
2007-10-15 09:00:08 -06:00
|
|
|
#define PN(F) \
|
2007-10-15 09:00:18 -06:00
|
|
|
SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
|
2007-07-09 10:52:00 -06:00
|
|
|
|
2007-10-15 09:00:08 -06:00
|
|
|
PN(se.exec_start);
|
|
|
|
PN(se.vruntime);
|
|
|
|
PN(se.sum_exec_runtime);
|
2007-08-02 09:41:40 -06:00
|
|
|
|
2007-10-15 09:00:18 -06:00
|
|
|
nr_switches = p->nvcsw + p->nivcsw;
|
|
|
|
|
2007-08-02 09:41:40 -06:00
|
|
|
#ifdef CONFIG_SCHEDSTATS
|
2010-03-10 19:37:45 -07:00
|
|
|
PN(se.statistics.wait_start);
|
|
|
|
PN(se.statistics.sleep_start);
|
|
|
|
PN(se.statistics.block_start);
|
|
|
|
PN(se.statistics.sleep_max);
|
|
|
|
PN(se.statistics.block_max);
|
|
|
|
PN(se.statistics.exec_max);
|
|
|
|
PN(se.statistics.slice_max);
|
|
|
|
PN(se.statistics.wait_max);
|
|
|
|
PN(se.statistics.wait_sum);
|
|
|
|
P(se.statistics.wait_count);
|
|
|
|
PN(se.statistics.iowait_sum);
|
|
|
|
P(se.statistics.iowait_count);
|
2007-10-15 09:00:18 -06:00
|
|
|
P(se.nr_migrations);
|
2010-03-10 19:37:45 -07:00
|
|
|
P(se.statistics.nr_migrations_cold);
|
|
|
|
P(se.statistics.nr_failed_migrations_affine);
|
|
|
|
P(se.statistics.nr_failed_migrations_running);
|
|
|
|
P(se.statistics.nr_failed_migrations_hot);
|
|
|
|
P(se.statistics.nr_forced_migrations);
|
|
|
|
P(se.statistics.nr_wakeups);
|
|
|
|
P(se.statistics.nr_wakeups_sync);
|
|
|
|
P(se.statistics.nr_wakeups_migrate);
|
|
|
|
P(se.statistics.nr_wakeups_local);
|
|
|
|
P(se.statistics.nr_wakeups_remote);
|
|
|
|
P(se.statistics.nr_wakeups_affine);
|
|
|
|
P(se.statistics.nr_wakeups_affine_attempts);
|
|
|
|
P(se.statistics.nr_wakeups_passive);
|
|
|
|
P(se.statistics.nr_wakeups_idle);
|
2007-10-15 09:00:18 -06:00
|
|
|
|
|
|
|
{
|
|
|
|
u64 avg_atom, avg_per_cpu;
|
|
|
|
|
|
|
|
avg_atom = p->se.sum_exec_runtime;
|
|
|
|
if (nr_switches)
|
|
|
|
do_div(avg_atom, nr_switches);
|
|
|
|
else
|
|
|
|
avg_atom = -1LL;
|
|
|
|
|
|
|
|
avg_per_cpu = p->se.sum_exec_runtime;
|
2007-11-28 07:52:56 -07:00
|
|
|
if (p->se.nr_migrations) {
|
2008-05-01 05:34:28 -06:00
|
|
|
avg_per_cpu = div64_u64(avg_per_cpu,
|
|
|
|
p->se.nr_migrations);
|
2007-11-28 07:52:56 -07:00
|
|
|
} else {
|
2007-10-15 09:00:18 -06:00
|
|
|
avg_per_cpu = -1LL;
|
2007-11-28 07:52:56 -07:00
|
|
|
}
|
2007-10-15 09:00:18 -06:00
|
|
|
|
|
|
|
__PN(avg_atom);
|
|
|
|
__PN(avg_per_cpu);
|
|
|
|
}
|
2007-08-02 09:41:40 -06:00
|
|
|
#endif
|
2007-10-15 09:00:18 -06:00
|
|
|
__P(nr_switches);
|
2007-10-15 09:00:18 -06:00
|
|
|
SEQ_printf(m, "%-35s:%21Ld\n",
|
2007-10-15 09:00:18 -06:00
|
|
|
"nr_voluntary_switches", (long long)p->nvcsw);
|
|
|
|
SEQ_printf(m, "%-35s:%21Ld\n",
|
|
|
|
"nr_involuntary_switches", (long long)p->nivcsw);
|
|
|
|
|
2007-07-09 10:52:00 -06:00
|
|
|
P(se.load.weight);
|
|
|
|
P(policy);
|
|
|
|
P(prio);
|
2007-10-15 09:00:08 -06:00
|
|
|
#undef PN
|
2007-10-15 09:00:18 -06:00
|
|
|
#undef __PN
|
|
|
|
#undef P
|
|
|
|
#undef __P
|
2007-07-09 10:52:00 -06:00
|
|
|
|
|
|
|
{
|
2008-11-16 00:07:15 -07:00
|
|
|
unsigned int this_cpu = raw_smp_processor_id();
|
2007-07-09 10:52:00 -06:00
|
|
|
u64 t0, t1;
|
|
|
|
|
2008-11-16 00:07:15 -07:00
|
|
|
t0 = cpu_clock(this_cpu);
|
|
|
|
t1 = cpu_clock(this_cpu);
|
2007-10-15 09:00:18 -06:00
|
|
|
SEQ_printf(m, "%-35s:%21Ld\n",
|
2007-07-09 10:52:00 -06:00
|
|
|
"clock-delta", (long long)(t1-t0));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Reset @p's schedstats counters; invoked when /proc/<pid>/sched is
 * written to. A no-op without CONFIG_SCHEDSTATS.
 */
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}