/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"

#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1

static char const *input_name = "perf.data";
static char const *output_name = "output.svg";

static unsigned int	numcpus;
static u64		min_freq;	/* Lowest CPU frequency seen */
static u64		max_freq;	/* Highest CPU frequency seen */
static u64		turbo_frequency;

static u64		first_time, last_time;

static bool		power_only;

struct per_pid;
struct per_pidcomm;

struct cpu_sample;
struct power_event;
struct wake_event;

struct sample_wrapper;

/*
 * Datastructure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 *	this is because we want to track different programs separately, while
 *	exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw
 * the final graph.
 */

struct per_pid {
	struct per_pid *next;

	int		pid;
	int		ppid;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	int		display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;
};

struct per_pidcomm {
	struct per_pidcomm *next;

	u64		start_time;
	u64		end_time;
	u64		total_time;

	int		Y;
	int		display;

	long		state;
	u64		state_since;

	char		*comm;

	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64		timestamp;
	unsigned char	data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
};

static struct per_pid *all_data;

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
};

static struct power_event    *power_events;
static struct wake_event     *wake_events;

struct process_filter;
struct process_filter {
	char			*name;
	int			pid;
	struct process_filter	*next;
};

static struct process_filter *process_filter;

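/*
 * Look up the per_pid entry for a pid in the global all_data list,
 * allocating and linking a zeroed entry at the head if none exists yet.
 */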
static struct per_pid *find_create_pid(int pid)
{
	struct per_pid *cursor = all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = malloc(sizeof(struct per_pid));
	assert(cursor != NULL);
	memset(cursor, 0, sizeof(struct per_pid));
	cursor->pid = pid;
	cursor->next = all_data;
	all_data = cursor;
	return cursor;
}

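/*
 * Associate a comm string with a pid: reuse an existing per_pidcomm with
 * the same name, fill in an unnamed one, or allocate a fresh entry.
 */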
static void pid_set_comm(int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = malloc(sizeof(struct per_pidcomm));
	assert(c != NULL);
	memset(c, 0, sizeof(struct per_pidcomm));
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(pid);
	pp = find_create_pid(ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}

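/*
 * Record one sample interval (running/waiting/blocked) for a pid on a CPU,
 * and fold running time into the per-comm and per-pid totals.
 */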
static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(pid);
	c = p->current;
	if (!c) {
		c = malloc(sizeof(struct per_pidcomm));
		assert(c != NULL);
		memset(c, 0, sizeof(struct per_pidcomm));
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = malloc(sizeof(struct cpu_sample));
	assert(sample != NULL);
	memset(sample, 0, sizeof(struct cpu_sample));
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}

#define MAX_CPUS 4096

static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

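/*
 * perf event callbacks: comm/fork/exit events feed the task lifetime
 * bookkeeping above; tracepoint payloads arrive via sample events.
 */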
static int process_comm_event(union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_set_comm(event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}

struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			lock_depth;
};

#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
struct power_entry_old {
	struct trace_entry te;
	u64	type;
	u64	value;
	u64	cpu_id;
};
#endif

struct power_processor_entry {
	struct trace_entry te;
	u32	state;
	u32	cpu_id;
};

#define TASK_COMM_LEN 16
struct wakeup_entry {
	struct trace_entry te;
	char comm[TASK_COMM_LEN];
	int   pid;
	int   prio;
	int   success;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

struct sched_switch {
	struct trace_entry te;
	char prev_comm[TASK_COMM_LEN];
	int  prev_pid;
	int  prev_prio;
	long prev_state; /* Arjan weeps. */
	char next_comm[TASK_COMM_LEN];
	int  next_pid;
	int  next_prio;
};

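/*
 * C-state (idle) tracking: remember when a CPU entered an idle state, and
 * emit a CSTATE power_event covering the interval once it exits.
 */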
static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(int cpu, u64 timestamp)
{
	struct power_event *pwr;
	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = power_events;

	power_events = pwr;
}

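/*
 * P-state (frequency) change: close out the previous frequency interval as
 * a PSTATE power_event and keep track of min/max/turbo frequencies seen.
 */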
static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = power_events;

	if (!pwr->start_time)
		pwr->start_time = first_time;

	power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > max_freq)
		max_freq = new_freq;

	if (new_freq < min_freq || min_freq == 0)
		min_freq = new_freq;

	if (new_freq == max_freq - 1000)
		turbo_frequency = max_freq;
}

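/*
 * A wakeup marks the wakee as waiting; wakeups issued from hard or soft
 * irq context get waker -1 so they can be drawn as interrupts.
 */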
static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
	struct wake_event *we;
	struct per_pid *p;
	struct wakeup_entry *wake = (void *)te;

	we = malloc(sizeof(struct wake_event));
	if (!we)
		return;

	memset(we, 0, sizeof(struct wake_event));
	we->time = timestamp;
	we->waker = pid;

	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wake->pid;
	we->next = wake_events;
	wake_events = we;
	p = find_create_pid(we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

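/*
 * On a context switch, close the running sample of the previous task and
 * any pending sample of the next task, then update both tasks' states.
 */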
static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;

	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (sw->prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (sw->prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}

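/*
 * Dispatch raw tracepoint samples by event name; both the new power:cpu_*
 * events and (optionally) the old power:power_* events are handled.
 */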
static int process_sample_event(union perf_event *event __used,
				struct perf_sample *sample,
				struct perf_evsel *evsel __used,
				struct perf_session *session)
{
	struct trace_entry *te;

	if (session->sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > sample->time)
			first_time = sample->time;
		if (last_time < sample->time)
			last_time = sample->time;
	}

	te = (void *)sample->raw_data;
	if (session->sample_type & PERF_SAMPLE_RAW && sample->raw_size > 0) {
		char *event_str;
#ifdef SUPPORT_OLD_POWER_EVENTS
		struct power_entry_old *peo;
		peo = (void *)te;
#endif
		/*
		 * FIXME: use evsel, it's already mapped from id to perf_evsel,
		 * remove perf_header__find_event infrastructure bits.
		 * Mapping all these "power:cpu_idle" strings to the tracepoint
		 * ID and then just comparing against evsel->attr.config.
		 *
		 * e.g.:
		 *
		 * if (evsel->attr.config == power_cpu_idle_id)
		 */
		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		if (sample->cpu > numcpus)
			numcpus = sample->cpu;

		if (strcmp(event_str, "power:cpu_idle") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			if (ppe->state == (u32)PWR_EVENT_EXIT)
				c_state_end(ppe->cpu_id, sample->time);
			else
				c_state_start(ppe->cpu_id, sample->time,
					      ppe->state);
		}
		else if (strcmp(event_str, "power:cpu_frequency") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			p_state_change(ppe->cpu_id, sample->time, ppe->state);
		}

		else if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(sample->cpu, sample->time, sample->pid, te);

		else if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(sample->cpu, sample->time, te);

#ifdef SUPPORT_OLD_POWER_EVENTS
		if (use_old_power_events) {
			if (strcmp(event_str, "power:power_start") == 0)
				c_state_start(peo->cpu_id, sample->time,
					      peo->value);

			else if (strcmp(event_str, "power:power_end") == 0)
				c_state_end(sample->cpu, sample->time);

			else if (strcmp(event_str,
					"power:power_frequency") == 0)
				p_state_change(peo->cpu_id, sample->time,
					       peo->value);
		}
#endif
	}
	return 0;
}

/*
 * After the last sample we need to wrap up the current C/P state
 * and close out each CPU for these.
 */
static void end_sample_processing(void)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= numcpus; cpu++) {
		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		/* C state */
#if 0
		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = power_events;

		power_events = pwr;
#endif
		/* P state */

		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = power_events;

		if (!pwr->start_time)
			pwr->start_time = first_time;
		if (!pwr->state)
			pwr->state = min_freq;
		power_events = pwr;
	}
}

/*
 * Sort the pid datastructure
 */
static void sort_pids(void)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (all_data) {
		p = all_data;
		all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	all_data = new_list;
}

static void draw_c_p_states(void)
{
	struct power_event *pwr;
	pwr = power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}

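/*
 * Draw wakeup arrows: locate the SVG rows (Y) of waker and wakee at the
 * time of the event, falling back to "[pid]" labels when no comm matches.
 */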
static void draw_wakeups(void)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to);
		else
			svg_partial_wakeline(we->time, from, task_from, to, task_to);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_process_bars(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * numcpus + 2;

	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
				if (sample->type == TYPE_BLOCKED)
					svg_box(Y, sample->start_time, sample->end_time, "blocked");
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->start_time, sample->end_time);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	struct process_filter *filt;
	int pid;

	pid = strtoull(string, NULL, 10);
	filt = malloc(sizeof(struct process_filter));
	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid  = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

static int determine_display_tasks(u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered();

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;
		if (p->total_time >= threshold && !power_only)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (c->total_time >= threshold && !power_only) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}


#define TIME_THRESH 10000000

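/*
 * Emit the SVG: grid, legend and CPU boxes first, then per-CPU usage,
 * per-process bars, C/P state blocks and wakeup arrows on top.
 */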
static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	numcpus++;

	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}

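/*
 * Handlers wired into the perf session; ordered_samples requests that
 * samples be delivered in timestamp order.
 */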
static struct perf_event_ops event_ops = {
	.comm			= process_comm_event,
	.fork			= process_fork_event,
	.exit			= process_exit_event,
	.sample			= process_sample_event,
	.ordered_samples	= true,
};

static int __cmd_timechart(void)
{
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &event_ops);
	int ret = -EINVAL;

	if (session == NULL)
		return -ENOMEM;

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	ret = perf_session__process_events(session, &event_ops);
	if (ret)
		goto out_delete;

	end_sample_processing();

	sort_pids();

	write_svg_file(output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(last_time - first_time) / 1000000000.0, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}

static const char * const timechart_usage[] = {
	"perf timechart [<options>] {record}",
	NULL
};

#ifdef SUPPORT_OLD_POWER_EVENTS
static const char * const record_old_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "power:power_start",
	"-e", "power:power_end",
	"-e", "power:power_frequency",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};
#endif

static const char * const record_new_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "power:cpu_frequency",
	"-e", "power:cpu_idle",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};

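/*
 * "perf timechart record" forwards to the generic record command with one
 * of the event lists above; the old power:power_* tracepoints are used
 * when the newer power:cpu_idle event is not available.
 */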
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const *record_args = record_new_args;
	unsigned int record_elems = ARRAY_SIZE(record_new_args);

#ifdef SUPPORT_OLD_POWER_EVENTS
	if (!is_valid_tracepoint("power:cpu_idle") &&
	    is_valid_tracepoint("power:power_start")) {
		use_old_power_events = 1;
		record_args = record_old_args;
		record_elems = ARRAY_SIZE(record_old_args);
	}
#endif

	rec_argc = record_elems + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < record_elems; i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

static int
parse_process(const struct option *opt __used, const char *arg, int __used unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_STRING('o', "output", &output_name, "file",
		    "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width,
		    "page width"),
	OPT_BOOLEAN('P', "power-only", &power_only,
		    "output power data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		      "process selector. Pass a pid or process name.",
		       parse_process),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		    "Look for files with symbols relative to this directory"),
	OPT_END()
};

int cmd_timechart(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, options, timechart_usage,
			PARSE_OPT_STOP_AT_NON_OPTION);

	symbol__init();

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(timechart_usage, options);

	setup_pager();

	return __cmd_timechart();
}