5530998577
Currently setup_sorting() is called to parse the sort keys and exits if it fails to add a sort key. As it's part of libperf, it'd be better to return an error code rather than exiting the application from inside the library.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Suggested-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1360130237-9963-2-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
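The change described above is easiest to see as a sketch. Below is a minimal illustration of the intent, not the literal util/sort.c diff: setup_sorting() hands a negative error code back to its caller instead of printing usage and exiting from inside the library. The helpers it leans on (sort_order, sort_dimension__add(), error()) are perf-internal names from this era and are shown only for orientation.

```c
/*
 * Sketch only: library side (util/sort.c).  On failure, report an
 * error code to the caller instead of exiting the whole tool.
 */
int setup_sorting(void)
{
	char *str = strdup(sort_order);	/* e.g. "dso,symbol" */
	char *tok, *tmp;
	int ret = 0;

	if (str == NULL)
		return -ENOMEM;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = sort_dimension__add(tok);	/* perf-internal helper */
		if (ret < 0) {
			error("Unknown --sort key: `%s'", tok);
			break;			/* propagate, don't exit() */
		}
	}

	free(str);
	return ret;
}
```

The caller-side half of the contract is visible in cmd_top() below: `if (setup_sorting() < 0) usage_with_options(top_usage, options);` — the builtin, not the library, decides to print usage and terminate.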
1230 lines
31 KiB
C
/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *		 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/annotate.h"
#include "util/cache.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/machine.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
#include "util/intlist.h"

#include "util/debug.h"

#include <assert.h>
#include <elf.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>

#include <errno.h>
#include <time.h>
#include <sched.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

static volatile int done;

static void perf_top__update_print_entries(struct perf_top *top)
{
	if (top->print_entries > 9)
		top->print_entries -= 9;
}

static void perf_top__sig_winch(int sig __maybe_unused,
				siginfo_t *info __maybe_unused, void *arg)
{
	struct perf_top *top = arg;

	get_term_dimensions(&top->winsize);
	if (!top->print_entries
	    || (top->print_entries+4) > top->winsize.ws_row) {
		top->print_entries = top->winsize.ws_row;
	} else {
		top->print_entries += 4;
		top->winsize.ws_row = top->print_entries;
	}
	perf_top__update_print_entries(top);
}

static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	int err = -1;

	if (!he || !he->ms.sym)
		return -1;

	sym = he->ms.sym;
	map = he->ms.map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
		return -1;
	}

	notes = symbol__annotation(sym);
	if (notes->src != NULL) {
		pthread_mutex_lock(&notes->lock);
		goto out_assign;
	}

	pthread_mutex_lock(&notes->lock);

	if (symbol__alloc_hist(sym) < 0) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return err;
	}

	err = symbol__annotate(sym, map, 0);
	if (err == 0) {
out_assign:
		top->sym_filter_entry = he;
	}

	pthread_mutex_unlock(&notes->lock);
	return err;
}

static void __zero_source_counters(struct hist_entry *he)
{
	struct symbol *sym = he->ms.sym;
	symbol__annotate_zero_histograms(sym);
}

static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
	struct utsname uts;
	int err = uname(&uts);

	ui__warning("Out of bounds address found:\n\n"
		    "Addr: %" PRIx64 "\n"
		    "DSO: %s %c\n"
		    "Map: %" PRIx64 "-%" PRIx64 "\n"
		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
		    "Arch: %s\n"
		    "Kernel: %s\n"
		    "Tools: %s\n\n"
		    "Not all samples will be on the annotation output.\n\n"
		    "Please report to linux-kernel@vger.kernel.org\n",
		    ip, map->dso->long_name, dso__symtab_origin(map->dso),
		    map->start, map->end, sym->start, sym->end,
		    sym->binding == STB_GLOBAL ? 'g' :
		    sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
		    err ? "[unknown]" : uts.machine,
		    err ? "[unknown]" : uts.release, perf_version_string);
	if (use_browser <= 0)
		sleep(5);

	map->erange_warned = true;
}

static void perf_top__record_precise_ip(struct perf_top *top,
					struct hist_entry *he,
					int counter, u64 ip)
{
	struct annotation *notes;
	struct symbol *sym;
	int err;

	if (he == NULL || he->ms.sym == NULL ||
	    ((top->sym_filter_entry == NULL ||
	      top->sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
		return;

	sym = he->ms.sym;
	notes = symbol__annotation(sym);

	if (pthread_mutex_trylock(&notes->lock))
		return;

	if (notes->src == NULL && symbol__alloc_hist(sym) < 0) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return;
	}

	ip = he->ms.map->map_ip(he->ms.map, ip);
	err = symbol__inc_addr_samples(sym, he->ms.map, counter, ip);

	pthread_mutex_unlock(&notes->lock);

	if (err == -ERANGE && !he->ms.map->erange_warned)
		ui__warn_map_erange(he->ms.map, sym, ip);
}

static void perf_top__show_details(struct perf_top *top)
{
	struct hist_entry *he = top->sym_filter_entry;
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!he)
		return;

	symbol = he->ms.sym;
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
	printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);

	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
				       0, top->sym_pcnt_filter, top->print_entries, 4);
	if (top->zero)
		symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
	else
		symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	pthread_mutex_unlock(&notes->lock);
}

static const char CONSOLE_CLEAR[] = "\033[H\033[2J";

static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
						     struct addr_location *al,
						     struct perf_sample *sample)
{
	struct hist_entry *he;

	he = __hists__add_entry(&evsel->hists, al, NULL, sample->period);
	if (he == NULL)
		return NULL;

	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
	return he;
}

static void perf_top__print_sym_table(struct perf_top *top)
{
	char bf[160];
	int printed = 0;
	const int win_width = top->winsize.ws_col - 1;

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(top, bf, sizeof(bf));
	printf("%s\n", bf);

	perf_top__reset_sample_counters(top);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (top->sym_evsel->hists.stats.nr_lost_warned !=
	    top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
		top->sym_evsel->hists.stats.nr_lost_warned =
			top->sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
		color_fprintf(stdout, PERF_COLOR_RED,
			      "WARNING: LOST %d chunks, Check IO/CPU overload",
			      top->sym_evsel->hists.stats.nr_lost_warned);
		++printed;
	}

	if (top->sym_filter_entry) {
		perf_top__show_details(top);
		return;
	}

	hists__collapse_resort_threaded(&top->sym_evsel->hists);
	hists__output_resort_threaded(&top->sym_evsel->hists);
	hists__decay_entries_threaded(&top->sym_evsel->hists,
				      top->hide_user_symbols,
				      top->hide_kernel_symbols);
	hists__output_recalc_col_len(&top->sym_evsel->hists,
				     top->winsize.ws_row - 3);
	putchar('\n');
	hists__fprintf(&top->sym_evsel->hists, false,
		       top->winsize.ws_row - 4 - printed, win_width, stdout);
}

static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while(*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
	char *buf = malloc(0), *p;
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
	struct rb_node *next;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		top->sym_filter_entry = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	next = rb_first(&top->sym_evsel->hists.entries);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
			found = n;
			break;
		}
		next = rb_next(&n->rb_node);
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
	} else
		perf_top__parse_source(top, found);

out_free:
	free(buf);
}

static void perf_top__print_mapped_keys(struct perf_top *top)
{
	char *name = NULL;

	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);

	if (top->evlist->nr_entries > 1)
		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel));

	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);

	fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
	fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S] stop annotation.\n");

	fprintf(stdout,
		"\t[K] hide kernel_symbols symbols. \t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U] hide user symbols. \t(%s)\n",
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
	fprintf(stdout, "\t[qQ] quit.\n");
}

static int perf_top__key_mapped(struct perf_top *top, int c)
{
	switch (c) {
	case 'd':
	case 'e':
	case 'f':
	case 'z':
	case 'q':
	case 'Q':
	case 'K':
	case 'U':
	case 'F':
	case 's':
	case 'S':
		return 1;
	case 'E':
		return top->evlist->nr_entries > 1 ? 1 : 0;
	default:
		break;
	}

	return 0;
}

static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
	bool ret = true;

	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
			return ret;
	}

	switch (c) {
	case 'd':
		prompt_integer(&top->delay_secs, "Enter display delay");
		if (top->delay_secs < 1)
			top->delay_secs = 1;
		break;
	case 'e':
		prompt_integer(&top->print_entries, "Enter display entries (lines)");
		if (top->print_entries == 0) {
			struct sigaction act = {
				.sa_sigaction = perf_top__sig_winch,
				.sa_flags     = SA_SIGINFO,
			};
			perf_top__sig_winch(SIGWINCH, NULL, top);
			sigaction(SIGWINCH, &act, NULL);
		} else {
			perf_top__sig_winch(SIGWINCH, NULL, top);
			signal(SIGWINCH, SIG_DFL);
		}
		break;
	case 'E':
		if (top->evlist->nr_entries > 1) {
			/* Select 0 as the default event: */
			int counter = 0;

			fprintf(stderr, "\nAvailable events:");

			list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
				fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));

			prompt_integer(&counter, "Enter details event counter");

			if (counter >= top->evlist->nr_entries) {
				top->sym_evsel = perf_evlist__first(top->evlist);
				fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
				sleep(1);
				break;
			}
			list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
				if (top->sym_evsel->idx == counter)
					break;
		} else
			top->sym_evsel = perf_evlist__first(top->evlist);
		break;
	case 'f':
		prompt_integer(&top->count_filter, "Enter display event count filter");
		break;
	case 'F':
		prompt_percent(&top->sym_pcnt_filter,
			       "Enter details display event filter (percent)");
		break;
	case 'K':
		top->hide_kernel_symbols = !top->hide_kernel_symbols;
		break;
	case 'q':
	case 'Q':
		printf("exiting.\n");
		if (top->dump_symtab)
			perf_session__fprintf_dsos(top->session, stderr);
		ret = false;
		break;
	case 's':
		perf_top__prompt_symbol(top, "Enter details symbol");
		break;
	case 'S':
		if (!top->sym_filter_entry)
			break;
		else {
			struct hist_entry *syme = top->sym_filter_entry;

			top->sym_filter_entry = NULL;
			__zero_source_counters(syme);
		}
		break;
	case 'U':
		top->hide_user_symbols = !top->hide_user_symbols;
		break;
	case 'z':
		top->zero = !top->zero;
		break;
	default:
		break;
	}

	return ret;
}

static void perf_top__sort_new_samples(void *arg)
{
	struct perf_top *t = arg;
	perf_top__reset_sample_counters(t);

	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;

	hists__collapse_resort_threaded(&t->sym_evsel->hists);
	hists__output_resort_threaded(&t->sym_evsel->hists);
	hists__decay_entries_threaded(&t->sym_evsel->hists,
				      t->hide_user_symbols,
				      t->hide_kernel_symbols);
}

static void *display_thread_tui(void *arg)
{
	struct perf_evsel *pos;
	struct perf_top *top = arg;
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
	struct hist_browser_timer hbt = {
		.timer		= perf_top__sort_new_samples,
		.arg		= top,
		.refresh	= top->delay_secs,
	};

	perf_top__sort_new_samples(top);

	/*
	 * Initialize the uid_filter_str, in the future the TUI will allow
	 * Zooming in/out UIDs. For now just use whatever the user passed
	 * via --uid.
	 */
	list_for_each_entry(pos, &top->evlist->entries, node)
		pos->hists.uid_filter_str = top->record_opts.target.uid_str;

	perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
				      &top->session->header.env);

	done = 1;
	return NULL;
}

static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	struct perf_top *top = arg;
	int delay_msecs, c;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

	pthread__unblock_sigwinch();
repeat:
	delay_msecs = top->delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return */
	getc(stdin);

	while (!done) {
		perf_top__print_sym_table(top);
		/*
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		 */
		switch (poll(&stdin_poll, 1, delay_msecs)) {
		case 0:
			continue;
		case -1:
			if (errno == EINTR)
				continue;
			/* Fall thru */
		default:
			c = getc(stdin);
			tcsetattr(0, TCSAFLUSH, &save);

			if (perf_top__handle_keypress(top, c))
				goto repeat;
			done = 1;
		}
	}

	return NULL;
}

/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"intel_idle",
	"default_idle",
	"native_safe_halt",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};

static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym)
{
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			sym->ignore = true;
			break;
		}
	}

	return 0;
}

static void perf_event__process_sample(struct perf_tool *tool,
				       const union perf_event *event,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_top *top = container_of(tool, struct perf_top, tool);
	struct symbol *parent = NULL;
	u64 ip = event->ip.ip;
	struct addr_location al;
	int err;

	if (!machine && perf_guest) {
		static struct intlist *seen;

		if (!seen)
			seen = intlist__new(NULL);

		if (!intlist__has_entry(seen, event->ip.pid)) {
			pr_err("Can't find guest [%d]'s kernel information\n",
				event->ip.pid);
			intlist__add(seen, event->ip.pid);
		}
		return;
	}

	if (!machine) {
		pr_err("%u unprocessable samples recorded.\r",
		       top->session->stats.nr_unprocessable_samples++);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top->exact_samples++;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  symbol_filter) < 0 ||
	    al.filtered)
		return;

	if (!top->kptr_restrict_warned &&
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
		ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
			  !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
			  " modules" : "");
		if (use_browser <= 0)
			sleep(5);
		top->kptr_restrict_warned = true;
	}

	if (al.sym == NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (!top->kptr_restrict_warned && !top->vmlinux_warned &&
		    al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			if (symbol_conf.vmlinux_name) {
				ui__warning("The %s file can't be used.\n%s",
					    symbol_conf.vmlinux_name, msg);
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
			top->vmlinux_warned = true;
		}
	}

	if (al.sym == NULL || !al.sym->ignore) {
		struct hist_entry *he;

		if ((sort__has_parent || symbol_conf.use_callchain) &&
		    sample->callchain) {
			err = machine__resolve_callchain(machine, evsel,
							 al.thread, sample,
							 &parent);

			if (err)
				return;
		}

		he = perf_evsel__add_hist_entry(evsel, &al, sample);
		if (he == NULL) {
			pr_err("Problem incrementing symbol period, skipping event\n");
			return;
		}

		if (symbol_conf.use_callchain) {
			err = callchain_append(he->callchain, &callchain_cursor,
					       sample->period);
			if (err)
				return;
		}

		if (top->sort_has_symbols)
			perf_top__record_precise_ip(top, he, evsel->idx, ip);
	}

	return;
}

static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
	struct perf_sample sample;
	struct perf_evsel *evsel;
	struct perf_session *session = top->session;
	union perf_event *event;
	struct machine *machine;
	u8 origin;
	int ret;

	while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
		ret = perf_evlist__parse_sample(top->evlist, event, &sample);
		if (ret) {
			pr_err("Can't parse sample, err = %d\n", ret);
			continue;
		}

		evsel = perf_evlist__id2evsel(session->evlist, sample.id);
		assert(evsel != NULL);

		origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

		if (event->header.type == PERF_RECORD_SAMPLE)
			++top->samples;

		switch (origin) {
		case PERF_RECORD_MISC_USER:
			++top->us_samples;
			if (top->hide_user_symbols)
				continue;
			machine = &session->machines.host;
			break;
		case PERF_RECORD_MISC_KERNEL:
			++top->kernel_samples;
			if (top->hide_kernel_symbols)
				continue;
			machine = &session->machines.host;
			break;
		case PERF_RECORD_MISC_GUEST_KERNEL:
			++top->guest_kernel_samples;
			machine = perf_session__find_machine(session, event->ip.pid);
			break;
		case PERF_RECORD_MISC_GUEST_USER:
			++top->guest_us_samples;
			/*
			 * TODO: we don't process guest user from host side
			 * except simple counting.
			 */
			/* Fall thru */
		default:
			continue;
		}


		if (event->header.type == PERF_RECORD_SAMPLE) {
			perf_event__process_sample(&top->tool, event, evsel,
						   &sample, machine);
		} else if (event->header.type < PERF_RECORD_MAX) {
			hists__inc_nr_events(&evsel->hists, event->header.type);
			machine__process_event(machine, event);
		} else
			++session->stats.nr_unknown_events;
	}
}

static void perf_top__mmap_read(struct perf_top *top)
{
	int i;

	for (i = 0; i < top->evlist->nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);
}

static int perf_top__start_counters(struct perf_top *top)
{
	char msg[512];
	struct perf_evsel *counter;
	struct perf_evlist *evlist = top->evlist;
	struct perf_record_opts *opts = &top->record_opts;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(counter, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(counter, top->evlist->cpus,
				     top->evlist->threads) < 0) {
			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			perf_evsel__open_strerror(counter, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out_err;
		}
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		ui__error("Failed to mmap with %d (%s)\n",
			    errno, strerror(errno));
		goto out_err;
	}

	return 0;

out_err:
	return -1;
}

static int perf_top__setup_sample_type(struct perf_top *top)
{
	if (!top->sort_has_symbols) {
		if (symbol_conf.use_callchain) {
			ui__error("Selected -g but \"sym\" not present in --sort/-s.");
			return -EINVAL;
		}
	} else if (callchain_param.mode != CHAIN_NONE) {
		if (callchain_register_param(&callchain_param) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int __cmd_top(struct perf_top *top)
{
	struct perf_record_opts *opts = &top->record_opts;
	pthread_t thread;
	int ret;
	/*
	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
	 */
	top->session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
	if (top->session == NULL)
		return -ENOMEM;

	ret = perf_top__setup_sample_type(top);
	if (ret)
		goto out_delete;

	if (perf_target__has_task(&opts->target))
		perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
						  perf_event__process,
						  &top->session->machines.host);
	else
		perf_event__synthesize_threads(&top->tool, perf_event__process,
					       &top->session->machines.host);

	ret = perf_top__start_counters(top);
	if (ret)
		goto out_delete;

	top->session->evlist = top->evlist;
	perf_session__set_id_hdr_size(top->session);

	/*
	 * When perf is starting the traced process, all the events (apart from
	 * group members) have enable_on_exec=1 set, so don't spoil it by
	 * prematurely enabling them.
	 *
	 * XXX 'top' still doesn't start workloads like record, trace, but should,
	 * so leave the check here.
	 */
	if (!perf_target__none(&opts->target))
		perf_evlist__enable(top->evlist);

	/* Wait for a minimal set of events before starting the snapshot */
	poll(top->evlist->pollfd, top->evlist->nr_fds, 100);

	perf_top__mmap_read(top);

	ret = -1;
	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							     display_thread), top)) {
		ui__error("Could not create display thread.\n");
		goto out_delete;
	}

	if (top->realtime_prio) {
		struct sched_param param;

		param.sched_priority = top->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			ui__error("Could not set realtime priority.\n");
			goto out_delete;
		}
	}

	while (!done) {
		u64 hits = top->samples;

		perf_top__mmap_read(top);

		if (hits == top->samples)
			ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
	}

	ret = 0;
out_delete:
	perf_session__delete(top->session);
	top->session = NULL;

	return ret;
}

static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
	/*
	 * --no-call-graph
	 */
	if (unset)
		return 0;

	symbol_conf.use_callchain = true;

	return record_parse_callchain_opt(opt, arg, unset);
}

int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int status;
	char errbuf[BUFSIZ];
	struct perf_top top = {
		.count_filter = 5,
		.delay_secs = 2,
		.record_opts = {
			.mmap_pages = UINT_MAX,
			.user_freq = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.freq = 4000, /* 4 KHz */
			.target = {
				.uses_mmap = true,
			},
		},
		.sym_pcnt_filter = 5,
	};
	struct perf_record_opts *opts = &top.record_opts;
	struct perf_target *target = &opts->target;
	const struct option options[] = {
	OPT_CALLBACK('e', "event", &top.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
	OPT_STRING('p', "pid", &target->pid, "pid",
		    "profile events on existing process id"),
	OPT_STRING('t', "tid", &target->tid, "tid",
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_UINTEGER('m', "mmap-pages", &opts->mmap_pages,
		     "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &top.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &opts->group,
			    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
		    "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
	OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"),
	OPT_INTEGER('E', "entries", &top.print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts,
			     "mode[,dump_size]", record_callchain_help,
			     &parse_callchain_opt, "fp"),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
	OPT_END()
	};
	const char * const top_usage[] = {
		"perf top [<options>]",
		NULL
	};

	top.evlist = perf_evlist__new(NULL, NULL);
	if (top.evlist == NULL)
		return -ENOMEM;

	symbol_conf.exclude_other = false;

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	if (sort_order == default_sort_order)
		sort_order = "dso,symbol";

	if (setup_sorting() < 0)
		usage_with_options(top_usage, options);

	if (top.use_stdio)
		use_browser = 0;
	else if (top.use_tui)
		use_browser = 1;

	setup_browser(false);

	status = perf_target__validate(target);
	if (status) {
		perf_target__strerror(target, status, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	status = perf_target__parse_uid(target);
	if (status) {
		int saved_errno = errno;

		perf_target__strerror(target, status, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		status = -saved_errno;
		goto out_delete_evlist;
	}

	if (perf_target__none(target))
		target->system_wide = true;

	if (perf_evlist__create_maps(top.evlist, target) < 0)
		usage_with_options(top_usage, options);

	if (!top.evlist->nr_entries &&
	    perf_evlist__add_default(top.evlist) < 0) {
		ui__error("Not enough memory for event selector list\n");
		goto out_delete_maps;
	}

	symbol_conf.nr_events = top.evlist->nr_entries;

	if (top.delay_secs < 1)
		top.delay_secs = 1;

	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (opts->user_freq != UINT_MAX)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		ui__error("frequency and count are zero, aborting\n");
		status = -EINVAL;
		goto out_delete_maps;
	}

	top.sym_evsel = perf_evlist__first(top.evlist);

	symbol_conf.priv_size = sizeof(struct annotation);

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

	sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout);
	sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
	sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout);

	/*
	 * Avoid annotation data structures overhead when symbols aren't on the
	 * sort list.
	 */
	top.sort_has_symbols = sort_sym.list.next != NULL;

	get_term_dimensions(&top.winsize);
	if (top.print_entries == 0) {
		struct sigaction act = {
			.sa_sigaction = perf_top__sig_winch,
			.sa_flags     = SA_SIGINFO,
		};
		perf_top__update_print_entries(&top);
		sigaction(SIGWINCH, &act, NULL);
	}

	status = __cmd_top(&top);

out_delete_maps:
	perf_evlist__delete_maps(top.evlist);
out_delete_evlist:
	perf_evlist__delete(top.evlist);

	return status;
}