#ifndef __PERF_CALLCHAIN_H
#define __PERF_CALLCHAIN_H

#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include "event.h"
#include "symbol.h"
enum perf_call_graph_mode {
	CALLCHAIN_NONE,
	CALLCHAIN_FP,
	CALLCHAIN_DWARF,
	CALLCHAIN_LBR,
	CALLCHAIN_MAX
};
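
/*
 * How resolved chains are presented by the report tools: not at all, as a
 * flat list, or as a graph weighted with absolute or relative percentages.
 */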
enum chain_mode {
	CHAIN_NONE,
	CHAIN_FLAT,
	CHAIN_GRAPH_ABS,
	CHAIN_GRAPH_REL
};
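
/* Whether chains are printed outermost caller first or innermost callee first. */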
enum chain_order {
	ORDER_CALLER,
	ORDER_CALLEE
};
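
/*
 * One node of the accumulated callchain tree: 'val' lists the resolved
 * addresses collapsed into this node, children are kept in 'rb_root_in'
 * while the tree is built and sorted into 'rb_root' for output, 'hit'
 * accounts the period of chains ending here and 'children_hit' the period
 * accumulated below this node.
 */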
struct callchain_node {
	struct callchain_node	*parent;
	struct list_head	val;
	struct rb_node		rb_node_in;	/* to insert nodes in an rbtree */
	struct rb_node		rb_node;	/* to sort nodes in an output tree */
	struct rb_root		rb_root_in;	/* input tree of children */
	struct rb_root		rb_root;	/* sorted output tree of children */
	unsigned int		val_nr;
	u64			hit;
	u64			children_hit;
};
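
/* Root of a callchain tree; max_depth remembers the deepest chain appended. */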
struct callchain_root {
	u64			max_depth;
	struct callchain_node	node;
};

struct callchain_param;

typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
				  u64, struct callchain_param *);
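
/* How two chain entries are considered equal: by symbol or by exact address. */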
enum chain_key {
	CCKEY_FUNCTION,
	CCKEY_ADDRESS
};
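
/*
 * Global call-graph configuration: how chains are captured at record time
 * (record_mode, dump_size), how they are reported (mode, order, key,
 * print_limit, min_percent, sort) and whether the LBR branch stack is
 * folded into the chain (branch_callstack).
 */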
struct callchain_param {
	bool			enabled;
	enum perf_call_graph_mode record_mode;
	u32			dump_size;
	enum chain_mode		mode;
	u32			print_limit;
	double			min_percent;
	sort_chain_func_t	sort;
	enum chain_order	order;
	enum chain_key		key;
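	/* Fold the whole LBR branch stack into the callchain (--branch-history). */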
	bool			branch_callstack;
};
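
/* The single global instance, filled from the command line and perf config. */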
extern struct callchain_param callchain_param;
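
/*
 * One resolved entry of a chain: the sampled address plus the map/symbol
 * and source line it resolved to.  Entries hang off callchain_node::val.
 */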
struct callchain_list {
	u64			ip;
	struct map_symbol	ms;
	struct /* for TUI */ {
		bool		unfolded;
		bool		has_children;
	};
	char			*srcline;
	struct list_head	list;
};

/*
 * A callchain cursor is a single linked list that
 * lets one feed a callchain progressively.
 * It keeps persistent allocated entries to minimize
 * allocations.
 */
struct callchain_cursor_node {
	u64				ip;
	struct map			*map;
	struct symbol			*sym;
	struct callchain_cursor_node	*next;
};

struct callchain_cursor {
	u64				nr;
	struct callchain_cursor_node	*first;
	struct callchain_cursor_node	**last;
	u64				pos;
	struct callchain_cursor_node	*curr;
};

extern __thread struct callchain_cursor callchain_cursor;

static inline void callchain_init(struct callchain_root *root)
{
	INIT_LIST_HEAD(&root->node.val);

	root->node.parent = NULL;
	root->node.hit = 0;
	root->node.children_hit = 0;
	root->node.rb_root_in = RB_ROOT;
	root->max_depth = 0;
}

static inline u64 callchain_cumul_hits(struct callchain_node *node)
{
	return node->hit + node->children_hit;
}
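
/*
 * Building the output tree (a sketch of the intended flow; callchain.c is
 * authoritative): callchain_register_param() picks the sort function that
 * matches param->mode, callchain_append() folds the chain currently held
 * in @cursor into @root and accounts @period to the node where it ends,
 * and callchain_merge() folds @src into @dst using @cursor as scratch.
 */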
int callchain_register_param(struct callchain_param *param);
int callchain_append(struct callchain_root *root,
		     struct callchain_cursor *cursor,
		     u64 period);

int callchain_merge(struct callchain_cursor *cursor,
		    struct callchain_root *dst, struct callchain_root *src);

/*
 * Initialize a cursor before adding entries inside, but keep
 * the previously allocated entries as a cache.
 */
static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
{
	cursor->nr = 0;
	cursor->last = &cursor->first;
}

int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
			    struct map *map, struct symbol *sym);

/* Close a cursor writing session. Initialize for the reader */
static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
{
	cursor->curr = cursor->first;
	cursor->pos = 0;
}

/* Cursor reading iteration helpers */
static inline struct callchain_cursor_node *
callchain_cursor_current(struct callchain_cursor *cursor)
{
	if (cursor->pos == cursor->nr)
		return NULL;

	return cursor->curr;
}

static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
{
	cursor->curr = cursor->curr->next;
	cursor->pos++;
}
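
/*
 * Typical cursor life cycle (a sketch, not code lifted from the perf
 * sources): a producer resets the cursor, appends one entry per resolved
 * frame and commits; a consumer then walks the committed entries:
 *
 *	struct callchain_cursor_node *node;
 *
 *	callchain_cursor_reset(&callchain_cursor);
 *	... one callchain_cursor_append(&callchain_cursor, ip, map, sym)
 *	    call per frame ...
 *	callchain_cursor_commit(&callchain_cursor);
 *
 *	while ((node = callchain_cursor_current(&callchain_cursor)) != NULL) {
 *		... use node->ip, node->map and node->sym ...
 *		callchain_cursor_advance(&callchain_cursor);
 *	}
 */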

struct option;
struct hist_entry;

int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
int record_callchain_opt(const struct option *opt, const char *arg, int unset);
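
/*
 * Resolve the callchain of @sample into the thread-local callchain_cursor
 * (at most @max_stack entries); hist_entry__append_callchain() then folds
 * the committed cursor into @he's callchain tree.
 */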
int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
			      struct perf_evsel *evsel, struct addr_location *al,
			      int max_stack);
int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample);
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
			bool hide_unresolved);

extern const char record_callchain_help[];
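
/*
 * Parsers for the record/report --call-graph options and for the
 * 'call-graph.*' keys of the perf config file.
 */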
extern int parse_callchain_record(const char *arg, struct callchain_param *param);
int parse_callchain_record_opt(const char *arg, struct callchain_param *param);
int parse_callchain_report_opt(const char *arg);
int perf_callchain_config(const char *var, const char *value);
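
/*
 * Snapshot the read position of @src: @dest becomes a cursor over the
 * entries of @src that have not been consumed yet.
 */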
static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
					     struct callchain_cursor *src)
{
	*dest = *src;

	dest->first = src->curr;
	dest->nr -= src->pos;
}
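
/*
 * Arch hook (e.g. for PowerPC) that may report the index of a chain entry
 * the generic code should drop; the fallback returns -1, i.e. skip nothing.
 */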
#ifdef HAVE_SKIP_CALLCHAIN_IDX
extern int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain);
#else
static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
			struct ip_callchain *chain __maybe_unused)
{
	return -1;
}
#endif
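
/*
 * Format the symbol (or raw address) of @cl into @bf, optionally including
 * the DSO name; free_callchain() releases a tree built under @root.
 */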
char *callchain_list__sym_name(struct callchain_list *cl,
			       char *bf, size_t bfsize, bool show_dso);

void free_callchain(struct callchain_root *root);

#endif /* __PERF_CALLCHAIN_H */