perf cpu_map: Add cpu_map event synthesize function
Introduce the perf_event__synthesize_cpu_map function to synthesize a
struct cpu_map.

Add a generic interface:

  cpu_map_data__alloc
  cpu_map_data__synthesize

to make the cpu_map synthesizing usable for other events.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Kan Liang <kan.liang@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1445784728-21732-9-git-send-email-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 6640b6c227
commit 6c872901af
6 changed files with 216 additions and 0 deletions
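
For context, a minimal sketch of how a caller might drive the new interface. This is not part of the commit; it mirrors the unit test added in tools/perf/tests/cpumap.c below, and the callback print_cpu_map, the helper synthesize_example and the cpu list "0,1,2,3" are illustrative only. The includes assume the usual tools/perf util headers are on the include path.

#include <stdio.h>
#include "cpumap.h"     /* assumed: tools/perf util headers */
#include "event.h"

/* Illustrative sketch only -- mirrors tests/cpumap.c below. */
static int print_cpu_map(struct perf_tool *tool __maybe_unused,
                         union perf_event *event,
                         struct perf_sample *sample __maybe_unused,
                         struct machine *machine __maybe_unused)
{
        struct cpu_map_data *data = &event->cpu_map.data;

        /* cpu_map_data__alloc() picked whichever encoding was smaller. */
        fprintf(stderr, "cpu map encoded as %s\n",
                data->type == PERF_CPU_MAP__CPUS ? "cpu values" : "mask");
        return 0;
}

static int synthesize_example(void)
{
        struct cpu_map *cpus = cpu_map__new("0,1,2,3");  /* arbitrary cpu list */
        int err;

        /* The callback receives a single PERF_RECORD_CPU_MAP event. */
        err = perf_event__synthesize_cpu_map(NULL, cpus, print_cpu_map, NULL);

        cpu_map__put(cpus);
        return err;
}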
tools/perf/tests/Build
@@ -34,6 +34,7 @@ perf-y += thread-map.o
 perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o
 perf-y += bpf.o
 perf-y += topology.o
+perf-y += cpumap.o
 
 $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
 	$(call rule_mkdir)
tools/perf/tests/builtin-test.c
@@ -183,6 +183,10 @@ static struct test generic_tests[] = {
 		.desc = "Test thread map synthesize",
 		.func = test__thread_map_synthesize,
 	},
+	{
+		.desc = "Test cpu map synthesize",
+		.func = test__cpu_map_synthesize,
+	},
 	{
 		.func = NULL,
 	},
tools/perf/tests/cpumap.c (new file, 71 lines)
@@ -0,0 +1,71 @@
+#include "tests.h"
+#include "cpumap.h"
+
+static int process_event_mask(struct perf_tool *tool __maybe_unused,
+			      union perf_event *event,
+			      struct perf_sample *sample __maybe_unused,
+			      struct machine *machine __maybe_unused)
+{
+	struct cpu_map_event *map = &event->cpu_map;
+	struct cpu_map_mask *mask;
+	struct cpu_map_data *data;
+	int i;
+
+	data = &map->data;
+
+	TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__MASK);
+
+	mask = (struct cpu_map_mask *)data->data;
+
+	TEST_ASSERT_VAL("wrong nr", mask->nr == 1);
+
+	for (i = 0; i < 20; i++) {
+		TEST_ASSERT_VAL("wrong cpu", test_bit(i, mask->mask));
+	}
+
+	return 0;
+}
+
+static int process_event_cpus(struct perf_tool *tool __maybe_unused,
+			      union perf_event *event,
+			      struct perf_sample *sample __maybe_unused,
+			      struct machine *machine __maybe_unused)
+{
+	struct cpu_map_event *map = &event->cpu_map;
+	struct cpu_map_entries *cpus;
+	struct cpu_map_data *data;
+
+	data = &map->data;
+
+	TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__CPUS);
+
+	cpus = (struct cpu_map_entries *)data->data;
+
+	TEST_ASSERT_VAL("wrong nr", cpus->nr == 2);
+	TEST_ASSERT_VAL("wrong cpu", cpus->cpu[0] == 1);
+	TEST_ASSERT_VAL("wrong cpu", cpus->cpu[1] == 256);
+	return 0;
+}
+
+
+int test__cpu_map_synthesize(int subtest __maybe_unused)
+{
+	struct cpu_map *cpus;
+
+	/* This one is better stored in a mask. */
+	cpus = cpu_map__new("0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19");
+
+	TEST_ASSERT_VAL("failed to synthesize map",
+			!perf_event__synthesize_cpu_map(NULL, cpus, process_event_mask, NULL));
+
+	cpu_map__put(cpus);
+
+	/* This one is better stored in cpu values. */
+	cpus = cpu_map__new("1,256");
+
+	TEST_ASSERT_VAL("failed to synthesize map",
+			!perf_event__synthesize_cpu_map(NULL, cpus, process_event_cpus, NULL));
+
+	cpu_map__put(cpus);
+	return 0;
+}
tools/perf/tests/tests.h
@@ -80,6 +80,7 @@ const char *test__bpf_subtest_get_desc(int subtest);
 int test__bpf_subtest_get_nr(void);
 int test_session_topology(int subtest);
 int test__thread_map_synthesize(int subtest);
+int test__cpu_map_synthesize(int subtest);
 
 #if defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
tools/perf/util/event.c
@@ -737,6 +737,137 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
 	return err;
 }
 
+static void synthesize_cpus(struct cpu_map_entries *cpus,
+			    struct cpu_map *map)
+{
+	int i;
+
+	cpus->nr = map->nr;
+
+	for (i = 0; i < map->nr; i++)
+		cpus->cpu[i] = map->map[i];
+}
+
+static void synthesize_mask(struct cpu_map_mask *mask,
+			    struct cpu_map *map, int max)
+{
+	int i;
+
+	mask->nr = BITS_TO_LONGS(max);
+	mask->long_size = sizeof(long);
+
+	for (i = 0; i < map->nr; i++)
+		set_bit(map->map[i], mask->mask);
+}
+
+static size_t cpus_size(struct cpu_map *map)
+{
+	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
+}
+
+static size_t mask_size(struct cpu_map *map, int *max)
+{
+	int i;
+
+	*max = 0;
+
+	for (i = 0; i < map->nr; i++) {
+		/* bit position of the cpu is + 1 */
+		int bit = map->map[i] + 1;
+
+		if (bit > *max)
+			*max = bit;
+	}
+
+	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
+}
+
+void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
+{
+	size_t size_cpus, size_mask;
+	bool is_dummy = cpu_map__empty(map);
+
+	/*
+	 * Both array and mask data have variable size based
+	 * on the number of cpus and their actual values.
+	 * The size of the 'struct cpu_map_data' is:
+	 *
+	 *   array = size of 'struct cpu_map_entries' +
+	 *           number of cpus * sizeof(u16)
+	 *
+	 *   mask  = size of 'struct cpu_map_mask' +
+	 *           maximum cpu bit converted to size of longs
+	 *
+	 * and finally + the size of 'struct cpu_map_data'.
+	 */
+	size_cpus = cpus_size(map);
+	size_mask = mask_size(map, max);
+
+	if (is_dummy || (size_cpus < size_mask)) {
+		*size += size_cpus;
+		*type  = PERF_CPU_MAP__CPUS;
+	} else {
+		*size += size_mask;
+		*type  = PERF_CPU_MAP__MASK;
+	}
+
+	*size += sizeof(struct cpu_map_data);
+	return zalloc(*size);
+}
+
+void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
+			      u16 type, int max)
+{
+	data->type = type;
+
+	switch (type) {
+	case PERF_CPU_MAP__CPUS:
+		synthesize_cpus((struct cpu_map_entries *) data->data, map);
+		break;
+	case PERF_CPU_MAP__MASK:
+		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
+	default:
+		break;
+	};
+}
+
+static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
+{
+	size_t size = sizeof(struct cpu_map_event);
+	struct cpu_map_event *event;
+	int max;
+	u16 type;
+
+	event = cpu_map_data__alloc(map, &size, &type, &max);
+	if (!event)
+		return NULL;
+
+	event->header.type = PERF_RECORD_CPU_MAP;
+	event->header.size = size;
+	event->data.type   = type;
+
+	cpu_map_data__synthesize(&event->data, map, type, max);
+	return event;
+}
+
+int perf_event__synthesize_cpu_map(struct perf_tool *tool,
+				   struct cpu_map *map,
+				   perf_event__handler_t process,
+				   struct machine *machine)
+{
+	struct cpu_map_event *event;
+	int err;
+
+	event = cpu_map_event__new(map);
+	if (!event)
+		return -ENOMEM;
+
+	err = process(tool, (union perf_event *) event, NULL, machine);
+
+	free(event);
+	return err;
+}
+
 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
 {
 	const char *s;
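
As a side note (not part of the commit), the size trade-off in cpu_map_data__alloc() can be made concrete for the two maps exercised by the new test, following cpus_size() and mask_size() above. The numbers below omit the fixed struct headers and assume 64-bit longs:

/*
 * cpus 0-19:  entries: 20 * sizeof(u16)           = 40 bytes
 *             mask:    BITS_TO_LONGS(19 + 1) * 8  =  8 bytes
 *             -> the mask form is smaller, PERF_CPU_MAP__MASK is chosen
 *
 * cpus 1,256: entries: 2 * sizeof(u16)            =  4 bytes
 *             mask:    BITS_TO_LONGS(256 + 1) * 8 = 40 bytes
 *             -> the entries form is smaller, PERF_CPU_MAP__CPUS is chosen
 */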
tools/perf/util/event.h
@@ -425,6 +425,7 @@ void perf_event__print_totals(void);
 
 struct perf_tool;
 struct thread_map;
+struct cpu_map;
 
 typedef int (*perf_event__handler_t)(struct perf_tool *tool,
 				     union perf_event *event,
@@ -440,6 +441,10 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
 				       struct thread_map *threads,
 				       perf_event__handler_t process,
 				       struct machine *machine);
+int perf_event__synthesize_cpu_map(struct perf_tool *tool,
+				   struct cpu_map *cpus,
+				   perf_event__handler_t process,
+				   struct machine *machine);
 int perf_event__synthesize_threads(struct perf_tool *tool,
 				   perf_event__handler_t process,
 				   struct machine *machine, bool mmap_data,
@@ -550,4 +555,7 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp);
 u64 kallsyms__get_function_start(const char *kallsyms_filename,
 				 const char *symbol_name);
 
+void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max);
+void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
+			      u16 type, int max);
 #endif /* __PERF_RECORD_H */