perf tools: Refactor all_tids to hold nr and the map
So that later we can pass the thread_map instance to perf_evsel__open and friends, instead of the (thread_num, thread array) pair, just as was done with cpu_map.

Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 5c98d466e4 (parent 60d567e2d9)
5 changed files with 88 additions and 85 deletions
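At a glance, the API this change introduces, distilled from the util/thread.[ch] hunks below. This is a minimal standalone sketch: the struct, the single-tid constructor and the destructor mirror the commit, while the demo main() is an illustrative addition and not part of the commit.

/* Sketch of the thread_map API added by this commit (see util/thread.h below).
 * The demo main() is illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

struct thread_map {
    int nr;     /* number of threads in the map */
    int map[];  /* flexible array: one tid per entry */
};

/* Single-tid constructor, as added to util/thread.c. */
static struct thread_map *thread_map__new_by_tid(pid_t tid)
{
    struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));

    if (threads != NULL) {
        threads->map[0] = tid;
        threads->nr = 1;
    }
    return threads;
}

static inline void thread_map__delete(struct thread_map *threads)
{
    free(threads);
}

int main(void)
{
    /* Callers now carry one pointer instead of (thread_num, all_tids). */
    struct thread_map *threads = thread_map__new_by_tid(getpid());

    if (threads == NULL)
        return 1;
    for (int i = 0; i < threads->nr; i++)
        printf("tid %d\n", threads->map[i]);
    thread_map__delete(threads);
    return 0;
}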
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -54,8 +54,7 @@ static bool sample_id_all_avail = true;
 static bool system_wide = false;
 static pid_t target_pid = -1;
 static pid_t target_tid = -1;
-static pid_t *all_tids = NULL;
-static int thread_num = 0;
+static struct thread_map *threads;
 static pid_t child_pid = -1;
 static bool no_inherit = false;
 static enum write_mode_t write_mode = WRITE_FORCE;
@@ -318,9 +317,9 @@ static void create_counter(struct perf_evsel *evsel, int cpu)
 retry_sample_id:
     attr->sample_id_all = sample_id_all_avail ? 1 : 0;

-    for (thread_index = 0; thread_index < thread_num; thread_index++) {
+    for (thread_index = 0; thread_index < threads->nr; thread_index++) {
 try_again:
-        FD(evsel, nr_cpu, thread_index) = sys_perf_event_open(attr, all_tids[thread_index], cpu, group_fd, 0);
+        FD(evsel, nr_cpu, thread_index) = sys_perf_event_open(attr, threads->map[thread_index], cpu, group_fd, 0);

         if (FD(evsel, nr_cpu, thread_index) < 0) {
             int err = errno;
@@ -653,7 +652,7 @@ static int __cmd_record(int argc, const char **argv)
         }

         if (!system_wide && target_tid == -1 && target_pid == -1)
-            all_tids[0] = child_pid;
+            threads->map[0] = child_pid;

         close(child_ready_pipe[1]);
         close(go_pipe[0]);
@@ -793,7 +792,7 @@ static int __cmd_record(int argc, const char **argv)

             list_for_each_entry(pos, &evsel_list, node) {
                 for (thread = 0;
-                    thread < thread_num;
+                    thread < threads->nr;
                     thread++)
                     ioctl(FD(pos, i, thread),
                           PERF_EVENT_IOC_DISABLE);
@@ -910,21 +909,13 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
         goto out_symbol_exit;
     }

-    if (target_pid != -1) {
+    if (target_pid != -1)
         target_tid = target_pid;
-        thread_num = find_all_tid(target_pid, &all_tids);
-        if (thread_num <= 0) {
-            fprintf(stderr, "Can't find all threads of pid %d\n",
-                    target_pid);
-            usage_with_options(record_usage, record_options);
-        }
-    } else {
-        all_tids=malloc(sizeof(pid_t));
-        if (!all_tids)
-            goto out_symbol_exit;

-        all_tids[0] = target_tid;
-        thread_num = 1;
+    threads = thread_map__new(target_pid, target_tid);
+    if (threads == NULL) {
+        pr_err("Problems finding threads of monitor\n");
+        usage_with_options(record_usage, record_options);
     }

     cpus = cpu_map__new(cpu_list);
@@ -934,11 +925,11 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
     }

     list_for_each_entry(pos, &evsel_list, node) {
-        if (perf_evsel__alloc_fd(pos, cpus->nr, thread_num) < 0)
+        if (perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
             goto out_free_fd;
     }
-    event_array = malloc(
-        sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
+    event_array = malloc((sizeof(struct pollfd) * MAX_NR_CPUS *
+                          MAX_COUNTERS * threads->nr));
     if (!event_array)
         goto out_free_fd;

@@ -965,8 +956,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
 out_free_event_array:
     free(event_array);
 out_free_fd:
-    free(all_tids);
-    all_tids = NULL;
+    thread_map__delete(threads);
+    threads = NULL;
 out_symbol_exit:
     symbol__exit();
     return err;
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -81,8 +81,7 @@ static bool scale = true;
 static bool no_aggr = false;
 static pid_t target_pid = -1;
 static pid_t target_tid = -1;
-static pid_t *all_tids = NULL;
-static int thread_num = 0;
+static struct thread_map *threads;
 static pid_t child_pid = -1;
 static bool null_run = false;
 static bool big_num = true;
@@ -175,7 +174,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
         attr->enable_on_exec = 1;
     }

-    return perf_evsel__open_per_thread(evsel, thread_num, all_tids);
+    return perf_evsel__open_per_thread(evsel, threads->nr, threads->map);
 }

 /*
@@ -200,7 +199,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
     u64 *count = counter->counts->aggr.values;
     int i;

-    if (__perf_evsel__read(counter, cpus->nr, thread_num, scale) < 0)
+    if (__perf_evsel__read(counter, cpus->nr, threads->nr, scale) < 0)
         return -1;

     for (i = 0; i < 3; i++)
@@ -298,7 +297,7 @@ static int run_perf_stat(int argc __used, const char **argv)
         }

         if (target_tid == -1 && target_pid == -1 && !system_wide)
-            all_tids[0] = child_pid;
+            threads->map[0] = child_pid;

         /*
          * Wait for the child to be ready to exec.
@@ -353,7 +352,7 @@ static int run_perf_stat(int argc __used, const char **argv)
     } else {
         list_for_each_entry(counter, &evsel_list, node) {
             read_counter_aggr(counter);
-            perf_evsel__close_fd(counter, cpus->nr, thread_num);
+            perf_evsel__close_fd(counter, cpus->nr, threads->nr);
         }
     }

@@ -693,6 +692,15 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
         }
     }

+    if (target_pid != -1)
+        target_tid = target_pid;
+
+    threads = thread_map__new(target_pid, target_tid);
+    if (threads == NULL) {
+        pr_err("Problems finding threads of monitor\n");
+        usage_with_options(stat_usage, options);
+    }
+
     if (system_wide)
         cpus = cpu_map__new(cpu_list);
     else
@@ -704,27 +712,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
         return -1;
     }

-    if (target_pid != -1) {
-        target_tid = target_pid;
-        thread_num = find_all_tid(target_pid, &all_tids);
-        if (thread_num <= 0) {
-            fprintf(stderr, "Can't find all threads of pid %d\n",
-                    target_pid);
-            usage_with_options(stat_usage, options);
-        }
-    } else {
-        all_tids=malloc(sizeof(pid_t));
-        if (!all_tids)
-            return -ENOMEM;
-
-        all_tids[0] = target_tid;
-        thread_num = 1;
-    }
-
     list_for_each_entry(pos, &evsel_list, node) {
         if (perf_evsel__alloc_stat_priv(pos) < 0 ||
             perf_evsel__alloc_counts(pos, cpus->nr) < 0 ||
-            perf_evsel__alloc_fd(pos, cpus->nr, thread_num) < 0)
+            perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
             goto out_free_fd;
     }

@@ -752,5 +743,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
     list_for_each_entry(pos, &evsel_list, node)
         perf_evsel__free_stat_priv(pos);
 out:
+    thread_map__delete(threads);
+    threads = NULL;
     return status;
 }
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -68,8 +68,7 @@ static int print_entries;

 static int target_pid = -1;
 static int target_tid = -1;
-static pid_t *all_tids = NULL;
-static int thread_num = 0;
+static struct thread_map *threads;
 static bool inherit = false;
 static struct cpu_map *cpus;
 static int realtime_prio = 0;
@@ -1200,7 +1199,7 @@ static void perf_session__mmap_read(struct perf_session *self)
     for (i = 0; i < cpus->nr; i++) {
         list_for_each_entry(counter, &evsel_list, node) {
             for (thread_index = 0;
-                thread_index < thread_num;
+                thread_index < threads->nr;
                 thread_index++) {
                 perf_session__mmap_read_counter(self,
                     counter, i, thread_index);
@@ -1236,10 +1235,10 @@ static void start_counter(int i, struct perf_evsel *evsel)
     attr->inherit = (cpu < 0) && inherit;
     attr->mmap = 1;

-    for (thread_index = 0; thread_index < thread_num; thread_index++) {
+    for (thread_index = 0; thread_index < threads->nr; thread_index++) {
 try_again:
         FD(evsel, i, thread_index) = sys_perf_event_open(attr,
-                all_tids[thread_index], cpu, group_fd, 0);
+                threads->map[thread_index], cpu, group_fd, 0);

         if (FD(evsel, i, thread_index) < 0) {
             int err = errno;
@@ -1410,25 +1409,17 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
     if (argc)
         usage_with_options(top_usage, options);

-    if (target_pid != -1) {
+    if (target_pid != -1)
         target_tid = target_pid;
-        thread_num = find_all_tid(target_pid, &all_tids);
-        if (thread_num <= 0) {
-            fprintf(stderr, "Can't find all threads of pid %d\n",
-                    target_pid);
-            usage_with_options(top_usage, options);
-        }
-    } else {
-        all_tids=malloc(sizeof(pid_t));
-        if (!all_tids)
-            return -ENOMEM;

-        all_tids[0] = target_tid;
-        thread_num = 1;
+    threads = thread_map__new(target_pid, target_tid);
+    if (threads == NULL) {
+        pr_err("Problems finding threads of monitor\n");
+        usage_with_options(top_usage, options);
     }

-    event_array = malloc(
-        sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
+    event_array = malloc((sizeof(struct pollfd) *
+                          MAX_NR_CPUS * MAX_COUNTERS * threads->nr));
     if (!event_array)
         return -ENOMEM;

@@ -1468,8 +1459,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
         usage_with_options(top_usage, options);

     list_for_each_entry(pos, &evsel_list, node) {
-        if (perf_evsel__alloc_mmap_per_thread(pos, cpus->nr, thread_num) < 0 ||
-            perf_evsel__alloc_fd(pos, cpus->nr, thread_num) < 0)
+        if (perf_evsel__alloc_mmap_per_thread(pos, cpus->nr, threads->nr) < 0 ||
+            perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
             goto out_free_fd;
         /*
          * Fill in the ones not specifically initialized via -c:
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -16,35 +16,50 @@ static int filter(const struct dirent *dir)
     return 1;
 }

-int find_all_tid(int pid, pid_t ** all_tid)
+struct thread_map *thread_map__new_by_pid(pid_t pid)
 {
+    struct thread_map *threads;
     char name[256];
     int items;
     struct dirent **namelist = NULL;
-    int ret = 0;
     int i;

     sprintf(name, "/proc/%d/task", pid);
     items = scandir(name, &namelist, filter, NULL);
     if (items <= 0)
-        return -ENOENT;
-    *all_tid = malloc(sizeof(pid_t) * items);
-    if (!*all_tid) {
-        ret = -ENOMEM;
-        goto failure;
-    }
-
-    for (i = 0; i < items; i++)
-        (*all_tid)[i] = atoi(namelist[i]->d_name);
+        return NULL;

-    ret = items;
+    threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
+    if (threads != NULL) {
+        for (i = 0; i < items; i++)
+            threads->map[i] = atoi(namelist[i]->d_name);
+        threads->nr = items;
+    }

-failure:
     for (i=0; i<items; i++)
         free(namelist[i]);
     free(namelist);

-    return ret;
+    return threads;
+}
+
+struct thread_map *thread_map__new_by_tid(pid_t tid)
+{
+    struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
+
+    if (threads != NULL) {
+        threads->map[0] = tid;
+        threads->nr = 1;
+    }
+
+    return threads;
+}
+
+struct thread_map *thread_map__new(pid_t pid, pid_t tid)
+{
+    if (pid != -1)
+        return thread_map__new_by_pid(pid);
+    return thread_map__new_by_tid(tid);
 }

 static struct thread *thread__new(pid_t pid)
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -18,11 +18,24 @@ struct thread {
     int comm_len;
 };

+struct thread_map {
+    int nr;
+    int map[];
+};
+
 struct perf_session;

 void thread__delete(struct thread *self);

-int find_all_tid(int pid, pid_t ** all_tid);
+struct thread_map *thread_map__new_by_pid(pid_t pid);
+struct thread_map *thread_map__new_by_tid(pid_t tid);
+struct thread_map *thread_map__new(pid_t pid, pid_t tid);
+
+static inline void thread_map__delete(struct thread_map *threads)
+{
+    free(threads);
+}
+
 int thread__set_comm(struct thread *self, const char *comm);
 int thread__comm_len(struct thread *self);
 struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);