Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux-2.6 into perf/core
commit 75b5293a5d
24 changed files with 785 additions and 377 deletions

@@ -215,8 +215,9 @@ struct perf_event_attr {
				 */
				precise_ip : 2, /* skid constraint */
				mmap_data : 1, /* non-exec mmap data */
				sample_id_all : 1, /* sample_type all events */

				__reserved_1 : 46;
				__reserved_1 : 45;

	union {
		__u32 wakeup_events; /* wakeup every n events */
@@ -327,6 +328,15 @@ struct perf_event_header {
enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
	 * described in PERF_RECORD_SAMPLE below, it will be stashed just after
	 * the perf_event_header and the fields already present for the existing
	 * fields, i.e. at the end of the payload. That way a newer perf.data
	 * file will be supported by older perf tools, with these new optional
	 * fields being ignored.
	 *
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
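
A minimal reader-side sketch of the layout the comment above describes. It assumes the selected identity fields are appended right after the record's existing payload, in the order the new __perf_event_header__init_id()/__perf_event__output_id_sample() helpers further down in this merge emit them (TID, TIME, ID, STREAM_ID, CPU); the struct name is made up for illustration and is not a kernel declaration:

struct sample_id_tail {			/* hypothetical, illustration only */
	__u32	pid, tid;		/* if PERF_SAMPLE_TID */
	__u64	time;			/* if PERF_SAMPLE_TIME */
	__u64	id;			/* if PERF_SAMPLE_ID */
	__u64	stream_id;		/* if PERF_SAMPLE_STREAM_ID */
	__u32	cpu, res;		/* if PERF_SAMPLE_CPU */
};

Each field is present only when its bit is set in sample_type, so for sample_type = TID|TIME|CPU the tail adds 8 + 8 + 8 = 24 bytes to header.size, which is what the new perf_event__id_header_size() precomputes.
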
@@ -759,6 +769,7 @@ struct perf_event {

	struct perf_event_attr attr;
	u16 header_size;
	u16 id_header_size;
	u16 read_size;
	struct hw_perf_event hw;
@@ -133,6 +133,28 @@ static void unclone_ctx(struct perf_event_context *ctx)
	}
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
@@ -351,15 +373,30 @@ static void perf_event__header_size(struct perf_event *event)
	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

@@ -369,13 +406,7 @@ static void perf_event__header_size(struct perf_event *event)
	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
@@ -3357,6 +3388,73 @@ __always_inline void perf_output_copy(struct perf_output_handle *handle,
	} while (len);
}

static void __perf_event_header__init_id(struct perf_event_header *header,
					 struct perf_sample_data *data,
					 struct perf_event *event)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;
	header->size += event->id_header_size;

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);
	}

	if (sample_type & PERF_SAMPLE_TIME)
		data->time = perf_clock();

	if (sample_type & PERF_SAMPLE_ID)
		data->id = primary_event_id(event);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = event->id;

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu = raw_smp_processor_id();
		data->cpu_entry.reserved = 0;
	}
}

static void perf_event_header__init_id(struct perf_event_header *header,
				       struct perf_sample_data *data,
				       struct perf_event *event)
{
	if (event->attr.sample_id_all)
		__perf_event_header__init_id(header, data, event);
}

static void __perf_event__output_id_sample(struct perf_output_handle *handle,
					   struct perf_sample_data *data)
{
	u64 sample_type = data->type;

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);
}

static void perf_event__output_id_sample(struct perf_event *event,
					 struct perf_output_handle *handle,
					 struct perf_sample_data *sample)
{
	if (event->attr.sample_id_all)
		__perf_event__output_id_sample(handle, sample);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size,
		      int nmi, int sample)
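
On the consumer side the same fields have to be peeled off the end of the record, since an older tool that knows nothing about them simply stops reading earlier. A condensed, hedged sketch of that backwards walk, mirroring the event__parse_id_sample() helper the tools/perf half of this merge adds (struct parsed_id is a hypothetical container, not a perf type):

#include <linux/perf_event.h>
#include <linux/types.h>

struct parsed_id {			/* hypothetical, illustration only */
	__u32 pid, tid, cpu;
	__u64 time, id, stream_id;
};

static void parse_id_tail(const struct perf_event_header *header,
			  __u64 sample_type, struct parsed_id *sid)
{
	/* start one past the last u64 of the record */
	const __u64 *p = (const void *)((const char *)header + header->size);

	/* fields were written TID, TIME, ID, STREAM_ID, CPU, so read them in reverse */
	if (sample_type & PERF_SAMPLE_CPU)
		sid->cpu = ((const __u32 *)--p)[0];
	if (sample_type & PERF_SAMPLE_STREAM_ID)
		sid->stream_id = *--p;
	if (sample_type & PERF_SAMPLE_ID)
		sid->id = *--p;
	if (sample_type & PERF_SAMPLE_TIME)
		sid->time = *--p;
	if (sample_type & PERF_SAMPLE_TID) {
		const __u32 *t = (const __u32 *)--p;
		sid->pid = t[0];
		sid->tid = t[1];
	}
}
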
@@ -3364,6 +3462,7 @@ int perf_output_begin(struct perf_output_handle *handle,
	struct perf_buffer *buffer;
	unsigned long tail, offset, head;
	int have_lost;
	struct perf_sample_data sample_data;
	struct {
		struct perf_event_header header;
		u64 id;
@@ -3390,8 +3489,12 @@ int perf_output_begin(struct perf_output_handle *handle,
		goto out;

	have_lost = local_read(&buffer->lost);
	if (have_lost)
		size += sizeof(lost_event);
	if (have_lost) {
		lost_event.header.size = sizeof(lost_event);
		perf_event_header__init_id(&lost_event.header, &sample_data,
					   event);
		size += lost_event.header.size;
	}

	perf_output_get_handle(handle);
@@ -3422,11 +3525,11 @@ int perf_output_begin(struct perf_output_handle *handle,
	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.header.size = sizeof(lost_event);
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&buffer->lost, 0);

		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;
@@ -3459,28 +3562,6 @@ void perf_output_end(struct perf_output_handle *handle)
	rcu_read_unlock();
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event,
				 u64 enabled, u64 running)
@@ -3655,37 +3736,17 @@ void perf_prepare_sample(struct perf_event_header *header,
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	__perf_event_header__init_id(header, data, event);

	if (sample_type & PERF_SAMPLE_IP)
		data->ip = perf_instruction_pointer(regs);

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);
	}

	if (sample_type & PERF_SAMPLE_TIME)
		data->time = perf_clock();

	if (sample_type & PERF_SAMPLE_ID)
		data->id = primary_event_id(event);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = event->id;

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu = raw_smp_processor_id();
		data->cpu_entry.reserved = 0;
	}

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;
@@ -3749,6 +3810,7 @@ perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
@@ -3760,12 +3822,14 @@ perf_event_read_event(struct perf_event *event,
	};
	int ret;

	perf_event_header__init_id(&read_event.header, &sample, event);
	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}
@@ -3795,14 +3859,16 @@ static void perf_event_task_output(struct perf_event *event,
				   struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct task_struct *task = task_event->task;
	int size, ret;
	int ret, size = task_event->event_id.header.size;

	size = task_event->event_id.header.size;
	ret = perf_output_begin(&handle, event, size, 0, 0);
	perf_event_header__init_id(&task_event->event_id.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				task_event->event_id.header.size, 0, 0);
	if (ret)
		return;
		goto out;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3812,7 +3878,11 @@ static void perf_event_task_output(struct perf_event *event,

	perf_output_put(&handle, task_event->event_id);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	task_event->event_id.header.size = size;
}

static int perf_event_task_match(struct perf_event *event)
@@ -3925,11 +3995,16 @@ static void perf_event_comm_output(struct perf_event *event,
				   struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = comm_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);
	int ret;

	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				comm_event->event_id.header.size, 0, 0);

	if (ret)
		return;
		goto out;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
@@ -3937,7 +4012,12 @@ static void perf_event_comm_output(struct perf_event *event,
	perf_output_put(&handle, comm_event->event_id);
	perf_output_copy(&handle, comm_event->comm,
			 comm_event->comm_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	comm_event->event_id.header.size = size;
}

static int perf_event_comm_match(struct perf_event *event)
@@ -3982,7 +4062,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
@@ -4061,11 +4140,15 @@ static void perf_event_mmap_output(struct perf_event *event,
				   struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = mmap_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);
	int ret;

	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				mmap_event->event_id.header.size, 0, 0);
	if (ret)
		return;
		goto out;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);
@@ -4073,7 +4156,12 @@ static void perf_event_mmap_output(struct perf_event *event,
	perf_output_put(&handle, mmap_event->event_id);
	perf_output_copy(&handle, mmap_event->file_name,
			 mmap_event->file_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	mmap_event->event_id.header.size = size;
}

static int perf_event_mmap_match(struct perf_event *event,
@@ -4226,6 +4314,7 @@ void perf_event_mmap(struct vm_area_struct *vma)
static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	struct {
@@ -4247,11 +4336,15 @@ static void perf_log_throttle(struct perf_event *event, int enable)
	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				throttle_event.header.size, 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}
@@ -5745,6 +5838,7 @@ SYSCALL_DEFINE5(perf_event_open,
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
	perf_event__id_header_size(event);

	/*
	 * Drop the reference on the group_event after placing the
@@ -6098,6 +6192,12 @@ inherit_event(struct perf_event *parent_event,
	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
@@ -108,6 +108,11 @@ OPTIONS
--data::
	Sample addresses.

-T::
--timestamp::
	Sample timestamps. Use it with 'perf report -D' to see the timestamps,
	for instance.

-n::
--no-samples::
	Don't sample.
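
One possible way to exercise the new option described above, following the documentation's own suggestion of inspecting the raw dump:

	perf record -T -- sleep 1
	perf report -D | less

With -T, each sample in the resulting perf.data carries a timestamp, which the -D dump then prints alongside the other sample fields.
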
@ -58,12 +58,12 @@ static int hists__add_entry(struct hists *self, struct addr_location *al)
|
|||
return hist_entry__inc_addr_samples(he, al->addr);
|
||||
}
|
||||
|
||||
static int process_sample_event(event_t *event, struct perf_session *session)
|
||||
static int process_sample_event(event_t *event, struct sample_data *sample,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct addr_location al;
|
||||
struct sample_data data;
|
||||
|
||||
if (event__preprocess_sample(event, session, &al, &data, NULL) < 0) {
|
||||
if (event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
|
||||
pr_warning("problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
return -1;
|
||||
|
|
|
@ -30,12 +30,13 @@ static int hists__add_entry(struct hists *self,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static int diff__process_sample_event(event_t *event, struct perf_session *session)
|
||||
static int diff__process_sample_event(event_t *event,
|
||||
struct sample_data *sample,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct addr_location al;
|
||||
struct sample_data data = { .period = 1, };
|
||||
|
||||
if (event__preprocess_sample(event, session, &al, &data, NULL) < 0) {
|
||||
if (event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
|
||||
pr_warning("problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
return -1;
|
||||
|
@ -44,12 +45,12 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
|
|||
if (al.filtered || al.sym == NULL)
|
||||
return 0;
|
||||
|
||||
if (hists__add_entry(&session->hists, &al, data.period)) {
|
||||
if (hists__add_entry(&session->hists, &al, sample->period)) {
|
||||
pr_warning("problem incrementing symbol period, skipping event\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
session->hists.stats.total_period += data.period;
|
||||
session->hists.stats.total_period += sample->period;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -16,8 +16,8 @@
|
|||
static char const *input_name = "-";
|
||||
static bool inject_build_ids;
|
||||
|
||||
static int event__repipe(event_t *event __used,
|
||||
struct perf_session *session __used)
|
||||
static int event__repipe_synth(event_t *event,
|
||||
struct perf_session *session __used)
|
||||
{
|
||||
uint32_t size;
|
||||
void *buf = event;
|
||||
|
@ -36,22 +36,30 @@ static int event__repipe(event_t *event __used,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int event__repipe_mmap(event_t *self, struct perf_session *session)
|
||||
static int event__repipe(event_t *event, struct sample_data *sample __used,
|
||||
struct perf_session *session)
|
||||
{
|
||||
return event__repipe_synth(event, session);
|
||||
}
|
||||
|
||||
static int event__repipe_mmap(event_t *self, struct sample_data *sample,
|
||||
struct perf_session *session)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = event__process_mmap(self, session);
|
||||
event__repipe(self, session);
|
||||
err = event__process_mmap(self, sample, session);
|
||||
event__repipe(self, sample, session);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int event__repipe_task(event_t *self, struct perf_session *session)
|
||||
static int event__repipe_task(event_t *self, struct sample_data *sample,
|
||||
struct perf_session *session)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = event__process_task(self, session);
|
||||
event__repipe(self, session);
|
||||
err = event__process_task(self, sample, session);
|
||||
event__repipe(self, sample, session);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -61,7 +69,7 @@ static int event__repipe_tracing_data(event_t *self,
|
|||
{
|
||||
int err;
|
||||
|
||||
event__repipe(self, session);
|
||||
event__repipe_synth(self, session);
|
||||
err = event__process_tracing_data(self, session);
|
||||
|
||||
return err;
|
||||
|
@ -111,7 +119,8 @@ static int dso__inject_build_id(struct dso *self, struct perf_session *session)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int event__inject_buildid(event_t *event, struct perf_session *session)
|
||||
static int event__inject_buildid(event_t *event, struct sample_data *sample,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct addr_location al;
|
||||
struct thread *thread;
|
||||
|
@ -146,7 +155,7 @@ static int event__inject_buildid(event_t *event, struct perf_session *session)
|
|||
}
|
||||
|
||||
repipe:
|
||||
event__repipe(event, session);
|
||||
event__repipe(event, sample, session);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -160,10 +169,10 @@ struct perf_event_ops inject_ops = {
|
|||
.read = event__repipe,
|
||||
.throttle = event__repipe,
|
||||
.unthrottle = event__repipe,
|
||||
.attr = event__repipe,
|
||||
.event_type = event__repipe,
|
||||
.tracing_data = event__repipe,
|
||||
.build_id = event__repipe,
|
||||
.attr = event__repipe_synth,
|
||||
.event_type = event__repipe_synth,
|
||||
.tracing_data = event__repipe_synth,
|
||||
.build_id = event__repipe_synth,
|
||||
};
|
||||
|
||||
extern volatile int session_done;
|
||||
|
|
|
@ -304,22 +304,11 @@ process_raw_event(event_t *raw_event __used, void *data,
|
|||
}
|
||||
}
|
||||
|
||||
static int process_sample_event(event_t *event, struct perf_session *session)
|
||||
static int process_sample_event(event_t *event, struct sample_data *sample,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct sample_data data;
|
||||
struct thread *thread;
|
||||
struct thread *thread = perf_session__findnew(session, event->ip.pid);
|
||||
|
||||
memset(&data, 0, sizeof(data));
|
||||
data.time = -1;
|
||||
data.cpu = -1;
|
||||
data.period = 1;
|
||||
|
||||
event__parse_sample(event, session->sample_type, &data);
|
||||
|
||||
dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
|
||||
data.pid, data.tid, data.ip, data.period);
|
||||
|
||||
thread = perf_session__findnew(session, event->ip.pid);
|
||||
if (thread == NULL) {
|
||||
pr_debug("problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
|
@ -328,8 +317,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)
|
|||
|
||||
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
|
||||
|
||||
process_raw_event(event, data.raw_data, data.cpu,
|
||||
data.time, thread);
|
||||
process_raw_event(event, sample->raw_data, sample->cpu,
|
||||
sample->time, thread);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -747,6 +736,9 @@ static int __cmd_record(int argc, const char **argv)
|
|||
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
|
||||
rec_argv = calloc(rec_argc + 1, sizeof(char *));
|
||||
|
||||
if (rec_argv == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(record_args); i++)
|
||||
rec_argv[i] = strdup(record_args[i]);
|
||||
|
||||
|
|
|
@ -834,22 +834,18 @@ static void dump_info(void)
|
|||
die("Unknown type of information\n");
|
||||
}
|
||||
|
||||
static int process_sample_event(event_t *self, struct perf_session *s)
|
||||
static int process_sample_event(event_t *self, struct sample_data *sample,
|
||||
struct perf_session *s)
|
||||
{
|
||||
struct sample_data data;
|
||||
struct thread *thread;
|
||||
struct thread *thread = perf_session__findnew(s, sample->tid);
|
||||
|
||||
bzero(&data, sizeof(data));
|
||||
event__parse_sample(self, s->sample_type, &data);
|
||||
|
||||
thread = perf_session__findnew(s, data.tid);
|
||||
if (thread == NULL) {
|
||||
pr_debug("problem processing %d event, skipping it.\n",
|
||||
self->header.type);
|
||||
return -1;
|
||||
}
|
||||
|
||||
process_raw_event(data.raw_data, data.cpu, data.time, thread);
|
||||
process_raw_event(sample->raw_data, sample->cpu, sample->time, thread);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -947,6 +943,9 @@ static int __cmd_record(int argc, const char **argv)
|
|||
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
|
||||
rec_argv = calloc(rec_argc + 1, sizeof(char *));
|
||||
|
||||
if (rec_argv == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(record_args); i++)
|
||||
rec_argv[i] = strdup(record_args[i]);
|
||||
|
||||
|
|
|
@ -36,6 +36,7 @@ static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
|
|||
|
||||
static u64 user_interval = ULLONG_MAX;
|
||||
static u64 default_interval = 0;
|
||||
static u64 sample_type;
|
||||
|
||||
static int nr_cpus = 0;
|
||||
static unsigned int page_size;
|
||||
|
@ -48,6 +49,7 @@ static const char *output_name = "perf.data";
|
|||
static int group = 0;
|
||||
static int realtime_prio = 0;
|
||||
static bool raw_samples = false;
|
||||
static bool sample_id_all_avail = true;
|
||||
static bool system_wide = false;
|
||||
static pid_t target_pid = -1;
|
||||
static pid_t target_tid = -1;
|
||||
|
@ -60,6 +62,7 @@ static bool call_graph = false;
|
|||
static bool inherit_stat = false;
|
||||
static bool no_samples = false;
|
||||
static bool sample_address = false;
|
||||
static bool sample_time = false;
|
||||
static bool no_buildid = false;
|
||||
static bool no_buildid_cache = false;
|
||||
|
||||
|
@ -129,6 +132,7 @@ static void write_output(void *buf, size_t size)
|
|||
}
|
||||
|
||||
static int process_synthesized_event(event_t *event,
|
||||
struct sample_data *sample __used,
|
||||
struct perf_session *self __used)
|
||||
{
|
||||
write_output(event, event->header.size);
|
||||
|
@ -281,12 +285,18 @@ static void create_counter(int counter, int cpu)
|
|||
if (system_wide)
|
||||
attr->sample_type |= PERF_SAMPLE_CPU;
|
||||
|
||||
if (sample_time)
|
||||
attr->sample_type |= PERF_SAMPLE_TIME;
|
||||
|
||||
if (raw_samples) {
|
||||
attr->sample_type |= PERF_SAMPLE_TIME;
|
||||
attr->sample_type |= PERF_SAMPLE_RAW;
|
||||
attr->sample_type |= PERF_SAMPLE_CPU;
|
||||
}
|
||||
|
||||
if (!sample_type)
|
||||
sample_type = attr->sample_type;
|
||||
|
||||
attr->mmap = track;
|
||||
attr->comm = track;
|
||||
attr->inherit = !no_inherit;
|
||||
|
@ -294,6 +304,8 @@ static void create_counter(int counter, int cpu)
|
|||
attr->disabled = 1;
|
||||
attr->enable_on_exec = 1;
|
||||
}
|
||||
retry_sample_id:
|
||||
attr->sample_id_all = sample_id_all_avail ? 1 : 0;
|
||||
|
||||
for (thread_index = 0; thread_index < thread_num; thread_index++) {
|
||||
try_again:
|
||||
|
@ -310,6 +322,12 @@ static void create_counter(int counter, int cpu)
|
|||
else if (err == ENODEV && cpu_list) {
|
||||
die("No such device - did you specify"
|
||||
" an out-of-range profile CPU?\n");
|
||||
} else if (err == EINVAL && sample_id_all_avail) {
|
||||
/*
|
||||
* Old kernel, no attr->sample_id_type_all field
|
||||
*/
|
||||
sample_id_all_avail = false;
|
||||
goto retry_sample_id;
|
||||
}
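
In userspace the feature detection above boils down to retrying the syscall without the new attr bit. A trimmed, hedged sketch of the probe-and-fallback pattern this hunk wires into builtin-record.c (only the retry logic is shown; descriptor bookkeeping and the remaining error handling are omitted):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>

static int open_counter(struct perf_event_attr *attr, pid_t pid, int cpu)
{
	int sample_id_all = 1;
	int fd;
retry:
	attr->sample_id_all = sample_id_all;
	fd = syscall(__NR_perf_event_open, attr, pid, cpu, -1 /* group */, 0);
	if (fd < 0 && errno == EINVAL && sample_id_all) {
		/* old kernel: attr->sample_id_all not understood, retry without it */
		sample_id_all = 0;
		goto retry;
	}
	return fd;
}
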
|
||||
|
||||
/*
|
||||
|
@ -642,6 +660,8 @@ static int __cmd_record(int argc, const char **argv)
|
|||
open_counters(cpumap[i]);
|
||||
}
|
||||
|
||||
perf_session__set_sample_type(session, sample_type);
|
||||
|
||||
if (pipe_output) {
|
||||
err = perf_header__write_pipe(output);
|
||||
if (err < 0)
|
||||
|
@ -654,6 +674,8 @@ static int __cmd_record(int argc, const char **argv)
|
|||
|
||||
post_processing_offset = lseek(output, 0, SEEK_CUR);
|
||||
|
||||
perf_session__set_sample_id_all(session, sample_id_all_avail);
|
||||
|
||||
if (pipe_output) {
|
||||
err = event__synthesize_attrs(&session->header,
|
||||
process_synthesized_event,
|
||||
|
@ -834,6 +856,7 @@ const struct option record_options[] = {
|
|||
"per thread counts"),
|
||||
OPT_BOOLEAN('d', "data", &sample_address,
|
||||
"Sample addresses"),
|
||||
OPT_BOOLEAN('T', "timestamp", &sample_time, "Sample timestamps"),
|
||||
OPT_BOOLEAN('n', "no-samples", &no_samples,
|
||||
"don't sample"),
|
||||
OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache,
|
||||
|
|
|
@ -150,13 +150,13 @@ static int add_event_total(struct perf_session *session,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int process_sample_event(event_t *event, struct perf_session *session)
|
||||
static int process_sample_event(event_t *event, struct sample_data *sample,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct sample_data data = { .period = 1, };
|
||||
struct addr_location al;
|
||||
struct perf_event_attr *attr;
|
||||
|
||||
if (event__preprocess_sample(event, session, &al, &data, NULL) < 0) {
|
||||
if (event__preprocess_sample(event, session, &al, sample, NULL) < 0) {
|
||||
fprintf(stderr, "problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
return -1;
|
||||
|
@ -165,14 +165,14 @@ static int process_sample_event(event_t *event, struct perf_session *session)
|
|||
if (al.filtered || (hide_unresolved && al.sym == NULL))
|
||||
return 0;
|
||||
|
||||
if (perf_session__add_hist_entry(session, &al, &data)) {
|
||||
if (perf_session__add_hist_entry(session, &al, sample)) {
|
||||
pr_debug("problem incrementing symbol period, skipping event\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
attr = perf_header__find_attr(data.id, &session->header);
|
||||
attr = perf_header__find_attr(sample->id, &session->header);
|
||||
|
||||
if (add_event_total(session, &data, attr)) {
|
||||
if (add_event_total(session, sample, attr)) {
|
||||
pr_debug("problem adding event period\n");
|
||||
return -1;
|
||||
}
|
||||
|
@ -180,7 +180,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int process_read_event(event_t *event, struct perf_session *session __used)
|
||||
static int process_read_event(event_t *event, struct sample_data *sample __used,
|
||||
struct perf_session *session __used)
|
||||
{
|
||||
struct perf_event_attr *attr;
|
||||
|
||||
|
|
|
@ -1606,25 +1606,15 @@ process_raw_event(event_t *raw_event __used, struct perf_session *session,
|
|||
process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
|
||||
}
|
||||
|
||||
static int process_sample_event(event_t *event, struct perf_session *session)
|
||||
static int process_sample_event(event_t *event, struct sample_data *sample,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct sample_data data;
|
||||
struct thread *thread;
|
||||
|
||||
if (!(session->sample_type & PERF_SAMPLE_RAW))
|
||||
return 0;
|
||||
|
||||
memset(&data, 0, sizeof(data));
|
||||
data.time = -1;
|
||||
data.cpu = -1;
|
||||
data.period = -1;
|
||||
|
||||
event__parse_sample(event, session->sample_type, &data);
|
||||
|
||||
dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
|
||||
data.pid, data.tid, data.ip, data.period);
|
||||
|
||||
thread = perf_session__findnew(session, data.pid);
|
||||
thread = perf_session__findnew(session, sample->pid);
|
||||
if (thread == NULL) {
|
||||
pr_debug("problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
|
@ -1633,10 +1623,11 @@ static int process_sample_event(event_t *event, struct perf_session *session)
|
|||
|
||||
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
|
||||
|
||||
if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
|
||||
if (profile_cpu != -1 && profile_cpu != (int)sample->cpu)
|
||||
return 0;
|
||||
|
||||
process_raw_event(event, session, data.raw_data, data.cpu, data.time, thread);
|
||||
process_raw_event(event, session, sample->raw_data, sample->cpu,
|
||||
sample->time, thread);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1869,6 +1860,9 @@ static int __cmd_record(int argc, const char **argv)
|
|||
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
|
||||
rec_argv = calloc(rec_argc + 1, sizeof(char *));
|
||||
|
||||
if (rec_argv)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(record_args); i++)
|
||||
rec_argv[i] = strdup(record_args[i]);
|
||||
|
||||
|
|
|
@ -63,22 +63,11 @@ static int cleanup_scripting(void)
|
|||
|
||||
static char const *input_name = "perf.data";
|
||||
|
||||
static int process_sample_event(event_t *event, struct perf_session *session)
|
||||
static int process_sample_event(event_t *event, struct sample_data *sample,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct sample_data data;
|
||||
struct thread *thread;
|
||||
struct thread *thread = perf_session__findnew(session, event->ip.pid);
|
||||
|
||||
memset(&data, 0, sizeof(data));
|
||||
data.time = -1;
|
||||
data.cpu = -1;
|
||||
data.period = 1;
|
||||
|
||||
event__parse_sample(event, session->sample_type, &data);
|
||||
|
||||
dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
|
||||
data.pid, data.tid, data.ip, data.period);
|
||||
|
||||
thread = perf_session__findnew(session, event->ip.pid);
|
||||
if (thread == NULL) {
|
||||
pr_debug("problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
|
@ -87,13 +76,13 @@ static int process_sample_event(event_t *event, struct perf_session *session)
|
|||
|
||||
if (session->sample_type & PERF_SAMPLE_RAW) {
|
||||
if (debug_mode) {
|
||||
if (data.time < last_timestamp) {
|
||||
if (sample->time < last_timestamp) {
|
||||
pr_err("Samples misordered, previous: %llu "
|
||||
"this: %llu\n", last_timestamp,
|
||||
data.time);
|
||||
sample->time);
|
||||
nr_unordered++;
|
||||
}
|
||||
last_timestamp = data.time;
|
||||
last_timestamp = sample->time;
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
|
@ -101,18 +90,19 @@ static int process_sample_event(event_t *event, struct perf_session *session)
|
|||
* field, although it should be the same than this perf
|
||||
* event pid
|
||||
*/
|
||||
scripting_ops->process_event(data.cpu, data.raw_data,
|
||||
data.raw_size,
|
||||
data.time, thread->comm);
|
||||
scripting_ops->process_event(sample->cpu, sample->raw_data,
|
||||
sample->raw_size,
|
||||
sample->time, thread->comm);
|
||||
}
|
||||
|
||||
session->hists.stats.total_period += data.period;
|
||||
session->hists.stats.total_period += sample->period;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u64 nr_lost;
|
||||
|
||||
static int process_lost_event(event_t *event, struct perf_session *session __used)
|
||||
static int process_lost_event(event_t *event, struct sample_data *sample __used,
|
||||
struct perf_session *session __used)
|
||||
{
|
||||
nr_lost += event->lost.lost;
|
||||
|
||||
|
@ -397,10 +387,10 @@ static struct script_desc *script_desc__findnew(const char *name)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static char *ends_with(char *str, const char *suffix)
|
||||
static const char *ends_with(const char *str, const char *suffix)
|
||||
{
|
||||
size_t suffix_len = strlen(suffix);
|
||||
char *p = str;
|
||||
const char *p = str;
|
||||
|
||||
if (strlen(str) > suffix_len) {
|
||||
p = str + strlen(str) - suffix_len;
|
||||
|
@ -492,7 +482,7 @@ static int list_available_scripts(const struct option *opt __used,
|
|||
|
||||
for_each_script(lang_path, lang_dir, script_dirent, script_next) {
|
||||
script_root = strdup(script_dirent.d_name);
|
||||
str = ends_with(script_root, REPORT_SUFFIX);
|
||||
str = (char *)ends_with(script_root, REPORT_SUFFIX);
|
||||
if (str) {
|
||||
*str = '\0';
|
||||
desc = script_desc__findnew(script_root);
|
||||
|
@ -540,7 +530,7 @@ static char *get_script_path(const char *script_root, const char *suffix)
|
|||
|
||||
for_each_script(lang_path, lang_dir, script_dirent, script_next) {
|
||||
__script_root = strdup(script_dirent.d_name);
|
||||
str = ends_with(__script_root, suffix);
|
||||
str = (char *)ends_with(__script_root, suffix);
|
||||
if (str) {
|
||||
*str = '\0';
|
||||
if (strcmp(__script_root, script_root))
|
||||
|
@ -560,7 +550,7 @@ static char *get_script_path(const char *script_root, const char *suffix)
|
|||
|
||||
static bool is_top_script(const char *script_path)
|
||||
{
|
||||
return ends_with((char *)script_path, "top") == NULL ? false : true;
|
||||
return ends_with(script_path, "top") == NULL ? false : true;
|
||||
}
|
||||
|
||||
static int has_required_arg(char *script_path)
|
||||
|
|
|
@ -272,19 +272,22 @@ static int cpus_cstate_state[MAX_CPUS];
|
|||
static u64 cpus_pstate_start_times[MAX_CPUS];
|
||||
static u64 cpus_pstate_state[MAX_CPUS];
|
||||
|
||||
static int process_comm_event(event_t *event, struct perf_session *session __used)
|
||||
static int process_comm_event(event_t *event, struct sample_data *sample __used,
|
||||
struct perf_session *session __used)
|
||||
{
|
||||
pid_set_comm(event->comm.tid, event->comm.comm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_fork_event(event_t *event, struct perf_session *session __used)
|
||||
static int process_fork_event(event_t *event, struct sample_data *sample __used,
|
||||
struct perf_session *session __used)
|
||||
{
|
||||
pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_exit_event(event_t *event, struct perf_session *session __used)
|
||||
static int process_exit_event(event_t *event, struct sample_data *sample __used,
|
||||
struct perf_session *session __used)
|
||||
{
|
||||
pid_exit(event->fork.pid, event->fork.time);
|
||||
return 0;
|
||||
|
@ -470,24 +473,21 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
|
|||
}
|
||||
|
||||
|
||||
static int process_sample_event(event_t *event, struct perf_session *session)
|
||||
static int process_sample_event(event_t *event __used,
|
||||
struct sample_data *sample,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct sample_data data;
|
||||
struct trace_entry *te;
|
||||
|
||||
memset(&data, 0, sizeof(data));
|
||||
|
||||
event__parse_sample(event, session->sample_type, &data);
|
||||
|
||||
if (session->sample_type & PERF_SAMPLE_TIME) {
|
||||
if (!first_time || first_time > data.time)
|
||||
first_time = data.time;
|
||||
if (last_time < data.time)
|
||||
last_time = data.time;
|
||||
if (!first_time || first_time > sample->time)
|
||||
first_time = sample->time;
|
||||
if (last_time < sample->time)
|
||||
last_time = sample->time;
|
||||
}
|
||||
|
||||
te = (void *)data.raw_data;
|
||||
if (session->sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) {
|
||||
te = (void *)sample->raw_data;
|
||||
if (session->sample_type & PERF_SAMPLE_RAW && sample->raw_size > 0) {
|
||||
char *event_str;
|
||||
struct power_entry *pe;
|
||||
|
||||
|
@ -499,19 +499,19 @@ static int process_sample_event(event_t *event, struct perf_session *session)
|
|||
return 0;
|
||||
|
||||
if (strcmp(event_str, "power:power_start") == 0)
|
||||
c_state_start(pe->cpu_id, data.time, pe->value);
|
||||
c_state_start(pe->cpu_id, sample->time, pe->value);
|
||||
|
||||
if (strcmp(event_str, "power:power_end") == 0)
|
||||
c_state_end(pe->cpu_id, data.time);
|
||||
c_state_end(pe->cpu_id, sample->time);
|
||||
|
||||
if (strcmp(event_str, "power:power_frequency") == 0)
|
||||
p_state_change(pe->cpu_id, data.time, pe->value);
|
||||
p_state_change(pe->cpu_id, sample->time, pe->value);
|
||||
|
||||
if (strcmp(event_str, "sched:sched_wakeup") == 0)
|
||||
sched_wakeup(data.cpu, data.time, data.pid, te);
|
||||
sched_wakeup(sample->cpu, sample->time, sample->pid, te);
|
||||
|
||||
if (strcmp(event_str, "sched:sched_switch") == 0)
|
||||
sched_switch(data.cpu, data.time, te);
|
||||
sched_switch(sample->cpu, sample->time, te);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -989,6 +989,9 @@ static int __cmd_record(int argc, const char **argv)
|
|||
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
|
||||
rec_argv = calloc(rec_argc + 1, sizeof(char *));
|
||||
|
||||
if (rec_argv == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(record_args); i++)
|
||||
rec_argv[i] = strdup(record_args[i]);
|
||||
|
||||
|
|
|
@ -977,12 +977,12 @@ static int symbol_filter(struct map *map, struct symbol *sym)
|
|||
}
|
||||
|
||||
static void event__process_sample(const event_t *self,
|
||||
struct perf_session *session, int counter)
|
||||
struct sample_data *sample,
|
||||
struct perf_session *session, int counter)
|
||||
{
|
||||
u64 ip = self->ip.ip;
|
||||
struct sym_entry *syme;
|
||||
struct addr_location al;
|
||||
struct sample_data data;
|
||||
struct machine *machine;
|
||||
u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
|
||||
|
||||
|
@ -1025,7 +1025,7 @@ static void event__process_sample(const event_t *self,
|
|||
if (self->header.misc & PERF_RECORD_MISC_EXACT_IP)
|
||||
exact_samples++;
|
||||
|
||||
if (event__preprocess_sample(self, session, &al, &data,
|
||||
if (event__preprocess_sample(self, session, &al, sample,
|
||||
symbol_filter) < 0 ||
|
||||
al.filtered)
|
||||
return;
|
||||
|
@ -1105,6 +1105,7 @@ static void perf_session__mmap_read_counter(struct perf_session *self,
|
|||
unsigned int head = mmap_read_head(md);
|
||||
unsigned int old = md->prev;
|
||||
unsigned char *data = md->base + page_size;
|
||||
struct sample_data sample;
|
||||
int diff;
|
||||
|
||||
/*
|
||||
|
@ -1152,10 +1153,11 @@ static void perf_session__mmap_read_counter(struct perf_session *self,
|
|||
event = &event_copy;
|
||||
}
|
||||
|
||||
event__parse_sample(event, self, &sample);
|
||||
if (event->header.type == PERF_RECORD_SAMPLE)
|
||||
event__process_sample(event, self, md->counter);
|
||||
event__process_sample(event, &sample, self, md->counter);
|
||||
else
|
||||
event__process(event, self);
|
||||
event__process(event, &sample, self);
|
||||
old += size;
|
||||
}
|
||||
|
||||
|
|
|
@ -14,7 +14,9 @@
|
|||
#include <linux/kernel.h>
|
||||
#include "debug.h"
|
||||
|
||||
static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
|
||||
static int build_id__mark_dso_hit(event_t *event,
|
||||
struct sample_data *sample __used,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct addr_location al;
|
||||
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
|
||||
|
@ -35,7 +37,8 @@ static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int event__exit_del_thread(event_t *self, struct perf_session *session)
|
||||
static int event__exit_del_thread(event_t *self, struct sample_data *sample __used,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct thread *thread = perf_session__findnew(session, self->fork.tid);
|
||||
|
||||
|
|
|
@ -24,11 +24,19 @@ const char *event__name[] = {
|
|||
[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
|
||||
};
|
||||
|
||||
static pid_t event__synthesize_comm(pid_t pid, int full,
|
||||
static struct sample_data synth_sample = {
|
||||
.pid = -1,
|
||||
.tid = -1,
|
||||
.time = -1,
|
||||
.stream_id = -1,
|
||||
.cpu = -1,
|
||||
.period = 1,
|
||||
};
|
||||
|
||||
static pid_t event__synthesize_comm(event_t *event, pid_t pid, int full,
|
||||
event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
event_t ev;
|
||||
char filename[PATH_MAX];
|
||||
char bf[BUFSIZ];
|
||||
FILE *fp;
|
||||
|
@ -49,34 +57,39 @@ static pid_t event__synthesize_comm(pid_t pid, int full,
|
|||
return 0;
|
||||
}
|
||||
|
||||
memset(&ev.comm, 0, sizeof(ev.comm));
|
||||
while (!ev.comm.comm[0] || !ev.comm.pid) {
|
||||
if (fgets(bf, sizeof(bf), fp) == NULL)
|
||||
goto out_failure;
|
||||
memset(&event->comm, 0, sizeof(event->comm));
|
||||
|
||||
while (!event->comm.comm[0] || !event->comm.pid) {
|
||||
if (fgets(bf, sizeof(bf), fp) == NULL) {
|
||||
pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (memcmp(bf, "Name:", 5) == 0) {
|
||||
char *name = bf + 5;
|
||||
while (*name && isspace(*name))
|
||||
++name;
|
||||
size = strlen(name) - 1;
|
||||
memcpy(ev.comm.comm, name, size++);
|
||||
memcpy(event->comm.comm, name, size++);
|
||||
} else if (memcmp(bf, "Tgid:", 5) == 0) {
|
||||
char *tgids = bf + 5;
|
||||
while (*tgids && isspace(*tgids))
|
||||
++tgids;
|
||||
tgid = ev.comm.pid = atoi(tgids);
|
||||
tgid = event->comm.pid = atoi(tgids);
|
||||
}
|
||||
}
|
||||
|
||||
ev.comm.header.type = PERF_RECORD_COMM;
|
||||
event->comm.header.type = PERF_RECORD_COMM;
|
||||
size = ALIGN(size, sizeof(u64));
|
||||
ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);
|
||||
|
||||
memset(event->comm.comm + size, 0, session->id_hdr_size);
|
||||
event->comm.header.size = (sizeof(event->comm) -
|
||||
(sizeof(event->comm.comm) - size) +
|
||||
session->id_hdr_size);
|
||||
if (!full) {
|
||||
ev.comm.tid = pid;
|
||||
event->comm.tid = pid;
|
||||
|
||||
process(&ev, session);
|
||||
goto out_fclose;
|
||||
process(event, &synth_sample, session);
|
||||
goto out;
|
||||
}
|
||||
|
||||
snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
|
||||
|
@ -91,22 +104,19 @@ static pid_t event__synthesize_comm(pid_t pid, int full,
|
|||
if (*end)
|
||||
continue;
|
||||
|
||||
ev.comm.tid = pid;
|
||||
event->comm.tid = pid;
|
||||
|
||||
process(&ev, session);
|
||||
process(event, &synth_sample, session);
|
||||
}
|
||||
|
||||
closedir(tasks);
|
||||
|
||||
out_fclose:
|
||||
out:
|
||||
fclose(fp);
|
||||
return tgid;
|
||||
|
||||
out_failure:
|
||||
pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
|
||||
return -1;
|
||||
return tgid;
|
||||
}
|
||||
|
||||
static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
|
||||
static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
|
||||
event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
|
@ -124,29 +134,25 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
|
|||
return -1;
|
||||
}
|
||||
|
||||
event->header.type = PERF_RECORD_MMAP;
|
||||
/*
|
||||
* Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
|
||||
*/
|
||||
event->header.misc = PERF_RECORD_MISC_USER;
|
||||
|
||||
while (1) {
|
||||
char bf[BUFSIZ], *pbf = bf;
|
||||
event_t ev = {
|
||||
.header = {
|
||||
.type = PERF_RECORD_MMAP,
|
||||
/*
|
||||
* Just like the kernel, see __perf_event_mmap
|
||||
* in kernel/perf_event.c
|
||||
*/
|
||||
.misc = PERF_RECORD_MISC_USER,
|
||||
},
|
||||
};
|
||||
int n;
|
||||
size_t size;
|
||||
if (fgets(bf, sizeof(bf), fp) == NULL)
|
||||
break;
|
||||
|
||||
/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
|
||||
n = hex2u64(pbf, &ev.mmap.start);
|
||||
n = hex2u64(pbf, &event->mmap.start);
|
||||
if (n < 0)
|
||||
continue;
|
||||
pbf += n + 1;
|
||||
n = hex2u64(pbf, &ev.mmap.len);
|
||||
n = hex2u64(pbf, &event->mmap.len);
|
||||
if (n < 0)
|
||||
continue;
|
||||
pbf += n + 3;
|
||||
|
@ -161,19 +167,21 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
|
|||
continue;
|
||||
|
||||
pbf += 3;
|
||||
n = hex2u64(pbf, &ev.mmap.pgoff);
|
||||
n = hex2u64(pbf, &event->mmap.pgoff);
|
||||
|
||||
size = strlen(execname);
|
||||
execname[size - 1] = '\0'; /* Remove \n */
|
||||
memcpy(ev.mmap.filename, execname, size);
|
||||
memcpy(event->mmap.filename, execname, size);
|
||||
size = ALIGN(size, sizeof(u64));
|
||||
ev.mmap.len -= ev.mmap.start;
|
||||
ev.mmap.header.size = (sizeof(ev.mmap) -
|
||||
(sizeof(ev.mmap.filename) - size));
|
||||
ev.mmap.pid = tgid;
|
||||
ev.mmap.tid = pid;
|
||||
event->mmap.len -= event->mmap.start;
|
||||
event->mmap.header.size = (sizeof(event->mmap) -
|
||||
(sizeof(event->mmap.filename) - size));
|
||||
memset(event->mmap.filename + size, 0, session->id_hdr_size);
|
||||
event->mmap.header.size += session->id_hdr_size;
|
||||
event->mmap.pid = tgid;
|
||||
event->mmap.tid = pid;
|
||||
|
||||
process(&ev, session);
|
||||
process(event, &synth_sample, session);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -187,20 +195,27 @@ int event__synthesize_modules(event__handler_t process,
|
|||
{
|
||||
struct rb_node *nd;
|
||||
struct map_groups *kmaps = &machine->kmaps;
|
||||
u16 misc;
|
||||
event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
|
||||
|
||||
if (event == NULL) {
|
||||
pr_debug("Not enough memory synthesizing mmap event "
|
||||
"for kernel modules\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
event->header.type = PERF_RECORD_MMAP;
|
||||
|
||||
/*
|
||||
* kernel uses 0 for user space maps, see kernel/perf_event.c
|
||||
* __perf_event_mmap
|
||||
*/
|
||||
if (machine__is_host(machine))
|
||||
misc = PERF_RECORD_MISC_KERNEL;
|
||||
event->header.misc = PERF_RECORD_MISC_KERNEL;
|
||||
else
|
||||
misc = PERF_RECORD_MISC_GUEST_KERNEL;
|
||||
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
|
||||
|
||||
for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
|
||||
nd; nd = rb_next(nd)) {
|
||||
event_t ev;
|
||||
size_t size;
|
||||
struct map *pos = rb_entry(nd, struct map, rb_node);
|
||||
|
||||
|
@ -208,39 +223,78 @@ int event__synthesize_modules(event__handler_t process,
|
|||
continue;
|
||||
|
||||
size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
|
||||
memset(&ev, 0, sizeof(ev));
|
||||
ev.mmap.header.misc = misc;
|
||||
ev.mmap.header.type = PERF_RECORD_MMAP;
|
||||
ev.mmap.header.size = (sizeof(ev.mmap) -
|
||||
(sizeof(ev.mmap.filename) - size));
|
||||
ev.mmap.start = pos->start;
|
||||
ev.mmap.len = pos->end - pos->start;
|
||||
ev.mmap.pid = machine->pid;
|
||||
event->mmap.header.type = PERF_RECORD_MMAP;
|
||||
event->mmap.header.size = (sizeof(event->mmap) -
|
||||
(sizeof(event->mmap.filename) - size));
|
||||
memset(event->mmap.filename + size, 0, session->id_hdr_size);
|
||||
event->mmap.header.size += session->id_hdr_size;
|
||||
event->mmap.start = pos->start;
|
||||
event->mmap.len = pos->end - pos->start;
|
||||
event->mmap.pid = machine->pid;
|
||||
|
||||
memcpy(ev.mmap.filename, pos->dso->long_name,
|
||||
memcpy(event->mmap.filename, pos->dso->long_name,
|
||||
pos->dso->long_name_len + 1);
|
||||
process(&ev, session);
|
||||
process(event, &synth_sample, session);
|
||||
}
|
||||
|
||||
free(event);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event,
|
||||
pid_t pid, event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
pid_t tgid = event__synthesize_comm(comm_event, pid, 1, process,
|
||||
session);
|
||||
if (tgid == -1)
|
||||
return -1;
|
||||
return event__synthesize_mmap_events(mmap_event, pid, tgid,
|
||||
process, session);
|
||||
}
|
||||
|
||||
int event__synthesize_thread(pid_t pid, event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
pid_t tgid = event__synthesize_comm(pid, 1, process, session);
|
||||
if (tgid == -1)
|
||||
return -1;
|
||||
return event__synthesize_mmap_events(pid, tgid, process, session);
|
||||
event_t *comm_event, *mmap_event;
|
||||
int err = -1;
|
||||
|
||||
comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
|
||||
if (comm_event == NULL)
|
||||
goto out;
|
||||
|
||||
mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
|
||||
if (mmap_event == NULL)
|
||||
goto out_free_comm;
|
||||
|
||||
err = __event__synthesize_thread(comm_event, mmap_event, pid,
|
||||
process, session);
|
||||
free(mmap_event);
|
||||
out_free_comm:
|
||||
free(comm_event);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
void event__synthesize_threads(event__handler_t process,
|
||||
struct perf_session *session)
|
||||
int event__synthesize_threads(event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
DIR *proc;
|
||||
struct dirent dirent, *next;
|
||||
event_t *comm_event, *mmap_event;
|
||||
int err = -1;
|
||||
|
||||
comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
|
||||
if (comm_event == NULL)
|
||||
goto out;
|
||||
|
||||
mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
|
||||
if (mmap_event == NULL)
|
||||
goto out_free_comm;
|
||||
|
||||
proc = opendir("/proc");
|
||||
if (proc == NULL)
|
||||
goto out_free_mmap;
|
||||
|
||||
while (!readdir_r(proc, &dirent, &next) && next) {
|
||||
char *end;
|
||||
|
@ -249,10 +303,18 @@ void event__synthesize_threads(event__handler_t process,
|
|||
if (*end) /* only interested in proper numerical dirents */
|
||||
continue;
|
||||
|
||||
event__synthesize_thread(pid, process, session);
|
||||
__event__synthesize_thread(comm_event, mmap_event, pid,
|
||||
process, session);
|
||||
}
|
||||
|
||||
closedir(proc);
|
||||
err = 0;
|
||||
out_free_mmap:
|
||||
free(mmap_event);
|
||||
out_free_comm:
|
||||
free(comm_event);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
struct process_symbol_args {
|
||||
|
@ -286,18 +348,20 @@ int event__synthesize_kernel_mmap(event__handler_t process,
|
|||
char path[PATH_MAX];
|
||||
char name_buff[PATH_MAX];
|
||||
struct map *map;
|
||||
|
||||
event_t ev = {
|
||||
.header = {
|
||||
.type = PERF_RECORD_MMAP,
|
||||
},
|
||||
};
|
||||
int err;
|
||||
/*
|
||||
* We should get this from /sys/kernel/sections/.text, but till that is
|
||||
* available use this, and after it is use this as a fallback for older
|
||||
* kernels.
|
||||
*/
|
||||
struct process_symbol_args args = { .name = symbol_name, };
|
||||
event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
|
||||
|
||||
if (event == NULL) {
|
||||
pr_debug("Not enough memory synthesizing mmap event "
|
||||
"for kernel modules\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
|
||||
if (machine__is_host(machine)) {
|
||||
|
@ -305,10 +369,10 @@ int event__synthesize_kernel_mmap(event__handler_t process,
|
|||
* kernel uses PERF_RECORD_MISC_USER for user space maps,
|
||||
* see kernel/perf_event.c __perf_event_mmap
|
||||
*/
|
||||
ev.header.misc = PERF_RECORD_MISC_KERNEL;
|
||||
event->header.misc = PERF_RECORD_MISC_KERNEL;
|
||||
filename = "/proc/kallsyms";
|
||||
} else {
|
||||
ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
|
||||
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
|
||||
if (machine__is_default_guest(machine))
|
||||
filename = (char *) symbol_conf.default_guest_kallsyms;
|
||||
else {
|
||||
|
@ -321,17 +385,21 @@ int event__synthesize_kernel_mmap(event__handler_t process,
|
|||
return -ENOENT;
|
||||
|
||||
map = machine->vmlinux_maps[MAP__FUNCTION];
|
||||
size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
|
||||
size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
|
||||
"%s%s", mmap_name, symbol_name) + 1;
|
||||
size = ALIGN(size, sizeof(u64));
|
||||
ev.mmap.header.size = (sizeof(ev.mmap) -
|
||||
(sizeof(ev.mmap.filename) - size));
|
||||
ev.mmap.pgoff = args.start;
|
||||
ev.mmap.start = map->start;
|
||||
ev.mmap.len = map->end - ev.mmap.start;
|
||||
ev.mmap.pid = machine->pid;
|
||||
event->mmap.header.type = PERF_RECORD_MMAP;
|
||||
event->mmap.header.size = (sizeof(event->mmap) -
|
||||
(sizeof(event->mmap.filename) - size) + session->id_hdr_size);
|
||||
event->mmap.pgoff = args.start;
|
||||
event->mmap.start = map->start;
|
||||
event->mmap.len = map->end - event->mmap.start;
|
||||
event->mmap.pid = machine->pid;
|
||||
|
||||
return process(&ev, session);
|
||||
err = process(event, &synth_sample, session);
|
||||
free(event);
|
||||
|
||||
return err;
|
||||
}

static void thread__comm_adjust(struct thread *self, struct hists *hists)

@ -361,7 +429,8 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm,
return 0;
}

int event__process_comm(event_t *self, struct perf_session *session)
int event__process_comm(event_t *self, struct sample_data *sample __used,
struct perf_session *session)
{
struct thread *thread = perf_session__findnew(session, self->comm.tid);

@ -376,7 +445,8 @@ int event__process_comm(event_t *self, struct perf_session *session)
return 0;
}

int event__process_lost(event_t *self, struct perf_session *session)
int event__process_lost(event_t *self, struct sample_data *sample __used,
struct perf_session *session)
{
dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
session->hists.stats.total_lost += self->lost.lost;

@ -485,7 +555,8 @@ static int event__process_kernel_mmap(event_t *self,
return -1;
}

int event__process_mmap(event_t *self, struct perf_session *session)
int event__process_mmap(event_t *self, struct sample_data *sample __used,
struct perf_session *session)
{
struct machine *machine;
struct thread *thread;

@ -526,7 +597,8 @@ int event__process_mmap(event_t *self, struct perf_session *session)
return 0;
}

int event__process_task(event_t *self, struct perf_session *session)
int event__process_task(event_t *self, struct sample_data *sample __used,
struct perf_session *session)
{
struct thread *thread = perf_session__findnew(session, self->fork.tid);
struct thread *parent = perf_session__findnew(session, self->fork.ptid);

@ -548,18 +620,19 @@ int event__process_task(event_t *self, struct perf_session *session)
return 0;
}

int event__process(event_t *event, struct perf_session *session)
int event__process(event_t *event, struct sample_data *sample,
struct perf_session *session)
{
switch (event->header.type) {
case PERF_RECORD_COMM:
event__process_comm(event, session);
event__process_comm(event, sample, session);
break;
case PERF_RECORD_MMAP:
event__process_mmap(event, session);
event__process_mmap(event, sample, session);
break;
case PERF_RECORD_FORK:
case PERF_RECORD_EXIT:
event__process_task(event, session);
event__process_task(event, sample, session);
break;
default:
break;

@ -674,32 +747,8 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
symbol_filter_t filter)
{
u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
struct thread *thread;
struct thread *thread = perf_session__findnew(session, self->ip.pid);

event__parse_sample(self, session->sample_type, data);

dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld cpu:%d\n",
self->header.misc, data->pid, data->tid, data->ip,
data->period, data->cpu);

if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
unsigned int i;

dump_printf("... chain: nr:%Lu\n", data->callchain->nr);

if (!ip_callchain__valid(data->callchain, self)) {
pr_debug("call-chain problem with event, "
"skipping it.\n");
goto out_filtered;
}

if (dump_trace) {
for (i = 0; i < data->callchain->nr; i++)
dump_printf("..... %2d: %016Lx\n",
i, data->callchain->ips[i]);
}
}
thread = perf_session__findnew(session, self->ip.pid);
if (thread == NULL)
return -1;

@ -766,9 +815,65 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
return 0;
}

int event__parse_sample(const event_t *event, u64 type, struct sample_data *data)
static int event__parse_id_sample(const event_t *event,
struct perf_session *session,
struct sample_data *sample)
{
const u64 *array = event->sample.array;
const u64 *array;
u64 type;

sample->cpu = sample->pid = sample->tid = -1;
sample->stream_id = sample->id = sample->time = -1ULL;

if (!session->sample_id_all)
return 0;

array = event->sample.array;
array += ((event->header.size -
sizeof(event->header)) / sizeof(u64)) - 1;
type = session->sample_type;

if (type & PERF_SAMPLE_CPU) {
u32 *p = (u32 *)array;
sample->cpu = *p;
array--;
}

if (type & PERF_SAMPLE_STREAM_ID) {
sample->stream_id = *array;
array--;
}

if (type & PERF_SAMPLE_ID) {
sample->id = *array;
array--;
}

if (type & PERF_SAMPLE_TIME) {
sample->time = *array;
array--;
}

if (type & PERF_SAMPLE_TID) {
u32 *p = (u32 *)array;
sample->pid = p[0];
sample->tid = p[1];
}

return 0;
}

int event__parse_sample(const event_t *event, struct perf_session *session,
struct sample_data *data)
{
const u64 *array;
u64 type;

if (event->header.type != PERF_RECORD_SAMPLE)
return event__parse_id_sample(event, session, data);

array = event->sample.array;
type = session->sample_type;

if (type & PERF_SAMPLE_IP) {
data->ip = event->ip.ip;

@ -135,12 +135,15 @@ void event__print_totals(void);

struct perf_session;

typedef int (*event__handler_t)(event_t *event, struct perf_session *session);
typedef int (*event__handler_synth_t)(event_t *event,
struct perf_session *session);
typedef int (*event__handler_t)(event_t *event, struct sample_data *sample,
struct perf_session *session);

int event__synthesize_thread(pid_t pid, event__handler_t process,
struct perf_session *session);
void event__synthesize_threads(event__handler_t process,
struct perf_session *session);
int event__synthesize_threads(event__handler_t process,
struct perf_session *session);
int event__synthesize_kernel_mmap(event__handler_t process,
struct perf_session *session,
struct machine *machine,

@ -150,17 +153,23 @@ int event__synthesize_modules(event__handler_t process,
struct perf_session *session,
struct machine *machine);

int event__process_comm(event_t *self, struct perf_session *session);
int event__process_lost(event_t *self, struct perf_session *session);
int event__process_mmap(event_t *self, struct perf_session *session);
int event__process_task(event_t *self, struct perf_session *session);
int event__process(event_t *event, struct perf_session *session);
int event__process_comm(event_t *self, struct sample_data *sample,
struct perf_session *session);
int event__process_lost(event_t *self, struct sample_data *sample,
struct perf_session *session);
int event__process_mmap(event_t *self, struct sample_data *sample,
struct perf_session *session);
int event__process_task(event_t *self, struct sample_data *sample,
struct perf_session *session);
int event__process(event_t *event, struct sample_data *sample,
struct perf_session *session);

struct addr_location;
int event__preprocess_sample(const event_t *self, struct perf_session *session,
struct addr_location *al, struct sample_data *data,
symbol_filter_t filter);
int event__parse_sample(const event_t *event, u64 type, struct sample_data *data);
int event__parse_sample(const event_t *event, struct perf_session *session,
struct sample_data *sample);

extern const char *event__name[];

@ -946,6 +946,24 @@ u64 perf_header__sample_type(struct perf_header *header)
return type;
}

bool perf_header__sample_id_all(const struct perf_header *header)
{
bool value = false, first = true;
int i;

for (i = 0; i < header->attrs; i++) {
struct perf_header_attr *attr = header->attr[i];

if (first) {
value = attr->attr.sample_id_all;
first = false;
} else if (value != attr->attr.sample_id_all)
die("non matching sample_id_all");
}

return value;
}

struct perf_event_attr *
perf_header__find_attr(u64 id, struct perf_header *header)
{

@ -987,21 +1005,23 @@ int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,

ev = malloc(size);

if (ev == NULL)
return -ENOMEM;

ev->attr.attr = *attr;
memcpy(ev->attr.id, id, ids * sizeof(u64));

ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
ev->attr.header.size = size;

err = process(ev, session);
err = process(ev, NULL, session);

free(ev);

return err;
}

int event__synthesize_attrs(struct perf_header *self,
event__handler_t process,
int event__synthesize_attrs(struct perf_header *self, event__handler_t process,
struct perf_session *session)
{
struct perf_header_attr *attr;

@ -1071,7 +1091,7 @@ int event__synthesize_event_type(u64 event_id, char *name,
ev.event_type.header.size = sizeof(ev.event_type) -
(sizeof(ev.event_type.event_type.name) - size);

err = process(&ev, session);
err = process(&ev, NULL, session);

return err;
}

@ -1126,7 +1146,7 @@ int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
ev.tracing_data.header.size = sizeof(ev.tracing_data);
ev.tracing_data.size = aligned_size;

process(&ev, session);
process(&ev, NULL, session);

err = read_tracing_data(fd, pattrs, nb_events);
write_padded(fd, NULL, 0, padding);

@ -1186,7 +1206,7 @@ int event__synthesize_build_id(struct dso *pos, u16 misc,
ev.build_id.header.size = sizeof(ev.build_id) + len;
memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

err = process(&ev, session);
err = process(&ev, NULL, session);

return err;
}

@ -81,6 +81,7 @@ void perf_header_attr__delete(struct perf_header_attr *self);
int perf_header_attr__add_id(struct perf_header_attr *self, u64 id);

u64 perf_header__sample_type(struct perf_header *header);
bool perf_header__sample_id_all(const struct perf_header *header);
struct perf_event_attr *
perf_header__find_attr(u64 id, struct perf_header *header);
void perf_header__set_feat(struct perf_header *self, int feat);

@ -52,8 +52,10 @@ struct sym_priv {
struct events_stats {
u64 total_period;
u64 total_lost;
u64 total_invalid_chains;
u32 nr_events[PERF_RECORD_HEADER_MAX];
u32 nr_unknown_events;
u32 nr_invalid_chains;
};

enum hist_column {

@ -119,6 +119,10 @@ struct option {
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG }
#define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT }
#define OPT_CALLBACK_DEFAULT_NOOPT(s, l, v, a, h, f, d) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l),\
.value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d,\
.flags = PARSE_OPT_LASTARG_DEFAULT | PARSE_OPT_NOARG}

/* parse_options() will filter out the processed options and leave the
* non-option argments in argv[].

@ -65,9 +65,49 @@ static int perf_session__open(struct perf_session *self, bool force)
return -1;
}

static void perf_session__id_header_size(struct perf_session *session)
{
struct sample_data *data;
u64 sample_type = session->sample_type;
u16 size = 0;

if (!session->sample_id_all)
goto out;

if (sample_type & PERF_SAMPLE_TID)
size += sizeof(data->tid) * 2;

if (sample_type & PERF_SAMPLE_TIME)
size += sizeof(data->time);

if (sample_type & PERF_SAMPLE_ID)
size += sizeof(data->id);

if (sample_type & PERF_SAMPLE_STREAM_ID)
size += sizeof(data->stream_id);

if (sample_type & PERF_SAMPLE_CPU)
size += sizeof(data->cpu) * 2;
out:
session->id_hdr_size = size;
}
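
A quick worked example, not part of the diff: with sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID and sample_id_all on, the function above yields id_hdr_size = 8 + 8 + 8 = 24, so every non-sample record carries 24 trailing bytes of identity data; that is the same amount event__synthesize_kernel_mmap() folds into header.size via session->id_hdr_size. A minimal standalone sketch of the same arithmetic (the PERF_SAMPLE_* bit values are written out by hand; everything else is illustrative, not perf code):

#include <stdio.h>

/* Illustrative only: mirrors the per-field sizes used by
 * perf_session__id_header_size() above. */
static unsigned int id_hdr_size(unsigned long long sample_type)
{
        unsigned int size = 0;

        if (sample_type & (1ULL << 1))  /* PERF_SAMPLE_TID */
                size += 2 * sizeof(unsigned int);       /* pid + tid */
        if (sample_type & (1ULL << 2))  /* PERF_SAMPLE_TIME */
                size += sizeof(unsigned long long);
        if (sample_type & (1ULL << 6))  /* PERF_SAMPLE_ID */
                size += sizeof(unsigned long long);
        if (sample_type & (1ULL << 9))  /* PERF_SAMPLE_STREAM_ID */
                size += sizeof(unsigned long long);
        if (sample_type & (1ULL << 7))  /* PERF_SAMPLE_CPU */
                size += 2 * sizeof(unsigned int);       /* cpu + res */

        return size;
}

int main(void)
{
        /* TID | TIME | ID -> prints 24 */
        printf("%u\n", id_hdr_size((1ULL << 1) | (1ULL << 2) | (1ULL << 6)));
        return 0;
}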

void perf_session__set_sample_id_all(struct perf_session *session, bool value)
{
session->sample_id_all = value;
perf_session__id_header_size(session);
}

void perf_session__set_sample_type(struct perf_session *session, u64 type)
{
session->sample_type = type;
}

void perf_session__update_sample_type(struct perf_session *self)
{
self->sample_type = perf_header__sample_type(&self->header);
self->sample_id_all = perf_header__sample_id_all(&self->header);
perf_session__id_header_size(self);
}

int perf_session__create_kernel_maps(struct perf_session *self)

@ -240,7 +280,15 @@ struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
return syms;
}

static int process_event_synth_stub(event_t *event __used,
struct perf_session *session __used)
{
dump_printf(": unhandled!\n");
return 0;
}

static int process_event_stub(event_t *event __used,
struct sample_data *sample __used,
struct perf_session *session __used)
{
dump_printf(": unhandled!\n");

@ -280,13 +328,13 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
if (handler->unthrottle == NULL)
handler->unthrottle = process_event_stub;
if (handler->attr == NULL)
handler->attr = process_event_stub;
handler->attr = process_event_synth_stub;
if (handler->event_type == NULL)
handler->event_type = process_event_stub;
handler->event_type = process_event_synth_stub;
if (handler->tracing_data == NULL)
handler->tracing_data = process_event_stub;
handler->tracing_data = process_event_synth_stub;
if (handler->build_id == NULL)
handler->build_id = process_event_stub;
handler->build_id = process_event_synth_stub;
if (handler->finished_round == NULL) {
if (handler->ordered_samples)
handler->finished_round = process_finished_round;

@ -413,12 +461,18 @@ static void perf_session_free_sample_buffers(struct perf_session *session)
}
}

static int perf_session_deliver_event(struct perf_session *session,
event_t *event,
struct sample_data *sample,
struct perf_event_ops *ops);

static void flush_sample_queue(struct perf_session *s,
struct perf_event_ops *ops)
{
struct ordered_samples *os = &s->ordered_samples;
struct list_head *head = &os->samples;
struct sample_queue *tmp, *iter;
struct sample_data sample;
u64 limit = os->next_flush;
u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;

@ -429,7 +483,8 @@ static void flush_sample_queue(struct perf_session *s,
if (iter->timestamp > limit)
break;

ops->sample(iter->event, s);
event__parse_sample(iter->event, s, &sample);
perf_session_deliver_event(s, iter->event, &sample, ops);

os->last_flush = iter->timestamp;
list_del(&iter->list);

@ -494,8 +549,7 @@ static int process_finished_round(event_t *event __used,
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
struct perf_session *s)
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
struct ordered_samples *os = &s->ordered_samples;
struct sample_queue *sample = os->last_sample;

@ -541,14 +595,17 @@ static void __queue_sample_event(struct sample_queue *new,

#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))

static int queue_sample_event(event_t *event, struct sample_data *data,
struct perf_session *s)
static int perf_session_queue_event(struct perf_session *s, event_t *event,
struct sample_data *data)
{
struct ordered_samples *os = &s->ordered_samples;
struct list_head *sc = &os->sample_cache;
u64 timestamp = data->time;
struct sample_queue *new;

if (!timestamp)
return -ETIME;

if (timestamp < s->ordered_samples.last_flush) {
printf("Warning: Timestamp below last timeslice flush\n");
return -EINVAL;

@ -573,79 +630,142 @@ static int queue_sample_event(event_t *event, struct sample_data *data,
new->timestamp = timestamp;
new->event = event;

__queue_sample_event(new, s);
__queue_event(new, s);

return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
struct perf_event_ops *ops)
static void callchain__dump(struct sample_data *sample)
{
struct sample_data data;
unsigned int i;

if (!ops->ordered_samples)
return ops->sample(event, s);
if (!dump_trace)
return;

bzero(&data, sizeof(struct sample_data));
event__parse_sample(event, s->sample_type, &data);
printf("... chain: nr:%Lu\n", sample->callchain->nr);

queue_sample_event(event, &data, s);

return 0;
for (i = 0; i < sample->callchain->nr; i++)
printf("..... %2d: %016Lx\n", i, sample->callchain->ips[i]);
}

static int perf_session__process_event(struct perf_session *self,
static void perf_session__print_tstamp(struct perf_session *session,
event_t *event,
struct sample_data *sample)
{
if (event->header.type != PERF_RECORD_SAMPLE &&
!session->sample_id_all) {
fputs("-1 -1 ", stdout);
return;
}

if ((session->sample_type & PERF_SAMPLE_CPU))
printf("%u ", sample->cpu);

if (session->sample_type & PERF_SAMPLE_TIME)
printf("%Lu ", sample->time);
}

static int perf_session_deliver_event(struct perf_session *session,
event_t *event,
struct sample_data *sample,
struct perf_event_ops *ops)
{
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
return ops->sample(event, sample, session);
case PERF_RECORD_MMAP:
return ops->mmap(event, sample, session);
case PERF_RECORD_COMM:
return ops->comm(event, sample, session);
case PERF_RECORD_FORK:
return ops->fork(event, sample, session);
case PERF_RECORD_EXIT:
return ops->exit(event, sample, session);
case PERF_RECORD_LOST:
return ops->lost(event, sample, session);
case PERF_RECORD_READ:
return ops->read(event, sample, session);
case PERF_RECORD_THROTTLE:
return ops->throttle(event, sample, session);
case PERF_RECORD_UNTHROTTLE:
return ops->unthrottle(event, sample, session);
default:
++session->hists.stats.nr_unknown_events;
return -1;
}
}

static int perf_session__process_event(struct perf_session *session,
event_t *event,
struct perf_event_ops *ops,
u64 file_offset)
{
struct sample_data sample;
int ret;

trace_event(event);

if (session->header.needs_swap && event__swap_ops[event->header.type])
event__swap_ops[event->header.type](event);

if (event->header.type >= PERF_RECORD_MMAP &&
event->header.type <= PERF_RECORD_SAMPLE) {
event__parse_sample(event, session, &sample);
if (dump_trace)
perf_session__print_tstamp(session, event, &sample);
}

if (event->header.type < PERF_RECORD_HEADER_MAX) {
dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
file_offset, event->header.size,
event__name[event->header.type]);
hists__inc_nr_events(&self->hists, event->header.type);
hists__inc_nr_events(&session->hists, event->header.type);
}

if (self->header.needs_swap && event__swap_ops[event->header.type])
event__swap_ops[event->header.type](event);

/* These events are processed right away */
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
return perf_session__process_sample(event, self, ops);
case PERF_RECORD_MMAP:
return ops->mmap(event, self);
case PERF_RECORD_COMM:
return ops->comm(event, self);
case PERF_RECORD_FORK:
return ops->fork(event, self);
case PERF_RECORD_EXIT:
return ops->exit(event, self);
case PERF_RECORD_LOST:
return ops->lost(event, self);
case PERF_RECORD_READ:
return ops->read(event, self);
case PERF_RECORD_THROTTLE:
return ops->throttle(event, self);
case PERF_RECORD_UNTHROTTLE:
return ops->unthrottle(event, self);
dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n",
event->header.misc,
sample.pid, sample.tid, sample.ip, sample.period);

if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
if (!ip_callchain__valid(sample.callchain, event)) {
pr_debug("call-chain problem with event, "
"skipping it.\n");
++session->hists.stats.nr_invalid_chains;
session->hists.stats.total_invalid_chains +=
sample.period;
return 0;
}

callchain__dump(&sample);
}
break;

case PERF_RECORD_HEADER_ATTR:
return ops->attr(event, self);
return ops->attr(event, session);
case PERF_RECORD_HEADER_EVENT_TYPE:
return ops->event_type(event, self);
return ops->event_type(event, session);
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
lseek(self->fd, file_offset, SEEK_SET);
return ops->tracing_data(event, self);
lseek(session->fd, file_offset, SEEK_SET);
return ops->tracing_data(event, session);
case PERF_RECORD_HEADER_BUILD_ID:
return ops->build_id(event, self);
return ops->build_id(event, session);
case PERF_RECORD_FINISHED_ROUND:
return ops->finished_round(event, self, ops);
return ops->finished_round(event, session, ops);
default:
++self->hists.stats.nr_unknown_events;
return -1;
break;
}

if (ops->ordered_samples) {
ret = perf_session_queue_event(session, event, &sample);
if (ret != -ETIME)
return ret;
}

return perf_session_deliver_event(session, event, &sample, ops);
}

void perf_event_header__bswap(struct perf_event_header *self)

@ -894,6 +1014,14 @@ int __perf_session__process_events(struct perf_session *session,
session->hists.stats.nr_unknown_events);
}

if (session->hists.stats.nr_invalid_chains != 0) {
ui__warning("Found invalid callchains!\n\n"
"%u out of %u events were discarded for this reason.\n\n"
"Consider reporting to linux-kernel@vger.kernel.org.\n\n",
session->hists.stats.nr_invalid_chains,
session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
}

perf_session_free_sample_buffers(session);
return err;
}

@ -46,6 +46,8 @@ struct perf_session {
int fd;
bool fd_pipe;
bool repipe;
bool sample_id_all;
u16 id_hdr_size;
int cwdlen;
char *cwd;
struct ordered_samples ordered_samples;

@ -54,7 +56,9 @@ struct perf_session {

struct perf_event_ops;

typedef int (*event_op)(event_t *self, struct perf_session *session);
typedef int (*event_op)(event_t *self, struct sample_data *sample,
struct perf_session *session);
typedef int (*event_synth_op)(event_t *self, struct perf_session *session);
typedef int (*event_op2)(event_t *self, struct perf_session *session,
struct perf_event_ops *ops);

@ -67,8 +71,8 @@ struct perf_event_ops {
lost,
read,
throttle,
unthrottle,
attr,
unthrottle;
event_synth_op attr,
event_type,
tracing_data,
build_id;

@ -104,6 +108,8 @@ int perf_session__create_kernel_maps(struct perf_session *self);

int do_read(int fd, void *buf, size_t size);
void perf_session__update_sample_type(struct perf_session *self);
void perf_session__set_sample_id_all(struct perf_session *session, bool value);
void perf_session__set_sample_type(struct perf_session *session, u64 type);
void perf_session__remove_thread(struct perf_session *self, struct thread *th);

static inline

@ -170,7 +170,7 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
return repsep_snprintf(bf, size, "%-*s", width, dso_name);
}

return repsep_snprintf(bf, size, "%*Lx", width, self->ip);
return repsep_snprintf(bf, size, "%-*s", width, "[unknown]");
}

/* --sort symbol */

@ -196,7 +196,7 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,

if (verbose) {
char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!';
ret += repsep_snprintf(bf, size, "%*Lx %c ",
ret += repsep_snprintf(bf, size, "%-#*llx %c ",
BITS_PER_LONG / 4, self->ip, o);
}

@ -205,7 +205,7 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
ret += repsep_snprintf(bf + ret, size - ret, "%s",
self->ms.sym->name);
else
ret += repsep_snprintf(bf + ret, size - ret, "%*Lx",
ret += repsep_snprintf(bf + ret, size - ret, "%-#*llx",
BITS_PER_LONG / 4, self->ip);

return ret;