Merge branch 'tip/tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace
commit 4040068dce
8 changed files with 42 additions and 39 deletions
arch/x86/kernel/ftrace.c

@@ -468,8 +468,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	 * ignore such a protection.
 	 */
 	asm volatile(
-		"1: " _ASM_MOV " (%[parent_old]), %[old]\n"
-		"2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
+		"1: " _ASM_MOV " (%[parent]), %[old]\n"
+		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
 		"   movl $0, %[faulted]\n"
 		"3:\n"

@@ -481,9 +481,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		_ASM_EXTABLE(1b, 4b)
 		_ASM_EXTABLE(2b, 4b)

-		: [parent_replaced] "=r" (parent), [old] "=r" (old),
-		  [faulted] "=r" (faulted)
-		: [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+		: [old] "=r" (old), [faulted] "=r" (faulted)
+		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
 		: "memory"
 	);
include/linux/ring_buffer.h

@@ -8,7 +8,7 @@ struct ring_buffer;
 struct ring_buffer_iter;

 /*
- * Don't reference this struct directly, use functions below.
+ * Don't refer to this struct directly, use functions below.
  */
 struct ring_buffer_event {
 	u32		type:2, len:3, time_delta:27;
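A side note on the struct this comment guards: the three bitfields pack the event header into exactly one 32-bit word (2 + 3 + 27 = 32), which is part of why direct access is discouraged in favor of the accessor functions. A minimal standalone sketch (not kernel code) that checks the packing:

	#include <stdint.h>

	/* Mirrors the three bitfields above: 2 + 3 + 27 = 32 bits. */
	struct event_header {
		uint32_t type:2, len:3, time_delta:27;
	};

	/* Bitfield layout is implementation-defined; this holds on compilers
	 * that pack adjacent small bitfields into a single 32-bit unit. */
	_Static_assert(sizeof(struct event_header) == 4,
		       "event header should be one 32-bit word");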
kernel/trace/ring_buffer.c

@@ -91,7 +91,7 @@ EXPORT_SYMBOL_GPL(tracing_off);
  * tracing_off_permanent - permanently disable ring buffers
  *
  * This function, once called, will disable all ring buffers
- * permanenty.
+ * permanently.
  */
 void tracing_off_permanent(void)
 {
@@ -210,7 +210,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);

 struct buffer_data_page {
 	u64		 time_stamp;	/* page time stamp */
-	local_t		 commit;	/* write commited index */
+	local_t		 commit;	/* write committed index */
 	unsigned char	 data[];	/* data of buffer page */
 };
@@ -260,7 +260,7 @@ struct ring_buffer_per_cpu {
 	struct list_head	pages;
 	struct buffer_page	*head_page;	/* read from head */
 	struct buffer_page	*tail_page;	/* write to tail */
-	struct buffer_page	*commit_page;	/* commited pages */
+	struct buffer_page	*commit_page;	/* committed pages */
 	struct buffer_page	*reader_page;
 	unsigned long		overrun;
 	unsigned long		entries;
@@ -303,7 +303,7 @@ struct ring_buffer_iter {
  * check_pages - integrity check of buffer pages
  * @cpu_buffer: CPU buffer with pages to test
  *
- * As a safty measure we check to make sure the data pages have not
+ * As a safety measure we check to make sure the data pages have not
  * been corrupted.
  */
 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2332,13 +2332,14 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);

 static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
-			      struct buffer_data_page *bpage)
+			      struct buffer_data_page *bpage,
+			      unsigned int offset)
 {
 	struct ring_buffer_event *event;
 	unsigned long head;

 	__raw_spin_lock(&cpu_buffer->lock);
-	for (head = 0; head < local_read(&bpage->commit);
+	for (head = offset; head < local_read(&bpage->commit);
 	     head += rb_event_length(event)) {

 		event = __rb_data_page_index(bpage, head);
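The new offset argument presumably lets this walk skip data that was already consumed in place: starting at 0 on a partially-read page would subtract those earlier events from the entry counters a second time.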
@@ -2406,12 +2407,12 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * to swap with a page in the ring buffer.
  *
  * for example:
- *	rpage = ring_buffer_alloc_page(buffer);
+ *	rpage = ring_buffer_alloc_read_page(buffer);
  *	if (!rpage)
  *		return error;
  *	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
- *	if (ret)
- *		process_page(rpage);
+ *	if (ret >= 0)
+ *		process_page(rpage, ret);
  *
  * When @full is set, the function will not return true unless
  * the writer is off the reader page.
@@ -2422,8 +2423,8 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
  * responsible for that.
  *
  * Returns:
- *  1 if data has been transferred
- *  0 if no data has been transferred.
+ *  >=0 if data has been transferred, returns the offset of consumed data.
+ *  <0 if no data has been transferred.
  */
 int ring_buffer_read_page(struct ring_buffer *buffer,
 			  void **data_page, int cpu, int full)
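Below is a hedged sketch of a consumer using the new return convention; process_page() is the same placeholder the kernel-doc example above uses, and the exact error value is only an illustration:

	#include <linux/errno.h>
	#include <linux/ring_buffer.h>

	extern void process_page(void *page, int offset);	/* placeholder */

	static int read_one_page(struct ring_buffer *buffer, int cpu)
	{
		void *rpage = ring_buffer_alloc_read_page(buffer);
		int ret;

		if (!rpage)
			return -ENOMEM;

		ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
		if (ret >= 0)
			/* events start at byte offset 'ret' into the page */
			process_page(rpage, ret);

		ring_buffer_free_read_page(buffer, rpage);
		return ret;
	}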
@@ -2432,7 +2433,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	struct buffer_data_page *bpage;
 	unsigned long flags;
-	int ret = 0;
+	unsigned int read;
+	int ret = -1;

 	if (!data_page)
 		return 0;
@@ -2454,25 +2456,29 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	/* check for data */
 	if (!local_read(&cpu_buffer->reader_page->page->commit))
 		goto out;

+	read = cpu_buffer->reader_page->read;
 	/*
 	 * If the writer is already off of the read page, then simply
 	 * switch the read page with the given page. Otherwise
 	 * we need to copy the data from the reader to the writer.
 	 */
 	if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
-		unsigned int read = cpu_buffer->reader_page->read;
+		unsigned int commit = rb_page_commit(cpu_buffer->reader_page);
+		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;

 		if (full)
 			goto out;
 		/* The writer is still on the reader page, we must copy */
-		bpage = cpu_buffer->reader_page->page;
-		memcpy(bpage->data,
-		       cpu_buffer->reader_page->page->data + read,
-		       local_read(&bpage->commit) - read);
+		memcpy(bpage->data + read, rpage->data + read, commit - read);

 		/* consume what was read */
-		cpu_buffer->reader_page += read;
+		cpu_buffer->reader_page->read = commit;
+
+		/* update bpage */
+		local_set(&bpage->commit, commit);
+		if (!read)
+			bpage->time_stamp = rpage->time_stamp;
 	} else {
 		/* swap the pages */
 		rb_init_page(bpage);
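Buried among the cleanups above is the actual bug fix: the old consume step, cpu_buffer->reader_page += read, did pointer arithmetic on the buffer_page pointer itself instead of advancing the page's read index; the new code records consumption by setting reader_page->read to the commit point.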
@@ -2481,10 +2487,10 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		cpu_buffer->reader_page->read = 0;
 		*data_page = bpage;
 	}
-	ret = 1;
+	ret = read;

 	/* update the entry counter */
-	rb_remove_entries(cpu_buffer, bpage);
+	rb_remove_entries(cpu_buffer, bpage, read);
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
kernel/trace/trace.c

@@ -1963,7 +1963,7 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
 	struct tracer_opt *trace_opts = current_trace->flags->opts;


-	/* calulate max size */
+	/* calculate max size */
 	for (i = 0; trace_options[i]; i++) {
 		len += strlen(trace_options[i]);
 		len += 3; /* "no" and space */
kernel/trace/trace_branch.c

@@ -91,8 +91,6 @@ void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)

 int enable_branch_tracing(struct trace_array *tr)
 {
-	int ret = 0;
-
 	mutex_lock(&branch_tracing_mutex);
 	branch_tracer = tr;
 	/*
@@ -103,7 +101,7 @@ int enable_branch_tracing(struct trace_array *tr)
 	branch_tracing_enabled++;
 	mutex_unlock(&branch_tracing_mutex);

-	return ret;
+	return 0;
 }

 void disable_branch_tracing(void)
kernel/trace/trace_functions_graph.c

@@ -186,30 +186,30 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
 	ret = trace_seq_printf(s,
 		" ------------------------------------------\n");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;

 	ret = print_graph_cpu(s, cpu);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;

 	ret = print_graph_proc(s, prev_pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;

 	ret = trace_seq_printf(s, " => ");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;

 	ret = print_graph_proc(s, pid);
 	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;

 	ret = trace_seq_printf(s,
 		"\n ------------------------------------------\n\n");
 	if (!ret)
-		TRACE_TYPE_PARTIAL_LINE;
+		return TRACE_TYPE_PARTIAL_LINE;

-	return ret;
+	return TRACE_TYPE_HANDLED;
 }

 static struct ftrace_graph_ret_entry *
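The bug throughout this hunk is a missing return keyword: a bare TRACE_TYPE_PARTIAL_LINE; is an expression statement with no effect, so each of these error checks silently fell through. With the returns in place, the success path now reports TRACE_TYPE_HANDLED explicitly rather than leaking the last trace_seq_printf() result.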
kernel/trace/trace_hw_branches.c

@@ -75,7 +75,7 @@ static void bts_trace_start(struct trace_array *tr)
 }

 /*
- * Start tracing on the current cpu.
+ * Stop tracing on the current cpu.
  * The argument is ignored.
  *
  * pre: bts_tracer_mutex must be locked.
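Judging from the hunk context (the closing brace of bts_trace_start sits directly above), this comment documents the following stop routine, so "Start" was a copy-paste slip.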
kernel/trace/trace_sysprof.c

@@ -88,7 +88,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 	}
 }

-const static struct stacktrace_ops backtrace_ops = {
+static const struct stacktrace_ops backtrace_ops = {
 	.warning	= backtrace_warning,
 	.warning_symbol	= backtrace_warning_symbol,
 	.stack		= backtrace_stack,
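Both orderings are legal C, but a storage-class specifier placed anywhere other than first in the declaration is listed as obsolescent by the standard (C99 6.11.5), and gcc's -Wold-style-declaration flags it; "static const" is also the kernel's preferred style.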