2006-07-03 01:24:40 -06:00
|
|
|
/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/stacktrace.h>
|
2006-09-26 02:52:34 -06:00
|
|
|
#include <linux/module.h>
|
2008-11-22 04:28:47 -07:00
|
|
|
#include <linux/uaccess.h>
|
2006-09-26 02:52:34 -06:00
|
|
|
#include <asm/stacktrace.h>
|
2006-07-03 01:24:40 -06:00
|
|
|
|
2006-09-26 02:52:34 -06:00
|
|
|
/*
 * dump_trace() callback invoked at each stack boundary (@name identifies
 * the stack being entered).  We only collect return addresses, so always
 * return 0 to let the walk continue; @data (the stack_trace) is unused.
 */
static int save_stack_stack(void *data, char *name)
{
	return 0;
}
|
2006-07-03 01:24:40 -06:00
|
|
|
|
2010-06-03 13:32:43 -06:00
|
|
|
static void
|
|
|
|
__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
|
2006-09-26 02:52:34 -06:00
|
|
|
{
|
2008-01-30 05:33:23 -07:00
|
|
|
struct stack_trace *trace = data;
|
2010-06-03 13:32:39 -06:00
|
|
|
#ifdef CONFIG_FRAME_POINTER
|
2008-02-22 11:23:58 -07:00
|
|
|
if (!reliable)
|
|
|
|
return;
|
2010-06-03 13:32:39 -06:00
|
|
|
#endif
|
2010-06-03 13:32:43 -06:00
|
|
|
if (nosched && in_sched_functions(addr))
|
|
|
|
return;
|
2006-09-26 02:52:34 -06:00
|
|
|
if (trace->skip > 0) {
|
|
|
|
trace->skip--;
|
|
|
|
return;
|
2006-07-03 01:24:40 -06:00
|
|
|
}
|
2007-02-13 05:26:21 -07:00
|
|
|
if (trace->nr_entries < trace->max_entries)
|
2006-09-26 02:52:34 -06:00
|
|
|
trace->entries[trace->nr_entries++] = addr;
|
2006-07-03 01:24:40 -06:00
|
|
|
}
|
|
|
|
|
2010-06-03 13:32:43 -06:00
|
|
|
/*
 * dump_trace() .address callback for a full trace (scheduler frames kept).
 */
static void save_stack_address(void *data, unsigned long addr, int reliable)
{
	/*
	 * Plain call, not "return <expr>;": a return with an expression in
	 * a void function is a C constraint violation (C99 6.8.6.4), even
	 * when the expression itself has void type.
	 */
	__save_stack_address(data, addr, reliable, false);
}
|
|
|
|
|
2008-01-30 05:33:07 -07:00
|
|
|
/*
 * dump_trace() .address callback that filters out scheduler-internal
 * frames, used when tracing a blocked task.
 */
static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	/*
	 * Plain call, not "return <expr>;": returning an expression from a
	 * void function is a C constraint violation (C99 6.8.6.4).
	 */
	__save_stack_address(data, addr, reliable, true);
}
|
|
|
|
|
2007-10-17 10:04:37 -06:00
|
|
|
static const struct stacktrace_ops save_stack_ops = {
|
2009-12-16 21:40:33 -07:00
|
|
|
.stack = save_stack_stack,
|
|
|
|
.address = save_stack_address,
|
|
|
|
.walk_stack = print_context_stack,
|
2006-09-26 02:52:34 -06:00
|
|
|
};
|
2006-07-03 01:24:40 -06:00
|
|
|
|
2008-01-25 13:08:34 -07:00
|
|
|
static const struct stacktrace_ops save_stack_ops_nosched = {
|
2009-12-16 21:40:33 -07:00
|
|
|
.stack = save_stack_stack,
|
|
|
|
.address = save_stack_address_nosched,
|
|
|
|
.walk_stack = print_context_stack,
|
2008-01-25 13:08:34 -07:00
|
|
|
};
|
|
|
|
|
2006-07-03 01:24:40 -06:00
|
|
|
/*
|
|
|
|
* Save stack-backtrace addresses into a stack_trace buffer.
|
|
|
|
*/
|
2007-05-08 01:23:29 -06:00
|
|
|
void save_stack_trace(struct stack_trace *trace)
|
2006-07-03 01:24:40 -06:00
|
|
|
{
|
2011-03-17 20:40:06 -06:00
|
|
|
dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
|
2007-02-13 05:26:21 -07:00
|
|
|
if (trace->nr_entries < trace->max_entries)
|
|
|
|
trace->entries[trace->nr_entries++] = ULONG_MAX;
|
2006-07-03 01:24:40 -06:00
|
|
|
}
|
2008-06-27 13:20:17 -06:00
|
|
|
EXPORT_SYMBOL_GPL(save_stack_trace);
|
2008-01-25 13:08:34 -07:00
|
|
|
|
x86: Eliminate bp argument from the stack tracing routines
The various stack tracing routines take a 'bp' argument in which the
caller is supposed to provide the base pointer to use, or 0 if doesn't
have one. Since bp is garbage whenever CONFIG_FRAME_POINTER is not
defined, this means all callers in principle should either always pass
0, or be conditional on CONFIG_FRAME_POINTER.
However, there are only really three use cases for stack tracing:
(a) Trace the current task, including IRQ stack if any
(b) Trace the current task, but skip IRQ stack
(c) Trace some other task
In all cases, if CONFIG_FRAME_POINTER is not defined, bp should just
be 0. If it _is_ defined, then
- in case (a) bp should be gotten directly from the CPU's register, so
the caller should pass NULL for regs,
- in case (b) the caller should pass the IRQ registers to
dump_trace(),
- in case (c) bp should be gotten from the top of the task's stack, so
the caller should pass NULL for regs.
Hence, the bp argument is not necessary because the combination of
task and regs is sufficient to determine an appropriate value for bp.
This patch introduces a new inline function stack_frame(task, regs)
that computes the desired bp. This function is then called from the
two versions of dump_stack().
Signed-off-by: Soren Sandmann <ssp@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arjan van de Ven <arjan@infradead.org>,
Cc: Frederic Weisbecker <fweisbec@gmail.com>,
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>,
LKML-Reference: <m3oc9rop28.fsf@dhcp-100-3-82.bos.redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
2010-11-05 03:59:39 -06:00
|
|
|
void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs)
|
2008-05-20 03:15:43 -06:00
|
|
|
{
|
2011-03-17 20:40:06 -06:00
|
|
|
dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
|
2008-05-20 03:15:43 -06:00
|
|
|
if (trace->nr_entries < trace->max_entries)
|
|
|
|
trace->entries[trace->nr_entries++] = ULONG_MAX;
|
|
|
|
}
|
|
|
|
|
2008-01-25 13:08:34 -07:00
|
|
|
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
|
|
|
|
{
|
2011-03-17 20:40:06 -06:00
|
|
|
dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
|
2008-01-25 13:08:34 -07:00
|
|
|
if (trace->nr_entries < trace->max_entries)
|
|
|
|
trace->entries[trace->nr_entries++] = ULONG_MAX;
|
|
|
|
}
|
2008-06-27 13:20:17 -06:00
|
|
|
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
|
2008-11-22 04:28:47 -07:00
|
|
|
|
|
|
|
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
|
|
|
|
|
2010-05-19 13:35:17 -06:00
|
|
|
/*
 * Layout of one userspace stack frame as written by a frame-pointer
 * prologue: saved caller frame pointer, then the return address.
 * Field order mirrors the in-memory layout and must not be changed —
 * copy_stack_frame() reads frames from user memory directly into this.
 */
struct stack_frame_user {
	const void __user *next_fp;	/* saved caller frame pointer */
	unsigned long ret_addr;		/* return address into the caller */
};
|
|
|
|
|
2010-05-19 13:35:17 -06:00
|
|
|
static int
|
|
|
|
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
|
2008-11-22 04:28:47 -07:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = 1;
|
|
|
|
pagefault_disable();
|
|
|
|
if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
|
|
|
|
ret = 0;
|
|
|
|
pagefault_enable();
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-11-23 03:39:06 -07:00
|
|
|
/*
 * Walk the current task's userspace stack via the frame-pointer chain
 * and record return addresses into @trace.  Starts at the user ip, then
 * follows saved frame pointers from regs->bp until the buffer fills or
 * the chain ends.  Statement order in the loop is deliberate: the frame
 * is copied (and validated) before the stack-bounds check, and the
 * self-referencing fp check prevents an infinite loop.
 */
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	/* The user-mode instruction pointer is the first entry. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		/* Pre-zero in case the copy partially fails. */
		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		/* A frame pointer below the stack pointer is bogus. */
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		/* Self-referencing frame pointer: stop, or loop forever. */
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}
|
|
|
|
|
2008-11-22 04:28:47 -07:00
|
|
|
void save_stack_trace_user(struct stack_trace *trace)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Trace user stack if we are not a kernel thread
|
|
|
|
*/
|
|
|
|
if (current->mm) {
|
2008-11-23 03:39:06 -07:00
|
|
|
__save_stack_trace_user(trace);
|
2008-11-22 04:28:47 -07:00
|
|
|
}
|
|
|
|
if (trace->nr_entries < trace->max_entries)
|
|
|
|
trace->entries[trace->nr_entries++] = ULONG_MAX;
|
|
|
|
}
|
|
|
|
|