2006-11-16 14:38:57 -07:00
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/stacktrace.h>
|
|
|
|
#include <linux/thread_info.h>
|
2010-04-21 04:08:11 -06:00
|
|
|
#include <linux/ftrace.h>
|
2011-07-22 11:18:16 -06:00
|
|
|
#include <linux/export.h>
|
2006-11-16 14:38:57 -07:00
|
|
|
#include <asm/ptrace.h>
|
2008-03-24 21:06:24 -06:00
|
|
|
#include <asm/stacktrace.h>
|
2006-11-16 14:38:57 -07:00
|
|
|
|
2008-08-12 19:33:56 -06:00
|
|
|
#include "kstack.h"
|
|
|
|
|
2008-11-28 02:19:41 -07:00
|
|
|
/*
 * Walk the kernel stack of the thread described by @tp and record
 * return addresses into @trace.
 *
 * @tp:         thread_info of the task whose stack is walked
 * @trace:      output; honors trace->skip and trace->max_entries
 * @skip_sched: when true, entries inside scheduler functions are
 *              omitted (used for save_stack_trace_tsk)
 */
static void __save_stack_trace(struct thread_info *tp,
			       struct stack_trace *trace,
			       bool skip_sched)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	struct task_struct *t;
	int graph = 0;	/* how many ret_stack entries consumed so far */
#endif

	if (tp == current_thread_info()) {
		/* Walking our own stack: flush register windows to memory
		 * first, then start from the live frame pointer. */
		stack_trace_flush();
		__asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
	} else {
		/* Sleeping task: use the kernel stack pointer saved at
		 * context switch. */
		ksp = tp->ksp;
	}

	/* sparc64 stack pointers are biased by STACK_BIAS (2047). */
	fp = ksp + STACK_BIAS;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	t = tp->task;
#endif
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		/* Stop when the frame pointer leaves the valid kernel
		 * stack range for this thread. */
		if (!kstack_valid(tp, fp))
			break;

		sf = (struct sparc_stackf *) fp;
		/* A trap frame, if present, sits immediately above the
		 * register-window save area. */
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs)) {
			/* Don't cross into a user-mode trap frame. */
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			/* Ordinary frame: caller's PC and saved %fp. */
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		if (trace->skip > 0)
			trace->skip--;
		else if (!skip_sched || !in_sched_functions(pc)) {
			trace->entries[trace->nr_entries++] = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			/* On sparc the graph tracer patches the return
			 * address so that pc+8 is return_to_handler;
			 * recover the real return address from the
			 * task's ret_stack and emit it as well. */
			if ((pc + 8UL) == (unsigned long) &return_to_handler) {
				int index = t->curr_ret_stack;
				if (t->ret_stack && index >= graph) {
					pc = t->ret_stack[index - graph].ret;
					if (trace->nr_entries <
					    trace->max_entries)
						trace->entries[trace->nr_entries++] = pc;
					graph++;
				}
			}
#endif
		}
	} while (trace->nr_entries < trace->max_entries);
}
|
2008-11-28 02:19:41 -07:00
|
|
|
|
|
|
|
void save_stack_trace(struct stack_trace *trace)
|
|
|
|
{
|
|
|
|
__save_stack_trace(current_thread_info(), trace, false);
|
|
|
|
}
|
2008-07-03 01:17:55 -06:00
|
|
|
EXPORT_SYMBOL_GPL(save_stack_trace);
|
2008-11-28 02:19:41 -07:00
|
|
|
|
|
|
|
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
|
|
|
|
{
|
|
|
|
struct thread_info *tp = task_thread_info(tsk);
|
|
|
|
|
|
|
|
__save_stack_trace(tp, trace, true);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
|