Blackfin: add support for dynamic ftrace
Signed-off-by: Mike Frysinger <vapier@gentoo.org>

parent 67df6cc665
commit f507442962
5 changed files with 182 additions and 10 deletions

--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -25,6 +25,8 @@ config BLACKFIN
 	def_bool y
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
--- a/arch/blackfin/include/asm/ftrace.h
+++ b/arch/blackfin/include/asm/ftrace.h
@@ -12,6 +12,22 @@
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+extern void _mcount(void);
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	return addr;
+}
+
+struct dyn_arch_ftrace {
+	/* No extra data needed for Blackfin */
+};
+
+#endif
+
 #ifdef CONFIG_FRAME_POINTER
 #include <linux/mm.h>
 
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -16,6 +16,7 @@ else
 obj-y += time.o
 endif
 
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 CFLAGS_REMOVE_ftrace.o = -pg
--- a/arch/blackfin/kernel/ftrace-entry.S
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -10,6 +10,18 @@
 
 .text
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* Simple stub so we can boot the kernel until runtime patching has
+ * disabled all calls to this.  Then it'll be unused.
+ */
+ENTRY(__mcount)
+# if ANOMALY_05000371
+	nop; nop; nop; nop;
+# endif
+	rts;
+ENDPROC(__mcount)
+
 /* GCC will have called us before setting up the function prologue, so we
  * can clobber the normal scratch registers, but we need to make sure to
  * save/restore the registers used for argument passing (R0-R2) in case
@@ -20,15 +32,65 @@
  * function.  And since GCC pushed the previous RETS for us, the previous
  * function will be waiting there.  mmmm pie.
  */
-ENTRY(__mcount)
-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ENTRY(_ftrace_caller)
+# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	/* optional micro optimization: return if stopped */
 	p1.l = _function_trace_stop;
 	p1.h = _function_trace_stop;
 	r3 = [p1];
 	cc = r3 == 0;
 	if ! cc jump _ftrace_stub (bp);
-#endif
+# endif
 
+	/* save first/second/third function arg and the return register */
+	[--sp] = r2;
+	[--sp] = r0;
+	[--sp] = r1;
+	[--sp] = rets;
+
+	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
+	 *  ip: this point was called by ...
+	 *  parent_ip: ... this function
+	 * the ip itself will need adjusting for the mcount call
+	 */
+	r0 = rets;
+	r1 = [sp + 16];	/* skip the 4 local regs on stack */
+	r0 += -MCOUNT_INSN_SIZE;
+
+.globl _ftrace_call
+_ftrace_call:
+	call _ftrace_stub
+
+# ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl _ftrace_graph_call
+_ftrace_graph_call:
+	nop;	/* jump _ftrace_graph_caller; */
+# endif
+
+	/* restore state and get out of dodge */
+.Lfinish_trace:
+	rets = [sp++];
+	r1 = [sp++];
+	r0 = [sp++];
+	r2 = [sp++];
+
+.globl _ftrace_stub
+_ftrace_stub:
+	rts;
+ENDPROC(_ftrace_caller)
+
+#else
+
+/* See documentation for _ftrace_caller */
+ENTRY(__mcount)
+# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	/* optional micro optimization: return if stopped */
+	p1.l = _function_trace_stop;
+	p1.h = _function_trace_stop;
+	r3 = [p1];
+	cc = r3 == 0;
+	if ! cc jump _ftrace_stub (bp);
+# endif
+
 	/* save third function arg early so we can do testing below */
 	[--sp] = r2;
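
The block comment above pins down the calling convention the patched _ftrace_call slot ends up dispatching to: a two-argument callback func(ip, parent_ip), with r0 carrying the traced function's address (RETS minus MCOUNT_INSN_SIZE) and r1 the caller's return address pulled off the stack. As an illustration only (not part of this patch), a minimal consumer of that convention using the two-argument ftrace_func_t of this kernel generation would look roughly like the sketch below; the names my_trace_func and my_trace_ops are invented for the example.

/* Illustrative sketch, not part of the patch: registering a function-trace
 * callback with the two-argument ftrace_func_t used by this kernel era.
 */
#include <linux/ftrace.h>
#include <linux/module.h>

static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* ip        = address of the traced function (r0 above)
	 * parent_ip = address of its caller          (r1 above)
	 */
}

static struct ftrace_ops my_trace_ops = {
	.func = my_trace_func,
};

static int __init my_trace_init(void)
{
	return register_ftrace_function(&my_trace_ops);
}

static void __exit my_trace_exit(void)
{
	unregister_ftrace_function(&my_trace_ops);
}

module_init(my_trace_init);
module_exit(my_trace_exit);
MODULE_LICENSE("GPL");

With dynamic ftrace, registering the first callback is also what prompts the core to turn every recorded call site back from MNOPs into a real call, via ftrace_make_call() and ftrace_update_ftrace_func() added in ftrace.c further down.
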
@@ -44,7 +106,7 @@ ENTRY(__mcount)
 	cc = r2 == r3;
 	if ! cc jump .Ldo_trace;
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+# ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	/* if the ftrace_graph_return function pointer is not set to
 	 * the ftrace_stub entry, call prepare_ftrace_return().
 	 */
@@ -64,7 +126,7 @@ ENTRY(__mcount)
 	r3 = [p0];
 	cc = r2 == r3;
 	if ! cc jump _ftrace_graph_caller;
-#endif
+# endif
 
 	r2 = [sp++];
 	rts;
@@ -103,6 +165,8 @@ _ftrace_stub:
 	rts;
 ENDPROC(__mcount)
 
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /* The prepare_ftrace_return() function is similar to the trace function
  * except it takes a pointer to the location of the frompc.  This is so
@@ -110,6 +174,7 @@ ENDPROC(__mcount)
  * purposes.
  */
 ENTRY(_ftrace_graph_caller)
+# ifndef CONFIG_DYNAMIC_FTRACE
 	/* save first/second function arg and the return register */
 	[--sp] = r0;
 	[--sp] = r1;
@@ -118,9 +183,13 @@ ENTRY(_ftrace_graph_caller)
 	/* prepare_ftrace_return(parent, self_addr, frame_pointer) */
 	r0 = sp;	/* unsigned long *parent */
 	r1 = rets;	/* unsigned long self_addr */
-#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+# else
+	r0 = sp;	/* unsigned long *parent */
+	r1 = [sp];	/* unsigned long self_addr */
+# endif
+# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
 	r2 = fp;	/* unsigned long frame_pointer */
-#endif
+# endif
 	r0 += 16;	/* skip the 4 local regs on stack */
 	r1 += -MCOUNT_INSN_SIZE;
 	call _prepare_ftrace_return;
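
The comment at the top of this hunk gives the C prototype the assembly is matching. The function itself already lives further down in arch/blackfin/kernel/ftrace.c and is not changed here; the schematic below only sketches its role so the register setup above is easier to follow. It is not the real implementation: the graph caller hands over a pointer to the saved return address so the hook can record the original value and divert the traced function's return into _return_to_handler.

/* Schematic only; the real prepare_ftrace_return() predates this patch.
 * 'parent' points at the saved return address on the stack (r0 above),
 * 'self_addr' is the traced function (r1), 'frame_pointer' is FP (r2).
 */
extern void return_to_handler(void);

void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	/* 1. read the original return address through 'parent'
	 * 2. push it, together with 'self_addr' (and 'frame_pointer' when
	 *    CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled), onto the
	 *    per-task return-trace stack; give up here if that fails
	 * 3. divert the traced function's return into the tracer:
	 */
	*parent = (unsigned long)&return_to_handler;
}
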
@@ -139,9 +208,9 @@ ENTRY(_return_to_handler)
 	[--sp] = r1;
 
 	/* get original return address */
-#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
 	r0 = fp;	/* Blackfin is sane, so omit this */
-#endif
+# endif
 	call _ftrace_return_to_handler;
 	rets = r0;
 
--- a/arch/blackfin/kernel/ftrace.c
+++ b/arch/blackfin/kernel/ftrace.c
@@ -1,17 +1,101 @@
 /*
  * ftrace graph code
  *
- * Copyright (C) 2009 Analog Devices Inc.
+ * Copyright (C) 2009-2010 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/uaccess.h>
 #include <asm/atomic.h>
+#include <asm/cacheflush.h>
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+static const unsigned char mnop[] = {
+	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
+	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
+};
+
+static void bfin_make_pcrel24(unsigned char *insn, unsigned long src,
+                              unsigned long dst)
+{
+	uint32_t pcrel = (dst - src) >> 1;
+	insn[0] = pcrel >> 16;
+	insn[1] = 0xe3;
+	insn[2] = pcrel;
+	insn[3] = pcrel >> 8;
+}
+#define bfin_make_pcrel24(insn, src, dst) bfin_make_pcrel24(insn, src, (unsigned long)(dst))
+
+static int ftrace_modify_code(unsigned long ip, const unsigned char *code,
+                              unsigned long len)
+{
+	int ret = probe_kernel_write((void *)ip, (void *)code, len);
+	flush_icache_range(ip, ip + len);
+	return ret;
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+                    unsigned long addr)
+{
+	/* Turn the mcount call site into two MNOPs as those are 32bit insns */
+	return ftrace_modify_code(rec->ip, mnop, sizeof(mnop));
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	/* Restore the mcount call site */
+	unsigned char call[8];
+	call[0] = 0x67; /* [--SP] = RETS; */
+	call[1] = 0x01;
+	bfin_make_pcrel24(&call[2], rec->ip + 2, addr);
+	call[6] = 0x27; /* RETS = [SP++]; */
+	call[7] = 0x01;
+	return ftrace_modify_code(rec->ip, call, sizeof(call));
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned char call[4];
+	unsigned long ip = (unsigned long)&ftrace_call;
+	bfin_make_pcrel24(call, ip, func);
+	return ftrace_modify_code(ip, call, sizeof(call));
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+	/* return value is done indirectly via data */
+	*(unsigned long *)data = 0;
+
+	return 0;
+}
+
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+# ifdef CONFIG_DYNAMIC_FTRACE
+
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)&ftrace_graph_call;
+	uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1;
+	jump_pcrel12 |= 0x2000;
+	return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_code((unsigned long)&ftrace_graph_call, empty_zero_page, 2);
+}
+
+# endif
+
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
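
The byte patterns patched in above are easy to sanity-check outside the kernel. The throwaway user-space program below (illustrative only; the addresses are made up) mirrors bfin_make_pcrel24() and the 8-byte call-site image built by ftrace_make_call(): two bytes for [--SP] = RETS;, a 4-byte pcrel24 CALL whose displacement is counted in 16-bit units from the start of the CALL itself (hence rec->ip + 2), and two bytes for RETS = [SP++];. It also reproduces the 2-byte pcrel12 jump word that ftrace_enable_ftrace_graph_caller() writes over the nop at _ftrace_graph_call.

/* Host-side sanity check for the byte patterns used above (illustrative;
 * the example addresses 0x1000, 0x2000, 0x1200 and 0x1300 are made up).
 */
#include <stdio.h>
#include <stdint.h>

static void bfin_make_pcrel24(unsigned char *insn, unsigned long src,
                              unsigned long dst)
{
	uint32_t pcrel = (dst - src) >> 1;	/* displacement in 16-bit units */
	insn[0] = pcrel >> 16;			/* low byte of first insn word */
	insn[1] = 0xe3;				/* CALL pcrel24 opcode byte */
	insn[2] = pcrel;			/* second insn word, low byte */
	insn[3] = pcrel >> 8;
}

int main(void)
{
	unsigned long ip = 0x1000, target = 0x2000;	/* hypothetical rec->ip / _ftrace_caller */
	unsigned char call[8];
	uint16_t jmp;
	int i;

	/* ftrace_make_call() image: push RETS, CALL, pop RETS */
	call[0] = 0x67; call[1] = 0x01;			/* [--SP] = RETS; */
	bfin_make_pcrel24(&call[2], ip + 2, target);	/* CALL starts at ip + 2 */
	call[6] = 0x27; call[7] = 0x01;			/* RETS = [SP++]; */

	printf("call site @%#lx -> %#lx:", ip, target);
	for (i = 0; i < 8; i++)
		printf(" %02x", call[i]);
	printf("\n");					/* 67 01 00 e3 ff 07 27 01 */

	/* ftrace_enable_ftrace_graph_caller() image: short jump, pcrel12 */
	jmp = ((0x1300 - 0x1200) >> 1) | 0x2000;	/* hypothetical graph_caller/graph_call */
	printf("jump.s word: %#06x\n", (unsigned)jmp);	/* 0x2080 */
	return 0;
}

At boot the generic core uses ftrace_make_nop() to overwrite each such call site with the two MNOPs above, which is why the CONFIG_DYNAMIC_FTRACE __mcount stub in ftrace-entry.S only has to survive until that pass has run.
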