d61f82d066
This patch replaces the indirect call to the mcount function pointer with a direct call that will be patched by the dynamic ftrace routines.

On boot up, the mcount function calls the ftrace_stub function. When the dynamic ftrace code is initialized, ftrace_stub is replaced with a call to ftrace_record_ip, which records the instruction pointers of the locations that call it. Later, the ftraced daemon will call kstop_machine and patch all of those locations to nops.

When ftrace is enabled, the original calls to mcount are instead set to call ftrace_caller, which makes a direct call to the registered ftrace function. This direct call is also patched whenever the function that should be called is updated.

All patching is performed by a kstop_machine routine to prevent the race conditions associated with modifying code on the fly.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
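For context, the arch helpers added in the file below are meant to be driven by the generic dynamic-ftrace code. A minimal sketch of how a single recorded mcount call site could be switched from the nop back to a real call might look like the following; the function name example_enable_site and its arguments are illustrative only and not part of the patch, and it assumes the helper prototypes are visible via linux/ftrace.h:

#include <linux/ftrace.h>	/* prototypes for the arch helpers below (assumed) */

/*
 * Illustrative sketch only: enable one recorded call site.
 * @ip:   address mcount recorded (just past the original 5-byte call)
 * @addr: function the site should now call
 */
static int notrace example_enable_site(unsigned long ip, unsigned long addr)
{
        unsigned char *old, *new;

        old = ftrace_nop_replace();             /* the 5-byte nop currently at the site */
        new = ftrace_call_replace(ip, addr);    /* build "call addr": 0xe8 + rel32 */

        /* must run under kstop_machine, as ftrace_modify_code() assumes */
        return ftrace_modify_code(ip, old, new);
}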
159 lines
3.3 KiB
C
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/alternative.h>

/* An x86 "call" is 5 bytes; step back this far from the return address */
#define CALL_BACK 5

/* Long is fine, even if it is only 4 bytes ;-) */
static long *ftrace_nop;

union ftrace_code_union {
        char code[5];
        struct {
                char e8;
                int offset;
        } __attribute__((packed));
};

/* Has the call site behind @ip already been converted to the ftrace nop? */
notrace int ftrace_ip_converted(unsigned long ip)
{
        unsigned long save;

        ip -= CALL_BACK;
        save = *(long *)ip;

        return save == *ftrace_nop;
}

/* rel32 displacement for a call whose return address is @ip */
static int notrace ftrace_calc_offset(long ip, long addr)
{
        return (int)(addr - ip);
}

notrace unsigned char *ftrace_nop_replace(void)
{
        return (char *)ftrace_nop;
}

/* Build the 5-byte "call addr" (0xe8 + rel32) for the site returning to @ip */
notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        static union ftrace_code_union calc;

        calc.e8 = 0xe8;
        calc.offset = ftrace_calc_offset(ip, addr);

        /*
         * No locking needed, this must be called via kstop_machine
         * which in essence is like running on a uniprocessor machine.
         */
        return calc.code;
}

notrace int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                   unsigned char *new_code)
{
        unsigned replaced;
        unsigned old = *(unsigned *)old_code; /* 4 bytes */
        unsigned new = *(unsigned *)new_code; /* 4 bytes */
        unsigned char newch = new_code[4];
        int faulted = 0;

        /* move the IP back to the start of the call */
        ip -= CALL_BACK;

        /*
         * Note: Due to modules and __init, code can
         * disappear and change, we need to protect against faulting
         * as well as code changing.
         *
         * No real locking needed, this code is run through
         * kstop_machine.
         */
        asm volatile (
                "1: lock\n"
                "   cmpxchg %3, (%2)\n"  /* store the first 4 new bytes iff the old 4 still match */
                "   jnz 2f\n"
                "   movb %b4, 4(%2)\n"   /* then write the fifth byte */
                "2:\n"
                ".section .fixup, \"ax\"\n"
                "3:     movl $1, %0\n"   /* a fault on the cmpxchg lands here */
                "       jmp 2b\n"
                ".previous\n"
                _ASM_EXTABLE(1b, 3b)
                : "=r"(faulted), "=a"(replaced)
                : "r"(ip), "r"(new), "r"(newch),
                  "0"(faulted), "a"(old)
                : "memory");
        sync_core();

        if (replaced != old && replaced != new)
                faulted = 2;

        return faulted;
}

/* Repoint the patched call at the ftrace_call site to @func */
notrace int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        unsigned char old[5], *new;
        int ret;

        ip += CALL_BACK;

        memcpy(old, &ftrace_call, 5);
        new = ftrace_call_replace(ip, (unsigned long)func);
        ret = ftrace_modify_code(ip, old, new);

        return ret;
}

notrace int ftrace_mcount_set(unsigned long *data)
{
        unsigned long ip = (long)(&mcount_call);
        unsigned long *addr = data;
        unsigned char old[5], *new;

        /* ip is at the location, but modify code will subtract this */
        ip += CALL_BACK;

        /*
         * Replace the mcount stub with a pointer to the
         * ip recorder function.
         */
        memcpy(old, &mcount_call, 5);
        new = ftrace_call_replace(ip, *addr);
        *addr = ftrace_modify_code(ip, old, new);

        return 0;
}

int __init ftrace_dyn_arch_init(void *data)
{
        const unsigned char *const *noptable = find_nop_table();

        /* This is running in kstop_machine */

        ftrace_mcount_set(data);

        /* remember the CPU's preferred 5-byte nop for patching call sites */
        ftrace_nop = (unsigned long *)noptable[CALL_BACK];

        return 0;
}