kprobes/x86: ftrace based optimization for x86
Add function tracer based kprobe optimization support handlers on x86. This allows kprobes to use the function tracer for probing at mcount call sites.

Link: http://lkml.kernel.org/r/20120605102838.27845.26317.stgit@localhost.localdomain

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>

[ Updated to new port of ftrace save regs functions ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent ae6aa16fdc
commit e525389651
4 changed files with 51 additions and 2 deletions
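For context (not part of this commit): a kprobe placed on a function's entry, i.e. on the ftrace/mcount call site, is registered from a module in the usual way; on x86 with ARCH_SUPPORTS_KPROBES_ON_FTRACE such a probe can then be serviced through the kprobe_ftrace_handler() added below instead of an int3 breakpoint. A minimal sketch using the standard kprobes API; the probed symbol "do_fork" is only an example.

/* Illustrative sketch only -- not part of this commit.
 * Minimal module placing a kprobe on a function entry; with
 * ARCH_SUPPORTS_KPROBES_ON_FTRACE a probe on the mcount location
 * can be dispatched via the function tracer instead of int3.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int sample_pre(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("kprobe hit at %s, ip = %lx\n", p->symbol_name, regs->ip);
        return 0;
}

static struct kprobe kp = {
        .symbol_name    = "do_fork",    /* example target only */
        .pre_handler    = sample_pre,
};

static int __init sample_init(void)
{
        return register_kprobe(&kp);
}

static void __exit sample_exit(void)
{
        unregister_kprobe(&kp);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");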
arch/x86/include/asm/kprobes.h
@@ -27,6 +27,7 @@
 #include <asm/insn.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
+#define ARCH_SUPPORTS_KPROBES_ON_FTRACE
 
 struct pt_regs;
 struct kprobe;
arch/x86/kernel/kprobes.c
@@ -1052,6 +1052,54 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
+#ifdef KPROBES_CAN_USE_FTRACE
+/* Ftrace callback handler for kprobes */
+void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+				     struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+	unsigned long flags;
+
+	/* Disable irq for emulating a breakpoint and avoiding preempt */
+	local_irq_save(flags);
+
+	p = get_kprobe((kprobe_opcode_t *)ip);
+	if (unlikely(!p) || kprobe_disabled(p))
+		goto end;
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		regs->ip += sizeof(kprobe_opcode_t);
+
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (p->pre_handler)
+			p->pre_handler(p, regs);
+
+		if (unlikely(p->post_handler)) {
+			/* Emulate singlestep as if there is a 5byte nop */
+			regs->ip = ip + MCOUNT_INSN_SIZE;
+			kcb->kprobe_status = KPROBE_HIT_SSDONE;
+			p->post_handler(p, regs, 0);
+		}
+		__this_cpu_write(current_kprobe, NULL);
+		regs->ip = ip;	/* Recover for next callback */
+	}
+end:
+	local_irq_restore(flags);
+}
+
+int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.insn = NULL;
+	p->ainsn.boostable = -1;
+	return 0;
+}
+#endif
+
 int __init arch_init_kprobes(void)
 {
 	return arch_init_optprobes();
include/linux/kprobes.h
@@ -318,7 +318,7 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
 #endif /* CONFIG_OPTPROBES */
 #ifdef KPROBES_CAN_USE_FTRACE
 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-				  struct pt_regs *regs);
+				  struct ftrace_ops *ops, struct pt_regs *regs);
 extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
 #endif
 
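A minimal sketch (not part of this diff) of how the generic kprobes core can be expected to pick between the two arch hooks declared above, depending on whether the probe address sits on an ftrace/mcount location. prepare_kprobe() and kprobe_ftrace() are illustrative names here, not taken from this patch.

/* Sketch only -- not part of this diff. */
static int prepare_kprobe(struct kprobe *p)
{
        if (!kprobe_ftrace(p))
                /* regular probe: copy and decode the original instruction */
                return arch_prepare_kprobe(p);

        /* ftrace-based probe: no instruction slot needs to be prepared */
        return arch_prepare_kprobe_ftrace(p);
}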
kernel/kprobes.c
@@ -921,7 +921,7 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 
 #ifdef KPROBES_CAN_USE_FTRACE
 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
-	.regs_func = kprobe_ftrace_handler,
+	.func = kprobe_ftrace_handler,
 	.flags = FTRACE_OPS_FL_SAVE_REGS,
 };
 static int kprobe_ftrace_enabled;
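For illustration (not part of this commit): the updated ftrace interface this change moves to has no separate .regs_func; a single .func callback receives the saved pt_regs when the ops sets FTRACE_OPS_FL_SAVE_REGS. A rough sketch of registering such a callback directly, assuming register_ftrace_function() and ftrace_set_filter() as in this kernel era; the filtered symbol is only an example.

/* Illustrative sketch only -- not part of this commit.
 * A "save regs" ftrace callback registered directly, i.e. the same
 * interface kprobe_ftrace_ops now uses (.func + FTRACE_OPS_FL_SAVE_REGS
 * rather than the older .regs_func).
 */
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/string.h>

static void my_trace_func(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *ops, struct pt_regs *regs)
{
        /* regs is only valid because FTRACE_OPS_FL_SAVE_REGS is set */
        pr_info("traced %pS, ip = %lx\n", (void *)ip, regs->ip);
}

static struct ftrace_ops my_ops __read_mostly = {
        .func   = my_trace_func,
        .flags  = FTRACE_OPS_FL_SAVE_REGS,
};

static int __init my_init(void)
{
        /* trace only the example symbol, then enable the callback */
        ftrace_set_filter(&my_ops, "do_fork", strlen("do_fork"), 0);
        return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
        unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");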