9a556ab998
Currently, kprobes checks whether the copied instruction modifies IF (the interrupt flag) on every probe hit. This not only adds overhead but also pulls inat_get_opcode_attribute() into the kprobes hot path, which can end in infinite recursion (and, eventually, a kernel panic). Since the copied instruction in the buffer can never be modified after it is copied, there is no need to analyze it on every probe hit. Fix this by checking the instruction only once, when the probe is registered, and storing the result in ainsn->if_modifier.

Reported-by: Timo Juhani Lindfors <timo.lindfors@iki.fi>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: yrl.pp-manager.tt@hitachi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20130314115242.19690.33573.stgit@mhiramat-M0-7522
Signed-off-by: Ingo Molnar <mingo@kernel.org>
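For illustration, below is a minimal sketch of what the registration-time check can look like. It is not necessarily the exact code added by this commit: the helper name insn_modifies_if() is made up, and the open-coded prefix skipping stands in for the kernel's own instruction-decoder helpers.

/* Illustrative sketch: classify the copied instruction once, at
 * registration time, instead of decoding it on every breakpoint hit. */
static bool insn_modifies_if(kprobe_opcode_t *insn)
{
	/* Simplified: skip the common legacy prefixes that may precede
	 * the opcode byte (the kernel has a helper for this). */
	while (*insn == 0x66 || *insn == 0x67 || *insn == 0xf0 ||
	       *insn == 0xf2 || *insn == 0xf3 || *insn == 0x2e ||
	       *insn == 0x36 || *insn == 0x3e || *insn == 0x26 ||
	       *insn == 0x64 || *insn == 0x65)
		insn++;

	switch (*insn) {
	case 0xfa:	/* cli  */
	case 0xfb:	/* sti  */
	case 0xcf:	/* iret */
	case 0x9d:	/* popf */
		return true;
	}
	return false;
}

/* At registration time (while copying the instruction), cache the result: */
/*	p->ainsn.if_modifier = insn_modifies_if(p->ainsn.insn);	      */

With the result cached in ainsn->if_modifier, the breakpoint handler only has to test one boolean when deciding how to handle the saved interrupt flag, instead of re-decoding the instruction on every hit.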
119 lines | 3.8 KiB | C
#ifndef _ASM_X86_KPROBES_H
#define _ASM_X86_KPROBES_H
/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * See arch/x86/kernel/kprobes.c for x86 kprobes history.
 */
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <asm/insn.h>

#define __ARCH_WANT_KPROBES_INSN_SLOT
#define ARCH_SUPPORTS_KPROBES_ON_FTRACE

struct pt_regs;
struct kprobe;

typedef u8 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xcc
#define RELATIVEJUMP_OPCODE 0xe9
#define RELATIVEJUMP_SIZE 5
#define RELATIVECALL_OPCODE 0xe8
#define RELATIVE_ADDR_SIZE 4
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR)						\
	(((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) +	\
			      THREAD_SIZE - (unsigned long)(ADDR)))	\
	 ? (MAX_STACK_SIZE)						\
	 : (((unsigned long)current_thread_info()) +			\
	    THREAD_SIZE - (unsigned long)(ADDR)))
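/*
 * MIN_STACK_SIZE(ADDR) evaluates to the number of bytes that can safely be
 * copied starting at ADDR: at most MAX_STACK_SIZE, but never past the end of
 * the current thread's kernel stack.  Illustrative use (roughly how a jprobe
 * pre-handler might save the stack into jprobes_stack below; names here are
 * only an example):
 *
 *	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
 *	       MIN_STACK_SIZE(addr));
 */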
#define flush_insn_slot(p) do { } while (0)

/* optinsn template addresses */
extern kprobe_opcode_t optprobe_template_entry;
extern kprobe_opcode_t optprobe_template_val;
extern kprobe_opcode_t optprobe_template_call;
extern kprobe_opcode_t optprobe_template_end;
#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
#define MAX_OPTINSN_SIZE				\
	(((unsigned long)&optprobe_template_end -	\
	  (unsigned long)&optprobe_template_entry) +	\
	 MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE)
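/*
 * Rough layout the detour buffer of an optimized probe has to cover, which is
 * what MAX_OPTINSN_SIZE accounts for: the optprobe template
 * (optprobe_template_entry .. optprobe_template_end), followed by the copied
 * original instructions (at most MAX_OPTIMIZED_LENGTH bytes), followed by a
 * relative jump (RELATIVEJUMP_SIZE bytes) back to the original code stream.
 */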
extern const int kretprobe_blacklist_size;

void arch_remove_kprobe(struct kprobe *p);
void kretprobe_trampoline(void);

/* Architecture specific copy of original instruction */
struct arch_specific_insn {
	/* copy of the original instruction */
	kprobe_opcode_t *insn;
	/*
	 * boostable = -1: This instruction type is not boostable.
	 * boostable = 0: This instruction type is boostable.
	 * boostable = 1: This instruction has been boosted: we have
	 * added a relative jump after the instruction copy in insn,
	 * so no single-step and fixup are needed (unless there's
	 * a post_handler or break_handler).
	 */
	int boostable;
	/* Does the copied instruction modify IF?  Determined once at probe
	 * registration time, so the probe hit path need not re-decode it. */
	bool if_modifier;
};

struct arch_optimized_insn {
	/* copy of the original instructions */
	kprobe_opcode_t copied_insn[RELATIVE_ADDR_SIZE];
	/* detour code buffer */
	kprobe_opcode_t *insn;
	/* the size of instructions copied to detour code buffer */
	size_t size;
};

/* Return true (!0) if optinsn is prepared for optimization. */
static inline int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->size;
}

struct prev_kprobe {
	struct kprobe *kp;
	unsigned long status;
	unsigned long old_flags;
	unsigned long saved_flags;
};

/* per-cpu kprobe control block */
struct kprobe_ctlblk {
	unsigned long kprobe_status;
	unsigned long kprobe_old_flags;
	unsigned long kprobe_saved_flags;
	unsigned long *jprobe_saved_sp;
	struct pt_regs jprobe_saved_regs;
	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
	struct prev_kprobe prev_kprobe;
};

extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data);
#endif /* _ASM_X86_KPROBES_H */
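For context, here is a minimal module sketch showing where registration happens, loosely following the usual kprobes sample pattern; the probed symbol name is illustrative. With this change, the copied instruction is analyzed for IF modification once, inside register_kprobe(), rather than on every breakpoint hit.

#include <linux/kprobes.h>
#include <linux/module.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* illustrative target symbol */
	.pre_handler	= handler_pre,
};

static int __init kprobe_example_init(void)
{
	/* The copied instruction is analyzed here, once, at registration. */
	return register_kprobe(&kp);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");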