2005-04-16 16:20:36 -06:00
|
|
|
#ifndef _LINUX_PTRACE_H
|
|
|
|
#define _LINUX_PTRACE_H
|
|
|
|
/* ptrace.h */
|
|
|
|
/* structs and defines to help the user use the ptrace system call. */
|
|
|
|
|
|
|
|
/* has the defines to get at the registers. */
|
|
|
|
|
|
|
|
#define PTRACE_TRACEME 0
|
|
|
|
#define PTRACE_PEEKTEXT 1
|
|
|
|
#define PTRACE_PEEKDATA 2
|
|
|
|
#define PTRACE_PEEKUSR 3
|
|
|
|
#define PTRACE_POKETEXT 4
|
|
|
|
#define PTRACE_POKEDATA 5
|
|
|
|
#define PTRACE_POKEUSR 6
|
|
|
|
#define PTRACE_CONT 7
|
|
|
|
#define PTRACE_KILL 8
|
|
|
|
#define PTRACE_SINGLESTEP 9
|
|
|
|
|
|
|
|
#define PTRACE_ATTACH 0x10
|
|
|
|
#define PTRACE_DETACH 0x11
|
|
|
|
|
|
|
|
#define PTRACE_SYSCALL 24
|
[PATCH] UML Support - Ptrace: adds the host SYSEMU support, for UML and general usage
Jeff Dike <jdike@addtoit.com>,
Paolo 'Blaisorblade' Giarrusso <blaisorblade_spam@yahoo.it>,
Bodo Stroesser <bstroesser@fujitsu-siemens.com>
Adds a new ptrace(2) mode, called PTRACE_SYSEMU, resembling PTRACE_SYSCALL
except that the kernel does not execute the requested syscall; this is useful
to improve performance for virtual environments, like UML, which want to run
the syscall on their own.
In fact, using PTRACE_SYSCALL means stopping child execution twice, on entry
and on exit, and each time you also have two context switches; with SYSEMU you
avoid the 2nd stop and so save two context switches per syscall.
Also, some architectures don't have support in the host for changing the
syscall number via ptrace(), which is currently needed to skip syscall
execution (UML turns any syscall into getpid() to avoid it being executed on
the host). Fixing that is hard, while SYSEMU is easier to implement.
* This version of the patch includes some suggestions of Jeff Dike to avoid
adding any instructions to the syscall fast path, plus some other little
changes, by myself, to make it work even when the syscall is executed with
SYSENTER (but I'm unsure about them). It has been widely tested for quite a
lot of time.
* Various fixes were included to handle the various switches between
various states, i.e. when for instance a syscall entry is traced with one of
PT_SYSCALL / _SYSEMU / _SINGLESTEP and another one is used on exit.
Basically, this is done by remembering which one of them was used even after
the call to ptrace_notify().
* We're combining TIF_SYSCALL_EMU with TIF_SYSCALL_TRACE or TIF_SINGLESTEP
to make do_syscall_trace() notice that the current syscall was started with
SYSEMU on entry, so that no notification ought to be done in the exit path;
this is a bit of a hack, so this problem is solved in another way in next
patches.
* Also, the effects of the patch:
"Ptrace - i386: fix Syscall Audit interaction with singlestep"
are cancelled; they are restored back in the last patch of this series.
Detailed descriptions of the patches doing this kind of processing follow (but
I've already summed everything up).
* Fix behaviour when changing interception kind #1.
In do_syscall_trace(), we check the status of the TIF_SYSCALL_EMU flag
only after doing the debugger notification; but the debugger might have
changed the status of this flag because it continued execution with
PTRACE_SYSCALL, so this is wrong. This patch fixes it by saving the flag
status before calling ptrace_notify().
* Fix behaviour when changing interception kind #2:
avoid intercepting syscall on return when using SYSCALL again.
A guest process switching from using PTRACE_SYSEMU to PTRACE_SYSCALL
crashes.
The problem is in arch/i386/kernel/entry.S. The current SYSEMU patch
inhibits the syscall-handler to be called, but does not prevent
do_syscall_trace() to be called after this for syscall completion
interception.
The appended patch fixes this. It reuses the flag TIF_SYSCALL_EMU to
remember "we come from PTRACE_SYSEMU and now are in PTRACE_SYSCALL", since
the flag is unused in the depicted situation.
* Fix behaviour when changing interception kind #3:
avoid intercepting syscall on return when using SINGLESTEP.
When testing 2.6.9 with the skas3.v6 patch and my latest patch, I had
problems with singlestepping on UML in SKAS with SYSEMU. It looped
receiving SIGTRAPs without moving forward. EIP of the traced process was
the same for all SIGTRAPs.
What's missing is to handle switching from PTRACE_SYSCALL_EMU to
PTRACE_SINGLESTEP in a way very similar to what is done for the change from
PTRACE_SYSCALL_EMU to PTRACE_SYSCALL_TRACE.
I.e., after calling ptrace(PTRACE_SYSEMU), on the return path, the debugger is
notified and then wakes up the process; the syscall is executed (or skipped,
when do_syscall_trace() returns 0, i.e. when using PTRACE_SYSEMU), and
do_syscall_trace() is called again. Since we are on the return path of a
SYSEMU'd syscall, if the wake up is performed through ptrace(PTRACE_SYSCALL),
we must still avoid notifying the parent of the syscall exit. Now, this
behaviour is extended even to resuming with PTRACE_SINGLESTEP.
Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-09-03 16:57:18 -06:00
|
|
|
#define PTRACE_SYSEMU 31
|
2005-09-03 16:57:20 -06:00
|
|
|
#define PTRACE_SYSEMU_SINGLESTEP 32
|
2005-04-16 16:20:36 -06:00
|
|
|
|
|
|
|
/* 0x4200-0x4300 are reserved for architecture-independent additions. */
|
|
|
|
#define PTRACE_SETOPTIONS 0x4200
|
|
|
|
#define PTRACE_GETEVENTMSG 0x4201
|
|
|
|
#define PTRACE_GETSIGINFO 0x4202
|
|
|
|
#define PTRACE_SETSIGINFO 0x4203
|
|
|
|
|
|
|
|
/* options set using PTRACE_SETOPTIONS */
|
|
|
|
#define PTRACE_O_TRACESYSGOOD 0x00000001
|
|
|
|
#define PTRACE_O_TRACEFORK 0x00000002
|
|
|
|
#define PTRACE_O_TRACEVFORK 0x00000004
|
|
|
|
#define PTRACE_O_TRACECLONE 0x00000008
|
|
|
|
#define PTRACE_O_TRACEEXEC 0x00000010
|
|
|
|
#define PTRACE_O_TRACEVFORKDONE 0x00000020
|
|
|
|
#define PTRACE_O_TRACEEXIT 0x00000040
|
|
|
|
|
|
|
|
#define PTRACE_O_MASK 0x0000007f
|
|
|
|
|
|
|
|
/* Wait extended result codes for the above trace options. */
|
|
|
|
#define PTRACE_EVENT_FORK 1
|
|
|
|
#define PTRACE_EVENT_VFORK 2
|
|
|
|
#define PTRACE_EVENT_CLONE 3
|
|
|
|
#define PTRACE_EVENT_EXEC 4
|
|
|
|
#define PTRACE_EVENT_VFORK_DONE 5
|
|
|
|
#define PTRACE_EVENT_EXIT 6
|
|
|
|
|
|
|
|
#include <asm/ptrace.h>
|
|
|
|
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
/*
|
|
|
|
* Ptrace flags
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define PT_PTRACED 0x00000001
|
|
|
|
#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
|
|
|
|
#define PT_TRACESYSGOOD 0x00000004
|
|
|
|
#define PT_PTRACE_CAP 0x00000008 /* ptracer can follow suid-exec */
|
|
|
|
#define PT_TRACE_FORK 0x00000010
|
|
|
|
#define PT_TRACE_VFORK 0x00000020
|
|
|
|
#define PT_TRACE_CLONE 0x00000040
|
|
|
|
#define PT_TRACE_EXEC 0x00000080
|
|
|
|
#define PT_TRACE_VFORK_DONE 0x00000100
|
|
|
|
#define PT_TRACE_EXIT 0x00000200
|
|
|
|
#define PT_ATTACHED 0x00000400 /* parent != real_parent */
|
|
|
|
|
|
|
|
#define PT_TRACE_MASK 0x000003f4
|
|
|
|
|
|
|
|
/* single stepping state bits (used on ARM and PA-RISC) */
|
|
|
|
#define PT_SINGLESTEP_BIT 31
|
|
|
|
#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
|
|
|
|
#define PT_BLOCKSTEP_BIT 30
|
|
|
|
#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)
|
|
|
|
|
|
|
|
#include <linux/compiler.h> /* For unlikely. */
|
|
|
|
#include <linux/sched.h> /* For struct task_struct. */
|
|
|
|
|
|
|
|
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
|
|
|
|
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
|
|
|
|
extern int ptrace_attach(struct task_struct *tsk);
|
|
|
|
extern int ptrace_detach(struct task_struct *, unsigned int);
|
|
|
|
extern void ptrace_disable(struct task_struct *);
|
|
|
|
extern int ptrace_check_attach(struct task_struct *task, int kill);
|
|
|
|
extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
|
|
|
|
extern void ptrace_notify(int exit_code);
|
|
|
|
extern void __ptrace_link(struct task_struct *child,
|
|
|
|
struct task_struct *new_parent);
|
|
|
|
extern void __ptrace_unlink(struct task_struct *child);
|
|
|
|
extern void ptrace_untrace(struct task_struct *child);
|
2005-09-06 16:18:24 -06:00
|
|
|
extern int ptrace_may_attach(struct task_struct *task);
|
2005-04-16 16:20:36 -06:00
|
|
|
|
|
|
|
/*
 * ptrace_link - attach @child to @new_parent's trace list.
 *
 * Thin guard around __ptrace_link(): the real linking is only done when
 * the child actually has ptrace state set (child->ptrace non-zero, i.e.
 * some PT_* flag is active).  Being traced is the rare case on this
 * path, hence the unlikely() branch hint.
 *
 * NOTE(review): presumably called from the fork path to propagate
 * tracing to new children — confirm against callers; not visible here.
 */
static inline void ptrace_link(struct task_struct *child,
			       struct task_struct *new_parent)
{
	if (unlikely(child->ptrace))
		__ptrace_link(child, new_parent);
}
|
|
|
|
/*
 * ptrace_unlink - detach @child from its tracer, if it has one.
 *
 * Counterpart of ptrace_link(): delegates to __ptrace_unlink() only
 * when the child carries ptrace state (child->ptrace non-zero).  The
 * common untraced case is a no-op, hence the unlikely() branch hint.
 */
static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}
|
|
|
|
|
|
|
|
|
|
|
|
#ifndef force_successful_syscall_return
|
|
|
|
/*
|
|
|
|
* System call handlers that, upon successful completion, need to return a
|
|
|
|
* negative value should call force_successful_syscall_return() right before
|
|
|
|
* returning. On architectures where the syscall convention provides for a
|
|
|
|
* separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
|
|
|
|
* others), this macro can be used to ensure that the error flag will not get
|
|
|
|
* set. On architectures which do not support a separate error flag, the macro
|
|
|
|
* is a no-op and the spurious error condition needs to be filtered out by some
|
|
|
|
* other means (e.g., in user-level, by passing an extra argument to the
|
|
|
|
* syscall handler, or something along those lines).
|
|
|
|
*/
|
|
|
|
#define force_successful_syscall_return() do { } while (0)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif
|