#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H
/* ptrace.h */
/* structs and defines to help the user use the ptrace system call. */

/* has the defines to get at the registers. */

#define PTRACE_TRACEME 0
#define PTRACE_PEEKTEXT 1
#define PTRACE_PEEKDATA 2
#define PTRACE_PEEKUSR 3
#define PTRACE_POKETEXT 4
#define PTRACE_POKEDATA 5
#define PTRACE_POKEUSR 6
#define PTRACE_CONT 7
#define PTRACE_KILL 8
#define PTRACE_SINGLESTEP 9

#define PTRACE_ATTACH 16
#define PTRACE_DETACH 17

#define PTRACE_SYSCALL 24

/* 0x4200-0x4300 are reserved for architecture-independent additions. */
#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201
#define PTRACE_GETSIGINFO 0x4202
#define PTRACE_SETSIGINFO 0x4203

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

#define PTRACE_O_MASK 0x0000007f

/* Wait extended result codes for the above trace options. */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6
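
/*
 * Illustrative sketch (not part of this header): a user-space tracer
 * typically enables the options above with PTRACE_SETOPTIONS after
 * attaching, then decodes the PTRACE_EVENT_* code from the waitpid()
 * status, where it is reported in the byte above the SIGTRAP stop signal:
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0,
 *	       PTRACE_O_TRACEFORK | PTRACE_O_TRACEEXIT);
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) {
 *		int event = (status >> 16) & 0xff;
 *		if (event == PTRACE_EVENT_FORK) {
 *			unsigned long child_pid;
 *			ptrace(PTRACE_GETEVENTMSG, pid, 0, &child_pid);
 *		}
 *	}
 */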

#include <asm/ptrace.h>

#ifdef __KERNEL__
/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace
 * flags, are simple.  When a task is running it owns its own
 * task->ptrace flags.  When a task is stopped the ptracer owns
 * task->ptrace.
 */

#define PT_PTRACED 0x00000001
#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD 0x00000004
#define PT_PTRACE_CAP 0x00000008 /* ptracer can follow suid-exec */
#define PT_TRACE_FORK 0x00000010
#define PT_TRACE_VFORK 0x00000020
#define PT_TRACE_CLONE 0x00000040
#define PT_TRACE_EXEC 0x00000080
#define PT_TRACE_VFORK_DONE 0x00000100
#define PT_TRACE_EXIT 0x00000200

#define PT_TRACE_MASK 0x000003f4

/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT 31
#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT 30
#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)

#include <linux/compiler.h> /* For unlikely. */
#include <linux/sched.h> /* For struct task_struct. */

extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
extern struct task_struct *ptrace_get_task_struct(pid_t pid);
extern int ptrace_traceme(void);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);
extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern int __ptrace_detach(struct task_struct *tracer, struct task_struct *p);
extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);

#define PTRACE_MODE_READ 1
#define PTRACE_MODE_ATTACH 2
/* Returns 0 on success, -errno on denial. */
extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
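
/*
 * Illustrative sketch (assumption about typical callers, not a definition
 * in this header): code that only reads another task's state, e.g. a /proc
 * handler, would gate the access with the weaker mode:
 *
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *		return -EPERM;
 *
 * whereas actually attaching to and controlling the task requires
 * PTRACE_MODE_ATTACH.  Note the differing return conventions of the two
 * interfaces documented above.
 */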

static inline int ptrace_reparented(struct task_struct *child)
{
	return child->real_parent != child->parent;
}

static inline void ptrace_link(struct task_struct *child,
			       struct task_struct *new_parent)
{
	if (unlikely(child->ptrace))
		__ptrace_link(child, new_parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);

/**
 * task_ptrace - return %PT_* flags that apply to a task
 * @task: pointer to &task_struct in question
 *
 * Returns the %PT_* flags that apply to @task.
 */
static inline int task_ptrace(struct task_struct *task)
{
	return task->ptrace;
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @mask: %PT_* bit to check in @current->ptrace
 * @event: %PTRACE_EVENT_* value to report if @mask is set
 * @message: value for %PTRACE_GETEVENTMSG to return
 *
 * This checks the @mask bit to see if ptrace wants stops for this event.
 * If so we stop, reporting @event and @message to the ptrace parent.
 *
 * Returns nonzero if we did a ptrace notification, zero if not.
 *
 * Called without locks.
 */
static inline int ptrace_event(int mask, int event, unsigned long message)
{
	if (mask && likely(!(current->ptrace & mask)))
		return 0;
	current->ptrace_message = message;
	ptrace_notify((event << 8) | SIGTRAP);
	return 1;
}
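
/*
 * Illustrative sketch (assumption about typical callers, not defined here):
 * core code reports an event stop by passing the matching PT_TRACE_* bit
 * and PTRACE_EVENT_* code, e.g. on exec:
 *
 *	ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0);
 *
 * or after a fork, with the new child's pid as the event message that
 * PTRACE_GETEVENTMSG will return:
 *
 *	ptrace_event(PT_TRACE_FORK, PTRACE_EVENT_FORK, new_pid);
 */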

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child: new child task
 * @ptrace: true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->parent = child->real_parent;
	child->ptrace = 0;
	if (unlikely(ptrace)) {
		child->ptrace = current->ptrace;
		ptrace_link(child, current->parent);
	}
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task: task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the
 * macro is a no-op and the spurious error condition needs to be filtered out
 * by some other means (e.g., in user-level, by passing an extra argument to
 * the syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
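
/*
 * Illustrative sketch (assumed handler, not declared in this header): a
 * handler whose successful result may look negative when read as a signed
 * long, such as a peek request returning a word with the top bit set on an
 * architecture that reports the result in a register, would do:
 *
 *	ret = peeked_word;
 *	force_successful_syscall_return();
 *	return ret;
 *
 * so that architectures with a separate error flag do not misreport the
 * success as -errno.
 */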

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step() (0)
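
/*
 * Illustrative sketch (assumption about arch-independent callers, not part
 * of this interface): generic code can gate a single-step request like:
 *
 *	if (!arch_has_single_step())
 *		return -EIO;
 *	user_enable_single_step(child);
 *
 * and call user_disable_single_step(child) when resuming the task normally.
 */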

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#endif	/* arch_has_single_step */

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step() (0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}
#endif	/* arch_has_block_step */

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 */
#define arch_ptrace_stop_needed(code, info) (0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info) do { } while (0)
#endif
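
/*
 * Illustrative sketch (assumption about the generic stop path, not defined
 * here): the two hooks above are paired so the siglock is dropped only when
 * the arch actually needs the extra work before stopping:
 *
 *	if (arch_ptrace_stop_needed(exit_code, info)) {
 *		spin_unlock_irq(&current->sighand->siglock);
 *		arch_ptrace_stop(exit_code, info);
 *		spin_lock_irq(&current->sighand->siglock);
 *	}
 */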

#ifndef arch_ptrace_untrace
/*
 * Do machine-specific work before untracing child.
 *
 * This is called for a normal detach as well as from ptrace_exit()
 * when the tracing task dies.
 *
 * Called with write_lock(&tasklist_lock) held.
 */
#define arch_ptrace_untrace(task) do { } while (0)
#endif

#ifndef arch_ptrace_fork
/*
 * Do machine-specific work to initialize a new task.
 *
 * This is called from copy_process().
 */
#define arch_ptrace_fork(child, clone_flags) do { } while (0)
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);

#endif

#endif