Merge branch 'parisc-4.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull parisc updates from Helge Deller:

 - Add native high-resolution timing code for sched_clock() and other timing functions based on the processor-internal cr16 cycle counters

 - Add syscall tracepoint support

 - Add regset support

 - Speed up get_user() and put_user() functions

 - Updated futex.h to match the generic implementation (John David Anglin)

 - A few smaller ftrace build fixes

 - Fixed the thuge-gen kernel self test to use the architecture-specific MAP_HUGETLB value

 - Added the parisc architecture to the seccomp_bpf kernel self test

 - Various typo fixes (Andrea Gelmini)

* 'parisc-4.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Whitespace cleanups in unistd.h
  parisc: Use long jump to reach ftrace_return_to_handler()
  parisc: Fix typo in fpudispatch.c
  parisc: Fix typos in eisa_eeprom.h
  parisc: Fix typo in ldcw.h
  parisc: Fix typo in pdc.h
  parisc: Update futex.h to match generic implementation
  parisc: Merge ftrace C-helper and assembler functions into .text.hot section
  selftests/thuge-gen: Use platform specific MAP_HUGETLB value
  parisc: Add native high-resolution sched_clock() implementation
  parisc: Add ARCH_TRACEHOOK and regset support
  parisc: Add 64bit get_user() and put_user() for 32bit kernel
  parisc: Simplify and speed up get_user() and put_user()
  parisc: Add syscall tracepoint support
commit 24c82fbb86
21 changed files with 611 additions and 139 deletions
@@ -6,6 +6,7 @@ config PARISC
 select HAVE_OPROFILE
 select HAVE_FUNCTION_TRACER
 select HAVE_FUNCTION_GRAPH_TRACER
+select HAVE_SYSCALL_TRACEPOINTS
 select ARCH_WANT_FRAME_POINTERS
 select RTC_CLASS
 select RTC_DRV_GENERIC
@@ -31,6 +32,8 @@ config PARISC
 select HAVE_DEBUG_STACKOVERFLOW
 select HAVE_ARCH_AUDITSYSCALL
 select HAVE_ARCH_SECCOMP_FILTER
+select HAVE_ARCH_TRACEHOOK
+select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
 select ARCH_NO_COHERENT_DMA_MMAP
 select CPU_NO_EFFICIENT_FFS
@@ -52,8 +52,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
 /* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
 extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
 unsigned int new_);
-extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr,
-unsigned long old, unsigned long new_);
+extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);

 /* don't worry...optimizer will get rid of most of this */
 static inline unsigned long
@@ -61,7 +60,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 {
 switch (size) {
 #ifdef CONFIG_64BIT
-case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
 #endif
 case 4: return __cmpxchg_u32((unsigned int *)ptr,
 (unsigned int)old, (unsigned int)new_);
@@ -86,7 +85,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 {
 switch (size) {
 #ifdef CONFIG_64BIT
-case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
 #endif
 case 4: return __cmpxchg_u32(ptr, old, new_);
 default:
@@ -111,4 +110,6 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 #endif

+#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
+
 #endif /* _ASM_PARISC_CMPXCHG_H_ */
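With __cmpxchg_u64() now taking and returning u64, and cmpxchg64() mapped onto it, 64-bit compare-and-swap can be used on u64 values directly. A minimal sketch of a caller (the seq_advance() helper is hypothetical, not part of this merge):

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/compiler.h>

/* Hypothetical helper: atomically advance a 64-bit sequence counter by
 * retrying until no other CPU updated it in between.  cmpxchg64() expands
 * to __cmpxchg_u64() as defined in the hunk above. */
static inline u64 seq_advance(u64 *seq)
{
	u64 old, new;

	do {
		old = READ_ONCE(*seq);
		new = old + 1;
	} while (cmpxchg64(seq, old, new) != old);

	return new;
}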
@ -99,7 +99,7 @@ struct eeprom_eisa_slot_info
|
|||
#define HPEE_MEMORY_DECODE_24BITS 0x04
|
||||
#define HPEE_MEMORY_DECODE_32BITS 0x08
|
||||
/* byte 2 and 3 are a 16bit LE value
|
||||
* containging the memory size in kilobytes */
|
||||
* containing the memory size in kilobytes */
|
||||
/* byte 4,5,6 are a 24bit LE value
|
||||
* containing the memory base address */
|
||||
|
||||
|
@ -135,7 +135,7 @@ struct eeprom_eisa_slot_info
|
|||
#define HPEE_PORT_SHARED 0x40
|
||||
#define HPEE_PORT_MORE 0x80
|
||||
/* byte 1 and 2 is a 16bit LE value
|
||||
* conating the start port number */
|
||||
* containing the start port number */
|
||||
|
||||
#define HPEE_PORT_INIT_MAX_LEN 60 /* in bytes here */
|
||||
/* port init entry byte 0 */
|
||||
|
|
|
@ -6,6 +6,8 @@ extern void mcount(void);
|
|||
|
||||
#define MCOUNT_INSN_SIZE 4
|
||||
|
||||
extern unsigned long sys_call_table[];
|
||||
|
||||
extern unsigned long return_address(unsigned int);
|
||||
|
||||
#define ftrace_return_address(n) return_address(n)
|
||||
|
|
|
@ -35,70 +35,57 @@ static inline int
|
|||
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
|
||||
{
|
||||
unsigned long int flags;
|
||||
u32 val;
|
||||
int op = (encoded_op >> 28) & 7;
|
||||
int cmp = (encoded_op >> 24) & 15;
|
||||
int oparg = (encoded_op << 8) >> 20;
|
||||
int cmparg = (encoded_op << 20) >> 20;
|
||||
int oldval = 0, ret;
|
||||
int oldval, ret;
|
||||
u32 tmp;
|
||||
|
||||
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
|
||||
oparg = 1 << oparg;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
|
||||
return -EFAULT;
|
||||
|
||||
_futex_spin_lock_irqsave(uaddr, &flags);
|
||||
pagefault_disable();
|
||||
|
||||
_futex_spin_lock_irqsave(uaddr, &flags);
|
||||
ret = -EFAULT;
|
||||
if (unlikely(get_user(oldval, uaddr) != 0))
|
||||
goto out_pagefault_enable;
|
||||
|
||||
ret = 0;
|
||||
tmp = oldval;
|
||||
|
||||
switch (op) {
|
||||
case FUTEX_OP_SET:
|
||||
/* *(int *)UADDR2 = OPARG; */
|
||||
ret = get_user(oldval, uaddr);
|
||||
if (!ret)
|
||||
ret = put_user(oparg, uaddr);
|
||||
tmp = oparg;
|
||||
break;
|
||||
case FUTEX_OP_ADD:
|
||||
/* *(int *)UADDR2 += OPARG; */
|
||||
ret = get_user(oldval, uaddr);
|
||||
if (!ret) {
|
||||
val = oldval + oparg;
|
||||
ret = put_user(val, uaddr);
|
||||
}
|
||||
tmp += oparg;
|
||||
break;
|
||||
case FUTEX_OP_OR:
|
||||
/* *(int *)UADDR2 |= OPARG; */
|
||||
ret = get_user(oldval, uaddr);
|
||||
if (!ret) {
|
||||
val = oldval | oparg;
|
||||
ret = put_user(val, uaddr);
|
||||
}
|
||||
tmp |= oparg;
|
||||
break;
|
||||
case FUTEX_OP_ANDN:
|
||||
/* *(int *)UADDR2 &= ~OPARG; */
|
||||
ret = get_user(oldval, uaddr);
|
||||
if (!ret) {
|
||||
val = oldval & ~oparg;
|
||||
ret = put_user(val, uaddr);
|
||||
}
|
||||
tmp &= ~oparg;
|
||||
break;
|
||||
case FUTEX_OP_XOR:
|
||||
/* *(int *)UADDR2 ^= OPARG; */
|
||||
ret = get_user(oldval, uaddr);
|
||||
if (!ret) {
|
||||
val = oldval ^ oparg;
|
||||
ret = put_user(val, uaddr);
|
||||
}
|
||||
tmp ^= oparg;
|
||||
break;
|
||||
default:
|
||||
ret = -ENOSYS;
|
||||
}
|
||||
|
||||
if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
|
||||
ret = -EFAULT;
|
||||
|
||||
out_pagefault_enable:
|
||||
pagefault_enable();
|
||||
_futex_spin_unlock_irqrestore(uaddr, &flags);
|
||||
|
||||
pagefault_enable();
|
||||
|
||||
if (!ret) {
|
||||
if (ret == 0) {
|
||||
switch (cmp) {
|
||||
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
|
||||
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
|
||||
|
@ -112,12 +99,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/* Non-atomic version */
|
||||
static inline int
|
||||
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
||||
u32 oldval, u32 newval)
|
||||
{
|
||||
int ret;
|
||||
u32 val;
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -137,17 +122,20 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
|||
*/
|
||||
|
||||
_futex_spin_lock_irqsave(uaddr, &flags);
|
||||
if (unlikely(get_user(val, uaddr) != 0)) {
|
||||
_futex_spin_unlock_irqrestore(uaddr, &flags);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
ret = get_user(val, uaddr);
|
||||
|
||||
if (!ret && val == oldval)
|
||||
ret = put_user(newval, uaddr);
|
||||
if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
|
||||
_futex_spin_unlock_irqrestore(uaddr, &flags);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
*uval = val;
|
||||
|
||||
_futex_spin_unlock_irqrestore(uaddr, &flags);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /*__KERNEL__*/
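For context on the encoded_op that futex_atomic_op_inuser() decodes above: userspace builds it with the FUTEX_OP() macro and passes it through FUTEX_WAKE_OP. A userspace sketch (the wake_and_set() wrapper is illustrative only, not part of this merge):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

/* Atomically perform *uaddr2 = 1 (FUTEX_OP_SET, decoded by the kernel
 * helper above), wake up to one waiter on uaddr, and wake up to one
 * waiter on uaddr2 if the old value of *uaddr2 was > 0. */
static long wake_and_set(uint32_t *uaddr, uint32_t *uaddr2)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAKE_OP, 1,
		       (void *)1UL /* val2: waiters to wake on uaddr2 */,
		       uaddr2,
		       FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_GT, 0));
}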
@@ -40,7 +40,7 @@
 memory to indicate to the compiler that the assembly code reads
 or writes to items other than those listed in the input and output
 operands. This may pessimize the code somewhat but __ldcw is
-usually used within code blocks surrounded by memory barriors. */
+usually used within code blocks surrounded by memory barriers. */
 #define __ldcw(a) ({ \
 unsigned __ret; \
 __asm__ __volatile__(__LDCW " 0(%1),%0" \
@ -8,6 +8,8 @@
|
|||
#include <linux/err.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
#define NR_syscalls (__NR_Linux_syscalls)
|
||||
|
||||
static inline long syscall_get_nr(struct task_struct *tsk,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
|
@ -33,12 +35,19 @@ static inline void syscall_get_arguments(struct task_struct *tsk,
|
|||
args[1] = regs->gr[25];
|
||||
case 1:
|
||||
args[0] = regs->gr[26];
|
||||
case 0:
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static inline long syscall_get_return_value(struct task_struct *task,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
return regs->gr[28];
|
||||
}
|
||||
|
||||
static inline void syscall_set_return_value(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
int error, long val)
|
||||
|
|
|
@ -55,6 +55,7 @@ struct thread_info {
|
|||
#define TIF_SINGLESTEP 9 /* single stepping? */
|
||||
#define TIF_BLOCKSTEP 10 /* branch stepping? */
|
||||
#define TIF_SECCOMP 11 /* secure computing */
|
||||
#define TIF_SYSCALL_TRACEPOINT 12 /* syscall tracepoint instrumentation */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
||||
|
@ -66,12 +67,13 @@ struct thread_info {
|
|||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
|
||||
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
||||
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
|
||||
|
||||
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
|
||||
_TIF_NEED_RESCHED)
|
||||
#define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
|
||||
_TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \
|
||||
_TIF_SECCOMP)
|
||||
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
# ifdef CONFIG_COMPAT
|
||||
|
|
|
@ -40,14 +40,10 @@ static inline long access_ok(int type, const void __user * addr,
|
|||
#define get_user __get_user
|
||||
|
||||
#if !defined(CONFIG_64BIT)
|
||||
#define LDD_KERNEL(ptr) BUILD_BUG()
|
||||
#define LDD_USER(ptr) BUILD_BUG()
|
||||
#define STD_KERNEL(x, ptr) __put_kernel_asm64(x, ptr)
|
||||
#define LDD_USER(ptr) __get_user_asm64(ptr)
|
||||
#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
|
||||
#else
|
||||
#define LDD_KERNEL(ptr) __get_kernel_asm("ldd", ptr)
|
||||
#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
|
||||
#define STD_KERNEL(x, ptr) __put_kernel_asm("std", x, ptr)
|
||||
#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
|
||||
#endif
|
||||
|
||||
|
@ -80,70 +76,70 @@ struct exception_data {
|
|||
unsigned long fault_addr;
|
||||
};
|
||||
|
||||
/*
|
||||
* load_sr2() preloads the space register %%sr2 - based on the value of
|
||||
* get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
|
||||
* is 0), or with the current value of %%sr3 to access user space (USER_DS)
|
||||
* memory. The following __get_user_asm() and __put_user_asm() functions have
|
||||
* %%sr2 hard-coded to access the requested memory.
|
||||
*/
|
||||
#define load_sr2() \
|
||||
__asm__(" or,= %0,%%r0,%%r0\n\t" \
|
||||
" mfsp %%sr3,%0\n\t" \
|
||||
" mtsp %0,%%sr2\n\t" \
|
||||
: : "r"(get_fs()) : )
|
||||
|
||||
#define __get_user(x, ptr) \
|
||||
({ \
|
||||
register long __gu_err __asm__ ("r8") = 0; \
|
||||
register long __gu_val __asm__ ("r9") = 0; \
|
||||
\
|
||||
if (segment_eq(get_fs(), KERNEL_DS)) { \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: __get_kernel_asm("ldb", ptr); break; \
|
||||
case 2: __get_kernel_asm("ldh", ptr); break; \
|
||||
case 4: __get_kernel_asm("ldw", ptr); break; \
|
||||
case 8: LDD_KERNEL(ptr); break; \
|
||||
default: BUILD_BUG(); break; \
|
||||
} \
|
||||
} \
|
||||
else { \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
load_sr2(); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: __get_user_asm("ldb", ptr); break; \
|
||||
case 2: __get_user_asm("ldh", ptr); break; \
|
||||
case 4: __get_user_asm("ldw", ptr); break; \
|
||||
case 8: LDD_USER(ptr); break; \
|
||||
default: BUILD_BUG(); break; \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
(x) = (__force __typeof__(*(ptr))) __gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
#define __get_kernel_asm(ldx, ptr) \
|
||||
__asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \
|
||||
#define __get_user_asm(ldx, ptr) \
|
||||
__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
|
||||
: "=r"(__gu_val), "=r"(__gu_err) \
|
||||
: "r"(ptr), "1"(__gu_err) \
|
||||
: "r1");
|
||||
|
||||
#define __get_user_asm(ldx, ptr) \
|
||||
__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
|
||||
: "=r"(__gu_val), "=r"(__gu_err) \
|
||||
#if !defined(CONFIG_64BIT)
|
||||
|
||||
#define __get_user_asm64(ptr) \
|
||||
__asm__("\n1:\tldw 0(%%sr2,%2),%0" \
|
||||
"\n2:\tldw 4(%%sr2,%2),%R0\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
|
||||
ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
|
||||
: "=r"(__gu_val), "=r"(__gu_err) \
|
||||
: "r"(ptr), "1"(__gu_err) \
|
||||
: "r1");
|
||||
|
||||
#endif /* !defined(CONFIG_64BIT) */
|
||||
|
||||
|
||||
#define __put_user(x, ptr) \
|
||||
({ \
|
||||
register long __pu_err __asm__ ("r8") = 0; \
|
||||
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
|
||||
\
|
||||
if (segment_eq(get_fs(), KERNEL_DS)) { \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: __put_kernel_asm("stb", __x, ptr); break; \
|
||||
case 2: __put_kernel_asm("sth", __x, ptr); break; \
|
||||
case 4: __put_kernel_asm("stw", __x, ptr); break; \
|
||||
case 8: STD_KERNEL(__x, ptr); break; \
|
||||
default: BUILD_BUG(); break; \
|
||||
} \
|
||||
} \
|
||||
else { \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
load_sr2(); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: __put_user_asm("stb", __x, ptr); break; \
|
||||
case 2: __put_user_asm("sth", __x, ptr); break; \
|
||||
case 4: __put_user_asm("stw", __x, ptr); break; \
|
||||
case 8: STD_USER(__x, ptr); break; \
|
||||
default: BUILD_BUG(); break; \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
__pu_err; \
|
||||
|
@ -159,17 +155,9 @@ struct exception_data {
|
|||
* r8/r9 are already listed as err/val.
|
||||
*/
|
||||
|
||||
#define __put_kernel_asm(stx, x, ptr) \
|
||||
__asm__ __volatile__ ( \
|
||||
"\n1:\t" stx "\t%2,0(%1)\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
|
||||
: "=r"(__pu_err) \
|
||||
: "r"(ptr), "r"(x), "0"(__pu_err) \
|
||||
: "r1")
|
||||
|
||||
#define __put_user_asm(stx, x, ptr) \
|
||||
__asm__ __volatile__ ( \
|
||||
"\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \
|
||||
"\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
|
||||
: "=r"(__pu_err) \
|
||||
: "r"(ptr), "r"(x), "0"(__pu_err) \
|
||||
|
@ -178,21 +166,10 @@ struct exception_data {
|
|||
|
||||
#if !defined(CONFIG_64BIT)
|
||||
|
||||
#define __put_kernel_asm64(__val, ptr) do { \
|
||||
__asm__ __volatile__ ( \
|
||||
"\n1:\tstw %2,0(%1)" \
|
||||
"\n2:\tstw %R2,4(%1)\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
|
||||
ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
|
||||
: "=r"(__pu_err) \
|
||||
: "r"(ptr), "r"(__val), "0"(__pu_err) \
|
||||
: "r1"); \
|
||||
} while (0)
|
||||
|
||||
#define __put_user_asm64(__val, ptr) do { \
|
||||
__asm__ __volatile__ ( \
|
||||
"\n1:\tstw %2,0(%%sr3,%1)" \
|
||||
"\n2:\tstw %R2,4(%%sr3,%1)\n\t" \
|
||||
"\n1:\tstw %2,0(%%sr2,%1)" \
|
||||
"\n2:\tstw %R2,4(%%sr2,%1)\n\t" \
|
||||
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
|
||||
ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
|
||||
: "=r"(__pu_err) \
|
||||
|
|
|
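The practical effect of the __get_user_asm64()/__put_user_asm64() additions is that a 32-bit kernel can now move a u64 to or from userspace with a single get_user()/put_user(). A minimal sketch of a caller (read_u64_from_user() is a hypothetical example, not from this merge):

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/errno.h>

/* On a 32-bit parisc kernel this single get_user() now expands to the
 * two-word LDD_USER()/__get_user_asm64() sequence added above. */
static int read_u64_from_user(u64 __user *uptr, u64 *out)
{
	u64 val;

	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}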
@@ -59,7 +59,7 @@
 #define PDC_MODEL_GET_BOOT__OP 8 /* returns boot test options */
 #define PDC_MODEL_SET_BOOT__OP 9 /* set boot test options */

-#define PA89_INSTRUCTION_SET 0x4 /* capatibilies returned */
+#define PA89_INSTRUCTION_SET 0x4 /* capabilities returned */
 #define PA90_INSTRUCTION_SET 0x8

 #define PDC_CACHE 5 /* return/set cache (& TLB) info*/
@ -13,6 +13,11 @@
|
|||
* N.B. gdb/strace care about the size and offsets within this
|
||||
* structure. If you change things, you may break object compatibility
|
||||
* for those applications.
|
||||
*
|
||||
* Please do NOT use this structure for future programs, but use
|
||||
* user_regs_struct (see below) instead.
|
||||
*
|
||||
* It can be accessed through PTRACE_PEEKUSR/PTRACE_POKEUSR only.
|
||||
*/
|
||||
|
||||
struct pt_regs {
|
||||
|
@ -33,6 +38,45 @@ struct pt_regs {
|
|||
unsigned long ipsw; /* CR22 */
|
||||
};
|
||||
|
||||
/**
|
||||
* struct user_regs_struct - User general purpose registers
|
||||
*
|
||||
* This is the user-visible general purpose register state structure
|
||||
* which is used to define the elf_gregset_t.
|
||||
*
|
||||
* It can be accessed through PTRACE_GETREGSET with NT_PRSTATUS
|
||||
* and through PTRACE_GETREGS.
|
||||
*/
|
||||
struct user_regs_struct {
|
||||
unsigned long gr[32]; /* PSW is in gr[0] */
|
||||
unsigned long sr[8];
|
||||
unsigned long iaoq[2];
|
||||
unsigned long iasq[2];
|
||||
unsigned long sar; /* CR11 */
|
||||
unsigned long iir; /* CR19 */
|
||||
unsigned long isr; /* CR20 */
|
||||
unsigned long ior; /* CR21 */
|
||||
unsigned long ipsw; /* CR22 */
|
||||
unsigned long cr0;
|
||||
unsigned long cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
|
||||
unsigned long cr8, cr9, cr12, cr13, cr10, cr15;
|
||||
unsigned long _pad[80-64]; /* pad to ELF_NGREG (80) */
|
||||
};
|
||||
|
||||
/**
|
||||
* struct user_fp_struct - User floating point registers
|
||||
*
|
||||
* This is the user-visible floating point register state structure.
|
||||
* It uses the same layout and size as elf_fpregset_t.
|
||||
*
|
||||
* It can be accessed through PTRACE_GETREGSET with NT_PRFPREG
|
||||
* and through PTRACE_GETFPREGS.
|
||||
*/
|
||||
struct user_fp_struct {
|
||||
__u64 fr[32];
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* The numbers chosen here are somewhat arbitrary but absolutely MUST
|
||||
* not overlap with any of the number assigned in <linux/ptrace.h>.
|
||||
|
@ -43,5 +87,9 @@ struct pt_regs {
|
|||
*/
|
||||
#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
|
||||
|
||||
#define PTRACE_GETREGS 18
|
||||
#define PTRACE_SETREGS 19
|
||||
#define PTRACE_GETFPREGS 14
|
||||
#define PTRACE_SETFPREGS 15
|
||||
|
||||
#endif /* _UAPI_PARISC_PTRACE_H */
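The new regset support means a parisc tracer can fetch the registers through PTRACE_GETREGSET instead of word-by-word PTRACE_PEEKUSR. A userspace sketch (assumes a tracee already stopped under ptrace; error handling trimmed):

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <elf.h>
#include <stdio.h>

static void dump_syscall_number(pid_t pid)
{
	/* 80 longs matches ELF_NGREG on parisc; gr[0..31] come first, so
	 * index 20 is gr[20], which holds the syscall number. */
	unsigned long regs[80];
	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(regs) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == 0)
		printf("syscall: %lu\n", regs[20]);
}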
|
||||
|
|
|
@ -102,7 +102,7 @@
|
|||
#define __NR_uselib (__NR_Linux + 86)
|
||||
#define __NR_swapon (__NR_Linux + 87)
|
||||
#define __NR_reboot (__NR_Linux + 88)
|
||||
#define __NR_mmap2 (__NR_Linux + 89)
|
||||
#define __NR_mmap2 (__NR_Linux + 89)
|
||||
#define __NR_mmap (__NR_Linux + 90)
|
||||
#define __NR_munmap (__NR_Linux + 91)
|
||||
#define __NR_truncate (__NR_Linux + 92)
|
||||
|
@ -114,7 +114,7 @@
|
|||
#define __NR_recv (__NR_Linux + 98)
|
||||
#define __NR_statfs (__NR_Linux + 99)
|
||||
#define __NR_fstatfs (__NR_Linux + 100)
|
||||
#define __NR_stat64 (__NR_Linux + 101)
|
||||
#define __NR_stat64 (__NR_Linux + 101)
|
||||
/* #define __NR_socketcall (__NR_Linux + 102) */
|
||||
#define __NR_syslog (__NR_Linux + 103)
|
||||
#define __NR_setitimer (__NR_Linux + 104)
|
||||
|
@ -140,17 +140,17 @@
|
|||
#define __NR_adjtimex (__NR_Linux + 124)
|
||||
#define __NR_mprotect (__NR_Linux + 125)
|
||||
#define __NR_sigprocmask (__NR_Linux + 126)
|
||||
#define __NR_create_module (__NR_Linux + 127)
|
||||
#define __NR_create_module (__NR_Linux + 127) /* not used */
|
||||
#define __NR_init_module (__NR_Linux + 128)
|
||||
#define __NR_delete_module (__NR_Linux + 129)
|
||||
#define __NR_get_kernel_syms (__NR_Linux + 130)
|
||||
#define __NR_get_kernel_syms (__NR_Linux + 130) /* not used */
|
||||
#define __NR_quotactl (__NR_Linux + 131)
|
||||
#define __NR_getpgid (__NR_Linux + 132)
|
||||
#define __NR_fchdir (__NR_Linux + 133)
|
||||
#define __NR_bdflush (__NR_Linux + 134)
|
||||
#define __NR_sysfs (__NR_Linux + 135)
|
||||
#define __NR_personality (__NR_Linux + 136)
|
||||
#define __NR_afs_syscall (__NR_Linux + 137) /* Syscall for Andrew File System */
|
||||
#define __NR_afs_syscall (__NR_Linux + 137) /* not used */
|
||||
#define __NR_setfsuid (__NR_Linux + 138)
|
||||
#define __NR_setfsgid (__NR_Linux + 139)
|
||||
#define __NR__llseek (__NR_Linux + 140)
|
||||
|
@ -180,9 +180,9 @@
|
|||
#define __NR_setresuid (__NR_Linux + 164)
|
||||
#define __NR_getresuid (__NR_Linux + 165)
|
||||
#define __NR_sigaltstack (__NR_Linux + 166)
|
||||
#define __NR_query_module (__NR_Linux + 167)
|
||||
#define __NR_query_module (__NR_Linux + 167) /* not used */
|
||||
#define __NR_poll (__NR_Linux + 168)
|
||||
#define __NR_nfsservctl (__NR_Linux + 169)
|
||||
#define __NR_nfsservctl (__NR_Linux + 169) /* not used */
|
||||
#define __NR_setresgid (__NR_Linux + 170)
|
||||
#define __NR_getresgid (__NR_Linux + 171)
|
||||
#define __NR_prctl (__NR_Linux + 172)
|
||||
|
@ -209,18 +209,16 @@
|
|||
#define __NR_shmdt (__NR_Linux + 193)
|
||||
#define __NR_shmget (__NR_Linux + 194)
|
||||
#define __NR_shmctl (__NR_Linux + 195)
|
||||
|
||||
#define __NR_getpmsg (__NR_Linux + 196) /* Somebody *wants* streams? */
|
||||
#define __NR_putpmsg (__NR_Linux + 197)
|
||||
|
||||
#define __NR_getpmsg (__NR_Linux + 196) /* not used */
|
||||
#define __NR_putpmsg (__NR_Linux + 197) /* not used */
|
||||
#define __NR_lstat64 (__NR_Linux + 198)
|
||||
#define __NR_truncate64 (__NR_Linux + 199)
|
||||
#define __NR_ftruncate64 (__NR_Linux + 200)
|
||||
#define __NR_getdents64 (__NR_Linux + 201)
|
||||
#define __NR_fcntl64 (__NR_Linux + 202)
|
||||
#define __NR_attrctl (__NR_Linux + 203)
|
||||
#define __NR_acl_get (__NR_Linux + 204)
|
||||
#define __NR_acl_set (__NR_Linux + 205)
|
||||
#define __NR_attrctl (__NR_Linux + 203) /* not used */
|
||||
#define __NR_acl_get (__NR_Linux + 204) /* not used */
|
||||
#define __NR_acl_set (__NR_Linux + 205) /* not used */
|
||||
#define __NR_gettid (__NR_Linux + 206)
|
||||
#define __NR_readahead (__NR_Linux + 207)
|
||||
#define __NR_tkill (__NR_Linux + 208)
|
||||
|
@ -228,8 +226,8 @@
|
|||
#define __NR_futex (__NR_Linux + 210)
|
||||
#define __NR_sched_setaffinity (__NR_Linux + 211)
|
||||
#define __NR_sched_getaffinity (__NR_Linux + 212)
|
||||
#define __NR_set_thread_area (__NR_Linux + 213)
|
||||
#define __NR_get_thread_area (__NR_Linux + 214)
|
||||
#define __NR_set_thread_area (__NR_Linux + 213) /* not used */
|
||||
#define __NR_get_thread_area (__NR_Linux + 214) /* not used */
|
||||
#define __NR_io_setup (__NR_Linux + 215)
|
||||
#define __NR_io_destroy (__NR_Linux + 216)
|
||||
#define __NR_io_getevents (__NR_Linux + 217)
|
||||
|
@ -278,7 +276,7 @@
|
|||
#define __NR_mbind (__NR_Linux + 260)
|
||||
#define __NR_get_mempolicy (__NR_Linux + 261)
|
||||
#define __NR_set_mempolicy (__NR_Linux + 262)
|
||||
#define __NR_vserver (__NR_Linux + 263)
|
||||
#define __NR_vserver (__NR_Linux + 263) /* not used */
|
||||
#define __NR_add_key (__NR_Linux + 264)
|
||||
#define __NR_request_key (__NR_Linux + 265)
|
||||
#define __NR_keyctl (__NR_Linux + 266)
|
||||
|
@ -318,7 +316,7 @@
|
|||
#define __NR_kexec_load (__NR_Linux + 300)
|
||||
#define __NR_utimensat (__NR_Linux + 301)
|
||||
#define __NR_signalfd (__NR_Linux + 302)
|
||||
#define __NR_timerfd (__NR_Linux + 303)
|
||||
#define __NR_timerfd (__NR_Linux + 303) /* not used */
|
||||
#define __NR_eventfd (__NR_Linux + 304)
|
||||
#define __NR_fallocate (__NR_Linux + 305)
|
||||
#define __NR_timerfd_create (__NR_Linux + 306)
|
||||
|
|
|
@ -667,7 +667,7 @@
|
|||
* boundary
|
||||
*/
|
||||
|
||||
.text
|
||||
.section .text.hot
|
||||
.align 2048
|
||||
|
||||
ENTRY(fault_vector_20)
|
||||
|
@ -2019,6 +2019,7 @@ ftrace_stub:
|
|||
.procend
|
||||
ENDPROC(mcount)
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
.align 8
|
||||
.globl return_to_handler
|
||||
.type return_to_handler, @function
|
||||
|
@ -2040,11 +2041,17 @@ parisc_return_to_handler:
|
|||
#endif
|
||||
|
||||
/* call ftrace_return_to_handler(0) */
|
||||
.import ftrace_return_to_handler,code
|
||||
load32 ftrace_return_to_handler,%ret0
|
||||
load32 .Lftrace_ret,%r2
|
||||
#ifdef CONFIG_64BIT
|
||||
ldo -16(%sp),%ret1 /* Reference param save area */
|
||||
bve (%ret0)
|
||||
#else
|
||||
bv %r0(%ret0)
|
||||
#endif
|
||||
BL ftrace_return_to_handler,%r2
|
||||
ldi 0,%r26
|
||||
.Lftrace_ret:
|
||||
copy %ret0,%rp
|
||||
|
||||
/* restore original return values */
|
||||
|
@ -2062,6 +2069,8 @@ parisc_return_to_handler:
|
|||
.procend
|
||||
ENDPROC(return_to_handler)
|
||||
|
||||
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
||||
|
||||
#endif /* CONFIG_FUNCTION_TRACER */
|
||||
|
||||
#ifdef CONFIG_IRQSTACKS
|
||||
|
|
|
@ -18,12 +18,15 @@
|
|||
#include <asm/ftrace.h>
|
||||
|
||||
|
||||
#define __hot __attribute__ ((__section__ (".text.hot")))
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
/*
|
||||
* Hook the return address and push it in the stack of return addrs
|
||||
* in current thread info.
|
||||
*/
|
||||
static void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
|
||||
static void __hot prepare_ftrace_return(unsigned long *parent,
|
||||
unsigned long self_addr)
|
||||
{
|
||||
unsigned long old;
|
||||
struct ftrace_graph_ent trace;
|
||||
|
@ -53,7 +56,7 @@ static void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr
|
|||
}
|
||||
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
||||
|
||||
void notrace ftrace_function_trampoline(unsigned long parent,
|
||||
void notrace __hot ftrace_function_trampoline(unsigned long parent,
|
||||
unsigned long self_addr,
|
||||
unsigned long org_sp_gr3)
|
||||
{
|
||||
|
|
|
@ -4,18 +4,20 @@
|
|||
* Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
|
||||
* Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
|
||||
* Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
|
||||
* Copyright (C) 2008 Helge Deller <deller@gmx.de>
|
||||
* Copyright (C) 2008-2016 Helge Deller <deller@gmx.de>
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/tracehook.h>
|
||||
#include <linux/user.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/regset.h>
|
||||
#include <linux/security.h>
|
||||
#include <linux/seccomp.h>
|
||||
#include <linux/compat.h>
|
||||
|
@ -30,6 +32,17 @@
|
|||
/* PSW bits we allow the debugger to modify */
|
||||
#define USER_PSW_BITS (PSW_N | PSW_B | PSW_V | PSW_CB)
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/syscalls.h>
|
||||
|
||||
/*
|
||||
* These are our native regset flavors.
|
||||
*/
|
||||
enum parisc_regset {
|
||||
REGSET_GENERAL,
|
||||
REGSET_FP
|
||||
};
|
||||
|
||||
/*
|
||||
* Called by kernel/ptrace.c when detaching..
|
||||
*
|
||||
|
@ -114,6 +127,7 @@ void user_enable_block_step(struct task_struct *task)
|
|||
long arch_ptrace(struct task_struct *child, long request,
|
||||
unsigned long addr, unsigned long data)
|
||||
{
|
||||
unsigned long __user *datap = (unsigned long __user *)data;
|
||||
unsigned long tmp;
|
||||
long ret = -EIO;
|
||||
|
||||
|
@ -126,7 +140,7 @@ long arch_ptrace(struct task_struct *child, long request,
|
|||
addr >= sizeof(struct pt_regs))
|
||||
break;
|
||||
tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
|
||||
ret = put_user(tmp, (unsigned long __user *) data);
|
||||
ret = put_user(tmp, datap);
|
||||
break;
|
||||
|
||||
/* Write the word at location addr in the USER area. This will need
|
||||
|
@ -165,6 +179,34 @@ long arch_ptrace(struct task_struct *child, long request,
|
|||
}
|
||||
break;
|
||||
|
||||
case PTRACE_GETREGS: /* Get all gp regs from the child. */
|
||||
return copy_regset_to_user(child,
|
||||
task_user_regset_view(current),
|
||||
REGSET_GENERAL,
|
||||
0, sizeof(struct user_regs_struct),
|
||||
datap);
|
||||
|
||||
case PTRACE_SETREGS: /* Set all gp regs in the child. */
|
||||
return copy_regset_from_user(child,
|
||||
task_user_regset_view(current),
|
||||
REGSET_GENERAL,
|
||||
0, sizeof(struct user_regs_struct),
|
||||
datap);
|
||||
|
||||
case PTRACE_GETFPREGS: /* Get the child FPU state. */
|
||||
return copy_regset_to_user(child,
|
||||
task_user_regset_view(current),
|
||||
REGSET_FP,
|
||||
0, sizeof(struct user_fp_struct),
|
||||
datap);
|
||||
|
||||
case PTRACE_SETFPREGS: /* Set the child FPU state. */
|
||||
return copy_regset_from_user(child,
|
||||
task_user_regset_view(current),
|
||||
REGSET_FP,
|
||||
0, sizeof(struct user_fp_struct),
|
||||
datap);
|
||||
|
||||
default:
|
||||
ret = ptrace_request(child, request, addr, data);
|
||||
break;
|
||||
|
@ -283,6 +325,10 @@ long do_syscall_trace_enter(struct pt_regs *regs)
|
|||
regs->gr[20] = -1UL;
|
||||
goto out;
|
||||
}
|
||||
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
|
||||
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
|
||||
trace_sys_enter(regs, regs->gr[20]);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
if (!is_compat_task())
|
||||
|
@ -311,6 +357,324 @@ void do_syscall_trace_exit(struct pt_regs *regs)
|
|||
|
||||
audit_syscall_exit(regs);
|
||||
|
||||
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
|
||||
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
|
||||
trace_sys_exit(regs, regs->gr[20]);
|
||||
#endif
|
||||
|
||||
if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
|
||||
tracehook_report_syscall_exit(regs, stepping);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* regset functions.
|
||||
*/
|
||||
|
||||
static int fpr_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf)
|
||||
{
|
||||
struct pt_regs *regs = task_regs(target);
|
||||
__u64 *k = kbuf;
|
||||
__u64 __user *u = ubuf;
|
||||
__u64 reg;
|
||||
|
||||
pos /= sizeof(reg);
|
||||
count /= sizeof(reg);
|
||||
|
||||
if (kbuf)
|
||||
for (; count > 0 && pos < ELF_NFPREG; --count)
|
||||
*k++ = regs->fr[pos++];
|
||||
else
|
||||
for (; count > 0 && pos < ELF_NFPREG; --count)
|
||||
if (__put_user(regs->fr[pos++], u++))
|
||||
return -EFAULT;
|
||||
|
||||
kbuf = k;
|
||||
ubuf = u;
|
||||
pos *= sizeof(reg);
|
||||
count *= sizeof(reg);
|
||||
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
|
||||
ELF_NFPREG * sizeof(reg), -1);
|
||||
}
|
||||
|
||||
static int fpr_set(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
struct pt_regs *regs = task_regs(target);
|
||||
const __u64 *k = kbuf;
|
||||
const __u64 __user *u = ubuf;
|
||||
__u64 reg;
|
||||
|
||||
pos /= sizeof(reg);
|
||||
count /= sizeof(reg);
|
||||
|
||||
if (kbuf)
|
||||
for (; count > 0 && pos < ELF_NFPREG; --count)
|
||||
regs->fr[pos++] = *k++;
|
||||
else
|
||||
for (; count > 0 && pos < ELF_NFPREG; --count) {
|
||||
if (__get_user(reg, u++))
|
||||
return -EFAULT;
|
||||
regs->fr[pos++] = reg;
|
||||
}
|
||||
|
||||
kbuf = k;
|
||||
ubuf = u;
|
||||
pos *= sizeof(reg);
|
||||
count *= sizeof(reg);
|
||||
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
ELF_NFPREG * sizeof(reg), -1);
|
||||
}
|
||||
|
||||
#define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long))
|
||||
|
||||
static unsigned long get_reg(struct pt_regs *regs, int num)
|
||||
{
|
||||
switch (num) {
|
||||
case RI(gr[0]) ... RI(gr[31]): return regs->gr[num - RI(gr[0])];
|
||||
case RI(sr[0]) ... RI(sr[7]): return regs->sr[num - RI(sr[0])];
|
||||
case RI(iasq[0]): return regs->iasq[0];
|
||||
case RI(iasq[1]): return regs->iasq[1];
|
||||
case RI(iaoq[0]): return regs->iaoq[0];
|
||||
case RI(iaoq[1]): return regs->iaoq[1];
|
||||
case RI(sar): return regs->sar;
|
||||
case RI(iir): return regs->iir;
|
||||
case RI(isr): return regs->isr;
|
||||
case RI(ior): return regs->ior;
|
||||
case RI(ipsw): return regs->ipsw;
|
||||
case RI(cr27): return regs->cr27;
|
||||
case RI(cr0): return mfctl(0);
|
||||
case RI(cr24): return mfctl(24);
|
||||
case RI(cr25): return mfctl(25);
|
||||
case RI(cr26): return mfctl(26);
|
||||
case RI(cr28): return mfctl(28);
|
||||
case RI(cr29): return mfctl(29);
|
||||
case RI(cr30): return mfctl(30);
|
||||
case RI(cr31): return mfctl(31);
|
||||
case RI(cr8): return mfctl(8);
|
||||
case RI(cr9): return mfctl(9);
|
||||
case RI(cr12): return mfctl(12);
|
||||
case RI(cr13): return mfctl(13);
|
||||
case RI(cr10): return mfctl(10);
|
||||
case RI(cr15): return mfctl(15);
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static void set_reg(struct pt_regs *regs, int num, unsigned long val)
|
||||
{
|
||||
switch (num) {
|
||||
case RI(gr[0]): /*
|
||||
* PSW is in gr[0].
|
||||
* Allow writing to Nullify, Divide-step-correction,
|
||||
* and carry/borrow bits.
|
||||
* BEWARE, if you set N, and then single step, it won't
|
||||
* stop on the nullified instruction.
|
||||
*/
|
||||
val &= USER_PSW_BITS;
|
||||
regs->gr[0] &= ~USER_PSW_BITS;
|
||||
regs->gr[0] |= val;
|
||||
return;
|
||||
case RI(gr[1]) ... RI(gr[31]):
|
||||
regs->gr[num - RI(gr[0])] = val;
|
||||
return;
|
||||
case RI(iaoq[0]):
|
||||
case RI(iaoq[1]):
|
||||
regs->iaoq[num - RI(iaoq[0])] = val;
|
||||
return;
|
||||
case RI(sar): regs->sar = val;
|
||||
return;
|
||||
default: return;
|
||||
#if 0
|
||||
/* do not allow to change any of the following registers (yet) */
|
||||
case RI(sr[0]) ... RI(sr[7]): return regs->sr[num - RI(sr[0])];
|
||||
case RI(iasq[0]): return regs->iasq[0];
|
||||
case RI(iasq[1]): return regs->iasq[1];
|
||||
case RI(iir): return regs->iir;
|
||||
case RI(isr): return regs->isr;
|
||||
case RI(ior): return regs->ior;
|
||||
case RI(ipsw): return regs->ipsw;
|
||||
case RI(cr27): return regs->cr27;
|
||||
case cr0, cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
|
||||
case cr8, cr9, cr12, cr13, cr10, cr15;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
static int gpr_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf)
|
||||
{
|
||||
struct pt_regs *regs = task_regs(target);
|
||||
unsigned long *k = kbuf;
|
||||
unsigned long __user *u = ubuf;
|
||||
unsigned long reg;
|
||||
|
||||
pos /= sizeof(reg);
|
||||
count /= sizeof(reg);
|
||||
|
||||
if (kbuf)
|
||||
for (; count > 0 && pos < ELF_NGREG; --count)
|
||||
*k++ = get_reg(regs, pos++);
|
||||
else
|
||||
for (; count > 0 && pos < ELF_NGREG; --count)
|
||||
if (__put_user(get_reg(regs, pos++), u++))
|
||||
return -EFAULT;
|
||||
kbuf = k;
|
||||
ubuf = u;
|
||||
pos *= sizeof(reg);
|
||||
count *= sizeof(reg);
|
||||
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
|
||||
ELF_NGREG * sizeof(reg), -1);
|
||||
}
|
||||
|
||||
static int gpr_set(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
struct pt_regs *regs = task_regs(target);
|
||||
const unsigned long *k = kbuf;
|
||||
const unsigned long __user *u = ubuf;
|
||||
unsigned long reg;
|
||||
|
||||
pos /= sizeof(reg);
|
||||
count /= sizeof(reg);
|
||||
|
||||
if (kbuf)
|
||||
for (; count > 0 && pos < ELF_NGREG; --count)
|
||||
set_reg(regs, pos++, *k++);
|
||||
else
|
||||
for (; count > 0 && pos < ELF_NGREG; --count) {
|
||||
if (__get_user(reg, u++))
|
||||
return -EFAULT;
|
||||
set_reg(regs, pos++, reg);
|
||||
}
|
||||
|
||||
kbuf = k;
|
||||
ubuf = u;
|
||||
pos *= sizeof(reg);
|
||||
count *= sizeof(reg);
|
||||
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
ELF_NGREG * sizeof(reg), -1);
|
||||
}
|
||||
|
||||
static const struct user_regset native_regsets[] = {
|
||||
[REGSET_GENERAL] = {
|
||||
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
|
||||
.size = sizeof(long), .align = sizeof(long),
|
||||
.get = gpr_get, .set = gpr_set
|
||||
},
|
||||
[REGSET_FP] = {
|
||||
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
|
||||
.size = sizeof(__u64), .align = sizeof(__u64),
|
||||
.get = fpr_get, .set = fpr_set
|
||||
}
|
||||
};
|
||||
|
||||
static const struct user_regset_view user_parisc_native_view = {
|
||||
.name = "parisc", .e_machine = ELF_ARCH, .ei_osabi = ELFOSABI_LINUX,
|
||||
.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
|
||||
};
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#include <linux/compat.h>
|
||||
|
||||
static int gpr32_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf)
|
||||
{
|
||||
struct pt_regs *regs = task_regs(target);
|
||||
compat_ulong_t *k = kbuf;
|
||||
compat_ulong_t __user *u = ubuf;
|
||||
compat_ulong_t reg;
|
||||
|
||||
pos /= sizeof(reg);
|
||||
count /= sizeof(reg);
|
||||
|
||||
if (kbuf)
|
||||
for (; count > 0 && pos < ELF_NGREG; --count)
|
||||
*k++ = get_reg(regs, pos++);
|
||||
else
|
||||
for (; count > 0 && pos < ELF_NGREG; --count)
|
||||
if (__put_user((compat_ulong_t) get_reg(regs, pos++), u++))
|
||||
return -EFAULT;
|
||||
|
||||
kbuf = k;
|
||||
ubuf = u;
|
||||
pos *= sizeof(reg);
|
||||
count *= sizeof(reg);
|
||||
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
|
||||
ELF_NGREG * sizeof(reg), -1);
|
||||
}
|
||||
|
||||
static int gpr32_set(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
struct pt_regs *regs = task_regs(target);
|
||||
const compat_ulong_t *k = kbuf;
|
||||
const compat_ulong_t __user *u = ubuf;
|
||||
compat_ulong_t reg;
|
||||
|
||||
pos /= sizeof(reg);
|
||||
count /= sizeof(reg);
|
||||
|
||||
if (kbuf)
|
||||
for (; count > 0 && pos < ELF_NGREG; --count)
|
||||
set_reg(regs, pos++, *k++);
|
||||
else
|
||||
for (; count > 0 && pos < ELF_NGREG; --count) {
|
||||
if (__get_user(reg, u++))
|
||||
return -EFAULT;
|
||||
set_reg(regs, pos++, reg);
|
||||
}
|
||||
|
||||
kbuf = k;
|
||||
ubuf = u;
|
||||
pos *= sizeof(reg);
|
||||
count *= sizeof(reg);
|
||||
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
ELF_NGREG * sizeof(reg), -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* These are the regset flavors matching the 32bit native set.
|
||||
*/
|
||||
static const struct user_regset compat_regsets[] = {
|
||||
[REGSET_GENERAL] = {
|
||||
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
|
||||
.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
|
||||
.get = gpr32_get, .set = gpr32_set
|
||||
},
|
||||
[REGSET_FP] = {
|
||||
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
|
||||
.size = sizeof(__u64), .align = sizeof(__u64),
|
||||
.get = fpr_get, .set = fpr_set
|
||||
}
|
||||
};
|
||||
|
||||
static const struct user_regset_view user_parisc_compat_view = {
|
||||
.name = "parisc", .e_machine = EM_PARISC, .ei_osabi = ELFOSABI_LINUX,
|
||||
.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
|
||||
};
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
|
||||
{
|
||||
BUILD_BUG_ON(sizeof(struct user_regs_struct)/sizeof(long) != ELF_NGREG);
|
||||
BUILD_BUG_ON(sizeof(struct user_fp_struct)/sizeof(__u64) != ELF_NFPREG);
|
||||
#ifdef CONFIG_64BIT
|
||||
if (is_compat_task())
|
||||
return &user_parisc_compat_view;
|
||||
#endif
|
||||
return &user_parisc_native_view;
|
||||
}
|
||||
|
|
|
@ -912,6 +912,7 @@ END(lws_table)
|
|||
|
||||
.align 8
|
||||
ENTRY(sys_call_table)
|
||||
.export sys_call_table,data
|
||||
#include "syscall_table.S"
|
||||
END(sys_call_table)
|
||||
|
||||
|
|
|
@ -38,6 +38,18 @@
|
|||
|
||||
static unsigned long clocktick __read_mostly; /* timer cycles per tick */
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
/*
|
||||
* The processor-internal cycle counter (Control Register 16) is used as time
|
||||
* source for the sched_clock() function. This register is 64bit wide on a
|
||||
* 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
|
||||
* requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
|
||||
* with a per-cpu variable which we increase every time the counter
|
||||
* wraps-around (which happens every ~4 secounds).
|
||||
*/
|
||||
static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We keep time on PA-RISC Linux by using the Interval Timer which is
|
||||
* a pair of registers; one is read-only and one is write-only; both
|
||||
|
@ -108,6 +120,12 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
|
|||
*/
|
||||
mtctl(next_tick, 16);
|
||||
|
||||
#if !defined(CONFIG_64BIT)
|
||||
/* check for overflow on a 32bit kernel (every ~4 seconds). */
|
||||
if (unlikely(next_tick < now))
|
||||
this_cpu_inc(cr16_high_32_bits);
|
||||
#endif
|
||||
|
||||
/* Skip one clocktick on purpose if we missed next_tick.
|
||||
* The new CR16 must be "later" than current CR16 otherwise
|
||||
* itimer would not fire until CR16 wrapped - e.g 4 seconds
|
||||
|
@ -219,6 +237,12 @@ void __init start_cpu_itimer(void)
|
|||
unsigned int cpu = smp_processor_id();
|
||||
unsigned long next_tick = mfctl(16) + clocktick;
|
||||
|
||||
#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
|
||||
/* With multiple 64bit CPUs online, the cr16's are not syncronized. */
|
||||
if (cpu != 0)
|
||||
clear_sched_clock_stable();
|
||||
#endif
|
||||
|
||||
mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
|
||||
|
||||
per_cpu(cpu_data, cpu).it_value = next_tick;
|
||||
|
@ -246,15 +270,52 @@ void read_persistent_clock(struct timespec *ts)
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* sched_clock() framework
|
||||
*/
|
||||
|
||||
static u32 cyc2ns_mul __read_mostly;
|
||||
static u32 cyc2ns_shift __read_mostly;
|
||||
|
||||
u64 sched_clock(void)
|
||||
{
|
||||
u64 now;
|
||||
|
||||
/* Get current cycle counter (Control Register 16). */
|
||||
#ifdef CONFIG_64BIT
|
||||
now = mfctl(16);
|
||||
#else
|
||||
now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
|
||||
#endif
|
||||
|
||||
/* return the value in ns (cycles_2_ns) */
|
||||
return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* timer interrupt and sched_clock() initialization
|
||||
*/
|
||||
|
||||
void __init time_init(void)
|
||||
{
|
||||
unsigned long current_cr16_khz;
|
||||
|
||||
current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
|
||||
clocktick = (100 * PAGE0->mem_10msec) / HZ;
|
||||
|
||||
/* calculate mult/shift values for cr16 */
|
||||
clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
|
||||
NSEC_PER_MSEC, 0);
|
||||
|
||||
#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
|
||||
/* At bootup only one 64bit CPU is online and cr16 is "stable" */
|
||||
set_sched_clock_stable();
|
||||
#endif
|
||||
|
||||
start_cpu_itimer(); /* get CPU 0 started */
|
||||
|
||||
/* register at clocksource framework */
|
||||
current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
|
||||
clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
|
||||
}
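The cycles-to-nanoseconds step of the new sched_clock() is a plain mult/shift scaling: clocks_calc_mult_shift() picks mult and shift so that mult/2^shift approximates the nanoseconds per cr16 cycle, and mul_u64_u32_shr() applies it without 64-bit overflow. A standalone sketch (the demo_cr16_to_ns() name and its ad-hoc setup are illustrative, not from the patch):

#include <linux/clocksource.h>
#include <linux/math64.h>
#include <linux/time64.h>

/* Convert a raw cr16 cycle count to nanoseconds the same way
 * time_init()/sched_clock() do above, given the cr16 rate in kHz. */
static u64 demo_cr16_to_ns(u64 cycles, u32 cr16_khz)
{
	u32 mult, shift;

	clocks_calc_mult_shift(&mult, &shift, cr16_khz, NSEC_PER_MSEC, 0);
	return mul_u64_u32_shr(cycles, mult, shift);
}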
|
||||
|
|
|
@ -55,11 +55,10 @@ unsigned long __xchg8(char x, char *ptr)
|
|||
}
|
||||
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
|
||||
u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned long prev;
|
||||
u64 prev;
|
||||
|
||||
_atomic_spin_lock_irqsave(ptr, flags);
|
||||
if ((prev = *ptr) == old)
|
||||
|
@ -67,7 +66,6 @@ unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsi
|
|||
_atomic_spin_unlock_irqrestore(ptr, flags);
|
||||
return prev;
|
||||
}
|
||||
#endif
|
||||
|
||||
unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
|
||||
{
|
||||
|
|
|
@@ -184,7 +184,7 @@ static void parisc_linux_get_fpu_type(u_int fpregs[])

 /*
  * this routine will decode the excepting floating point instruction and
- * call the approiate emulation routine.
+ * call the appropriate emulation routine.
  * It is called by decode_fpu with the following parameters:
  * fpudispatch(current_ir, unimplemented_code, 0, &Fpu_register)
  * where current_ir is the instruction to be emulated,
@ -1234,6 +1234,10 @@ TEST_F(TRACE_poke, getpid_runs_normally)
|
|||
# define ARCH_REGS struct user_pt_regs
|
||||
# define SYSCALL_NUM regs[8]
|
||||
# define SYSCALL_RET regs[0]
|
||||
#elif defined(__hppa__)
|
||||
# define ARCH_REGS struct user_regs_struct
|
||||
# define SYSCALL_NUM gr[20]
|
||||
# define SYSCALL_RET gr[28]
|
||||
#elif defined(__powerpc__)
|
||||
# define ARCH_REGS struct pt_regs
|
||||
# define SYSCALL_NUM gpr[0]
|
||||
|
@ -1303,7 +1307,7 @@ void change_syscall(struct __test_metadata *_metadata,
|
|||
EXPECT_EQ(0, ret);
|
||||
|
||||
#if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
|
||||
defined(__s390__)
|
||||
defined(__s390__) || defined(__hppa__)
|
||||
{
|
||||
regs.SYSCALL_NUM = syscall;
|
||||
}
|
||||
|
@ -1505,6 +1509,8 @@ TEST_F(TRACE_syscall, syscall_dropped)
|
|||
# define __NR_seccomp 383
|
||||
# elif defined(__aarch64__)
|
||||
# define __NR_seccomp 277
|
||||
# elif defined(__hppa__)
|
||||
# define __NR_seccomp 338
|
||||
# elif defined(__powerpc__)
|
||||
# define __NR_seccomp 358
|
||||
# elif defined(__s390__)
|
||||
|
|
|
@@ -30,7 +30,9 @@
 #define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT)
 #define MAP_HUGE_SHIFT 26
 #define MAP_HUGE_MASK 0x3f
+#if !defined(MAP_HUGETLB)
 #define MAP_HUGETLB 0x40000
+#endif

 #define SHM_HUGETLB 04000 /* segment will use huge TLB pages */
 #define SHM_HUGE_SHIFT 26