63bcff2a30
When Supervisor Mode Access Prevention (SMAP) is enabled, access to userspace from the kernel is controlled by the AC flag. To make the performance of manipulating that flag acceptable, there are two new instructions, STAC and CLAC, to set and clear it. This patch adds those instructions, via alternative(), when the SMAP feature is enabled. It also adds X86_EFLAGS_AC unconditionally to the SYSCALL entry mask; there is simply no reason to make that one conditional. Signed-off-by: H. Peter Anvin <hpa@linux.intel.com> Link: http://lkml.kernel.org/r/1348256595-29119-9-git-send-email-hpa@linux.intel.com
113 lines
2.2 KiB
x86 Assembly (AT&T/GAS syntax)
/*
 * __get_user functions.
 *
 * (C) Copyright 1998 Linus Torvalds
 * (C) Copyright 2005 Andi Kleen
 * (C) Copyright 2008 Glauber Costa
 *
 * These functions have a non-standard call interface
 * to make them more efficient, especially as they
 * return an error value in addition to the "real"
 * return value.
 */

/*
 * __get_user_X
 *
 * Inputs:	%[r|e]ax contains the address.
 *		The register is modified, but all changes are undone
 *		before returning because the C code doesn't know about it.
 *
 * Outputs:	%[r|e]ax is error code (0 or -EFAULT)
 *		%[r|e]dx contains zero-extended value
 *
 * These functions should not modify any other registers,
 * as they get called from within inline assembly.
 */
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/dwarf2.h>
|
|
#include <asm/page_types.h>
|
|
#include <asm/errno.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/thread_info.h>
|
|
#include <asm/asm.h>
|
|
#include <asm/smap.h>
|
|
|
|
.text
|
|
/*
 * Fetch one byte from userspace.
 * In:  %[r|e]ax = user address
 * Out: %[r|e]ax = 0, %edx = zero-extended byte (on fault, the extable
 *      fixup below lands in bad_get_user instead).
 */
ENTRY(__get_user_1)
	CFI_STARTPROC
	GET_THREAD_INFO(%_ASM_DX)		/* thread_info holds addr_limit */
	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX	/* unsigned range check */
	jae bad_get_user			/* address >= addr_limit -> -EFAULT */
	ASM_STAC				/* open SMAP access window (sets EFLAGS.AC) */
1:	movzb (%_ASM_AX),%edx			/* may fault; label 1 is in the extable */
	xor %eax,%eax				/* success: error code 0 */
	ASM_CLAC				/* close SMAP access window */
	ret
	CFI_ENDPROC
ENDPROC(__get_user_1)
|
|
|
|
/*
 * Fetch a 16-bit value from userspace.
 * In:  %[r|e]ax = user address
 * Out: %[r|e]ax = 0, %edx = zero-extended u16 (faults go to bad_get_user
 *      via the exception table).
 */
ENTRY(__get_user_2)
	CFI_STARTPROC
	add $1,%_ASM_AX				/* point at the last byte of the u16 */
	jc bad_get_user				/* carry: address space wrapped around */
	GET_THREAD_INFO(%_ASM_DX)		/* thread_info holds addr_limit */
	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX	/* last byte must be below addr_limit */
	jae bad_get_user
	ASM_STAC				/* open SMAP access window */
2:	movzwl -1(%_ASM_AX),%edx		/* load from original address; may fault */
	xor %eax,%eax				/* success: error code 0 */
	ASM_CLAC				/* close SMAP access window */
	ret
	CFI_ENDPROC
ENDPROC(__get_user_2)
|
|
|
|
/*
 * Fetch a 32-bit value from userspace.
 * In:  %[r|e]ax = user address
 * Out: %[r|e]ax = 0, %edx = value (32-bit mov zero-extends into %_ASM_DX
 *      on 64-bit); faults go to bad_get_user via the exception table.
 */
ENTRY(__get_user_4)
	CFI_STARTPROC
	add $3,%_ASM_AX				/* point at the last byte of the u32 */
	jc bad_get_user				/* carry: address space wrapped around */
	GET_THREAD_INFO(%_ASM_DX)		/* thread_info holds addr_limit */
	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX	/* last byte must be below addr_limit */
	jae bad_get_user
	ASM_STAC				/* open SMAP access window */
3:	mov -3(%_ASM_AX),%edx			/* load from original address; may fault */
	xor %eax,%eax				/* success: error code 0 */
	ASM_CLAC				/* close SMAP access window */
	ret
	CFI_ENDPROC
ENDPROC(__get_user_4)
|
|
|
|
#ifdef CONFIG_X86_64
/*
 * Fetch a 64-bit value from userspace (64-bit kernels only; there is no
 * 32-bit variant of this entry point in this file).
 * In:  %rax = user address
 * Out: %rax = 0, %rdx = value; faults go to bad_get_user via the
 *      exception table.
 */
ENTRY(__get_user_8)
	CFI_STARTPROC
	add $7,%_ASM_AX				/* point at the last byte of the u64 */
	jc bad_get_user				/* carry: address space wrapped around */
	GET_THREAD_INFO(%_ASM_DX)		/* thread_info holds addr_limit */
	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX	/* last byte must be below addr_limit */
	jae bad_get_user
	ASM_STAC				/* open SMAP access window */
4:	movq -7(%_ASM_AX),%_ASM_DX		/* load from original address; may fault */
	xor %eax,%eax				/* success: error code 0 */
	ASM_CLAC				/* close SMAP access window */
	ret
	CFI_ENDPROC
ENDPROC(__get_user_8)
#endif
|
|
|
|
/*
 * Common failure exit: reached on a failed range check above, and also
 * as the exception-table fixup target when one of the tagged loads
 * (labels 1-4) faults.
 * Out: %[r|e]ax = -EFAULT, %edx = 0.
 */
bad_get_user:
	CFI_STARTPROC
	xor %edx,%edx			/* returned "value" is zero */
	mov $(-EFAULT),%_ASM_AX		/* error code */
	ASM_CLAC			/* ensure the SMAP window is closed on this path too */
	ret
	CFI_ENDPROC
END(bad_get_user)
|
|
|
|
/*
 * Exception-table entries: if any of the tagged userspace loads above
 * (labels 1-4) takes a fault, the fault handler resumes execution at
 * bad_get_user, which returns -EFAULT.
 */
_ASM_EXTABLE(1b,bad_get_user)
_ASM_EXTABLE(2b,bad_get_user)
_ASM_EXTABLE(3b,bad_get_user)
#ifdef CONFIG_X86_64
_ASM_EXTABLE(4b,bad_get_user)	/* __get_user_8 exists only on 64-bit */
#endif
|