x86/asm/entry: Change all 'user_mode_vm()' calls to 'user_mode()'
user_mode_vm() and user_mode() are now the same. Change all callers
of user_mode_vm() to user_mode().

The next patch will remove the definition of user_mode_vm.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brad Spengler <spender@grsecurity.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/43b1f57f3df70df5a08b0925897c660725015554.1426728647.git.luto@kernel.org
[ Merged to a more recent kernel. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent efa7045103
commit f39b6f0ef8
17 changed files with 30 additions and 30 deletions
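For reference, after the parent commit the two helpers are already equivalent; a rough sketch of their state in arch/x86/include/asm/ptrace.h is shown below. This is paraphrased for illustration rather than quoted verbatim, so the exact expressions and comments may differ from the tree this patch was merged into.

/*
 * Rough sketch, not verbatim kernel source: after the parent commit,
 * user_mode() itself accounts for VM86 mode on 32-bit, which makes
 * user_mode_vm() a trivial alias (removed by the next patch).
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
        /* User (or vm86) mode: CS RPL is user, or the EFLAGS VM bit is set */
        return ((regs->cs & SEGMENT_RPL_MASK) |
                (regs->flags & X86_VM_MASK)) >= USER_RPL;
#else
        return !!(regs->cs & 3);
#endif
}

static inline int user_mode_vm(struct pt_regs *regs)
{
        return user_mode(regs);         /* now identical to user_mode() */
}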
@@ -715,7 +715,7 @@ int poke_int3_handler(struct pt_regs *regs)
         if (likely(!bp_patching_in_progress))
                 return 0;
 
-        if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
+        if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
                 return 0;
 
         /* set up the specified breakpoint handler */
@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 #ifdef CONFIG_X86_32
         struct pt_regs fixed_regs;
 
-        if (!user_mode_vm(regs)) {
+        if (!user_mode(regs)) {
                 crash_fixup_ss_esp(&fixed_regs, regs);
                 regs = &fixed_regs;
         }
@@ -278,7 +278,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
         print_modules();
         show_regs(regs);
 #ifdef CONFIG_X86_32
-        if (user_mode_vm(regs)) {
+        if (user_mode(regs)) {
                 sp = regs->sp;
                 ss = regs->ss & 0xffff;
         } else {
@@ -307,7 +307,7 @@ void die(const char *str, struct pt_regs *regs, long err)
         unsigned long flags = oops_begin();
         int sig = SIGSEGV;
 
-        if (!user_mode_vm(regs))
+        if (!user_mode(regs))
                 report_bug(regs->ip, regs);
 
         if (__die(str, regs, err))
@@ -123,13 +123,13 @@ void show_regs(struct pt_regs *regs)
         int i;
 
         show_regs_print_info(KERN_EMERG);
-        __show_regs(regs, !user_mode_vm(regs));
+        __show_regs(regs, !user_mode(regs));
 
         /*
          * When in-kernel, we also print out the stack and code at the
          * time of the fault..
          */
-        if (!user_mode_vm(regs)) {
+        if (!user_mode(regs)) {
                 unsigned int code_prologue = code_bytes * 43 / 64;
                 unsigned int code_len = code_bytes;
                 unsigned char c;
@@ -68,7 +68,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
 static inline bool interrupted_user_mode(void)
 {
         struct pt_regs *regs = get_irq_regs();
-        return regs && user_mode_vm(regs);
+        return regs && user_mode(regs);
 }
 
 /*
@@ -165,7 +165,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
         if (unlikely(!desc))
                 return false;
 
-        if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
+        if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
                 if (unlikely(overflow))
                         print_stack_overflow();
                 desc->handle_irq(irq, desc);
@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
         u64 estack_top, estack_bottom;
         u64 curbase = (u64)task_stack_page(current);
 
-        if (user_mode_vm(regs))
+        if (user_mode(regs))
                 return;
 
         if (regs->sp >= curbase + sizeof(struct thread_info) +
@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
 #ifdef CONFIG_X86_32
         switch (regno) {
         case GDB_SS:
-                if (!user_mode_vm(regs))
+                if (!user_mode(regs))
                         *(unsigned long *)mem = __KERNEL_DS;
                 break;
         case GDB_SP:
-                if (!user_mode_vm(regs))
+                if (!user_mode(regs))
                         *(unsigned long *)mem = kernel_stack_pointer(regs);
                 break;
         case GDB_GS:
@@ -602,7 +602,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
         struct kprobe *p;
         struct kprobe_ctlblk *kcb;
 
-        if (user_mode_vm(regs))
+        if (user_mode(regs))
                 return 0;
 
         addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
@@ -1007,7 +1007,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
         struct die_args *args = data;
         int ret = NOTIFY_DONE;
 
-        if (args->regs && user_mode_vm(args->regs))
+        if (args->regs && user_mode(args->regs))
                 return ret;
 
         if (val == DIE_GPF) {
@@ -73,7 +73,7 @@ void __show_regs(struct pt_regs *regs, int all)
         unsigned long sp;
         unsigned short ss, gs;
 
-        if (user_mode_vm(regs)) {
+        if (user_mode(regs)) {
                 sp = regs->sp;
                 ss = regs->ss & 0xffff;
                 gs = get_user_gs(regs);
@@ -1415,7 +1415,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
         memset(info, 0, sizeof(*info));
         info->si_signo = SIGTRAP;
         info->si_code = si_code;
-        info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
+        info->si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
 }
 
 void user_single_step_siginfo(struct task_struct *tsk,
@@ -30,7 +30,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
         unsigned long pc = instruction_pointer(regs);
 
-        if (!user_mode_vm(regs) && in_lock_functions(pc)) {
+        if (!user_mode(regs) && in_lock_functions(pc)) {
 #ifdef CONFIG_FRAME_POINTER
                 return *(unsigned long *)(regs->bp + sizeof(long));
 #else
@@ -112,7 +112,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
 {
         enum ctx_state prev_state;
 
-        if (user_mode_vm(regs)) {
+        if (user_mode(regs)) {
                 /* Other than that, we're just an exception. */
                 prev_state = exception_enter();
         } else {
@@ -146,7 +146,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
         /* Must be before exception_exit. */
         preempt_count_sub(HARDIRQ_OFFSET);
 
-        if (user_mode_vm(regs))
+        if (user_mode(regs))
                 return exception_exit(prev_state);
         else
                 rcu_nmi_exit();
@@ -158,7 +158,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
  *
  * IST exception handlers normally cannot schedule. As a special
  * exception, if the exception interrupted userspace code (i.e.
- * user_mode_vm(regs) would return true) and the exception was not
+ * user_mode(regs) would return true) and the exception was not
  * a double fault, it can be safe to schedule. ist_begin_non_atomic()
  * begins a non-atomic section within an ist_enter()/ist_exit() region.
  * Callers are responsible for enabling interrupts themselves inside
@@ -167,7 +167,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
  */
 void ist_begin_non_atomic(struct pt_regs *regs)
 {
-        BUG_ON(!user_mode_vm(regs));
+        BUG_ON(!user_mode(regs));
 
         /*
          * Sanity check: we need to be on the normal thread stack. This
@@ -384,7 +384,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
                 goto exit;
         conditional_sti(regs);
 
-        if (!user_mode_vm(regs))
+        if (!user_mode(regs))
                 die("bounds", regs, error_code);
 
         if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
@@ -587,7 +587,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
         /* Copy the remainder of the stack from the current stack. */
         memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
 
-        BUG_ON(!user_mode_vm(&new_stack->regs));
+        BUG_ON(!user_mode(&new_stack->regs));
         return new_stack;
 }
 NOKPROBE_SYMBOL(fixup_bad_iret);
@@ -637,7 +637,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
          * then it's very likely the result of an icebp/int01 trap.
          * User wants a sigtrap for that.
          */
-        if (!dr6 && user_mode_vm(regs))
+        if (!dr6 && user_mode(regs))
                 user_icebp = 1;
 
         /* Catch kmemcheck conditions first of all! */
@@ -721,7 +721,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
                 return;
         conditional_sti(regs);
 
-        if (!user_mode_vm(regs))
+        if (!user_mode(regs))
         {
                 if (!fixup_exception(regs)) {
                         task->thread.error_code = error_code;
@@ -912,7 +912,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
         int ret = NOTIFY_DONE;
 
         /* We are only interested in userspace traps */
-        if (regs && !user_mode_vm(regs))
+        if (regs && !user_mode(regs))
                 return NOTIFY_DONE;
 
         switch (val) {
@@ -59,7 +59,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
         int ret = 0;
 
         /* kprobe_running() needs smp_processor_id() */
-        if (kprobes_built_in() && !user_mode_vm(regs)) {
+        if (kprobes_built_in() && !user_mode(regs)) {
                 preempt_disable();
                 if (kprobe_running() && kprobe_fault_handler(regs, 14))
                         ret = 1;
@@ -1035,7 +1035,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
         if (error_code & PF_USER)
                 return false;
 
-        if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
+        if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
                 return false;
 
         return true;
@@ -1140,7 +1140,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
          * User-mode registers count as a user access even for any
          * potential system fault or CPU buglet:
          */
-        if (user_mode_vm(regs)) {
+        if (user_mode(regs)) {
                 local_irq_enable();
                 error_code |= PF_USER;
                 flags |= FAULT_FLAG_USER;
@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
         struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
 
-        if (!user_mode_vm(regs)) {
+        if (!user_mode(regs)) {
                 unsigned long stack = kernel_stack_pointer(regs);
                 if (depth)
                         dump_trace(NULL, regs, (unsigned long *)stack, 0,
@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
 
                 if (((die_args->trapnr == X86_TRAP_MF) ||
                      (die_args->trapnr == X86_TRAP_XF)) &&
-                    !user_mode_vm(die_args->regs))
+                    !user_mode(die_args->regs))
                         xpc_die_deactivate();
 
                 break;