Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/asm/entry: Change all 'user_mode_vm()' calls to 'user_mode()'

user_mode_vm() and user_mode() are now the same. Change all callers
of user_mode_vm() to user_mode().

The next patch will remove the definition of user_mode_vm().

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brad Spengler <spender@grsecurity.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/43b1f57f3df70df5a08b0925897c660725015554.1426728647.git.luto@kernel.org
[ Merged to a more recent kernel. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Andy Lutomirski and committed by Ingo Molnar
f39b6f0e efa70451

+30 -30
+1 -1
arch/x86/kernel/alternative.c
··· 715 715 if (likely(!bp_patching_in_progress)) 716 716 return 0; 717 717 718 - if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr) 718 + if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr) 719 719 return 0; 720 720 721 721 /* set up the specified breakpoint handler */
+1 -1
arch/x86/kernel/crash.c
··· 105 105 #ifdef CONFIG_X86_32 106 106 struct pt_regs fixed_regs; 107 107 108 - if (!user_mode_vm(regs)) { 108 + if (!user_mode(regs)) { 109 109 crash_fixup_ss_esp(&fixed_regs, regs); 110 110 regs = &fixed_regs; 111 111 }
+2 -2
arch/x86/kernel/dumpstack.c
··· 278 278 print_modules(); 279 279 show_regs(regs); 280 280 #ifdef CONFIG_X86_32 281 - if (user_mode_vm(regs)) { 281 + if (user_mode(regs)) { 282 282 sp = regs->sp; 283 283 ss = regs->ss & 0xffff; 284 284 } else { ··· 307 307 unsigned long flags = oops_begin(); 308 308 int sig = SIGSEGV; 309 309 310 - if (!user_mode_vm(regs)) 310 + if (!user_mode(regs)) 311 311 report_bug(regs->ip, regs); 312 312 313 313 if (__die(str, regs, err))
+2 -2
arch/x86/kernel/dumpstack_32.c
··· 123 123 int i; 124 124 125 125 show_regs_print_info(KERN_EMERG); 126 - __show_regs(regs, !user_mode_vm(regs)); 126 + __show_regs(regs, !user_mode(regs)); 127 127 128 128 /* 129 129 * When in-kernel, we also print out the stack and code at the 130 130 * time of the fault.. 131 131 */ 132 - if (!user_mode_vm(regs)) { 132 + if (!user_mode(regs)) { 133 133 unsigned int code_prologue = code_bytes * 43 / 64; 134 134 unsigned int code_len = code_bytes; 135 135 unsigned char c;
+1 -1
arch/x86/kernel/i387.c
··· 68 68 static inline bool interrupted_user_mode(void) 69 69 { 70 70 struct pt_regs *regs = get_irq_regs(); 71 - return regs && user_mode_vm(regs); 71 + return regs && user_mode(regs); 72 72 } 73 73 74 74 /*
+1 -1
arch/x86/kernel/irq_32.c
··· 165 165 if (unlikely(!desc)) 166 166 return false; 167 167 168 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) { 168 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) { 169 169 if (unlikely(overflow)) 170 170 print_stack_overflow(); 171 171 desc->handle_irq(irq, desc);
+1 -1
arch/x86/kernel/irq_64.c
··· 44 44 u64 estack_top, estack_bottom; 45 45 u64 curbase = (u64)task_stack_page(current); 46 46 47 - if (user_mode_vm(regs)) 47 + if (user_mode(regs)) 48 48 return; 49 49 50 50 if (regs->sp >= curbase + sizeof(struct thread_info) +
+2 -2
arch/x86/kernel/kgdb.c
··· 126 126 #ifdef CONFIG_X86_32 127 127 switch (regno) { 128 128 case GDB_SS: 129 - if (!user_mode_vm(regs)) 129 + if (!user_mode(regs)) 130 130 *(unsigned long *)mem = __KERNEL_DS; 131 131 break; 132 132 case GDB_SP: 133 - if (!user_mode_vm(regs)) 133 + if (!user_mode(regs)) 134 134 *(unsigned long *)mem = kernel_stack_pointer(regs); 135 135 break; 136 136 case GDB_GS:
+2 -2
arch/x86/kernel/kprobes/core.c
··· 602 602 struct kprobe *p; 603 603 struct kprobe_ctlblk *kcb; 604 604 605 - if (user_mode_vm(regs)) 605 + if (user_mode(regs)) 606 606 return 0; 607 607 608 608 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); ··· 1007 1007 struct die_args *args = data; 1008 1008 int ret = NOTIFY_DONE; 1009 1009 1010 - if (args->regs && user_mode_vm(args->regs)) 1010 + if (args->regs && user_mode(args->regs)) 1011 1011 return ret; 1012 1012 1013 1013 if (val == DIE_GPF) {
+1 -1
arch/x86/kernel/process_32.c
··· 73 73 unsigned long sp; 74 74 unsigned short ss, gs; 75 75 76 - if (user_mode_vm(regs)) { 76 + if (user_mode(regs)) { 77 77 sp = regs->sp; 78 78 ss = regs->ss & 0xffff; 79 79 gs = get_user_gs(regs);
+1 -1
arch/x86/kernel/ptrace.c
··· 1415 1415 memset(info, 0, sizeof(*info)); 1416 1416 info->si_signo = SIGTRAP; 1417 1417 info->si_code = si_code; 1418 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL; 1418 + info->si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL; 1419 1419 } 1420 1420 1421 1421 void user_single_step_siginfo(struct task_struct *tsk,
+1 -1
arch/x86/kernel/time.c
··· 30 30 { 31 31 unsigned long pc = instruction_pointer(regs); 32 32 33 - if (!user_mode_vm(regs) && in_lock_functions(pc)) { 33 + if (!user_mode(regs) && in_lock_functions(pc)) { 34 34 #ifdef CONFIG_FRAME_POINTER 35 35 return *(unsigned long *)(regs->bp + sizeof(long)); 36 36 #else
+8 -8
arch/x86/kernel/traps.c
··· 112 112 { 113 113 enum ctx_state prev_state; 114 114 115 - if (user_mode_vm(regs)) { 115 + if (user_mode(regs)) { 116 116 /* Other than that, we're just an exception. */ 117 117 prev_state = exception_enter(); 118 118 } else { ··· 146 146 /* Must be before exception_exit. */ 147 147 preempt_count_sub(HARDIRQ_OFFSET); 148 148 149 - if (user_mode_vm(regs)) 149 + if (user_mode(regs)) 150 150 return exception_exit(prev_state); 151 151 else 152 152 rcu_nmi_exit(); ··· 158 158 * 159 159 * IST exception handlers normally cannot schedule. As a special 160 160 * exception, if the exception interrupted userspace code (i.e. 161 - * user_mode_vm(regs) would return true) and the exception was not 161 + * user_mode(regs) would return true) and the exception was not 162 162 * a double fault, it can be safe to schedule. ist_begin_non_atomic() 163 163 * begins a non-atomic section within an ist_enter()/ist_exit() region. 164 164 * Callers are responsible for enabling interrupts themselves inside ··· 167 167 */ 168 168 void ist_begin_non_atomic(struct pt_regs *regs) 169 169 { 170 - BUG_ON(!user_mode_vm(regs)); 170 + BUG_ON(!user_mode(regs)); 171 171 172 172 /* 173 173 * Sanity check: we need to be on the normal thread stack. This ··· 384 384 goto exit; 385 385 conditional_sti(regs); 386 386 387 - if (!user_mode_vm(regs)) 387 + if (!user_mode(regs)) 388 388 die("bounds", regs, error_code); 389 389 390 390 if (!cpu_feature_enabled(X86_FEATURE_MPX)) { ··· 587 587 /* Copy the remainder of the stack from the current stack. */ 588 588 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip)); 589 589 590 - BUG_ON(!user_mode_vm(&new_stack->regs)); 590 + BUG_ON(!user_mode(&new_stack->regs)); 591 591 return new_stack; 592 592 } 593 593 NOKPROBE_SYMBOL(fixup_bad_iret); ··· 637 637 * then it's very likely the result of an icebp/int01 trap. 638 638 * User wants a sigtrap for that. 
639 639 */ 640 - if (!dr6 && user_mode_vm(regs)) 640 + if (!dr6 && user_mode(regs)) 641 641 user_icebp = 1; 642 642 643 643 /* Catch kmemcheck conditions first of all! */ ··· 721 721 return; 722 722 conditional_sti(regs); 723 723 724 - if (!user_mode_vm(regs)) 724 + if (!user_mode(regs)) 725 725 { 726 726 if (!fixup_exception(regs)) { 727 727 task->thread.error_code = error_code;
+1 -1
arch/x86/kernel/uprobes.c
··· 912 912 int ret = NOTIFY_DONE; 913 913 914 914 /* We are only interested in userspace traps */ 915 - if (regs && !user_mode_vm(regs)) 915 + if (regs && !user_mode(regs)) 916 916 return NOTIFY_DONE; 917 917 918 918 switch (val) {
+3 -3
arch/x86/mm/fault.c
··· 59 59 int ret = 0; 60 60 61 61 /* kprobe_running() needs smp_processor_id() */ 62 - if (kprobes_built_in() && !user_mode_vm(regs)) { 62 + if (kprobes_built_in() && !user_mode(regs)) { 63 63 preempt_disable(); 64 64 if (kprobe_running() && kprobe_fault_handler(regs, 14)) 65 65 ret = 1; ··· 1035 1035 if (error_code & PF_USER) 1036 1036 return false; 1037 1037 1038 - if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC)) 1038 + if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC)) 1039 1039 return false; 1040 1040 1041 1041 return true; ··· 1140 1140 * User-mode registers count as a user access even for any 1141 1141 * potential system fault or CPU buglet: 1142 1142 */ 1143 - if (user_mode_vm(regs)) { 1143 + if (user_mode(regs)) { 1144 1144 local_irq_enable(); 1145 1145 error_code |= PF_USER; 1146 1146 flags |= FAULT_FLAG_USER;
+1 -1
arch/x86/oprofile/backtrace.c
··· 111 111 { 112 112 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs); 113 113 114 - if (!user_mode_vm(regs)) { 114 + if (!user_mode(regs)) { 115 115 unsigned long stack = kernel_stack_pointer(regs); 116 116 if (depth) 117 117 dump_trace(NULL, regs, (unsigned long *)stack, 0,
+1 -1
drivers/misc/sgi-xp/xpc_main.c
··· 1210 1210 1211 1211 if (((die_args->trapnr == X86_TRAP_MF) || 1212 1212 (die_args->trapnr == X86_TRAP_XF)) && 1213 - !user_mode_vm(die_args->regs)) 1213 + !user_mode(die_args->regs)) 1214 1214 xpc_die_deactivate(); 1215 1215 1216 1216 break;