Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/entry, x86/paravirt: Remove the unused usergs_sysret32 PV op

As a result of commit "x86/xen: Avoid fast syscall path for Xen PV
guests", the usergs_sysret32 pv op is no longer called by Xen PV
guests, and since they were the only users we can safely remove
it.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: david.vrabel@citrix.com
Cc: konrad.wilk@oracle.com
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1447970147-1733-4-git-send-email-boris.ostrovsky@oracle.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Boris Ostrovsky and committed by
Ingo Molnar
75ef8219 88c15ec9

+2 -48
+2 -8
arch/x86/entry/entry_64_compat.S
··· 18 18 19 19 .section .entry.text, "ax" 20 20 21 - #ifdef CONFIG_PARAVIRT 22 - ENTRY(native_usergs_sysret32) 23 - swapgs 24 - sysretl 25 - ENDPROC(native_usergs_sysret32) 26 - #endif 27 - 28 21 /* 29 22 * 32-bit SYSENTER instruction entry. 30 23 * ··· 231 238 xorq %r9, %r9 232 239 xorq %r10, %r10 233 240 movq RSP-ORIG_RAX(%rsp), %rsp 234 - USERGS_SYSRET32 241 + swapgs 242 + sysretl 235 243 END(entry_SYSCALL_compat) 236 244 237 245 /*
-5
arch/x86/include/asm/paravirt.h
··· 922 922 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ 923 923 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) 924 924 925 - #define USERGS_SYSRET32 \ 926 - PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \ 927 - CLBR_NONE, \ 928 - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32)) 929 - 930 925 #ifdef CONFIG_X86_32 931 926 #define GET_CR0_INTO_EAX \ 932 927 push %ecx; push %edx; \
-8
arch/x86/include/asm/paravirt_types.h
··· 165 165 */ 166 166 void (*usergs_sysret64)(void); 167 167 168 - /* 169 - * Switch to usermode gs and return to 32-bit usermode using 170 - * sysret. Used to return to 32-on-64 compat processes. 171 - * Other usermode register state, including %esp, must already 172 - * be restored. 173 - */ 174 - void (*usergs_sysret32)(void); 175 - 176 168 /* Normal iret. Jump to this with the standard iret stack 177 169 frame set up. */ 178 170 void (*iret)(void);
-1
arch/x86/kernel/asm-offsets_64.c
··· 23 23 { 24 24 #ifdef CONFIG_PARAVIRT 25 25 OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame); 26 - OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32); 27 26 OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64); 28 27 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); 29 28 BLANK();
-5
arch/x86/kernel/paravirt.c
··· 162 162 ret = paravirt_patch_ident_64(insnbuf, len); 163 163 164 164 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || 165 - type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) || 166 165 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64)) 167 166 /* If operation requires a jmp, then jmp */ 168 167 ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len); ··· 216 217 217 218 /* These are in entry.S */ 218 219 extern void native_iret(void); 219 - extern void native_usergs_sysret32(void); 220 220 extern void native_usergs_sysret64(void); 221 221 222 222 static struct resource reserve_ioports = { ··· 374 376 .load_sp0 = native_load_sp0, 375 377 376 378 #ifdef CONFIG_X86_64 377 - #ifdef CONFIG_IA32_EMULATION 378 - .usergs_sysret32 = native_usergs_sysret32, 379 - #endif 380 379 .usergs_sysret64 = native_usergs_sysret64, 381 380 #endif 382 381 .iret = native_iret,
-2
arch/x86/kernel/paravirt_patch_64.c
··· 14 14 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); 15 15 16 16 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq"); 17 - DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl"); 18 17 DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs"); 19 18 20 19 DEF_NATIVE(, mov32, "mov %edi, %eax"); ··· 53 54 PATCH_SITE(pv_irq_ops, save_fl); 54 55 PATCH_SITE(pv_irq_ops, irq_enable); 55 56 PATCH_SITE(pv_irq_ops, irq_disable); 56 - PATCH_SITE(pv_cpu_ops, usergs_sysret32); 57 57 PATCH_SITE(pv_cpu_ops, usergs_sysret64); 58 58 PATCH_SITE(pv_cpu_ops, swapgs); 59 59 PATCH_SITE(pv_mmu_ops, read_cr2);
-19
arch/x86/xen/xen-asm_64.S
··· 68 68 ENDPATCH(xen_sysret64) 69 69 RELOC(xen_sysret64, 1b+1) 70 70 71 - ENTRY(xen_sysret32) 72 - /* 73 - * We're already on the usermode stack at this point, but 74 - * still with the kernel gs, so we can easily switch back 75 - */ 76 - movq %rsp, PER_CPU_VAR(rsp_scratch) 77 - movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 78 - 79 - pushq $__USER32_DS 80 - pushq PER_CPU_VAR(rsp_scratch) 81 - pushq %r11 82 - pushq $__USER32_CS 83 - pushq %rcx 84 - 85 - pushq $0 86 - 1: jmp hypercall_iret 87 - ENDPATCH(xen_sysret32) 88 - RELOC(xen_sysret32, 1b+1) 89 - 90 71 /* 91 72 * Xen handles syscall callbacks much like ordinary exceptions, which 92 73 * means we have: