Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/paravirt: Remove the unused irq_enable_sysexit pv op

As a result of the commit "x86/xen: Avoid fast syscall path for Xen PV
guests", the irq_enable_sysexit pv op is no longer called by Xen PV
guests, and since they were the only users of it, we can safely
remove it.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: david.vrabel@citrix.com
Cc: konrad.wilk@oracle.com
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1447970147-1733-3-git-send-email-boris.ostrovsky@oracle.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Boris Ostrovsky and committed by
Ingo Molnar
88c15ec9 5fdf5d37

+2 -55
+2 -6
arch/x86/entry/entry_32.S
··· 329 329 * Return back to the vDSO, which will pop ecx and edx. 330 330 * Don't bother with DS and ES (they already contain __USER_DS). 331 331 */ 332 - ENABLE_INTERRUPTS_SYSEXIT 332 + sti 333 + sysexit 333 334 334 335 .pushsection .fixup, "ax" 335 336 2: movl $0, PT_FS(%esp) ··· 553 552 iret 554 553 _ASM_EXTABLE(native_iret, iret_exc) 555 554 END(native_iret) 556 - 557 - ENTRY(native_irq_enable_sysexit) 558 - sti 559 - sysexit 560 - END(native_irq_enable_sysexit) 561 555 #endif 562 556 563 557 ENTRY(overflow)
-7
arch/x86/include/asm/paravirt.h
··· 932 932 push %ecx; push %edx; \ 933 933 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ 934 934 pop %edx; pop %ecx 935 - 936 - #define ENABLE_INTERRUPTS_SYSEXIT \ 937 - PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ 938 - CLBR_NONE, \ 939 - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) 940 - 941 - 942 935 #else /* !CONFIG_X86_32 */ 943 936 944 937 /*
-9
arch/x86/include/asm/paravirt_types.h
··· 157 157 158 158 u64 (*read_pmc)(int counter); 159 159 160 - #ifdef CONFIG_X86_32 161 - /* 162 - * Atomically enable interrupts and return to userspace. This 163 - * is only used in 32-bit kernels. 64-bit kernels use 164 - * usergs_sysret32 instead. 165 - */ 166 - void (*irq_enable_sysexit)(void); 167 - #endif 168 - 169 160 /* 170 161 * Switch to usermode gs and return to 64-bit usermode using 171 162 * sysret. Only used in 64-bit kernels to return to 64-bit
-3
arch/x86/kernel/asm-offsets.c
··· 65 65 OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable); 66 66 OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable); 67 67 OFFSET(PV_CPU_iret, pv_cpu_ops, iret); 68 - #ifdef CONFIG_X86_32 69 - OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); 70 - #endif 71 68 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); 72 69 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); 73 70 #endif
-7
arch/x86/kernel/paravirt.c
··· 162 162 ret = paravirt_patch_ident_64(insnbuf, len); 163 163 164 164 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || 165 - #ifdef CONFIG_X86_32 166 - type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) || 167 - #endif 168 165 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) || 169 166 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64)) 170 167 /* If operation requires a jmp, then jmp */ ··· 217 220 218 221 /* These are in entry.S */ 219 222 extern void native_iret(void); 220 - extern void native_irq_enable_sysexit(void); 221 223 extern void native_usergs_sysret32(void); 222 224 extern void native_usergs_sysret64(void); 223 225 ··· 375 379 376 380 .load_sp0 = native_load_sp0, 377 381 378 - #if defined(CONFIG_X86_32) 379 - .irq_enable_sysexit = native_irq_enable_sysexit, 380 - #endif 381 382 #ifdef CONFIG_X86_64 382 383 #ifdef CONFIG_IA32_EMULATION 383 384 .usergs_sysret32 = native_usergs_sysret32,
-2
arch/x86/kernel/paravirt_patch_32.c
··· 5 5 DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf"); 6 6 DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax"); 7 7 DEF_NATIVE(pv_cpu_ops, iret, "iret"); 8 - DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit"); 9 8 DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax"); 10 9 DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3"); 11 10 DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax"); ··· 45 46 PATCH_SITE(pv_irq_ops, restore_fl); 46 47 PATCH_SITE(pv_irq_ops, save_fl); 47 48 PATCH_SITE(pv_cpu_ops, iret); 48 - PATCH_SITE(pv_cpu_ops, irq_enable_sysexit); 49 49 PATCH_SITE(pv_mmu_ops, read_cr2); 50 50 PATCH_SITE(pv_mmu_ops, read_cr3); 51 51 PATCH_SITE(pv_mmu_ops, write_cr3);
-1
arch/x86/kernel/paravirt_patch_64.c
··· 13 13 DEF_NATIVE(pv_cpu_ops, clts, "clts"); 14 14 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); 15 15 16 - DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit"); 17 16 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq"); 18 17 DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl"); 19 18 DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
-3
arch/x86/xen/enlighten.c
··· 1229 1229 1230 1230 .iret = xen_iret, 1231 1231 #ifdef CONFIG_X86_64 1232 - .usergs_sysret32 = xen_sysret32, 1233 1232 .usergs_sysret64 = xen_sysret64, 1234 - #else 1235 - .irq_enable_sysexit = xen_sysexit, 1236 1233 #endif 1237 1234 1238 1235 .load_tr_desc = paravirt_nop,
-14
arch/x86/xen/xen-asm_32.S
··· 35 35 ret 36 36 37 37 /* 38 - * We can't use sysexit directly, because we're not running in ring0. 39 - * But we can easily fake it up using iret. Assuming xen_sysexit is 40 - * jumped to with a standard stack frame, we can just strip it back to 41 - * a standard iret frame and use iret. 42 - */ 43 - ENTRY(xen_sysexit) 44 - movl PT_EAX(%esp), %eax /* Shouldn't be necessary? */ 45 - orl $X86_EFLAGS_IF, PT_EFLAGS(%esp) 46 - lea PT_EIP(%esp), %esp 47 - 48 - jmp xen_iret 49 - ENDPROC(xen_sysexit) 50 - 51 - /* 52 38 * This is run where a normal iret would be run, with the same stack setup: 53 39 * 8: eflags 54 40 * 4: cs
-3
arch/x86/xen/xen-ops.h
··· 139 139 140 140 /* These are not functions, and cannot be called normally */ 141 141 __visible void xen_iret(void); 142 - #ifdef CONFIG_X86_32 143 - __visible void xen_sysexit(void); 144 - #endif 145 142 __visible void xen_sysret32(void); 146 143 __visible void xen_sysret64(void); 147 144 __visible void xen_adjust_exception_frame(void);