Merge branch 'x86-asmlinkage-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asmlinkage (LTO) changes from Peter Anvin:
"This patchset adds more infrastructure for link time optimization
(LTO).

This patchset was pulled into my tree late because of a
miscommunication (part of the patchset was picked up by other
maintainers). However, the patchset is strictly build-related and
seems to be okay in testing"

* 'x86-asmlinkage-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, asmlinkage, xen: Fix type of NMI
x86, asmlinkage, xen, kvm: Make {xen,kvm}_lock_spinning global and visible
x86: Use inline assembler instead of global register variable to get sp
x86, asmlinkage, paravirt: Make paravirt thunks global
x86, asmlinkage, paravirt: Don't rely on local assembler labels
x86, asmlinkage, lguest: Fix C functions used by inline assembler
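
The common thread in the commits above: under LTO the compiler may rename, clone or discard symbols whose only users are inline assembler or separate .S files, because it cannot see those references. The fix is to make such C functions and assembler labels global and mark them __visible (or declare them asmlinkage) instead of static. A minimal sketch of the pattern follows; the helper name my_helper and its thunk are made up for illustration, loosely modelled on the callee-save thunks changed below:

	#include <linux/compiler.h>	/* __visible */

	/*
	 * Hypothetical helper: it is only reached through the "call"
	 * emitted by the toplevel asm() below, a reference the optimizer
	 * cannot see.  If it were static, an LTO build could rename or
	 * drop it and the link would fail; __visible
	 * (__attribute__((externally_visible))) pins the symbol down.
	 */
	__visible void my_helper(void)
	{
		/* ... */
	}

	/*
	 * Simplified model of the thunks in this series: the label is
	 * made .globl so it also survives whole-program optimization.
	 * The real thunks additionally save and restore all
	 * caller-clobbered registers around the call.
	 */
	asm(".pushsection .text;"
	    ".globl my_helper_thunk;"
	    "my_helper_thunk: "
	    "call my_helper;"
	    "ret;"
	    ".popsection");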

10 files changed, 37 insertions(+), 34 deletions(-)

arch/x86/include/asm/paravirt.h | +1 -1

@@ -781,9 +781,9 @@
  */
 #define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
-	static void *__##func##__ __used = func;			\
									\
	asm(".pushsection .text;"					\
+	    ".globl __raw_callee_save_" #func " ; "			\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\

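For a concrete picture, the visible part of the new expansion of PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl) (the thunk used in vsmp_64.c further down) looks roughly like this; the hunk cuts off before the register-restore/ret tail, so that part is only sketched as a comment:

	extern typeof(vsmp_save_fl) __raw_callee_save_vsmp_save_fl;

	asm(".pushsection .text;"
	    ".globl __raw_callee_save_vsmp_save_fl ; "	/* new: label is global */
	    "__raw_callee_save_vsmp_save_fl: "
	    PV_SAVE_ALL_CALLER_REGS
	    "call vsmp_save_fl;"
	    /* ...register restore, ret and .popsection follow, beyond the hunk shown... */);

The dropped "static void *__##func##__ __used = func" line used to be the only thing keeping the C target alive; with it gone, the target functions themselves must be made asmlinkage/__visible, which is what the .c changes below do.
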
arch/x86/include/asm/paravirt_types.h | +5 -4

@@ -388,10 +388,11 @@
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
 
 /* Simple instruction patching code. */
-#define DEF_NATIVE(ops, name, code)					\
-	extern const char start_##ops##_##name[] __visible,		\
-			  end_##ops##_##name[] __visible;		\
-	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
+#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
+
+#define DEF_NATIVE(ops, name, code)					\
+	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
+	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
 
 unsigned paravirt_patch_nop(void);
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);

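Similarly, a native-patch template such as DEF_NATIVE(pv_irq_ops, irq_disable, "cli") (the ops/name/code triple here is only an example) now expands to roughly:

	__visible extern const char start_pv_irq_ops_irq_disable[],
				    end_pv_irq_ops_irq_disable[];
	asm("\n\t.globl start_pv_irq_ops_irq_disable\n"
	    "start_pv_irq_ops_irq_disable:\n\t"
	    "cli"
	    "\n\t.globl end_pv_irq_ops_irq_disable\n"
	    "end_pv_irq_ops_irq_disable:\n\t");

The start_*/end_* markers used to be plain assembler-local labels; emitting them as .globl and tagging the declarations __visible keeps the patching code able to find them once LTO merges the translation units.
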
arch/x86/include/asm/thread_info.h | +5 -3

@@ -163,9 +163,11 @@
  */
 #ifndef __ASSEMBLY__
 
-
-/* how to get the current stack pointer from C */
-register unsigned long current_stack_pointer asm("esp") __used;
+#define current_stack_pointer ({		\
+	unsigned long sp;			\
+	asm("mov %%esp,%0" : "=g" (sp));	\
+	sp;					\
+})
 
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)

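Global register variables of this kind are not supported with LTO, so the replacement macro simply reads %esp with a one-instruction asm each time it is evaluated. The user visible in the trailing context, current_thread_info(), keeps working unchanged; roughly (a sketch, not necessarily the exact kernel body):

	static inline struct thread_info *current_thread_info(void)
	{
		/* thread_info sits at the bottom of the THREAD_SIZE-aligned stack */
		return (struct thread_info *)
			(current_stack_pointer & ~(THREAD_SIZE - 1));
	}
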
arch/x86/kernel/kvm.c | +1 -1

@@ -673,7 +673,7 @@
 /* Track spinlock on which a cpu is waiting */
 static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
 
-static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
	struct kvm_lock_waiting *w;
	int cpu;

arch/x86/kernel/vsmp_64.c | +4 -4

@@ -33,7 +33,7 @@
  * and vice versa.
  */
 
-static unsigned long vsmp_save_fl(void)
+asmlinkage unsigned long vsmp_save_fl(void)
 {
	unsigned long flags = native_save_fl();
 
@@ -43,7 +43,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
 
-static void vsmp_restore_fl(unsigned long flags)
+__visible void vsmp_restore_fl(unsigned long flags)
 {
	if (flags & X86_EFLAGS_IF)
		flags &= ~X86_EFLAGS_AC;
@@ -53,7 +53,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
-static void vsmp_irq_disable(void)
+asmlinkage void vsmp_irq_disable(void)
 {
	unsigned long flags = native_save_fl();
 
@@ -61,7 +61,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
-static void vsmp_irq_enable(void)
+asmlinkage void vsmp_irq_enable(void)
 {
	unsigned long flags = native_save_fl();
 

arch/x86/lguest/boot.c | +6 -6

@@ -233,13 +233,13 @@
  * flags word contains all kind of stuff, but in practice Linux only cares
  * about the interrupt flag. Our "save_flags()" just returns that.
  */
-static unsigned long save_fl(void)
+asmlinkage unsigned long lguest_save_fl(void)
 {
	return lguest_data.irq_enabled;
 }
 
 /* Interrupts go off... */
-static void irq_disable(void)
+asmlinkage void lguest_irq_disable(void)
 {
	lguest_data.irq_enabled = 0;
 }
@@ -253,8 +253,8 @@
  * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
  * C function, then restores it.
  */
-PV_CALLEE_SAVE_REGS_THUNK(save_fl);
-PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
+PV_CALLEE_SAVE_REGS_THUNK(lguest_save_fl);
+PV_CALLEE_SAVE_REGS_THUNK(lguest_irq_disable);
 /*:*/
 
 /* These are in i386_head.S */
@@ -1291,9 +1291,9 @@
  */
 
	/* Interrupt-related operations */
-	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
+	pv_irq_ops.save_fl = PV_CALLEE_SAVE(lguest_save_fl);
	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
-	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
+	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(lguest_irq_disable);
	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
	pv_irq_ops.safe_halt = lguest_safe_halt;
 

arch/x86/xen/irq.c | +4 -4

@@ -23,7 +23,7 @@
	(void)HYPERVISOR_xen_version(0, NULL);
 }
 
-static unsigned long xen_save_fl(void)
+asmlinkage unsigned long xen_save_fl(void)
 {
	struct vcpu_info *vcpu;
	unsigned long flags;
@@ -41,7 +41,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 
-static void xen_restore_fl(unsigned long flags)
+__visible void xen_restore_fl(unsigned long flags)
 {
	struct vcpu_info *vcpu;
 
@@ -63,7 +63,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
-static void xen_irq_disable(void)
+asmlinkage void xen_irq_disable(void)
 {
	/* There's a one instruction preempt window here.  We need to
	   make sure we're don't switch CPUs between getting the vcpu
@@ -74,7 +74,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-static void xen_irq_enable(void)
+asmlinkage void xen_irq_enable(void)
 {
	struct vcpu_info *vcpu;
 

arch/x86/xen/mmu.c | +8 -8

@@ -431,7 +431,7 @@
	return val;
 }
 
-static pteval_t xen_pte_val(pte_t pte)
+__visible pteval_t xen_pte_val(pte_t pte)
 {
	pteval_t pteval = pte.pte;
 #if 0
@@ -448,7 +448,7 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
-static pgdval_t xen_pgd_val(pgd_t pgd)
+__visible pgdval_t xen_pgd_val(pgd_t pgd)
 {
	return pte_mfn_to_pfn(pgd.pgd);
 }
@@ -479,7 +479,7 @@
	WARN_ON(pat != 0x0007010600070106ull);
 }
 
-static pte_t xen_make_pte(pteval_t pte)
+__visible pte_t xen_make_pte(pteval_t pte)
 {
	phys_addr_t addr = (pte & PTE_PFN_MASK);
 #if 0
@@ -514,14 +514,14 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
-static pgd_t xen_make_pgd(pgdval_t pgd)
+__visible pgd_t xen_make_pgd(pgdval_t pgd)
 {
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
-static pmdval_t xen_pmd_val(pmd_t pmd)
+__visible pmdval_t xen_pmd_val(pmd_t pmd)
 {
	return pte_mfn_to_pfn(pmd.pmd);
 }
@@ -580,7 +580,7 @@
 }
 #endif /* CONFIG_X86_PAE */
 
-static pmd_t xen_make_pmd(pmdval_t pmd)
+__visible pmd_t xen_make_pmd(pmdval_t pmd)
 {
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
@@ -588,13 +588,13 @@
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
 #if PAGETABLE_LEVELS == 4
-static pudval_t xen_pud_val(pud_t pud)
+__visible pudval_t xen_pud_val(pud_t pud)
 {
	return pte_mfn_to_pfn(pud.pud);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
 
-static pud_t xen_make_pud(pudval_t pud)
+__visible pud_t xen_make_pud(pudval_t pud)
 {
	pud = pte_pfn_to_mfn(pud);
 

arch/x86/xen/setup.c | +2 -2

@@ -35,7 +35,7 @@
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 #ifdef CONFIG_X86_64
-extern const char nmi[];
+extern asmlinkage void nmi(void);
 #endif
 extern void xen_sysenter_target(void);
 extern void xen_syscall_target(void);
@@ -577,7 +577,7 @@
 void xen_enable_nmi(void)
 {
 #ifdef CONFIG_X86_64
-	if (register_callback(CALLBACKTYPE_nmi, nmi))
+	if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
		BUG();
 #endif
 }

arch/x86/xen/spinlock.c | +1 -1

@@ -106,7 +106,7 @@
 static cpumask_t waiting_cpus;
 
 static bool xen_pvspin = true;
-static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
	int irq = __this_cpu_read(lock_kicker_irq);
	struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);