Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Peter Anvin:
"A somewhat unpleasantly large collection of small fixes. The big ones
are the __visible tree sweep and a fix for 'earlyprintk=efi,keep'; the
latter was using __init functions, with predictably suboptimal results.

Another key fix is for a build bug that would produce output that
simply would not decompress correctly in some configurations, due to
the existing Makefiles picking up an unfortunate local label and
mistaking it for the global symbol _end.

Additional fixes include the handling of 64-bit numbers when setting
the vdso data page (a latent bug which became manifest when i386
started exporting a vdso with time functions), a fix to the new MSR
manipulation accessors which would cause features to not get properly
unblocked, a build fix for 32-bit userland, and a few new platform
quirks"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, vdso, time: Cast tv_nsec to u64 for proper shifting in update_vsyscall()
x86: Fix typo in MSR_IA32_MISC_ENABLE_LIMIT_CPUID macro
x86: Fix typo preventing msr_set/clear_bit from having an effect
x86/intel: Add quirk to disable HPET for the Baytrail platform
x86/hpet: Make boot_hpet_disable extern
x86-64, build: Fix stack protector Makefile breakage with 32-bit userland
x86/reboot: Add reboot quirk for Certec BPC600
asmlinkage: Add explicit __visible to drivers/*, lib/*, kernel/*
asmlinkage, x86: Add explicit __visible to arch/x86/*
asmlinkage: Revert "lto: Make asmlinkage __visible"
x86, build: Don't get confused by local symbols
x86/efi: earlyprintk=efi,keep fix

Changed files: +150 -77
arch/x86/Makefile (+1)
···
 UTS_MACHINE := x86_64
 CHECKFLAGS += -D__x86_64__ -m64
 
+biarch := -m64
 KBUILD_AFLAGS += -m64
 KBUILD_CFLAGS += -m64
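Note on the hunk above: biarch was previously only defined in the 32-bit
branch of this Makefile, so on a 64-bit build any compiler probe that passes
$(biarch) ran with the compiler's default target, which for a 32-bit
userland toolchain is -m32. That is presumably how the stack-protector
capability check (the "Fix stack protector Makefile breakage with 32-bit
userland" entry in the shortlog) came to misdetect support; always setting
biarch := -m64 in the 64-bit branch makes those probes test the word size
the kernel is actually built for.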
arch/x86/boot/Makefile (+2 -2)
···
 
 SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
 
-sed-voffset := -e 's/^\([0-9a-fA-F]*\) . \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
+sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
 
 quiet_cmd_voffset = VOFFSET $@
       cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
···
 $(obj)/voffset.h: vmlinux FORCE
 	$(call if_changed,voffset)
 
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
 
 quiet_cmd_zoffset = ZOFFSET $@
       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
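Background for the character-class change: nm prints one symbol per line as
"value type name", where an uppercase type letter (T, B, D, ...) marks a
global symbol and a lowercase letter a local one. The old lone '.' matched
any type character, so a stray local symbol named _end could satisfy the
pattern too. A hypothetical nm excerpt (addresses and the local symbol are
invented for illustration):

    00000000000001a8 t _end    <- local; matched by '.', not by [ABCDGRSTVW]
    ffffffff81000000 T _text   <- global; matched by both patterns
    ffffffff82050000 B _end    <- the global _end the generated header needs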
arch/x86/boot/compressed/misc.c (+1 -1)
···
 	free(phdrs);
 }
 
-asmlinkage void *decompress_kernel(void *rmode, memptr heap,
+asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
 				  unsigned char *input_data,
 				  unsigned long input_len,
 				  unsigned char *output,
arch/x86/include/asm/hpet.h (+1)
···
 /* hpet memory map physical address */
 extern unsigned long hpet_address;
 extern unsigned long force_hpet_address;
+extern int boot_hpet_disable;
 extern u8 hpet_blockid;
 extern int hpet_force_user;
 extern u8 hpet_msi_disable;
arch/x86/include/uapi/asm/msr-index.h (+1 -1)
···
 #define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18
 #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
 #define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT);
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23
 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
 #define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34
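Why the stray semicolon matters: a macro body that ends in ';' drags that
semicolon into every expansion, which is fatal as soon as the macro is used
inside an expression. A minimal, self-contained sketch (hypothetical macro
and function names, not kernel code):

    /* Pre-fix and post-fix shapes of the definition: */
    #define LIMIT_CPUID_FIXED  (1ULL << 22)
    #define LIMIT_CPUID_BROKEN (1ULL << 22);   /* note the stray ';' */

    int has_limit_cpuid(unsigned long long misc_enable)
    {
            /*
             * Using the broken macro here would expand to
             *         return !!(misc_enable & (1ULL << 22););
             * which does not compile; the fixed macro expands cleanly:
             */
            return !!(misc_enable & LIMIT_CPUID_FIXED);
    }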
arch/x86/kernel/acpi/sleep.c (+1 -1)
···
  *
  * Wrapper around acpi_enter_sleep_state() to be called by assmebly.
  */
-acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
+acpi_status asmlinkage __visible x86_acpi_enter_sleep_state(u8 state)
 {
 	return acpi_enter_sleep_state(state);
 }
arch/x86/kernel/apic/io_apic.c (+1 -1)
···
 	cfg->move_in_progress = 0;
 }
 
-asmlinkage void smp_irq_move_cleanup_interrupt(void)
+asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 {
 	unsigned vector, me;
 
arch/x86/kernel/cpu/mcheck/therm_throt.c (+2 -2)
···
 	smp_thermal_vector();
 }
 
-asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	__smp_thermal_interrupt();
 	exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
arch/x86/kernel/cpu/mcheck/threshold.c (+2 -2)
···
 	mce_threshold_vector();
 }
 
-asmlinkage void smp_threshold_interrupt(void)
+asmlinkage __visible void smp_threshold_interrupt(void)
 {
 	entering_irq();
 	__smp_threshold_interrupt();
 	exiting_ack_irq();
 }
 
-asmlinkage void smp_trace_threshold_interrupt(void)
+asmlinkage __visible void smp_trace_threshold_interrupt(void)
 {
 	entering_irq();
 	trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
arch/x86/kernel/early-quirks.c (+16)
···
 #include <asm/dma.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
+#include <asm/hpet.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/irq_remapping.h>
···
 	}
 }
 
+static void __init force_disable_hpet(int num, int slot, int func)
+{
+#ifdef CONFIG_HPET_TIMER
+	boot_hpet_disable = 1;
+	pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
+#endif
+}
+
+
 #define QFLAG_APPLY_ONCE 0x1
 #define QFLAG_APPLIED 0x2
 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
···
 	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
 	  QFLAG_APPLY_ONCE, intel_graphics_stolen },
+	/*
+	 * HPET on current version of Baytrail platform has accuracy
+	 * problems, disable it for now:
+	 */
+	{ PCI_VENDOR_ID_INTEL, 0x0f00,
+		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
 	{}
 };
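For context: entries in early_qrk[] are applied by early_quirks(), which
walks the devices on PCI bus 0 using the raw early config-space accessors,
well before normal PCI enumeration; that is what lets this quirk set
boot_hpet_disable before hpet_enable() ever looks at the timer. Device
8086:0f00 with class PCI_CLASS_BRIDGE_HOST appears to be the Baytrail SoC's
host bridge, so the quirk keys on the platform rather than on the HPET
device itself.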
arch/x86/kernel/head32.c (+1 -1)
···
 	reserve_ebda_region();
 }
 
-asmlinkage void __init i386_start_kernel(void)
+asmlinkage __visible void __init i386_start_kernel(void)
 {
 	sanitize_boot_params(&boot_params);
 
arch/x86/kernel/head64.c (+1 -1)
···
 	}
 }
 
-asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
+asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 {
 	int i;
 
arch/x86/kernel/hpet.c (+1 -1)
···
 /*
  * HPET command line enable / disable
  */
-static int boot_hpet_disable;
+int boot_hpet_disable;
 int hpet_force_user;
 static int hpet_verbose;
arch/x86/kernel/process_64.c (+1 -1)
···
 
 asmlinkage extern void ret_from_fork(void);
 
-asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
+__visible DEFINE_PER_CPU(unsigned long, old_rsp);
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
arch/x86/kernel/reboot.c (+10)
···
 		},
 	},
 
+	/* Certec */
+	{	/* Handle problems with rebooting on Certec BPC600 */
+		.callback = set_pci_reboot,
+		.ident = "Certec BPC600",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Certec"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "BPC600"),
+		},
+	},
+
 	/* Dell */
 	{	/* Handle problems with rebooting on Dell DXP061 */
 		.callback = set_bios_reboot,
arch/x86/kernel/smp.c (+1 -1)
···
  * this function calls the 'stop' function on all other CPUs in the system.
  */
 
-asmlinkage void smp_reboot_interrupt(void)
+asmlinkage __visible void smp_reboot_interrupt(void)
 {
 	ack_APIC_irq();
 	irq_enter();
arch/x86/kernel/traps.c (+3 -3)
···
  * for scheduling or signal handling. The actual stack switch is done in
  * entry.S
  */
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
 	struct pt_regs *regs = eregs;
 	/* Did already sync */
···
 #endif
 }
 
-asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
 {
 }
 
-asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
 {
 }
 
arch/x86/kernel/vsmp_64.c (+3 -3)
···
  * and vice versa.
  */
 
-asmlinkage unsigned long vsmp_save_fl(void)
+asmlinkage __visible unsigned long vsmp_save_fl(void)
 {
 	unsigned long flags = native_save_fl();
 
···
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
-asmlinkage void vsmp_irq_disable(void)
+asmlinkage __visible void vsmp_irq_disable(void)
 {
 	unsigned long flags = native_save_fl();
 
···
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
-asmlinkage void vsmp_irq_enable(void)
+asmlinkage __visible void vsmp_irq_enable(void)
 {
 	unsigned long flags = native_save_fl();
 
arch/x86/kernel/vsyscall_gtod.c (+1 -1)
···
 	vdata->monotonic_time_sec = tk->xtime_sec
 					+ tk->wall_to_monotonic.tv_sec;
 	vdata->monotonic_time_snsec = tk->xtime_nsec
-					+ (tk->wall_to_monotonic.tv_nsec
+					+ ((u64)tk->wall_to_monotonic.tv_nsec
 						<< tk->shift);
 	while (vdata->monotonic_time_snsec >=
 					(((u64)NSEC_PER_SEC) << tk->shift)) {
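A minimal sketch of the underlying C pitfall, relevant on 32-bit where
tv_nsec is a 32-bit long (the shift amount and value below are invented for
illustration): a left shift is performed in the promoted type of its left
operand, so without the cast the intermediate result is computed, and
overflows, in 32 bits before it is widened for the 64-bit addition.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* int32_t stands in for i386's 32-bit long. */
            int32_t tv_nsec = 999999999;    /* ~2^30, a legal nanosecond count */
            int shift = 8;                  /* a plausible timekeeper shift    */

            /* Shifted as a 32-bit value: overflows (formally undefined)
             * before being widened to 64 bits. */
            uint64_t bad = tv_nsec << shift;
            /* Shifted in 64 bits, as the fix does: all bits survive. */
            uint64_t good = (uint64_t)tv_nsec << shift;

            printf("bad=%llu good=%llu\n",
                   (unsigned long long)bad, (unsigned long long)good);
            return 0;
    }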
arch/x86/kvm/x86.c (+1 -1)
···
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-asmlinkage void kvm_spurious_fault(void)
+asmlinkage __visible void kvm_spurious_fault(void)
 {
 	/* Fault while not rebooting. We want the trace. */
 	BUG();
arch/x86/lguest/boot.c (+2 -2)
···
  * flags word contains all kind of stuff, but in practice Linux only cares
  * about the interrupt flag. Our "save_flags()" just returns that.
  */
-asmlinkage unsigned long lguest_save_fl(void)
+asmlinkage __visible unsigned long lguest_save_fl(void)
 {
 	return lguest_data.irq_enabled;
 }
 
 /* Interrupts go off... */
-asmlinkage void lguest_irq_disable(void)
+asmlinkage __visible void lguest_irq_disable(void)
 {
 	lguest_data.irq_enabled = 0;
 }
arch/x86/lib/msr.c (+1 -1)
···
 	if (m1.q == m.q)
 		return 0;
 
-	err = msr_write(msr, &m);
+	err = msr_write(msr, &m1);
 	if (err)
 		return err;
 
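The shape of the bug, reduced to a self-contained sketch; rd(), wr() and
flip_bit() are hypothetical stand-ins for the real helpers in this file.
The function read the MSR into m, computed the new value in the copy m1,
then wrote the unmodified m back, so msr_set_bit()/msr_clear_bit() were
no-ops precisely when a change was needed:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_msr;    /* stands in for the hardware register */
    static int rd(uint32_t msr, uint64_t *v) { (void)msr; *v = fake_msr; return 0; }
    static int wr(uint32_t msr, uint64_t v)  { (void)msr; fake_msr = v;  return 0; }

    static int flip_bit(uint32_t msr, int bit, bool set)
    {
            uint64_t m, m1;

            if (rd(msr, &m))
                    return -1;

            m1 = set ? (m | (1ULL << bit)) : (m & ~(1ULL << bit));
            if (m1 == m)
                    return 0;       /* already in the desired state */

            return wr(msr, m1);     /* the typo passed 'm' here: a silent no-op */
    }

    int main(void)
    {
            flip_bit(0x1a0, 22, true);
            printf("msr after set: %#llx\n", (unsigned long long)fake_msr);
            return 0;
    }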
arch/x86/math-emu/errors.c (+8 -8)
···
    0x242 in div_Xsig.S
  */
 
-asmlinkage void FPU_exception(int n)
+asmlinkage __visible void FPU_exception(int n)
 {
 	int i, int_type;
 
···
 
 /* Invalid arith operation on Valid registers */
 /* Returns < 0 if the exception is unmasked */
-asmlinkage int arith_invalid(int deststnr)
+asmlinkage __visible int arith_invalid(int deststnr)
 {
 
 	EXCEPTION(EX_Invalid);
···
 }
 
 /* Divide a finite number by zero */
-asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign)
+asmlinkage __visible int FPU_divide_by_zero(int deststnr, u_char sign)
 {
 	FPU_REG *dest = &st(deststnr);
 	int tag = TAG_Valid;
···
 }
 
 /* This may be called often, so keep it lean */
-asmlinkage void set_precision_flag_up(void)
+asmlinkage __visible void set_precision_flag_up(void)
 {
 	if (control_word & CW_Precision)
 		partial_status |= (SW_Precision | SW_C1);	/* The masked response */
···
 }
 
 /* This may be called often, so keep it lean */
-asmlinkage void set_precision_flag_down(void)
+asmlinkage __visible void set_precision_flag_down(void)
 {
 	if (control_word & CW_Precision) {	/* The masked response */
 		partial_status &= ~SW_C1;
···
 	EXCEPTION(EX_Precision);
 }
 
-asmlinkage int denormal_operand(void)
+asmlinkage __visible int denormal_operand(void)
 {
 	if (control_word & CW_Denormal) {	/* The masked response */
 		partial_status |= SW_Denorm_Op;
···
 	}
 }
 
-asmlinkage int arith_overflow(FPU_REG *dest)
+asmlinkage __visible int arith_overflow(FPU_REG *dest)
 {
 	int tag = TAG_Valid;
 
···
 
 }
 
-asmlinkage int arith_underflow(FPU_REG *dest)
+asmlinkage __visible int arith_underflow(FPU_REG *dest)
 {
 	int tag = TAG_Valid;
 
arch/x86/platform/efi/early_printk.c (+64 -19)
···
 
 static const struct font_desc *font;
 static u32 efi_x, efi_y;
+static void *efi_fb;
+static bool early_efi_keep;
 
-static __init void early_efi_clear_scanline(unsigned int y)
+/*
+ * efi earlyprintk need use early_ioremap to map the framebuffer.
+ * But early_ioremap is not usable for earlyprintk=efi,keep, ioremap should
+ * be used instead. ioremap will be available after paging_init() which is
+ * earlier than initcall callbacks. Thus adding this early initcall function
+ * early_efi_map_fb to map the whole efi framebuffer.
+ */
+static __init int early_efi_map_fb(void)
 {
-	unsigned long base, *dst;
-	u16 len;
+	unsigned long base, size;
+
+	if (!early_efi_keep)
+		return 0;
 
 	base = boot_params.screen_info.lfb_base;
-	len = boot_params.screen_info.lfb_linelength;
+	size = boot_params.screen_info.lfb_size;
+	efi_fb = ioremap(base, size);
 
-	dst = early_ioremap(base + y*len, len);
+	return efi_fb ? 0 : -ENOMEM;
+}
+early_initcall(early_efi_map_fb);
+
+/*
+ * early_efi_map maps efi framebuffer region [start, start + len -1]
+ * In case earlyprintk=efi,keep we have the whole framebuffer mapped already
+ * so just return the offset efi_fb + start.
+ */
+static __init_refok void *early_efi_map(unsigned long start, unsigned long len)
+{
+	unsigned long base;
+
+	base = boot_params.screen_info.lfb_base;
+
+	if (efi_fb)
+		return (efi_fb + start);
+	else
+		return early_ioremap(base + start, len);
+}
+
+static __init_refok void early_efi_unmap(void *addr, unsigned long len)
+{
+	if (!efi_fb)
+		early_iounmap(addr, len);
+}
+
+static void early_efi_clear_scanline(unsigned int y)
+{
+	unsigned long *dst;
+	u16 len;
+
+	len = boot_params.screen_info.lfb_linelength;
+	dst = early_efi_map(y*len, len);
 	if (!dst)
 		return;
 
 	memset(dst, 0, len);
-	early_iounmap(dst, len);
+	early_efi_unmap(dst, len);
 }
 
-static __init void early_efi_scroll_up(void)
+static void early_efi_scroll_up(void)
 {
-	unsigned long base, *dst, *src;
+	unsigned long *dst, *src;
 	u16 len;
 	u32 i, height;
 
-	base = boot_params.screen_info.lfb_base;
 	len = boot_params.screen_info.lfb_linelength;
 	height = boot_params.screen_info.lfb_height;
 
 	for (i = 0; i < height - font->height; i++) {
-		dst = early_ioremap(base + i*len, len);
+		dst = early_efi_map(i*len, len);
 		if (!dst)
 			return;
 
-		src = early_ioremap(base + (i + font->height) * len, len);
+		src = early_efi_map((i + font->height) * len, len);
 		if (!src) {
-			early_iounmap(dst, len);
+			early_efi_unmap(dst, len);
 			return;
 		}
 
 		memmove(dst, src, len);
 
-		early_iounmap(src, len);
-		early_iounmap(dst, len);
+		early_efi_unmap(src, len);
+		early_efi_unmap(dst, len);
 	}
 }
···
 	}
 }
 
-static __init void
+static void
 early_efi_write(struct console *con, const char *str, unsigned int num)
 {
 	struct screen_info *si;
-	unsigned long base;
 	unsigned int len;
 	const char *s;
 	void *dst;
 
-	base = boot_params.screen_info.lfb_base;
 	si = &boot_params.screen_info;
 	len = si->lfb_linelength;
···
 		for (h = 0; h < font->height; h++) {
 			unsigned int n, x;
 
-			dst = early_ioremap(base + (efi_y + h) * len, len);
+			dst = early_efi_map((efi_y + h) * len, len);
 			if (!dst)
 				return;
···
 				s++;
 			}
 
-			early_iounmap(dst, len);
+			early_efi_unmap(dst, len);
 			num -= count;
···
 	for (i = 0; i < (yres - efi_y) / font->height; i++)
 		early_efi_scroll_up();
 
+	/* early_console_register will unset CON_BOOT in case ,keep */
+	if (!(con->flags & CON_BOOT))
+		early_efi_keep = true;
 	return 0;
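A note on the annotations in that hunk: the write/scroll/clear paths drop
their __init markings because, with earlyprintk=efi,keep, the console keeps
running after init memory has been freed, and calling discarded __init code
is exactly the failure the merge message alludes to. early_efi_map() and
early_efi_unmap() are marked __init_refok because they may still call the
__init-marked early_ioremap()/early_iounmap(), but only on the pre-efi_fb
path that runs during init, so the cross-section reference is intentional
and the annotation silences modpost's section-mismatch warning.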
arch/x86/platform/olpc/olpc-xo1-pm.c (+1 -1)
···
 	return 0;
 }
 
-asmlinkage int xo1_do_sleep(u8 sleep_state)
+asmlinkage __visible int xo1_do_sleep(u8 sleep_state)
 {
 	void *pgd_addr = __va(read_cr3());
 
arch/x86/power/hibernate_64.c (+1 -1)
···
 extern __visible const void __nosave_begin, __nosave_end;
 
 /* Defined in hibernate_asm_64.S */
-extern asmlinkage int restore_image(void);
+extern asmlinkage __visible int restore_image(void);
 
 /*
  * Address to jump to in the last phase of restore in order to get to the image
arch/x86/xen/enlighten.c (+1 -1)
···
 }
 
 /* First C function to be called on Xen boot */
-asmlinkage void __init xen_start_kernel(void)
+asmlinkage __visible void __init xen_start_kernel(void)
 {
 	struct physdev_set_iopl set_iopl;
 	int rc;
arch/x86/xen/irq.c (+3 -3)
···
 	(void)HYPERVISOR_xen_version(0, NULL);
 }
 
-asmlinkage unsigned long xen_save_fl(void)
+asmlinkage __visible unsigned long xen_save_fl(void)
 {
 	struct vcpu_info *vcpu;
 	unsigned long flags;
···
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
-asmlinkage void xen_irq_disable(void)
+asmlinkage __visible void xen_irq_disable(void)
 {
 	/* There's a one instruction preempt window here. We need to
 	   make sure we're don't switch CPUs between getting the vcpu
···
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-asmlinkage void xen_irq_enable(void)
+asmlinkage __visible void xen_irq_enable(void)
 {
 	struct vcpu_info *vcpu;
 
drivers/pnp/pnpbios/bioscalls.c (+1 -1)
···
  * kernel begins at offset 3GB...
  */
 
-asmlinkage void pnp_bios_callfunc(void);
+asmlinkage __visible void pnp_bios_callfunc(void);
 
 __asm__(".text			\n"
 	__ALIGN_STR "\n"
include/linux/linkage.h (+2 -2)
···
 #endif
 
 #ifdef __cplusplus
-#define CPP_ASMLINKAGE extern "C" __visible
+#define CPP_ASMLINKAGE extern "C"
 #else
-#define CPP_ASMLINKAGE __visible
+#define CPP_ASMLINKAGE
 #endif
 
 #ifndef asmlinkage
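For reference on the revert plus the tree-wide sweep above: rather than
hiding __visible inside every asmlinkage, only symbols that really are
referenced from outside the compiler's view (entry assembly, mostly) now
carry the attribute explicitly. A rough sketch of what it means; the
definitions below only approximate the kernel's compiler headers (gcc >=
4.6, x86-64 flavor) and are for illustration:

    /* Approximate definitions: */
    #define asmlinkage      /* empty on x86-64; i386 adds regparm(0) */
    #define __visible __attribute__((externally_visible))

    /*
     * externally_visible tells gcc's LTO/whole-program optimizer that the
     * function is referenced from code it cannot see (here, the entry
     * assembly), so it must not be discarded or localized even though no
     * C caller is visible:
     */
    asmlinkage __visible void smp_reboot_interrupt(void);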
init/main.c (+1 -1)
···
 	vmalloc_init();
 }
 
-asmlinkage void __init start_kernel(void)
+asmlinkage __visible void __init start_kernel(void)
 {
 	char * command_line;
 	extern const struct kernel_param __start___param[], __stop___param[];
kernel/context_tracking.c (+1 -1)
···
  * instead of preempt_schedule() to exit user context if needed before
  * calling the scheduler.
  */
-asmlinkage void __sched notrace preempt_schedule_context(void)
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 {
 	enum ctx_state prev_ctx;
 
kernel/locking/lockdep.c (+1 -1)
···
 }
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
-asmlinkage void lockdep_sys_exit(void)
+asmlinkage __visible void lockdep_sys_exit(void)
 {
 	struct task_struct *curr = current;
 
kernel/power/snapshot.c (+1 -1)
···
 		return -ENOMEM;
 }
 
-asmlinkage int swsusp_save(void)
+asmlinkage __visible int swsusp_save(void)
 {
 	unsigned int nr_pages, nr_highmem;
 
kernel/printk/printk.c (+2 -2)
···
  *
  * See the vsnprintf() documentation for format string extensions over C99.
  */
-asmlinkage int printk(const char *fmt, ...)
+asmlinkage __visible int printk(const char *fmt, ...)
 {
 	va_list args;
 	int r;
···
 	}
 }
 
-asmlinkage void early_printk(const char *fmt, ...)
+asmlinkage __visible void early_printk(const char *fmt, ...)
 {
 	va_list ap;
 
kernel/sched/core.c (+5 -5)
···
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
···
 		blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
···
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
 	/*
 	 * If we come here after a random call to set_need_resched(),
···
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
···
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
  */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
 
kernel/softirq.c (+2 -2)
···
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
-asmlinkage void __do_softirq(void)
+asmlinkage __visible void __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
 	unsigned long old_flags = current->flags;
···
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-asmlinkage void do_softirq(void)
+asmlinkage __visible void do_softirq(void)
 {
 	__u32 pending;
 	unsigned long flags;
lib/dump_stack.c (+2 -2)
···
 #ifdef CONFIG_SMP
 static atomic_t dump_lock = ATOMIC_INIT(-1);
 
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
 {
 	int was_locked;
 	int old;
···
 	preempt_enable();
 }
 #else
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
 {
 	__dump_stack();
 }