Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'kbuild' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild

Pull kbuild updates from Michal Marek:

- EXPORT_SYMBOL for asm source by Al Viro.

This does bring a regression, because genksyms no longer generates
checksums for these symbols (CONFIG_MODVERSIONS). Nick Piggin is
working on a patch to fix this.

Plus, we are talking about functions like strcpy(), which rarely
change prototypes.

- Fixes for PPC fallout of the above by Stephen Rothwell and Nick
Piggin.

- fixdep speedup by Alexey Dobriyan.

- preparatory work by Nick Piggin to allow architectures to build with
-ffunction-sections, -fdata-sections and --gc-sections.

- CONFIG_THIN_ARCHIVES support by Stephen Rothwell

- fix for filenames with colons in the initramfs source by me.

* 'kbuild' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild: (22 commits)
initramfs: Escape colons in depfile
ppc: there is no clear_pages to export
powerpc/64: whitelist unresolved modversions CRCs
kbuild: -ffunction-sections fix for archs with conflicting sections
kbuild: add arch specific post-link Makefile
kbuild: allow archs to select link dead code/data elimination
kbuild: allow architectures to use thin archives instead of ld -r
kbuild: Regenerate genksyms lexer
kbuild: genksyms fix for typeof handling
fixdep: faster CONFIG_ search
ia64: move exports to definitions
sparc32: debride memcpy.S a bit
[sparc] unify 32bit and 64bit string.h
sparc: move exports to definitions
ppc: move exports to definitions
arm: move exports to definitions
s390: move exports to definitions
m68k: move exports to definitions
alpha: move exports to actual definitions
x86: move exports to actual definitions
...

+1094 -1403
+16
Documentation/kbuild/makefiles.txt
··· 41 41 --- 6.8 Custom kbuild commands 42 42 --- 6.9 Preprocessing linker scripts 43 43 --- 6.10 Generic header files 44 + --- 6.11 Post-link pass 44 45 45 46 === 7 Kbuild syntax for exported headers 46 47 --- 7.1 header-y ··· 1237 1236 The recommended approach how to use a generic header file is 1238 1237 to list the file in the Kbuild file. 1239 1238 See "7.4 generic-y" for further info on syntax etc. 1239 + 1240 + --- 6.11 Post-link pass 1241 + 1242 + If the file arch/xxx/Makefile.postlink exists, this makefile 1243 + will be invoked for post-link objects (vmlinux and modules.ko) 1244 + for architectures to run post-link passes on. Must also handle 1245 + the clean target. 1246 + 1247 + This pass runs after kallsyms generation. If the architecture 1248 + needs to modify symbol locations, rather than manipulate the 1249 + kallsyms, it may be easier to add another postlink target for 1250 + .tmp_vmlinux? targets to be called from link-vmlinux.sh. 1251 + 1252 + For example, powerpc uses this to check relocation sanity of 1253 + the linked vmlinux file. 1240 1254 1241 1255 === 7 Kbuild syntax for exported headers 1242 1256
+16 -3
Makefile
··· 623 623 KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) 624 624 KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) 625 625 626 + ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION 627 + KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,) 628 + KBUILD_CFLAGS += $(call cc-option,-fdata-sections,) 629 + endif 630 + 626 631 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE 627 632 KBUILD_CFLAGS += -Os 628 633 else ··· 808 803 KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID) 809 804 LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID) 810 805 806 + ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION 807 + LDFLAGS_vmlinux += $(call ld-option, --gc-sections,) 808 + endif 809 + 811 810 ifeq ($(CONFIG_STRIP_ASM_SYMS),y) 812 811 LDFLAGS_vmlinux += $(call ld-option, -X,) 813 812 endif ··· 951 942 include/generated/autoksyms.h: FORCE 952 943 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh true 953 944 954 - # Final link of vmlinux 955 - cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) 956 - quiet_cmd_link-vmlinux = LINK $@ 945 + ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink) 946 + 947 + # Final link of vmlinux with optional arch pass after final link 948 + cmd_link-vmlinux = \ 949 + $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \ 950 + $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true) 957 951 958 952 vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE 959 953 +$(call if_changed,link-vmlinux) ··· 1283 1271 1284 1272 vmlinuxclean: 1285 1273 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean 1274 + $(Q)$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) clean) 1286 1275 1287 1276 clean: archclean vmlinuxclean 1288 1277
+21
arch/Kconfig
··· 450 450 451 451 endchoice 452 452 453 + config THIN_ARCHIVES 454 + bool 455 + help 456 + Select this if the architecture wants to use thin archives 457 + instead of ld -r to create the built-in.o files. 458 + 459 + config LD_DEAD_CODE_DATA_ELIMINATION 460 + bool 461 + help 462 + Select this if the architecture wants to do dead code and 463 + data elimination with the linker by compiling with 464 + -ffunction-sections -fdata-sections and linking with 465 + --gc-sections. 466 + 467 + This requires that the arch annotates or otherwise protects 468 + its external entry points from being discarded. Linker scripts 469 + must also merge .text.*, .data.*, and .bss.* correctly into 470 + output sections. Care must be taken not to pull in unrelated 471 + sections (e.g., '.text.init'). Typically '.' in section names 472 + is used to distinguish them from label names / C identifiers. 473 + 453 474 config HAVE_ARCH_WITHIN_STACK_FRAMES 454 475 bool 455 476 help
+1
arch/alpha/include/asm/Kbuild
··· 3 3 generic-y += clkdev.h 4 4 generic-y += cputime.h 5 5 generic-y += exec.h 6 + generic-y += export.h 6 7 generic-y += irq_work.h 7 8 generic-y += mcs_spinlock.h 8 9 generic-y += mm-arch-hooks.h
+1 -1
arch/alpha/kernel/Makefile
··· 8 8 9 9 obj-y := entry.o traps.o process.o osf_sys.o irq.o \ 10 10 irq_alpha.o signal.o setup.o ptrace.o time.o \ 11 - alpha_ksyms.o systbls.o err_common.o io.o 11 + systbls.o err_common.o io.o 12 12 13 13 obj-$(CONFIG_VGA_HOSE) += console.o 14 14 obj-$(CONFIG_SMP) += smp.o
-102
arch/alpha/kernel/alpha_ksyms.c
··· 1 - /* 2 - * linux/arch/alpha/kernel/alpha_ksyms.c 3 - * 4 - * Export the alpha-specific functions that are needed for loadable 5 - * modules. 6 - */ 7 - 8 - #include <linux/module.h> 9 - #include <asm/console.h> 10 - #include <asm/uaccess.h> 11 - #include <asm/checksum.h> 12 - #include <asm/fpu.h> 13 - #include <asm/machvec.h> 14 - 15 - #include <linux/syscalls.h> 16 - 17 - /* these are C runtime functions with special calling conventions: */ 18 - extern void __divl (void); 19 - extern void __reml (void); 20 - extern void __divq (void); 21 - extern void __remq (void); 22 - extern void __divlu (void); 23 - extern void __remlu (void); 24 - extern void __divqu (void); 25 - extern void __remqu (void); 26 - 27 - EXPORT_SYMBOL(alpha_mv); 28 - EXPORT_SYMBOL(callback_getenv); 29 - EXPORT_SYMBOL(callback_setenv); 30 - EXPORT_SYMBOL(callback_save_env); 31 - 32 - /* platform dependent support */ 33 - EXPORT_SYMBOL(strcat); 34 - EXPORT_SYMBOL(strcpy); 35 - EXPORT_SYMBOL(strlen); 36 - EXPORT_SYMBOL(strncpy); 37 - EXPORT_SYMBOL(strncat); 38 - EXPORT_SYMBOL(strchr); 39 - EXPORT_SYMBOL(strrchr); 40 - EXPORT_SYMBOL(memmove); 41 - EXPORT_SYMBOL(__memcpy); 42 - EXPORT_SYMBOL(__memset); 43 - EXPORT_SYMBOL(___memset); 44 - EXPORT_SYMBOL(__memsetw); 45 - EXPORT_SYMBOL(__constant_c_memset); 46 - EXPORT_SYMBOL(copy_page); 47 - EXPORT_SYMBOL(clear_page); 48 - 49 - EXPORT_SYMBOL(alpha_read_fp_reg); 50 - EXPORT_SYMBOL(alpha_read_fp_reg_s); 51 - EXPORT_SYMBOL(alpha_write_fp_reg); 52 - EXPORT_SYMBOL(alpha_write_fp_reg_s); 53 - 54 - /* Networking helper routines. 
*/ 55 - EXPORT_SYMBOL(csum_tcpudp_magic); 56 - EXPORT_SYMBOL(ip_compute_csum); 57 - EXPORT_SYMBOL(ip_fast_csum); 58 - EXPORT_SYMBOL(csum_partial_copy_nocheck); 59 - EXPORT_SYMBOL(csum_partial_copy_from_user); 60 - EXPORT_SYMBOL(csum_ipv6_magic); 61 - 62 - #ifdef CONFIG_MATHEMU_MODULE 63 - extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long); 64 - extern long (*alpha_fp_emul) (unsigned long pc); 65 - EXPORT_SYMBOL(alpha_fp_emul_imprecise); 66 - EXPORT_SYMBOL(alpha_fp_emul); 67 - #endif 68 - 69 - /* 70 - * The following are specially called from the uaccess assembly stubs. 71 - */ 72 - EXPORT_SYMBOL(__copy_user); 73 - EXPORT_SYMBOL(__do_clear_user); 74 - 75 - /* 76 - * SMP-specific symbols. 77 - */ 78 - 79 - #ifdef CONFIG_SMP 80 - EXPORT_SYMBOL(_atomic_dec_and_lock); 81 - #endif /* CONFIG_SMP */ 82 - 83 - /* 84 - * The following are special because they're not called 85 - * explicitly (the C compiler or assembler generates them in 86 - * response to division operations). Fortunately, their 87 - * interface isn't gonna change any time soon now, so it's OK 88 - * to leave it out of version control. 89 - */ 90 - # undef memcpy 91 - # undef memset 92 - EXPORT_SYMBOL(__divl); 93 - EXPORT_SYMBOL(__divlu); 94 - EXPORT_SYMBOL(__divq); 95 - EXPORT_SYMBOL(__divqu); 96 - EXPORT_SYMBOL(__reml); 97 - EXPORT_SYMBOL(__remlu); 98 - EXPORT_SYMBOL(__remq); 99 - EXPORT_SYMBOL(__remqu); 100 - EXPORT_SYMBOL(memcpy); 101 - EXPORT_SYMBOL(memset); 102 - EXPORT_SYMBOL(memchr);
+4 -2
arch/alpha/kernel/machvec_impl.h
··· 144 144 else beforehand. Fine. We'll do it ourselves. */ 145 145 #if 0 146 146 #define ALIAS_MV(system) \ 147 - struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); 147 + struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); \ 148 + EXPORT_SYMBOL(alpha_mv); 148 149 #else 149 150 #define ALIAS_MV(system) \ 150 - asm(".global alpha_mv\nalpha_mv = " #system "_mv"); 151 + asm(".global alpha_mv\nalpha_mv = " #system "_mv"); \ 152 + EXPORT_SYMBOL(alpha_mv); 151 153 #endif 152 154 #endif /* GENERIC */
+1
arch/alpha/kernel/setup.c
··· 115 115 116 116 #ifdef CONFIG_ALPHA_GENERIC 117 117 struct alpha_machine_vector alpha_mv; 118 + EXPORT_SYMBOL(alpha_mv); 118 119 #endif 119 120 120 121 #ifndef alpha_using_srm
+5
arch/alpha/lib/callback_srm.S
··· 3 3 */ 4 4 5 5 #include <asm/console.h> 6 + #include <asm/export.h> 6 7 7 8 .text 8 9 #define HWRPB_CRB_OFFSET 0xc0 ··· 93 92 CALLBACK(save_env, CCB_SAVE_ENV, 1) 94 93 CALLBACK(pswitch, CCB_PSWITCH, 3) 95 94 CALLBACK(bios_emul, CCB_BIOS_EMUL, 5) 95 + 96 + EXPORT_SYMBOL(callback_getenv) 97 + EXPORT_SYMBOL(callback_setenv) 98 + EXPORT_SYMBOL(callback_save_env) 96 99 97 100 .data 98 101 __alpha_using_srm: # For use by bootpheader
+3
arch/alpha/lib/checksum.c
··· 48 48 (__force u64)saddr + (__force u64)daddr + 49 49 (__force u64)sum + ((len + proto) << 8)); 50 50 } 51 + EXPORT_SYMBOL(csum_tcpudp_magic); 51 52 52 53 __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 53 54 __u32 len, __u8 proto, __wsum sum) ··· 145 144 { 146 145 return (__force __sum16)~do_csum(iph,ihl*4); 147 146 } 147 + EXPORT_SYMBOL(ip_fast_csum); 148 148 149 149 /* 150 150 * computes the checksum of a memory block at buff, length len, ··· 180 178 { 181 179 return (__force __sum16)~from64to16(do_csum(buff,len)); 182 180 } 181 + EXPORT_SYMBOL(ip_compute_csum);
+2 -1
arch/alpha/lib/clear_page.S
··· 3 3 * 4 4 * Zero an entire page. 5 5 */ 6 - 6 + #include <asm/export.h> 7 7 .text 8 8 .align 4 9 9 .global clear_page ··· 37 37 nop 38 38 39 39 .end clear_page 40 + EXPORT_SYMBOL(clear_page)
+2
arch/alpha/lib/clear_user.S
··· 24 24 * Clobbers: 25 25 * $1,$2,$3,$4,$5,$6 26 26 */ 27 + #include <asm/export.h> 27 28 28 29 /* Allow an exception for an insn; exit if we get one. */ 29 30 #define EX(x,y...) \ ··· 112 111 ret $31, ($28), 1 # .. e1 : 113 112 114 113 .end __do_clear_user 114 + EXPORT_SYMBOL(__do_clear_user)
+2 -1
arch/alpha/lib/copy_page.S
··· 3 3 * 4 4 * Copy an entire page. 5 5 */ 6 - 6 + #include <asm/export.h> 7 7 .text 8 8 .align 4 9 9 .global copy_page ··· 47 47 nop 48 48 49 49 .end copy_page 50 + EXPORT_SYMBOL(copy_page)
+3
arch/alpha/lib/copy_user.S
··· 26 26 * $1,$2,$3,$4,$5,$6,$7 27 27 */ 28 28 29 + #include <asm/export.h> 30 + 29 31 /* Allow an exception for an insn; exit if we get one. */ 30 32 #define EXI(x,y...) \ 31 33 99: x,##y; \ ··· 145 143 ret $31,($28),1 146 144 147 145 .end __copy_user 146 + EXPORT_SYMBOL(__copy_user)
+2
arch/alpha/lib/csum_ipv6_magic.S
··· 12 12 * added by Ivan Kokshaysky <ink@jurassic.park.msu.ru> 13 13 */ 14 14 15 + #include <asm/export.h> 15 16 .globl csum_ipv6_magic 16 17 .align 4 17 18 .ent csum_ipv6_magic ··· 114 113 ret # .. e1 : 115 114 116 115 .end csum_ipv6_magic 116 + EXPORT_SYMBOL(csum_ipv6_magic)
+2
arch/alpha/lib/csum_partial_copy.c
··· 374 374 } 375 375 return (__force __wsum)checksum; 376 376 } 377 + EXPORT_SYMBOL(csum_partial_copy_from_user); 377 378 378 379 __wsum 379 380 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) ··· 387 386 set_fs(oldfs); 388 387 return checksum; 389 388 } 389 + EXPORT_SYMBOL(csum_partial_copy_nocheck);
+2
arch/alpha/lib/dec_and_lock.c
··· 7 7 8 8 #include <linux/spinlock.h> 9 9 #include <linux/atomic.h> 10 + #include <linux/export.h> 10 11 11 12 asm (".text \n\ 12 13 .global _atomic_dec_and_lock \n\ ··· 40 39 spin_unlock(lock); 41 40 return 0; 42 41 } 42 + EXPORT_SYMBOL(_atomic_dec_and_lock);
+3
arch/alpha/lib/divide.S
··· 45 45 * $28 - compare status 46 46 */ 47 47 48 + #include <asm/export.h> 48 49 #define halt .long 0 49 50 50 51 /* ··· 152 151 addq $30,STACK,$30 153 152 ret $31,($23),1 154 153 .end ufunction 154 + EXPORT_SYMBOL(ufunction) 155 155 156 156 /* 157 157 * Uhh.. Ugly signed division. I'd rather not have it at all, but ··· 195 193 addq $30,STACK,$30 196 194 ret $31,($23),1 197 195 .end sfunction 196 + EXPORT_SYMBOL(sfunction)
+2 -1
arch/alpha/lib/ev6-clear_page.S
··· 3 3 * 4 4 * Zero an entire page. 5 5 */ 6 - 6 + #include <asm/export.h> 7 7 .text 8 8 .align 4 9 9 .global clear_page ··· 52 52 nop 53 53 54 54 .end clear_page 55 + EXPORT_SYMBOL(clear_page)
+2 -1
arch/alpha/lib/ev6-clear_user.S
··· 43 43 * want to leave a hole (and we also want to avoid repeating lots of work) 44 44 */ 45 45 46 + #include <asm/export.h> 46 47 /* Allow an exception for an insn; exit if we get one. */ 47 48 #define EX(x,y...) \ 48 49 99: x,##y; \ ··· 223 222 nop # .. E .. .. : 224 223 ret $31, ($28), 1 # L0 .. .. .. : L U L U 225 224 .end __do_clear_user 226 - 225 + EXPORT_SYMBOL(__do_clear_user)
+2 -1
arch/alpha/lib/ev6-copy_page.S
··· 56 56 destination pages are in the dcache, but it is my guess that this is 57 57 less important than the dcache miss case. */ 58 58 59 - 59 + #include <asm/export.h> 60 60 .text 61 61 .align 4 62 62 .global copy_page ··· 201 201 nop 202 202 203 203 .end copy_page 204 + EXPORT_SYMBOL(copy_page)
+2 -1
arch/alpha/lib/ev6-copy_user.S
··· 37 37 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 38 38 */ 39 39 40 + #include <asm/export.h> 40 41 /* Allow an exception for an insn; exit if we get one. */ 41 42 #define EXI(x,y...) \ 42 43 99: x,##y; \ ··· 257 256 ret $31,($28),1 # L0 258 257 259 258 .end __copy_user 260 - 259 + EXPORT_SYMBOL(__copy_user)
+2
arch/alpha/lib/ev6-csum_ipv6_magic.S
··· 52 52 * may cause additional delay in rare cases (load-load replay traps). 53 53 */ 54 54 55 + #include <asm/export.h> 55 56 .globl csum_ipv6_magic 56 57 .align 4 57 58 .ent csum_ipv6_magic ··· 149 148 ret # L0 : L U L U 150 149 151 150 .end csum_ipv6_magic 151 + EXPORT_SYMBOL(csum_ipv6_magic)
+3
arch/alpha/lib/ev6-divide.S
··· 55 55 * Try not to change the actual algorithm if possible for consistency. 56 56 */ 57 57 58 + #include <asm/export.h> 58 59 #define halt .long 0 59 60 60 61 /* ··· 206 205 addq $30,STACK,$30 # E : 207 206 ret $31,($23),1 # L0 : L U U L 208 207 .end ufunction 208 + EXPORT_SYMBOL(ufunction) 209 209 210 210 /* 211 211 * Uhh.. Ugly signed division. I'd rather not have it at all, but ··· 259 257 addq $30,STACK,$30 # E : 260 258 ret $31,($23),1 # L0 : L U U L 261 259 .end sfunction 260 + EXPORT_SYMBOL(sfunction)
+2 -1
arch/alpha/lib/ev6-memchr.S
··· 27 27 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 28 28 * Try not to change the actual algorithm if possible for consistency. 29 29 */ 30 - 30 + #include <asm/export.h> 31 31 .set noreorder 32 32 .set noat 33 33 ··· 189 189 ret # L0 : 190 190 191 191 .end memchr 192 + EXPORT_SYMBOL(memchr)
+2 -1
arch/alpha/lib/ev6-memcpy.S
··· 19 19 * Temp usage notes: 20 20 * $1,$2, - scratch 21 21 */ 22 - 22 + #include <asm/export.h> 23 23 .set noreorder 24 24 .set noat 25 25 ··· 242 242 nop # E : 243 243 244 244 .end memcpy 245 + EXPORT_SYMBOL(memcpy) 245 246 246 247 /* For backwards module compatibility. */ 247 248 __memcpy = memcpy
+6 -1
arch/alpha/lib/ev6-memset.S
··· 26 26 * as fixes will need to be made in multiple places. The performance gain 27 27 * is worth it. 28 28 */ 29 - 29 + #include <asm/export.h> 30 30 .set noat 31 31 .set noreorder 32 32 .text ··· 229 229 nop 230 230 ret $31,($26),1 # L0 : 231 231 .end ___memset 232 + EXPORT_SYMBOL(___memset) 232 233 233 234 /* 234 235 * This is the original body of code, prior to replication and ··· 407 406 nop 408 407 ret $31,($26),1 # L0 : 409 408 .end __constant_c_memset 409 + EXPORT_SYMBOL(__constant_c_memset) 410 410 411 411 /* 412 412 * This is a replicant of the __constant_c_memset code, rescheduled ··· 596 594 ret $31,($26),1 # L0 : 597 595 598 596 .end __memsetw 597 + EXPORT_SYMBOL(__memsetw) 599 598 600 599 memset = ___memset 601 600 __memset = ___memset 601 + EXPORT_SYMBOL(memset) 602 + EXPORT_SYMBOL(__memset)
+2 -1
arch/alpha/lib/ev67-strcat.S
··· 19 19 * string once. 20 20 */ 21 21 22 - 22 + #include <asm/export.h> 23 23 .text 24 24 25 25 .align 4 ··· 52 52 br __stxcpy # L0 : 53 53 54 54 .end strcat 55 + EXPORT_SYMBOL(strcat)
+2 -1
arch/alpha/lib/ev67-strchr.S
··· 15 15 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 16 16 * Try not to change the actual algorithm if possible for consistency. 17 17 */ 18 - 18 + #include <asm/export.h> 19 19 #include <asm/regdef.h> 20 20 21 21 .set noreorder ··· 86 86 ret # L0 : 87 87 88 88 .end strchr 89 + EXPORT_SYMBOL(strchr)
+2 -1
arch/alpha/lib/ev67-strlen.S
··· 17 17 * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 18 18 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 19 19 */ 20 - 20 + #include <asm/export.h> 21 21 .set noreorder 22 22 .set noat 23 23 ··· 47 47 ret $31, ($26) # L0 : 48 48 49 49 .end strlen 50 + EXPORT_SYMBOL(strlen)
+2 -1
arch/alpha/lib/ev67-strncat.S
··· 20 20 * Try not to change the actual algorithm if possible for consistency. 21 21 */ 22 22 23 - 23 + #include <asm/export.h> 24 24 .text 25 25 26 26 .align 4 ··· 92 92 ret # L0 : 93 93 94 94 .end strncat 95 + EXPORT_SYMBOL(strncat)
+2 -1
arch/alpha/lib/ev67-strrchr.S
··· 18 18 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 19 19 */ 20 20 21 - 21 + #include <asm/export.h> 22 22 #include <asm/regdef.h> 23 23 24 24 .set noreorder ··· 107 107 nop 108 108 109 109 .end strrchr 110 + EXPORT_SYMBOL(strrchr)
+7
arch/alpha/lib/fpreg.c
··· 4 4 * (C) Copyright 1998 Linus Torvalds 5 5 */ 6 6 7 + #include <linux/compiler.h> 8 + #include <linux/export.h> 9 + 7 10 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) 8 11 #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); 9 12 #else ··· 55 52 } 56 53 return val; 57 54 } 55 + EXPORT_SYMBOL(alpha_read_fp_reg); 58 56 59 57 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) 60 58 #define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val)); ··· 101 97 case 31: LDT(31, val); break; 102 98 } 103 99 } 100 + EXPORT_SYMBOL(alpha_write_fp_reg); 104 101 105 102 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) 106 103 #define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val)); ··· 151 146 } 152 147 return val; 153 148 } 149 + EXPORT_SYMBOL(alpha_read_fp_reg_s); 154 150 155 151 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) 156 152 #define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val)); ··· 197 191 case 31: LDS(31, val); break; 198 192 } 199 193 } 194 + EXPORT_SYMBOL(alpha_write_fp_reg_s);
+2 -1
arch/alpha/lib/memchr.S
··· 31 31 - only minimum number of quadwords may be accessed 32 32 - the third argument is an unsigned long 33 33 */ 34 - 34 + #include <asm/export.h> 35 35 .set noreorder 36 36 .set noat 37 37 ··· 162 162 ret # .. e1 : 163 163 164 164 .end memchr 165 + EXPORT_SYMBOL(memchr)
+2 -3
arch/alpha/lib/memcpy.c
··· 16 16 */ 17 17 18 18 #include <linux/types.h> 19 + #include <linux/export.h> 19 20 20 21 /* 21 22 * This should be done in one go with ldq_u*2/mask/stq_u. Do it ··· 159 158 __memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n); 160 159 return dest; 161 160 } 162 - 163 - /* For backward modules compatibility, define __memcpy. */ 164 - asm("__memcpy = memcpy; .globl __memcpy"); 161 + EXPORT_SYMBOL(memcpy);
+2 -1
arch/alpha/lib/memmove.S
··· 6 6 * This is hand-massaged output from the original memcpy.c. We defer to 7 7 * memcpy whenever possible; the backwards copy loops are not unrolled. 8 8 */ 9 - 9 + #include <asm/export.h> 10 10 .set noat 11 11 .set noreorder 12 12 .text ··· 179 179 nop 180 180 181 181 .end memmove 182 + EXPORT_SYMBOL(memmove)
+6 -1
arch/alpha/lib/memset.S
··· 13 13 * The scheduling comments are according to the EV5 documentation (and done by 14 14 * hand, so they might well be incorrect, please do tell me about it..) 15 15 */ 16 - 16 + #include <asm/export.h> 17 17 .set noat 18 18 .set noreorder 19 19 .text ··· 106 106 end: 107 107 ret $31,($26),1 /* E1 */ 108 108 .end ___memset 109 + EXPORT_SYMBOL(___memset) 110 + EXPORT_SYMBOL(__constant_c_memset) 109 111 110 112 .align 5 111 113 .ent __memsetw ··· 124 122 br __constant_c_memset /* .. E1 */ 125 123 126 124 .end __memsetw 125 + EXPORT_SYMBOL(__memsetw) 127 126 128 127 memset = ___memset 129 128 __memset = ___memset 129 + EXPORT_SYMBOL(memset) 130 + EXPORT_SYMBOL(__memset)
+2
arch/alpha/lib/strcat.S
··· 4 4 * 5 5 * Append a null-terminated string from SRC to DST. 6 6 */ 7 + #include <asm/export.h> 7 8 8 9 .text 9 10 ··· 51 50 br __stxcpy 52 51 53 52 .end strcat 53 + EXPORT_SYMBOL(strcat);
+2 -1
arch/alpha/lib/strchr.S
··· 5 5 * Return the address of a given character within a null-terminated 6 6 * string, or null if it is not found. 7 7 */ 8 - 8 + #include <asm/export.h> 9 9 #include <asm/regdef.h> 10 10 11 11 .set noreorder ··· 68 68 ret # .. e1 : 69 69 70 70 .end strchr 71 + EXPORT_SYMBOL(strchr)
+2 -1
arch/alpha/lib/strcpy.S
··· 5 5 * Copy a null-terminated string from SRC to DST. Return a pointer 6 6 * to the null-terminator in the source. 7 7 */ 8 - 8 + #include <asm/export.h> 9 9 .text 10 10 11 11 .align 3 ··· 21 21 br __stxcpy # do the copy 22 22 23 23 .end strcpy 24 + EXPORT_SYMBOL(strcpy)
+2 -1
arch/alpha/lib/strlen.S
··· 11 11 * do this instead of the 9 instructions that 12 12 * binary search needs). 13 13 */ 14 - 14 + #include <asm/export.h> 15 15 .set noreorder 16 16 .set noat 17 17 ··· 55 55 ret $31, ($26) 56 56 57 57 .end strlen 58 + EXPORT_SYMBOL(strlen)
+2 -1
arch/alpha/lib/strncat.S
··· 9 9 * past count, whereas libc may write to count+1. This follows the generic 10 10 * implementation in lib/string.c and is, IMHO, more sensible. 11 11 */ 12 - 12 + #include <asm/export.h> 13 13 .text 14 14 15 15 .align 3 ··· 82 82 ret 83 83 84 84 .end strncat 85 + EXPORT_SYMBOL(strncat)
+2 -1
arch/alpha/lib/strncpy.S
··· 10 10 * version has cropped that bit o' nastiness as well as assuming that 11 11 * __stxncpy is in range of a branch. 12 12 */ 13 - 13 + #include <asm/export.h> 14 14 .set noat 15 15 .set noreorder 16 16 ··· 79 79 ret 80 80 81 81 .end strncpy 82 + EXPORT_SYMBOL(strncpy)
+2 -1
arch/alpha/lib/strrchr.S
··· 5 5 * Return the address of the last occurrence of a given character 6 6 * within a null-terminated string, or null if it is not found. 7 7 */ 8 - 8 + #include <asm/export.h> 9 9 #include <asm/regdef.h> 10 10 11 11 .set noreorder ··· 85 85 ret # .. e1 : 86 86 87 87 .end strrchr 88 + EXPORT_SYMBOL(strrchr)
+1
arch/arm/include/asm/Kbuild
··· 8 8 generic-y += emergency-restart.h 9 9 generic-y += errno.h 10 10 generic-y += exec.h 11 + generic-y += export.h 11 12 generic-y += ioctl.h 12 13 generic-y += ipcbuf.h 13 14 generic-y += irq_regs.h
+1 -1
arch/arm/kernel/Makefile
··· 33 33 obj-$(CONFIG_CPU_IDLE) += cpuidle.o 34 34 obj-$(CONFIG_ISA_DMA_API) += dma.o 35 35 obj-$(CONFIG_FIQ) += fiq.o fiqasm.o 36 - obj-$(CONFIG_MODULES) += armksyms.o module.o 36 + obj-$(CONFIG_MODULES) += module.o 37 37 obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o 38 38 obj-$(CONFIG_ISA_DMA) += dma-isa.o 39 39 obj-$(CONFIG_PCI) += bios32.o isa.o
-183
arch/arm/kernel/armksyms.c
··· 1 - /* 2 - * linux/arch/arm/kernel/armksyms.c 3 - * 4 - * Copyright (C) 2000 Russell King 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - */ 10 - #include <linux/export.h> 11 - #include <linux/sched.h> 12 - #include <linux/string.h> 13 - #include <linux/cryptohash.h> 14 - #include <linux/delay.h> 15 - #include <linux/in6.h> 16 - #include <linux/syscalls.h> 17 - #include <linux/uaccess.h> 18 - #include <linux/io.h> 19 - #include <linux/arm-smccc.h> 20 - 21 - #include <asm/checksum.h> 22 - #include <asm/ftrace.h> 23 - 24 - /* 25 - * libgcc functions - functions that are used internally by the 26 - * compiler... (prototypes are not correct though, but that 27 - * doesn't really matter since they're not versioned). 28 - */ 29 - extern void __ashldi3(void); 30 - extern void __ashrdi3(void); 31 - extern void __divsi3(void); 32 - extern void __lshrdi3(void); 33 - extern void __modsi3(void); 34 - extern void __muldi3(void); 35 - extern void __ucmpdi2(void); 36 - extern void __udivsi3(void); 37 - extern void __umodsi3(void); 38 - extern void __do_div64(void); 39 - extern void __bswapsi2(void); 40 - extern void __bswapdi2(void); 41 - 42 - extern void __aeabi_idiv(void); 43 - extern void __aeabi_idivmod(void); 44 - extern void __aeabi_lasr(void); 45 - extern void __aeabi_llsl(void); 46 - extern void __aeabi_llsr(void); 47 - extern void __aeabi_lmul(void); 48 - extern void __aeabi_uidiv(void); 49 - extern void __aeabi_uidivmod(void); 50 - extern void __aeabi_ulcmp(void); 51 - 52 - extern void fpundefinstr(void); 53 - 54 - void mmioset(void *, unsigned int, size_t); 55 - void mmiocpy(void *, const void *, size_t); 56 - 57 - /* platform dependent support */ 58 - EXPORT_SYMBOL(arm_delay_ops); 59 - 60 - /* networking */ 61 - EXPORT_SYMBOL(csum_partial); 62 - EXPORT_SYMBOL(csum_partial_copy_from_user); 63 - 
EXPORT_SYMBOL(csum_partial_copy_nocheck); 64 - EXPORT_SYMBOL(__csum_ipv6_magic); 65 - 66 - /* io */ 67 - #ifndef __raw_readsb 68 - EXPORT_SYMBOL(__raw_readsb); 69 - #endif 70 - #ifndef __raw_readsw 71 - EXPORT_SYMBOL(__raw_readsw); 72 - #endif 73 - #ifndef __raw_readsl 74 - EXPORT_SYMBOL(__raw_readsl); 75 - #endif 76 - #ifndef __raw_writesb 77 - EXPORT_SYMBOL(__raw_writesb); 78 - #endif 79 - #ifndef __raw_writesw 80 - EXPORT_SYMBOL(__raw_writesw); 81 - #endif 82 - #ifndef __raw_writesl 83 - EXPORT_SYMBOL(__raw_writesl); 84 - #endif 85 - 86 - /* string / mem functions */ 87 - EXPORT_SYMBOL(strchr); 88 - EXPORT_SYMBOL(strrchr); 89 - EXPORT_SYMBOL(memset); 90 - EXPORT_SYMBOL(memcpy); 91 - EXPORT_SYMBOL(memmove); 92 - EXPORT_SYMBOL(memchr); 93 - EXPORT_SYMBOL(__memzero); 94 - 95 - EXPORT_SYMBOL(mmioset); 96 - EXPORT_SYMBOL(mmiocpy); 97 - 98 - #ifdef CONFIG_MMU 99 - EXPORT_SYMBOL(copy_page); 100 - 101 - EXPORT_SYMBOL(arm_copy_from_user); 102 - EXPORT_SYMBOL(arm_copy_to_user); 103 - EXPORT_SYMBOL(arm_clear_user); 104 - 105 - EXPORT_SYMBOL(__get_user_1); 106 - EXPORT_SYMBOL(__get_user_2); 107 - EXPORT_SYMBOL(__get_user_4); 108 - EXPORT_SYMBOL(__get_user_8); 109 - 110 - #ifdef __ARMEB__ 111 - EXPORT_SYMBOL(__get_user_64t_1); 112 - EXPORT_SYMBOL(__get_user_64t_2); 113 - EXPORT_SYMBOL(__get_user_64t_4); 114 - EXPORT_SYMBOL(__get_user_32t_8); 115 - #endif 116 - 117 - EXPORT_SYMBOL(__put_user_1); 118 - EXPORT_SYMBOL(__put_user_2); 119 - EXPORT_SYMBOL(__put_user_4); 120 - EXPORT_SYMBOL(__put_user_8); 121 - #endif 122 - 123 - /* gcc lib functions */ 124 - EXPORT_SYMBOL(__ashldi3); 125 - EXPORT_SYMBOL(__ashrdi3); 126 - EXPORT_SYMBOL(__divsi3); 127 - EXPORT_SYMBOL(__lshrdi3); 128 - EXPORT_SYMBOL(__modsi3); 129 - EXPORT_SYMBOL(__muldi3); 130 - EXPORT_SYMBOL(__ucmpdi2); 131 - EXPORT_SYMBOL(__udivsi3); 132 - EXPORT_SYMBOL(__umodsi3); 133 - EXPORT_SYMBOL(__do_div64); 134 - EXPORT_SYMBOL(__bswapsi2); 135 - EXPORT_SYMBOL(__bswapdi2); 136 - 137 - #ifdef CONFIG_AEABI 138 - 
EXPORT_SYMBOL(__aeabi_idiv); 139 - EXPORT_SYMBOL(__aeabi_idivmod); 140 - EXPORT_SYMBOL(__aeabi_lasr); 141 - EXPORT_SYMBOL(__aeabi_llsl); 142 - EXPORT_SYMBOL(__aeabi_llsr); 143 - EXPORT_SYMBOL(__aeabi_lmul); 144 - EXPORT_SYMBOL(__aeabi_uidiv); 145 - EXPORT_SYMBOL(__aeabi_uidivmod); 146 - EXPORT_SYMBOL(__aeabi_ulcmp); 147 - #endif 148 - 149 - /* bitops */ 150 - EXPORT_SYMBOL(_set_bit); 151 - EXPORT_SYMBOL(_test_and_set_bit); 152 - EXPORT_SYMBOL(_clear_bit); 153 - EXPORT_SYMBOL(_test_and_clear_bit); 154 - EXPORT_SYMBOL(_change_bit); 155 - EXPORT_SYMBOL(_test_and_change_bit); 156 - EXPORT_SYMBOL(_find_first_zero_bit_le); 157 - EXPORT_SYMBOL(_find_next_zero_bit_le); 158 - EXPORT_SYMBOL(_find_first_bit_le); 159 - EXPORT_SYMBOL(_find_next_bit_le); 160 - 161 - #ifdef __ARMEB__ 162 - EXPORT_SYMBOL(_find_first_zero_bit_be); 163 - EXPORT_SYMBOL(_find_next_zero_bit_be); 164 - EXPORT_SYMBOL(_find_first_bit_be); 165 - EXPORT_SYMBOL(_find_next_bit_be); 166 - #endif 167 - 168 - #ifdef CONFIG_FUNCTION_TRACER 169 - #ifdef CONFIG_OLD_MCOUNT 170 - EXPORT_SYMBOL(mcount); 171 - #endif 172 - EXPORT_SYMBOL(__gnu_mcount_nc); 173 - #endif 174 - 175 - #ifdef CONFIG_ARM_PATCH_PHYS_VIRT 176 - EXPORT_SYMBOL(__pv_phys_pfn_offset); 177 - EXPORT_SYMBOL(__pv_offset); 178 - #endif 179 - 180 - #ifdef CONFIG_HAVE_ARM_SMCCC 181 - EXPORT_SYMBOL(arm_smccc_smc); 182 - EXPORT_SYMBOL(arm_smccc_hvc); 183 - #endif
+3
arch/arm/kernel/entry-ftrace.S
··· 7 7 #include <asm/assembler.h> 8 8 #include <asm/ftrace.h> 9 9 #include <asm/unwind.h> 10 + #include <asm/export.h> 10 11 11 12 #include "entry-header.S" 12 13 ··· 154 153 __mcount _old 155 154 #endif 156 155 ENDPROC(mcount) 156 + EXPORT_SYMBOL(mcount) 157 157 158 158 #ifdef CONFIG_DYNAMIC_FTRACE 159 159 ENTRY(ftrace_caller_old) ··· 207 205 #endif 208 206 UNWIND(.fnend) 209 207 ENDPROC(__gnu_mcount_nc) 208 + EXPORT_SYMBOL(__gnu_mcount_nc) 210 209 211 210 #ifdef CONFIG_DYNAMIC_FTRACE 212 211 ENTRY(ftrace_caller)
+3
arch/arm/kernel/head.S
··· 22 22 #include <asm/memory.h> 23 23 #include <asm/thread_info.h> 24 24 #include <asm/pgtable.h> 25 + #include <asm/export.h> 25 26 26 27 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) 27 28 #include CONFIG_DEBUG_LL_INCLUDE ··· 728 727 __pv_offset: 729 728 .quad 0 730 729 .size __pv_offset, . -__pv_offset 730 + EXPORT_SYMBOL(__pv_phys_pfn_offset) 731 + EXPORT_SYMBOL(__pv_offset) 731 732 #endif 732 733 733 734 #include "head-common.S"
+3
arch/arm/kernel/smccc-call.S
··· 16 16 #include <asm/opcodes-sec.h> 17 17 #include <asm/opcodes-virt.h> 18 18 #include <asm/unwind.h> 19 + #include <asm/export.h> 19 20 20 21 /* 21 22 * Wrap c macros in asm macros to delay expansion until after the ··· 52 51 ENTRY(arm_smccc_smc) 53 52 SMCCC SMCCC_SMC 54 53 ENDPROC(arm_smccc_smc) 54 + EXPORT_SYMBOL(arm_smccc_smc) 55 55 56 56 /* 57 57 * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, ··· 62 60 ENTRY(arm_smccc_hvc) 63 61 SMCCC SMCCC_HVC 64 62 ENDPROC(arm_smccc_hvc) 63 + EXPORT_SYMBOL(arm_smccc_hvc)
+3
arch/arm/lib/ashldi3.S
··· 28 28 29 29 #include <linux/linkage.h> 30 30 #include <asm/assembler.h> 31 + #include <asm/export.h> 31 32 32 33 #ifdef __ARMEB__ 33 34 #define al r1 ··· 53 52 54 53 ENDPROC(__ashldi3) 55 54 ENDPROC(__aeabi_llsl) 55 + EXPORT_SYMBOL(__ashldi3) 56 + EXPORT_SYMBOL(__aeabi_llsl)
+3
arch/arm/lib/ashrdi3.S
··· 28 28 29 29 #include <linux/linkage.h> 30 30 #include <asm/assembler.h> 31 + #include <asm/export.h> 31 32 32 33 #ifdef __ARMEB__ 33 34 #define al r1 ··· 53 52 54 53 ENDPROC(__ashrdi3) 55 54 ENDPROC(__aeabi_lasr) 55 + EXPORT_SYMBOL(__ashrdi3) 56 + EXPORT_SYMBOL(__aeabi_lasr)
+5
arch/arm/lib/bitops.h
··· 1 1 #include <asm/assembler.h> 2 2 #include <asm/unwind.h> 3 + #include <asm/export.h> 3 4 4 5 #if __LINUX_ARM_ARCH__ >= 6 5 6 .macro bitop, name, instr ··· 26 25 bx lr 27 26 UNWIND( .fnend ) 28 27 ENDPROC(\name ) 28 + EXPORT_SYMBOL(\name ) 29 29 .endm 30 30 31 31 .macro testop, name, instr, store ··· 57 55 2: bx lr 58 56 UNWIND( .fnend ) 59 57 ENDPROC(\name ) 58 + EXPORT_SYMBOL(\name ) 60 59 .endm 61 60 #else 62 61 .macro bitop, name, instr ··· 77 74 ret lr 78 75 UNWIND( .fnend ) 79 76 ENDPROC(\name ) 77 + EXPORT_SYMBOL(\name ) 80 78 .endm 81 79 82 80 /** ··· 106 102 ret lr 107 103 UNWIND( .fnend ) 108 104 ENDPROC(\name ) 105 + EXPORT_SYMBOL(\name ) 109 106 .endm 110 107 #endif
+3
arch/arm/lib/bswapsdi2.S
··· 1 1 #include <linux/linkage.h> 2 2 #include <asm/assembler.h> 3 + #include <asm/export.h> 3 4 4 5 #if __LINUX_ARM_ARCH__ >= 6 5 6 ENTRY(__bswapsi2) ··· 36 35 ret lr 37 36 ENDPROC(__bswapdi2) 38 37 #endif 38 + EXPORT_SYMBOL(__bswapsi2) 39 + EXPORT_SYMBOL(__bswapdi2)
+4
arch/arm/lib/clear_user.S
··· 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 12 #include <asm/unwind.h> 13 + #include <asm/export.h> 13 14 14 15 .text 15 16 ··· 51 50 UNWIND(.fnend) 52 51 ENDPROC(arm_clear_user) 53 52 ENDPROC(__clear_user_std) 53 + #ifndef CONFIG_UACCESS_WITH_MEMCPY 54 + EXPORT_SYMBOL(arm_clear_user) 55 + #endif 54 56 55 57 .pushsection .text.fixup,"ax" 56 58 .align 0
+2
arch/arm/lib/copy_from_user.S
··· 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 15 #include <asm/unwind.h> 16 + #include <asm/export.h> 16 17 17 18 /* 18 19 * Prototype: ··· 95 94 #include "copy_template.S" 96 95 97 96 ENDPROC(arm_copy_from_user) 97 + EXPORT_SYMBOL(arm_copy_from_user) 98 98 99 99 .pushsection .fixup,"ax" 100 100 .align 0
+2
arch/arm/lib/copy_page.S
··· 13 13 #include <asm/assembler.h> 14 14 #include <asm/asm-offsets.h> 15 15 #include <asm/cache.h> 16 + #include <asm/export.h> 16 17 17 18 #define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 )) 18 19 ··· 46 45 PLD( beq 2b ) 47 46 ldmfd sp!, {r4, pc} @ 3 48 47 ENDPROC(copy_page) 48 + EXPORT_SYMBOL(copy_page)
+4
arch/arm/lib/copy_to_user.S
··· 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 15 #include <asm/unwind.h> 16 + #include <asm/export.h> 16 17 17 18 /* 18 19 * Prototype: ··· 100 99 101 100 ENDPROC(arm_copy_to_user) 102 101 ENDPROC(__copy_to_user_std) 102 + #ifndef CONFIG_UACCESS_WITH_MEMCPY 103 + EXPORT_SYMBOL(arm_copy_to_user) 104 + #endif 103 105 104 106 .pushsection .text.fixup,"ax" 105 107 .align 0
+2 -1
arch/arm/lib/csumipv6.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 + #include <asm/export.h> 12 13 13 14 .text 14 15 ··· 31 30 adcs r0, r0, #0 32 31 ldmfd sp!, {pc} 33 32 ENDPROC(__csum_ipv6_magic) 34 - 33 + EXPORT_SYMBOL(__csum_ipv6_magic)
+2
arch/arm/lib/csumpartial.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 + #include <asm/export.h> 12 13 13 14 .text 14 15 ··· 141 140 bne 4b 142 141 b .Lless4 143 142 ENDPROC(csum_partial) 143 + EXPORT_SYMBOL(csum_partial)
+1
arch/arm/lib/csumpartialcopy.S
··· 49 49 50 50 #define FN_ENTRY ENTRY(csum_partial_copy_nocheck) 51 51 #define FN_EXIT ENDPROC(csum_partial_copy_nocheck) 52 + #define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_nocheck) 52 53 53 54 #include "csumpartialcopygeneric.S"
+2
arch/arm/lib/csumpartialcopygeneric.S
··· 8 8 * published by the Free Software Foundation. 9 9 */ 10 10 #include <asm/assembler.h> 11 + #include <asm/export.h> 11 12 12 13 /* 13 14 * unsigned int ··· 332 331 mov r5, r4, get_byte_1 333 332 b .Lexit 334 333 FN_EXIT 334 + FN_EXPORT
+1
arch/arm/lib/csumpartialcopyuser.S
··· 73 73 74 74 #define FN_ENTRY ENTRY(csum_partial_copy_from_user) 75 75 #define FN_EXIT ENDPROC(csum_partial_copy_from_user) 76 + #define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_from_user) 76 77 77 78 #include "csumpartialcopygeneric.S" 78 79
+2
arch/arm/lib/delay.c
··· 24 24 #include <linux/init.h> 25 25 #include <linux/kernel.h> 26 26 #include <linux/module.h> 27 + #include <linux/export.h> 27 28 #include <linux/timex.h> 28 29 29 30 /* ··· 35 34 .const_udelay = __loop_const_udelay, 36 35 .udelay = __loop_udelay, 37 36 }; 37 + EXPORT_SYMBOL(arm_delay_ops); 38 38 39 39 static const struct delay_timer *delay_timer; 40 40 static bool delay_calibrated;
+2
arch/arm/lib/div64.S
··· 15 15 #include <linux/linkage.h> 16 16 #include <asm/assembler.h> 17 17 #include <asm/unwind.h> 18 + #include <asm/export.h> 18 19 19 20 #ifdef __ARMEB__ 20 21 #define xh r0 ··· 211 210 212 211 UNWIND(.fnend) 213 212 ENDPROC(__do_div64) 213 + EXPORT_SYMBOL(__do_div64)
+9
arch/arm/lib/findbit.S
··· 15 15 */ 16 16 #include <linux/linkage.h> 17 17 #include <asm/assembler.h> 18 + #include <asm/export.h> 18 19 .text 19 20 20 21 /* ··· 38 37 3: mov r0, r1 @ no free bits 39 38 ret lr 40 39 ENDPROC(_find_first_zero_bit_le) 40 + EXPORT_SYMBOL(_find_first_zero_bit_le) 41 41 42 42 /* 43 43 * Purpose : Find next 'zero' bit ··· 59 57 add r2, r2, #1 @ align bit pointer 60 58 b 2b @ loop for next bit 61 59 ENDPROC(_find_next_zero_bit_le) 60 + EXPORT_SYMBOL(_find_next_zero_bit_le) 62 61 63 62 /* 64 63 * Purpose : Find a 'one' bit ··· 81 78 3: mov r0, r1 @ no free bits 82 79 ret lr 83 80 ENDPROC(_find_first_bit_le) 81 + EXPORT_SYMBOL(_find_first_bit_le) 84 82 85 83 /* 86 84 * Purpose : Find next 'one' bit ··· 101 97 add r2, r2, #1 @ align bit pointer 102 98 b 2b @ loop for next bit 103 99 ENDPROC(_find_next_bit_le) 100 + EXPORT_SYMBOL(_find_next_bit_le) 104 101 105 102 #ifdef __ARMEB__ 106 103 ··· 121 116 3: mov r0, r1 @ no free bits 122 117 ret lr 123 118 ENDPROC(_find_first_zero_bit_be) 119 + EXPORT_SYMBOL(_find_first_zero_bit_be) 124 120 125 121 ENTRY(_find_next_zero_bit_be) 126 122 teq r1, #0 ··· 139 133 add r2, r2, #1 @ align bit pointer 140 134 b 2b @ loop for next bit 141 135 ENDPROC(_find_next_zero_bit_be) 136 + EXPORT_SYMBOL(_find_next_zero_bit_be) 142 137 143 138 ENTRY(_find_first_bit_be) 144 139 teq r1, #0 ··· 157 150 3: mov r0, r1 @ no free bits 158 151 ret lr 159 152 ENDPROC(_find_first_bit_be) 153 + EXPORT_SYMBOL(_find_first_bit_be) 160 154 161 155 ENTRY(_find_next_bit_be) 162 156 teq r1, #0 ··· 174 166 add r2, r2, #1 @ align bit pointer 175 167 b 2b @ loop for next bit 176 168 ENDPROC(_find_next_bit_be) 169 + EXPORT_SYMBOL(_find_next_bit_be) 177 170 178 171 #endif 179 172
+9
arch/arm/lib/getuser.S
··· 31 31 #include <asm/assembler.h> 32 32 #include <asm/errno.h> 33 33 #include <asm/domain.h> 34 + #include <asm/export.h> 34 35 35 36 ENTRY(__get_user_1) 36 37 check_uaccess r0, 1, r1, r2, __get_user_bad ··· 39 38 mov r0, #0 40 39 ret lr 41 40 ENDPROC(__get_user_1) 41 + EXPORT_SYMBOL(__get_user_1) 42 42 43 43 ENTRY(__get_user_2) 44 44 check_uaccess r0, 2, r1, r2, __get_user_bad ··· 60 58 mov r0, #0 61 59 ret lr 62 60 ENDPROC(__get_user_2) 61 + EXPORT_SYMBOL(__get_user_2) 63 62 64 63 ENTRY(__get_user_4) 65 64 check_uaccess r0, 4, r1, r2, __get_user_bad ··· 68 65 mov r0, #0 69 66 ret lr 70 67 ENDPROC(__get_user_4) 68 + EXPORT_SYMBOL(__get_user_4) 71 69 72 70 ENTRY(__get_user_8) 73 71 check_uaccess r0, 8, r1, r2, __get_user_bad ··· 82 78 mov r0, #0 83 79 ret lr 84 80 ENDPROC(__get_user_8) 81 + EXPORT_SYMBOL(__get_user_8) 85 82 86 83 #ifdef __ARMEB__ 87 84 ENTRY(__get_user_32t_8) ··· 96 91 mov r0, #0 97 92 ret lr 98 93 ENDPROC(__get_user_32t_8) 94 + EXPORT_SYMBOL(__get_user_32t_8) 99 95 100 96 ENTRY(__get_user_64t_1) 101 97 check_uaccess r0, 1, r1, r2, __get_user_bad8 ··· 104 98 mov r0, #0 105 99 ret lr 106 100 ENDPROC(__get_user_64t_1) 101 + EXPORT_SYMBOL(__get_user_64t_1) 107 102 108 103 ENTRY(__get_user_64t_2) 109 104 check_uaccess r0, 2, r1, r2, __get_user_bad8 ··· 121 114 mov r0, #0 122 115 ret lr 123 116 ENDPROC(__get_user_64t_2) 117 + EXPORT_SYMBOL(__get_user_64t_2) 124 118 125 119 ENTRY(__get_user_64t_4) 126 120 check_uaccess r0, 4, r1, r2, __get_user_bad8 ··· 129 121 mov r0, #0 130 122 ret lr 131 123 ENDPROC(__get_user_64t_4) 124 + EXPORT_SYMBOL(__get_user_64t_4) 132 125 #endif 133 126 134 127 __get_user_bad8:
+2
arch/arm/lib/io-readsb.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 + #include <asm/export.h> 12 13 13 14 .Linsb_align: rsb ip, ip, #4 14 15 cmp ip, r2 ··· 122 121 123 122 ldmfd sp!, {r4 - r6, pc} 124 123 ENDPROC(__raw_readsb) 124 + EXPORT_SYMBOL(__raw_readsb)
+2
arch/arm/lib/io-readsl.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 + #include <asm/export.h> 12 13 13 14 ENTRY(__raw_readsl) 14 15 teq r2, #0 @ do we have to check for the zero len? ··· 78 77 strb r3, [r1, #0] 79 78 ret lr 80 79 ENDPROC(__raw_readsl) 80 + EXPORT_SYMBOL(__raw_readsl)
+2 -1
arch/arm/lib/io-readsw-armv3.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 + #include <asm/export.h> 12 13 13 14 .Linsw_bad_alignment: 14 15 adr r0, .Linsw_bad_align_msg ··· 104 103 105 104 ldmfd sp!, {r4, r5, r6, pc} 106 105 107 - 106 + EXPORT_SYMBOL(__raw_readsw)
+2
arch/arm/lib/io-readsw-armv4.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 + #include <asm/export.h> 12 13 13 14 .macro pack, rd, hw1, hw2 14 15 #ifndef __ARMEB__ ··· 130 129 strneb ip, [r1] 131 130 ldmfd sp!, {r4, pc} 132 131 ENDPROC(__raw_readsw) 132 + EXPORT_SYMBOL(__raw_readsw)
+2
arch/arm/lib/io-writesb.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 + #include <asm/export.h> 12 13 13 14 .macro outword, rd 14 15 #ifndef __ARMEB__ ··· 93 92 94 93 ldmfd sp!, {r4, r5, pc} 95 94 ENDPROC(__raw_writesb) 95 + EXPORT_SYMBOL(__raw_writesb)
+2
arch/arm/lib/io-writesl.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 + #include <asm/export.h> 12 13 13 14 ENTRY(__raw_writesl) 14 15 teq r2, #0 @ do we have to check for the zero len? ··· 66 65 bne 6b 67 66 ret lr 68 67 ENDPROC(__raw_writesl) 68 + EXPORT_SYMBOL(__raw_writesl)
+2
arch/arm/lib/io-writesw-armv3.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 + #include <asm/export.h> 12 13 13 14 .Loutsw_bad_alignment: 14 15 adr r0, .Loutsw_bad_align_msg ··· 125 124 strne ip, [r0] 126 125 127 126 ldmfd sp!, {r4, r5, r6, pc} 127 + EXPORT_SYMBOL(__raw_writesw)
+2
arch/arm/lib/io-writesw-armv4.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 + #include <asm/export.h> 12 13 13 14 .macro outword, rd 14 15 #ifndef __ARMEB__ ··· 99 98 strneh ip, [r0] 100 99 ret lr 101 100 ENDPROC(__raw_writesw) 101 + EXPORT_SYMBOL(__raw_writesw)
+9
arch/arm/lib/lib1funcs.S
··· 36 36 #include <linux/linkage.h> 37 37 #include <asm/assembler.h> 38 38 #include <asm/unwind.h> 39 + #include <asm/export.h> 39 40 40 41 .macro ARM_DIV_BODY dividend, divisor, result, curbit 41 42 ··· 239 238 UNWIND(.fnend) 240 239 ENDPROC(__udivsi3) 241 240 ENDPROC(__aeabi_uidiv) 241 + EXPORT_SYMBOL(__udivsi3) 242 + EXPORT_SYMBOL(__aeabi_uidiv) 242 243 243 244 ENTRY(__umodsi3) 244 245 UNWIND(.fnstart) ··· 259 256 260 257 UNWIND(.fnend) 261 258 ENDPROC(__umodsi3) 259 + EXPORT_SYMBOL(__umodsi3) 262 260 263 261 #ifdef CONFIG_ARM_PATCH_IDIV 264 262 .align 3 ··· 307 303 UNWIND(.fnend) 308 304 ENDPROC(__divsi3) 309 305 ENDPROC(__aeabi_idiv) 306 + EXPORT_SYMBOL(__divsi3) 307 + EXPORT_SYMBOL(__aeabi_idiv) 310 308 311 309 ENTRY(__modsi3) 312 310 UNWIND(.fnstart) ··· 333 327 334 328 UNWIND(.fnend) 335 329 ENDPROC(__modsi3) 330 + EXPORT_SYMBOL(__modsi3) 336 331 337 332 #ifdef CONFIG_AEABI 338 333 ··· 350 343 351 344 UNWIND(.fnend) 352 345 ENDPROC(__aeabi_uidivmod) 346 + EXPORT_SYMBOL(__aeabi_uidivmod) 353 347 354 348 ENTRY(__aeabi_idivmod) 355 349 UNWIND(.fnstart) ··· 364 356 365 357 UNWIND(.fnend) 366 358 ENDPROC(__aeabi_idivmod) 359 + EXPORT_SYMBOL(__aeabi_idivmod) 367 360 368 361 #endif 369 362
+3
arch/arm/lib/lshrdi3.S
··· 28 28 29 29 #include <linux/linkage.h> 30 30 #include <asm/assembler.h> 31 + #include <asm/export.h> 31 32 32 33 #ifdef __ARMEB__ 33 34 #define al r1 ··· 53 52 54 53 ENDPROC(__lshrdi3) 55 54 ENDPROC(__aeabi_llsr) 55 + EXPORT_SYMBOL(__lshrdi3) 56 + EXPORT_SYMBOL(__aeabi_llsr)
+2
arch/arm/lib/memchr.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <asm/assembler.h> 14 + #include <asm/export.h> 14 15 15 16 .text 16 17 .align 5 ··· 25 24 2: movne r0, #0 26 25 ret lr 27 26 ENDPROC(memchr) 27 + EXPORT_SYMBOL(memchr)
+3
arch/arm/lib/memcpy.S
··· 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 15 #include <asm/unwind.h> 16 + #include <asm/export.h> 16 17 17 18 #define LDR1W_SHIFT 0 18 19 #define STR1W_SHIFT 0 ··· 69 68 70 69 ENDPROC(memcpy) 71 70 ENDPROC(mmiocpy) 71 + EXPORT_SYMBOL(memcpy) 72 + EXPORT_SYMBOL(mmiocpy)
+2
arch/arm/lib/memmove.S
··· 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 15 #include <asm/unwind.h> 16 + #include <asm/export.h> 16 17 17 18 .text 18 19 ··· 226 225 18: backward_copy_shift push=24 pull=8 227 226 228 227 ENDPROC(memmove) 228 + EXPORT_SYMBOL(memmove)
+3
arch/arm/lib/memset.S
··· 12 12 #include <linux/linkage.h> 13 13 #include <asm/assembler.h> 14 14 #include <asm/unwind.h> 15 + #include <asm/export.h> 15 16 16 17 .text 17 18 .align 5 ··· 136 135 UNWIND( .fnend ) 137 136 ENDPROC(memset) 138 137 ENDPROC(mmioset) 138 + EXPORT_SYMBOL(memset) 139 + EXPORT_SYMBOL(mmioset)
+2
arch/arm/lib/memzero.S
··· 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 12 #include <asm/unwind.h> 13 + #include <asm/export.h> 13 14 14 15 .text 15 16 .align 5 ··· 136 135 ret lr @ 1 137 136 UNWIND( .fnend ) 138 137 ENDPROC(__memzero) 138 + EXPORT_SYMBOL(__memzero)
+3
arch/arm/lib/muldi3.S
··· 12 12 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 + #include <asm/export.h> 15 16 16 17 #ifdef __ARMEB__ 17 18 #define xh r0 ··· 47 46 48 47 ENDPROC(__muldi3) 49 48 ENDPROC(__aeabi_lmul) 49 + EXPORT_SYMBOL(__muldi3) 50 + EXPORT_SYMBOL(__aeabi_lmul)
+5
arch/arm/lib/putuser.S
··· 31 31 #include <asm/assembler.h> 32 32 #include <asm/errno.h> 33 33 #include <asm/domain.h> 34 + #include <asm/export.h> 34 35 35 36 ENTRY(__put_user_1) 36 37 check_uaccess r0, 1, r1, ip, __put_user_bad ··· 39 38 mov r0, #0 40 39 ret lr 41 40 ENDPROC(__put_user_1) 41 + EXPORT_SYMBOL(__put_user_1) 42 42 43 43 ENTRY(__put_user_2) 44 44 check_uaccess r0, 2, r1, ip, __put_user_bad ··· 64 62 mov r0, #0 65 63 ret lr 66 64 ENDPROC(__put_user_2) 65 + EXPORT_SYMBOL(__put_user_2) 67 66 68 67 ENTRY(__put_user_4) 69 68 check_uaccess r0, 4, r1, ip, __put_user_bad ··· 72 69 mov r0, #0 73 70 ret lr 74 71 ENDPROC(__put_user_4) 72 + EXPORT_SYMBOL(__put_user_4) 75 73 76 74 ENTRY(__put_user_8) 77 75 check_uaccess r0, 8, r1, ip, __put_user_bad ··· 86 82 mov r0, #0 87 83 ret lr 88 84 ENDPROC(__put_user_8) 85 + EXPORT_SYMBOL(__put_user_8) 89 86 90 87 __put_user_bad: 91 88 mov r0, #-EFAULT
+2
arch/arm/lib/strchr.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <asm/assembler.h> 14 + #include <asm/export.h> 14 15 15 16 .text 16 17 .align 5 ··· 26 25 subeq r0, r0, #1 27 26 ret lr 28 27 ENDPROC(strchr) 28 + EXPORT_SYMBOL(strchr)
+2
arch/arm/lib/strrchr.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <asm/assembler.h> 14 + #include <asm/export.h> 14 15 15 16 .text 16 17 .align 5 ··· 25 24 mov r0, r3 26 25 ret lr 27 26 ENDPROC(strrchr) 27 + EXPORT_SYMBOL(strrchr)
+3
arch/arm/lib/uaccess_with_memcpy.c
··· 19 19 #include <linux/gfp.h> 20 20 #include <linux/highmem.h> 21 21 #include <linux/hugetlb.h> 22 + #include <linux/export.h> 22 23 #include <asm/current.h> 23 24 #include <asm/page.h> 24 25 ··· 157 156 } 158 157 return n; 159 158 } 159 + EXPORT_SYMBOL(arm_copy_to_user); 160 160 161 161 static unsigned long noinline 162 162 __clear_user_memset(void __user *addr, unsigned long n) ··· 215 213 } 216 214 return n; 217 215 } 216 + EXPORT_SYMBOL(arm_clear_user); 218 217 219 218 #if 0 220 219
+3
arch/arm/lib/ucmpdi2.S
··· 12 12 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 + #include <asm/export.h> 15 16 16 17 #ifdef __ARMEB__ 17 18 #define xh r0 ··· 36 35 ret lr 37 36 38 37 ENDPROC(__ucmpdi2) 38 + EXPORT_SYMBOL(__ucmpdi2) 39 39 40 40 #ifdef CONFIG_AEABI 41 41 ··· 50 48 ret lr 51 49 52 50 ENDPROC(__aeabi_ulcmp) 51 + EXPORT_SYMBOL(__aeabi_ulcmp) 53 52 54 53 #endif 55 54
-1
arch/arm/mach-imx/Makefile
··· 32 32 33 33 ifdef CONFIG_SND_IMX_SOC 34 34 obj-y += ssi-fiq.o 35 - obj-y += ssi-fiq-ksym.o 36 35 endif 37 36 38 37 # i.MX21 based machines
-20
arch/arm/mach-imx/ssi-fiq-ksym.c
··· 1 - /* 2 - * Exported ksyms for the SSI FIQ handler 3 - * 4 - * Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de> 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - */ 10 - 11 - #include <linux/module.h> 12 - 13 - #include <linux/platform_data/asoc-imx-ssi.h> 14 - 15 - EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer); 16 - EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer); 17 - EXPORT_SYMBOL(imx_ssi_fiq_start); 18 - EXPORT_SYMBOL(imx_ssi_fiq_end); 19 - EXPORT_SYMBOL(imx_ssi_fiq_base); 20 -
+6 -1
arch/arm/mach-imx/ssi-fiq.S
··· 8 8 9 9 #include <linux/linkage.h> 10 10 #include <asm/assembler.h> 11 + #include <asm/export.h> 11 12 12 13 /* 13 14 * r8 = bit 0-15: tx offset, bit 16-31: tx buffer size ··· 145 144 .word 0x0 146 145 .L_imx_ssi_fiq_end: 147 146 imx_ssi_fiq_end: 148 - 147 + EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer) 148 + EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer) 149 + EXPORT_SYMBOL(imx_ssi_fiq_start) 150 + EXPORT_SYMBOL(imx_ssi_fiq_end) 151 + EXPORT_SYMBOL(imx_ssi_fiq_base)
+1 -1
arch/ia64/hp/sim/boot/Makefile
··· 33 33 LDFLAGS_bootloader = -static -T 34 34 35 35 $(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/boot_head.o $(obj)/fw-emu.o \ 36 - lib/lib.a arch/ia64/lib/built-in.o arch/ia64/lib/lib.a FORCE 36 + lib/lib.a arch/ia64/lib/lib.a FORCE 37 37 $(call if_changed,ld)
+3
arch/ia64/include/asm/export.h
··· 1 + /* EXPORT_DATA_SYMBOL != EXPORT_SYMBOL here */ 2 + #define KSYM_FUNC(name) @fptr(name) 3 + #include <asm-generic/export.h>
+3
arch/ia64/kernel/entry.S
··· 48 48 #include <asm/thread_info.h> 49 49 #include <asm/unistd.h> 50 50 #include <asm/ftrace.h> 51 + #include <asm/export.h> 51 52 52 53 #include "minstate.h" 53 54 ··· 1346 1345 mov rp=loc0 1347 1346 br.ret.sptk.many rp 1348 1347 END(unw_init_running) 1348 + EXPORT_SYMBOL(unw_init_running) 1349 1349 1350 1350 #ifdef CONFIG_FUNCTION_TRACER 1351 1351 #ifdef CONFIG_DYNAMIC_FTRACE 1352 1352 GLOBAL_ENTRY(_mcount) 1353 1353 br ftrace_stub 1354 1354 END(_mcount) 1355 + EXPORT_SYMBOL(_mcount) 1355 1356 1356 1357 .here: 1357 1358 br.ret.sptk.many b0
+2
arch/ia64/kernel/esi_stub.S
··· 35 35 36 36 #include <asm/processor.h> 37 37 #include <asm/asmmacro.h> 38 + #include <asm/export.h> 38 39 39 40 /* 40 41 * Inputs: ··· 95 94 mov gp=loc2 96 95 br.ret.sptk.many rp 97 96 END(esi_call_phys) 97 + EXPORT_SYMBOL_GPL(esi_call_phys)
+2
arch/ia64/kernel/head.S
··· 32 32 #include <asm/mca_asm.h> 33 33 #include <linux/init.h> 34 34 #include <linux/linkage.h> 35 + #include <asm/export.h> 35 36 36 37 #ifdef CONFIG_HOTPLUG_CPU 37 38 #define SAL_PSR_BITS_TO_SET \ ··· 169 168 __PAGE_ALIGNED_DATA 170 169 171 170 .global empty_zero_page 171 + EXPORT_DATA_SYMBOL_GPL(empty_zero_page) 172 172 empty_zero_page: 173 173 .skip PAGE_SIZE 174 174
+2 -92
arch/ia64/kernel/ia64_ksyms.c
··· 1 1 /* 2 2 * Architecture-specific kernel symbols 3 - * 4 - * Don't put any exports here unless it's defined in an assembler file. 5 - * All other exports should be put directly after the definition. 6 3 */ 7 4 8 - #include <linux/module.h> 9 - 10 - #include <linux/string.h> 11 - EXPORT_SYMBOL(memset); 12 - EXPORT_SYMBOL(memcpy); 13 - EXPORT_SYMBOL(strlen); 14 - 15 - #include <asm/pgtable.h> 16 - EXPORT_SYMBOL_GPL(empty_zero_page); 17 - 18 - #include <asm/checksum.h> 19 - EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ 20 - EXPORT_SYMBOL(csum_ipv6_magic); 21 - 22 - #include <asm/page.h> 23 - EXPORT_SYMBOL(clear_page); 24 - EXPORT_SYMBOL(copy_page); 25 - 26 5 #ifdef CONFIG_VIRTUAL_MEM_MAP 6 + #include <linux/compiler.h> 7 + #include <linux/export.h> 27 8 #include <linux/bootmem.h> 28 9 EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */ 29 10 EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */ 30 11 #endif 31 - 32 - #include <asm/processor.h> 33 - EXPORT_SYMBOL(ia64_cpu_info); 34 - #ifdef CONFIG_SMP 35 - EXPORT_SYMBOL(local_per_cpu_offset); 36 - #endif 37 - 38 - #include <asm/uaccess.h> 39 - EXPORT_SYMBOL(__copy_user); 40 - EXPORT_SYMBOL(__do_clear_user); 41 - EXPORT_SYMBOL(__strlen_user); 42 - EXPORT_SYMBOL(__strncpy_from_user); 43 - EXPORT_SYMBOL(__strnlen_user); 44 - 45 - /* from arch/ia64/lib */ 46 - extern void __divsi3(void); 47 - extern void __udivsi3(void); 48 - extern void __modsi3(void); 49 - extern void __umodsi3(void); 50 - extern void __divdi3(void); 51 - extern void __udivdi3(void); 52 - extern void __moddi3(void); 53 - extern void __umoddi3(void); 54 - 55 - EXPORT_SYMBOL(__divsi3); 56 - EXPORT_SYMBOL(__udivsi3); 57 - EXPORT_SYMBOL(__modsi3); 58 - EXPORT_SYMBOL(__umodsi3); 59 - EXPORT_SYMBOL(__divdi3); 60 - EXPORT_SYMBOL(__udivdi3); 61 - EXPORT_SYMBOL(__moddi3); 62 - EXPORT_SYMBOL(__umoddi3); 63 - 64 - #if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE) 
65 - extern void xor_ia64_2(void); 66 - extern void xor_ia64_3(void); 67 - extern void xor_ia64_4(void); 68 - extern void xor_ia64_5(void); 69 - 70 - EXPORT_SYMBOL(xor_ia64_2); 71 - EXPORT_SYMBOL(xor_ia64_3); 72 - EXPORT_SYMBOL(xor_ia64_4); 73 - EXPORT_SYMBOL(xor_ia64_5); 74 - #endif 75 - 76 - #include <asm/pal.h> 77 - EXPORT_SYMBOL(ia64_pal_call_phys_stacked); 78 - EXPORT_SYMBOL(ia64_pal_call_phys_static); 79 - EXPORT_SYMBOL(ia64_pal_call_stacked); 80 - EXPORT_SYMBOL(ia64_pal_call_static); 81 - EXPORT_SYMBOL(ia64_load_scratch_fpregs); 82 - EXPORT_SYMBOL(ia64_save_scratch_fpregs); 83 - 84 - #include <asm/unwind.h> 85 - EXPORT_SYMBOL(unw_init_running); 86 - 87 - #if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE) 88 - extern void esi_call_phys (void); 89 - EXPORT_SYMBOL_GPL(esi_call_phys); 90 - #endif 91 - extern char ia64_ivt[]; 92 - EXPORT_SYMBOL(ia64_ivt); 93 - 94 - #include <asm/ftrace.h> 95 - #ifdef CONFIG_FUNCTION_TRACER 96 - /* mcount is defined in assembly */ 97 - EXPORT_SYMBOL(_mcount); 98 - #endif 99 - 100 - #include <asm/cacheflush.h> 101 - EXPORT_SYMBOL_GPL(flush_icache_range);
+2
arch/ia64/kernel/ivt.S
··· 57 57 #include <asm/thread_info.h> 58 58 #include <asm/unistd.h> 59 59 #include <asm/errno.h> 60 + #include <asm/export.h> 60 61 61 62 #if 0 62 63 # define PSR_DEFAULT_BITS psr.ac ··· 86 85 87 86 .align 32768 // align on 32KB boundary 88 87 .global ia64_ivt 88 + EXPORT_DATA_SYMBOL(ia64_ivt) 89 89 ia64_ivt: 90 90 ///////////////////////////////////////////////////////////////////////////////////////// 91 91 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
+7
arch/ia64/kernel/pal.S
··· 14 14 15 15 #include <asm/asmmacro.h> 16 16 #include <asm/processor.h> 17 + #include <asm/export.h> 17 18 18 19 .data 19 20 pal_entry_point: ··· 88 87 srlz.d // seralize restoration of psr.l 89 88 br.ret.sptk.many b0 90 89 END(ia64_pal_call_static) 90 + EXPORT_SYMBOL(ia64_pal_call_static) 91 91 92 92 /* 93 93 * Make a PAL call using the stacked registers calling convention. ··· 124 122 srlz.d // serialize restoration of psr.l 125 123 br.ret.sptk.many b0 126 124 END(ia64_pal_call_stacked) 125 + EXPORT_SYMBOL(ia64_pal_call_stacked) 127 126 128 127 /* 129 128 * Make a physical mode PAL call using the static registers calling convention. ··· 196 193 srlz.d // seralize restoration of psr.l 197 194 br.ret.sptk.many b0 198 195 END(ia64_pal_call_phys_static) 196 + EXPORT_SYMBOL(ia64_pal_call_phys_static) 199 197 200 198 /* 201 199 * Make a PAL call using the stacked registers in physical mode. ··· 254 250 srlz.d // seralize restoration of psr.l 255 251 br.ret.sptk.many b0 256 252 END(ia64_pal_call_phys_stacked) 253 + EXPORT_SYMBOL(ia64_pal_call_phys_stacked) 257 254 258 255 /* 259 256 * Save scratch fp scratch regs which aren't saved in pt_regs already ··· 280 275 stf.spill [r2] = f15,32 281 276 br.ret.sptk.many rp 282 277 END(ia64_save_scratch_fpregs) 278 + EXPORT_SYMBOL(ia64_save_scratch_fpregs) 283 279 284 280 /* 285 281 * Load scratch fp scratch regs (fp10-fp15) ··· 302 296 ldf.fill f15 = [r2],32 303 297 br.ret.sptk.many rp 304 298 END(ia64_load_scratch_fpregs) 299 + EXPORT_SYMBOL(ia64_load_scratch_fpregs)
+4
arch/ia64/kernel/setup.c
··· 71 71 #endif 72 72 73 73 DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); 74 + EXPORT_SYMBOL(ia64_cpu_info); 74 75 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); 76 + #ifdef CONFIG_SMP 77 + EXPORT_SYMBOL(local_per_cpu_offset); 78 + #endif 75 79 unsigned long ia64_cycles_per_usec; 76 80 struct ia64_boot_param *ia64_boot_param; 77 81 struct screen_info screen_info;
+3 -5
arch/ia64/lib/Makefile
··· 2 2 # Makefile for ia64-specific library routines.. 3 3 # 4 4 5 - obj-y := io.o 6 - 7 - lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ 5 + lib-y := io.o __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ 8 6 __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \ 9 7 checksum.o clear_page.o csum_partial_copy.o \ 10 8 clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ 11 9 flush.o ip_fast_csum.o do_csum.o \ 12 10 memset.o strlen.o xor.o 13 11 14 - obj-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o 15 - obj-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o 12 + lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o 13 + lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o 16 14 lib-$(CONFIG_PERFMON) += carta_random.o 17 15 18 16 AFLAGS___divdi3.o =
+2
arch/ia64/lib/clear_page.S
··· 11 11 12 12 #include <asm/asmmacro.h> 13 13 #include <asm/page.h> 14 + #include <asm/export.h> 14 15 15 16 #ifdef CONFIG_ITANIUM 16 17 # define L3_LINE_SIZE 64 // Itanium L3 line size ··· 75 74 mov ar.lc = saved_lc // restore lc 76 75 br.ret.sptk.many rp 77 76 END(clear_page) 77 + EXPORT_SYMBOL(clear_page)
+2
arch/ia64/lib/clear_user.S
··· 12 12 */ 13 13 14 14 #include <asm/asmmacro.h> 15 + #include <asm/export.h> 15 16 16 17 // 17 18 // arguments ··· 208 207 mov ar.lc=saved_lc 209 208 br.ret.sptk.many rp 210 209 END(__do_clear_user) 210 + EXPORT_SYMBOL(__do_clear_user)
+2
arch/ia64/lib/copy_page.S
··· 16 16 */ 17 17 #include <asm/asmmacro.h> 18 18 #include <asm/page.h> 19 + #include <asm/export.h> 19 20 20 21 #define PIPE_DEPTH 3 21 22 #define EPI p[PIPE_DEPTH-1] ··· 97 96 mov ar.lc=saved_lc 98 97 br.ret.sptk.many rp 99 98 END(copy_page) 99 + EXPORT_SYMBOL(copy_page)
+2
arch/ia64/lib/copy_page_mck.S
··· 61 61 */ 62 62 #include <asm/asmmacro.h> 63 63 #include <asm/page.h> 64 + #include <asm/export.h> 64 65 65 66 #define PREFETCH_DIST 8 // McKinley sustains 16 outstanding L2 misses (8 ld, 8 st) 66 67 ··· 184 183 mov pr = saved_pr, -1 185 184 br.ret.sptk.many rp 186 185 END(copy_page) 186 + EXPORT_SYMBOL(copy_page)
+2
arch/ia64/lib/copy_user.S
··· 30 30 */ 31 31 32 32 #include <asm/asmmacro.h> 33 + #include <asm/export.h> 33 34 34 35 // 35 36 // Tuneable parameters ··· 609 608 mov ar.pfs=saved_pfs 610 609 br.ret.sptk.many rp 611 610 END(__copy_user) 611 + EXPORT_SYMBOL(__copy_user)
+2
arch/ia64/lib/flush.S
··· 8 8 */ 9 9 10 10 #include <asm/asmmacro.h> 11 + #include <asm/export.h> 11 12 12 13 13 14 /* ··· 61 60 mov ar.lc=r3 // restore ar.lc 62 61 br.ret.sptk.many rp 63 62 END(flush_icache_range) 63 + EXPORT_SYMBOL_GPL(flush_icache_range) 64 64 65 65 /* 66 66 * clflush_cache_range(start,size)
+2
arch/ia64/lib/idiv32.S
··· 15 15 */ 16 16 17 17 #include <asm/asmmacro.h> 18 + #include <asm/export.h> 18 19 19 20 #ifdef MODULO 20 21 # define OP mod ··· 82 81 getf.sig r8 = f6 // transfer result to result register 83 82 br.ret.sptk.many rp 84 83 END(NAME) 84 + EXPORT_SYMBOL(NAME)
+2
arch/ia64/lib/idiv64.S
··· 15 15 */ 16 16 17 17 #include <asm/asmmacro.h> 18 + #include <asm/export.h> 18 19 19 20 #ifdef MODULO 20 21 # define OP mod ··· 79 78 getf.sig r8 = f11 // transfer result to result register 80 79 br.ret.sptk.many rp 81 80 END(NAME) 81 + EXPORT_SYMBOL(NAME)
+3
arch/ia64/lib/ip_fast_csum.S
··· 13 13 */ 14 14 15 15 #include <asm/asmmacro.h> 16 + #include <asm/export.h> 16 17 17 18 /* 18 19 * Since we know that most likely this function is called with buf aligned ··· 93 92 mov b0=r34 94 93 br.ret.sptk.many b0 95 94 END(ip_fast_csum) 95 + EXPORT_SYMBOL(ip_fast_csum) 96 96 97 97 GLOBAL_ENTRY(csum_ipv6_magic) 98 98 ld4 r20=[in0],4 ··· 144 142 andcm r8=r9,r8 145 143 br.ret.sptk.many b0 146 144 END(csum_ipv6_magic) 145 + EXPORT_SYMBOL(csum_ipv6_magic)
+2
arch/ia64/lib/memcpy.S
··· 14 14 * David Mosberger-Tang <davidm@hpl.hp.com> 15 15 */ 16 16 #include <asm/asmmacro.h> 17 + #include <asm/export.h> 17 18 18 19 GLOBAL_ENTRY(memcpy) 19 20 ··· 300 299 COPY(56, 0) 301 300 302 301 END(memcpy) 302 + EXPORT_SYMBOL(memcpy)
+3
arch/ia64/lib/memcpy_mck.S
··· 15 15 */ 16 16 #include <asm/asmmacro.h> 17 17 #include <asm/page.h> 18 + #include <asm/export.h> 18 19 19 20 #define EK(y...) EX(y) 20 21 ··· 79 78 br.cond.sptk .common_code 80 79 ;; 81 80 END(memcpy) 81 + EXPORT_SYMBOL(memcpy) 82 82 GLOBAL_ENTRY(__copy_user) 83 83 .prologue 84 84 // check dest alignment ··· 666 664 667 665 /* end of McKinley specific optimization */ 668 666 END(__copy_user) 667 + EXPORT_SYMBOL(__copy_user)
+2
arch/ia64/lib/memset.S
··· 18 18 to get peak speed when value = 0. */ 19 19 20 20 #include <asm/asmmacro.h> 21 + #include <asm/export.h> 21 22 #undef ret 22 23 23 24 #define dest in0 ··· 361 360 br.ret.sptk.many rp 362 361 } 363 362 END(memset) 363 + EXPORT_SYMBOL(memset)
+2
arch/ia64/lib/strlen.S
··· 17 17 */ 18 18 19 19 #include <asm/asmmacro.h> 20 + #include <asm/export.h> 20 21 21 22 // 22 23 // ··· 191 190 mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what 192 191 br.ret.sptk.many rp // end of successful recovery code 193 192 END(strlen) 193 + EXPORT_SYMBOL(strlen)
+2
arch/ia64/lib/strlen_user.S
··· 16 16 */ 17 17 18 18 #include <asm/asmmacro.h> 19 + #include <asm/export.h> 19 20 20 21 // 21 22 // int strlen_user(char *) ··· 197 196 mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what 198 197 br.ret.sptk.many rp 199 198 END(__strlen_user) 199 + EXPORT_SYMBOL(__strlen_user)
+2
arch/ia64/lib/strncpy_from_user.S
··· 17 17 */ 18 18 19 19 #include <asm/asmmacro.h> 20 + #include <asm/export.h> 20 21 21 22 GLOBAL_ENTRY(__strncpy_from_user) 22 23 alloc r2=ar.pfs,3,0,0,0 ··· 43 42 [.Lexit:] 44 43 br.ret.sptk.many rp 45 44 END(__strncpy_from_user) 45 + EXPORT_SYMBOL(__strncpy_from_user)
+2
arch/ia64/lib/strnlen_user.S
··· 13 13 */ 14 14 15 15 #include <asm/asmmacro.h> 16 + #include <asm/export.h> 16 17 17 18 GLOBAL_ENTRY(__strnlen_user) 18 19 .prologue ··· 44 43 mov ar.lc=r16 // restore ar.lc 45 44 br.ret.sptk.many rp 46 45 END(__strnlen_user) 46 + EXPORT_SYMBOL(__strnlen_user)
+5
arch/ia64/lib/xor.S
··· 14 14 */ 15 15 16 16 #include <asm/asmmacro.h> 17 + #include <asm/export.h> 17 18 18 19 GLOBAL_ENTRY(xor_ia64_2) 19 20 .prologue ··· 52 51 mov pr = r29, -1 53 52 br.ret.sptk.few rp 54 53 END(xor_ia64_2) 54 + EXPORT_SYMBOL(xor_ia64_2) 55 55 56 56 GLOBAL_ENTRY(xor_ia64_3) 57 57 .prologue ··· 93 91 mov pr = r29, -1 94 92 br.ret.sptk.few rp 95 93 END(xor_ia64_3) 94 + EXPORT_SYMBOL(xor_ia64_3) 96 95 97 96 GLOBAL_ENTRY(xor_ia64_4) 98 97 .prologue ··· 137 134 mov pr = r29, -1 138 135 br.ret.sptk.few rp 139 136 END(xor_ia64_4) 137 + EXPORT_SYMBOL(xor_ia64_4) 140 138 141 139 GLOBAL_ENTRY(xor_ia64_5) 142 140 .prologue ··· 186 182 mov pr = r29, -1 187 183 br.ret.sptk.few rp 188 184 END(xor_ia64_5) 185 + EXPORT_SYMBOL(xor_ia64_5)
+3
arch/m68k/include/asm/export.h
··· 1 + #define KSYM_ALIGN 2 2 + #define KCRC_ALIGN 2 3 + #include <asm-generic/export.h>
+1 -1
arch/m68k/kernel/Makefile
··· 13 13 extra-$(CONFIG_SUN3) := sun3-head.o 14 14 extra-y += vmlinux.lds 15 15 16 - obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o 16 + obj-y := entry.o irq.o module.o process.o ptrace.o 17 17 obj-y += setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o 18 18 19 19 obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
-32
arch/m68k/kernel/m68k_ksyms.c
··· 1 - #include <linux/module.h> 2 - 3 - asmlinkage long long __ashldi3 (long long, int); 4 - asmlinkage long long __ashrdi3 (long long, int); 5 - asmlinkage long long __lshrdi3 (long long, int); 6 - asmlinkage long long __muldi3 (long long, long long); 7 - 8 - /* The following are special because they're not called 9 - explicitly (the C compiler generates them). Fortunately, 10 - their interface isn't gonna change any time soon now, so 11 - it's OK to leave it out of version control. */ 12 - EXPORT_SYMBOL(__ashldi3); 13 - EXPORT_SYMBOL(__ashrdi3); 14 - EXPORT_SYMBOL(__lshrdi3); 15 - EXPORT_SYMBOL(__muldi3); 16 - 17 - #if defined(CONFIG_CPU_HAS_NO_MULDIV64) 18 - /* 19 - * Simpler 68k and ColdFire parts also need a few other gcc functions. 20 - */ 21 - extern long long __divsi3(long long, long long); 22 - extern long long __modsi3(long long, long long); 23 - extern long long __mulsi3(long long, long long); 24 - extern long long __udivsi3(long long, long long); 25 - extern long long __umodsi3(long long, long long); 26 - 27 - EXPORT_SYMBOL(__divsi3); 28 - EXPORT_SYMBOL(__modsi3); 29 - EXPORT_SYMBOL(__mulsi3); 30 - EXPORT_SYMBOL(__udivsi3); 31 - EXPORT_SYMBOL(__umodsi3); 32 - #endif
+4
arch/m68k/lib/ashldi3.c
··· 13 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 14 GNU General Public License for more details. */ 15 15 16 + #include <linux/compiler.h> 17 + #include <linux/export.h> 18 + 16 19 #define BITS_PER_UNIT 8 17 20 18 21 typedef int SItype __attribute__ ((mode (SI))); ··· 58 55 59 56 return w.ll; 60 57 } 58 + EXPORT_SYMBOL(__ashldi3);
+4
arch/m68k/lib/ashrdi3.c
··· 13 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 14 GNU General Public License for more details. */ 15 15 16 + #include <linux/compiler.h> 17 + #include <linux/export.h> 18 + 16 19 #define BITS_PER_UNIT 8 17 20 18 21 typedef int SItype __attribute__ ((mode (SI))); ··· 59 56 60 57 return w.ll; 61 58 } 59 + EXPORT_SYMBOL(__ashrdi3);
+3
arch/m68k/lib/divsi3.S
··· 33 33 D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992 34 34 */ 35 35 36 + #include <asm/export.h> 37 + 36 38 /* These are predefined by new versions of GNU cpp. */ 37 39 38 40 #ifndef __USER_LABEL_PREFIX__ ··· 120 118 L3: movel sp@+, d2 121 119 rts 122 120 121 + EXPORT_SYMBOL(__divsi3)
+4
arch/m68k/lib/lshrdi3.c
··· 13 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 14 GNU General Public License for more details. */ 15 15 16 + #include <linux/compiler.h> 17 + #include <linux/export.h> 18 + 16 19 #define BITS_PER_UNIT 8 17 20 18 21 typedef int SItype __attribute__ ((mode (SI))); ··· 58 55 59 56 return w.ll; 60 57 } 58 + EXPORT_SYMBOL(__lshrdi3);
+3
arch/m68k/lib/modsi3.S
··· 33 33 D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992 34 34 */ 35 35 36 + #include <asm/export.h> 37 + 36 38 /* These are predefined by new versions of GNU cpp. */ 37 39 38 40 #ifndef __USER_LABEL_PREFIX__ ··· 108 106 movel d1, d0 109 107 rts 110 108 109 + EXPORT_SYMBOL(__modsi3)
+4
arch/m68k/lib/muldi3.c
··· 14 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 15 GNU General Public License for more details. */ 16 16 17 + #include <linux/compiler.h> 18 + #include <linux/export.h> 19 + 17 20 #ifdef CONFIG_CPU_HAS_NO_MULDIV64 18 21 19 22 #define SI_TYPE_SIZE 32 ··· 93 90 94 91 return w.ll; 95 92 } 93 + EXPORT_SYMBOL(__muldi3);
+2 -2
arch/m68k/lib/mulsi3.S
··· 32 32 Some of this code comes from MINIX, via the folks at ericsson. 33 33 D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992 34 34 */ 35 - 35 + #include <asm/export.h> 36 36 /* These are predefined by new versions of GNU cpp. */ 37 37 38 38 #ifndef __USER_LABEL_PREFIX__ ··· 102 102 addl d1, d0 103 103 104 104 rts 105 - 105 + EXPORT_SYMBOL(__mulsi3)
+2 -2
arch/m68k/lib/udivsi3.S
··· 32 32 Some of this code comes from MINIX, via the folks at ericsson. 33 33 D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992 34 34 */ 35 - 35 + #include <asm/export.h> 36 36 /* These are predefined by new versions of GNU cpp. */ 37 37 38 38 #ifndef __USER_LABEL_PREFIX__ ··· 154 154 unlk a6 | and return 155 155 rts 156 156 #endif /* __mcf5200__ || __mcoldfire__ */ 157 - 157 + EXPORT_SYMBOL(__udivsi3)
+2 -2
arch/m68k/lib/umodsi3.S
··· 32 32 Some of this code comes from MINIX, via the folks at ericsson. 33 33 D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992 34 34 */ 35 - 35 + #include <asm/export.h> 36 36 /* These are predefined by new versions of GNU cpp. */ 37 37 38 38 #ifndef __USER_LABEL_PREFIX__ ··· 105 105 subl d0, d1 /* d1 = a - (a/b)*b */ 106 106 movel d1, d0 107 107 rts 108 - 108 + EXPORT_SYMBOL(__umodsi3)
+1
arch/powerpc/include/asm/Kbuild
··· 1 1 generic-y += clkdev.h 2 2 generic-y += div64.h 3 + generic-y += export.h 3 4 generic-y += irq_regs.h 4 5 generic-y += irq_work.h 5 6 generic-y += local64.h
-4
arch/powerpc/kernel/Makefile
··· 90 90 obj-$(CONFIG_PPC32) += entry_32.o setup_32.o 91 91 obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o 92 92 obj-$(CONFIG_KGDB) += kgdb.o 93 - obj-$(CONFIG_MODULES) += ppc_ksyms.o 94 - ifeq ($(CONFIG_PPC32),y) 95 - obj-$(CONFIG_MODULES) += ppc_ksyms_32.o 96 - endif 97 93 obj-$(CONFIG_BOOTX_TEXT) += btext.o 98 94 obj-$(CONFIG_SMP) += smp.o 99 95 obj-$(CONFIG_KPROBES) += kprobes.o
+2
arch/powerpc/kernel/entry_32.S
··· 33 33 #include <asm/unistd.h> 34 34 #include <asm/ftrace.h> 35 35 #include <asm/ptrace.h> 36 + #include <asm/export.h> 36 37 37 38 /* 38 39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE. ··· 1359 1358 MCOUNT_RESTORE_FRAME 1360 1359 bctr 1361 1360 #endif 1361 + EXPORT_SYMBOL(_mcount) 1362 1362 1363 1363 _GLOBAL(ftrace_stub) 1364 1364 blr
+3
arch/powerpc/kernel/entry_64.S
··· 38 38 #include <asm/context_tracking.h> 39 39 #include <asm/tm.h> 40 40 #include <asm/ppc-opcode.h> 41 + #include <asm/export.h> 41 42 42 43 /* 43 44 * System calls. ··· 1178 1177 #ifdef CONFIG_DYNAMIC_FTRACE 1179 1178 _GLOBAL(mcount) 1180 1179 _GLOBAL(_mcount) 1180 + EXPORT_SYMBOL(_mcount) 1181 1181 mflr r12 1182 1182 mtctr r12 1183 1183 mtlr r0 ··· 1415 1413 1416 1414 #else 1417 1415 _GLOBAL_TOC(_mcount) 1416 + EXPORT_SYMBOL(_mcount) 1418 1417 /* Taken from output of objdump from lib64/glibc */ 1419 1418 mflr r3 1420 1419 ld r11, 0(r1)
+2
arch/powerpc/kernel/epapr_hcalls.S
··· 16 16 #include <asm/ppc_asm.h> 17 17 #include <asm/asm-compat.h> 18 18 #include <asm/asm-offsets.h> 19 + #include <asm/export.h> 19 20 20 21 #ifndef CONFIG_PPC64 21 22 /* epapr_ev_idle() was derived from e500_idle() */ ··· 54 53 nop 55 54 nop 56 55 blr 56 + EXPORT_SYMBOL(epapr_hypercall_start)
+3
arch/powerpc/kernel/fpu.S
··· 24 24 #include <asm/ppc_asm.h> 25 25 #include <asm/asm-offsets.h> 26 26 #include <asm/ptrace.h> 27 + #include <asm/export.h> 27 28 28 29 #ifdef CONFIG_VSX 29 30 #define __REST_32FPVSRS(n,c,base) \ ··· 60 59 MTFSF_L(fr0) 61 60 REST_32FPVSRS(0, R4, R3) 62 61 blr 62 + EXPORT_SYMBOL(load_fp_state) 63 63 64 64 /* 65 65 * Store FP state into memory, including FPSCR ··· 71 69 mffs fr0 72 70 stfd fr0,FPSTATE_FPSCR(r3) 73 71 blr 72 + EXPORT_SYMBOL(store_fp_state) 74 73 75 74 /* 76 75 * This task wants to use the FPU now.
+5
arch/powerpc/kernel/head_32.S
··· 34 34 #include <asm/ptrace.h> 35 35 #include <asm/bug.h> 36 36 #include <asm/kvm_book3s_asm.h> 37 + #include <asm/export.h> 37 38 38 39 /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ 39 40 #define LOAD_BAT(n, reg, RA, RB) \ ··· 739 738 740 739 .globl mol_trampoline 741 740 .set mol_trampoline, i0x2f00 741 + EXPORT_SYMBOL(mol_trampoline) 742 742 743 743 . = 0x3000 744 744 ··· 1047 1045 4: trap 1048 1046 EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0 1049 1047 blr 1048 + EXPORT_SYMBOL(switch_mmu_context) 1050 1049 1051 1050 /* 1052 1051 * An undocumented "feature" of 604e requires that the v bit ··· 1275 1272 .globl empty_zero_page 1276 1273 empty_zero_page: 1277 1274 .space 4096 1275 + EXPORT_SYMBOL(empty_zero_page) 1278 1276 1279 1277 .globl swapper_pg_dir 1280 1278 swapper_pg_dir: ··· 1289 1285 .long 0, 0, 0, 0, 0, 0, 0, 0 1290 1286 .long 0, 0, 0, 0, 0, 0, 0, 0 1291 1287 .long 0, 0, 0, 0, 0, 0, 0, 0 1288 + EXPORT_SYMBOL(intercept_table) 1292 1289 1293 1290 /* Room for two PTE pointers, usually the kernel and current user pointers 1294 1291 * to their respective root page table.
+2
arch/powerpc/kernel/head_40x.S
··· 41 41 #include <asm/ppc_asm.h> 42 42 #include <asm/asm-offsets.h> 43 43 #include <asm/ptrace.h> 44 + #include <asm/export.h> 44 45 45 46 /* As with the other PowerPC ports, it is expected that when code 46 47 * execution begins here, the following registers contain valid, yet ··· 972 971 .globl empty_zero_page 973 972 empty_zero_page: 974 973 .space 4096 974 + EXPORT_SYMBOL(empty_zero_page) 975 975 .globl swapper_pg_dir 976 976 swapper_pg_dir: 977 977 .space PGD_TABLE_SIZE
+2
arch/powerpc/kernel/head_44x.S
··· 39 39 #include <asm/asm-offsets.h> 40 40 #include <asm/ptrace.h> 41 41 #include <asm/synch.h> 42 + #include <asm/export.h> 42 43 #include "head_booke.h" 43 44 44 45 ··· 1255 1254 .globl empty_zero_page 1256 1255 empty_zero_page: 1257 1256 .space PAGE_SIZE 1257 + EXPORT_SYMBOL(empty_zero_page) 1258 1258 1259 1259 /* 1260 1260 * To support >32-bit physical addresses, we use an 8KB pgdir.
+2
arch/powerpc/kernel/head_64.S
··· 43 43 #include <asm/hw_irq.h> 44 44 #include <asm/cputhreads.h> 45 45 #include <asm/ppc-opcode.h> 46 + #include <asm/export.h> 46 47 47 48 /* The physical memory is laid out such that the secondary processor 48 49 * spin code sits at 0x0000...0x00ff. On server, the vectors follow ··· 1003 1002 .globl empty_zero_page 1004 1003 empty_zero_page: 1005 1004 .space PAGE_SIZE 1005 + EXPORT_SYMBOL(empty_zero_page)
+2
arch/powerpc/kernel/head_8xx.S
··· 31 31 #include <asm/asm-offsets.h> 32 32 #include <asm/ptrace.h> 33 33 #include <asm/fixmap.h> 34 + #include <asm/export.h> 34 35 35 36 /* Macro to make the code more readable. */ 36 37 #ifdef CONFIG_8xx_CPU6 ··· 885 884 .align PAGE_SHIFT 886 885 empty_zero_page: 887 886 .space PAGE_SIZE 887 + EXPORT_SYMBOL(empty_zero_page) 888 888 889 889 .globl swapper_pg_dir 890 890 swapper_pg_dir:
+2
arch/powerpc/kernel/head_fsl_booke.S
··· 42 42 #include <asm/asm-offsets.h> 43 43 #include <asm/cache.h> 44 44 #include <asm/ptrace.h> 45 + #include <asm/export.h> 45 46 #include "head_booke.h" 46 47 47 48 /* As with the other PowerPC ports, it is expected that when code ··· 1224 1223 .globl empty_zero_page 1225 1224 empty_zero_page: 1226 1225 .space 4096 1226 + EXPORT_SYMBOL(empty_zero_page) 1227 1227 .globl swapper_pg_dir 1228 1228 swapper_pg_dir: 1229 1229 .space PGD_TABLE_SIZE
+2
arch/powerpc/kernel/misc.S
··· 18 18 #include <asm/unistd.h> 19 19 #include <asm/asm-compat.h> 20 20 #include <asm/asm-offsets.h> 21 + #include <asm/export.h> 21 22 22 23 .text 23 24 ··· 119 118 _GLOBAL(current_stack_pointer) 120 119 PPC_LL r3,0(r1) 121 120 blr 121 + EXPORT_SYMBOL(current_stack_pointer)
+10
arch/powerpc/kernel/misc_32.S
··· 33 33 #include <asm/kexec.h> 34 34 #include <asm/bug.h> 35 35 #include <asm/ptrace.h> 36 + #include <asm/export.h> 36 37 37 38 .text 38 39 ··· 320 319 #endif /* CONFIG_4xx */ 321 320 isync 322 321 blr 322 + EXPORT_SYMBOL(flush_instruction_cache) 323 323 #endif /* CONFIG_PPC_8xx */ 324 324 325 325 /* ··· 361 359 isync 362 360 blr 363 361 _ASM_NOKPROBE_SYMBOL(flush_icache_range) 362 + EXPORT_SYMBOL(flush_icache_range) 364 363 365 364 /* 366 365 * Flush a particular page from the data cache to RAM. ··· 500 497 li r0,MAX_COPY_PREFETCH 501 498 li r11,4 502 499 b 2b 500 + EXPORT_SYMBOL(copy_page) 503 501 504 502 /* 505 503 * Extended precision shifts. ··· 528 524 sraw r3,r3,r5 # MSW = MSW >> count 529 525 or r4,r4,r7 # LSW |= t2 530 526 blr 527 + EXPORT_SYMBOL(__ashrdi3) 531 528 532 529 _GLOBAL(__ashldi3) 533 530 subfic r6,r5,32 ··· 540 535 slw r4,r4,r5 # LSW = LSW << count 541 536 or r3,r3,r7 # MSW |= t2 542 537 blr 538 + EXPORT_SYMBOL(__ashldi3) 543 539 544 540 _GLOBAL(__lshrdi3) 545 541 subfic r6,r5,32 ··· 552 546 srw r3,r3,r5 # MSW = MSW >> count 553 547 or r4,r4,r7 # LSW |= t2 554 548 blr 549 + EXPORT_SYMBOL(__lshrdi3) 555 550 556 551 /* 557 552 * 64-bit comparison: __cmpdi2(s64 a, s64 b) ··· 568 561 bltlr 569 562 li r3,2 570 563 blr 564 + EXPORT_SYMBOL(__cmpdi2) 571 565 /* 572 566 * 64-bit comparison: __ucmpdi2(u64 a, u64 b) 573 567 * Returns 0 if a < b, 1 if a == b, 2 if a > b. ··· 583 575 bltlr 584 576 li r3,2 585 577 blr 578 + EXPORT_SYMBOL(__ucmpdi2) 586 579 587 580 _GLOBAL(__bswapdi2) 588 581 rotlwi r9,r4,8 ··· 595 586 mr r3,r9 596 587 mr r4,r10 597 588 blr 589 + EXPORT_SYMBOL(__bswapdi2) 598 590 599 591 #ifdef CONFIG_SMP 600 592 _GLOBAL(start_secondary_resume)
+4
arch/powerpc/kernel/misc_64.S
··· 27 27 #include <asm/kexec.h> 28 28 #include <asm/ptrace.h> 29 29 #include <asm/mmu.h> 30 + #include <asm/export.h> 30 31 31 32 .text 32 33 ··· 111 110 isync 112 111 blr 113 112 _ASM_NOKPROBE_SYMBOL(flush_icache_range) 113 + EXPORT_SYMBOL(flush_icache_range) 114 114 115 115 /* 116 116 * Like above, but only do the D-cache. ··· 142 140 bdnz 0b 143 141 sync 144 142 blr 143 + EXPORT_SYMBOL(flush_dcache_range) 145 144 146 145 /* 147 146 * Like above, but works on non-mapped physical addresses. ··· 246 243 blr 247 244 248 245 _GLOBAL(__bswapdi2) 246 + EXPORT_SYMBOL(__bswapdi2) 249 247 srdi r8,r3,32 250 248 rlwinm r7,r3,8,0xffffffff 251 249 rlwimi r7,r3,24,0,7
+1
arch/powerpc/kernel/pci-common.c
··· 56 56 57 57 /* ISA Memory physical address */ 58 58 resource_size_t isa_mem_base; 59 + EXPORT_SYMBOL(isa_mem_base); 59 60 60 61 61 62 static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+2
arch/powerpc/kernel/pci_32.c
··· 32 32 unsigned long isa_io_base = 0; 33 33 unsigned long pci_dram_offset = 0; 34 34 int pcibios_assign_bus_offset = 1; 35 + EXPORT_SYMBOL(isa_io_base); 36 + EXPORT_SYMBOL(pci_dram_offset); 35 37 36 38 void pcibios_make_OF_bus_map(void); 37 39
-37
arch/powerpc/kernel/ppc_ksyms.c
··· 1 - #include <linux/ftrace.h> 2 - #include <linux/mm.h> 3 - 4 - #include <asm/processor.h> 5 - #include <asm/switch_to.h> 6 - #include <asm/cacheflush.h> 7 - #include <asm/epapr_hcalls.h> 8 - 9 - #ifdef CONFIG_PPC64 10 - EXPORT_SYMBOL(flush_dcache_range); 11 - #endif 12 - EXPORT_SYMBOL(flush_icache_range); 13 - 14 - EXPORT_SYMBOL(empty_zero_page); 15 - 16 - long long __bswapdi2(long long); 17 - EXPORT_SYMBOL(__bswapdi2); 18 - 19 - #ifdef CONFIG_FUNCTION_TRACER 20 - EXPORT_SYMBOL(_mcount); 21 - #endif 22 - 23 - #ifdef CONFIG_PPC_FPU 24 - EXPORT_SYMBOL(load_fp_state); 25 - EXPORT_SYMBOL(store_fp_state); 26 - #endif 27 - 28 - #ifdef CONFIG_ALTIVEC 29 - EXPORT_SYMBOL(load_vr_state); 30 - EXPORT_SYMBOL(store_vr_state); 31 - #endif 32 - 33 - #ifdef CONFIG_EPAPR_PARAVIRT 34 - EXPORT_SYMBOL(epapr_hypercall_start); 35 - #endif 36 - 37 - EXPORT_SYMBOL(current_stack_pointer);
-60
arch/powerpc/kernel/ppc_ksyms_32.c
··· 1 - #include <linux/export.h> 2 - #include <linux/smp.h> 3 - 4 - #include <asm/page.h> 5 - #include <asm/dma.h> 6 - #include <asm/io.h> 7 - #include <asm/hw_irq.h> 8 - #include <asm/time.h> 9 - #include <asm/mmu_context.h> 10 - #include <asm/pgtable.h> 11 - #include <asm/dcr.h> 12 - 13 - EXPORT_SYMBOL(ISA_DMA_THRESHOLD); 14 - EXPORT_SYMBOL(DMA_MODE_READ); 15 - EXPORT_SYMBOL(DMA_MODE_WRITE); 16 - 17 - #if defined(CONFIG_PCI) 18 - EXPORT_SYMBOL(isa_io_base); 19 - EXPORT_SYMBOL(isa_mem_base); 20 - EXPORT_SYMBOL(pci_dram_offset); 21 - #endif 22 - 23 - #ifdef CONFIG_SMP 24 - EXPORT_SYMBOL(smp_hw_index); 25 - #endif 26 - 27 - long long __ashrdi3(long long, int); 28 - long long __ashldi3(long long, int); 29 - long long __lshrdi3(long long, int); 30 - int __ucmpdi2(unsigned long long, unsigned long long); 31 - int __cmpdi2(long long, long long); 32 - EXPORT_SYMBOL(__ashrdi3); 33 - EXPORT_SYMBOL(__ashldi3); 34 - EXPORT_SYMBOL(__lshrdi3); 35 - EXPORT_SYMBOL(__ucmpdi2); 36 - EXPORT_SYMBOL(__cmpdi2); 37 - 38 - EXPORT_SYMBOL(timer_interrupt); 39 - EXPORT_SYMBOL(tb_ticks_per_jiffy); 40 - 41 - EXPORT_SYMBOL(switch_mmu_context); 42 - 43 - #ifdef CONFIG_PPC_STD_MMU_32 44 - extern long mol_trampoline; 45 - EXPORT_SYMBOL(mol_trampoline); /* For MOL */ 46 - EXPORT_SYMBOL(flush_hash_pages); /* For MOL */ 47 - #ifdef CONFIG_SMP 48 - extern int mmu_hash_lock; 49 - EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */ 50 - #endif /* CONFIG_SMP */ 51 - extern long *intercept_table; 52 - EXPORT_SYMBOL(intercept_table); 53 - #endif /* CONFIG_PPC_STD_MMU_32 */ 54 - 55 - #ifdef CONFIG_PPC_DCR_NATIVE 56 - EXPORT_SYMBOL(__mtdcr); 57 - EXPORT_SYMBOL(__mfdcr); 58 - #endif 59 - 60 - EXPORT_SYMBOL(flush_instruction_cache);
+6
arch/powerpc/kernel/setup_32.c
··· 16 16 #include <linux/cpu.h> 17 17 #include <linux/console.h> 18 18 #include <linux/memblock.h> 19 + #include <linux/export.h> 19 20 20 21 #include <asm/io.h> 21 22 #include <asm/prom.h> ··· 48 47 EXPORT_SYMBOL_GPL(boot_cpuid_phys); 49 48 50 49 int smp_hw_index[NR_CPUS]; 50 + EXPORT_SYMBOL(smp_hw_index); 51 51 52 52 unsigned long ISA_DMA_THRESHOLD; 53 53 unsigned int DMA_MODE_READ; 54 54 unsigned int DMA_MODE_WRITE; 55 + 56 + EXPORT_SYMBOL(ISA_DMA_THRESHOLD); 57 + EXPORT_SYMBOL(DMA_MODE_READ); 58 + EXPORT_SYMBOL(DMA_MODE_WRITE); 55 59 56 60 /* 57 61 * These are used in binfmt_elf.c to put aux entries on the stack
+1
arch/powerpc/kernel/time.c
··· 596 596 irq_exit(); 597 597 set_irq_regs(old_regs); 598 598 } 599 + EXPORT_SYMBOL(timer_interrupt); 599 600 600 601 /* 601 602 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
+3
arch/powerpc/kernel/vector.S
··· 6 6 #include <asm/thread_info.h> 7 7 #include <asm/page.h> 8 8 #include <asm/ptrace.h> 9 + #include <asm/export.h> 9 10 10 11 /* 11 12 * Load state from memory into VMX registers including VSCR. ··· 18 17 mtvscr v0 19 18 REST_32VRS(0,r4,r3) 20 19 blr 20 + EXPORT_SYMBOL(load_vr_state) 21 21 22 22 /* 23 23 * Store VMX state into memory, including VSCR. ··· 30 28 li r4, VRSTATE_VSCR 31 29 stvx v0, r4, r3 32 30 blr 31 + EXPORT_SYMBOL(store_vr_state) 33 32 34 33 /* 35 34 * Disable VMX for the task which had it previously,
+1 -1
arch/powerpc/lib/Makefile
··· 9 9 CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE) 10 10 CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE) 11 11 12 - obj-y += string.o alloc.o crtsavres.o ppc_ksyms.o code-patching.o \ 12 + obj-y += string.o alloc.o crtsavres.o code-patching.o \ 13 13 feature-fixups.o 14 14 15 15 obj-$(CONFIG_PPC32) += div64.o copy_32.o
+3
arch/powerpc/lib/checksum_32.S
··· 17 17 #include <asm/cache.h> 18 18 #include <asm/errno.h> 19 19 #include <asm/ppc_asm.h> 20 + #include <asm/export.h> 20 21 21 22 .text 22 23 ··· 69 68 adde r5,r5,r0 70 69 5: addze r3,r5 /* add in final carry */ 71 70 blr 71 + EXPORT_SYMBOL(__csum_partial) 72 72 73 73 /* 74 74 * Computes the checksum of a memory block at src, length len, ··· 299 297 .long 41b,dst_error 300 298 .long 50b,src_error 301 299 .long 51b,dst_error 300 + EXPORT_SYMBOL(csum_partial_copy_generic)
+3
arch/powerpc/lib/checksum_64.S
··· 16 16 #include <asm/processor.h> 17 17 #include <asm/errno.h> 18 18 #include <asm/ppc_asm.h> 19 + #include <asm/export.h> 19 20 20 21 /* 21 22 * Computes the checksum of a memory block at buff, length len, ··· 177 176 add r3,r4,r0 178 177 srdi r3,r3,32 179 178 blr 179 + EXPORT_SYMBOL(__csum_partial) 180 180 181 181 182 182 .macro srcnr ··· 432 430 li r6,-EFAULT 433 431 stw r6,0(r8) 434 432 blr 433 + EXPORT_SYMBOL(csum_partial_copy_generic)
+5
arch/powerpc/lib/copy_32.S
··· 12 12 #include <asm/cache.h> 13 13 #include <asm/errno.h> 14 14 #include <asm/ppc_asm.h> 15 + #include <asm/export.h> 15 16 16 17 #define COPY_16_BYTES \ 17 18 lwz r7,4(r4); \ ··· 93 92 subf r6,r0,r6 94 93 cmplwi 0,r4,0 95 94 bne 2f /* Use normal procedure if r4 is not zero */ 95 + EXPORT_SYMBOL(memset) 96 96 _GLOBAL(memset_nocache_branch) 97 97 b 2f /* Skip optimised bloc until cache is enabled */ 98 98 ··· 218 216 stbu r0,1(r6) 219 217 bdnz 40b 220 218 65: blr 219 + EXPORT_SYMBOL(memcpy) 220 + EXPORT_SYMBOL(memmove) 221 221 222 222 generic_memcpy: 223 223 srwi. r7,r5,3 ··· 511 507 .long 112b,120b 512 508 .long 114b,120b 513 509 .text 510 + EXPORT_SYMBOL(__copy_tofrom_user)
+2
arch/powerpc/lib/copypage_64.S
··· 10 10 #include <asm/processor.h> 11 11 #include <asm/ppc_asm.h> 12 12 #include <asm/asm-offsets.h> 13 + #include <asm/export.h> 13 14 14 15 .section ".toc","aw" 15 16 PPC64_CACHES: ··· 111 110 std r11,120(r3) 112 111 std r12,128(r3) 113 112 blr 113 + EXPORT_SYMBOL(copy_page)
+2
arch/powerpc/lib/copyuser_64.S
··· 8 8 */ 9 9 #include <asm/processor.h> 10 10 #include <asm/ppc_asm.h> 11 + #include <asm/export.h> 11 12 12 13 #ifdef __BIG_ENDIAN__ 13 14 #define sLd sld /* Shift towards low-numbered address. */ ··· 672 671 .llong 89b,100b 673 672 .llong 90b,100b 674 673 .llong 91b,100b 674 + EXPORT_SYMBOL(__copy_tofrom_user)
+5
arch/powerpc/lib/hweight_64.S
··· 19 19 */ 20 20 #include <asm/processor.h> 21 21 #include <asm/ppc_asm.h> 22 + #include <asm/export.h> 22 23 23 24 /* Note: This code relies on -mminimal-toc */ 24 25 ··· 33 32 clrldi r3,r3,64-8 34 33 blr 35 34 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) 35 + EXPORT_SYMBOL(__arch_hweight8) 36 36 37 37 _GLOBAL(__arch_hweight16) 38 38 BEGIN_FTR_SECTION ··· 56 54 blr 57 55 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50) 58 56 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) 57 + EXPORT_SYMBOL(__arch_hweight16) 59 58 60 59 _GLOBAL(__arch_hweight32) 61 60 BEGIN_FTR_SECTION ··· 82 79 blr 83 80 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51) 84 81 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) 82 + EXPORT_SYMBOL(__arch_hweight32) 85 83 86 84 _GLOBAL(__arch_hweight64) 87 85 BEGIN_FTR_SECTION ··· 112 108 blr 113 109 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52) 114 110 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) 111 + EXPORT_SYMBOL(__arch_hweight64)
+3
arch/powerpc/lib/mem_64.S
··· 11 11 #include <asm/processor.h> 12 12 #include <asm/errno.h> 13 13 #include <asm/ppc_asm.h> 14 + #include <asm/export.h> 14 15 15 16 _GLOBAL(memset) 16 17 neg r0,r3 ··· 78 77 10: bflr 31 79 78 stb r4,0(r6) 80 79 blr 80 + EXPORT_SYMBOL(memset) 81 81 82 82 _GLOBAL_TOC(memmove) 83 83 cmplw 0,r3,r4 ··· 121 119 beq 2b 122 120 mtctr r7 123 121 b 1b 122 + EXPORT_SYMBOL(memmove)
+2
arch/powerpc/lib/memcmp_64.S
··· 8 8 * 2 of the License, or (at your option) any later version. 9 9 */ 10 10 #include <asm/ppc_asm.h> 11 + #include <asm/export.h> 11 12 12 13 #define off8 r6 13 14 #define off16 r7 ··· 232 231 ld r28,-32(r1) 233 232 ld r27,-40(r1) 234 233 blr 234 + EXPORT_SYMBOL(memcmp)
+2
arch/powerpc/lib/memcpy_64.S
··· 8 8 */ 9 9 #include <asm/processor.h> 10 10 #include <asm/ppc_asm.h> 11 + #include <asm/export.h> 11 12 12 13 .align 7 13 14 _GLOBAL_TOC(memcpy) ··· 220 219 4: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ 221 220 blr 222 221 #endif 222 + EXPORT_SYMBOL(memcpy)
-29
arch/powerpc/lib/ppc_ksyms.c
··· 1 - #include <linux/string.h> 2 - #include <linux/uaccess.h> 3 - #include <linux/bitops.h> 4 - #include <net/checksum.h> 5 - 6 - EXPORT_SYMBOL(memcpy); 7 - EXPORT_SYMBOL(memset); 8 - EXPORT_SYMBOL(memmove); 9 - EXPORT_SYMBOL(memcmp); 10 - EXPORT_SYMBOL(memchr); 11 - 12 - EXPORT_SYMBOL(strncpy); 13 - EXPORT_SYMBOL(strncmp); 14 - 15 - #ifndef CONFIG_GENERIC_CSUM 16 - EXPORT_SYMBOL(__csum_partial); 17 - EXPORT_SYMBOL(csum_partial_copy_generic); 18 - #endif 19 - 20 - EXPORT_SYMBOL(__copy_tofrom_user); 21 - EXPORT_SYMBOL(__clear_user); 22 - EXPORT_SYMBOL(copy_page); 23 - 24 - #ifdef CONFIG_PPC64 25 - EXPORT_SYMBOL(__arch_hweight8); 26 - EXPORT_SYMBOL(__arch_hweight16); 27 - EXPORT_SYMBOL(__arch_hweight32); 28 - EXPORT_SYMBOL(__arch_hweight64); 29 - #endif
+6
arch/powerpc/lib/string.S
··· 11 11 #include <asm/processor.h> 12 12 #include <asm/errno.h> 13 13 #include <asm/ppc_asm.h> 14 + #include <asm/export.h> 14 15 15 16 .section __ex_table,"a" 16 17 PPC_LONG_ALIGN ··· 37 36 2: stbu r0,1(r6) /* clear it out if so */ 38 37 bdnz 2b 39 38 blr 39 + EXPORT_SYMBOL(strncpy) 40 40 41 41 _GLOBAL(strncmp) 42 42 PPC_LCMPI 0,r5,0 ··· 55 53 blr 56 54 2: li r3,0 57 55 blr 56 + EXPORT_SYMBOL(strncmp) 58 57 59 58 #ifdef CONFIG_PPC32 60 59 _GLOBAL(memcmp) ··· 71 68 blr 72 69 2: li r3,0 73 70 blr 71 + EXPORT_SYMBOL(memcmp) 74 72 #endif 75 73 76 74 _GLOBAL(memchr) ··· 86 82 beqlr 87 83 2: li r3,0 88 84 blr 85 + EXPORT_SYMBOL(memchr) 89 86 90 87 #ifdef CONFIG_PPC32 91 88 _GLOBAL(__clear_user) ··· 130 125 PPC_LONG 1b,91b 131 126 PPC_LONG 8b,92b 132 127 .text 128 + EXPORT_SYMBOL(__clear_user) 133 129 #endif
+2
arch/powerpc/lib/string_64.S
··· 20 20 21 21 #include <asm/ppc_asm.h> 22 22 #include <asm/asm-offsets.h> 23 + #include <asm/export.h> 23 24 24 25 .section ".toc","aw" 25 26 PPC64_CACHES: ··· 201 200 cmpdi r4,32 202 201 blt .Lshort_clear 203 202 b .Lmedium_clear 203 + EXPORT_SYMBOL(__clear_user)
+3
arch/powerpc/mm/hash_low_32.S
··· 26 26 #include <asm/ppc_asm.h> 27 27 #include <asm/thread_info.h> 28 28 #include <asm/asm-offsets.h> 29 + #include <asm/export.h> 29 30 30 31 #ifdef CONFIG_SMP 31 32 .section .bss ··· 34 33 .globl mmu_hash_lock 35 34 mmu_hash_lock: 36 35 .space 4 36 + EXPORT_SYMBOL(mmu_hash_lock) 37 37 #endif /* CONFIG_SMP */ 38 38 39 39 /* ··· 577 575 rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */ 578 576 stwcx. r8,0,r5 /* update the pte */ 579 577 bne- 33b 578 + EXPORT_SYMBOL(flush_hash_pages) 580 579 581 580 /* Get the address of the primary PTE group in the hash table (r3) */ 582 581 _GLOBAL(flush_hash_patch_A)
+3 -1
arch/powerpc/relocs_check.sh
··· 30 30 # On PPC64: 31 31 # R_PPC64_RELATIVE, R_PPC64_NONE 32 32 # R_PPC64_ADDR64 mach_<name> 33 + # R_PPC64_ADDR64 __crc_<name> 33 34 # On PPC: 34 35 # R_PPC_RELATIVE, R_PPC_ADDR16_HI, 35 36 # R_PPC_ADDR16_HA,R_PPC_ADDR16_LO, ··· 42 41 R_PPC_ADDR16_HA 43 42 R_PPC_RELATIVE 44 43 R_PPC_NONE' | 45 - grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_' 44 + grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_' | 45 + grep -E -v '\<R_PPC64_ADDR64[[:space:]]+__crc_' 46 46 ) 47 47 48 48 if [ -z "$bad_relocs" ]; then
+3
arch/powerpc/sysdev/dcr-low.S
··· 12 12 #include <asm/ppc_asm.h> 13 13 #include <asm/processor.h> 14 14 #include <asm/bug.h> 15 + #include <asm/export.h> 15 16 16 17 #define DCR_ACCESS_PROLOG(table) \ 17 18 cmpli cr0,r3,1024; \ ··· 29 28 30 29 _GLOBAL(__mfdcr) 31 30 DCR_ACCESS_PROLOG(__mfdcr_table) 31 + EXPORT_SYMBOL(__mfdcr) 32 32 33 33 _GLOBAL(__mtdcr) 34 34 DCR_ACCESS_PROLOG(__mtdcr_table) 35 + EXPORT_SYMBOL(__mtdcr) 35 36 36 37 __mfdcr_table: 37 38 mfdcr r3,0; blr
+1
arch/s390/include/asm/Kbuild
··· 1 1 2 2 3 3 generic-y += clkdev.h 4 + generic-y += export.h 4 5 generic-y += irq_work.h 5 6 generic-y += mcs_spinlock.h 6 7 generic-y += mm-arch-hooks.h
+1 -1
arch/s390/kernel/Makefile
··· 61 61 62 62 extra-y += head.o head64.o vmlinux.lds 63 63 64 - obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 64 + obj-$(CONFIG_MODULES) += module.o 65 65 obj-$(CONFIG_SMP) += smp.o 66 66 obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o 67 67 obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o
+6
arch/s390/kernel/entry.S
··· 23 23 #include <asm/vx-insn.h> 24 24 #include <asm/setup.h> 25 25 #include <asm/nmi.h> 26 + #include <asm/export.h> 26 27 27 28 __PT_R0 = __PT_GPRS 28 29 __PT_R1 = __PT_GPRS + 8 ··· 260 259 261 260 EX_TABLE(.Lrewind_pad,.Lsie_fault) 262 261 EX_TABLE(sie_exit,.Lsie_fault) 262 + EXPORT_SYMBOL(sie64a) 263 + EXPORT_SYMBOL(sie_exit) 263 264 #endif 264 265 265 266 /* ··· 828 825 oi __LC_CPU_FLAGS+7,_CIF_FPU 829 826 br %r14 830 827 .Lsave_fpu_regs_end: 828 + #if IS_ENABLED(CONFIG_KVM) 829 + EXPORT_SYMBOL(save_fpu_regs) 830 + #endif 831 831 832 832 /* 833 833 * Load floating-point controls and floating-point or vector registers.
+3
arch/s390/kernel/mcount.S
··· 9 9 #include <asm/asm-offsets.h> 10 10 #include <asm/ftrace.h> 11 11 #include <asm/ptrace.h> 12 + #include <asm/export.h> 12 13 13 14 .section .kprobes.text, "ax" 14 15 ··· 23 22 24 23 ENTRY(_mcount) 25 24 br %r14 25 + 26 + EXPORT_SYMBOL(_mcount) 26 27 27 28 ENTRY(ftrace_caller) 28 29 .globl ftrace_regs_caller
-15
arch/s390/kernel/s390_ksyms.c
··· 1 - #include <linux/module.h> 2 - #include <linux/kvm_host.h> 3 - #include <asm/fpu/api.h> 4 - #include <asm/ftrace.h> 5 - 6 - #ifdef CONFIG_FUNCTION_TRACER 7 - EXPORT_SYMBOL(_mcount); 8 - #endif 9 - #if IS_ENABLED(CONFIG_KVM) 10 - EXPORT_SYMBOL(sie64a); 11 - EXPORT_SYMBOL(sie_exit); 12 - EXPORT_SYMBOL(save_fpu_regs); 13 - #endif 14 - EXPORT_SYMBOL(memcpy); 15 - EXPORT_SYMBOL(memset);
+3
arch/s390/lib/mem.S
··· 5 5 */ 6 6 7 7 #include <linux/linkage.h> 8 + #include <asm/export.h> 8 9 9 10 /* 10 11 * memset implementation ··· 61 60 xc 0(1,%r1),0(%r1) 62 61 .Lmemset_mvc: 63 62 mvc 1(1,%r1),0(%r1) 63 + EXPORT_SYMBOL(memset) 64 64 65 65 /* 66 66 * memcpy implementation ··· 88 86 j .Lmemcpy_rest 89 87 .Lmemcpy_mvc: 90 88 mvc 0(1,%r1),0(%r3) 89 + EXPORT_SYMBOL(memcpy)
+1
arch/sparc/include/asm/Kbuild
··· 6 6 generic-y += div64.h 7 7 generic-y += emergency-restart.h 8 8 generic-y += exec.h 9 + generic-y += export.h 9 10 generic-y += irq_regs.h 10 11 generic-y += irq_work.h 11 12 generic-y += linkage.h
+34
arch/sparc/include/asm/string.h
··· 5 5 #else 6 6 #include <asm/string_32.h> 7 7 #endif 8 + 9 + /* First the mem*() things. */ 10 + #define __HAVE_ARCH_MEMMOVE 11 + void *memmove(void *, const void *, __kernel_size_t); 12 + 13 + #define __HAVE_ARCH_MEMCPY 14 + #define memcpy(t, f, n) __builtin_memcpy(t, f, n) 15 + 16 + #define __HAVE_ARCH_MEMSET 17 + #define memset(s, c, count) __builtin_memset(s, c, count) 18 + 19 + #define __HAVE_ARCH_MEMSCAN 20 + 21 + #define memscan(__arg0, __char, __arg2) \ 22 + ({ \ 23 + void *__memscan_zero(void *, size_t); \ 24 + void *__memscan_generic(void *, int, size_t); \ 25 + void *__retval, *__addr = (__arg0); \ 26 + size_t __size = (__arg2); \ 27 + \ 28 + if(__builtin_constant_p(__char) && !(__char)) \ 29 + __retval = __memscan_zero(__addr, __size); \ 30 + else \ 31 + __retval = __memscan_generic(__addr, (__char), __size); \ 32 + \ 33 + __retval; \ 34 + }) 35 + 36 + #define __HAVE_ARCH_MEMCMP 37 + int memcmp(const void *,const void *,__kernel_size_t); 38 + 39 + #define __HAVE_ARCH_STRNCMP 40 + int strncmp(const char *, const char *, __kernel_size_t); 41 + 8 42 #endif
-56
arch/sparc/include/asm/string_32.h
··· 11 11 12 12 #include <asm/page.h> 13 13 14 - /* Really, userland/ksyms should not see any of this stuff. */ 15 - 16 - #ifdef __KERNEL__ 17 - 18 - void __memmove(void *,const void *,__kernel_size_t); 19 - 20 - #ifndef EXPORT_SYMTAB_STROPS 21 - 22 - /* First the mem*() things. */ 23 - #define __HAVE_ARCH_MEMMOVE 24 - #undef memmove 25 - #define memmove(_to, _from, _n) \ 26 - ({ \ 27 - void *_t = (_to); \ 28 - __memmove(_t, (_from), (_n)); \ 29 - _t; \ 30 - }) 31 - 32 - #define __HAVE_ARCH_MEMCPY 33 - #define memcpy(t, f, n) __builtin_memcpy(t, f, n) 34 - 35 - #define __HAVE_ARCH_MEMSET 36 - #define memset(s, c, count) __builtin_memset(s, c, count) 37 - 38 - #define __HAVE_ARCH_MEMSCAN 39 - 40 - #undef memscan 41 - #define memscan(__arg0, __char, __arg2) \ 42 - ({ \ 43 - void *__memscan_zero(void *, size_t); \ 44 - void *__memscan_generic(void *, int, size_t); \ 45 - void *__retval, *__addr = (__arg0); \ 46 - size_t __size = (__arg2); \ 47 - \ 48 - if(__builtin_constant_p(__char) && !(__char)) \ 49 - __retval = __memscan_zero(__addr, __size); \ 50 - else \ 51 - __retval = __memscan_generic(__addr, (__char), __size); \ 52 - \ 53 - __retval; \ 54 - }) 55 - 56 - #define __HAVE_ARCH_MEMCMP 57 - int memcmp(const void *,const void *,__kernel_size_t); 58 - 59 - /* Now the str*() stuff... */ 60 - #define __HAVE_ARCH_STRLEN 61 - __kernel_size_t strlen(const char *); 62 - 63 - #define __HAVE_ARCH_STRNCMP 64 - int strncmp(const char *, const char *, __kernel_size_t); 65 - 66 - #endif /* !EXPORT_SYMTAB_STROPS */ 67 - 68 - #endif /* __KERNEL__ */ 69 - 70 14 #endif /* !(__SPARC_STRING_H__) */
-44
arch/sparc/include/asm/string_64.h
··· 9 9 #ifndef __SPARC64_STRING_H__ 10 10 #define __SPARC64_STRING_H__ 11 11 12 - /* Really, userland/ksyms should not see any of this stuff. */ 13 - 14 - #ifdef __KERNEL__ 15 - 16 12 #include <asm/asi.h> 17 - 18 - #ifndef EXPORT_SYMTAB_STROPS 19 - 20 - /* First the mem*() things. */ 21 - #define __HAVE_ARCH_MEMMOVE 22 - void *memmove(void *, const void *, __kernel_size_t); 23 - 24 - #define __HAVE_ARCH_MEMCPY 25 - #define memcpy(t, f, n) __builtin_memcpy(t, f, n) 26 - 27 - #define __HAVE_ARCH_MEMSET 28 - #define memset(s, c, count) __builtin_memset(s, c, count) 29 - 30 - #define __HAVE_ARCH_MEMSCAN 31 - 32 - #undef memscan 33 - #define memscan(__arg0, __char, __arg2) \ 34 - ({ \ 35 - void *__memscan_zero(void *, size_t); \ 36 - void *__memscan_generic(void *, int, size_t); \ 37 - void *__retval, *__addr = (__arg0); \ 38 - size_t __size = (__arg2); \ 39 - \ 40 - if(__builtin_constant_p(__char) && !(__char)) \ 41 - __retval = __memscan_zero(__addr, __size); \ 42 - else \ 43 - __retval = __memscan_generic(__addr, (__char), __size); \ 44 - \ 45 - __retval; \ 46 - }) 47 - 48 - #define __HAVE_ARCH_MEMCMP 49 - int memcmp(const void *,const void *,__kernel_size_t); 50 13 51 14 /* Now the str*() stuff... */ 52 15 #define __HAVE_ARCH_STRLEN 53 16 __kernel_size_t strlen(const char *); 54 - 55 - #define __HAVE_ARCH_STRNCMP 56 - int strncmp(const char *, const char *, __kernel_size_t); 57 - 58 - #endif /* !EXPORT_SYMTAB_STROPS */ 59 - 60 - #endif /* __KERNEL__ */ 61 17 62 18 #endif /* !(__SPARC64_STRING_H__) */
+1 -1
arch/sparc/kernel/Makefile
··· 86 86 obj-$(CONFIG_SUN_PM) += apc.o pmc.o 87 87 88 88 obj-$(CONFIG_MODULES) += module.o 89 - obj-$(CONFIG_MODULES) += sparc_ksyms_$(BITS).o 89 + obj-$(CONFIG_MODULES) += sparc_ksyms.o 90 90 obj-$(CONFIG_SPARC_LED) += led.o 91 91 obj-$(CONFIG_KGDB) += kgdb_$(BITS).o 92 92
+3
arch/sparc/kernel/entry.S
··· 29 29 #include <asm/unistd.h> 30 30 31 31 #include <asm/asmmacro.h> 32 + #include <asm/export.h> 32 33 33 34 #define curptr g6 34 35 ··· 1208 1207 1209 1208 ret 1210 1209 restore 1210 + EXPORT_SYMBOL(__udelay) 1211 + EXPORT_SYMBOL(__ndelay) 1211 1212 1212 1213 /* Handle a software breakpoint */ 1213 1214 /* We have to inform parent that child has stopped */
+3
arch/sparc/kernel/head_32.S
··· 24 24 #include <asm/thread_info.h> /* TI_UWINMASK */ 25 25 #include <asm/errno.h> 26 26 #include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */ 27 + #include <asm/export.h> 27 28 28 29 .data 29 30 /* The following are used with the prom_vector node-ops to figure out ··· 61 60 */ 62 61 .globl empty_zero_page 63 62 empty_zero_page: .skip PAGE_SIZE 63 + EXPORT_SYMBOL(empty_zero_page) 64 64 65 65 .global root_flags 66 66 .global ram_flags ··· 815 813 __ret_efault: 816 814 ret 817 815 restore %g0, -EFAULT, %o0 816 + EXPORT_SYMBOL(__ret_efault)
+6 -1
arch/sparc/kernel/head_64.S
··· 32 32 #include <asm/estate.h> 33 33 #include <asm/sfafsr.h> 34 34 #include <asm/unistd.h> 35 - 35 + #include <asm/export.h> 36 + 36 37 /* This section from from _start to sparc64_boot_end should fit into 37 38 * 0x0000000000404000 to 0x0000000000408000. 38 39 */ ··· 144 143 .skip 64 145 144 prom_root_node: 146 145 .word 0 146 + EXPORT_SYMBOL(prom_root_node) 147 147 prom_mmu_ihandle_cache: 148 148 .word 0 149 149 prom_boot_mapped_pc: ··· 160 158 .word 0 161 159 sun4v_chip_type: 162 160 .word SUN4V_CHIP_INVALID 161 + EXPORT_SYMBOL(sun4v_chip_type) 163 162 1: 164 163 rd %pc, %l0 165 164 ··· 923 920 .globl prom_tba, tlb_type 924 921 prom_tba: .xword 0 925 922 tlb_type: .word 0 /* Must NOT end up in BSS */ 923 + EXPORT_SYMBOL(tlb_type) 926 924 .section ".fixup",#alloc,#execinstr 927 925 928 926 .globl __ret_efault, __retl_efault, __ret_one, __retl_one ··· 931 927 ret 932 928 restore %g0, -EFAULT, %o0 933 929 ENDPROC(__ret_efault) 930 + EXPORT_SYMBOL(__ret_efault) 934 931 935 932 ENTRY(__retl_efault) 936 933 retl
+2
arch/sparc/kernel/helpers.S
··· 15 15 2: retl 16 16 nop 17 17 .size __flushw_user,.-__flushw_user 18 + EXPORT_SYMBOL(__flushw_user) 18 19 19 20 /* Flush %fp and %i7 to the stack for all register 20 21 * windows active inside of the cpu. This allows ··· 62 61 .size hard_smp_processor_id,.-hard_smp_processor_id 63 62 #endif 64 63 .size real_hard_smp_processor_id,.-real_hard_smp_processor_id 64 + EXPORT_SYMBOL_GPL(real_hard_smp_processor_id)
+5
arch/sparc/kernel/hvcalls.S
··· 343 343 0: retl 344 344 nop 345 345 ENDPROC(sun4v_mach_set_watchdog) 346 + EXPORT_SYMBOL(sun4v_mach_set_watchdog) 346 347 347 348 /* No inputs and does not return. */ 348 349 ENTRY(sun4v_mach_sir) ··· 777 776 retl 778 777 nop 779 778 ENDPROC(sun4v_niagara_getperf) 779 + EXPORT_SYMBOL(sun4v_niagara_getperf) 780 780 781 781 ENTRY(sun4v_niagara_setperf) 782 782 mov HV_FAST_SET_PERFREG, %o5 ··· 785 783 retl 786 784 nop 787 785 ENDPROC(sun4v_niagara_setperf) 786 + EXPORT_SYMBOL(sun4v_niagara_setperf) 788 787 789 788 ENTRY(sun4v_niagara2_getperf) 790 789 mov %o0, %o4 ··· 795 792 retl 796 793 nop 797 794 ENDPROC(sun4v_niagara2_getperf) 795 + EXPORT_SYMBOL(sun4v_niagara2_getperf) 798 796 799 797 ENTRY(sun4v_niagara2_setperf) 800 798 mov HV_FAST_N2_SET_PERFREG, %o5 ··· 803 799 retl 804 800 nop 805 801 ENDPROC(sun4v_niagara2_setperf) 802 + EXPORT_SYMBOL(sun4v_niagara2_setperf) 806 803 807 804 ENTRY(sun4v_reboot_data_set) 808 805 mov HV_FAST_REBOOT_DATA_SET, %o5
+12
arch/sparc/kernel/sparc_ksyms.c
··· 1 + /* 2 + * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. 3 + * 4 + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 5 + * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) 6 + */ 7 + 8 + #include <linux/init.h> 9 + #include <linux/export.h> 10 + 11 + /* This is needed only for drivers/sbus/char/openprom.c */ 12 + EXPORT_SYMBOL(saved_command_line);
-31
arch/sparc/kernel/sparc_ksyms_32.c
··· 1 - /* 2 - * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. 3 - * 4 - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 5 - * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) 6 - */ 7 - 8 - #include <linux/module.h> 9 - 10 - #include <asm/pgtable.h> 11 - #include <asm/uaccess.h> 12 - #include <asm/delay.h> 13 - #include <asm/head.h> 14 - #include <asm/dma.h> 15 - 16 - struct poll { 17 - int fd; 18 - short events; 19 - short revents; 20 - }; 21 - 22 - /* from entry.S */ 23 - EXPORT_SYMBOL(__udelay); 24 - EXPORT_SYMBOL(__ndelay); 25 - 26 - /* from head_32.S */ 27 - EXPORT_SYMBOL(__ret_efault); 28 - EXPORT_SYMBOL(empty_zero_page); 29 - 30 - /* Exporting a symbol from /init/main.c */ 31 - EXPORT_SYMBOL(saved_command_line);
-53
arch/sparc/kernel/sparc_ksyms_64.c
··· 1 - /* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support. 2 - * 3 - * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net) 4 - * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) 5 - * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) 6 - */ 7 - 8 - #include <linux/export.h> 9 - #include <linux/pci.h> 10 - #include <linux/bitops.h> 11 - 12 - #include <asm/cpudata.h> 13 - #include <asm/uaccess.h> 14 - #include <asm/spitfire.h> 15 - #include <asm/oplib.h> 16 - #include <asm/hypervisor.h> 17 - #include <asm/cacheflush.h> 18 - 19 - struct poll { 20 - int fd; 21 - short events; 22 - short revents; 23 - }; 24 - 25 - /* from helpers.S */ 26 - EXPORT_SYMBOL(__flushw_user); 27 - EXPORT_SYMBOL_GPL(real_hard_smp_processor_id); 28 - 29 - /* from head_64.S */ 30 - EXPORT_SYMBOL(__ret_efault); 31 - EXPORT_SYMBOL(tlb_type); 32 - EXPORT_SYMBOL(sun4v_chip_type); 33 - EXPORT_SYMBOL(prom_root_node); 34 - 35 - /* from hvcalls.S */ 36 - EXPORT_SYMBOL(sun4v_niagara_getperf); 37 - EXPORT_SYMBOL(sun4v_niagara_setperf); 38 - EXPORT_SYMBOL(sun4v_niagara2_getperf); 39 - EXPORT_SYMBOL(sun4v_niagara2_setperf); 40 - EXPORT_SYMBOL(sun4v_mach_set_watchdog); 41 - 42 - /* from hweight.S */ 43 - EXPORT_SYMBOL(__arch_hweight8); 44 - EXPORT_SYMBOL(__arch_hweight16); 45 - EXPORT_SYMBOL(__arch_hweight32); 46 - EXPORT_SYMBOL(__arch_hweight64); 47 - 48 - /* from ffs_ffz.S */ 49 - EXPORT_SYMBOL(ffs); 50 - EXPORT_SYMBOL(__ffs); 51 - 52 - /* Exporting a symbol from /init/main.c */ 53 - EXPORT_SYMBOL(saved_command_line);
-1
arch/sparc/lib/Makefile
··· 43 43 44 44 obj-$(CONFIG_SPARC64) += iomap.o 45 45 obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o 46 - obj-y += ksyms.o 47 46 obj-$(CONFIG_SPARC64) += PeeCeeI.o
+2
arch/sparc/lib/U1memcpy.S
··· 7 7 #ifdef __KERNEL__ 8 8 #include <asm/visasm.h> 9 9 #include <asm/asi.h> 10 + #include <asm/export.h> 10 11 #define GLOBAL_SPARE g7 11 12 #else 12 13 #define GLOBAL_SPARE g5 ··· 568 567 mov EX_RETVAL(%o4), %o0 569 568 570 569 .size FUNC_NAME, .-FUNC_NAME 570 + EXPORT_SYMBOL(FUNC_NAME)
+2
arch/sparc/lib/VISsave.S
··· 13 13 #include <asm/ptrace.h> 14 14 #include <asm/visasm.h> 15 15 #include <asm/thread_info.h> 16 + #include <asm/export.h> 16 17 17 18 /* On entry: %o5=current FPRS value, %g7 is callers address */ 18 19 /* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */ ··· 80 79 80: jmpl %g7 + %g0, %g0 81 80 nop 82 81 ENDPROC(VISenter) 82 + EXPORT_SYMBOL(VISenter)
+2
arch/sparc/lib/ashldi3.S
··· 6 6 */ 7 7 8 8 #include <linux/linkage.h> 9 + #include <asm/export.h> 9 10 10 11 .text 11 12 ENTRY(__ashldi3) ··· 34 33 retl 35 34 nop 36 35 ENDPROC(__ashldi3) 36 + EXPORT_SYMBOL(__ashldi3)
+2
arch/sparc/lib/ashrdi3.S
··· 6 6 */ 7 7 8 8 #include <linux/linkage.h> 9 + #include <asm/export.h> 9 10 10 11 .text 11 12 ENTRY(__ashrdi3) ··· 36 35 jmpl %o7 + 8, %g0 37 36 nop 38 37 ENDPROC(__ashrdi3) 38 + EXPORT_SYMBOL(__ashrdi3)
+12 -4
arch/sparc/lib/atomic_64.S
··· 6 6 #include <linux/linkage.h> 7 7 #include <asm/asi.h> 8 8 #include <asm/backoff.h> 9 + #include <asm/export.h> 9 10 10 11 .text 11 12 ··· 30 29 nop; \ 31 30 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 32 31 ENDPROC(atomic_##op); \ 32 + EXPORT_SYMBOL(atomic_##op); 33 33 34 34 #define ATOMIC_OP_RETURN(op) \ 35 35 ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ ··· 44 42 retl; \ 45 43 sra %g1, 0, %o0; \ 46 44 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 47 - ENDPROC(atomic_##op##_return); 45 + ENDPROC(atomic_##op##_return); \ 46 + EXPORT_SYMBOL(atomic_##op##_return); 48 47 49 48 #define ATOMIC_FETCH_OP(op) \ 50 49 ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ ··· 59 56 retl; \ 60 57 sra %g1, 0, %o0; \ 61 58 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 62 - ENDPROC(atomic_fetch_##op); 59 + ENDPROC(atomic_fetch_##op); \ 60 + EXPORT_SYMBOL(atomic_fetch_##op); 63 61 64 62 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) 65 63 ··· 92 88 nop; \ 93 89 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 94 90 ENDPROC(atomic64_##op); \ 91 + EXPORT_SYMBOL(atomic64_##op); 95 92 96 93 #define ATOMIC64_OP_RETURN(op) \ 97 94 ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ ··· 106 101 retl; \ 107 102 op %g1, %o0, %o0; \ 108 103 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 109 - ENDPROC(atomic64_##op##_return); 104 + ENDPROC(atomic64_##op##_return); \ 105 + EXPORT_SYMBOL(atomic64_##op##_return); 110 106 111 107 #define ATOMIC64_FETCH_OP(op) \ 112 108 ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ ··· 121 115 retl; \ 122 116 mov %g1, %o0; \ 123 117 2: BACKOFF_SPIN(%o2, %o3, 1b); \ 124 - ENDPROC(atomic64_fetch_##op); 118 + ENDPROC(atomic64_fetch_##op); \ 119 + EXPORT_SYMBOL(atomic64_fetch_##op); 125 120 126 121 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) 127 122 ··· 154 147 sub %g1, 1, %o0 155 148 2: BACKOFF_SPIN(%o2, %o3, 1b) 156 149 ENDPROC(atomic64_dec_if_positive) 150 + 
EXPORT_SYMBOL(atomic64_dec_if_positive)
+7
arch/sparc/lib/bitops.S
··· 6 6 #include <linux/linkage.h> 7 7 #include <asm/asi.h> 8 8 #include <asm/backoff.h> 9 + #include <asm/export.h> 9 10 10 11 .text 11 12 ··· 30 29 nop 31 30 2: BACKOFF_SPIN(%o3, %o4, 1b) 32 31 ENDPROC(test_and_set_bit) 32 + EXPORT_SYMBOL(test_and_set_bit) 33 33 34 34 ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */ 35 35 BACKOFF_SETUP(%o3) ··· 52 50 nop 53 51 2: BACKOFF_SPIN(%o3, %o4, 1b) 54 52 ENDPROC(test_and_clear_bit) 53 + EXPORT_SYMBOL(test_and_clear_bit) 55 54 56 55 ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */ 57 56 BACKOFF_SETUP(%o3) ··· 74 71 nop 75 72 2: BACKOFF_SPIN(%o3, %o4, 1b) 76 73 ENDPROC(test_and_change_bit) 74 + EXPORT_SYMBOL(test_and_change_bit) 77 75 78 76 ENTRY(set_bit) /* %o0=nr, %o1=addr */ 79 77 BACKOFF_SETUP(%o3) ··· 94 90 nop 95 91 2: BACKOFF_SPIN(%o3, %o4, 1b) 96 92 ENDPROC(set_bit) 93 + EXPORT_SYMBOL(set_bit) 97 94 98 95 ENTRY(clear_bit) /* %o0=nr, %o1=addr */ 99 96 BACKOFF_SETUP(%o3) ··· 114 109 nop 115 110 2: BACKOFF_SPIN(%o3, %o4, 1b) 116 111 ENDPROC(clear_bit) 112 + EXPORT_SYMBOL(clear_bit) 117 113 118 114 ENTRY(change_bit) /* %o0=nr, %o1=addr */ 119 115 BACKOFF_SETUP(%o3) ··· 134 128 nop 135 129 2: BACKOFF_SPIN(%o3, %o4, 1b) 136 130 ENDPROC(change_bit) 131 + EXPORT_SYMBOL(change_bit)
+3
arch/sparc/lib/blockops.S
··· 6 6 7 7 #include <linux/linkage.h> 8 8 #include <asm/page.h> 9 + #include <asm/export.h> 9 10 10 11 /* Zero out 64 bytes of memory at (buf + offset). 11 12 * Assumes %g1 contains zero. ··· 65 64 retl 66 65 nop 67 66 ENDPROC(bzero_1page) 67 + EXPORT_SYMBOL(bzero_1page) 68 68 69 69 ENTRY(__copy_1page) 70 70 /* NOTE: If you change the number of insns of this routine, please check ··· 89 87 retl 90 88 nop 91 89 ENDPROC(__copy_1page) 90 + EXPORT_SYMBOL(__copy_1page)
+4
arch/sparc/lib/bzero.S
··· 5 5 */ 6 6 7 7 #include <linux/linkage.h> 8 + #include <asm/export.h> 8 9 9 10 .text 10 11 ··· 79 78 mov %o3, %o0 80 79 ENDPROC(__bzero) 81 80 ENDPROC(memset) 81 + EXPORT_SYMBOL(__bzero) 82 + EXPORT_SYMBOL(memset) 82 83 83 84 #define EX_ST(x,y) \ 84 85 98: x,y; \ ··· 146 143 retl 147 144 clr %o0 148 145 ENDPROC(__clear_user) 146 + EXPORT_SYMBOL(__clear_user)
+3
arch/sparc/lib/checksum_32.S
··· 14 14 */ 15 15 16 16 #include <asm/errno.h> 17 + #include <asm/export.h> 17 18 18 19 #define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \ 19 20 ldd [buf + offset + 0x00], t0; \ ··· 105 104 * buffer of size 0x20. Follow the code path for that case. 106 105 */ 107 106 .globl csum_partial 107 + EXPORT_SYMBOL(csum_partial) 108 108 csum_partial: /* %o0=buf, %o1=len, %o2=sum */ 109 109 andcc %o0, 0x7, %g0 ! alignment problems? 110 110 bne csum_partial_fix_alignment ! yep, handle it ··· 337 335 */ 338 336 .align 8 339 337 .globl __csum_partial_copy_sparc_generic 338 + EXPORT_SYMBOL(__csum_partial_copy_sparc_generic) 340 339 __csum_partial_copy_sparc_generic: 341 340 /* %o0=src, %o1=dest, %g1=len, %g7=sum */ 342 341 xor %o0, %o1, %o4 ! get changing bits
+2
arch/sparc/lib/checksum_64.S
··· 13 13 * BSD4.4 portable checksum routine 14 14 */ 15 15 16 + #include <asm/export.h> 16 17 .text 17 18 18 19 csum_partial_fix_alignment: ··· 38 37 39 38 .align 32 40 39 .globl csum_partial 40 + EXPORT_SYMBOL(csum_partial) 41 41 csum_partial: /* %o0=buff, %o1=len, %o2=sum */ 42 42 prefetch [%o0 + 0x000], #n_reads 43 43 clr %o4
+3
arch/sparc/lib/clear_page.S
··· 10 10 #include <asm/pgtable.h> 11 11 #include <asm/spitfire.h> 12 12 #include <asm/head.h> 13 + #include <asm/export.h> 13 14 14 15 /* What we used to do was lock a TLB entry into a specific 15 16 * TLB slot, clear the page with interrupts disabled, then ··· 27 26 .text 28 27 29 28 .globl _clear_page 29 + EXPORT_SYMBOL(_clear_page) 30 30 _clear_page: /* %o0=dest */ 31 31 ba,pt %xcc, clear_page_common 32 32 clr %o4 ··· 37 35 */ 38 36 .align 32 39 37 .globl clear_user_page 38 + EXPORT_SYMBOL(clear_user_page) 40 39 clear_user_page: /* %o0=dest, %o1=vaddr */ 41 40 lduw [%g6 + TI_PRE_COUNT], %o2 42 41 sethi %hi(PAGE_OFFSET), %g2
+2
arch/sparc/lib/copy_in_user.S
··· 5 5 6 6 #include <linux/linkage.h> 7 7 #include <asm/asi.h> 8 + #include <asm/export.h> 8 9 9 10 #define XCC xcc 10 11 ··· 91 90 retl 92 91 clr %o0 93 92 ENDPROC(___copy_in_user) 93 + EXPORT_SYMBOL(___copy_in_user)
+2
arch/sparc/lib/copy_page.S
··· 10 10 #include <asm/pgtable.h> 11 11 #include <asm/spitfire.h> 12 12 #include <asm/head.h> 13 + #include <asm/export.h> 13 14 14 15 /* What we used to do was lock a TLB entry into a specific 15 16 * TLB slot, clear the page with interrupts disabled, then ··· 45 44 .align 32 46 45 .globl copy_user_page 47 46 .type copy_user_page,#function 47 + EXPORT_SYMBOL(copy_user_page) 48 48 copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ 49 49 lduw [%g6 + TI_PRE_COUNT], %o4 50 50 sethi %hi(PAGE_OFFSET), %g2
+2
arch/sparc/lib/copy_user.S
··· 15 15 #include <asm/asmmacro.h> 16 16 #include <asm/page.h> 17 17 #include <asm/thread_info.h> 18 + #include <asm/export.h> 18 19 19 20 /* Work around cpp -rob */ 20 21 #define ALLOC #alloc ··· 120 119 __copy_user_begin: 121 120 122 121 .globl __copy_user 122 + EXPORT_SYMBOL(__copy_user) 123 123 dword_align: 124 124 andcc %o1, 1, %g0 125 125 be 4f
+3
arch/sparc/lib/csum_copy.S
··· 3 3 * Copyright (C) 2005 David S. Miller <davem@davemloft.net> 4 4 */ 5 5 6 + #include <asm/export.h> 7 + 6 8 #ifdef __KERNEL__ 7 9 #define GLOBAL_SPARE %g7 8 10 #else ··· 65 63 add %o5, %o4, %o4 66 64 67 65 .globl FUNC_NAME 66 + EXPORT_SYMBOL(FUNC_NAME) 68 67 FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */ 69 68 LOAD(prefetch, %o0 + 0x000, #n_reads) 70 69 xor %o0, %o1, %g1
+2
arch/sparc/lib/divdi3.S
··· 17 17 the Free Software Foundation, 59 Temple Place - Suite 330, 18 18 Boston, MA 02111-1307, USA. */ 19 19 20 + #include <asm/export.h> 20 21 .text 21 22 .align 4 22 23 .globl __divdi3 ··· 280 279 .LL81: 281 280 ret 282 281 restore 282 + EXPORT_SYMBOL(__divdi3)
+3
arch/sparc/lib/ffs.S
··· 1 1 #include <linux/linkage.h> 2 + #include <asm/export.h> 2 3 3 4 .register %g2,#scratch 4 5 ··· 66 65 add %o2, %g1, %o0 67 66 ENDPROC(ffs) 68 67 ENDPROC(__ffs) 68 + EXPORT_SYMBOL(__ffs) 69 + EXPORT_SYMBOL(ffs) 69 70 70 71 .section .popc_6insn_patch, "ax" 71 72 .word ffs
+5
arch/sparc/lib/hweight.S
··· 1 1 #include <linux/linkage.h> 2 + #include <asm/export.h> 2 3 3 4 .text 4 5 .align 32 ··· 8 7 nop 9 8 nop 10 9 ENDPROC(__arch_hweight8) 10 + EXPORT_SYMBOL(__arch_hweight8) 11 11 .section .popc_3insn_patch, "ax" 12 12 .word __arch_hweight8 13 13 sllx %o0, 64-8, %g1 ··· 21 19 nop 22 20 nop 23 21 ENDPROC(__arch_hweight16) 22 + EXPORT_SYMBOL(__arch_hweight16) 24 23 .section .popc_3insn_patch, "ax" 25 24 .word __arch_hweight16 26 25 sllx %o0, 64-16, %g1 ··· 34 31 nop 35 32 nop 36 33 ENDPROC(__arch_hweight32) 34 + EXPORT_SYMBOL(__arch_hweight32) 37 35 .section .popc_3insn_patch, "ax" 38 36 .word __arch_hweight32 39 37 sllx %o0, 64-32, %g1 ··· 47 43 nop 48 44 nop 49 45 ENDPROC(__arch_hweight64) 46 + EXPORT_SYMBOL(__arch_hweight64) 50 47 .section .popc_3insn_patch, "ax" 51 48 .word __arch_hweight64 52 49 retl
+2
arch/sparc/lib/ipcsum.S
··· 1 1 #include <linux/linkage.h> 2 + #include <asm/export.h> 2 3 3 4 .text 4 5 ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */ ··· 32 31 retl 33 32 and %o2, %o1, %o0 34 33 ENDPROC(ip_fast_csum) 34 + EXPORT_SYMBOL(ip_fast_csum)
-174
arch/sparc/lib/ksyms.c
··· 1 - /* 2 - * Export of symbols defined in assembler 3 - */ 4 - 5 - /* Tell string.h we don't want memcpy etc. as cpp defines */ 6 - #define EXPORT_SYMTAB_STROPS 7 - 8 - #include <linux/module.h> 9 - #include <linux/string.h> 10 - #include <linux/types.h> 11 - 12 - #include <asm/checksum.h> 13 - #include <asm/uaccess.h> 14 - #include <asm/ftrace.h> 15 - 16 - /* string functions */ 17 - EXPORT_SYMBOL(strlen); 18 - EXPORT_SYMBOL(strncmp); 19 - 20 - /* mem* functions */ 21 - extern void *__memscan_zero(void *, size_t); 22 - extern void *__memscan_generic(void *, int, size_t); 23 - extern void *__bzero(void *, size_t); 24 - 25 - EXPORT_SYMBOL(memscan); 26 - EXPORT_SYMBOL(__memscan_zero); 27 - EXPORT_SYMBOL(__memscan_generic); 28 - EXPORT_SYMBOL(memcmp); 29 - EXPORT_SYMBOL(memcpy); 30 - EXPORT_SYMBOL(memset); 31 - EXPORT_SYMBOL(memmove); 32 - EXPORT_SYMBOL(__bzero); 33 - 34 - /* Networking helper routines. */ 35 - EXPORT_SYMBOL(csum_partial); 36 - 37 - #ifdef CONFIG_MCOUNT 38 - EXPORT_SYMBOL(_mcount); 39 - #endif 40 - 41 - /* 42 - * sparc 43 - */ 44 - #ifdef CONFIG_SPARC32 45 - extern int __ashrdi3(int, int); 46 - extern int __ashldi3(int, int); 47 - extern int __lshrdi3(int, int); 48 - extern int __muldi3(int, int); 49 - extern int __divdi3(int, int); 50 - 51 - extern void (*__copy_1page)(void *, const void *); 52 - extern void (*bzero_1page)(void *); 53 - 54 - extern void ___rw_read_enter(void); 55 - extern void ___rw_read_try(void); 56 - extern void ___rw_read_exit(void); 57 - extern void ___rw_write_enter(void); 58 - 59 - /* Networking helper routines. */ 60 - EXPORT_SYMBOL(__csum_partial_copy_sparc_generic); 61 - 62 - /* Special internal versions of library functions. */ 63 - EXPORT_SYMBOL(__copy_1page); 64 - EXPORT_SYMBOL(__memmove); 65 - EXPORT_SYMBOL(bzero_1page); 66 - 67 - /* Moving data to/from/in userspace. 
*/ 68 - EXPORT_SYMBOL(__copy_user); 69 - 70 - /* Used by asm/spinlock.h */ 71 - #ifdef CONFIG_SMP 72 - EXPORT_SYMBOL(___rw_read_enter); 73 - EXPORT_SYMBOL(___rw_read_try); 74 - EXPORT_SYMBOL(___rw_read_exit); 75 - EXPORT_SYMBOL(___rw_write_enter); 76 - #endif 77 - 78 - EXPORT_SYMBOL(__ashrdi3); 79 - EXPORT_SYMBOL(__ashldi3); 80 - EXPORT_SYMBOL(__lshrdi3); 81 - EXPORT_SYMBOL(__muldi3); 82 - EXPORT_SYMBOL(__divdi3); 83 - #endif 84 - 85 - /* 86 - * sparc64 87 - */ 88 - #ifdef CONFIG_SPARC64 89 - /* Networking helper routines. */ 90 - EXPORT_SYMBOL(csum_partial_copy_nocheck); 91 - EXPORT_SYMBOL(__csum_partial_copy_from_user); 92 - EXPORT_SYMBOL(__csum_partial_copy_to_user); 93 - EXPORT_SYMBOL(ip_fast_csum); 94 - 95 - /* Moving data to/from/in userspace. */ 96 - EXPORT_SYMBOL(___copy_to_user); 97 - EXPORT_SYMBOL(___copy_from_user); 98 - EXPORT_SYMBOL(___copy_in_user); 99 - EXPORT_SYMBOL(__clear_user); 100 - 101 - /* Atomic counter implementation. */ 102 - #define ATOMIC_OP(op) \ 103 - EXPORT_SYMBOL(atomic_##op); \ 104 - EXPORT_SYMBOL(atomic64_##op); 105 - 106 - #define ATOMIC_OP_RETURN(op) \ 107 - EXPORT_SYMBOL(atomic_##op##_return); \ 108 - EXPORT_SYMBOL(atomic64_##op##_return); 109 - 110 - #define ATOMIC_FETCH_OP(op) \ 111 - EXPORT_SYMBOL(atomic_fetch_##op); \ 112 - EXPORT_SYMBOL(atomic64_fetch_##op); 113 - 114 - #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) 115 - 116 - ATOMIC_OPS(add) 117 - ATOMIC_OPS(sub) 118 - 119 - #undef ATOMIC_OPS 120 - #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) 121 - 122 - ATOMIC_OPS(and) 123 - ATOMIC_OPS(or) 124 - ATOMIC_OPS(xor) 125 - 126 - #undef ATOMIC_OPS 127 - #undef ATOMIC_FETCH_OP 128 - #undef ATOMIC_OP_RETURN 129 - #undef ATOMIC_OP 130 - 131 - EXPORT_SYMBOL(atomic64_dec_if_positive); 132 - 133 - /* Atomic bit operations. 
*/ 134 - EXPORT_SYMBOL(test_and_set_bit); 135 - EXPORT_SYMBOL(test_and_clear_bit); 136 - EXPORT_SYMBOL(test_and_change_bit); 137 - EXPORT_SYMBOL(set_bit); 138 - EXPORT_SYMBOL(clear_bit); 139 - EXPORT_SYMBOL(change_bit); 140 - 141 - /* Special internal versions of library functions. */ 142 - EXPORT_SYMBOL(_clear_page); 143 - EXPORT_SYMBOL(clear_user_page); 144 - EXPORT_SYMBOL(copy_user_page); 145 - 146 - /* RAID code needs this */ 147 - void VISenter(void); 148 - EXPORT_SYMBOL(VISenter); 149 - 150 - extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); 151 - extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, 152 - unsigned long *); 153 - extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *, 154 - unsigned long *, unsigned long *); 155 - extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, 156 - unsigned long *, unsigned long *, unsigned long *); 157 - EXPORT_SYMBOL(xor_vis_2); 158 - EXPORT_SYMBOL(xor_vis_3); 159 - EXPORT_SYMBOL(xor_vis_4); 160 - EXPORT_SYMBOL(xor_vis_5); 161 - 162 - extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); 163 - extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, 164 - unsigned long *); 165 - extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, 166 - unsigned long *, unsigned long *); 167 - extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, 168 - unsigned long *, unsigned long *, unsigned long *); 169 - 170 - EXPORT_SYMBOL(xor_niagara_2); 171 - EXPORT_SYMBOL(xor_niagara_3); 172 - EXPORT_SYMBOL(xor_niagara_4); 173 - EXPORT_SYMBOL(xor_niagara_5); 174 - #endif
+5
arch/sparc/lib/locks.S
··· 10 10 #include <asm/psr.h> 11 11 #include <asm/smp.h> 12 12 #include <asm/spinlock.h> 13 + #include <asm/export.h> 13 14 14 15 .text 15 16 .align 4 ··· 49 48 ld [%g1], %g2 50 49 51 50 .globl ___rw_read_enter 51 + EXPORT_SYMBOL(___rw_read_enter) 52 52 ___rw_read_enter: 53 53 orcc %g2, 0x0, %g0 54 54 bne,a ___rw_read_enter_spin_on_wlock ··· 61 59 mov %g4, %o7 62 60 63 61 .globl ___rw_read_exit 62 + EXPORT_SYMBOL(___rw_read_exit) 64 63 ___rw_read_exit: 65 64 orcc %g2, 0x0, %g0 66 65 bne,a ___rw_read_exit_spin_on_wlock ··· 73 70 mov %g4, %o7 74 71 75 72 .globl ___rw_read_try 73 + EXPORT_SYMBOL(___rw_read_try) 76 74 ___rw_read_try: 77 75 orcc %g2, 0x0, %g0 78 76 bne ___rw_read_try_spin_on_wlock ··· 85 81 mov %g4, %o7 86 82 87 83 .globl ___rw_write_enter 84 + EXPORT_SYMBOL(___rw_write_enter) 88 85 ___rw_write_enter: 89 86 orcc %g2, 0x0, %g0 90 87 bne ___rw_write_enter_spin_on_wlock
+2
arch/sparc/lib/lshrdi3.S
··· 1 1 #include <linux/linkage.h> 2 + #include <asm/export.h> 2 3 3 4 ENTRY(__lshrdi3) 4 5 cmp %o2, 0 ··· 26 25 retl 27 26 nop 28 27 ENDPROC(__lshrdi3) 28 + EXPORT_SYMBOL(__lshrdi3)
+2
arch/sparc/lib/mcount.S
··· 6 6 */ 7 7 8 8 #include <linux/linkage.h> 9 + #include <asm/export.h> 9 10 10 11 /* 11 12 * This is the main variant and is called by C code. GCC's -pg option ··· 17 16 .align 32 18 17 .globl _mcount 19 18 .type _mcount,#function 19 + EXPORT_SYMBOL(_mcount) 20 20 .globl mcount 21 21 .type mcount,#function 22 22 _mcount:
+2
arch/sparc/lib/memcmp.S
··· 6 6 7 7 #include <linux/linkage.h> 8 8 #include <asm/asm.h> 9 + #include <asm/export.h> 9 10 10 11 .text 11 12 ENTRY(memcmp) ··· 26 25 2: retl 27 26 mov 0, %o0 28 27 ENDPROC(memcmp) 28 + EXPORT_SYMBOL(memcmp)
+3 -83
arch/sparc/lib/memcpy.S
··· 7 7 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 8 8 */ 9 9 10 + #include <asm/export.h> 10 11 #define FUNC(x) \ 11 12 .globl x; \ 12 13 .type x,@function; \ ··· 59 58 stb %t0, [%dst - (offset) - 0x02]; \ 60 59 stb %t1, [%dst - (offset) - 0x01]; 61 60 62 - /* Both these macros have to start with exactly the same insn */ 63 - #define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ 64 - ldd [%src - (offset) - 0x20], %t0; \ 65 - ldd [%src - (offset) - 0x18], %t2; \ 66 - ldd [%src - (offset) - 0x10], %t4; \ 67 - ldd [%src - (offset) - 0x08], %t6; \ 68 - st %t0, [%dst - (offset) - 0x20]; \ 69 - st %t1, [%dst - (offset) - 0x1c]; \ 70 - st %t2, [%dst - (offset) - 0x18]; \ 71 - st %t3, [%dst - (offset) - 0x14]; \ 72 - st %t4, [%dst - (offset) - 0x10]; \ 73 - st %t5, [%dst - (offset) - 0x0c]; \ 74 - st %t6, [%dst - (offset) - 0x08]; \ 75 - st %t7, [%dst - (offset) - 0x04]; 76 - 77 - #define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ 78 - ldd [%src - (offset) - 0x20], %t0; \ 79 - ldd [%src - (offset) - 0x18], %t2; \ 80 - ldd [%src - (offset) - 0x10], %t4; \ 81 - ldd [%src - (offset) - 0x08], %t6; \ 82 - std %t0, [%dst - (offset) - 0x20]; \ 83 - std %t2, [%dst - (offset) - 0x18]; \ 84 - std %t4, [%dst - (offset) - 0x10]; \ 85 - std %t6, [%dst - (offset) - 0x08]; 86 - 87 - #define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ 88 - ldd [%src + (offset) + 0x00], %t0; \ 89 - ldd [%src + (offset) + 0x08], %t2; \ 90 - st %t0, [%dst + (offset) + 0x00]; \ 91 - st %t1, [%dst + (offset) + 0x04]; \ 92 - st %t2, [%dst + (offset) + 0x08]; \ 93 - st %t3, [%dst + (offset) + 0x0c]; 94 - 95 - #define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \ 96 - ldub [%src + (offset) + 0x00], %t0; \ 97 - ldub [%src + (offset) + 0x01], %t1; \ 98 - stb %t0, [%dst + (offset) + 0x00]; \ 99 - stb %t1, [%dst + (offset) + 0x01]; 100 - 101 - #define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \ 102 - 
ldd [%src + (offset) + 0x00], %t0; \ 103 - ldd [%src + (offset) + 0x08], %t2; \ 104 - srl %t0, shir, %t5; \ 105 - srl %t1, shir, %t6; \ 106 - sll %t0, shil, %t0; \ 107 - or %t5, %prev, %t5; \ 108 - sll %t1, shil, %prev; \ 109 - or %t6, %t0, %t0; \ 110 - srl %t2, shir, %t1; \ 111 - srl %t3, shir, %t6; \ 112 - sll %t2, shil, %t2; \ 113 - or %t1, %prev, %t1; \ 114 - std %t4, [%dst + (offset) + (offset2) - 0x04]; \ 115 - std %t0, [%dst + (offset) + (offset2) + 0x04]; \ 116 - sll %t3, shil, %prev; \ 117 - or %t6, %t2, %t4; 118 - 119 - #define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \ 120 - ldd [%src + (offset) + 0x00], %t0; \ 121 - ldd [%src + (offset) + 0x08], %t2; \ 122 - srl %t0, shir, %t4; \ 123 - srl %t1, shir, %t5; \ 124 - sll %t0, shil, %t6; \ 125 - or %t4, %prev, %t0; \ 126 - sll %t1, shil, %prev; \ 127 - or %t5, %t6, %t1; \ 128 - srl %t2, shir, %t4; \ 129 - srl %t3, shir, %t5; \ 130 - sll %t2, shil, %t6; \ 131 - or %t4, %prev, %t2; \ 132 - sll %t3, shil, %prev; \ 133 - or %t5, %t6, %t3; \ 134 - std %t0, [%dst + (offset) + (offset2) + 0x00]; \ 135 - std %t2, [%dst + (offset) + (offset2) + 0x08]; 136 - 137 61 .text 138 62 .align 4 139 63 140 - 0: 141 - retl 142 - nop ! Only bcopy returns here and it retuns void... 143 - 144 - #ifdef __KERNEL__ 145 - FUNC(amemmove) 146 - FUNC(__memmove) 147 - #endif 148 64 FUNC(memmove) 65 + EXPORT_SYMBOL(memmove) 149 66 cmp %o0, %o1 150 67 mov %o0, %g7 151 68 bleu 9f ··· 121 202 add %o0, 2, %o0 122 203 123 204 FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ 205 + EXPORT_SYMBOL(memcpy) 124 206 125 207 sub %o0, %o1, %o4 126 208 mov %o0, %g7
+2
arch/sparc/lib/memmove.S
··· 5 5 */ 6 6 7 7 #include <linux/linkage.h> 8 + #include <asm/export.h> 8 9 9 10 .text 10 11 ENTRY(memmove) /* o0=dst o1=src o2=len */ ··· 58 57 stb %g7, [%o0 - 0x1] 59 58 ba,a,pt %xcc, 99b 60 59 ENDPROC(memmove) 60 + EXPORT_SYMBOL(memmove)
+4
arch/sparc/lib/memscan_32.S
··· 4 4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 5 5 */ 6 6 7 + #include <asm/export.h> 8 + 7 9 /* In essence, this is just a fancy strlen. */ 8 10 9 11 #define LO_MAGIC 0x01010101 ··· 15 13 .align 4 16 14 .globl __memscan_zero, __memscan_generic 17 15 .globl memscan 16 + EXPORT_SYMBOL(__memscan_zero) 17 + EXPORT_SYMBOL(__memscan_generic) 18 18 __memscan_zero: 19 19 /* %o0 = addr, %o1 = size */ 20 20 cmp %o1, 0
+4
arch/sparc/lib/memscan_64.S
··· 5 5 * Copyright (C) 1998 David S. Miller (davem@redhat.com) 6 6 */ 7 7 8 + #include <asm/export.h> 9 + 8 10 #define HI_MAGIC 0x8080808080808080 9 11 #define LO_MAGIC 0x0101010101010101 10 12 #define ASI_PL 0x88 ··· 15 13 .align 32 16 14 .globl __memscan_zero, __memscan_generic 17 15 .globl memscan 16 + EXPORT_SYMBOL(__memscan_zero) 17 + EXPORT_SYMBOL(__memscan_generic) 18 18 19 19 __memscan_zero: 20 20 /* %o0 = bufp, %o1 = size */
+3
arch/sparc/lib/memset.S
··· 9 9 */ 10 10 11 11 #include <asm/ptrace.h> 12 + #include <asm/export.h> 12 13 13 14 /* Work around cpp -rob */ 14 15 #define ALLOC #alloc ··· 64 63 65 64 .globl __bzero 66 65 .globl memset 66 + EXPORT_SYMBOL(__bzero) 67 + EXPORT_SYMBOL(memset) 67 68 .globl __memset_start, __memset_end 68 69 __memset_start: 69 70 memset:
+2
arch/sparc/lib/muldi3.S
··· 17 17 the Free Software Foundation, 59 Temple Place - Suite 330, 18 18 Boston, MA 02111-1307, USA. */ 19 19 20 + #include <asm/export.h> 20 21 .text 21 22 .align 4 22 23 .globl __muldi3 ··· 75 74 add %l2, %l0, %i0 76 75 ret 77 76 restore %g0, %l3, %o1 77 + EXPORT_SYMBOL(__muldi3)
+2
arch/sparc/lib/strlen.S
··· 7 7 8 8 #include <linux/linkage.h> 9 9 #include <asm/asm.h> 10 + #include <asm/export.h> 10 11 11 12 #define LO_MAGIC 0x01010101 12 13 #define HI_MAGIC 0x80808080 ··· 79 78 retl 80 79 mov 2, %o0 81 80 ENDPROC(strlen) 81 + EXPORT_SYMBOL(strlen)
+2
arch/sparc/lib/strncmp_32.S
··· 4 4 */ 5 5 6 6 #include <linux/linkage.h> 7 + #include <asm/export.h> 7 8 8 9 .text 9 10 ENTRY(strncmp) ··· 117 116 retl 118 117 sub %o3, %o0, %o0 119 118 ENDPROC(strncmp) 119 + EXPORT_SYMBOL(strncmp)
+2
arch/sparc/lib/strncmp_64.S
··· 6 6 7 7 #include <linux/linkage.h> 8 8 #include <asm/asi.h> 9 + #include <asm/export.h> 9 10 10 11 .text 11 12 ENTRY(strncmp) ··· 29 28 retl 30 29 clr %o0 31 30 ENDPROC(strncmp) 31 + EXPORT_SYMBOL(strncmp)
+9
arch/sparc/lib/xor.S
··· 13 13 #include <asm/asi.h> 14 14 #include <asm/dcu.h> 15 15 #include <asm/spitfire.h> 16 + #include <asm/export.h> 16 17 17 18 /* 18 19 * Requirements: ··· 91 90 retl 92 91 wr %g0, 0, %fprs 93 92 ENDPROC(xor_vis_2) 93 + EXPORT_SYMBOL(xor_vis_2) 94 94 95 95 ENTRY(xor_vis_3) 96 96 rd %fprs, %o5 ··· 158 156 retl 159 157 wr %g0, 0, %fprs 160 158 ENDPROC(xor_vis_3) 159 + EXPORT_SYMBOL(xor_vis_3) 161 160 162 161 ENTRY(xor_vis_4) 163 162 rd %fprs, %o5 ··· 244 241 retl 245 242 wr %g0, 0, %fprs 246 243 ENDPROC(xor_vis_4) 244 + EXPORT_SYMBOL(xor_vis_4) 247 245 248 246 ENTRY(xor_vis_5) 249 247 save %sp, -192, %sp ··· 351 347 ret 352 348 restore 353 349 ENDPROC(xor_vis_5) 350 + EXPORT_SYMBOL(xor_vis_5) 354 351 355 352 /* Niagara versions. */ 356 353 ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */ ··· 398 393 ret 399 394 restore 400 395 ENDPROC(xor_niagara_2) 396 + EXPORT_SYMBOL(xor_niagara_2) 401 397 402 398 ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ 403 399 save %sp, -192, %sp ··· 460 454 ret 461 455 restore 462 456 ENDPROC(xor_niagara_3) 457 + EXPORT_SYMBOL(xor_niagara_3) 463 458 464 459 ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ 465 460 save %sp, -192, %sp ··· 543 536 ret 544 537 restore 545 538 ENDPROC(xor_niagara_4) 539 + EXPORT_SYMBOL(xor_niagara_4) 546 540 547 541 ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */ 548 542 save %sp, -192, %sp ··· 642 634 ret 643 635 restore 644 636 ENDPROC(xor_niagara_5) 637 + EXPORT_SYMBOL(xor_niagara_5)
+2
arch/x86/entry/entry_32.S
··· 44 44 #include <asm/alternative-asm.h> 45 45 #include <asm/asm.h> 46 46 #include <asm/smap.h> 47 + #include <asm/export.h> 47 48 48 49 .section .entry.text, "ax" 49 50 ··· 992 991 jmp ftrace_stub 993 992 END(mcount) 994 993 #endif /* CONFIG_DYNAMIC_FTRACE */ 994 + EXPORT_SYMBOL(mcount) 995 995 #endif /* CONFIG_FUNCTION_TRACER */ 996 996 997 997 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+2
arch/x86/entry/entry_64.S
··· 35 35 #include <asm/asm.h> 36 36 #include <asm/smap.h> 37 37 #include <asm/pgtable_types.h> 38 + #include <asm/export.h> 38 39 #include <linux/err.h> 39 40 40 41 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ ··· 876 875 popfq 877 876 ret 878 877 END(native_load_gs_index) 878 + EXPORT_SYMBOL(native_load_gs_index) 879 879 880 880 _ASM_EXTABLE(.Lgs_change, bad_gs) 881 881 .section .fixup, "ax"
+3
arch/x86/entry/thunk_32.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <asm/asm.h> 9 + #include <asm/export.h> 9 10 10 11 /* put return address in eax (arg1) */ 11 12 .macro THUNK name, func, put_ret_addr_in_eax=0 ··· 37 36 #ifdef CONFIG_PREEMPT 38 37 THUNK ___preempt_schedule, preempt_schedule 39 38 THUNK ___preempt_schedule_notrace, preempt_schedule_notrace 39 + EXPORT_SYMBOL(___preempt_schedule) 40 + EXPORT_SYMBOL(___preempt_schedule_notrace) 40 41 #endif 41 42
+3
arch/x86/entry/thunk_64.S
··· 8 8 #include <linux/linkage.h> 9 9 #include "calling.h" 10 10 #include <asm/asm.h> 11 + #include <asm/export.h> 11 12 12 13 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */ 13 14 .macro THUNK name, func, put_ret_addr_in_rdi=0 ··· 50 49 #ifdef CONFIG_PREEMPT 51 50 THUNK ___preempt_schedule, preempt_schedule 52 51 THUNK ___preempt_schedule_notrace, preempt_schedule_notrace 52 + EXPORT_SYMBOL(___preempt_schedule) 53 + EXPORT_SYMBOL(___preempt_schedule_notrace) 53 54 #endif 54 55 55 56 #if defined(CONFIG_TRACE_IRQFLAGS) \
+4
arch/x86/include/asm/export.h
··· 1 + #ifdef CONFIG_64BIT 2 + #define KSYM_ALIGN 16 3 + #endif 4 + #include <asm-generic/export.h>
+1 -3
arch/x86/kernel/Makefile
··· 46 46 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o 47 47 obj-$(CONFIG_IRQ_WORK) += irq_work.o 48 48 obj-y += probe_roms.o 49 - obj-$(CONFIG_X86_32) += i386_ksyms_32.o 50 - obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o 51 - obj-$(CONFIG_X86_64) += mcount_64.o 49 + obj-$(CONFIG_X86_64) += sys_x86_64.o mcount_64.o 52 50 obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o 53 51 obj-$(CONFIG_SYSFS) += ksysfs.o 54 52 obj-y += bootflag.o e820.o
+2
arch/x86/kernel/head_32.S
··· 23 23 #include <asm/percpu.h> 24 24 #include <asm/nops.h> 25 25 #include <asm/bootparam.h> 26 + #include <asm/export.h> 26 27 27 28 /* Physical address */ 28 29 #define pa(X) ((X) - __PAGE_OFFSET) ··· 674 673 .fill 4096,1,0 675 674 ENTRY(swapper_pg_dir) 676 675 .fill 1024,4,0 676 + EXPORT_SYMBOL(empty_zero_page) 677 677 678 678 /* 679 679 * This starts the data section.
+3
arch/x86/kernel/head_64.S
··· 21 21 #include <asm/percpu.h> 22 22 #include <asm/nops.h> 23 23 #include "../entry/calling.h" 24 + #include <asm/export.h> 24 25 25 26 #ifdef CONFIG_PARAVIRT 26 27 #include <asm/asm-offsets.h> ··· 487 486 ENTRY(phys_base) 488 487 /* This must match the first entry in level2_kernel_pgt */ 489 488 .quad 0x0000000000000000 489 + EXPORT_SYMBOL(phys_base) 490 490 491 491 #include "../../x86/xen/xen-head.S" 492 492 493 493 __PAGE_ALIGNED_BSS 494 494 NEXT_PAGE(empty_zero_page) 495 495 .skip PAGE_SIZE 496 + EXPORT_SYMBOL(empty_zero_page) 496 497
-47
arch/x86/kernel/i386_ksyms_32.c
··· 1 - #include <linux/export.h> 2 - #include <linux/spinlock_types.h> 3 - 4 - #include <asm/checksum.h> 5 - #include <asm/pgtable.h> 6 - #include <asm/desc.h> 7 - #include <asm/ftrace.h> 8 - 9 - #ifdef CONFIG_FUNCTION_TRACER 10 - /* mcount is defined in assembly */ 11 - EXPORT_SYMBOL(mcount); 12 - #endif 13 - 14 - /* 15 - * Note, this is a prototype to get at the symbol for 16 - * the export, but dont use it from C code, it is used 17 - * by assembly code and is not using C calling convention! 18 - */ 19 - #ifndef CONFIG_X86_CMPXCHG64 20 - extern void cmpxchg8b_emu(void); 21 - EXPORT_SYMBOL(cmpxchg8b_emu); 22 - #endif 23 - 24 - /* Networking helper routines. */ 25 - EXPORT_SYMBOL(csum_partial_copy_generic); 26 - 27 - EXPORT_SYMBOL(__get_user_1); 28 - EXPORT_SYMBOL(__get_user_2); 29 - EXPORT_SYMBOL(__get_user_4); 30 - EXPORT_SYMBOL(__get_user_8); 31 - 32 - EXPORT_SYMBOL(__put_user_1); 33 - EXPORT_SYMBOL(__put_user_2); 34 - EXPORT_SYMBOL(__put_user_4); 35 - EXPORT_SYMBOL(__put_user_8); 36 - 37 - EXPORT_SYMBOL(strstr); 38 - 39 - EXPORT_SYMBOL(csum_partial); 40 - EXPORT_SYMBOL(empty_zero_page); 41 - 42 - #ifdef CONFIG_PREEMPT 43 - EXPORT_SYMBOL(___preempt_schedule); 44 - EXPORT_SYMBOL(___preempt_schedule_notrace); 45 - #endif 46 - 47 - EXPORT_SYMBOL(__sw_hweight32);
+2
arch/x86/kernel/mcount_64.S
··· 7 7 #include <linux/linkage.h> 8 8 #include <asm/ptrace.h> 9 9 #include <asm/ftrace.h> 10 + #include <asm/export.h> 10 11 11 12 12 13 .code64 ··· 295 294 jmp fgraph_trace 296 295 END(function_hook) 297 296 #endif /* CONFIG_DYNAMIC_FTRACE */ 297 + EXPORT_SYMBOL(function_hook) 298 298 #endif /* CONFIG_FUNCTION_TRACER */ 299 299 300 300 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-85
arch/x86/kernel/x8664_ksyms_64.c
··· 1 - /* Exports for assembly files. 2 - All C exports should go in the respective C files. */ 3 - 4 - #include <linux/export.h> 5 - #include <linux/spinlock_types.h> 6 - #include <linux/smp.h> 7 - 8 - #include <net/checksum.h> 9 - 10 - #include <asm/processor.h> 11 - #include <asm/pgtable.h> 12 - #include <asm/uaccess.h> 13 - #include <asm/desc.h> 14 - #include <asm/ftrace.h> 15 - 16 - #ifdef CONFIG_FUNCTION_TRACER 17 - /* mcount and __fentry__ are defined in assembly */ 18 - #ifdef CC_USING_FENTRY 19 - EXPORT_SYMBOL(__fentry__); 20 - #else 21 - EXPORT_SYMBOL(mcount); 22 - #endif 23 - #endif 24 - 25 - EXPORT_SYMBOL(__get_user_1); 26 - EXPORT_SYMBOL(__get_user_2); 27 - EXPORT_SYMBOL(__get_user_4); 28 - EXPORT_SYMBOL(__get_user_8); 29 - EXPORT_SYMBOL(__put_user_1); 30 - EXPORT_SYMBOL(__put_user_2); 31 - EXPORT_SYMBOL(__put_user_4); 32 - EXPORT_SYMBOL(__put_user_8); 33 - 34 - EXPORT_SYMBOL(copy_user_generic_string); 35 - EXPORT_SYMBOL(copy_user_generic_unrolled); 36 - EXPORT_SYMBOL(copy_user_enhanced_fast_string); 37 - EXPORT_SYMBOL(__copy_user_nocache); 38 - EXPORT_SYMBOL(_copy_from_user); 39 - EXPORT_SYMBOL(_copy_to_user); 40 - 41 - EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled); 42 - 43 - EXPORT_SYMBOL(copy_page); 44 - EXPORT_SYMBOL(clear_page); 45 - 46 - EXPORT_SYMBOL(csum_partial); 47 - 48 - EXPORT_SYMBOL(__sw_hweight32); 49 - EXPORT_SYMBOL(__sw_hweight64); 50 - 51 - /* 52 - * Export string functions. We normally rely on gcc builtin for most of these, 53 - * but gcc sometimes decides not to inline them. 
54 - */ 55 - #undef memcpy 56 - #undef memset 57 - #undef memmove 58 - 59 - extern void *__memset(void *, int, __kernel_size_t); 60 - extern void *__memcpy(void *, const void *, __kernel_size_t); 61 - extern void *__memmove(void *, const void *, __kernel_size_t); 62 - extern void *memset(void *, int, __kernel_size_t); 63 - extern void *memcpy(void *, const void *, __kernel_size_t); 64 - extern void *memmove(void *, const void *, __kernel_size_t); 65 - 66 - EXPORT_SYMBOL(__memset); 67 - EXPORT_SYMBOL(__memcpy); 68 - EXPORT_SYMBOL(__memmove); 69 - 70 - EXPORT_SYMBOL(memset); 71 - EXPORT_SYMBOL(memcpy); 72 - EXPORT_SYMBOL(memmove); 73 - 74 - #ifndef CONFIG_DEBUG_VIRTUAL 75 - EXPORT_SYMBOL(phys_base); 76 - #endif 77 - EXPORT_SYMBOL(empty_zero_page); 78 - #ifndef CONFIG_PARAVIRT 79 - EXPORT_SYMBOL(native_load_gs_index); 80 - #endif 81 - 82 - #ifdef CONFIG_PREEMPT 83 - EXPORT_SYMBOL(___preempt_schedule); 84 - EXPORT_SYMBOL(___preempt_schedule_notrace); 85 - #endif
+3
arch/x86/lib/checksum_32.S
··· 28 28 #include <linux/linkage.h> 29 29 #include <asm/errno.h> 30 30 #include <asm/asm.h> 31 + #include <asm/export.h> 31 32 32 33 /* 33 34 * computes a partial checksum, e.g. for TCP/UDP fragments ··· 252 251 ENDPROC(csum_partial) 253 252 254 253 #endif 254 + EXPORT_SYMBOL(csum_partial) 255 255 256 256 /* 257 257 unsigned int csum_partial_copy_generic (const char *src, char *dst, ··· 492 490 #undef ROUND1 493 491 494 492 #endif 493 + EXPORT_SYMBOL(csum_partial_copy_generic)
+2
arch/x86/lib/clear_page_64.S
··· 1 1 #include <linux/linkage.h> 2 2 #include <asm/cpufeatures.h> 3 3 #include <asm/alternative-asm.h> 4 + #include <asm/export.h> 4 5 5 6 /* 6 7 * Most CPUs support enhanced REP MOVSB/STOSB instructions. It is ··· 24 23 rep stosq 25 24 ret 26 25 ENDPROC(clear_page) 26 + EXPORT_SYMBOL(clear_page) 27 27 28 28 ENTRY(clear_page_orig) 29 29
+2
arch/x86/lib/cmpxchg8b_emu.S
··· 7 7 */ 8 8 9 9 #include <linux/linkage.h> 10 + #include <asm/export.h> 10 11 11 12 .text 12 13 ··· 49 48 ret 50 49 51 50 ENDPROC(cmpxchg8b_emu) 51 + EXPORT_SYMBOL(cmpxchg8b_emu)
+2
arch/x86/lib/copy_page_64.S
··· 3 3 #include <linux/linkage.h> 4 4 #include <asm/cpufeatures.h> 5 5 #include <asm/alternative-asm.h> 6 + #include <asm/export.h> 6 7 7 8 /* 8 9 * Some CPUs run faster using the string copy instructions (sane microcode). ··· 18 17 rep movsq 19 18 ret 20 19 ENDPROC(copy_page) 20 + EXPORT_SYMBOL(copy_page) 21 21 22 22 ENTRY(copy_page_regs) 23 23 subq $2*8, %rsp
+8
arch/x86/lib/copy_user_64.S
··· 14 14 #include <asm/alternative-asm.h> 15 15 #include <asm/asm.h> 16 16 #include <asm/smap.h> 17 + #include <asm/export.h> 17 18 18 19 /* Standard copy_to_user with segment limit checking */ 19 20 ENTRY(_copy_to_user) ··· 30 29 "jmp copy_user_enhanced_fast_string", \ 31 30 X86_FEATURE_ERMS 32 31 ENDPROC(_copy_to_user) 32 + EXPORT_SYMBOL(_copy_to_user) 33 33 34 34 /* Standard copy_from_user with segment limit checking */ 35 35 ENTRY(_copy_from_user) ··· 46 44 "jmp copy_user_enhanced_fast_string", \ 47 45 X86_FEATURE_ERMS 48 46 ENDPROC(_copy_from_user) 47 + EXPORT_SYMBOL(_copy_from_user) 48 + 49 49 50 50 .section .fixup,"ax" 51 51 /* must zero dest */ ··· 159 155 _ASM_EXTABLE(21b,50b) 160 156 _ASM_EXTABLE(22b,50b) 161 157 ENDPROC(copy_user_generic_unrolled) 158 + EXPORT_SYMBOL(copy_user_generic_unrolled) 162 159 163 160 /* Some CPUs run faster using the string copy instructions. 164 161 * This is also a lot simpler. Use them when possible. ··· 205 200 _ASM_EXTABLE(1b,11b) 206 201 _ASM_EXTABLE(3b,12b) 207 202 ENDPROC(copy_user_generic_string) 203 + EXPORT_SYMBOL(copy_user_generic_string) 208 204 209 205 /* 210 206 * Some CPUs are adding enhanced REP MOVSB/STOSB instructions. ··· 235 229 236 230 _ASM_EXTABLE(1b,12b) 237 231 ENDPROC(copy_user_enhanced_fast_string) 232 + EXPORT_SYMBOL(copy_user_enhanced_fast_string) 238 233 239 234 /* 240 235 * copy_user_nocache - Uncached memory copy with exception handling ··· 386 379 _ASM_EXTABLE(40b,.L_fixup_1b_copy) 387 380 _ASM_EXTABLE(41b,.L_fixup_1b_copy) 388 381 ENDPROC(__copy_user_nocache) 382 + EXPORT_SYMBOL(__copy_user_nocache)
+1
arch/x86/lib/csum-partial_64.c
··· 135 135 return (__force __wsum)add32_with_carry(do_csum(buff, len), 136 136 (__force u32)sum); 137 137 } 138 + EXPORT_SYMBOL(csum_partial); 138 139 139 140 /* 140 141 * this routine is used for miscellaneous IP-like checksums, mainly
+5
arch/x86/lib/getuser.S
··· 32 32 #include <asm/thread_info.h> 33 33 #include <asm/asm.h> 34 34 #include <asm/smap.h> 35 + #include <asm/export.h> 35 36 36 37 .text 37 38 ENTRY(__get_user_1) ··· 45 44 ASM_CLAC 46 45 ret 47 46 ENDPROC(__get_user_1) 47 + EXPORT_SYMBOL(__get_user_1) 48 48 49 49 ENTRY(__get_user_2) 50 50 add $1,%_ASM_AX ··· 59 57 ASM_CLAC 60 58 ret 61 59 ENDPROC(__get_user_2) 60 + EXPORT_SYMBOL(__get_user_2) 62 61 63 62 ENTRY(__get_user_4) 64 63 add $3,%_ASM_AX ··· 73 70 ASM_CLAC 74 71 ret 75 72 ENDPROC(__get_user_4) 73 + EXPORT_SYMBOL(__get_user_4) 76 74 77 75 ENTRY(__get_user_8) 78 76 #ifdef CONFIG_X86_64 ··· 101 97 ret 102 98 #endif 103 99 ENDPROC(__get_user_8) 100 + EXPORT_SYMBOL(__get_user_8) 104 101 105 102 106 103 bad_get_user:
+3
arch/x86/lib/hweight.S
··· 1 1 #include <linux/linkage.h> 2 + #include <asm/export.h> 2 3 3 4 #include <asm/asm.h> 4 5 ··· 33 32 __ASM_SIZE(pop,) %__ASM_REG(dx) 34 33 ret 35 34 ENDPROC(__sw_hweight32) 35 + EXPORT_SYMBOL(__sw_hweight32) 36 36 37 37 ENTRY(__sw_hweight64) 38 38 #ifdef CONFIG_X86_64 ··· 79 77 ret 80 78 #endif 81 79 ENDPROC(__sw_hweight64) 80 + EXPORT_SYMBOL(__sw_hweight64)
+4
arch/x86/lib/memcpy_64.S
··· 4 4 #include <asm/errno.h> 5 5 #include <asm/cpufeatures.h> 6 6 #include <asm/alternative-asm.h> 7 + #include <asm/export.h> 7 8 8 9 /* 9 10 * We build a jump to memcpy_orig by default which gets NOPped out on ··· 41 40 ret 42 41 ENDPROC(memcpy) 43 42 ENDPROC(__memcpy) 43 + EXPORT_SYMBOL(memcpy) 44 + EXPORT_SYMBOL(__memcpy) 44 45 45 46 /* 46 47 * memcpy_erms() - enhanced fast string memcpy. This is faster and ··· 277 274 xorq %rax, %rax 278 275 ret 279 276 ENDPROC(memcpy_mcsafe_unrolled) 277 + EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled) 280 278 281 279 .section .fixup, "ax" 282 280 /* Return -EFAULT for any failure */
+3
arch/x86/lib/memmove_64.S
··· 8 8 #include <linux/linkage.h> 9 9 #include <asm/cpufeatures.h> 10 10 #include <asm/alternative-asm.h> 11 + #include <asm/export.h> 11 12 12 13 #undef memmove 13 14 ··· 208 207 retq 209 208 ENDPROC(__memmove) 210 209 ENDPROC(memmove) 210 + EXPORT_SYMBOL(__memmove) 211 + EXPORT_SYMBOL(memmove)
+3
arch/x86/lib/memset_64.S
··· 3 3 #include <linux/linkage.h> 4 4 #include <asm/cpufeatures.h> 5 5 #include <asm/alternative-asm.h> 6 + #include <asm/export.h> 6 7 7 8 .weak memset 8 9 ··· 44 43 ret 45 44 ENDPROC(memset) 46 45 ENDPROC(__memset) 46 + EXPORT_SYMBOL(memset) 47 + EXPORT_SYMBOL(__memset) 47 48 48 49 /* 49 50 * ISO C memset - set a memory block to a byte value. This function uses
+5
arch/x86/lib/putuser.S
··· 15 15 #include <asm/errno.h> 16 16 #include <asm/asm.h> 17 17 #include <asm/smap.h> 18 + #include <asm/export.h> 18 19 19 20 20 21 /* ··· 44 43 xor %eax,%eax 45 44 EXIT 46 45 ENDPROC(__put_user_1) 46 + EXPORT_SYMBOL(__put_user_1) 47 47 48 48 ENTRY(__put_user_2) 49 49 ENTER ··· 57 55 xor %eax,%eax 58 56 EXIT 59 57 ENDPROC(__put_user_2) 58 + EXPORT_SYMBOL(__put_user_2) 60 59 61 60 ENTRY(__put_user_4) 62 61 ENTER ··· 70 67 xor %eax,%eax 71 68 EXIT 72 69 ENDPROC(__put_user_4) 70 + EXPORT_SYMBOL(__put_user_4) 73 71 74 72 ENTRY(__put_user_8) 75 73 ENTER ··· 86 82 xor %eax,%eax 87 83 EXIT 88 84 ENDPROC(__put_user_8) 85 + EXPORT_SYMBOL(__put_user_8) 89 86 90 87 bad_put_user: 91 88 movl $-EFAULT,%eax
+2 -1
arch/x86/lib/strstr_32.c
··· 1 1 #include <linux/string.h> 2 + #include <linux/export.h> 2 3 3 4 char *strstr(const char *cs, const char *ct) 4 5 { ··· 29 28 : "dx", "di"); 30 29 return __res; 31 30 } 32 - 31 + EXPORT_SYMBOL(strstr);
+1 -1
arch/x86/um/Makefile
··· 8 8 BITS := 64 9 9 endif 10 10 11 - obj-y = bug.o bugs_$(BITS).o delay.o fault.o ksyms.o ldt.o \ 11 + obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \ 12 12 ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \ 13 13 stub_$(BITS).o stub_segv.o \ 14 14 sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
+2
arch/x86/um/checksum_32.S
··· 27 27 28 28 #include <asm/errno.h> 29 29 #include <asm/asm.h> 30 + #include <asm/export.h> 30 31 31 32 /* 32 33 * computes a partial checksum, e.g. for TCP/UDP fragments ··· 215 214 ret 216 215 217 216 #endif 217 + EXPORT_SYMBOL(csum_partial)
-13
arch/x86/um/ksyms.c
··· 1 - #include <linux/module.h> 2 - #include <asm/string.h> 3 - #include <asm/checksum.h> 4 - 5 - #ifndef CONFIG_X86_32 6 - /*XXX: we need them because they would be exported by x86_64 */ 7 - #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 8 - EXPORT_SYMBOL(memcpy); 9 - #else 10 - EXPORT_SYMBOL(__memcpy); 11 - #endif 12 - #endif 13 - EXPORT_SYMBOL(csum_partial);
+94
include/asm-generic/export.h
··· 1 + #ifndef __ASM_GENERIC_EXPORT_H 2 + #define __ASM_GENERIC_EXPORT_H 3 + 4 + #ifndef KSYM_FUNC 5 + #define KSYM_FUNC(x) x 6 + #endif 7 + #ifdef CONFIG_64BIT 8 + #define __put .quad 9 + #ifndef KSYM_ALIGN 10 + #define KSYM_ALIGN 8 11 + #endif 12 + #ifndef KCRC_ALIGN 13 + #define KCRC_ALIGN 8 14 + #endif 15 + #else 16 + #define __put .long 17 + #ifndef KSYM_ALIGN 18 + #define KSYM_ALIGN 4 19 + #endif 20 + #ifndef KCRC_ALIGN 21 + #define KCRC_ALIGN 4 22 + #endif 23 + #endif 24 + 25 + #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX 26 + #define KSYM(name) _##name 27 + #else 28 + #define KSYM(name) name 29 + #endif 30 + 31 + /* 32 + * note on .section use: @progbits vs %progbits nastiness doesn't matter, 33 + * since we immediately emit into those sections anyway. 34 + */ 35 + .macro ___EXPORT_SYMBOL name,val,sec 36 + #ifdef CONFIG_MODULES 37 + .globl KSYM(__ksymtab_\name) 38 + .section ___ksymtab\sec+\name,"a" 39 + .balign KSYM_ALIGN 40 + KSYM(__ksymtab_\name): 41 + __put \val, KSYM(__kstrtab_\name) 42 + .previous 43 + .section __ksymtab_strings,"a" 44 + KSYM(__kstrtab_\name): 45 + #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX 46 + .asciz "_\name" 47 + #else 48 + .asciz "\name" 49 + #endif 50 + .previous 51 + #ifdef CONFIG_MODVERSIONS 52 + .section ___kcrctab\sec+\name,"a" 53 + .balign KCRC_ALIGN 54 + KSYM(__kcrctab_\name): 55 + __put KSYM(__crc_\name) 56 + .weak KSYM(__crc_\name) 57 + .previous 58 + #endif 59 + #endif 60 + .endm 61 + #undef __put 62 + 63 + #if defined(__KSYM_DEPS__) 64 + 65 + #define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym === 66 + 67 + #elif defined(CONFIG_TRIM_UNUSED_KSYMS) 68 + 69 + #include <linux/kconfig.h> 70 + #include <generated/autoksyms.h> 71 + 72 + #define __EXPORT_SYMBOL(sym, val, sec) \ 73 + __cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym)) 74 + #define __cond_export_sym(sym, val, sec, conf) \ 75 + ___cond_export_sym(sym, val, sec, conf) 76 + #define ___cond_export_sym(sym, val, sec, enabled) \ 77 + 
__cond_export_sym_##enabled(sym, val, sec) 78 + #define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec 79 + #define __cond_export_sym_0(sym, val, sec) /* nothing */ 80 + 81 + #else 82 + #define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec 83 + #endif 84 + 85 + #define EXPORT_SYMBOL(name) \ 86 + __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),) 87 + #define EXPORT_SYMBOL_GPL(name) \ 88 + __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl) 89 + #define EXPORT_DATA_SYMBOL(name) \ 90 + __EXPORT_SYMBOL(name, KSYM(name),) 91 + #define EXPORT_DATA_SYMBOL_GPL(name) \ 92 + __EXPORT_SYMBOL(name, KSYM(name),_gpl) 93 + 94 + #endif
+34 -23
include/asm-generic/vmlinux.lds.h
··· 196 196 *(.dtb.init.rodata) \ 197 197 VMLINUX_SYMBOL(__dtb_end) = .; 198 198 199 - /* .data section */ 199 + /* 200 + * .data section 201 + * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates 202 + * .data.identifier which needs to be pulled in with .data, but don't want to 203 + * pull in .data..stuff which has its own requirements. Same for bss. 204 + */ 200 205 #define DATA_DATA \ 201 - *(.data) \ 206 + *(.data .data.[0-9a-zA-Z_]*) \ 202 207 *(.ref.data) \ 203 208 *(.data..shared_aligned) /* percpu related */ \ 204 209 MEM_KEEP(init.data) \ ··· 325 320 /* Kernel symbol table: Normal symbols */ \ 326 321 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ 327 322 VMLINUX_SYMBOL(__start___ksymtab) = .; \ 328 - *(SORT(___ksymtab+*)) \ 323 + KEEP(*(SORT(___ksymtab+*))) \ 329 324 VMLINUX_SYMBOL(__stop___ksymtab) = .; \ 330 325 } \ 331 326 \ 332 327 /* Kernel symbol table: GPL-only symbols */ \ 333 328 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ 334 329 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ 335 - *(SORT(___ksymtab_gpl+*)) \ 330 + KEEP(*(SORT(___ksymtab_gpl+*))) \ 336 331 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ 337 332 } \ 338 333 \ 339 334 /* Kernel symbol table: Normal unused symbols */ \ 340 335 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ 341 336 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ 342 - *(SORT(___ksymtab_unused+*)) \ 337 + KEEP(*(SORT(___ksymtab_unused+*))) \ 343 338 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ 344 339 } \ 345 340 \ 346 341 /* Kernel symbol table: GPL-only unused symbols */ \ 347 342 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ 348 343 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ 349 - *(SORT(___ksymtab_unused_gpl+*)) \ 344 + KEEP(*(SORT(___ksymtab_unused_gpl+*))) \ 350 345 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ 351 346 } \ 352 347 \ 353 348 /* Kernel symbol table: GPL-future-only symbols */ \ 354 349 __ksymtab_gpl_future 
: AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ 355 350 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ 356 - *(SORT(___ksymtab_gpl_future+*)) \ 351 + KEEP(*(SORT(___ksymtab_gpl_future+*))) \ 357 352 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ 358 353 } \ 359 354 \ 360 355 /* Kernel symbol table: Normal symbols */ \ 361 356 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ 362 357 VMLINUX_SYMBOL(__start___kcrctab) = .; \ 363 - *(SORT(___kcrctab+*)) \ 358 + KEEP(*(SORT(___kcrctab+*))) \ 364 359 VMLINUX_SYMBOL(__stop___kcrctab) = .; \ 365 360 } \ 366 361 \ 367 362 /* Kernel symbol table: GPL-only symbols */ \ 368 363 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ 369 364 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ 370 - *(SORT(___kcrctab_gpl+*)) \ 365 + KEEP(*(SORT(___kcrctab_gpl+*))) \ 371 366 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ 372 367 } \ 373 368 \ 374 369 /* Kernel symbol table: Normal unused symbols */ \ 375 370 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ 376 371 VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ 377 - *(SORT(___kcrctab_unused+*)) \ 372 + KEEP(*(SORT(___kcrctab_unused+*))) \ 378 373 VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ 379 374 } \ 380 375 \ 381 376 /* Kernel symbol table: GPL-only unused symbols */ \ 382 377 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ 383 378 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ 384 - *(SORT(___kcrctab_unused_gpl+*)) \ 379 + KEEP(*(SORT(___kcrctab_unused_gpl+*))) \ 385 380 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ 386 381 } \ 387 382 \ 388 383 /* Kernel symbol table: GPL-future-only symbols */ \ 389 384 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ 390 385 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ 391 - *(SORT(___kcrctab_gpl_future+*)) \ 386 + KEEP(*(SORT(___kcrctab_gpl_future+*))) \ 392 387 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ 393 388 } \ 394 389 \ 395 390 /* Kernel 
symbol table: strings */ \ 396 391 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ 397 - *(__ksymtab_strings) \ 392 + KEEP(*(__ksymtab_strings)) \ 398 393 } \ 399 394 \ 400 395 /* __*init sections */ \ ··· 429 424 #define SECURITY_INIT \ 430 425 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ 431 426 VMLINUX_SYMBOL(__security_initcall_start) = .; \ 432 - *(.security_initcall.init) \ 427 + KEEP(*(.security_initcall.init)) \ 433 428 VMLINUX_SYMBOL(__security_initcall_end) = .; \ 434 429 } 435 430 436 431 /* .text section. Map to function alignment to avoid address changes 437 - * during second ld run in second ld pass when generating System.map */ 432 + * during second ld run in second ld pass when generating System.map 433 + * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates 434 + * .text.identifier which needs to be pulled in with .text , but some 435 + * architectures define .text.foo which is not intended to be pulled in here. 
436 + * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have 437 + * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */ 438 438 #define TEXT_TEXT \ 439 439 ALIGN_FUNCTION(); \ 440 440 *(.text.hot .text .text.fixup .text.unlikely) \ ··· 543 533 544 534 /* init and exit section handling */ 545 535 #define INIT_DATA \ 536 + KEEP(*(SORT(___kentry+*))) \ 546 537 *(.init.data) \ 547 538 MEM_DISCARD(init.data) \ 548 539 KERNEL_CTORS() \ ··· 610 599 BSS_FIRST_SECTIONS \ 611 600 *(.bss..page_aligned) \ 612 601 *(.dynbss) \ 613 - *(.bss) \ 602 + *(.bss .bss.[0-9a-zA-Z_]*) \ 614 603 *(COMMON) \ 615 604 } 616 605 ··· 693 682 694 683 #define INIT_CALLS_LEVEL(level) \ 695 684 VMLINUX_SYMBOL(__initcall##level##_start) = .; \ 696 - *(.initcall##level##.init) \ 697 - *(.initcall##level##s.init) \ 685 + KEEP(*(.initcall##level##.init)) \ 686 + KEEP(*(.initcall##level##s.init)) \ 698 687 699 688 #define INIT_CALLS \ 700 689 VMLINUX_SYMBOL(__initcall_start) = .; \ 701 - *(.initcallearly.init) \ 690 + KEEP(*(.initcallearly.init)) \ 702 691 INIT_CALLS_LEVEL(0) \ 703 692 INIT_CALLS_LEVEL(1) \ 704 693 INIT_CALLS_LEVEL(2) \ ··· 712 701 713 702 #define CON_INITCALL \ 714 703 VMLINUX_SYMBOL(__con_initcall_start) = .; \ 715 - *(.con_initcall.init) \ 704 + KEEP(*(.con_initcall.init)) \ 716 705 VMLINUX_SYMBOL(__con_initcall_end) = .; 717 706 718 707 #define SECURITY_INITCALL \ 719 708 VMLINUX_SYMBOL(__security_initcall_start) = .; \ 720 - *(.security_initcall.init) \ 709 + KEEP(*(.security_initcall.init)) \ 721 710 VMLINUX_SYMBOL(__security_initcall_end) = .; 722 711 723 712 #ifdef CONFIG_BLK_DEV_INITRD 724 713 #define INIT_RAM_FS \ 725 714 . = ALIGN(4); \ 726 715 VMLINUX_SYMBOL(__initramfs_start) = .; \ 727 - *(.init.ramfs) \ 716 + KEEP(*(.init.ramfs)) \ 728 717 . = ALIGN(8); \ 729 - *(.init.ramfs.info) 718 + KEEP(*(.init.ramfs.info)) 730 719 #else 731 720 #define INIT_RAM_FS 732 721 #endif
+23
include/linux/compiler.h
··· 182 182 # define unreachable() do { } while (1) 183 183 #endif 184 184 185 + /* 186 + * KENTRY - kernel entry point 187 + * This can be used to annotate symbols (functions or data) that are used 188 + * without their linker symbol being referenced explicitly. For example, 189 + * interrupt vector handlers, or functions in the kernel image that are found 190 + * programatically. 191 + * 192 + * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those 193 + * are handled in their own way (with KEEP() in linker scripts). 194 + * 195 + * KENTRY can be avoided if the symbols in question are marked as KEEP() in the 196 + * linker script. For example an architecture could KEEP() its entire 197 + * boot/exception vector code rather than annotate each function and data. 198 + */ 199 + #ifndef KENTRY 200 + # define KENTRY(sym) \ 201 + extern typeof(sym) sym; \ 202 + static const unsigned long __kentry_##sym \ 203 + __used \ 204 + __attribute__((section("___kentry" "+" #sym ), used)) \ 205 + = (unsigned long)&sym; 206 + #endif 207 + 185 208 #ifndef RELOC_HIDE 186 209 # define RELOC_HIDE(ptr, off) \ 187 210 ({ unsigned long __ptr; \
+15 -15
include/linux/export.h
··· 1 1 #ifndef _LINUX_EXPORT_H 2 2 #define _LINUX_EXPORT_H 3 + 3 4 /* 4 5 * Export symbols from the kernel to modules. Forked from module.h 5 6 * to reduce the amount of pointless cruft we feed to gcc when only ··· 43 42 #ifdef CONFIG_MODVERSIONS 44 43 /* Mark the CRC weak since genksyms apparently decides not to 45 44 * generate a checksums for some symbols */ 46 - #define __CRC_SYMBOL(sym, sec) \ 47 - extern __visible void *__crc_##sym __attribute__((weak)); \ 48 - static const unsigned long __kcrctab_##sym \ 49 - __used \ 50 - __attribute__((section("___kcrctab" sec "+" #sym), unused)) \ 45 + #define __CRC_SYMBOL(sym, sec) \ 46 + extern __visible void *__crc_##sym __attribute__((weak)); \ 47 + static const unsigned long __kcrctab_##sym \ 48 + __used \ 49 + __attribute__((section("___kcrctab" sec "+" #sym), used)) \ 51 50 = (unsigned long) &__crc_##sym; 52 51 #else 53 52 #define __CRC_SYMBOL(sym, sec) 54 53 #endif 55 54 56 55 /* For every exported symbol, place a struct in the __ksymtab section */ 57 - #define ___EXPORT_SYMBOL(sym, sec) \ 58 - extern typeof(sym) sym; \ 59 - __CRC_SYMBOL(sym, sec) \ 60 - static const char __kstrtab_##sym[] \ 61 - __attribute__((section("__ksymtab_strings"), aligned(1))) \ 62 - = VMLINUX_SYMBOL_STR(sym); \ 63 - extern const struct kernel_symbol __ksymtab_##sym; \ 64 - __visible const struct kernel_symbol __ksymtab_##sym \ 65 - __used \ 66 - __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ 56 + #define ___EXPORT_SYMBOL(sym, sec) \ 57 + extern typeof(sym) sym; \ 58 + __CRC_SYMBOL(sym, sec) \ 59 + static const char __kstrtab_##sym[] \ 60 + __attribute__((section("__ksymtab_strings"), aligned(1))) \ 61 + = VMLINUX_SYMBOL_STR(sym); \ 62 + static const struct kernel_symbol __ksymtab_##sym \ 63 + __used \ 64 + __attribute__((section("___ksymtab" sec "+" #sym), used)) \ 67 65 = { (unsigned long)&sym, __kstrtab_##sym } 68 66 69 67 #if defined(__KSYM_DEPS__)
+13 -25
include/linux/init.h
··· 139 139 140 140 #ifndef __ASSEMBLY__ 141 141 142 - #ifdef CONFIG_LTO 143 - /* Work around a LTO gcc problem: when there is no reference to a variable 144 - * in a module it will be moved to the end of the program. This causes 145 - * reordering of initcalls which the kernel does not like. 146 - * Add a dummy reference function to avoid this. The function is 147 - * deleted by the linker. 148 - */ 149 - #define LTO_REFERENCE_INITCALL(x) \ 150 - ; /* yes this is needed */ \ 151 - static __used __exit void *reference_##x(void) \ 152 - { \ 153 - return &x; \ 154 - } 155 - #else 156 - #define LTO_REFERENCE_INITCALL(x) 157 - #endif 158 - 159 - /* initcalls are now grouped by functionality into separate 142 + /* 143 + * initcalls are now grouped by functionality into separate 160 144 * subsections. Ordering inside the subsections is determined 161 145 * by link order. 162 146 * For backwards compatibility, initcall() puts the call in ··· 148 164 * 149 165 * The `id' arg to __define_initcall() is needed so that multiple initcalls 150 166 * can point at the same handler without causing duplicate-symbol build errors. 167 + * 168 + * Initcalls are run by placing pointers in initcall sections that the 169 + * kernel iterates at runtime. The linker can do dead code / data elimination 170 + * and remove that completely, so the initcall sections have to be marked 171 + * as KEEP() in the linker script. 151 172 */ 152 173 153 174 #define __define_initcall(fn, id) \ 154 175 static initcall_t __initcall_##fn##id __used \ 155 - __attribute__((__section__(".initcall" #id ".init"))) = fn; \ 156 - LTO_REFERENCE_INITCALL(__initcall_##fn##id) 176 + __attribute__((__section__(".initcall" #id ".init"))) = fn; 157 177 158 178 /* 159 179 * Early initcalls run before initializing SMP. 
··· 193 205 194 206 #define __initcall(fn) device_initcall(fn) 195 207 196 - #define __exitcall(fn) \ 208 + #define __exitcall(fn) \ 197 209 static exitcall_t __exitcall_##fn __exit_call = fn 198 210 199 - #define console_initcall(fn) \ 200 - static initcall_t __initcall_##fn \ 211 + #define console_initcall(fn) \ 212 + static initcall_t __initcall_##fn \ 201 213 __used __section(.con_initcall.init) = fn 202 214 203 - #define security_initcall(fn) \ 204 - static initcall_t __initcall_##fn \ 215 + #define security_initcall(fn) \ 216 + static initcall_t __initcall_##fn \ 205 217 __used __section(.security_initcall.init) = fn 206 218 207 219 struct obs_kernel_param {
+2
init/Makefile
··· 2 2 # Makefile for the linux kernel. 3 3 # 4 4 5 + ccflags-y := -fno-function-sections -fno-data-sections 6 + 5 7 obj-y := main.o version.o mounts.o 6 8 ifneq ($(CONFIG_BLK_DEV_INITRD),y) 7 9 obj-y += noinitramfs.o
+39 -4
scripts/Makefile.build
··· 81 81 82 82 ifneq ($(strip $(lib-y) $(lib-m) $(lib-)),) 83 83 lib-target := $(obj)/lib.a 84 + obj-y += $(obj)/lib-ksyms.o 84 85 endif 85 86 86 87 ifneq ($(strip $(obj-y) $(obj-m) $(obj-) $(subdir-m) $(lib-target)),) ··· 359 358 # Rule to compile a set of .o files into one .o file 360 359 # 361 360 ifdef builtin-target 362 - quiet_cmd_link_o_target = LD $@ 361 + 362 + ifdef CONFIG_THIN_ARCHIVES 363 + cmd_make_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS) 364 + cmd_make_empty_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS) 365 + quiet_cmd_link_o_target = AR $@ 366 + else 367 + cmd_make_builtin = $(LD) $(ld_flags) -r -o 368 + cmd_make_empty_builtin = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) 369 + quiet_cmd_link_o_target = LD $@ 370 + endif 371 + 363 372 # If the list of objects to link is empty, just create an empty built-in.o 364 373 cmd_link_o_target = $(if $(strip $(obj-y)),\ 365 - $(LD) $(ld_flags) -r -o $@ $(filter $(obj-y), $^) \ 374 + $(cmd_make_builtin) $@ $(filter $(obj-y), $^) \ 366 375 $(cmd_secanalysis),\ 367 - rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@) 376 + $(cmd_make_empty_builtin) $@) 368 377 369 378 $(builtin-target): $(obj-y) FORCE 370 379 $(call if_changed,link_o_target) ··· 400 389 # 401 390 ifdef lib-target 402 391 quiet_cmd_link_l_target = AR $@ 403 - cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y) 392 + 393 + ifdef CONFIG_THIN_ARCHIVES 394 + cmd_link_l_target = rm -f $@; $(AR) rcsT$(KBUILD_ARFLAGS) $@ $(lib-y) 395 + else 396 + cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y) 397 + endif 404 398 405 399 $(lib-target): $(lib-y) FORCE 406 400 $(call if_changed,link_l_target) 407 401 408 402 targets += $(lib-target) 403 + 404 + dummy-object = $(obj)/.lib_exports.o 405 + ksyms-lds = $(dot-target).lds 406 + ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX 407 + ref_prefix = EXTERN(_ 408 + else 409 + ref_prefix = EXTERN( 410 + endif 411 + 412 + quiet_cmd_export_list = EXPORTS $@ 413 + cmd_export_list = $(OBJDUMP) -h 
$< | \ 414 + sed -ne '/___ksymtab/{s/.*+/$(ref_prefix)/;s/ .*/)/;p}' >$(ksyms-lds);\ 415 + rm -f $(dummy-object);\ 416 + $(AR) rcs$(KBUILD_ARFLAGS) $(dummy-object);\ 417 + $(LD) $(ld_flags) -r -o $@ -T $(ksyms-lds) $(dummy-object);\ 418 + rm $(dummy-object) $(ksyms-lds) 419 + 420 + $(obj)/lib-ksyms.o: $(lib-target) FORCE 421 + $(call if_changed,export_list) 409 422 endif 410 423 411 424 #
+9 -5
scripts/Makefile.modpost
··· 115 115 116 116 targets += $(modules:.ko=.mod.o) 117 117 118 - # Step 6), final link of the modules 118 + ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink) 119 + 120 + # Step 6), final link of the modules with optional arch pass after final link 119 121 quiet_cmd_ld_ko_o = LD [M] $@ 120 - cmd_ld_ko_o = $(LD) -r $(LDFLAGS) \ 121 - $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \ 122 - -o $@ $(filter-out FORCE,$^) 122 + cmd_ld_ko_o = \ 123 + $(LD) -r $(LDFLAGS) \ 124 + $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \ 125 + -o $@ $(filter-out FORCE,$^) ; \ 126 + $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true) 123 127 124 128 $(modules): %.ko :%.o %.mod.o FORCE 125 - $(call if_changed,ld_ko_o) 129 + +$(call if_changed,ld_ko_o) 126 130 127 131 targets += $(modules) 128 132
+28 -58
scripts/basic/fixdep.c
··· 82 82 * to date before even starting the recursive build, so it's too late 83 83 * at this point anyway. 84 84 * 85 - * The algorithm to grep for "CONFIG_..." is bit unusual, but should 86 - * be fast ;-) We don't even try to really parse the header files, but 85 + * We don't even try to really parse the header files, but 87 86 * merely grep, i.e. if CONFIG_FOO is mentioned in a comment, it will 88 87 * be picked up as well. It's not a problem with respect to 89 88 * correctness, since that can only give too many dependencies, thus ··· 113 114 #include <limits.h> 114 115 #include <ctype.h> 115 116 #include <arpa/inet.h> 116 - 117 - #define INT_CONF ntohl(0x434f4e46) 118 - #define INT_ONFI ntohl(0x4f4e4649) 119 - #define INT_NFIG ntohl(0x4e464947) 120 - #define INT_FIG_ ntohl(0x4649475f) 121 117 122 118 int insert_extra_deps; 123 119 char *target; ··· 235 241 print_config(m, slen); 236 242 } 237 243 238 - static void parse_config_file(const char *map, size_t len) 244 + static void parse_config_file(const char *p) 239 245 { 240 - const int *end = (const int *) (map + len); 241 - /* start at +1, so that p can never be < map */ 242 - const int *m = (const int *) map + 1; 243 - const char *p, *q; 246 + const char *q, *r; 244 247 245 - for (; m < end; m++) { 246 - if (*m == INT_CONF) { p = (char *) m ; goto conf; } 247 - if (*m == INT_ONFI) { p = (char *) m-1; goto conf; } 248 - if (*m == INT_NFIG) { p = (char *) m-2; goto conf; } 249 - if (*m == INT_FIG_) { p = (char *) m-3; goto conf; } 250 - continue; 251 - conf: 252 - if (p > map + len - 7) 253 - continue; 254 - if (memcmp(p, "CONFIG_", 7)) 255 - continue; 248 + while ((p = strstr(p, "CONFIG_"))) { 256 249 p += 7; 257 - for (q = p; q < map + len; q++) { 258 - if (!(isalnum(*q) || *q == '_')) 259 - goto found; 260 - } 261 - continue; 262 - 263 - found: 264 - if (!memcmp(q - 7, "_MODULE", 7)) 265 - q -= 7; 266 - if (q - p < 0) 267 - continue; 268 - use_config(p, q - p); 250 + q = p; 251 + while (*q && (isalnum(*q) 
|| *q == '_')) 252 + q++; 253 + if (memcmp(q - 7, "_MODULE", 7) == 0) 254 + r = q - 7; 255 + else 256 + r = q; 257 + if (r > p) 258 + use_config(p, r - p); 259 + p = q; 269 260 } 270 261 } 271 262 ··· 270 291 { 271 292 struct stat st; 272 293 int fd; 273 - void *map; 294 + char *map; 274 295 275 296 fd = open(filename, O_RDONLY); 276 297 if (fd < 0) { ··· 287 308 close(fd); 288 309 return; 289 310 } 290 - map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); 291 - if ((long) map == -1) { 292 - perror("fixdep: mmap"); 311 + map = malloc(st.st_size + 1); 312 + if (!map) { 313 + perror("fixdep: malloc"); 293 314 close(fd); 294 315 return; 295 316 } 296 - 297 - parse_config_file(map, st.st_size); 298 - 299 - munmap(map, st.st_size); 300 - 317 + if (read(fd, map, st.st_size) != st.st_size) { 318 + perror("fixdep: read"); 319 + close(fd); 320 + return; 321 + } 322 + map[st.st_size] = '\0'; 301 323 close(fd); 324 + 325 + parse_config_file(map); 326 + 327 + free(map); 302 328 } 303 329 304 330 /* ··· 430 446 close(fd); 431 447 } 432 448 433 - static void traps(void) 434 - { 435 - static char test[] __attribute__((aligned(sizeof(int)))) = "CONF"; 436 - int *p = (int *)test; 437 - 438 - if (*p != INT_CONF) { 439 - fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n", 440 - *p); 441 - exit(2); 442 - } 443 - } 444 - 445 449 int main(int argc, char *argv[]) 446 450 { 447 - traps(); 448 - 449 451 if (argc == 5 && !strcmp(argv[1], "-e")) { 450 452 insert_extra_deps = 1; 451 453 argv++;
+4 -1
scripts/gen_initramfs_list.sh
··· 97 97 } 98 98 99 99 list_parse() { 100 - [ ! -L "$1" ] && echo "$1 \\" || : 100 + if [ -L "$1" ]; then 101 + return 102 + fi 103 + echo "$1" | sed 's/:/\\:/g; s/$/ \\/' 101 104 } 102 105 103 106 # for each file print a line in following format
+17 -18
scripts/genksyms/lex.l
··· 289 289 } 290 290 break; 291 291 292 + case ST_TYPEOF_1: 293 + if (token == IDENT) 294 + { 295 + if (is_reserved_word(yytext, yyleng) 296 + || find_symbol(yytext, SYM_TYPEDEF, 1)) 297 + { 298 + yyless(0); 299 + unput('('); 300 + lexstate = ST_NORMAL; 301 + token = TYPEOF_KEYW; 302 + break; 303 + } 304 + _APP("(", 1); 305 + } 306 + lexstate = ST_TYPEOF; 307 + /* FALLTHRU */ 308 + 292 309 case ST_TYPEOF: 293 310 switch (token) 294 311 { ··· 329 312 goto repeat; 330 313 } 331 314 break; 332 - 333 - case ST_TYPEOF_1: 334 - if (token == IDENT) 335 - { 336 - if (is_reserved_word(yytext, yyleng) 337 - || find_symbol(yytext, SYM_TYPEDEF, 1)) 338 - { 339 - yyless(0); 340 - unput('('); 341 - lexstate = ST_NORMAL; 342 - token = TYPEOF_KEYW; 343 - break; 344 - } 345 - _APP("(", 1); 346 - } 347 - APP; 348 - lexstate = ST_TYPEOF; 349 - goto repeat; 350 315 351 316 case ST_BRACKET: 352 317 APP;
+17 -18
scripts/genksyms/lex.lex.c_shipped
··· 2098 2098 } 2099 2099 break; 2100 2100 2101 + case ST_TYPEOF_1: 2102 + if (token == IDENT) 2103 + { 2104 + if (is_reserved_word(yytext, yyleng) 2105 + || find_symbol(yytext, SYM_TYPEDEF, 1)) 2106 + { 2107 + yyless(0); 2108 + unput('('); 2109 + lexstate = ST_NORMAL; 2110 + token = TYPEOF_KEYW; 2111 + break; 2112 + } 2113 + _APP("(", 1); 2114 + } 2115 + lexstate = ST_TYPEOF; 2116 + /* FALLTHRU */ 2117 + 2101 2118 case ST_TYPEOF: 2102 2119 switch (token) 2103 2120 { ··· 2138 2121 goto repeat; 2139 2122 } 2140 2123 break; 2141 - 2142 - case ST_TYPEOF_1: 2143 - if (token == IDENT) 2144 - { 2145 - if (is_reserved_word(yytext, yyleng) 2146 - || find_symbol(yytext, SYM_TYPEDEF, 1)) 2147 - { 2148 - yyless(0); 2149 - unput('('); 2150 - lexstate = ST_NORMAL; 2151 - token = TYPEOF_KEYW; 2152 - break; 2153 - } 2154 - _APP("(", 1); 2155 - } 2156 - APP; 2157 - lexstate = ST_TYPEOF; 2158 - goto repeat; 2159 2124 2160 2125 case ST_BRACKET: 2161 2126 APP;