Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kbuild-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild

Pull Kbuild updates from Masahiro Yamada:

- improve fixdep to coalesce consecutive slashes in dep-files

- fix some issues of the maintainer string generation in deb-pkg script

- remove unused CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX and clean-up
several tools and linker scripts

- clean-up modpost

- allow enabling the dead code/data elimination for PowerPC in EXPERT
mode

- improve two coccinelle scripts for better performance

- pass endianness and machine size flags to sparse for all architectures

- misc fixes

* tag 'kbuild-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild: (25 commits)
kbuild: add machine size to CHECKFLAGS
kbuild: add endianness flag to CHEKCFLAGS
kbuild: $(CHECK) doesnt need NOSTDINC_FLAGS twice
scripts: Fixed printf format mismatch
scripts/tags.sh: use `find` for $ALLSOURCE_ARCHS generation
coccinelle: deref_null: improve performance
coccinelle: mini_lock: improve performance
powerpc: Allow LD_DEAD_CODE_DATA_ELIMINATION to be selected
kbuild: Allow LD_DEAD_CODE_DATA_ELIMINATION to be selectable if enabled
kbuild: LD_DEAD_CODE_DATA_ELIMINATION no -ffunction-sections/-fdata-sections for module build
kbuild: Fix asm-generic/vmlinux.lds.h for LD_DEAD_CODE_DATA_ELIMINATION
modpost: constify *modname function argument where possible
modpost: remove redundant is_vmlinux() test
modpost: use strstarts() helper more widely
modpost: pass struct elf_info pointer to get_modinfo()
checkpatch: remove VMLINUX_SYMBOL() check
vmlinux.lds.h: remove no-op macro VMLINUX_SYMBOL()
kbuild: remove CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
export.h: remove code for prefixing symbols with underscore
depmod.sh: remove symbol prefix support
...

+349 -443
+9 -4
Makefile
··· 802 802 endif 803 803 804 804 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION 805 - KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,) 806 - KBUILD_CFLAGS += $(call cc-option,-fdata-sections,) 805 + KBUILD_CFLAGS_KERNEL += $(call cc-option,-ffunction-sections,) 806 + KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdata-sections,) 807 807 endif 808 808 809 809 # arch Makefile may override CC so keep this after arch Makefile is included 810 810 NOSTDINC_FLAGS += -nostdinc -isystem $(call shell-cached,$(CC) -print-file-name=include) 811 - CHECKFLAGS += $(NOSTDINC_FLAGS) 812 811 813 812 # warn about C99 declaration after statement 814 813 KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,) ··· 876 877 ifeq ($(CONFIG_STRIP_ASM_SYMS),y) 877 878 LDFLAGS_vmlinux += $(call ld-option, -X,) 878 879 endif 880 + 881 + # insure the checker run with the right endianness 882 + CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian) 883 + 884 + # the checker needs the correct machine size 885 + CHECKFLAGS += $(if $(CONFIG_64BIT),-m64,-m32) 879 886 880 887 # Default kernel image to build when no specific target is given. 881 888 # KBUILD_IMAGE may be overruled on the command line or ··· 1768 1763 # Run depmod only if we have System.map and depmod is executable 1769 1764 quiet_cmd_depmod = DEPMOD $(KERNELRELEASE) 1770 1765 cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \ 1771 - $(KERNELRELEASE) "$(patsubst y,_,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))" 1766 + $(KERNELRELEASE) 1772 1767 1773 1768 # Create temporary dir for module support files 1774 1769 # clean it up only when building all modules
-21
arch/Kconfig
··· 597 597 598 598 endchoice 599 599 600 - config LD_DEAD_CODE_DATA_ELIMINATION 601 - bool 602 - help 603 - Select this if the architecture wants to do dead code and 604 - data elimination with the linker by compiling with 605 - -ffunction-sections -fdata-sections and linking with 606 - --gc-sections. 607 - 608 - This requires that the arch annotates or otherwise protects 609 - its external entry points from being discarded. Linker scripts 610 - must also merge .text.*, .data.*, and .bss.* correctly into 611 - output sections. Care must be taken not to pull in unrelated 612 - sections (e.g., '.text.init'). Typically '.' in section names 613 - is used to distinguish them from label names / C identifiers. 614 - 615 600 config HAVE_ARCH_WITHIN_STACK_FRAMES 616 601 bool 617 602 help ··· 671 686 help 672 687 Modules only use ELF REL relocations. Modules with ELF RELA 673 688 relocations will give an error. 674 - 675 - config HAVE_UNDERSCORE_SYMBOL_PREFIX 676 - bool 677 - help 678 - Some architectures generate an _ in front of C symbols; things like 679 - module loading and assembly files need to know about this. 680 689 681 690 config HAVE_IRQ_EXIT_ON_IRQ_STACK 682 691 bool
+1 -1
arch/alpha/Makefile
··· 11 11 NM := $(NM) -B 12 12 13 13 LDFLAGS_vmlinux := -static -N #-relax 14 - CHECKFLAGS += -D__alpha__ -m64 14 + CHECKFLAGS += -D__alpha__ 15 15 cflags-y := -pipe -mno-fp-regs -ffixed-8 16 16 cflags-y += $(call cc-option, -fno-jump-tables) 17 17
+1 -1
arch/arm/Makefile
··· 135 135 KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm 136 136 KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float 137 137 138 - CHECKFLAGS += -D__arm__ -m32 138 + CHECKFLAGS += -D__arm__ 139 139 140 140 #Default value 141 141 head-y := arch/arm/kernel/head$(MMUEXT).o
+1 -1
arch/arm64/Makefile
··· 78 78 UTS_MACHINE := aarch64 79 79 endif 80 80 81 - CHECKFLAGS += -D__aarch64__ -m64 81 + CHECKFLAGS += -D__aarch64__ 82 82 83 83 ifeq ($(CONFIG_ARM64_MODULE_PLTS),y) 84 84 KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
+1 -1
arch/ia64/Makefile
··· 18 18 19 19 export AWK 20 20 21 - CHECKFLAGS += -m64 -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ 21 + CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ 22 22 23 23 OBJCOPYFLAGS := --strip-all 24 24 LDFLAGS_vmlinux := -static
-3
arch/mips/Makefile
··· 309 309 CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ 310 310 egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ 311 311 sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g') 312 - ifdef CONFIG_64BIT 313 - CHECKFLAGS += -m64 314 - endif 315 312 endif 316 313 317 314 OBJCOPYFLAGS += --remove-section=.reginfo
-1
arch/openrisc/Makefile
··· 25 25 LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 26 26 27 27 KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ 28 - CHECKFLAGS += -mbig-endian 29 28 30 29 ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y) 31 30 KBUILD_CFLAGS += $(call cc-option,-mhard-mul)
+2 -2
arch/parisc/Makefile
··· 22 22 KBUILD_DEFCONFIG := default_defconfig 23 23 24 24 NM = sh $(srctree)/arch/parisc/nm 25 - CHECKFLAGS += -D__hppa__=1 -mbig-endian 25 + CHECKFLAGS += -D__hppa__=1 26 26 LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 27 27 export LIBGCC 28 28 29 29 ifdef CONFIG_64BIT 30 30 UTS_MACHINE := parisc64 31 - CHECKFLAGS += -D__LP64__=1 -m64 31 + CHECKFLAGS += -D__LP64__=1 32 32 CC_ARCHES = hppa64 33 33 LD_BFD := elf64-hppa-linux 34 34 else # 32-bit
+1
arch/powerpc/Kconfig
··· 198 198 select HAVE_KPROBES 199 199 select HAVE_KPROBES_ON_FTRACE 200 200 select HAVE_KRETPROBES 201 + select HAVE_LD_DEAD_CODE_DATA_ELIMINATION 201 202 select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS 202 203 select HAVE_MEMBLOCK 203 204 select HAVE_MEMBLOCK_NODE_MAP
+11 -11
arch/powerpc/kernel/vmlinux.lds.S
··· 89 89 */ 90 90 .text BLOCK(0) : AT(ADDR(.text) - LOAD_OFFSET) { 91 91 #ifdef CONFIG_LD_HEAD_STUB_CATCH 92 - *(.linker_stub_catch); 92 + KEEP(*(.linker_stub_catch)); 93 93 . = . ; 94 94 #endif 95 95 ··· 98 98 ALIGN_FUNCTION(); 99 99 #endif 100 100 /* careful! __ftr_alt_* sections need to be close to .text */ 101 - *(.text.hot .text .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text); 101 + *(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text); 102 102 SCHED_TEXT 103 103 CPUIDLE_TEXT 104 104 LOCK_TEXT ··· 184 184 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { 185 185 INIT_DATA 186 186 __vtop_table_begin = .; 187 - *(.vtop_fixup); 187 + KEEP(*(.vtop_fixup)); 188 188 __vtop_table_end = .; 189 189 __ptov_table_begin = .; 190 - *(.ptov_fixup); 190 + KEEP(*(.ptov_fixup)); 191 191 __ptov_table_end = .; 192 192 } 193 193 ··· 208 208 . = ALIGN(8); 209 209 __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) { 210 210 __start___ftr_fixup = .; 211 - *(__ftr_fixup) 211 + KEEP(*(__ftr_fixup)) 212 212 __stop___ftr_fixup = .; 213 213 } 214 214 . = ALIGN(8); 215 215 __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) { 216 216 __start___mmu_ftr_fixup = .; 217 - *(__mmu_ftr_fixup) 217 + KEEP(*(__mmu_ftr_fixup)) 218 218 __stop___mmu_ftr_fixup = .; 219 219 } 220 220 . = ALIGN(8); 221 221 __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) { 222 222 __start___lwsync_fixup = .; 223 - *(__lwsync_fixup) 223 + KEEP(*(__lwsync_fixup)) 224 224 __stop___lwsync_fixup = .; 225 225 } 226 226 #ifdef CONFIG_PPC64 227 227 . = ALIGN(8); 228 228 __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) { 229 229 __start___fw_ftr_fixup = .; 230 - *(__fw_ftr_fixup) 230 + KEEP(*(__fw_ftr_fixup)) 231 231 __stop___fw_ftr_fixup = .; 232 232 } 233 233 #endif ··· 240 240 . = ALIGN(8); 241 241 .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { 242 242 __machine_desc_start = . ; 243 - *(.machine.desc) 243 + KEEP(*(.machine.desc)) 244 244 __machine_desc_end = . 
; 245 245 } 246 246 #ifdef CONFIG_RELOCATABLE ··· 288 288 .data : AT(ADDR(.data) - LOAD_OFFSET) { 289 289 DATA_DATA 290 290 *(.data.rel*) 291 - *(.sdata) 291 + *(SDATA_MAIN) 292 292 *(.sdata2) 293 293 *(.got.plt) *(.got) 294 294 *(.plt) ··· 303 303 304 304 .opd : AT(ADDR(.opd) - LOAD_OFFSET) { 305 305 __start_opd = .; 306 - *(.opd) 306 + KEEP(*(.opd)) 307 307 __end_opd = .; 308 308 } 309 309
+1 -1
arch/s390/Makefile
··· 18 18 KBUILD_AFLAGS += -m64 19 19 UTS_MACHINE := s390x 20 20 STACK_SIZE := 16384 21 - CHECKFLAGS += -D__s390__ -D__s390x__ -mbig-endian 21 + CHECKFLAGS += -D__s390__ -D__s390x__ 22 22 23 23 export LD_BFD 24 24
+1 -1
arch/sparc/Makefile
··· 39 39 # sparc64 40 40 # 41 41 42 - CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64 42 + CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ 43 43 LDFLAGS := -m elf64_sparc 44 44 export BITS := 64 45 45 UTS_MACHINE := sparc64
+1 -1
arch/x86/Makefile
··· 94 94 else 95 95 BITS := 64 96 96 UTS_MACHINE := x86_64 97 - CHECKFLAGS += -D__x86_64__ -m64 97 + CHECKFLAGS += -D__x86_64__ 98 98 99 99 biarch := -m64 100 100 KBUILD_AFLAGS += -m64
+12 -22
include/asm-generic/export.h
··· 19 19 #define KCRC_ALIGN 4 20 20 #endif 21 21 22 - #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX 23 - #define KSYM(name) _##name 24 - #else 25 - #define KSYM(name) name 26 - #endif 27 - 28 22 /* 29 23 * note on .section use: @progbits vs %progbits nastiness doesn't matter, 30 24 * since we immediately emit into those sections anyway. 31 25 */ 32 26 .macro ___EXPORT_SYMBOL name,val,sec 33 27 #ifdef CONFIG_MODULES 34 - .globl KSYM(__ksymtab_\name) 28 + .globl __ksymtab_\name 35 29 .section ___ksymtab\sec+\name,"a" 36 30 .balign KSYM_ALIGN 37 - KSYM(__ksymtab_\name): 38 - __put \val, KSYM(__kstrtab_\name) 31 + __ksymtab_\name: 32 + __put \val, __kstrtab_\name 39 33 .previous 40 34 .section __ksymtab_strings,"a" 41 - KSYM(__kstrtab_\name): 42 - #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX 43 - .asciz "_\name" 44 - #else 35 + __kstrtab_\name: 45 36 .asciz "\name" 46 - #endif 47 37 .previous 48 38 #ifdef CONFIG_MODVERSIONS 49 39 .section ___kcrctab\sec+\name,"a" 50 40 .balign KCRC_ALIGN 51 - KSYM(__kcrctab_\name): 41 + __kcrctab_\name: 52 42 #if defined(CONFIG_MODULE_REL_CRCS) 53 - .long KSYM(__crc_\name) - . 43 + .long __crc_\name - . 54 44 #else 55 - .long KSYM(__crc_\name) 45 + .long __crc_\name 56 46 #endif 57 - .weak KSYM(__crc_\name) 47 + .weak __crc_\name 58 48 .previous 59 49 #endif 60 50 #endif ··· 74 84 #endif 75 85 76 86 #define EXPORT_SYMBOL(name) \ 77 - __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),) 87 + __EXPORT_SYMBOL(name, KSYM_FUNC(name),) 78 88 #define EXPORT_SYMBOL_GPL(name) \ 79 - __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl) 89 + __EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl) 80 90 #define EXPORT_DATA_SYMBOL(name) \ 81 - __EXPORT_SYMBOL(name, KSYM(name),) 91 + __EXPORT_SYMBOL(name, name,) 82 92 #define EXPORT_DATA_SYMBOL_GPL(name) \ 83 - __EXPORT_SYMBOL(name, KSYM(name),_gpl) 93 + __EXPORT_SYMBOL(name, name,_gpl) 84 94 85 95 #endif
+171 -163
include/asm-generic/vmlinux.lds.h
··· 64 64 * generates .data.identifier sections, which need to be pulled in with 65 65 * .data. We don't want to pull in .data..other sections, which Linux 66 66 * has defined. Same for text and bss. 67 + * 68 + * RODATA_MAIN is not used because existing code already defines .rodata.x 69 + * sections to be brought in with rodata. 67 70 */ 68 71 #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION 69 72 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* 70 73 #define DATA_MAIN .data .data.[0-9a-zA-Z_]* 74 + #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* 75 + #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* 71 76 #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* 77 + #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* 72 78 #else 73 79 #define TEXT_MAIN .text 74 80 #define DATA_MAIN .data 81 + #define SDATA_MAIN .sdata 82 + #define RODATA_MAIN .rodata 75 83 #define BSS_MAIN .bss 84 + #define SBSS_MAIN .sbss 76 85 #endif 77 86 78 87 /* ··· 113 104 114 105 #ifdef CONFIG_FTRACE_MCOUNT_RECORD 115 106 #define MCOUNT_REC() . = ALIGN(8); \ 116 - VMLINUX_SYMBOL(__start_mcount_loc) = .; \ 117 - *(__mcount_loc) \ 118 - VMLINUX_SYMBOL(__stop_mcount_loc) = .; 107 + __start_mcount_loc = .; \ 108 + KEEP(*(__mcount_loc)) \ 109 + __stop_mcount_loc = .; 119 110 #else 120 111 #define MCOUNT_REC() 121 112 #endif 122 113 123 114 #ifdef CONFIG_TRACE_BRANCH_PROFILING 124 - #define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \ 125 - *(_ftrace_annotated_branch) \ 126 - VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .; 115 + #define LIKELY_PROFILE() __start_annotated_branch_profile = .; \ 116 + KEEP(*(_ftrace_annotated_branch)) \ 117 + __stop_annotated_branch_profile = .; 127 118 #else 128 119 #define LIKELY_PROFILE() 129 120 #endif 130 121 131 122 #ifdef CONFIG_PROFILE_ALL_BRANCHES 132 - #define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \ 133 - *(_ftrace_branch) \ 134 - VMLINUX_SYMBOL(__stop_branch_profile) = .; 123 + #define BRANCH_PROFILE() __start_branch_profile = .; \ 
124 + KEEP(*(_ftrace_branch)) \ 125 + __stop_branch_profile = .; 135 126 #else 136 127 #define BRANCH_PROFILE() 137 128 #endif 138 129 139 130 #ifdef CONFIG_KPROBES 140 131 #define KPROBE_BLACKLIST() . = ALIGN(8); \ 141 - VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \ 132 + __start_kprobe_blacklist = .; \ 142 133 KEEP(*(_kprobe_blacklist)) \ 143 - VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .; 134 + __stop_kprobe_blacklist = .; 144 135 #else 145 136 #define KPROBE_BLACKLIST() 146 137 #endif 147 138 148 139 #ifdef CONFIG_FUNCTION_ERROR_INJECTION 149 140 #define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \ 150 - VMLINUX_SYMBOL(__start_error_injection_whitelist) = .;\ 141 + __start_error_injection_whitelist = .; \ 151 142 KEEP(*(_error_injection_whitelist)) \ 152 - VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .; 143 + __stop_error_injection_whitelist = .; 153 144 #else 154 145 #define ERROR_INJECT_WHITELIST() 155 146 #endif 156 147 157 148 #ifdef CONFIG_EVENT_TRACING 158 149 #define FTRACE_EVENTS() . 
= ALIGN(8); \ 159 - VMLINUX_SYMBOL(__start_ftrace_events) = .; \ 150 + __start_ftrace_events = .; \ 160 151 KEEP(*(_ftrace_events)) \ 161 - VMLINUX_SYMBOL(__stop_ftrace_events) = .; \ 162 - VMLINUX_SYMBOL(__start_ftrace_eval_maps) = .; \ 152 + __stop_ftrace_events = .; \ 153 + __start_ftrace_eval_maps = .; \ 163 154 KEEP(*(_ftrace_eval_map)) \ 164 - VMLINUX_SYMBOL(__stop_ftrace_eval_maps) = .; 155 + __stop_ftrace_eval_maps = .; 165 156 #else 166 157 #define FTRACE_EVENTS() 167 158 #endif 168 159 169 160 #ifdef CONFIG_TRACING 170 - #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \ 161 + #define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \ 171 162 KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \ 172 - VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .; 173 - #define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \ 163 + __stop___trace_bprintk_fmt = .; 164 + #define TRACEPOINT_STR() __start___tracepoint_str = .; \ 174 165 KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \ 175 - VMLINUX_SYMBOL(__stop___tracepoint_str) = .; 166 + __stop___tracepoint_str = .; 176 167 #else 177 168 #define TRACE_PRINTKS() 178 169 #define TRACEPOINT_STR() ··· 180 171 181 172 #ifdef CONFIG_FTRACE_SYSCALLS 182 173 #define TRACE_SYSCALLS() . 
= ALIGN(8); \ 183 - VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \ 174 + __start_syscalls_metadata = .; \ 184 175 KEEP(*(__syscalls_metadata)) \ 185 - VMLINUX_SYMBOL(__stop_syscalls_metadata) = .; 176 + __stop_syscalls_metadata = .; 186 177 #else 187 178 #define TRACE_SYSCALLS() 188 179 #endif 189 180 190 181 #ifdef CONFIG_BPF_EVENTS 191 182 #define BPF_RAW_TP() STRUCT_ALIGN(); \ 192 - VMLINUX_SYMBOL(__start__bpf_raw_tp) = .; \ 183 + __start__bpf_raw_tp = .; \ 193 184 KEEP(*(__bpf_raw_tp_map)) \ 194 - VMLINUX_SYMBOL(__stop__bpf_raw_tp) = .; 185 + __stop__bpf_raw_tp = .; 195 186 #else 196 187 #define BPF_RAW_TP() 197 188 #endif 198 189 199 190 #ifdef CONFIG_SERIAL_EARLYCON 200 191 #define EARLYCON_TABLE() . = ALIGN(8); \ 201 - VMLINUX_SYMBOL(__earlycon_table) = .; \ 192 + __earlycon_table = .; \ 202 193 KEEP(*(__earlycon_table)) \ 203 - VMLINUX_SYMBOL(__earlycon_table_end) = .; 194 + __earlycon_table_end = .; 204 195 #else 205 196 #define EARLYCON_TABLE() 206 197 #endif ··· 211 202 #define _OF_TABLE_0(name) 212 203 #define _OF_TABLE_1(name) \ 213 204 . = ALIGN(8); \ 214 - VMLINUX_SYMBOL(__##name##_of_table) = .; \ 205 + __##name##_of_table = .; \ 215 206 KEEP(*(__##name##_of_table)) \ 216 207 KEEP(*(__##name##_of_table_end)) 217 208 ··· 226 217 #ifdef CONFIG_ACPI 227 218 #define ACPI_PROBE_TABLE(name) \ 228 219 . 
= ALIGN(8); \ 229 - VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .; \ 220 + __##name##_acpi_probe_table = .; \ 230 221 KEEP(*(__##name##_acpi_probe_table)) \ 231 - VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .; 222 + __##name##_acpi_probe_table_end = .; 232 223 #else 233 224 #define ACPI_PROBE_TABLE(name) 234 225 #endif 235 226 236 227 #define KERNEL_DTB() \ 237 228 STRUCT_ALIGN(); \ 238 - VMLINUX_SYMBOL(__dtb_start) = .; \ 229 + __dtb_start = .; \ 239 230 KEEP(*(.dtb.init.rodata)) \ 240 - VMLINUX_SYMBOL(__dtb_end) = .; 231 + __dtb_end = .; 241 232 242 233 /* 243 234 * .data section ··· 247 238 *(DATA_MAIN) \ 248 239 *(.ref.data) \ 249 240 *(.data..shared_aligned) /* percpu related */ \ 250 - MEM_KEEP(init.data) \ 251 - MEM_KEEP(exit.data) \ 241 + MEM_KEEP(init.data*) \ 242 + MEM_KEEP(exit.data*) \ 252 243 *(.data.unlikely) \ 253 - VMLINUX_SYMBOL(__start_once) = .; \ 244 + __start_once = .; \ 254 245 *(.data.once) \ 255 - VMLINUX_SYMBOL(__end_once) = .; \ 246 + __end_once = .; \ 256 247 STRUCT_ALIGN(); \ 257 248 *(__tracepoints) \ 258 249 /* implement dynamic printk debug */ \ 259 250 . = ALIGN(8); \ 260 - VMLINUX_SYMBOL(__start___jump_table) = .; \ 251 + __start___jump_table = .; \ 261 252 KEEP(*(__jump_table)) \ 262 - VMLINUX_SYMBOL(__stop___jump_table) = .; \ 253 + __stop___jump_table = .; \ 263 254 . = ALIGN(8); \ 264 - VMLINUX_SYMBOL(__start___verbose) = .; \ 255 + __start___verbose = .; \ 265 256 KEEP(*(__verbose)) \ 266 - VMLINUX_SYMBOL(__stop___verbose) = .; \ 257 + __stop___verbose = .; \ 267 258 LIKELY_PROFILE() \ 268 259 BRANCH_PROFILE() \ 269 260 TRACE_PRINTKS() \ ··· 275 266 */ 276 267 #define NOSAVE_DATA \ 277 268 . = ALIGN(PAGE_SIZE); \ 278 - VMLINUX_SYMBOL(__nosave_begin) = .; \ 269 + __nosave_begin = .; \ 279 270 *(.data..nosave) \ 280 271 . = ALIGN(PAGE_SIZE); \ 281 - VMLINUX_SYMBOL(__nosave_end) = .; 272 + __nosave_end = .; 282 273 283 274 #define PAGE_ALIGNED_DATA(page_align) \ 284 275 . 
= ALIGN(page_align); \ ··· 295 286 296 287 #define INIT_TASK_DATA(align) \ 297 288 . = ALIGN(align); \ 298 - VMLINUX_SYMBOL(__start_init_task) = .; \ 299 - VMLINUX_SYMBOL(init_thread_union) = .; \ 300 - VMLINUX_SYMBOL(init_stack) = .; \ 301 - *(.data..init_task) \ 302 - *(.data..init_thread_info) \ 303 - . = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE; \ 304 - VMLINUX_SYMBOL(__end_init_task) = .; 289 + __start_init_task = .; \ 290 + init_thread_union = .; \ 291 + init_stack = .; \ 292 + KEEP(*(.data..init_task)) \ 293 + KEEP(*(.data..init_thread_info)) \ 294 + . = __start_init_task + THREAD_SIZE; \ 295 + __end_init_task = .; 305 296 306 297 /* 307 298 * Allow architectures to handle ro_after_init data on their ··· 309 300 */ 310 301 #ifndef RO_AFTER_INIT_DATA 311 302 #define RO_AFTER_INIT_DATA \ 312 - VMLINUX_SYMBOL(__start_ro_after_init) = .; \ 303 + __start_ro_after_init = .; \ 313 304 *(.data..ro_after_init) \ 314 - VMLINUX_SYMBOL(__end_ro_after_init) = .; 305 + __end_ro_after_init = .; 315 306 #endif 316 307 317 308 /* ··· 320 311 #define RO_DATA_SECTION(align) \ 321 312 . = ALIGN((align)); \ 322 313 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ 323 - VMLINUX_SYMBOL(__start_rodata) = .; \ 314 + __start_rodata = .; \ 324 315 *(.rodata) *(.rodata.*) \ 325 316 RO_AFTER_INIT_DATA /* Read only after init */ \ 326 317 KEEP(*(__vermagic)) /* Kernel version magic */ \ 327 318 . 
= ALIGN(8); \ 328 - VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \ 319 + __start___tracepoints_ptrs = .; \ 329 320 KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \ 330 - VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \ 321 + __stop___tracepoints_ptrs = .; \ 331 322 *(__tracepoints_strings)/* Tracepoints: strings */ \ 332 323 } \ 333 324 \ ··· 337 328 \ 338 329 /* PCI quirks */ \ 339 330 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ 340 - VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ 331 + __start_pci_fixups_early = .; \ 341 332 KEEP(*(.pci_fixup_early)) \ 342 - VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \ 343 - VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \ 333 + __end_pci_fixups_early = .; \ 334 + __start_pci_fixups_header = .; \ 344 335 KEEP(*(.pci_fixup_header)) \ 345 - VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \ 346 - VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \ 336 + __end_pci_fixups_header = .; \ 337 + __start_pci_fixups_final = .; \ 347 338 KEEP(*(.pci_fixup_final)) \ 348 - VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \ 349 - VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \ 339 + __end_pci_fixups_final = .; \ 340 + __start_pci_fixups_enable = .; \ 350 341 KEEP(*(.pci_fixup_enable)) \ 351 - VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \ 352 - VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \ 342 + __end_pci_fixups_enable = .; \ 343 + __start_pci_fixups_resume = .; \ 353 344 KEEP(*(.pci_fixup_resume)) \ 354 - VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \ 355 - VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \ 345 + __end_pci_fixups_resume = .; \ 346 + __start_pci_fixups_resume_early = .; \ 356 347 KEEP(*(.pci_fixup_resume_early)) \ 357 - VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \ 358 - VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \ 348 + __end_pci_fixups_resume_early = .; \ 349 + __start_pci_fixups_suspend = .; \ 359 350 KEEP(*(.pci_fixup_suspend)) \ 360 - VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \ 361 - 
VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \ 351 + __end_pci_fixups_suspend = .; \ 352 + __start_pci_fixups_suspend_late = .; \ 362 353 KEEP(*(.pci_fixup_suspend_late)) \ 363 - VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \ 354 + __end_pci_fixups_suspend_late = .; \ 364 355 } \ 365 356 \ 366 357 /* Built-in firmware blobs */ \ 367 358 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \ 368 - VMLINUX_SYMBOL(__start_builtin_fw) = .; \ 359 + __start_builtin_fw = .; \ 369 360 KEEP(*(.builtin_fw)) \ 370 - VMLINUX_SYMBOL(__end_builtin_fw) = .; \ 361 + __end_builtin_fw = .; \ 371 362 } \ 372 363 \ 373 364 TRACEDATA \ 374 365 \ 375 366 /* Kernel symbol table: Normal symbols */ \ 376 367 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ 377 - VMLINUX_SYMBOL(__start___ksymtab) = .; \ 368 + __start___ksymtab = .; \ 378 369 KEEP(*(SORT(___ksymtab+*))) \ 379 - VMLINUX_SYMBOL(__stop___ksymtab) = .; \ 370 + __stop___ksymtab = .; \ 380 371 } \ 381 372 \ 382 373 /* Kernel symbol table: GPL-only symbols */ \ 383 374 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ 384 - VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ 375 + __start___ksymtab_gpl = .; \ 385 376 KEEP(*(SORT(___ksymtab_gpl+*))) \ 386 - VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ 377 + __stop___ksymtab_gpl = .; \ 387 378 } \ 388 379 \ 389 380 /* Kernel symbol table: Normal unused symbols */ \ 390 381 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ 391 - VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ 382 + __start___ksymtab_unused = .; \ 392 383 KEEP(*(SORT(___ksymtab_unused+*))) \ 393 - VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ 384 + __stop___ksymtab_unused = .; \ 394 385 } \ 395 386 \ 396 387 /* Kernel symbol table: GPL-only unused symbols */ \ 397 388 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ 398 - VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ 389 + __start___ksymtab_unused_gpl = .; \ 399 390 KEEP(*(SORT(___ksymtab_unused_gpl+*))) \ 400 - 
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ 391 + __stop___ksymtab_unused_gpl = .; \ 401 392 } \ 402 393 \ 403 394 /* Kernel symbol table: GPL-future-only symbols */ \ 404 395 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ 405 - VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ 396 + __start___ksymtab_gpl_future = .; \ 406 397 KEEP(*(SORT(___ksymtab_gpl_future+*))) \ 407 - VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ 398 + __stop___ksymtab_gpl_future = .; \ 408 399 } \ 409 400 \ 410 401 /* Kernel symbol table: Normal symbols */ \ 411 402 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ 412 - VMLINUX_SYMBOL(__start___kcrctab) = .; \ 403 + __start___kcrctab = .; \ 413 404 KEEP(*(SORT(___kcrctab+*))) \ 414 - VMLINUX_SYMBOL(__stop___kcrctab) = .; \ 405 + __stop___kcrctab = .; \ 415 406 } \ 416 407 \ 417 408 /* Kernel symbol table: GPL-only symbols */ \ 418 409 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ 419 - VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ 410 + __start___kcrctab_gpl = .; \ 420 411 KEEP(*(SORT(___kcrctab_gpl+*))) \ 421 - VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ 412 + __stop___kcrctab_gpl = .; \ 422 413 } \ 423 414 \ 424 415 /* Kernel symbol table: Normal unused symbols */ \ 425 416 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ 426 - VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ 417 + __start___kcrctab_unused = .; \ 427 418 KEEP(*(SORT(___kcrctab_unused+*))) \ 428 - VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ 419 + __stop___kcrctab_unused = .; \ 429 420 } \ 430 421 \ 431 422 /* Kernel symbol table: GPL-only unused symbols */ \ 432 423 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ 433 - VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ 424 + __start___kcrctab_unused_gpl = .; \ 434 425 KEEP(*(SORT(___kcrctab_unused_gpl+*))) \ 435 - VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ 426 + __stop___kcrctab_unused_gpl = .; \ 436 427 } \ 437 428 \ 438 429 
/* Kernel symbol table: GPL-future-only symbols */ \ 439 430 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ 440 - VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ 431 + __start___kcrctab_gpl_future = .; \ 441 432 KEEP(*(SORT(___kcrctab_gpl_future+*))) \ 442 - VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ 433 + __stop___kcrctab_gpl_future = .; \ 443 434 } \ 444 435 \ 445 436 /* Kernel symbol table: strings */ \ ··· 456 447 \ 457 448 /* Built-in module parameters. */ \ 458 449 __param : AT(ADDR(__param) - LOAD_OFFSET) { \ 459 - VMLINUX_SYMBOL(__start___param) = .; \ 450 + __start___param = .; \ 460 451 KEEP(*(__param)) \ 461 - VMLINUX_SYMBOL(__stop___param) = .; \ 452 + __stop___param = .; \ 462 453 } \ 463 454 \ 464 455 /* Built-in module versions. */ \ 465 456 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \ 466 - VMLINUX_SYMBOL(__start___modver) = .; \ 457 + __start___modver = .; \ 467 458 KEEP(*(__modver)) \ 468 - VMLINUX_SYMBOL(__stop___modver) = .; \ 459 + __stop___modver = .; \ 469 460 . = ALIGN((align)); \ 470 - VMLINUX_SYMBOL(__end_rodata) = .; \ 461 + __end_rodata = .; \ 471 462 } \ 472 463 . 
= ALIGN((align)); 473 464 ··· 478 469 479 470 #define SECURITY_INIT \ 480 471 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ 481 - VMLINUX_SYMBOL(__security_initcall_start) = .; \ 472 + __security_initcall_start = .; \ 482 473 KEEP(*(.security_initcall.init)) \ 483 - VMLINUX_SYMBOL(__security_initcall_end) = .; \ 474 + __security_initcall_end = .; \ 484 475 } 485 476 486 477 /* ··· 496 487 *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ 497 488 *(.text..refcount) \ 498 489 *(.ref.text) \ 499 - MEM_KEEP(init.text) \ 500 - MEM_KEEP(exit.text) \ 490 + MEM_KEEP(init.text*) \ 491 + MEM_KEEP(exit.text*) \ 501 492 502 493 503 494 /* sched.text is aling to function alignment to secure we have same 504 495 * address even at second ld pass when generating System.map */ 505 496 #define SCHED_TEXT \ 506 497 ALIGN_FUNCTION(); \ 507 - VMLINUX_SYMBOL(__sched_text_start) = .; \ 498 + __sched_text_start = .; \ 508 499 *(.sched.text) \ 509 - VMLINUX_SYMBOL(__sched_text_end) = .; 500 + __sched_text_end = .; 510 501 511 502 /* spinlock.text is aling to function alignment to secure we have same 512 503 * address even at second ld pass when generating System.map */ 513 504 #define LOCK_TEXT \ 514 505 ALIGN_FUNCTION(); \ 515 - VMLINUX_SYMBOL(__lock_text_start) = .; \ 506 + __lock_text_start = .; \ 516 507 *(.spinlock.text) \ 517 - VMLINUX_SYMBOL(__lock_text_end) = .; 508 + __lock_text_end = .; 518 509 519 510 #define CPUIDLE_TEXT \ 520 511 ALIGN_FUNCTION(); \ 521 - VMLINUX_SYMBOL(__cpuidle_text_start) = .; \ 512 + __cpuidle_text_start = .; \ 522 513 *(.cpuidle.text) \ 523 - VMLINUX_SYMBOL(__cpuidle_text_end) = .; 514 + __cpuidle_text_end = .; 524 515 525 516 #define KPROBES_TEXT \ 526 517 ALIGN_FUNCTION(); \ 527 - VMLINUX_SYMBOL(__kprobes_text_start) = .; \ 518 + __kprobes_text_start = .; \ 528 519 *(.kprobes.text) \ 529 - VMLINUX_SYMBOL(__kprobes_text_end) = .; 520 + __kprobes_text_end = .; 530 521 531 522 #define ENTRY_TEXT \ 532 523 ALIGN_FUNCTION(); 
\ 533 - VMLINUX_SYMBOL(__entry_text_start) = .; \ 524 + __entry_text_start = .; \ 534 525 *(.entry.text) \ 535 - VMLINUX_SYMBOL(__entry_text_end) = .; 526 + __entry_text_end = .; 536 527 537 528 #define IRQENTRY_TEXT \ 538 529 ALIGN_FUNCTION(); \ 539 - VMLINUX_SYMBOL(__irqentry_text_start) = .; \ 530 + __irqentry_text_start = .; \ 540 531 *(.irqentry.text) \ 541 - VMLINUX_SYMBOL(__irqentry_text_end) = .; 532 + __irqentry_text_end = .; 542 533 543 534 #define SOFTIRQENTRY_TEXT \ 544 535 ALIGN_FUNCTION(); \ 545 - VMLINUX_SYMBOL(__softirqentry_text_start) = .; \ 536 + __softirqentry_text_start = .; \ 546 537 *(.softirqentry.text) \ 547 - VMLINUX_SYMBOL(__softirqentry_text_end) = .; 538 + __softirqentry_text_end = .; 548 539 549 540 /* Section used for early init (in .S files) */ 550 - #define HEAD_TEXT *(.head.text) 541 + #define HEAD_TEXT KEEP(*(.head.text)) 551 542 552 543 #define HEAD_TEXT_SECTION \ 553 544 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \ ··· 560 551 #define EXCEPTION_TABLE(align) \ 561 552 . = ALIGN(align); \ 562 553 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \ 563 - VMLINUX_SYMBOL(__start___ex_table) = .; \ 554 + __start___ex_table = .; \ 564 555 KEEP(*(__ex_table)) \ 565 - VMLINUX_SYMBOL(__stop___ex_table) = .; \ 556 + __stop___ex_table = .; \ 566 557 } 567 558 568 559 /* ··· 576 567 577 568 #ifdef CONFIG_CONSTRUCTORS 578 569 #define KERNEL_CTORS() . 
= ALIGN(8); \ 579 - VMLINUX_SYMBOL(__ctors_start) = .; \ 570 + __ctors_start = .; \ 580 571 KEEP(*(.ctors)) \ 581 572 KEEP(*(SORT(.init_array.*))) \ 582 573 KEEP(*(.init_array)) \ 583 - VMLINUX_SYMBOL(__ctors_end) = .; 574 + __ctors_end = .; 584 575 #else 585 576 #define KERNEL_CTORS() 586 577 #endif ··· 588 579 /* init and exit section handling */ 589 580 #define INIT_DATA \ 590 581 KEEP(*(SORT(___kentry+*))) \ 591 - *(.init.data) \ 592 - MEM_DISCARD(init.data) \ 582 + *(.init.data init.data.*) \ 583 + MEM_DISCARD(init.data*) \ 593 584 KERNEL_CTORS() \ 594 585 MCOUNT_REC() \ 595 - *(.init.rodata) \ 586 + *(.init.rodata .init.rodata.*) \ 596 587 FTRACE_EVENTS() \ 597 588 TRACE_SYSCALLS() \ 598 589 KPROBE_BLACKLIST() \ ··· 611 602 EARLYCON_TABLE() 612 603 613 604 #define INIT_TEXT \ 614 - *(.init.text) \ 605 + *(.init.text .init.text.*) \ 615 606 *(.text.startup) \ 616 - MEM_DISCARD(init.text) 607 + MEM_DISCARD(init.text*) 617 608 618 609 #define EXIT_DATA \ 619 - *(.exit.data) \ 610 + *(.exit.data .exit.data.*) \ 620 611 *(.fini_array) \ 621 612 *(.dtors) \ 622 - MEM_DISCARD(exit.data) \ 623 - MEM_DISCARD(exit.rodata) 613 + MEM_DISCARD(exit.data*) \ 614 + MEM_DISCARD(exit.rodata*) 624 615 625 616 #define EXIT_TEXT \ 626 617 *(.exit.text) \ ··· 638 629 . = ALIGN(sbss_align); \ 639 630 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ 640 631 *(.dynsbss) \ 641 - *(.sbss) \ 632 + *(SBSS_MAIN) \ 642 633 *(.scommon) \ 643 634 } 644 635 ··· 715 706 #define BUG_TABLE \ 716 707 . = ALIGN(8); \ 717 708 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ 718 - VMLINUX_SYMBOL(__start___bug_table) = .; \ 709 + __start___bug_table = .; \ 719 710 KEEP(*(__bug_table)) \ 720 - VMLINUX_SYMBOL(__stop___bug_table) = .; \ 711 + __stop___bug_table = .; \ 721 712 } 722 713 #else 723 714 #define BUG_TABLE ··· 727 718 #define ORC_UNWIND_TABLE \ 728 719 . 
= ALIGN(4); \ 729 720 .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \ 730 - VMLINUX_SYMBOL(__start_orc_unwind_ip) = .; \ 721 + __start_orc_unwind_ip = .; \ 731 722 KEEP(*(.orc_unwind_ip)) \ 732 - VMLINUX_SYMBOL(__stop_orc_unwind_ip) = .; \ 723 + __stop_orc_unwind_ip = .; \ 733 724 } \ 734 725 . = ALIGN(6); \ 735 726 .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \ 736 - VMLINUX_SYMBOL(__start_orc_unwind) = .; \ 727 + __start_orc_unwind = .; \ 737 728 KEEP(*(.orc_unwind)) \ 738 - VMLINUX_SYMBOL(__stop_orc_unwind) = .; \ 729 + __stop_orc_unwind = .; \ 739 730 } \ 740 731 . = ALIGN(4); \ 741 732 .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \ 742 - VMLINUX_SYMBOL(orc_lookup) = .; \ 733 + orc_lookup = .; \ 743 734 . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \ 744 735 LOOKUP_BLOCK_SIZE) + 1) * 4; \ 745 - VMLINUX_SYMBOL(orc_lookup_end) = .; \ 736 + orc_lookup_end = .; \ 746 737 } 747 738 #else 748 739 #define ORC_UNWIND_TABLE ··· 752 743 #define TRACEDATA \ 753 744 . = ALIGN(4); \ 754 745 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \ 755 - VMLINUX_SYMBOL(__tracedata_start) = .; \ 746 + __tracedata_start = .; \ 756 747 KEEP(*(.tracedata)) \ 757 - VMLINUX_SYMBOL(__tracedata_end) = .; \ 748 + __tracedata_end = .; \ 758 749 } 759 750 #else 760 751 #define TRACEDATA ··· 762 753 763 754 #define NOTES \ 764 755 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ 765 - VMLINUX_SYMBOL(__start_notes) = .; \ 766 - *(.note.*) \ 767 - VMLINUX_SYMBOL(__stop_notes) = .; \ 756 + __start_notes = .; \ 757 + KEEP(*(.note.*)) \ 758 + __stop_notes = .; \ 768 759 } 769 760 770 761 #define INIT_SETUP(initsetup_align) \ 771 762 . 
= ALIGN(initsetup_align); \ 772 - VMLINUX_SYMBOL(__setup_start) = .; \ 763 + __setup_start = .; \ 773 764 KEEP(*(.init.setup)) \ 774 - VMLINUX_SYMBOL(__setup_end) = .; 765 + __setup_end = .; 775 766 776 767 #define INIT_CALLS_LEVEL(level) \ 777 - VMLINUX_SYMBOL(__initcall##level##_start) = .; \ 768 + __initcall##level##_start = .; \ 778 769 KEEP(*(.initcall##level##.init)) \ 779 770 KEEP(*(.initcall##level##s.init)) \ 780 771 781 772 #define INIT_CALLS \ 782 - VMLINUX_SYMBOL(__initcall_start) = .; \ 773 + __initcall_start = .; \ 783 774 KEEP(*(.initcallearly.init)) \ 784 775 INIT_CALLS_LEVEL(0) \ 785 776 INIT_CALLS_LEVEL(1) \ ··· 790 781 INIT_CALLS_LEVEL(rootfs) \ 791 782 INIT_CALLS_LEVEL(6) \ 792 783 INIT_CALLS_LEVEL(7) \ 793 - VMLINUX_SYMBOL(__initcall_end) = .; 784 + __initcall_end = .; 794 785 795 786 #define CON_INITCALL \ 796 - VMLINUX_SYMBOL(__con_initcall_start) = .; \ 787 + __con_initcall_start = .; \ 797 788 KEEP(*(.con_initcall.init)) \ 798 - VMLINUX_SYMBOL(__con_initcall_end) = .; 789 + __con_initcall_end = .; 799 790 800 791 #define SECURITY_INITCALL \ 801 - VMLINUX_SYMBOL(__security_initcall_start) = .; \ 792 + __security_initcall_start = .; \ 802 793 KEEP(*(.security_initcall.init)) \ 803 - VMLINUX_SYMBOL(__security_initcall_end) = .; 794 + __security_initcall_end = .; 804 795 805 796 #ifdef CONFIG_BLK_DEV_INITRD 806 797 #define INIT_RAM_FS \ 807 798 . = ALIGN(4); \ 808 - VMLINUX_SYMBOL(__initramfs_start) = .; \ 799 + __initramfs_start = .; \ 809 800 KEEP(*(.init.ramfs)) \ 810 801 . = ALIGN(8); \ 811 802 KEEP(*(.init.ramfs.info)) ··· 860 851 * sharing between subsections for different purposes. 861 852 */ 862 853 #define PERCPU_INPUT(cacheline) \ 863 - VMLINUX_SYMBOL(__per_cpu_start) = .; \ 854 + __per_cpu_start = .; \ 864 855 *(.data..percpu..first) \ 865 856 . 
= ALIGN(PAGE_SIZE); \ 866 857 *(.data..percpu..page_aligned) \ ··· 870 861 *(.data..percpu) \ 871 862 *(.data..percpu..shared_aligned) \ 872 863 PERCPU_DECRYPTED_SECTION \ 873 - VMLINUX_SYMBOL(__per_cpu_end) = .; 864 + __per_cpu_end = .; 874 865 875 866 /** 876 867 * PERCPU_VADDR - define output section for percpu area ··· 897 888 * address, use PERCPU_SECTION. 898 889 */ 899 890 #define PERCPU_VADDR(cacheline, vaddr, phdr) \ 900 - VMLINUX_SYMBOL(__per_cpu_load) = .; \ 901 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ 902 - - LOAD_OFFSET) { \ 891 + __per_cpu_load = .; \ 892 + .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \ 903 893 PERCPU_INPUT(cacheline) \ 904 894 } phdr \ 905 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); 895 + . = __per_cpu_load + SIZEOF(.data..percpu); 906 896 907 897 /** 908 898 * PERCPU_SECTION - define output section for percpu area, simple version ··· 918 910 #define PERCPU_SECTION(cacheline) \ 919 911 . = ALIGN(PAGE_SIZE); \ 920 912 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ 921 - VMLINUX_SYMBOL(__per_cpu_load) = .; \ 913 + __per_cpu_load = .; \ 922 914 PERCPU_INPUT(cacheline) \ 923 915 } 924 916 ··· 957 949 #define INIT_TEXT_SECTION(inittext_align) \ 958 950 . = ALIGN(inittext_align); \ 959 951 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \ 960 - VMLINUX_SYMBOL(_sinittext) = .; \ 952 + _sinittext = .; \ 961 953 INIT_TEXT \ 962 - VMLINUX_SYMBOL(_einittext) = .; \ 954 + _einittext = .; \ 963 955 } 964 956 965 957 #define INIT_DATA_SECTION(initsetup_align) \ ··· 974 966 975 967 #define BSS_SECTION(sbss_align, bss_align, stop_align) \ 976 968 . = ALIGN(sbss_align); \ 977 - VMLINUX_SYMBOL(__bss_start) = .; \ 969 + __bss_start = .; \ 978 970 SBSS(sbss_align) \ 979 971 BSS(bss_align) \ 980 972 . = ALIGN(stop_align); \ 981 - VMLINUX_SYMBOL(__bss_stop) = .; 973 + __bss_stop = .;
+5 -11
include/linux/export.h
··· 10 10 * hackers place grumpy comments in header files. 11 11 */ 12 12 13 - /* Some toolchains use a `_' prefix for all user symbols. */ 14 - #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX 15 - #define __VMLINUX_SYMBOL(x) _##x 16 - #define __VMLINUX_SYMBOL_STR(x) "_" #x 17 - #else 18 13 #define __VMLINUX_SYMBOL(x) x 19 14 #define __VMLINUX_SYMBOL_STR(x) #x 20 - #endif 21 15 22 16 /* Indirect, so macros are expanded before pasting. */ 23 17 #define VMLINUX_SYMBOL(x) __VMLINUX_SYMBOL(x) ··· 40 46 #if defined(CONFIG_MODULE_REL_CRCS) 41 47 #define __CRC_SYMBOL(sym, sec) \ 42 48 asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ 43 - " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \ 44 - " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \ 49 + " .weak __crc_" #sym " \n" \ 50 + " .long __crc_" #sym " - . \n" \ 45 51 " .previous \n"); 46 52 #else 47 53 #define __CRC_SYMBOL(sym, sec) \ 48 54 asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ 49 - " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \ 50 - " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \ 55 + " .weak __crc_" #sym " \n" \ 56 + " .long __crc_" #sym " \n" \ 51 57 " .previous \n"); 52 58 #endif 53 59 #else ··· 60 66 __CRC_SYMBOL(sym, sec) \ 61 67 static const char __kstrtab_##sym[] \ 62 68 __attribute__((section("__ksymtab_strings"), aligned(1))) \ 63 - = VMLINUX_SYMBOL_STR(sym); \ 69 + = #sym; \ 64 70 static const struct kernel_symbol __ksymtab_##sym \ 65 71 __used \ 66 72 __attribute__((section("___ksymtab" sec "+" #sym), used)) \
+27
init/Kconfig
··· 1038 1038 1039 1039 endchoice 1040 1040 1041 + config HAVE_LD_DEAD_CODE_DATA_ELIMINATION 1042 + bool 1043 + help 1044 + This requires that the arch annotates or otherwise protects 1045 + its external entry points from being discarded. Linker scripts 1046 + must also merge .text.*, .data.*, and .bss.* correctly into 1047 + output sections. Care must be taken not to pull in unrelated 1048 + sections (e.g., '.text.init'). Typically '.' in section names 1049 + is used to distinguish them from label names / C identifiers. 1050 + 1051 + config LD_DEAD_CODE_DATA_ELIMINATION 1052 + bool "Dead code and data elimination (EXPERIMENTAL)" 1053 + depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION 1054 + depends on EXPERT 1055 + help 1056 + Select this if the architecture wants to do dead code and 1057 + data elimination with the linker by compiling with 1058 + -ffunction-sections -fdata-sections, and linking with 1059 + --gc-sections. 1060 + 1061 + This can reduce on disk and in-memory size of the kernel 1062 + code and static data, particularly for small configs and 1063 + on small systems. This has the possibility of introducing 1064 + silently broken kernel if the required annotations are not 1065 + present. This option is not well tested yet, so use at your 1066 + own risk. 1067 + 1041 1068 config SYSCTL 1042 1069 bool 1043 1070
+1 -8
scripts/Makefile.build
··· 147 147 cmd_gensymtypes_c = \ 148 148 $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ 149 149 $(GENKSYMS) $(if $(1), -T $(2)) \ 150 - $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ 151 150 $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \ 152 151 $(if $(KBUILD_PRESERVE),-p) \ 153 152 -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) ··· 354 355 sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \ 355 356 $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \ 356 357 $(GENKSYMS) $(if $(1), -T $(2)) \ 357 - $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ 358 358 $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \ 359 359 $(if $(KBUILD_PRESERVE),-p) \ 360 360 -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) ··· 485 487 486 488 dummy-object = $(obj)/.lib_exports.o 487 489 ksyms-lds = $(dot-target).lds 488 - ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX 489 - ref_prefix = EXTERN(_ 490 - else 491 - ref_prefix = EXTERN( 492 - endif 493 490 494 491 quiet_cmd_export_list = EXPORTS $@ 495 492 cmd_export_list = $(OBJDUMP) -h $< | \ 496 - sed -ne '/___ksymtab/s/.*+\([^ ]*\).*/$(ref_prefix)\1)/p' >$(ksyms-lds);\ 493 + sed -ne '/___ksymtab/s/.*+\([^ ]*\).*/EXTERN(\1)/p' >$(ksyms-lds);\ 497 494 rm -f $(dummy-object);\ 498 495 echo | $(CC) $(a_flags) -c -o $(dummy-object) -x assembler -;\ 499 496 $(LD) $(ld_flags) -r -o $@ -T $(ksyms-lds) $(dummy-object);\
-3
scripts/adjust_autoksyms.sh
··· 61 61 sed -n -e '3{s/ /\n/g;/^$/!p;}' "$mod" 62 62 done | sort -u | 63 63 while read sym; do 64 - if [ -n "$CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX" ]; then 65 - sym="${sym#_}" 66 - fi 67 64 echo "#define __KSYM_${sym} 1" 68 65 done >> "$new_ksyms_file" 69 66
+4 -2
scripts/basic/fixdep.c
··· 115 115 */ 116 116 static void print_dep(const char *m, int slen, const char *dir) 117 117 { 118 - int c, i; 118 + int c, prev_c = '/', i; 119 119 120 120 printf(" $(wildcard %s/", dir); 121 121 for (i = 0; i < slen; i++) { ··· 124 124 c = '/'; 125 125 else 126 126 c = tolower(c); 127 - putchar(c); 127 + if (c != '/' || prev_c != '/') 128 + putchar(c); 129 + prev_c = c; 128 130 } 129 131 printf(".h) \\\n"); 130 132 }
-10
scripts/checkpatch.pl
··· 5121 5121 } 5122 5122 } 5123 5123 5124 - # make sure symbols are always wrapped with VMLINUX_SYMBOL() ... 5125 - # all assignments may have only one of the following with an assignment: 5126 - # . 5127 - # ALIGN(...) 5128 - # VMLINUX_SYMBOL(...) 5129 - if ($realfile eq 'vmlinux.lds.h' && $line =~ /(?:(?:^|\s)$Ident\s*=|=\s*$Ident(?:\s|$))/) { 5130 - WARN("MISSING_VMLINUX_SYMBOL", 5131 - "vmlinux.lds.h needs VMLINUX_SYMBOL() around C-visible symbols\n" . $herecurr); 5132 - } 5133 - 5134 5124 # check for redundant bracing round if etc 5135 5125 if ($line =~ /(^.*)\bif\b/ && $1 !~ /else\s*$/) { 5136 5126 my ($level, $endln, @chunks) =
+4 -2
scripts/coccinelle/locks/mini_lock.cocci
··· 67 67 @@ 68 68 69 69 *lock(E1@p,...); 70 - <+... when != E1 70 + ... when != E1 71 + when any 71 72 if (...) { 72 73 ... when != E1 73 74 * return@r ...; 74 75 } 75 - ...+> 76 + ... when != E1 77 + when any 76 78 *unlock@up(E1,...); 77 79 78 80 @script:python depends on org@
+20 -20
scripts/coccinelle/null/deref_null.cocci
··· 14 14 virtual org 15 15 virtual report 16 16 17 - @ifm@ 18 - expression *E; 19 - statement S1,S2; 20 - position p1; 21 - @@ 22 - 23 - if@p1 ((E == NULL && ...) || ...) S1 else S2 24 - 25 17 // The following two rules are separate, because both can match a single 26 18 // expression in different ways 27 19 @pr1 expression@ 28 - expression *ifm.E; 20 + expression E; 29 21 identifier f; 30 22 position p1; 31 23 @@ ··· 25 33 (E != NULL && ...) ? <+...E->f@p1...+> : ... 26 34 27 35 @pr2 expression@ 28 - expression *ifm.E; 36 + expression E; 29 37 identifier f; 30 38 position p2; 31 39 @@ ··· 37 45 | 38 46 sizeof(<+...E->f@p2...+>) 39 47 ) 48 + 49 + @ifm@ 50 + expression *E; 51 + statement S1,S2; 52 + position p1; 53 + @@ 54 + 55 + if@p1 ((E == NULL && ...) || ...) S1 else S2 40 56 41 57 // For org and report modes 42 58 ··· 212 212 // The following three rules are duplicates of ifm, pr1 and pr2 respectively. 213 213 // It is need because the previous rule as already made a "change". 214 214 215 - @ifm1 depends on context && !org && !report@ 216 - expression *E; 217 - statement S1,S2; 218 - position p1; 219 - @@ 220 - 221 - if@p1 ((E == NULL && ...) || ...) S1 else S2 222 - 223 215 @pr11 depends on context && !org && !report expression@ 224 - expression *ifm1.E; 216 + expression E; 225 217 identifier f; 226 218 position p1; 227 219 @@ ··· 221 229 (E != NULL && ...) ? <+...E->f@p1...+> : ... 222 230 223 231 @pr12 depends on context && !org && !report expression@ 224 - expression *ifm1.E; 232 + expression E; 225 233 identifier f; 226 234 position p2; 227 235 @@ ··· 233 241 | 234 242 sizeof(<+...E->f@p2...+>) 235 243 ) 244 + 245 + @ifm1 depends on context && !org && !report@ 246 + expression *E; 247 + statement S1,S2; 248 + position p1; 249 + @@ 250 + 251 + if@p1 ((E == NULL && ...) || ...) S1 else S2 236 252 237 253 @depends on context && !org && !report exists@ 238 254 expression subE <= ifm1.E;
+3 -22
scripts/depmod.sh
··· 3 3 # 4 4 # A depmod wrapper used by the toplevel Makefile 5 5 6 - if test $# -ne 3; then 7 - echo "Usage: $0 /sbin/depmod <kernelrelease> <symbolprefix>" >&2 6 + if test $# -ne 2; then 7 + echo "Usage: $0 /sbin/depmod <kernelrelease>" >&2 8 8 exit 1 9 9 fi 10 10 DEPMOD=$1 11 11 KERNELRELEASE=$2 12 - SYMBOL_PREFIX=$3 13 12 14 13 if ! test -r System.map -a -x "$DEPMOD"; then 15 14 exit 0 16 - fi 17 - 18 - # older versions of depmod don't support -P <symbol-prefix> 19 - # support was added in module-init-tools 3.13 20 - if test -n "$SYMBOL_PREFIX"; then 21 - release=$("$DEPMOD" --version) 22 - package=$(echo "$release" | cut -d' ' -f 1) 23 - if test "$package" = "module-init-tools"; then 24 - version=$(echo "$release" | cut -d' ' -f 2) 25 - later=$(printf '%s\n' "$version" "3.13" | sort -V | tail -n 1) 26 - if test "$later" != "$version"; then 27 - # module-init-tools < 3.13, drop the symbol prefix 28 - SYMBOL_PREFIX="" 29 - fi 30 - fi 31 - if test -n "$SYMBOL_PREFIX"; then 32 - SYMBOL_PREFIX="-P $SYMBOL_PREFIX" 33 - fi 34 15 fi 35 16 36 17 # older versions of depmod require the version string to start with three ··· 36 55 if test -n "$INSTALL_MOD_PATH"; then 37 56 set -- "$@" -b "$INSTALL_MOD_PATH" 38 57 fi 39 - "$DEPMOD" "$@" "$KERNELRELEASE" $SYMBOL_PREFIX 58 + "$DEPMOD" "$@" "$KERNELRELEASE" 40 59 ret=$? 41 60 42 61 if $depmod_hack_needed; then
+3 -8
scripts/genksyms/genksyms.c
··· 45 45 46 46 static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types, 47 47 flag_preserve, flag_warnings, flag_rel_crcs; 48 - static const char *mod_prefix = ""; 49 48 50 49 static int errors; 51 50 static int nsyms; ··· 692 693 fputs(">\n", debugfile); 693 694 694 695 /* Used as a linker script. */ 695 - printf(!flag_rel_crcs ? "%s__crc_%s = 0x%08lx;\n" : 696 + printf(!flag_rel_crcs ? "__crc_%s = 0x%08lx;\n" : 696 697 "SECTIONS { .rodata : ALIGN(4) { " 697 - "%s__crc_%s = .; LONG(0x%08lx); } }\n", 698 - mod_prefix, name, crc); 698 + "__crc_%s = .; LONG(0x%08lx); } }\n", 699 + name, crc); 699 700 } 700 701 } 701 702 ··· 768 769 769 770 #ifdef __GNU_LIBRARY__ 770 771 struct option long_opts[] = { 771 - {"symbol-prefix", 1, 0, 's'}, 772 772 {"debug", 0, 0, 'd'}, 773 773 {"warnings", 0, 0, 'w'}, 774 774 {"quiet", 0, 0, 'q'}, ··· 787 789 while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF) 788 790 #endif /* __GNU_LIBRARY__ */ 789 791 switch (o) { 790 - case 's': 791 - mod_prefix = optarg; 792 - break; 793 792 case 'd': 794 793 flag_debug++; 795 794 break;
+12 -37
scripts/kallsyms.c
··· 62 62 static unsigned int table_size, table_cnt; 63 63 static int all_symbols = 0; 64 64 static int absolute_percpu = 0; 65 - static char symbol_prefix_char = '\0'; 66 65 static int base_relative = 0; 67 66 68 67 int token_profit[0x10000]; ··· 74 75 static void usage(void) 75 76 { 76 77 fprintf(stderr, "Usage: kallsyms [--all-symbols] " 77 - "[--symbol-prefix=<prefix char>] " 78 78 "[--base-relative] < in.map > out.S\n"); 79 79 exit(1); 80 80 } ··· 111 113 112 114 static int read_symbol(FILE *in, struct sym_entry *s) 113 115 { 114 - char str[500]; 115 - char *sym, stype; 116 + char sym[500], stype; 116 117 int rc; 117 118 118 - rc = fscanf(in, "%llx %c %499s\n", &s->addr, &stype, str); 119 + rc = fscanf(in, "%llx %c %499s\n", &s->addr, &stype, sym); 119 120 if (rc != 3) { 120 - if (rc != EOF && fgets(str, 500, in) == NULL) 121 + if (rc != EOF && fgets(sym, 500, in) == NULL) 121 122 fprintf(stderr, "Read error or end of file.\n"); 122 123 return -1; 123 124 } 124 - if (strlen(str) > KSYM_NAME_LEN) { 125 + if (strlen(sym) > KSYM_NAME_LEN) { 125 126 fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n" 126 127 "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n", 127 - str, strlen(str), KSYM_NAME_LEN); 128 + sym, strlen(sym), KSYM_NAME_LEN); 128 129 return -1; 129 130 } 130 - 131 - sym = str; 132 - /* skip prefix char */ 133 - if (symbol_prefix_char && str[0] == symbol_prefix_char) 134 - sym++; 135 131 136 132 /* Ignore most absolute/undefined (?) symbols. 
*/ 137 133 if (strcmp(sym, "_text") == 0) ··· 147 155 is_arm_mapping_symbol(sym)) 148 156 return -1; 149 157 /* exclude also MIPS ELF local symbols ($L123 instead of .L123) */ 150 - else if (str[0] == '$') 158 + else if (sym[0] == '$') 151 159 return -1; 152 160 /* exclude debugging symbols */ 153 161 else if (stype == 'N' || stype == 'n') ··· 155 163 156 164 /* include the type field in the symbol name, so that it gets 157 165 * compressed together */ 158 - s->len = strlen(str) + 1; 166 + s->len = strlen(sym) + 1; 159 167 s->sym = malloc(s->len + 1); 160 168 if (!s->sym) { 161 169 fprintf(stderr, "kallsyms failure: " 162 170 "unable to allocate required amount of memory\n"); 163 171 exit(EXIT_FAILURE); 164 172 } 165 - strcpy((char *)s->sym + 1, str); 173 + strcpy((char *)s->sym + 1, sym); 166 174 s->sym[0] = stype; 167 175 168 176 s->percpu_absolute = 0; ··· 224 232 225 233 int i; 226 234 char *sym_name = (char *)s->sym + 1; 227 - 228 - /* skip prefix char */ 229 - if (symbol_prefix_char && *sym_name == symbol_prefix_char) 230 - sym_name++; 231 - 232 235 233 236 /* if --all-symbols is not specified, then symbols outside the text 234 237 * and inittext sections are discarded */ ··· 289 302 290 303 static void output_label(char *label) 291 304 { 292 - if (symbol_prefix_char) 293 - printf(".globl %c%s\n", symbol_prefix_char, label); 294 - else 295 - printf(".globl %s\n", label); 305 + printf(".globl %s\n", label); 296 306 printf("\tALGN\n"); 297 - if (symbol_prefix_char) 298 - printf("%c%s:\n", symbol_prefix_char, label); 299 - else 300 - printf("%s:\n", label); 307 + printf("%s:\n", label); 301 308 } 302 309 303 310 /* uncompress a compressed symbol. 
When this function is called, the best table ··· 405 424 } 406 425 407 426 output_label("kallsyms_num_syms"); 408 - printf("\tPTR\t%d\n", table_cnt); 427 + printf("\tPTR\t%u\n", table_cnt); 409 428 printf("\n"); 410 429 411 430 /* table of offset markers, that give the offset in the compressed stream ··· 749 768 all_symbols = 1; 750 769 else if (strcmp(argv[i], "--absolute-percpu") == 0) 751 770 absolute_percpu = 1; 752 - else if (strncmp(argv[i], "--symbol-prefix=", 16) == 0) { 753 - char *p = &argv[i][16]; 754 - /* skip quote */ 755 - if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\'')) 756 - p++; 757 - symbol_prefix_char = *p; 758 - } else if (strcmp(argv[i], "--base-relative") == 0) 771 + else if (strcmp(argv[i], "--base-relative") == 0) 759 772 base_relative = 1; 760 773 else 761 774 usage();
+41 -56
scripts/mod/modpost.c
··· 19 19 #include <stdbool.h> 20 20 #include <errno.h> 21 21 #include "modpost.h" 22 - #include "../../include/generated/autoconf.h" 23 22 #include "../../include/linux/license.h" 24 - #include "../../include/linux/export.h" 25 23 26 24 /* Are we using CONFIG_MODVERSIONS? */ 27 25 static int modversions = 0; ··· 121 123 /* A list of all modules we processed */ 122 124 static struct module *modules; 123 125 124 - static struct module *find_module(char *modname) 126 + static struct module *find_module(const char *modname) 125 127 { 126 128 struct module *mod; 127 129 ··· 589 591 static int ignore_undef_symbol(struct elf_info *info, const char *symname) 590 592 { 591 593 /* ignore __this_module, it will be resolved shortly */ 592 - if (strcmp(symname, VMLINUX_SYMBOL_STR(__this_module)) == 0) 594 + if (strcmp(symname, "__this_module") == 0) 593 595 return 1; 594 596 /* ignore global offset table */ 595 597 if (strcmp(symname, "_GLOBAL_OFFSET_TABLE_") == 0) 596 598 return 1; 597 599 if (info->hdr->e_machine == EM_PPC) 598 600 /* Special register function linked on all modules during final link of .ko */ 599 - if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 || 600 - strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 || 601 - strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 || 602 - strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 || 603 - strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 || 604 - strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0) 601 + if (strstarts(symname, "_restgpr_") || 602 + strstarts(symname, "_savegpr_") || 603 + strstarts(symname, "_rest32gpr_") || 604 + strstarts(symname, "_save32gpr_") || 605 + strstarts(symname, "_restvr_") || 606 + strstarts(symname, "_savevr_")) 605 607 return 1; 606 608 if (info->hdr->e_machine == EM_PPC64) 607 609 /* Special register function linked on all modules during final link of .ko */ 608 - if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") 
- 1) == 0 || 609 - strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 || 610 - strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 || 611 - strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0 || 610 + if (strstarts(symname, "_restgpr0_") || 611 + strstarts(symname, "_savegpr0_") || 612 + strstarts(symname, "_restvr_") || 613 + strstarts(symname, "_savevr_") || 612 614 strcmp(symname, ".TOC.") == 0) 613 615 return 1; 614 616 /* Do not ignore this symbol */ 615 617 return 0; 616 618 } 617 - 618 - #define CRC_PFX VMLINUX_SYMBOL_STR(__crc_) 619 - #define KSYMTAB_PFX VMLINUX_SYMBOL_STR(__ksymtab_) 620 619 621 620 static void handle_modversions(struct module *mod, struct elf_info *info, 622 621 Elf_Sym *sym, const char *symname) ··· 623 628 bool is_crc = false; 624 629 625 630 if ((!is_vmlinux(mod->name) || mod->is_dot_o) && 626 - strncmp(symname, "__ksymtab", 9) == 0) 631 + strstarts(symname, "__ksymtab")) 627 632 export = export_from_secname(info, get_secindex(info, sym)); 628 633 else 629 634 export = export_from_sec(info, get_secindex(info, sym)); 630 635 631 636 /* CRC'd symbol */ 632 - if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) { 637 + if (strstarts(symname, "__crc_")) { 633 638 is_crc = true; 634 639 crc = (unsigned int) sym->st_value; 635 640 if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) { ··· 642 647 info->sechdrs[sym->st_shndx].sh_addr : 0); 643 648 crc = *crcp; 644 649 } 645 - sym_update_crc(symname + strlen(CRC_PFX), mod, crc, 650 + sym_update_crc(symname + strlen("__crc_"), mod, crc, 646 651 export); 647 652 } 648 653 649 654 switch (sym->st_shndx) { 650 655 case SHN_COMMON: 651 - if (!strncmp(symname, "__gnu_lto_", sizeof("__gnu_lto_")-1)) { 656 + if (strstarts(symname, "__gnu_lto_")) { 652 657 /* Should warn here, but modpost runs before the linker */ 653 658 } else 654 659 warn("\"%s\" [%s] is COMMON symbol\n", symname, mod->name); ··· 680 685 } 681 686 #endif 682 687 683 - #ifdef 
CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX 684 - if (symname[0] != '_') 685 - break; 686 - else 687 - symname++; 688 - #endif 689 688 if (is_crc) { 690 689 const char *e = is_vmlinux(mod->name) ?"":".ko"; 691 - warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n", symname + strlen(CRC_PFX), mod->name, e); 690 + warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n", 691 + symname + strlen("__crc_"), mod->name, e); 692 692 } 693 693 mod->unres = alloc_symbol(symname, 694 694 ELF_ST_BIND(sym->st_info) == STB_WEAK, ··· 691 701 break; 692 702 default: 693 703 /* All exported symbols */ 694 - if (strncmp(symname, KSYMTAB_PFX, strlen(KSYMTAB_PFX)) == 0) { 695 - sym_add_exported(symname + strlen(KSYMTAB_PFX), mod, 704 + if (strstarts(symname, "__ksymtab_")) { 705 + sym_add_exported(symname + strlen("__ksymtab_"), mod, 696 706 export); 697 707 } 698 - if (strcmp(symname, VMLINUX_SYMBOL_STR(init_module)) == 0) 708 + if (strcmp(symname, "init_module") == 0) 699 709 mod->has_init = 1; 700 - if (strcmp(symname, VMLINUX_SYMBOL_STR(cleanup_module)) == 0) 710 + if (strcmp(symname, "cleanup_module") == 0) 701 711 mod->has_cleanup = 1; 702 712 break; 703 713 } ··· 724 734 return string; 725 735 } 726 736 727 - static char *get_next_modinfo(void *modinfo, unsigned long modinfo_len, 728 - const char *tag, char *info) 737 + static char *get_next_modinfo(struct elf_info *info, const char *tag, 738 + char *prev) 729 739 { 730 740 char *p; 731 741 unsigned int taglen = strlen(tag); 732 - unsigned long size = modinfo_len; 742 + char *modinfo = info->modinfo; 743 + unsigned long size = info->modinfo_len; 733 744 734 - if (info) { 735 - size -= info - (char *)modinfo; 736 - modinfo = next_string(info, &size); 745 + if (prev) { 746 + size -= prev - modinfo; 747 + modinfo = next_string(prev, &size); 737 748 } 738 749 739 750 for (p = modinfo; p; p = next_string(p, &size)) { ··· 744 753 return NULL; 745 754 } 746 755 747 
- static char *get_modinfo(void *modinfo, unsigned long modinfo_len, 748 - const char *tag) 756 + static char *get_modinfo(struct elf_info *info, const char *tag) 749 757 750 758 { 751 - return get_next_modinfo(modinfo, modinfo_len, tag, NULL); 759 + return get_next_modinfo(info, tag, NULL); 752 760 } 753 761 754 762 /** ··· 1171 1181 /* Check for pattern 1 */ 1172 1182 if (match(tosec, init_data_sections) && 1173 1183 match(fromsec, data_sections) && 1174 - (strncmp(fromsym, "__param", strlen("__param")) == 0)) 1184 + strstarts(fromsym, "__param")) 1175 1185 return 0; 1176 1186 1177 1187 /* Check for pattern 1a */ 1178 1188 if (strcmp(tosec, ".init.text") == 0 && 1179 1189 match(fromsec, data_sections) && 1180 - (strncmp(fromsym, "__param_ops_", strlen("__param_ops_")) == 0)) 1190 + strstarts(fromsym, "__param_ops_")) 1181 1191 return 0; 1182 1192 1183 1193 /* Check for pattern 2 */ ··· 1532 1542 from = find_elf_symbol2(elf, r->r_offset, fromsec); 1533 1543 fromsym = sym_name(elf, from); 1534 1544 1535 - if (!strncmp(fromsym, "reference___initcall", 1536 - sizeof("reference___initcall")-1)) 1545 + if (strstarts(fromsym, "reference___initcall")) 1537 1546 return; 1538 1547 1539 1548 tosec = sec_name(elf, get_secindex(elf, sym)); ··· 1929 1940 return s; 1930 1941 } 1931 1942 1932 - static void read_symbols(char *modname) 1943 + static void read_symbols(const char *modname) 1933 1944 { 1934 1945 const char *symname; 1935 1946 char *version; ··· 1950 1961 mod->skip = 1; 1951 1962 } 1952 1963 1953 - license = get_modinfo(info.modinfo, info.modinfo_len, "license"); 1964 + license = get_modinfo(&info, "license"); 1954 1965 if (!license && !is_vmlinux(modname)) 1955 1966 warn("modpost: missing MODULE_LICENSE() in %s\n" 1956 1967 "see include/linux/module.h for " ··· 1962 1973 mod->gpl_compatible = 0; 1963 1974 break; 1964 1975 } 1965 - license = get_next_modinfo(info.modinfo, info.modinfo_len, 1966 - "license", license); 1976 + license = get_next_modinfo(&info, "license", 
license); 1967 1977 } 1968 1978 1969 1979 for (sym = info.symtab_start; sym < info.symtab_stop; sym++) { ··· 1971 1983 handle_modversions(mod, &info, sym, symname); 1972 1984 handle_moddevtable(mod, &info, sym, symname); 1973 1985 } 1974 - if (!is_vmlinux(modname) || 1975 - (is_vmlinux(modname) && vmlinux_section_warnings)) 1986 + if (!is_vmlinux(modname) || vmlinux_section_warnings) 1976 1987 check_sec_ref(mod, modname, &info); 1977 1988 1978 - version = get_modinfo(info.modinfo, info.modinfo_len, "version"); 1989 + version = get_modinfo(&info, "version"); 1979 1990 if (version) 1980 1991 maybe_frob_rcs_version(modname, version, info.modinfo, 1981 1992 version - (char *)info.hdr); ··· 2161 2174 2162 2175 static void add_staging_flag(struct buffer *b, const char *name) 2163 2176 { 2164 - static const char *staging_dir = "drivers/staging"; 2165 - 2166 - if (strncmp(staging_dir, name, strlen(staging_dir)) == 0) 2177 + if (strstarts(name, "drivers/staging")) 2167 2178 buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n"); 2168 2179 } 2169 2180 ··· 2215 2230 err = 1; 2216 2231 break; 2217 2232 } 2218 - buf_printf(b, "\t{ %#8x, __VMLINUX_SYMBOL_STR(%s) },\n", 2233 + buf_printf(b, "\t{ %#8x, \"%s\" },\n", 2219 2234 s->crc, s->name); 2220 2235 } 2221 2236
+13 -14
scripts/package/mkdebian
··· 71 71 packagename=user-mode-linux-$version 72 72 fi 73 73 74 - # Try to determine maintainer and email values 75 - if [ -n "$DEBEMAIL" ]; then 76 - email=$DEBEMAIL 77 - elif [ -n "$EMAIL" ]; then 78 - email=$EMAIL 74 + email=${DEBEMAIL-$EMAIL} 75 + 76 + # use email string directly if it contains <email> 77 + if echo $email | grep -q '<.*>'; then 78 + maintainer=$email 79 79 else 80 - email=$(id -nu)@$(hostname -f 2>/dev/null || hostname) 80 + # or construct the maintainer string 81 + user=${KBUILD_BUILD_USER-$(id -nu)} 82 + name=${DEBFULLNAME-$user} 83 + if [ -z "$email" ]; then 84 + buildhost=${KBUILD_BUILD_HOST-$(hostname -f 2>/dev/null || hostname)} 85 + email="$user@$buildhost" 86 + fi 87 + maintainer="$name <$email>" 81 88 fi 82 - if [ -n "$DEBFULLNAME" ]; then 83 - name=$DEBFULLNAME 84 - elif [ -n "$NAME" ]; then 85 - name=$NAME 86 - else 87 - name="Anonymous" 88 - fi 89 - maintainer="$name <$email>" 90 89 91 90 # Try to determine distribution 92 91 if [ -n "$KDEB_CHANGELOG_DIST" ]; then
+1 -1
scripts/recordmcount.c
··· 500 500 gpfx = 0; 501 501 switch (w2(ehdr->e_machine)) { 502 502 default: 503 - fprintf(stderr, "unrecognized e_machine %d %s\n", 503 + fprintf(stderr, "unrecognized e_machine %u %s\n", 504 504 w2(ehdr->e_machine), fname); 505 505 fail_file(); 506 506 break;
+1 -1
scripts/recordmcount.h
··· 441 441 return symp - sym0; 442 442 } 443 443 } 444 - fprintf(stderr, "Cannot find symbol for section %d: %s.\n", 444 + fprintf(stderr, "Cannot find symbol for section %u: %s.\n", 445 445 txtndx, txtname); 446 446 fail_file(); 447 447 }
+1 -10
scripts/tags.sh
··· 28 28 # ignore userspace tools 29 29 ignore="$ignore ( -path ${tree}tools ) -prune -o" 30 30 31 - # Find all available archs 32 - find_all_archs() 33 - { 34 - ALLSOURCE_ARCHS="" 35 - for arch in `ls ${tree}arch`; do 36 - ALLSOURCE_ARCHS="${ALLSOURCE_ARCHS} "${arch##\/} 37 - done 38 - } 39 - 40 31 # Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH 41 32 if [ "${ALLSOURCE_ARCHS}" = "" ]; then 42 33 ALLSOURCE_ARCHS=${SRCARCH} 43 34 elif [ "${ALLSOURCE_ARCHS}" = "all" ]; then 44 - find_all_archs 35 + ALLSOURCE_ARCHS=$(find ${tree}arch/ -mindepth 1 -maxdepth 1 -type d -printf '%f ') 45 36 fi 46 37 47 38 # find sources in arch/$ARCH