/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines if the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
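
/*
 * Illustrative expansion (not part of the build): with CONFIG_MEMORY_HOTPLUG
 * enabled, MEM_KEEP(init.data) pastes to *(.meminit.data), so DATA_DATA below
 * keeps that data resident in .data; with it disabled, MEM_KEEP() is empty
 * and MEM_DISCARD(init.data) pulls the same input sections into INIT_DATA
 * instead, so they are released together with the rest of the init memory.
 */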

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);	\
			VMLINUX_SYMBOL(__start_mcount_loc) = .;	\
			*(__mcount_loc)	\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
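
/*
 * For illustration only (not part of this header): the __start_<name> and
 * __stop_<name> symbols emitted by helpers such as MCOUNT_REC() are typically
 * consumed from C as array bounds, roughly like:
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *	unsigned long *p;
 *
 *	for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *		handle_mcount_callsite(*p);	// hypothetical helper
 *
 * The real consumers live elsewhere in the tree (e.g. the ftrace core).
 */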

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .;	\
				*(_ftrace_annotated_branch)	\
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;	\
				*(_ftrace_branch)	\
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);	\
				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .;	\
				*(_kprobe_blacklist)	\
				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);	\
			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)	\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
			VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .;	\
			*(_ftrace_enum_map)	\
			VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	 VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;	\
			 *(__trace_printk_fmt) /* Trace_printk fmt pointers */	\
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .;	\
			 *(__tracepoint_str) /* Tracepoint strings */	\
			 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);	\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)	\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN();	\
			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
			 *(__earlycon_table)	\
			 *(__earlycon_table_end)
#else
#define EARLYCON_TABLE()
#endif

#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(config_enabled(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)	\
	. = ALIGN(8);	\
	VMLINUX_SYMBOL(__##name##_of_table) = .;	\
	*(__##name##_of_table)	\
	*(__##name##_of_table_end)

#define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
#define EARLYCON_OF_TABLES()	OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
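
/*
 * Illustrative expansion (not a definition): config_enabled() evaluates to
 * 1 or 0 during preprocessing, so e.g. CLK_OF_TABLES() becomes
 * _OF_TABLE_1(clk) when CONFIG_COMMON_CLK is set, emitting
 *
 *	. = ALIGN(8);
 *	VMLINUX_SYMBOL(__clk_of_table) = .;
 *	*(__clk_of_table)
 *	*(__clk_of_table_end)
 *
 * and becomes the empty _OF_TABLE_0(clk) otherwise.  The two levels of
 * indirection above exist so config_enabled(cfg) is expanded before it is
 * pasted onto _OF_TABLE_.
 */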

#define KERNEL_DTB()	\
	STRUCT_ALIGN();	\
	VMLINUX_SYMBOL(__dtb_start) = .;	\
	*(.dtb.init.rodata)	\
	VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA	\
	*(.data)	\
	*(.ref.data)	\
	*(.data..shared_aligned) /* percpu related */	\
	MEM_KEEP(init.data)	\
	MEM_KEEP(exit.data)	\
	*(.data.unlikely)	\
	STRUCT_ALIGN();	\
	*(__tracepoints)	\
	/* implement dynamic printk debug */	\
	. = ALIGN(8);	\
	VMLINUX_SYMBOL(__start___jump_table) = .;	\
	*(__jump_table)	\
	VMLINUX_SYMBOL(__stop___jump_table) = .;	\
	. = ALIGN(8);	\
	VMLINUX_SYMBOL(__start___verbose) = .;	\
	*(__verbose)	\
	VMLINUX_SYMBOL(__stop___verbose) = .;	\
	LIKELY_PROFILE()	\
	BRANCH_PROFILE()	\
	TRACE_PRINTKS()	\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA	\
	. = ALIGN(PAGE_SIZE);	\
	VMLINUX_SYMBOL(__nosave_begin) = .;	\
	*(.data..nosave)	\
	. = ALIGN(PAGE_SIZE);	\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)	\
	. = ALIGN(page_align);	\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)	\
	. = ALIGN(align);	\
	*(.data..read_mostly)	\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)	\
	. = ALIGN(align);	\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)	\
	. = ALIGN(align);	\
	*(.data..init_task)

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)	\
	. = ALIGN((align));	\
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rodata) = .;	\
		*(.rodata) *(.rodata.*)	\
		*(__vermagic)		/* Kernel version magic */	\
		. = ALIGN(8);	\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;	\
		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */	\
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;	\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}	\
	\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) {	\
		*(.rodata1)	\
	}	\
	\
	BUG_TABLE	\
	\
	/* PCI quirks */	\
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;	\
		*(.pci_fixup_early)	\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;	\
		*(.pci_fixup_header)	\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;	\
		*(.pci_fixup_final)	\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;	\
		*(.pci_fixup_enable)	\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;	\
		*(.pci_fixup_resume)	\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)	\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;	\
		*(.pci_fixup_suspend)	\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
		*(.pci_fixup_suspend_late)	\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
	}	\
	\
	/* Built-in firmware blobs */	\
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;	\
		*(.builtin_fw)	\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;	\
	}	\
	\
	TRACEDATA	\
	\
	/* Kernel symbol table: Normal symbols */	\
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab) = .;	\
		*(SORT(___ksymtab+*))	\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;	\
	}	\
	\
	/* Kernel symbol table: GPL-only symbols */	\
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;	\
		*(SORT(___ksymtab_gpl+*))	\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;	\
	}	\
	\
	/* Kernel symbol table: Normal unused symbols */	\
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;	\
		*(SORT(___ksymtab_unused+*))	\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;	\
	}	\
	\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(SORT(___ksymtab_unused_gpl+*))	\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}	\
	\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(SORT(___ksymtab_gpl_future+*))	\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}	\
	\
	/* Kernel symbol table: Normal symbols */	\
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab) = .;	\
		*(SORT(___kcrctab+*))	\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;	\
	}	\
	\
	/* Kernel symbol table: GPL-only symbols */	\
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;	\
		*(SORT(___kcrctab_gpl+*))	\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;	\
	}	\
	\
	/* Kernel symbol table: Normal unused symbols */	\
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;	\
		*(SORT(___kcrctab_unused+*))	\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;	\
	}	\
	\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(SORT(___kcrctab_unused_gpl+*))	\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}	\
	\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(SORT(___kcrctab_gpl_future+*))	\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}	\
	\
	/* Kernel symbol table: strings */	\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)	\
	}	\
	\
	/* __*init sections */	\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {	\
		*(.ref.rodata)	\
		MEM_KEEP(init.rodata)	\
		MEM_KEEP(exit.rodata)	\
	}	\
	\
	/* Built-in module parameters. */	\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___param) = .;	\
		*(__param)	\
		VMLINUX_SYMBOL(__stop___param) = .;	\
	}	\
	\
	/* Built-in module versions. */	\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___modver) = .;	\
		*(__modver)	\
		VMLINUX_SYMBOL(__stop___modver) = .;	\
		. = ALIGN((align));	\
		VMLINUX_SYMBOL(__end_rodata) = .;	\
	}	\
	. = ALIGN((align));

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA		RO_DATA_SECTION(4096)
#define RO_DATA(align)	RO_DATA_SECTION(align)

#define SECURITY_INIT	\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		*(.security_initcall.init)	\
		VMLINUX_SYMBOL(__security_initcall_end) = .;	\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT	\
		ALIGN_FUNCTION();	\
		*(.text.hot .text .text.fixup .text.unlikely)	\
		*(.ref.text)	\
	MEM_KEEP(init.text)	\
	MEM_KEEP(exit.text)	\


/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT	\
		ALIGN_FUNCTION();	\
		VMLINUX_SYMBOL(__sched_text_start) = .;	\
		*(.sched.text)	\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT	\
		ALIGN_FUNCTION();	\
		VMLINUX_SYMBOL(__lock_text_start) = .;	\
		*(.spinlock.text)	\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT	\
		ALIGN_FUNCTION();	\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;	\
		*(.kprobes.text)	\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT	\
		ALIGN_FUNCTION();	\
		VMLINUX_SYMBOL(__entry_text_start) = .;	\
		*(.entry.text)	\
		VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT	\
		ALIGN_FUNCTION();	\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;	\
		*(.irqentry.text)	\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION	\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {	\
		HEAD_TEXT	\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)	\
	. = ALIGN(align);	\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ex_table) = .;	\
		*(__ex_table)	\
		VMLINUX_SYMBOL(__stop___ex_table) = .;	\
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)	\
	. = ALIGN(align);	\
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)	\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);	\
			VMLINUX_SYMBOL(__ctors_start) = .;	\
			*(.ctors)	\
			*(SORT(.init_array.*))	\
			*(.init_array)	\
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA	\
	*(.init.data)	\
	MEM_DISCARD(init.data)	\
	KERNEL_CTORS()	\
	MCOUNT_REC()	\
	*(.init.rodata)	\
	FTRACE_EVENTS()	\
	TRACE_SYSCALLS()	\
	KPROBE_BLACKLIST()	\
	MEM_DISCARD(init.rodata)	\
	CLK_OF_TABLES()	\
	RESERVEDMEM_OF_TABLES()	\
	CLKSRC_OF_TABLES()	\
	IOMMU_OF_TABLES()	\
	CPU_METHOD_OF_TABLES()	\
	CPUIDLE_METHOD_OF_TABLES()	\
	KERNEL_DTB()	\
	IRQCHIP_OF_MATCH_TABLE()	\
	EARLYCON_TABLE()	\
	EARLYCON_OF_TABLES()

#define INIT_TEXT	\
	*(.init.text)	\
	MEM_DISCARD(init.text)

#define EXIT_DATA	\
	*(.exit.data)	\
	MEM_DISCARD(exit.data)	\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT	\
	*(.exit.text)	\
	MEM_DISCARD(exit.text)

#define EXIT_CALL	\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)	\
	. = ALIGN(sbss_align);	\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {	\
		*(.sbss)	\
		*(.scommon)	\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif
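
/*
 * Hypothetical override (defined by an architecture before this header is
 * included; the section name is illustrative only):
 *
 *	#define BSS_FIRST_SECTIONS *(.bss.arch_prominent_buffer)
 *
 * which would make BSS() below place that input section at the very start
 * of .bss.
 */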

#define BSS(bss_align)	\
	. = ALIGN(bss_align);	\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {	\
		BSS_FIRST_SECTIONS	\
		*(.bss..page_aligned)	\
		*(.dynbss)	\
		*(.bss)	\
		*(COMMON)	\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG	\
		/* DWARF 1 */	\
		.debug          0 : { *(.debug) }	\
		.line           0 : { *(.line) }	\
		/* GNU DWARF 1 extensions */	\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }	\
		.debug_sfnames  0 : { *(.debug_sfnames) }	\
		/* DWARF 1.1 and DWARF 2 */	\
		.debug_aranges  0 : { *(.debug_aranges) }	\
		.debug_pubnames 0 : { *(.debug_pubnames) }	\
		/* DWARF 2 */	\
		.debug_info     0 : { *(.debug_info	\
				.gnu.linkonce.wi.*) }	\
		.debug_abbrev   0 : { *(.debug_abbrev) }	\
		.debug_line     0 : { *(.debug_line) }	\
		.debug_frame    0 : { *(.debug_frame) }	\
		.debug_str      0 : { *(.debug_str) }	\
		.debug_loc      0 : { *(.debug_loc) }	\
		.debug_macinfo  0 : { *(.debug_macinfo) }	\
		/* SGI/MIPS DWARF 2 extensions */	\
		.debug_weaknames 0 : { *(.debug_weaknames) }	\
		.debug_funcnames 0 : { *(.debug_funcnames) }	\
		.debug_typenames 0 : { *(.debug_typenames) }	\
		.debug_varnames  0 : { *(.debug_varnames) }	\

		/* Stabs debugging sections.  */
#define STABS_DEBUG	\
		.stab 0 : { *(.stab) }	\
		.stabstr 0 : { *(.stabstr) }	\
		.stab.excl 0 : { *(.stab.excl) }	\
		.stab.exclstr 0 : { *(.stab.exclstr) }	\
		.stab.index 0 : { *(.stab.index) }	\
		.stab.indexstr 0 : { *(.stab.indexstr) }	\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE	\
	. = ALIGN(8);	\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___bug_table) = .;	\
		*(__bug_table)	\
		VMLINUX_SYMBOL(__stop___bug_table) = .;	\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA	\
	. = ALIGN(4);	\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__tracedata_start) = .;	\
		*(.tracedata)	\
		VMLINUX_SYMBOL(__tracedata_end) = .;	\
	}
#else
#define TRACEDATA
#endif

#define NOTES	\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_notes) = .;	\
		*(.note.*)	\
		VMLINUX_SYMBOL(__stop_notes) = .;	\
	}

#define INIT_SETUP(initsetup_align)	\
		. = ALIGN(initsetup_align);	\
		VMLINUX_SYMBOL(__setup_start) = .;	\
		*(.init.setup)	\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)	\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;	\
		*(.initcall##level##.init)	\
		*(.initcall##level##s.init)	\

#define INIT_CALLS	\
		VMLINUX_SYMBOL(__initcall_start) = .;	\
		*(.initcallearly.init)	\
		INIT_CALLS_LEVEL(0)	\
		INIT_CALLS_LEVEL(1)	\
		INIT_CALLS_LEVEL(2)	\
		INIT_CALLS_LEVEL(3)	\
		INIT_CALLS_LEVEL(4)	\
		INIT_CALLS_LEVEL(5)	\
		INIT_CALLS_LEVEL(rootfs)	\
		INIT_CALLS_LEVEL(6)	\
		INIT_CALLS_LEVEL(7)	\
		VMLINUX_SYMBOL(__initcall_end) = .;
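
/*
 * For illustration only (not part of this header): the resulting
 * __initcall_start[] and __initcall_end[] bounds are walked from C early
 * in boot, roughly along the lines of
 *
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *	initcall_t *fn;
 *
 *	for (fn = __initcall_start; fn < __initcall_end; fn++)
 *		(*fn)();
 *
 * (the real walker in init/main.c also honours the per-level markers
 * emitted by INIT_CALLS_LEVEL() above).
 */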

#define CON_INITCALL	\
		VMLINUX_SYMBOL(__con_initcall_start) = .;	\
		*(.con_initcall.init)	\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL	\
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		*(.security_initcall.init)	\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS	\
	. = ALIGN(4);	\
	VMLINUX_SYMBOL(__initramfs_start) = .;	\
	*(.init.ramfs)	\
	. = ALIGN(8);	\
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS	\
	/DISCARD/ : {	\
	EXIT_TEXT	\
	EXIT_DATA	\
	EXIT_CALL	\
	*(.discard)	\
	*(.discard.*)	\
	}
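
/*
 * Illustrative only (not a definition from this header): an architecture
 * that cannot discard exit code at link time could route it into a kept
 * output section before DISCARDS in its own linker script, e.g.
 *
 *	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
 *		EXIT_TEXT
 *	}
 *	...
 *	DISCARDS
 *
 * so the later /DISCARD/ rule no longer matches those input sections.
 */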

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)	\
	VMLINUX_SYMBOL(__per_cpu_start) = .;	\
	*(.data..percpu..first)	\
	. = ALIGN(PAGE_SIZE);	\
	*(.data..percpu..page_aligned)	\
	. = ALIGN(cacheline);	\
	*(.data..percpu..read_mostly)	\
	. = ALIGN(cacheline);	\
	*(.data..percpu)	\
	*(.data..percpu..shared_aligned)	\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)	\
	VMLINUX_SYMBOL(__per_cpu_load) = .;	\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
				- LOAD_OFFSET) {	\
		PERCPU_INPUT(cacheline)	\
	} phdr	\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
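
/*
 * Hypothetical usage from an architecture linker script (the parameter
 * values and the "percpu" PHDR name are placeholders, not taken from this
 * header):
 *
 *	PERCPU_VADDR(L1_CACHE_BYTES, 0, :percpu)
 *
 * which places .data..percpu at virtual address 0 and assigns it to a
 * program header declared in that script's PHDRS command.
 */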

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configurations.
 */
#define PERCPU_SECTION(cacheline)	\
	. = ALIGN(PAGE_SIZE);	\
	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;	\
		PERCPU_INPUT(cacheline)	\
	}
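
/*
 * Hypothetical usage inside an architecture's SECTIONS block (the cache
 * line size is a placeholder, not a value mandated by this header):
 *
 *	PERCPU_SECTION(L1_CACHE_BYTES)
 *
 * This is the form the sample script in the header comment above assumes.
 */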


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (in practice, always) smaller than a PAGE_SIZE,
 * so the sections with that restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page aligned data is not used. */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)	\
	. = ALIGN(PAGE_SIZE);	\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(inittask)	\
		NOSAVE_DATA	\
		PAGE_ALIGNED_DATA(pagealigned)	\
		CACHELINE_ALIGNED_DATA(cacheline)	\
		READ_MOSTLY_DATA(cacheline)	\
		DATA_DATA	\
		CONSTRUCTORS	\
	}
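
/*
 * Hypothetical invocation from an architecture linker script (the arguments
 * are typical placeholders, not values dictated by this header):
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. the cache line size for the cacheline-aligned and read-mostly data,
 * PAGE_SIZE for page aligned data and THREAD_SIZE for the init task.
 */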

#define INIT_TEXT_SECTION(inittext_align)	\
	. = ALIGN(inittext_align);	\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(_sinittext) = .;	\
		INIT_TEXT	\
		VMLINUX_SYMBOL(_einittext) = .;	\
	}

#define INIT_DATA_SECTION(initsetup_align)	\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {	\
		INIT_DATA	\
		INIT_SETUP(initsetup_align)	\
		INIT_CALLS	\
		CON_INITCALL	\
		SECURITY_INITCALL	\
		INIT_RAM_FS	\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)	\
	. = ALIGN(sbss_align);	\
	VMLINUX_SYMBOL(__bss_start) = .;	\
	SBSS(sbss_align)	\
	BSS(bss_align)	\
	. = ALIGN(stop_align);	\
	VMLINUX_SYMBOL(__bss_stop) = .;