/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()	. = ALIGN(8)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);	\
			VMLINUX_SYMBOL(__start_mcount_loc) = .;	\
			*(__mcount_loc)	\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .;	\
				*(_ftrace_annotated_branch)	\
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;	\
				*(_ftrace_branch)	\
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);	\
				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .;	\
				*(_kprobe_blacklist)	\
				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);	\
			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)	\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
			VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .;	\
			*(_ftrace_enum_map)	\
			VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	 VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;	\
			 *(__trace_printk_fmt) /* Trace_printk fmt pointers */	\
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .;	\
			 *(__tracepoint_str) /* Trace_printk fmt pointers */	\
			 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);	\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)	\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN();	\
			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
			 *(__earlycon_table)	\
			 *(__earlycon_table_end)
#else
#define EARLYCON_TABLE()
#endif

#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(config_enabled(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)	\
	. = ALIGN(8);	\
	VMLINUX_SYMBOL(__##name##_of_table) = .;	\
	*(__##name##_of_table)	\
	*(__##name##_of_table_end)

#define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
#define EARLYCON_OF_TABLES()	OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)

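/*
 * For illustration only (a sketch, not part of the original header): with
 * the config option enabled, OF_TABLE(CONFIG_CLKSRC_OF, clksrc) goes
 * through _OF_TABLE_1(clksrc) and expands roughly to
 *
 *	. = ALIGN(8);
 *	VMLINUX_SYMBOL(__clksrc_of_table) = .;
 *	*(__clksrc_of_table)
 *	*(__clksrc_of_table_end)
 *
 * so the entries collected in the __clksrc_of_table input section start at
 * the __clksrc_of_table symbol and are followed by whatever was placed in
 * __clksrc_of_table_end (typically a sentinel entry). With the option
 * disabled, _OF_TABLE_0() swallows the arguments and nothing is emitted.
 */
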
#define KERNEL_DTB()	\
	STRUCT_ALIGN();	\
	VMLINUX_SYMBOL(__dtb_start) = .;	\
	*(.dtb.init.rodata)	\
	VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA	\
	*(.data)	\
	*(.ref.data)	\
	*(.data..shared_aligned) /* percpu related */	\
	MEM_KEEP(init.data)	\
	MEM_KEEP(exit.data)	\
	*(.data.unlikely)	\
	STRUCT_ALIGN();	\
	*(__tracepoints)	\
	/* implement dynamic printk debug */	\
	. = ALIGN(8);	\
	VMLINUX_SYMBOL(__start___jump_table) = .;	\
	*(__jump_table)	\
	VMLINUX_SYMBOL(__stop___jump_table) = .;	\
	. = ALIGN(8);	\
	VMLINUX_SYMBOL(__start___verbose) = .;	\
	*(__verbose)	\
	VMLINUX_SYMBOL(__stop___verbose) = .;	\
	LIKELY_PROFILE()	\
	BRANCH_PROFILE()	\
	TRACE_PRINTKS()	\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA	\
	. = ALIGN(PAGE_SIZE);	\
	VMLINUX_SYMBOL(__nosave_begin) = .;	\
	*(.data..nosave)	\
	. = ALIGN(PAGE_SIZE);	\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)	\
	. = ALIGN(page_align);	\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)	\
	. = ALIGN(align);	\
	*(.data..read_mostly)	\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)	\
	. = ALIGN(align);	\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)	\
	. = ALIGN(align);	\
	*(.data..init_task)

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)	\
	. = ALIGN((align));	\
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rodata) = .;	\
		*(.rodata) *(.rodata.*)	\
		*(__vermagic)	/* Kernel version magic */	\
		. = ALIGN(8);	\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;	\
		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */\
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;	\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}	\
		\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) {	\
		*(.rodata1)	\
	}	\
		\
	BUG_TABLE	\
			\
	/* PCI quirks */	\
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;	\
		*(.pci_fixup_early)	\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;	\
		*(.pci_fixup_header)	\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;	\
		*(.pci_fixup_final)	\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;	\
		*(.pci_fixup_enable)	\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;	\
		*(.pci_fixup_resume)	\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)	\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;	\
		*(.pci_fixup_suspend)	\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
		*(.pci_fixup_suspend_late)	\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
	}	\
		\
	/* Built-in firmware blobs */	\
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;	\
		*(.builtin_fw)	\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;	\
	}	\
		\
	TRACEDATA	\
			\
	/* Kernel symbol table: Normal symbols */	\
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab) = .;	\
		*(SORT(___ksymtab+*))	\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;	\
	}	\
		\
	/* Kernel symbol table: GPL-only symbols */	\
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;	\
		*(SORT(___ksymtab_gpl+*))	\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;	\
	}	\
		\
	/* Kernel symbol table: Normal unused symbols */	\
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;	\
		*(SORT(___ksymtab_unused+*))	\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;	\
	}	\
		\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(SORT(___ksymtab_unused_gpl+*))	\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}	\
		\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(SORT(___ksymtab_gpl_future+*))	\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}	\
		\
	/* Kernel symbol table: Normal symbols */	\
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab) = .;	\
		*(SORT(___kcrctab+*))	\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;	\
	}	\
		\
	/* Kernel symbol table: GPL-only symbols */	\
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;	\
		*(SORT(___kcrctab_gpl+*))	\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;	\
	}	\
		\
	/* Kernel symbol table: Normal unused symbols */	\
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;	\
		*(SORT(___kcrctab_unused+*))	\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;	\
	}	\
		\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(SORT(___kcrctab_unused_gpl+*))	\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}	\
		\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(SORT(___kcrctab_gpl_future+*))	\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}	\
		\
	/* Kernel symbol table: strings */	\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)	\
	}	\
		\
	/* __*init sections */	\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {	\
		*(.ref.rodata)	\
		MEM_KEEP(init.rodata)	\
		MEM_KEEP(exit.rodata)	\
	}	\
		\
	/* Built-in module parameters. */	\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___param) = .;	\
		*(__param)	\
		VMLINUX_SYMBOL(__stop___param) = .;	\
	}	\
		\
	/* Built-in module versions. */	\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___modver) = .;	\
		*(__modver)	\
		VMLINUX_SYMBOL(__stop___modver) = .;	\
		. = ALIGN((align));	\
		VMLINUX_SYMBOL(__end_rodata) = .;	\
	}	\
	. = ALIGN((align));

/* RODATA & RO_DATA are provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA		RO_DATA_SECTION(4096)
#define RO_DATA(align)	RO_DATA_SECTION(align)

#define SECURITY_INIT	\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		*(.security_initcall.init)	\
		VMLINUX_SYMBOL(__security_initcall_end) = .;	\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT	\
	ALIGN_FUNCTION();	\
	*(.text.hot)	\
	*(.text .text.fixup)	\
	*(.ref.text)	\
	MEM_KEEP(init.text)	\
	MEM_KEEP(exit.text)	\
	*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map. */
#define SCHED_TEXT	\
	ALIGN_FUNCTION();	\
	VMLINUX_SYMBOL(__sched_text_start) = .;	\
	*(.sched.text)	\
	VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map. */
#define LOCK_TEXT	\
	ALIGN_FUNCTION();	\
	VMLINUX_SYMBOL(__lock_text_start) = .;	\
	*(.spinlock.text)	\
	VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT	\
	ALIGN_FUNCTION();	\
	VMLINUX_SYMBOL(__kprobes_text_start) = .;	\
	*(.kprobes.text)	\
	VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT	\
	ALIGN_FUNCTION();	\
	VMLINUX_SYMBOL(__entry_text_start) = .;	\
	*(.entry.text)	\
	VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT	\
	ALIGN_FUNCTION();	\
	VMLINUX_SYMBOL(__irqentry_text_start) = .;	\
	*(.irqentry.text)	\
	VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION	\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {	\
		HEAD_TEXT	\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)	\
	. = ALIGN(align);	\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ex_table) = .;	\
		*(__ex_table)	\
		VMLINUX_SYMBOL(__stop___ex_table) = .;	\
	}

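/*
 * Usage sketch (illustrative, not part of the original header): an
 * architecture's vmlinux.lds.S would typically invoke this after its data
 * sections, e.g. EXCEPTION_TABLE(16), so that the fixup table delimited by
 * __start___ex_table and __stop___ex_table is suitably aligned and can be
 * sorted and searched by the exception handling code.
 */
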
/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)	\
	. = ALIGN(align);	\
	.data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)	\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);	\
			VMLINUX_SYMBOL(__ctors_start) = .;	\
			*(.ctors)	\
			*(SORT(.init_array.*))	\
			*(.init_array)	\
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA	\
	*(.init.data)	\
	MEM_DISCARD(init.data)	\
	KERNEL_CTORS()	\
	MCOUNT_REC()	\
	*(.init.rodata)	\
	FTRACE_EVENTS()	\
	TRACE_SYSCALLS()	\
	KPROBE_BLACKLIST()	\
	MEM_DISCARD(init.rodata)	\
	CLK_OF_TABLES()	\
	RESERVEDMEM_OF_TABLES()	\
	CLKSRC_OF_TABLES()	\
	IOMMU_OF_TABLES()	\
	CPU_METHOD_OF_TABLES()	\
	CPUIDLE_METHOD_OF_TABLES()	\
	KERNEL_DTB()	\
	IRQCHIP_OF_MATCH_TABLE()	\
	EARLYCON_TABLE()	\
	EARLYCON_OF_TABLES()

#define INIT_TEXT	\
	*(.init.text)	\
	MEM_DISCARD(init.text)

#define EXIT_DATA	\
	*(.exit.data)	\
	MEM_DISCARD(exit.data)	\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT	\
	*(.exit.text)	\
	MEM_DISCARD(exit.text)

#define EXIT_CALL	\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)	\
	. = ALIGN(sbss_align);	\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {	\
		*(.sbss)	\
		*(.scommon)	\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)	\
	. = ALIGN(bss_align);	\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {	\
		BSS_FIRST_SECTIONS	\
		*(.bss..page_aligned)	\
		*(.dynbss)	\
		*(.bss)	\
		*(COMMON)	\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG	\
		/* DWARF 1 */	\
		.debug          0 : { *(.debug) }	\
		.line           0 : { *(.line) }	\
		/* GNU DWARF 1 extensions */	\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }	\
		.debug_sfnames  0 : { *(.debug_sfnames) }	\
		/* DWARF 1.1 and DWARF 2 */	\
		.debug_aranges  0 : { *(.debug_aranges) }	\
		.debug_pubnames 0 : { *(.debug_pubnames) }	\
		/* DWARF 2 */	\
		.debug_info     0 : { *(.debug_info	\
		.gnu.linkonce.wi.*) }	\
		.debug_abbrev   0 : { *(.debug_abbrev) }	\
		.debug_line     0 : { *(.debug_line) }	\
		.debug_frame    0 : { *(.debug_frame) }	\
		.debug_str      0 : { *(.debug_str) }	\
		.debug_loc      0 : { *(.debug_loc) }	\
		.debug_macinfo  0 : { *(.debug_macinfo) }	\
		/* SGI/MIPS DWARF 2 extensions */	\
		.debug_weaknames 0 : { *(.debug_weaknames) }	\
		.debug_funcnames 0 : { *(.debug_funcnames) }	\
		.debug_typenames 0 : { *(.debug_typenames) }	\
		.debug_varnames  0 : { *(.debug_varnames) }	\

	/* Stabs debugging sections. */
#define STABS_DEBUG	\
		.stab 0 : { *(.stab) }	\
		.stabstr 0 : { *(.stabstr) }	\
		.stab.excl 0 : { *(.stab.excl) }	\
		.stab.exclstr 0 : { *(.stab.exclstr) }	\
		.stab.index 0 : { *(.stab.index) }	\
		.stab.indexstr 0 : { *(.stab.indexstr) }	\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE	\
	. = ALIGN(8);	\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___bug_table) = .;	\
		*(__bug_table)	\
		VMLINUX_SYMBOL(__stop___bug_table) = .;	\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA	\
	. = ALIGN(4);	\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__tracedata_start) = .;	\
		*(.tracedata)	\
		VMLINUX_SYMBOL(__tracedata_end) = .;	\
	}
#else
#define TRACEDATA
#endif

#define NOTES	\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_notes) = .;	\
		*(.note.*)	\
		VMLINUX_SYMBOL(__stop_notes) = .;	\
	}

#define INIT_SETUP(initsetup_align)	\
		. = ALIGN(initsetup_align);	\
		VMLINUX_SYMBOL(__setup_start) = .;	\
		*(.init.setup)	\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)	\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;	\
		*(.initcall##level##.init)	\
		*(.initcall##level##s.init)	\

#define INIT_CALLS	\
		VMLINUX_SYMBOL(__initcall_start) = .;	\
		*(.initcallearly.init)	\
		INIT_CALLS_LEVEL(0)	\
		INIT_CALLS_LEVEL(1)	\
		INIT_CALLS_LEVEL(2)	\
		INIT_CALLS_LEVEL(3)	\
		INIT_CALLS_LEVEL(4)	\
		INIT_CALLS_LEVEL(5)	\
		INIT_CALLS_LEVEL(rootfs)	\
		INIT_CALLS_LEVEL(6)	\
		INIT_CALLS_LEVEL(7)	\
		VMLINUX_SYMBOL(__initcall_end) = .;

#define CON_INITCALL	\
		VMLINUX_SYMBOL(__con_initcall_start) = .;	\
		*(.con_initcall.init)	\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL	\
		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
		*(.security_initcall.init)	\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS	\
	. = ALIGN(4);	\
	VMLINUX_SYMBOL(__initramfs_start) = .;	\
	*(.init.ramfs)	\
	. = ALIGN(8);	\
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of the output
 * section definitions so that such archs can put those sections in
 * earlier section definitions.
 */
#define DISCARDS	\
	/DISCARD/ : {	\
	EXIT_TEXT	\
	EXIT_DATA	\
	EXIT_CALL	\
	*(.discard)	\
	*(.discard.*)	\
	}

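/*
 * Illustrative sketch (not part of the original header): an architecture
 * that frees its exit code at runtime instead of discarding it at link
 * time keeps EXIT_TEXT / EXIT_DATA out of /DISCARD/ by claiming them in
 * its own output sections earlier in the script, for example:
 *
 *	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
 *		EXIT_TEXT
 *	}
 *
 * Because the linker places each input section in the first output
 * section that matches it, DISCARDS then only throws away whatever has
 * not already been claimed.
 */
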
/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)	\
	VMLINUX_SYMBOL(__per_cpu_start) = .;	\
	*(.data..percpu..first)	\
	. = ALIGN(PAGE_SIZE);	\
	*(.data..percpu..page_aligned)	\
	. = ALIGN(cacheline);	\
	*(.data..percpu..read_mostly)	\
	. = ALIGN(cacheline);	\
	*(.data..percpu)	\
	*(.data..percpu..shared_aligned)	\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies an explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)	\
	VMLINUX_SYMBOL(__per_cpu_load) = .;	\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
				- LOAD_OFFSET) {	\
		PERCPU_INPUT(cacheline)	\
	} phdr	\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

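/*
 * Usage sketch (illustrative only, the exact arch invocation may differ):
 * an SMP x86_64 configuration that wants the percpu area based at virtual
 * address 0 and carried in its own program header might use
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * so that percpu symbols become small offsets from the per-cpu segment
 * base while the load address still follows __per_cpu_load.
 */
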
/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32 configuration.
 */
#define PERCPU_SECTION(cacheline)	\
	. = ALIGN(PAGE_SIZE);	\
	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;	\
		PERCPU_INPUT(cacheline)	\
	}


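/*
 * Usage sketch (illustrative): most architectures that do not need a
 * fixed percpu base simply place
 *
 *	PERCPU_SECTION(L1_CACHE_BYTES)
 *
 * in their SECTIONS body, matching the PERCPU_SECTION(CACHELINE_SIZE)
 * slot shown in the sample script at the top of this file.
 */
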
/*
 * Definition of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically smaller than a PAGE_SIZE, so the sections
 * that have this restriction (or a similar one) are located before the
 * ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used. */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)	\
	. = ALIGN(PAGE_SIZE);	\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(inittask)	\
		NOSAVE_DATA	\
		PAGE_ALIGNED_DATA(pagealigned)	\
		CACHELINE_ALIGNED_DATA(cacheline)	\
		READ_MOSTLY_DATA(cacheline)	\
		DATA_DATA	\
		CONSTRUCTORS	\
	}

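/*
 * Usage sketch (illustrative): a typical invocation passes the cacheline
 * size, the page size, and the init task alignment, e.g.
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * which emits one .data output section containing init task data, nosave
 * data, page aligned data, cacheline aligned data, read mostly data and
 * the plain DATA_DATA input sections, in that order.
 */
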
#define INIT_TEXT_SECTION(inittext_align)	\
	. = ALIGN(inittext_align);	\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(_sinittext) = .;	\
		INIT_TEXT	\
		VMLINUX_SYMBOL(_einittext) = .;	\
	}

#define INIT_DATA_SECTION(initsetup_align)	\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {	\
		INIT_DATA	\
		INIT_SETUP(initsetup_align)	\
		INIT_CALLS	\
		CON_INITCALL	\
		SECURITY_INITCALL	\
		INIT_RAM_FS	\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)	\
	. = ALIGN(sbss_align);	\
	VMLINUX_SYMBOL(__bss_start) = .;	\
	SBSS(sbss_align)	\
	BSS(bss_align)	\
	. = ALIGN(stop_align);	\
	VMLINUX_SYMBOL(__bss_stop) = .;
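
/*
 * Usage sketch (illustrative): the sample script at the top of this file
 * uses BSS_SECTION(0, 0, 0). An architecture that needs a page aligned
 * .bss and __bss_stop would instead pass PAGE_SIZE for bss_align and
 * stop_align.
 */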