/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *        . = START;
 *        __init_begin = .;
 *        HEAD_TEXT_SECTION
 *        INIT_TEXT_SECTION(PAGE_SIZE)
 *        INIT_DATA_SECTION(...)
 *        PERCPU_SECTION(CACHELINE_SIZE)
 *        __init_end = .;
 *
 *        _stext = .;
 *        TEXT_SECTION = 0
 *        _etext = .;
 *
 *        _sdata = .;
 *        RO_DATA_SECTION(PAGE_SIZE)
 *        RW_DATA_SECTION(...)
 *        _edata = .;
 *
 *        EXCEPTION_TABLE(...)
 *        NOTES
 *
 *        BSS_SECTION(0, 0, 0)
 *        _end = .;
 *
 *        STABS_DEBUG
 *        DWARF_DEBUG
 *
 *        DISCARDS                // must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary, equal to the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)

/*
 * Align to a 32 byte boundary, equal to the
 * alignment gcc 4.5 uses for a struct.
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec) *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec) *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
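
/*
 * Expansion sketch (illustrative, not part of the original header): with
 * CONFIG_MEMORY_HOTPLUG=y,
 *
 *        MEM_KEEP(init.data)    -> *(.meminit.data)
 *        MEM_DISCARD(init.data) -> (nothing)
 *
 * and with the option disabled the two expansions swap, so the input
 * section is routed to whichever output section uses MEM_DISCARD()
 * (normally the discardable init data). DATA_DATA below relies on this
 * via MEM_KEEP(init.data) and MEM_KEEP(exit.data).
 */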

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
        VMLINUX_SYMBOL(__start_mcount_loc) = .; \
        *(__mcount_loc) \
        VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
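
/*
 * Consumption sketch (illustrative, not part of the original header): the
 * __start_xxx/__stop_xxx pairs emitted by macros like MCOUNT_REC() are
 * visible to C code as ordinary array symbols, e.g.
 *
 *        extern unsigned long __start_mcount_loc[];
 *        extern unsigned long __stop_mcount_loc[];
 *
 *        for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *                process one recorded mcount call site at *p;
 *
 * The loop body above is only a placeholder for whatever walks the table.
 */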

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
        *(_ftrace_annotated_branch) \
        VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
        *(_ftrace_branch) \
        VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
        VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
        *(_kprobe_blacklist) \
        VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
        VMLINUX_SYMBOL(__start_ftrace_events) = .; \
        *(_ftrace_events) \
        VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
        *(__trace_printk_fmt) /* Trace_printk format pointers */ \
        VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
        *(__tracepoint_str) /* Tracepoint strings */ \
        VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
        VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
        *(__syscalls_metadata) \
        VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif


#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name) __OF_TABLE(config_enabled(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__##name##_of_table) = .; \
        *(__##name##_of_table) \
        *(__##name##_of_table_end)

#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
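
/*
 * Expansion sketch (illustrative, not part of the original header): with
 * CONFIG_CLKSRC_OF enabled, CLKSRC_OF_TABLES() becomes
 *
 *        . = ALIGN(8);
 *        VMLINUX_SYMBOL(__clksrc_of_table) = .;
 *        *(__clksrc_of_table)
 *        *(__clksrc_of_table_end)
 *
 * and with the option disabled it expands to nothing, so the table is
 * simply absent from the image.
 */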

#define KERNEL_DTB() \
        STRUCT_ALIGN(); \
        VMLINUX_SYMBOL(__dtb_start) = .; \
        *(.dtb.init.rodata) \
        VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA \
        *(.data) \
        *(.ref.data) \
        *(.data..shared_aligned) /* percpu related */ \
        MEM_KEEP(init.data) \
        MEM_KEEP(exit.data) \
        *(.data.unlikely) \
        STRUCT_ALIGN(); \
        *(__tracepoints) \
        /* implement dynamic printk debug */ \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__start___jump_table) = .; \
        *(__jump_table) \
        VMLINUX_SYMBOL(__stop___jump_table) = .; \
        . = ALIGN(8); \
        VMLINUX_SYMBOL(__start___verbose) = .; \
        *(__verbose) \
        VMLINUX_SYMBOL(__stop___verbose) = .; \
        LIKELY_PROFILE() \
        BRANCH_PROFILE() \
        TRACE_PRINTKS() \
        TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA \
        . = ALIGN(PAGE_SIZE); \
        VMLINUX_SYMBOL(__nosave_begin) = .; \
        *(.data..nosave) \
        . = ALIGN(PAGE_SIZE); \
        VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align) \
        . = ALIGN(page_align); \
        *(.data..page_aligned)

#define READ_MOSTLY_DATA(align) \
        . = ALIGN(align); \
        *(.data..read_mostly) \
        . = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align) \
        . = ALIGN(align); \
        *(.data..cacheline_aligned)

#define INIT_TASK_DATA(align) \
        . = ALIGN(align); \
        *(.data..init_task)

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align) \
        . = ALIGN((align)); \
        .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_rodata) = .; \
                *(.rodata) *(.rodata.*) \
                *(__vermagic) /* Kernel version magic */ \
                . = ALIGN(8); \
                VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
                *(__tracepoints_ptrs) /* Tracepoints: pointer array */ \
                VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
                *(__tracepoints_strings) /* Tracepoints: strings */ \
        } \
        \
        .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
                *(.rodata1) \
        } \
        \
        BUG_TABLE \
        \
        /* PCI quirks */ \
        .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
                *(.pci_fixup_early) \
                VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
                *(.pci_fixup_header) \
                VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
                *(.pci_fixup_final) \
                VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
                *(.pci_fixup_enable) \
                VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
                *(.pci_fixup_resume) \
                VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
                *(.pci_fixup_resume_early) \
                VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
                *(.pci_fixup_suspend) \
                VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
                VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
                *(.pci_fixup_suspend_late) \
                VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
        } \
        \
        /* Built-in firmware blobs */ \
        .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_builtin_fw) = .; \
                *(.builtin_fw) \
                VMLINUX_SYMBOL(__end_builtin_fw) = .; \
        } \
        \
        TRACEDATA \
        \
        /* Kernel symbol table: Normal symbols */ \
        __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab) = .; \
                *(SORT(___ksymtab+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only symbols */ \
        __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
                *(SORT(___ksymtab_gpl+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
        } \
        \
        /* Kernel symbol table: Normal unused symbols */ \
        __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
                *(SORT(___ksymtab_unused+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only unused symbols */ \
        __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
                *(SORT(___ksymtab_unused_gpl+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
        } \
        \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
                *(SORT(___ksymtab_gpl_future+*)) \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
        } \
        \
        /* Kernel symbol table: Normal symbols */ \
        __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab) = .; \
                *(SORT(___kcrctab+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only symbols */ \
        __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
                *(SORT(___kcrctab_gpl+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
        } \
        \
        /* Kernel symbol table: Normal unused symbols */ \
        __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
                *(SORT(___kcrctab_unused+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
        } \
        \
        /* Kernel symbol table: GPL-only unused symbols */ \
        __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
                *(SORT(___kcrctab_unused_gpl+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
        } \
        \
        /* Kernel symbol table: GPL-future-only symbols */ \
        __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
                *(SORT(___kcrctab_gpl_future+*)) \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
        } \
        \
        /* Kernel symbol table: strings */ \
        __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
                *(__ksymtab_strings) \
        } \
        \
        /* __*init sections */ \
        __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
                *(.ref.rodata) \
                MEM_KEEP(init.rodata) \
                MEM_KEEP(exit.rodata) \
        } \
        \
        /* Built-in module parameters. */ \
        __param : AT(ADDR(__param) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___param) = .; \
                *(__param) \
                VMLINUX_SYMBOL(__stop___param) = .; \
        } \
        \
        /* Built-in module versions. */ \
        __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___modver) = .; \
                *(__modver) \
                VMLINUX_SYMBOL(__stop___modver) = .; \
                . = ALIGN((align)); \
                VMLINUX_SYMBOL(__end_rodata) = .; \
        } \
        . = ALIGN((align));

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA RO_DATA_SECTION(4096)
#define RO_DATA(align) RO_DATA_SECTION(align)
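
/*
 * Usage sketch (illustrative, not part of the original header): a typical
 * architecture script would write
 *
 *        RO_DATA(PAGE_SIZE)
 *
 * which is just RO_DATA_SECTION() at the chosen alignment; the bare RODATA
 * spelling hard-codes a 4096 byte alignment and remains only for older
 * scripts.
 */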

#define SECURITY_INIT \
        .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__security_initcall_start) = .; \
                *(.security_initcall.init) \
                VMLINUX_SYMBOL(__security_initcall_end) = .; \
        }

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT \
        ALIGN_FUNCTION(); \
        *(.text.hot) \
        *(.text) \
        *(.ref.text) \
        MEM_KEEP(init.text) \
        MEM_KEEP(exit.text) \
        *(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we get the same
 * address even at the second ld pass when generating System.map. */
#define SCHED_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__sched_text_start) = .; \
        *(.sched.text) \
        VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we get the same
 * address even at the second ld pass when generating System.map. */
#define LOCK_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__lock_text_start) = .; \
        *(.spinlock.text) \
        VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__kprobes_text_start) = .; \
        *(.kprobes.text) \
        VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__entry_text_start) = .; \
        *(.entry.text) \
        VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__irqentry_text_start) = .; \
        *(.irqentry.text) \
        VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT *(.head.text)

#define HEAD_TEXT_SECTION \
        .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
                HEAD_TEXT \
        }

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align) \
        . = ALIGN(align); \
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ex_table) = .; \
                *(__ex_table) \
                VMLINUX_SYMBOL(__stop___ex_table) = .; \
        }

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align) \
        . = ALIGN(align); \
        .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
                INIT_TASK_DATA(align) \
        }

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
        VMLINUX_SYMBOL(__ctors_start) = .; \
        *(.ctors) \
        *(.init_array) \
        VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA \
        *(.init.data) \
        MEM_DISCARD(init.data) \
        KERNEL_CTORS() \
        MCOUNT_REC() \
        *(.init.rodata) \
        FTRACE_EVENTS() \
        TRACE_SYSCALLS() \
        KPROBE_BLACKLIST() \
        MEM_DISCARD(init.rodata) \
        CLK_OF_TABLES() \
        RESERVEDMEM_OF_TABLES() \
        CLKSRC_OF_TABLES() \
        CPU_METHOD_OF_TABLES() \
        KERNEL_DTB() \
        IRQCHIP_OF_MATCH_TABLE() \
        EARLYCON_OF_TABLES()

#define INIT_TEXT \
        *(.init.text) \
        MEM_DISCARD(init.text)

#define EXIT_DATA \
        *(.exit.data) \
        MEM_DISCARD(exit.data) \
        MEM_DISCARD(exit.rodata)

#define EXIT_TEXT \
        *(.exit.text) \
        MEM_DISCARD(exit.text)

#define EXIT_CALL \
        *(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align) \
        . = ALIGN(sbss_align); \
        .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
                *(.sbss) \
                *(.scommon) \
        }

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif
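
/*
 * Example sketch (hypothetical, not part of the original header): an
 * architecture that needs a particular input section placed at the very
 * start of .bss can define the hook before including this file, e.g.
 *
 *        #define BSS_FIRST_SECTIONS *(.bss..first_thing)
 *
 * The section name above is purely illustrative.
 */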

#define BSS(bss_align) \
        . = ALIGN(bss_align); \
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
                BSS_FIRST_SECTIONS \
                *(.bss..page_aligned) \
                *(.dynbss) \
                *(.bss) \
                *(COMMON) \
        }

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG \
        /* DWARF 1 */ \
        .debug 0 : { *(.debug) } \
        .line 0 : { *(.line) } \
        /* GNU DWARF 1 extensions */ \
        .debug_srcinfo 0 : { *(.debug_srcinfo) } \
        .debug_sfnames 0 : { *(.debug_sfnames) } \
        /* DWARF 1.1 and DWARF 2 */ \
        .debug_aranges 0 : { *(.debug_aranges) } \
        .debug_pubnames 0 : { *(.debug_pubnames) } \
        /* DWARF 2 */ \
        .debug_info 0 : { *(.debug_info \
                .gnu.linkonce.wi.*) } \
        .debug_abbrev 0 : { *(.debug_abbrev) } \
        .debug_line 0 : { *(.debug_line) } \
        .debug_frame 0 : { *(.debug_frame) } \
        .debug_str 0 : { *(.debug_str) } \
        .debug_loc 0 : { *(.debug_loc) } \
        .debug_macinfo 0 : { *(.debug_macinfo) } \
        /* SGI/MIPS DWARF 2 extensions */ \
        .debug_weaknames 0 : { *(.debug_weaknames) } \
        .debug_funcnames 0 : { *(.debug_funcnames) } \
        .debug_typenames 0 : { *(.debug_typenames) } \
        .debug_varnames 0 : { *(.debug_varnames) } \

        /* Stabs debugging sections. */
#define STABS_DEBUG \
        .stab 0 : { *(.stab) } \
        .stabstr 0 : { *(.stabstr) } \
        .stab.excl 0 : { *(.stab.excl) } \
        .stab.exclstr 0 : { *(.stab.exclstr) } \
        .stab.index 0 : { *(.stab.index) } \
        .stab.indexstr 0 : { *(.stab.indexstr) } \
        .comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
        . = ALIGN(8); \
        __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___bug_table) = .; \
                *(__bug_table) \
                VMLINUX_SYMBOL(__stop___bug_table) = .; \
        }
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
        . = ALIGN(4); \
        .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__tracedata_start) = .; \
                *(.tracedata) \
                VMLINUX_SYMBOL(__tracedata_end) = .; \
        }
#else
#define TRACEDATA
#endif

#define NOTES \
        .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start_notes) = .; \
                *(.note.*) \
                VMLINUX_SYMBOL(__stop_notes) = .; \
        }

#define INIT_SETUP(initsetup_align) \
        . = ALIGN(initsetup_align); \
        VMLINUX_SYMBOL(__setup_start) = .; \
        *(.init.setup) \
        VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level) \
        VMLINUX_SYMBOL(__initcall##level##_start) = .; \
        *(.initcall##level##.init) \
        *(.initcall##level##s.init) \

#define INIT_CALLS \
        VMLINUX_SYMBOL(__initcall_start) = .; \
        *(.initcallearly.init) \
        INIT_CALLS_LEVEL(0) \
        INIT_CALLS_LEVEL(1) \
        INIT_CALLS_LEVEL(2) \
        INIT_CALLS_LEVEL(3) \
        INIT_CALLS_LEVEL(4) \
        INIT_CALLS_LEVEL(5) \
        INIT_CALLS_LEVEL(rootfs) \
        INIT_CALLS_LEVEL(6) \
        INIT_CALLS_LEVEL(7) \
        VMLINUX_SYMBOL(__initcall_end) = .;

#define CON_INITCALL \
        VMLINUX_SYMBOL(__con_initcall_start) = .; \
        *(.con_initcall.init) \
        VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL \
        VMLINUX_SYMBOL(__security_initcall_start) = .; \
        *(.security_initcall.init) \
        VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
        . = ALIGN(4); \
        VMLINUX_SYMBOL(__initramfs_start) = .; \
        *(.init.ramfs) \
        . = ALIGN(8); \
        *(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS \
        /DISCARD/ : { \
                EXIT_TEXT \
                EXIT_DATA \
                EXIT_CALL \
                *(.discard) \
                *(.discard.*) \
        }
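
/*
 * Sketch (illustrative, not part of the original header): an architecture
 * that frees exit code at runtime instead of discarding it at link time
 * keeps it in a real output section placed before DISCARDS, e.g.
 *
 *        .exit.text : {
 *                EXIT_TEXT
 *        }
 *
 * Input sections are claimed by the first matching output section in
 * script order, so such a definition only works because DISCARDS comes
 * last.
 */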

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline) \
        VMLINUX_SYMBOL(__per_cpu_start) = .; \
        *(.data..percpu..first) \
        . = ALIGN(PAGE_SIZE); \
        *(.data..percpu..page_aligned) \
        . = ALIGN(cacheline); \
        *(.data..percpu..read_mostly) \
        . = ALIGN(cacheline); \
        *(.data..percpu) \
        *(.data..percpu..shared_aligned) \
        VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies an explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
        VMLINUX_SYMBOL(__per_cpu_load) = .; \
        .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
                                - LOAD_OFFSET) { \
                PERCPU_INPUT(cacheline) \
        } phdr \
        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
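
/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *        PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * would place the percpu area at virtual address 0 in a dedicated "percpu"
 * PHDR. Note the leading colon on the PHDR argument, and remember that the
 * PHDR assignment is sticky: the next output section must switch PHDRs back
 * explicitly.
 */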

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32 configuration.
 */
#define PERCPU_SECTION(cacheline) \
        . = ALIGN(PAGE_SIZE); \
        .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__per_cpu_load) = .; \
                PERCPU_INPUT(cacheline) \
        }
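
/*
 * Usage sketch (illustrative, not part of the original header): most
 * architectures only need
 *
 *        PERCPU_SECTION(L1_CACHE_BYTES)
 *
 * at the point in their SECTIONS where the percpu area should live, as in
 * the sample script at the top of this file.
 */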


/*
 * Definition of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (if not always) less than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one) are
 * located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used. */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
        . = ALIGN(PAGE_SIZE); \
        .data : AT(ADDR(.data) - LOAD_OFFSET) { \
                INIT_TASK_DATA(inittask) \
                NOSAVE_DATA \
                PAGE_ALIGNED_DATA(pagealigned) \
                CACHELINE_ALIGNED_DATA(cacheline) \
                READ_MOSTLY_DATA(cacheline) \
                DATA_DATA \
                CONSTRUCTORS \
        }
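
/*
 * Usage sketch (illustrative, not part of the original header): a typical
 * call is
 *
 *        RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. cacheline-aligned data at the cache line size, page-aligned data at
 * PAGE_SIZE and the init task at THREAD_SIZE; pass 0 for the page_align
 * argument if the architecture has no page-aligned data.
 */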

#define INIT_TEXT_SECTION(inittext_align) \
        . = ALIGN(inittext_align); \
        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(_sinittext) = .; \
                INIT_TEXT \
                VMLINUX_SYMBOL(_einittext) = .; \
        }

#define INIT_DATA_SECTION(initsetup_align) \
        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
                INIT_DATA \
                INIT_SETUP(initsetup_align) \
                INIT_CALLS \
                CON_INITCALL \
                SECURITY_INITCALL \
                INIT_RAM_FS \
        }

#define BSS_SECTION(sbss_align, bss_align, stop_align) \
        . = ALIGN(sbss_align); \
        VMLINUX_SYMBOL(__bss_start) = .; \
        SBSS(sbss_align) \
        BSS(bss_align) \
        . = ALIGN(stop_align); \
        VMLINUX_SYMBOL(__bss_stop) = .;