/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU(PAGE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	__bss_start = .;
 *	BSS_SECTION(0, 0)
 *	__bss_stop = .;
 *	_end = .;
 *
 *	/DISCARD/ : {
 *		EXIT_TEXT
 *		EXIT_DATA
 *		EXIT_CALL
 *	}
 *	STABS_DEBUG
 *	DWARF_DEBUG
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).  An illustrative sketch follows the
 * MEM_KEEP()/MEM_DISCARD() definitions below.
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
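/*
 * Illustrative sketch (comment only, not part of the build): with
 * CONFIG_HOTPLUG=n, input sections such as .devinit.data are routed into
 * the init/exit output sections through the DEV_DISCARD() references in
 * INIT_DATA, INIT_TEXT, EXIT_DATA and EXIT_TEXT below, so they are freed
 * or dropped together with the rest of the init/exit code.  With
 * CONFIG_HOTPLUG=y the same input sections stay in the regular output
 * sections via DEV_KEEP(), e.g. inside a .data output section:
 *
 *	*(.data)
 *	DEV_KEEP(init.data)	expands to *(.devinit.data)
 *	DEV_KEEP(exit.data)	expands to *(.devexit.data)
 *
 * CPU_KEEP()/CPU_DISCARD() and MEM_KEEP()/MEM_DISCARD() follow the same
 * pattern, keyed off CONFIG_HOTPLUG_CPU and CONFIG_MEMORY_HOTPLUG.
 */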
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc) \
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch) \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .; \
				*(_ftrace_branch) \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .; \
			*(_ftrace_events) \
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			*(__trace_printk_fmt) /* Trace_printk fmt pointers */ \
			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
			 *(__syscalls_metadata) \
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

/* .data section */
#define DATA_DATA \
	*(.data) \
	*(.ref.data) \
	DEV_KEEP(init.data) \
	DEV_KEEP(exit.data) \
	CPU_KEEP(init.data) \
	CPU_KEEP(exit.data) \
	MEM_KEEP(init.data) \
	MEM_KEEP(exit.data) \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___markers) = .; \
	*(__markers) \
	VMLINUX_SYMBOL(__stop___markers) = .; \
	. = ALIGN(32); \
	VMLINUX_SYMBOL(__start___tracepoints) = .; \
	*(__tracepoints) \
	VMLINUX_SYMBOL(__stop___tracepoints) = .; \
	/* implement dynamic printk debug */ \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___verbose) = .; \
	*(__verbose) \
	VMLINUX_SYMBOL(__stop___verbose) = .; \
	LIKELY_PROFILE() \
	BRANCH_PROFILE() \
	TRACE_PRINTKS() \
	FTRACE_EVENTS() \
	TRACE_SYSCALLS()
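/*
 * The __start_<table> and __stop_<table> symbol pairs defined above (and
 * throughout this file) bracket tables built up from entries placed in
 * named input sections.  An illustrative C sketch of how such a pair is
 * typically consumed (the struct, symbol and function names here are
 * hypothetical, not part of this file):
 *
 *	extern struct entry __start___verbose[];
 *	extern struct entry __stop___verbose[];
 *
 *	static void walk_table(void)
 *	{
 *		struct entry *e;
 *
 *		for (e = __start___verbose; e < __stop___verbose; e++)
 *			handle(e);
 *	}
 */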
/*
 * Data section helpers
 */
#define NOSAVE_DATA \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_begin) = .; \
	*(.data.nosave) \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align) \
	. = ALIGN(page_align); \
	*(.data.page_aligned)

#define READ_MOSTLY_DATA(align) \
	. = ALIGN(align); \
	*(.data.read_mostly)

#define CACHELINE_ALIGNED_DATA(align) \
	. = ALIGN(align); \
	*(.data.cacheline_aligned)

#define INIT_TASK_DATA(align) \
	. = ALIGN(align); \
	*(.data.init_task)

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align) \
	. = ALIGN((align)); \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rodata) = .; \
		*(.rodata) *(.rodata.*) \
		*(__vermagic)		/* Kernel version magic */ \
		*(__markers_strings)	/* Markers: strings */ \
		*(__tracepoints_strings)/* Tracepoints: strings */ \
	} \
	\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
		*(.rodata1) \
	} \
	\
	BUG_TABLE \
	\
	/* PCI quirks */ \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
		*(.pci_fixup_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
		*(.pci_fixup_header) \
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
		*(.pci_fixup_final) \
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
		*(.pci_fixup_enable) \
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
		*(.pci_fixup_resume) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		*(.pci_fixup_resume_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
		*(.pci_fixup_suspend) \
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
	} \
	\
	/* Built-in firmware blobs */ \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .; \
		*(.builtin_fw) \
		VMLINUX_SYMBOL(__end_builtin_fw) = .; \
	} \
	\
	/* RapidIO route ops */ \
	.rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rio_route_ops) = .; \
		*(.rio_route_ops) \
		VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
	} \
	\
	TRACEDATA \
	\
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab) = .; \
		*(__ksymtab) \
		VMLINUX_SYMBOL(__stop___ksymtab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
		*(__ksymtab_gpl) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
		*(__ksymtab_unused) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		*(__ksymtab_unused_gpl) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		*(__ksymtab_gpl_future) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: Normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab) = .; \
		*(__kcrctab) \
		VMLINUX_SYMBOL(__stop___kcrctab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
		*(__kcrctab_gpl) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
		*(__kcrctab_unused) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		*(__kcrctab_unused_gpl) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		*(__kcrctab_gpl_future) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings) \
	} \
	\
	/* __*init sections */ \
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		*(.ref.rodata) \
		MCOUNT_REC() \
		DEV_KEEP(init.rodata) \
		DEV_KEEP(exit.rodata) \
		CPU_KEEP(init.rodata) \
		CPU_KEEP(exit.rodata) \
		MEM_KEEP(init.rodata) \
		MEM_KEEP(exit.rodata) \
	} \
	\
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___param) = .; \
		*(__param) \
		VMLINUX_SYMBOL(__stop___param) = .; \
		. = ALIGN((align)); \
		VMLINUX_SYMBOL(__end_rodata) = .; \
	} \
	. = ALIGN((align));
/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA		RO_DATA_SECTION(4096)
#define RO_DATA(align)	RO_DATA_SECTION(align)

#define SECURITY_INIT \
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .; \
		*(.security_initcall.init) \
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT \
	ALIGN_FUNCTION(); \
	*(.text.hot) \
	*(.text) \
	*(.ref.text) \
	DEV_KEEP(init.text) \
	DEV_KEEP(exit.text) \
	CPU_KEEP(init.text) \
	CPU_KEEP(exit.text) \
	MEM_KEEP(init.text) \
	MEM_KEEP(exit.text) \
	*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map. */
#define SCHED_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__sched_text_start) = .; \
	*(.sched.text) \
	VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map. */
#define LOCK_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__lock_text_start) = .; \
	*(.spinlock.text) \
	VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__kprobes_text_start) = .; \
	*(.kprobes.text) \
	VMLINUX_SYMBOL(__kprobes_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__irqentry_text_start) = .; \
	*(.irqentry.text) \
	VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION \
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
		HEAD_TEXT \
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align) \
	. = ALIGN(align); \
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ex_table) = .; \
		*(__ex_table) \
		VMLINUX_SYMBOL(__stop___ex_table) = .; \
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align) \
	. = ALIGN(align); \
	.data.init_task : { \
		INIT_TASK_DATA(align) \
	}
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8); \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			*(.ctors) \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA \
	*(.init.data) \
	DEV_DISCARD(init.data) \
	CPU_DISCARD(init.data) \
	MEM_DISCARD(init.data) \
	KERNEL_CTORS() \
	*(.init.rodata) \
	DEV_DISCARD(init.rodata) \
	CPU_DISCARD(init.rodata) \
	MEM_DISCARD(init.rodata)

#define INIT_TEXT \
	*(.init.text) \
	DEV_DISCARD(init.text) \
	CPU_DISCARD(init.text) \
	MEM_DISCARD(init.text)

#define EXIT_DATA \
	*(.exit.data) \
	DEV_DISCARD(exit.data) \
	DEV_DISCARD(exit.rodata) \
	CPU_DISCARD(exit.data) \
	CPU_DISCARD(exit.rodata) \
	MEM_DISCARD(exit.data) \
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT \
	*(.exit.text) \
	DEV_DISCARD(exit.text) \
	CPU_DISCARD(exit.text) \
	MEM_DISCARD(exit.text)

#define EXIT_CALL \
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS \
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
		*(.sbss) \
		*(.scommon) \
	}

#define BSS(bss_align) \
	. = ALIGN(bss_align); \
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__bss_start) = .; \
		*(.bss.page_aligned) \
		*(.dynbss) \
		*(.bss) \
		*(COMMON) \
		VMLINUX_SYMBOL(__bss_stop) = .; \
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG \
	/* DWARF 1 */ \
	.debug          0 : { *(.debug) } \
	.line           0 : { *(.line) } \
	/* GNU DWARF 1 extensions */ \
	.debug_srcinfo  0 : { *(.debug_srcinfo) } \
	.debug_sfnames  0 : { *(.debug_sfnames) } \
	/* DWARF 1.1 and DWARF 2 */ \
	.debug_aranges  0 : { *(.debug_aranges) } \
	.debug_pubnames 0 : { *(.debug_pubnames) } \
	/* DWARF 2 */ \
	.debug_info     0 : { *(.debug_info \
		.gnu.linkonce.wi.*) } \
	.debug_abbrev   0 : { *(.debug_abbrev) } \
	.debug_line     0 : { *(.debug_line) } \
	.debug_frame    0 : { *(.debug_frame) } \
	.debug_str      0 : { *(.debug_str) } \
	.debug_loc      0 : { *(.debug_loc) } \
	.debug_macinfo  0 : { *(.debug_macinfo) } \
	/* SGI/MIPS DWARF 2 extensions */ \
	.debug_weaknames 0 : { *(.debug_weaknames) } \
	.debug_funcnames 0 : { *(.debug_funcnames) } \
	.debug_typenames 0 : { *(.debug_typenames) } \
	.debug_varnames  0 : { *(.debug_varnames) } \

/* Stabs debugging sections. */
#define STABS_DEBUG \
	.stab 0 : { *(.stab) } \
	.stabstr 0 : { *(.stabstr) } \
	.stab.excl 0 : { *(.stab.excl) } \
	.stab.exclstr 0 : { *(.stab.exclstr) } \
	.stab.index 0 : { *(.stab.index) } \
	.stab.indexstr 0 : { *(.stab.indexstr) } \
	.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
	. = ALIGN(8); \
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___bug_table) = .; \
		*(__bug_table) \
		VMLINUX_SYMBOL(__stop___bug_table) = .; \
	}
#else
#define BUG_TABLE
#endif
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
	. = ALIGN(4); \
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__tracedata_start) = .; \
		*(.tracedata) \
		VMLINUX_SYMBOL(__tracedata_end) = .; \
	}
#else
#define TRACEDATA
#endif

#define NOTES \
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_notes) = .; \
		*(.note.*) \
		VMLINUX_SYMBOL(__stop_notes) = .; \
	}

#define INIT_SETUP(initsetup_align) \
	. = ALIGN(initsetup_align); \
	VMLINUX_SYMBOL(__setup_start) = .; \
	*(.init.setup) \
	VMLINUX_SYMBOL(__setup_end) = .;

#define INITCALLS \
	*(.initcallearly.init) \
	VMLINUX_SYMBOL(__early_initcall_end) = .; \
	*(.initcall0.init) \
	*(.initcall0s.init) \
	*(.initcall1.init) \
	*(.initcall1s.init) \
	*(.initcall2.init) \
	*(.initcall2s.init) \
	*(.initcall3.init) \
	*(.initcall3s.init) \
	*(.initcall4.init) \
	*(.initcall4s.init) \
	*(.initcall5.init) \
	*(.initcall5s.init) \
	*(.initcallrootfs.init) \
	*(.initcall6.init) \
	*(.initcall6s.init) \
	*(.initcall7.init) \
	*(.initcall7s.init)

#define INIT_CALLS \
	VMLINUX_SYMBOL(__initcall_start) = .; \
	INITCALLS \
	VMLINUX_SYMBOL(__initcall_end) = .;

#define CON_INITCALL \
	VMLINUX_SYMBOL(__con_initcall_start) = .; \
	*(.con_initcall.init) \
	VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL \
	VMLINUX_SYMBOL(__security_initcall_start) = .; \
	*(.security_initcall.init) \
	VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__initramfs_start) = .; \
	*(.init.ramfs) \
	VMLINUX_SYMBOL(__initramfs_end) = .;
#else
#define INIT_RAM_FS
#endif

/**
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.  If @vaddr
 * is not blank, it specifies an explicit base address and all percpu
 * symbols will be offset from the given address.  If blank, @vaddr
 * always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * the output PHDR is sticky: if @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().  An illustrative invocation follows the macro
 * definition below.
 */
#define PERCPU_VADDR(vaddr, phdr) \
	VMLINUX_SYMBOL(__per_cpu_load) = .; \
	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
				- LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__per_cpu_start) = .; \
		*(.data.percpu.first) \
		*(.data.percpu.page_aligned) \
		*(.data.percpu) \
		*(.data.percpu.shared_aligned) \
		VMLINUX_SYMBOL(__per_cpu_end) = .; \
	} phdr \
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
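/*
 * Example use of PERCPU_VADDR (illustrative only; the address and PHDR
 * name are made up here, check your architecture's vmlinux.lds.S for the
 * real invocation):
 *
 *	PERCPU_VADDR(0, :percpu)
 *
 * places the percpu output section at virtual address 0 and assigns it to
 * a "percpu" program header declared elsewhere in the script with the
 * PHDRS command (e.g. percpu PT_LOAD;).  Architectures that do not need a
 * predetermined address or a separate PHDR should use PERCPU() below.
 */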
/**
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Align to @align and output the output section for the percpu area.  This
 * macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data.percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU(align) \
	. = ALIGN(align); \
	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__per_cpu_load) = .; \
		VMLINUX_SYMBOL(__per_cpu_start) = .; \
		*(.data.percpu.first) \
		*(.data.percpu.page_aligned) \
		*(.data.percpu) \
		*(.data.percpu.shared_aligned) \
		VMLINUX_SYMBOL(__per_cpu_end) = .; \
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (in practice, always) smaller than a PAGE_SIZE,
 * so the sections with this (or a similar) restriction are located before
 * the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used.
 * An example invocation appears at the end of this file. */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
	. = ALIGN(PAGE_SIZE); \
	.data : AT(ADDR(.data) - LOAD_OFFSET) { \
		INIT_TASK_DATA(inittask) \
		CACHELINE_ALIGNED_DATA(cacheline) \
		READ_MOSTLY_DATA(cacheline) \
		DATA_DATA \
		CONSTRUCTORS \
		NOSAVE_DATA \
		PAGE_ALIGNED_DATA(pagealigned) \
	}

#define INIT_TEXT_SECTION(inittext_align) \
	. = ALIGN(inittext_align); \
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(_sinittext) = .; \
		INIT_TEXT \
		VMLINUX_SYMBOL(_einittext) = .; \
	}

#define INIT_DATA_SECTION(initsetup_align) \
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
		INIT_DATA \
		INIT_SETUP(initsetup_align) \
		INIT_CALLS \
		CON_INITCALL \
		SECURITY_INITCALL \
		INIT_RAM_FS \
	}

#define BSS_SECTION(sbss_align, bss_align) \
	SBSS \
	BSS(bss_align) \
	. = ALIGN(4);
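/*
 * Example use of the high level macros above (illustrative only; the
 * alignment arguments shown are typical choices, not requirements):
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. cacheline-aligned and read-mostly data are aligned to
 * L1_CACHE_BYTES, page aligned data to PAGE_SIZE and the init task to
 * THREAD_SIZE, while
 *
 *	BSS_SECTION(0, PAGE_SIZE)
 *
 * emits .sbss followed by a PAGE_SIZE aligned .bss.  As noted above,
 * pass 0 for an alignment argument when the corresponding data is not
 * used (compare BSS_SECTION(0, 0) in the sample at the top of this file).
 */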