/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU(PAGE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef SYMBOL_PREFIX
#define VMLINUX_SYMBOL(sym) sym
#else
#define PASTE2(x,y) x##y
#define PASTE(x,y) PASTE2(x,y)
#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
#endif

/* Align . to an 8 byte boundary, which equals the maximum function
 * alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
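
/*
 * Expansion sketch, derivable from the macros above: the ## paste means
 * e.g. DEV_KEEP(init.data) becomes the input section pattern
 * *(.devinit.data).  With CONFIG_HOTPLUG=y:
 *
 *	DEV_KEEP(init.data)	->	*(.devinit.data)
 *	DEV_DISCARD(init.data)	->	(empty)
 *
 * With CONFIG_HOTPLUG unset the two swap roles, so the DEV_DISCARD()
 * references in INIT_DATA/EXIT_DATA further down collect the same
 * sections for discarding instead of keeping them.
 */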

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;   \
				*(_ftrace_branch)			      \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			*(__trace_printk_fmt) /* trace_printk() format pointers */ \
			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)				\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif


#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	VMLINUX_SYMBOL(__dtb_start) = .;				\
	*(.dtb.init.rodata)						\
	VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	DEV_KEEP(init.data)						\
	DEV_KEEP(exit.data)						\
	CPU_KEEP(init.data)						\
	CPU_KEEP(exit.data)						\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	. = ALIGN(32);							\
	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
	*(__tracepoints)						\
	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
									\
	STRUCT_ALIGN();							\
	FTRACE_EVENTS()							\
									\
	STRUCT_ALIGN();							\
	TRACE_SYSCALLS()
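
/*
 * Consumption sketch (a common kernel pattern, not copied from any one
 * file): the __start_xxx/__stop_xxx pairs emitted above behave like
 * array bounds that C code can declare and walk, e.g. for tracepoints:
 *
 *	extern struct tracepoint __start___tracepoints[];
 *	extern struct tracepoint __stop___tracepoints[];
 *
 *	struct tracepoint *tp;
 *	for (tp = __start___tracepoints; tp < __stop___tracepoints; tp++)
 *		visit(tp);	// visit() is a placeholder callback
 */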

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_begin) = .;				\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..init_task)

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)						\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
		*(__markers_strings)	/* Markers: strings */		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	JUMP_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_ops        : AT(ADDR(.rio_ops) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rio_switch_ops) = .;		\
		*(.rio_switch_ops)					\
		VMLINUX_SYMBOL(__end_rio_switch_ops) = .;		\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		DEV_KEEP(init.rodata)					\
		DEV_KEEP(exit.rodata)					\
		CPU_KEEP(init.rodata)					\
		CPU_KEEP(exit.rodata)					\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___modver) = .;			\
		*(__modver)						\
		VMLINUX_SYMBOL(__stop___modver) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)
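
/*
 * Usage sketch: an architecture's vmlinux.lds.S typically emits all of
 * the above with a single invocation, e.g.
 *
 *	RO_DATA(PAGE_SIZE)
 *
 * which lays out .rodata, the fixup and symbol tables, and the
 * __start_rodata/__end_rodata markers, aligned to @align at both ends.
 */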

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Align to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot)						\
		*(.text)						\
		*(.ref.text)						\
	DEV_KEEP(init.text)						\
	DEV_KEEP(exit.text)						\
	CPU_KEEP(init.text)						\
	CPU_KEEP(exit.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\
		*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}
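
/*
 * Composition sketch: these helpers are intended to be listed inside an
 * architecture's .text output section, along the lines of
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *		IRQENTRY_TEXT
 *	}
 *
 * The ALIGN_FUNCTION() at the start of each helper is what keeps the
 * symbol addresses stable between the two ld passes used for System.map.
 */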

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ex_table) = .;			\
		*(__ex_table)						\
		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);				   \
			VMLINUX_SYMBOL(__ctors_start) = .;	   \
			*(.ctors)				   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	DEV_DISCARD(init.data)						\
	CPU_DISCARD(init.data)						\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
	*(.init.rodata)							\
	MCOUNT_REC()							\
	DEV_DISCARD(init.rodata)					\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.rodata)					\
	KERNEL_DTB()

#define INIT_TEXT							\
	*(.init.text)							\
	DEV_DISCARD(init.text)						\
	CPU_DISCARD(init.text)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	DEV_DISCARD(exit.data)						\
	DEV_DISCARD(exit.rodata)					\
	CPU_DISCARD(exit.data)						\
	CPU_DISCARD(exit.rodata)					\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	DEV_DISCARD(exit.text)						\
	CPU_DISCARD(exit.text)						\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)
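
/*
 * Where the .init/.exit input sections come from (simplified sketch of
 * the attribute macros in include/linux/init.h):
 *
 *	#define __init __attribute__((__section__(".init.text")))
 *	#define __exit __attribute__((__section__(".exit.text")))
 *
 * A function marked __init lands in .init.text, is collected by
 * INIT_TEXT above, and its memory can be freed once boot completes;
 * EXIT_TEXT/EXIT_DATA likewise collect code only needed on module
 * unload, which built-in objects may discard entirely.
 */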

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.sbss)						\
		*(.scommon)						\
	}

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(.bss)							\
		*(COMMON)						\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\

		/* Stabs debugging sections. */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif

#define JUMP_TABLE							\
	. = ALIGN(8);							\
	__jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___jump_table) = .;		\
		*(__jump_table)						\
		VMLINUX_SYMBOL(__stop___jump_table) = .;		\
	}

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		VMLINUX_SYMBOL(__setup_start) = .;			\
		*(.init.setup)						\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INITCALLS							\
	*(.initcallearly.init)						\
	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
	*(.initcall0.init)						\
	*(.initcall0s.init)						\
	*(.initcall1.init)						\
	*(.initcall1s.init)						\
	*(.initcall2.init)						\
	*(.initcall2s.init)						\
	*(.initcall3.init)						\
	*(.initcall3s.init)						\
	*(.initcall4.init)						\
	*(.initcall4s.init)						\
	*(.initcall5.init)						\
	*(.initcall5s.init)						\
	*(.initcallrootfs.init)						\
	*(.initcall6.init)						\
	*(.initcall6s.init)						\
	*(.initcall7.init)						\
	*(.initcall7s.init)

#define INIT_CALLS							\
		VMLINUX_SYMBOL(__initcall_start) = .;			\
		INITCALLS						\
		VMLINUX_SYMBOL(__initcall_end) = .;
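
/*
 * Consumption sketch (simplified from include/linux/init.h and
 * init/main.c): an initcall is a function pointer placed into one of
 * the per-level .initcallN.init sections, and boot code walks the array
 * the linker assembled:
 *
 *	typedef int (*initcall_t)(void);
 *	#define __define_initcall(level, fn, id) \
 *		static initcall_t __initcall_##fn##id __used \
 *		__attribute__((__section__(".initcall" level ".init"))) = fn
 *
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *	for (call = __initcall_start; call < __initcall_end; call++)
 *		do_one_initcall(*call);
 *
 * The textual order of the input section patterns above is what gives
 * the initcall levels their run order.
 */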

#define CON_INITCALL							\
		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
		*(.con_initcall.init)					\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL						\
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__initramfs_start) = .;				\
	*(.init.ramfs)							\
	. = ALIGN(8);							\
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
	EXIT_TEXT							\
	EXIT_DATA							\
	EXIT_CALL							\
	*(.discard)							\
	*(.discard.*)							\
	}

/**
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.  If @vaddr
 * is not blank, it specifies explicit base address and all percpu
 * symbols will be offset from the given address.  If blank, @vaddr
 * always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
 */
#define PERCPU_VADDR(vaddr, phdr)					\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data..percpu..first)					\
		. = ALIGN(PAGE_SIZE);					\
		*(.data..percpu..page_aligned)				\
		*(.data..percpu..readmostly)				\
		*(.data..percpu)					\
		*(.data..percpu..shared_aligned)			\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
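
/*
 * Usage sketch (modelled on the x86_64 SMP linker script of this era;
 * treat the details as illustrative): the percpu area gets its own PHDR
 * and a zero base so every percpu symbol is a small offset from zero:
 *
 *	PHDRS {
 *		...
 *		percpu PT_LOAD FLAGS(6);	// RW_
 *		...
 *	}
 *	...
 *	PERCPU_VADDR(0, :percpu)
 *
 * Remember the sticky-PHDR warning above: the next output section must
 * then name its own PHDR explicitly.
 */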

/**
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Aligns to @align and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load
 * and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU(align)							\
	. = ALIGN(align);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data..percpu..first)					\
		. = ALIGN(PAGE_SIZE);					\
		*(.data..percpu..page_aligned)				\
		*(.data..percpu..readmostly)				\
		*(.data..percpu)					\
		*(.data..percpu..shared_aligned)			\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page aligned data is not used.
 */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(_sinittext) = .;				\
		INIT_TEXT						\
		VMLINUX_SYMBOL(_einittext) = .;				\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	VMLINUX_SYMBOL(__bss_start) = .;				\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	VMLINUX_SYMBOL(__bss_stop) = .;
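
/*
 * Putting it together (a sketch mirroring the sample at the top of this
 * file): a simple architecture can lay out its whole data/bss range as
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *	_edata = .;
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 * where L1_CACHE_BYTES and THREAD_SIZE stand in for the architecture's
 * cacheline size and init task stack size.
 */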