/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA(PAGE_SIZE)
 *	RW_DATA(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

/*
 * Only some architectures want to have the .notes segment visible in
 * a separate PT_NOTE ELF Program Header. When this happens, it needs
 * to be visible in both the kernel text's PT_LOAD and the PT_NOTE
 * Program Headers. In this case, though, the PT_LOAD needs to be made
 * the default again so that all the following sections don't also end
 * up in the PT_NOTE Program Header.
 */
#ifdef EMITS_PT_NOTE
#define NOTES_HEADERS		:text :note
#define NOTES_HEADERS_RESTORE	__restore_ph : { *(.__restore_ph) } :text
#else
#define NOTES_HEADERS
#define NOTES_HEADERS_RESTORE
#endif

/*
 * Some architectures have non-executable read-only exception tables.
 * They can be added to the RO_DATA segment by specifying their desired
 * alignment.
 */
#ifdef RO_EXCEPTION_TABLE_ALIGN
#define RO_EXCEPTION_TABLE	EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
#else
#define RO_EXCEPTION_TABLE
#endif

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * The LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
 * generates .data.identifier sections, which need to be pulled in with
 * .data. We don't want to pull in .data..other sections, which Linux
 * has defined. The same goes for text and bss.
 *
 * RODATA_MAIN is not used because existing code already defines .rodata.x
 * sections to be brought in with rodata.
 */
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
#define SBSS_MAIN .sbss
#endif

/*
 * Align to a 32 byte boundary, equal to the
 * alignment gcc 4.5 uses for a struct.
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
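
/*
 * Illustration (hypothetical identifiers, not from a real build): with
 * -ffunction-sections/-fdata-sections the compiler emits one section
 * per symbol, e.g.:
 *
 *	.text.my_func	for	void my_func(void)
 *	.data.my_table	for	int my_table[4]
 *
 * The single-dot TEXT_MAIN/DATA_MAIN patterns above pull those back
 * into .text/.data, while the kernel's own double-dot sections
 * (e.g. .data..cacheline_aligned) deliberately stay unmatched and are
 * placed explicitly elsewhere in this file.
 */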

/*
 * The actual configuration determines whether the init/exit sections
 * are handled as text/data or can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The ftrace call sites are logged to a section whose name depends on the
 * compiler option used. A given kernel image will only use one, AKA
 * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
 * dependencies for FTRACE_CALLSITE_SECTION's definition.
 *
 * We also need to make ftrace_stub_graph point to ftrace_stub
 * so that the same stub location may serve different protocols
 * without confusing the C verifiers.
 */
#define MCOUNT_REC()	. = ALIGN(8);				\
			__start_mcount_loc = .;			\
			KEEP(*(__mcount_loc))			\
			KEEP(*(__patchable_function_entries))	\
			__stop_mcount_loc = .;			\
			ftrace_stub_graph = ftrace_stub;
#else
# ifdef CONFIG_FUNCTION_TRACER
#  define MCOUNT_REC()	ftrace_stub_graph = ftrace_stub;
# else
#  define MCOUNT_REC()
# endif
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	__start_annotated_branch_profile = .;	\
				KEEP(*(_ftrace_annotated_branch))	\
				__stop_annotated_branch_profile = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	__start_branch_profile = .;		\
				KEEP(*(_ftrace_branch))			\
				__stop_branch_profile = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);				\
				__start_kprobe_blacklist = .;		\
				KEEP(*(_kprobe_blacklist))		\
				__stop_kprobe_blacklist = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST()	STRUCT_ALIGN();			\
			__start_error_injection_whitelist = .;		\
			KEEP(*(_error_injection_whitelist))		\
			__stop_error_injection_whitelist = .;
#else
#define ERROR_INJECT_WHITELIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			__start_ftrace_events = .;			\
			KEEP(*(_ftrace_events))				\
			__stop_ftrace_events = .;			\
			__start_ftrace_eval_maps = .;			\
			KEEP(*(_ftrace_eval_map))			\
			__stop_ftrace_eval_maps = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	 __start___trace_bprintk_fmt = .;		\
			 KEEP(*(__trace_printk_fmt)) /* trace_printk fmt pointers */ \
			 __stop___trace_bprintk_fmt = .;
#define TRACEPOINT_STR() __start___tracepoint_str = .;			\
			 KEEP(*(__tracepoint_str)) /* tracepoint str pointers */ \
			 __stop___tracepoint_str = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 __start_syscalls_metadata = .;			\
			 KEEP(*(__syscalls_metadata))			\
			 __stop_syscalls_metadata = .;
#else
#define TRACE_SYSCALLS()
#endif
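
/*
 * All of the table helpers above follow the same pattern: bracket an
 * input section with start/stop symbols so C code can walk the entries
 * as an array. A sketch of the consumer side, using the symbols from
 * KPROBE_BLACKLIST() (blacklist_one() is a hypothetical helper; see
 * kernel/kprobes.c for the real walker):
 *
 *	extern unsigned long __start_kprobe_blacklist[];
 *	extern unsigned long __stop_kprobe_blacklist[];
 *	unsigned long *entry;
 *
 *	for (entry = __start_kprobe_blacklist;
 *	     entry < __stop_kprobe_blacklist; entry++)
 *		blacklist_one(*entry);
 */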

#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN();					\
			 __start__bpf_raw_tp = .;			\
			 KEEP(*(__bpf_raw_tp_map))			\
			 __stop__bpf_raw_tp = .;
#else
#define BPF_RAW_TP()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() . = ALIGN(8);					\
			 __earlycon_table = .;				\
			 KEEP(*(__earlycon_table))			\
			 __earlycon_table_end = .;
#else
#define EARLYCON_TABLE()
#endif

#ifdef CONFIG_SECURITY
#define LSM_TABLE()	. = ALIGN(8);					\
			__start_lsm_info = .;				\
			KEEP(*(.lsm_info.init))				\
			__end_lsm_info = .;
#define EARLY_LSM_TABLE()	. = ALIGN(8);				\
			__start_early_lsm_info = .;			\
			KEEP(*(.early_lsm_info.init))			\
			__end_early_lsm_info = .;
#else
#define LSM_TABLE()
#define EARLY_LSM_TABLE()
#endif

#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)						\
	. = ALIGN(8);							\
	__##name##_of_table = .;					\
	KEEP(*(__##name##_of_table))					\
	KEEP(*(__##name##_of_table_end))

#define TIMER_OF_TABLES()	OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
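
/*
 * Expansion sketch: OF_TABLE() pastes the 0/1 result of IS_ENABLED(),
 * so with CONFIG_COMMON_CLK=y, CLK_OF_TABLES() resolves to
 * _OF_TABLE_1(clk), i.e.:
 *
 *	. = ALIGN(8);
 *	__clk_of_table = .;
 *	KEEP(*(__clk_of_table))
 *	KEEP(*(__clk_of_table_end))
 *
 * With the option disabled it resolves to _OF_TABLE_0(clk), i.e.
 * nothing at all.
 */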

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)						\
	. = ALIGN(8);							\
	__##name##_acpi_probe_table = .;				\
	KEEP(*(__##name##_acpi_probe_table))				\
	__##name##_acpi_probe_table_end = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#ifdef CONFIG_THERMAL
#define THERMAL_TABLE(name)						\
	. = ALIGN(8);							\
	__##name##_thermal_table = .;					\
	KEEP(*(__##name##_thermal_table))				\
	__##name##_thermal_table_end = .;
#else
#define THERMAL_TABLE(name)
#endif

#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	__dtb_start = .;						\
	KEEP(*(.dtb.init.rodata))					\
	__dtb_end = .;

/*
 * .data section
 */
#define DATA_DATA							\
	*(.xiptext)							\
	*(DATA_MAIN)							\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	MEM_KEEP(init.data*)						\
	MEM_KEEP(exit.data*)						\
	*(.data.unlikely)						\
	__start_once = .;						\
	*(.data.once)							\
	__end_once = .;							\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	__start___verbose = .;						\
	KEEP(*(__verbose))						\
	__stop___verbose = .;						\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	BPF_RAW_TP()							\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	__nosave_begin = .;						\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	__nosave_end = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	__start_init_task = .;						\
	init_thread_union = .;						\
	init_stack = .;							\
	KEEP(*(.data..init_task))					\
	KEEP(*(.data..init_thread_info))				\
	. = __start_init_task + THREAD_SIZE;				\
	__end_init_task = .;

#define JUMP_TABLE_DATA							\
	. = ALIGN(8);							\
	__start___jump_table = .;					\
	KEEP(*(__jump_table))						\
	__stop___jump_table = .;

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA						\
	__start_ro_after_init = .;					\
	*(.data..ro_after_init)						\
	JUMP_TABLE_DATA							\
	__end_ro_after_init = .;
#endif
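
/*
 * For reference, C code opts into the window above via the
 * __ro_after_init attribute, which roughly expands to (a sketch; see
 * <linux/cache.h> for the real definition, and treat the variable
 * name as hypothetical):
 *
 *	#define __ro_after_init \
 *		__attribute__((__section__(".data..ro_after_init")))
 *
 *	static int param_max __ro_after_init = 16;
 */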

/*
 * Read only Data
 */
#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		__start_rodata = .;					\
		*(.rodata) *(.rodata.*)					\
		RO_AFTER_INIT_DATA	/* Read only after init */	\
		. = ALIGN(8);						\
		__start___tracepoints_ptrs = .;				\
		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
		__stop___tracepoints_ptrs = .;				\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		__start_pci_fixups_early = .;				\
		KEEP(*(.pci_fixup_early))				\
		__end_pci_fixups_early = .;				\
		__start_pci_fixups_header = .;				\
		KEEP(*(.pci_fixup_header))				\
		__end_pci_fixups_header = .;				\
		__start_pci_fixups_final = .;				\
		KEEP(*(.pci_fixup_final))				\
		__end_pci_fixups_final = .;				\
		__start_pci_fixups_enable = .;				\
		KEEP(*(.pci_fixup_enable))				\
		__end_pci_fixups_enable = .;				\
		__start_pci_fixups_resume = .;				\
		KEEP(*(.pci_fixup_resume))				\
		__end_pci_fixups_resume = .;				\
		__start_pci_fixups_resume_early = .;			\
		KEEP(*(.pci_fixup_resume_early))			\
		__end_pci_fixups_resume_early = .;			\
		__start_pci_fixups_suspend = .;				\
		KEEP(*(.pci_fixup_suspend))				\
		__end_pci_fixups_suspend = .;				\
		__start_pci_fixups_suspend_late = .;			\
		KEEP(*(.pci_fixup_suspend_late))			\
		__end_pci_fixups_suspend_late = .;			\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {		\
		__start_builtin_fw = .;					\
		KEEP(*(.builtin_fw))					\
		__end_builtin_fw = .;					\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		__start___ksymtab = .;					\
		KEEP(*(SORT(___ksymtab+*)))				\
		__stop___ksymtab = .;					\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		__start___ksymtab_gpl = .;				\
		KEEP(*(SORT(___ksymtab_gpl+*)))				\
		__stop___ksymtab_gpl = .;				\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		__start___ksymtab_unused = .;				\
		KEEP(*(SORT(___ksymtab_unused+*)))			\
		__stop___ksymtab_unused = .;				\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		__start___ksymtab_unused_gpl = .;			\
		KEEP(*(SORT(___ksymtab_unused_gpl+*)))			\
		__stop___ksymtab_unused_gpl = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		__start___ksymtab_gpl_future = .;			\
		KEEP(*(SORT(___ksymtab_gpl_future+*)))			\
		__stop___ksymtab_gpl_future = .;			\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		__start___kcrctab = .;					\
		KEEP(*(SORT(___kcrctab+*)))				\
		__stop___kcrctab = .;					\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		__start___kcrctab_gpl = .;				\
		KEEP(*(SORT(___kcrctab_gpl+*)))				\
		__stop___kcrctab_gpl = .;				\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		__start___kcrctab_unused = .;				\
		KEEP(*(SORT(___kcrctab_unused+*)))			\
		__stop___kcrctab_unused = .;				\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		__start___kcrctab_unused_gpl = .;			\
		KEEP(*(SORT(___kcrctab_unused_gpl+*)))			\
		__stop___kcrctab_unused_gpl = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		__start___kcrctab_gpl_future = .;			\
		KEEP(*(SORT(___kcrctab_gpl_future+*)))			\
		__stop___kcrctab_gpl_future = .;			\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		__start___param = .;					\
		KEEP(*(__param))					\
		__stop___param = .;					\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		__start___modver = .;					\
		KEEP(*(__modver))					\
		__stop___modver = .;					\
	}								\
									\
	RO_EXCEPTION_TABLE						\
	NOTES								\
									\
	. = ALIGN((align));						\
	__end_rodata = .;
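
/*
 * Note on the SORT(___ksymtab+*) patterns above: EXPORT_SYMBOL() emits
 * one input section per exported symbol, named "___ksymtab+<symbol>"
 * (the GPL variants use "___ksymtab_gpl+<symbol>" and so on), so
 * sorting by section name yields tables ordered by symbol name, which
 * makes fast symbol lookup possible.
 */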

/*
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 *
 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
 * code elimination is enabled, so these sections should be converted
 * to use ".." first.
 */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely)	\
		*(.text..refcount)					\
		*(.ref.text)						\
	MEM_KEEP(init.text*)						\
	MEM_KEEP(exit.text*)


/* sched.text is aligned to function alignment to ensure we have the
 * same address even at the second ld pass when generating System.map. */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		__sched_text_start = .;					\
		*(.sched.text)						\
		__sched_text_end = .;

/* spinlock.text is aligned to function alignment to ensure we have the
 * same address even at the second ld pass when generating System.map. */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		__lock_text_start = .;					\
		*(.spinlock.text)					\
		__lock_text_end = .;

#define CPUIDLE_TEXT							\
		ALIGN_FUNCTION();					\
		__cpuidle_text_start = .;				\
		*(.cpuidle.text)					\
		__cpuidle_text_end = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		__kprobes_text_start = .;				\
		*(.kprobes.text)					\
		__kprobes_text_end = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		__entry_text_start = .;					\
		*(.entry.text)						\
		__entry_text_end = .;

#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		__irqentry_text_start = .;				\
		*(.irqentry.text)					\
		__irqentry_text_end = .;

#define SOFTIRQENTRY_TEXT						\
		ALIGN_FUNCTION();					\
		__softirqentry_text_start = .;				\
		*(.softirqentry.text)					\
		__softirqentry_text_end = .;

/* Section used for early init (in .S files) */
#define HEAD_TEXT  KEEP(*(.head.text))

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}
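
/*
 * Illustrative only: an arch .text output section built from the
 * helpers above often looks roughly like this (the exact contents and
 * ordering are arch-specific):
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		HEAD_TEXT
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *		IRQENTRY_TEXT
 *		SOFTIRQENTRY_TEXT
 *	}
 */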

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		__start___ex_table = .;					\
		KEEP(*(__ex_table))					\
		__stop___ex_table = .;					\
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);					\
			__ctors_start = .;				\
			KEEP(*(.ctors))					\
			KEEP(*(SORT(.init_array.*)))			\
			KEEP(*(.init_array))				\
			__ctors_end = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	KEEP(*(SORT(___kentry+*)))					\
	*(.init.data init.data.*)					\
	MEM_DISCARD(init.data*)						\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata .init.rodata.*)					\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	KPROBE_BLACKLIST()						\
	ERROR_INJECT_WHITELIST()					\
	MEM_DISCARD(init.rodata)					\
	CLK_OF_TABLES()							\
	RESERVEDMEM_OF_TABLES()						\
	TIMER_OF_TABLES()						\
	CPU_METHOD_OF_TABLES()						\
	CPUIDLE_METHOD_OF_TABLES()					\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()					\
	ACPI_PROBE_TABLE(irqchip)					\
	ACPI_PROBE_TABLE(timer)						\
	THERMAL_TABLE(governor)						\
	EARLYCON_TABLE()						\
	LSM_TABLE()							\
	EARLY_LSM_TABLE()

#define INIT_TEXT							\
	*(.init.text .init.text.*)					\
	*(.text.startup)						\
	MEM_DISCARD(init.text*)

#define EXIT_DATA							\
	*(.exit.data .exit.data.*)					\
	*(.fini_array .fini_array.*)					\
	*(.dtors .dtors.*)						\
	MEM_DISCARD(exit.data*)						\
	MEM_DISCARD(exit.rodata*)

#define EXIT_TEXT							\
	*(.exit.text)							\
	*(.text.exit)							\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.dynsbss)						\
		*(SBSS_MAIN)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(BSS_MAIN)						\
		*(COMMON)						\
	}
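
/*
 * Early boot code zeroes this whole region. A sketch of the generic
 * pattern (arch code varies; __bss_start and __bss_stop are the real
 * symbols defined by BSS_SECTION() at the end of this file):
 *
 *	extern char __bss_start[], __bss_stop[];
 *
 *	memset(__bss_start, 0, __bss_stop - __bss_start);
 */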

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section, so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		.debug_pubtypes 0 : { *(.debug_pubtypes) }		\
		/* DWARF 3 */						\
		.debug_ranges	0 : { *(.debug_ranges) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\
		/* GNU DWARF 2 extensions */				\
		.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) }	\
		.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) }	\
		/* DWARF 4 */						\
		.debug_types	0 : { *(.debug_types) }			\
		/* DWARF 5 */						\
		.debug_macro	0 : { *(.debug_macro) }			\
		.debug_addr	0 : { *(.debug_addr) }

/* Stabs debugging sections. */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		__start___bug_table = .;				\
		KEEP(*(__bug_table))					\
		__stop___bug_table = .;					\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE						\
	. = ALIGN(4);							\
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) {	\
		__start_orc_unwind_ip = .;				\
		KEEP(*(.orc_unwind_ip))					\
		__stop_orc_unwind_ip = .;				\
	}								\
	. = ALIGN(2);							\
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) {		\
		__start_orc_unwind = .;					\
		KEEP(*(.orc_unwind))					\
		__stop_orc_unwind = .;					\
	}								\
	. = ALIGN(4);							\
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) {		\
		orc_lookup = .;						\
		. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) /	\
			LOOKUP_BLOCK_SIZE) + 1) * 4;			\
		orc_lookup_end = .;					\
	}
#else
#define ORC_UNWIND_TABLE
#endif
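
/*
 * Worked example for the orc_lookup sizing above, assuming
 * LOOKUP_BLOCK_SIZE = 256 and SIZEOF(.text) = 1000 (illustrative
 * numbers): ((1000 + 255) / 256 + 1) * 4 = (4 + 1) * 4 = 20 bytes,
 * i.e. one 4-byte lookup slot per 256-byte block of .text plus one
 * terminating slot.
 */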

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		__tracedata_start = .;					\
		KEEP(*(.tracedata))					\
		__tracedata_end = .;					\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		__start_notes = .;					\
		KEEP(*(.note.*))					\
		__stop_notes = .;					\
	} NOTES_HEADERS							\
	NOTES_HEADERS_RESTORE

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		__setup_start = .;					\
		KEEP(*(.init.setup))					\
		__setup_end = .;

#define INIT_CALLS_LEVEL(level)						\
		__initcall##level##_start = .;				\
		KEEP(*(.initcall##level##.init))			\
		KEEP(*(.initcall##level##s.init))

#define INIT_CALLS							\
		__initcall_start = .;					\
		KEEP(*(.initcallearly.init))				\
		INIT_CALLS_LEVEL(0)					\
		INIT_CALLS_LEVEL(1)					\
		INIT_CALLS_LEVEL(2)					\
		INIT_CALLS_LEVEL(3)					\
		INIT_CALLS_LEVEL(4)					\
		INIT_CALLS_LEVEL(5)					\
		INIT_CALLS_LEVEL(rootfs)				\
		INIT_CALLS_LEVEL(6)					\
		INIT_CALLS_LEVEL(7)					\
		__initcall_end = .;

#define CON_INITCALL							\
		__con_initcall_start = .;				\
		KEEP(*(.con_initcall.init))				\
		__con_initcall_end = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	__initramfs_start = .;						\
	KEEP(*(.init.ramfs))						\
	. = ALIGN(8);							\
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
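
/*
 * The level numbers above correspond to the *_initcall() families in
 * <linux/init.h>: e.g. core_initcall() emits its pointer into
 * .initcall1.init and device_initcall() into .initcall6.init, and
 * rootfs_initcall() entries run between levels 5 and 6, matching the
 * INIT_CALLS_LEVEL(rootfs) placement above.
 */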

/*
 * Memory encryption operates on a page basis. Since we need to clear
 * the memory encryption mask for this section, it needs to be aligned
 * on a page boundary and be a page-size multiple in length.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION					\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..decrypted)					\
	. = ALIGN(PAGE_SIZE);
#else
#define PERCPU_DECRYPTED_SECTION
#endif


/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
		EXIT_TEXT						\
		EXIT_DATA						\
		EXIT_CALL						\
		*(.discard)						\
		*(.discard.*)						\
		*(.modinfo)						\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	__per_cpu_start = .;						\
	*(.data..percpu..first)						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu..read_mostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	PERCPU_DECRYPTED_SECTION					\
	__per_cpu_end = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	__per_cpu_load = .;						\
	.data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) {	\
		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = __per_cpu_load + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Align to PAGE_SIZE and output the section for the percpu area. This
 * macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configurations.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		__per_cpu_load = .;					\
		PERCPU_INPUT(cacheline)					\
	}


/*
 * Definition of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (if not always) smaller than a PAGE_SIZE,
 * so the sections with that restriction (or a similar one) are
 * located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page aligned data is not used.
 */
#define RW_DATA(cacheline, pagealigned, inittask)			\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}								\
	BUG_TABLE

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		_sinittext = .;						\
		INIT_TEXT						\
		_einittext = .;						\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	__bss_start = .;						\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	__bss_stop = .;
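
/*
 * Usage sketch (illustrative values): an architecture with 64-byte
 * cachelines, page aligned data and a THREAD_SIZE initial task stack
 * would end its data placement with something like:
 *
 *	RW_DATA(64, PAGE_SIZE, THREAD_SIZE)
 *	BSS_SECTION(0, 0, 0)
 */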