/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data.
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * The LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
 * generates .data.identifier sections, which need to be pulled in with
 * .data. We don't want to pull in .data..other sections, which Linux
 * has defined. Same for text and bss.
 */
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define BSS_MAIN .bss
#endif

/*
 * Align to a 32 byte boundary, equal to the
 * alignment gcc 4.5 uses for a struct.
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/*
 * The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
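/*
 * Illustrative note (not from the original source): with -fdata-sections
 * and -ffunction-sections, the compiler emits one section per symbol, so
 * a hypothetical "static int foo;" lands in ".data.foo". The glob
 * ".data.[0-9a-zA-Z_]*" above matches such compiler-generated sections
 * but not the kernel's own special sections such as
 * ".data..cacheline_aligned", because the character class excludes '.',
 * so the double-dot names stay out of the main .data output section.
 */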
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .;	\
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .; \
				*(_ftrace_branch)			    \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);				      \
				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
				KEEP(*(_kprobe_blacklist))		      \
				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST()	STRUCT_ALIGN();			       \
			VMLINUX_SYMBOL(__start_error_injection_whitelist) = .; \
			KEEP(*(_error_injection_whitelist))		       \
			VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .;
#else
#define ERROR_INJECT_WHITELIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			KEEP(*(_ftrace_events))				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
			VMLINUX_SYMBOL(__start_ftrace_eval_maps) = .;	\
			KEEP(*(_ftrace_eval_map))			\
			VMLINUX_SYMBOL(__stop_ftrace_eval_maps) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	 VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			 KEEP(*(__trace_printk_fmt)) /* trace_printk fmt pointers */ \
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .;	\
			 KEEP(*(__tracepoint_str)) /* tracepoint strings */ \
			 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 KEEP(*(__syscalls_metadata))			\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN();			\
			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
			 KEEP(*(__earlycon_table))		\
			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
#else
#define EARLYCON_TABLE()
#endif
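/*
 * Illustrative sketch (not from the original source): the table macros
 * above all follow the same pattern. Objects are placed into a named
 * input section, the linker script brackets them with start/stop
 * symbols, and C code walks the resulting array. Roughly:
 *
 *	// hypothetical example
 *	static struct my_entry entry
 *		__attribute__((used, section("_my_table"))) = { ... };
 *
 *	extern struct my_entry __start_my_table[], __stop_my_table[];
 *	struct my_entry *p;
 *	for (p = __start_my_table; p < __stop_my_table; p++)
 *		handle(p);
 *
 * The KEEP() wrappers prevent the linker from dropping these sections
 * under --gc-sections, since nothing references them directly.
 */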
#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__##name##_of_table) = .;			\
	KEEP(*(__##name##_of_table))					\
	KEEP(*(__##name##_of_table_end))

#define TIMER_OF_TABLES()	OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .;		\
	KEEP(*(__##name##_acpi_probe_table))				\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	VMLINUX_SYMBOL(__dtb_start) = .;				\
	KEEP(*(.dtb.init.rodata))					\
	VMLINUX_SYMBOL(__dtb_end) = .;

/*
 * .data section
 */
#define DATA_DATA							\
	*(.xiptext)							\
	*(DATA_MAIN)							\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	*(.data.unlikely)						\
	VMLINUX_SYMBOL(__start_once) = .;				\
	*(.data.once)							\
	VMLINUX_SYMBOL(__end_once) = .;					\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___jump_table) = .;			\
	KEEP(*(__jump_table))						\
	VMLINUX_SYMBOL(__stop___jump_table) = .;			\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	KEEP(*(__verbose))						\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_begin) = .;				\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	VMLINUX_SYMBOL(__start_init_task) = .;				\
	VMLINUX_SYMBOL(init_thread_union) = .;				\
	VMLINUX_SYMBOL(init_stack) = .;					\
	*(.data..init_task)						\
	*(.data..init_thread_info)					\
	. = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE;		\
	VMLINUX_SYMBOL(__end_init_task) = .;

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA						\
	VMLINUX_SYMBOL(__start_ro_after_init) = .;			\
	*(.data..ro_after_init)						\
	VMLINUX_SYMBOL(__end_ro_after_init) = .;
#endif
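/*
 * Illustrative example (not from the original source): C code opts into
 * the .data..ro_after_init input section collected above via the
 * __ro_after_init attribute, e.g.:
 *
 *	static int my_policy __ro_after_init;	// hypothetical variable
 *
 * Such data is written during boot and then mapped read-only once init
 * completes, using the [__start_ro_after_init, __end_ro_after_init)
 * range defined here.
 */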
/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)						\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		RO_AFTER_INIT_DATA	/* Read only after init */	\
		KEEP(*(__vermagic))	/* Kernel version magic */	\
		. = ALIGN(8);						\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
		*(__tracepoints_strings)	/* Tracepoints: strings */ \
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		KEEP(*(.pci_fixup_early))				\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		KEEP(*(.pci_fixup_header))				\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		KEEP(*(.pci_fixup_final))				\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		KEEP(*(.pci_fixup_enable))				\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		KEEP(*(.pci_fixup_resume))				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		KEEP(*(.pci_fixup_resume_early))			\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		KEEP(*(.pci_fixup_suspend))				\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
		KEEP(*(.pci_fixup_suspend_late))			\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		KEEP(*(.builtin_fw))					\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		KEEP(*(SORT(___ksymtab+*)))				\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		KEEP(*(SORT(___ksymtab_gpl+*)))				\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		KEEP(*(SORT(___ksymtab_unused+*)))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		KEEP(*(SORT(___ksymtab_unused_gpl+*)))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		KEEP(*(SORT(___ksymtab_gpl_future+*)))			\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		KEEP(*(SORT(___kcrctab+*)))				\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		KEEP(*(SORT(___kcrctab_gpl+*)))				\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		KEEP(*(SORT(___kcrctab_unused+*)))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		KEEP(*(SORT(___kcrctab_unused_gpl+*)))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		KEEP(*(SORT(___kcrctab_gpl_future+*)))			\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		KEEP(*(__param))					\
		VMLINUX_SYMBOL(__stop___param) = .;			\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___modver) = .;			\
		KEEP(*(__modver))					\
		VMLINUX_SYMBOL(__stop___modver) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/*
 * RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA().
 */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)
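/*
 * Illustrative note (not from the original source): the SORT(___ksymtab+*)
 * patterns above collect per-symbol input sections. EXPORT_SYMBOL(foo)
 * places its table entry in a section named "___ksymtab+foo" (and, with
 * CONFIG_MODVERSIONS, its CRC in "___kcrctab+foo"), so sorting by section
 * name yields a symbol table sorted by symbol name, which the module
 * loader can then binary-search at load time.
 */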
#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		KEEP(*(.security_initcall.init))			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/*
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 *
 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
 * code elimination is enabled, so these sections should be converted
 * to use ".." first.
 */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely)	\
		*(.text..refcount)					\
		*(.ref.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)

/*
 * sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map.
 */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/*
 * spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map.
 */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;
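/*
 * Illustrative sketch (not from the original source): the paired
 * start/end text symbols support simple address-range classification;
 * helpers such as the scheduler's in_sched_functions() boil down to:
 *
 *	extern char __sched_text_start[], __sched_text_end[];
 *
 *	// hypothetical helper
 *	static inline int my_in_sched_text(unsigned long addr)
 *	{
 *		return addr >= (unsigned long)__sched_text_start &&
 *		       addr <  (unsigned long)__sched_text_end;
 *	}
 *
 * which is how stack traces decide whether to skip scheduler and
 * locking internals.
 */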
#define CPUIDLE_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__cpuidle_text_start) = .;		\
		*(.cpuidle.text)					\
		VMLINUX_SYMBOL(__cpuidle_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__entry_text_start) = .;			\
		*(.entry.text)						\
		VMLINUX_SYMBOL(__entry_text_end) = .;

#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;

#define SOFTIRQENTRY_TEXT						\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__softirqentry_text_start) = .;		\
		*(.softirqentry.text)					\
		VMLINUX_SYMBOL(__softirqentry_text_end) = .;

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ex_table) = .;			\
		KEEP(*(__ex_table))					\
		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}
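/*
 * Illustrative note (not from the original source): __ex_table entries
 * pair a faulting instruction address with a fixup address. The fault
 * handler searches [__start___ex_table, __stop___ex_table) -- see
 * search_exception_tables() -- and, on a match, resumes execution at
 * the fixup instead of oopsing. An arch linker script invokes the macro
 * with its required alignment, e.g.:
 *
 *	EXCEPTION_TABLE(16)	// sample invocation; alignment is arch specific
 */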
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			KEEP(*(.ctors))			   \
			KEEP(*(SORT(.init_array.*)))	   \
			KEEP(*(.init_array))		   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	KEEP(*(SORT(___kentry+*)))					\
	*(.init.data)							\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata)							\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	KPROBE_BLACKLIST()						\
	ERROR_INJECT_WHITELIST()					\
	MEM_DISCARD(init.rodata)					\
	CLK_OF_TABLES()							\
	RESERVEDMEM_OF_TABLES()						\
	TIMER_OF_TABLES()						\
	IOMMU_OF_TABLES()						\
	CPU_METHOD_OF_TABLES()						\
	CPUIDLE_METHOD_OF_TABLES()					\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()					\
	ACPI_PROBE_TABLE(irqchip)					\
	ACPI_PROBE_TABLE(timer)						\
	ACPI_PROBE_TABLE(iort)						\
	EARLYCON_TABLE()

#define INIT_TEXT							\
	*(.init.text)							\
	*(.text.startup)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	*(.fini_array)							\
	*(.dtors)							\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	*(.text.exit)							\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)
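/*
 * Illustrative note (not from the original source): code and data reach
 * the .init/.exit input sections above via the __init, __initdata and
 * __exit annotations, which are section attributes, e.g.:
 *
 *	static int __init mydrv_setup(void)	// hypothetical; lands in .init.text
 *	{ ... }
 *	static char mydrv_buf[16] __initdata;	// hypothetical; lands in .init.data
 *
 * Everything between __init_begin and __init_end is freed once boot
 * completes, per the sample script at the top of this file.
 */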
/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.dynsbss)						\
		*(.sbss)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(BSS_MAIN)						\
		*(COMMON)						\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		.debug_pubtypes 0 : { *(.debug_pubtypes) }		\
		/* DWARF 3 */						\
		.debug_ranges	0 : { *(.debug_ranges) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\
		/* GNU DWARF 2 extensions */				\
		.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) }	\
		.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) }	\
		/* DWARF 4 */						\
		.debug_types	0 : { *(.debug_types) }			\
		/* DWARF 5 */						\
		.debug_macro	0 : { *(.debug_macro) }			\
		.debug_addr	0 : { *(.debug_addr) }

/* Stabs debugging sections. */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		KEEP(*(__bug_table))					\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE						\
	. = ALIGN(4);							\
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_orc_unwind_ip) = .;		\
		KEEP(*(.orc_unwind_ip))					\
		VMLINUX_SYMBOL(__stop_orc_unwind_ip) = .;		\
	}								\
	. = ALIGN(6);							\
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_orc_unwind) = .;			\
		KEEP(*(.orc_unwind))					\
		VMLINUX_SYMBOL(__stop_orc_unwind) = .;			\
	}								\
	. = ALIGN(4);							\
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(orc_lookup) = .;				\
		. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) /	\
			LOOKUP_BLOCK_SIZE) + 1) * 4;			\
		VMLINUX_SYMBOL(orc_lookup_end) = .;			\
	}
#else
#define ORC_UNWIND_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		KEEP(*(.tracedata))					\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		VMLINUX_SYMBOL(__setup_start) = .;			\
		KEEP(*(.init.setup))					\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)						\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;		\
		KEEP(*(.initcall##level##.init))			\
		KEEP(*(.initcall##level##s.init))

#define INIT_CALLS							\
		VMLINUX_SYMBOL(__initcall_start) = .;			\
		KEEP(*(.initcallearly.init))				\
		INIT_CALLS_LEVEL(0)					\
		INIT_CALLS_LEVEL(1)					\
		INIT_CALLS_LEVEL(2)					\
		INIT_CALLS_LEVEL(3)					\
		INIT_CALLS_LEVEL(4)					\
		INIT_CALLS_LEVEL(5)					\
		INIT_CALLS_LEVEL(rootfs)				\
		INIT_CALLS_LEVEL(6)					\
		INIT_CALLS_LEVEL(7)					\
		VMLINUX_SYMBOL(__initcall_end) = .;

#define CON_INITCALL							\
		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
		KEEP(*(.con_initcall.init))				\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL						\
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		KEEP(*(.security_initcall.init))			\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__initramfs_start) = .;				\
	KEEP(*(.init.ramfs))						\
	. = ALIGN(8);							\
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
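/*
 * Illustrative note (not from the original source): the INIT_CALLS_LEVEL
 * ordering above is what implements initcall priorities. For example, a
 * hypothetical driver using device_initcall() (level 6) is placed in
 * .initcall6.init and therefore runs after a core_initcall() (level 1),
 * because boot-time initcall processing walks the function pointers
 * between __initcall_start and __initcall_end level by level, in link
 * order.
 */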
/*
 * Memory encryption operates on a page basis. Since we need to clear
 * the memory encryption mask for this section, it needs to be aligned
 * on a page boundary and be a page-size multiple in length.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted, to avoid exposing more than we wish.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION					\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..decrypted)					\
	. = ALIGN(PAGE_SIZE);
#else
#define PERCPU_DECRYPTED_SECTION
#endif


/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of the output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
		EXIT_TEXT						\
		EXIT_DATA						\
		EXIT_CALL						\
		*(.discard)						\
		*(.discard.*)						\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
	*(.data..percpu..first)						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu..read_mostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	PERCPU_DECRYPTED_SECTION					\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
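/*
 * Illustrative example (not from the original source): x86_64 SMP
 * configurations place the percpu area at virtual address 0 in its own
 * program header, roughly:
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * so that percpu symbols become small offsets from the per-CPU %gs base.
 * Most other architectures just use PERCPU_SECTION(), defined below.
 */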
/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area. This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		PERCPU_INPUT(cacheline)					\
	}


/*
 * Definition of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (if not always) smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one) are located
 * before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page_aligned data is not used.
 */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}								\
	BUG_TABLE

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(_sinittext) = .;				\
		INIT_TEXT						\
		VMLINUX_SYMBOL(_einittext) = .;				\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	VMLINUX_SYMBOL(__bss_start) = .;				\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	VMLINUX_SYMBOL(__bss_stop) = .;
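/*
 * Illustrative example (not from the original source): an architecture
 * linker script wires the high level macros together as in the sample at
 * the top of this file; concrete arguments commonly look like
 *
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *	BSS_SECTION(0, 0, 0)
 *
 * where the cacheline and THREAD_SIZE values are arch specific, and the
 * zeros for BSS_SECTION request no extra sbss/bss/stop alignment beyond
 * what the input sections themselves require.
 */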