/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

/* Align . to an 8 byte boundary, the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
 * generates .data.identifier sections, which need to be pulled in with
 * .data. We don't want to pull in .data..other sections, which Linux
 * has defined. Same for text and bss.
 *
 * RODATA_MAIN is not used because existing code already defines .rodata.x
 * sections to be brought in with rodata.
 */
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
#define SBSS_MAIN .sbss
#endif

/*
 * Align to a 32 byte boundary, equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
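
/*
 * Illustrative note (a sketch, not part of the upstream header): with
 * -ffunction-sections/-fdata-sections the compiler emits one section
 * per symbol, so a hypothetical my_func/my_var end up as:
 *
 *	.text.my_func		matched by  .text.[0-9a-zA-Z_]*
 *	.data.my_var		matched by  .data.[0-9a-zA-Z_]*
 *	.data..page_aligned	NOT matched ('.' is outside the class),
 *				so the kernel's special double-dot
 *				sections must still be placed explicitly
 */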

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			__start_mcount_loc = .;			\
			KEEP(*(__mcount_loc))			\
			__stop_mcount_loc = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	__start_annotated_branch_profile = .;	\
				KEEP(*(_ftrace_annotated_branch))	\
				__stop_annotated_branch_profile = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	__start_branch_profile = .;		\
				KEEP(*(_ftrace_branch))			\
				__stop_branch_profile = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);				\
				__start_kprobe_blacklist = .;		\
				KEEP(*(_kprobe_blacklist))		\
				__stop_kprobe_blacklist = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST()	STRUCT_ALIGN();			\
			__start_error_injection_whitelist = .;		\
			KEEP(*(_error_injection_whitelist))		\
			__stop_error_injection_whitelist = .;
#else
#define ERROR_INJECT_WHITELIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);				\
			__start_ftrace_events = .;		\
			KEEP(*(_ftrace_events))			\
			__stop_ftrace_events = .;		\
			__start_ftrace_eval_maps = .;		\
			KEEP(*(_ftrace_eval_map))		\
			__stop_ftrace_eval_maps = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	__start___trace_bprintk_fmt = .;	\
			KEEP(*(__trace_printk_fmt)) /* trace_printk fmt pointers */ \
			__stop___trace_bprintk_fmt = .;
#define TRACEPOINT_STR() __start___tracepoint_str = .;		\
			KEEP(*(__tracepoint_str)) /* tracepoint str pointers */ \
			__stop___tracepoint_str = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);				\
			 __start_syscalls_metadata = .;		\
			 KEEP(*(__syscalls_metadata))		\
			 __stop_syscalls_metadata = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN();				\
			 __start__bpf_raw_tp = .;		\
			 KEEP(*(__bpf_raw_tp_map))		\
			 __stop__bpf_raw_tp = .;
#else
#define BPF_RAW_TP()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() . = ALIGN(8);				\
			 __earlycon_table = .;			\
			 KEEP(*(__earlycon_table))		\
			 __earlycon_table_end = .;
#else
#define EARLYCON_TABLE()
#endif

#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)					\
	. = ALIGN(8);						\
	__##name##_of_table = .;				\
	KEEP(*(__##name##_of_table))				\
	KEEP(*(__##name##_of_table_end))

#define TIMER_OF_TABLES()	OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
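
/*
 * Worked expansion (illustrative): IS_ENABLED(cfg) evaluates to 0 or 1,
 * and the two forwarding macros force that evaluation before ## pastes
 * it into the helper name. With CONFIG_COMMON_CLK=y:
 *
 *	CLK_OF_TABLES()
 *	  -> OF_TABLE(CONFIG_COMMON_CLK, clk)
 *	  -> _OF_TABLE_1(clk)
 *	  -> . = ALIGN(8);
 *	     __clk_of_table = .;
 *	     KEEP(*(__clk_of_table))
 *	     KEEP(*(__clk_of_table_end))
 *
 * With the option unset it becomes _OF_TABLE_0(clk), i.e. nothing. The
 * *_of_table_end input section is expected to carry a sentinel entry
 * emitted by the matching driver code.
 */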

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)					\
	. = ALIGN(8);						\
	__##name##_acpi_probe_table = .;			\
	KEEP(*(__##name##_acpi_probe_table))			\
	__##name##_acpi_probe_table_end = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#define KERNEL_DTB()						\
	STRUCT_ALIGN();						\
	__dtb_start = .;					\
	KEEP(*(.dtb.init.rodata))				\
	__dtb_end = .;

/*
 * .data section
 */
#define DATA_DATA						\
	*(.xiptext)						\
	*(DATA_MAIN)						\
	*(.data..decrypted)					\
	*(.ref.data)						\
	*(.data..shared_aligned) /* percpu related */		\
	MEM_KEEP(init.data*)					\
	MEM_KEEP(exit.data*)					\
	*(.data.unlikely)					\
	__start_once = .;					\
	*(.data.once)						\
	__end_once = .;						\
	STRUCT_ALIGN();						\
	*(__tracepoints)					\
	/* implement dynamic printk debug */			\
	. = ALIGN(8);						\
	__start___jump_table = .;				\
	KEEP(*(__jump_table))					\
	__stop___jump_table = .;				\
	. = ALIGN(8);						\
	__start___verbose = .;					\
	KEEP(*(__verbose))					\
	__stop___verbose = .;					\
	LIKELY_PROFILE()					\
	BRANCH_PROFILE()					\
	TRACE_PRINTKS()						\
	BPF_RAW_TP()						\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA						\
	. = ALIGN(PAGE_SIZE);					\
	__nosave_begin = .;					\
	*(.data..nosave)					\
	. = ALIGN(PAGE_SIZE);					\
	__nosave_end = .;

#define PAGE_ALIGNED_DATA(page_align)				\
	. = ALIGN(page_align);					\
	*(.data..page_aligned)					\
	. = ALIGN(page_align);

#define READ_MOSTLY_DATA(align)					\
	. = ALIGN(align);					\
	*(.data..read_mostly)					\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)				\
	. = ALIGN(align);					\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)					\
	. = ALIGN(align);					\
	__start_init_task = .;					\
	init_thread_union = .;					\
	init_stack = .;						\
	KEEP(*(.data..init_task))				\
	KEEP(*(.data..init_thread_info))			\
	. = __start_init_task + THREAD_SIZE;			\
	__end_init_task = .;

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA					\
	. = ALIGN(8);						\
	__start_ro_after_init = .;				\
	*(.data..ro_after_init)					\
	__end_ro_after_init = .;
#endif
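
/*
 * Illustrative sketch: C code opts into these special .data.. input
 * sections via section attributes (my_table/my_param are hypothetical
 * names; the kernel normally uses wrappers such as __read_mostly and
 * __ro_after_init rather than spelling the attribute out):
 *
 *	static struct my_table tbl
 *		__attribute__((__section__(".data..read_mostly")));
 *	static int my_param
 *		__attribute__((__section__(".data..ro_after_init")));
 */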

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)					\
	. = ALIGN((align));					\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {	\
		__start_rodata = .;				\
		*(.rodata) *(.rodata.*)				\
		RO_AFTER_INIT_DATA	/* Read only after init */ \
		KEEP(*(__vermagic))	/* Kernel version magic */ \
		. = ALIGN(8);					\
		__start___tracepoints_ptrs = .;			\
		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
		__stop___tracepoints_ptrs = .;			\
		*(__tracepoints_strings)/* Tracepoints: strings */ \
	}							\
								\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {	\
		*(.rodata1)					\
	}							\
								\
	/* PCI quirks */					\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		__start_pci_fixups_early = .;			\
		KEEP(*(.pci_fixup_early))			\
		__end_pci_fixups_early = .;			\
		__start_pci_fixups_header = .;			\
		KEEP(*(.pci_fixup_header))			\
		__end_pci_fixups_header = .;			\
		__start_pci_fixups_final = .;			\
		KEEP(*(.pci_fixup_final))			\
		__end_pci_fixups_final = .;			\
		__start_pci_fixups_enable = .;			\
		KEEP(*(.pci_fixup_enable))			\
		__end_pci_fixups_enable = .;			\
		__start_pci_fixups_resume = .;			\
		KEEP(*(.pci_fixup_resume))			\
		__end_pci_fixups_resume = .;			\
		__start_pci_fixups_resume_early = .;		\
		KEEP(*(.pci_fixup_resume_early))		\
		__end_pci_fixups_resume_early = .;		\
		__start_pci_fixups_suspend = .;			\
		KEEP(*(.pci_fixup_suspend))			\
		__end_pci_fixups_suspend = .;			\
		__start_pci_fixups_suspend_late = .;		\
		KEEP(*(.pci_fixup_suspend_late))		\
		__end_pci_fixups_suspend_late = .;		\
	}							\
								\
	/* Built-in firmware blobs */				\
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
		__start_builtin_fw = .;				\
		KEEP(*(.builtin_fw))				\
		__end_builtin_fw = .;				\
	}							\
								\
	TRACEDATA						\
								\
	/* Kernel symbol table: Normal symbols */		\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		__start___ksymtab = .;				\
		KEEP(*(SORT(___ksymtab+*)))			\
		__stop___ksymtab = .;				\
	}							\
								\
	/* Kernel symbol table: GPL-only symbols */		\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		__start___ksymtab_gpl = .;			\
		KEEP(*(SORT(___ksymtab_gpl+*)))			\
		__stop___ksymtab_gpl = .;			\
	}							\
								\
	/* Kernel symbol table: Normal unused symbols */	\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		__start___ksymtab_unused = .;			\
		KEEP(*(SORT(___ksymtab_unused+*)))		\
		__stop___ksymtab_unused = .;			\
	}							\
								\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		__start___ksymtab_unused_gpl = .;		\
		KEEP(*(SORT(___ksymtab_unused_gpl+*)))		\
		__stop___ksymtab_unused_gpl = .;		\
	}							\
								\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		__start___ksymtab_gpl_future = .;		\
		KEEP(*(SORT(___ksymtab_gpl_future+*)))		\
		__stop___ksymtab_gpl_future = .;		\
	}							\
								\
	/* Kernel symbol table: Normal symbols */		\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {	\
		__start___kcrctab = .;				\
		KEEP(*(SORT(___kcrctab+*)))			\
		__stop___kcrctab = .;				\
	}							\
								\
	/* Kernel symbol table: GPL-only symbols */		\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		__start___kcrctab_gpl = .;			\
		KEEP(*(SORT(___kcrctab_gpl+*)))			\
		__stop___kcrctab_gpl = .;			\
	}							\
								\
	/* Kernel symbol table: Normal unused symbols */	\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		__start___kcrctab_unused = .;			\
		KEEP(*(SORT(___kcrctab_unused+*)))		\
		__stop___kcrctab_unused = .;			\
	}							\
								\
	/* Kernel symbol table: GPL-only unused symbols */	\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		__start___kcrctab_unused_gpl = .;		\
		KEEP(*(SORT(___kcrctab_unused_gpl+*)))		\
		__stop___kcrctab_unused_gpl = .;		\
	}							\
								\
	/* Kernel symbol table: GPL-future-only symbols */	\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		__start___kcrctab_gpl_future = .;		\
		KEEP(*(SORT(___kcrctab_gpl_future+*)))		\
		__stop___kcrctab_gpl_future = .;		\
	}							\
								\
	/* Kernel symbol table: strings */			\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings)				\
	}							\
								\
	/* __*init sections */					\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		*(.ref.rodata)					\
		MEM_KEEP(init.rodata)				\
		MEM_KEEP(exit.rodata)				\
	}							\
								\
	/* Built-in module parameters. */			\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {		\
		__start___param = .;				\
		KEEP(*(__param))				\
		__stop___param = .;				\
	}							\
								\
	/* Built-in module versions. */				\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {		\
		__start___modver = .;				\
		KEEP(*(__modver))				\
		__stop___modver = .;				\
		. = ALIGN((align));				\
		__end_rodata = .;				\
	}							\
	. = ALIGN((align));
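
/*
 * Illustrative note: the SORT(___ksymtab+*) globs above rely on
 * EXPORT_SYMBOL() emitting each struct kernel_symbol into its own
 * per-symbol input section named "___ksymtab+<symbol>" (the exact
 * macro spelling varies by kernel version). SORT() then lays the
 * entries out ordered by section name, i.e. by symbol name, which is
 * what allows the module loader to binary-search the range
 * [__start___ksymtab, __stop___ksymtab).
 */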

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)

#define SECURITY_INIT						\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		__security_initcall_start = .;			\
		KEEP(*(.security_initcall.init))		\
		__security_initcall_end = .;			\
	}

/*
 * Non-instrumentable text section
 */
#define NOINSTR_TEXT						\
		ALIGN_FUNCTION();				\
		__noinstr_text_start = .;			\
		*(.noinstr.text)				\
		__noinstr_text_end = .;
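
/*
 * Illustrative sketch: start/end pairs such as the one above are
 * consumed from C as linker-provided array symbols (addr_in_noinstr is
 * a hypothetical helper; in_sched_functions() in kernel/sched/core.c
 * does the same with __sched_text_start/__sched_text_end):
 *
 *	extern char __noinstr_text_start[], __noinstr_text_end[];
 *
 *	static bool addr_in_noinstr(unsigned long addr)
 *	{
 *		return addr >= (unsigned long)__noinstr_text_start &&
 *		       addr <  (unsigned long)__noinstr_text_end;
 *	}
 */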

/*
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 *
 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
 * code elimination is enabled, so these sections should be converted
 * to use ".." first.
 */
#define TEXT_TEXT						\
		ALIGN_FUNCTION();				\
		*(.text.hot .text.hot.*)			\
		*(TEXT_MAIN .text.fixup)			\
		*(.text.unlikely .text.unlikely.*)		\
		*(.text.unknown .text.unknown.*)		\
		NOINSTR_TEXT					\
		*(.text..refcount)				\
		*(.ref.text)					\
		*(.text.asan.* .text.tsan.*)			\
	MEM_KEEP(init.text*)					\
	MEM_KEEP(exit.text*)

/* sched.text is aligned to function alignment to ensure we get the
 * same address even on the second ld pass when generating System.map */
#define SCHED_TEXT						\
		ALIGN_FUNCTION();				\
		__sched_text_start = .;				\
		*(.sched.text)					\
		__sched_text_end = .;

/* spinlock.text is aligned to function alignment to ensure we get the
 * same address even on the second ld pass when generating System.map */
#define LOCK_TEXT						\
		ALIGN_FUNCTION();				\
		__lock_text_start = .;				\
		*(.spinlock.text)				\
		__lock_text_end = .;

#define CPUIDLE_TEXT						\
		ALIGN_FUNCTION();				\
		__cpuidle_text_start = .;			\
		*(.cpuidle.text)				\
		__cpuidle_text_end = .;

#define KPROBES_TEXT						\
		ALIGN_FUNCTION();				\
		__kprobes_text_start = .;			\
		*(.kprobes.text)				\
		__kprobes_text_end = .;

#define ENTRY_TEXT						\
		ALIGN_FUNCTION();				\
		__entry_text_start = .;				\
		*(.entry.text)					\
		__entry_text_end = .;

#define IRQENTRY_TEXT						\
		ALIGN_FUNCTION();				\
		__irqentry_text_start = .;			\
		*(.irqentry.text)				\
		__irqentry_text_end = .;

#define SOFTIRQENTRY_TEXT					\
		ALIGN_FUNCTION();				\
		__softirqentry_text_start = .;			\
		*(.softirqentry.text)				\
		__softirqentry_text_end = .;

/* Section used for early init (in .S files) */
#define HEAD_TEXT  KEEP(*(.head.text))

#define HEAD_TEXT_SECTION					\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {	\
		HEAD_TEXT					\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)					\
	. = ALIGN(align);					\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {	\
		__start___ex_table = .;				\
		KEEP(*(__ex_table))				\
		__stop___ex_table = .;				\
	}
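
/*
 * Illustrative sketch: the table gathered by EXCEPTION_TABLE() is
 * sorted after linking (scripts/sortextable on some architectures, or
 * at boot via sort_main_extable()) so the fault handler can
 * binary-search it. A simplified consumer, using the search_extable()
 * helper from lib/extable.c (the call site itself is a sketch):
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	e = search_extable(__start___ex_table,
 *			   __stop___ex_table - __start___ex_table,
 *			   fault_addr);
 */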

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)				\
	. = ALIGN(align);					\
	.data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
		INIT_TASK_DATA(align)				\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);				\
			__ctors_start = .;			\
			KEEP(*(.ctors))				\
			KEEP(*(SORT(.init_array.*)))		\
			KEEP(*(.init_array))			\
			__ctors_end = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA						\
	KEEP(*(SORT(___kentry+*)))				\
	*(.init.data init.data.*)				\
	MEM_DISCARD(init.data*)					\
	KERNEL_CTORS()						\
	MCOUNT_REC()						\
	*(.init.rodata .init.rodata.*)				\
	FTRACE_EVENTS()						\
	TRACE_SYSCALLS()					\
	KPROBE_BLACKLIST()					\
	ERROR_INJECT_WHITELIST()				\
	MEM_DISCARD(init.rodata)				\
	CLK_OF_TABLES()						\
	RESERVEDMEM_OF_TABLES()					\
	TIMER_OF_TABLES()					\
	CPU_METHOD_OF_TABLES()					\
	CPUIDLE_METHOD_OF_TABLES()				\
	KERNEL_DTB()						\
	IRQCHIP_OF_MATCH_TABLE()				\
	ACPI_PROBE_TABLE(irqchip)				\
	ACPI_PROBE_TABLE(timer)					\
	EARLYCON_TABLE()

#define INIT_TEXT						\
	*(.init.text .init.text.*)				\
	*(.text.startup)					\
	MEM_DISCARD(init.text*)

#define EXIT_DATA						\
	*(.exit.data .exit.data.*)				\
	*(.fini_array .fini_array.*)				\
	*(.dtors .dtors.*)					\
	MEM_DISCARD(exit.data*)					\
	MEM_DISCARD(exit.rodata*)

#define EXIT_TEXT						\
	*(.exit.text)						\
	*(.text.exit)						\
	MEM_DISCARD(exit.text)

#define EXIT_CALL						\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)					\
	. = ALIGN(sbss_align);					\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {			\
		*(.dynsbss)					\
		*(SBSS_MAIN)					\
		*(.scommon)					\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)						\
	. = ALIGN(bss_align);					\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {			\
		BSS_FIRST_SECTIONS				\
		. = ALIGN(PAGE_SIZE);				\
		*(.bss..page_aligned)				\
		. = ALIGN(PAGE_SIZE);				\
		*(.dynbss)					\
		*(BSS_MAIN)					\
		*(COMMON)					\
	}
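
/*
 * Illustrative sketch: "zeroed during startup" is typically the
 * architecture's early boot code doing, in effect (using the
 * __bss_start/__bss_stop symbols that BSS_SECTION() further down
 * places around these output sections):
 *
 *	extern char __bss_start[], __bss_stop[];
 *
 *	memset(__bss_start, 0, __bss_stop - __bss_start);
 */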

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG						\
	/* DWARF 1 */						\
	.debug          0 : { *(.debug) }			\
	.line           0 : { *(.line) }			\
	/* GNU DWARF 1 extensions */				\
	.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
	.debug_sfnames  0 : { *(.debug_sfnames) }		\
	/* DWARF 1.1 and DWARF 2 */				\
	.debug_aranges  0 : { *(.debug_aranges) }		\
	.debug_pubnames 0 : { *(.debug_pubnames) }		\
	/* DWARF 2 */						\
	.debug_info     0 : { *(.debug_info			\
			.gnu.linkonce.wi.*) }			\
	.debug_abbrev   0 : { *(.debug_abbrev) }		\
	.debug_line     0 : { *(.debug_line) }			\
	.debug_frame    0 : { *(.debug_frame) }			\
	.debug_str      0 : { *(.debug_str) }			\
	.debug_loc      0 : { *(.debug_loc) }			\
	.debug_macinfo  0 : { *(.debug_macinfo) }		\
	.debug_pubtypes 0 : { *(.debug_pubtypes) }		\
	/* DWARF 3 */						\
	.debug_ranges	0 : { *(.debug_ranges) }		\
	/* SGI/MIPS DWARF 2 extensions */			\
	.debug_weaknames 0 : { *(.debug_weaknames) }		\
	.debug_funcnames 0 : { *(.debug_funcnames) }		\
	.debug_typenames 0 : { *(.debug_typenames) }		\
	.debug_varnames  0 : { *(.debug_varnames) }		\
	/* GNU DWARF 2 extensions */				\
	.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) }	\
	.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) }	\
	/* DWARF 4 */						\
	.debug_types	0 : { *(.debug_types) }			\
	/* DWARF 5 */						\
	.debug_addr	0 : { *(.debug_addr) }			\
	.debug_line_str	0 : { *(.debug_line_str) }		\
	.debug_loclists	0 : { *(.debug_loclists) }		\
	.debug_macro	0 : { *(.debug_macro) }			\
	.debug_names	0 : { *(.debug_names) }			\
	.debug_rnglists	0 : { *(.debug_rnglists) }		\
	.debug_str_offsets	0 : { *(.debug_str_offsets) }

/* Stabs debugging sections. */
#define STABS_DEBUG						\
	.stab 0 : { *(.stab) }					\
	.stabstr 0 : { *(.stabstr) }				\
	.stab.excl 0 : { *(.stab.excl) }			\
	.stab.exclstr 0 : { *(.stab.exclstr) }			\
	.stab.index 0 : { *(.stab.index) }			\
	.stab.indexstr 0 : { *(.stab.indexstr) }		\
	.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE						\
	. = ALIGN(8);						\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {	\
		__start___bug_table = .;			\
		KEEP(*(__bug_table))				\
		__stop___bug_table = .;				\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE					\
	. = ALIGN(4);						\
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
		__start_orc_unwind_ip = .;			\
		KEEP(*(.orc_unwind_ip))				\
		__stop_orc_unwind_ip = .;			\
	}							\
	. = ALIGN(2);						\
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) {	\
		__start_orc_unwind = .;				\
		KEEP(*(.orc_unwind))				\
		__stop_orc_unwind = .;				\
	}							\
	. = ALIGN(4);						\
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) {	\
		orc_lookup = .;					\
		. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
			LOOKUP_BLOCK_SIZE) + 1) * 4;		\
		orc_lookup_end = .;				\
	}
#else
#define ORC_UNWIND_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA						\
	. = ALIGN(4);						\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {	\
		__tracedata_start = .;				\
		KEEP(*(.tracedata))				\
		__tracedata_end = .;				\
	}
#else
#define TRACEDATA
#endif

#define NOTES							\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {		\
		__start_notes = .;				\
		KEEP(*(.note.*))				\
		__stop_notes = .;				\
	}
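
/*
 * Worked example for the .orc_lookup sizing above: the expression
 * reserves one 4-byte index per LOOKUP_BLOCK_SIZE bytes of .text,
 * rounded up, plus one extra terminating entry. Assuming
 * LOOKUP_BLOCK_SIZE is 256 and a 1 MiB .text:
 *
 *	((1048576 + 255) / 256 + 1) * 4 = (4096 + 1) * 4 = 16388 bytes
 */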

#define INIT_SETUP(initsetup_align)				\
		. = ALIGN(initsetup_align);			\
		__setup_start = .;				\
		KEEP(*(.init.setup))				\
		__setup_end = .;

#define INIT_CALLS_LEVEL(level)					\
		__initcall##level##_start = .;			\
		KEEP(*(.initcall##level##.init))		\
		KEEP(*(.initcall##level##s.init))

#define INIT_CALLS						\
		__initcall_start = .;				\
		KEEP(*(.initcallearly.init))			\
		INIT_CALLS_LEVEL(0)				\
		INIT_CALLS_LEVEL(1)				\
		INIT_CALLS_LEVEL(2)				\
		INIT_CALLS_LEVEL(3)				\
		INIT_CALLS_LEVEL(4)				\
		INIT_CALLS_LEVEL(5)				\
		INIT_CALLS_LEVEL(rootfs)			\
		INIT_CALLS_LEVEL(6)				\
		INIT_CALLS_LEVEL(7)				\
		__initcall_end = .;

#define CON_INITCALL						\
		__con_initcall_start = .;			\
		KEEP(*(.con_initcall.init))			\
		__con_initcall_end = .;

#define SECURITY_INITCALL					\
		__security_initcall_start = .;			\
		KEEP(*(.security_initcall.init))		\
		__security_initcall_end = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS						\
	. = ALIGN(4);						\
	__initramfs_start = .;					\
	KEEP(*(.init.ramfs))					\
	. = ALIGN(8);						\
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif

/*
 * Memory encryption operates on a page basis. Since we need to clear
 * the memory encryption mask for this section, it needs to be aligned
 * on a page boundary and be a page-size multiple in length.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION				\
	. = ALIGN(PAGE_SIZE);					\
	*(.data..percpu..decrypted)				\
	. = ALIGN(PAGE_SIZE);
#else
#define PERCPU_DECRYPTED_SECTION
#endif


/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of the output
 * section definitions so that such archs can put those in earlier
 * section definitions.
 */
#define DISCARDS						\
	/DISCARD/ : {						\
		EXIT_TEXT					\
		EXIT_DATA					\
		EXIT_CALL					\
		*(.discard)					\
		*(.discard.*)					\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)					\
	__per_cpu_start = .;					\
	*(.data..percpu..first)					\
	. = ALIGN(PAGE_SIZE);					\
	*(.data..percpu..page_aligned)				\
	. = ALIGN(cacheline);					\
	*(.data..percpu..read_mostly)				\
	. = ALIGN(cacheline);					\
	*(.data..percpu)					\
	*(.data..percpu..shared_aligned)			\
	PERCPU_DECRYPTED_SECTION				\
	__per_cpu_end = .;
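
/*
 * Illustrative sketch: the initcall table built by INIT_CALLS above is
 * walked during boot by init/main.c; heavily simplified (the real code
 * iterates level by level between the __initcall<level>_start
 * boundaries):
 *
 *	typedef int (*initcall_t)(void);
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *
 *	initcall_t *fn;
 *
 *	for (fn = __initcall_start; fn < __initcall_end; fn++)
 *		(*fn)();
 */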

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)			\
	__per_cpu_load = .;					\
	.data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
		PERCPU_INPUT(cacheline)				\
	} phdr							\
	. = __per_cpu_load + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area. This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configuration.
 */
#define PERCPU_SECTION(cacheline)				\
	. = ALIGN(PAGE_SIZE);					\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
		__per_cpu_load = .;				\
		PERCPU_INPUT(cacheline)				\
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically less than a PAGE_SIZE, so the sections
 * that have this restriction (or a similar one) are located before
 * the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * use 0 as page_align if page_aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)	\
	. = ALIGN(PAGE_SIZE);					\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {			\
		INIT_TASK_DATA(inittask)			\
		NOSAVE_DATA					\
		PAGE_ALIGNED_DATA(pagealigned)			\
		CACHELINE_ALIGNED_DATA(cacheline)		\
		READ_MOSTLY_DATA(cacheline)			\
		DATA_DATA					\
		CONSTRUCTORS					\
	}							\
	BUG_TABLE

#define INIT_TEXT_SECTION(inittext_align)			\
	. = ALIGN(inittext_align);				\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {	\
		_sinittext = .;					\
		INIT_TEXT					\
		_einittext = .;					\
	}

#define INIT_DATA_SECTION(initsetup_align)			\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {	\
		INIT_DATA					\
		INIT_SETUP(initsetup_align)			\
		INIT_CALLS					\
		CON_INITCALL					\
		SECURITY_INITCALL				\
		INIT_RAM_FS					\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)		\
	. = ALIGN(sbss_align);					\
	__bss_start = .;					\
	SBSS(sbss_align)					\
	BSS(bss_align)						\
	. = ALIGN(stop_align);					\
	__bss_stop = .;
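
/*
 * Putting it together (an illustrative sketch): a hypothetical minimal
 * architecture script assembled from the high-level helpers above, in
 * the spirit of the sample at the top of this file. START and the
 * alignment arguments are placeholders; real architectures pick their
 * own values.
 *
 *	SECTIONS
 *	{
 *		. = START;
 *		__init_begin = .;
 *		HEAD_TEXT_SECTION
 *		INIT_TEXT_SECTION(PAGE_SIZE)
 *		INIT_DATA_SECTION(16)
 *		PERCPU_SECTION(L1_CACHE_BYTES)
 *		__init_end = .;
 *
 *		RO_DATA_SECTION(PAGE_SIZE)
 *		RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *		EXCEPTION_TABLE(16)
 *		NOTES
 *		BSS_SECTION(0, 0, 0)
 *		_end = .;
 *
 *		STABS_DEBUG
 *		DWARF_DEBUG
 *		DISCARDS	// must be the last
 *	}
 */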