/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data.
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

/* Align . to an 8-byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)

/*
 * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
 * generates .data.identifier sections, which need to be pulled in with
 * .data. We don't want to pull in .data..other sections, which Linux
 * has defined. Same for text and bss.
 *
 * RODATA_MAIN is not used because existing code already defines .rodata.x
 * sections to be brought in with rodata.
 */
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
#define SBSS_MAIN .sbss
#endif

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct.
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/*
 * The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define MCOUNT_REC()	. = ALIGN(8); \
			__start_mcount_loc = .; \
			KEEP(*(__patchable_function_entries)) \
			__stop_mcount_loc = .;
#else
#define MCOUNT_REC()	. = ALIGN(8); \
			__start_mcount_loc = .; \
			KEEP(*(__mcount_loc)) \
			__stop_mcount_loc = .;
#endif
#else
#define MCOUNT_REC()
#endif
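/*
 * Consumer-side sketch (illustrative, not part of this header): the
 * __start_mcount_loc/__stop_mcount_loc symbols emitted by MCOUNT_REC()
 * bound an array of call-site addresses that C code can declare as
 * externs and walk; ftrace ingests the table this way during boot.
 * The function name below is hypothetical.
 */
#if 0	/* example only, never compiled */
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

static void __init example_walk_mcount_loc(void)
{
	unsigned long *p;

	for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
		; /* *p is the address of one patchable call site */
}
#endif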
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	__start_annotated_branch_profile = .; \
				KEEP(*(_ftrace_annotated_branch)) \
				__stop_annotated_branch_profile = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	__start_branch_profile = .; \
				KEEP(*(_ftrace_branch)) \
				__stop_branch_profile = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8); \
				__start_kprobe_blacklist = .; \
				KEEP(*(_kprobe_blacklist)) \
				__stop_kprobe_blacklist = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST()	STRUCT_ALIGN(); \
			__start_error_injection_whitelist = .; \
			KEEP(*(_error_injection_whitelist)) \
			__stop_error_injection_whitelist = .;
#else
#define ERROR_INJECT_WHITELIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8); \
			__start_ftrace_events = .; \
			KEEP(*(_ftrace_events)) \
			__stop_ftrace_events = .; \
			__start_ftrace_eval_maps = .; \
			KEEP(*(_ftrace_eval_map)) \
			__stop_ftrace_eval_maps = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	__start___trace_bprintk_fmt = .; \
			KEEP(*(__trace_printk_fmt)) /* trace_printk fmt pointers */ \
			__stop___trace_bprintk_fmt = .;
#define TRACEPOINT_STR() __start___tracepoint_str = .; \
			KEEP(*(__tracepoint_str)) /* tracepoint string pointers */ \
			__stop___tracepoint_str = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
			__start_syscalls_metadata = .; \
			KEEP(*(__syscalls_metadata)) \
			__stop_syscalls_metadata = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN(); \
			__start__bpf_raw_tp = .; \
			KEEP(*(__bpf_raw_tp_map)) \
			__stop__bpf_raw_tp = .;
#else
#define BPF_RAW_TP()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() . = ALIGN(8); \
			__earlycon_table = .; \
			KEEP(*(__earlycon_table)) \
			__earlycon_table_end = .;
#else
#define EARLYCON_TABLE()
#endif

#ifdef CONFIG_SECURITY
#define LSM_TABLE()	. = ALIGN(8); \
			__start_lsm_info = .; \
			KEEP(*(.lsm_info.init)) \
			__end_lsm_info = .;
#define EARLY_LSM_TABLE() . = ALIGN(8); \
			__start_early_lsm_info = .; \
			KEEP(*(.early_lsm_info.init)) \
			__end_early_lsm_info = .;
#else
#define LSM_TABLE()
#define EARLY_LSM_TABLE()
#endif

#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
	. = ALIGN(8); \
	__##name##_of_table = .; \
	KEEP(*(__##name##_of_table)) \
	KEEP(*(__##name##_of_table_end))
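/*
 * Expansion sketch (illustrative): with CONFIG_TIMER_OF=y, IS_ENABLED()
 * evaluates to 1, so OF_TABLE(CONFIG_TIMER_OF, timer) selects
 * _OF_TABLE_1(timer) and emits roughly:
 *
 *	. = ALIGN(8);
 *	__timer_of_table = .;
 *	KEEP(*(__timer_of_table))
 *	KEEP(*(__timer_of_table_end))
 *
 * With the option disabled it selects _OF_TABLE_0(timer), which emits
 * nothing.
 */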
#define TIMER_OF_TABLES()	OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name) \
	. = ALIGN(8); \
	__##name##_acpi_probe_table = .; \
	KEEP(*(__##name##_acpi_probe_table)) \
	__##name##_acpi_probe_table_end = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#ifdef CONFIG_THERMAL
#define THERMAL_TABLE(name) \
	. = ALIGN(8); \
	__##name##_thermal_table = .; \
	KEEP(*(__##name##_thermal_table)) \
	__##name##_thermal_table_end = .;
#else
#define THERMAL_TABLE(name)
#endif

#define KERNEL_DTB() \
	STRUCT_ALIGN(); \
	__dtb_start = .; \
	KEEP(*(.dtb.init.rodata)) \
	__dtb_end = .;

/*
 * .data section
 */
#define DATA_DATA \
	*(.xiptext) \
	*(DATA_MAIN) \
	*(.ref.data) \
	*(.data..shared_aligned) /* percpu related */ \
	MEM_KEEP(init.data*) \
	MEM_KEEP(exit.data*) \
	*(.data.unlikely) \
	__start_once = .; \
	*(.data.once) \
	__end_once = .; \
	STRUCT_ALIGN(); \
	*(__tracepoints) \
	/* implement dynamic printk debug */ \
	. = ALIGN(8); \
	__start___verbose = .; \
	KEEP(*(__verbose)) \
	__stop___verbose = .; \
	LIKELY_PROFILE() \
	BRANCH_PROFILE() \
	TRACE_PRINTKS() \
	BPF_RAW_TP() \
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA \
	. = ALIGN(PAGE_SIZE); \
	__nosave_begin = .; \
	*(.data..nosave) \
	. = ALIGN(PAGE_SIZE); \
	__nosave_end = .;

#define PAGE_ALIGNED_DATA(page_align) \
	. = ALIGN(page_align); \
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align) \
	. = ALIGN(align); \
	*(.data..read_mostly) \
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align) \
	. = ALIGN(align); \
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align) \
	. = ALIGN(align); \
	__start_init_task = .; \
	init_thread_union = .; \
	init_stack = .; \
	KEEP(*(.data..init_task)) \
	KEEP(*(.data..init_thread_info)) \
	. = __start_init_task + THREAD_SIZE; \
	__end_init_task = .;

#define JUMP_TABLE_DATA \
	. = ALIGN(8); \
	__start___jump_table = .; \
	KEEP(*(__jump_table)) \
	__stop___jump_table = .;

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
	__start_ro_after_init = .; \
	*(.data..ro_after_init) \
	JUMP_TABLE_DATA \
	__end_ro_after_init = .;
#endif
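/*
 * Usage sketch (illustrative, assuming the __ro_after_init attribute
 * from <linux/cache.h>): objects annotated this way are emitted into
 * .data..ro_after_init, stay writable during boot, and are mapped
 * read-only once init completes. The variable name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_hardened_flag __ro_after_init = 1;
#endif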
/*
 * Read only Data
 */
#define RO_DATA_SECTION(align) \
	. = ALIGN((align)); \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
		__start_rodata = .; \
		*(.rodata) *(.rodata.*) \
		RO_AFTER_INIT_DATA	/* Read only after init */ \
		. = ALIGN(8); \
		__start___tracepoints_ptrs = .; \
		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
		__stop___tracepoints_ptrs = .; \
		*(__tracepoints_strings) /* Tracepoints: strings */ \
	} \
	\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
		*(.rodata1) \
	} \
	\
	/* PCI quirks */ \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		__start_pci_fixups_early = .; \
		KEEP(*(.pci_fixup_early)) \
		__end_pci_fixups_early = .; \
		__start_pci_fixups_header = .; \
		KEEP(*(.pci_fixup_header)) \
		__end_pci_fixups_header = .; \
		__start_pci_fixups_final = .; \
		KEEP(*(.pci_fixup_final)) \
		__end_pci_fixups_final = .; \
		__start_pci_fixups_enable = .; \
		KEEP(*(.pci_fixup_enable)) \
		__end_pci_fixups_enable = .; \
		__start_pci_fixups_resume = .; \
		KEEP(*(.pci_fixup_resume)) \
		__end_pci_fixups_resume = .; \
		__start_pci_fixups_resume_early = .; \
		KEEP(*(.pci_fixup_resume_early)) \
		__end_pci_fixups_resume_early = .; \
		__start_pci_fixups_suspend = .; \
		KEEP(*(.pci_fixup_suspend)) \
		__end_pci_fixups_suspend = .; \
		__start_pci_fixups_suspend_late = .; \
		KEEP(*(.pci_fixup_suspend_late)) \
		__end_pci_fixups_suspend_late = .; \
	} \
	\
	/* Built-in firmware blobs */ \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		__start_builtin_fw = .; \
		KEEP(*(.builtin_fw)) \
		__end_builtin_fw = .; \
	} \
	\
	TRACEDATA \
	\
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		__start___ksymtab = .; \
		KEEP(*(SORT(___ksymtab+*))) \
		__stop___ksymtab = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		__start___ksymtab_gpl = .; \
		KEEP(*(SORT(___ksymtab_gpl+*))) \
		__stop___ksymtab_gpl = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		__start___ksymtab_unused = .; \
		KEEP(*(SORT(___ksymtab_unused+*))) \
		__stop___ksymtab_unused = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		__start___ksymtab_unused_gpl = .; \
		KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
		__stop___ksymtab_unused_gpl = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		__start___ksymtab_gpl_future = .; \
		KEEP(*(SORT(___ksymtab_gpl_future+*))) \
		__stop___ksymtab_gpl_future = .; \
	} \
	\
	/* Kernel symbol table: Normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		__start___kcrctab = .; \
		KEEP(*(SORT(___kcrctab+*))) \
		__stop___kcrctab = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		__start___kcrctab_gpl = .; \
		KEEP(*(SORT(___kcrctab_gpl+*))) \
		__stop___kcrctab_gpl = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		__start___kcrctab_unused = .; \
		KEEP(*(SORT(___kcrctab_unused+*))) \
		__stop___kcrctab_unused = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		__start___kcrctab_unused_gpl = .; \
		KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
		__stop___kcrctab_unused_gpl = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		__start___kcrctab_gpl_future = .; \
		KEEP(*(SORT(___kcrctab_gpl_future+*))) \
		__stop___kcrctab_gpl_future = .; \
	} \
	\
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings) \
	} \
	\
	/* __*init sections */ \
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		*(.ref.rodata) \
		MEM_KEEP(init.rodata) \
		MEM_KEEP(exit.rodata) \
	} \
	\
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
		__start___param = .; \
		KEEP(*(__param)) \
		__stop___param = .; \
	} \
	\
	/* Built-in module versions. */ \
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
		__start___modver = .; \
		KEEP(*(__modver)) \
		__stop___modver = .; \
		. = ALIGN((align)); \
		__end_rodata = .; \
	} \
	. = ALIGN((align));

/*
 * RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA().
 */
#define RODATA		RO_DATA_SECTION(4096)
#define RO_DATA(align)	RO_DATA_SECTION(align)
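/*
 * Usage sketch (hypothetical architecture linker script, mirroring the
 * sample at the top of this file):
 *
 *	_sdata = .;
 *	RO_DATA(PAGE_SIZE)
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *	_edata = .;
 */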
/*
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 *
 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
 * code elimination is enabled, so these sections should be converted
 * to use ".." first.
 */
#define TEXT_TEXT \
		ALIGN_FUNCTION(); \
		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
		*(.text..refcount) \
		*(.ref.text) \
	MEM_KEEP(init.text*) \
	MEM_KEEP(exit.text*)


/*
 * .sched.text is aligned to function alignment to ensure we get the
 * same address even at the second ld pass when generating System.map.
 */
#define SCHED_TEXT \
		ALIGN_FUNCTION(); \
		__sched_text_start = .; \
		*(.sched.text) \
		__sched_text_end = .;

/*
 * .spinlock.text is aligned to function alignment to ensure we get the
 * same address even at the second ld pass when generating System.map.
 */
#define LOCK_TEXT \
		ALIGN_FUNCTION(); \
		__lock_text_start = .; \
		*(.spinlock.text) \
		__lock_text_end = .;

#define CPUIDLE_TEXT \
		ALIGN_FUNCTION(); \
		__cpuidle_text_start = .; \
		*(.cpuidle.text) \
		__cpuidle_text_end = .;

#define KPROBES_TEXT \
		ALIGN_FUNCTION(); \
		__kprobes_text_start = .; \
		*(.kprobes.text) \
		__kprobes_text_end = .;

#define ENTRY_TEXT \
		ALIGN_FUNCTION(); \
		__entry_text_start = .; \
		*(.entry.text) \
		__entry_text_end = .;

#define IRQENTRY_TEXT \
		ALIGN_FUNCTION(); \
		__irqentry_text_start = .; \
		*(.irqentry.text) \
		__irqentry_text_end = .;

#define SOFTIRQENTRY_TEXT \
		ALIGN_FUNCTION(); \
		__softirqentry_text_start = .; \
		*(.softirqentry.text) \
		__softirqentry_text_end = .;

/* Section used for early init (in .S files) */
#define HEAD_TEXT  KEEP(*(.head.text))

#define HEAD_TEXT_SECTION \
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
		HEAD_TEXT \
	}
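/*
 * Usage sketch (illustrative): the __sched annotation (from
 * <linux/sched/debug.h>) places a function in .sched.text, which
 * SCHED_TEXT collects so in_sched_functions() can compare return
 * addresses against __sched_text_start/__sched_text_end and hide
 * scheduler internals from stack traces. The function is hypothetical.
 */
#if 0	/* example only, never compiled */
static int __sched example_wait_for_event(void)
{
	/* body elided; only the section placement matters here */
	return 0;
}
#endif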
/*
 * Exception table
 */
#define EXCEPTION_TABLE(align) \
	. = ALIGN(align); \
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
		__start___ex_table = .; \
		KEEP(*(__ex_table)) \
		__stop___ex_table = .; \
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align) \
	. = ALIGN(align); \
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
		INIT_TASK_DATA(align) \
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8); \
			__ctors_start = .; \
			KEEP(*(.ctors)) \
			KEEP(*(SORT(.init_array.*))) \
			KEEP(*(.init_array)) \
			__ctors_end = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA \
	KEEP(*(SORT(___kentry+*))) \
	*(.init.data init.data.*) \
	MEM_DISCARD(init.data*) \
	KERNEL_CTORS() \
	MCOUNT_REC() \
	*(.init.rodata .init.rodata.*) \
	FTRACE_EVENTS() \
	TRACE_SYSCALLS() \
	KPROBE_BLACKLIST() \
	ERROR_INJECT_WHITELIST() \
	MEM_DISCARD(init.rodata) \
	CLK_OF_TABLES() \
	RESERVEDMEM_OF_TABLES() \
	TIMER_OF_TABLES() \
	CPU_METHOD_OF_TABLES() \
	CPUIDLE_METHOD_OF_TABLES() \
	KERNEL_DTB() \
	IRQCHIP_OF_MATCH_TABLE() \
	ACPI_PROBE_TABLE(irqchip) \
	ACPI_PROBE_TABLE(timer) \
	THERMAL_TABLE(governor) \
	EARLYCON_TABLE() \
	LSM_TABLE() \
	EARLY_LSM_TABLE()

#define INIT_TEXT \
	*(.init.text .init.text.*) \
	*(.text.startup) \
	MEM_DISCARD(init.text*)

#define EXIT_DATA \
	*(.exit.data .exit.data.*) \
	*(.fini_array .fini_array.*) \
	*(.dtors .dtors.*) \
	MEM_DISCARD(exit.data*) \
	MEM_DISCARD(exit.rodata*)

#define EXIT_TEXT \
	*(.exit.text) \
	*(.text.exit) \
	MEM_DISCARD(exit.text)

#define EXIT_CALL \
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align) \
	. = ALIGN(sbss_align); \
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
		*(.dynsbss) \
		*(SBSS_MAIN) \
		*(.scommon) \
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align) \
	. = ALIGN(bss_align); \
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
		BSS_FIRST_SECTIONS \
		*(.bss..page_aligned) \
		*(.dynbss) \
		*(BSS_MAIN) \
		*(COMMON) \
	}
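/*
 * Consumer-side sketch (illustrative): early architecture startup code
 * typically zeroes the region bounded by __bss_start/__bss_stop, which
 * BSS_SECTION() (defined further down) lays out around SBSS/BSS. A
 * minimal C equivalent, with a hypothetical function name (memset from
 * <linux/string.h>):
 */
#if 0	/* example only, never compiled */
extern char __bss_start[], __bss_stop[];

static void example_clear_bss(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}
#endif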
/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG \
		/* DWARF 1 */ \
		.debug 0 : { *(.debug) } \
		.line 0 : { *(.line) } \
		/* GNU DWARF 1 extensions */ \
		.debug_srcinfo 0 : { *(.debug_srcinfo) } \
		.debug_sfnames 0 : { *(.debug_sfnames) } \
		/* DWARF 1.1 and DWARF 2 */ \
		.debug_aranges 0 : { *(.debug_aranges) } \
		.debug_pubnames 0 : { *(.debug_pubnames) } \
		/* DWARF 2 */ \
		.debug_info 0 : { *(.debug_info \
				.gnu.linkonce.wi.*) } \
		.debug_abbrev 0 : { *(.debug_abbrev) } \
		.debug_line 0 : { *(.debug_line) } \
		.debug_frame 0 : { *(.debug_frame) } \
		.debug_str 0 : { *(.debug_str) } \
		.debug_loc 0 : { *(.debug_loc) } \
		.debug_macinfo 0 : { *(.debug_macinfo) } \
		.debug_pubtypes 0 : { *(.debug_pubtypes) } \
		/* DWARF 3 */ \
		.debug_ranges 0 : { *(.debug_ranges) } \
		/* SGI/MIPS DWARF 2 extensions */ \
		.debug_weaknames 0 : { *(.debug_weaknames) } \
		.debug_funcnames 0 : { *(.debug_funcnames) } \
		.debug_typenames 0 : { *(.debug_typenames) } \
		.debug_varnames 0 : { *(.debug_varnames) } \
		/* GNU DWARF 2 extensions */ \
		.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \
		.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \
		/* DWARF 4 */ \
		.debug_types 0 : { *(.debug_types) } \
		/* DWARF 5 */ \
		.debug_macro 0 : { *(.debug_macro) } \
		.debug_addr 0 : { *(.debug_addr) }

/* Stabs debugging sections. */
#define STABS_DEBUG \
		.stab 0 : { *(.stab) } \
		.stabstr 0 : { *(.stabstr) } \
		.stab.excl 0 : { *(.stab.excl) } \
		.stab.exclstr 0 : { *(.stab.exclstr) } \
		.stab.index 0 : { *(.stab.index) } \
		.stab.indexstr 0 : { *(.stab.indexstr) } \
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
	. = ALIGN(8); \
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
		__start___bug_table = .; \
		KEEP(*(__bug_table)) \
		__stop___bug_table = .; \
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE \
	. = ALIGN(4); \
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
		__start_orc_unwind_ip = .; \
		KEEP(*(.orc_unwind_ip)) \
		__stop_orc_unwind_ip = .; \
	} \
	. = ALIGN(2); \
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
		__start_orc_unwind = .; \
		KEEP(*(.orc_unwind)) \
		__stop_orc_unwind = .; \
	} \
	. = ALIGN(4); \
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
		orc_lookup = .; \
		. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
			LOOKUP_BLOCK_SIZE) + 1) * 4; \
		orc_lookup_end = .; \
	}
#else
#define ORC_UNWIND_TABLE
#endif
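/*
 * Worked example for the .orc_lookup sizing above, assuming
 * LOOKUP_BLOCK_SIZE = 256 (the value the x86 ORC unwinder uses): the
 * section reserves one 4-byte lookup entry per 256-byte block of
 * .text, rounded up, plus one terminating entry. A 1 MiB .text thus
 * reserves ((1048576 + 255) / 256 + 1) * 4 = 16388 bytes.
 */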
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
	. = ALIGN(4); \
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
		__tracedata_start = .; \
		KEEP(*(.tracedata)) \
		__tracedata_end = .; \
	}
#else
#define TRACEDATA
#endif

#define NOTES \
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
		__start_notes = .; \
		KEEP(*(.note.*)) \
		__stop_notes = .; \
	}

#define INIT_SETUP(initsetup_align) \
		. = ALIGN(initsetup_align); \
		__setup_start = .; \
		KEEP(*(.init.setup)) \
		__setup_end = .;

#define INIT_CALLS_LEVEL(level) \
		__initcall##level##_start = .; \
		KEEP(*(.initcall##level##.init)) \
		KEEP(*(.initcall##level##s.init))

#define INIT_CALLS \
		__initcall_start = .; \
		KEEP(*(.initcallearly.init)) \
		INIT_CALLS_LEVEL(0) \
		INIT_CALLS_LEVEL(1) \
		INIT_CALLS_LEVEL(2) \
		INIT_CALLS_LEVEL(3) \
		INIT_CALLS_LEVEL(4) \
		INIT_CALLS_LEVEL(5) \
		INIT_CALLS_LEVEL(rootfs) \
		INIT_CALLS_LEVEL(6) \
		INIT_CALLS_LEVEL(7) \
		__initcall_end = .;

#define CON_INITCALL \
		__con_initcall_start = .; \
		KEEP(*(.con_initcall.init)) \
		__con_initcall_end = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
	. = ALIGN(4); \
	__initramfs_start = .; \
	KEEP(*(.init.ramfs)) \
	. = ALIGN(8); \
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif

/*
 * Memory encryption operates on a page basis. Since we need to clear
 * the memory encryption mask for this section, it needs to be aligned
 * on a page boundary and be a page-size multiple in length.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION \
	. = ALIGN(PAGE_SIZE); \
	*(.data..percpu..decrypted) \
	. = ALIGN(PAGE_SIZE);
#else
#define PERCPU_DECRYPTED_SECTION
#endif


/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#define DISCARDS \
	/DISCARD/ : { \
	EXIT_TEXT \
	EXIT_DATA \
	EXIT_CALL \
	*(.discard) \
	*(.discard.*) \
	*(.modinfo) \
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline) \
	__per_cpu_start = .; \
	*(.data..percpu..first) \
	. = ALIGN(PAGE_SIZE); \
	*(.data..percpu..page_aligned) \
	. = ALIGN(cacheline); \
	*(.data..percpu..read_mostly) \
	. = ALIGN(cacheline); \
	*(.data..percpu) \
	*(.data..percpu..shared_aligned) \
	PERCPU_DECRYPTED_SECTION \
	__per_cpu_end = .;
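/*
 * Usage sketch (illustrative): DEFINE_PER_CPU() from
 * <linux/percpu-defs.h> emits its variable into .data..percpu, which
 * PERCPU_INPUT() gathers between __per_cpu_start and __per_cpu_end.
 * The variable name is hypothetical.
 */
#if 0	/* example only, never compiled */
#include <linux/percpu-defs.h>

DEFINE_PER_CPU(unsigned long, example_event_count);
#endif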
/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
	__per_cpu_load = .; \
	.data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
		PERCPU_INPUT(cacheline) \
	} phdr \
	. = __per_cpu_load + SIZEOF(.data..percpu);

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area. This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU_SECTION(cacheline) \
	. = ALIGN(PAGE_SIZE); \
	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
		__per_cpu_load = .; \
		PERCPU_INPUT(cacheline) \
	}


/*
 * Definition of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (and here always) smaller than a PAGE_SIZE,
 * so the sections with this restriction (or a similar one) are located
 * before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used.
 */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
	. = ALIGN(PAGE_SIZE); \
	.data : AT(ADDR(.data) - LOAD_OFFSET) { \
		INIT_TASK_DATA(inittask) \
		NOSAVE_DATA \
		PAGE_ALIGNED_DATA(pagealigned) \
		CACHELINE_ALIGNED_DATA(cacheline) \
		READ_MOSTLY_DATA(cacheline) \
		DATA_DATA \
		CONSTRUCTORS \
	} \
	BUG_TABLE

#define INIT_TEXT_SECTION(inittext_align) \
	. = ALIGN(inittext_align); \
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
		_sinittext = .; \
		INIT_TEXT \
		_einittext = .; \
	}

#define INIT_DATA_SECTION(initsetup_align) \
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
		INIT_DATA \
		INIT_SETUP(initsetup_align) \
		INIT_CALLS \
		CON_INITCALL \
		INIT_RAM_FS \
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align) \
	. = ALIGN(sbss_align); \
	__bss_start = .; \
	SBSS(sbss_align) \
	BSS(bss_align) \
	. = ALIGN(stop_align); \
	__bss_stop = .;