/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Linker command/script file
 *
 * Linker script for the Xtensa platform.
 */

#include <xtensa/config/core-isa.h>

#include <zephyr/linker/sections.h>

#include <zephyr/devicetree.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/linker/linker-tool.h>

#define RAMABLE_REGION RAM :sram0_phdr
#define ROMABLE_REGION RAM :sram0_phdr

#ifdef CONFIG_MMU
#define MMU_PAGE_ALIGN . = ALIGN(CONFIG_MMU_PAGE_SIZE);
#define HDR_MMU_PAGE_ALIGN ALIGN(CONFIG_MMU_PAGE_SIZE)
#define HDR_4K_OR_MMU_PAGE_ALIGN ALIGN(CONFIG_MMU_PAGE_SIZE)
#define LAST_RAM_ALIGN MMU_PAGE_ALIGN
#else
#define MMU_PAGE_ALIGN . = ALIGN(4);
#define HDR_MMU_PAGE_ALIGN ALIGN(4)
#define HDR_4K_OR_MMU_PAGE_ALIGN ALIGN(4096)
#endif

#define PHYS_SRAM0_ADDR (DT_REG_ADDR(DT_NODELABEL(sram0)))
#define PHYS_SRAM0_SIZE (DT_REG_SIZE(DT_NODELABEL(sram0)))

#define PHYS_ROM0_ADDR (DT_REG_ADDR(DT_NODELABEL(rom0)))
#define PHYS_ROM0_SIZE (DT_REG_SIZE(DT_NODELABEL(rom0)))

/* Usable RAM is after the exception vectors and page-aligned. */
#define PHYS_RAM_ADDR (PHYS_SRAM0_ADDR + CONFIG_SRAM_OFFSET)
#define PHYS_RAM_SIZE (PHYS_SRAM0_SIZE - CONFIG_SRAM_OFFSET)

MEMORY
{
    sram0_0_seg  : org = 0x00002000, len = 0x178
    sram0_1_seg  : org = 0x00002178, len = 0x8
    sram0_2_seg  : org = 0x00002180, len = 0x38
    sram0_3_seg  : org = 0x000021B8, len = 0x8
    sram0_4_seg  : org = 0x000021C0, len = 0x38
    sram0_5_seg  : org = 0x000021F8, len = 0x8
    sram0_6_seg  : org = 0x00002200, len = 0x38
    sram0_7_seg  : org = 0x00002238, len = 0x8
    sram0_8_seg  : org = 0x00002240, len = 0x38
    sram0_9_seg  : org = 0x00002278, len = 0x8
    sram0_10_seg : org = 0x00002280, len = 0x38
    sram0_11_seg : org = 0x000022B8, len = 0x8
    sram0_12_seg : org = 0x000022C0, len = 0x38
    sram0_13_seg : org = 0x000022F8, len = 0x8
    sram0_14_seg : org = 0x00002300, len = 0x38
    sram0_15_seg : org = 0x00002338, len = 0x8
    sram0_16_seg : org = 0x00002340, len = 0x38
    sram0_17_seg : org = 0x00002378, len = 0x48
    sram0_18_seg : org = 0x000023C0, len = 0x40
#ifdef CONFIG_XTENSA_MMU
    vec_helpers  : org = 0x00002400, len = (PHYS_RAM_ADDR - 0x00002400)
#endif
    RAM          : org = PHYS_RAM_ADDR, len = PHYS_RAM_SIZE

    /* Although ROM is of size 0x02000000, we limit it to 8KB so
     * fewer L2 page table entries are needed.
     */
    rom0_seg : org = PHYS_ROM0_ADDR, len = PHYS_ROM0_SIZE

#ifdef CONFIG_GEN_ISR_TABLES
    /* The space before the exception vectors is not being used,
     * so we stuff the temporary IDT_LIST there to avoid some
     * linker issues which would balloon the size of the
     * intermediate files (like zephyr_pre0.elf, to a couple
     * hundred MBs or even GBs).
     */
    IDT_LIST : org = 0x00000000, len = 0x2000
#endif
}
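
/* Worked example for the RAM region above (the values are purely
 * illustrative and not taken from any particular board devicetree):
 * if sram0 were at org 0x00000000 with a size of 0x08000000 and
 * CONFIG_SRAM_OFFSET were 0x4000, then PHYS_RAM_ADDR would evaluate
 * to 0x00004000 and PHYS_RAM_SIZE to 0x07FFC000, i.e. RAM would begin
 * just past the vector segments and the vec_helpers area and cover
 * the rest of sram0.
 */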
PHDRS
{
    sram0_0_phdr PT_LOAD;
    sram0_1_phdr PT_LOAD;
    sram0_2_phdr PT_LOAD;
    sram0_3_phdr PT_LOAD;
    sram0_4_phdr PT_LOAD;
    sram0_5_phdr PT_LOAD;
    sram0_6_phdr PT_LOAD;
    sram0_7_phdr PT_LOAD;
    sram0_8_phdr PT_LOAD;
    sram0_9_phdr PT_LOAD;
    sram0_10_phdr PT_LOAD;
    sram0_11_phdr PT_LOAD;
    sram0_12_phdr PT_LOAD;
    sram0_13_phdr PT_LOAD;
    sram0_14_phdr PT_LOAD;
    sram0_15_phdr PT_LOAD;
    sram0_16_phdr PT_LOAD;
    sram0_17_phdr PT_LOAD;
    sram0_18_phdr PT_LOAD;

#ifdef CONFIG_XTENSA_MMU
    vec_helpers_phdr PT_LOAD;
#endif

    rom0_phdr PT_LOAD;
    sram0_phdr PT_LOAD;
    sram0_bss_phdr PT_LOAD;
}


/* Default entry point: */
ENTRY(CONFIG_KERNEL_ENTRY)

_rom_store_table = 0;
PROVIDE(_memmap_vecbase_reset = 0x00002000);
PROVIDE(_memmap_reset_vector = 0xFE000000);

/* Various memory-map dependent cache attribute settings:
 *
 * Note that there is no cacheattr register, which means that
 * cacheattr is emulated through TLB way 6 (8x 512MB regions).
 * So the attributes here are the MMU memory attributes:
 *   0x3 - rwx, bypass cache
 *   0x7 - rwx, cache write back
 *   0xB - rwx, cache write through
 * Refer to the ISA manual for other attributes.
 */
_memmap_cacheattr_wb_base = 0x70000007;
_memmap_cacheattr_wt_base = 0xB000000B;
_memmap_cacheattr_bp_base = 0x30000003;
_memmap_cacheattr_unused_mask = 0x0FFFFFF0;
_memmap_cacheattr_wb_strict = 0x7FFFFFF7;
_memmap_cacheattr_wt_strict = 0xBFFFFFFB;
_memmap_cacheattr_bp_strict = 0x3FFFFFF3;
_memmap_cacheattr_wb_allvalid = 0x73333337;
_memmap_cacheattr_wt_allvalid = 0xB333333B;
_memmap_cacheattr_bp_allvalid = 0x33333333;
PROVIDE(_memmap_cacheattr_reset = _memmap_cacheattr_wb_allvalid);
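
/* Reading aid for the values above (following the usual Xtensa
 * convention that each nibble maps one 512MB region, bits [3:0]
 * covering 0x00000000-0x1FFFFFFF up through bits [31:28] covering
 * 0xE0000000-0xFFFFFFFF): _memmap_cacheattr_wb_base = 0x70000007
 * marks the lowest and highest 512MB regions as write-back cacheable
 * (0x7) and leaves the six middle regions unmapped (0x0), while the
 * "_allvalid" variant 0x73333337 fills those middle regions with the
 * cache-bypass attribute (0x3) instead. This note is illustrative
 * only; the ISA manual is the authoritative reference for the
 * attribute encoding.
 */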
SECTIONS
{

#include <zephyr/linker/rel-sections.ld>

#ifdef CONFIG_GEN_ISR_TABLES
#include <zephyr/linker/intlist.ld>
#endif

    .WindowVectors.text : ALIGN(4)
    {
        _WindowVectors_text_start = ABSOLUTE(.);
        KEEP (*(.WindowVectors.text))
        _WindowVectors_text_end = ABSOLUTE(.);
    } >sram0_0_seg :sram0_0_phdr

    .Level2InterruptVector.literal : ALIGN(4)
    {
        _Level2InterruptVector_literal_start = ABSOLUTE(.);
        *(.Level2InterruptVector.literal)
        _Level2InterruptVector_literal_end = ABSOLUTE(.);
    } >sram0_1_seg :sram0_1_phdr

    .Level2InterruptVector.text : ALIGN(4)
    {
        _Level2InterruptVector_text_start = ABSOLUTE(.);
        KEEP (*(.Level2InterruptVector.text))
        _Level2InterruptVector_text_end = ABSOLUTE(.);
    } >sram0_2_seg :sram0_2_phdr

    .Level3InterruptVector.literal : ALIGN(4)
    {
        _Level3InterruptVector_literal_start = ABSOLUTE(.);
        *(.Level3InterruptVector.literal)
        _Level3InterruptVector_literal_end = ABSOLUTE(.);
    } >sram0_3_seg :sram0_3_phdr

    .Level3InterruptVector.text : ALIGN(4)
    {
        _Level3InterruptVector_text_start = ABSOLUTE(.);
        KEEP (*(.Level3InterruptVector.text))
        _Level3InterruptVector_text_end = ABSOLUTE(.);
    } >sram0_4_seg :sram0_4_phdr

    .Level4InterruptVector.literal : ALIGN(4)
    {
        _Level4InterruptVector_literal_start = ABSOLUTE(.);
        *(.Level4InterruptVector.literal)
        _Level4InterruptVector_literal_end = ABSOLUTE(.);
    } >sram0_5_seg :sram0_5_phdr

    .Level4InterruptVector.text : ALIGN(4)
    {
        _Level4InterruptVector_text_start = ABSOLUTE(.);
        KEEP (*(.Level4InterruptVector.text))
        _Level4InterruptVector_text_end = ABSOLUTE(.);
    } >sram0_6_seg :sram0_6_phdr

    .Level5InterruptVector.literal : ALIGN(4)
    {
        _Level5InterruptVector_literal_start = ABSOLUTE(.);
        *(.Level5InterruptVector.literal)
        _Level5InterruptVector_literal_end = ABSOLUTE(.);
    } >sram0_7_seg :sram0_7_phdr

    .Level5InterruptVector.text : ALIGN(4)
    {
        _Level5InterruptVector_text_start = ABSOLUTE(.);
        KEEP (*(.Level5InterruptVector.text))
        _Level5InterruptVector_text_end = ABSOLUTE(.);
    } >sram0_8_seg :sram0_8_phdr

    .DebugExceptionVector.literal : ALIGN(4)
    {
        _DebugExceptionVector_literal_start = ABSOLUTE(.);
        *(.DebugExceptionVector.literal)
        _DebugExceptionVector_literal_end = ABSOLUTE(.);
    } >sram0_9_seg :sram0_9_phdr

    .DebugExceptionVector.text : ALIGN(4)
    {
        _DebugExceptionVector_text_start = ABSOLUTE(.);
        KEEP (*(.DebugExceptionVector.text))
        _DebugExceptionVector_text_end = ABSOLUTE(.);
    } >sram0_10_seg :sram0_10_phdr

    .NMIExceptionVector.literal : ALIGN(4)
    {
        _NMIExceptionVector_literal_start = ABSOLUTE(.);
        *(.NMIExceptionVector.literal)
        _NMIExceptionVector_literal_end = ABSOLUTE(.);
    } >sram0_11_seg :sram0_11_phdr

    .NMIExceptionVector.text : ALIGN(4)
    {
        _NMIExceptionVector_text_start = ABSOLUTE(.);
        KEEP (*(.NMIExceptionVector.text))
        _NMIExceptionVector_text_end = ABSOLUTE(.);
    } >sram0_12_seg :sram0_12_phdr

    .KernelExceptionVector.literal : ALIGN(4)
    {
        _KernelExceptionVector_literal_start = ABSOLUTE(.);
        *(.KernelExceptionVector.literal)
        _KernelExceptionVector_literal_end = ABSOLUTE(.);
    } >sram0_13_seg :sram0_13_phdr

    .KernelExceptionVector.text : ALIGN(4)
    {
        _KernelExceptionVector_text_start = ABSOLUTE(.);
        KEEP (*(.KernelExceptionVector.text))
        _KernelExceptionVector_text_end = ABSOLUTE(.);
    } >sram0_14_seg :sram0_14_phdr

    .UserExceptionVector.literal : ALIGN(4)
    {
        _UserExceptionVector_literal_start = ABSOLUTE(.);
        *(.UserExceptionVector.literal)
        _UserExceptionVector_literal_end = ABSOLUTE(.);
    } >sram0_15_seg :sram0_15_phdr

    .UserExceptionVector.text : ALIGN(4)
    {
        _UserExceptionVector_text_start = ABSOLUTE(.);
        KEEP (*(.UserExceptionVector.text))
        _UserExceptionVector_text_end = ABSOLUTE(.);
    } >sram0_16_seg :sram0_16_phdr

    .DoubleExceptionVector.literal : ALIGN(4)
    {
        _DoubleExceptionVector_literal_start = ABSOLUTE(.);
        *(.DoubleExceptionVector.literal)
        _DoubleExceptionVector_literal_end = ABSOLUTE(.);
    } >sram0_17_seg :sram0_17_phdr

    .DoubleExceptionVector.text : ALIGN(4)
    {
        _DoubleExceptionVector_text_start = ABSOLUTE(.);
        KEEP (*(.DoubleExceptionVector.text))
        _DoubleExceptionVector_text_end = ABSOLUTE(.);
    } >sram0_18_seg :sram0_18_phdr
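
/* The LIB_OBJ_FUNC_IN_SECT() helper defined just below selects the
 * literal and text sections of one function from one object file of
 * one library archive. As a purely illustrative example (these names
 * are not necessarily part of this build), an invocation like
 *
 *   LIB_OBJ_FUNC_IN_SECT(libkernel.a,sched.c.obj,z_ready_thread)
 *
 * is pasted by the preprocessor into the input-section selector
 *
 *   *libkernel.a:sched.c.obj(.literal.z_ready_thread .text.z_ready_thread)
 *
 * Passing an empty obj_file and '*' for func, as done further down,
 * widens the match to all objects in the archive and all per-function
 * .literal.* / .text.* sections.
 */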
#define LIB_OBJ_FUNC_IN_SECT(library, obj_file, func) \
    *##library##:##obj_file##(.literal.##func .text.##func) \

#ifdef CONFIG_XTENSA_MMU
    .vec_helpers :
    {
        /* There is quite some space between .DoubleExceptionVector
         * and the beginning of .text. We can put exception handling
         * code here to avoid TLB misses, thus speeding up exception
         * handling a little bit.
         *
         * Note: DO NOT put MMU init code here, as this region is
         *       mapped in the TLB manually. That manual entry would
         *       conflict with auto-refill TLB entries, resulting in
         *       a TLB multi-hit exception.
         */

        *libarch__xtensa__core.a:xtensa_asm2_util.S.obj(.literal .text)
        *libarch__xtensa__core.a:xtensa_asm2_util.S.obj(.iram.text .iram0.text)

        *libarch__xtensa__core.a:window_vectors.S.obj(.iram.text)

        *libarch__xtensa__core.a:crt1.S.obj(.literal .text)

        LIB_OBJ_FUNC_IN_SECT(libarch__xtensa__core.a,xtensa_asm2.c.obj,*)
        LIB_OBJ_FUNC_IN_SECT(libarch__xtensa__core.a,fatal.c.obj,*)
        LIB_OBJ_FUNC_IN_SECT(libarch__xtensa__core.a,cpu_idle.c.obj,*)

        *(.text.arch_is_in_isr)

        /* To support backtracing */
        LIB_OBJ_FUNC_IN_SECT(libarch__xtensa__core.a,xtensa_backtrace.c.obj,*)

        *libarch__xtensa__core.a:debug_helpers_asm.S.obj(.iram1.literal .iram1)

        /* Userspace related stuff */
        LIB_OBJ_FUNC_IN_SECT(libarch__xtensa__core.a,userspace.S.obj,xtensa_do_syscall)
        LIB_OBJ_FUNC_IN_SECT(libarch__xtensa__core.a,ptables.c.obj,xtensa_swap_update_page_tables)

        /* The entries below speed up execution by avoiding TLB
         * misses on frequently used functions.
         *
         * There is almost 1MB of space (due to TLB pinning), so we
         * can be generous.
         */
        LIB_OBJ_FUNC_IN_SECT(libkernel.a,,*)

        LIB_OBJ_FUNC_IN_SECT(libdrivers__console.a,,*)
        LIB_OBJ_FUNC_IN_SECT(libdrivers__timer.a,,*)

        *(.literal.z_vrfy_* .text.z_vrfy_*)
        *(.literal.z_mrsh_* .text.z_mrsh_*)
        *(.literal.z_impl_* .text.z_impl_*)
        *(.literal.z_obj_* .text.z_obj_*)

        *(.literal.k_sys_fatal_error_handler .text.k_sys_fatal_error_handler)
    } >vec_helpers :vec_helpers_phdr
#endif /* CONFIG_XTENSA_MMU */

#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_relocate.ld>
#endif

    .ResetVector.text : ALIGN(4)
    {
        __rom_region_start = ABSOLUTE(.);
        _ResetVector_text_start = ABSOLUTE(.);
        KEEP (*(.ResetVector.text))
        _ResetVector_text_end = ABSOLUTE(.);
    } >rom0_seg :rom0_phdr

    .text : HDR_MMU_PAGE_ALIGN
    {
        _stext = .;
        __text_region_start = .;
        z_mapped_start = .;
        _text_start = ABSOLUTE(.);
        *(.entry.text)
        *(.init.literal)
        *(.iram0.literal .iram.literal .iram.text.literal .iram0.text .iram.text)
        *(.iram1.literal .iram1)
        KEEP(*(.init))
        *(.literal .text .literal.* .text.* .stub .gnu.warning .gnu.linkonce.literal.* .gnu.linkonce.t.*.literal .gnu.linkonce.t.*)
        *(.fini.literal)
        KEEP(*(.fini))
        *(.gnu.version)

        #include <zephyr/linker/kobject-text.ld>

        MMU_PAGE_ALIGN

        _text_end = ABSOLUTE(.);
        _etext = .;
    } >RAMABLE_REGION
    __text_region_end = .;

    .rodata : HDR_MMU_PAGE_ALIGN
    {
        __rodata_region_start = ABSOLUTE(.);
        *(.rodata)
        *(.rodata.*)
        *(.gnu.linkonce.r.*)
        *(.rodata1)

        . = ALIGN(4);
        #include <snippets-rodata.ld>
        #include <zephyr/linker/kobject-rom.ld>
    } >RAMABLE_REGION

#include <zephyr/linker/common-rom.ld>

#include <zephyr/linker/thread-local-storage.ld>

#include <zephyr/linker/cplusplus-rom.ld>

    .rodata_end : ALIGN(4)
    {
        . = ALIGN(4);  /* this table MUST be 4-byte aligned */
        _bss_table_start = ABSOLUTE(.);
        LONG(_bss_start)
        LONG(_bss_end)
        _bss_table_end = ABSOLUTE(.);

        MMU_PAGE_ALIGN

        __rodata_region_end = ABSOLUTE(.);
    } >RAMABLE_REGION
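
/* A rough sketch of how the table emitted in .rodata_end above is
 * typically consumed (illustrative only; the authoritative consumer
 * is the early startup code, not this script): each LONG() pair forms
 * a (start, end) range that is zero-filled before the kernel runs,
 * along the lines of the following C:
 *
 *     extern uint32_t _bss_table_start[], _bss_table_end[];
 *
 *     for (uint32_t *p = _bss_table_start; p < _bss_table_end; p += 2) {
 *         memset((void *)p[0], 0, p[1] - p[0]);
 *     }
 */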
#ifdef CONFIG_USERSPACE
#define SMEM_PARTITION_ALIGN(size) MMU_PAGE_ALIGN
#define APP_SHARED_ALIGN MMU_PAGE_ALIGN

#include <app_smem.ld>

    _image_ram_start = _app_smem_start;
    _app_smem_size = _app_smem_end - _app_smem_start;
    _app_smem_num_words = _app_smem_size >> 2;
    _app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
    _app_smem_num_words = _app_smem_size >> 2;
#endif /* CONFIG_USERSPACE */

    .data : HDR_MMU_PAGE_ALIGN
    {
#ifndef CONFIG_USERSPACE
        _image_ram_start = ABSOLUTE(.);
#endif
        __data_start = ABSOLUTE(.);
        *(.data)
        *(.data.*)
        *(.gnu.linkonce.d.*)
        KEEP(*(.gnu.linkonce.d.*personality*))
        *(.data1)
        *(.sdata)
        *(.sdata.*)
        *(.gnu.linkonce.s.*)
        *(.sdata2)
        *(.sdata2.*)
        *(.gnu.linkonce.s2.*)
        KEEP(*(.jcr))

        . = ALIGN(4);
        #include <snippets-rwdata.ld>
        . = ALIGN(4);

        MMU_PAGE_ALIGN

        __data_end = ABSOLUTE(.);
    } >RAMABLE_REGION

#include <snippets-sections.ld>

#include <snippets-data-sections.ld>

#include <zephyr/linker/common-ram.ld>

#include <zephyr/linker/cplusplus-ram.ld>

#include <snippets-ram-sections.ld>

    .bss (NOLOAD) : HDR_MMU_PAGE_ALIGN
    {
        . = ALIGN (8);
        _bss_start = ABSOLUTE(.);
        *(.dynsbss)
        *(.sbss)
        *(.sbss.*)
        *(.gnu.linkonce.sb.*)
        *(.scommon)
        *(.sbss2)
        *(.sbss2.*)
        *(.gnu.linkonce.sb2.*)
        *(.dynbss)
        *(.bss)
        *(.bss.*)
        *(.gnu.linkonce.b.*)
        *(COMMON)
        *(.sram.bss)
        . = ALIGN (8);
        _bss_end = ABSOLUTE(.);

        MMU_PAGE_ALIGN

    } >RAM :sram0_bss_phdr

#include <zephyr/linker/common-noinit.ld>

/* Must be last in RAM */
#include <zephyr/linker/kobject-data.ld>

#include <zephyr/linker/ram-end.ld>

    _heap_start = .;

    PROVIDE(_heap_sentry = ORIGIN(RAM) + LENGTH(RAM));
    PROVIDE(_heap_end = ORIGIN(RAM) + LENGTH(RAM));

    PROVIDE(__stack = z_interrupt_stacks + CONFIG_ISR_STACK_SIZE);

#include <zephyr/linker/debug-sections.ld>

    .xtensa.info 0 : { *(.xtensa.info) }
    .xt.insn 0 :
    {
        KEEP (*(.xt.insn))
        KEEP (*(.gnu.linkonce.x.*))
    }
    .xt.prop 0 :
    {
        KEEP (*(.xt.prop))
        KEEP (*(.xt.prop.*))
        KEEP (*(.gnu.linkonce.prop.*))
    }
    .xt.lit 0 :
    {
        KEEP (*(.xt.lit))
        KEEP (*(.xt.lit.*))
        KEEP (*(.gnu.linkonce.p.*))
    }
    .debug.xt.callgraph 0 :
    {
        KEEP (*(.debug.xt.callgraph .debug.xt.callgraph.* .gnu.linkonce.xt.callgraph.*))
    }
}