/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright The TrustedFirmware-M Contributors
 *
 */

/* Linker script to configure memory regions. */
/* This file will be run through the pre-processor. */

#include "region_defs.h"

/* Include file with definitions for section alignments.
 * Note: it should be included after region_defs.h to let platform define
 * default values if needed. */
#include "tfm_s_linker_alignments.h"

MEMORY
{
    FLASH (rx)  : ORIGIN = S_CODE_START, LENGTH = S_CODE_SIZE
    RAM   (rw)  : ORIGIN = S_DATA_START, LENGTH = S_DATA_SIZE
#if defined(S_RAM_CODE_START)
    CODE_RAM (rwx) : ORIGIN = S_RAM_CODE_START, LENGTH = S_RAM_CODE_SIZE
#endif
#ifdef __ENABLE_SCRATCH__
    SCRATCH_X(rwx) : ORIGIN = SCRATCH_X_START, LENGTH = SCRATCH_X_SIZE
    SCRATCH_Y(rwx) : ORIGIN = SCRATCH_Y_START, LENGTH = SCRATCH_Y_SIZE
#endif
}

#ifndef TFM_LINKER_VENEERS_START
#define TFM_LINKER_VENEERS_START ALIGN(TFM_LINKER_VENEERS_ALIGNMENT)
#endif

#ifndef TFM_LINKER_VENEERS_END
#define TFM_LINKER_VENEERS_END ALIGN(TFM_LINKER_VENEERS_ALIGNMENT)
#endif

#define VENEERS() \
/* \
 * Place the CMSE Veneers (containing the SG instruction) after the code, in \
 * a separate at least 32 bytes aligned region so that the SAU can be \
 * programmed to just set this region as Non-Secure Callable. \
 */ \
.gnu.sgstubs TFM_LINKER_VENEERS_START : \
{ \
    *(.gnu.sgstubs*) \
} > FLASH \
/* GCC always places veneers at the end of .gnu.sgstubs section, so the only \
 * way to align the end of .gnu.sgstubs section is to align start of the \
 * next section */ \
.sgstubs_end : TFM_LINKER_VENEERS_END \
{ \
} > FLASH

__msp_stack_size__ = S_MSP_STACK_SIZE;

ENTRY(_entry_point)

SECTIONS
{
    /* Start address of the code. */
    Image$$PT_RO_START$$Base = ADDR(.TFM_VECTORS);

    .TFM_VECTORS : ALIGN(4)
    {
        __logical_binary_start = .;
        __vectors_start__ = .;
        KEEP(*(.vectors))
        . = ALIGN(4);
        __vectors_end__ = .;
    } > FLASH

    ASSERT(__vectors_start__ != __vectors_end__, ".vectors should not be empty")

#if defined(S_CODE_VECTOR_TABLE_SIZE)
    ASSERT(. <= ADDR(.TFM_VECTORS) + S_CODE_VECTOR_TABLE_SIZE, ".TFM_VECTORS section size overflow.")
    . = ADDR(.TFM_VECTORS) + S_CODE_VECTOR_TABLE_SIZE;
#endif

    /* RP2-series boot blocks (binary info header, embedded block, reset
     * handler) must sit near the start of the image. */
    .PICO_RESET : ALIGN(4)
    {
        KEEP (*(.binary_info_header))
        __binary_info_header_end = .;
        KEEP (*(.embedded_block))
        __embedded_block_end = .;
        KEEP (*(.reset))
    } > FLASH

#if defined(CONFIG_TFM_USE_TRUSTZONE) && !defined(TFM_LINKER_VENEERS_LOCATION_END)
    VENEERS()
#endif

    /**** Section for holding partition RO load data */
    /*
     * Sort the partition info by priority to guarantee the initialization
     * order. The first loaded partition will be initialized last in the
     * SFN model.
     */
    .TFM_SP_LOAD_LIST ALIGN(4) :
    {
        KEEP(*(.part_load_priority_00))
        KEEP(*(.part_load_priority_01))
        KEEP(*(.part_load_priority_02))
        KEEP(*(.part_load_priority_03))
    } > FLASH
    Image$$TFM_SP_LOAD_LIST$$RO$$Base = ADDR(.TFM_SP_LOAD_LIST);
    Image$$TFM_SP_LOAD_LIST$$RO$$Limit = ADDR(.TFM_SP_LOAD_LIST) + SIZEOF(.TFM_SP_LOAD_LIST);

    /**** PSA RoT RO part (CODE + RODATA) start here */
    . = ALIGN(TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT);
    Image$$TFM_PSA_CODE_START$$Base = .;

    .TFM_PSA_ROT_LINKER ALIGN(TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT) :
    {
        *tfm_psa_rot_partition*:(SORT_BY_ALIGNMENT(.text*))
        *tfm_psa_rot_partition*:(SORT_BY_ALIGNMENT(.rodata*))
        *(TFM_*_PSA-ROT_ATTR_FN)
        . = ALIGN(TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT);
    } > FLASH

    Image$$TFM_PSA_ROT_LINKER$$RO$$Base = ADDR(.TFM_PSA_ROT_LINKER);
    Image$$TFM_PSA_ROT_LINKER$$RO$$Limit = ADDR(.TFM_PSA_ROT_LINKER) + SIZEOF(.TFM_PSA_ROT_LINKER);
    Image$$TFM_PSA_ROT_LINKER$$Base = ADDR(.TFM_PSA_ROT_LINKER);
    Image$$TFM_PSA_ROT_LINKER$$Limit = ADDR(.TFM_PSA_ROT_LINKER) + SIZEOF(.TFM_PSA_ROT_LINKER);

    /**** PSA RoT RO part (CODE + RODATA) end here */
    Image$$TFM_PSA_CODE_END$$Base = .;

    /**** APPLICATION RoT RO part (CODE + RODATA) start here */
    Image$$TFM_APP_CODE_START$$Base = .;

    .TFM_APP_ROT_LINKER ALIGN(TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT) :
    {
        *tfm_app_rot_partition*:(SORT_BY_ALIGNMENT(.text*))
        *tfm_app_rot_partition*:(SORT_BY_ALIGNMENT(.rodata*))
        *(TFM_*_APP-ROT_ATTR_FN)
        . = ALIGN(TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT);
    } > FLASH

    Image$$TFM_APP_ROT_LINKER$$RO$$Base = ADDR(.TFM_APP_ROT_LINKER);
    Image$$TFM_APP_ROT_LINKER$$RO$$Limit = ADDR(.TFM_APP_ROT_LINKER) + SIZEOF(.TFM_APP_ROT_LINKER);
    Image$$TFM_APP_ROT_LINKER$$Base = ADDR(.TFM_APP_ROT_LINKER);
    Image$$TFM_APP_ROT_LINKER$$Limit = ADDR(.TFM_APP_ROT_LINKER) + SIZEOF(.TFM_APP_ROT_LINKER);

    /**** APPLICATION RoT RO part (CODE + RODATA) end here */
    Image$$TFM_APP_CODE_END$$Base = .;

#if defined(S_RAM_CODE_START)
    /* Flash drivers code that gets copied from Flash to CODE_RAM at boot
     * (registered in the copy table below). */
    .ER_CODE_SRAM ALIGN(S_RAM_CODE_START, 4) :
    {
        *libflash_drivers*:(SORT_BY_ALIGNMENT(.text*))
        *libflash_drivers*:(SORT_BY_ALIGNMENT(.rodata*))
        KEEP(*(.ramfunc))
        . = ALIGN(4); /* This alignment is needed to make the section size 4 bytes aligned */
    } > CODE_RAM AT > FLASH

    ASSERT(S_RAM_CODE_START % 4 == 0, "S_RAM_CODE_START must be divisible by 4")

    Image$$ER_CODE_SRAM$$RO$$Base = ADDR(.ER_CODE_SRAM);
    Image$$ER_CODE_SRAM$$RO$$Limit = ADDR(.ER_CODE_SRAM) + SIZEOF(.ER_CODE_SRAM);
    Image$$ER_CODE_SRAM$$Base = ADDR(.ER_CODE_SRAM);
    Image$$ER_CODE_SRAM$$Limit = ADDR(.ER_CODE_SRAM) + SIZEOF(.ER_CODE_SRAM);
#endif

    .ARM.extab :
    {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } > FLASH

    __exidx_start = .;
    .ARM.exidx :
    {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } > FLASH
    __exidx_end = .;

    /* Machine inspectable binary information */
    . = ALIGN(4);
    __binary_info_start = .;
    .binary_info :
    {
        KEEP(*(.binary_info.keep.*))
        *(.binary_info.*)
    } > FLASH
    __binary_info_end = .;

    /* Data copy is done by extra_init, so the standard startup copy symbols
     * are neutralized here. */
    __etext = 0;
    __data_start__ = 0;
    __data_end__ = 0;

    .ER_TFM_CODE ALIGN(4) (READONLY) :
    {
        . = ALIGN(4);
        /* mutex array */
        PROVIDE_HIDDEN (__mutex_array_start = .);
        KEEP(*(SORT(.mutex_array.*)))
        KEEP(*(.mutex_array))
        PROVIDE_HIDDEN (__mutex_array_end = .);

        . = ALIGN(4);
        /* preinit data */
        PROVIDE_HIDDEN (__preinit_array_start = .);
        KEEP(*(SORT(.preinit_array.*)))
        KEEP(*(.preinit_array))
        PROVIDE_HIDDEN (__preinit_array_end = .);

        . = ALIGN(4);
        /* init data */
        PROVIDE_HIDDEN (__init_array_start = .);
        KEEP(*(SORT(.init_array.*)))
        KEEP(*(.init_array))
        PROVIDE_HIDDEN (__init_array_end = .);

        . = ALIGN(4);
        /* finit data */
        PROVIDE_HIDDEN (__fini_array_start = .);
        KEEP(*(SORT(.fini_array.*)))
        KEEP(*(.fini_array))
        PROVIDE_HIDDEN (__fini_array_end = .);

        /* .copy.table: (from, to, size/4) triplets consumed at startup */
        . = ALIGN(4);
        __copy_table_start__ = .;
#ifdef RAM_VECTORS_SUPPORT
        /* Copy interrupt vectors from flash to RAM */
        LONG (__vectors_start__)                            /* From */
        LONG (__ram_vectors_start__)                        /* To   */
        LONG ((__vectors_end__ - __vectors_start__) / 4)    /* Size */
#endif
        LONG (LOADADDR(.TFM_DATA))
        LONG (ADDR(.TFM_DATA))
        LONG (SIZEOF(.TFM_DATA) / 4)

        LONG (LOADADDR(.TFM_PSA_ROT_LINKER_DATA))
        LONG (ADDR(.TFM_PSA_ROT_LINKER_DATA))
        LONG (SIZEOF(.TFM_PSA_ROT_LINKER_DATA) / 4)

        LONG (LOADADDR(.TFM_APP_ROT_LINKER_DATA))
        LONG (ADDR(.TFM_APP_ROT_LINKER_DATA))
        LONG (SIZEOF(.TFM_APP_ROT_LINKER_DATA) / 4)

#if defined (S_RAM_CODE_START)
        LONG (LOADADDR(.ER_CODE_SRAM))
        LONG (ADDR(.ER_CODE_SRAM))
        LONG (SIZEOF(.ER_CODE_SRAM) / 4)
#endif
        __copy_table_end__ = .;

        /* .zero.table: (addr, size/4) pairs zeroed at startup */
        . = ALIGN(4);
        __zero_table_start__ = .;
        LONG (ADDR(.TFM_BSS))
        LONG (SIZEOF(.TFM_BSS) / 4)
        LONG (ADDR(.TFM_PSA_ROT_LINKER_BSS))
        LONG (SIZEOF(.TFM_PSA_ROT_LINKER_BSS) / 4)

        LONG (ADDR(.TFM_APP_ROT_LINKER_BSS))
        LONG (SIZEOF(.TFM_APP_ROT_LINKER_BSS) / 4)
#if defined(CONFIG_TFM_PARTITION_META)
        LONG (ADDR(.TFM_SP_META_PTR))
        LONG (SIZEOF(.TFM_SP_META_PTR) / 4)
#endif
        __zero_table_end__ = .;

        *startup*(.text*)
        /* Remove flash driver related files */
        EXCLUDE_FILE (*libplatform_s*:*Flash_RPI*) *libplatform_s*:(SORT_BY_ALIGNMENT(.text*))
        *libtfm_spm*:(SORT_BY_ALIGNMENT(.text*))

        EXCLUDE_FILE (*libplatform_s*:*Flash_RPI*) *libplatform_s*:*(.rodata*)
        *libtfm_spm*:*(.rodata*)
    } > FLASH

    .TFM_UNPRIV_CODE ALIGN(TFM_LINKER_UNPRIV_CODE_ALIGNMENT) :
    {
        /* Remove flash driver related files */
        EXCLUDE_FILE (*libplatform_s*:*Flash_RPI*) *(SORT_BY_ALIGNMENT(.text*))

        KEEP(*(.init))
        KEEP(*(.fini))

        /* .ctors */
        *crtbegin.o(.ctors)
        *crtbegin?.o(.ctors)
        *(EXCLUDE_FILE(*crtend?.o *crtend.o) .ctors)
        *(SORT(.ctors.*))
        *(.ctors)

        /* .dtors */
        *crtbegin.o(.dtors)
        *crtbegin?.o(.dtors)
        *(EXCLUDE_FILE(*crtend?.o *crtend.o) .dtors)
        *(SORT(.dtors.*))
        *(.dtors)

        *(SORT_BY_ALIGNMENT(.rodata*))
        . = ALIGN(4);
        *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.flashdata*)))

        KEEP(*(.eh_frame*))
        . = ALIGN(TFM_LINKER_UNPRIV_CODE_ALIGNMENT);
    } > FLASH
    Image$$TFM_UNPRIV_CODE_START$$RO$$Base = ADDR(.TFM_UNPRIV_CODE);
    Image$$TFM_UNPRIV_CODE_END$$RO$$Limit = ADDR(.TFM_UNPRIV_CODE) + SIZEOF(.TFM_UNPRIV_CODE);

#if defined(CONFIG_TFM_USE_TRUSTZONE) && defined(TFM_LINKER_VENEERS_LOCATION_END)
    VENEERS()
#endif

    /* Position tag */
    . = ALIGN(TFM_LINKER_PT_RO_ALIGNMENT);
    Image$$PT_RO_END$$Base = .;

    /**** Base address of secure data area */
    .tfm_secure_data_start :
    {
        /* Relocate current position to RAM */
        . = ALIGN(4);
    } > RAM

    /*
     * MPU on Armv6-M/v7-M core in multi-core topology may require more strict
     * alignment, in that the MPU region base address must align with the MPU
     * region size.
     * As a result, on Armv6-M/v7-M cores, to save memory resource and MPU
     * regions, unprivileged data sections and privileged data sections are
     * separated and gathered in unprivileged/privileged data area respectively.
     * Keep BL2 shared data and MSP stack at the beginning of the secure data
     * area on Armv8-M cores, while move the two areas to the beginning of
     * privileged data region on Armv6-M/v7-M cores.
     */
#if defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__) || \
    defined(__ARM_ARCH_8_1M_MAIN__)
#ifdef CODE_SHARING
    /* The code sharing between bootloader and runtime requires to share the
     * global variables.
     */
    .TFM_SHARED_SYMBOLS ALIGN(TFM_LINKER_SHARED_SYMBOLS_ALIGNMENT) :
    {
        . += SHARED_SYMBOL_AREA_SIZE;
    } > RAM
#endif

    /* shared_data and msp_stack are overlapping on purpose when
     * msp_stack is extended until the beginning of RAM, when shared_data
     * was read out by partitions
     */
    .tfm_bl2_shared_data ALIGN(TFM_LINKER_BL2_SHARED_DATA_ALIGNMENT) :
    {
        . += BOOT_TFM_SHARED_DATA_SIZE;
    } > RAM

    /* Reserve the MSP stack, minus the 8-byte stack seal placed after it. */
    .msp_stack ALIGN(TFM_LINKER_MSP_STACK_ALIGNMENT) :
    {
        . += __msp_stack_size__ - 0x8;
    } > RAM
    Image$$ARM_LIB_STACK$$ZI$$Base = ADDR(.msp_stack);
    Image$$ARM_LIB_STACK$$ZI$$Limit = ADDR(.msp_stack) + SIZEOF(.msp_stack);

    .msp_stack_seal_res :
    {
        . += 0x8;
    } > RAM
    __StackSeal = ADDR(.msp_stack_seal_res);

#endif /* defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__) || \
        * defined(__ARM_ARCH_8_1M_MAIN__) */

    .ram_vector_table (NOLOAD): ALIGN(256) {
        *(.ram_vector_table)
    } > RAM

#if defined(ENABLE_HEAP)
    __heap_size__ = S_HEAP_SIZE;
    .heap (NOLOAD): ALIGN(8)
    {
        . = ALIGN(8);
        __end__ = .;
        end = __end__;
        PROVIDE(end = .);
        __HeapBase = .;
        KEEP(*(.heap*))
        . += __heap_size__;
        __HeapLimit = .;
        __heap_limit = .;
    } > RAM
#else
    end = 0;
#endif

#if defined(CONFIG_TFM_PARTITION_META)
    .TFM_SP_META_PTR ALIGN(TFM_LINKER_SP_META_PTR_ALIGNMENT) (NOLOAD):
    {
        *(.bss.SP_META_PTR_SPRTL_INST)
        . = ALIGN(TFM_LINKER_SP_META_PTR_ALIGNMENT);
    } > RAM
    Image$$TFM_SP_META_PTR$$ZI$$Base = ADDR(.TFM_SP_META_PTR);
    Image$$TFM_SP_META_PTR$$ZI$$Limit = ADDR(.TFM_SP_META_PTR) + SIZEOF(.TFM_SP_META_PTR);
    /* This is needed for the uniform configuration of MPU region. */
    Image$$TFM_SP_META_PTR_END$$ZI$$Limit = Image$$TFM_SP_META_PTR$$ZI$$Limit;
#endif

    /**** APPLICATION RoT DATA start here */
    . = ALIGN(TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT);
    Image$$TFM_APP_RW_STACK_START$$Base = .;

    .TFM_APP_ROT_LINKER_DATA ALIGN(TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT) :
    {
        *tfm_app_rot_partition*:(SORT_BY_ALIGNMENT(.data*))
        *(TFM_*_APP-ROT_ATTR_RW)
        . = ALIGN(4);
    } > RAM AT> FLASH
    Image$$TFM_APP_ROT_LINKER_DATA$$RW$$Base = ADDR(.TFM_APP_ROT_LINKER_DATA);
    Image$$TFM_APP_ROT_LINKER_DATA$$RW$$Limit = ADDR(.TFM_APP_ROT_LINKER_DATA) + SIZEOF(.TFM_APP_ROT_LINKER_DATA);

    .TFM_APP_ROT_LINKER_BSS ALIGN(4) (NOLOAD) :
    {
        start_of_TFM_APP_ROT_LINKER = .;
        *tfm_app_rot_partition*:(SORT_BY_ALIGNMENT(.bss*))
        *tfm_app_rot_partition*:*(COMMON)
        *(TFM_*_APP-ROT_ATTR_ZI)
        /* Pad the section by 4 bytes if it would otherwise be empty. */
        . += (. - start_of_TFM_APP_ROT_LINKER) ? 0 : 4;
        . = ALIGN(TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT);
    } > RAM AT> RAM
    Image$$TFM_APP_ROT_LINKER_DATA$$ZI$$Base = ADDR(.TFM_APP_ROT_LINKER_BSS);
    Image$$TFM_APP_ROT_LINKER_DATA$$ZI$$Limit = ADDR(.TFM_APP_ROT_LINKER_BSS) + SIZEOF(.TFM_APP_ROT_LINKER_BSS);

    /**** APPLICATION RoT DATA end here */
    Image$$TFM_APP_RW_STACK_END$$Base = .;

    /**** PSA RoT DATA start here */

    Image$$TFM_PSA_RW_STACK_START$$Base = .;

    .TFM_PSA_ROT_LINKER_DATA ALIGN(TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT) :
    {
        *tfm_psa_rot_partition*:(SORT_BY_ALIGNMENT(.data*))
        *(TFM_*_PSA-ROT_ATTR_RW)
        . = ALIGN(4);
    } > RAM AT> FLASH
    Image$$TFM_PSA_ROT_LINKER_DATA$$RW$$Base = ADDR(.TFM_PSA_ROT_LINKER_DATA);
    Image$$TFM_PSA_ROT_LINKER_DATA$$RW$$Limit = ADDR(.TFM_PSA_ROT_LINKER_DATA) + SIZEOF(.TFM_PSA_ROT_LINKER_DATA);

    .TFM_PSA_ROT_LINKER_BSS ALIGN(4) (NOLOAD) :
    {
        start_of_TFM_PSA_ROT_LINKER = .;
        *tfm_psa_rot_partition*:(SORT_BY_ALIGNMENT(.bss*))
        *tfm_psa_rot_partition*:*(COMMON)
        *(TFM_*_PSA-ROT_ATTR_ZI)
        /* Pad the section by 4 bytes if it would otherwise be empty. */
        . += (. - start_of_TFM_PSA_ROT_LINKER) ? 0 : 4;
        . = ALIGN(TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT);
    } > RAM AT> RAM
    Image$$TFM_PSA_ROT_LINKER_DATA$$ZI$$Base = ADDR(.TFM_PSA_ROT_LINKER_BSS);
    Image$$TFM_PSA_ROT_LINKER_DATA$$ZI$$Limit = ADDR(.TFM_PSA_ROT_LINKER_BSS) + SIZEOF(.TFM_PSA_ROT_LINKER_BSS);

    /**** PSA RoT DATA end here */
    Image$$TFM_PSA_RW_STACK_END$$Base = .;

#ifdef RAM_VECTORS_SUPPORT
    .ramVectors ALIGN(TFM_LINKER_RAM_VECTORS_ALIGNMENT) (NOLOAD) :
    {
        __ram_vectors_start__ = .;
        KEEP(*(.ram_vectors))
        __ram_vectors_end__ = .;
    } > RAM
    .TFM_DATA __ram_vectors_end__ :
#else

    .TFM_DATA ALIGN(4) :
#endif
    {
        *(vtable)
        *(.time_critical*)
        *(*libplatform_s*:*Flash_RPI* .text*)
        *(*libplatform_s*:*Flash_RPI* .rodata*)
        *(SORT_BY_ALIGNMENT(.data*))
        *(.sdata*)
        . = ALIGN(4);
        *(.after_data.*)

        KEEP(*(.jcr*))
        . = ALIGN(4);

    } > RAM AT> FLASH
    Image$$ER_TFM_DATA$$RW$$Base = ADDR(.TFM_DATA);
    Image$$ER_TFM_DATA$$RW$$Limit = ADDR(.TFM_DATA) + SIZEOF(.TFM_DATA);

    .uninitialized_data (NOLOAD): {
        . = ALIGN(4);
        *(.uninitialized_data*)
    } > RAM AT> RAM

    .TFM_BSS ALIGN(4) (NOLOAD) :
    {
        __bss_start__ = .;

        /* The runtime partition placed order is same as load partition */
        __partition_runtime_start__ = .;
        KEEP(*(.bss.part_runtime_priority_00))
        KEEP(*(.bss.part_runtime_priority_01))
        KEEP(*(.bss.part_runtime_priority_02))
        KEEP(*(.bss.part_runtime_priority_03))
        __partition_runtime_end__ = .;
        . = ALIGN(4);

        /* The runtime service placed order is same as load partition */
        __service_runtime_start__ = .;
        KEEP(*(.bss.serv_runtime_priority_00))
        KEEP(*(.bss.serv_runtime_priority_01))
        KEEP(*(.bss.serv_runtime_priority_02))
        KEEP(*(.bss.serv_runtime_priority_03))
        __service_runtime_end__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        *(.sbss*)
        . = ALIGN(4);
        __bss_end__ = .;
    } > RAM AT> RAM
    Image$$ER_TFM_DATA$$ZI$$Base = ADDR(.TFM_BSS);
    Image$$ER_TFM_DATA$$ZI$$Limit = ADDR(.TFM_BSS) + SIZEOF(.TFM_BSS);
    Image$$ER_PART_RT_POOL$$ZI$$Base = __partition_runtime_start__;
    Image$$ER_PART_RT_POOL$$ZI$$Limit = __partition_runtime_end__;
    Image$$ER_SERV_RT_POOL$$ZI$$Base = __service_runtime_start__;
    Image$$ER_SERV_RT_POOL$$ZI$$Limit = __service_runtime_end__;

    Image$$ER_TFM_DATA$$Base = ADDR(.TFM_DATA);
    Image$$ER_TFM_DATA$$Limit = ADDR(.TFM_DATA) + SIZEOF(.TFM_DATA) + SIZEOF(.TFM_BSS);

#if defined(CONFIG_TFM_USE_TRUSTZONE)
    Image$$ER_VENEER$$Base = ADDR(.gnu.sgstubs);
    Image$$VENEER_ALIGN$$Limit = ADDR(.sgstubs_end);

#if defined(TFM_LINKER_VENEERS_SIZE)
    ASSERT ((Image$$VENEER_ALIGN$$Limit - Image$$ER_VENEER$$Base) <= TFM_LINKER_VENEERS_SIZE, "Veneer region overflowed")
#endif
#endif

    Load$$LR$$LR_NS_PARTITION$$Base = NS_PARTITION_START;

#ifdef BL2
    Load$$LR$$LR_SECONDARY_PARTITION$$Base = SECONDARY_PARTITION_START;
#endif /* BL2 */

    PROVIDE(__stack = Image$$ARM_LIB_STACK$$ZI$$Limit);

#ifdef __ENABLE_SCRATCH__
    /* Start and end symbols must be word-aligned */
    .scratch_x : {
        __scratch_x_start__ = .;
        *(.scratch_x.*)
        . = ALIGN(4);
        __scratch_x_end__ = .;
    } > SCRATCH_X AT > FLASH
    __scratch_x_source__ = LOADADDR(.scratch_x);
    .scratch_y : {
        __scratch_y_start__ = .;
        *(.scratch_y.*)
        . = ALIGN(4);
        __scratch_y_end__ = .;
    } > SCRATCH_Y AT > FLASH
    __scratch_y_source__ = LOADADDR(.scratch_y);

    .stack1_dummy (NOLOAD):
    {
        *(.stack1*)
    } > SCRATCH_X
    .stack_dummy (NOLOAD):
    {
        KEEP(*(.stack*))
    } > SCRATCH_Y

    PROVIDE(__StackBottom = Image$$ARM_LIB_STACK$$ZI$$Base);
    PROVIDE(__StackTop = Image$$ARM_LIB_STACK$$ZI$$Limit);
    __StackOneTop = ORIGIN(SCRATCH_X) + LENGTH(SCRATCH_X);
    __StackOneBottom = __StackOneTop - SIZEOF(.stack1_dummy);
#endif

    ASSERT( __binary_info_header_end - __logical_binary_start <= 1024, "Binary info must be in first 1024 bytes of the binary")
}