/*
 * Copyright (c) 2017-2023 Arm Limited. All rights reserved.
 * Copyright (c) 2020-2021 IAR Systems AB
 * Copyright (c) 2022 Cypress Semiconductor Corporation (an Infineon company)
 * or an affiliate of Cypress Semiconductor Corporation. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is derivative of ../armclang/tfm_common_s.sct.template
 */

#include "region_defs.h"

/* Include file with definitions for section alignments.
 * Note: it should be included after region_defs.h to let the platform define
 * default values if needed. */
#include "tfm_s_linker_alignments.h"

/* Position tag blocks: code + RO-data */
define block PT_RO_START with alignment = TFM_LINKER_PT_RO_ALIGNMENT, size = 0 { };
define block PT_RO_END with alignment = TFM_LINKER_PT_RO_ALIGNMENT, size = 0 { };

define block ER_VECTORS with size = S_CODE_VECTOR_TABLE_SIZE {
    readonly section .intvec
};

#ifdef CONFIG_TFM_USE_TRUSTZONE
    /*
     * Place the CMSE veneers (containing the SG instruction) in a separate
     * region, aligned to at least 32 bytes, so that the SAU can be programmed
     * to set just this region as Non-Secure Callable.
     */
define block ER_VENEER with alignment = TFM_LINKER_VENEERS_ALIGNMENT {section Veneer$$CMSE};

define block VENEER_ALIGN with alignment = TFM_LINKER_VENEERS_ALIGNMENT, size = 0 { };
#endif

define block ER_TFM_CODE with fixed order, alignment = 8, maximum size = S_CODE_SIZE {
    ro section .text object *startup*,
    ro section .text object *libplatform_s*,
    ro section .rodata object *libplatform_s*,
    ro object *libtfm_spm*,
};

define block TFM_UNPRIV_CODE_START with alignment = TFM_LINKER_UNPRIV_CODE_ALIGNMENT {
    readonly
};

    /*
     * This empty, zero long execution region is here to mark the end address
     * of TFM unprivileged code.
     */
define block TFM_UNPRIV_CODE_END with alignment = TFM_LINKER_UNPRIV_CODE_ALIGNMENT, size = 0 { };

    /**** Section for holding partition RO load data */
    /*
     * Sort the partition info by priority to guarantee the initialization
     * order. In the SFN model, the first loaded partition is initialized last.
     */
define block TFM_SP_LOAD_LIST with alignment = 4 {
    ro section .part_load_priority_00 object load_info_*.o,
    ro section .part_load_priority_01 object load_info_*.o,
    ro section .part_load_priority_02 object load_info_*.o,
    ro section .part_load_priority_03 object load_info_*.o,
};

    /**** PSA RoT RO part (CODE + RODATA) start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of PSA RoT code.
     */
define block TFM_PSA_CODE_START with alignment = TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT, size = 0 { };

define block TFM_PSA_ROT_LINKER with alignment = TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT {
    ro object *tfm_psa_rot_partition*,
    section TFM_*_PSA-ROT_ATTR_FN object *libplatform_s*,
    section TFM_*_PSA-ROT_ATTR_FN object *.o,
};

    /*
     * This empty, zero long execution region is here to mark the end address
     * of PSA RoT code.
     */
define block TFM_PSA_CODE_END with alignment = TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT, size = 0 { };

    /**** APPLICATION RoT RO part (CODE + RODATA) start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of APP RoT code.
     */
define block TFM_APP_CODE_START with alignment = TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT, size = 0 { };

define block TFM_APP_ROT_LINKER with alignment = TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT {
    ro object *tfm_app_rot_partition*,
};

    /*
     * This empty, zero long execution region is here to mark the end address
     * of APP RoT code.
     */
define block TFM_APP_CODE_END with alignment = TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT, size = 0 { };

#if defined(S_RAM_CODE_START)
    /* Flash drivers code that gets copied from Flash */
    initialize by copy {
        ro object *libflash_drivers*,
    };

    define block ER_CODE_SRAM with fixed order, alignment = 4, maximum size = S_RAM_CODE_SIZE {
        rw section .text,
        rw section .rodata,
        rw section .textrw,    /* __ramfunc */
    };

place at address S_RAM_CODE_START { block ER_CODE_SRAM };
#endif

    /**** Base address of secure data area */
define block TFM_SECURE_DATA_START with size = 0 { };

#if ((defined(__ARM8M_MAINLINE__) && (__CORE__ == __ARM8M_MAINLINE__)) || \
     (defined(__ARM8M_BASELINE__) && (__CORE__ == __ARM8M_BASELINE__)) || \
     (defined(__ARM8EM_MAINLINE__) && (__CORE__ == __ARM8EM_MAINLINE__)))
    /* Shared area between BL2 and runtime to exchange data */
define block TFM_SHARED_DATA with alignment = TFM_LINKER_BL2_SHARED_DATA_ALIGNMENT, size = BOOT_TFM_SHARED_DATA_SIZE { };
define block ARM_LIB_STACK with alignment = TFM_LINKER_MSP_STACK_ALIGNMENT, size = S_MSP_STACK_SIZE - 0x8 { };
define overlay STACK_DATA {block TFM_SHARED_DATA};
define overlay STACK_DATA {block ARM_LIB_STACK};

define block STACKSEAL with size = 0x8 { };

keep {block TFM_SHARED_DATA, block ARM_LIB_STACK};
#endif /* (defined(__ARM8M_MAINLINE__) && (__CORE__ == __ARM8M_MAINLINE__)) || \
        * (defined(__ARM8M_BASELINE__) && (__CORE__ == __ARM8M_BASELINE__)) || \
        * (defined(__ARM8EM_MAINLINE__) && (__CORE__ == __ARM8EM_MAINLINE__)) */

#if defined(CONFIG_TFM_PARTITION_META)
define block TFM_SP_META_PTR with alignment = TFM_LINKER_SP_META_PTR_ALIGNMENT {
    zi section .bss.SP_META_PTR_SPRTL_INST
};

    /*
     * This empty, zero long execution region is here to mark the end address
     * of TFM partition metadata pointer region.
     */
define block TFM_SP_META_PTR_END with alignment = TFM_LINKER_SP_META_PTR_ALIGNMENT, size = 0 { };
#endif

define block TFM_APP_RW_STACK_START with alignment = TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT, size = 0 { };

define block TFM_APP_ROT_LINKER_DATA with alignment = TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT {
    rw object *tfm_app_rot_partition*,
    section TFM_*_APP-ROT_ATTR_RW object *.o,
    section TFM_*_APP-ROT_ATTR_ZI object *.o,
};

    /*
     * This empty, zero long execution region is here to mark the end address
     * of APP RoT RW and Stack.
     */
define block TFM_APP_RW_STACK_END with alignment = TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT, size = 0 { };

#if ((defined(__ARM6M__) && (__CORE__ == __ARM6M__)) || \
     (defined(__ARM7M__) && (__CORE__ == __ARM7M__)) || \
     (defined(__ARM7EM__) && (__CORE__ == __ARM7EM__)))
#ifdef S_DATA_PRIV_START
    /**** Privileged data area base address specified by Armv6-M/v7-M platform */
define block TFM_SECURE_PRIV_DATA_BOUNDARY with size = 0 { };
#endif

    /*
     * Move BL2 shared area and MSP stack to the beginning of privileged data
     * area on Armv6-M/v7-M platforms.
     */
    /* Shared area between BL2 and runtime to exchange data */
define block TFM_SHARED_DATA with alignment = TFM_LINKER_BL2_SHARED_DATA_ALIGNMENT, size = BOOT_TFM_SHARED_DATA_SIZE { };

    /* MSP */
define block ARM_LIB_STACK with alignment = TFM_LINKER_MSP_STACK_ALIGNMENT, size = S_MSP_STACK_SIZE { };

define overlay STACK_DATA {block TFM_SHARED_DATA};
define overlay STACK_DATA {block ARM_LIB_STACK};

keep {block TFM_SHARED_DATA, block ARM_LIB_STACK};
#endif /* (defined(__ARM6M__) && (__CORE__ == __ARM6M__)) || \
        * (defined(__ARM7M__) && (__CORE__ == __ARM7M__)) || \
        * (defined(__ARM7EM__) && (__CORE__ == __ARM7EM__)) */

#if defined(ENABLE_HEAP)
    define block HEAP with alignment = 8, size = S_HEAP_SIZE { };
    define block ARM_LIB_HEAP {block HEAP};
    keep {block HEAP, block ARM_LIB_HEAP};
#endif

define block ER_TFM_DATA with alignment = 8 {readwrite};

/* The runtime partitions are placed in the same order as the load partitions */
define block ER_PART_RT_POOL with alignment = 4 {
    zi section .bss.part_runtime_priority_00,
    zi section .bss.part_runtime_priority_01,
    zi section .bss.part_runtime_priority_02,
    zi section .bss.part_runtime_priority_03,
};

/* The runtime services are placed in the same order as the load partitions */
define block ER_SERV_RT_POOL with alignment = 4 {
    zi section .bss.serv_runtime_priority_00,
    zi section .bss.serv_runtime_priority_01,
    zi section .bss.serv_runtime_priority_02,
    zi section .bss.serv_runtime_priority_03,
};

keep {block ER_PART_RT_POOL, block ER_SERV_RT_POOL};

    /**** PSA RoT DATA start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of PSA RoT RW and Stack.
     */
define block TFM_PSA_RW_STACK_START with alignment = TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT, size = 0 { };

define block TFM_PSA_ROT_LINKER_DATA with alignment = TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT {
    rw object *tfm_psa_rot_partition*,
    section TFM_*_PSA-ROT_ATTR_RW object *.o,
    section TFM_*_PSA-ROT_ATTR_ZI object *.o,
};

    /*
     * This empty, zero long execution region is here to mark the end address
     * of PSA RoT RW and Stack.
     */
define block TFM_PSA_RW_STACK_END with alignment = TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT, size = 0x0 { };

#ifdef RAM_VECTORS_SUPPORT
define block ER_RAM_VECTORS with alignment = TFM_LINKER_RAM_VECTORS_ALIGNMENT { section .ramvec };
#endif

    /* This empty, zero long execution region is here to mark the limit address
     * of the last execution region that is allocated in SRAM.
     */
define block SRAM_WATERMARK with size = 0 { };

define block LR_CODE with fixed order, maximum size = S_CODE_SIZE {
    block ER_VECTORS,
#ifdef CONFIG_TFM_USE_TRUSTZONE
    block ER_VENEER,
    block VENEER_ALIGN,
#endif
    block PT_RO_START,
    block ER_TFM_CODE,
    block TFM_UNPRIV_CODE_START,
    block TFM_UNPRIV_CODE_END,

    block TFM_SP_LOAD_LIST,

    block TFM_PSA_CODE_START,

    block TFM_PSA_ROT_LINKER,

    block TFM_PSA_CODE_END,

    /**** APPLICATION RoT RO part (CODE + RODATA) start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of APP RoT code.
     */
    block TFM_APP_CODE_START,

    block TFM_APP_ROT_LINKER,

    /*
     * This empty, zero long execution region is here to mark the end address
     * of APP RoT code.
     */
    block TFM_APP_CODE_END,
    block PT_RO_END,
};

do not initialize {
    section .noinit,
    rw section .ramvec
};
initialize by copy with packing = none { readwrite }
#ifndef S_RAM_CODE_START
    except { section .textrw }
#endif
    ;
if (isdefinedsymbol(__USE_DLIB_PERTHREAD))
{
    // Required in a multi-threaded application
    initialize by copy { section __DLIB_PERTHREAD };
}

place at address S_CODE_START { block LR_CODE };

define block DATA with fixed order {
    /**** Base address of secure data area */
    block TFM_SECURE_DATA_START,

    /*
     * The MPU on an Armv6-M/v7-M core in a multi-core topology may impose a
     * stricter alignment requirement: the MPU region base address must be
     * aligned to the MPU region size.
     * As a result, on Armv6-M/v7-M cores, to save memory resources and MPU
     * regions, unprivileged data sections and privileged data sections are
     * separated and gathered in the unprivileged/privileged data areas
     * respectively.
     * Keep BL2 shared data and the MSP stack at the beginning of the secure
     * data area on Armv8-M cores, while moving the two areas to the beginning
     * of the privileged data region on Armv6-M/v7-M cores.
     */
#if ((defined(__ARM8M_MAINLINE__) && (__CORE__ == __ARM8M_MAINLINE__)) || \
     (defined(__ARM8M_BASELINE__) && (__CORE__ == __ARM8M_BASELINE__)) || \
     (defined(__ARM8EM_MAINLINE__) && (__CORE__ == __ARM8EM_MAINLINE__)))
    /* Shared area between BL2 and runtime to exchange data */
    overlay STACK_DATA,
    block STACKSEAL,
#endif /* (defined(__ARM8M_MAINLINE__) && (__CORE__ == __ARM8M_MAINLINE__)) || \
        * (defined(__ARM8M_BASELINE__) && (__CORE__ == __ARM8M_BASELINE__)) || \
        * (defined(__ARM8EM_MAINLINE__) && (__CORE__ == __ARM8EM_MAINLINE__)) */

#if defined(CONFIG_TFM_PARTITION_META)
    block TFM_SP_META_PTR,
    block TFM_SP_META_PTR_END,
#endif

    /**** APP RoT DATA start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of APP RoT RW and Stack.
     */
    block TFM_APP_RW_STACK_END,

#if ((defined(__ARM6M__) && (__CORE__ == __ARM6M__)) || \
     (defined(__ARM7M__) && (__CORE__ == __ARM7M__)) || \
     (defined(__ARM7EM__) && (__CORE__ == __ARM7EM__)))
#ifdef S_DATA_PRIV_START
    /**** Privileged data area base address specified by Armv6-M/v7-M platform */
};
define block PRIV_DATA with fixed order {
    block TFM_SECURE_PRIV_DATA_BOUNDARY,
#endif

    /*
     * Move BL2 shared area and MSP stack to the beginning of privileged data
     * area on Armv6-M/v7-M platforms.
     */
    /* Shared area between BL2 and runtime to exchange data */
    overlay STACK_DATA,
#endif /* (defined(__ARM6M__) && (__CORE__ == __ARM6M__)) || \
        * (defined(__ARM7M__) && (__CORE__ == __ARM7M__)) || \
        * (defined(__ARM7EM__) && (__CORE__ == __ARM7EM__)) */

#if defined(ENABLE_HEAP)
    block ARM_LIB_HEAP,
#endif

    block ER_TFM_DATA,

    block ER_PART_RT_POOL,

    block ER_SERV_RT_POOL,

    /**** PSA RoT DATA start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of PSA RoT RW and Stack.
     */
    block TFM_PSA_RW_STACK_START,

    block TFM_PSA_ROT_LINKER_DATA,

#ifdef RAM_VECTORS_SUPPORT
    block ER_RAM_VECTORS,
#endif
    /*
     * This empty, zero long execution region is here to mark the end address
     * of PSA RoT RW and Stack.
     */
    block TFM_PSA_RW_STACK_END,

    /* This empty, zero long execution region is here to mark the limit address
     * of the last execution region that is allocated in SRAM.
     */
    block SRAM_WATERMARK,

    /* Make sure that the sections allocated in SRAM do not exceed the size of
     * the SRAM available.
     */
};

place at address S_DATA_START { block DATA };
#if ((defined(__ARM6M__) && (__CORE__ == __ARM6M__)) || \
     (defined(__ARM7M__) && (__CORE__ == __ARM7M__)) || \
     (defined(__ARM7EM__) && (__CORE__ == __ARM7EM__))) && defined(S_DATA_PRIV_START)
place at address S_DATA_PRIV_START { block PRIV_DATA };
#endif

    /* Reserved place for the NS application.
     * No code will be placed here; only the address of this region is used in
     * the secure code to configure certain HW components. This generates an
     * "empty execution region description" warning during linking.
     */
define block LR_NS_PARTITION with size = NS_PARTITION_SIZE { };
place at address NS_PARTITION_START { block LR_NS_PARTITION };

#ifdef BL2
    /* Reserved place for the new image in case of firmware upgrade.
     * No code will be placed here; only the address of this region is used in
     * the secure code to configure certain HW components. This generates an
     * "empty execution region description" warning during linking.
     */
define block LR_SECONDARY_PARTITION with size = SECONDARY_PARTITION_SIZE { };
place at address SECONDARY_PARTITION_START { block LR_SECONDARY_PARTITION };
#endif /* BL2 */