/*
 * Copyright (c) 2017-2022 Arm Limited. All rights reserved.
 * Copyright (c) 2020-2021 IAR Systems AB
 * Copyright (c) 2022 Cypress Semiconductor Corporation (an Infineon company)
 * or an affiliate of Cypress Semiconductor Corporation. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is derivative of ../armclang/tfm_common_s.sct.template
 */

/*
 * IAR ILINK linker configuration (.icf) for the TF-M secure image.
 * It lays out the secure code region (LR_CODE) and the secure data
 * region (DATA), using empty zero-size blocks as address markers that
 * the runtime uses to locate PSA-RoT / APP-RoT boundaries.
 */

#include "region_defs.h"

/* Include file with definitions for section alignments.
 * Note: it should be included after region_defs.h to let platform define
 * default values if needed. */
#include "tfm_s_linker_alignments.h"

/* Secure vector table. Placed first inside LR_CODE (which is declared
 * "with fixed order" below), so it sits at S_CODE_START. */
define block ER_VECTORS with size = S_CODE_VECTOR_TABLE_SIZE {
    readonly section .intvec
};

#ifdef CONFIG_TFM_USE_TRUSTZONE
    /*
     * Place the CMSE Veneers (containing the SG instruction) in a separate
     * at least 32 bytes aligned region so that the SAU can be programmed to
     * just set this region as Non-Secure Callable.
     */
define block ER_VENEER with alignment = TFM_LINKER_VENEERS_ALIGNMENT {section Veneer$$CMSE};

/* Empty block: only its alignment matters — it pads whatever follows the
 * veneers up to the veneer alignment, bounding the NSC region. */
define block VENEER_ALIGN with alignment = TFM_LINKER_VENEERS_ALIGNMENT, size = 0 { };
#endif

/* Core TF-M code: startup, platform library and SPM, in this exact order. */
define block ER_TFM_CODE with fixed order, alignment = 8, maximum size = S_CODE_SIZE {
    ro section .text object *startup*,
    ro section .text object *libplatform_s*,
    ro section .rodata object *libplatform_s*,
    ro object *libtfm_spm*,
};

/* All remaining read-only content (SFN sections plus leftover readonly)
 * is gathered here as the unprivileged code area. */
define block TFM_UNPRIV_CODE with alignment = TFM_LINKER_UNPRIV_CODE_ALIGNMENT {
    section SFN,
    readonly
    };

    /**** Section for holding partition RO load data */
    /*
     * Sort the partition info by priority to guarantee the initing order.
     * The first loaded partition will be inited at last in SFN model.
     */
define block TFM_SP_LOAD_LIST with alignment = 4 {
    ro section .part_load_priority_lowest object load_info_*.o,
    ro section .part_load_priority_low object load_info_*.o,
    ro section .part_load_priority_normal object load_info_*.o,
    ro section .part_load_priority_high object load_info_*.o,
};

    /**** PSA RoT RO part (CODE + RODATA) start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of PSA RoT code.
     */
define block TFM_PSA_CODE_START with alignment = TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT, size = 0 { };

define block TFM_PSA_ROT_LINKER with alignment = TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT {
    ro object *tfm_psa_rot_partition*,
    section TFM_*_PSA-ROT_ATTR_FN object *libplatform_s*,
    section TFM_*_PSA-ROT_ATTR_FN object *.o,
    };

    /*
     * This empty, zero long execution region is here to mark the end address
     * of PSA RoT code.
     */
define block TFM_PSA_CODE_END with alignment = TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT, size = 0 { };

    /**** APPLICATION RoT RO part (CODE + RODATA) start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of APP RoT code.
     */
define block TFM_APP_CODE_START with alignment = TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT, size = 0 { };

define block TFM_APP_ROT_LINKER with alignment = TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT {
    ro object *tfm_app_rot_partition*,
    };

    /*
     * This empty, zero long execution region is here to mark the end address
     * of APP RoT code.
     */
define block TFM_APP_CODE_END with alignment = TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT, size = 0 { };

#if defined(S_CODE_SRAM_ALIAS_BASE)
    /* eFlash driver code that gets copied from Flash to SRAM */
initialize by copy {
    ro object Driver_GFC100_EFlash.o,
    ro object gfc100_eflash_drv.o,
    ro object musca_b1_eflash_drv.o
    };

define block ER_CODE_SRAM with fixed order, alignment = 8 {
    rw section .text,
    rw section .rodata
    };

place at address S_CODE_SRAM_ALIAS_BASE { block ER_CODE_SRAM };
#endif

    /**** Base address of secure data area */
define block TFM_SECURE_DATA_START with size = 0 { };

#if ((defined(__ARM8M_MAINLINE__) && (__CORE__ == __ARM8M_MAINLINE__)) || \
     (defined(__ARM8M_BASELINE__) && (__CORE__ == __ARM8M_BASELINE__)) || \
     (defined(__ARM8EM_MAINLINE__) && (__CORE__ == __ARM8EM_MAINLINE__)))
    /* Shared area between BL2 and runtime to exchange data */
define block TFM_SHARED_DATA with alignment = TFM_LINKER_BL2_SHARED_DATA_ALIGNMENT, size = BOOT_TFM_SHARED_DATA_SIZE { };
/* MSP stack. The 0x8 subtracted here matches the 0x8-byte STACKSEAL block
 * placed right after the stack — presumably so stack + seal together occupy
 * exactly S_MSP_STACK_SIZE (TODO confirm against platform region_defs.h). */
define block ARM_LIB_STACK with alignment = TFM_LINKER_MSP_STACK_ALIGNMENT, size = S_MSP_STACK_SIZE - 0x8 { };
/* BL2 shared data and the MSP stack overlap: the shared data is only used
 * during early boot, before the stack area is fully claimed. */
define overlay STACK_DATA {block TFM_SHARED_DATA};
define overlay STACK_DATA {block ARM_LIB_STACK};

define block STACKSEAL with size = 0x8 { };

keep {block TFM_SHARED_DATA, block ARM_LIB_STACK};
#endif /* (defined(__ARM8M_MAINLINE__) && (__CORE__ == __ARM8M_MAINLINE__)) || \
        * (defined(__ARM8M_BASELINE__) && (__CORE__ == __ARM8M_BASELINE__)) || \
        * (defined(__ARM8EM_MAINLINE__) && (__CORE__ == __ARM8EM_MAINLINE__)) */

#if defined(CONFIG_TFM_PARTITION_META)
define block TFM_SP_META_PTR with alignment = TFM_LINKER_SP_META_PTR_ALIGNMENT {
    zi section .bss.SP_META_PTR_SPRTL_INST
    };
#endif

/* Zero-size marker: start address of APP RoT RW data and stacks. */
define block TFM_APP_RW_STACK_START with alignment = TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT, size = 0 { };

define block TFM_APP_ROT_LINKER_DATA with alignment = TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT {
    rw object *tfm_app_rot_partition*,
    section TFM_*_APP-ROT_ATTR_RW object *.o,
    section TFM_*_APP-ROT_ATTR_ZI object *.o,
    };


    /*
     * This empty, zero long execution region is here to mark the end address
     * of APP RoT RW and Stack.
     */
define block TFM_APP_RW_STACK_END with alignment = TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT, size = 0 { };

#if ((defined(__ARM6M__) && (__CORE__ == __ARM6M__)) || \
     (defined(__ARM7M__) && (__CORE__ == __ARM7M__)) || \
     (defined(__ARM7EM__) && (__CORE__ == __ARM7EM__)))
#ifdef S_DATA_PRIV_START
    /**** Privileged data area base address specified by Armv6-M/v7-M platform */
define block TFM_SECURE_PRIV_DATA_BOUNDARY with size = 0 { };
#endif

    /*
     * Move BL2 shared area and MSP stack to the beginning of privileged data
     * area on Armv6-M/v7-M platforms.
     */
    /* Shared area between BL2 and runtime to exchange data */
define block TFM_SHARED_DATA with alignment = TFM_LINKER_BL2_SHARED_DATA_ALIGNMENT, size = BOOT_TFM_SHARED_DATA_SIZE { };

    /* MSP */
define block ARM_LIB_STACK with alignment = TFM_LINKER_MSP_STACK_ALIGNMENT, size = S_MSP_STACK_SIZE { };

define overlay STACK_DATA {block TFM_SHARED_DATA};
define overlay STACK_DATA {block ARM_LIB_STACK};

keep {block TFM_SHARED_DATA, block ARM_LIB_STACK};
#endif /* (defined(__ARM6M__) && (__CORE__ == __ARM6M__)) || \
        * (defined(__ARM7M__) && (__CORE__ == __ARM7M__)) || \
        * (defined(__ARM7EM__) && (__CORE__ == __ARM7EM__)) */

#if defined(ENABLE_HEAP)
    define block HEAP with alignment = 8, size = S_HEAP_SIZE { };
    define block ARM_LIB_HEAP {block HEAP};
    keep {block HEAP, block ARM_LIB_HEAP};
#endif

/* All remaining initialized/zero-initialized data not claimed above. */
define block ER_TFM_DATA with alignment = 8 {readwrite};

/* The runtime partition placed order is same as load partition */
define block ER_PART_RT_POOL with alignment = 4 {
    zi section .bss.part_runtime_priority_lowest,
    zi section .bss.part_runtime_priority_low,
    zi section .bss.part_runtime_priority_normal,
    zi section .bss.part_runtime_priority_high,
};

/* The runtime service placed order is same as load partition */
define block ER_SERV_RT_POOL with alignment = 4 {
    zi section .bss.serv_runtime_priority_lowest,
    zi section .bss.serv_runtime_priority_low,
    zi section .bss.serv_runtime_priority_normal,
    zi section .bss.serv_runtime_priority_high,
};

keep {block ER_PART_RT_POOL, block ER_SERV_RT_POOL};

    /**** PSA RoT DATA start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of PSA RoT RW and Stack.
     */
define block TFM_PSA_RW_STACK_START with alignment = TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT, size = 0 { };

define block TFM_PSA_ROT_LINKER_DATA with alignment = TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT {
    rw object *tfm_psa_rot_partition*,
    section TFM_*_PSA-ROT_ATTR_RW object *.o,
    section TFM_*_PSA-ROT_ATTR_ZI object *.o,
};


    /*
     * This empty, zero long execution region is here to mark the end address
     * of PSA RoT RW and Stack.
     */
define block TFM_PSA_RW_STACK_END with alignment = TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT, size = 0x0 { };

#ifdef RAM_VECTORS_SUPPORT
define block ER_RAM_VECTORS with alignment = TFM_LINKER_RAM_VECTORS_ALIGNMENT { section .ramvec };
#endif

#if defined (S_RAM_CODE_START)
define block TFM_RAM_CODE with alignment = 8 {
    rw section .textrw,       /* __ramfunc */
};
place at address S_RAM_CODE_START { block TFM_RAM_CODE };
#endif

    /* This empty, zero long execution region is here to mark the limit address
     * of the last execution region that is allocated in SRAM.
     */
define block SRAM_WATERMARK with size = 0 { };

/* Overall secure code image layout; "fixed order" makes the listed blocks
 * appear in exactly this sequence starting at S_CODE_START (placed below). */
define block LR_CODE with fixed order, maximum size = S_CODE_SIZE {
    block ER_VECTORS,
#ifdef CONFIG_TFM_USE_TRUSTZONE
    block ER_VENEER,
    block VENEER_ALIGN,
#endif
    block ER_TFM_CODE,
    block TFM_UNPRIV_CODE,

    block TFM_SP_LOAD_LIST,

    block TFM_PSA_CODE_START,

    block TFM_PSA_ROT_LINKER,

    block TFM_PSA_CODE_END,

/**** APPLICATION RoT RO part (CODE + RODATA) start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of APP RoT code.
     */
    block TFM_APP_CODE_START,

    block TFM_APP_ROT_LINKER,

    /*
     * This empty, zero long execution region is here to mark the end address
     * of APP RoT code.
     */
    block TFM_APP_CODE_END,
    };

do not initialize {
    section .noinit,
    rw section .ramvec
    };
/* Copy all readwrite data from flash at startup; RAM functions (.textrw)
 * are excluded only when no dedicated RAM code region exists, since in that
 * case TFM_RAM_CODE handles them via its own placement above. */
initialize by copy with packing = none { readwrite }
#ifndef S_RAM_CODE_START
    except { section .textrw }
#endif
    ;
if (isdefinedsymbol(__USE_DLIB_PERTHREAD))
{
    // Required in a multi-threaded application
    initialize by copy { section __DLIB_PERTHREAD };
}

place at address S_CODE_START { block LR_CODE };

define block DATA with fixed order {
    /**** Base address of secure data area */
    block TFM_SECURE_DATA_START,

    /*
     * MPU on Armv6-M/v7-M core in multi-core topology may require more strict
     * alignment that MPU region base address must align with the MPU region
     * size.
     * As a result, on Armv6-M/v7-M cores, to save memory resource and MPU
     * regions, unprivileged data sections and privileged data sections are
     * separated and gathered in unprivileged/privileged data area respectively.
     * Keep BL2 shared data and MSP stack at the beginning of the secure data
     * area on Armv8-M cores, while move the two areas to the beginning of
     * privileged data region on Armv6-M/v7-M cores.
     */
#if ((defined(__ARM8M_MAINLINE__) && (__CORE__ == __ARM8M_MAINLINE__)) || \
     (defined(__ARM8M_BASELINE__) && (__CORE__ == __ARM8M_BASELINE__)) || \
     (defined(__ARM8EM_MAINLINE__) && (__CORE__ == __ARM8EM_MAINLINE__)))
    /* Shared area between BL2 and runtime to exchange data */
    overlay STACK_DATA,
    block STACKSEAL,
#endif /* (defined(__ARM8M_MAINLINE__) && (__CORE__ == __ARM8M_MAINLINE__)) || \
        * (defined(__ARM8M_BASELINE__) && (__CORE__ == __ARM8M_BASELINE__)) || \
        * (defined(__ARM8EM_MAINLINE__) && (__CORE__ == __ARM8EM_MAINLINE__)) */

#if defined(CONFIG_TFM_PARTITION_META)
    block TFM_SP_META_PTR,
#endif

    /**** APP RoT DATA start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of APP RoT RW and Stack.
     */
    block TFM_APP_RW_STACK_START,

    block TFM_APP_ROT_LINKER_DATA,

    /*
     * This empty, zero long execution region is here to mark the end address
     * of APP RoT RW and Stack.
     */
    block TFM_APP_RW_STACK_END,

/* NOTE: when S_DATA_PRIV_START is defined on Armv6-M/v7-M, the "};" below
 * closes the DATA block early and the remaining content goes into a separate
 * PRIV_DATA block placed at S_DATA_PRIV_START — the two block definitions
 * share this one textual body via the preprocessor. */
#if ((defined(__ARM6M__) && (__CORE__ == __ARM6M__)) || \
     (defined(__ARM7M__) && (__CORE__ == __ARM7M__)) || \
     (defined(__ARM7EM__) && (__CORE__ == __ARM7EM__)))
#ifdef S_DATA_PRIV_START
    /**** Privileged data area base address specified by Armv6-M/v7-M platform */
};
define block PRIV_DATA with fixed order {
    block TFM_SECURE_PRIV_DATA_BOUNDARY,
#endif

    /*
     * Move BL2 shared area and MSP stack to the beginning of privileged data
     * area on Armv6-M/v7-M platforms.
     */
    /* Shared area between BL2 and runtime to exchange data */
    overlay STACK_DATA,
#endif /* (defined(__ARM6M__) && (__CORE__ == __ARM6M__)) || \
        * (defined(__ARM7M__) && (__CORE__ == __ARM7M__)) || \
        * (defined(__ARM7EM__) && (__CORE__ == __ARM7EM__)) */

#if defined(ENABLE_HEAP)
    block ARM_LIB_HEAP,
#endif

    block ER_TFM_DATA,

    block ER_PART_RT_POOL,

    block ER_SERV_RT_POOL,

    /**** PSA RoT DATA start here */
    /*
     * This empty, zero long execution region is here to mark the start address
     * of PSA RoT RW and Stack.
     */
    block TFM_PSA_RW_STACK_START,

    block TFM_PSA_ROT_LINKER_DATA,

#ifdef RAM_VECTORS_SUPPORT
    block ER_RAM_VECTORS,
#endif
    /*
     * This empty, zero long execution region is here to mark the end address
     * of PSA RoT RW and Stack.
     */
    block TFM_PSA_RW_STACK_END,

    /* This empty, zero long execution region is here to mark the limit address
     * of the last execution region that is allocated in SRAM.
     */
    block SRAM_WATERMARK,

    /* Make sure that the sections allocated in the SRAM does not exceed the
     * size of the SRAM available.
     */
};

place at address S_DATA_START { block DATA };
#if ((defined(__ARM6M__) && (__CORE__ == __ARM6M__)) || \
     (defined(__ARM7M__) && (__CORE__ == __ARM7M__)) || \
     (defined(__ARM7EM__) && (__CORE__ == __ARM7EM__))) && defined(S_DATA_PRIV_START)
place at address S_DATA_PRIV_START { block PRIV_DATA };
#endif

    /* Reserved place for NS application.
     * No code will be placed here, just address of this region is used in the
     * secure code to configure certain HW components. This generates an empty
     * execution region description warning during linking.
     */
define block LR_NS_PARTITION with size = NS_PARTITION_SIZE { };
place at address NS_PARTITION_START { block LR_NS_PARTITION };

#ifdef BL2
    /* Reserved place for new image in case of firmware upgrade.
     * No code will be placed here, just address of this region is used in the
     * secure code to configure certain HW components. This generates an empty
     * execution region description warning during linking.
     */
define block LR_SECONDARY_PARTITION with size = SECONDARY_PARTITION_SIZE { };
place at address SECONDARY_PARTITION_START { block LR_SECONDARY_PARTITION };
#endif /* BL2 */