;/*
; * Copyright (c) 2009-2024 Arm Limited
; * Copyright (c) 2022-2023 Cypress Semiconductor Corporation (an Infineon company)
; * or an affiliate of Cypress Semiconductor Corporation. All rights reserved.
; *
; * Licensed under the Apache License, Version 2.0 (the "License");
; * you may not use this file except in compliance with the License.
; * You may obtain a copy of the License at
; *
; *     http://www.apache.org/licenses/LICENSE-2.0
; *
; * Unless required by applicable law or agreed to in writing, software
; * distributed under the License is distributed on an "AS IS" BASIS,
; * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; * See the License for the specific language governing permissions and
; * limitations under the License.
; *
; *
; * This file is derivative of CMSIS V5.00 gcc_arm.ld
; */

/* Linker script to configure memory regions. */
/* This file is run through the C pre-processor before it is passed to the linker. */

#include "region_defs.h"

/* Include file with definitions for section alignments.
 * Note: it should be included after region_defs.h to let the platform define
 * default values if needed. */
#include "tfm_s_linker_alignments.h"

MEMORY
{
  FLASH    (rx)  : ORIGIN = S_CODE_START, LENGTH = S_CODE_SIZE
  RAM      (rw)  : ORIGIN = S_DATA_START, LENGTH = S_DATA_SIZE
#if defined(S_RAM_CODE_START)
  CODE_RAM (rwx) : ORIGIN = S_RAM_CODE_START, LENGTH = S_RAM_CODE_SIZE
#endif
}

#ifndef TFM_LINKER_VENEERS_START
#define TFM_LINKER_VENEERS_START ALIGN(TFM_LINKER_VENEERS_ALIGNMENT)
#endif

#ifndef TFM_LINKER_VENEERS_END
#define TFM_LINKER_VENEERS_END ALIGN(TFM_LINKER_VENEERS_ALIGNMENT)
#endif

#define VENEERS() \
/* \
 * Place the CMSE veneers (containing the SG instruction) after the code, in \
 * a separate region aligned to at least 32 bytes, so that the SAU can be \
 * programmed to mark just this region as Non-Secure Callable. \
 */ \
.gnu.sgstubs TFM_LINKER_VENEERS_START : \
{ \
    *(.gnu.sgstubs*) \
} > FLASH \
/* GCC always places veneers at the end of the .gnu.sgstubs section, so the \
 * only way to align the end of .gnu.sgstubs is to align the start of the \
 * next section. */ \
.sgstubs_end : TFM_LINKER_VENEERS_END \
{ \
} > FLASH
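
/* Note: VENEERS() is expanded either right after the vector table (default)
 * or after the unprivileged code when TFM_LINKER_VENEERS_LOCATION_END is
 * defined; see the two guarded expansions below. */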

__msp_stack_size__ = S_MSP_STACK_SIZE;

ENTRY(Reset_Handler)

SECTIONS
{
    /* Start address of the code. */
    Image$$PT_RO_START$$Base = ADDR(.TFM_VECTORS);
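
    /* The Image$$...$$Base/Limit symbols throughout this script follow the
     * Arm scatter-file naming convention, so that common TF-M code can refer
     * to region boundaries independently of the toolchain in use. */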

    .TFM_VECTORS : ALIGN(4)
    {
        __vectors_start__ = .;
        KEEP(*(.vectors))
        . = ALIGN(4);
        __vectors_end__ = .;
    } > FLASH

    ASSERT(__vectors_start__ != __vectors_end__, ".vectors should not be empty")

#if defined(S_CODE_VECTOR_TABLE_SIZE)
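    /* When the platform reserves a fixed-size slot for the vector table,
     * check that the real table fits and then advance the location counter
     * so the following sections start at the platform-defined offset. */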
    ASSERT(. <= ADDR(.TFM_VECTORS) + S_CODE_VECTOR_TABLE_SIZE, ".TFM_VECTORS section size overflow.")
    . = ADDR(.TFM_VECTORS) + S_CODE_VECTOR_TABLE_SIZE;
#endif

#if defined(CONFIG_TFM_USE_TRUSTZONE) && !defined(TFM_LINKER_VENEERS_LOCATION_END)
    VENEERS()
#endif

    /**** Section for holding partition RO load data */
    /*
     * Sort the partition info by priority to guarantee the initialization
     * order. In the SFN model, the partition loaded first is initialized last.
     */
    .TFM_SP_LOAD_LIST ALIGN(4) :
    {
       KEEP(*(.part_load_priority_00))
       KEEP(*(.part_load_priority_01))
       KEEP(*(.part_load_priority_02))
       KEEP(*(.part_load_priority_03))
    } > FLASH
    Image$$TFM_SP_LOAD_LIST$$RO$$Base = ADDR(.TFM_SP_LOAD_LIST);
    Image$$TFM_SP_LOAD_LIST$$RO$$Limit = ADDR(.TFM_SP_LOAD_LIST) + SIZEOF(.TFM_SP_LOAD_LIST);

    /**** PSA RoT RO part (CODE + RODATA) starts here */
    . = ALIGN(TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT);
    Image$$TFM_PSA_CODE_START$$Base = .;

    .TFM_PSA_ROT_LINKER ALIGN(TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT) :
    {
        *tfm_psa_rot_partition*:(SORT_BY_ALIGNMENT(.text*))
        *tfm_psa_rot_partition*:(SORT_BY_ALIGNMENT(.rodata*))
        *(TFM_*_PSA-ROT_ATTR_FN)
        . = ALIGN(TFM_LINKER_PSA_ROT_LINKER_CODE_ALIGNMENT);
    } > FLASH

    Image$$TFM_PSA_ROT_LINKER$$RO$$Base = ADDR(.TFM_PSA_ROT_LINKER);
    Image$$TFM_PSA_ROT_LINKER$$RO$$Limit = ADDR(.TFM_PSA_ROT_LINKER) + SIZEOF(.TFM_PSA_ROT_LINKER);
    Image$$TFM_PSA_ROT_LINKER$$Base = ADDR(.TFM_PSA_ROT_LINKER);
    Image$$TFM_PSA_ROT_LINKER$$Limit = ADDR(.TFM_PSA_ROT_LINKER) + SIZEOF(.TFM_PSA_ROT_LINKER);

    /**** PSA RoT RO part (CODE + RODATA) ends here */
    Image$$TFM_PSA_CODE_END$$Base = .;

    /**** APPLICATION RoT RO part (CODE + RODATA) starts here */
    Image$$TFM_APP_CODE_START$$Base = .;

    .TFM_APP_ROT_LINKER ALIGN(TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT) :
    {
        *tfm_app_rot_partition*:(SORT_BY_ALIGNMENT(.text*))
        *tfm_app_rot_partition*:(SORT_BY_ALIGNMENT(.rodata*))
        *(TFM_*_APP-ROT_ATTR_FN)
        . = ALIGN(TFM_LINKER_APP_ROT_LINKER_CODE_ALIGNMENT);
    } > FLASH

    Image$$TFM_APP_ROT_LINKER$$RO$$Base = ADDR(.TFM_APP_ROT_LINKER);
    Image$$TFM_APP_ROT_LINKER$$RO$$Limit = ADDR(.TFM_APP_ROT_LINKER) + SIZEOF(.TFM_APP_ROT_LINKER);
    Image$$TFM_APP_ROT_LINKER$$Base = ADDR(.TFM_APP_ROT_LINKER);
    Image$$TFM_APP_ROT_LINKER$$Limit = ADDR(.TFM_APP_ROT_LINKER) + SIZEOF(.TFM_APP_ROT_LINKER);

    /**** APPLICATION RoT RO part (CODE + RODATA) ends here */
    Image$$TFM_APP_CODE_END$$Base = .;

#if defined(S_RAM_CODE_START)
    /* Flash driver code that is copied from Flash to SRAM */
    .ER_CODE_SRAM ALIGN(S_RAM_CODE_START, 4) :
    {
        *libflash_drivers*:(SORT_BY_ALIGNMENT(.text*))
        *libflash_drivers*:(SORT_BY_ALIGNMENT(.rodata*))
        KEEP(*(.ramfunc))
        . = ALIGN(4); /* This alignment is needed to keep the section size 4-byte aligned */
    } > CODE_RAM AT > FLASH
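    /* The section executes from CODE_RAM (its VMA) but is stored in FLASH
     * (its LMA, selected by "AT > FLASH"); the copy-table entry added below
     * lets the startup code copy it into place. */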

    ASSERT(S_RAM_CODE_START % 4 == 0, "S_RAM_CODE_START must be divisible by 4")

    Image$$ER_CODE_SRAM$$RO$$Base = ADDR(.ER_CODE_SRAM);
    Image$$ER_CODE_SRAM$$RO$$Limit = ADDR(.ER_CODE_SRAM) + SIZEOF(.ER_CODE_SRAM);
    Image$$ER_CODE_SRAM$$Base = ADDR(.ER_CODE_SRAM);
    Image$$ER_CODE_SRAM$$Limit = ADDR(.ER_CODE_SRAM) + SIZEOF(.ER_CODE_SRAM);
#endif

    .ARM.extab :
    {
        *(.ARM.extab* .gnu.linkonce.armextab.*)
    } > FLASH

    __exidx_start = .;
    .ARM.exidx :
    {
        *(.ARM.exidx* .gnu.linkonce.armexidx.*)
    } > FLASH
    __exidx_end = .;

    .ER_TFM_CODE ALIGN(4) (READONLY) :
    {
        . = ALIGN(4);
        /* preinit data */
        PROVIDE_HIDDEN (__preinit_array_start = .);
        KEEP(*(.preinit_array))
        PROVIDE_HIDDEN (__preinit_array_end = .);

        . = ALIGN(4);
        /* init data */
        PROVIDE_HIDDEN (__init_array_start = .);
        KEEP(*(SORT(.init_array.*)))
        KEEP(*(.init_array))
        PROVIDE_HIDDEN (__init_array_end = .);

        . = ALIGN(4);
        /* fini data */
        PROVIDE_HIDDEN (__fini_array_start = .);
        KEEP(*(SORT(.fini_array.*)))
        KEEP(*(.fini_array))
        PROVIDE_HIDDEN (__fini_array_end = .);

        /* .copy.table */
        . = ALIGN(4);
        __copy_table_start__ = .;
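        /* Each copy-table entry is three 32-bit words: load (LMA) address,
         * execution (VMA) address and length in words. The CMSIS-style
         * startup code walks the table between __copy_table_start__ and
         * __copy_table_end__ and copies each region before main() is entered. */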
#ifdef RAM_VECTORS_SUPPORT
        /* Copy interrupt vectors from flash to RAM */
        LONG (__vectors_start__)                            /* From */
        LONG (__ram_vectors_start__)                        /* To   */
        LONG ((__vectors_end__ - __vectors_start__) / 4)    /* Size */
#endif
        LONG (LOADADDR(.TFM_DATA))
        LONG (ADDR(.TFM_DATA))
        LONG (SIZEOF(.TFM_DATA) / 4)

        LONG (LOADADDR(.TFM_PSA_ROT_LINKER_DATA))
        LONG (ADDR(.TFM_PSA_ROT_LINKER_DATA))
        LONG (SIZEOF(.TFM_PSA_ROT_LINKER_DATA) / 4)

        LONG (LOADADDR(.TFM_APP_ROT_LINKER_DATA))
        LONG (ADDR(.TFM_APP_ROT_LINKER_DATA))
        LONG (SIZEOF(.TFM_APP_ROT_LINKER_DATA) / 4)

#if defined (S_RAM_CODE_START)
        LONG (LOADADDR(.ER_CODE_SRAM))
        LONG (ADDR(.ER_CODE_SRAM))
        LONG (SIZEOF(.ER_CODE_SRAM) / 4)
#endif
        __copy_table_end__ = .;

        /* .zero.table */
        . = ALIGN(4);
        __zero_table_start__ = .;
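        /* Each zero-table entry is two 32-bit words: start address and length
         * in words; the startup code zero-initialises these regions. */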
        LONG (ADDR(.TFM_BSS))
        LONG (SIZEOF(.TFM_BSS) / 4)
        LONG (ADDR(.TFM_PSA_ROT_LINKER_BSS))
        LONG (SIZEOF(.TFM_PSA_ROT_LINKER_BSS) / 4)

        LONG (ADDR(.TFM_APP_ROT_LINKER_BSS))
        LONG (SIZEOF(.TFM_APP_ROT_LINKER_BSS) / 4)
#if defined(CONFIG_TFM_PARTITION_META)
        LONG (ADDR(.TFM_SP_META_PTR))
        LONG (SIZEOF(.TFM_SP_META_PTR) / 4)
#endif
        __zero_table_end__ = .;

        *startup*(.text*)
        *libplatform_s*:(SORT_BY_ALIGNMENT(.text*))
        *libtfm_spm*:(SORT_BY_ALIGNMENT(.text*))

        *libplatform_s*:*(.rodata*)
        *libtfm_spm*:*(.rodata*)
    } > FLASH

    .TFM_UNPRIV_CODE ALIGN(TFM_LINKER_UNPRIV_CODE_ALIGNMENT) :
    {
        *(SORT_BY_ALIGNMENT(.text*))

        KEEP(*(.init))
        KEEP(*(.fini))

        /* .ctors */
        *crtbegin.o(.ctors)
        *crtbegin?.o(.ctors)
        *(EXCLUDE_FILE(*crtend?.o *crtend.o) .ctors)
        *(SORT(.ctors.*))
        *(.ctors)

        /* .dtors */
        *crtbegin.o(.dtors)
        *crtbegin?.o(.dtors)
        *(EXCLUDE_FILE(*crtend?.o *crtend.o) .dtors)
        *(SORT(.dtors.*))
        *(.dtors)

        *(SORT_BY_ALIGNMENT(.rodata*))

        KEEP(*(.eh_frame*))
        . = ALIGN(TFM_LINKER_UNPRIV_CODE_ALIGNMENT);
    } > FLASH
    Image$$TFM_UNPRIV_CODE_START$$RO$$Base = ADDR(.TFM_UNPRIV_CODE);
    Image$$TFM_UNPRIV_CODE_END$$RO$$Limit = ADDR(.TFM_UNPRIV_CODE) + SIZEOF(.TFM_UNPRIV_CODE);

#if defined(CONFIG_TFM_USE_TRUSTZONE) && defined(TFM_LINKER_VENEERS_LOCATION_END)
    VENEERS()
#endif

    /* Position tag */
    . = ALIGN(TFM_LINKER_PT_RO_ALIGNMENT);
    Image$$PT_RO_END$$Base = .;

    /**** Base address of secure data area */
    .tfm_secure_data_start :
    {
        /* Relocate current position to RAM */
        . = ALIGN(4);
    } > RAM

    /*
     * The MPU on Armv6-M/v7-M cores in a multi-core topology may impose a
     * stricter alignment requirement: the MPU region base address must be
     * aligned to the MPU region size.
     * As a result, on Armv6-M/v7-M cores, to save memory and MPU regions,
     * unprivileged and privileged data sections are separated and gathered
     * into an unprivileged and a privileged data area respectively.
     * BL2 shared data and the MSP stack are kept at the beginning of the
     * secure data area on Armv8-M cores, while the two areas are moved to
     * the beginning of the privileged data region on Armv6-M/v7-M cores.
     */
#if defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__) || \
    defined(__ARM_ARCH_8_1M_MAIN__)
#ifdef CODE_SHARING
    /* Code sharing between the bootloader and the runtime requires the
     * global variables to be shared.
     */
    .TFM_SHARED_SYMBOLS ALIGN(TFM_LINKER_SHARED_SYMBOLS_ALIGNMENT) :
    {
        . += SHARED_SYMBOL_AREA_SIZE;
    } > RAM
#endif

    /* shared_data and msp_stack overlap on purpose when msp_stack is extended
     * down to the beginning of RAM, once shared_data has been read out by the
     * partitions.
     */
    .tfm_bl2_shared_data ALIGN(TFM_LINKER_BL2_SHARED_DATA_ALIGNMENT) :
    {
        . += BOOT_TFM_SHARED_DATA_SIZE;
    } > RAM

    .msp_stack ALIGN(TFM_LINKER_MSP_STACK_ALIGNMENT) :
    {
        . += __msp_stack_size__ - 0x8;
    } > RAM
    Image$$ARM_LIB_STACK$$ZI$$Base = ADDR(.msp_stack);
    Image$$ARM_LIB_STACK$$ZI$$Limit = ADDR(.msp_stack) + SIZEOF(.msp_stack);

    .msp_stack_seal_res :
    {
        . += 0x8;
    } > RAM
    __StackSeal = ADDR(.msp_stack_seal_res);
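
    /* The 8 bytes reserved above the stack are used for Armv8-M stack
     * sealing; the MSP stack size above is reduced by 0x8 so that the seal
     * still fits within S_MSP_STACK_SIZE. __StackSeal marks where the seal
     * value is written. */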

#endif /* defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__) || \
        * defined(__ARM_ARCH_8_1M_MAIN__) */

#if defined(ENABLE_HEAP)
    __heap_size__ = S_HEAP_SIZE;
    .heap ALIGN(8) :
    {
        __end__ = .;
        PROVIDE(end = .);
        __HeapBase = .;
        . += __heap_size__;
        __HeapLimit = .;
        __heap_limit = .; /* Add for _sbrk */
    } > RAM
#endif

#if defined(CONFIG_TFM_PARTITION_META)
    .TFM_SP_META_PTR ALIGN(TFM_LINKER_SP_META_PTR_ALIGNMENT) (NOLOAD):
    {
        *(.bss.SP_META_PTR_SPRTL_INST)
        . = ALIGN(TFM_LINKER_SP_META_PTR_ALIGNMENT);
    } > RAM
    Image$$TFM_SP_META_PTR$$ZI$$Base = ADDR(.TFM_SP_META_PTR);
    Image$$TFM_SP_META_PTR$$ZI$$Limit = ADDR(.TFM_SP_META_PTR) + SIZEOF(.TFM_SP_META_PTR);
    /* This is needed for uniform configuration of the MPU region. */
    Image$$TFM_SP_META_PTR_END$$ZI$$Limit = Image$$TFM_SP_META_PTR$$ZI$$Limit;
#endif

    /**** APPLICATION RoT DATA starts here */
    . = ALIGN(TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT);
    Image$$TFM_APP_RW_STACK_START$$Base = .;

    .TFM_APP_ROT_LINKER_DATA ALIGN(TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT) :
    {
        *tfm_app_rot_partition*:(SORT_BY_ALIGNMENT(.data*))
        *(TFM_*_APP-ROT_ATTR_RW)
        . = ALIGN(4);
    } > RAM AT> FLASH
    Image$$TFM_APP_ROT_LINKER_DATA$$RW$$Base = ADDR(.TFM_APP_ROT_LINKER_DATA);
    Image$$TFM_APP_ROT_LINKER_DATA$$RW$$Limit = ADDR(.TFM_APP_ROT_LINKER_DATA) + SIZEOF(.TFM_APP_ROT_LINKER_DATA);

    .TFM_APP_ROT_LINKER_BSS ALIGN(4) (NOLOAD) :
    {
        start_of_TFM_APP_ROT_LINKER = .;
        *tfm_app_rot_partition*:(SORT_BY_ALIGNMENT(.bss*))
        *tfm_app_rot_partition*:*(COMMON)
        *(TFM_*_APP-ROT_ATTR_ZI)
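        /* Pad the section by 4 bytes if it would otherwise be empty, so that
         * it never has zero size. */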
        . += (. - start_of_TFM_APP_ROT_LINKER) ? 0 : 4;
        . = ALIGN(TFM_LINKER_APP_ROT_LINKER_DATA_ALIGNMENT);
    } > RAM AT> RAM
    Image$$TFM_APP_ROT_LINKER_DATA$$ZI$$Base = ADDR(.TFM_APP_ROT_LINKER_BSS);
    Image$$TFM_APP_ROT_LINKER_DATA$$ZI$$Limit = ADDR(.TFM_APP_ROT_LINKER_BSS) + SIZEOF(.TFM_APP_ROT_LINKER_BSS);

    /**** APPLICATION RoT DATA ends here */
    Image$$TFM_APP_RW_STACK_END$$Base = .;

#if defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7EM__)
#ifdef S_DATA_PRIV_START
    /**** Privileged data area base address specified by the Armv6-M/v7-M platform */
    .tfm_secure_priv_data_boundary :
    {
        . = ABSOLUTE(S_DATA_PRIV_START) ;
    } > RAM
#endif

    /*
     * Move the BL2 shared area and the MSP stack to the beginning of the
     * privileged data area on Armv6-M/v7-M platforms.
     */

    /* shared_data and msp_stack overlap on purpose when msp_stack is extended
     * down to the beginning of RAM, once shared_data has been read out by the
     * partitions.
     */
    .tfm_bl2_shared_data ALIGN(TFM_LINKER_BL2_SHARED_DATA_ALIGNMENT) :
    {
        . += BOOT_TFM_SHARED_DATA_SIZE;
    } > RAM AT> RAM

    .msp_stack ALIGN(TFM_LINKER_MSP_STACK_ALIGNMENT) :
    {
        . += __msp_stack_size__;
    } > RAM
    Image$$ARM_LIB_STACK$$ZI$$Base = ADDR(.msp_stack);
    Image$$ARM_LIB_STACK$$ZI$$Limit = ADDR(.msp_stack) + SIZEOF(.msp_stack);
#endif /* defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \
        * defined(__ARM_ARCH_7EM__) */

    /**** PSA RoT DATA starts here */

    Image$$TFM_PSA_RW_STACK_START$$Base = .;

    .TFM_PSA_ROT_LINKER_DATA ALIGN(TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT) :
    {
        *tfm_psa_rot_partition*:(SORT_BY_ALIGNMENT(.data*))
        *(TFM_*_PSA-ROT_ATTR_RW)
        . = ALIGN(4);
    } > RAM AT> FLASH
    Image$$TFM_PSA_ROT_LINKER_DATA$$RW$$Base = ADDR(.TFM_PSA_ROT_LINKER_DATA);
    Image$$TFM_PSA_ROT_LINKER_DATA$$RW$$Limit = ADDR(.TFM_PSA_ROT_LINKER_DATA) + SIZEOF(.TFM_PSA_ROT_LINKER_DATA);

    .TFM_PSA_ROT_LINKER_BSS ALIGN(4) (NOLOAD) :
    {
        start_of_TFM_PSA_ROT_LINKER = .;
        *tfm_psa_rot_partition*:(SORT_BY_ALIGNMENT(.bss*))
        *tfm_psa_rot_partition*:*(COMMON)
        *(TFM_*_PSA-ROT_ATTR_ZI)
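        /* Pad the section by 4 bytes if it would otherwise be empty, so that
         * it never has zero size. */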
        . += (. - start_of_TFM_PSA_ROT_LINKER) ? 0 : 4;
        . = ALIGN(TFM_LINKER_PSA_ROT_LINKER_DATA_ALIGNMENT);
    } > RAM AT> RAM
    Image$$TFM_PSA_ROT_LINKER_DATA$$ZI$$Base = ADDR(.TFM_PSA_ROT_LINKER_BSS);
    Image$$TFM_PSA_ROT_LINKER_DATA$$ZI$$Limit = ADDR(.TFM_PSA_ROT_LINKER_BSS) + SIZEOF(.TFM_PSA_ROT_LINKER_BSS);

    /**** PSA RoT DATA ends here */
    Image$$TFM_PSA_RW_STACK_END$$Base = .;

#ifdef RAM_VECTORS_SUPPORT
    .ramVectors ALIGN(TFM_LINKER_RAM_VECTORS_ALIGNMENT) (NOLOAD) :
    {
        __ram_vectors_start__ = .;
        KEEP(*(.ram_vectors))
        __ram_vectors_end__   = .;
    } > RAM
    .TFM_DATA __ram_vectors_end__ :
#else

    .TFM_DATA ALIGN(4) :
#endif
    {
        *(SORT_BY_ALIGNMENT(.data*))

        KEEP(*(.jcr*))
        . = ALIGN(4);

    } > RAM AT> FLASH
    Image$$ER_TFM_DATA$$RW$$Base = ADDR(.TFM_DATA);
    Image$$ER_TFM_DATA$$RW$$Limit = ADDR(.TFM_DATA) + SIZEOF(.TFM_DATA);

    .TFM_BSS ALIGN(4) (NOLOAD) :
    {
        __bss_start__ = .;

        /* Partition runtime objects are placed in the same order as the
         * partition load list. */
        __partition_runtime_start__ = .;
        KEEP(*(.bss.part_runtime_priority_00))
        KEEP(*(.bss.part_runtime_priority_01))
        KEEP(*(.bss.part_runtime_priority_02))
        KEEP(*(.bss.part_runtime_priority_03))
        __partition_runtime_end__ = .;
        . = ALIGN(4);

        /* Service runtime objects are placed in the same order as the
         * partition load list. */
        __service_runtime_start__ = .;
        KEEP(*(.bss.serv_runtime_priority_00))
        KEEP(*(.bss.serv_runtime_priority_01))
        KEEP(*(.bss.serv_runtime_priority_02))
        KEEP(*(.bss.serv_runtime_priority_03))
        __service_runtime_end__ = .;
        *(SORT_BY_ALIGNMENT(.bss*))
        *(COMMON)
        . = ALIGN(4);
        __bss_end__ = .;
    } > RAM AT> RAM
    Image$$ER_TFM_DATA$$ZI$$Base = ADDR(.TFM_BSS);
    Image$$ER_TFM_DATA$$ZI$$Limit = ADDR(.TFM_BSS) + SIZEOF(.TFM_BSS);
    Image$$ER_PART_RT_POOL$$ZI$$Base = __partition_runtime_start__;
    Image$$ER_PART_RT_POOL$$ZI$$Limit = __partition_runtime_end__;
    Image$$ER_SERV_RT_POOL$$ZI$$Base = __service_runtime_start__;
    Image$$ER_SERV_RT_POOL$$ZI$$Limit = __service_runtime_end__;

    Image$$ER_TFM_DATA$$Base = ADDR(.TFM_DATA);
    Image$$ER_TFM_DATA$$Limit = ADDR(.TFM_DATA) + SIZEOF(.TFM_DATA) + SIZEOF(.TFM_BSS);

#if defined(CONFIG_TFM_USE_TRUSTZONE)
    Image$$ER_VENEER$$Base = ADDR(.gnu.sgstubs);
    Image$$VENEER_ALIGN$$Limit = ADDR(.sgstubs_end);
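    /* These two symbols bound the Non-Secure Callable veneer region; platform
     * code typically uses them when programming the SAU NSC region. */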

#if defined(TFM_LINKER_VENEERS_SIZE)
    ASSERT ((Image$$VENEER_ALIGN$$Limit - Image$$ER_VENEER$$Base) <= TFM_LINKER_VENEERS_SIZE, "Veneer region overflowed")
#endif
#endif

    Load$$LR$$LR_NS_PARTITION$$Base = NS_PARTITION_START;

#ifdef BL2
    Load$$LR$$LR_SECONDARY_PARTITION$$Base = SECONDARY_PARTITION_START;
#endif /* BL2 */

    PROVIDE(__stack = Image$$ARM_LIB_STACK$$ZI$$Limit);
}