1/*
2 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <common/bl_common.ld.h>
8#include <lib/xlat_tables/xlat_tables_defs.h>
9
/*
 * Object format and architecture are supplied per platform; execution of the
 * BL31 image begins at bl31_entrypoint.
 */
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)
13
/*
 * Memory regions available to BL31.
 *
 * RAM holds the loadable image content (code, RO data, RW data). When
 * SEPARATE_NOBITS_REGION is enabled, zero-initialised content (stacks, BSS,
 * translation tables, coherent RAM) goes to a distinct NOBITS region;
 * otherwise NOBITS is simply an alias for RAM.
 */
MEMORY {
    RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE

#if SEPARATE_NOBITS_REGION
    /* "!a" — sections placed here occupy no space in the image file. */
    NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
#else /* SEPARATE_NOBITS_REGION */
#   define NOBITS RAM
#endif /* SEPARATE_NOBITS_REGION */
}
23
24#ifdef PLAT_EXTRA_LD_SCRIPT
25#   include <plat.ld.S>
26#endif /* PLAT_EXTRA_LD_SCRIPT */
27
SECTIONS {
    /* Record the bounds of the main RAM region for consumers of the layout. */
    RAM_REGION_START = ORIGIN(RAM);
    RAM_REGION_LENGTH = LENGTH(RAM);
    . = BL31_BASE;

    /* The image base must be page-aligned so per-page MMU permissions work. */
    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL31_BASE address is not aligned on a page boundary.")

    __BL31_START__ = .;

#if SEPARATE_CODE_AND_RODATA
    /*
     * Code and read-only data are kept in separate page-aligned sections so
     * they can be mapped with different permissions (executable vs. read-only).
     */
    .text . : {
        __TEXT_START__ = .;

        /* Keep the entry point object first so it sits at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(SORT(.text*)))
        *(.vectors)
        __TEXT_END_UNALIGNED__ = .;

        /* Pad to a page boundary: no rodata may share the last code page. */
        . = ALIGN(PAGE_SIZE);

        __TEXT_END__ = .;
    } >RAM

    .rodata . : {
        __RODATA_START__ = .;

        *(SORT_BY_ALIGNMENT(.rodata*))

#   if PLAT_EXTRA_RODATA_INCLUDES
        /* Platform-provided extra rodata input-section specs. */
#       include <plat.ld.rodata.inc>
#   endif /* PLAT_EXTRA_RODATA_INCLUDES */

        /* Common read-only content (macro from common/bl_common.ld.h). */
        RODATA_COMMON

        . = ALIGN(8);

        /* Emits the pubsub event symbol tables into this section. */
#   include <lib/el3_runtime/pubsub_events.h>
        __RODATA_END_UNALIGNED__ = .;

        /* Pad to a page boundary so RW data cannot share a rodata page. */
        . = ALIGN(PAGE_SIZE);

        __RODATA_END__ = .;
    } >RAM
#else /* SEPARATE_CODE_AND_RODATA */
    /* Combined read-only section: code, rodata and vectors in one mapping. */
    .ro . : {
        __RO_START__ = .;

        /* Keep the entry point object first so it sits at BL31_BASE. */
        *bl31_entrypoint.o(.text*)
        *(SORT_BY_ALIGNMENT(.text*))
        *(SORT_BY_ALIGNMENT(.rodata*))

        RODATA_COMMON

        . = ALIGN(8);

#   include <lib/el3_runtime/pubsub_events.h>

        *(.vectors)

        __RO_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as read-only,
         * executable. No RW data from the next section must creep in. Ensure
         * that the rest of the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __RO_END__ = .;
    } >RAM
#endif /* SEPARATE_CODE_AND_RODATA */

    /*
     * NOTE(review): __CPU_OPS_START__/__CPU_OPS_END__ are presumably defined
     * by the common section macros above (see common/bl_common.ld.h) — not
     * visible in this file. The link fails if no cpu_ops were registered.
     */
    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
        "cpu_ops not defined for this platform.")

#if SPM_MM || (SPMC_AT_EL3 && SPMC_AT_EL3_SEL0_SP)
    /* Platforms may relocate the shim's VMA; default is to run it from RAM. */
#   ifndef SPM_SHIM_EXCEPTIONS_VMA
#       define SPM_SHIM_EXCEPTIONS_VMA RAM
#   endif /* SPM_SHIM_EXCEPTIONS_VMA */

    /*
     * Exception vectors of the SPM shim layer. They must be aligned to a 2K
     * address but we need to place them in a separate page so that we can set
     * individual permissions on them, so the actual alignment needed is the
     * page size.
     *
     * There's no need to include this into the RO section of BL31 because it
     * doesn't need to be accessed by BL31.
     */
    .spm_shim_exceptions : ALIGN(PAGE_SIZE) {
        __SPM_SHIM_EXCEPTIONS_START__ = .;

        *(.spm_shim_exceptions)

        . = ALIGN(PAGE_SIZE);

        __SPM_SHIM_EXCEPTIONS_END__ = .;
    } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM

    /* Load (not virtual) address of the shim, for whoever copies it over. */
    PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(.spm_shim_exceptions));

    /*
     * The shim's VMA may be outside RAM; reset the location counter to the
     * end of the shim's load area so subsequent sections follow it in RAM.
     */
    . = LOADADDR(.spm_shim_exceptions) + SIZEOF(.spm_shim_exceptions);
#endif /* SPM_MM || (SPMC_AT_EL3 && SPMC_AT_EL3_SEL0_SP) */

    /* Start of the read-write part of the image. */
    __RW_START__ = .;

    /* Data and dynamic-relocation sections (macros from bl_common.ld.h). */
    DATA_SECTION >RAM
    RELA_SECTION >RAM

#ifdef BL31_PROGBITS_LIMIT
    /* Optional cap on the space taken by content stored in the image file. */
    ASSERT(
        . <= BL31_PROGBITS_LIMIT,
        "BL31 progbits has exceeded its limit. Consider disabling some features."
    )
#endif /* BL31_PROGBITS_LIMIT */

#if SEPARATE_NOBITS_REGION
    /* Close out the RAM image here; NOBITS content lives in its own region. */
    . = ALIGN(PAGE_SIZE);

    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")

    /* Jump the location counter to the dedicated NOBITS region. */
    . = BL31_NOBITS_BASE;

    ASSERT(. == ALIGN(PAGE_SIZE),
        "BL31 NOBITS base address is not aligned on a page boundary.")

    __NOBITS_START__ = .;
#endif /* SEPARATE_NOBITS_REGION */

    /* Zero-initialised content: stacks, BSS, translation tables. */
    STACK_SECTION >NOBITS
    BSS_SECTION >NOBITS
    XLAT_TABLE_SECTION >NOBITS

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned to
     * guarantee that the coherent data are stored on their own pages and are
     * not mixed with normal data.  This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
        __COHERENT_RAM_START__ = .;

        /*
         * Bakery locks are stored in coherent memory. Each lock's data is
         * contiguous and fully allocated by the compiler.
         */
        *(.bakery_lock)
        *(.tzfw_coherent_mem)

        __COHERENT_RAM_END_UNALIGNED__ = .;

        /*
         * Memory page(s) mapped to this section will be marked as device
         * memory. No other unexpected data must creep in. Ensure the rest of
         * the current memory page is unused.
         */
        . = ALIGN(PAGE_SIZE);

        __COHERENT_RAM_END__ = .;
    } >NOBITS
#endif /* USE_COHERENT_MEM */

#if SEPARATE_NOBITS_REGION
    __NOBITS_END__ = .;

    ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
#else /* SEPARATE_NOBITS_REGION */
    /* Everything (including NOBITS) shares RAM; end the image here. */
    __RW_END__ = .;
    __BL31_END__ = .;

    ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
#endif /* SEPARATE_NOBITS_REGION */
    RAM_REGION_END = .;

    /*
     * Dynamic-linking metadata is only needed at link time (BL31 applies its
     * own relocations via RELA_SECTION); drop it from the final image.
     */
    /DISCARD/ : {
        *(.dynsym .dynstr .hash .gnu.hash)
    }
}
211