/*
 * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef BL_COMMON_LD_H
#define BL_COMMON_LD_H

#include <platform_def.h>

#ifdef __aarch64__
#define STRUCT_ALIGN	8
#define BSS_ALIGN	16
#else
#define STRUCT_ALIGN	4
#define BSS_ALIGN	8
#endif

#ifndef DATA_ALIGN
#define DATA_ALIGN	1
#endif

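/*
 * Note that the DATA_ALIGN value above is only a default: thanks to the
 * #ifndef guard, a platform that needs a stricter .data alignment can define
 * DATA_ALIGN before this point, e.g. from platform_def.h (included above) or
 * from the build system. Illustrative sketch only; the value shown is an
 * assumed example, not a requirement:
 *
 *   #define DATA_ALIGN	16
 */
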
#define CPU_OPS						\
	. = ALIGN(STRUCT_ALIGN);			\
	__CPU_OPS_START__ = .;				\
	KEEP(*(.cpu_ops))				\
	__CPU_OPS_END__ = .;

#define PARSER_LIB_DESCS				\
	. = ALIGN(STRUCT_ALIGN);			\
	__PARSER_LIB_DESCS_START__ = .;			\
	KEEP(*(.img_parser_lib_descs))			\
	__PARSER_LIB_DESCS_END__ = .;

#define RT_SVC_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__RT_SVC_DESCS_START__ = .;			\
	KEEP(*(.rt_svc_descs))				\
	__RT_SVC_DESCS_END__ = .;

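/*
 * The collection macros above (and the ones that follow) only gather and
 * bound the corresponding input sections; the entries themselves come from C
 * code that places descriptor structures into those sections, and the
 * start/end symbols are then imported and walked at runtime. A rough, hedged
 * sketch of the pattern for the runtime service descriptors (not a verbatim
 * copy of the TF-A sources; the descriptor name is illustrative):
 *
 *   static const rt_svc_desc_t my_svc_desc
 *	__section(".rt_svc_descs") __used = { ... };
 *
 *   IMPORT_SYM(uintptr_t, __RT_SVC_DESCS_START__, RT_SVC_DESCS_START);
 *   IMPORT_SYM(uintptr_t, __RT_SVC_DESCS_END__, RT_SVC_DESCS_END);
 */
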
#if SPMC_AT_EL3
#define EL3_LP_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__EL3_LP_DESCS_START__ = .;			\
	KEEP(*(.el3_lp_descs))				\
	__EL3_LP_DESCS_END__ = .;
#else
#define EL3_LP_DESCS
#endif

#if ENABLE_SPMD_LP
#define SPMD_LP_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__SPMD_LP_DESCS_START__ = .;			\
	KEEP(*(.spmd_lp_descs))				\
	__SPMD_LP_DESCS_END__ = .;
#else
#define SPMD_LP_DESCS
#endif

#define PMF_SVC_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__PMF_SVC_DESCS_START__ = .;			\
	KEEP(*(.pmf_svc_descs))				\
	__PMF_SVC_DESCS_END__ = .;

#define FCONF_POPULATOR					\
	. = ALIGN(STRUCT_ALIGN);			\
	__FCONF_POPULATOR_START__ = .;			\
	KEEP(*(.fconf_populator))			\
	__FCONF_POPULATOR_END__ = .;

/*
 * Keep the .got section in the RO region: it is patched prior to enabling
 * the MMU, so it can still be updated at that point, and keeping it
 * read-only afterwards is better for security. The GOT is a table of
 * addresses, so ensure pointer-size alignment.
 */
#define GOT						\
	. = ALIGN(STRUCT_ALIGN);			\
	__GOT_START__ = .;				\
	*(.got)						\
	__GOT_END__ = .;

/*
 * The base xlat table
 *
 * It is put into the rodata section if PLAT_RO_XLAT_TABLES=1,
 * or into the bss section otherwise.
 */
#define BASE_XLAT_TABLE					\
	. = ALIGN(16);					\
	__BASE_XLAT_TABLE_START__ = .;			\
	*(.base_xlat_table)				\
	__BASE_XLAT_TABLE_END__ = .;

#if PLAT_RO_XLAT_TABLES
#define BASE_XLAT_TABLE_RO		BASE_XLAT_TABLE
#define BASE_XLAT_TABLE_BSS
#else
#define BASE_XLAT_TABLE_RO
#define BASE_XLAT_TABLE_BSS		BASE_XLAT_TABLE
#endif

#define RODATA_COMMON					\
	RT_SVC_DESCS					\
	FCONF_POPULATOR					\
	PMF_SVC_DESCS					\
	PARSER_LIB_DESCS				\
	CPU_OPS						\
	GOT						\
	BASE_XLAT_TABLE_RO				\
	EL3_LP_DESCS					\
	SPMD_LP_DESCS

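/*
 * RODATA_COMMON is meant to be expanded inside a BL image's read-only output
 * section so that all of the collections above land in RO memory. An
 * illustrative, simplified fragment of a per-image linker script (the
 * section name, symbols and memory region vary between images):
 *
 *   .rodata . : {
 *	__RODATA_START__ = .;
 *	*(SORT_BY_ALIGNMENT(.rodata*))
 *	RODATA_COMMON
 *	__RODATA_END__ = .;
 *   } >RAM
 */
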
/*
 * .data must be placed at a lower address than the stacks if the stack
 * protector is enabled. Alternatively, the .data.stack_protector_canary
 * section can be placed independently of the main .data section.
 */
#define DATA_SECTION					\
	.data . : ALIGN(DATA_ALIGN) {			\
		__DATA_START__ = .;			\
		*(SORT_BY_ALIGNMENT(.data*))		\
		__DATA_END__ = .;			\
	}

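/*
 * Illustrative ordering sketch, not a complete linker script: expanding
 * DATA_SECTION before STACK_SECTION (defined further below) keeps .data at a
 * lower address than the stacks, as required when the stack protector is
 * enabled, and expanding RELA_SECTION right after it keeps the relocation
 * section after .data as noted below.
 *
 *   SECTIONS {
 *	...
 *	DATA_SECTION >RAM
 *	RELA_SECTION >RAM
 *	STACK_SECTION >RAM
 *	...
 *   }
 */
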
/*
 * .rela.dyn needs to come after .data for the readelf utility to parse
 * this section correctly.
 */
#if __aarch64__
#define RELA_DYN_NAME		.rela.dyn
#define RELOC_SECTIONS_PATTERN	*(.rela*)
#else
#define RELA_DYN_NAME		.rel.dyn
#define RELOC_SECTIONS_PATTERN	*(.rel*)
#endif

#define RELA_SECTION					\
	RELA_DYN_NAME : ALIGN(STRUCT_ALIGN) {		\
		__RELA_START__ = .;			\
		RELOC_SECTIONS_PATTERN			\
		__RELA_END__ = .;			\
	}

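/*
 * Descriptive note: the __RELA_START__/__RELA_END__ (and __GOT_START__/
 * __GOT_END__) symbols are there so that, for position independent images,
 * the early boot code can locate and apply the dynamic relocations before
 * the MMU is enabled; the relocation fixup routine itself lives in the
 * architecture helper code, not in this header.
 */
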
#if !(defined(IMAGE_BL31) && RECLAIM_INIT_CODE)
#define STACK_SECTION					\
	.stacks (NOLOAD) : {				\
		__STACKS_START__ = .;			\
		*(.tzfw_normal_stacks)			\
		__STACKS_END__ = .;			\
	}
#endif

/*
 * If the BL image doesn't use any bakery locks then
 * __PERCPU_BAKERY_LOCK_SIZE__ will be zero. For this reason, the only two
 * valid values for __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform-defined
 * value PLAT_PERCPU_BAKERY_LOCK_SIZE.
 */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
#define BAKERY_LOCK_SIZE_CHECK				\
	ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) ||	\
	       (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE), \
	       "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#else
#define BAKERY_LOCK_SIZE_CHECK
#endif

/*
 * Bakery locks are stored in normal .bss memory.
 *
 * Each lock's data is spread across multiple cache lines, one per CPU,
 * but multiple locks can share the same cache line.
 * The compiler will allocate enough memory for one CPU's bakery locks;
 * the remaining cache lines are allocated by the linker script.
 */
#if !USE_COHERENT_MEM
#define BAKERY_LOCK_NORMAL				\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__BAKERY_LOCK_START__ = .;			\
	__PERCPU_BAKERY_LOCK_START__ = .;		\
	*(.bakery_lock)					\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PERCPU_BAKERY_LOCK_END__ = .;			\
	__PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); \
	. = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__BAKERY_LOCK_END__ = .;			\
	BAKERY_LOCK_SIZE_CHECK
#else
#define BAKERY_LOCK_NORMAL
#endif

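/*
 * Worked example of the reservation above, using assumed numbers (a 64-byte
 * CACHE_WRITEBACK_GRANULE, PLATFORM_CORE_COUNT = 4, and 100 bytes of
 * compiler-allocated .bakery_lock data for one CPU):
 *
 *   __PERCPU_BAKERY_LOCK_SIZE__ = ALIGN(100, 64)        = 128 bytes
 *   extra space for other CPUs  = 128 * (4 - 1)         = 384 bytes
 *   __BAKERY_LOCK_END__ - __BAKERY_LOCK_START__         = 512 bytes
 *
 * i.e. each CPU ends up with its own copy of the cache lines holding the
 * locks, and the bakery lock code is expected to index into the region in
 * steps of __PERCPU_BAKERY_LOCK_SIZE__.
 */
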
/*
 * Time-stamps are stored in normal .bss memory.
 *
 * The compiler will allocate enough memory for one CPU's time-stamps;
 * the remaining memory for the other CPUs is allocated by the
 * linker script.
 */
#define PMF_TIMESTAMP					\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PMF_TIMESTAMP_START__ = .;			\
	KEEP(*(.pmf_timestamp_array))			\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PMF_PERCPU_TIMESTAMP_END__ = .;		\
	__PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); \
	. = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__PMF_TIMESTAMP_END__ = .;

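/*
 * The per-CPU expansion follows the same scheme as the bakery locks above:
 * the linker reserves (PLATFORM_CORE_COUNT - 1) additional copies of the
 * compiler-allocated block, so CPU n's time-stamps are expected to live at
 * roughly
 *
 *   __PMF_TIMESTAMP_START__ + n * __PERCPU_TIMESTAMP_SIZE__
 *
 * (illustrative formula; the exact accessors live in the PMF library).
 */
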
/*
 * The .bss section gets initialised to 0 at runtime.
 * Its base address has a larger alignment (BSS_ALIGN) for better performance
 * of the zero-initialisation code.
 */
#define BSS_SECTION					\
	.bss (NOLOAD) : ALIGN(BSS_ALIGN) {		\
		__BSS_START__ = .;			\
		*(SORT_BY_ALIGNMENT(.bss*))		\
		*(COMMON)				\
		BAKERY_LOCK_NORMAL			\
		PMF_TIMESTAMP				\
		BASE_XLAT_TABLE_BSS			\
		__BSS_END__ = .;			\
	}

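/*
 * The __BSS_START__/__BSS_END__ symbols let early boot code zero the section
 * before any C code relies on it. A minimal, hedged sketch of such a
 * consumer (the real TF-A entrypoints use their own assembly helpers for
 * this, so the function name here is purely illustrative):
 *
 *   extern char __BSS_START__[], __BSS_END__[];
 *
 *   static void clear_bss(void)
 *   {
 *	(void)memset(__BSS_START__, 0,
 *		     (size_t)(__BSS_END__ - __BSS_START__));
 *   }
 */
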
/*
 * The .xlat_table section is for full, aligned page tables (4K).
 * Removing them from .bss avoids forcing 4K alignment on
 * the .bss section. The tables are initialized to zero by the translation
 * tables library.
 */
#define XLAT_TABLE_SECTION				\
	.xlat_table (NOLOAD) : {			\
		__XLAT_TABLE_START__ = .;		\
		*(.xlat_table)				\
		__XLAT_TABLE_END__ = .;			\
	}

#endif /* BL_COMMON_LD_H */