/*
 * Copyright (c) 2015-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <libfdt.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <common/fdt_fixup.h>
#include <common/fdt_wrappers.h>
#include <lib/optee_utils.h>
#if TRANSFER_LIST
#include <lib/transfer_list.h>
#endif
#include <lib/utils.h>
#include <plat/common/platform.h>

#include "qemu_private.h"

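/*
 * Memory regions mapped by BL2: the whole of the trusted SRAM assigned to
 * BL2 (read-write), the BL2 code and read-only data (mapped separately so
 * they can remain executable/read-only) and, when USE_COHERENT_MEM is
 * enabled, the coherent RAM region mapped as device memory.
 */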
#define MAP_BL2_TOTAL	MAP_REGION_FLAT(			\
				bl2_tzram_layout.total_base,	\
				bl2_tzram_layout.total_size,	\
				MT_MEMORY | MT_RW | MT_SECURE)

#define MAP_BL2_RO	MAP_REGION_FLAT(			\
				BL_CODE_BASE,			\
				BL_CODE_END - BL_CODE_BASE,	\
				MT_CODE | MT_SECURE),		\
			MAP_REGION_FLAT(			\
				BL_RO_DATA_BASE,		\
				BL_RO_DATA_END			\
					- BL_RO_DATA_BASE,	\
				MT_RO_DATA | MT_SECURE)

#if USE_COHERENT_MEM
#define MAP_BL_COHERENT_RAM	MAP_REGION_FLAT(			\
					BL_COHERENT_RAM_BASE,		\
					BL_COHERENT_RAM_END		\
						- BL_COHERENT_RAM_BASE,	\
					MT_DEVICE | MT_RW | MT_SECURE)
#endif

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
#if TRANSFER_LIST
static struct transfer_list_header *bl2_tl;
#endif

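/*
 * Early platform setup: arg1 carries the meminfo_t describing the trusted
 * SRAM available to BL2. Bring up the console for early debug output,
 * record the memory layout and set up the IO layer used to load the
 * subsequent images.
 */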
void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	meminfo_t *mem_layout = (void *)arg1;

	/* Initialize the console to provide early debug support */
	qemu_console_init();

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;

	plat_qemu_io_setup();
}

static void security_setup(void)
{
	/*
	 * This is where a TrustZone address space controller and other
	 * security-related peripherals would be configured.
	 */
}

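/*
 * Amend the device tree preloaded at ARM_PRELOADED_DTB_BASE: add the PSCI
 * node and the PSCI cpu enable-methods, repack the blob and, when
 * TRANSFER_LIST is enabled, record it in the transfer list for hand-off.
 */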
static void update_dt(void)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;
#endif
	int ret;
	void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;

	ret = fdt_open_into(fdt, fdt, PLAT_QEMU_DT_MAX_SIZE);
	if (ret < 0) {
		ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
		return;
	}

	if (dt_add_psci_node(fdt)) {
		ERROR("Failed to add PSCI Device Tree node\n");
		return;
	}

	if (dt_add_psci_cpu_enable_methods(fdt)) {
		ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
		return;
	}

	ret = fdt_pack(fdt);
	if (ret < 0)
		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);

#if TRANSFER_LIST
	/* Add the updated FDT to the transfer list as a new transfer entry */
	te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
	if (!te) {
		ERROR("Failed to add FDT entry to Transfer List\n");
		return;
	}
#endif
}

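/*
 * Main BL2 platform setup: initialize the transfer list (when enabled),
 * perform the security setup and update the device tree.
 */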
void bl2_platform_setup(void)
{
#if TRANSFER_LIST
	bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				    FW_HANDOFF_SIZE);
	if (!bl2_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
	}
#endif
	security_setup();
	update_dt();

	/* TODO Initialize timer */
}

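/*
 * Recompute the transfer list checksum after it has been modified, so that
 * later consumers of the list see a consistent header.
 */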
void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
	transfer_list_update_checksum(bl2_tl);
#endif
}

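/*
 * Build the page tables covering the regions BL2 needs to access and
 * enable the MMU at the current exception level.
 */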
void bl2_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		MAP_BL2_RO,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT_RAM,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_qemu_get_mmap());

#ifdef __aarch64__
	enable_mmu_el1(0);
#else
	enable_mmu_svc_mon(0);
#endif
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
#else
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
#endif
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
	uint32_t spsr;
#ifdef __aarch64__
	unsigned int mode;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
#else
	spsr = SPSR_MODE32(MODE32_svc,
			   plat_get_ns_image_entrypoint() & 0x1,
			   SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
#endif
	return spsr;
}

#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
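/*
 * Walk the node compatible with "arm,sp" in TB_FW_CONFIG and register an
 * SP package with the IO layer for every subnode that provides a uuid and
 * a load-address property.
 */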
static int load_sps_from_tb_fw_config(struct image_info *image_info)
{
	void *dtb = (void *)image_info->image_base;
	const char *compat_str = "arm,sp";
	const struct fdt_property *uuid;
	uint32_t load_addr;
	const char *name;
	int sp_node;
	int node;

	node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
	if (node < 0) {
		ERROR("Can't find %s in TB_FW_CONFIG\n", compat_str);
		return -1;
	}

	fdt_for_each_subnode(sp_node, dtb, node) {
		name = fdt_get_name(dtb, sp_node, NULL);
		if (name == NULL) {
			ERROR("Can't get name of node in dtb\n");
			return -1;
		}
		uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
		if (uuid == NULL) {
			ERROR("Can't find property uuid in node %s\n", name);
			return -1;
		}
		if (fdt_read_uint32(dtb, sp_node, "load-address",
				    &load_addr) < 0) {
			ERROR("Can't read load-address in node %s\n", name);
			return -1;
		}
		if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
			return -1;
		}
	}

	return 0;
}
#endif /* defined(SPD_spmd) && SPMD_SPM_AT_SEL2 */

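/*
 * Per-image hook run after each image has been loaded: set up the SPSR and
 * the entry point arguments (DTB address, transfer list, boot CPU MPID,
 * TOS_FW_CONFIG) that BL32 and BL33 expect on entry.
 */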
static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
#if defined(SPD_spmd)
	bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
	struct transfer_list_header *ns_tl = NULL;
	struct transfer_list_entry *te = NULL;
#endif

	assert(bl_mem_params);

	switch (image_id) {
	case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}
#endif

#if defined(SPMC_OPTEE)
		/*
		 * Explicit zeroes to unused registers since they may have
		 * been populated by parse_optee_header() above.
		 *
		 * OP-TEE expects the system DTB in x2 and TOS_FW_CONFIG in x0,
		 * the latter is filled in below for TOS_FW_CONFIG_ID and
		 * applies to any other SPMC too.
		 */
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
		/*
		 * OP-TEE expects to receive the DTB address in x2.
		 * This will be copied into x2 by the dispatcher.
		 */
		bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
		bl_mem_params->ep_info.args.arg0 =
					bl_mem_params->ep_info.args.arg1;
		bl_mem_params->ep_info.args.arg1 = 0;
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg3 = 0;
#endif
		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
		break;

	case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
		/* AArch32 only core: OP-TEE expects NSec EP in register LR */
		pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		assert(pager_mem_params);
		pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
		/*
		 * According to the file ``Documentation/arm64/booting.txt`` of
		 * the Linux kernel tree, Linux expects the physical address of
		 * the device tree blob (DTB) in x0, while x1-x3 are reserved
		 * for future use and must be 0.
		 */
		bl_mem_params->ep_info.args.arg0 =
			(u_register_t)ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg1 = 0U;
		bl_mem_params->ep_info.args.arg2 = 0U;
		bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
		if (bl2_tl) {
			/* Relocate the TL to pre-allocated NS memory */
			ns_tl = transfer_list_relocate(bl2_tl,
					(void *)(uintptr_t)FW_NS_HANDOFF_BASE,
					bl2_tl->max_size);
			if (!ns_tl) {
				ERROR("Relocate TL to 0x%lx failed\n",
				      (unsigned long)FW_NS_HANDOFF_BASE);
				return -1;
			}
			NOTICE("Transfer list handoff to BL33\n");
			transfer_list_dump(ns_tl);

			te = transfer_list_find(ns_tl, TL_TAG_FDT);

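			/*
			 * Register convention for the transfer list hand-off:
			 * x1/r1 carries the transfer list signature together
			 * with the register convention version, x3/r3 carries
			 * the transfer list address, and the DTB address goes
			 * in x0 (AArch64) or r2 (AArch32).
			 */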
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_SIGNATURE |
				REGISTER_CONVENTION_VERSION_MASK;
			bl_mem_params->ep_info.args.arg3 = (uintptr_t)ns_tl;

			if (GET_RW(bl_mem_params->ep_info.spsr) == MODE_RW_32) {
				/* AArch32 */
				bl_mem_params->ep_info.args.arg0 = 0;
				bl_mem_params->ep_info.args.arg2 = te ?
					(uintptr_t)transfer_list_entry_data(te)
					: 0;
			} else {
				/* AArch64 */
				bl_mem_params->ep_info.args.arg0 = te ?
					(uintptr_t)transfer_list_entry_data(te)
					: 0;
				bl_mem_params->ep_info.args.arg2 = 0;
			}
		} else {
			/* Legacy handoff */
			bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		}
#else
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

		break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
	case TB_FW_CONFIG_ID:
		err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
		break;
#endif
	case TOS_FW_CONFIG_ID:
		/* An SPMC expects TOS_FW_CONFIG in x0/r0 */
		bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		bl32_mem_params->ep_info.args.arg0 =
					bl_mem_params->image_info.image_base;
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return qemu_bl2_handle_post_image_load(image_id);
}

uintptr_t plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}