/*
 * Copyright (c) 2021-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/gpt_rme/gpt_rme.h>

#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/rmmd_svc.h>
#include <smccc_helpers.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/sve.h>
#include "rmmd_initial_context.h"
#include "rmmd_private.h"

/*******************************************************************************
 * RMM boot failure flag
 ******************************************************************************/
static bool rmm_boot_failed;

/*******************************************************************************
 * RMM context information.
 ******************************************************************************/
rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);

/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

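	/* Assign this CPU's Realm context so it can be restored below. */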
	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Restore the realm context assigned above */
	cm_el1_sysregs_context_restore(REALM);
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/* Enter RMM */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/*
	 * Save realm context. EL1 and EL2 Non-secure
	 * contexts will be restored before exiting to
	 * Non-secure world, therefore there is no need
	 * to clear EL1 and EL2 context registers.
	 */
	cm_el1_sysregs_context_save(REALM);
	cm_el2_sysregs_context_save(REALM);

	return rc;
}

/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	panic();
}

static void rmm_el2_context_init(el2_sysregs_t *regs)
{
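	/*
	 * The CTX_* defines are byte offsets into the saved EL2 sysreg
	 * context, while ctx_regs[] holds 64-bit entries, hence the
	 * conversion to an array index with a right shift by 3.
	 */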
	regs->ctx_regs[CTX_SPSR_EL2 >> 3] = REALM_SPSR_EL2;
	regs->ctx_regs[CTX_SCTLR_EL2 >> 3] = SCTLR_EL2_RES1;
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Realm world.
 ******************************************************************************/

static void manage_extensions_realm(cpu_context_t *ctx)
{
	pmuv3_enable(ctx);

	/*
	 * Enable access to TPIDR2_EL0 if SME/SME2 is enabled for the
	 * Non-secure world.
	 */
	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}
}

static void manage_extensions_realm_per_world(void)
{
	if (is_feat_sve_supported()) {
		/*
		 * Enable SVE and FPU in realm context when it is enabled
		 * for NS. Realm manager must ensure that the SVE and FPU
		 * register contexts are properly managed.
		 */
		sve_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}

	/* NS can access this but Realm shouldn't */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}

	/*
	 * If SME/SME2 is supported and enabled for NS world, then disable trapping
	 * of SME instructions for Realm world. RMM will save/restore required
	 * registers that are shared with SVE/FPU so that Realm can use FPU or SVE.
	 */
	if (is_feat_sme_supported()) {
		sme_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}
}

/*******************************************************************************
 * Jump to the RMM for the first time.
 ******************************************************************************/
static int32_t rmm_init(void)
{
	long rc;
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	INFO("RMM init start.\n");

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	manage_extensions_realm_per_world();

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed: %ld\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return 0;
	}

	INFO("RMM init end.\n");

	return 1;
}

/*******************************************************************************
 * Load and read RMM manifest, setup RMM.
 ******************************************************************************/
int rmmd_setup(void)
{
	size_t shared_buf_size __unused;
	uintptr_t shared_buf_base;
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
	struct rmm_manifest *manifest;
	int rc;

	/* Make sure RME is supported. */
	assert(get_armv9_2_feat_rme_support() != 0U);

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if (rmm_ep_info == NULL) {
		WARN("No RMM image provided by BL2 boot loader; booting "
		     "device without RMM initialization. SMCs destined for "
		     "RMM will return SMC_UNK\n");
		return -ENOENT;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(rmm_ep_info->pc == RMM_BASE);

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
					MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);

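	/*
	 * Retrieve the base of the EL3 <-> RMM shared area from the
	 * platform; it is expected to be a single 4KB page.
	 */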
	shared_buf_size =
			plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);

	assert((shared_buf_size == SZ_4K) &&
					((void *)shared_buf_base != NULL));

	/* Load the boot manifest at the beginning of the shared area */
	manifest = (struct rmm_manifest *)shared_buf_base;
	rc = plat_rmmd_load_manifest(manifest);
	if (rc != 0) {
		ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
		return rc;
	}
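	/*
	 * Flush the shared area so that the boot manifest reaches memory
	 * before the RMM starts executing and reads it.
	 */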
	flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);

	/*
	 * Prepare coldboot arguments for RMM:
	 * arg0: This CPUID (primary processor).
	 * arg1: Version for this Boot Interface.
	 * arg2: PLATFORM_CORE_COUNT.
	 * arg3: Base address for the EL3 <-> RMM shared area. The boot
	 *       manifest will be stored at the beginning of this area.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
	rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
	rmm_ep_info->args.arg3 = shared_buf_base;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init. */
	bl31_register_rmm_init(&rmm_init);

	return 0;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
rmmd_smc_forward(uint32_t src_sec_state,uint32_t dst_sec_state,uint64_t x0,uint64_t x1,uint64_t x2,uint64_t x3,uint64_t x4,void * handle)272 static uint64_t	rmmd_smc_forward(uint32_t src_sec_state,
273 				 uint32_t dst_sec_state, uint64_t x0,
274 				 uint64_t x1, uint64_t x2, uint64_t x3,
275 				 uint64_t x4, void *handle)
276 {
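	/* Get this CPU's context for the destination security state. */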
	cpu_context_t *ctx = cm_get_context(dst_sec_state);

	/* Save incoming security state */
	cm_el1_sysregs_context_save(src_sec_state);
	cm_el2_sysregs_context_save(src_sec_state);

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(dst_sec_state);
	cm_el2_sysregs_context_restore(dst_sec_state);
	cm_set_next_eret_context(dst_sec_state);

	/*
	 * As per SMCCCv1.2, we need to preserve x4 to x7 unless they are
	 * used as return args. Hence we differentiate the onward and
	 * return paths. Support up to 8 args on the onward path and 4
	 * args on the return path.
	 * Register x4 will be preserved by RMM in case it is not used
	 * in the return path.
	 */
	if (src_sec_state == NON_SECURE) {
		SMC_RET8(ctx, x0, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	}

	SMC_RET5(ctx, x0, x1, x2, x3, x4);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM dispatcher
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	uint32_t src_sec_state;

	/* If RMM failed to boot, treat any RMI SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMMD: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Forward an RMI call from the Normal world to the Realm world as it
	 * is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		/*
		 * If SVE hint bit is set in the flags then update the SMC
		 * function id and pass it on to the lower EL.
		 */
		if (is_sve_hint_set(flags)) {
			smc_fid |= (FUNCID_SVE_HINT_MASK <<
				    FUNCID_SVE_HINT_SHIFT);
		}
		VERBOSE("RMMD: RMI call from non-secure world.\n");
		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
					x1, x2, x3, x4, handle);
	}

	if (src_sec_state != SMC_FROM_REALM) {
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
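	/*
	 * RMM has completed an RMI request: forward the results held in
	 * x1-x5 back to the Normal world, where they land in x0-x4.
	 */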
	case RMM_RMI_REQ_COMPLETE: {
		uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);

		return rmmd_smc_forward(REALM, NON_SECURE, x1,
					x2, x3, x4, x5, handle);
	}
	default:
		WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/*******************************************************************************
 * This CPU has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
 * is done after initialising minimal architectural state that guarantees safe
 * execution.
 ******************************************************************************/
rmmd_cpu_on_finish_handler(const void * arg)371 static void *rmmd_cpu_on_finish_handler(const void *arg)
372 {
373 	long rc;
374 	uint32_t linear_id = plat_my_core_pos();
375 	rmmd_rmm_context_t *ctx = &rmm_context[linear_id];
376 
377 	if (rmm_boot_failed) {
378 		/* RMM Boot failed on a previous CPU. Abort. */
		ERROR("RMM failed to initialize. Ignoring for CPU%u\n",
								linear_id);
		return NULL;
	}

	/*
	 * Prepare warmboot arguments for RMM:
	 * arg0: This CPUID.
	 * arg1 to arg3: Not used.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = 0ULL;
	rmm_ep_info->args.arg2 = 0ULL;
	rmm_ep_info->args.arg3 = 0ULL;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	rc = rmmd_rmm_sync_entry(ctx);

	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed on CPU%u: %ld\n", linear_id, rc);
		/* Mark the boot as failed for any other booting CPU */
		rmm_boot_failed = true;
	}

	return NULL;
}

/* Subscribe to the psci_cpu_on_finish event to initialize RMM on secondary CPUs */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);

/* Convert GPT lib error to RMMD GTS error */
static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
{
	int ret;

	if (error == 0) {
		return E_RMM_OK;
	}

	if (error == -EINVAL) {
		ret = E_RMM_BAD_ADDR;
	} else {
		/* This is the only other error code we expect */
		assert(error == -EPERM);
		ret = E_RMM_BAD_PAS;
	}

	ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
				error, address, smc_fid);
	return ret;
}

/*******************************************************************************
 * This function handles RMM-EL3 interface SMCs
 ******************************************************************************/
rmmd_rmm_el3_handler(uint32_t smc_fid,uint64_t x1,uint64_t x2,uint64_t x3,uint64_t x4,void * cookie,void * handle,uint64_t flags)442 uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
443 				uint64_t x3, uint64_t x4, void *cookie,
444 				void *handle, uint64_t flags)
445 {
446 	uint32_t src_sec_state;
447 	int ret;
448 
449 	/* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
450 	if (rmm_boot_failed) {
451 		WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
452 		SMC_RET1(handle, SMC_UNK);
453 	}
454 
455 	/* Determine which security state this SMC originated from */
456 	src_sec_state = caller_sec_state(flags);
457 
458 	if (src_sec_state != SMC_FROM_REALM) {
459 		WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
460 		SMC_RET1(handle, SMC_UNK);
461 	}
462 
463 	switch (smc_fid) {
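	/*
	 * Granule Transition Service: move a single 4KB granule into or
	 * out of the Realm PAS on behalf of the RMM.
	 */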
	case RMM_GTSI_DELEGATE:
		ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_GTSI_UNDELEGATE:
		ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_ATTEST_GET_PLAT_TOKEN:
		ret = rmmd_attest_get_platform_token(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);
	case RMM_ATTEST_GET_REALM_KEY:
		ret = rmmd_attest_get_signing_key(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);

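	/*
	 * RMM signals that its boot sequence is complete: return to the
	 * caller of rmmd_rmm_sync_entry() with the boot status in x1.
	 */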
	case RMM_BOOT_COMPLETE:
		VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
		rmmd_rmm_sync_exit(x1);

	default:
		WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}