1 /*
2 * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8 #include <errno.h>
9
10 #include <arch_helpers.h>
11 #include <bl31/bl31.h>
12 #include <bl31/ehf.h>
13 #include <bl31/interrupt_mgmt.h>
14 #include <common/debug.h>
15 #include <common/fdt_wrappers.h>
16 #include <common/runtime_svc.h>
17 #include <common/uuid.h>
18 #include <lib/el3_runtime/context_mgmt.h>
19 #include <lib/smccc.h>
20 #include <lib/utils.h>
21 #include <lib/xlat_tables/xlat_tables_v2.h>
22 #include <libfdt.h>
23 #include <plat/common/platform.h>
24 #include <services/el3_spmc_logical_sp.h>
25 #include <services/ffa_svc.h>
26 #include <services/spmc_svc.h>
27 #include <services/spmd_svc.h>
28 #include "spmc.h"
29 #include "spmc_shared_mem.h"
30
31 #include <platform_def.h>
32
33 /* Declare the maximum number of SPs and EL3 LPs. */
34 #define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
35
36 /*
37 * Allocate a secure partition descriptor to describe each SP in the system that
38 * does not reside at EL3.
39 */
40 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
41
42 /*
43 * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
44 * the system that interacts with a SP. It is used to track the Hypervisor
45 * buffer pair, version and ID for now. It could be extended to track VM
46 * properties when the SPMC supports indirect messaging.
47 */
48 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
49
50 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
51 uint32_t flags,
52 void *handle,
53 void *cookie);
54
55 /*
56 * Helper function to obtain the array storing the EL3
57 * Logical Partition descriptors.
58 */
59 struct el3_lp_desc *get_el3_lp_array(void)
60 {
61 return (struct el3_lp_desc *) EL3_LP_DESCS_START;
62 }
63
64 /*
65 * Helper function to obtain the descriptor of the last SP to which control
66 * was handed on this physical cpu. Currently, we assume there is only one SP.
67 * TODO: Expand to track multiple partitions when required.
68 */
69 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
70 {
71 return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
72 }
73
74 /*
75 * Helper function to obtain the execution context of an SP on the
76 * current physical cpu.
77 */
78 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
79 {
80 return &(sp->ec[get_ec_index(sp)]);
81 }
82
83 /* Helper function to get pointer to SP context from its ID. */
84 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
85 {
86 /* Check for Secure World Partitions. */
87 for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
88 if (sp_desc[i].sp_id == id) {
89 return &(sp_desc[i]);
90 }
91 }
92 return NULL;
93 }
94
95 /*
96 * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
97 * We assume that the first descriptor is reserved for this entity.
98 */
99 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
100 {
101 return &(ns_ep_desc[0]);
102 }
103
104 /*
105 * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
106 * or OS kernel in the normal world or the last SP that was run.
107 */
108 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
109 {
110 /* Obtain the RX/TX buffer pair descriptor. */
111 if (secure_origin) {
112 return &(spmc_get_current_sp_ctx()->mailbox);
113 } else {
114 return &(spmc_get_hyp_ctx()->mailbox);
115 }
116 }
117
118 /******************************************************************************
119 * This function returns to the place where spmc_sp_synchronous_entry() was
120 * called originally.
121 ******************************************************************************/
122 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
123 {
124 /*
125 * The SPM must have initiated the original request through a
126 * synchronous entry into the secure partition. Jump back to the
127 * original C runtime context with the value of rc in x0.
128 */
129 spm_secure_partition_exit(ec->c_rt_ctx, rc);
130
131 panic();
132 }
133
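/*
 * Note: this exit path is the counterpart of spmc_sp_synchronous_entry()
 * defined later in this file. A minimal sketch of how the pair is used by
 * sp_init() below (illustrative only, mirroring the code in this file):
 *
 *     ec->rt_model = RT_MODEL_INIT;
 *     rc = spmc_sp_synchronous_entry(ec);
 *     // Execution resumes here once the SP signals completion of its
 *     // initialisation (e.g. FFA_MSG_WAIT) or failure (FFA_ERROR), at
 *     // which point the SPMC calls spmc_sp_synchronous_exit(ec, rc) and
 *     // rc lands in x0 of the saved C runtime context.
 */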
134 /*******************************************************************************
135 * Return FFA_ERROR with specified error code.
136 ******************************************************************************/
137 uint64_t spmc_ffa_error_return(void *handle, int error_code)
138 {
139 SMC_RET8(handle, FFA_ERROR,
140 FFA_TARGET_INFO_MBZ, error_code,
141 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
142 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
143 }
144
145 /******************************************************************************
146 * Helper function to validate a secure partition ID to ensure it does not
147 * conflict with any other FF-A component and follows the convention to
148 * indicate it resides within the secure world.
149 ******************************************************************************/
150 bool is_ffa_secure_id_valid(uint16_t partition_id)
151 {
152 struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
153
154 /* Ensure the ID is not the invalid partition ID. */
155 if (partition_id == INV_SP_ID) {
156 return false;
157 }
158
159 /* Ensure the ID is not the SPMD ID. */
160 if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
161 return false;
162 }
163
164 /*
165 * Ensure the ID follows the convention to indicate it resides
166 * in the secure world.
167 */
168 if (!ffa_is_secure_world_id(partition_id)) {
169 return false;
170 }
171
172 /* Ensure we don't conflict with the SPMC partition ID. */
173 if (partition_id == FFA_SPMC_ID) {
174 return false;
175 }
176
177 /* Ensure we do not already have an SP context with this ID. */
178 if (spmc_get_sp_ctx(partition_id)) {
179 return false;
180 }
181
182 /* Ensure we don't clash with any Logical SPs. */
183 for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
184 if (el3_lp_descs[i].sp_id == partition_id) {
185 return false;
186 }
187 }
188
189 return true;
190 }
191
192 /*******************************************************************************
193 * This function either forwards the request to the other world or returns
194 * with an ERET depending on the source of the call.
195 * We can assume that the destination is for an entity at a lower exception
196 * level as any messages destined for a logical SP resident in EL3 will have
197 * already been taken care of by the SPMC before entering this function.
198 ******************************************************************************/
199 static uint64_t spmc_smc_return(uint32_t smc_fid,
200 bool secure_origin,
201 uint64_t x1,
202 uint64_t x2,
203 uint64_t x3,
204 uint64_t x4,
205 void *handle,
206 void *cookie,
207 uint64_t flags,
208 uint16_t dst_id)
209 {
210 /* If the destination is in the normal world always go via the SPMD. */
211 if (ffa_is_normal_world_id(dst_id)) {
212 return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
213 cookie, handle, flags);
214 }
215 /*
216 * If the caller is secure and we want to return to the secure world,
217 * ERET directly.
218 */
219 else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
220 SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
221 }
222 /* If we originated in the normal world then switch contexts. */
223 else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
224 return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
225 x3, x4, handle);
226 } else {
227 /* Unknown State. */
228 panic();
229 }
230
231 /* Shouldn't be Reached. */
232 return 0;
233 }
234
235 /*******************************************************************************
236 * FF-A ABI Handlers.
237 ******************************************************************************/
238
239 /*******************************************************************************
240 * Helper function to validate arg2 as part of a direct message.
241 ******************************************************************************/
242 static inline bool direct_msg_validate_arg2(uint64_t x2)
243 {
244 /* Check message type. */
245 if (x2 & FFA_FWK_MSG_BIT) {
246 /* We have a framework message, ensure it is a known message. */
247 if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
248 VERBOSE("Invalid message format 0x%lx.\n", x2);
249 return false;
250 }
251 } else {
252 /* We have a partition message, ensure x2 is not set. */
253 if (x2 != (uint64_t) 0) {
254 VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
255 x2);
256 return false;
257 }
258 }
259 return true;
260 }
261
262 /*******************************************************************************
263 * Helper function to validate the destination ID of a direct response.
264 ******************************************************************************/
265 static bool direct_msg_validate_dst_id(uint16_t dst_id)
266 {
267 struct secure_partition_desc *sp;
268
269 /* Check if we're targeting a normal world partition. */
270 if (ffa_is_normal_world_id(dst_id)) {
271 return true;
272 }
273
274 /* Or directed to the SPMC itself. */
275 if (dst_id == FFA_SPMC_ID) {
276 return true;
277 }
278
279 /* Otherwise ensure the SP exists. */
280 sp = spmc_get_sp_ctx(dst_id);
281 if (sp != NULL) {
282 return true;
283 }
284
285 return false;
286 }
287
288 /*******************************************************************************
289 * Helper function to validate the response from a Logical Partition.
290 ******************************************************************************/
291 static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
292 void *handle)
293 {
294 /* Retrieve populated Direct Response Arguments. */
295 uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
296 uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
297 uint16_t src_id = ffa_endpoint_source(x1);
298 uint16_t dst_id = ffa_endpoint_destination(x1);
299
300 if (src_id != lp_id) {
301 ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
302 return false;
303 }
304
305 /*
306 * Check the destination ID is valid and ensure the LP is responding to
307 * the original request.
308 */
309 if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
310 ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
311 return false;
312 }
313
314 if (!direct_msg_validate_arg2(x2)) {
315 ERROR("Invalid EL3 LP message encoding.\n");
316 return false;
317 }
318 return true;
319 }
320
321 /*******************************************************************************
322 * Handle direct request messages and route to the appropriate destination.
323 ******************************************************************************/
324 static uint64_t direct_req_smc_handler(uint32_t smc_fid,
325 bool secure_origin,
326 uint64_t x1,
327 uint64_t x2,
328 uint64_t x3,
329 uint64_t x4,
330 void *cookie,
331 void *handle,
332 uint64_t flags)
333 {
334 uint16_t src_id = ffa_endpoint_source(x1);
335 uint16_t dst_id = ffa_endpoint_destination(x1);
336 struct el3_lp_desc *el3_lp_descs;
337 struct secure_partition_desc *sp;
338 unsigned int idx;
339
340 /* Check if arg2 has been populated correctly based on message type. */
341 if (!direct_msg_validate_arg2(x2)) {
342 return spmc_ffa_error_return(handle,
343 FFA_ERROR_INVALID_PARAMETER);
344 }
345
346 /* Validate Sender is either the current SP or from the normal world. */
347 if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
348 (!secure_origin && !ffa_is_normal_world_id(src_id))) {
349 ERROR("Invalid direct request source ID (0x%x).\n", src_id);
350 return spmc_ffa_error_return(handle,
351 FFA_ERROR_INVALID_PARAMETER);
352 }
353
354 el3_lp_descs = get_el3_lp_array();
355
356 /* Check if the request is destined for a Logical Partition. */
357 for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
358 if (el3_lp_descs[i].sp_id == dst_id) {
359 uint64_t ret = el3_lp_descs[i].direct_req(
360 smc_fid, secure_origin, x1, x2,
361 x3, x4, cookie, handle, flags);
362 if (!direct_msg_validate_lp_resp(src_id, dst_id,
363 handle)) {
364 panic();
365 }
366
367 /* Message checks out. */
368 return ret;
369 }
370 }
371
372 /*
373 * If the request did not target an LSP and originated from the secure
374 * world, it is invalid: an SP cannot send a direct request to the Normal
375 * world and there is no other SP to send it to. If more SPs are supported
376 * in future, the partition runtime model would also need to be validated.
377 */
378 if (secure_origin) {
379 VERBOSE("Direct request not supported to the Normal World.\n");
380 return spmc_ffa_error_return(handle,
381 FFA_ERROR_INVALID_PARAMETER);
382 }
383
384 /* Check if the SP ID is valid. */
385 sp = spmc_get_sp_ctx(dst_id);
386 if (sp == NULL) {
387 VERBOSE("Direct request to unknown partition ID (0x%x).\n",
388 dst_id);
389 return spmc_ffa_error_return(handle,
390 FFA_ERROR_INVALID_PARAMETER);
391 }
392
393 /*
394 * Check that the target execution context is in a waiting state before
395 * forwarding the direct request to it.
396 */
397 idx = get_ec_index(sp);
398 if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
399 VERBOSE("SP context on core%u is not waiting (%u).\n",
400 idx, sp->ec[idx].rt_state);
401 return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
402 }
403
404 /*
405 * Everything checks out so forward the request to the SP after updating
406 * its state and runtime model.
407 */
408 sp->ec[idx].rt_state = RT_STATE_RUNNING;
409 sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
410 sp->ec[idx].dir_req_origin_id = src_id;
411 return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
412 handle, cookie, flags, dst_id);
413 }
414
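/*
 * Illustrative sketch (not part of the SPMC): the endpoint ID layout that
 * the direct message handlers above expect in w1. Following the FF-A
 * convention implemented by ffa_endpoint_source()/ffa_endpoint_destination(),
 * the sender ID occupies bits [31:16] and the receiver ID bits [15:0], so a
 * caller building FFA_MSG_SEND_DIRECT_REQ arguments would do something like:
 *
 *     uint64_t x1 = ((uint64_t)sender_id << 16) | receiver_id;
 *     uint64_t x2 = 0;    // MBZ for a partition message, see
 *                         // direct_msg_validate_arg2().
 */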
415 /*******************************************************************************
416 * Handle direct response messages and route to the appropriate destination.
417 ******************************************************************************/
418 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
419 bool secure_origin,
420 uint64_t x1,
421 uint64_t x2,
422 uint64_t x3,
423 uint64_t x4,
424 void *cookie,
425 void *handle,
426 uint64_t flags)
427 {
428 uint16_t dst_id = ffa_endpoint_destination(x1);
429 struct secure_partition_desc *sp;
430 unsigned int idx;
431
432 /* Check if arg2 has been populated correctly based on message type. */
433 if (!direct_msg_validate_arg2(x2)) {
434 return spmc_ffa_error_return(handle,
435 FFA_ERROR_INVALID_PARAMETER);
436 }
437
438 /* Check that the response did not originate from the Normal world. */
439 if (!secure_origin) {
440 VERBOSE("Direct Response not supported from Normal World.\n");
441 return spmc_ffa_error_return(handle,
442 FFA_ERROR_INVALID_PARAMETER);
443 }
444
445 /*
446 * Check that the response is either targeted to the Normal world or the
447 * SPMC e.g. a PM response.
448 */
449 if (!direct_msg_validate_dst_id(dst_id)) {
450 VERBOSE("Direct response to invalid partition ID (0x%x).\n",
451 dst_id);
452 return spmc_ffa_error_return(handle,
453 FFA_ERROR_INVALID_PARAMETER);
454 }
455
456 /* Obtain the SP descriptor and update its runtime state. */
457 sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
458 if (sp == NULL) {
459 VERBOSE("Direct response to unknown partition ID (0x%x).\n",
460 dst_id);
461 return spmc_ffa_error_return(handle,
462 FFA_ERROR_INVALID_PARAMETER);
463 }
464
465 /* Sanity check state is being tracked correctly in the SPMC. */
466 idx = get_ec_index(sp);
467 assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
468
469 /* Ensure SP execution context was in the right runtime model. */
470 if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
471 VERBOSE("SP context on core%u not handling direct req (%u).\n",
472 idx, sp->ec[idx].rt_model);
473 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
474 }
475
476 if (sp->ec[idx].dir_req_origin_id != dst_id) {
477 WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
478 dst_id, sp->ec[idx].dir_req_origin_id, idx);
479 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
480 }
481
482 /* Update the state of the SP execution context. */
483 sp->ec[idx].rt_state = RT_STATE_WAITING;
484
485 /* Clear the ongoing direct request ID. */
486 sp->ec[idx].dir_req_origin_id = INV_SP_ID;
487
488 /*
489 * If the receiver is the SPMC, return to it via a synchronous exit (e.g.
490 * for a PM framework response); otherwise forward the response to the Normal world.
491 */
492 if (dst_id == FFA_SPMC_ID) {
493 spmc_sp_synchronous_exit(&sp->ec[idx], x4);
494 /* Should not get here. */
495 panic();
496 }
497
498 return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
499 handle, cookie, flags, dst_id);
500 }
501
502 /*******************************************************************************
503 * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
504 * cycles.
505 ******************************************************************************/
506 static uint64_t msg_wait_handler(uint32_t smc_fid,
507 bool secure_origin,
508 uint64_t x1,
509 uint64_t x2,
510 uint64_t x3,
511 uint64_t x4,
512 void *cookie,
513 void *handle,
514 uint64_t flags)
515 {
516 struct secure_partition_desc *sp;
517 unsigned int idx;
518
519 /*
520 * Check that the request did not originate from the Normal world as
521 * only the secure world can call this ABI.
522 */
523 if (!secure_origin) {
524 VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
525 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
526 }
527
528 /* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
529 sp = spmc_get_current_sp_ctx();
530 if (sp == NULL) {
531 return spmc_ffa_error_return(handle,
532 FFA_ERROR_INVALID_PARAMETER);
533 }
534
535 /*
536 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
537 */
538 idx = get_ec_index(sp);
539
540 /* Ensure SP execution context was in the right runtime model. */
541 if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
542 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
543 }
544
545 /* Sanity check the state is being tracked correctly in the SPMC. */
546 assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
547
548 /*
549 * Perform a synchronous exit if the partition was initialising. The
550 * state is updated after the exit.
551 */
552 if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
553 spmc_sp_synchronous_exit(&sp->ec[idx], x4);
554 /* Should not get here */
555 panic();
556 }
557
558 /* Update the state of the SP execution context. */
559 sp->ec[idx].rt_state = RT_STATE_WAITING;
560
561 /* Resume normal world if a secure interrupt was handled. */
562 if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
563 /* FFA_MSG_WAIT can only be called from the secure world. */
564 unsigned int secure_state_in = SECURE;
565 unsigned int secure_state_out = NON_SECURE;
566
567 cm_el1_sysregs_context_save(secure_state_in);
568 cm_el1_sysregs_context_restore(secure_state_out);
569 cm_set_next_eret_context(secure_state_out);
570 SMC_RET0(cm_get_context(secure_state_out));
571 }
572
573 /* Forward the response to the Normal world. */
574 return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
575 handle, cookie, flags, FFA_NWD_ID);
576 }
577
578 static uint64_t ffa_error_handler(uint32_t smc_fid,
579 bool secure_origin,
580 uint64_t x1,
581 uint64_t x2,
582 uint64_t x3,
583 uint64_t x4,
584 void *cookie,
585 void *handle,
586 uint64_t flags)
587 {
588 struct secure_partition_desc *sp;
589 unsigned int idx;
590
591 /* Check that the call did not originate from the Normal world. */
592 if (!secure_origin) {
593 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
594 }
595
596 /* Get the descriptor of the SP that invoked FFA_ERROR. */
597 sp = spmc_get_current_sp_ctx();
598 if (sp == NULL) {
599 return spmc_ffa_error_return(handle,
600 FFA_ERROR_INVALID_PARAMETER);
601 }
602
603 /* Get the execution context of the SP that invoked FFA_ERROR. */
604 idx = get_ec_index(sp);
605
606 /*
607 * We only expect FFA_ERROR to be received during SP initialisation
608 * otherwise this is an invalid call.
609 */
610 if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
611 ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
612 spmc_sp_synchronous_exit(&sp->ec[idx], x2);
613 /* Should not get here. */
614 panic();
615 }
616
617 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
618 }
619
620 static uint64_t ffa_version_handler(uint32_t smc_fid,
621 bool secure_origin,
622 uint64_t x1,
623 uint64_t x2,
624 uint64_t x3,
625 uint64_t x4,
626 void *cookie,
627 void *handle,
628 uint64_t flags)
629 {
630 uint32_t requested_version = x1 & FFA_VERSION_MASK;
631
632 if (requested_version & FFA_VERSION_BIT31_MASK) {
633 /* Invalid encoding, return an error. */
634 SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
635 /* Execution stops here. */
636 }
637
638 /* Determine the caller to store the requested version. */
639 if (secure_origin) {
640 /*
641 * Ensure that the SP is reporting the same version as
642 * specified in its manifest. If these do not match there is
643 * something wrong with the SP.
644 * TODO: Should we abort the SP? For now assert this is not
645 * the case.
646 */
647 assert(requested_version ==
648 spmc_get_current_sp_ctx()->ffa_version);
649 } else {
650 /*
651 * If this is called by the normal world, record this
652 * information in its descriptor.
653 */
654 spmc_get_hyp_ctx()->ffa_version = requested_version;
655 }
656
657 SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
658 FFA_VERSION_MINOR));
659 }
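/*
 * For reference, a sketch of the version encoding assumed above: an FF-A
 * version is a 32-bit value with bit 31 reserved (MBZ), the major version
 * in bits [30:16] and the minor version in bits [15:0], which is what
 * MAKE_FFA_VERSION() is expected to produce:
 *
 *     version = (major << 16) | minor;    // e.g. 0x00010001 for v1.1
 */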
660
661 /*******************************************************************************
662 * Helper function to obtain the FF-A version of the calling partition.
663 ******************************************************************************/
664 uint32_t get_partition_ffa_version(bool secure_origin)
665 {
666 if (secure_origin) {
667 return spmc_get_current_sp_ctx()->ffa_version;
668 } else {
669 return spmc_get_hyp_ctx()->ffa_version;
670 }
671 }
672
673 static uint64_t rxtx_map_handler(uint32_t smc_fid,
674 bool secure_origin,
675 uint64_t x1,
676 uint64_t x2,
677 uint64_t x3,
678 uint64_t x4,
679 void *cookie,
680 void *handle,
681 uint64_t flags)
682 {
683 int ret;
684 uint32_t error_code;
685 uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
686 struct mailbox *mbox;
687 uintptr_t tx_address = x1;
688 uintptr_t rx_address = x2;
689 uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
690 uint32_t buf_size = page_count * FFA_PAGE_SIZE;
691
692 /*
693 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
694 * indirect messaging with SPs. Check if the Hypervisor has invoked this
695 * ABI on behalf of a VM and reject it if this is the case.
696 */
697 if (tx_address == 0 || rx_address == 0) {
698 WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
699 return spmc_ffa_error_return(handle,
700 FFA_ERROR_INVALID_PARAMETER);
701 }
702
703 /* Ensure the specified buffers are not the same. */
704 if (tx_address == rx_address) {
705 WARN("TX Buffer must not be the same as RX Buffer.\n");
706 return spmc_ffa_error_return(handle,
707 FFA_ERROR_INVALID_PARAMETER);
708 }
709
710 /* Ensure the buffer size is not 0. */
711 if (buf_size == 0U) {
712 WARN("Buffer size must not be 0\n");
713 return spmc_ffa_error_return(handle,
714 FFA_ERROR_INVALID_PARAMETER);
715 }
716
717 /*
718 * Ensure the buffer size is a multiple of the translation granule size
719 * in TF-A.
720 */
721 if (buf_size % PAGE_SIZE != 0U) {
722 WARN("Buffer size must be aligned to translation granule.\n");
723 return spmc_ffa_error_return(handle,
724 FFA_ERROR_INVALID_PARAMETER);
725 }
726
727 /* Obtain the RX/TX buffer pair descriptor. */
728 mbox = spmc_get_mbox_desc(secure_origin);
729
730 spin_lock(&mbox->lock);
731
732 /* Check if buffers have already been mapped. */
733 if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
734 WARN("RX/TX Buffers already mapped (%p/%p)\n",
735 (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
736 error_code = FFA_ERROR_DENIED;
737 goto err;
738 }
739
740 /* memmap the TX buffer as read only. */
741 ret = mmap_add_dynamic_region(tx_address, /* PA */
742 tx_address, /* VA */
743 buf_size, /* size */
744 mem_atts | MT_RO_DATA); /* attrs */
745 if (ret != 0) {
746 /* Return the correct error code. */
747 error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
748 FFA_ERROR_INVALID_PARAMETER;
749 WARN("Unable to map TX buffer: %d\n", error_code);
750 goto err;
751 }
752
753 /* memmap the RX buffer as read write. */
754 ret = mmap_add_dynamic_region(rx_address, /* PA */
755 rx_address, /* VA */
756 buf_size, /* size */
757 mem_atts | MT_RW_DATA); /* attrs */
758
759 if (ret != 0) {
760 error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
761 FFA_ERROR_INVALID_PARAMETER;
762 WARN("Unable to map RX buffer: %d\n", error_code);
763 /* Unmap the TX buffer again. */
764 mmap_remove_dynamic_region(tx_address, buf_size);
765 goto err;
766 }
767
768 mbox->tx_buffer = (void *) tx_address;
769 mbox->rx_buffer = (void *) rx_address;
770 mbox->rxtx_page_count = page_count;
771 spin_unlock(&mbox->lock);
772
773 SMC_RET1(handle, FFA_SUCCESS_SMC32);
774 /* Execution stops here. */
775 err:
776 spin_unlock(&mbox->lock);
777 return spmc_ffa_error_return(handle, error_code);
778 }
779
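/*
 * Illustrative usage sketch (assuming FFA_PAGE_SIZE is the 4KiB FF-A page):
 * a caller registering a single-page RX/TX pair with the handler above
 * passes the TX base in x1, the RX base in x2 and the page count in the low
 * bits of x3:
 *
 *     x1 = (uintptr_t)tx_buf;    // TX buffer, mapped read-only by the SPMC
 *     x2 = (uintptr_t)rx_buf;    // RX buffer, mapped read-write
 *     x3 = 1;                    // page count, bits [5:0]
 */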
780 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
781 bool secure_origin,
782 uint64_t x1,
783 uint64_t x2,
784 uint64_t x3,
785 uint64_t x4,
786 void *cookie,
787 void *handle,
788 uint64_t flags)
789 {
790 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
791 uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
792
793 /*
794 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
795 * indirect messaging with SPs. Check if the Hypervisor has invoked this
796 * ABI on behalf of a VM and reject it if this is the case.
797 */
798 if (x1 != 0UL) {
799 return spmc_ffa_error_return(handle,
800 FFA_ERROR_INVALID_PARAMETER);
801 }
802
803 spin_lock(&mbox->lock);
804
805 /* Check if buffers are currently mapped. */
806 if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
807 spin_unlock(&mbox->lock);
808 return spmc_ffa_error_return(handle,
809 FFA_ERROR_INVALID_PARAMETER);
810 }
811
812 /* Unmap RX Buffer */
813 if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
814 buf_size) != 0) {
815 WARN("Unable to unmap RX buffer!\n");
816 }
817
818 mbox->rx_buffer = 0;
819
820 /* Unmap TX Buffer */
821 if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
822 buf_size) != 0) {
823 WARN("Unable to unmap TX buffer!\n");
824 }
825
826 mbox->tx_buffer = 0;
827 mbox->rxtx_page_count = 0;
828
829 spin_unlock(&mbox->lock);
830 SMC_RET1(handle, FFA_SUCCESS_SMC32);
831 }
832
833 /*
834 * Helper function to populate the properties field of a Partition Info Get
835 * descriptor.
836 */
837 static uint32_t
838 partition_info_get_populate_properties(uint32_t sp_properties,
839 enum sp_execution_state sp_ec_state)
840 {
841 uint32_t properties = sp_properties;
842 uint32_t ec_state;
843
844 /* Determine the execution state of the SP. */
845 ec_state = sp_ec_state == SP_STATE_AARCH64 ?
846 FFA_PARTITION_INFO_GET_AARCH64_STATE :
847 FFA_PARTITION_INFO_GET_AARCH32_STATE;
848
849 properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
850
851 return properties;
852 }
853
854 /*
855 * Collate the partition information in a v1.1 partition information
856 * descriptor format; this will be converted later if required.
857 */
858 static int partition_info_get_handler_v1_1(uint32_t *uuid,
859 struct ffa_partition_info_v1_1
860 *partitions,
861 uint32_t max_partitions,
862 uint32_t *partition_count)
863 {
864 uint32_t index;
865 struct ffa_partition_info_v1_1 *desc;
866 bool null_uuid = is_null_uuid(uuid);
867 struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
868
869 /* Deal with Logical Partitions. */
870 for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
871 if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
872 /* Found a matching UUID, populate appropriately. */
873 if (*partition_count >= max_partitions) {
874 return FFA_ERROR_NO_MEMORY;
875 }
876
877 desc = &partitions[*partition_count];
878 desc->ep_id = el3_lp_descs[index].sp_id;
879 desc->execution_ctx_count = PLATFORM_CORE_COUNT;
880 /* LSPs must be AArch64. */
881 desc->properties =
882 partition_info_get_populate_properties(
883 el3_lp_descs[index].properties,
884 SP_STATE_AARCH64);
885
886 if (null_uuid) {
887 copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
888 }
889 (*partition_count)++;
890 }
891 }
892
893 /* Deal with physical SPs. */
894 for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
895 if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
896 /* Found a matching UUID, populate appropriately. */
897 if (*partition_count >= max_partitions) {
898 return FFA_ERROR_NO_MEMORY;
899 }
900
901 desc = &partitions[*partition_count];
902 desc->ep_id = sp_desc[index].sp_id;
903 /*
904 * Execution context count must match the number of cores for
905 * S-EL1 SPs.
906 */
907 desc->execution_ctx_count = PLATFORM_CORE_COUNT;
908 desc->properties =
909 partition_info_get_populate_properties(
910 sp_desc[index].properties,
911 sp_desc[index].execution_state);
912
913 if (null_uuid) {
914 copy_uuid(desc->uuid, sp_desc[index].uuid);
915 }
916 (*partition_count)++;
917 }
918 }
919 return 0;
920 }
921
922 /*
923 * Handle the case where the caller only wants the count of partitions
924 * matching a given UUID and does not want the corresponding descriptors
925 * populated.
926 */
927 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
928 {
929 uint32_t index = 0;
930 uint32_t partition_count = 0;
931 bool null_uuid = is_null_uuid(uuid);
932 struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
933
934 /* Deal with Logical Partitions. */
935 for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
936 if (null_uuid ||
937 uuid_match(uuid, el3_lp_descs[index].uuid)) {
938 (partition_count)++;
939 }
940 }
941
942 /* Deal with physical SPs. */
943 for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
944 if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
945 (partition_count)++;
946 }
947 }
948 return partition_count;
949 }
950
951 /*
952 * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
953 * the corresponding descriptor format from the v1.1 descriptor array.
954 */
955 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
956 *partitions,
957 struct mailbox *mbox,
958 int partition_count)
959 {
960 uint32_t index;
961 uint32_t buf_size;
962 uint32_t descriptor_size;
963 struct ffa_partition_info_v1_0 *v1_0_partitions =
964 (struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
965
966 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
967 descriptor_size = partition_count *
968 sizeof(struct ffa_partition_info_v1_0);
969
970 if (descriptor_size > buf_size) {
971 return FFA_ERROR_NO_MEMORY;
972 }
973
974 for (index = 0U; index < partition_count; index++) {
975 v1_0_partitions[index].ep_id = partitions[index].ep_id;
976 v1_0_partitions[index].execution_ctx_count =
977 partitions[index].execution_ctx_count;
978 /* Only report v1.0 properties. */
979 v1_0_partitions[index].properties =
980 (partitions[index].properties &
981 FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
982 }
983 return 0;
984 }
985
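/*
 * For reference, a sketch of the two descriptor layouts the conversion above
 * moves between (field names follow the structures used in this file; the
 * authoritative definitions live in the FF-A headers). The v1.1 descriptor
 * appends the partition UUID, which is why it is dropped when reporting to a
 * v1.0 caller:
 *
 *     v1.0: { ep_id, execution_ctx_count, properties }
 *     v1.1: { ep_id, execution_ctx_count, properties, uuid[4] }
 */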
986 /*
987 * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
988 * v1.0 implementations.
989 */
990 static uint64_t partition_info_get_handler(uint32_t smc_fid,
991 bool secure_origin,
992 uint64_t x1,
993 uint64_t x2,
994 uint64_t x3,
995 uint64_t x4,
996 void *cookie,
997 void *handle,
998 uint64_t flags)
999 {
1000 int ret;
1001 uint32_t partition_count = 0;
1002 uint32_t size = 0;
1003 uint32_t ffa_version = get_partition_ffa_version(secure_origin);
1004 struct mailbox *mbox;
1005 uint64_t info_get_flags;
1006 bool count_only;
1007 uint32_t uuid[4];
1008
1009 uuid[0] = x1;
1010 uuid[1] = x2;
1011 uuid[2] = x3;
1012 uuid[3] = x4;
1013
1014 /* Determine if the Partition descriptors should be populated. */
1015 info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
1016 count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
1017
1018 /* Handle the case where we don't need to populate the descriptors. */
1019 if (count_only) {
1020 partition_count = partition_info_get_handler_count_only(uuid);
1021 if (partition_count == 0) {
1022 return spmc_ffa_error_return(handle,
1023 FFA_ERROR_INVALID_PARAMETER);
1024 }
1025 } else {
1026 struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
1027
1028 /*
1029 * Handle the case where the partition descriptors are required,
1030 * check we have the buffers available and populate the
1031 * appropriate structure version.
1032 */
1033
1034 /* Obtain the v1.1 format of the descriptors. */
1035 ret = partition_info_get_handler_v1_1(uuid, partitions,
1036 MAX_SP_LP_PARTITIONS,
1037 &partition_count);
1038
1039 /* Check if an error occurred during discovery. */
1040 if (ret != 0) {
1041 goto err;
1042 }
1043
1044 /* If we didn't find any matches the UUID is unknown. */
1045 if (partition_count == 0) {
1046 ret = FFA_ERROR_INVALID_PARAMETER;
1047 goto err;
1048 }
1049
1050 /* Obtain the partition mailbox RX/TX buffer pair descriptor. */
1051 mbox = spmc_get_mbox_desc(secure_origin);
1052
1053 /*
1054 * If the caller has not bothered registering its RX/TX pair
1055 * then return an error code.
1056 */
1057 spin_lock(&mbox->lock);
1058 if (mbox->rx_buffer == NULL) {
1059 ret = FFA_ERROR_BUSY;
1060 goto err_unlock;
1061 }
1062
1063 /* Ensure the RX buffer is currently free. */
1064 if (mbox->state != MAILBOX_STATE_EMPTY) {
1065 ret = FFA_ERROR_BUSY;
1066 goto err_unlock;
1067 }
1068
1069 /* Zero the RX buffer before populating. */
1070 (void)memset(mbox->rx_buffer, 0,
1071 mbox->rxtx_page_count * FFA_PAGE_SIZE);
1072
1073 /*
1074 * Depending on the FF-A version of the requesting partition
1075 * we may need to convert to a v1.0 format otherwise we can copy
1076 * directly.
1077 */
1078 if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
1079 ret = partition_info_populate_v1_0(partitions,
1080 mbox,
1081 partition_count);
1082 if (ret != 0) {
1083 goto err_unlock;
1084 }
1085 } else {
1086 uint32_t buf_size = mbox->rxtx_page_count *
1087 FFA_PAGE_SIZE;
1088
1089 /* Ensure the descriptor will fit in the buffer. */
1090 size = sizeof(struct ffa_partition_info_v1_1);
1091 if (partition_count * size > buf_size) {
1092 ret = FFA_ERROR_NO_MEMORY;
1093 goto err_unlock;
1094 }
1095 memcpy(mbox->rx_buffer, partitions,
1096 partition_count * size);
1097 }
1098
1099 mbox->state = MAILBOX_STATE_FULL;
1100 spin_unlock(&mbox->lock);
1101 }
1102 SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
1103
1104 err_unlock:
1105 spin_unlock(&mbox->lock);
1106 err:
1107 return spmc_ffa_error_return(handle, ret);
1108 }
1109
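/*
 * Illustrative note: the "count only" behaviour above is selected through the
 * flag read from w5 (FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK, bit 0 in FF-A
 * v1.1). A hypothetical caller that only wants the number of matching
 * partitions, without owning the RX buffer, would issue:
 *
 *     x1..x4 = UUID words (all zero for the wildcard/NULL UUID);
 *     x5     = 0x1;    // return the count only, do not populate descriptors
 */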
1110 static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
1111 {
1112 SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
1113 }
1114
1115 static uint64_t ffa_features_retrieve_request(bool secure_origin,
1116 uint32_t input_properties,
1117 void *handle)
1118 {
1119 /*
1120 * If we're called by the normal world we don't support any
1121 * additional features.
1122 */
1123 if (!secure_origin) {
1124 if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
1125 return spmc_ffa_error_return(handle,
1126 FFA_ERROR_NOT_SUPPORTED);
1127 }
1128
1129 } else {
1130 struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1131 /*
1132 * If v1.1 the NS bit must be set otherwise it is an invalid
1133 * call. If v1.0 check and store whether the SP has requested
1134 * the use of the NS bit.
1135 */
1136 if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
1137 if ((input_properties &
1138 FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
1139 return spmc_ffa_error_return(handle,
1140 FFA_ERROR_NOT_SUPPORTED);
1141 }
1142 return ffa_feature_success(handle,
1143 FFA_FEATURES_RET_REQ_NS_BIT);
1144 } else {
1145 sp->ns_bit_requested = (input_properties &
1146 FFA_FEATURES_RET_REQ_NS_BIT) !=
1147 0U;
1148 }
1149 if (sp->ns_bit_requested) {
1150 return ffa_feature_success(handle,
1151 FFA_FEATURES_RET_REQ_NS_BIT);
1152 }
1153 }
1154 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1155 }
1156
1157 static uint64_t ffa_features_handler(uint32_t smc_fid,
1158 bool secure_origin,
1159 uint64_t x1,
1160 uint64_t x2,
1161 uint64_t x3,
1162 uint64_t x4,
1163 void *cookie,
1164 void *handle,
1165 uint64_t flags)
1166 {
1167 uint32_t function_id = (uint32_t) x1;
1168 uint32_t input_properties = (uint32_t) x2;
1169
1170 /* Check if a Feature ID was requested. */
1171 if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
1172 /* We currently don't support any additional features. */
1173 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1174 }
1175
1176 /*
1177 * Handle the cases where we have separate handlers due to additional
1178 * properties.
1179 */
1180 switch (function_id) {
1181 case FFA_MEM_RETRIEVE_REQ_SMC32:
1182 case FFA_MEM_RETRIEVE_REQ_SMC64:
1183 return ffa_features_retrieve_request(secure_origin,
1184 input_properties,
1185 handle);
1186 }
1187
1188 /*
1189 * We don't currently support additional input properties for these
1190 * other ABIs therefore ensure this value is set to 0.
1191 */
1192 if (input_properties != 0U) {
1193 return spmc_ffa_error_return(handle,
1194 FFA_ERROR_NOT_SUPPORTED);
1195 }
1196
1197 /* Report if any other FF-A ABI is supported. */
1198 switch (function_id) {
1199 /* Supported features from both worlds. */
1200 case FFA_ERROR:
1201 case FFA_SUCCESS_SMC32:
1202 case FFA_INTERRUPT:
1203 case FFA_SPM_ID_GET:
1204 case FFA_ID_GET:
1205 case FFA_FEATURES:
1206 case FFA_VERSION:
1207 case FFA_RX_RELEASE:
1208 case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1209 case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1210 case FFA_PARTITION_INFO_GET:
1211 case FFA_RXTX_MAP_SMC32:
1212 case FFA_RXTX_MAP_SMC64:
1213 case FFA_RXTX_UNMAP:
1214 case FFA_MEM_FRAG_TX:
1215 case FFA_MSG_RUN:
1216
1217 /*
1218 * We are relying on the fact that the other registers
1219 * will be set to 0 as these values align with the
1220 * currently implemented features of the SPMC. If this
1221 * changes this function must be extended to handle
1222 * reporting the additional functionality.
1223 */
1224
1225 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1226 /* Execution stops here. */
1227
1228 /* Supported ABIs only from the secure world. */
1229 case FFA_SECONDARY_EP_REGISTER_SMC64:
1230 case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1231 case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1232 case FFA_MEM_RELINQUISH:
1233 case FFA_MSG_WAIT:
1234
1235 if (!secure_origin) {
1236 return spmc_ffa_error_return(handle,
1237 FFA_ERROR_NOT_SUPPORTED);
1238 }
1239 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1240 /* Execution stops here. */
1241
1242 /* Supported features only from the normal world. */
1243 case FFA_MEM_SHARE_SMC32:
1244 case FFA_MEM_SHARE_SMC64:
1245 case FFA_MEM_LEND_SMC32:
1246 case FFA_MEM_LEND_SMC64:
1247 case FFA_MEM_RECLAIM:
1248 case FFA_MEM_FRAG_RX:
1249
1250 if (secure_origin) {
1251 return spmc_ffa_error_return(handle,
1252 FFA_ERROR_NOT_SUPPORTED);
1253 }
1254 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1255 /* Execution stops here. */
1256
1257 default:
1258 return spmc_ffa_error_return(handle,
1259 FFA_ERROR_NOT_SUPPORTED);
1260 }
1261 }
1262
1263 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1264 bool secure_origin,
1265 uint64_t x1,
1266 uint64_t x2,
1267 uint64_t x3,
1268 uint64_t x4,
1269 void *cookie,
1270 void *handle,
1271 uint64_t flags)
1272 {
1273 if (secure_origin) {
1274 SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1275 spmc_get_current_sp_ctx()->sp_id);
1276 } else {
1277 SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1278 spmc_get_hyp_ctx()->ns_ep_id);
1279 }
1280 }
1281
1282 /*
1283 * Enable an SP to query the ID assigned to the SPMC.
1284 */
1285 static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
1286 bool secure_origin,
1287 uint64_t x1,
1288 uint64_t x2,
1289 uint64_t x3,
1290 uint64_t x4,
1291 void *cookie,
1292 void *handle,
1293 uint64_t flags)
1294 {
1295 assert(x1 == 0UL);
1296 assert(x2 == 0UL);
1297 assert(x3 == 0UL);
1298 assert(x4 == 0UL);
1299 assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
1300 assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
1301 assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
1302
1303 SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
1304 }
1305
1306 static uint64_t ffa_run_handler(uint32_t smc_fid,
1307 bool secure_origin,
1308 uint64_t x1,
1309 uint64_t x2,
1310 uint64_t x3,
1311 uint64_t x4,
1312 void *cookie,
1313 void *handle,
1314 uint64_t flags)
1315 {
1316 struct secure_partition_desc *sp;
1317 uint16_t target_id = FFA_RUN_EP_ID(x1);
1318 uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
1319 unsigned int idx;
1320 unsigned int *rt_state;
1321 unsigned int *rt_model;
1322
1323 /* Can only be called from the normal world. */
1324 if (secure_origin) {
1325 ERROR("FFA_RUN can only be called from NWd.\n");
1326 return spmc_ffa_error_return(handle,
1327 FFA_ERROR_INVALID_PARAMETER);
1328 }
1329
1330 /* Cannot run a Normal world partition. */
1331 if (ffa_is_normal_world_id(target_id)) {
1332 ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
1333 return spmc_ffa_error_return(handle,
1334 FFA_ERROR_INVALID_PARAMETER);
1335 }
1336
1337 /* Check that the target SP exists. */
1338 sp = spmc_get_sp_ctx(target_id);
1339 ERROR("Unknown partition ID (0x%x).\n", target_id);
1340 if (sp == NULL) {
1341 return spmc_ffa_error_return(handle,
1342 FFA_ERROR_INVALID_PARAMETER);
1343 }
1344
1345 idx = get_ec_index(sp);
1346 if (idx != vcpu_id) {
1347 ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
1348 return spmc_ffa_error_return(handle,
1349 FFA_ERROR_INVALID_PARAMETER);
1350 }
1351 rt_state = &((sp->ec[idx]).rt_state);
1352 rt_model = &((sp->ec[idx]).rt_model);
1353 if (*rt_state == RT_STATE_RUNNING) {
1354 ERROR("Partition (0x%x) is already running.\n", target_id);
1355 return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
1356 }
1357
1358 /*
1359 * Sanity check that if the execution context was not waiting then it
1360 * was either in the direct request or the run partition runtime model.
1361 */
1362 if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
1363 assert(*rt_model == RT_MODEL_RUN ||
1364 *rt_model == RT_MODEL_DIR_REQ);
1365 }
1366
1367 /*
1368 * If the context was waiting then update the partition runtime model.
1369 */
1370 if (*rt_state == RT_STATE_WAITING) {
1371 *rt_model = RT_MODEL_RUN;
1372 }
1373
1374 /*
1375 * Forward the request to the correct SP vCPU after updating
1376 * its state.
1377 */
1378 *rt_state = RT_STATE_RUNNING;
1379
1380 return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
1381 handle, cookie, flags, target_id);
1382 }
1383
1384 static uint64_t rx_release_handler(uint32_t smc_fid,
1385 bool secure_origin,
1386 uint64_t x1,
1387 uint64_t x2,
1388 uint64_t x3,
1389 uint64_t x4,
1390 void *cookie,
1391 void *handle,
1392 uint64_t flags)
1393 {
1394 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1395
1396 spin_lock(&mbox->lock);
1397
1398 if (mbox->state != MAILBOX_STATE_FULL) {
1399 spin_unlock(&mbox->lock);
1400 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1401 }
1402
1403 mbox->state = MAILBOX_STATE_EMPTY;
1404 spin_unlock(&mbox->lock);
1405
1406 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1407 }
1408
1409 /*
1410 * Perform initial validation on the provided secondary entry point.
1411 * For now ensure it does not lie within the BL31 Image or the SP's
1412 * RX/TX buffers as these are mapped within EL3.
1413 * TODO: perform validation for additional invalid memory regions.
1414 */
1415 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1416 {
1417 struct mailbox *mb;
1418 uintptr_t buffer_size;
1419 uintptr_t sp_rx_buffer;
1420 uintptr_t sp_tx_buffer;
1421 uintptr_t sp_rx_buffer_limit;
1422 uintptr_t sp_tx_buffer_limit;
1423
1424 mb = &sp->mailbox;
1425 buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1426 sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1427 sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1428 sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1429 sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1430
1431 /*
1432 * Check if the entry point lies within BL31, or the
1433 * SP's RX or TX buffer.
1434 */
1435 if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1436 (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1437 (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1438 return -EINVAL;
1439 }
1440 return 0;
1441 }
1442
1443 /*******************************************************************************
1444 * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1445 * register an entry point for initialization during a secondary cold boot.
1446 ******************************************************************************/
1447 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1448 bool secure_origin,
1449 uint64_t x1,
1450 uint64_t x2,
1451 uint64_t x3,
1452 uint64_t x4,
1453 void *cookie,
1454 void *handle,
1455 uint64_t flags)
1456 {
1457 struct secure_partition_desc *sp;
1458 struct sp_exec_ctx *sp_ctx;
1459
1460 /* This request cannot originate from the Normal world. */
1461 if (!secure_origin) {
1462 WARN("%s: Can only be called from SWd.\n", __func__);
1463 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1464 }
1465
1466 /* Get the context of the current SP. */
1467 sp = spmc_get_current_sp_ctx();
1468 if (sp == NULL) {
1469 WARN("%s: Cannot find SP context.\n", __func__);
1470 return spmc_ffa_error_return(handle,
1471 FFA_ERROR_INVALID_PARAMETER);
1472 }
1473
1474 /* Only an S-EL1 SP should be invoking this ABI. */
1475 if (sp->runtime_el != S_EL1) {
1476 WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1477 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1478 }
1479
1480 /* Ensure the SP is in its initialization state. */
1481 sp_ctx = spmc_get_sp_ec(sp);
1482 if (sp_ctx->rt_model != RT_MODEL_INIT) {
1483 WARN("%s: Can only be called during SP initialization.\n",
1484 __func__);
1485 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1486 }
1487
1488 /* Perform initial validation of the secondary entry point. */
1489 if (validate_secondary_ep(x1, sp)) {
1490 WARN("%s: Invalid entry point provided (0x%lx).\n",
1491 __func__, x1);
1492 return spmc_ffa_error_return(handle,
1493 FFA_ERROR_INVALID_PARAMETER);
1494 }
1495
1496 /*
1497 * Update the secondary entrypoint in SP context.
1498 * We don't need a lock here as during partition initialization there
1499 * will only be a single core online.
1500 */
1501 sp->secondary_ep = x1;
1502 VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1503
1504 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1505 }
1506
1507 /*******************************************************************************
1508 * This function parses the Secure Partition manifest. From the manifest it
1509 * fetches the details needed to prepare the Secure Partition image context
1510 * and, if present, the Secure Partition image boot arguments.
1511 ******************************************************************************/
1512 static int sp_manifest_parse(void *sp_manifest, int offset,
1513 struct secure_partition_desc *sp,
1514 entry_point_info_t *ep_info,
1515 int32_t *boot_info_reg)
1516 {
1517 int32_t ret, node;
1518 uint32_t config_32;
1519
1520 /*
1521 * Look for the mandatory fields that are expected to be present in
1522 * the SP manifests.
1523 */
1524 node = fdt_path_offset(sp_manifest, "/");
1525 if (node < 0) {
1526 ERROR("Did not find root node.\n");
1527 return node;
1528 }
1529
1530 ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1531 ARRAY_SIZE(sp->uuid), sp->uuid);
1532 if (ret != 0) {
1533 ERROR("Missing Secure Partition UUID.\n");
1534 return ret;
1535 }
1536
1537 ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1538 if (ret != 0) {
1539 ERROR("Missing SP Exception Level information.\n");
1540 return ret;
1541 }
1542
1543 sp->runtime_el = config_32;
1544
1545 ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1546 if (ret != 0) {
1547 ERROR("Missing Secure Partition FF-A Version.\n");
1548 return ret;
1549 }
1550
1551 sp->ffa_version = config_32;
1552
1553 ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1554 if (ret != 0) {
1555 ERROR("Missing Secure Partition Execution State.\n");
1556 return ret;
1557 }
1558
1559 sp->execution_state = config_32;
1560
1561 ret = fdt_read_uint32(sp_manifest, node,
1562 "messaging-method", &config_32);
1563 if (ret != 0) {
1564 ERROR("Missing Secure Partition messaging method.\n");
1565 return ret;
1566 }
1567
1568 /* Validate this entry, we currently only support direct messaging. */
1569 if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1570 FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
1571 WARN("Invalid Secure Partition messaging method (0x%x)\n",
1572 config_32);
1573 return -EINVAL;
1574 }
1575
1576 sp->properties = config_32;
1577
1578 ret = fdt_read_uint32(sp_manifest, node,
1579 "execution-ctx-count", &config_32);
1580
1581 if (ret != 0) {
1582 ERROR("Missing SP Execution Context Count.\n");
1583 return ret;
1584 }
1585
1586 /*
1587 * Ensure this field is set correctly in the manifest. However,
1588 * since this is currently a hardcoded value for S-EL1 partitions,
1589 * we don't need to save it here; just validate it.
1590 */
1591 if (config_32 != PLATFORM_CORE_COUNT) {
1592 ERROR("SP Execution Context Count (%u) must be %u.\n",
1593 config_32, PLATFORM_CORE_COUNT);
1594 return -EINVAL;
1595 }
1596
1597 /*
1598 * Look for the optional fields that are expected to be present in
1599 * an SP manifest.
1600 */
1601 ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
1602 if (ret != 0) {
1603 WARN("Missing Secure Partition ID.\n");
1604 } else {
1605 if (!is_ffa_secure_id_valid(config_32)) {
1606 ERROR("Invalid Secure Partition ID (0x%x).\n",
1607 config_32);
1608 return -EINVAL;
1609 }
1610 sp->sp_id = config_32;
1611 }
1612
1613 ret = fdt_read_uint32(sp_manifest, node,
1614 "power-management-messages", &config_32);
1615 if (ret != 0) {
1616 WARN("Missing Power Management Messages entry.\n");
1617 } else {
1618 /*
1619 * Ensure only the currently supported power messages have
1620 * been requested.
1621 */
1622 if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
1623 FFA_PM_MSG_SUB_CPU_SUSPEND |
1624 FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
1625 ERROR("Requested unsupported PM messages (%x)\n",
1626 config_32);
1627 return -EINVAL;
1628 }
1629 sp->pwr_mgmt_msgs = config_32;
1630 }
1631
1632 ret = fdt_read_uint32(sp_manifest, node,
1633 "gp-register-num", &config_32);
1634 if (ret != 0) {
1635 WARN("Missing boot information register.\n");
1636 } else {
1637 /* Check if a register number between 0-3 is specified. */
1638 if (config_32 < 4) {
1639 *boot_info_reg = config_32;
1640 } else {
1641 WARN("Incorrect boot information register (%u).\n",
1642 config_32);
1643 }
1644 }
1645
1646 return 0;
1647 }
1648
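/*
 * Illustrative example (placeholder values): a minimal FF-A manifest root
 * node providing the fields parsed above. The authoritative schema is the
 * FF-A manifest binding documentation; this sketch only shows how the
 * properties map onto sp_manifest_parse():
 *
 *     / {
 *             compatible = "arm,ffa-manifest-1.0";
 *             ffa-version = <0x00010001>;   // v1.1
 *             uuid = <0x1e67b5b4 0xe14f904a 0x13fb1fb8 0xcbdae1da>;
 *             id = <0x8001>;
 *             exception-level = <0x2>;      // placeholder: S-EL1 encoding
 *             execution-state = <0x0>;      // AArch64
 *             execution-ctx-count = <8>;    // must equal PLATFORM_CORE_COUNT
 *             messaging-method = <0x3>;     // direct requests: send + receive
 *             gp-register-num = <0x0>;
 *     };
 */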
1649 /*******************************************************************************
1650 * This function gets the Secure Partition Manifest base and maps the manifest
1651 * region.
1652 * Currently only one Secure Partition manifest is considered, which is used to
1653 * prepare the context for the single Secure Partition.
1654 ******************************************************************************/
1655 static int find_and_prepare_sp_context(void)
1656 {
1657 void *sp_manifest;
1658 uintptr_t manifest_base;
1659 uintptr_t manifest_base_align;
1660 entry_point_info_t *next_image_ep_info;
1661 int32_t ret, boot_info_reg = -1;
1662 struct secure_partition_desc *sp;
1663
1664 next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
1665 if (next_image_ep_info == NULL) {
1666 WARN("No Secure Partition image provided by BL2.\n");
1667 return -ENOENT;
1668 }
1669
1670 sp_manifest = (void *)next_image_ep_info->args.arg0;
1671 if (sp_manifest == NULL) {
1672 WARN("Secure Partition manifest absent.\n");
1673 return -ENOENT;
1674 }
1675
1676 manifest_base = (uintptr_t)sp_manifest;
1677 manifest_base_align = page_align(manifest_base, DOWN);
1678
1679 /*
1680 * Map the secure partition manifest region in the EL3 translation
1681 * regime.
1682 * Map an area of (2 * PAGE_SIZE) for now: once the manifest base is
1683 * aligned down to a page boundary, a single page starting at the aligned
1684 * base may not completely cover the secure partition manifest region.
1685 */
1686 ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
1687 manifest_base_align,
1688 PAGE_SIZE * 2,
1689 MT_RO_DATA);
1690 if (ret != 0) {
1691 ERROR("Error while mapping SP manifest (%d).\n", ret);
1692 return ret;
1693 }
1694
1695 ret = fdt_node_offset_by_compatible(sp_manifest, -1,
1696 "arm,ffa-manifest-1.0");
1697 if (ret < 0) {
1698 ERROR("Error happened in SP manifest reading.\n");
1699 return -EINVAL;
1700 }
1701
1702 /*
1703 * Store the size of the manifest so that it can be used later to pass
1704 * the manifest as boot information.
1705 */
1706 next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
1707 INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);
1708
1709 /*
1710 * Select an SP descriptor for initialising the partition's execution
1711 * context on the primary CPU.
1712 */
1713 sp = spmc_get_current_sp_ctx();
1714
1715 /* Initialize entry point information for the SP */
1716 SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
1717 SECURE | EP_ST_ENABLE);
1718
1719 /* Parse the SP manifest. */
1720 ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
1721 &boot_info_reg);
1722 if (ret != 0) {
1723 ERROR("Error in Secure Partition manifest parsing.\n");
1724 return ret;
1725 }
1726
1727 /* Check that the runtime EL in the manifest is correct. */
1728 if (sp->runtime_el != S_EL1) {
1729 ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
1730 return -EINVAL;
1731 }
1732
1733 /* Perform any common initialisation. */
1734 spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
1735
1736 /* Perform any initialisation specific to S-EL1 SPs. */
1737 spmc_el1_sp_setup(sp, next_image_ep_info);
1738
1739 /* Initialize the SP context with the required ep info. */
1740 spmc_sp_common_ep_commit(sp, next_image_ep_info);
1741
1742 return 0;
1743 }
1744
1745 /*******************************************************************************
1746 * This function performs initial validation of the EL3 Logical Partitions and
1747 * invokes each partition's init function.
1748 ******************************************************************************/
1749 static int32_t logical_sp_init(void)
1750 {
1751 int32_t rc = 0;
1752 struct el3_lp_desc *el3_lp_descs;
1753
1754 /* Perform initial validation of the Logical Partitions. */
1755 rc = el3_sp_desc_validate();
1756 if (rc != 0) {
1757 ERROR("Logical Partition validation failed!\n");
1758 return rc;
1759 }
1760
1761 el3_lp_descs = get_el3_lp_array();
1762
1763 INFO("Logical Secure Partition init start.\n");
1764 for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
1765 rc = el3_lp_descs[i].init();
1766 if (rc != 0) {
1767 ERROR("Logical SP (0x%x) Failed to Initialize\n",
1768 el3_lp_descs[i].sp_id);
1769 return rc;
1770 }
1771 VERBOSE("Logical SP (0x%x) Initialized\n",
1772 el3_lp_descs[i].sp_id);
1773 }
1774
1775 INFO("Logical Secure Partition init completed.\n");
1776
1777 return rc;
1778 }
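
/*
 * A minimal sketch of how an EL3 Logical Partition might be declared so that
 * it is picked up by logical_sp_init() above. The exact macro arguments are
 * defined in services/el3_spmc_logical_sp.h; the names and values below
 * (lp_init, lp_direct_req, the ID, UUID and properties) are hypothetical:
 *
 *	DECLARE_LOGICAL_PARTITION(
 *		my_logical_partition,
 *		lp_init,            // Init function invoked by logical_sp_init()
 *		0xC001,             // FF-A partition ID
 *		LP_UUID,            // Partition UUID
 *		0x1,                // Partition properties
 *		lp_direct_req);     // Direct request handler
 */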
1779
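/*******************************************************************************
 * This function takes an SP execution context pointer and performs a
 * synchronous entry into it on the current CPU.
 ******************************************************************************/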
1780 uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
1781 {
1782 uint64_t rc;
1783
1784 assert(ec != NULL);
1785
1786 /* Assign the context of the SP to this CPU */
1787 cm_set_context(&(ec->cpu_ctx), SECURE);
1788
1789 /* Restore the context assigned above */
1790 cm_el1_sysregs_context_restore(SECURE);
1791 cm_set_next_eret_context(SECURE);
1792
1793 /* Invalidate TLBs at EL1. */
1794 tlbivmalle1();
1795 dsbish();
1796
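/*
 * spm_secure_partition_enter() saves the EL3 C runtime context in
 * ec->c_rt_ctx and ERETs into the SP; control is expected to return here
 * once the SPMC performs the matching synchronous exit on behalf of the SP.
 */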
1797 /* Enter Secure Partition */
1798 rc = spm_secure_partition_enter(&ec->c_rt_ctx);
1799
1800 /* Save secure state */
1801 cm_el1_sysregs_context_save(SECURE);
1802
1803 return rc;
1804 }
1805
1806 /*******************************************************************************
1807 * SPMC Helper Functions.
1808 ******************************************************************************/
1809 static int32_t sp_init(void)
1810 {
1811 uint64_t rc;
1812 struct secure_partition_desc *sp;
1813 struct sp_exec_ctx *ec;
1814
1815 sp = spmc_get_current_sp_ctx();
1816 ec = spmc_get_sp_ec(sp);
1817 ec->rt_model = RT_MODEL_INIT;
1818 ec->rt_state = RT_STATE_RUNNING;
1819
1820 INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
1821
1822 rc = spmc_sp_synchronous_entry(ec);
1823 if (rc != 0) {
1824 /* Indicate SP init was not successful. */
1825 ERROR("SP (0x%x) failed to initialize (%lu).\n",
1826 sp->sp_id, rc);
1827 return 0;
1828 }
1829
1830 ec->rt_state = RT_STATE_WAITING;
1831 INFO("Secure Partition initialized.\n");
1832
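/*
 * Report success to BL31; the registered BL32 init function is expected
 * to return a non-zero value on success.
 */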
1833 return 1;
1834 }
1835
1836 static void initialize_sp_descs(void)
1837 {
1838 struct secure_partition_desc *sp;
1839
1840 for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
1841 sp = &sp_desc[i];
1842 sp->sp_id = INV_SP_ID;
1843 sp->mailbox.rx_buffer = NULL;
1844 sp->mailbox.tx_buffer = NULL;
1845 sp->mailbox.state = MAILBOX_STATE_EMPTY;
1846 sp->secondary_ep = 0;
1847 }
1848 }
1849
1850 static void initialize_ns_ep_descs(void)
1851 {
1852 struct ns_endpoint_desc *ns_ep;
1853
1854 for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
1855 ns_ep = &ns_ep_desc[i];
1856 /*
1857 * An ID of 0 clashes with the Hypervisor ID but will
1858 * not be a problem in practice.
1859 */
1860 ns_ep->ns_ep_id = 0;
1861 ns_ep->ffa_version = 0;
1862 ns_ep->mailbox.rx_buffer = NULL;
1863 ns_ep->mailbox.tx_buffer = NULL;
1864 ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
1865 }
1866 }
1867
1868 /*******************************************************************************
1869 * Initialize SPMC attributes for the SPMD.
1870 ******************************************************************************/
1871 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
1872 {
1873 spmc_attrs->major_version = FFA_VERSION_MAJOR;
1874 spmc_attrs->minor_version = FFA_VERSION_MINOR;
1875 spmc_attrs->exec_state = MODE_RW_64;
1876 spmc_attrs->spmc_id = FFA_SPMC_ID;
1877 }
1878
1879 /*******************************************************************************
1880 * Initialize contexts of all Secure Partitions.
1881 ******************************************************************************/
1882 int32_t spmc_setup(void)
1883 {
1884 int32_t ret;
1885 uint32_t flags;
1886
1887 /* Initialize endpoint descriptors */
1888 initialize_sp_descs();
1889 initialize_ns_ep_descs();
1890
1891 /*
1892 * Retrieve the datastore allocated by platform code for tracking shared
1893 * memory requests and zero the region if it is available.
1894 */
1895 ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
1896 &spmc_shmem_obj_state.data_size);
1897 if (ret != 0) {
1898 ERROR("Failed to obtain memory descriptor backing store!\n");
1899 return ret;
1900 }
1901 memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
1902
1903 /* Setup logical SPs. */
1904 ret = logical_sp_init();
1905 if (ret != 0) {
1906 ERROR("Failed to initialize Logical Partitions.\n");
1907 return ret;
1908 }
1909
1910 /* Perform physical SP setup. */
1911
1912 /* Disable MMU at EL1 (initialized by BL2) */
1913 disable_mmu_icache_el1();
1914
1915 /* Initialize context of the SP */
1916 INFO("Secure Partition context setup start.\n");
1917
1918 ret = find_and_prepare_sp_context();
1919 if (ret != 0) {
1920 ERROR("Error in SP finding and context preparation.\n");
1921 return ret;
1922 }
1923
1924 /* Register power management hooks with PSCI */
1925 psci_register_spd_pm_hook(&spmc_pm);
1926
1927 /*
1928 * Register an interrupt handler for S-EL1 interrupts
1929 * when generated during code executing in the
1930 * non-secure state.
1931 */
1932 flags = 0;
1933 set_interrupt_rm_flag(flags, NON_SECURE);
1934 ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
1935 spmc_sp_interrupt_handler,
1936 flags);
1937 if (ret != 0) {
1938 ERROR("Failed to register interrupt handler! (%d)\n", ret);
1939 panic();
1940 }
1941
1942 /* Register init function for deferred init. */
1943 bl31_register_bl32_init(&sp_init);
1944
1945 INFO("Secure Partition setup done.\n");
1946
1947 return 0;
1948 }
1949
1950 /*******************************************************************************
1951 * Secure Partition Manager SMC handler.
1952 ******************************************************************************/
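/*
 * FF-A calls that target the SPMC are routed here by the SPMD from either
 * security state; secure_origin indicates whether the call originated in the
 * secure world. Unrecognised function IDs are answered with
 * FFA_ERROR(NOT_SUPPORTED).
 */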
1953 uint64_t spmc_smc_handler(uint32_t smc_fid,
1954 bool secure_origin,
1955 uint64_t x1,
1956 uint64_t x2,
1957 uint64_t x3,
1958 uint64_t x4,
1959 void *cookie,
1960 void *handle,
1961 uint64_t flags)
1962 {
1963 switch (smc_fid) {
1964
1965 case FFA_VERSION:
1966 return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
1967 x4, cookie, handle, flags);
1968
1969 case FFA_SPM_ID_GET:
1970 return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
1971 x3, x4, cookie, handle, flags);
1972
1973 case FFA_ID_GET:
1974 return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
1975 x4, cookie, handle, flags);
1976
1977 case FFA_FEATURES:
1978 return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
1979 x4, cookie, handle, flags);
1980
1981 case FFA_SECONDARY_EP_REGISTER_SMC64:
1982 return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
1983 x2, x3, x4, cookie, handle,
1984 flags);
1985
1986 case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1987 case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1988 return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
1989 x3, x4, cookie, handle, flags);
1990
1991 case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1992 case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1993 return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
1994 x3, x4, cookie, handle, flags);
1995
1996 case FFA_RXTX_MAP_SMC32:
1997 case FFA_RXTX_MAP_SMC64:
1998 return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1999 cookie, handle, flags);
2000
2001 case FFA_RXTX_UNMAP:
2002 return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
2003 x4, cookie, handle, flags);
2004
2005 case FFA_PARTITION_INFO_GET:
2006 return partition_info_get_handler(smc_fid, secure_origin, x1,
2007 x2, x3, x4, cookie, handle,
2008 flags);
2009
2010 case FFA_RX_RELEASE:
2011 return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
2012 x4, cookie, handle, flags);
2013
2014 case FFA_MSG_WAIT:
2015 return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2016 cookie, handle, flags);
2017
2018 case FFA_ERROR:
2019 return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2020 cookie, handle, flags);
2021
2022 case FFA_MSG_RUN:
2023 return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
2024 cookie, handle, flags);
2025
2026 case FFA_MEM_SHARE_SMC32:
2027 case FFA_MEM_SHARE_SMC64:
2028 case FFA_MEM_LEND_SMC32:
2029 case FFA_MEM_LEND_SMC64:
2030 return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
2031 cookie, handle, flags);
2032
2033 case FFA_MEM_FRAG_TX:
2034 return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
2035 x4, cookie, handle, flags);
2036
2037 case FFA_MEM_FRAG_RX:
2038 return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
2039 x4, cookie, handle, flags);
2040
2041 case FFA_MEM_RETRIEVE_REQ_SMC32:
2042 case FFA_MEM_RETRIEVE_REQ_SMC64:
2043 return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
2044 x3, x4, cookie, handle, flags);
2045
2046 case FFA_MEM_RELINQUISH:
2047 return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
2048 x3, x4, cookie, handle, flags);
2049
2050 case FFA_MEM_RECLAIM:
2051 return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
2052 x4, cookie, handle, flags);
2053
2054 default:
2055 WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
2056 break;
2057 }
2058 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
2059 }
2060
2061 /*******************************************************************************
2062 * This function is the handler registered for S-EL1 interrupts by the SPMC. It
2063 * validates the interrupt and upon success arranges entry into the SP for
2064 * handling the interrupt.
2065 ******************************************************************************/
2066 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
2067 uint32_t flags,
2068 void *handle,
2069 void *cookie)
2070 {
2071 struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
2072 struct sp_exec_ctx *ec;
2073 uint32_t linear_id = plat_my_core_pos();
2074
2075 /* Sanity check to avoid a NULL pointer dereference. */
2076 assert(sp != NULL);
2077
2078 /* Check the security state when the exception was generated. */
2079 assert(get_interrupt_src_ss(flags) == NON_SECURE);
2080
2081 /* Panic if not an S-EL1 Partition. */
2082 if (sp->runtime_el != S_EL1) {
2083 ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
2084 linear_id);
2085 panic();
2086 }
2087
2088 /* Obtain a reference to the SP execution context. */
2089 ec = spmc_get_sp_ec(sp);
2090
2091 /* Ensure that the execution context is in waiting state else panic. */
2092 if (ec->rt_state != RT_STATE_WAITING) {
2093 ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
2094 linear_id, RT_STATE_WAITING, ec->rt_state);
2095 panic();
2096 }
2097
2098 /* Update the runtime model and state of the partition. */
2099 ec->rt_model = RT_MODEL_INTR;
2100 ec->rt_state = RT_STATE_RUNNING;
2101
2102 VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
2103
2104 /*
2105 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
2106 * populated as the SP can determine this by itself.
2107 */
2108 return spmd_smc_switch_state(FFA_INTERRUPT, false,
2109 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2110 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
2111 handle);
2112 }
2113