1 /*
2 * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
3 * Copyright (c) 2021-2024 Cypress Semiconductor Corporation (an Infineon company)
4 * or an affiliate of Cypress Semiconductor Corporation. All rights reserved.
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 *
8 */
9
10 #include <stdbool.h>
11 #include <stdint.h>
12
13 #include "cmsis_compiler.h"
14
15 #include "async.h"
16 #include "config_impl.h"
17 #include "internal_status_code.h"
18 #include "psa/error.h"
19 #include "utilities.h"
20 #include "private/assert.h"
21 #include "tfm_arch.h"
22 #include "thread.h"
23 #include "tfm_psa_call_pack.h"
24 #include "tfm_spe_mailbox.h"
25 #include "tfm_rpc.h"
26 #include "tfm_hal_multi_core.h"
27 #include "tfm_multi_core.h"
28 #include "ffm/mailbox_agent_api.h"
29
30 static struct secure_mailbox_queue_t spe_mailbox_queue;
31
/*
 * Local copies of invecs and outvecs associated with each mailbox message
 * while it is being processed. Copying the vectors out of NSPE-owned
 * shared memory prevents the NSPE from modifying them mid-call.
 */
struct vectors {
    psa_invec in_vec[PSA_MAX_IOVEC];    /* Local copy of the NSPE input vectors */
    psa_outvec out_vec[PSA_MAX_IOVEC];  /* Local copy of the NSPE output vectors */
    psa_outvec *original_out_vec;       /* NSPE outvec array; updated lengths are
                                         * written back here on successful completion */
    size_t out_len;                     /* Number of valid entries in out_vec */
    bool in_use;                        /* Entry currently owns a live PSA call */
};
/* One entry per SPE mailbox queue slot, indexed by slot index. */
static struct vectors vectors[NUM_MAILBOX_QUEUE_SLOT] = {0};
44
45
set_spe_queue_empty_status(uint8_t idx)46 __STATIC_INLINE void set_spe_queue_empty_status(uint8_t idx)
47 {
48 if (idx < NUM_MAILBOX_QUEUE_SLOT) {
49 spe_mailbox_queue.empty_slots |= (1 << idx);
50 }
51 }
52
clear_spe_queue_empty_status(uint8_t idx)53 __STATIC_INLINE void clear_spe_queue_empty_status(uint8_t idx)
54 {
55 if (idx < NUM_MAILBOX_QUEUE_SLOT) {
56 spe_mailbox_queue.empty_slots &= ~(1 << idx);
57 }
58 }
59
get_spe_queue_empty_status(uint8_t idx)60 __STATIC_INLINE bool get_spe_queue_empty_status(uint8_t idx)
61 {
62 if ((idx < NUM_MAILBOX_QUEUE_SLOT) &&
63 (spe_mailbox_queue.empty_slots & (1 << idx))) {
64 return true;
65 }
66
67 return false;
68 }
69
get_nspe_queue_pend_status(const struct mailbox_status_t * ns_status)70 __STATIC_INLINE mailbox_queue_status_t get_nspe_queue_pend_status(
71 const struct mailbox_status_t *ns_status)
72 {
73 return ns_status->pend_slots;
74 }
75
set_nspe_queue_replied_status(struct mailbox_status_t * ns_status,mailbox_queue_status_t mask)76 __STATIC_INLINE void set_nspe_queue_replied_status(
77 struct mailbox_status_t *ns_status,
78 mailbox_queue_status_t mask)
79 {
80 ns_status->replied_slots |= mask;
81 }
82
clear_nspe_queue_pend_status(struct mailbox_status_t * ns_status,mailbox_queue_status_t mask)83 __STATIC_INLINE void clear_nspe_queue_pend_status(
84 struct mailbox_status_t *ns_status,
85 mailbox_queue_status_t mask)
86 {
87 ns_status->pend_slots &= ~mask;
88 }
89
get_spe_mailbox_msg_handle(uint8_t idx,mailbox_msg_handle_t * handle)90 __STATIC_INLINE int32_t get_spe_mailbox_msg_handle(uint8_t idx,
91 mailbox_msg_handle_t *handle)
92 {
93 if ((idx >= NUM_MAILBOX_QUEUE_SLOT) || !handle) {
94 return MAILBOX_INVAL_PARAMS;
95 }
96
97 *handle = (mailbox_msg_handle_t)(idx + 1);
98
99 return MAILBOX_SUCCESS;
100 }
101
get_spe_mailbox_msg_idx(mailbox_msg_handle_t handle,uint8_t * idx)102 __STATIC_INLINE int32_t get_spe_mailbox_msg_idx(mailbox_msg_handle_t handle,
103 uint8_t *idx)
104 {
105 if ((handle == MAILBOX_MSG_NULL_HANDLE) || !idx) {
106 return MAILBOX_INVAL_PARAMS;
107 }
108
109 *idx = (uint8_t)(handle - 1);
110
111 return MAILBOX_SUCCESS;
112 }
113
mailbox_clean_queue_slot(uint8_t idx)114 static void mailbox_clean_queue_slot(uint8_t idx)
115 {
116 if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
117 return;
118 }
119
120 spm_memset(&spe_mailbox_queue.queue[idx], 0,
121 sizeof(spe_mailbox_queue.queue[idx]));
122 set_spe_queue_empty_status(idx);
123 }
124
/*
 * Return the address of the NSPE reply structure paired with SPE queue
 * slot idx.
 *
 * Panics on an out-of-range SPE slot index, or on an NSPE slot index
 * outside the shared queue bounds — either indicates a corrupted (or
 * deliberately spoofed) mailbox queue state.
 */
__STATIC_INLINE struct mailbox_reply_t *get_nspe_reply_addr(uint8_t idx)
{
    uint8_t ns_slot_idx;

    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        psa_panic();
    }

    /* ns_slot_idx originates from NSPE-writable state; re-validate against
     * both the compile-time slot count and the runtime ns_slot_count
     * before using it to index the shared array.
     */
    ns_slot_idx = spe_mailbox_queue.queue[idx].ns_slot_idx;
    if ((ns_slot_idx >= NUM_MAILBOX_QUEUE_SLOT) || (ns_slot_idx >= spe_mailbox_queue.ns_slot_count)) {
        psa_panic();
    }

    return &spe_mailbox_queue.ns_slots[ns_slot_idx].reply;
}
140
mailbox_direct_reply(uint8_t idx,uint32_t result)141 static void mailbox_direct_reply(uint8_t idx, uint32_t result)
142 {
143 struct mailbox_reply_t *reply_ptr;
144 uint32_t ret_result = result;
145
146 /* Copy outvec lengths back if necessary */
147 if ((vectors[idx].in_use) && (result == PSA_SUCCESS)) {
148 for (int i = 0; i < vectors[idx].out_len; i++) {
149 vectors[idx].original_out_vec[i].len = vectors[idx].out_vec[i].len;
150 }
151 }
152
153 vectors[idx].in_use = false;
154
155 /* Get reply address */
156 reply_ptr = get_nspe_reply_addr(idx);
157 spm_memcpy(&reply_ptr->return_val, &ret_result,
158 sizeof(reply_ptr->return_val));
159
160 mailbox_clean_queue_slot(idx);
161
162 /*
163 * Skip NSPE queue status update after single reply.
164 * Update NSPE queue status after all the mailbox messages are completed
165 */
166 }
167
/*
 * Validate the content of a mailbox message copied from shared memory.
 * Currently a stub that accepts every message.
 */
__STATIC_INLINE int32_t check_mailbox_msg(const struct mailbox_msg_t *msg)
{
    /*
     * TODO
     * Comprehensive check of mailbox message content can be implemented here.
     */
    (void)msg;
    return MAILBOX_SUCCESS;
}
177
/*
 * Validate the iovec parameters of a PSA-call mailbox message and copy the
 * NSPE invec/outvec arrays into the slot-private 'vectors[idx]' buffers,
 * so SPM never dereferences NSPE-owned vector arrays after this point.
 *
 * On success: marks vectors[idx] in use, records where outvec lengths must
 * be written back on completion, and flags the vectors as NS-supplied in
 * *control. Returns MAILBOX_INVAL_PARAMS on malformed vector parameters.
 */
static int local_copy_vects(const struct psa_client_params_t *params,
                            uint32_t idx,
                            uint32_t *control)
{
    size_t in_len, out_len;

    in_len = params->psa_call_params.in_len;
    out_len = params->psa_call_params.out_len;

    /* A NULL vector array is only acceptable together with a zero count */
    if (((params->psa_call_params.out_vec == NULL) && (out_len != 0)) ||
        ((params->psa_call_params.in_vec == NULL) && (in_len != 0))) {
        return MAILBOX_INVAL_PARAMS;
    }

    /* Individual and combined vector counts must fit the PSA limit */
    if ((in_len > PSA_MAX_IOVEC) ||
        (out_len > PSA_MAX_IOVEC) ||
        ((in_len + out_len) > PSA_MAX_IOVEC)) {
        return MAILBOX_INVAL_PARAMS;
    }

    /* Copy the used invec entries; zero the unused tail */
    for (unsigned int i = 0; i < PSA_MAX_IOVEC; i++) {
        if (i < in_len) {
            vectors[idx].in_vec[i] = params->psa_call_params.in_vec[i];
        } else {
            vectors[idx].in_vec[i].base = 0;
            vectors[idx].in_vec[i].len = 0;
        }
    }

    /* Copy the used outvec entries; zero the unused tail */
    for (unsigned int i = 0; i < PSA_MAX_IOVEC; i++) {
        if (i < out_len) {
            vectors[idx].out_vec[i] = params->psa_call_params.out_vec[i];
        } else {
            vectors[idx].out_vec[i].base = 0;
            vectors[idx].out_vec[i].len = 0;
        }
    }

    /* Record in the control word that the vectors originate from the NS agent */
    *control = PARAM_SET_NS_INVEC(*control);
    *control = PARAM_SET_NS_OUTVEC(*control);

    /* Remember where updated outvec lengths must be copied back on completion */
    vectors[idx].out_len = out_len;
    vectors[idx].original_out_vec = params->psa_call_params.out_vec;

    vectors[idx].in_use = true;
    return MAILBOX_SUCCESS;
}
225
/*
 * Passes the request from the mailbox message into SPM.
 *
 * idx indicates the SPE queue slot to use for any immediate reply.
 * If the call completes synchronously (or fails before being handed to
 * SPM), the reply is delivered immediately via mailbox_direct_reply()
 * and the corresponding bit is set in *reply_slots; otherwise the reply
 * is deferred until SPM invokes the RPC reply callback.
 *
 * Returns MAILBOX_SUCCESS when the message was handled (synchronously or
 * not), MAILBOX_INVAL_PARAMS for an unrecognised call type.
 */
static int32_t tfm_mailbox_dispatch(const struct mailbox_msg_t *msg_ptr,
                                    uint8_t idx,
                                    mailbox_queue_status_t *reply_slots)
{
    const struct psa_client_params_t *params = &msg_ptr->params;
    struct client_params_t client_params = {0};
    uint32_t control = PARAM_PACK(params->psa_call_params.type,
                                  params->psa_call_params.in_len,
                                  params->psa_call_params.out_len);
    int32_t client_id;
    psa_status_t psa_ret = PSA_ERROR_GENERIC_ERROR;
    mailbox_msg_handle_t *mb_msg_handle =
        &spe_mailbox_queue.queue[idx].msg_handle;
    int ret;

#if CONFIG_TFM_SPM_BACKEND_IPC == 1
    /* Assume asynchronous. Set to synchronous when an error happens. */
    bool sync = false;
#else
    /* Assume synchronous. */
    bool sync = true;
#endif

    SPM_ASSERT(params != NULL);

    switch (msg_ptr->call_type) {
    case MAILBOX_PSA_FRAMEWORK_VERSION:
        psa_ret = tfm_rpc_psa_framework_version();
        sync = true;
        break;

    case MAILBOX_PSA_VERSION:
        psa_ret = tfm_rpc_psa_version(params->psa_version_params.sid);
        sync = true;
        break;

    case MAILBOX_PSA_CALL:
        /* Snapshot the NSPE iovecs into slot-local storage first */
        ret = local_copy_vects(params, idx, &control);
        if (ret != MAILBOX_SUCCESS) {
            sync = true;
            psa_ret = PSA_ERROR_INVALID_ARGUMENT;
            break;
        }

        /* Map the NSPE-supplied client ID into the SPE client ID space */
        if (tfm_multi_core_hal_client_id_translate(CLIENT_ID_OWNER_MAGIC,
                                                   msg_ptr->client_id,
                                                   &client_id) != SPM_SUCCESS) {
            sync = true;
            psa_ret = PSA_ERROR_INVALID_ARGUMENT;
            break;
        }
        client_params.ns_client_id_stateless = client_id;
        client_params.p_invecs = vectors[idx].in_vec;
        client_params.p_outvecs = vectors[idx].out_vec;
        psa_ret = tfm_rpc_psa_call(params->psa_call_params.handle,
                                   control, &client_params, mb_msg_handle);
        if (psa_ret != PSA_SUCCESS) {
            /* SPM rejected the call: report the error synchronously */
            sync = true;
        }
        break;

/* Following cases are only needed by connection-based services */
#if CONFIG_TFM_CONNECTION_BASED_SERVICE_API == 1
    case MAILBOX_PSA_CONNECT:
        if (tfm_multi_core_hal_client_id_translate(CLIENT_ID_OWNER_MAGIC,
                                                   msg_ptr->client_id,
                                                   &client_id) != SPM_SUCCESS) {
            sync = true;
            psa_ret = PSA_ERROR_INVALID_ARGUMENT;
            break;
        }
        psa_ret = tfm_rpc_psa_connect(params->psa_connect_params.sid,
                                      params->psa_connect_params.version,
                                      client_id,
                                      mb_msg_handle);
        if (psa_ret != PSA_SUCCESS) {
            sync = true;
        }
        break;

    case MAILBOX_PSA_CLOSE:
        if (tfm_multi_core_hal_client_id_translate(CLIENT_ID_OWNER_MAGIC,
                                                   msg_ptr->client_id,
                                                   &client_id) != SPM_SUCCESS) {
            sync = true;
            psa_ret = PSA_ERROR_INVALID_ARGUMENT;
            break;
        }
        psa_ret = tfm_rpc_psa_close(params->psa_close_params.handle, client_id);
        if (psa_ret != PSA_SUCCESS) {
            sync = true;
        }
        break;
#endif /* CONFIG_TFM_CONNECTION_BASED_SERVICE_API */

    default:
        return MAILBOX_INVAL_PARAMS;
    }

    /* Any synchronous result should be returned immediately */
    if (sync) {
        *reply_slots |= (1 << idx);
        mailbox_direct_reply(idx, (uint32_t)psa_ret);
    }

    return MAILBOX_SUCCESS;
}
337
/*
 * Drain all pending NSPE mailbox messages.
 *
 * Snapshots the NSPE pend_slots bitmask inside the HAL critical section,
 * copies each pending message out of shared memory into the matching SPE
 * queue slot and dispatches it to SPM. Synchronous replies are collected
 * in reply_slots and published to the NSPE — with at most one peer
 * notification — after the whole batch has been processed.
 *
 * Returns MAILBOX_NO_PEND_EVENT when nothing was pending, otherwise
 * MAILBOX_SUCCESS.
 */
int32_t tfm_mailbox_handle_msg(void)
{
    uint8_t idx;
    mailbox_queue_status_t mask_bits, pend_slots, reply_slots = 0;
    struct mailbox_status_t *ns_status = spe_mailbox_queue.ns_status;
    struct mailbox_msg_t *msg_ptr;

    SPM_ASSERT(ns_status != NULL);

    tfm_mailbox_hal_enter_critical();

    pend_slots = get_nspe_queue_pend_status(ns_status);

    tfm_mailbox_hal_exit_critical();

    /* Check if NSPE mailbox did assert a PSA client call request */
    if (!pend_slots) {
        return MAILBOX_NO_PEND_EVENT;
    }

    for (idx = 0; idx < spe_mailbox_queue.ns_slot_count; idx++) {
        mask_bits = (1 << idx);
        /* Check if current NSPE mailbox queue slot is pending for handling */
        if (!(pend_slots & mask_bits)) {
            continue;
        }

        /*
         * TODO
         * The operations are simplified here. Use the SPE mailbox queue
         * slot with the same idx as that of the NSPE mailbox queue slot.
         * A more general implementation should dynamically search and
         * select an empty SPE mailbox queue slot.
         */
        clear_spe_queue_empty_status(idx);
        spe_mailbox_queue.queue[idx].ns_slot_idx = idx;

        /* Copy the message out of shared memory before validating it, so the
         * NSPE cannot change it between the check and the dispatch.
         */
        msg_ptr = &spe_mailbox_queue.queue[idx].msg;
        spm_memcpy(msg_ptr, &spe_mailbox_queue.ns_slots[idx].msg, sizeof(*msg_ptr));

        if (check_mailbox_msg(msg_ptr) != MAILBOX_SUCCESS) {
            mailbox_clean_queue_slot(idx);
            continue;
        }

        get_spe_mailbox_msg_handle(idx,
                                   &spe_mailbox_queue.queue[idx].msg_handle);

        if (tfm_mailbox_dispatch(msg_ptr, idx, &reply_slots) != MAILBOX_SUCCESS) {
            mailbox_clean_queue_slot(idx);
            continue;
        }
    }

    tfm_mailbox_hal_enter_critical();

    /* Clean the NSPE mailbox pending status. */
    clear_nspe_queue_pend_status(ns_status, pend_slots);

    /* Set the NSPE mailbox replied status */
    set_nspe_queue_replied_status(ns_status, reply_slots);

    tfm_mailbox_hal_exit_critical();

    /* Only interrupt the peer core when at least one reply was produced */
    if (reply_slots) {
        tfm_mailbox_hal_notify_peer();
    }

    return MAILBOX_SUCCESS;
}
408
/*
 * Deliver a deferred reply for the message identified by 'handle' back to
 * the NSPE, then update the NSPE replied status and notify the peer core.
 *
 * Returns MAILBOX_SUCCESS on delivery, MAILBOX_INVAL_PARAMS for a
 * malformed handle, or MAILBOX_NO_PEND_EVENT if the target slot holds no
 * outstanding message.
 */
int32_t tfm_mailbox_reply_msg(mailbox_msg_handle_t handle, int32_t reply)
{
    uint8_t idx;
    int32_t ret;
    struct mailbox_status_t *ns_status = spe_mailbox_queue.ns_status;

    SPM_ASSERT(ns_status != NULL);

    /*
     * If handle == MAILBOX_MSG_NULL_HANDLE, reply to the mailbox message
     * in the first slot.
     * When multiple ongoing PSA client calls from NSPE are supported,
     * additional check might be necessary to avoid spoofing the first slot.
     */
    if (handle == MAILBOX_MSG_NULL_HANDLE) {
        idx = 0;
    } else {
        ret = get_spe_mailbox_msg_idx(handle, &idx);
        if (ret != MAILBOX_SUCCESS) {
            return ret;
        }
    }

    /* An empty slot means there is no message awaiting this reply */
    if (get_spe_queue_empty_status(idx)) {
        return MAILBOX_NO_PEND_EVENT;
    }

    mailbox_direct_reply(idx, (uint32_t)reply);

    tfm_mailbox_hal_enter_critical();

    /* Set the NSPE mailbox replied status */
    set_nspe_queue_replied_status(ns_status, (1 << idx));

    tfm_mailbox_hal_exit_critical();

    tfm_mailbox_hal_notify_peer();

    return MAILBOX_SUCCESS;
}
449
/* RPC handle_req() callback: drain any pending NSPE mailbox messages. */
static void mailbox_handle_req(void)
{
    (void)tfm_mailbox_handle_msg();
}
455
456 /* RPC reply() callback */
mailbox_reply(const void * owner,int32_t ret)457 static void mailbox_reply(const void *owner, int32_t ret)
458 {
459 mailbox_msg_handle_t handle = MAILBOX_MSG_NULL_HANDLE;
460
461 /* If the owner is specified */
462 if (owner) {
463 handle = *((mailbox_msg_handle_t *)owner);
464 }
465
466 (void)tfm_mailbox_reply_msg(handle, ret);
467 }
468
/* Mailbox specific operations callback for TF-M RPC */
static const struct tfm_rpc_ops_t mailbox_rpc_ops = {
    .handle_req = mailbox_handle_req,   /* Service pending NSPE requests */
    .reply = mailbox_reply,             /* Deliver a completed result */
};
474
/*
 * Initialize the SPE mailbox: reset the queue, register the RPC callbacks
 * and run the platform-specific HAL initialization (which provides the
 * NSPE queue base address).
 *
 * Returns MAILBOX_SUCCESS, MAILBOX_CALLBACK_REG_ERROR if RPC registration
 * fails, or the HAL error code on platform init failure (in which case the
 * RPC callbacks are unregistered again).
 */
static int32_t tfm_mailbox_init(void)
{
    int32_t ret;

    spm_memset(&spe_mailbox_queue, 0, sizeof(spe_mailbox_queue));

    /*
     * Mark all NUM_MAILBOX_QUEUE_SLOT slots empty. Computed in two steps
     * ((2^(N-1) - 1) + 2^(N-1) == 2^N - 1) so the shift count never equals
     * the bit width of the type, which would be undefined behaviour when
     * NUM_MAILBOX_QUEUE_SLOT matches the width of 1UL.
     */
    spe_mailbox_queue.empty_slots =
            (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
    spe_mailbox_queue.empty_slots +=
            (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));

    /* Register RPC callbacks */
    ret = tfm_rpc_register_ops(&mailbox_rpc_ops);
    if (ret != TFM_RPC_SUCCESS) {
        return MAILBOX_CALLBACK_REG_ERROR;
    }

    /*
     * Platform specific initialization.
     * Initialize Inter-Processor Communication and achieve the base address of
     * NSPE mailbox queue
     */
    ret = tfm_mailbox_hal_init(&spe_mailbox_queue);
    if (ret != MAILBOX_SUCCESS) {
        tfm_rpc_unregister_ops();

        return ret;
    }

    return MAILBOX_SUCCESS;
}
506
/* Public entry point: bring up the inter-core mailbox communication channel. */
int32_t tfm_inter_core_comm_init(void)
{
    return tfm_mailbox_init();
}
511