/*
 * Copyright (c) 2019-2024, Arm Limited. All rights reserved.
 * Copyright (c) 2021-2024 Cypress Semiconductor Corporation (an Infineon company)
 * or an affiliate of Cypress Semiconductor Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdbool.h>
#include <stdint.h>

#include "cmsis_compiler.h"

#include "async.h"
#include "config_impl.h"
#include "internal_status_code.h"
#include "psa/error.h"
#include "utilities.h"
#include "private/assert.h"
#include "tfm_arch.h"
#include "thread.h"
#include "tfm_psa_call_pack.h"
#include "tfm_spe_mailbox.h"
#include "tfm_rpc.h"
#include "tfm_hal_multi_core.h"
#include "tfm_multi_core.h"
#include "ffm/mailbox_agent_api.h"

static struct secure_mailbox_queue_t spe_mailbox_queue;

/*
 * Local copies of the invecs and outvecs associated with each mailbox
 * message while it is being processed. original_out_vec points back at the
 * NSPE-supplied outvecs so that the lengths written by the service can be
 * copied back when the reply is sent.
 */
struct vectors {
    psa_invec in_vec[PSA_MAX_IOVEC];
    psa_outvec out_vec[PSA_MAX_IOVEC];
    psa_outvec *original_out_vec;
    size_t out_len;
    bool in_use;
};
static struct vectors vectors[NUM_MAILBOX_QUEUE_SLOT] = {0};

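/*
 * Each bit i of empty_slots records whether SPE queue slot i is free.
 * The helpers below keep this bookkeeping consistent with the slot
 * contents.
 */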
__STATIC_INLINE void set_spe_queue_empty_status(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        spe_mailbox_queue.empty_slots |= (1 << idx);
    }
}

__STATIC_INLINE void clear_spe_queue_empty_status(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        spe_mailbox_queue.empty_slots &= ~(1 << idx);
    }
}

__STATIC_INLINE bool get_spe_queue_empty_status(uint8_t idx)
{
    if ((idx < NUM_MAILBOX_QUEUE_SLOT) &&
        (spe_mailbox_queue.empty_slots & (1 << idx))) {
        return true;
    }

    return false;
}

__STATIC_INLINE mailbox_queue_status_t get_nspe_queue_pend_status(
                                    const struct mailbox_status_t *ns_status)
{
    return ns_status->pend_slots;
}

__STATIC_INLINE void set_nspe_queue_replied_status(
                                            struct mailbox_status_t *ns_status,
                                            mailbox_queue_status_t mask)
{
    ns_status->replied_slots |= mask;
}

__STATIC_INLINE void clear_nspe_queue_pend_status(
                                            struct mailbox_status_t *ns_status,
                                            mailbox_queue_status_t mask)
{
    ns_status->pend_slots &= ~mask;
}

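/*
 * A message handle is simply the slot index plus one, so valid handles are
 * non-zero and distinguishable from MAILBOX_MSG_NULL_HANDLE (assumed to be
 * zero).
 */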
__STATIC_INLINE int32_t get_spe_mailbox_msg_handle(uint8_t idx,
                                                   mailbox_msg_handle_t *handle)
{
    if ((idx >= NUM_MAILBOX_QUEUE_SLOT) || !handle) {
        return MAILBOX_INVAL_PARAMS;
    }

    *handle = (mailbox_msg_handle_t)(idx + 1);

    return MAILBOX_SUCCESS;
}

__STATIC_INLINE int32_t get_spe_mailbox_msg_idx(mailbox_msg_handle_t handle,
                                                uint8_t *idx)
{
    if ((handle == MAILBOX_MSG_NULL_HANDLE) || !idx) {
        return MAILBOX_INVAL_PARAMS;
    }

    *idx = (uint8_t)(handle - 1);

    return MAILBOX_SUCCESS;
}

static void mailbox_clean_queue_slot(uint8_t idx)
{
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return;
    }

    spm_memset(&spe_mailbox_queue.queue[idx], 0,
               sizeof(spe_mailbox_queue.queue[idx]));
    set_spe_queue_empty_status(idx);
}

__STATIC_INLINE struct mailbox_reply_t *get_nspe_reply_addr(uint8_t idx)
{
    uint8_t ns_slot_idx;

    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        psa_panic();
    }

    ns_slot_idx = spe_mailbox_queue.queue[idx].ns_slot_idx;
    if ((ns_slot_idx >= NUM_MAILBOX_QUEUE_SLOT) ||
        (ns_slot_idx >= spe_mailbox_queue.ns_slot_count)) {
        psa_panic();
    }

    return &spe_mailbox_queue.ns_slots[ns_slot_idx].reply;
}

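/*
 * Complete a mailbox message in place: copy any updated outvec lengths back
 * to the NSPE outvecs, write the return value into the NSPE reply
 * structure and release the SPE queue slot.
 */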
static void mailbox_direct_reply(uint8_t idx, uint32_t result)
{
    struct mailbox_reply_t *reply_ptr;
    uint32_t ret_result = result;

    /* Copy outvec lengths back if necessary */
    if (vectors[idx].in_use) {
        for (int i = 0; i < vectors[idx].out_len; i++) {
            vectors[idx].original_out_vec[i].len = vectors[idx].out_vec[i].len;
        }
        vectors[idx].in_use = false;
    }

    /* Get reply address */
    reply_ptr = get_nspe_reply_addr(idx);
    spm_memcpy(&reply_ptr->return_val, &ret_result,
               sizeof(reply_ptr->return_val));

    mailbox_clean_queue_slot(idx);

    /*
     * Skip the NSPE queue status update after a single reply.
     * The NSPE queue status is updated once all the mailbox messages have
     * been completed.
     */
}

__STATIC_INLINE int32_t check_mailbox_msg(const struct mailbox_msg_t *msg)
{
    /*
     * TODO
     * A comprehensive check of the mailbox message content can be
     * implemented here.
     */
    (void)msg;
    return MAILBOX_SUCCESS;
}

/* Passes the request from the mailbox message into SPM.
 * idx indicates the slot to use for any immediate reply.
 * If it queues the reply immediately, updates reply_slots accordingly.
 */
static int32_t tfm_mailbox_dispatch(const struct mailbox_msg_t *msg_ptr,
                                    uint8_t idx,
                                    mailbox_queue_status_t *reply_slots)
{
    const struct psa_client_params_t *params = &msg_ptr->params;
    struct client_params_t client_params = {0};
    uint32_t control = PARAM_PACK(params->psa_call_params.type,
                                  params->psa_call_params.in_len,
                                  params->psa_call_params.out_len);
    int32_t client_id;
    psa_status_t psa_ret = PSA_ERROR_GENERIC_ERROR;
    mailbox_msg_handle_t *mb_msg_handle =
        &spe_mailbox_queue.queue[idx].msg_handle;

#if CONFIG_TFM_SPM_BACKEND_IPC == 1
    /* Assume asynchronous. Set to synchronous when an error happens. */
    bool sync = false;
#else
    /* Assume synchronous. */
    bool sync = true;
#endif
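
    /*
     * For asynchronous messages the reply is not sent from this function;
     * it is issued later through the RPC reply() callback (mailbox_reply
     * below), once the service has processed the message.
     */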

    SPM_ASSERT(params != NULL);

    switch (msg_ptr->call_type) {
    case MAILBOX_PSA_FRAMEWORK_VERSION:
        psa_ret = tfm_rpc_psa_framework_version();
        sync = true;
        break;

    case MAILBOX_PSA_VERSION:
        psa_ret = tfm_rpc_psa_version(params->psa_version_params.sid);
        sync = true;
        break;

    case MAILBOX_PSA_CALL:
        /* TODO check vector validity before use */
        /* Make a local copy of the invecs and outvecs */
        vectors[idx].in_use = true;
        vectors[idx].out_len = params->psa_call_params.out_len;
        vectors[idx].original_out_vec = params->psa_call_params.out_vec;
        for (int i = 0; i < PSA_MAX_IOVEC; i++) {
            if (i < params->psa_call_params.in_len) {
                vectors[idx].in_vec[i] = params->psa_call_params.in_vec[i];
            } else {
                vectors[idx].in_vec[i].base = 0;
                vectors[idx].in_vec[i].len = 0;
            }
        }

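        /*
         * The NS_INVEC/NS_OUTVEC bits in the control word mark the vectors
         * as supplied by the non-secure agent (an assumption based on the
         * macro names), so SPM can apply the appropriate memory checks to
         * the buffers they describe.
         */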
        control = PARAM_SET_NS_INVEC(control);

        for (int i = 0; i < PSA_MAX_IOVEC; i++) {
            if (i < params->psa_call_params.out_len) {
                vectors[idx].out_vec[i] = params->psa_call_params.out_vec[i];
            } else {
                vectors[idx].out_vec[i].base = 0;
                vectors[idx].out_vec[i].len = 0;
            }
        }

        control = PARAM_SET_NS_OUTVEC(control);

        if (tfm_multi_core_hal_client_id_translate(CLIENT_ID_OWNER_MAGIC,
                                                   msg_ptr->client_id,
                                                   &client_id) != SPM_SUCCESS) {
            sync = true;
            psa_ret = PSA_ERROR_INVALID_ARGUMENT;
            break;
        }
        client_params.ns_client_id_stateless = client_id;
        client_params.p_invecs = vectors[idx].in_vec;
        client_params.p_outvecs = vectors[idx].out_vec;
        psa_ret = tfm_rpc_psa_call(params->psa_call_params.handle,
                                   control, &client_params, mb_msg_handle);
        if (psa_ret != PSA_SUCCESS) {
            sync = true;
        }
        break;

/* The following cases are only needed by connection-based services */
#if CONFIG_TFM_CONNECTION_BASED_SERVICE_API == 1
    case MAILBOX_PSA_CONNECT:
        if (tfm_multi_core_hal_client_id_translate(CLIENT_ID_OWNER_MAGIC,
                                                   msg_ptr->client_id,
                                                   &client_id) != SPM_SUCCESS) {
            sync = true;
            psa_ret = PSA_ERROR_INVALID_ARGUMENT;
            break;
        }
        psa_ret = tfm_rpc_psa_connect(params->psa_connect_params.sid,
                                      params->psa_connect_params.version,
                                      client_id,
                                      mb_msg_handle);
        if (psa_ret != PSA_SUCCESS) {
            sync = true;
        }
        break;

    case MAILBOX_PSA_CLOSE:
        if (tfm_multi_core_hal_client_id_translate(CLIENT_ID_OWNER_MAGIC,
                                                   msg_ptr->client_id,
                                                   &client_id) != SPM_SUCCESS) {
            sync = true;
            psa_ret = PSA_ERROR_INVALID_ARGUMENT;
            break;
        }
        psa_ret = tfm_rpc_psa_close(params->psa_close_params.handle, client_id);
        if (psa_ret != PSA_SUCCESS) {
            sync = true;
        }
        break;
#endif /* CONFIG_TFM_CONNECTION_BASED_SERVICE_API */

    default:
        return MAILBOX_INVAL_PARAMS;
    }

    /* Any synchronous result should be returned immediately */
    if (sync) {
        *reply_slots |= (1 << idx);
        mailbox_direct_reply(idx, (uint32_t)psa_ret);
    }

    return MAILBOX_SUCCESS;
}

int32_t tfm_mailbox_handle_msg(void)
{
    uint8_t idx;
    mailbox_queue_status_t mask_bits, pend_slots, reply_slots = 0;
    struct mailbox_status_t *ns_status = spe_mailbox_queue.ns_status;
    struct mailbox_msg_t *msg_ptr;

    SPM_ASSERT(ns_status != NULL);

    tfm_mailbox_hal_enter_critical();

    pend_slots = get_nspe_queue_pend_status(ns_status);

    tfm_mailbox_hal_exit_critical();

    /* Check whether the NSPE mailbox has asserted any PSA client call requests */
    if (!pend_slots) {
        return MAILBOX_NO_PEND_EVENT;
    }

    for (idx = 0; idx < spe_mailbox_queue.ns_slot_count; idx++) {
        mask_bits = (1 << idx);
        /* Check if the current NSPE mailbox queue slot is pending handling */
        if (!(pend_slots & mask_bits)) {
            continue;
        }

        /*
         * TODO
         * The operations are simplified here. Use the SPE mailbox queue
         * slot with the same idx as that of the NSPE mailbox queue slot.
         * A more general implementation should dynamically search for and
         * select an empty SPE mailbox queue slot.
         */
        clear_spe_queue_empty_status(idx);
        spe_mailbox_queue.queue[idx].ns_slot_idx = idx;

        msg_ptr = &spe_mailbox_queue.queue[idx].msg;
        spm_memcpy(msg_ptr, &spe_mailbox_queue.ns_slots[idx].msg, sizeof(*msg_ptr));

        if (check_mailbox_msg(msg_ptr) != MAILBOX_SUCCESS) {
            mailbox_clean_queue_slot(idx);
            continue;
        }

        get_spe_mailbox_msg_handle(idx,
                                   &spe_mailbox_queue.queue[idx].msg_handle);

        if (tfm_mailbox_dispatch(msg_ptr, idx, &reply_slots) != MAILBOX_SUCCESS) {
            mailbox_clean_queue_slot(idx);
            continue;
        }
    }

    tfm_mailbox_hal_enter_critical();

    /* Clear the NSPE mailbox pending status. */
    clear_nspe_queue_pend_status(ns_status, pend_slots);

    /* Set the NSPE mailbox replied status */
    set_nspe_queue_replied_status(ns_status, reply_slots);

    tfm_mailbox_hal_exit_critical();

    if (reply_slots) {
        tfm_mailbox_hal_notify_peer();
    }

    return MAILBOX_SUCCESS;
}

int32_t tfm_mailbox_reply_msg(mailbox_msg_handle_t handle, int32_t reply)
{
    uint8_t idx;
    int32_t ret;
    struct mailbox_status_t *ns_status = spe_mailbox_queue.ns_status;

    SPM_ASSERT(ns_status != NULL);

    /*
     * If handle == MAILBOX_MSG_NULL_HANDLE, reply to the mailbox message
     * in the first slot.
     * When multiple ongoing PSA client calls from NSPE are supported,
     * additional checks might be necessary to avoid spoofing the first slot.
     */
    if (handle == MAILBOX_MSG_NULL_HANDLE) {
        idx = 0;
    } else {
        ret = get_spe_mailbox_msg_idx(handle, &idx);
        if (ret != MAILBOX_SUCCESS) {
            return ret;
        }
    }

    if (get_spe_queue_empty_status(idx)) {
        return MAILBOX_NO_PEND_EVENT;
    }

    mailbox_direct_reply(idx, (uint32_t)reply);

    tfm_mailbox_hal_enter_critical();

    /* Set the NSPE mailbox replied status */
    set_nspe_queue_replied_status(ns_status, (1 << idx));

    tfm_mailbox_hal_exit_critical();

    tfm_mailbox_hal_notify_peer();

    return MAILBOX_SUCCESS;
}

/* RPC handle_req() callback */
static void mailbox_handle_req(void)
{
    (void)tfm_mailbox_handle_msg();
}

/* RPC reply() callback */
static void mailbox_reply(const void *owner, int32_t ret)
{
    mailbox_msg_handle_t handle = MAILBOX_MSG_NULL_HANDLE;

    /* If the owner is specified */
    if (owner) {
        handle = *((mailbox_msg_handle_t *)owner);
    }

    (void)tfm_mailbox_reply_msg(handle, ret);
}

/* Mailbox specific operations callback for TF-M RPC */
static const struct tfm_rpc_ops_t mailbox_rpc_ops = {
    .handle_req = mailbox_handle_req,
    .reply      = mailbox_reply,
};

static int32_t tfm_mailbox_init(void)
{
    int32_t ret;

    spm_memset(&spe_mailbox_queue, 0, sizeof(spe_mailbox_queue));

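    /*
     * Mark every slot empty. The all-ones mask is built in two steps to
     * avoid shifting by NUM_MAILBOX_QUEUE_SLOT itself, which would be
     * undefined behaviour when the slot count equals the bit width of the
     * shifted type.
     */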
    spe_mailbox_queue.empty_slots =
            (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
    spe_mailbox_queue.empty_slots +=
            (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));

    /* Register RPC callbacks */
    ret = tfm_rpc_register_ops(&mailbox_rpc_ops);
    if (ret != TFM_RPC_SUCCESS) {
        return MAILBOX_CALLBACK_REG_ERROR;
    }

    /*
     * Platform-specific initialization.
     * Initialize Inter-Processor Communication and obtain the base address
     * of the NSPE mailbox queue.
     */
    ret = tfm_mailbox_hal_init(&spe_mailbox_queue);
    if (ret != MAILBOX_SUCCESS) {
        tfm_rpc_unregister_ops();

        return ret;
    }

    return MAILBOX_SUCCESS;
}

int32_t tfm_inter_core_comm_init(void)
{
    return tfm_mailbox_init();
}