1 /*
2 * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
3 * Copyright (c) 2024 Cypress Semiconductor Corporation (an Infineon company)
4 * or an affiliate of Cypress Semiconductor Corporation. All rights reserved.
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 *
8 */
9
10 #include <string.h>
11
12 #include "tfm_ns_mailbox.h"
13
14 /* Thread woken up flag */
15 #define NOT_WOKEN 0x0
16 #define WOKEN_UP 0x5C
17
18 /*
19 * The request contains the parameters which application threads share with
20 * NS mailbox thread.
21 */
22 struct ns_mailbox_req_t {
23 uint32_t call_type; /* PSA client call type */
24 const struct psa_client_params_t *params_ptr; /* Pointer to PSA client call
25 * parameters.
26 */
27 int32_t client_id; /* Optional client ID of the
28 * non-secure caller.
29 * It is required to identify
30 * the non-secure task when
31 * NSPE OS enforces non-secure
32 * task isolation
33 */
34 const void *owner; /* Handle of owner task. */
35 int32_t *reply; /* Address of reply value
36 * belonging to owner task.
37 */
38
39 uint8_t *woken_flag; /* Indicate that owner task
40 * has been or should be woken
41 * up, after the reply is
42 * received.
43 */
44 };
45
46 /* Message queue handle */
47 static void *msgq_handle = NULL;
48
49 /* The handle of the dedicated NS mailbox thread. */
50 static const void *ns_mailbox_thread_handle = NULL;
51
52 /* The pointer to NSPE mailbox queue */
53 static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;
54
/*
 * Mark all the slots in \p completed as empty again, making them available
 * for new mailbox messages. Called from ISR context after the replies for
 * those slots have been delivered to their owner tasks.
 *
 * NOTE(review): the read-modify-write of empty_slots here is not inside a
 * critical section — presumably safe because it runs in ISR context while
 * thread-side updates are spin-lock protected; confirm against the
 * platform's interrupt model.
 */
static inline void set_queue_slot_all_empty(mailbox_queue_status_t completed)
{
    mailbox_queue_ptr->empty_slots |= completed;
}
59
/*
 * Flag the owner task of queue slot \p idx as woken: write WOKEN_UP into the
 * owner's woken_flag so that mailbox_wait_reply() can distinguish a genuine
 * reply wake-up from spurious wake-ups.
 *
 * Out-of-range indexes are silently ignored.
 */
static inline void set_queue_slot_woken(uint8_t idx)
{
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return;
    }

    *mailbox_queue_ptr->slots_ns[idx].woken_flag = WOKEN_UP;
}
66
/*
 * Block until an empty mailbox queue slot is available, claim it and return
 * its index.
 *
 * \param[in] queue    Pointer to the NS mailbox queue.
 *
 * \return Index of the acquired slot. The bitmask scan below always finds a
 *         set bit because the loop only exits when status is non-zero, so
 *         the returned index is < NUM_MAILBOX_QUEUE_SLOT.
 */
static uint8_t acquire_empty_slot(struct ns_mailbox_queue_t *queue)
{
    uint8_t idx;
    mailbox_queue_status_t status;

    while (1) {
        /* Snapshot the empty-slot bitmask under the spin lock. */
        tfm_ns_mailbox_os_spin_lock();
        status = queue->empty_slots;
        tfm_ns_mailbox_os_spin_unlock();

        if (status) {
            break;
        }

        /* No empty slot */
        queue->is_full = true;
        /* DSB to make sure the thread sleeps after the flag is set */
        __DSB();

        /* Wait for an empty slot released by a completed mailbox message */
        tfm_ns_mailbox_os_wait_reply();
        queue->is_full = false;
    }

    /* Pick the lowest set bit of the snapshot as the slot to claim. */
    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        if (status & (1 << idx)) {
            break;
        }
    }

    /* Clear the slot's empty bit so no one else can acquire it. */
    tfm_ns_mailbox_os_spin_lock();
    clear_queue_slot_empty(queue, idx);
    tfm_ns_mailbox_os_spin_unlock();

    return idx;
}
103
mailbox_tx_client_call_msg(const struct ns_mailbox_req_t * req,uint8_t * slot_idx)104 static int32_t mailbox_tx_client_call_msg(const struct ns_mailbox_req_t *req,
105 uint8_t *slot_idx)
106 {
107 struct mailbox_msg_t *msg_ptr;
108 struct mailbox_reply_t *reply_ptr;
109 uint8_t idx = NUM_MAILBOX_QUEUE_SLOT;
110
111 idx = acquire_empty_slot(mailbox_queue_ptr);
112 if (idx == NUM_MAILBOX_QUEUE_SLOT) {
113 return MAILBOX_QUEUE_FULL;
114 }
115
116 #ifdef TFM_MULTI_CORE_TEST
117 tfm_ns_mailbox_tx_stats_update();
118 #endif
119
120 /* Fill the mailbox message */
121 msg_ptr = &mailbox_queue_ptr->queue[idx].msg;
122 msg_ptr->call_type = req->call_type;
123 memcpy(&msg_ptr->params, req->params_ptr, sizeof(msg_ptr->params));
124 msg_ptr->client_id = req->client_id;
125
126 /* Prepare the reply structure */
127 reply_ptr = &mailbox_queue_ptr->queue[idx].reply;
128 reply_ptr->owner = req->owner;
129 reply_ptr->reply = req->reply;
130 reply_ptr->woken_flag = req->woken_flag;
131
132 /*
133 * Memory check can be added here to prevent a malicious application
134 * from providing addresses of other applications or privileged area.
135 */
136
137 tfm_ns_mailbox_hal_enter_critical();
138 set_queue_slot_pend(mailbox_queue_ptr, idx);
139 tfm_ns_mailbox_hal_exit_critical();
140
141 tfm_ns_mailbox_hal_notify_peer();
142
143 if (slot_idx) {
144 *slot_idx = idx;
145 }
146
147 return MAILBOX_SUCCESS;
148 }
149
/*
 * Write the return value of slot \p idx back to the owner task's reply
 * location. Runs in ISR context; a NULL reply address is skipped.
 */
static inline void ns_mailbox_set_reply_isr(uint8_t idx)
{
    int32_t *dest = mailbox_queue_ptr->queue[idx].reply.reply;

    if (!dest) {
        return;
    }

    *dest = mailbox_queue_ptr->queue[idx].reply.return_val;
}
158
mailbox_wait_reply(const struct ns_mailbox_req_t * req)159 static int32_t mailbox_wait_reply(const struct ns_mailbox_req_t *req)
160 {
161 while (1) {
162 /*
163 * Check the completed flag to make sure that the current thread is
164 * woken up by reply event, rather than other events.
165 */
166 if (*req->woken_flag == WOKEN_UP) {
167 break;
168 }
169
170 /* Woken up from sleep */
171 tfm_ns_mailbox_os_wait_reply();
172 }
173
174 return MAILBOX_SUCCESS;
175 }
176
/*
 * Issue a PSA client call to SPE through the NS mailbox thread.
 *
 * Packs the call into a request, hands it to the dedicated mailbox thread
 * via the message queue, then blocks the calling task until the reply is
 * written back.
 *
 * \param[in]  call_type   PSA client call type.
 * \param[in]  params      PSA client call parameters. Must not be NULL.
 * \param[in]  client_id   Optional client ID of the non-secure caller.
 * \param[out] reply       Location for the reply value. Must not be NULL.
 *
 * \return MAILBOX_SUCCESS on success, MAILBOX_INIT_ERROR if the mailbox is
 *         not initialized, MAILBOX_INVAL_PARAMS on NULL arguments, or an
 *         error code from the message queue send.
 */
int32_t tfm_ns_mailbox_client_call(uint32_t call_type,
                                   const struct psa_client_params_t *params,
                                   int32_t client_id,
                                   int32_t *reply)
{
    uint8_t woken_flag = NOT_WOKEN;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    if (!params || !reply) {
        return MAILBOX_INVAL_PARAMS;
    }

    /* woken_flag lives on this task's stack; the reply ISR writes it. */
    struct ns_mailbox_req_t req = {
        .call_type  = call_type,
        .params_ptr = params,
        .client_id  = client_id,
        .owner      = tfm_ns_mailbox_os_get_task_handle(),
        .reply      = reply,
        .woken_flag = &woken_flag,
    };

    ret = tfm_ns_mailbox_os_mq_send(msgq_handle, &req);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    return mailbox_wait_reply(&req);
}
210
tfm_ns_mailbox_thread_runner(void * args)211 void tfm_ns_mailbox_thread_runner(void *args)
212 {
213 struct ns_mailbox_req_t req;
214 int32_t ret;
215
216 (void)args;
217
218 ns_mailbox_thread_handle = tfm_ns_mailbox_os_get_task_handle();
219
220 while (1) {
221 ret = tfm_ns_mailbox_os_mq_receive(msgq_handle, &req);
222 if (ret != MAILBOX_SUCCESS) {
223 continue;
224 }
225
226 /*
227 * Invalid client address. However, the pointer was already
228 * checked previously and therefore just simply ignore this
229 * client call request.
230 */
231 if (!req.params_ptr || !req.reply || !req.woken_flag) {
232 continue;
233 }
234
235 mailbox_tx_client_call_msg(&req, NULL);
236 }
237 }
238
/*
 * ISR handler for mailbox reply events from SPE.
 *
 * Collects all replied slots, writes the return values back to the owner
 * tasks, wakes those tasks, recycles the slots as empty, and finally wakes
 * the NS mailbox thread if it was blocked waiting for a free slot.
 *
 * \return MAILBOX_SUCCESS on success, MAILBOX_INIT_ERROR if the mailbox is
 *         not initialized, MAILBOX_NO_PEND_EVENT if no reply was pending.
 */
int32_t tfm_ns_mailbox_wake_reply_owner_isr(void)
{
    uint8_t idx;
    const void *task_handle;
    mailbox_queue_status_t replied_status, complete_slots = 0x0;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    /* Atomically fetch-and-clear the replied bitmask shared with SPE. */
    tfm_ns_mailbox_hal_enter_critical_isr();
    replied_status = clear_queue_slot_all_replied(mailbox_queue_ptr);
    tfm_ns_mailbox_hal_exit_critical_isr();

    if (!replied_status) {
        return MAILBOX_NO_PEND_EVENT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        /*
         * The reply has already received from SPE mailbox but
         * the wake-up signal is not sent yet.
         */
        if (!(replied_status & (0x1UL << idx))) {
            continue;
        }

        /*
         * Write back the return result.
         * When TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD is enabled, a reply is
         * returned inside ns_mailbox_set_reply_isr().
         * When TFM_MULTI_CORE_NS_OS_MAILBOX_THREAD is disabled, a reply is
         * returned inside mailbox_rx_client_reply(). ns_mailbox_set_reply_isr()
         * is defined as dummy function.
         */
        ns_mailbox_set_reply_isr(idx);

        /* Wake up the owner of this mailbox message */
        set_queue_slot_woken(idx);

        task_handle = mailbox_queue_ptr->queue[idx].reply.owner;
        if (task_handle) {
            tfm_ns_mailbox_os_wake_task_isr(task_handle);
        }

        /* Track the processed slots so they can be recycled in one go. */
        complete_slots |= (1UL << idx);

        /* Stop scanning early once every replied slot has been handled. */
        replied_status &= ~(0x1UL << idx);
        if (!replied_status) {
            break;
        }
    }

    set_queue_slot_all_empty(complete_slots);

    /*
     * Wake up the NS mailbox thread in case it is waiting for
     * empty slots.
     */
    if (mailbox_queue_ptr->is_full) {
        if (ns_mailbox_thread_handle) {
            tfm_ns_mailbox_os_wake_task_isr(ns_mailbox_thread_handle);
        }
    }

    return MAILBOX_SUCCESS;
}
306
mailbox_req_queue_init(uint8_t queue_depth)307 static inline int32_t mailbox_req_queue_init(uint8_t queue_depth)
308 {
309 msgq_handle = tfm_ns_mailbox_os_mq_create(sizeof(struct ns_mailbox_req_t),
310 queue_depth);
311 if (!msgq_handle) {
312 return MAILBOX_GENERIC_ERROR;
313 }
314
315 return MAILBOX_SUCCESS;
316 }
317
tfm_ns_mailbox_init(struct ns_mailbox_queue_t * queue)318 int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
319 {
320 int32_t ret;
321
322 if (!queue) {
323 return MAILBOX_INVAL_PARAMS;
324 }
325
326 /*
327 * Further verification of mailbox queue address may be required according
328 * to non-secure memory assignment.
329 */
330
331 memset(queue, 0, sizeof(*queue));
332
333 /* Initialize empty bitmask */
334 queue->empty_slots =
335 (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
336 queue->empty_slots +=
337 (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));
338
339 mailbox_queue_ptr = queue;
340
341 /* Platform specific initialization. */
342 ret = tfm_ns_mailbox_hal_init(queue);
343 if (ret != MAILBOX_SUCCESS) {
344 return ret;
345 }
346
347 ret = mailbox_req_queue_init(NUM_MAILBOX_QUEUE_SLOT);
348
349 #ifdef TFM_MULTI_CORE_TEST
350 tfm_ns_mailbox_tx_stats_init(queue);
351 #endif
352
353 return ret;
354 }
355