/*
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 * Copyright (c) 2024 Cypress Semiconductor Corporation (an Infineon company)
 * or an affiliate of Cypress Semiconductor Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <string.h>

#include "tfm_ns_mailbox.h"
#ifdef TFM_MULTI_CORE_TEST
#include "tfm_ns_mailbox_test.h"
#endif

/* The pointer to NSPE mailbox queue */
static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;

static int32_t mailbox_wait_reply(uint8_t idx);

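/*
 * Helpers for the per-slot status bits shared with the SPE mailbox: "empty"
 * marks a slot as free for a new request, "woken" marks that the owner task
 * has been signalled, and "replied" marks that SPE has returned a result.
 */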
static inline void set_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots |= (1UL << idx);
    }
}

static inline void set_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->slots_ns[idx].is_woken = true;
    }
}

static inline bool is_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->slots_ns[idx].is_woken;
    }

    return false;
}

static inline void clear_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->slots_ns[idx].is_woken = false;
    }
}

#ifndef TFM_MULTI_CORE_NS_OS
static inline void clear_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->status.replied_slots &= ~(1UL << idx);
    }
}

static inline bool is_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->status.replied_slots & (1UL << idx);
    }

    return false;
}
#endif /* !defined TFM_MULTI_CORE_NS_OS */

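/*
 * Scan the empty-slot bitmask for the first free slot, mark it as occupied
 * and return its index. Returns NUM_MAILBOX_QUEUE_SLOT if the queue is full.
 */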
static uint8_t acquire_empty_slot(struct ns_mailbox_queue_t *queue)
{
    uint8_t idx;
    mailbox_queue_status_t status;

    tfm_ns_mailbox_os_spin_lock();
    status = queue->empty_slots;
    tfm_ns_mailbox_os_spin_unlock();

    if (!status) {
        /* No empty slot */
        return NUM_MAILBOX_QUEUE_SLOT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        if (status & (1UL << idx)) {
            break;
        }
    }

    tfm_ns_mailbox_os_spin_lock();
    clear_queue_slot_empty(queue, idx);
    tfm_ns_mailbox_os_spin_unlock();

    return idx;
}

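/* Record the owner task handle of the message occupying the given slot. */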
static void set_msg_owner(uint8_t idx, const void *owner)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->slots_ns[idx].owner = owner;
    }
}

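/*
 * Fill a free slot with the client request, record the calling task as the
 * slot owner, mark the slot as pending and notify the peer (SPE).
 */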
static int32_t mailbox_tx_client_req(uint32_t call_type,
                                     const struct psa_client_params_t *params,
                                     int32_t client_id,
                                     uint8_t *slot_idx)
{
    uint8_t idx;
    struct mailbox_msg_t *msg_ptr;
    const void *task_handle;

    idx = acquire_empty_slot(mailbox_queue_ptr);
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_QUEUE_FULL;
    }

#ifdef TFM_MULTI_CORE_TEST
    tfm_ns_mailbox_tx_stats_update();
#endif

    /* Fill the mailbox message */
    msg_ptr = &mailbox_queue_ptr->slots[idx].msg;

    msg_ptr->call_type = call_type;
    memcpy(&msg_ptr->params, params, sizeof(msg_ptr->params));
    msg_ptr->client_id = client_id;

    /*
     * Fetch the current task handle. The task will be woken up according to
     * the handle value set in the owner field.
     */
    task_handle = tfm_ns_mailbox_os_get_task_handle();
    set_msg_owner(idx, task_handle);

    tfm_ns_mailbox_hal_enter_critical();
    set_queue_slot_pend(mailbox_queue_ptr, idx);
    tfm_ns_mailbox_hal_exit_critical();

    tfm_ns_mailbox_hal_notify_peer();

    *slot_idx = idx;

    return MAILBOX_SUCCESS;
}

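/*
 * Fetch the return value of a completed request and release its slot back
 * to the empty pool.
 */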
static int32_t mailbox_rx_client_reply(uint8_t idx, int32_t *reply)
{
    *reply = mailbox_queue_ptr->slots[idx].reply.return_val;

    /* Clear the owner field */
    set_msg_owner(idx, NULL);

    tfm_ns_mailbox_os_spin_lock();
    clear_queue_slot_woken(idx);
    /*
     * Make sure that the empty flag is set after all the other status flags
     * are re-initialized.
     */
    set_queue_slot_empty(idx);
    tfm_ns_mailbox_os_spin_unlock();

    return MAILBOX_SUCCESS;
}

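/*
 * Send a client request to SPE and block until the reply arrives.
 *
 * Illustrative use from a PSA API wrapper. The constant names below
 * (MAILBOX_PSA_FRAMEWORK_VERSION, NON_SECURE_CLIENT_ID, PSA_VERSION_NONE)
 * are assumptions based on the TF-M mailbox and PSA client headers and may
 * differ between versions:
 *
 *     struct psa_client_params_t params;
 *     int32_t version;
 *
 *     if (tfm_ns_mailbox_client_call(MAILBOX_PSA_FRAMEWORK_VERSION, &params,
 *                                    NON_SECURE_CLIENT_ID,
 *                                    &version) != MAILBOX_SUCCESS) {
 *         version = PSA_VERSION_NONE;
 *     }
 */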
int32_t tfm_ns_mailbox_client_call(uint32_t call_type,
                                   const struct psa_client_params_t *params,
                                   int32_t client_id,
                                   int32_t *reply)
{
    uint8_t slot_idx = NUM_MAILBOX_QUEUE_SLOT;
    int32_t reply_buf = 0x0;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    if (!params || !reply) {
        return MAILBOX_INVAL_PARAMS;
    }

    if (tfm_ns_mailbox_os_lock_acquire() != MAILBOX_SUCCESS) {
        return MAILBOX_QUEUE_FULL;
    }

    /* An SVCall is required if the NS mailbox runs in privileged mode. */
    ret = mailbox_tx_client_req(call_type, params, client_id, &slot_idx);
    if (ret != MAILBOX_SUCCESS) {
        goto exit;
    }

    mailbox_wait_reply(slot_idx);

    /* An SVCall is required if the NS mailbox runs in privileged mode. */
    ret = mailbox_rx_client_reply(slot_idx, &reply_buf);
    if (ret == MAILBOX_SUCCESS) {
        *reply = reply_buf;
    }

exit:
    if (tfm_ns_mailbox_os_lock_release() != MAILBOX_SUCCESS) {
        return MAILBOX_GENERIC_ERROR;
    }

    return ret;
}

#ifdef TFM_MULTI_CORE_NS_OS
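/*
 * Called from the mailbox interrupt handler: collect all the slots that SPE
 * has replied to, flag each of them as woken and wake up its owner task.
 */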
int32_t tfm_ns_mailbox_wake_reply_owner_isr(void)
{
    uint8_t idx;
    mailbox_queue_status_t replied_status;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    tfm_ns_mailbox_hal_enter_critical_isr();
    replied_status = clear_queue_slot_all_replied(mailbox_queue_ptr);
    tfm_ns_mailbox_hal_exit_critical_isr();

    if (!replied_status) {
        return MAILBOX_NO_PEND_EVENT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        /*
         * The reply has already been received from the SPE mailbox but the
         * wake-up signal has not been sent yet.
         */
        if (!(replied_status & (0x1UL << idx))) {
            continue;
        }

        /* Set woken-up flag */
        tfm_ns_mailbox_hal_enter_critical_isr();
        set_queue_slot_woken(idx);
        tfm_ns_mailbox_hal_exit_critical_isr();

        tfm_ns_mailbox_os_wake_task_isr(
                                    mailbox_queue_ptr->slots_ns[idx].owner);

        replied_status &= ~(0x1UL << idx);
        if (!replied_status) {
            break;
        }
    }

    return MAILBOX_SUCCESS;
}

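/*
 * With an NS OS, the reply signal is the per-slot "woken" flag set by the
 * interrupt handler above; consume it under the spinlock.
 */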
static inline bool mailbox_wait_reply_signal(uint8_t idx)
{
    bool is_set = false;

    tfm_ns_mailbox_os_spin_lock();

    if (is_queue_slot_woken(idx)) {
        clear_queue_slot_woken(idx);
        is_set = true;
    }

    tfm_ns_mailbox_os_spin_unlock();

    return is_set;
}
#else /* TFM_MULTI_CORE_NS_OS */
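/*
 * Without an NS OS, check the "replied" bit written by SPE directly and
 * clear it once observed.
 */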
static inline bool mailbox_wait_reply_signal(uint8_t idx)
{
    bool is_set = false;

    tfm_ns_mailbox_hal_enter_critical();

    if (is_queue_slot_replied(idx)) {
        clear_queue_slot_replied(idx);
        is_set = true;
    }

    tfm_ns_mailbox_hal_exit_critical();

    return is_set;
}
#endif /* TFM_MULTI_CORE_NS_OS */

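/*
 * Block until the reply for the given slot arrives. The loop tolerates
 * spurious wake-ups: the thread goes back to sleep unless the reply signal
 * for this slot is actually set.
 */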
static int32_t mailbox_wait_reply(uint8_t idx)
{
    bool is_replied;

    while (1) {
        tfm_ns_mailbox_os_wait_reply();

        /*
         * Woken up from sleep.
         * Check the reply signal to make sure that the current thread was
         * woken up by a reply event, rather than by other events.
         *
         * An SVCall is required to access the NS mailbox flags if the NS
         * mailbox runs in privileged mode.
         * An alternative is to let the NS thread allocate its own is_woken
         * flag, but a spinlock-like mechanism would still be required.
         */
        is_replied = mailbox_wait_reply_signal(idx);
        if (is_replied) {
            break;
        }
    }

    return MAILBOX_SUCCESS;
}

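/*
 * Initialize the NSPE mailbox queue and the platform- and OS-specific
 * resources. This must complete successfully before any client call is
 * issued; the queue memory is expected to be accessible to SPE as well.
 */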
int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
{
    int32_t ret;

    if (!queue) {
        return MAILBOX_INVAL_PARAMS;
    }

    /*
     * Further verification of the mailbox queue address may be required
     * according to the non-secure memory assignment.
     */

    memset(queue, 0, sizeof(*queue));

    /*
     * Initialize the empty bitmask in two steps: shifting 1UL by
     * NUM_MAILBOX_QUEUE_SLOT directly would be undefined behavior when
     * NUM_MAILBOX_QUEUE_SLOT equals the bit width of unsigned long.
     */
    queue->empty_slots =
        (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
    queue->empty_slots +=
        (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));

    mailbox_queue_ptr = queue;

    /* Platform specific initialization. */
    ret = tfm_ns_mailbox_hal_init(queue);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    ret = tfm_ns_mailbox_os_lock_init();

#ifdef TFM_MULTI_CORE_TEST
    tfm_ns_mailbox_tx_stats_init(queue);
#endif

    return ret;
}