/*
 * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
 * Copyright (c) 2021-2024 Cypress Semiconductor Corporation (an Infineon
 * company) or an affiliate of Cypress Semiconductor Corporation. All rights
 * reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "aapcs_local.h"
#include "async.h"
#include "config_spm.h"
#include "critical_section.h"
#include "compiler_ext_defs.h"
#include "ffm/psa_api.h"
#include "fih.h"
#include "runtime_defs.h"
#include "stack_watermark.h"
#include "spm.h"
#include "tfm_hal_isolation.h"
#include "tfm_hal_platform.h"
#include "tfm_nspm.h"
#include "ffm/backend.h"
#include "utilities.h"
#include "private/assert.h"
#include "memory_symbols.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/spm_load_api.h"
#include "psa/error.h"
#include "internal_status_code.h"
#include "sprt_partition_metadata_indicator.h"

/* Declare the global component list */
struct partition_head_t partition_listhead;

#if TFM_ISOLATION_LEVEL > 1
extern uintptr_t spm_boundary;
#endif

#ifdef CONFIG_TFM_USE_TRUSTZONE
/* Instance for SPM_THREAD_CONTEXT */
struct context_ctrl_t *p_spm_thread_context;
#else
/* If ns_agent_tz isn't used, we need to provide a stack for SPM to use */
static uint8_t spm_thread_stack[CONFIG_TFM_SPM_THREAD_STACK_SIZE] __aligned(8);
ARCH_CLAIM_CTXCTRL_INSTANCE(spm_thread_context,
                            spm_thread_stack,
                            sizeof(spm_thread_stack));

struct context_ctrl_t *p_spm_thread_context = &spm_thread_context;
#endif

#if (CONFIG_TFM_SECURE_THREAD_MASK_NS_INTERRUPT == 1) && defined(CONFIG_TFM_USE_TRUSTZONE)
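/*
 * Set when ipc_schedule() raises BASEPRI to mask NS interrupts after
 * pre-empting Non-Secure execution; used to decide whether BASEPRI has to be
 * restored to 0 when the TZ NS Agent is scheduled again.
 */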
static bool basepri_set_by_ipc_schedule;
#endif

/*
 * Query the state of current thread.
 */
static uint32_t query_state(struct thread_t *p_thrd, uint32_t *p_retval)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *p_pt = NULL;
    uint32_t state = p_thrd->state;
    psa_signal_t retval_signals = 0;

    /* Get the partition that owns the thread. */
    p_pt = TO_CONTAINER(p_thrd->p_context_ctrl,
                        struct partition_t, ctx_ctrl);

    CRITICAL_SECTION_ENTER(cs_signal);

    retval_signals = p_pt->signals_waiting & p_pt->signals_asserted;

    if (retval_signals) {
        /*
         * The "ASYNC_MSG_REPLY" signal can only be waited on in one of the
         * cases below:
         *
         *   - An FF-M Secure Partition is calling the Client API and
         *     expecting a "handle/status" reply from RoT Services.
         *     FF-M Secure Partitions cannot use 'psa_wait' to wait
         *     on this signal because the signal is not set in FF-M
         *     Secure Partitions' "signals_allowed".
         *
         *   - A Mailbox NS Agent is calling "psa_wait" with a pattern
         *     containing "ASYNC_MSG_REPLY". The signal is set in
         *     Mailbox NS Agents' "signals_allowed".
         *
         * "signals_allowed" is therefore used to check whether the caller is
         * an FF-M Secure Partition or a Mailbox NS Agent.
         */
        if ((retval_signals == ASYNC_MSG_REPLY) &&
            ((p_pt->signals_allowed & ASYNC_MSG_REPLY) != ASYNC_MSG_REPLY)) {
            p_pt->signals_asserted &= ~ASYNC_MSG_REPLY;
            *p_retval = (uint32_t)p_pt->reply_value;
        } else {
            *p_retval = retval_signals;
        }

        /* Clear 'signals_waiting' to indicate the component is not waiting. */
        p_pt->signals_waiting = 0;
        state = THRD_STATE_RET_VAL_AVAIL;
    } else if (p_pt->signals_waiting != 0) {
        /*
         * If the thread is waiting for signals but none of them is asserted,
         * block the thread.
         */
        state = THRD_STATE_BLOCK;
    }

    CRITICAL_SECTION_LEAVE(cs_signal);
    return state;
}

extern struct psa_api_tbl_t psa_api_thread_fn_call;
extern struct psa_api_tbl_t psa_api_svc;

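/*
 * Allocate the runtime metadata block at the top of the partition stack and
 * populate it: select the PSA API table (direct function call or SVC,
 * depending on whether a boundary switch is needed), record the partition
 * entry point and, for SFN partitions, build the SFN table.
 */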
static void prv_process_metadata(struct partition_t *p_pt)
{
    const struct partition_load_info_t *p_pt_ldi;
    const struct service_load_info_t *p_srv_ldi;
    struct context_ctrl_t *ctx_ctrl;
    struct runtime_metadata_t *p_rt_meta;
    service_fn_t *p_sfn_table;
    uint32_t allocate_size;
#if TFM_ISOLATION_LEVEL != 1
    FIH_RET_TYPE(bool) fih_rc;
#endif

    p_pt_ldi = p_pt->p_ldinf;
    p_srv_ldi = LOAD_INFO_SERVICE(p_pt_ldi);
    ctx_ctrl = &p_pt->ctx_ctrl;

    /* Common runtime metadata */
    allocate_size = sizeof(*p_rt_meta);

    if (!IS_IPC_MODEL(p_pt_ldi)) {
        /* SFN specific metadata - SFN function table */
        allocate_size += sizeof(service_fn_t) * p_pt_ldi->nservices;
    }

    ARCH_CTXCTRL_ALLOCATE_STACK(ctx_ctrl, allocate_size);
    p_rt_meta = (struct runtime_metadata_t *)
                                    ARCH_CTXCTRL_ALLOCATED_PTR(ctx_ctrl);

#if TFM_ISOLATION_LEVEL == 1
    p_rt_meta->psa_fns = &psa_api_thread_fn_call;
#else
    FIH_CALL(tfm_hal_boundary_need_switch, fih_rc, spm_boundary, p_pt->boundary);
    if (fih_not_eq(fih_rc, fih_int_encode(false))) {
        p_rt_meta->psa_fns = &psa_api_svc;
    } else {
        p_rt_meta->psa_fns = &psa_api_thread_fn_call;
    }
#endif

    p_rt_meta->entry = p_pt_ldi->entry;
    p_rt_meta->n_sfn = 0;
    p_sfn_table = p_rt_meta->sfn_table;

    if (!IS_IPC_MODEL(p_pt_ldi)) {
        /* SFN table. The signal bit index of a service equals its SFN table index. */
        for (int i = 0; i < p_pt_ldi->nservices; i++) {
            p_sfn_table[i] = (service_fn_t)p_srv_ldi[i].sfn;
        }

        p_rt_meta->n_sfn = p_pt_ldi->nservices;
    }

    p_pt->p_metadata = p_rt_meta;
}

/*
 * Send a message and wake up the SP that is waiting on the message queue,
 * block the current thread and trigger the scheduler.
 */
psa_status_t backend_messaging(struct connection_t *p_connection)
{
    struct partition_t *p_owner = NULL;
    psa_signal_t signal = 0;
    psa_status_t ret = PSA_SUCCESS;

    if (!p_connection || !p_connection->service ||
        !p_connection->service->p_ldinf         ||
        !p_connection->service->partition) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    p_owner = p_connection->service->partition;
    signal = p_connection->service->p_ldinf->signal;

    UNI_LIST_INSERT_AFTER(p_owner, p_connection, p_handles);

    /* Message queued. Update signals. */
    ret = backend_assert_signal(p_owner, signal);

    /*
     * If it is an NS request via RPC, it is unnecessary to block the current
     * thread.
     */
    if (tfm_spm_is_rpc_msg(p_connection)) {
        ret = PSA_SUCCESS;
    } else {
        signal = backend_wait_signals(p_connection->p_client, ASYNC_MSG_REPLY);
        if (signal == (psa_signal_t)0) {
            ret = STATUS_NEED_SCHEDULE;
        }
    }

    p_connection->status = TFM_HANDLE_STATUS_ACTIVE;

    return ret;
}

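/*
 * Deliver the reply for a processed message: for RPC (mailbox) messages the
 * handle is queued on the client's outstanding-response list, otherwise the
 * status is stored directly in the client partition. In both cases the
 * "ASYNC_MSG_REPLY" signal is asserted on the client.
 */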
psa_status_t backend_replying(struct connection_t *handle, int32_t status)
{
    struct partition_t *client = handle->p_client;

    if (tfm_spm_is_rpc_msg(handle)) {
        /*
         * Add to the list of outstanding responses.
         * Note that we use the partition's p_handles pointer.
         * This assumes that partitions using the agent API will process all requests
         * asynchronously and will not also provide services of their own.
         */
        handle->reply_value = (uintptr_t)status;
        handle->msg.rhandle = handle;
        UNI_LIST_INSERT_AFTER(client, handle, p_handles);
        return backend_assert_signal(handle->p_client, ASYNC_MSG_REPLY);
    } else {
        handle->p_client->reply_value = (uintptr_t)status;
        return backend_assert_signal(handle->p_client, ASYNC_MSG_REPLY);
    }
}

extern void common_sfn_thread(void *param);

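/*
 * Common initialization for Secure Partitions: set up the allowed signals and
 * return the thread entry - the partition entry point for IPC model
 * partitions, or the common SFN dispatcher thread for SFN model partitions.
 */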
static thrd_fn_t partition_init(struct partition_t *p_pt,
                                uint32_t service_setting, uint32_t *param)
{
    thrd_fn_t thrd_entry;

    (void)param;
    SPM_ASSERT(p_pt);

#if CONFIG_TFM_DOORBELL_API == 1
    p_pt->signals_allowed |= PSA_DOORBELL;
#endif /* CONFIG_TFM_DOORBELL_API == 1 */

    p_pt->signals_allowed |= service_setting;

    /* Allow 'ASYNC_MSG_REPLY' for Mailbox NS Agent. */
    if (IS_NS_AGENT_MAILBOX(p_pt->p_ldinf)) {
        p_pt->signals_allowed |= ASYNC_MSG_REPLY;
    }

    UNI_LISI_INIT_NODE(p_pt, p_handles);

    if (IS_IPC_MODEL(p_pt->p_ldinf)) {
        /* IPC Partition */
        thrd_entry = POSITION_TO_ENTRY(p_pt->p_ldinf->entry, thrd_fn_t);
    } else {
        /* SFN Partition */
        thrd_entry = (thrd_fn_t)common_sfn_thread;
    }
    return thrd_entry;
}

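/*
 * Initialization for the TrustZone NS Agent: register its client ID range,
 * let SPM reuse the agent's stack as the SPM thread context, and pass the
 * NSPE entry point to the thread as its parameter.
 */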
#ifdef CONFIG_TFM_USE_TRUSTZONE
static thrd_fn_t ns_agent_tz_init(struct partition_t *p_pt,
                                  uint32_t service_setting, uint32_t *param)
{
    thrd_fn_t thrd_entry;

    (void)service_setting;
    SPM_ASSERT(p_pt);
    SPM_ASSERT(param);

    tz_ns_agent_register_client_id_range(p_pt->p_ldinf->client_id_base,
                                         p_pt->p_ldinf->client_id_limit);

    /* Get the context from ns_agent_tz */
    SPM_THREAD_CONTEXT = &p_pt->ctx_ctrl;

    thrd_entry = POSITION_TO_ENTRY(p_pt->p_ldinf->entry, thrd_fn_t);

    /* NS agent TZ expects NSPE entry point as the parameter */
    *param = tfm_hal_get_ns_entry_point();
    return thrd_entry;
}
#else
static thrd_fn_t ns_agent_tz_init(struct partition_t *p_pt,
                                  uint32_t service_setting, uint32_t *param)
{
    (void)p_pt;
    (void)service_setting;
    (void)param;

    /*
     * This stub should not be reached when TrustZone is disabled; return NULL
     * to satisfy the non-void return type.
     */
    return NULL;
}
#endif

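/*
 * Component initialization functions, indexed by PARTITION_TYPE_TO_INDEX():
 * regular Secure Partitions use partition_init(), the TrustZone NS Agent uses
 * ns_agent_tz_init().
 */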
typedef thrd_fn_t (*comp_init_fn_t)(struct partition_t *, uint32_t, uint32_t *);
comp_init_fn_t comp_init_fns[] = {partition_init, ns_agent_tz_init};

/* Parameters are trusted ("assuredly") and are not validated here. */
void backend_init_comp_assuredly(struct partition_t *p_pt, uint32_t service_setting)
{
    const struct partition_load_info_t *p_pldi = p_pt->p_ldinf;
    thrd_fn_t thrd_entry;
    uint32_t param;
    int32_t index = PARTITION_TYPE_TO_INDEX(p_pldi->flags);

    ARCH_CTXCTRL_INIT(&p_pt->ctx_ctrl,
                      LOAD_ALLOCED_STACK_ADDR(p_pldi),
                      p_pldi->stack_size);

    watermark_stack(p_pt);

    THRD_INIT(&p_pt->thrd, &p_pt->ctx_ctrl,
              TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_pldi->flags)));

    thrd_entry = (comp_init_fns[index])(p_pt, service_setting, &param);

    prv_process_metadata(p_pt);

    thrd_start(&p_pt->thrd, thrd_entry, THRD_GENERAL_EXIT, (void *)param);
}

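/*
 * Start the SPM backend: seal the dedicated SPM stack when TrustZone is not
 * used, register the thread state query callback, start the scheduler and
 * activate the hardware boundary of the first scheduled partition.
 */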
uint32_t backend_system_run(void)
{
    uint32_t control;
    struct partition_t *p_cur_pt;
    fih_int fih_rc = FIH_FAILURE;

    SPM_ASSERT(SPM_THREAD_CONTEXT);

#ifndef CONFIG_TFM_USE_TRUSTZONE
    /*
     * The TZ NS Agent is mandatory when TrustZone is enabled, and SPM borrows
     * its stack to improve stack usage efficiency.
     * Hence SPM needs a dedicated stack when TrustZone is not enabled, and
     * this stack needs to be sealed before use.
     */
    ARCH_CTXCTRL_ALLOCATE_STACK(SPM_THREAD_CONTEXT, sizeof(uint64_t));
    arch_seal_thread_stack(ARCH_CTXCTRL_ALLOCATED_PTR(SPM_THREAD_CONTEXT));
#endif

    /* Init thread callback function. */
    thrd_set_query_callback(query_state);

    control = thrd_start_scheduler(&CURRENT_THREAD);

    p_cur_pt = TO_CONTAINER(CURRENT_THREAD->p_context_ctrl,
                            struct partition_t, ctx_ctrl);

    FIH_CALL(tfm_hal_activate_boundary, fih_rc, p_cur_pt->p_ldinf, p_cur_pt->boundary);
    if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
        tfm_core_panic();
    }

    return control;
}

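/*
 * Record that the partition is waiting for the given signals. Returns the
 * subset that is already asserted; if none is asserted, 'signals_waiting' is
 * set so that query_state() blocks the thread at the next scheduling point.
 */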
psa_signal_t backend_wait_signals(struct partition_t *p_pt, psa_signal_t signals)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    psa_signal_t ret;

    if (!p_pt) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_signal);

    ret = p_pt->signals_asserted & signals;
    if (ret == (psa_signal_t)0) {
        p_pt->signals_waiting = signals;
    }

    CRITICAL_SECTION_LEAVE(cs_signal);

    return ret;
}

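/*
 * Assert a signal on the partition. Returns STATUS_NEED_SCHEDULE when the
 * partition is currently waiting for that signal, so the caller knows a
 * reschedule should be attempted.
 */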
psa_status_t backend_assert_signal(struct partition_t *p_pt, psa_signal_t signal)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    psa_status_t ret = PSA_SUCCESS;

    if (!p_pt) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_signal);
    p_pt->signals_asserted |= signal;

    if (p_pt->signals_asserted & p_pt->signals_waiting) {
        ret = STATUS_NEED_SCHEDULE;
    }
    CRITICAL_SECTION_LEAVE(cs_signal);

    return ret;
}

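/*
 * Entry hook of the cross-boundary ABI: acquire the scheduler lock and return
 * the SPM stack (sp and sp_limit, packed as a 64-bit AAPCS value) to switch
 * to, or zeros if the caller is already executing on the SPM stack.
 */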
uint64_t backend_abi_entering_spm(void)
{
    struct partition_t *caller = GET_CURRENT_COMPONENT();
    uint32_t sp = 0;
    uint32_t sp_limit = 0;
    AAPCS_DUAL_U32_T spm_stack_info;

#if TFM_ISOLATION_LEVEL == 1
    /* PSA APIs must be called from Thread mode */
    if (__get_active_exc_num() != EXC_NUM_THREAD_MODE) {
        tfm_core_panic();
    }
#endif

    /*
     * Check whether the caller stack is within the SPM stack. If not, the
     * stack needs to be switched; otherwise, return zeros.
     */
    if ((caller->ctx_ctrl.sp <= SPM_THREAD_CONTEXT->sp_limit) ||
        (caller->ctx_ctrl.sp >  SPM_THREAD_CONTEXT->sp_base)) {
        sp       = SPM_THREAD_CONTEXT->sp;
        sp_limit = SPM_THREAD_CONTEXT->sp_limit;
    }

    AAPCS_DUAL_U32_SET(spm_stack_info, sp, sp_limit);

    arch_acquire_sched_lock();

    return AAPCS_DUAL_U32_AS_U64(spm_stack_info);
}

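/*
 * Exit hook of the cross-boundary ABI: report programmer errors, release the
 * scheduler lock and, if a schedule is needed or was attempted while the lock
 * was held, trigger the scheduler before returning the result to the caller.
 */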
uint32_t backend_abi_leaving_spm(uint32_t result)
{
    uint32_t sched_attempted;

    spm_handle_programmer_errors(result);

    /* Release the scheduler lock and check the record of schedule attempts. */
    sched_attempted = arch_release_sched_lock();

    /* Interrupts are masked; PendSV will not happen immediately. */
    if (result == STATUS_NEED_SCHEDULE ||
        sched_attempted == SCHEDULER_ATTEMPTED) {
        arch_attempt_schedule();
    }

    return result;
}

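/*
 * Scheduler body invoked on the scheduling exception path: save the current
 * PSP, pick the next thread, switch the isolation boundary when the owning
 * partition changes, and return the current and next context control blocks
 * packed as a 64-bit AAPCS value for the context-switching code.
 */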
uint64_t ipc_schedule(uint32_t exc_return)
{
    fih_int fih_rc = FIH_FAILURE;
    FIH_RET_TYPE(bool) fih_bool;
    AAPCS_DUAL_U32_T ctx_ctrls;
    struct partition_t *p_part_curr, *p_part_next;
    struct context_ctrl_t *p_curr_ctx;
    struct thread_t *pth_next;
    struct critical_section_t cs = CRITICAL_SECTION_STATIC_INIT;

    /* Protect concurrent access to current thread/component and thread status */
    CRITICAL_SECTION_ENTER(cs);

#if (CONFIG_TFM_SECURE_THREAD_MASK_NS_INTERRUPT == 1) && defined(CONFIG_TFM_USE_TRUSTZONE)
    if (__get_BASEPRI() == 0) {
        /*
         * If BASEPRI is not set, an interrupt was taken while Non-Secure code
         * was executing, and scheduling is necessary because a secure
         * partition became runnable.
         */
        SPM_ASSERT(!basepri_set_by_ipc_schedule);
        basepri_set_by_ipc_schedule = true;
        __set_BASEPRI(SECURE_THREAD_EXECUTION_PRIORITY);
    }
#endif

    p_curr_ctx = CURRENT_THREAD->p_context_ctrl;

    /*
     * Update the SP of the current thread, in case tfm_arch_set_context_ret_code
     * has to update R0 in the current thread's saved context.
     */
    p_curr_ctx->sp = __get_PSP() -
        (is_default_stacking_rules_apply(exc_return) ?
            sizeof(struct tfm_additional_context_t) : 0);

    pth_next = thrd_next();

    AAPCS_DUAL_U32_SET(ctx_ctrls, (uint32_t)p_curr_ctx, (uint32_t)p_curr_ctx);

    p_part_curr = GET_CURRENT_COMPONENT();
    p_part_next = GET_THRD_OWNER(pth_next);

    if (pth_next != NULL && p_part_curr != p_part_next) {
        /* Check if there is enough room on the stack to save more context */
        if ((p_curr_ctx->sp_limit +
                sizeof(struct tfm_additional_context_t)) > __get_PSP()) {
            tfm_core_panic();
        }

        /*
         * If required, let the platform update the boundary based on its
         * implementation: change privilege, MPU or other configurations.
         */
        FIH_CALL(tfm_hal_boundary_need_switch, fih_bool,
                 p_part_curr->boundary, p_part_next->boundary);
        if (fih_not_eq(fih_bool, fih_int_encode(false))) {
            FIH_CALL(tfm_hal_activate_boundary, fih_rc,
                     p_part_next->p_ldinf, p_part_next->boundary);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
                tfm_core_panic();
            }
        }
        ARCH_FLUSH_FP_CONTEXT();

#if (CONFIG_TFM_SECURE_THREAD_MASK_NS_INTERRUPT == 1) && defined(CONFIG_TFM_USE_TRUSTZONE)
        if (IS_NS_AGENT_TZ(p_part_next->p_ldinf)) {
            /*
             * The Non-Secure Agent for TrustZone is about to be scheduled.
             * A secure partition was scheduled previously, so BASEPRI must be
             * non-zero. However, BASEPRI only needs to be reset to 0 if
             * Non-Secure code execution was interrupted (and did not reach
             * secure execution through a veneer call; veneers set and clear
             * BASEPRI on entry and exit). In that case
             * basepri_set_by_ipc_schedule is set, so it can be used in the
             * condition.
             */
            SPM_ASSERT(__get_BASEPRI() == SECURE_THREAD_EXECUTION_PRIORITY);
            if (basepri_set_by_ipc_schedule) {
                basepri_set_by_ipc_schedule = false;
                __set_BASEPRI(0);
            }
        }
#endif

        AAPCS_DUAL_U32_SET_A1(ctx_ctrls, (uint32_t)pth_next->p_context_ctrl);

        CURRENT_THREAD = pth_next;
    }

    /* Update the metadata indicator */
    if (p_part_next->p_metadata == NULL) {
        tfm_core_panic();
    }
    p_partition_metadata = (uintptr_t)(p_part_next->p_metadata);

    CRITICAL_SECTION_LEAVE(cs);

    return AAPCS_DUAL_U32_AS_U64(ctx_ctrls);
}