/*
 * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
 * Copyright (c) 2021-2023 Cypress Semiconductor Corporation (an Infineon
 * company) or an affiliate of Cypress Semiconductor Corporation. All rights
 * reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "aapcs_local.h"
#include "async.h"
#include "critical_section.h"
#include "compiler_ext_defs.h"
#include "config_spm.h"
#include "ffm/psa_api.h"
#include "runtime_defs.h"
#include "stack_watermark.h"
#include "spm.h"
#include "tfm_hal_isolation.h"
#include "tfm_hal_platform.h"
#include "tfm_rpc.h"
#include "ffm/backend.h"
#include "utilities.h"
#include "memory_symbols.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/spm_load_api.h"
#include "psa/error.h"
#include "internal_status_code.h"

/* Declare the global component list */
struct partition_head_t partition_listhead;

#if TFM_ISOLATION_LEVEL > 1
extern uintptr_t spm_boundary;
#endif

#ifdef CONFIG_TFM_USE_TRUSTZONE
/* Instance for SPM_THREAD_CONTEXT */
struct context_ctrl_t *p_spm_thread_context;
#else
/* If ns_agent_tz isn't used, we need to provide a stack for SPM to use */
static uint8_t spm_thread_stack[CONFIG_TFM_SPM_THREAD_STACK_SIZE] __aligned(8);
ARCH_CLAIM_CTXCTRL_INSTANCE(spm_thread_context,
                            spm_thread_stack,
                            sizeof(spm_thread_stack));

struct context_ctrl_t *p_spm_thread_context = &spm_thread_context;
#endif
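
/*
 * Note: with TrustZone enabled, p_spm_thread_context starts out as NULL and
 * is assigned in ns_agent_tz_init() below, once the TZ NS Agent's context is
 * available for SPM to borrow. SPM_THREAD_CONTEXT, used later in this file,
 * is expected to resolve to this pointer.
 */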

/* Pointer to the partition metadata indicator */
uintptr_t *partition_meta_indicator_pos;

/*
 * Query the state of the current thread.
 */
static uint32_t query_state(struct thread_t *p_thrd, uint32_t *p_retval)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *p_pt = NULL;
    uint32_t state = p_thrd->state;
    psa_signal_t retval_signals = 0;

    /* Get current partition of thread. */
    p_pt = TO_CONTAINER(p_thrd->p_context_ctrl,
                        struct partition_t, ctx_ctrl);

    CRITICAL_SECTION_ENTER(cs_signal);

    retval_signals = p_pt->signals_waiting & p_pt->signals_asserted;

    if (retval_signals) {
        /*
         * The "ASYNC_MSG_REPLY" signal can only be waited on in one of the
         * cases below:
         *
         * - An FF-M Secure Partition is calling the Client API and expecting
         *   a "handle/status" reply from RoT Services. FF-M Secure Partitions
         *   cannot use 'psa_wait' to wait on this signal because it is not
         *   set in their "signals_allowed".
         *
         * - A Mailbox NS Agent is calling "psa_wait" with a pattern
         *   containing "ASYNC_MSG_REPLY". The signal is set in Mailbox NS
         *   Agents' "signals_allowed".
         *
         * "signals_allowed" is therefore used here to tell whether the
         * calling target is an FF-M Secure Partition or a Mailbox NS Agent.
         */
        if ((retval_signals == ASYNC_MSG_REPLY) &&
            ((p_pt->signals_allowed & ASYNC_MSG_REPLY) != ASYNC_MSG_REPLY)) {
            p_pt->signals_asserted &= ~ASYNC_MSG_REPLY;
            *p_retval = (uint32_t)p_pt->reply_value;
        } else {
            *p_retval = retval_signals;
        }

        /* Clear 'signals_waiting' to indicate the component is not waiting. */
        p_pt->signals_waiting = 0;
        state = THRD_STATE_RET_VAL_AVAIL;
    } else if (p_pt->signals_waiting != 0) {
        /*
         * If the thread is waiting for some signals but none of them is
         * asserted, block the thread.
         */
        state = THRD_STATE_BLOCK;
    }

    CRITICAL_SECTION_LEAVE(cs_signal);
    return state;
}
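
/*
 * query_state() is registered as the scheduler's thread-state query callback
 * in backend_system_run() via thrd_set_query_callback(). A sketch of the
 * contract (the authoritative behavior lives in the thread module): the
 * scheduler is expected to treat the outcomes roughly as
 *   - THRD_STATE_RET_VAL_AVAIL: wake the thread and deliver *p_retval;
 *   - THRD_STATE_BLOCK:         keep the thread off the runnable list;
 *   - any other state:          leave the thread state unchanged.
 */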

extern struct psa_api_tbl_t psa_api_thread_fn_call;
extern struct psa_api_tbl_t psa_api_svc;
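
/*
 * Two PSA API dispatch tables exist: "psa_api_thread_fn_call" performs
 * direct function calls for callers that share the SPM boundary, while
 * "psa_api_svc" routes calls through SVC so the boundary (privilege, MPU or
 * similar state) can be switched first. prv_process_metadata() below selects
 * between them per partition.
 */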

static void prv_process_metadata(struct partition_t *p_pt)
{
    const struct partition_load_info_t *p_pt_ldi;
    const struct service_load_info_t *p_srv_ldi;
    struct context_ctrl_t *ctx_ctrl;
    struct runtime_metadata_t *p_rt_meta;
    service_fn_t *p_sfn_table;
    uint32_t allocate_size;

    p_pt_ldi = p_pt->p_ldinf;
    p_srv_ldi = LOAD_INFO_SERVICE(p_pt_ldi);
    ctx_ctrl = &p_pt->ctx_ctrl;

    /* common runtime metadata */
    allocate_size = sizeof(*p_rt_meta);

    if (!IS_IPC_MODEL(p_pt_ldi)) {
        /* SFN specific metadata - SFN function table */
        allocate_size += sizeof(service_fn_t) * p_pt_ldi->nservices;
    }

    ARCH_CTXCTRL_ALLOCATE_STACK(ctx_ctrl, allocate_size);
    p_rt_meta = (struct runtime_metadata_t *)
                                    ARCH_CTXCTRL_ALLOCATED_PTR(ctx_ctrl);

#if TFM_ISOLATION_LEVEL == 1
    p_rt_meta->psa_fns = &psa_api_thread_fn_call;
#else
    if (tfm_hal_boundary_need_switch(spm_boundary, p_pt->boundary)) {
        p_rt_meta->psa_fns = &psa_api_svc;
    } else {
        p_rt_meta->psa_fns = &psa_api_thread_fn_call;
    }
#endif

    p_rt_meta->entry = p_pt_ldi->entry;
    p_rt_meta->n_sfn = 0;
    p_sfn_table = p_rt_meta->sfn_table;

    if (!IS_IPC_MODEL(p_pt_ldi)) {
        /*
         * SFN table. The signal bit index of a service is the same as its
         * index in the SFN table.
         */
        for (int i = 0; i < p_pt_ldi->nservices; i++) {
            p_sfn_table[i] = (service_fn_t)p_srv_ldi[i].sfn;
        }

        p_rt_meta->n_sfn = p_pt_ldi->nservices;
    }

    p_pt->p_metadata = (void *)p_rt_meta;
}
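
/*
 * Rough layout after prv_process_metadata(), assuming the stack grows down
 * and ARCH_CTXCTRL_ALLOCATE_STACK() carves the block from the stack top:
 *
 *   stack top -> | runtime_metadata_t, with sfn_table[0..n_sfn-1] trailing |
 *
 * p_pt->p_metadata is later published through partition_meta_indicator_pos
 * by ipc_schedule(), so the partition runtime can locate its own metadata.
 */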

/*
 * Send a message and wake up the Secure Partition that is waiting on the
 * message queue, block the current thread, and trigger the scheduler.
 */
psa_status_t backend_messaging(struct connection_t *p_connection)
{
    struct partition_t *p_owner = NULL;
    psa_signal_t signal = 0;
    psa_status_t ret = PSA_SUCCESS;

    if (!p_connection || !p_connection->service ||
        !p_connection->service->p_ldinf ||
        !p_connection->service->partition) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    p_owner = p_connection->service->partition;
    signal = p_connection->service->p_ldinf->signal;

    UNI_LIST_INSERT_AFTER(p_owner, p_connection, p_handles);

    /* Message queued. Update the signals. */
    ret = backend_assert_signal(p_owner, signal);

    /*
     * If it is an NS request via RPC, there is no need to block the current
     * thread.
     */
    if (is_tfm_rpc_msg(p_connection)) {
        ret = PSA_SUCCESS;
    } else {
        signal = backend_wait_signals(p_connection->p_client, ASYNC_MSG_REPLY);
        if (signal == (psa_signal_t)0) {
            ret = STATUS_NEED_SCHEDULE;
        }
    }

    p_connection->status = TFM_HANDLE_STATUS_ACTIVE;

    return ret;
}
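
/*
 * A STATUS_NEED_SCHEDULE return tells the caller (ultimately
 * backend_abi_leaving_spm() on the SVC path) to trigger a scheduling
 * attempt, since the client thread has just been put into a waiting state.
 */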

psa_status_t backend_replying(struct connection_t *handle, int32_t status)
{
    struct partition_t *client = handle->p_client;

    if (is_tfm_rpc_msg(handle)) {
        /*
         * Add to the list of outstanding responses. Note that the
         * partition's p_handles list is reused here. This assumes that
         * partitions using the agent API process all requests asynchronously
         * and do not also provide services of their own.
         */
        handle->reply_value = (uintptr_t)status;
        handle->msg.rhandle = handle;
        UNI_LIST_INSERT_AFTER(client, handle, p_handles);
        return backend_assert_signal(handle->p_client, ASYNC_MSG_REPLY);
    } else {
        handle->p_client->reply_value = (uintptr_t)status;
        return backend_assert_signal(handle->p_client, ASYNC_MSG_REPLY);
    }
}
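
/*
 * For the non-RPC path, the reply value stored here is picked up in
 * query_state(): when ASYNC_MSG_REPLY is the only asserted signal and the
 * waiter is an FF-M Secure Partition, reply_value is returned instead of the
 * signal mask.
 */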

extern void common_sfn_thread(void *param);

static thrd_fn_t partition_init(struct partition_t *p_pt,
                                uint32_t service_setting, uint32_t *param)
{
    thrd_fn_t thrd_entry;

    (void)param;
    SPM_ASSERT(p_pt);

#if CONFIG_TFM_DOORBELL_API == 1
    p_pt->signals_allowed |= PSA_DOORBELL;
#endif /* CONFIG_TFM_DOORBELL_API == 1 */

    p_pt->signals_allowed |= service_setting;

    /* Allow 'ASYNC_MSG_REPLY' for Mailbox NS Agent. */
    if (IS_NS_AGENT_MAILBOX(p_pt->p_ldinf)) {
        p_pt->signals_allowed |= ASYNC_MSG_REPLY;
    }

    UNI_LISI_INIT_NODE(p_pt, p_handles);

    if (IS_IPC_MODEL(p_pt->p_ldinf)) {
        /* IPC Partition */
        thrd_entry = POSITION_TO_ENTRY(p_pt->p_ldinf->entry, thrd_fn_t);
    } else {
        /* SFN Partition */
        thrd_entry = (thrd_fn_t)common_sfn_thread;
    }
    return thrd_entry;
}

#ifdef CONFIG_TFM_USE_TRUSTZONE
static thrd_fn_t ns_agent_tz_init(struct partition_t *p_pt,
                                  uint32_t service_setting, uint32_t *param)
{
    thrd_fn_t thrd_entry;

    (void)service_setting;
    SPM_ASSERT(p_pt);
    SPM_ASSERT(param);

    /* Get the context from ns_agent_tz */
    SPM_THREAD_CONTEXT = &p_pt->ctx_ctrl;

    thrd_entry = POSITION_TO_ENTRY(p_pt->p_ldinf->entry, thrd_fn_t);

    /* NS agent TZ expects NSPE entry point as the parameter */
    *param = tfm_hal_get_ns_entry_point();
    return thrd_entry;
}
#else
static thrd_fn_t ns_agent_tz_init(struct partition_t *p_pt,
                                  uint32_t service_setting, uint32_t *param)
{
    (void)p_pt;
    (void)service_setting;
    (void)param;

    /*
     * This stub must never be reached when TrustZone is disabled; panic
     * instead of returning an indeterminate entry point.
     */
    tfm_core_panic();

    return NULL;
}
#endif

typedef thrd_fn_t (*comp_init_fn_t)(struct partition_t *, uint32_t, uint32_t *);
comp_init_fn_t comp_init_fns[] = {partition_init, ns_agent_tz_init};
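
/*
 * The order of this array must match the index that
 * PARTITION_TYPE_TO_INDEX() derives from the partition flags; presumably
 * index 0 selects a generic (IPC/SFN) partition and index 1 the TZ NS Agent.
 */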

/* Parameters are assumed valid by the caller, as the "assuredly" suffix implies. */
void backend_init_comp_assuredly(struct partition_t *p_pt, uint32_t service_setting)
{
    const struct partition_load_info_t *p_pldi = p_pt->p_ldinf;
    thrd_fn_t thrd_entry;
    uint32_t param;
    int32_t index = PARTITION_TYPE_TO_INDEX(p_pldi->flags);

    ARCH_CTXCTRL_INIT(&p_pt->ctx_ctrl,
                      LOAD_ALLOCED_STACK_ADDR(p_pldi),
                      p_pldi->stack_size);

    watermark_stack(p_pt);

    THRD_INIT(&p_pt->thrd, &p_pt->ctx_ctrl,
              TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_pldi->flags)));

    thrd_entry = (comp_init_fns[index])(p_pt, service_setting, &param);

    prv_process_metadata(p_pt);

    thrd_start(&p_pt->thrd, thrd_entry, THRD_GENERAL_EXIT, (void *)param);
}
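
/*
 * Note the ordering above: the thread stack is initialized first, then the
 * per-type init runs, then prv_process_metadata() carves the metadata block
 * from the same stack, and only then is the thread armed with its entry,
 * its exit (THRD_GENERAL_EXIT) and the type-specific "param" value.
 */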

uint32_t backend_system_run(void)
{
    uint32_t control;
    struct partition_t *p_cur_pt;
    fih_int fih_rc = FIH_FAILURE;

    SPM_ASSERT(SPM_THREAD_CONTEXT);

#ifndef CONFIG_TFM_USE_TRUSTZONE
    /*
     * The TZ NS Agent is mandatory when TrustZone is enabled, and SPM
     * borrows its stack to improve stack usage efficiency. Hence SPM needs a
     * dedicated stack when TrustZone is not enabled, and this stack must be
     * sealed before use.
     */
    ARCH_CTXCTRL_ALLOCATE_STACK(SPM_THREAD_CONTEXT, sizeof(uint64_t));
    arch_seal_thread_stack(ARCH_CTXCTRL_ALLOCATED_PTR(SPM_THREAD_CONTEXT));
#endif

    /* Init thread callback function. */
    thrd_set_query_callback(query_state);

    partition_meta_indicator_pos = (uintptr_t *)PART_LOCAL_STORAGE_PTR_POS;
    control = thrd_start_scheduler(&CURRENT_THREAD);

    p_cur_pt = TO_CONTAINER(CURRENT_THREAD->p_context_ctrl,
                            struct partition_t, ctx_ctrl);

    FIH_CALL(tfm_hal_activate_boundary, fih_rc, p_cur_pt->p_ldinf, p_cur_pt->boundary);
    if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
        tfm_core_panic();
    }

    return control;
}
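
/*
 * The "control" value returned by thrd_start_scheduler() is handed back to
 * the architecture-specific caller, which is expected to use it to perform
 * the first switch into the chosen thread. Before that happens, the boundary
 * of the first scheduled partition is activated here, mirroring what
 * ipc_schedule() does on every later switch.
 */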

psa_signal_t backend_wait_signals(struct partition_t *p_pt, psa_signal_t signals)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    psa_signal_t ret;

    if (!p_pt) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_signal);

    ret = p_pt->signals_asserted & signals;
    if (ret == (psa_signal_t)0) {
        p_pt->signals_waiting = signals;
    }

    CRITICAL_SECTION_LEAVE(cs_signal);

    return ret;
}
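
/*
 * A zero return means none of the requested signals is asserted yet; the
 * caller then typically returns STATUS_NEED_SCHEDULE, and query_state() will
 * observe the non-zero "signals_waiting" and report THRD_STATE_BLOCK.
 */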

psa_status_t backend_assert_signal(struct partition_t *p_pt, psa_signal_t signal)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    psa_status_t ret = PSA_SUCCESS;

    if (!p_pt) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_signal);
    p_pt->signals_asserted |= signal;

    if (p_pt->signals_asserted & p_pt->signals_waiting) {
        ret = STATUS_NEED_SCHEDULE;
    }
    CRITICAL_SECTION_LEAVE(cs_signal);

    return ret;
}
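
/*
 * STATUS_NEED_SCHEDULE is returned when the asserted set now overlaps the
 * partition's "signals_waiting", i.e. a blocked thread has just become
 * runnable and the caller should request a scheduling attempt.
 */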

uint64_t backend_abi_entering_spm(void)
{
    struct partition_t *caller = GET_CURRENT_COMPONENT();
    uint32_t sp = 0;
    uint32_t sp_limit = 0;
    AAPCS_DUAL_U32_T spm_stack_info;

#if TFM_ISOLATION_LEVEL == 1
    /* PSA APIs must be called from Thread mode */
    if (__get_active_exc_num() != EXC_NUM_THREAD_MODE) {
        tfm_core_panic();
    }
#endif

    /*
     * Check whether the caller's stack is already within the SPM stack. If
     * not, the stack needs to be switched: return the SPM stack boundaries.
     * Otherwise, return zeros.
     */
    if ((caller->ctx_ctrl.sp <= SPM_THREAD_CONTEXT->sp_limit) ||
        (caller->ctx_ctrl.sp > SPM_THREAD_CONTEXT->sp_base)) {
        sp = SPM_THREAD_CONTEXT->sp;
        sp_limit = SPM_THREAD_CONTEXT->sp_limit;
    }

    AAPCS_DUAL_U32_SET(spm_stack_info, sp, sp_limit);

    arch_acquire_sched_lock();

    return AAPCS_DUAL_U32_AS_U64(spm_stack_info);
}
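
/*
 * Per AAPCS, the packed 64-bit return value lands in r0/r1, so the assembly
 * veneer wrapping PSA API entry can load the new SP and stack limit directly
 * from the register pair; an all-zero pair is understood as "no stack switch
 * needed". The scheduler lock taken here is released in
 * backend_abi_leaving_spm().
 */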

uint32_t backend_abi_leaving_spm(uint32_t result)
{
    uint32_t sched_attempted;

    spm_handle_programmer_errors(result);

    /* Release scheduler lock and check the record of schedule attempt. */
    sched_attempted = arch_release_sched_lock();

    /* Interrupts are masked, so PendSV will not happen immediately. */
    if (result == STATUS_NEED_SCHEDULE ||
        sched_attempted == SCHEDULER_ATTEMPTED) {
        arch_attempt_schedule();
    }

    return result;
}
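
/*
 * backend_abi_entering_spm()/backend_abi_leaving_spm() bracket a PSA API
 * call made from a partition: the former switches stacks and locks the
 * scheduler, the latter reports programmer errors, unlocks, and replays any
 * scheduling attempt that was deferred while the lock was held.
 */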

uint64_t ipc_schedule(void)
{
    fih_int fih_rc = FIH_FAILURE;
    AAPCS_DUAL_U32_T ctx_ctrls;
    struct partition_t *p_part_curr, *p_part_next;
    struct context_ctrl_t *p_curr_ctx;
    struct thread_t *pth_next;
    struct critical_section_t cs = CRITICAL_SECTION_STATIC_INIT;

    /* Protect concurrent access to current thread/component and thread status */
    CRITICAL_SECTION_ENTER(cs);

    pth_next = thrd_next();
    p_curr_ctx = (struct context_ctrl_t *)(CURRENT_THREAD->p_context_ctrl);

    AAPCS_DUAL_U32_SET(ctx_ctrls, (uint32_t)p_curr_ctx, (uint32_t)p_curr_ctx);

    p_part_curr = GET_CURRENT_COMPONENT();
    /*
     * thrd_next() may return NULL when no thread is runnable; fall back to
     * the current partition so the dereferences below remain valid.
     */
    p_part_next = (pth_next != NULL) ? GET_THRD_OWNER(pth_next) : p_part_curr;

    if (pth_next != NULL && p_part_curr != p_part_next) {
        /* Check if there is enough room on stack to save more context */
        if ((p_curr_ctx->sp_limit +
             sizeof(struct tfm_additional_context_t)) > __get_PSP()) {
            tfm_core_panic();
        }

        /*
         * If required, let the platform update boundary based on its
         * implementation. Change privilege, MPU or other configurations.
         */
        if (tfm_hal_boundary_need_switch(p_part_curr->boundary,
                                         p_part_next->boundary)) {
            FIH_CALL(tfm_hal_activate_boundary, fih_rc,
                     p_part_next->p_ldinf, p_part_next->boundary);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
                tfm_core_panic();
            }
        }
        ARCH_FLUSH_FP_CONTEXT();

        AAPCS_DUAL_U32_SET_A1(ctx_ctrls, (uint32_t)pth_next->p_context_ctrl);

        CURRENT_THREAD = pth_next;
    }

    /* Update meta indicator */
    if (partition_meta_indicator_pos && (p_part_next->p_metadata)) {
        *partition_meta_indicator_pos = (uintptr_t)(p_part_next->p_metadata);
    }
    CRITICAL_SECTION_LEAVE(cs);

    return AAPCS_DUAL_U32_AS_U64(ctx_ctrls);
}
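
/*
 * The returned AAPCS dual-u32 pair is presumably consumed by the PendSV
 * handler: the first word is the context-control block to save the outgoing
 * context into, the second the one to restore. When no switch happens, both
 * words point at the current context, making the swap a no-op.
 */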