/*
 * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
 * Copyright (c) 2021-2023 Cypress Semiconductor Corporation (an Infineon
 * company) or an affiliate of Cypress Semiconductor Corporation. All rights
 * reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "aapcs_local.h"
#include "critical_section.h"
#include "compiler_ext_defs.h"
#include "config_spm.h"
#include "runtime_defs.h"
#include "ffm/stack_watermark.h"
#include "spm.h"
#include "tfm_hal_isolation.h"
#include "tfm_hal_platform.h"
#include "tfm_rpc.h"
#include "ffm/backend.h"
#include "utilities.h"
#include "cmsis_psa/memory_symbols.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/spm_load_api.h"
#include "psa/error.h"

/* Declare the global component list */
struct partition_head_t partition_listhead;

#if CONFIG_TFM_PSA_API_CROSS_CALL == 1
/* Instance for SPM_THREAD_CONTEXT */

#ifdef CONFIG_TFM_USE_TRUSTZONE
struct context_ctrl_t *p_spm_thread_context;
#else
/* If ns_agent_tz isn't used, we need to provide a stack for SPM to use */
static uint8_t spm_thread_stack[CONFIG_TFM_SPM_THREAD_STACK_SIZE] __aligned(8);
ARCH_CLAIM_CTXCTRL_INSTANCE(spm_thread_context,
                            spm_thread_stack,
                            sizeof(spm_thread_stack));

struct context_ctrl_t *p_spm_thread_context = &spm_thread_context;
#endif

#endif

/* Points to the location holding the current partition's metadata pointer */
uintptr_t *partition_meta_indicator_pos;

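/*
 * Scheduler lock flag, defined elsewhere in the SPM. While it equals
 * SCHEDULER_LOCKED, ipc_schedule() below performs no context switch.
 */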
extern uint32_t scheduler_lock;

/*
 * Query the state of the current thread.
 */
static uint32_t query_state(struct thread_t *p_thrd, uint32_t *p_retval)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *p_pt = NULL;
    uint32_t state = p_thrd->state;
    psa_signal_t signal_ret = 0;

    /* Get the partition that owns this thread. */
    p_pt = TO_CONTAINER(p_thrd->p_context_ctrl,
                        struct partition_t, ctx_ctrl);

    CRITICAL_SECTION_ENTER(cs_signal);

    signal_ret = p_pt->signals_waiting & p_pt->signals_asserted;

    if (signal_ret) {
        /*
         * If the partition is waiting for signals and any of them is
         * asserted, change the thread state to THRD_STATE_RET_VAL_AVAIL and
         * fill in the return value. If the waited signal is
         * TFM_IPC_REPLY_SIGNAL, the Secure Partition is waiting for a
         * service to complete, so the return value comes from
         * backend_replying() called by the server Partition. For the other
         * signals waited on via psa_wait(), the return value is the signal
         * set itself.
         */
        if (signal_ret == TFM_IPC_REPLY_SIGNAL) {
            p_pt->signals_asserted &= ~TFM_IPC_REPLY_SIGNAL;
            *p_retval = (uint32_t)p_pt->reply_value;
        } else {
            *p_retval = signal_ret;
        }

        p_pt->signals_waiting = 0;
        state = THRD_STATE_RET_VAL_AVAIL;
    } else if (p_pt->signals_waiting != 0) {
        /*
         * If the thread is waiting for signals but none of them is
         * asserted, block the thread.
         */
        state = THRD_STATE_BLOCK;
    }

    CRITICAL_SECTION_LEAVE(cs_signal);
    return state;
}

extern struct psa_api_tbl_t psa_api_cross;
extern struct psa_api_tbl_t psa_api_svc;

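/*
 * Allocate the runtime metadata on the top of the partition stack and fill
 * it in: the partition entry, the PSA API table to use and, for SFN
 * partitions, the per-service function table.
 */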
static void prv_process_metadata(struct partition_t *p_pt)
{
    const struct partition_load_info_t *p_pt_ldi;
    const struct service_load_info_t *p_srv_ldi;
    struct context_ctrl_t *ctx_ctrl;
    struct runtime_metadata_t *p_rt_meta;
    service_fn_t *p_sfn_table;
    uint32_t allocate_size;

    p_pt_ldi = p_pt->p_ldinf;
    p_srv_ldi = LOAD_INFO_SERVICE(p_pt_ldi);
    ctx_ctrl = &p_pt->ctx_ctrl;

    /* Common runtime metadata */
    allocate_size = sizeof(*p_rt_meta);

    if (!IS_IPC_MODEL(p_pt_ldi)) {
        /* SFN-specific metadata - the SFN function table */
        allocate_size += sizeof(service_fn_t) * p_pt_ldi->nservices;
    }

    ARCH_CTXCTRL_ALLOCATE_STACK(ctx_ctrl, allocate_size);
    p_rt_meta = (struct runtime_metadata_t *)
                                    ARCH_CTXCTRL_ALLOCATED_PTR(ctx_ctrl);

    p_rt_meta->entry = p_pt_ldi->entry;
#if TFM_LVL == 1
    p_rt_meta->psa_fns = &psa_api_cross;
#else
    /* TODO: ABI for PRoT partitions needs to be updated based on implementations. */
    p_rt_meta->psa_fns = &psa_api_svc;
#endif
    p_rt_meta->n_sfn = 0;
    p_sfn_table = p_rt_meta->sfn_table;

    if (!IS_IPC_MODEL(p_pt_ldi)) {
        /* SFN table. A service's signal bit index matches its index in the SFN table. */
        for (int i = 0; i < p_pt_ldi->nservices; i++) {
            p_sfn_table[i] = (service_fn_t)p_srv_ldi[i].sfn;
        }

        p_rt_meta->n_sfn = p_pt_ldi->nservices;
    }

    p_pt->p_metadata = (void *)p_rt_meta;
}

/*
 * Send a message and wake up the Secure Partition that is waiting on the
 * message queue, block the current thread and trigger the scheduler.
 */
psa_status_t backend_messaging(struct service_t *service,
                               struct connection_t *handle)
{
    struct partition_t *p_owner = NULL;
    psa_signal_t signal = 0;

    if (!handle || !service || !service->p_ldinf || !service->partition) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    p_owner = service->partition;
    signal = service->p_ldinf->signal;

    UNI_LIST_INSERT_AFTER(p_owner, handle, p_handles);

    /* Message queued. Update the signals. */
    backend_assert_signal(p_owner, signal);

    /*
     * If this is an NS request via RPC, there is no need to block the
     * current thread.
     */
    if (!is_tfm_rpc_msg(handle)) {
        backend_wait_signals(handle->p_client, TFM_IPC_REPLY_SIGNAL);
    }

    handle->status = TFM_HANDLE_STATUS_ACTIVE;

    return PSA_SUCCESS;
}

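/*
 * Forward a reply to the client: via RPC for non-secure callers, otherwise
 * by recording the reply value and asserting TFM_IPC_REPLY_SIGNAL so that
 * query_state() can unblock the waiting client thread.
 */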
psa_status_t backend_replying(struct connection_t *handle, int32_t status)
{
    if (is_tfm_rpc_msg(handle)) {
        tfm_rpc_client_call_reply(handle, status);
    } else {
        handle->p_client->reply_value = (uintptr_t)status;
        backend_assert_signal(handle->p_client, TFM_IPC_REPLY_SIGNAL);
    }

    /*
     * 'psa_reply' exists only in the IPC model and returns 'void'. Always
     * return 'PSA_SUCCESS' here since the SPM does not forward the status
     * to the caller.
     */
    return PSA_SUCCESS;
}

extern void common_sfn_thread(void *param);

/* Parameters are assumed to be valid; callers must validate them beforehand. */
void backend_init_comp_assuredly(struct partition_t *p_pt,
                                 uint32_t service_setting)
{
    const struct partition_load_info_t *p_pldi = p_pt->p_ldinf;
    thrd_fn_t thrd_entry;
    void *param = NULL;

#if CONFIG_TFM_DOORBELL_API == 1
    p_pt->signals_allowed |= PSA_DOORBELL;
#endif /* CONFIG_TFM_DOORBELL_API == 1 */

    p_pt->signals_allowed |= service_setting;

    UNI_LISI_INIT_NODE(p_pt, p_handles);

    ARCH_CTXCTRL_INIT(&p_pt->ctx_ctrl,
                      LOAD_ALLOCED_STACK_ADDR(p_pldi),
                      p_pldi->stack_size);

    watermark_stack(p_pt);

    prv_process_metadata(p_pt);

    THRD_INIT(&p_pt->thrd, &p_pt->ctx_ctrl,
              TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_pldi->flags)));

#if (CONFIG_TFM_PSA_API_CROSS_CALL == 1)
    if (IS_NS_AGENT_TZ(p_pldi)) {
        /* Get the context from ns_agent_tz */
        SPM_THREAD_CONTEXT = &p_pt->ctx_ctrl;
    }
#endif

    if (IS_IPC_MODEL(p_pldi)) {
        /* IPC Partition */
        thrd_entry = POSITION_TO_ENTRY(p_pldi->entry, thrd_fn_t);
    } else {
        /* SFN Partition */
        thrd_entry = POSITION_TO_ENTRY(common_sfn_thread, thrd_fn_t);
    }

    if (IS_NS_AGENT_TZ(p_pldi)) {
        /* NS agent TZ expects NSPE entry point as the parameter */
        param = (void *)tfm_hal_get_ns_entry_point();
    }

    thrd_start(&p_pt->thrd,
               thrd_entry,
               THRD_GENERAL_EXIT,
               param);
}

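/*
 * Hand control over to the scheduler: register the thread state-query
 * callback, start the first thread and activate the boundary of the
 * partition that owns it. The returned value is expected to be applied as
 * the CONTROL setting by the architecture-specific caller.
 */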
uint32_t backend_system_run(void)
{
    uint32_t control;
    struct partition_t *p_cur_pt;
    fih_int fih_rc = FIH_FAILURE;

#if CONFIG_TFM_PSA_API_CROSS_CALL == 1
    SPM_ASSERT(SPM_THREAD_CONTEXT);
#endif

    /* Initialize the thread state-query callback. */
    thrd_set_query_callback(query_state);

    partition_meta_indicator_pos = (uintptr_t *)PART_LOCAL_STORAGE_PTR_POS;
    control = thrd_start_scheduler(&CURRENT_THREAD);

    p_cur_pt = TO_CONTAINER(CURRENT_THREAD->p_context_ctrl,
                            struct partition_t, ctx_ctrl);

    FIH_CALL(tfm_hal_activate_boundary, fih_rc, p_cur_pt->p_ldinf, p_cur_pt->boundary);
    if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
        tfm_core_panic();
    }

    return control;
}

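/*
 * Check 'signals' against the partition's asserted signals inside a
 * critical section. Returns the asserted subset, or 0 after recording the
 * signals as waited-on so that query_state() can block the thread later.
 */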
psa_signal_t backend_wait_signals(struct partition_t *p_pt, psa_signal_t signals)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;
    psa_signal_t ret_signal;

    if (!p_pt) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_signal);

    ret_signal = p_pt->signals_asserted & signals;
    if (ret_signal == 0) {
        p_pt->signals_waiting = signals;
    }

    CRITICAL_SECTION_LEAVE(cs_signal);

    return ret_signal;
}

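/*
 * Assert 'signal' for the partition inside a critical section. Always
 * returns PSA_SUCCESS; panics if 'p_pt' is NULL.
 */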
uint32_t backend_assert_signal(struct partition_t *p_pt, psa_signal_t signal)
{
    struct critical_section_t cs_signal = CRITICAL_SECTION_STATIC_INIT;

    if (!p_pt) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_signal);
    p_pt->signals_asserted |= signal;
    CRITICAL_SECTION_LEAVE(cs_signal);

    return PSA_SUCCESS;
}

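/*
 * Pick the next thread to run. The return value packs two context-control
 * pointers into an AAPCS dual-u32 return: the current context in the first
 * word and the next context in the second. It is assumed to be consumed by
 * the architecture-specific scheduling exception handler when saving and
 * restoring contexts.
 */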
uint64_t ipc_schedule(void)
{
    fih_int fih_rc = FIH_FAILURE;
    AAPCS_DUAL_U32_T ctx_ctrls;
    struct partition_t *p_part_curr, *p_part_next;
    struct context_ctrl_t *p_curr_ctx;
    struct thread_t *pth_next = thrd_next();
    struct critical_section_t cs = CRITICAL_SECTION_STATIC_INIT;

    p_curr_ctx = (struct context_ctrl_t *)(CURRENT_THREAD->p_context_ctrl);

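    /*
     * Default both the "current" and "next" slots to the current context;
     * the "next" slot is only overwritten below when an actual partition
     * switch happens.
     */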
    AAPCS_DUAL_U32_SET(ctx_ctrls, (uint32_t)p_curr_ctx, (uint32_t)p_curr_ctx);

    p_part_curr = GET_CURRENT_COMPONENT();
    p_part_next = GET_THRD_OWNER(pth_next);

    if (scheduler_lock != SCHEDULER_LOCKED && pth_next != NULL &&
        p_part_curr != p_part_next) {
        /* Check if there is enough room on the stack to save more context */
        if ((p_curr_ctx->sp_limit +
             sizeof(struct tfm_additional_context_t)) > __get_PSP()) {
            tfm_core_panic();
        }

        CRITICAL_SECTION_ENTER(cs);
        /*
         * If required, let the platform update the boundary based on its
         * implementation. Change privilege, MPU or other configurations.
         */
        if (tfm_hal_boundary_need_switch(p_part_curr->boundary,
                                         p_part_next->boundary)) {
            FIH_CALL(tfm_hal_activate_boundary, fih_rc,
                     p_part_next->p_ldinf, p_part_next->boundary);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
                tfm_core_panic();
            }
        }
        ARCH_FLUSH_FP_CONTEXT();

        AAPCS_DUAL_U32_SET_A1(ctx_ctrls, (uint32_t)pth_next->p_context_ctrl);

        CURRENT_THREAD = pth_next;
        CRITICAL_SECTION_LEAVE(cs);
    }

    /* Update the metadata indicator */
    if (partition_meta_indicator_pos && (p_part_next->p_metadata)) {
        *partition_meta_indicator_pos = (uintptr_t)(p_part_next->p_metadata);
    }
    return AAPCS_DUAL_U32_AS_U64(ctx_ctrls);
}