/*
 * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
 * Copyright (c) 2021-2022 Cypress Semiconductor Corporation (an Infineon
 * company) or an affiliate of Cypress Semiconductor Corporation. All rights
 * reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "aapcs_local.h"
#include "critical_section.h"
#include "compiler_ext_defs.h"
#include "config_spm.h"
#include "runtime_defs.h"
#include "ffm/stack_watermark.h"
#include "spm_ipc.h"
#include "tfm_hal_memory_symbols.h"
#include "tfm_hal_isolation.h"
#include "tfm_hal_platform.h"
#include "tfm_rpc.h"
#include "ffm/backend.h"
#include "utilities.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/spm_load_api.h"
#include "psa/error.h"

/* Declare the global component list */
struct partition_head_t partition_listhead;

#if CONFIG_TFM_PSA_API_CROSS_CALL == 1
/* Instance for SPM_THREAD_CONTEXT */

#ifdef CONFIG_TFM_USE_TRUSTZONE
struct context_ctrl_t *p_spm_thread_context;
#else
/* If ns_agent_tz isn't used, we need to provide a stack for SPM to use */
static uint8_t spm_thread_stack[CONFIG_TFM_SPM_THREAD_STACK_SIZE] __aligned(8);
ARCH_CLAIM_CTXCTRL_INSTANCE(spm_thread_context,
                            spm_thread_stack,
                            sizeof(spm_thread_stack));

struct context_ctrl_t *p_spm_thread_context = &spm_thread_context;
#endif

#endif

/* Indicator pointing to the partition metadata */
uintptr_t *partition_meta_indicator_pos;

extern uint32_t scheduler_lock;

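/*
 * Allocate the runtime metadata block on the partition stack, fill in the
 * entry point and, for SFN-model partitions, the SFN table from the load
 * info, then record the block's address in 'p_pt->p_metadata'.
 */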
static void prv_process_metadata(struct partition_t *p_pt)
{
    const struct partition_load_info_t *p_pt_ldi;
    const struct service_load_info_t *p_srv_ldi;
    struct context_ctrl_t *ctx_ctrl;
    struct runtime_metadata_t *p_rt_meta;
    service_fn_t *p_sfn_table;
    uint32_t allocate_size;

    p_pt_ldi = p_pt->p_ldinf;
    p_srv_ldi = LOAD_INFO_SERVICE(p_pt_ldi);
    ctx_ctrl = &p_pt->ctx_ctrl;

    /* Common runtime metadata */
    allocate_size = sizeof(*p_rt_meta);

    if (!IS_PARTITION_IPC_MODEL(p_pt_ldi)) {
        /* SFN-specific metadata: the SFN function table */
        allocate_size += sizeof(service_fn_t) * p_pt_ldi->nservices;
    }

    ARCH_CTXCTRL_ALLOCATE_STACK(ctx_ctrl, allocate_size);
    p_rt_meta = (struct runtime_metadata_t *)
                ARCH_CTXCTRL_ALLOCATED_PTR(ctx_ctrl);

    p_rt_meta->entry = p_pt_ldi->entry;
    p_rt_meta->n_sfn = 0;
    p_sfn_table = p_rt_meta->sfn_table;

    if (!IS_PARTITION_IPC_MODEL(p_pt_ldi)) {
        /* SFN table: a service's index in the table matches its signal bit */
        for (int i = 0; i < p_pt_ldi->nservices; i++) {
            p_sfn_table[i] = (service_fn_t)p_srv_ldi[i].sfn;
        }

        p_rt_meta->n_sfn = p_pt_ldi->nservices;
    }

    p_pt->p_metadata = (void *)p_rt_meta;
}

/*
 * Send a message and wake up the Secure Partition that is waiting on the
 * message queue; block the current thread and trigger the scheduler.
 */
psa_status_t backend_messaging(struct service_t *service,
                               struct conn_handle_t *handle)
{
    struct partition_t *p_owner = NULL;
    psa_signal_t signal = 0;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    if (!handle || !service || !service->p_ldinf || !service->partition) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    p_owner = service->partition;
    signal = service->p_ldinf->signal;

    CRITICAL_SECTION_ENTER(cs_assert);

    UNI_LIST_INSERT_AFTER(p_owner, handle, p_handles);

    /* Message queued. Update the signals. */
    p_owner->signals_asserted |= signal;

    if (p_owner->signals_waiting & signal) {
        thrd_wake_up(&p_owner->waitobj,
                     (p_owner->signals_asserted & p_owner->signals_waiting));
        p_owner->signals_waiting &= ~signal;
    }
    CRITICAL_SECTION_LEAVE(cs_assert);

    /*
     * If it is an NS request via RPC, there is no need to block the current
     * thread.
     */
    if (!is_tfm_rpc_msg(handle)) {
        thrd_set_wait(&handle->ack_evnt, CURRENT_THREAD);
    }

    handle->status = TFM_HANDLE_STATUS_ACTIVE;

    return PSA_SUCCESS;
}

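/*
 * Reply to a message: for NS requests made via RPC, forward the status
 * through the RPC reply callback; otherwise wake up the caller thread
 * blocked on the handle's ack event, passing 'status' as the wake-up value.
 */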
psa_status_t backend_replying(struct conn_handle_t *handle, int32_t status)
{
    if (is_tfm_rpc_msg(handle)) {
        tfm_rpc_client_call_reply(handle, status);
    } else {
        thrd_wake_up(&handle->ack_evnt, status);
    }

    /*
     * 'psa_reply' exists in the IPC model only and returns 'void'. Always
     * return 'PSA_SUCCESS' here, since SPM does not forward the status to
     * the caller.
     */
    return PSA_SUCCESS;
}

extern void sprt_main(void);

/* Parameters are assumed to be valid; the caller guarantees them */
void backend_init_comp_assuredly(struct partition_t *p_pt,
                                 uint32_t service_setting)
{
    const struct partition_load_info_t *p_pldi = p_pt->p_ldinf;

#if CONFIG_TFM_DOORBELL_API == 1
    p_pt->signals_allowed |= PSA_DOORBELL;
#endif /* CONFIG_TFM_DOORBELL_API == 1 */

    p_pt->signals_allowed |= service_setting;

    THRD_SYNC_INIT(&p_pt->waitobj);
    UNI_LISI_INIT_NODE(p_pt, p_handles);

    ARCH_CTXCTRL_INIT(&p_pt->ctx_ctrl,
                      LOAD_ALLOCED_STACK_ADDR(p_pldi),
                      p_pldi->stack_size);

    watermark_stack(p_pt);

    prv_process_metadata(p_pt);

    THRD_INIT(&p_pt->thrd, &p_pt->ctx_ctrl,
              TO_THREAD_PRIORITY(PARTITION_PRIORITY(p_pldi->flags)));

#if (CONFIG_TFM_PSA_API_CROSS_CALL == 1) && defined(CONFIG_TFM_USE_TRUSTZONE)
    if (IS_PARTITION_NS_AGENT(p_pldi)) {
        /* Get the context from ns_agent_tz */
        if (p_pldi->pid == 0) {
            SPM_THREAD_CONTEXT = &p_pt->ctx_ctrl;
        }
    }
#endif

    thrd_start(&p_pt->thrd,
               POSITION_TO_ENTRY(sprt_main, thrd_fn_t),
               THRD_GENERAL_EXIT);
}

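/*
 * Start the scheduler and activate the isolation boundary of the first
 * scheduled partition, panicking if the activation fails. Returns the
 * value produced by 'thrd_start_scheduler()'.
 */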
uint32_t backend_system_run(void)
{
    uint32_t control;
    struct partition_t *p_cur_pt;
    fih_int fih_rc = FIH_FAILURE;

#if CONFIG_TFM_PSA_API_CROSS_CALL == 1
    SPM_ASSERT(SPM_THREAD_CONTEXT);
#endif

    partition_meta_indicator_pos = (uintptr_t *)hal_mem_sp_meta_start;
    control = thrd_start_scheduler(&CURRENT_THREAD);

    p_cur_pt = TO_CONTAINER(CURRENT_THREAD->p_context_ctrl,
                            struct partition_t, ctx_ctrl);

    FIH_CALL(tfm_hal_activate_boundary, fih_rc,
             p_cur_pt->p_ldinf, p_cur_pt->boundary);
    if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
        tfm_core_panic();
    }

    return control;
}

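/*
 * Return the subset of 'signal_mask' that is already asserted for the
 * partition. If none of the signals is asserted, record the mask in
 * 'signals_waiting' and put the partition thread into the waiting state
 * on its wait object.
 */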
psa_signal_t backend_wait(struct partition_t *p_pt, psa_signal_t signal_mask)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    psa_signal_t ret_signal;

    /*
     * 'backend_wait()' sets the waiting signal mask for the partition and
     * blocks the partition thread to wait for signals. These changes must
     * be performed inside a critical section so that interrupts cannot
     * change 'signals_waiting' or the thread state while this function is
     * reading or writing them.
     */
    CRITICAL_SECTION_ENTER(cs_assert);

    ret_signal = p_pt->signals_asserted & signal_mask;
    if (ret_signal == 0) {
        p_pt->signals_waiting = signal_mask;
        thrd_set_wait(&p_pt->waitobj, CURRENT_THREAD);
    }
    CRITICAL_SECTION_LEAVE(cs_assert);

    return ret_signal;
}

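/*
 * Wake up the partition thread with the set of signals it is both asserting
 * and waiting for, then clear the waiting mask.
 */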
void backend_wake_up(struct partition_t *p_pt)
{
    thrd_wake_up(&p_pt->waitobj,
                 p_pt->signals_asserted & p_pt->signals_waiting);
    p_pt->signals_waiting = 0;
}

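/*
 * Select the next thread to run. When the scheduler is unlocked and the
 * next thread belongs to a different partition, check that there is stack
 * room for the additional context, switch the isolation boundary if the
 * platform requires it, flush the FP context and update CURRENT_THREAD.
 * The current and next context-control pointers are packed into the 64-bit
 * return value (r0/r1 under AAPCS) for the scheduling assembly code that
 * performs the actual context switch.
 */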
uint64_t ipc_schedule(void)
{
    fih_int fih_rc = FIH_FAILURE;
    AAPCS_DUAL_U32_T ctx_ctrls;
    struct partition_t *p_part_curr, *p_part_next;
    struct context_ctrl_t *p_curr_ctx;
    struct thread_t *pth_next = thrd_next();
    struct critical_section_t cs = CRITICAL_SECTION_STATIC_INIT;

    p_curr_ctx = (struct context_ctrl_t *)(CURRENT_THREAD->p_context_ctrl);

    AAPCS_DUAL_U32_SET(ctx_ctrls, (uint32_t)p_curr_ctx, (uint32_t)p_curr_ctx);

    p_part_curr = GET_CURRENT_COMPONENT();
    p_part_next = GET_THRD_OWNER(pth_next);

    if (scheduler_lock != SCHEDULER_LOCKED && pth_next != NULL &&
        p_part_curr != p_part_next) {
        /* Check if there is enough room on the stack to save more context */
        if ((p_curr_ctx->sp_limit +
             sizeof(struct tfm_additional_context_t)) > __get_PSP()) {
            tfm_core_panic();
        }

        CRITICAL_SECTION_ENTER(cs);
        /*
         * If required, let the platform update the boundary based on its
         * implementation: change privilege, MPU or other configurations.
         */
        if (tfm_hal_boundary_need_switch(p_part_curr->boundary,
                                         p_part_next->boundary)) {
            FIH_CALL(tfm_hal_activate_boundary, fih_rc,
                     p_part_next->p_ldinf, p_part_next->boundary);
            if (fih_not_eq(fih_rc, fih_int_encode(TFM_HAL_SUCCESS))) {
                tfm_core_panic();
            }
        }
        ARCH_FLUSH_FP_CONTEXT();

        AAPCS_DUAL_U32_SET_A1(ctx_ctrls, (uint32_t)pth_next->p_context_ctrl);

        CURRENT_THREAD = pth_next;
        CRITICAL_SECTION_LEAVE(cs);
    }

    /* Update the metadata indicator */
    if (partition_meta_indicator_pos && (p_part_next->p_metadata)) {
        *partition_meta_indicator_pos = (uintptr_t)(p_part_next->p_metadata);
    }
    return AAPCS_DUAL_U32_AS_U64(ctx_ctrls);
}