1 /*
2 * Copyright (c) 2018-2024, Arm Limited. All rights reserved.
3 * Copyright (c) 2022 Cypress Semiconductor Corporation (an Infineon
4 * company) or an affiliate of Cypress Semiconductor Corporation. All rights
5 * reserved.
6 *
7 * SPDX-License-Identifier: BSD-3-Clause
8 *
9 */
10 #ifndef __TFM_ARCH_H__
11 #define __TFM_ARCH_H__
12
13 /* This header file collects the architecture related operations. */
14
15 #include <stdbool.h>
16 #include <stddef.h>
17 #include <inttypes.h>
18 #include "fih.h"
19 #include "tfm_hal_device_header.h"
20 #include "cmsis_compiler.h"
21
22 #if defined(__ARM_ARCH_8_1M_MAIN__) || \
23 defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__)
24 #include "tfm_arch_v8m.h"
25 #elif defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7M__) || \
26 defined(__ARM_ARCH_7EM__)
27 #include "tfm_arch_v6m_v7m.h"
28 #else
29 #error "Unsupported ARM Architecture."
30 #endif
31
/* Scheduler lock states, see arch_acquire_sched_lock()/arch_release_sched_lock(). */
#define SCHEDULER_ATTEMPTED 2 /* Schedule attempt when scheduler is locked. */
#define SCHEDULER_LOCKED 1    /* Scheduling is locked. */
#define SCHEDULER_UNLOCKED 0  /* Scheduling is allowed. */

#define XPSR_T32 0x01000000   /* xPSR.T bit - execute in Thumb (T32) state. */
37
/* Define IRQ level for the fault and SVC exceptions available on each
 * architecture. Level 0 is the highest configurable exception priority
 * on Cortex-M.
 */
#if defined(__ARM_ARCH_8_1M_MAIN__) || defined(__ARM_ARCH_8M_MAIN__)
#define SecureFault_IRQnLVL (0)
#define MemoryManagement_IRQnLVL (0)
#define BusFault_IRQnLVL (0)
#define SVCall_IRQnLVL (0)
#elif defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
#define MemoryManagement_IRQnLVL (0)
#define BusFault_IRQnLVL (0)
#define SVCall_IRQnLVL (0)
#elif defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_8M_BASE__)
#define SVCall_IRQnLVL (0)
#else
#error "Unsupported ARM Architecture."
#endif
53
54
55 /* The lowest secure interrupt priority */
56 #ifdef CONFIG_TFM_USE_TRUSTZONE
57 /* IMPORTANT NOTE:
58 *
59 * Although the priority of the secure PendSV must be the lowest possible
60 * among other interrupts in the Secure state, it must be ensured that
61 * PendSV is not preempted nor masked by Non-Secure interrupts to ensure
62 * the integrity of the Secure operation.
63 * When AIRCR.PRIS is set, the Non-Secure execution can act on
64 * FAULTMASK_NS, PRIMASK_NS or BASEPRI_NS register to boost its priority
65 * number up to the value 0x80.
66 * For this reason, set the priority of the PendSV interrupt to the next
67 * priority level configurable on the platform, just below 0x80.
68 */
69 #define PENDSV_PRIO_FOR_SCHED ((1 << (__NVIC_PRIO_BITS - 1)) - 1)
70
71 #if CONFIG_TFM_SECURE_THREAD_MASK_NS_INTERRUPT == 1
72 #if (!defined(__ARM_ARCH_8_1M_MAIN__)) && (!defined(__ARM_ARCH_8M_MAIN__))
73 #error CONFIG_TFM_SECURE_THREAD_MASK_NS_INTERRUPT is not supported in Baseline implementations
74 #endif /* (!defined(__ARM_ARCH_8_1M_MAIN__)) && (!defined(__ARM_ARCH_8M_MAIN__)) */
75 /* IMPORTANT NOTE:
76 *
77 * When AIRCR.PRIS is set, the Non-Secure execution can act on
78 * FAULTMASK_NS, PRIMASK_NS or BASEPRI_NS register to boost its priority
79 * number up to the value 0x80. To mask NS interrupts in secure thread
80 * execution, set the priority of Secure thread mode execution to this value.
81 */
82 #define SECURE_THREAD_EXECUTION_PRIORITY 0x80
83 #endif /* CONFIG_TFM_SECURE_THREAD_MASK_NS_INTERRUPT == 1 */
84 #else /* CONFIG_TFM_USE_TRUSTZONE */
85 /* If TZ is not in use, we have the full priority range available */
86 #define PENDSV_PRIO_FOR_SCHED ((1 << __NVIC_PRIO_BITS) - 1)
87 #endif /* CONFIG_TFM_USE_TRUSTZONE */
88
/* State context defined by architecture: the exception frame layout
 * (caller-saved registers, return address and xPSR).
 */
struct tfm_state_context_t {
    uint32_t r0;    /* Argument/scratch register */
    uint32_t r1;    /* Argument/scratch register */
    uint32_t r2;    /* Argument/scratch register */
    uint32_t r3;    /* Argument/scratch register */
    uint32_t r12;   /* Scratch register */
    uint32_t lr;    /* Link register (R14) */
    uint32_t ra;    /* Return address - the PC value to resume at */
    uint32_t xpsr;  /* Program status register */
};
100
/* Context addition to state context: the software-saved part, holding the
 * callee-saved registers plus integrity signature words.
 */
struct tfm_additional_context_t {
    uint32_t integ_sign;    /* Integrity signature */
    uint32_t reserved;      /* Reserved */
    uint32_t callee[8];     /* R4-R11. NOT ORDERED!! */
};
107
/* Full thread context: the software-saved additional context followed by
 * the architectural state context.
 */
struct full_context_t {
    struct tfm_additional_context_t addi_ctx;
    struct tfm_state_context_t stat_ctx;
};
113
/* Context control: tracks a context's stack area, saved stack pointer and
 * EXC_RETURN payload.
 * CAUTION: Assembly references this structure. DO CHECK the below functions
 * before changing the structure:
 *   'PendSV_Handler'
 */
struct context_ctrl_t {
    uint32_t sp;          /* Stack pointer (higher address).
                           * THIS MUST BE THE FIRST MEMBER OF
                           * THE STRUCT.
                           */
    uint32_t exc_ret;     /* EXC_RETURN pattern.
                           * THIS MUST BE THE SECOND MEMBER OF
                           * THE STRUCT.
                           */
    uint32_t sp_limit;    /* Stack limit (lower address) */
    uint32_t sp_base;     /* Stack usage start (higher addr) */
};
131
/*
 * The context on MSP when de-privileged FLIH Function calls SVC to return.
 * It is the same when de-privileged FLIH Function is ready to run.
 */
struct context_flih_ret_t {
    uint64_t stack_seal;    /* Two words stack seal */
    struct tfm_additional_context_t addi_ctx;
    uint32_t exc_return;    /* exception return value on SVC_PREPARE_DEPRIV_FLIH */
    uint32_t dummy;         /* dummy value to keep 8-byte alignment */
    uint32_t psp;           /* PSP when the interrupt exception occurs */
    uint32_t psplim;        /* PSPLIM when the interrupt exception occurs */
    struct tfm_state_context_t state_ctx; /* ctx on SVC_PREPARE_DEPRIV_FLIH */
};
145
/* Assign stack and stack limit to the context control instance.
 * The initial SP (stack top) is rounded down and the limit (stack bottom)
 * rounded up, so both stay inside [buf, buf + sz] and keep the 8-byte
 * alignment. exc_ret is cleared - no EXC_RETURN pattern exists yet.
 */
#define ARCH_CTXCTRL_INIT(x, buf, sz) do {                        \
    (x)->sp = ((uint32_t)(buf) + (uint32_t)(sz)) & ~0x7;          \
    (x)->sp_limit = ((uint32_t)(buf) + 7) & ~0x7;                 \
    (x)->sp_base = (x)->sp;                                       \
    (x)->exc_ret = 0;                                             \
} while (0)
153
/* Allocate 'size' bytes in stack. The size is rounded up to a multiple of
 * 8 so the stack pointer keeps 8-byte alignment.
 */
#define ARCH_CTXCTRL_ALLOCATE_STACK(x, size) \
    ((x)->sp -= ((size) + 7) & ~0x7)
157
/* The last allocated pointer - the current SP after the most recent
 * ARCH_CTXCTRL_ALLOCATE_STACK() call.
 */
#define ARCH_CTXCTRL_ALLOCATED_PTR(x) ((x)->sp)
160
/* Prepare an exception return pattern on the stack: fill a state context
 * so that, on exception return, execution starts at 'pfn' with arguments
 * param0..param3, returns to 'pfnlr', and runs in Thumb state (xPSR.T).
 */
#define ARCH_CTXCTRL_EXCRET_PATTERN(x, param0, param1, param2, param3, pfn, pfnlr) do { \
    (x)->r0 = (uint32_t)(param0);                                 \
    (x)->r1 = (uint32_t)(param1);                                 \
    (x)->r2 = (uint32_t)(param2);                                 \
    (x)->r3 = (uint32_t)(param3);                                 \
    (x)->ra = (uint32_t)(pfn);                                    \
    (x)->lr = (uint32_t)(pfnlr);                                  \
    (x)->xpsr = XPSR_T32;                                         \
} while (0)
171
/* Set state context parameter r0 - the value the thread observes as the
 * API return value when the context is resumed.
 */
#define ARCH_STATE_CTX_SET_R0(x, r0_val) \
    ((x)->r0 = (uint32_t)(r0_val))
175
/*
 * Claim a statically initialized context control instance.
 * Make the start stack pointer at 'stack_buf[stack_size]' because
 * the hardware acts in a 'Decrease-then-store' behaviour.
 * NOTE(review): unlike ARCH_CTXCTRL_INIT, no 8-byte rounding is applied
 * here - presumably the caller provides an aligned buffer; confirm at
 * call sites.
 */
#define ARCH_CLAIM_CTXCTRL_INSTANCE(name, stack_buf, stack_size)     \
    struct context_ctrl_t name = {                                   \
        .sp = (uint32_t)&stack_buf[stack_size],                      \
        .sp_base = (uint32_t)&stack_buf[stack_size],                 \
        .sp_limit = (uint32_t)stack_buf,                             \
        .exc_ret = 0,                                                \
    }
188
__save_disable_irq(void)189 __STATIC_INLINE uint32_t __save_disable_irq(void)
190 {
191 uint32_t result;
192
193 __ASM volatile ("mrs %0, primask \n cpsid i" : "=r" (result) :: "memory");
194 return result;
195 }
196
__restore_irq(uint32_t status)197 __STATIC_INLINE void __restore_irq(uint32_t status)
198 {
199 __ASM volatile ("msr primask, %0" :: "r" (status) : "memory");
200 }
201
202 __attribute__ ((always_inline))
__get_active_exc_num(void)203 __STATIC_INLINE uint32_t __get_active_exc_num(void)
204 {
205 IPSR_Type IPSR;
206
207 /* if non-zero, exception is active. NOT banked S/NS */
208 IPSR.w = __get_IPSR();
209 return IPSR.b.ISR;
210 }
211
212 __attribute__ ((always_inline))
__set_CONTROL_nPRIV(uint32_t nPRIV)213 __STATIC_INLINE void __set_CONTROL_nPRIV(uint32_t nPRIV)
214 {
215 CONTROL_Type ctrl;
216
217 ctrl.w = __get_CONTROL();
218 ctrl.b.nPRIV = nPRIV;
219 __set_CONTROL(ctrl.w);
220 __ISB();
221 }
222
223 /**
224 * \brief Whether in privileged level
225 *
226 * \retval true If current execution runs in privileged level.
227 * \retval false If current execution runs in unprivileged level.
228 */
tfm_arch_is_priv(void)229 __STATIC_INLINE bool tfm_arch_is_priv(void)
230 {
231 CONTROL_Type ctrl;
232
233 /* If in Handler mode */
234 if (__get_IPSR()) {
235 return true;
236 }
237
238 /* If in privileged Thread mode */
239 ctrl.w = __get_CONTROL();
240 if (!ctrl.b.nPRIV) {
241 return true;
242 }
243
244 return false;
245 }
246
/* With lazy FP stacking enabled, executing any FP instruction (a harmless
 * self-move here) forces the deferred FP context preservation to complete.
 * A no-op when FP or lazy stacking is not configured.
 */
#if (CONFIG_TFM_FLOAT_ABI >= 1) && CONFIG_TFM_LAZY_STACKING
#define ARCH_FLUSH_FP_CONTEXT() __asm volatile("vmov.f32 s0, s0 \n":::"memory")
#else
#define ARCH_FLUSH_FP_CONTEXT()
#endif
252
253 /* Set secure exceptions priority. */
254 void tfm_arch_set_secure_exception_priorities(void);
255
256 #ifdef TFM_FIH_PROFILE_ON
257 /* Check secure exception priority */
258 FIH_RET_TYPE(int32_t) tfm_arch_verify_secure_exception_priorities(void);
259 #endif
260
261 /* Configure various extensions. */
262 void tfm_arch_config_extensions(void);
263
264 #if (CONFIG_TFM_FLOAT_ABI > 0)
/* Clear floating point data. */
266 void tfm_arch_clear_fp_data(void);
267 #endif
268
269 /*
270 * This function is called after SPM has initialized.
271 * It frees the stack used by SPM initialization and do Exception Return.
272 * It does not return.
273 */
274 void tfm_arch_free_msp_and_exc_ret(uint32_t msp_base, uint32_t exc_return);
275
276 /*
277 * This function sets return value on APIs that cause scheduling, for example
 * psa_wait(), by manipulating the control context - this is usually setting the
279 * R0 register of the thread context.
280 */
281 void tfm_arch_set_context_ret_code(const struct context_ctrl_t *p_ctx_ctrl, uint32_t ret_code);
282
283 /* Init a thread context on thread stack and update the control context. */
284 void tfm_arch_init_context(struct context_ctrl_t *p_ctx_ctrl,
285 uintptr_t pfn, void *param, uintptr_t pfnlr);
286
287 /*
288 * Refresh the HW (sp, splimit) according to the given control context and
289 * returns the EXC_RETURN payload (caller might need it for following codes).
290 *
291 * The p_ctx_ctrl must have been initialized by 'tfm_arch_init_context'.
292 */
293 uint32_t tfm_arch_refresh_hardware_context(const struct context_ctrl_t *p_ctx_ctrl);
294
295 /*
296 * Lock the scheduler. Any scheduling attempt during locked period will not
297 * take place and is recorded.
298 */
299 void arch_acquire_sched_lock(void);
300
301 /*
302 * Release the scheduler lock and return if there are scheduling attempts during
303 * locked period. The recorded attempts are cleared after this function so do
304 * not call it a second time after unlock to query attempt status.
305 *
 * return value:
 *   SCHEDULER_ATTEMPTED: unlocked successfully, but scheduling attempts were
 *                        recorded during the locked period, or the function
 *                        was called without the lock being held.
 *   other values:        unlocked successfully with no attempts detected.
310 */
311 uint32_t arch_release_sched_lock(void);
312
313 /*
314 * Try to schedule if scheduler is not locked, otherwise record the schedule
315 * attempt and return without scheduling.
316 */
317 uint32_t arch_attempt_schedule(void);
318
319 /*
320 * Thread Function Call at Thread mode. It is called in the IPC backend and
321 * isolation level 1. The function switches to the SPM stack to execute the
322 * target PSA API to avoid using up the Secure Partitions' stacks. The NS agent
323 * shares the stack with the SPM so it doesn't need to switch.
324 *
 * The stack check process destroys the caller registers so the input args and
326 * the target PSA API address are stored in the caller stack at the beginning.
327 * They are loaded again before the PSA API is called. This function is
328 * non-preemptive except for the target PSA API execution.
329 *
330 * NOTE: This function cannot be called by any C functions as it uses a
331 * customized parameter passing method and puts the target function address in
332 * r12. These input parameters a0~a3 come from standard PSA interface input.
333 * The return value is stored in r0 for the PSA API to return.
334 */
335 void tfm_arch_thread_fn_call(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3);
336
337 /*
338 * Reset MSP to msp_base.
339 * Use PSP as the current stack in Thread mode.
340 * Execute two init functions in turn.
341 */
342 void arch_clean_stack_and_launch(void *param, uintptr_t spm_init_func,
343 uintptr_t ns_agent_entry, uint32_t msp_base);
344
345 #endif
346