/*
 * Copyright (c) 2018-2024, Arm Limited. All rights reserved.
 * Copyright (c) 2022-2024 Cypress Semiconductor Corporation (an Infineon
 * company) or an affiliate of Cypress Semiconductor Corporation. All rights
 * reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <inttypes.h>

#include "compiler_ext_defs.h"
#include "config_spm.h"
#include "security_defs.h"
#include "region_defs.h"
#include "spm.h"
#include "svc_num.h"
#include "tfm_arch.h"
#include "tfm_hal_device_header.h"
#include "tfm_svcalls.h"
#include "utilities.h"
#include "core_ext.h"
#include "ffm/backend.h"

#if !defined(__ARM_ARCH_8M_MAIN__) && !defined(__ARM_ARCH_8_1M_MAIN__)
#error "Unsupported ARM Architecture."
#endif

/* Flag to control the scheduling logic in PendSV. */
uint32_t scheduler_lock = SCHEDULER_UNLOCKED;

/* IAR Specific */
#if defined(__ICCARM__)

#pragma required = scheduler_lock
#pragma required = spm_svc_handler

#if CONFIG_TFM_SPM_BACKEND_IPC == 1

#pragma required = ipc_schedule
#pragma required = backend_abi_entering_spm
#pragma required = backend_abi_leaving_spm

#endif /* CONFIG_TFM_SPM_BACKEND_IPC == 1 */

#endif

#if CONFIG_TFM_SPM_BACKEND_IPC == 1

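/*
 * Call the PSA interface function that the caller placed in r12, passing
 * a0-a3 as arguments. When the caller is a Secure Partition, execution
 * switches to the SPM stack returned by backend_abi_entering_spm(); an NS
 * agent caller (new SP == 0) keeps running on its current stack. Interrupts
 * are masked while the stack and PSPLIM are switched.
 */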
__naked
void tfm_arch_thread_fn_call(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3)
{
    __asm volatile(
        "   push    {r4-r6, lr}             \n"
        "   push    {r0-r4, r12}            \n"
        "   cpsid   i                       \n"
        "   isb                             \n"
        "   bl      backend_abi_entering_spm\n" /* r0: new SP, r1: new PSPLIM */
        "   mrs     r5, psplim              \n"
        "   mov     r6, sp                  \n"
        "   cmp     r0, #0                  \n" /* Check whether the caller is
                                                 * NS agent (new SP == 0) or
                                                 * secure partition (new SP != 0)
                                                 */
        "   itttt   ne                      \n"
        "   movne   r2, #0                  \n"
        "   msrne   psplim, r2              \n" /* Clear PSPLIM before setting
                                                 * PSP to a new value. This can
                                                 * avoid potential stack
                                                 * overflow.
                                                 */
        "   movne   sp, r0                  \n" /* Switch to the SPM stack if
                                                 * caller is NOT an NS agent.
                                                 */
        "   msrne   psplim, r1              \n"
        "   cpsie   i                       \n"
        "   isb                             \n"
        "   ldmia   r6!, {r0-r4, r12}       \n" /* Load PSA interface input args
                                                 * and target function
                                                 */
        "   blx     r12                     \n"
        "   cpsid   i                       \n"
        "   isb                             \n"
        "   bl      backend_abi_leaving_spm \n"
        "   mov     r2, #0                  \n" /* Back to the caller stack */
        "   msr     psplim, r2              \n"
        "   mov     sp, r6                  \n"
        "   msr     psplim, r5              \n"
        "   cpsie   i                       \n"
        "   isb                             \n"
        "   pop     {r4-r6, pc}             \n"
    );
}

#endif /* CONFIG_TFM_SPM_BACKEND_IPC == 1 */

#if CONFIG_TFM_SPM_BACKEND_IPC == 1
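/*
 * PendSV is used by the IPC backend for scheduling. ipc_schedule() returns
 * the current and the next context; if they differ, the PSP and EXC_RETURN of
 * the current thread are saved, the callee-saved registers are stacked by
 * software when EXC_RETURN.DCRS shows the hardware has not stacked them, and
 * the next thread's context is restored in the same way.
 */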
__attribute__((naked)) void PendSV_Handler(void)
{
    __ASM volatile(
        SYNTAX_UNIFIED
        "   movs    r0, #"M2S(EXC_RETURN_S)"            \n"
        "   ands    r0, lr                              \n" /* NS interrupted */
#if CONFIG_TFM_SCHEDULE_WHEN_NS_INTERRUPTED == 0
        "   beq     v8m_pendsv_exit                     \n" /* No schedule */
#endif
        "   push    {r0, lr}                            \n" /* Save R0, LR */
        "   mov     r0, lr                              \n" /* Pass the EXC_RETURN value as
                                                             * parameter
                                                             */
        "   bl      ipc_schedule                        \n"
        "   pop     {r2, lr}                            \n"
        "   cmp     r0, r1                              \n" /* curr, next ctx */
        "   beq     v8m_pendsv_exit                     \n" /* No schedule */
        "   cpsid   i                                   \n"
        "   isb                                         \n"
        "   mrs     r2, psp                             \n"
        "   ands    r3, lr, #"M2S(EXC_RETURN_DCRS)"     \n" /* Check DCRS */
        "   itt     ne                                  \n" /* Skip saving callee */
        "   stmdbne r2!, {r4-r11}                       \n" /* Save callee */
        "   subne   r2, #8                              \n" /* SP offset for
                                                             * reserved additional state context,
                                                             * integrity signature
                                                             */
        "   stmia   r0, {r2, lr}                        \n" /* Save curr ctx:
                                                             * PSP, LR
                                                             */
        "   ldmia   r1!, {r2, lr}                       \n" /* Load next ctx:
                                                             * PSP, LR
                                                             */
        "   ands    r3, lr, #"M2S(EXC_RETURN_DCRS)"     \n" /* Check DCRS */
        "   itt     ne                                  \n" /* Skip loading callee */
        "   addne   r2, #8                              \n" /* SP offset for
                                                             * reserved additional state context,
                                                             * integrity signature
                                                             */
        "   ldmiane r2!, {r4-r11}                       \n" /* Load callee */
        "   ldr     r3, [r1]                            \n" /* Load sp_limit */
        "   msr     psp, r2                             \n"
        "   msr     psplim, r3                          \n"
        "   cpsie   i                                   \n"
        "   isb                                         \n"
        "v8m_pendsv_exit:                               \n"
        "   bx      lr                                  \n"
    );
}
#endif

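/*
 * SVC requests are decoded by spm_svc_handler(), which returns the EXC_RETURN
 * value to exit with. The extra stacking below is only needed when the SVC
 * enters or returns from a deprivileged FLIH function.
 */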
__attribute__((naked)) void SVC_Handler(void)
{
    __ASM volatile(
    SYNTAX_UNIFIED
    "MRS     r0, MSP                         \n"
    "MOV     r1, lr                          \n"
    "MRS     r2, PSP                         \n"
    "MRS     r3, PSPLIM                      \n"
    "PUSH    {r2, r3}                        \n" /* PSP PSPLIM */
    "PUSH    {r1, r2}                        \n" /* Orig_exc_return, dummy */
    "BL      spm_svc_handler                 \n"
    "MOV     lr, r0                          \n"
    "LDR     r1, [sp]                        \n" /* Get orig_exc_return value */
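    /* Bit [3] of EXC_RETURN is the Mode bit (0 = Handler, 1 = Thread).
     * Comparing it between the returned and the original EXC_RETURN values
     * tells whether this SVC enters a deprivileged FLIH function
     * (Handler -> Thread), returns from one (Thread -> Handler), or needs no
     * mode change.
     */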
    "AND     r0, #8                          \n" /* Mode bit */
    "AND     r1, #8                          \n"
    "SUBS    r0, r1                          \n" /* Compare EXC_RETURN values */
    "BGT     to_flih_func                    \n"
    "BLT     from_flih_func                  \n"
    "ADD     sp, #16                         \n" /*
                                                  * "Unstack" unused orig_exc_return, dummy,
                                                  * PSP, PSPLIM pushed by current handler
                                                  */
    "BX      lr                              \n"
    "to_flih_func:                           \n"
    "ANDS    r3, lr, #"M2S(EXC_RETURN_DCRS)" \n" /* Check DCRS */
    "ITT     ne                              \n" /* Skip saving callee */
    "PUSHNE  {r4-r11}                        \n" /* Save callee */
    "SUBSNE  sp, #8                          \n" /* SP offset for
                                                  * reserved additional state context,
                                                  * integrity signature
                                                  */
    "LDR     r4, ="M2S(STACK_SEAL_PATTERN)"  \n" /* Fill r4-r11 with the seal value */
    "MOV     r5, r4                          \n"
    "MOV     r6, r4                          \n"
    "MOV     r7, r4                          \n"
    "MOV     r8, r4                          \n"
    "MOV     r9, r4                          \n"
    "MOV     r10, r4                         \n"
    "MOV     r11, r4                         \n"
    "PUSH    {r4, r5}                        \n" /* Seal stack before EXC_RET */
    "BX      lr                              \n"
    "from_flih_func:                         \n"
    "ADD     sp, #16                         \n" /*
                                                  * "Unstack" unused orig_exc_return, dummy,
                                                  * PSP, PSPLIM pushed by current handler
                                                  */
    "POP     {r4, r5}                        \n" /* Seal stack */
    "ANDS    r3, lr, #"M2S(EXC_RETURN_DCRS)" \n" /* Check DCRS */
    "ITT     ne                              \n" /* Skip loading callee */
    "ADDSNE  sp, #8                          \n" /* SP offset for
                                                  * reserved additional state context,
                                                  * integrity signature
                                                  */
    "POPNE   {r4-r11}                        \n" /* Load callee */
    "ADD     sp, #16                         \n" /*
                                                  * "Unstack" unused orig_exc_return, dummy,
                                                  * PSP, PSPLIM pushed by the previous
                                                  * TFM_SVC_PREPARE_DEPRIV_FLIH request
                                                  */
    "BX      lr                              \n"
    );
}

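/*
 * Set AIRCR.PRIS and the Secure exception priorities so that Non-secure
 * execution cannot pre-empt Secure fault handling or the SPM's SVC, while the
 * Secure PendSV used for scheduling keeps the lowest Secure priority.
 */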
void tfm_arch_set_secure_exception_priorities(void)
{
    uint32_t VECTKEY;
    SCB_Type *scb = SCB;
    uint32_t AIRCR;

    /* Set PRIS flag in AIRCR */
    AIRCR = scb->AIRCR;
    VECTKEY = (~AIRCR & SCB_AIRCR_VECTKEYSTAT_Msk);
    scb->AIRCR = SCB_AIRCR_PRIS_Msk |
                 VECTKEY |
                 (AIRCR & ~SCB_AIRCR_VECTKEY_Msk);
    /* Set fault priority to less than 0x80 (with AIRCR.PRIS set) to prevent
     * Non-secure from pre-empting faults that may indicate corruption of Secure
     * state.
     */
    NVIC_SetPriority(MemoryManagement_IRQn, MemoryManagement_IRQnLVL);
    NVIC_SetPriority(BusFault_IRQn, BusFault_IRQnLVL);
    NVIC_SetPriority(SecureFault_IRQn, SecureFault_IRQnLVL);

    NVIC_SetPriority(SVCall_IRQn, SVCall_IRQnLVL);
    /*
     * Set secure PendSV priority to the lowest in SECURE state.
     */
    NVIC_SetPriority(PendSV_IRQn, PENDSV_PRIO_FOR_SCHED);
}

#ifdef TFM_FIH_PROFILE_ON
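/*
 * Fault-injection hardened re-check that the exception priority configuration
 * applied by tfm_arch_set_secure_exception_priorities() is still in effect.
 */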
FIH_RET_TYPE(int32_t) tfm_arch_verify_secure_exception_priorities(void)
{
    SCB_Type *scb = SCB;

    if ((scb->AIRCR & SCB_AIRCR_PRIS_Msk) != SCB_AIRCR_PRIS_Msk) {
        FIH_RET(FIH_FAILURE);
    }
    (void)fih_delay();
    if ((scb->AIRCR & SCB_AIRCR_PRIS_Msk) != SCB_AIRCR_PRIS_Msk) {
        FIH_RET(FIH_FAILURE);
    }
    if (fih_not_eq(fih_int_encode(NVIC_GetPriority(MemoryManagement_IRQn)),
                  fih_int_encode(MemoryManagement_IRQnLVL))) {
        FIH_RET(FIH_FAILURE);
    }
    if (fih_not_eq(fih_int_encode(NVIC_GetPriority(BusFault_IRQn)),
                  fih_int_encode(BusFault_IRQnLVL))) {
        FIH_RET(FIH_FAILURE);
    }
    if (fih_not_eq(fih_int_encode(NVIC_GetPriority(SecureFault_IRQn)),
                  fih_int_encode(SecureFault_IRQnLVL))) {
        FIH_RET(FIH_FAILURE);
    }
    if (fih_not_eq(fih_int_encode(NVIC_GetPriority(SVCall_IRQn)),
                  fih_int_encode(SVCall_IRQnLVL))) {
        FIH_RET(FIH_FAILURE);
    }
    if (fih_not_eq(fih_int_encode(NVIC_GetPriority(PendSV_IRQn)),
                  fih_int_encode(PENDSV_PRIO_FOR_SCHED))) {
        FIH_RET(FIH_FAILURE);
    }
    FIH_RET(FIH_SUCCESS);
}
#endif

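/*
 * Configure the coprocessor and floating-point extensions used by the SPE
 * according to the build configuration: CP10/CP11 access, lazy stacking, the
 * FPCCR treat-as-secure settings, the FPU power control and, on Armv8.1-M,
 * CCR.TRD.
 */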
void tfm_arch_config_extensions(void)
{
#if defined(CONFIG_TFM_ENABLE_CP10CP11)
    /*
     * Enable SPE privileged and unprivileged access to the FP Extension.
     * Note: On Armv8-M, if Non-secure access to the FPU is needed, Secure
     * access to the FPU must be enabled first in order to avoid No Coprocessor
     * (NOCP) usage fault when a Non-secure to Secure service call is
     * interrupted while CONTROL.FPCA=1 is set by Non-secure. This is needed
     * even if SPE will not use the FPU directly.
     */
    SCB->CPACR |= (3U << 10U*2U)     /* enable CP10 full access */
                  | (3U << 11U*2U);  /* enable CP11 full access */
    __DSB();
    __ISB();
    /*
     * Permit Non-secure access to the Floating-point Extension.
     * Note: It is still necessary to set CPACR_NS to enable the FP Extension
     * in the NSPE. This configuration is left to NS privileged software.
     */
    SCB->NSACR |= SCB_NSACR_CP10_Msk | SCB_NSACR_CP11_Msk;
#endif

#if (CONFIG_TFM_FLOAT_ABI >= 1)

#ifdef CONFIG_TFM_LAZY_STACKING
    /* Enable lazy stacking. */
    FPU->FPCCR |= FPU_FPCCR_LSPEN_Msk;
#else
    /* Disable lazy stacking. */
    FPU->FPCCR &= ~FPU_FPCCR_LSPEN_Msk;
#endif

    /*
     * If the SPE will ever use the floating-point registers for sensitive
     * data, then FPCCR.ASPEN, FPCCR.TS, FPCCR.CLRONRET and FPCCR.CLRONRETS
     * must be set at initialisation and not changed again afterwards.
     * Let SPE decide the S/NS shared setting (LSPEN and CLRONRET) to avoid the
     * possible side-path brought by flexibility. This is not needed
     * if the SPE will never use floating-point but enables the FPU only for
     * avoiding NOCP faults during interrupted NSPE to SPE calls.
     */
    FPU->FPCCR |= FPU_FPCCR_ASPEN_Msk
                  | FPU_FPCCR_TS_Msk
                  | FPU_FPCCR_CLRONRET_Msk
                  | FPU_FPCCR_CLRONRETS_Msk
                  | FPU_FPCCR_LSPENS_Msk;

    /* Prevent non-secure from modifying FPU's power setting. */
#if defined(__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)
    ICB->CPPWR |=
#else
    SCnSCB->CPPWR |=
#endif
      SCnSCB_CPPWR_SUS11_Msk | SCnSCB_CPPWR_SUS10_Msk;
#endif /* CONFIG_TFM_FLOAT_ABI >= 1 */

#if defined(__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)
    SCB->CCR |= SCB_CCR_TRD_Msk;
#endif
}

#if (CONFIG_TFM_FLOAT_ABI > 0)
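/*
 * Clear FPSCR and S0-S31 so that no Secure floating-point data is left behind
 * in the FPU registers.
 */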
__attribute__((naked, noinline, used)) void tfm_arch_clear_fp_data(void)
{
    __ASM volatile(
                    "eor  r0, r0, r0         \n"
                    "vmsr fpscr, r0          \n"
#if (defined(__ARM_ARCH_8_1M_MAIN__))
/* IAR throws an error if the S0-S31 syntax is used.
 * Splitting the instruction into two works around the issue.
 */
#if defined(__ICCARM__)
                    "vscclrm {s0-s30,vpr}    \n"
                    "vscclrm {s31,vpr}       \n"
#else
                    "vscclrm {s0-s31,vpr}    \n"
#endif
#else
                    "vmov s0, r0             \n"
                    "vmov s1, r0             \n"
                    "vmov s2, r0             \n"
                    "vmov s3, r0             \n"
                    "vmov s4, r0             \n"
                    "vmov s5, r0             \n"
                    "vmov s6, r0             \n"
                    "vmov s7, r0             \n"
                    "vmov s8, r0             \n"
                    "vmov s9, r0             \n"
                    "vmov s10, r0            \n"
                    "vmov s11, r0            \n"
                    "vmov s12, r0            \n"
                    "vmov s13, r0            \n"
                    "vmov s14, r0            \n"
                    "vmov s15, r0            \n"
                    "vmov s16, r0            \n"
                    "vmov s17, r0            \n"
                    "vmov s18, r0            \n"
                    "vmov s19, r0            \n"
                    "vmov s20, r0            \n"
                    "vmov s21, r0            \n"
                    "vmov s22, r0            \n"
                    "vmov s23, r0            \n"
                    "vmov s24, r0            \n"
                    "vmov s25, r0            \n"
                    "vmov s26, r0            \n"
                    "vmov s27, r0            \n"
                    "vmov s28, r0            \n"
                    "vmov s29, r0            \n"
                    "vmov s30, r0            \n"
                    "vmov s31, r0            \n"
#endif
                    "bx   lr                 \n"
                  );
}
#endif