/*
 * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include "compiler_ext_defs.h"
#include "security_defs.h"
#include "tfm_arch.h"
#include "tfm_core_trustzone.h"
#include "utilities.h"
#include "config_impl.h"

/*
 * Keep tfm_arch_clear_fp_data in the IAR build: it is referenced only from
 * inline assembly below, so the linker needs an explicit hint.
 */
#if defined(__ICCARM__) && (CONFIG_TFM_FLOAT_ABI >= 1)
#pragma required = tfm_arch_clear_fp_data
#endif

__naked void tfm_arch_free_msp_and_exc_ret(uint32_t msp_base,
                                           uint32_t exc_return)
{
    __ASM volatile(
        SYNTAX_UNIFIED
        "mov     r4, r0                         \n" /* Save msp_base */
        "mov     r5, r1                         \n" /* Save exc_return */
#if (CONFIG_TFM_FLOAT_ABI > 0)
        "bl      tfm_arch_clear_fp_data         \n"
#endif
        "mov     sp, r4                         \n" /* Rewind MSP to its base */
        "bx      r5                             \n" /* Perform the exception return */
    );
}
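
/*
 * Usage sketch (hypothetical call site; 'p_ctx_ctrl' and 'msp_base' are
 * assumed locals, not names from this file): once the first thread's context
 * has been loaded onto its PSP, the boot MSP can be rewound and the
 * EXC_RETURN payload consumed in one step.
 *
 *     uint32_t exc_ret = tfm_arch_refresh_hardware_context(p_ctx_ctrl);
 *     tfm_arch_free_msp_and_exc_ret(msp_base, exc_ret);
 */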
#if CONFIG_TFM_SPM_BACKEND_IPC == 1

extern uint32_t scheduler_lock;

void tfm_arch_set_context_ret_code(const struct context_ctrl_t *p_ctx_ctrl, uint32_t ret_code)
{
    /* Write the return value to the state context on the stack. */
    ((struct full_context_t *)p_ctx_ctrl->sp)->stat_ctx.r0 = ret_code;
}

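/*
 * Illustrative use (hypothetical status value): deposit a PSA status into
 * the preserved state context so that the suspended thread observes it in
 * r0 when it is resumed.
 *
 *     tfm_arch_set_context_ret_code(p_ctx_ctrl, (uint32_t)PSA_SUCCESS);
 */
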
__naked void arch_acquire_sched_lock(void)
{
    __asm volatile(
        SYNTAX_UNIFIED
        "   ldr    r0, =scheduler_lock                 \n"
        "   movs   r1, #"M2S(SCHEDULER_LOCKED)"        \n"
        "   str    r1, [r0, #0]                        \n" /* Lock scheduler */
        "   dsb    #0xf                                \n"
        "   bx     lr                                  \n"
    );
}

__naked uint32_t arch_release_sched_lock(void)
{
    __asm volatile(
        SYNTAX_UNIFIED
        "ldr    r1, =scheduler_lock                    \n"
        "ldr    r0, [r1, #0]                           \n" /* Return previous state */
        "movs   r2, #"M2S(SCHEDULER_UNLOCKED)"         \n" /* Unlock scheduler */
        "str    r2, [r1, #0]                           \n"
        "dsb    #0xf                                   \n"
        "bx     lr                                     \n"
    );
}

/*
 * Try to trigger the scheduler by setting PendSV if the scheduler is not
 * locked; otherwise, record the attempt. The scheduler is locked while SPM
 * performs context-related operations that must not be disturbed. The lock
 * is managed through the lock/unlock interfaces above with a public
 * variable.
 *
 * By the time this function returns to the caller, a scheduling event may
 * have been performed and 'R0' may contain significant return values for
 * the caller. Always return a 'uint32_t' in case the caller expects a
 * return value.
 *
 * Caution: This is an API for core usage; do not call it outside SPM.
 */
__naked uint32_t arch_attempt_schedule(void)
{
    __ASM volatile(
        SYNTAX_UNIFIED
        "ldr     r0, =scheduler_lock                   \n"
        "ldr     r2, [r0, #0]                          \n"
        "cmp     r2, #"M2S(SCHEDULER_UNLOCKED)"        \n"
        /* Skip PendSV if the scheduler is locked and mark scheduling attempted. */
        "bne     mark_schedule_attempted_and_exit      \n"
        "ldr     r0, ="M2S(SCB_ICSR_ADDR)"             \n"
        "ldr     r1, ="M2S(SCB_ICSR_PENDSVSET_BIT)"    \n"
        "str     r1, [r0, #0]                          \n" /* Set PendSV */
        "dsb     #0xf                                  \n"
        "isb                                           \n"
        "bx      lr                                    \n"
    "mark_schedule_attempted_and_exit:                 \n"
        "movs    r2, #"M2S(SCHEDULER_ATTEMPTED)"       \n"
        "str     r2, [r0, #0]                          \n"
        "dsb     #0xf                                  \n"
        "bx      lr                                    \n"
    );
}
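
/*
 * Usage sketch (hypothetical helper, for illustration only): the expected
 * pairing of the three interfaces above. A scheduling attempt recorded while
 * the lock was held is replayed once the lock is released.
 */
#if 0 /* Example only, not built */
static void example_locked_context_op(void (*context_op)(void))
{
    arch_acquire_sched_lock();          /* Defer PendSV-triggered scheduling */
    context_op();                       /* Context operation runs undisturbed */
    if (arch_release_sched_lock() == SCHEDULER_ATTEMPTED) {
        (void)arch_attempt_schedule();  /* Replay the deferred schedule */
    }
}
#endif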
#endif

#if CONFIG_TFM_SPM_BACKEND_SFN == 1
__naked void arch_clean_stack_and_launch(void *param, uintptr_t spm_init_func,
                                         uintptr_t ns_agent_entry, uint32_t msp_base)
{
    __ASM volatile(
        SYNTAX_UNIFIED
        "msr  msp, r3                       \n" /* Reset MSP */
        "mov  lr, r2                        \n" /*
                                                 * Set LR, the return address
                                                 * of the first init function
                                                 * (r1), to the second init
                                                 * function (r2) so that they
                                                 * are executed in turn. The
                                                 * return value of the first
                                                 * init function is passed to
                                                 * the second through r0.
                                                 */
        "movs r2, #"M2S(CONTROL_SPSEL_Msk)" \n"
        "mrs  r3, control                   \n"
        "orrs r3, r3, r2                    \n" /*
                                                 * CONTROL.SPSEL, bit [1],
                                                 * stack-pointer select.
                                                 * 0: Use SP_main as the
                                                 * current stack.
                                                 * 1: In Thread mode, use PSP
                                                 * as the current stack.
                                                 */
        "msr  control, r3                   \n" /* Use PSP as the current stack */
        "isb                                \n"
        "bx   r1                            \n" /* Execute the first init function */
    );
}
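
/*
 * Call sketch (hypothetical symbols 'boot_param', 'spm_init_fn',
 * 'ns_agent_fn' and 'msp_base'): 'spm_init_fn' runs first; its return value
 * arrives in r0 at 'ns_agent_fn'. Both run in Thread mode on the PSP, while
 * the freshly reset MSP is kept for exceptions.
 *
 *     arch_clean_stack_and_launch(boot_param, (uintptr_t)spm_init_fn,
 *                                 (uintptr_t)ns_agent_fn, msp_base);
 */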
#endif

void tfm_arch_init_context(struct context_ctrl_t *p_ctx_ctrl,
                           uintptr_t pfn, void *param, uintptr_t pfnlr)
{
    uintptr_t sp = p_ctx_ctrl->sp;
    uintptr_t sp_limit = p_ctx_ctrl->sp_limit;
    struct full_context_t *p_tctx =
            (struct full_context_t *)arch_seal_thread_stack(sp);

    /* Check that there is enough space on the stack. */
    if ((uintptr_t)p_tctx - sizeof(struct full_context_t) < sp_limit) {
        tfm_core_panic();
    }

    /* Reserve a full context (state context + additional context) on the stack. */

    /*
     * Although a full context is reserved on the stack, the additional
     * context within it does not need to be popped when performing an
     * exception return. It is reserved for scheduler usage, which requires a
     * full context.
     * Therefore, the DCRS bit of the EXC_RETURN payload is set to "1" in
     * later code, which means the default rule (no additional context) is
     * followed.
     * tfm_arch_refresh_hardware_context() must set the PSP to the state
     * context within the full context rather than to the full context
     * pointer itself.
     */
    p_tctx--;

    spm_memset(p_tctx, 0, sizeof(*p_tctx));

    /* Seed r0 (param), the return address (pfn) and LR (pfnlr) in the state context. */
    ARCH_CTXCTRL_EXCRET_PATTERN(&p_tctx->stat_ctx, param, 0, 0, 0, pfn, pfnlr);

    p_ctx_ctrl->exc_ret  = EXC_RETURN_THREAD_PSP;
    p_ctx_ctrl->sp       = (uintptr_t)p_tctx;
}
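
/*
 * Resulting stack layout (sketch; assumes the additional context precedes
 * the state context within struct full_context_t, as the comment above
 * implies; the stack grows downwards):
 *
 *   high addr:  stack seal            <- arch_seal_thread_stack(sp)
 *               state context         <- PSP at exception return
 *                 (r0-r3, r12, lr, pc, xpsr)
 *               additional context    <- p_ctx_ctrl->sp
 *                 (callee-saved registers, scheduler use only)
 *   low addr:   ... free stack down to sp_limit ...
 */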

uint32_t tfm_arch_refresh_hardware_context(const struct context_ctrl_t *p_ctx_ctrl)
{
    struct tfm_state_context_t *sc;

    /* Point at the state context within the reserved full context. */
    sc = &(((struct full_context_t *)(p_ctx_ctrl->sp))->stat_ctx);

    arch_update_process_sp((uint32_t)sc, p_ctx_ctrl->sp_limit);

    return p_ctx_ctrl->exc_ret;
}
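
/*
 * Flow sketch (hypothetical scheduler-side caller; 'p_next_ctx_ctrl' is an
 * assumed name): load the next thread's hardware context, then return its
 * EXC_RETURN payload from the exception so that the core pops the state
 * context from the freshly set PSP.
 *
 *     uint32_t exc_ret = tfm_arch_refresh_hardware_context(p_next_ctx_ctrl);
 */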