/*
 * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include "compiler_ext_defs.h"
#include "security_defs.h"
#include "tfm_arch.h"
#include "tfm_core_trustzone.h"
#include "utilities.h"
#include "config_impl.h"

#if defined(__ICCARM__) && (CONFIG_TFM_FLOAT_ABI >= 1)
#pragma required = tfm_arch_clear_fp_data
#endif

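/*
 * Reset the MSP to 'msp_base', freeing the stack currently in use, then
 * perform an exception return with the 'exc_return' payload. The arguments
 * are parked in r4/r5 so they survive the optional call to
 * tfm_arch_clear_fp_data().
 */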
__naked void tfm_arch_free_msp_and_exc_ret(uint32_t msp_base,
                                           uint32_t exc_return)
{
    __ASM volatile(
        SYNTAX_UNIFIED
        "mov     r4, r0                     \n"
        "mov     r5, r1                     \n"
#if (CONFIG_TFM_FLOAT_ABI > 0)
        "bl      tfm_arch_clear_fp_data     \n"
#endif
        "mov     sp, r4                     \n"
        "bx      r5                         \n"
    );
}
#if CONFIG_TFM_SPM_BACKEND_IPC == 1

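/*
 * Scheduler lock state shared with the SPM scheduler. It holds one of
 * SCHEDULER_UNLOCKED, SCHEDULER_LOCKED or SCHEDULER_ATTEMPTED (see the
 * assembly below); the last value records that a scheduling request arrived
 * while the scheduler was locked.
 */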
extern uint32_t scheduler_lock;

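/*
 * Write 'ret_code' into the R0 slot of the state context saved on the
 * thread's stack, so that it appears as the return value when that context
 * is resumed.
 */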
void tfm_arch_set_context_ret_code(void *p_ctx_ctrl, uint32_t ret_code)
{
    struct context_ctrl_t *ctx_ctrl = (struct context_ctrl_t *)p_ctx_ctrl;

    /* Write the return value to the state context on the stack. */
    ((struct full_context_t *)ctx_ctrl->sp)->stat_ctx.r0 = ret_code;
}

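/*
 * Lock the scheduler: any scheduling attempt made while the lock is held is
 * deferred and recorded by arch_attempt_schedule() below.
 */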
__naked void arch_acquire_sched_lock(void)
{
    __asm volatile(
        SYNTAX_UNIFIED
        "   ldr     r0, =scheduler_lock             \n"
        "   movs    r1, #"M2S(SCHEDULER_LOCKED)"    \n"
        "   str     r1, [r0, #0]                    \n"
        "   dsb     #0xf                            \n"
        "   bx      lr                              \n"
    );
}

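/*
 * Unlock the scheduler and return the previous lock state, so the caller can
 * tell whether a scheduling attempt was deferred while the lock was held
 * (return value == SCHEDULER_ATTEMPTED).
 */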
__naked uint32_t arch_release_sched_lock(void)
{
    __asm volatile(
        SYNTAX_UNIFIED
        "ldr     r1, =scheduler_lock                \n"
        "ldr     r0, [r1, #0]                       \n"
        "movs    r2, #"M2S(SCHEDULER_UNLOCKED)"     \n" /* Unlock scheduler */
        "str     r2, [r1, #0]                       \n"
        "dsb     #0xf                               \n"
        "bx      lr                                 \n"
    );
}

/*
 * Try to trigger the scheduler by setting PendSV if the scheduler is not
 * locked. Otherwise, record the attempt. The scheduler is locked while SPM is
 * performing context-related operations that must not be disturbed. The lock
 * is managed by the lock/unlock interfaces above with a public variable.
 *
 * By the time this function returns to the caller, a scheduling event might
 * have been performed and 'R0' may contain a significant return value for the
 * caller. Always keep a 'uint32_t' return type in case the caller expects a
 * return value.
 *
 * Caution: This is an API for core usage; do not call it outside the SPM.
 */
__naked uint32_t arch_attempt_schedule(void)
{
    __ASM volatile(
        SYNTAX_UNIFIED
        "ldr     r0, =scheduler_lock                    \n"
        "ldr     r2, [r0, #0]                           \n"
        "cmp     r2, #"M2S(SCHEDULER_UNLOCKED)"         \n"
        /* Skip PendSV if scheduler is locked and mark scheduling attempted. */
        "bne     mark_schedule_attempted_and_exit       \n"
        "ldr     r0, ="M2S(SCB_ICSR_ADDR)"              \n"
        "ldr     r1, ="M2S(SCB_ICSR_PENDSVSET_BIT)"     \n"
        "str     r1, [r0, #0]                           \n"
        "dsb     #0xf                                   \n"
        "isb                                            \n"
        "bx      lr                                     \n"
    "mark_schedule_attempted_and_exit:                  \n"
        "movs    r2, #"M2S(SCHEDULER_ATTEMPTED)"        \n"
        "str     r2, [r0, #0]                           \n"
        "dsb     #0xf                                   \n"
        "bx      lr                                     \n"
    );
}
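
/*
 * Typical locking pattern (an illustrative sketch, not code from this file;
 * the actual call sites live elsewhere in the SPM):
 *
 *   arch_acquire_sched_lock();
 *   ... perform context-related operations ...
 *   if (arch_release_sched_lock() == SCHEDULER_ATTEMPTED) {
 *       (void)arch_attempt_schedule();  // Replay the deferred request
 *   }
 */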
#endif /* CONFIG_TFM_SPM_BACKEND_IPC == 1 */

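/*
 * Build the initial full context for a thread on its own stack: 'pfn' is the
 * thread entry point (loaded into the return address of the state context),
 * 'param' its argument (placed in R0) and 'pfnlr' the value loaded into LR.
 * On return, the context control block points at the reserved context and
 * carries the EXC_RETURN payload to use for it.
 */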
void tfm_arch_init_context(void *p_ctx_ctrl,
                           uintptr_t pfn, void *param, uintptr_t pfnlr)
{
    uintptr_t sp = ((struct context_ctrl_t *)p_ctx_ctrl)->sp;
    uintptr_t sp_limit = ((struct context_ctrl_t *)p_ctx_ctrl)->sp_limit;
    struct full_context_t *p_tctx =
            (struct full_context_t *)arch_seal_thread_stack(sp);

    /* Check if there is enough space on the stack. */
    if ((uintptr_t)p_tctx - sizeof(struct full_context_t) < sp_limit) {
        tfm_core_panic();
    }

    /* Reserve a full context (state context + additional context) on the stack. */

    /*
     * Although a full context is reserved from the stack, the additional
     * context within it does not need to be popped out on exception return.
     * It is reserved for scheduler usage, which requires full contexts.
     * Therefore the DCRS bit of the EXC_RETURN payload is set to "1" in later
     * code, which means the default rule (no additional context) is followed.
     * tfm_arch_refresh_hardware_context() must set the PSP to the state
     * context within the full context, rather than to the full context
     * pointer itself.
     */
    p_tctx--;

    spm_memset(p_tctx, 0, sizeof(*p_tctx));

    ARCH_CTXCTRL_EXCRET_PATTERN(&p_tctx->stat_ctx, param, 0, 0, 0, pfn, pfnlr);

    ((struct context_ctrl_t *)p_ctx_ctrl)->exc_ret = EXC_RETURN_THREAD_PSP;
    ((struct context_ctrl_t *)p_ctx_ctrl)->sp = (uintptr_t)p_tctx;
}

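/*
 * Load the thread's saved context into hardware: point the PSP at the state
 * context inside the saved full context (see the note in
 * tfm_arch_init_context() above), update the stack limit, and return the
 * EXC_RETURN payload to use for the exception return.
 */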
uint32_t tfm_arch_refresh_hardware_context(void *p_ctx_ctrl)
{
    struct context_ctrl_t *ctx_ctrl;
    struct tfm_state_context_t *sc;

    ctx_ctrl = (struct context_ctrl_t *)p_ctx_ctrl;
    sc = &(((struct full_context_t *)(ctx_ctrl->sp))->stat_ctx);

    arch_update_process_sp((uint32_t)sc, ctx_ctrl->sp_limit);

    return ctx_ctrl->exc_ret;
}