/*
 * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <context.h>
#include <common/debug.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <platform_def.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <services/spm_mm_partition.h>

#include "spm_common.h"
#include "spm_mm_private.h"
#include "spm_mm_shim_private.h"

/* Setup context of the Secure Partition */
void spm_sp_setup(sp_context_t *sp_ctx)
{
	cpu_context_t *ctx = &(sp_ctx->cpu_ctx);

	/* Pointer to the MP information from the platform port. */
	const spm_mm_boot_info_t *sp_boot_info =
			plat_get_secure_partition_boot_info(NULL);

	assert(sp_boot_info != NULL);

	/*
	 * Initialize CPU context
	 * ----------------------
	 */

	entry_point_info_t ep_info = {0};

	SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);

	/* Setup entrypoint and SPSR */
	ep_info.pc = sp_boot_info->sp_image_base;
	ep_info.spsr = SPSR_64(MODE_EL0, MODE_SP_EL0, DISABLE_ALL_EXCEPTIONS);

	/*
	 * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
	 *     The buffer will be mapped in the Secure EL1 translation regime
	 *     with Normal IS WBWA attributes and RO data and Execute Never
	 *     instruction access permissions.
	 *
	 * X1: Size of the buffer in bytes
	 *
	 * X2: cookie value (Implementation Defined)
	 *
	 * X3: cookie value (Implementation Defined)
	 *
	 * X4 to X7 = 0
	 */
	ep_info.args.arg0 = sp_boot_info->sp_shared_buf_base;
	ep_info.args.arg1 = sp_boot_info->sp_shared_buf_size;
	ep_info.args.arg2 = PLAT_SPM_COOKIE_0;
	ep_info.args.arg3 = PLAT_SPM_COOKIE_1;

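	/* Initialise the SP's CPU context from the entry point information above. */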
	cm_setup_context(ctx, &ep_info);

	/*
	 * SP_EL0: A non-zero value will indicate to the SP that the SPM has
	 * initialized the stack pointer for the current CPU through
	 * implementation defined means. The value will be 0 otherwise.
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_SP_EL0,
		      sp_boot_info->sp_stack_base + sp_boot_info->sp_pcpu_stack_size);

	/*
	 * Setup translation tables
	 * ------------------------
	 */

#if ENABLE_ASSERTIONS

	/* Get max granularity supported by the platform. */
	unsigned int max_granule = xlat_arch_get_max_supported_granule_size();

	VERBOSE("Max translation granule size supported: %u KiB\n",
		max_granule / 1024U);

	unsigned int max_granule_mask = max_granule - 1U;

	/* Base must be aligned to the max granularity */
	assert((sp_boot_info->sp_ns_comm_buf_base & max_granule_mask) == 0);

	/* Size must be a multiple of the max granularity */
	assert((sp_boot_info->sp_ns_comm_buf_size & max_granule_mask) == 0);

#endif /* ENABLE_ASSERTIONS */

	/* This region contains the exception vectors used at S-EL1. */
	const mmap_region_t sel1_exception_vectors =
		MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
				SPM_SHIM_EXCEPTIONS_SIZE,
				MT_CODE | MT_SECURE | MT_PRIVILEGED);
	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
			    &sel1_exception_vectors);

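	/* Add the memory regions provided by the platform for this Secure Partition. */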
	mmap_add_ctx(sp_ctx->xlat_ctx_handle,
		     plat_get_secure_partition_mmap(NULL));

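	/* Build the translation tables from the regions mapped into this context. */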
	init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);

	/*
	 * MMU-related registers
	 * ---------------------
	 */
	xlat_ctx_t *xlat_ctx = sp_ctx->xlat_ctx_handle;

	uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

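	/*
	 * Compute the MAIR, TCR and TTBR0 values for the EL1&0 translation
	 * regime. They are written into the SP's saved context below rather
	 * than into the live system registers.
	 */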
	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, 0, xlat_ctx->base_table,
		      xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
		      EL1_EL0_REGIME);

	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_MAIR_EL1,
		      mmu_cfg_params[MMU_CFG_MAIR]);

	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TCR_EL1,
		      mmu_cfg_params[MMU_CFG_TCR]);

	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TTBR0_EL1,
		      mmu_cfg_params[MMU_CFG_TTBR0]);

	/* Setup SCTLR_EL1 */
	u_register_t sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);

	sctlr_el1 |=
		/*SCTLR_EL1_RES1 |*/
		/* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
		SCTLR_UCI_BIT |
		/* RW regions at xlat regime EL1&0 are forced to be XN. */
		SCTLR_WXN_BIT |
		/* Don't trap to EL1 execution of WFI or WFE at EL0. */
		SCTLR_NTWI_BIT | SCTLR_NTWE_BIT |
		/* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
		SCTLR_UCT_BIT |
		/* Don't trap to EL1 execution of DC ZVA at EL0. */
		SCTLR_DZE_BIT |
		/* Enable SP Alignment check for EL0 */
		SCTLR_SA0_BIT |
		/* Don't change PSTATE.PAN on taking an exception to EL1 */
		SCTLR_SPAN_BIT |
		/* Allow cacheable data and instr. accesses to normal memory. */
		SCTLR_C_BIT | SCTLR_I_BIT |
		/* Enable MMU. */
		SCTLR_M_BIT
	;

	sctlr_el1 &= ~(
		/* Explicit data accesses at EL0 are little-endian. */
		SCTLR_E0E_BIT |
		/*
		 * Alignment fault checking disabled when at EL1 and EL0 as
		 * the UEFI spec permits unaligned accesses.
		 */
		SCTLR_A_BIT |
		/* Accesses to DAIF from EL0 are trapped to EL1. */
		SCTLR_UMA_BIT
	);

	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);

	/*
	 * Setup other system registers
	 * ----------------------------
	 */

	/* Shim Exception Vector Base Address */
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
		      SPM_SHIM_EXCEPTIONS_PTR);

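	/*
	 * Enable EL0 access to the physical and virtual counter and timer
	 * registers (setting these bits prevents such accesses from being
	 * trapped to EL1).
	 */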
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
		      EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);

	/*
	 * FPEN: Allow the Secure Partition to access FP/SIMD registers.
	 * Note that SPM will not do any saving/restoring of these registers on
	 * behalf of the SP. This falls under the SP's responsibility.
	 * TTA: Enable access to trace registers.
	 * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
	 */
	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
		      CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));

	/*
	 * Prepare information in buffer shared between EL3 and S-EL0
	 * ----------------------------------------------------------
	 */

	void *shared_buf_ptr = (void *) sp_boot_info->sp_shared_buf_base;

	/* Copy the boot information into the shared buffer with the SP. */
	assert((uintptr_t)shared_buf_ptr + sizeof(spm_mm_boot_info_t)
	       <= (sp_boot_info->sp_shared_buf_base + sp_boot_info->sp_shared_buf_size));

	assert(sp_boot_info->sp_shared_buf_base <=
	       (UINTPTR_MAX - sp_boot_info->sp_shared_buf_size + 1));

	memcpy((void *) shared_buf_ptr, (const void *) sp_boot_info,
	       sizeof(spm_mm_boot_info_t));

	/* Pointer to the MP information from the platform port. */
	spm_mm_mp_info_t *sp_mp_info =
		((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info;

	assert(sp_mp_info != NULL);

	/*
	 * Point the shared buffer MP information pointer to where the info will
	 * be populated, just after the boot info.
	 */
	((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info =
		(spm_mm_mp_info_t *) ((uintptr_t)shared_buf_ptr
				      + sizeof(spm_mm_boot_info_t));

	/*
	 * Update the shared buffer pointer to where the MP information for the
	 * payload will be populated
	 */
	shared_buf_ptr = ((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info;

	/*
	 * Copy the cpu information into the shared buffer area after the boot
	 * information.
	 */
	assert(sp_boot_info->num_cpus <= PLATFORM_CORE_COUNT);

	assert((uintptr_t)shared_buf_ptr
	       <= (sp_boot_info->sp_shared_buf_base + sp_boot_info->sp_shared_buf_size -
		   (sp_boot_info->num_cpus * sizeof(*sp_mp_info))));

	memcpy(shared_buf_ptr, (const void *) sp_mp_info,
	       sp_boot_info->num_cpus * sizeof(*sp_mp_info));

	/*
	 * Calculate the linear indices of cores in boot information for the
	 * secure partition and flag the primary CPU
	 */
	sp_mp_info = (spm_mm_mp_info_t *) shared_buf_ptr;

	for (unsigned int index = 0; index < sp_boot_info->num_cpus; index++) {
		u_register_t mpidr = sp_mp_info[index].mpidr;

		sp_mp_info[index].linear_id = plat_core_pos_by_mpidr(mpidr);
		if (plat_my_core_pos() == sp_mp_info[index].linear_id)
			sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU;
	}
}