/*
 * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
 * Copyright 2020-2022 NXP. All rights reserved.
 * Copyright (c) 2024 Cypress Semiconductor Corporation (an Infineon
 * company) or an affiliate of Cypress Semiconductor Corporation. All rights
 * reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <arm_cmse.h>
#include <stddef.h>
#include <stdint.h>
#include "array.h"
#include "tfm_hal_device_header.h"
#include "Driver_Common.h"
#include "mmio_defs.h"
#include "mpu_armv8m_drv.h"
#include "region.h"
#include "target_cfg.h"
#include "tfm_hal_defs.h"
#include "tfm_hal_isolation.h"
#include "region_defs.h"
#include "tfm_peripherals_def.h"
#include "load/partition_defs.h"
#include "load/asset_defs.h"
#include "load/spm_load_api.h"
#include "fih.h"

extern const struct memory_region_limits memory_regions;

/* Define Peripherals NS address range for the platform */
#define PERIPHERALS_BASE_NS_START (0x40000000)
#define PERIPHERALS_BASE_NS_END   (0x4FFFFFFF)

/* It can be retrieved from the MPU_TYPE register. */
#define MPU_REGION_NUM 8
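/* For reference, the implemented region count could instead be read at run
 * time from the MPU_TYPE.DREGION field:
 * (MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos.
 */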
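/*
 * Boundary handle used for SPM itself: always privileged and, under
 * isolation level 3, additionally marked as SPM-owned so that
 * tfm_hal_activate_boundary() skips the MPU reconfiguration for it.
 */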
#if TFM_ISOLATION_LEVEL == 3
#define PROT_BOUNDARY_VAL \
    (((1U << HANDLE_ATTR_PRIV_POS) & HANDLE_ATTR_PRIV_MASK) | \
     ((1U << HANDLE_ATTR_SPM_POS) & HANDLE_ATTR_SPM_MASK))
#else
#define PROT_BOUNDARY_VAL \
    ((1U << HANDLE_ATTR_PRIV_POS) & HANDLE_ATTR_PRIV_MASK)
#endif

#ifdef CONFIG_TFM_ENABLE_MEMORY_PROTECT
static uint32_t n_configured_regions = 0;
struct mpu_armv8m_dev_t dev_mpu_s = { MPU_BASE };

#ifdef CONFIG_TFM_PARTITION_META
REGION_DECLARE(Image$$, TFM_SP_META_PTR, $$ZI$$Base);
REGION_DECLARE(Image$$, TFM_SP_META_PTR_END, $$ZI$$Limit);
#endif

#if TFM_ISOLATION_LEVEL == 3
static uint32_t idx_boundary_handle = 0;
REGION_DECLARE(Image$$, PT_RO_START, $$Base);
REGION_DECLARE(Image$$, PT_RO_END, $$Base);
REGION_DECLARE(Image$$, PT_PRIV_RWZI_START, $$Base);
REGION_DECLARE(Image$$, PT_PRIV_RWZI_END, $$Base);

static struct mpu_armv8m_region_cfg_t isolation_regions[] = {
    {
        0, /* will be updated before using */
        (uint32_t)&REGION_NAME(Image$$, PT_RO_START, $$Base),
        (uint32_t)&REGION_NAME(Image$$, PT_RO_END, $$Base) - 1,
        MPU_ARMV8M_MAIR_ATTR_CODE_IDX,
        MPU_ARMV8M_XN_EXEC_OK,
        MPU_ARMV8M_AP_RO_PRIV_UNPRIV,
        MPU_ARMV8M_SH_NONE,
    },
    /* For isolation Level 3, set up static isolation for privileged data.
     * Unprivileged data is dynamically set during Partition scheduling.
     */
    {
        0, /* will be updated before using */
        (uint32_t)&REGION_NAME(Image$$, PT_PRIV_RWZI_START, $$Base),
        (uint32_t)&REGION_NAME(Image$$, PT_PRIV_RWZI_END, $$Base) - 1,
        MPU_ARMV8M_MAIR_ATTR_DATA_IDX,
        MPU_ARMV8M_XN_EXEC_NEVER,
        MPU_ARMV8M_AP_RW_PRIV_ONLY,
        MPU_ARMV8M_SH_NONE,
    },
#ifdef CONFIG_TFM_PARTITION_META
    {
        0, /* will be updated before using */
        (uint32_t)&REGION_NAME(Image$$, TFM_SP_META_PTR, $$ZI$$Base),
        (uint32_t)&REGION_NAME(Image$$, TFM_SP_META_PTR_END, $$ZI$$Limit) - 1,
        MPU_ARMV8M_MAIR_ATTR_DATA_IDX,
        MPU_ARMV8M_XN_EXEC_NEVER,
        MPU_ARMV8M_AP_RW_PRIV_UNPRIV,
        MPU_ARMV8M_SH_NONE,
    }
#endif
};
#else /* TFM_ISOLATION_LEVEL == 3 */

REGION_DECLARE(Image$$, ER_VENEER, $$Base);
REGION_DECLARE(Image$$, VENEER_ALIGN, $$Limit);
REGION_DECLARE(Image$$, TFM_UNPRIV_CODE_START, $$RO$$Base);
REGION_DECLARE(Image$$, TFM_UNPRIV_CODE_END, $$RO$$Limit);
REGION_DECLARE(Image$$, TFM_APP_CODE_START, $$Base);
REGION_DECLARE(Image$$, TFM_APP_CODE_END, $$Base);
REGION_DECLARE(Image$$, TFM_APP_RW_STACK_START, $$Base);
REGION_DECLARE(Image$$, TFM_APP_RW_STACK_END, $$Base);

#endif /* TFM_ISOLATION_LEVEL == 3 */
#endif /* CONFIG_TFM_ENABLE_MEMORY_PROTECT */

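/*
 * Set up the static isolation boundaries: SAU/IDAU, MPC and PPC between SPE
 * and NSPE, plus the static MPU regions inside SPE, then report the SPM
 * boundary handle back through p_spm_boundary.
 */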
FIH_RET_TYPE(enum tfm_hal_status_t) tfm_hal_set_up_static_boundaries(
                                                uintptr_t *p_spm_boundary)
{
    /* Set up isolation boundaries between SPE and NSPE */
    sau_and_idau_cfg();

    if (mpc_init_cfg() != ARM_DRIVER_OK) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }

    if (ppc_init_cfg() != ARM_DRIVER_OK) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }

    /* Set up static isolation boundaries inside SPE */
#ifdef CONFIG_TFM_ENABLE_MEMORY_PROTECT
    fih_int fih_rc = FIH_FAILURE;
    struct mpu_armv8m_dev_t dev_mpu_s = { MPU_BASE };

    mpu_armv8m_clean(&dev_mpu_s);
#if TFM_ISOLATION_LEVEL == 3
    int32_t i;

    /*
     * Update MPU region numbers. The numbers start from 0 and are continuous.
     * Under isolation level 3, at least one MPU region is reserved for the
     * private data asset.
     */
    if (ARRAY_SIZE(isolation_regions) >= MPU_REGION_NUM) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }
    for (i = 0; i < ARRAY_SIZE(isolation_regions); i++) {
        /* Update region number */
        isolation_regions[i].region_nr = i;
        /* Enable regions */
        FIH_CALL(mpu_armv8m_region_enable, fih_rc, &dev_mpu_s, &isolation_regions[i]);
        if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
            FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
        }
    }
    n_configured_regions = i;
#else /* TFM_ISOLATION_LEVEL == 3 */
    struct mpu_armv8m_region_cfg_t region_cfg;

    /* Veneer region */
    region_cfg.region_nr = n_configured_regions;
    region_cfg.region_base = (uint32_t)&REGION_NAME(Image$$, ER_VENEER, $$Base);
    region_cfg.region_limit =
        (uint32_t)&REGION_NAME(Image$$, VENEER_ALIGN, $$Limit) - 1;
    region_cfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_CODE_IDX;
    region_cfg.attr_access = MPU_ARMV8M_AP_RO_PRIV_UNPRIV;
    region_cfg.attr_sh = MPU_ARMV8M_SH_NONE;
    region_cfg.attr_exec = MPU_ARMV8M_XN_EXEC_OK;
    FIH_CALL(mpu_armv8m_region_enable, fih_rc, &dev_mpu_s, &region_cfg);
    if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }
    n_configured_regions++;

#if TARGET_DEBUG_LOG //NXP
    SPMLOG_DBGMSGVAL("Veneers starts from : ", region_cfg.region_base);
    SPMLOG_DBGMSGVAL("Veneers ends at : ", region_cfg.region_base +
                                           region_cfg.region_limit);
#endif

    /* TFM Core unprivileged code region */
    region_cfg.region_nr = n_configured_regions;
    region_cfg.region_base =
        (uint32_t)&REGION_NAME(Image$$, TFM_UNPRIV_CODE_START, $$RO$$Base);
    region_cfg.region_limit =
        (uint32_t)&REGION_NAME(Image$$, TFM_UNPRIV_CODE_END, $$RO$$Limit) - 1;
    region_cfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_CODE_IDX;
    region_cfg.attr_access = MPU_ARMV8M_AP_RO_PRIV_UNPRIV;
    region_cfg.attr_sh = MPU_ARMV8M_SH_NONE;
    region_cfg.attr_exec = MPU_ARMV8M_XN_EXEC_OK;
    FIH_CALL(mpu_armv8m_region_enable, fih_rc, &dev_mpu_s, &region_cfg);
    if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }
    n_configured_regions++;

#if TARGET_DEBUG_LOG //NXP
    SPMLOG_DBGMSGVAL("Code section starts from : ", region_cfg.region_base);
    SPMLOG_DBGMSGVAL("Code section ends at : ", region_cfg.region_base +
                                                region_cfg.region_limit);
#endif

    /* RO region */
    region_cfg.region_nr = n_configured_regions;
    region_cfg.region_base =
        (uint32_t)&REGION_NAME(Image$$, TFM_APP_CODE_START, $$Base);
    region_cfg.region_limit =
        (uint32_t)&REGION_NAME(Image$$, TFM_APP_CODE_END, $$Base) - 1;
    region_cfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_CODE_IDX;
    region_cfg.attr_access = MPU_ARMV8M_AP_RO_PRIV_UNPRIV;
    region_cfg.attr_sh = MPU_ARMV8M_SH_NONE;
    region_cfg.attr_exec = MPU_ARMV8M_XN_EXEC_OK;
    FIH_CALL(mpu_armv8m_region_enable, fih_rc, &dev_mpu_s, &region_cfg);
    if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }
    n_configured_regions++;

#if TARGET_DEBUG_LOG //NXP
    SPMLOG_DBGMSGVAL("RO APP CODE starts from : ", region_cfg.region_base);
    SPMLOG_DBGMSGVAL("RO APP CODE ends at : ", region_cfg.region_base +
                                               region_cfg.region_limit);
#endif

    /* RW, ZI and stack as one region */
    region_cfg.region_nr = n_configured_regions;
    region_cfg.region_base =
        (uint32_t)&REGION_NAME(Image$$, TFM_APP_RW_STACK_START, $$Base);
    region_cfg.region_limit =
        (uint32_t)&REGION_NAME(Image$$, TFM_APP_RW_STACK_END, $$Base) - 1;
    region_cfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_DATA_IDX;
    region_cfg.attr_access = MPU_ARMV8M_AP_RW_PRIV_UNPRIV;
    region_cfg.attr_sh = MPU_ARMV8M_SH_NONE;
    region_cfg.attr_exec = MPU_ARMV8M_XN_EXEC_NEVER;
    FIH_CALL(mpu_armv8m_region_enable, fih_rc, &dev_mpu_s, &region_cfg);
    if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }
    n_configured_regions++;

#if TARGET_DEBUG_LOG //NXP
    SPMLOG_DBGMSGVAL("RW, ZI APP starts from : ", region_cfg.region_base);
    SPMLOG_DBGMSGVAL("RW, ZI APP ends at : ", region_cfg.region_base +
                                              region_cfg.region_limit);
#endif

    /* NS Data, marked as non-privileged */ //NXP
    region_cfg.region_nr = n_configured_regions;
    region_cfg.region_base = NS_DATA_START;
    region_cfg.region_limit = NS_DATA_LIMIT;
    region_cfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_DATA_IDX;
    region_cfg.attr_access = MPU_ARMV8M_AP_RW_PRIV_UNPRIV;
    region_cfg.attr_sh = MPU_ARMV8M_SH_NONE;
    region_cfg.attr_exec = MPU_ARMV8M_XN_EXEC_NEVER;
    FIH_CALL(mpu_armv8m_region_enable, fih_rc, &dev_mpu_s, &region_cfg);
    if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }
    n_configured_regions++;

#if TARGET_DEBUG_LOG
    SPMLOG_DBGMSGVAL("NS Data starts from : ", region_cfg.region_base);
    SPMLOG_DBGMSGVAL("NS Data ends at : ", region_cfg.region_base +
                                           region_cfg.region_limit);
#endif

#ifdef CONFIG_TFM_PARTITION_META
    /* TFM partition metadata pointer region */
    region_cfg.region_nr = n_configured_regions;
    region_cfg.region_base =
        (uint32_t)&REGION_NAME(Image$$, TFM_SP_META_PTR, $$ZI$$Base);
    region_cfg.region_limit =
        (uint32_t)&REGION_NAME(Image$$, TFM_SP_META_PTR_END, $$ZI$$Limit) - 1;
    region_cfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_DATA_IDX;
    region_cfg.attr_access = MPU_ARMV8M_AP_RW_PRIV_UNPRIV;
    region_cfg.attr_sh = MPU_ARMV8M_SH_NONE;
    region_cfg.attr_exec = MPU_ARMV8M_XN_EXEC_NEVER;
    FIH_CALL(mpu_armv8m_region_enable, fih_rc, &dev_mpu_s, &region_cfg);
    if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }
    n_configured_regions++;
#endif /* CONFIG_TFM_PARTITION_META */
#endif /* TFM_ISOLATION_LEVEL == 3 */

    /* Enable MPU */
    FIH_CALL(mpu_armv8m_enable, fih_rc, &dev_mpu_s,
             PRIVILEGED_DEFAULT_ENABLE, HARDFAULT_NMI_ENABLE);
    if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }
#endif /* CONFIG_TFM_ENABLE_MEMORY_PROTECT */

    *p_spm_boundary = (uintptr_t)PROT_BOUNDARY_VAL;

    FIH_RET(fih_int_encode(TFM_HAL_SUCCESS));
}

/*
 * Implementation of tfm_hal_bind_boundary():
 *
 * The API encodes some attributes into a handle and returns it to SPM.
 * The attributes include isolation boundaries, privilege, and MMIO information.
 * When the scheduler switches running partitions, SPM compares the handles of
 * the two partitions to know whether a boundary update is necessary. If an
 * update is required, SPM passes the handle to the platform to apply the
 * platform settings and update the isolation boundaries.
 *
 * The handle must be unique under isolation level 3. The implementation
 * encodes an index in the highest 8 bits to ensure handle uniqueness, while
 * under isolation level 1/2 handles may not be unique.
 *
 * The encoding format assignment:
 * - For isolation level 3
 *   BIT | 31        24 | 23         20 | ... | 7           4 | 3       0 |
 *       | Unique Index | Region Attr 5 | ... | Region Attr 1 | Base Attr |
 *
 *   In which the "Region Attr i" is:
 *   BIT |       3      | 2          0 |
 *       | 1: RW, 0: RO | MMIO Index   |
 *
 *   In which the "Base Attr" is:
 *   BIT |                1               |                    0                    |
 *       | 1: privileged, 0: unprivileged | 1: TrustZone-specific NSPE, 0: Secure partition |
 *
 * - For isolation level 1/2
 *   BIT | 31     2 |                1               |                    0                    |
 *       | Reserved | 1: privileged, 0: unprivileged | 1: TrustZone-specific NSPE, 0: Secure partition |
 *
 * This is a reference implementation and it may have some limitations:
 * 1. The maximum number of allowed MMIO regions is 5.
 * 2. The highest 8 bits are used for the index, so at most 256 unique handles
 *    are supported.
 */
FIH_RET_TYPE(enum tfm_hal_status_t) tfm_hal_bind_boundary(
                                    const struct partition_load_info_t *p_ldinf,
                                    uintptr_t *p_boundary)
{
    uint32_t i, j;
    bool privileged;
    bool ns_agent;
    uint32_t partition_attrs = 0;
    const struct asset_desc_t *p_asset;
    struct platform_data_t *plat_data_ptr;
#if TFM_ISOLATION_LEVEL == 2
    struct mpu_armv8m_region_cfg_t localcfg;
    fih_int fih_rc = FIH_FAILURE;
#endif

    if (!p_ldinf || !p_boundary) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }

#if TFM_ISOLATION_LEVEL == 1
    privileged = true;
#else
    privileged = IS_PSA_ROT(p_ldinf);
#endif

    ns_agent = IS_NS_AGENT(p_ldinf);
    p_asset = LOAD_INFO_ASSET(p_ldinf);

    /*
     * Validate that each named MMIO asset of the partition is allowed by the
     * platform; other asset types are skipped here.
     *
     * NOTE: Validation of numbered MMIO needs to be added if the platform
     * requires it.
     */
    for (i = 0; i < p_ldinf->nassets; i++) {
        if (!(p_asset[i].attr & ASSET_ATTR_NAMED_MMIO)) {
            continue;
        }
        for (j = 0; j < ARRAY_SIZE(partition_named_mmio_list); j++) {
            if (p_asset[i].dev.dev_ref == partition_named_mmio_list[j]) {
                break;
            }
        }

        if (j == ARRAY_SIZE(partition_named_mmio_list)) {
            /* The MMIO asset is not in the allowed list of the platform. */
            FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
        }
        /* Assume PPC & MPC settings are required even under level 1 */
        plat_data_ptr = REFERENCE_TO_PTR(p_asset[i].dev.dev_ref,
                                         struct platform_data_t *);
        ppc_configure_to_secure(plat_data_ptr, privileged);
#if TFM_ISOLATION_LEVEL == 2
        /*
         * Static boundaries are set. Set up MPU regions for MMIO.
         * Set up regions for unprivileged assets only.
         */
        if (!privileged) {
            localcfg.region_base = plat_data_ptr->periph_start;
            localcfg.region_limit = plat_data_ptr->periph_limit;
            localcfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_DEVICE_IDX;
            localcfg.attr_access = MPU_ARMV8M_AP_RW_PRIV_UNPRIV;
            localcfg.attr_sh = MPU_ARMV8M_SH_NONE;
            localcfg.attr_exec = MPU_ARMV8M_XN_EXEC_NEVER;
            localcfg.region_nr = n_configured_regions++;

            FIH_CALL(mpu_armv8m_region_enable, fih_rc, &dev_mpu_s, &localcfg);
            if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
                FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
            }
        }
#elif TFM_ISOLATION_LEVEL == 3
        /* Encode MMIO attributes into the "partition_attrs". */
        partition_attrs <<= HANDLE_PER_ATTR_BITS;
        partition_attrs |= ((j + 1) & HANDLE_ATTR_INDEX_MASK);
        if (p_asset[i].attr & ASSET_ATTR_READ_WRITE) {
            partition_attrs |= HANDLE_ATTR_RW_POS;
        }
#endif
    }

#if TFM_ISOLATION_LEVEL == 3
    partition_attrs <<= HANDLE_PER_ATTR_BITS;
    /*
     * The highest 8 bits are reserved for the index. If they are non-zero at
     * this point, the number of MMIO regions has exceeded the limit of 5.
     */
    if (partition_attrs & HANDLE_INDEX_MASK) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }
    HANDLE_ENCODE_INDEX(partition_attrs, idx_boundary_handle);
#endif

    partition_attrs |= ((uint32_t)privileged << HANDLE_ATTR_PRIV_POS) &
                        HANDLE_ATTR_PRIV_MASK;
    partition_attrs |= ((uint32_t)ns_agent << HANDLE_ATTR_NS_POS) &
                        HANDLE_ATTR_NS_MASK;
    *p_boundary = (uintptr_t)partition_attrs;

    FIH_RET(fih_int_encode(TFM_HAL_SUCCESS));
}
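/*
 * Apply the boundary produced by tfm_hal_bind_boundary(): set CONTROL.nPRIV
 * accordingly and, under isolation level 3, reprogram the dynamic MPU
 * regions for the partition's runtime memory and named MMIO assets.
 */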
FIH_RET_TYPE(enum tfm_hal_status_t) tfm_hal_activate_boundary(
                                    const struct partition_load_info_t *p_ldinf,
                                    uintptr_t boundary)
{
    CONTROL_Type ctrl;
    uint32_t local_handle = (uint32_t)boundary;
    bool privileged = !!(local_handle & HANDLE_ATTR_PRIV_MASK);
#if TFM_ISOLATION_LEVEL == 3
    bool is_spm = !!(local_handle & HANDLE_ATTR_SPM_MASK);
    fih_int fih_rc = FIH_FAILURE;
    struct mpu_armv8m_region_cfg_t localcfg;
    uint32_t i, mmio_index;
    struct platform_data_t *plat_data_ptr;
    const struct asset_desc_t *rt_mem;
#endif

    /* The privilege level always needs to be set */
    ctrl.w = __get_CONTROL();
    ctrl.b.nPRIV = privileged ? 0 : 1;
    __set_CONTROL(ctrl.w);

#if TFM_ISOLATION_LEVEL == 3
    if (is_spm) {
        FIH_RET(fih_int_encode(TFM_HAL_SUCCESS));
    }

    if (!p_ldinf) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
    }

    /* Update regions, for unprivileged partitions only */
    if (privileged) {
        FIH_RET(fih_int_encode(TFM_HAL_SUCCESS));
    }

    /* Set up the runtime memory first */
    localcfg.attr_exec = MPU_ARMV8M_XN_EXEC_NEVER;
    localcfg.attr_sh = MPU_ARMV8M_SH_NONE;
    localcfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_DATA_IDX;
    localcfg.attr_access = MPU_ARMV8M_AP_RW_PRIV_UNPRIV;
    rt_mem = LOAD_INFO_ASSET(p_ldinf);
    /*
     * The first item is the only runtime memory asset. Platforms with
     * multiple memory assets need to revisit this loop.
     */
    for (i = 0;
         i < p_ldinf->nassets && !(rt_mem[i].attr & ASSET_ATTR_MMIO);
         i++) {
        localcfg.region_nr = n_configured_regions + i;
        localcfg.region_base = rt_mem[i].mem.start;
        localcfg.region_limit = rt_mem[i].mem.limit;

        FIH_CALL(mpu_armv8m_region_enable, fih_rc, &dev_mpu_s, &localcfg);
        if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
            FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
        }
    }

    /* Named MMIO part */
    local_handle = local_handle & (~HANDLE_INDEX_MASK);
    local_handle >>= HANDLE_PER_ATTR_BITS;
    mmio_index = local_handle & HANDLE_ATTR_INDEX_MASK;

    localcfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_DEVICE_IDX;

    i = n_configured_regions + i;
    while (mmio_index && i < MPU_REGION_NUM) {
        plat_data_ptr =
            (struct platform_data_t *)partition_named_mmio_list[mmio_index - 1];
        localcfg.region_nr = i++;
        localcfg.attr_access = (local_handle & HANDLE_ATTR_RW_POS) ?
                                MPU_ARMV8M_AP_RW_PRIV_UNPRIV :
                                MPU_ARMV8M_AP_RO_PRIV_UNPRIV;
        localcfg.region_base = plat_data_ptr->periph_start;
        localcfg.region_limit = plat_data_ptr->periph_limit;

        FIH_CALL(mpu_armv8m_region_enable, fih_rc, &dev_mpu_s, &localcfg);
        if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
            FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
        }

        local_handle >>= HANDLE_PER_ATTR_BITS;
        mmio_index = local_handle & HANDLE_ATTR_INDEX_MASK;
    }

    /* Disable unused regions */
    while (i < MPU_REGION_NUM) {
        FIH_CALL(mpu_armv8m_region_disable, fih_rc, &dev_mpu_s, i++);
        if (fih_not_eq(fih_rc, fih_int_encode(MPU_ARMV8M_OK))) {
            FIH_RET(fih_int_encode(TFM_HAL_ERROR_GENERIC));
        }
    }
#endif /* TFM_ISOLATION_LEVEL == 3 */
    FIH_RET(fih_int_encode(TFM_HAL_SUCCESS));
}
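/*
 * Check whether a memory range is accessible under the given boundary by
 * querying the security attribution and MPU view with
 * cmse_check_address_range(); the boundary attributes select the
 * unprivileged and non-secure variants of the check.
 */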
FIH_RET_TYPE(enum tfm_hal_status_t) tfm_hal_memory_check(
                                        uintptr_t boundary, uintptr_t base,
                                        size_t size, uint32_t access_type)
{
    int flags = 0;

    /* If size is zero, this indicates an empty buffer and base is ignored */
    if (size == 0) {
        FIH_RET(fih_int_encode(TFM_HAL_SUCCESS));
    }

    if (!base) {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_INVALID_INPUT));
    }

    if ((access_type & TFM_HAL_ACCESS_READWRITE) == TFM_HAL_ACCESS_READWRITE) {
        flags |= CMSE_MPU_READWRITE;
    } else if (access_type & TFM_HAL_ACCESS_READABLE) {
        flags |= CMSE_MPU_READ;
    } else {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_INVALID_INPUT));
    }

    if (!((uint32_t)boundary & HANDLE_ATTR_PRIV_MASK)) {
        flags |= CMSE_MPU_UNPRIV;
    }

    if ((uint32_t)boundary & HANDLE_ATTR_NS_MASK) {
        CONTROL_Type ctrl;
        ctrl.w = __TZ_get_CONTROL_NS();
        if (ctrl.b.nPRIV == 1) {
            flags |= CMSE_MPU_UNPRIV;
        } else {
            flags &= ~CMSE_MPU_UNPRIV;
        }
        flags |= CMSE_NONSECURE;
    }

    if (cmse_check_address_range((void *)base, size, flags) != NULL) {
        FIH_RET(fih_int_encode(TFM_HAL_SUCCESS));
    } else {
        FIH_RET(fih_int_encode(TFM_HAL_ERROR_MEM_FAULT));
    }
}
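/*
 * No boundary switch is needed when both partitions share the same handle or
 * when both are privileged; any other combination requires an update.
 */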
FIH_RET_TYPE(bool) tfm_hal_boundary_need_switch(uintptr_t boundary_from,
                                                uintptr_t boundary_to)
{
    if (boundary_from == boundary_to) {
        FIH_RET(fih_int_encode(false));
    }

    if (((uint32_t)boundary_from & HANDLE_ATTR_PRIV_MASK) &&
        ((uint32_t)boundary_to & HANDLE_ATTR_PRIV_MASK)) {
        FIH_RET(fih_int_encode(false));
    }
    FIH_RET(fih_int_encode(true));
}

/*------------------- SAU/IDAU configuration functions -----------------------*/

void sau_and_idau_cfg(void)
{
    /* Ensure all memory accesses are completed */
    __DMB();

    /* Enables SAU */
    TZ_SAU_Enable();

    /* Configures SAU regions to be non-secure */
    SAU->RNR = 0U;
    SAU->RBAR = (memory_regions.non_secure_partition_base
                 & SAU_RBAR_BADDR_Msk);
    SAU->RLAR = (memory_regions.non_secure_partition_limit
                 & SAU_RLAR_LADDR_Msk)
                | SAU_RLAR_ENABLE_Msk;

    SAU->RNR = 1U;
    SAU->RBAR = (NS_DATA_START & SAU_RBAR_BADDR_Msk);
    SAU->RLAR = (NS_DATA_LIMIT & SAU_RLAR_LADDR_Msk) | SAU_RLAR_ENABLE_Msk;

    /* Configures veneers region to be non-secure callable */
    SAU->RNR = 2U;
    SAU->RBAR = (memory_regions.veneer_base & SAU_RBAR_BADDR_Msk);
    SAU->RLAR = (memory_regions.veneer_limit & SAU_RLAR_LADDR_Msk)
                | SAU_RLAR_ENABLE_Msk
                | SAU_RLAR_NSC_Msk;

    /* Configure the peripherals space */
    SAU->RNR = 3U;
    SAU->RBAR = (PERIPHERALS_BASE_NS_START & SAU_RBAR_BADDR_Msk);
    SAU->RLAR = (PERIPHERALS_BASE_NS_END & SAU_RLAR_LADDR_Msk)
                | SAU_RLAR_ENABLE_Msk;

#ifdef BL2
    /* Secondary image partition */
    SAU->RNR = 4U;
    SAU->RBAR = (memory_regions.secondary_partition_base & SAU_RBAR_BADDR_Msk);
    SAU->RLAR = (memory_regions.secondary_partition_limit & SAU_RLAR_LADDR_Msk)
                | SAU_RLAR_ENABLE_Msk;
#endif /* BL2 */

    /* Ensure the write is completed and flush pipeline */
    __DSB();
    __ISB();
}
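/*
 * Route a peripheral to the secure side. Depending on the platform this is
 * done either through the AHB secure controller rule bits or through a TRDC
 * MBC block configuration; "privileged" selects the secure access policy.
 */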
void ppc_configure_to_secure(struct platform_data_t *platform_data, bool privileged)
{
#ifdef AHB_SECURE_CTRL
    /* Clear the NS flag for the peripheral to prevent NS access */
    if (platform_data && platform_data->periph_ppc_bank)
    {
        /* 0b00..Non-secure and non-privileged user access allowed.
         * 0b01..Non-secure and privileged access allowed.
         * 0b10..Secure and non-privileged user access allowed.
         * 0b11..Secure and privileged/non-privileged user access allowed.
         */
        /* Set to secure and privileged user access 0x3. */
        *platform_data->periph_ppc_bank = (*platform_data->periph_ppc_bank) |
            (((privileged == true) ? 0x3 : 0x2) << (platform_data->periph_ppc_loc));
    }
#endif
#ifdef TRDC
    /* If the peripheral is not shared with non-secure world, give it SEC access */
    if (platform_data && platform_data->nseEnable == false)
    {
        trdc_mbc_memory_block_config_t mbcBlockConfig;

        (void)memset(&mbcBlockConfig, 0, sizeof(mbcBlockConfig));

        mbcBlockConfig.nseEnable = false;

        mbcBlockConfig.domainIdx = 0; /* Core domain */
        mbcBlockConfig.mbcIdx = platform_data->mbcIdx;
        mbcBlockConfig.slaveMemoryIdx = platform_data->slaveMemoryIdx;
        mbcBlockConfig.memoryBlockIdx = platform_data->memoryBlockIdx;

        if (privileged == true)
            mbcBlockConfig.memoryAccessControlSelect = TRDC_ACCESS_CONTROL_POLICY_SEC_PRIV_INDEX;
        else
            mbcBlockConfig.memoryAccessControlSelect = TRDC_ACCESS_CONTROL_POLICY_SEC_INDEX;

        TRDC_MbcSetMemoryBlockConfig(TRDC, &mbcBlockConfig);
    }
#endif
}

#ifdef TFM_FIH_PROFILE_ON
/* This function is responsible for checking all critical isolation configurations. */
fih_int tfm_hal_verify_static_boundaries(void)
{
    int32_t result = TFM_HAL_ERROR_GENERIC;

    /* Check if the SAU is enabled */
    if (((SAU->CTRL & SAU_CTRL_ENABLE_Msk) == SAU_CTRL_ENABLE_Msk)
#ifdef AHB_SECURE_CTRL
        /* Check if the AHB secure controller checking is enabled */
        && (AHB_SECURE_CTRL->MISC_CTRL_DP_REG == AHB_SECURE_CTRL->MISC_CTRL_REG) &&
#ifdef SECTRL_MISC_CTRL_REG_ENABLE_SECURE_CHECKING /* Different definition name for LPC55S36 */
        ((AHB_SECURE_CTRL->MISC_CTRL_REG & SECTRL_MISC_CTRL_REG_ENABLE_SECURE_CHECKING(0x1U)) == SECTRL_MISC_CTRL_REG_ENABLE_SECURE_CHECKING(0x1U))
#else
        ((AHB_SECURE_CTRL->MISC_CTRL_REG & AHB_SECURE_CTRL_MISC_CTRL_REG_ENABLE_SECURE_CHECKING(0x1U)) == AHB_SECURE_CTRL_MISC_CTRL_REG_ENABLE_SECURE_CHECKING(0x1U))
#endif
#endif /* AHB_SECURE_CTRL */
       )
    {
        result = TFM_HAL_SUCCESS;
    }

    FIH_RET(fih_int_encode(result));
}
#endif /* TFM_FIH_PROFILE_ON */