1 /*
2  * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
3  * Copyright (c) 2022 Cypress Semiconductor Corporation (an Infineon
4  * company) or an affiliate of Cypress Semiconductor Corporation. All rights
5  * reserved.
6  *
7  * SPDX-License-Identifier: BSD-3-Clause
8  *
9  */
10 
11 #include <arm_cmse.h>
12 #include "tfm_hal_device_header.h"
13 #include "mpu_armv8m_drv.h"
14 #include "region.h"
15 #include "spu.h"
16 #include "target_cfg.h"
17 #include "tfm_hal_isolation.h"
18 #include "mmio_defs.h"
19 #include "array.h"
20 #include "load/spm_load_api.h"
21 
22 #define PROT_BOUNDARY_VAL \
23     ((1U << HANDLE_ATTR_PRIV_POS) & HANDLE_ATTR_PRIV_MASK)
24 
25 REGION_DECLARE(Image$$, TFM_UNPRIV_CODE_START, $$RO$$Base);
26 REGION_DECLARE(Image$$, TFM_UNPRIV_CODE_END, $$RO$$Limit);
27 REGION_DECLARE(Image$$, TFM_APP_CODE_START, $$Base);
28 REGION_DECLARE(Image$$, TFM_APP_CODE_END, $$Base);
29 REGION_DECLARE(Image$$, TFM_APP_RW_STACK_START, $$Base);
30 REGION_DECLARE(Image$$, TFM_APP_RW_STACK_END, $$Base);
31 
32 #ifdef CONFIG_TFM_PARTITION_META
33 REGION_DECLARE(Image$$, TFM_SP_META_PTR, $$ZI$$Base);
34 REGION_DECLARE(Image$$, TFM_SP_META_PTR, $$ZI$$Limit);
35 #endif /* CONFIG_TFM_PARTITION_META */
36 
37 /* Get address of memory regions to configure MPU */
38 extern const struct memory_region_limits memory_regions;
39 
/* Driver instance for the secure MPU. */
struct mpu_armv8m_dev_t dev_mpu_s = { MPU_BASE };

// We assume we are the only consumer of MPU regions and we use this
// variable to keep track of what the next available region is.
static uint32_t n_configured_regions = 0;
/* Forward declaration; defined later in this file and called from
 * tfm_hal_set_up_static_boundaries(). */
enum tfm_hal_status_t mpu_init_cfg(void);
46 
enum tfm_hal_status_t tfm_hal_set_up_static_boundaries(
                                            uintptr_t *p_spm_boundary)
{
    /*
     * Establish the static isolation boundaries between SPE and NSPE:
     * first the SAU/IDAU split, then the SPU, and finally the MPU.
     */
    sau_and_idau_cfg();

    /* Short-circuit keeps the SPU core config ahead of the peripheral
     * config, exactly as before. */
    if ((spu_init_cfg() != TFM_PLAT_ERR_SUCCESS) ||
        (spu_periph_init_cfg() != TFM_PLAT_ERR_SUCCESS)) {
        return TFM_HAL_ERROR_GENERIC;
    }

    if (mpu_init_cfg() != TFM_HAL_SUCCESS) {
        return TFM_HAL_ERROR_GENERIC;
    }

    /* The SPM boundary itself is privileged. */
    *p_spm_boundary = (uintptr_t)PROT_BOUNDARY_VAL;

    return TFM_HAL_SUCCESS;
}
69 
70 enum tfm_hal_status_t
tfm_hal_bind_boundary(const struct partition_load_info_t * p_ldinf,uintptr_t * p_boundary)71 tfm_hal_bind_boundary(const struct partition_load_info_t *p_ldinf,
72                         uintptr_t *p_boundary)
73 {
74     if (!p_ldinf || !p_boundary) {
75         return TFM_HAL_ERROR_GENERIC;
76     }
77 
78     bool privileged;
79     bool ns_agent;
80     uint32_t partition_attrs = 0;
81 
82 #if TFM_ISOLATION_LEVEL == 1
83     privileged = true;
84 #else
85     privileged = IS_PSA_ROT(p_ldinf);
86 #endif
87 
88     ns_agent = IS_NS_AGENT(p_ldinf);
89     partition_attrs = ((uint32_t)privileged << HANDLE_ATTR_PRIV_POS) &
90                     HANDLE_ATTR_PRIV_MASK;
91     partition_attrs |= ((uint32_t)ns_agent << HANDLE_ATTR_NS_POS) &
92                         HANDLE_ATTR_NS_MASK;
93     *p_boundary = (uintptr_t)partition_attrs;
94 
95     for (uint32_t i = 0; i < p_ldinf->nassets; i++) {
96         const struct asset_desc_t *p_asset =
97                 LOAD_INFO_ASSET(p_ldinf);
98 
99         if (!(p_asset[i].attr & ASSET_ATTR_NAMED_MMIO)) {
100             // Skip numbered MMIO. NB: Need to add validation if it
101             // becomes supported. Should we return an error instead?
102             continue;
103         }
104 
105         bool found = false;
106         for (uint32_t j = 0; j < ARRAY_SIZE(partition_named_mmio_list); j++) {
107             if (partition_named_mmio_list[j] == p_asset[i].dev.dev_ref) {
108                 found = true;
109                 break;
110             }
111         }
112 
113         if (!found) {
114             /* The MMIO asset is not in the allowed list of platform. */
115             return TFM_HAL_ERROR_GENERIC;
116         }
117 
118         /* Assume PPC & MPC settings are required even under level 1 */
119         struct platform_data_t *plat_data_ptr =
120                 (struct platform_data_t *)p_asset[i].dev.dev_ref;
121 
122         if (plat_data_ptr->periph_start == 0) {
123             // Should we return an error instead?
124             continue;
125         }
126 
127         spu_peripheral_config_secure(NRFX_PERIPHERAL_ID_GET(plat_data_ptr->periph_start),
128                                      SPU_LOCK_CONF_LOCKED);
129 
130         /*
131          * Static boundaries are set. Set up MPU region for MMIO.
132          * Setup regions for unprivileged assets only.
133          */
134         if (!privileged) {
135             struct mpu_armv8m_region_cfg_t localcfg;
136 
137             localcfg.region_nr = n_configured_regions++;
138 
139             localcfg.region_base = plat_data_ptr->periph_start;
140             localcfg.region_limit = plat_data_ptr->periph_limit;
141             localcfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_DEVICE_IDX;
142             localcfg.attr_access = MPU_ARMV8M_AP_RW_PRIV_UNPRIV;
143             localcfg.attr_sh = MPU_ARMV8M_SH_NONE;
144             localcfg.attr_exec = MPU_ARMV8M_XN_EXEC_NEVER;
145 
146             enum mpu_armv8m_error_t err =
147                     mpu_armv8m_region_enable(&dev_mpu_s, &localcfg);
148 
149             if (err != MPU_ARMV8M_OK) {
150                 return TFM_HAL_ERROR_GENERIC;
151             }
152         }
153     }
154 
155     return TFM_HAL_SUCCESS;
156 }
157 
158 enum tfm_hal_status_t
tfm_hal_activate_boundary(const struct partition_load_info_t * p_ldinf,uintptr_t boundary)159 tfm_hal_activate_boundary(const struct partition_load_info_t *p_ldinf,
160                           uintptr_t boundary)
161 {
162     /* Privileged level is required to be set always */
163     CONTROL_Type ctrl;
164     ctrl.w = __get_CONTROL();
165 
166     ctrl.b.nPRIV = ((uint32_t)boundary & HANDLE_ATTR_PRIV_MASK) ? 0 : 1;
167 
168     __set_CONTROL(ctrl.w);
169 
170     return TFM_HAL_SUCCESS;
171 }
172 
173 #if !defined(__SAUREGION_PRESENT) || (__SAUREGION_PRESENT == 0)
/*
 * Check whether the address range [p, p + s) is accessible with the
 * given CMSE flags when the range spans more than one IDAU (SPU)
 * region.
 *
 * cmse_check_address_range() fails for ranges that cross an IDAU region
 * boundary, so the range is split at SPU region borders and each piece
 * is checked individually. Only ranges lying entirely within the flash
 * SPU regions, or entirely within the SRAM SPU regions, are supported;
 * anything else is reported inaccessible.
 *
 * Returns true only if every piece of the range passes the check.
 */
static bool accessible_to_region(const void *p, size_t s, int flags)
{
    cmse_address_info_t tt_base = cmse_TT((void *)p);
    cmse_address_info_t tt_last = cmse_TT((void *)((uint32_t)p + s - 1));

    /* IDAU region IDs of the first and last byte of the range. */
    uint32_t base_spu_id = tt_base.flags.idau_region;
    uint32_t last_spu_id = tt_last.flags.idau_region;

    size_t size;
    uint32_t p_start = (uint32_t)p;
    int i;

    if ((base_spu_id >= spu_regions_flash_get_start_id()) &&
        (last_spu_id <= spu_regions_flash_get_last_id())) {

        /* First piece: from p to the end of its SPU region. */
        size = spu_regions_flash_get_last_address_in_region(base_spu_id) + 1
                                                                      - p_start;

        if (cmse_check_address_range((void *)p_start, size, flags) == 0) {
            return false;
        }

        /* Middle pieces: whole SPU regions strictly between the ends. */
        for (i = base_spu_id + 1; i < last_spu_id; i++) {
            p_start = spu_regions_flash_get_base_address_in_region(i);
            if (cmse_check_address_range((void *)p_start,
                spu_regions_flash_get_region_size(), flags) == 0) {
                return false;
            }
        }

        /* Last piece: from the start of the final region to p + s. */
        p_start = spu_regions_flash_get_base_address_in_region(last_spu_id);
        size = (uint32_t)p + s - p_start;
        if (cmse_check_address_range((void *)p_start, size, flags) == 0) {
            return false;
        }


    } else if ((base_spu_id >= spu_regions_sram_get_start_id()) &&
        (last_spu_id <= spu_regions_sram_get_last_id())) {

        /* Same scheme as above, but over the SRAM SPU regions. */
        size = spu_regions_sram_get_last_address_in_region(base_spu_id) + 1
                                                                      - p_start;
        if (cmse_check_address_range((void *)p_start, size, flags) == 0) {
            return false;
        }

        for (i = base_spu_id + 1; i < last_spu_id; i++) {
            p_start = spu_regions_sram_get_base_address_in_region(i);
            if (cmse_check_address_range((void *)p_start,
                spu_regions_sram_get_region_size(), flags) == 0) {
                return false;
            }
        }

        p_start = spu_regions_sram_get_base_address_in_region(last_spu_id);
        size = (uint32_t)p + s - p_start;
        if (cmse_check_address_range((void *)p_start, size, flags) == 0) {
            return false;
        }
    } else {
        /* Range straddles flash/SRAM or lies outside both: reject. */
        return false;
    }

    return true;
}
239 #endif /* !defined(__SAUREGION_PRESENT) || (__SAUREGION_PRESENT == 0) */
240 
/*
 * Check whether the memory range [base, base + size) is accessible for
 * the given access type under the given partition boundary.
 *
 * The boundary's privilege and NS-agent bits (HANDLE_ATTR_* masks)
 * select the CMSE flags used for the TT-based range check. When the
 * range crosses IDAU region boundaries (only possible without SAU
 * regions), a platform-specific per-SPU-region check is performed
 * instead.
 *
 * Returns TFM_HAL_SUCCESS when accessible, TFM_HAL_ERROR_MEM_FAULT
 * when not, or TFM_HAL_ERROR_INVALID_INPUT for a NULL base (with
 * non-zero size) or an unusable access_type.
 */
enum tfm_hal_status_t tfm_hal_memory_check(uintptr_t boundary, uintptr_t base,
                                           size_t size, uint32_t access_type)
{
    int flags = 0;
    int32_t range_access_allowed_by_mpu;

    /* If size is zero, this indicates an empty buffer and base is ignored */
    if (size == 0) {
        return TFM_HAL_SUCCESS;
    }

    if (!base) {
        return TFM_HAL_ERROR_INVALID_INPUT;
    }

    /* Map the requested access type onto CMSE flags; an access_type
     * with neither read nor write bits is invalid. */
    if ((access_type & TFM_HAL_ACCESS_READWRITE) == TFM_HAL_ACCESS_READWRITE) {
        flags |= CMSE_MPU_READWRITE;
    } else if (access_type & TFM_HAL_ACCESS_READABLE) {
        flags |= CMSE_MPU_READ;
    } else {
        return TFM_HAL_ERROR_INVALID_INPUT;
    }

    if (access_type & TFM_HAL_ACCESS_NS) {
        flags |= CMSE_NONSECURE;
    }

    /* Unprivileged boundary: the range must also pass the unprivileged
     * access check. */
    if (!((uint32_t)boundary & HANDLE_ATTR_PRIV_MASK)) {
        flags |= CMSE_MPU_UNPRIV;
    }

    /* This check is only done for ns_agent_tz */
    if ((uint32_t)boundary & HANDLE_ATTR_NS_MASK) {
        CONTROL_Type ctrl;
        ctrl.w = __TZ_get_CONTROL_NS();
        /* Mirror the non-secure side's current privilege level; this
         * intentionally overrides the privilege flag derived above. */
        if (ctrl.b.nPRIV == 1) {
            flags |= CMSE_MPU_UNPRIV;
        } else {
            flags &= ~CMSE_MPU_UNPRIV;
        }
        flags |= CMSE_NONSECURE;
    }

    /* Use the TT instruction to check access to the partition's regions*/
    range_access_allowed_by_mpu =
            cmse_check_address_range((void *)base, size, flags) != NULL;

#if !defined(__SAUREGION_PRESENT) || (__SAUREGION_PRESENT == 0)
    if (!range_access_allowed_by_mpu) {
        /*
         * Verification failure may be due to address range crossing
         * one or multiple IDAU boundaries. In this case request a
         * platform-specific check for access permissions.
         */
        cmse_address_info_t addr_info_base = cmse_TT((void *)base);
        cmse_address_info_t addr_info_last =
                cmse_TT((void *)((uint32_t)base + size - 1));

        if ((addr_info_base.flags.idau_region_valid != 0) &&
            (addr_info_last.flags.idau_region_valid != 0) &&
            (addr_info_base.flags.idau_region !=
             addr_info_last.flags.idau_region)) {
            range_access_allowed_by_mpu =
                                accessible_to_region((void *)base, size, flags);
        }
    }
#endif

    if (range_access_allowed_by_mpu) {
        return TFM_HAL_SUCCESS;
    } else {
        return TFM_HAL_ERROR_MEM_FAULT;
    }
}
315 
mpu_init_cfg(void)316 enum tfm_hal_status_t mpu_init_cfg(void)
317 {
318     struct mpu_armv8m_region_cfg_t region_cfg;
319     enum mpu_armv8m_error_t err;
320 
321     mpu_armv8m_clean(&dev_mpu_s);
322 
323     /* Veneer region */
324     region_cfg.region_nr = n_configured_regions++;
325 
326     region_cfg.region_base = memory_regions.veneer_base;
327     region_cfg.region_limit = memory_regions.veneer_limit;
328     region_cfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_CODE_IDX;
329     region_cfg.attr_access = MPU_ARMV8M_AP_RO_PRIV_UNPRIV;
330     region_cfg.attr_sh = MPU_ARMV8M_SH_NONE;
331     region_cfg.attr_exec = MPU_ARMV8M_XN_EXEC_OK;
332 
333     err = mpu_armv8m_region_enable(&dev_mpu_s, &region_cfg);
334 
335     if (err != MPU_ARMV8M_OK) {
336         return TFM_HAL_ERROR_GENERIC;
337     }
338 
339     /* TFM Core unprivileged code region */
340     region_cfg.region_nr = n_configured_regions++;
341 
342     region_cfg.region_base =
343             (uint32_t)&REGION_NAME(Image$$, TFM_UNPRIV_CODE_START, $$RO$$Base);
344     region_cfg.region_limit =
345             (uint32_t)&REGION_NAME(Image$$, TFM_UNPRIV_CODE_END, $$RO$$Limit);
346     region_cfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_CODE_IDX;
347     region_cfg.attr_access = MPU_ARMV8M_AP_RO_PRIV_UNPRIV;
348     region_cfg.attr_sh = MPU_ARMV8M_SH_NONE;
349     region_cfg.attr_exec = MPU_ARMV8M_XN_EXEC_OK;
350 
351     err = mpu_armv8m_region_enable(&dev_mpu_s, &region_cfg);
352 
353     if (err != MPU_ARMV8M_OK) {
354         return TFM_HAL_ERROR_GENERIC;
355     }
356 
357     /* RO region */
358     region_cfg.region_nr = n_configured_regions++;
359 
360     region_cfg.region_base =
361             (uint32_t)&REGION_NAME(Image$$, TFM_APP_CODE_START, $$Base);
362     region_cfg.region_limit =
363             (uint32_t)&REGION_NAME(Image$$, TFM_APP_CODE_END, $$Base);
364     region_cfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_CODE_IDX;
365     region_cfg.attr_access = MPU_ARMV8M_AP_RO_PRIV_UNPRIV;
366     region_cfg.attr_sh = MPU_ARMV8M_SH_NONE;
367     region_cfg.attr_exec = MPU_ARMV8M_XN_EXEC_OK;
368 
369     err = mpu_armv8m_region_enable(&dev_mpu_s, &region_cfg);
370 
371     if (err != MPU_ARMV8M_OK) {
372         return TFM_HAL_ERROR_GENERIC;
373     }
374 
375     /* RW, ZI and stack as one region */
376     region_cfg.region_nr = n_configured_regions++;
377 
378     region_cfg.region_base =
379             (uint32_t)&REGION_NAME(Image$$, TFM_APP_RW_STACK_START, $$Base);
380     region_cfg.region_limit =
381             (uint32_t)&REGION_NAME(Image$$, TFM_APP_RW_STACK_END, $$Base);
382     region_cfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_DATA_IDX;
383     region_cfg.attr_access = MPU_ARMV8M_AP_RW_PRIV_UNPRIV;
384     region_cfg.attr_sh = MPU_ARMV8M_SH_NONE;
385     region_cfg.attr_exec = MPU_ARMV8M_XN_EXEC_NEVER;
386 
387     err = mpu_armv8m_region_enable(&dev_mpu_s, &region_cfg);
388 
389     if (err != MPU_ARMV8M_OK) {
390         return TFM_HAL_ERROR_GENERIC;
391     }
392 
393 #ifdef CONFIG_TFM_PARTITION_META
394     /* TFM partition metadata poniter region */
395     region_cfg.region_nr = n_configured_regions++;
396 
397     region_cfg.region_base =
398             (uint32_t)&REGION_NAME(Image$$, TFM_SP_META_PTR, $$ZI$$Base);
399     region_cfg.region_limit =
400             (uint32_t)&REGION_NAME(Image$$, TFM_SP_META_PTR, $$ZI$$Limit);
401     region_cfg.region_attridx = MPU_ARMV8M_MAIR_ATTR_DATA_IDX;
402     region_cfg.attr_access = MPU_ARMV8M_AP_RW_PRIV_UNPRIV;
403     region_cfg.attr_sh = MPU_ARMV8M_SH_NONE;
404     region_cfg.attr_exec = MPU_ARMV8M_XN_EXEC_NEVER;
405 
406     err = mpu_armv8m_region_enable(&dev_mpu_s, &region_cfg);
407 
408     if (err != MPU_ARMV8M_OK) {
409         return TFM_HAL_ERROR_GENERIC;
410     }
411 #endif /* CONFIG_TFM_PARTITION_META */
412 
413 #ifdef NULL_POINTER_EXCEPTION_DETECTION
414 	uint32_t num_regions =
415 		((MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos);
416 
417 	if ((num_regions - n_configured_regions) < 2) {
418 		// We have enabled null pointer detection, but we don't have
419 		// enough regions for it.
420 		//
421 	    // NB: Enabling null-pointer detection can also
422 		// cause tfm_hal_bind_boundary to return an error due to
423 		// insufficient memory regions
424 		return TFM_HAL_ERROR_GENERIC;
425 	}
426 
427 	// The armv8m MPU can not be configured to protect a memory region
428 	// from priviliged reads. However, it is invalid to have two
429 	// overlapping memory regions and when a memory access is made to
430 	// such an overlapping area we will get a MemFault. We exploit
431 	// this undefined behaviour to block priviliged reads to the first
432 	// 256 bytes. The first 350 bytes on nRF platforms are used for
433 	// the vector table but luckily the armv8m MPU does not affect
434 	// exception vector fetches so these two regions we configure will
435 	// not accidentally disturb any valid memory access.
436 	for(int i = 0; i < 2; i++) {
437 		region_cfg.region_nr = n_configured_regions++;
438 
439 		region_cfg.region_base = 0;
440 		region_cfg.region_limit = 256 - 32; // The last protected address is limit + 31
441 
442 		// The region_attridx, attr_access, attr_sh and attr_exec
443 		// have no effect when memory regions overlap as any
444 		// access will trigger a MemFault so we just use the
445 		// previously configured attributes.
446 
447 		err = mpu_armv8m_region_enable(&dev_mpu_s, &region_cfg);
448 
449 		if (err != MPU_ARMV8M_OK) {
450 			return TFM_HAL_ERROR_GENERIC;
451 		}
452 	}
453 #endif /* NULL_POINTER_EXCEPTION_DETECTION */
454 
455     mpu_armv8m_enable(&dev_mpu_s, PRIVILEGED_DEFAULT_ENABLE,
456                       HARDFAULT_NMI_ENABLE);
457 
458     return TFM_HAL_SUCCESS;
459 }
460 
tfm_hal_boundary_need_switch(uintptr_t boundary_from,uintptr_t boundary_to)461 bool tfm_hal_boundary_need_switch(uintptr_t boundary_from,
462                                   uintptr_t boundary_to)
463 {
464     if (boundary_from == boundary_to) {
465         return false;
466     }
467 
468     if (((uint32_t)boundary_from & HANDLE_ATTR_PRIV_MASK) &&
469         ((uint32_t)boundary_to & HANDLE_ATTR_PRIV_MASK)) {
470         return false;
471     }
472     return true;
473 }
474