1 /*
2  * Copyright (c) 2017 Linaro Limited.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/device.h>
8 #include <zephyr/init.h>
9 #include <zephyr/kernel.h>
10 #include <zephyr/kernel_structs.h>
11 
12 #include "arm_core_mpu_dev.h"
13 #include <zephyr/linker/linker-defs.h>
14 
15 #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
16 #include <zephyr/logging/log.h>
17 LOG_MODULE_REGISTER(mpu);
18 
19 extern void arm_core_mpu_enable(void);
20 extern void arm_core_mpu_disable(void);
21 
22 /*
23  * Maximum number of dynamic memory partitions that may be supplied to the MPU
24  * driver for programming during run-time. Note that the actual number of the
25  * available MPU regions for dynamic programming depends on the number of the
26  * static MPU regions currently being programmed, and the total number of HW-
27  * available MPU regions. This macro is only used internally in function
28  * z_arm_configure_dynamic_mpu_regions(), to reserve sufficient area for the
29  * array of dynamic regions passed to the underlying driver.
30  */
#if defined(CONFIG_USERSPACE)
/* Parenthesized so the expansion is safe in any expression context
 * (array bounds, comparisons, arithmetic).
 */
#define _MAX_DYNAMIC_MPU_REGIONS_NUM \
	(CONFIG_MAX_DOMAIN_PARTITIONS + /* User thread stack */ 1 + \
	 (IS_ENABLED(CONFIG_MPU_STACK_GUARD) ? 1 : 0))
#else
#define _MAX_DYNAMIC_MPU_REGIONS_NUM \
	(IS_ENABLED(CONFIG_MPU_STACK_GUARD) ? 1 : 0)
#endif /* CONFIG_USERSPACE */

/* Convenience macros to denote the start address and the size of the system
 * memory area, where dynamic memory regions may be programmed at run-time.
 * With userspace enabled the area starts at the application shared-memory
 * section; otherwise it starts at the kernel RAM base.
 */
#if defined(CONFIG_USERSPACE)
#define _MPU_DYNAMIC_REGIONS_AREA_START ((uint32_t)&_app_smem_start)
#else
#define _MPU_DYNAMIC_REGIONS_AREA_START ((uint32_t)&__kernel_ram_start)
#endif /* CONFIG_USERSPACE */
#define _MPU_DYNAMIC_REGIONS_AREA_SIZE ((uint32_t)&__kernel_ram_end - \
		_MPU_DYNAMIC_REGIONS_AREA_START)
50 
51 #if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_MPU_STACK_GUARD)
52 K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
53 #endif
54 
55 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) \
56 	&& defined(CONFIG_MPU_STACK_GUARD)
57 uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread);
58 #endif
59 
60 #if defined(CONFIG_CODE_DATA_RELOCATION_SRAM)
61 extern char __ram_text_reloc_start[];
62 extern char __ram_text_reloc_size[];
63 #endif
64 
65 static const struct z_arm_mpu_partition static_regions[] = {
66 #if defined(CONFIG_COVERAGE_GCOV) && defined(CONFIG_USERSPACE)
67 		{
68 		/* GCOV code coverage accounting area. Needs User permissions
69 		 * to function
70 		 */
71 		.start = (uint32_t)&__gcov_bss_start,
72 		.size = (uint32_t)&__gcov_bss_size,
73 		.attr = K_MEM_PARTITION_P_RW_U_RW,
74 		},
75 #endif /* CONFIG_COVERAGE_GCOV && CONFIG_USERSPACE */
76 #if defined(CONFIG_NOCACHE_MEMORY)
77 		{
78 		/* Special non-cacheable RAM area */
79 		.start = (uint32_t)&_nocache_ram_start,
80 		.size = (uint32_t)&_nocache_ram_size,
81 		.attr = K_MEM_PARTITION_P_RW_U_NA_NOCACHE,
82 		},
83 #endif /* CONFIG_NOCACHE_MEMORY */
84 #if defined(CONFIG_ARCH_HAS_RAMFUNC_SUPPORT)
85 		{
86 		/* Special RAM area for program text */
87 		.start = (uint32_t)&__ramfunc_start,
88 		.size = (uint32_t)&__ramfunc_size,
89 		.attr = K_MEM_PARTITION_P_RX_U_RX,
90 		},
91 #endif /* CONFIG_ARCH_HAS_RAMFUNC_SUPPORT */
92 #if defined(CONFIG_CODE_DATA_RELOCATION_SRAM)
93 		{
94 		/* RAM area for relocated text */
95 		.start = (uint32_t)&__ram_text_reloc_start,
96 		.size = (uint32_t)&__ram_text_reloc_size,
97 		.attr = K_MEM_PARTITION_P_RX_U_RX,
98 		},
99 #endif /* CONFIG_CODE_DATA_RELOCATION_SRAM */
100 #if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_MPU_STACK_GUARD)
101 		/* Main stack MPU guard to detect overflow.
102 		 * Note:
103 		 * FPU_SHARING and USERSPACE are not supported features
104 		 * under CONFIG_MULTITHREADING=n, so the MPU guard (if
105 		 * exists) is reserved aside of CONFIG_MAIN_STACK_SIZE
106 		 * and there is no requirement for larger guard area (FP
107 		 * context is not stacked).
108 		 */
109 		{
110 			.start = (uint32_t)z_main_stack,
111 			.size = (uint32_t)MPU_GUARD_ALIGN_AND_SIZE,
112 			.attr = K_MEM_PARTITION_P_RO_U_NA,
113 		},
114 #endif /* !CONFIG_MULTITHREADING && CONFIG_MPU_STACK_GUARD */
115 };
116 
117 /**
118  * @brief Use the HW-specific MPU driver to program
119  *        the static MPU regions.
120  *
121  * Program the static MPU regions using the HW-specific MPU driver. The
122  * function is meant to be invoked only once upon system initialization.
123  *
124  * If the function attempts to configure a number of regions beyond the
125  * MPU HW limitations, the system behavior will be undefined.
126  *
127  * For some MPU architectures, such as the unmodified ARMv8-M MPU,
128  * the function must execute with MPU enabled.
129  */
z_arm_configure_static_mpu_regions(void)130 void z_arm_configure_static_mpu_regions(void)
131 {
132 	/* Configure the static MPU regions within firmware SRAM boundaries.
133 	 * Start address of the image is given by _image_ram_start. The end
134 	 * of the firmware SRAM area is marked by __kernel_ram_end, taking
135 	 * into account the unused SRAM area, as well.
136 	 */
137 #ifdef CONFIG_AARCH32_ARMV8_R
138 	arm_core_mpu_disable();
139 #endif
140 	arm_core_mpu_configure_static_mpu_regions(static_regions,
141 		ARRAY_SIZE(static_regions),
142 		(uint32_t)&_image_ram_start,
143 		(uint32_t)&__kernel_ram_end);
144 #ifdef CONFIG_AARCH32_ARMV8_R
145 	arm_core_mpu_enable();
146 #endif
147 
148 #if defined(CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS) && \
149 	defined(CONFIG_MULTITHREADING)
150 	/* Define a constant array of z_arm_mpu_partition objects that holds the
151 	 * boundaries of the areas, inside which dynamic region programming
152 	 * is allowed. The information is passed to the underlying driver at
153 	 * initialization.
154 	 */
155 	const struct z_arm_mpu_partition dyn_region_areas[] = {
156 		{
157 		.start = _MPU_DYNAMIC_REGIONS_AREA_START,
158 		.size =  _MPU_DYNAMIC_REGIONS_AREA_SIZE,
159 		}
160 	};
161 
162 	arm_core_mpu_mark_areas_for_dynamic_regions(dyn_region_areas,
163 		ARRAY_SIZE(dyn_region_areas));
164 #endif /* CONFIG_MPU_REQUIRES_NON_OVERLAPPING_REGIONS */
165 }
166 
167 /**
168  * @brief Use the HW-specific MPU driver to program
169  *        the dynamic MPU regions.
170  *
171  * Program the dynamic MPU regions using the HW-specific MPU
172  * driver. This function is meant to be invoked every time the
173  * memory map is to be re-programmed, e.g during thread context
174  * switch, entering user mode, reconfiguring memory domain, etc.
175  *
176  * For some MPU architectures, such as the unmodified ARMv8-M MPU,
177  * the function must execute with MPU enabled.
178  *
179  * This function is not inherently thread-safe, but the memory domain
180  * spinlock needs to be held anyway.
181  */
z_arm_configure_dynamic_mpu_regions(struct k_thread * thread)182 void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
183 {
184 	/* Define an array of z_arm_mpu_partition objects to hold the configuration
185 	 * of the respective dynamic MPU regions to be programmed for
186 	 * the given thread. The array of partitions (along with its
187 	 * actual size) will be supplied to the underlying MPU driver.
188 	 *
189 	 * The drivers of what regions get configured are CONFIG_USERSPACE,
190 	 * CONFIG_MPU_STACK_GUARD, and K_USER/supervisor threads.
191 	 *
192 	 * If CONFIG_USERSPACE is defined and the thread is a member of any
193 	 * memory domain then any partitions defined within that domain get a
194 	 * defined region.
195 	 *
196 	 * If CONFIG_USERSPACE is defined and the thread is a user thread
197 	 * (K_USER) the usermode thread stack is defined a region.
198 	 *
199 	 * IF CONFIG_MPU_STACK_GUARD is defined the thread is a supervisor
200 	 * thread, the stack guard will be defined in front of the
201 	 * thread->stack_info.start. On a K_USER thread, the guard is defined
202 	 * in front of the privilege mode stack, thread->arch.priv_stack_start.
203 	 */
204 	static struct z_arm_mpu_partition
205 			dynamic_regions[_MAX_DYNAMIC_MPU_REGIONS_NUM];
206 
207 	uint8_t region_num = 0U;
208 
209 #if defined(CONFIG_USERSPACE)
210 	/* Memory domain */
211 	LOG_DBG("configure thread %p's domain", thread);
212 	struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain;
213 
214 	if (mem_domain) {
215 		LOG_DBG("configure domain: %p", mem_domain);
216 		uint32_t num_partitions = mem_domain->num_partitions;
217 		struct k_mem_partition *partition;
218 		int i;
219 
220 		LOG_DBG("configure domain: %p", mem_domain);
221 
222 		for (i = 0; i < CONFIG_MAX_DOMAIN_PARTITIONS; i++) {
223 			partition = &mem_domain->partitions[i];
224 			if (partition->size == 0) {
225 				/* Zero size indicates a non-existing
226 				 * memory partition.
227 				 */
228 				continue;
229 			}
230 			LOG_DBG("set region 0x%lx 0x%x",
231 				partition->start, partition->size);
232 			__ASSERT(region_num < _MAX_DYNAMIC_MPU_REGIONS_NUM,
233 				"Out-of-bounds error for dynamic region map.");
234 
235 			dynamic_regions[region_num].start = partition->start;
236 			dynamic_regions[region_num].size = partition->size;
237 			dynamic_regions[region_num].attr = partition->attr;
238 
239 			region_num++;
240 			num_partitions--;
241 			if (num_partitions == 0U) {
242 				break;
243 			}
244 		}
245 	}
246 	/* Thread user stack */
247 	LOG_DBG("configure user thread %p's context", thread);
248 	if (thread->arch.priv_stack_start) {
249 		/* K_USER thread stack needs a region */
250 		uintptr_t base = (uintptr_t)thread->stack_obj;
251 		size_t size = thread->stack_info.size +
252 			(thread->stack_info.start - base);
253 
254 		__ASSERT(region_num < _MAX_DYNAMIC_MPU_REGIONS_NUM,
255 			"Out-of-bounds error for dynamic region map.");
256 
257 		dynamic_regions[region_num].start = base;
258 		dynamic_regions[region_num].size = size;
259 		dynamic_regions[region_num].attr = K_MEM_PARTITION_P_RW_U_RW;
260 
261 		region_num++;
262 	}
263 #endif /* CONFIG_USERSPACE */
264 
265 #if defined(CONFIG_MPU_STACK_GUARD)
266 	/* Define a stack guard region for either the thread stack or the
267 	 * supervisor/privilege mode stack depending on the type of thread
268 	 * being mapped.
269 	 */
270 
271 	/* Privileged stack guard */
272 	uintptr_t guard_start;
273 	size_t guard_size = MPU_GUARD_ALIGN_AND_SIZE;
274 
275 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
276 	guard_size = z_arm_mpu_stack_guard_and_fpu_adjust(thread);
277 #endif
278 
279 #if defined(CONFIG_USERSPACE)
280 	if (thread->arch.priv_stack_start) {
281 		/* A K_USER thread has the stack guard protecting the privilege
282 		 * stack and not on the usermode stack because the user mode
283 		 * stack already has its own defined memory region.
284 		 */
285 		guard_start = thread->arch.priv_stack_start - guard_size;
286 
287 		__ASSERT((uintptr_t)&z_priv_stacks_ram_start <= guard_start,
288 		"Guard start: (0x%lx) below privilege stacks boundary: (%p)",
289 		guard_start, z_priv_stacks_ram_start);
290 	} else
291 #endif /* CONFIG_USERSPACE */
292 	{
293 		/* A supervisor thread only has the normal thread stack to
294 		 * protect with a stack guard.
295 		 */
296 		guard_start = thread->stack_info.start - guard_size;
297 #ifdef CONFIG_USERSPACE
298 		__ASSERT((uintptr_t)thread->stack_obj == guard_start,
299 			"Guard start (0x%lx) not beginning at stack object (%p)\n",
300 			guard_start, thread->stack_obj);
301 #endif /* CONFIG_USERSPACE */
302 	}
303 
304 	__ASSERT(region_num < _MAX_DYNAMIC_MPU_REGIONS_NUM,
305 		"Out-of-bounds error for dynamic region map.");
306 
307 	dynamic_regions[region_num].start = guard_start;
308 	dynamic_regions[region_num].size = guard_size;
309 	dynamic_regions[region_num].attr = K_MEM_PARTITION_P_RO_U_NA;
310 
311 	region_num++;
312 #endif /* CONFIG_MPU_STACK_GUARD */
313 
314 	/* Configure the dynamic MPU regions */
315 #ifdef CONFIG_AARCH32_ARMV8_R
316 	arm_core_mpu_disable();
317 #endif
318 	arm_core_mpu_configure_dynamic_mpu_regions(dynamic_regions,
319 						   region_num);
320 #ifdef CONFIG_AARCH32_ARMV8_R
321 	arm_core_mpu_enable();
322 #endif
323 }
324 
325 #if defined(CONFIG_USERSPACE)
arch_mem_domain_max_partitions_get(void)326 int arch_mem_domain_max_partitions_get(void)
327 {
328 	int available_regions = arm_core_mpu_get_max_available_dyn_regions();
329 
330 	available_regions -=
331 		ARM_CORE_MPU_NUM_MPU_REGIONS_FOR_THREAD_STACK;
332 
333 	if (IS_ENABLED(CONFIG_MPU_STACK_GUARD)) {
334 		available_regions -=
335 			ARM_CORE_MPU_NUM_MPU_REGIONS_FOR_MPU_STACK_GUARD;
336 	}
337 
338 	return ARM_CORE_MPU_MAX_DOMAIN_PARTITIONS_GET(available_regions);
339 }
340 
/**
 * @brief Validate that a user buffer may be accessed.
 *
 * Thin wrapper delegating the check to the HW-specific MPU driver.
 *
 * @param addr  buffer start address
 * @param size  buffer size in bytes
 * @param write non-zero to validate for write access, zero for read
 *
 * @return the driver's result (by convention 0 on success)
 */
int arch_buffer_validate(const void *addr, size_t size, int write)
{
	return arm_core_mpu_buffer_validate(addr, size, write);
}
345 
346 #endif /* CONFIG_USERSPACE */
347