/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Common Memory Management Driver Code
 *
 * This file provides a common implementation of memory management
 * driver functions. For example, sys_mm_drv_map_region() can use
 * sys_mm_drv_map_page() to map a whole region page by page. This
 * avoids duplicating the same functionality in different drivers.
 * The implementations here are marked as weak functions so that a
 * driver can override them.
 */
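
/*
 * For example, a driver that can program a whole region in hardware
 * at once may provide a strong definition that takes precedence over
 * the weak alias below (illustrative sketch only; my_drv_program_tlb()
 * is a hypothetical driver helper, not an existing API):
 *
 *	int sys_mm_drv_map_region(void *virt, uintptr_t phys,
 *				  size_t size, uint32_t flags)
 *	{
 *		return my_drv_program_tlb(virt, phys, size, flags);
 *	}
 */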

#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/util.h>

#include <zephyr/drivers/mm/system_mm.h>

#include "mm_drv_common.h"

/* Spinlock serializing the page-by-page operations in this file */
struct k_spinlock sys_mm_drv_common_lock;

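/* Check that all physical addresses in an array are page aligned. */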
bool sys_mm_drv_is_addr_array_aligned(uintptr_t *addr, size_t cnt)
{
	size_t idx;
	bool ret = true;

	for (idx = 0; idx < cnt; idx++) {
		if (!sys_mm_drv_is_addr_aligned(addr[idx])) {
			ret = false;
			break;
		}
	}

	return ret;
}

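/* Check that all pages in a virtual region are mapped. */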
bool sys_mm_drv_is_virt_region_mapped(void *virt, size_t size)
{
	size_t offset;
	bool ret = true;

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va = (uint8_t *)virt + offset;

		if (sys_mm_drv_page_phys_get(va, NULL) != 0) {
			ret = false;
			break;
		}
	}

	return ret;
}

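/* Check that all pages in a virtual region are unmapped. */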
bool sys_mm_drv_is_virt_region_unmapped(void *virt, size_t size)
{
	size_t offset;
	bool ret = true;

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va = (uint8_t *)virt + offset;

		if (sys_mm_drv_page_phys_get(va, NULL) != -EFAULT) {
			ret = false;
			break;
		}
	}

	return ret;
}

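/*
 * Map a physically contiguous region by mapping it page by page
 * via sys_mm_drv_map_page().
 */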
int sys_mm_drv_simple_map_region(void *virt, uintptr_t phys,
				 size_t size, uint32_t flags)
{
	k_spinlock_key_t key;
	int ret = 0;
	size_t offset;

	CHECKIF(!sys_mm_drv_is_addr_aligned(phys) ||
		!sys_mm_drv_is_virt_addr_aligned(virt) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va = (uint8_t *)virt + offset;
		uintptr_t pa = phys + offset;

		int ret2 = sys_mm_drv_map_page(va, pa, flags);

		if (ret2 != 0) {
			__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va);

			ret = ret2;
		}
	}

	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_map_region,
		  sys_mm_drv_map_region, int);

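/*
 * Map an array of physical pages, which need not be contiguous,
 * into a contiguous virtual region, one page at a time.
 */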
int sys_mm_drv_simple_map_array(void *virt, uintptr_t *phys,
				size_t cnt, uint32_t flags)
{
	k_spinlock_key_t key;
	int ret = 0;
	size_t idx, offset;

	CHECKIF(!sys_mm_drv_is_addr_array_aligned(phys, cnt) ||
		!sys_mm_drv_is_virt_addr_aligned(virt)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	offset = 0;
	idx = 0;
	while (idx < cnt) {
		uint8_t *va = (uint8_t *)virt + offset;

		int ret2 = sys_mm_drv_map_page(va, phys[idx], flags);

		if (ret2 != 0) {
			__ASSERT(false, "cannot map 0x%lx to %p\n", phys[idx], va);

			ret = ret2;
		}

		offset += CONFIG_MM_DRV_PAGE_SIZE;
		idx++;
	}

	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_map_array, sys_mm_drv_map_array, int);

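/* Unmap a virtual region page by page via sys_mm_drv_unmap_page(). */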
int sys_mm_drv_simple_unmap_region(void *virt, size_t size)
{
	k_spinlock_key_t key;
	int ret = 0;
	size_t offset;

	CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va = (uint8_t *)virt + offset;

		int ret2 = sys_mm_drv_unmap_page(va);

		if (ret2 != 0) {
			__ASSERT(false, "cannot unmap %p\n", va);

			ret = ret2;
		}
	}

	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_unmap_region,
		  sys_mm_drv_unmap_region, int);

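/*
 * Remap a virtual region to a new virtual address: each page is
 * unmapped from the old address and its physical page is re-mapped
 * at the new address with the same flags.
 */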
int sys_mm_drv_simple_remap_region(void *virt_old, size_t size,
				   void *virt_new)
{
	k_spinlock_key_t key;
	size_t offset;
	int ret = 0;

	CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_new) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) &&
	    (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) {
		ret = -EINVAL; /* overlaps */
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) ||
	    !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) {
		ret = -EINVAL;
		goto unlock_out;
	}

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va_old = (uint8_t *)virt_old + offset;
		uint8_t *va_new = (uint8_t *)virt_new + offset;
		uintptr_t pa;
		uint32_t flags;
		int ret2;
		bool to_map;

		/*
		 * va_old is mapped as checked above, so there is
		 * no need to check the return value here.
		 */
		(void)sys_mm_drv_page_phys_get(va_old, &pa);

		to_map = true;
		ret2 = sys_mm_drv_page_flag_get(va_old, &flags);
		if (ret2 != 0) {
			__ASSERT(false, "cannot query page %p\n", va_old);

			ret = ret2;
			to_map = false;
		}

		ret2 = sys_mm_drv_unmap_page(va_old);
		if (ret2 != 0) {
			__ASSERT(false, "cannot unmap %p\n", va_old);

			ret = ret2;
		}

		if (!to_map) {
			/*
			 * Cannot retrieve the flags of the mapped
			 * virtual memory. Skip mapping this page
			 * as we don't want to map with unknown
			 * random flags.
			 */
			continue;
		}

		ret2 = sys_mm_drv_map_page(va_new, pa, flags);
		if (ret2 != 0) {
			__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new);

			ret = ret2;
		}
	}

unlock_out:
	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_remap_region,
		  sys_mm_drv_remap_region, int);

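/*
 * Move a region to a new virtual and physical location: map each
 * new page, copy the old page's contents over, then unmap the old
 * page.
 */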
int sys_mm_drv_simple_move_region(void *virt_old, size_t size,
				  void *virt_new, uintptr_t phys_new)
{
	k_spinlock_key_t key;
	size_t offset;
	int ret = 0;

	CHECKIF(!sys_mm_drv_is_addr_aligned(phys_new) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_new) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) &&
	    (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) {
		ret = -EINVAL; /* overlaps */
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) ||
	    !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) {
		ret = -EINVAL;
		goto unlock_out;
	}

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va_old = (uint8_t *)virt_old + offset;
		uint8_t *va_new = (uint8_t *)virt_new + offset;
		uintptr_t pa = phys_new + offset;
		uint32_t flags;
		int ret2;

		ret2 = sys_mm_drv_page_flag_get(va_old, &flags);
		if (ret2 != 0) {
			__ASSERT(false, "cannot query page %p\n", va_old);

			ret = ret2;
		} else {
			/*
			 * Only map the new page when we can retrieve
			 * the flags of the old mapped page, as we
			 * don't want to map with unknown random flags.
			 */
			ret2 = sys_mm_drv_map_page(va_new, pa, flags);
			if (ret2 != 0) {
				__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new);

				ret = ret2;
			} else {
				(void)memcpy(va_new, va_old,
					     CONFIG_MM_DRV_PAGE_SIZE);
			}
		}

		ret2 = sys_mm_drv_unmap_page(va_old);
		if (ret2 != 0) {
			__ASSERT(false, "cannot unmap %p\n", va_old);

			ret = ret2;
		}
	}

unlock_out:
	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_move_region,
		  sys_mm_drv_move_region, int);

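/*
 * Same as the region move above, except the new backing physical
 * pages are supplied as an array and need not be contiguous.
 */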
int sys_mm_drv_simple_move_array(void *virt_old, size_t size,
				 void *virt_new,
				 uintptr_t *phys_new, size_t phys_cnt)
{
	k_spinlock_key_t key;
	size_t idx, offset;
	int ret = 0;

	CHECKIF(!sys_mm_drv_is_addr_array_aligned(phys_new, phys_cnt) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_new) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) &&
	    (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) {
		ret = -EINVAL; /* overlaps */
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) ||
	    !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) {
		ret = -EINVAL;
		goto unlock_out;
	}

	offset = 0;
	idx = 0;
	while (idx < phys_cnt) {
		uint8_t *va_old = (uint8_t *)virt_old + offset;
		uint8_t *va_new = (uint8_t *)virt_new + offset;
		uint32_t flags;
		int ret2;

		ret2 = sys_mm_drv_page_flag_get(va_old, &flags);
		if (ret2 != 0) {
			__ASSERT(false, "cannot query page %p\n", va_old);

			ret = ret2;
		} else {
			/*
			 * Only map the new page when we can retrieve
			 * the flags of the old mapped page, as we
			 * don't want to map with unknown random flags.
			 */
			ret2 = sys_mm_drv_map_page(va_new, phys_new[idx], flags);
			if (ret2 != 0) {
				__ASSERT(false, "cannot map 0x%lx to %p\n",
					 phys_new[idx], va_new);

				ret = ret2;
			} else {
				(void)memcpy(va_new, va_old,
					     CONFIG_MM_DRV_PAGE_SIZE);
			}
		}

		ret2 = sys_mm_drv_unmap_page(va_old);
		if (ret2 != 0) {
			__ASSERT(false, "cannot unmap %p\n", va_old);

			ret = ret2;
		}

		offset += CONFIG_MM_DRV_PAGE_SIZE;
		idx++;
	}

unlock_out:
	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_move_array,
		  sys_mm_drv_move_array, int);

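/*
 * Update the flags of an already mapped region, one page at a time,
 * via sys_mm_drv_update_page_flags().
 */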
int sys_mm_drv_simple_update_region_flags(void *virt, size_t size, uint32_t flags)
{
	k_spinlock_key_t key;
	int ret = 0;
	size_t offset;

	CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va = (uint8_t *)virt + offset;

		int ret2 = sys_mm_drv_update_page_flags(va, flags);

		if (ret2 != 0) {
			__ASSERT(false, "cannot update flags %p\n", va);

			ret = ret2;
		}
	}

	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_update_region_flags,
		  sys_mm_drv_update_region_flags, int);

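/*
 * Default region query: return an empty table consisting of a
 * single zeroed sentinel entry.
 */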
const struct sys_mm_drv_region *sys_mm_drv_simple_query_memory_regions(void)
{
	static const struct sys_mm_drv_region empty[] = {
		{ }
	};

	return empty;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_query_memory_regions,
		  sys_mm_drv_query_memory_regions,
		  const struct sys_mm_drv_region *);

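/*
 * Default no-op: the region table returned above is static,
 * so there is nothing to free.
 */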
void sys_mm_drv_simple_query_memory_regions_free(const struct sys_mm_drv_region *regions)
{
	ARG_UNUSED(regions);
}

__weak FUNC_ALIAS(sys_mm_drv_simple_query_memory_regions_free,
		  sys_mm_drv_query_memory_regions_free, void);