/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Common Memory Management Driver Code
 *
 * This file provides common implementations of memory management driver
 * functions. For example, sys_mm_drv_map_region() can use
 * sys_mm_drv_map_page() to map an entire region page by page.
 * This avoids duplicating the same functionality in different drivers.
 * The implementations here are marked as weak functions so they can be
 * overridden by the driver.
 */
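
/*
 * Illustrative sketch, not part of this file: because the functions below
 * are exposed through weak aliases, a driver can override one by providing
 * its own strong definition with the same signature. The helper
 * my_drv_program_pages() is a hypothetical placeholder for driver-specific
 * page table programming, shown only to make the override mechanism clear.
 *
 *	int sys_mm_drv_map_region(void *virt, uintptr_t phys,
 *				  size_t size, uint32_t flags)
 *	{
 *		// Program all pages of the region in one hardware operation
 *		// instead of looping page by page.
 *		return my_drv_program_pages(virt, phys, size, flags);
 *	}
 */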

#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/util.h>

#include <zephyr/drivers/mm/system_mm.h>

#include "mm_drv_common.h"

struct k_spinlock sys_mm_drv_common_lock;

bool sys_mm_drv_is_addr_array_aligned(uintptr_t *addr, size_t cnt)
{
	size_t idx;
	bool ret = true;

	for (idx = 0; idx < cnt; idx++) {
		if (!sys_mm_drv_is_addr_aligned(addr[idx])) {
			ret = false;
			break;
		}
	}

	return ret;
}

bool sys_mm_drv_is_virt_region_mapped(void *virt, size_t size)
{
	size_t offset;
	bool ret = true;

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va = (uint8_t *)virt + offset;

		if (sys_mm_drv_page_phys_get(va, NULL) != 0) {
			ret = false;
			break;
		}
	}

	return ret;
}

bool sys_mm_drv_is_virt_region_unmapped(void *virt, size_t size)
{
	size_t offset;
	bool ret = true;

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va = (uint8_t *)virt + offset;

		if (sys_mm_drv_page_phys_get(va, NULL) != -EFAULT) {
			ret = false;
			break;
		}
	}

	return ret;
}

/**
 * Unmap a memory region with the common lock already held.
 *
 * @param virt Page-aligned base virtual address to unmap
 * @param size Page-aligned region size
 * @param is_reset True if resetting the mappings
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if virtual address is not mapped
 */
static int unmap_locked(void *virt, size_t size, bool is_reset)
{
	int ret = 0;
	size_t offset;

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va = (uint8_t *)virt + offset;

		int ret2 = sys_mm_drv_unmap_page(va);

		if (ret2 != 0) {
			if (is_reset) {
				__ASSERT(false, "cannot reset mapping %p\n", va);
			} else {
				__ASSERT(false, "cannot unmap %p\n", va);
			}

			ret = ret2;
		}
	}

	return ret;
}

int sys_mm_drv_simple_map_region(void *virt, uintptr_t phys,
				 size_t size, uint32_t flags)
{
	k_spinlock_key_t key;
	int ret = 0;
	size_t offset;

	CHECKIF(!sys_mm_drv_is_addr_aligned(phys) ||
		!sys_mm_drv_is_virt_addr_aligned(virt) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va = (uint8_t *)virt + offset;
		uintptr_t pa = phys + offset;

		ret = sys_mm_drv_map_page(va, pa, flags);

		if (ret != 0) {
			__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va);

			/*
			 * Reset the already mapped virtual addresses.
			 * Note the offset is at the current failed address
			 * which will not be included during unmapping.
			 */
			(void)unmap_locked(virt, offset, true);

			break;
		}
	}

	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_map_region,
		  sys_mm_drv_map_region, int);
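
/*
 * Usage sketch with illustrative values: map a physically contiguous,
 * page-aligned buffer into virtual address space. UNCACHED_VIRT_BASE and
 * BUF_PHYS_ADDR are hypothetical page-aligned addresses; the flags come
 * from the SYS_MM_MEM_* definitions in system_mm.h.
 *
 *	void *va = (void *)UNCACHED_VIRT_BASE;
 *	int err = sys_mm_drv_map_region(va, BUF_PHYS_ADDR,
 *					4 * CONFIG_MM_DRV_PAGE_SIZE,
 *					SYS_MM_MEM_PERM_RW);
 *
 * On failure nothing is left mapped: the simple implementation above rolls
 * back any pages it already mapped before returning the error.
 */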

int sys_mm_drv_simple_map_array(void *virt, uintptr_t *phys,
				size_t cnt, uint32_t flags)
{
	k_spinlock_key_t key;
	int ret = 0;
	size_t idx, offset;

	CHECKIF(!sys_mm_drv_is_addr_array_aligned(phys, cnt) ||
		!sys_mm_drv_is_virt_addr_aligned(virt)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	offset = 0;
	idx = 0;
	while (idx < cnt) {
		uint8_t *va = (uint8_t *)virt + offset;

		ret = sys_mm_drv_map_page(va, phys[idx], flags);

		if (ret != 0) {
			__ASSERT(false, "cannot map 0x%lx to %p\n", phys[idx], va);

			/*
			 * Reset the already mapped virtual addresses.
			 * Note the offset is at the current failed address
			 * which will not be included during unmapping.
			 */
			(void)unmap_locked(virt, offset, true);

			break;
		}

		offset += CONFIG_MM_DRV_PAGE_SIZE;
		idx++;
	}

	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_map_array, sys_mm_drv_map_array, int);

int sys_mm_drv_simple_unmap_region(void *virt, size_t size)
{
	k_spinlock_key_t key;
	int ret = 0;

	CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	ret = unmap_locked(virt, size, false);

	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_unmap_region,
		  sys_mm_drv_unmap_region, int);

int sys_mm_drv_simple_remap_region(void *virt_old, size_t size,
				   void *virt_new)
{
	k_spinlock_key_t key;
	size_t offset;
	int ret = 0;

	CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_new) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) &&
	    (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) {
		ret = -EINVAL; /* overlaps */
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) ||
	    !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) {
		ret = -EINVAL;
		goto unlock_out;
	}

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va_old = (uint8_t *)virt_old + offset;
		uint8_t *va_new = (uint8_t *)virt_new + offset;
		uintptr_t pa;
		uint32_t flags;

		/*
		 * Grab the physical address of the old mapped page
		 * so the new page can map to the same physical address.
		 */
		ret = sys_mm_drv_page_phys_get(va_old, &pa);
		if (ret != 0) {
			__ASSERT(false, "cannot query %p\n", va_old);

			/*
			 * Reset the already mapped virtual addresses.
			 * Note the offset is at the current failed address
			 * which will not be included during unmapping.
			 */
			(void)unmap_locked(virt_new, offset, true);

			goto unlock_out;
		}

		/*
		 * Grab the flags of the old mapped page
		 * so the new page can map to the same flags.
		 */
		ret = sys_mm_drv_page_flag_get(va_old, &flags);
		if (ret != 0) {
			__ASSERT(false, "cannot query page %p\n", va_old);

			/*
			 * Reset the already mapped virtual addresses.
			 * Note the offset is at the current failed address
			 * which will not be included during unmapping.
			 */
			(void)unmap_locked(virt_new, offset, true);

			goto unlock_out;
		}

		ret = sys_mm_drv_map_page(va_new, pa, flags);
		if (ret != 0) {
			__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new);

			/*
			 * Reset the already mapped virtual addresses.
			 * Note the offset is at the current failed address
			 * which will not be included during unmapping.
			 */
			(void)unmap_locked(virt_new, offset, true);

			goto unlock_out;
		}
	}

	(void)unmap_locked(virt_old, size, false);

unlock_out:
	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_remap_region,
		  sys_mm_drv_remap_region, int);

int sys_mm_drv_simple_move_region(void *virt_old, size_t size,
				  void *virt_new, uintptr_t phys_new)
{
	k_spinlock_key_t key;
	size_t offset;
	int ret = 0;

	CHECKIF(!sys_mm_drv_is_addr_aligned(phys_new) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_new) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) &&
	    (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) {
		ret = -EINVAL; /* overlaps */
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) ||
	    !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) {
		ret = -EINVAL;
		goto unlock_out;
	}

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va_old = (uint8_t *)virt_old + offset;
		uint8_t *va_new = (uint8_t *)virt_new + offset;
		uintptr_t pa = phys_new + offset;
		uint32_t flags;

		ret = sys_mm_drv_page_flag_get(va_old, &flags);
		if (ret != 0) {
			__ASSERT(false, "cannot query page %p\n", va_old);

			/*
			 * Reset the already mapped virtual addresses.
			 * Note the offset is at the current failed address
			 * which will not be included during unmapping.
			 */
			(void)unmap_locked(virt_new, offset, true);

			goto unlock_out;
		}

		/*
		 * Map the new page with flags of the old mapped page
		 * so they both have the same properties.
		 */
		ret = sys_mm_drv_map_page(va_new, pa, flags);
		if (ret != 0) {
			__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new);

			/*
			 * Reset the already mapped virtual addresses.
			 * Note the offset is at the current failed address
			 * which will not be included during unmapping.
			 */
			(void)unmap_locked(virt_new, offset, true);

			goto unlock_out;
		}
	}

	/* Once new mappings are in place, copy the content over. */
	(void)memcpy(virt_new, virt_old, size);

	/* Unmap old virtual memory region once the move is done. */
	(void)unmap_locked(virt_old, size, false);

unlock_out:
	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_move_region,
		  sys_mm_drv_move_region, int);

int sys_mm_drv_simple_move_array(void *virt_old, size_t size,
				 void *virt_new,
				 uintptr_t *phys_new, size_t phys_cnt)
{
	k_spinlock_key_t key;
	size_t idx, offset;
	int ret = 0;

	CHECKIF(!sys_mm_drv_is_addr_array_aligned(phys_new, phys_cnt) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_new) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) &&
	    (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) {
		ret = -EINVAL; /* overlaps */
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) ||
	    !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) {
		ret = -EINVAL;
		goto unlock_out;
	}

	offset = 0;
	idx = 0;
	while (idx < phys_cnt) {
		uint8_t *va_old = (uint8_t *)virt_old + offset;
		uint8_t *va_new = (uint8_t *)virt_new + offset;
		uint32_t flags;

		ret = sys_mm_drv_page_flag_get(va_old, &flags);
		if (ret != 0) {
			__ASSERT(false, "cannot query page %p\n", va_old);

			/*
			 * Reset the already mapped virtual addresses.
			 * Note the offset is at the current failed address
			 * which will not be included during unmapping.
			 */
			(void)unmap_locked(virt_new, offset, true);

			goto unlock_out;
		}

		/*
		 * Only map the new page when we can retrieve the
		 * flags of the old mapped page, since we don't
		 * want to map with unknown random flags.
		 */
		ret = sys_mm_drv_map_page(va_new, phys_new[idx], flags);
		if (ret != 0) {
			__ASSERT(false, "cannot map 0x%lx to %p\n",
				 phys_new[idx], va_new);

			/*
			 * Reset the already mapped virtual addresses.
			 * Note the offset is at the current failed address
			 * which will not be included during unmapping.
			 */
			(void)unmap_locked(virt_new, offset, true);

			goto unlock_out;
		}

		offset += CONFIG_MM_DRV_PAGE_SIZE;
		idx++;
	}

	/* Once new mappings are in place, copy the content over. */
	(void)memcpy(virt_new, virt_old, size);

	/* Unmap old virtual memory region once the move is done. */
	(void)unmap_locked(virt_old, size, false);

unlock_out:
	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_move_array,
		  sys_mm_drv_move_array, int);

int sys_mm_drv_simple_update_region_flags(void *virt, size_t size, uint32_t flags)
{
	k_spinlock_key_t key;
	int ret = 0;
	size_t offset;

	CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va = (uint8_t *)virt + offset;

		int ret2 = sys_mm_drv_update_page_flags(va, flags);

		if (ret2 != 0) {
			__ASSERT(false, "cannot update flags %p\n", va);

			ret = ret2;
		}
	}

	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_update_region_flags,
		  sys_mm_drv_update_region_flags, int);

const struct sys_mm_drv_region *sys_mm_drv_simple_query_memory_regions(void)
{
	static const struct sys_mm_drv_region empty[] = {
		{ }
	};

	return empty;
}

__weak FUNC_ALIAS(sys_mm_drv_simple_query_memory_regions,
		  sys_mm_drv_query_memory_regions,
		  const struct sys_mm_drv_region *);
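
/*
 * Override sketch (assumption for illustration): a driver that exposes its
 * memory regions can provide a strong definition returning a static table.
 * The zero-filled terminating entry mirrors the empty table returned by the
 * simple implementation above; the actual entries are driver-specific.
 *
 *	const struct sys_mm_drv_region *sys_mm_drv_query_memory_regions(void)
 *	{
 *		static const struct sys_mm_drv_region regions[] = {
 *			// driver-specific region entries go here
 *			{ }
 *		};
 *
 *		return regions;
 *	}
 */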

void sys_mm_drv_simple_query_memory_regions_free(const struct sys_mm_drv_region *regions)
{
	ARG_UNUSED(regions);
}

__weak FUNC_ALIAS(sys_mm_drv_simple_query_memory_regions_free,
		  sys_mm_drv_query_memory_regions_free, void);