/*
 * Copyright (c) 2021 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Memory Management Driver APIs
 *
 * This contains APIs for a system-wide memory management
 * driver. Only one instance is permitted on the system.
 */

#ifndef ZEPHYR_INCLUDE_DRIVERS_SYSTEM_MM_H_
#define ZEPHYR_INCLUDE_DRIVERS_SYSTEM_MM_H_

#include <zephyr/types.h>
#include <zephyr/sys/util.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Memory Management Driver APIs
 * @defgroup mm_drv_apis Memory Management Driver APIs
 *
 * This contains APIs for a system-wide memory management
 * driver. Only one instance is permitted on the system.
 *
 * @ingroup memory_management
 * @{
 */

/**
 * @name Caching mode definitions.
 *
 * These are mutually exclusive.
 *
 * @{
 */

/** No caching */
#define SYS_MM_MEM_CACHE_NONE		2

/** Write-through caching */
#define SYS_MM_MEM_CACHE_WT		1

/** Full write-back caching */
#define SYS_MM_MEM_CACHE_WB		0

/** Reserved bits for cache modes */
#define SYS_MM_MEM_CACHE_MASK		(BIT(3) - 1)

/**
 * @}
 */

/**
 * @name Region permission attributes.
 *
 * Default should be read-only, no user, no exec.
 *
 * @{
 */

/** Region will have read/write access (and not read-only) */
#define SYS_MM_MEM_PERM_RW		BIT(3)

/** Region will be executable (normally forbidden) */
#define SYS_MM_MEM_PERM_EXEC		BIT(4)

/** Region will be accessible to user mode (normally supervisor-only) */
#define SYS_MM_MEM_PERM_USER		BIT(5)

/**
 * @}
 */

/**
 * @name Memory Mapping and Unmapping
 *
 * On mapping and unmapping of memory.
 *
 * @{
 */

/**
 * @brief Map one physical page into the virtual address space
 *
 * This maps one physical page into the virtual address space.
 * Both addresses are assumed to be page-aligned; behavior when
 * providing unaligned addresses is undefined.
 *
 * The memory range itself is never accessed by this operation.
 *
 * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized.
 *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Page-aligned source physical address to map
 * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if virtual address has already been mapped
 */
int sys_mm_drv_map_page(void *virt, uintptr_t phys, uint32_t flags);
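
/*
 * Illustrative usage sketch (not part of the API): map a single page of
 * device memory read/write and uncached. The physical address, the virtual
 * destination and the page size are hypothetical placeholders; an actual
 * driver defines its own page size and valid address ranges.
 *
 *	void *virt = (void *)0xA0000000UL;   // hypothetical page-aligned VA
 *	uintptr_t phys = 0x80000000UL;       // hypothetical page-aligned PA
 *	int ret = sys_mm_drv_map_page(virt, phys,
 *				      SYS_MM_MEM_PERM_RW | SYS_MM_MEM_CACHE_NONE);
 *	if (ret != 0) {
 *		// -EINVAL on bad arguments, -EFAULT if virt is already mapped
 *	}
 */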

/**
 * @brief Map a region of physical memory into the virtual address space
 *
 * This maps a region of physical memory into the virtual address space.
 * The addresses and the size are assumed to be page-aligned; behavior
 * when providing unaligned values is undefined.
 *
 * The memory range itself is never accessed by this operation.
 *
 * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized.
 *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Page-aligned source physical address to map
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if any virtual addresses have already been mapped
 */
int sys_mm_drv_map_region(void *virt, uintptr_t phys,
			  size_t size, uint32_t flags);
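
/*
 * Illustrative usage sketch (not part of the API): map a physically
 * contiguous 64 KiB region with write-back caching. All values below are
 * hypothetical placeholders and must be page-aligned for the driver in use.
 *
 *	void *virt = (void *)0xA0000000UL;
 *	uintptr_t phys = 0x80000000UL;
 *	int ret = sys_mm_drv_map_region(virt, phys, 0x10000,
 *					SYS_MM_MEM_PERM_RW | SYS_MM_MEM_CACHE_WB);
 *	if (ret != 0) {
 *		// handle -EINVAL / -EFAULT
 *	}
 */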

/**
 * @brief Map an array of physical memory into the virtual address space
 *
 * This maps an array of physical pages into a contiguous virtual address
 * range. The addresses are assumed to be page-aligned; behavior when
 * providing unaligned addresses is undefined.
 *
 * The physical memory pages are never accessed by this operation.
 *
 * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized.
 *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Array of page-aligned source physical addresses to map
 * @param cnt Number of elements in the physical page array
 * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if any virtual addresses have already been mapped
 */
int sys_mm_drv_map_array(void *virt, uintptr_t *phys,
			 size_t cnt, uint32_t flags);
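
/*
 * Illustrative usage sketch (not part of the API): map three scattered
 * physical pages back to back at one virtual base address. The addresses
 * are hypothetical placeholders.
 *
 *	uintptr_t pages[] = { 0x80000000UL, 0x80004000UL, 0x80010000UL };
 *	void *virt = (void *)0xA0000000UL;
 *	int ret = sys_mm_drv_map_array(virt, pages, ARRAY_SIZE(pages),
 *				       SYS_MM_MEM_PERM_RW | SYS_MM_MEM_CACHE_WB);
 *	if (ret != 0) {
 *		// handle -EINVAL / -EFAULT
 *	}
 */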

/**
 * @brief Remove mapping for one page of the provided virtual address
 *
 * This unmaps one page from the virtual address space.
 *
 * When this completes, the relevant translation table entries will be
 * updated as if no mapping was ever made for that memory page. No previous
 * context needs to be preserved. This function must update the mapping in
 * all active translation tables.
 *
 * The address is assumed to be page-aligned; behavior when providing an
 * unaligned address is undefined.
 *
 * Implementations must invalidate translation caching as necessary.
 *
 * @param virt Page-aligned virtual address to un-map
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if virtual address is not mapped
 */
int sys_mm_drv_unmap_page(void *virt);
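
/*
 * Illustrative usage sketch (not part of the API): tear down the single-page
 * mapping created earlier. The virtual address is a hypothetical placeholder.
 *
 *	int ret = sys_mm_drv_unmap_page((void *)0xA0000000UL);
 *	if (ret == -EFAULT) {
 *		// the page was not mapped in the first place
 *	}
 */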

/**
 * @brief Remove mappings for a provided virtual address range
 *
 * This unmaps pages in the provided virtual address range.
 *
 * When this completes, the relevant translation table entries will be
 * updated as if no mapping was ever made for that memory range. No previous
 * context needs to be preserved. This function must update mappings in
 * all active translation tables.
 *
 * The address and size are assumed to be page-aligned; behavior when
 * providing unaligned values is undefined.
 *
 * Implementations must invalidate translation caching as necessary.
 *
 * @param virt Page-aligned base virtual address to un-map
 * @param size Page-aligned region size
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if virtual address is not mapped
 */
int sys_mm_drv_unmap_region(void *virt, size_t size);
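
/*
 * Illustrative usage sketch (not part of the API): unmap the whole 64 KiB
 * region mapped in the earlier example. Values are hypothetical placeholders.
 *
 *	int ret = sys_mm_drv_unmap_region((void *)0xA0000000UL, 0x10000);
 *	if (ret != 0) {
 *		// -EFAULT if part of the range was never mapped
 *	}
 */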

/**
 * @brief Remap virtual pages into new address
 *
 * This remaps a virtual memory region starting at @p virt_old
 * of size @p size into a new virtual memory region starting at
 * @p virt_new. In other words, physical memory at @p virt_old is
 * remapped to appear at @p virt_new. Both addresses must be page
 * aligned and valid.
 *
 * Note that the virtual memory at both the old and new addresses
 * must be unmapped in the memory domains of any runnable Zephyr
 * thread as this does not deal with memory domains.
 *
 * Note that overlapping old and new virtual memory regions is
 * usually not supported, to keep implementations simple. Refer to
 * the actual driver to check whether overlapping is allowed.
 *
 * @param virt_old Page-aligned base virtual address of existing memory
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param virt_new Page-aligned base virtual address to which to remap
 *                 the memory
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if old virtual addresses are not all mapped or
 *                 new virtual addresses are not all unmapped
 */
int sys_mm_drv_remap_region(void *virt_old, size_t size, void *virt_new);
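
/*
 * Illustrative usage sketch (not part of the API): make the physical memory
 * currently visible at one virtual window appear at another, non-overlapping
 * window. Both base addresses and the size are hypothetical placeholders.
 *
 *	void *old_va = (void *)0xA0000000UL;   // currently mapped
 *	void *new_va = (void *)0xA0100000UL;   // currently unmapped
 *	int ret = sys_mm_drv_remap_region(old_va, 0x10000, new_va);
 *	if (ret == 0) {
 *		// accesses through new_va now reach the same physical pages
 *	}
 */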

/**
 * @}
 */

/**
 * @name Memory Moving
 *
 * On moving already mapped memory.
 *
 * @{
 */

/**
 * @brief Physically move memory, with copy
 *
 * This maps a region of physical memory into the new virtual address space
 * (@p virt_new), and copies a region of size @p size from the old virtual
 * address space (@p virt_old) into it. The new virtual memory region is
 * mapped from physical memory starting at @p phys_new of size @p size.
 *
 * Addresses and size are assumed to be page-aligned; behavior when
 * providing unaligned values is undefined.
 *
 * Note that the virtual memory at both the old and new addresses
 * must be unmapped in the memory domains of any runnable Zephyr
 * thread as this does not deal with memory domains.
 *
 * Note that overlapping old and new virtual memory regions is
 * usually not supported, to keep implementations simple. Refer to
 * the actual driver to check whether overlapping is allowed.
 *
 * @param virt_old Page-aligned base virtual address of existing memory
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param virt_new Page-aligned base virtual address to which to map
 *                 new physical pages
 * @param phys_new Page-aligned base physical address to contain
 *                 the moved memory
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if old virtual addresses are not all mapped or
 *                 new virtual addresses are not all unmapped
 */
int sys_mm_drv_move_region(void *virt_old, size_t size, void *virt_new,
			   uintptr_t phys_new);
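
/*
 * Illustrative usage sketch (not part of the API): migrate the contents of
 * an existing mapping to fresh, physically contiguous memory while exposing
 * it at a new virtual address. All values are hypothetical placeholders.
 *
 *	void *old_va = (void *)0xA0000000UL;     // mapped, holds live data
 *	void *new_va = (void *)0xA0200000UL;     // unmapped destination window
 *	uintptr_t new_pa = 0x88000000UL;         // destination physical memory
 *	int ret = sys_mm_drv_move_region(old_va, 0x10000, new_va, new_pa);
 *	if (ret == 0) {
 *		// data formerly at old_va is now readable through new_va
 *	}
 */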

/**
 * @brief Physically move memory, with copy
 *
 * This maps a region of physical memory into the new virtual address space
 * (@p virt_new), and copies a region of size @p size from the old virtual
 * address space (@p virt_old) into it. The new virtual memory region is
 * mapped from an array of physical pages.
 *
 * Addresses and size are assumed to be page-aligned; behavior when
 * providing unaligned values is undefined.
 *
 * Note that the virtual memory at both the old and new addresses
 * must be unmapped in the memory domains of any runnable Zephyr
 * thread as this does not deal with memory domains.
 *
 * Note that overlapping old and new virtual memory regions is
 * usually not supported, to keep implementations simple. Refer to
 * the actual driver to check whether overlapping is allowed.
 *
 * @param virt_old Page-aligned base virtual address of existing memory
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param virt_new Page-aligned base virtual address to which to map
 *                 new physical pages
 * @param phys_new Array of page-aligned physical addresses to contain
 *                 the moved memory
 * @param phys_cnt Number of elements in the physical page array
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if old virtual addresses are not all mapped or
 *                 new virtual addresses are not all unmapped
 */
int sys_mm_drv_move_array(void *virt_old, size_t size, void *virt_new,
			  uintptr_t *phys_new, size_t phys_cnt);
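
/*
 * Illustrative usage sketch (not part of the API): same migration as above,
 * but the destination is a set of scattered physical pages. The addresses
 * and the 8 KiB total size are hypothetical placeholders.
 *
 *	uintptr_t dst_pages[] = { 0x88000000UL, 0x88008000UL };
 *	void *old_va = (void *)0xA0000000UL;
 *	void *new_va = (void *)0xA0200000UL;
 *	int ret = sys_mm_drv_move_array(old_va, 0x2000, new_va,
 *					dst_pages, ARRAY_SIZE(dst_pages));
 *	if (ret != 0) {
 *		// handle -EINVAL / -EFAULT
 *	}
 */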

/**
 * @}
 */

/**
 * @name Memory Mapping Attributes
 *
 * On manipulating attributes of already mapped memory.
 *
 * @{
 */

/**
 * @brief Update memory page flags
 *
 * This changes the attributes of a physical memory page which is already
 * mapped to a virtual address. This is useful when the use case of a
 * specific memory region changes, e.g. when library/module code is copied
 * into memory it needs to be read-write, and once the copy is done and the
 * code is ready to be executed the attributes need to be changed to
 * read-only/executable. Calling this API must not cause loss of memory
 * contents.
 *
 * @param virt Page-aligned virtual address to be updated
 * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if virtual address is not mapped
 */
int sys_mm_drv_update_page_flags(void *virt, uint32_t flags);

/**
 * @brief Update memory region flags
 *
 * This changes the attributes of physical memory which is already
 * mapped to a virtual address. This is useful when the use case of a
 * specific memory region changes, e.g. when library/module code is copied
 * into memory it needs to be read-write, and once the copy is done and the
 * code is ready to be executed the attributes need to be changed to
 * read-only/executable. Calling this API must not cause loss of memory
 * contents.
 *
 * @param virt Page-aligned virtual address to be updated
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if virtual address is not mapped
 */
int sys_mm_drv_update_region_flags(void *virt, size_t size, uint32_t flags);
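
/*
 * Illustrative usage sketch (not part of the API): the loadable-code scenario
 * described above. The destination address, the sizes and the code_src buffer
 * are hypothetical placeholders.
 *
 *	void *code_dst = (void *)0xA0300000UL;   // mapped read-write
 *	memcpy(code_dst, code_src, code_size);   // copy module code in
 *	int ret = sys_mm_drv_update_region_flags(code_dst, 0x4000,
 *						 SYS_MM_MEM_PERM_EXEC |
 *						 SYS_MM_MEM_CACHE_WB);
 *	if (ret == 0) {
 *		// region is now read-only and executable; contents preserved
 *	}
 */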

/**
 * @}
 */

/**
 * @name Memory Mappings Query
 *
 * On querying information on memory mappings.
 *
 * @{
 */

/**
 * @brief Get the mapped physical memory address from a virtual address.
 *
 * The function queries the translation tables to find the physical
 * memory address of a mapped virtual address.
 *
 * The address is assumed to be page-aligned; behavior when providing an
 * unaligned address is undefined.
 *
 * @param      virt Page-aligned virtual address
 * @param[out] phys Mapped physical address (can be NULL if only checking
 *                  if virtual address is mapped)
 *
 * @retval 0 if mapping is found and valid
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if virtual address is not mapped
 */
int sys_mm_drv_page_phys_get(void *virt, uintptr_t *phys);
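
/*
 * Illustrative usage sketch (not part of the API): look up the physical page
 * backing a virtual address, or just probe whether it is mapped at all. The
 * virtual address is a hypothetical placeholder.
 *
 *	uintptr_t pa;
 *	if (sys_mm_drv_page_phys_get((void *)0xA0000000UL, &pa) == 0) {
 *		// pa now holds the backing physical address
 *	}
 *	bool mapped = (sys_mm_drv_page_phys_get((void *)0xA0000000UL, NULL) == 0);
 */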

/**
 * @brief Represents an available memory region.
 *
 * A memory region that can be used by allocators. Driver defined
 * attributes can be used to guide the proper usage of each region.
 */
struct sys_mm_drv_region {
	void *addr; /**< @brief Address of the memory region */
	size_t size; /**< @brief Size of the memory region */
	uint32_t attr; /**< @brief Driver defined attributes of the memory region */
};

/* TODO is it safe to assume no valid region has size == 0? */
/**
 * @brief Iterates over an array of regions returned by #sys_mm_drv_query_memory_regions
 *
 * Note that a sentinel item marking the end of the array is expected for
 * this macro to work.
 */
#define SYS_MM_DRV_MEMORY_REGION_FOREACH(regions, iter) \
	for (iter = regions; iter->size; iter++)

/**
 * @brief Query available memory regions
 *
 * Returns an array of available memory regions. One can iterate over
 * the array using #SYS_MM_DRV_MEMORY_REGION_FOREACH. Note that the last
 * item of the array is a sentinel marking the end, identified by its
 * size attribute, which is zero.
 *
 * @return Array of memory regions, possibly empty (i.e. containing only
 *         the sentinel marking the end)
 */
const struct sys_mm_drv_region *sys_mm_drv_query_memory_regions(void);
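
/*
 * Illustrative usage sketch (not part of the API): enumerate the regions the
 * driver advertises and release the array afterwards. The printk output and
 * the use made of each region are illustrative only.
 *
 *	const struct sys_mm_drv_region *regions = sys_mm_drv_query_memory_regions();
 *	const struct sys_mm_drv_region *r;
 *
 *	SYS_MM_DRV_MEMORY_REGION_FOREACH(regions, r) {
 *		printk("region %p size %zu attr 0x%x\n", r->addr, r->size, r->attr);
 *	}
 *
 *	sys_mm_drv_query_memory_regions_free(regions);
 */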

/**
 * @brief Free the memory array returned by #sys_mm_drv_query_memory_regions
 *
 * The driver may have dynamically allocated the memory for the array of
 * regions returned by #sys_mm_drv_query_memory_regions. This function gives
 * the driver an opportunity to free any related resources.
 *
 * @param regions Array of regions previously returned by
 *                #sys_mm_drv_query_memory_regions
 */
void sys_mm_drv_query_memory_regions_free(const struct sys_mm_drv_region *regions);

/**
 * @}
 */

/**
 * @}
 */

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_DRIVERS_SYSTEM_MM_H_ */