/*
 * Copyright (c) 2021 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Memory Management Driver APIs
 *
 * This contains APIs for a system-wide memory management
 * driver. Only one instance is permitted on the system.
 */

#ifndef ZEPHYR_INCLUDE_DRIVERS_SYSTEM_MM_H_
#define ZEPHYR_INCLUDE_DRIVERS_SYSTEM_MM_H_

#include <zephyr/types.h>
#include <zephyr/sys/util.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Memory Management Driver APIs
 * @defgroup mm_drv_apis Memory Management Driver APIs
 * @ingroup memory_management
 * @{
 */

/*
 * Caching mode definitions. These are mutually exclusive.
 */

/** No caching */
#define SYS_MM_MEM_CACHE_NONE		2

/** Write-through caching */
#define SYS_MM_MEM_CACHE_WT		1

/** Full write-back caching */
#define SYS_MM_MEM_CACHE_WB		0

/** Reserved bits for cache modes */
#define SYS_MM_MEM_CACHE_MASK		(BIT(3) - 1)

/*
 * Region permission attributes.
 * Default should be read-only, no user, no exec.
 */

/** Region will have read/write access (and not read-only) */
#define SYS_MM_MEM_PERM_RW		BIT(3)

/** Region will be executable (normally forbidden) */
#define SYS_MM_MEM_PERM_EXEC		BIT(4)

/** Region will be accessible to user mode (normally supervisor-only) */
#define SYS_MM_MEM_PERM_USER		BIT(5)

/**
 * @brief Map one physical page into the virtual address space
 *
 * This maps one physical page into the virtual address space.
 * Behavior is undefined when an unaligned address is provided;
 * the address is assumed to be page aligned.
 *
 * The memory range itself is never accessed by this operation.
 *
 * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized.
 *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Page-aligned source physical address to map
 * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if virtual address has already been mapped
 */
int sys_mm_drv_map_page(void *virt, uintptr_t phys, uint32_t flags);
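
/*
 * Example (illustrative sketch only): map a single physical page as
 * uncached, read/write memory. The physical and virtual addresses below
 * are placeholders, not real platform values.
 *
 *   uintptr_t phys = 0x71000000UL;      // page-aligned physical address
 *   void *virt = (void *)0xA0000000UL;  // page-aligned virtual address
 *
 *   int ret = sys_mm_drv_map_page(virt, phys,
 *                                 SYS_MM_MEM_CACHE_NONE | SYS_MM_MEM_PERM_RW);
 *   if (ret != 0) {
 *           // -EINVAL: bad arguments, -EFAULT: virt is already mapped
 *   }
 */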

/**
 * @brief Map a region of physical memory into the virtual address space
 *
 * This maps a region of physical memory into the virtual address space.
 * Behavior is undefined when unaligned addresses or sizes are provided;
 * these are assumed to be page aligned.
 *
 * The memory range itself is never accessed by this operation.
 *
 * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized.
 *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Page-aligned source physical address to map
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if any virtual addresses have already been mapped
 */
int sys_mm_drv_map_region(void *virt, uintptr_t phys,
			  size_t size, uint32_t flags);
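
/*
 * Example (illustrative sketch only): map a contiguous 64 KiB physical
 * region with write-back caching. The addresses and size are placeholders
 * and are assumed to be page aligned for the target platform.
 *
 *   int ret = sys_mm_drv_map_region((void *)0xA0010000UL, 0x72000000UL,
 *                                   64 * 1024,
 *                                   SYS_MM_MEM_CACHE_WB | SYS_MM_MEM_PERM_RW);
 */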

/**
 * @brief Map an array of physical memory into the virtual address space
 *
 * This maps an array of physical pages into a contiguous virtual address
 * space. Behavior is undefined when unaligned addresses are provided;
 * these are assumed to be page aligned.
 *
 * The physical memory pages are never accessed by this operation.
 *
 * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized.
 *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Array of page-aligned source physical addresses to map
 * @param cnt Number of elements in the physical page array
 * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if any virtual addresses have already been mapped
 */
int sys_mm_drv_map_array(void *virt, uintptr_t *phys,
			 size_t cnt, uint32_t flags);
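
/*
 * Example (illustrative sketch only): map three scattered physical pages
 * into one contiguous virtual range. The addresses are placeholders and
 * assume a 4 KiB page size.
 *
 *   uintptr_t pages[] = { 0x71000000UL, 0x71008000UL, 0x71010000UL };
 *
 *   int ret = sys_mm_drv_map_array((void *)0xA0020000UL, pages,
 *                                  ARRAY_SIZE(pages),
 *                                  SYS_MM_MEM_CACHE_WB | SYS_MM_MEM_PERM_RW);
 */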

/**
 * @brief Remove mapping for one page of the provided virtual address
 *
 * This unmaps one page from the virtual address space.
 *
 * When this completes, the relevant translation table entries will be
 * updated as if no mapping was ever made for that memory page. No previous
 * context needs to be preserved. This function must update the mapping in
 * all active translation tables.
 *
 * Behavior is undefined when an unaligned address is provided; the
 * address is assumed to be page aligned.
 *
 * Implementations must invalidate translation caching as necessary.
 *
 * @param virt Page-aligned virtual address to un-map
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if virtual address is not mapped
 */
int sys_mm_drv_unmap_page(void *virt);
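
/*
 * Example (illustrative sketch only): release a mapping previously created
 * with sys_mm_drv_map_page(). The address is a placeholder.
 *
 *   int ret = sys_mm_drv_unmap_page((void *)0xA0000000UL);
 *   if (ret == -EFAULT) {
 *           // the page was not mapped in the first place
 *   }
 */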

/**
 * @brief Remove mappings for a provided virtual address range
 *
 * This unmaps pages in the provided virtual address range.
 *
 * When this completes, the relevant translation table entries will be
 * updated as if no mapping was ever made for that memory range. No previous
 * context needs to be preserved. This function must update mappings in
 * all active translation tables.
 *
 * Behavior is undefined when an unaligned address or size is provided;
 * these are assumed to be page aligned.
 *
 * Implementations must invalidate translation caching as necessary.
 *
 * @param virt Page-aligned base virtual address to un-map
 * @param size Page-aligned region size
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if any virtual address in the range is not mapped
 */
int sys_mm_drv_unmap_region(void *virt, size_t size);

/**
 * @brief Get the mapped physical memory address from a virtual address
 *
 * The function queries the translation tables to find the physical
 * memory address of a mapped virtual address.
 *
 * Behavior is undefined when an unaligned address is provided; the
 * address is assumed to be page aligned.
 *
 * @param      virt Page-aligned virtual address
 * @param[out] phys Mapped physical address (can be NULL if only checking
 *                  if virtual address is mapped)
 *
 * @retval 0 if mapping is found and valid
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if virtual address is not mapped
 */
int sys_mm_drv_page_phys_get(void *virt, uintptr_t *phys);
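
/*
 * Example (illustrative sketch only): check whether a virtual page is
 * mapped and retrieve its backing physical address. The address is a
 * placeholder.
 *
 *   uintptr_t phys;
 *
 *   if (sys_mm_drv_page_phys_get((void *)0xA0000000UL, &phys) == 0) {
 *           // mapped; phys now holds the physical address
 *   }
 *
 *   // Pass NULL to only test whether the address is mapped:
 *   bool mapped = (sys_mm_drv_page_phys_get((void *)0xA0000000UL, NULL) == 0);
 */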

/**
 * @brief Remap virtual pages into a new address
 *
 * This remaps a virtual memory region starting at @p virt_old
 * of size @p size into a new virtual memory region starting at
 * @p virt_new. In other words, physical memory at @p virt_old is
 * remapped to appear at @p virt_new. Both addresses must be page
 * aligned and valid.
 *
 * Note that the virtual memory at both the old and new addresses
 * must be unmapped in the memory domains of any runnable Zephyr
 * thread, as this function does not deal with memory domains.
 *
 * Note that overlapping old and new virtual memory regions is
 * usually not supported, to keep implementations simple. Refer to
 * the actual driver to check whether overlapping is allowed.
 *
 * @param virt_old Page-aligned base virtual address of existing memory
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param virt_new Page-aligned base virtual address to which to remap
 *                 the memory
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if old virtual addresses are not all mapped or
 *                 new virtual addresses are not all unmapped
 */
int sys_mm_drv_remap_region(void *virt_old, size_t size, void *virt_new);
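
/*
 * Example (illustrative sketch only): move a 16 KiB mapping to a new
 * virtual base without touching the underlying physical memory. The
 * addresses and size are placeholders.
 *
 *   int ret = sys_mm_drv_remap_region((void *)0xA0000000UL, 16 * 1024,
 *                                     (void *)0xB0000000UL);
 *   if (ret == 0) {
 *           // the physical pages are now reachable only at 0xB0000000
 *   }
 */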

/**
 * @brief Physically move memory, with copy
 *
 * This maps a region of physical memory into the new virtual address
 * region (@p virt_new) and copies @p size bytes from the old virtual
 * address region (@p virt_old). The new virtual memory region is mapped
 * from physical memory starting at @p phys_new of size @p size.
 *
 * Behavior is undefined when unaligned addresses or sizes are provided;
 * these are assumed to be page aligned.
 *
 * Note that the virtual memory at both the old and new addresses
 * must be unmapped in the memory domains of any runnable Zephyr
 * thread, as this function does not deal with memory domains.
 *
 * Note that overlapping old and new virtual memory regions is
 * usually not supported, to keep implementations simple. Refer to
 * the actual driver to check whether overlapping is allowed.
 *
 * @param virt_old Page-aligned base virtual address of existing memory
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param virt_new Page-aligned base virtual address to which to map
 *                 new physical pages
 * @param phys_new Page-aligned base physical address to contain
 *                 the moved memory
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if old virtual addresses are not all mapped or
 *                 new virtual addresses are not all unmapped
 */
int sys_mm_drv_move_region(void *virt_old, size_t size, void *virt_new,
			   uintptr_t phys_new);
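
/*
 * Example (illustrative sketch only): relocate a 16 KiB region onto new
 * physical backing memory, preserving its contents by copy. The addresses
 * and size are placeholders.
 *
 *   int ret = sys_mm_drv_move_region((void *)0xA0000000UL, 16 * 1024,
 *                                    (void *)0xB0000000UL, 0x73000000UL);
 *   if (ret == 0) {
 *           // contents formerly visible at 0xA0000000 are now backed by
 *           // physical memory at 0x73000000 and mapped at 0xB0000000
 *   }
 */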

/**
 * @brief Physically move memory, with copy
 *
 * This maps a region of physical memory into the new virtual address
 * region (@p virt_new) and copies @p size bytes from the old virtual
 * address region (@p virt_old). The new virtual memory region is mapped
 * from an array of physical pages.
 *
 * Behavior is undefined when unaligned addresses or sizes are provided;
 * these are assumed to be page aligned.
 *
 * Note that the virtual memory at both the old and new addresses
 * must be unmapped in the memory domains of any runnable Zephyr
 * thread, as this function does not deal with memory domains.
 *
 * Note that overlapping old and new virtual memory regions is
 * usually not supported, to keep implementations simple. Refer to
 * the actual driver to check whether overlapping is allowed.
 *
 * @param virt_old Page-aligned base virtual address of existing memory
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param virt_new Page-aligned base virtual address to which to map
 *                 new physical pages
 * @param phys_new Array of page-aligned physical addresses to contain
 *                 the moved memory
 * @param phys_cnt Number of elements in the physical page array
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if old virtual addresses are not all mapped or
 *                 new virtual addresses are not all unmapped
 */
int sys_mm_drv_move_array(void *virt_old, size_t size, void *virt_new,
			  uintptr_t *phys_new, size_t phys_cnt);
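
/*
 * Example (illustrative sketch only): as sys_mm_drv_move_region(), but the
 * new backing memory is a set of scattered physical pages. The addresses
 * are placeholders and assume a 4 KiB page size.
 *
 *   uintptr_t new_pages[] = { 0x73000000UL, 0x73008000UL };
 *
 *   int ret = sys_mm_drv_move_array((void *)0xA0000000UL, 2 * 4096,
 *                                   (void *)0xB0000000UL,
 *                                   new_pages, ARRAY_SIZE(new_pages));
 */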

/**
 * @brief Update memory page flags
 *
 * This changes the attributes of a physical memory page which is already
 * mapped to a virtual address. This is useful when the use case of a
 * specific memory region changes, e.g. when library/module code is
 * copied into memory it needs to be read/write, and once the copy is
 * done and the code is ready to be executed the attributes need to be
 * changed to read-only/executable.
 * Calling this API must not cause loss of memory contents.
 *
 * @param virt Page-aligned virtual address to be updated
 * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if virtual address is not mapped
 */
int sys_mm_drv_update_page_flags(void *virt, uint32_t flags);

/**
 * @brief Update memory region flags
 *
 * This changes the attributes of physical memory which is already
 * mapped to a virtual address. This is useful when the use case of a
 * specific memory region changes, e.g. when library/module code is
 * copied into memory it needs to be read/write, and once the copy is
 * done and the code is ready to be executed the attributes need to be
 * changed to read-only/executable.
 * Calling this API must not cause loss of memory contents.
 *
 * @param virt Page-aligned virtual address to be updated
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid arguments are provided
 * @retval -EFAULT if any virtual address in the range is not mapped
 */
int sys_mm_drv_update_region_flags(void *virt, size_t size, uint32_t flags);
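
/*
 * Example (illustrative sketch only): the copy-then-execute flow described
 * above. The destination address and size are placeholders.
 *
 *   void *code = (void *)0xA0000000UL;
 *
 *   // Keep the region writable while the module code is copied in.
 *   sys_mm_drv_update_region_flags(code, 16 * 1024,
 *                                  SYS_MM_MEM_CACHE_WB | SYS_MM_MEM_PERM_RW);
 *   // ... copy library/module code into the region ...
 *
 *   // Switch to read-only + executable before running the code.
 *   sys_mm_drv_update_region_flags(code, 16 * 1024,
 *                                  SYS_MM_MEM_CACHE_WB | SYS_MM_MEM_PERM_EXEC);
 */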

/**
 * @brief Represents an available memory region.
 *
 * A memory region that can be used by allocators. Driver defined
 * attributes can be used to guide the proper usage of each region.
 */
struct sys_mm_drv_region {
	void *addr; /**< @brief Address of the memory region */
	size_t size; /**< @brief Size of the memory region */
	uint32_t attr; /**< @brief Driver defined attributes of the memory region */
};

/* TODO is it safe to assume no valid region has size == 0? */
/**
 * @brief Iterates over an array of regions returned by #sys_mm_drv_query_memory_regions
 *
 * Note that a sentinel item marking the end of the array is expected for
 * this macro to work.
 */
#define SYS_MM_DRV_MEMORY_REGION_FOREACH(regions, iter) \
	for (iter = regions; iter->size; iter++)

/**
 * @brief Query available memory regions
 *
 * Returns an array of available memory regions. One can iterate over
 * the array using #SYS_MM_DRV_MEMORY_REGION_FOREACH. Note that the last
 * item of the array is a sentinel marking the end, and it is identified
 * by its size attribute, which is zero.
 *
 * @retval regions A possibly empty array of memory regions; an empty
 *         array contains only the sentinel marking the end.
 */
const struct sys_mm_drv_region *sys_mm_drv_query_memory_regions(void);

/**
 * @brief Free the memory array returned by #sys_mm_drv_query_memory_regions
 *
 * The driver may have dynamically allocated the memory for the array of
 * regions returned by #sys_mm_drv_query_memory_regions. This function gives
 * the driver the opportunity to free any related resources.
 *
 * @param regions Array of regions previously returned by
 *                #sys_mm_drv_query_memory_regions
 */
void sys_mm_drv_query_memory_regions_free(const struct sys_mm_drv_region *regions);
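
/*
 * Example (illustrative sketch only): enumerate the available memory
 * regions reported by the driver and release the query result afterwards.
 *
 *   const struct sys_mm_drv_region *regions = sys_mm_drv_query_memory_regions();
 *   const struct sys_mm_drv_region *region;
 *
 *   SYS_MM_DRV_MEMORY_REGION_FOREACH(regions, region) {
 *           printk("region %p, size %zu, attr 0x%x\n",
 *                  region->addr, region->size, region->attr);
 *   }
 *
 *   sys_mm_drv_query_memory_regions_free(regions);
 */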

/**
 * @}
 */

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_DRIVERS_SYSTEM_MM_H_ */