/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_MM_H
#define ZEPHYR_INCLUDE_KERNEL_MM_H

#include <zephyr/sys/util.h>
#include <zephyr/toolchain.h>
#if defined(CONFIG_ARM_MMU) && defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/arm_mem.h>
#elif defined(CONFIG_ARM_AARCH32_MMU)
#include <zephyr/arch/arm/mmu/arm_mem.h>
#endif /* CONFIG_ARM_MMU && CONFIG_ARM64 */

#include <zephyr/kernel/internal/mm.h>

/**
 * @brief Kernel Memory Management
 * @defgroup kernel_memory_management Kernel Memory Management
 * @ingroup kernel_apis
 * @{
 */

/**
 * @name Caching mode definitions.
 *
 * These are mutually exclusive.
 *
 * @{
 */

/** No caching. Most drivers want this. */
#define K_MEM_CACHE_NONE	2

/** Write-through caching. Used by certain drivers. */
#define K_MEM_CACHE_WT		1

/** Full write-back caching. Any mapped RAM wants this. */
#define K_MEM_CACHE_WB		0

/*
 * ARM64-specific flags are defined in arch/arm64/arm_mem.h;
 * take care to avoid conflicts when updating these flags.
 */

/** Reserved bits for cache modes in k_mem_map() flags argument */
#define K_MEM_CACHE_MASK	(BIT(3) - 1)

/** @} */

/**
 * @name Region permission attributes.
 *
 * Default is read-only, no user, no exec
 *
 * @{
 */

/** Region will have read/write access (and not read-only) */
#define K_MEM_PERM_RW		BIT(3)

/** Region will be executable (normally forbidden) */
#define K_MEM_PERM_EXEC		BIT(4)

/** Region will be accessible to user mode (normally supervisor-only) */
#define K_MEM_PERM_USER		BIT(5)

/** @} */

/**
 * @name Region mapping behaviour attributes
 *
 * @{
 */

/** Region will be mapped 1:1, i.e. its virtual address equals its physical address */
#define K_MEM_DIRECT_MAP	BIT(6)

/** @} */

#ifndef _ASMLANGUAGE
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @name k_mem_map() control flags
 *
 * @{
 */

/**
 * @brief The mapped region is not guaranteed to be zeroed.
 *
 * This may improve performance. The associated page frames may contain
 * indeterminate data, zeroes, or even sensitive information.
 *
 * This may not be used with K_MEM_PERM_USER as there are no circumstances
 * where this is safe.
 */
#define K_MEM_MAP_UNINIT	BIT(16)

/**
 * Region will be pinned in memory and never paged
 *
 * Such memory is guaranteed to never produce a page fault due to page-outs
 * or copy-on-write once the mapping call has returned. Physical page frames
 * will be pre-fetched as necessary and pinned.
 */
#define K_MEM_MAP_LOCK		BIT(17)

/**
 * Region will be unpaged, i.e. not mapped into memory
 *
 * This is meant to be used by kernel code and not by application code.
 *
 * The corresponding memory address range will be set up so that no actual
 * memory is allocated initially. Allocation will happen through demand
 * paging when addresses in that range are accessed. This is incompatible
 * with K_MEM_MAP_LOCK.
 *
 * When this flag is specified, the phys argument to arch_mem_map()
 * is interpreted as a backing store location value, not a physical address.
 * This is very similar to arch_mem_page_out() in that regard.
 * Two special location values are defined: ARCH_UNPAGED_ANON_ZERO and
 * ARCH_UNPAGED_ANON_UNINIT. Those are to be used with anonymous memory
 * mappings for zeroed and uninitialized pages respectively.
 */
#define K_MEM_MAP_UNPAGED	BIT(18)

/** @} */

/**
 * Return the amount of free memory available
 *
 * The returned value will reflect how many free RAM page frames are available.
 * If demand paging is enabled, it may still be possible to allocate more.
 *
 * The information reported by this function may go stale immediately if
 * concurrent memory mappings or page-ins take place.
 *
 * @return Free physical RAM, in bytes
 */
size_t k_mem_free_get(void);
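
/*
 * Example (illustrative sketch, not part of the upstream documentation):
 * querying the amount of free physical RAM before attempting a large
 * anonymous mapping. The four-page request size and the use of printk()
 * are assumptions for illustration only.
 *
 *	size_t want = CONFIG_MMU_PAGE_SIZE * 4;
 *
 *	if (k_mem_free_get() < want) {
 *		printk("only %zu bytes of free RAM\n", k_mem_free_get());
 *	}
 */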

/**
 * Map anonymous memory into Zephyr's address space
 *
 * This function effectively increases the data space available to Zephyr.
 * The kernel will choose a base virtual address and return it to the caller.
 * The memory will have access permissions for all contexts set per the
 * provided flags argument.
 *
 * If user thread access control needs to be managed in any way, do not enable
 * K_MEM_PERM_USER flags here; instead manage the region's permissions
 * with memory domain APIs after the mapping has been established. Setting
 * K_MEM_PERM_USER here will allow all user threads to access this memory
 * which is usually undesirable.
 *
 * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
 *
 * The mapped region is not guaranteed to be physically contiguous in memory.
 * Physically contiguous buffers should be allocated statically and pinned
 * at build time.
 *
 * Pages mapped in this way have write-back cache settings.
 *
 * The returned virtual memory pointer will be page-aligned. The size
 * parameter, and any base address for re-mapping purposes, must be
 * page-aligned.
 *
 * Note that the allocation includes two guard pages immediately before
 * and after the requested region. The total size of the allocation will be
 * the requested size plus the size of these two guard pages.
 *
 * Several K_MEM_MAP_* flags alter the behavior of this function; see the
 * documentation of each flag for details.
 *
 * @param size Size of the memory mapping. This must be page-aligned.
 * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
 * @return The mapped memory location, or NULL if insufficient virtual address
 *         space, insufficient physical memory to establish the mapping,
 *         or insufficient memory for paging structures.
 */
static inline void *k_mem_map(size_t size, uint32_t flags)
{
	return k_mem_map_phys_guard((uintptr_t)NULL, size, flags, true);
}
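
/*
 * Example (illustrative sketch, not part of the upstream documentation):
 * mapping a page-aligned anonymous region read/write, using it, and
 * unmapping it again. CONFIG_MMU_PAGE_SIZE as the page size and the error
 * handling shown are assumptions for illustration only.
 *
 *	size_t size = CONFIG_MMU_PAGE_SIZE * 4;
 *	uint8_t *buf = k_mem_map(size, K_MEM_PERM_RW);
 *
 *	if (buf == NULL) {
 *		// out of virtual address space, physical memory, or
 *		// memory for paging structures
 *		return -ENOMEM;
 *	}
 *	buf[0] = 0xaa;
 *	k_mem_unmap(buf, size);
 */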

#ifdef CONFIG_DEMAND_MAPPING
/**
 * Create an unpaged mapping
 *
 * This maps backing-store "location" tokens into Zephyr's address space.
 * The corresponding memory address range will be set up so that no actual
 * memory is allocated initially. Allocation will happen through demand
 * paging when addresses in the mapped range are accessed.
 *
 * The kernel will choose a base virtual address and return it to the caller.
 * The memory access permissions for all contexts will be set per the
 * provided flags argument.
 *
 * If user thread access control needs to be managed in any way, do not enable
 * K_MEM_PERM_USER flags here; instead manage the region's permissions
 * with memory domain APIs after the mapping has been established. Setting
 * K_MEM_PERM_USER here will allow all user threads to access this memory
 * which is usually undesirable.
 *
 * This is incompatible with K_MEM_MAP_LOCK.
 *
 * The provided backing-store "location" token must be linearly incrementable
 * by a page size across the entire mapping.
 *
 * Allocated pages will have write-back cache settings.
 *
 * The returned virtual memory pointer will be page-aligned. The size
 * parameter, and any base address for re-mapping purposes, must be
 * page-aligned.
 *
 * Note that the allocation includes two guard pages immediately before
 * and after the requested region. The total size of the allocation will be
 * the requested size plus the size of these two guard pages.
 *
 * @param location Backing store initial location token
 * @param size Size of the memory mapping. This must be page-aligned.
 * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
 * @return The mapping location, or NULL if insufficient virtual address
 *         space to establish the mapping, or insufficient memory for paging
 *         structures.
 */
static inline void *k_mem_map_unpaged(uintptr_t location, size_t size, uint32_t flags)
{
	flags |= K_MEM_MAP_UNPAGED;
	return k_mem_map_phys_guard(location, size, flags, false);
}
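
/*
 * Example (illustrative sketch, not part of the upstream documentation):
 * creating an unpaged mapping over a range of backing-store "location"
 * tokens. The my_store_base token is hypothetical; real tokens come from
 * the platform's demand-paging backing store and must be linearly
 * incrementable by a page size across the whole range.
 *
 *	size_t size = CONFIG_MMU_PAGE_SIZE * 16;
 *	void *va = k_mem_map_unpaged(my_store_base, size, K_MEM_PERM_RW);
 *
 *	if (va == NULL) {
 *		// insufficient virtual address space or paging structures
 *	}
 */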
#endif

/**
 * Un-map mapped memory
 *
 * This removes a memory mapping for the provided page-aligned region.
 * Associated page frames will be freed and the kernel may re-use the
 * associated virtual address region. Any paged-out data pages may be
 * discarded.
 *
 * Calling this function on a region which was not mapped to begin with is
 * undefined behavior.
 *
 * @param addr Page-aligned memory region base virtual address
 * @param size Page-aligned memory region size
 */
static inline void k_mem_unmap(void *addr, size_t size)
{
	k_mem_unmap_phys_guard(addr, size, true);
}

/**
 * Modify memory mapping attribute flags
 *
 * This updates caching, access and control flags for the provided
 * page-aligned memory region.
 *
 * Calling this function on a region which was not mapped to begin with is
 * undefined behavior. However, system memory implicitly mapped at boot time
 * is supported.
 *
 * @param addr Page-aligned memory region base virtual address
 * @param size Page-aligned memory region size
 * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
 * @return 0 for success, negative error code otherwise.
 */
int k_mem_update_flags(void *addr, size_t size, uint32_t flags);
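
/*
 * Example (illustrative sketch, not part of the upstream documentation):
 * dropping write access on a previously mapped, page-aligned region by
 * resetting its flags to the defaults (read-only, supervisor-only,
 * write-back caching, i.e. K_MEM_CACHE_WB with no permission bits set).
 *
 *	int ret = k_mem_update_flags(buf, size, K_MEM_CACHE_WB);
 *
 *	if (ret != 0) {
 *		// attributes could not be updated for this region
 *	}
 */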

/**
 * Given an arbitrary region, provide an aligned region that covers it
 *
 * The returned region will have both its base address and size aligned
 * to the provided alignment value.
 *
 * @param[out] aligned_addr Aligned address
 * @param[out] aligned_size Aligned region size
 * @param[in]  addr Region base address
 * @param[in]  size Region size
 * @param[in]  align What to align the address and size to
 * @return offset between aligned_addr and addr
 */
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
			  uintptr_t addr, size_t size, size_t align);
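
/*
 * Example (illustrative sketch, not part of the upstream documentation):
 * expanding an arbitrary buffer to the enclosing page-aligned region, with
 * CONFIG_MMU_PAGE_SIZE assumed as the alignment value. aligned_addr ends up
 * at or below buf, aligned_addr + aligned_size covers buf + buf_len, and
 * the return value is the offset of buf within the aligned region.
 *
 *	uintptr_t aligned_addr;
 *	size_t aligned_size;
 *	size_t offset = k_mem_region_align(&aligned_addr, &aligned_size,
 *					   (uintptr_t)buf, buf_len,
 *					   CONFIG_MMU_PAGE_SIZE);
 */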

#ifdef __cplusplus
}
#endif

/** @} */

#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_KERNEL_MM_H */