1 /*
2  * Copyright (c) 2020 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #ifndef ZEPHYR_INCLUDE_KERNEL_MM_H
8 #define ZEPHYR_INCLUDE_KERNEL_MM_H
9 
10 #include <zephyr/sys/util.h>
11 #include <zephyr/toolchain.h>
12 #if defined(CONFIG_ARM_MMU) && defined(CONFIG_ARM64)
13 #include <zephyr/arch/arm64/arm_mem.h>
14 #endif
15 
16 #include <zephyr/kernel/internal/mm.h>
17 
18 /**
19  * @brief Kernel Memory Management
20  * @defgroup kernel_memory_management Kernel Memory Management
21  * @ingroup kernel_apis
22  * @{
23  */
24 
25 /**
26  * @name Caching mode definitions.
27  *
28  * These are mutually exclusive.
29  *
30  * @{
31  */
32 
33 /** No caching. Most drivers want this. */
34 #define K_MEM_CACHE_NONE	2
35 
36 /** Write-through caching. Used by certain drivers. */
37 #define K_MEM_CACHE_WT		1
38 
/** Full write-back caching. Most mapped RAM wants this. */
40 #define K_MEM_CACHE_WB		0
41 
42 /*
43  * ARM64 Specific flags are defined in arch/arm64/arm_mem.h,
44  * pay attention to be not conflicted when updating these flags.
45  */
46 
/** Reserved bits for cache modes in k_mem_map() flags argument */
48 #define K_MEM_CACHE_MASK	(BIT(3) - 1)
49 
50 /** @} */
51 
52 /**
53  * @name Region permission attributes.
54  *
55  * Default is read-only, no user, no exec
56  *
57  * @{
58  */
59 
60 /** Region will have read/write access (and not read-only) */
61 #define K_MEM_PERM_RW		BIT(3)
62 
63 /** Region will be executable (normally forbidden) */
64 #define K_MEM_PERM_EXEC		BIT(4)
65 
66 /** Region will be accessible to user mode (normally supervisor-only) */
67 #define K_MEM_PERM_USER		BIT(5)
68 
69 /** @} */
70 
71 /**
72  * @name Region mapping behaviour attributes
73  *
74  * @{
75  */
76 
/** Region will be mapped 1:1, i.e. with identical virtual and physical addresses */
78 #define K_MEM_DIRECT_MAP	BIT(6)
79 
80 /** @} */
81 
82 #ifndef _ASMLANGUAGE
83 #include <stdint.h>
84 #include <stddef.h>
85 #include <inttypes.h>
86 
87 #ifdef __cplusplus
88 extern "C" {
89 #endif
90 
91 /**
92  * @name k_mem_map() control flags
93  *
94  * @{
95  */
96 
97 /**
98  * @brief The mapped region is not guaranteed to be zeroed.
99  *
100  * This may improve performance. The associated page frames may contain
101  * indeterminate data, zeroes, or even sensitive information.
102  *
103  * This may not be used with K_MEM_PERM_USER as there are no circumstances
104  * where this is safe.
105  */
106 #define K_MEM_MAP_UNINIT	BIT(16)
107 
108 /**
 * Region will be pinned in memory and never paged out
110  *
111  * Such memory is guaranteed to never produce a page fault due to page-outs
112  * or copy-on-write once the mapping call has returned. Physical page frames
113  * will be pre-fetched as necessary and pinned.
114  */
115 #define K_MEM_MAP_LOCK		BIT(17)
116 
117 /** @} */
118 
119 /**
120  * Return the amount of free memory available
121  *
 * The returned value reflects how many free RAM page frames are available.
 * If demand paging is enabled, it may still be possible to map more memory
 * than this value suggests.
124  *
125  * The information reported by this function may go stale immediately if
126  * concurrent memory mappings or page-ins take place.
127  *
128  * @return Free physical RAM, in bytes
129  */
130 size_t k_mem_free_get(void);
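
/*
 * Example (illustrative sketch, printk() from <zephyr/sys/printk.h>): the
 * value is advisory and may already be stale when acted upon, so it is best
 * used for diagnostics or heuristics rather than hard guarantees.
 *
 *   size_t free_bytes = k_mem_free_get();
 *
 *   printk("free physical RAM: %zu bytes\n", free_bytes);
 */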
131 
132 /**
133  * Map anonymous memory into Zephyr's address space
134  *
135  * This function effectively increases the data space available to Zephyr.
136  * The kernel will choose a base virtual address and return it to the caller.
137  * The memory will have access permissions for all contexts set per the
138  * provided flags argument.
139  *
 * If user thread access control needs to be managed in any way, do not set
 * the K_MEM_PERM_USER flag here; instead manage the region's permissions
 * with memory domain APIs after the mapping has been established. Setting
 * K_MEM_PERM_USER here grants all user threads access to this memory,
 * which is usually undesirable.
145  *
146  * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
147  *
148  * The mapped region is not guaranteed to be physically contiguous in memory.
149  * Physically contiguous buffers should be allocated statically and pinned
150  * at build time.
151  *
152  * Pages mapped in this way have write-back cache settings.
153  *
 * The returned virtual memory pointer will be page-aligned. The size
 * parameter, and any base address supplied for re-mapping purposes, must
 * also be page-aligned.
157  *
158  * Note that the allocation includes two guard pages immediately before
159  * and after the requested region. The total size of the allocation will be
160  * the requested size plus the size of these two guard pages.
161  *
 * Several K_MEM_MAP_* flags alter the behavior of this function; see the
 * documentation of the individual flags for details.
164  *
165  * @param size Size of the memory mapping. This must be page-aligned.
166  * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
167  * @return The mapped memory location, or NULL if insufficient virtual address
168  *         space, insufficient physical memory to establish the mapping,
169  *         or insufficient memory for paging structures.
170  */
171 void *k_mem_map(size_t size, uint32_t flags);
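
/*
 * Example (illustrative sketch, assuming CONFIG_MMU_PAGE_SIZE as the page
 * size symbol): map one page of zeroed, write-back cached anonymous memory
 * with read/write access, and check for failure.
 *
 *   void *mem = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
 *
 *   if (mem == NULL) {
 *       // Out of virtual address space, physical memory, or paging structures
 *       return -ENOMEM;
 *   }
 *   // Contents are zero-filled since K_MEM_MAP_UNINIT was not passed
 */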
172 
173 /**
174  * Un-map mapped memory
175  *
 * This removes a memory mapping for the provided page-aligned region.
 * Associated page frames will be freed, and the kernel may re-use the
 * associated virtual address region. Any paged-out data pages may be
 * discarded.
179  *
180  * Calling this function on a region which was not mapped to begin with is
181  * undefined behavior.
182  *
183  * @param addr Page-aligned memory region base virtual address
184  * @param size Page-aligned memory region size
185  */
186 void k_mem_unmap(void *addr, size_t size);
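
/*
 * Example (illustrative sketch): an unmap call must use the same base
 * address returned by k_mem_map() and the same page-aligned size that was
 * mapped.
 *
 *   void *mem = k_mem_map(2 * CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
 *
 *   if (mem != NULL) {
 *       // ... use the region ...
 *       k_mem_unmap(mem, 2 * CONFIG_MMU_PAGE_SIZE);
 *   }
 */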
187 
188 /**
 * Given an arbitrary region, provide an aligned region that covers it
190  *
191  * The returned region will have both its base address and size aligned
192  * to the provided alignment value.
193  *
194  * @param[out] aligned_addr Aligned address
195  * @param[out] aligned_size Aligned region size
196  * @param[in]  addr Region base address
197  * @param[in]  size Region size
198  * @param[in]  align What to align the address and size to
 * @return Offset between aligned_addr and addr
200  */
201 size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
202 			  uintptr_t addr, size_t size, size_t align);
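
/*
 * Example (illustrative sketch, buf and buf_len are hypothetical, assuming
 * CONFIG_MMU_PAGE_SIZE as the page size symbol): expand an arbitrary buffer
 * to page boundaries before handing it to a page-granular operation.
 *
 *   uintptr_t aligned_addr;
 *   size_t aligned_size;
 *   size_t offset;
 *
 *   offset = k_mem_region_align(&aligned_addr, &aligned_size,
 *                               (uintptr_t)buf, buf_len,
 *                               CONFIG_MMU_PAGE_SIZE);
 *
 *   // [aligned_addr, aligned_addr + aligned_size) now covers the original
 *   // [buf, buf + buf_len) region; offset is the distance between
 *   // aligned_addr and (uintptr_t)buf.
 */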
203 
204 #ifdef __cplusplus
205 }
206 #endif
207 
208 /** @} */
209 
210 #endif /* !_ASMLANGUAGE */
211 #endif /* ZEPHYR_INCLUDE_KERNEL_MM_H */
212