/*
 * Copyright (c) 2020 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef KERNEL_INCLUDE_MMU_H
#define KERNEL_INCLUDE_MMU_H

#ifdef CONFIG_MMU

#include <stdint.h>
#include <zephyr/sys/sflist.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/linker/linker-defs.h>

/** Start address of physical memory. */
#define K_MEM_PHYS_RAM_START ((uintptr_t)CONFIG_SRAM_BASE_ADDRESS)

/** Size of physical memory. */
#define K_MEM_PHYS_RAM_SIZE (KB(CONFIG_SRAM_SIZE))

/** End address (exclusive) of physical memory. */
#define K_MEM_PHYS_RAM_END (K_MEM_PHYS_RAM_START + K_MEM_PHYS_RAM_SIZE)

/** Start address of virtual memory. */
#define K_MEM_VIRT_RAM_START ((uint8_t *)CONFIG_KERNEL_VM_BASE)

/** Size of virtual memory. */
#define K_MEM_VIRT_RAM_SIZE ((size_t)CONFIG_KERNEL_VM_SIZE)

/** End address (exclusive) of virtual memory. */
#define K_MEM_VIRT_RAM_END (K_MEM_VIRT_RAM_START + K_MEM_VIRT_RAM_SIZE)

/** Boot-time virtual start address of the kernel image. */
#define K_MEM_KERNEL_VIRT_START ((uint8_t *)&z_mapped_start[0])

/** Boot-time virtual end address of the kernel image. */
#define K_MEM_KERNEL_VIRT_END ((uint8_t *)&z_mapped_end[0])

/** Boot-time virtual address space size of the kernel image. */
#define K_MEM_KERNEL_VIRT_SIZE (K_MEM_KERNEL_VIRT_END - K_MEM_KERNEL_VIRT_START)

/**
 * @brief Offset for translating between static physical and virtual addresses.
 *
 * @note Do not use directly unless you know exactly what you are doing.
 */
#define K_MEM_VM_OFFSET \
	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
	 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))

/**
 * @brief Get physical address from virtual address for boot RAM mappings.
 *
 * @note Only applies to boot RAM mappings within the Zephyr image that have never
 * been remapped or paged out. Never use this unless you know exactly what you
 * are doing.
 *
 * @param virt Virtual address.
 *
 * @return Physical address.
 */
#define K_MEM_BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)(virt)) - K_MEM_VM_OFFSET))

/**
 * @brief Get virtual address from physical address for boot RAM mappings.
 *
 * @note Only applies to boot RAM mappings within the Zephyr image that have never
 * been remapped or paged out. Never use this unless you know exactly what you
 * are doing.
 *
 * @param phys Physical address.
 *
 * @return Virtual address.
 */
#define K_MEM_BOOT_PHYS_TO_VIRT(phys) ((uint8_t *)(((uintptr_t)(phys)) + K_MEM_VM_OFFSET))
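
/*
 * Illustrative example (values are hypothetical, not from this tree): with
 * CONFIG_SRAM_BASE_ADDRESS=0x10000000, CONFIG_SRAM_OFFSET=0,
 * CONFIG_KERNEL_VM_BASE=0x80000000 and CONFIG_KERNEL_VM_OFFSET=0,
 * K_MEM_VM_OFFSET evaluates to 0x70000000 and the two macros simply add or
 * subtract that constant:
 *
 *	uintptr_t phys = K_MEM_BOOT_VIRT_TO_PHYS(0x80001000); // 0x10001000
 *	uint8_t *virt  = K_MEM_BOOT_PHYS_TO_VIRT(0x10001000); // 0x80001000
 *
 * This only holds for the boot mapping of the kernel image; anything that has
 * been remapped or paged out must be translated through the page tables.
 */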

/**
 * @def K_MEM_VM_FREE_START
 * @brief Start address of unused, available virtual addresses.
 *
 * This is the start address of the virtual memory region where
 * addresses can be allocated for memory mapping. This depends on whether
 * CONFIG_ARCH_MAPS_ALL_RAM is enabled:
 *
 * - If it is enabled, all physical memory is mapped into the virtual address
 *   space, and K_MEM_VM_FREE_START is the boot virtual address corresponding
 *   to the end of physical RAM
 *   (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_SIZE).
 *
 * - If it is disabled, K_MEM_VM_FREE_START is the same as
 *   K_MEM_KERNEL_VIRT_END, which is the end of the kernel image.
 */
#ifdef CONFIG_ARCH_MAPS_ALL_RAM
#define K_MEM_VM_FREE_START K_MEM_BOOT_PHYS_TO_VIRT(K_MEM_PHYS_RAM_END)
#else
#define K_MEM_VM_FREE_START K_MEM_KERNEL_VIRT_END
#endif /* CONFIG_ARCH_MAPS_ALL_RAM */
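
/*
 * Worked example (hypothetical values, for illustration only): with
 * CONFIG_SRAM_BASE_ADDRESS=0x10000000, CONFIG_SRAM_SIZE=1024 (KB) and
 * K_MEM_VM_OFFSET=0x70000000:
 *
 * - With CONFIG_ARCH_MAPS_ALL_RAM, K_MEM_PHYS_RAM_END is 0x10100000 and
 *   K_MEM_VM_FREE_START is its boot virtual mapping, 0x80100000.
 *
 * - Without it, K_MEM_VM_FREE_START is K_MEM_KERNEL_VIRT_END, i.e. the first
 *   virtual address past the kernel image, wherever the linker placed it.
 */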

/**
 * @defgroup kernel_mm_page_frame_apis Kernel Memory Page Frame Management APIs
 * @ingroup kernel_mm_internal_apis
 * @{
 *
 * Macros and data structures for physical page frame accounting,
 * APIs for use by eviction and backing store algorithms. This code
 * is otherwise not application-facing.
 */

/**
 * @brief Number of page frames.
 *
 * At present, page frame management is only done for main system RAM,
 * and we generate paging structures based on CONFIG_SRAM_BASE_ADDRESS
 * and CONFIG_SRAM_SIZE.
 *
 * If we have other RAM regions (DCCM, etc) these typically have special
 * properties and shouldn't be used generically for demand paging or
 * anonymous mappings. We don't currently maintain an ontology of these in the
 * core kernel.
 */
#define K_MEM_NUM_PAGE_FRAMES (K_MEM_PHYS_RAM_SIZE / (size_t)CONFIG_MMU_PAGE_SIZE)
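
/*
 * Example of the arithmetic above (illustrative values only): with
 * CONFIG_SRAM_SIZE=256 (KB) and CONFIG_MMU_PAGE_SIZE=4096,
 * K_MEM_PHYS_RAM_SIZE is 262144 bytes and K_MEM_NUM_PAGE_FRAMES is
 * 262144 / 4096 = 64, i.e. one struct k_mem_page_frame per 4 KB page.
 */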

/*
 * k_mem_page_frame flags bits
 *
 * Requirements:
 * - K_MEM_PAGE_FRAME_FREE must be one of the possible sfnode flag bits
 * - All bit values must be lower than CONFIG_MMU_PAGE_SIZE
 */

/** This physical page is free and part of the free list */
#define K_MEM_PAGE_FRAME_FREE BIT(0)

/** This physical page is reserved by hardware; we will never use it */
#define K_MEM_PAGE_FRAME_RESERVED BIT(1)

/** This page contains critical kernel data and will never be swapped */
#define K_MEM_PAGE_FRAME_PINNED BIT(2)

/**
 * This physical page is mapped to some virtual memory address
 *
 * Currently, we just support one mapping per page frame. If a page frame
 * is mapped to multiple virtual pages then it must be pinned.
 */
#define K_MEM_PAGE_FRAME_MAPPED BIT(3)

/**
 * This page frame is currently involved in a page-in/out operation
 */
#define K_MEM_PAGE_FRAME_BUSY BIT(4)

/**
 * This page frame has a clean copy in the backing store
 */
#define K_MEM_PAGE_FRAME_BACKED BIT(5)

/**
 * Data structure for physical page frames
 *
 * An array of these is instantiated, one element per physical RAM page.
 * Hence it's necessary to constrain its size as much as possible.
 */
struct k_mem_page_frame {
	union {
		/*
		 * If mapped, K_MEM_PAGE_FRAME_* flags and virtual address
		 * this page is mapped to.
		 */
		uintptr_t va_and_flags;

		/*
		 * If unmapped and available, free pages list membership
		 * with the K_MEM_PAGE_FRAME_FREE flag.
		 */
		sys_sfnode_t node;
	};

	/* Backing store and eviction algorithms may each need additional
	 * per-frame custom data for accounting purposes. They should declare
	 * their own array with indices matching k_mem_page_frames[] ones
	 * whenever possible. They may also want additional flags bits that
	 * could be stored here, and they shouldn't clobber each other's.
	 * At all costs the total size of struct k_mem_page_frame must be
	 * minimized.
	 */
};
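
/*
 * Illustrative note (not from the source): because all K_MEM_PAGE_FRAME_*
 * bits are below CONFIG_MMU_PAGE_SIZE and mapped virtual addresses are
 * page-aligned, a single uintptr_t can hold both. For example, with 4 KB
 * pages, a frame mapped at virtual address 0x80042000 with the MAPPED and
 * PINNED flags set stores:
 *
 *	0x80042000 | K_MEM_PAGE_FRAME_MAPPED | K_MEM_PAGE_FRAME_PINNED
 *	== 0x8004200c
 *
 * Masking with ~(CONFIG_MMU_PAGE_SIZE - 1) recovers the virtual address,
 * as done by k_mem_page_frame_to_virt() below.
 */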

/* Note: this must be false for the other flag bits to be valid */
static inline bool k_mem_page_frame_is_free(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_FREE) != 0U;
}

static inline bool k_mem_page_frame_is_pinned(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_PINNED) != 0U;
}

static inline bool k_mem_page_frame_is_reserved(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_RESERVED) != 0U;
}

static inline bool k_mem_page_frame_is_mapped(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_MAPPED) != 0U;
}

static inline bool k_mem_page_frame_is_busy(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_BUSY) != 0U;
}

static inline bool k_mem_page_frame_is_backed(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_BACKED) != 0U;
}

static inline bool k_mem_page_frame_is_evictable(struct k_mem_page_frame *pf)
{
	return (!k_mem_page_frame_is_free(pf) &&
		!k_mem_page_frame_is_reserved(pf) &&
		k_mem_page_frame_is_mapped(pf) &&
		!k_mem_page_frame_is_pinned(pf) &&
		!k_mem_page_frame_is_busy(pf));
}

/* If true, page is not being used for anything, is not reserved, is not
 * a member of some free pages list, isn't busy, and is ready to be mapped
 * in memory
 */
static inline bool k_mem_page_frame_is_available(struct k_mem_page_frame *page)
{
	return page->va_and_flags == 0U;
}

static inline void k_mem_page_frame_set(struct k_mem_page_frame *pf, uint8_t flags)
{
	pf->va_and_flags |= flags;
}

static inline void k_mem_page_frame_clear(struct k_mem_page_frame *pf, uint8_t flags)
{
	/* ensure bit inversion to follow is done on the proper type width */
	uintptr_t wide_flags = flags;

	pf->va_and_flags &= ~wide_flags;
}
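
/*
 * Usage sketch (illustrative only): how an eviction algorithm might use the
 * accessors above, assuming it already holds whatever locking the rest of
 * the page frame code requires:
 *
 *	struct k_mem_page_frame *pf = ...;
 *
 *	if (k_mem_page_frame_is_evictable(pf)) {
 *		// Mark the frame busy while its data page is written out,
 *		// so no other page-in/out operation touches it concurrently.
 *		k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_BUSY);
 *		// ... write contents to the backing store ...
 *		k_mem_page_frame_clear(pf, K_MEM_PAGE_FRAME_BUSY);
 *		k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_BACKED);
 *	}
 */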

static inline void k_mem_assert_phys_aligned(uintptr_t phys)
{
	__ASSERT(phys % CONFIG_MMU_PAGE_SIZE == 0U,
		 "physical address 0x%lx is not page-aligned", phys);
	(void)phys;
}

extern struct k_mem_page_frame k_mem_page_frames[K_MEM_NUM_PAGE_FRAMES];

static inline uintptr_t k_mem_page_frame_to_phys(struct k_mem_page_frame *pf)
{
	return (uintptr_t)((pf - k_mem_page_frames) * CONFIG_MMU_PAGE_SIZE) +
	       K_MEM_PHYS_RAM_START;
}

/* Presumes there is but one mapping in the virtual address space */
static inline void *k_mem_page_frame_to_virt(struct k_mem_page_frame *pf)
{
	uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1;

	return (void *)(pf->va_and_flags & ~flags_mask);
}

static inline bool k_mem_is_page_frame(uintptr_t phys)
{
	k_mem_assert_phys_aligned(phys);
	return IN_RANGE(phys, (uintptr_t)K_MEM_PHYS_RAM_START,
			(uintptr_t)(K_MEM_PHYS_RAM_END - 1));
}

static inline struct k_mem_page_frame *k_mem_phys_to_page_frame(uintptr_t phys)
{
	__ASSERT(k_mem_is_page_frame(phys),
		 "0x%lx not an SRAM physical address", phys);

	return &k_mem_page_frames[(phys - K_MEM_PHYS_RAM_START) /
				  CONFIG_MMU_PAGE_SIZE];
}
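
/*
 * Round-trip example (illustrative only): the page frame array is indexed by
 * physical page number relative to K_MEM_PHYS_RAM_START, so converting back
 * and forth is pure arithmetic:
 *
 *	uintptr_t phys = K_MEM_PHYS_RAM_START + 5 * CONFIG_MMU_PAGE_SIZE;
 *	struct k_mem_page_frame *pf = k_mem_phys_to_page_frame(phys);
 *	// pf == &k_mem_page_frames[5]
 *	__ASSERT(k_mem_page_frame_to_phys(pf) == phys, "mismatch");
 */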

static inline void k_mem_assert_virtual_region(uint8_t *addr, size_t size)
{
	__ASSERT((uintptr_t)addr % CONFIG_MMU_PAGE_SIZE == 0U,
		 "unaligned addr %p", addr);
	__ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0U,
		 "unaligned size %zu", size);
	__ASSERT(!Z_DETECT_POINTER_OVERFLOW(addr, size),
		 "region %p size %zu zero or wraps around", addr, size);
	__ASSERT(IN_RANGE((uintptr_t)addr,
			  (uintptr_t)K_MEM_VIRT_RAM_START,
			  ((uintptr_t)K_MEM_VIRT_RAM_END - 1)) &&
		 IN_RANGE(((uintptr_t)addr + size - 1),
			  (uintptr_t)K_MEM_VIRT_RAM_START,
			  ((uintptr_t)K_MEM_VIRT_RAM_END - 1)),
		 "invalid virtual address region %p (%zu)", addr, size);
}

/**
 * @brief Pretty-print page frame information for all page frames.
 *
 * Debug function, pretty-print page frame information for all frames
 * concisely to printk.
 */
void k_mem_page_frames_dump(void);

/* Convenience macro for iterating over all page frames */
#define K_MEM_PAGE_FRAME_FOREACH(_phys, _pageframe) \
	for ((_phys) = K_MEM_PHYS_RAM_START, (_pageframe) = k_mem_page_frames; \
	     (_phys) < K_MEM_PHYS_RAM_END; \
	     (_phys) += CONFIG_MMU_PAGE_SIZE, (_pageframe)++)
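
/*
 * Usage sketch (illustrative only): count currently free page frames. The
 * macro expands to a plain for-loop header, so it is used like any loop:
 *
 *	struct k_mem_page_frame *pf;
 *	uintptr_t phys;
 *	size_t free_frames = 0;
 *
 *	K_MEM_PAGE_FRAME_FOREACH(phys, pf) {
 *		if (k_mem_page_frame_is_free(pf)) {
 *			free_frames++;
 *		}
 *	}
 */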

/** @} */

/**
 * @def K_MEM_VM_RESERVED
 * @brief Reserve space at the end of virtual memory.
 */
#ifdef CONFIG_DEMAND_PAGING
/* We reserve a virtual page as a scratch area for page-ins/outs at the end
 * of the address space
 */
#define K_MEM_VM_RESERVED CONFIG_MMU_PAGE_SIZE

/**
 * @brief Location of the scratch page used for demand paging.
 */
#define K_MEM_SCRATCH_PAGE ((void *)((uintptr_t)CONFIG_KERNEL_VM_BASE + \
				     (uintptr_t)CONFIG_KERNEL_VM_SIZE - \
				     CONFIG_MMU_PAGE_SIZE))
#else
#define K_MEM_VM_RESERVED 0
#endif /* CONFIG_DEMAND_PAGING */

#ifdef CONFIG_DEMAND_PAGING
/*
 * Core kernel demand paging APIs
 */

/**
 * Number of page faults since system startup
 *
 * Counts only those page faults that were handled successfully by the demand
 * paging mechanism and were not errors.
 *
 * @return Number of successful page faults
 */
unsigned long k_mem_num_pagefaults_get(void);

/**
 * Free a page frame physical address by evicting its contents
 *
 * The indicated page frame, if it contains a data page, will have that
 * data page evicted to the backing store. The page frame will then be
 * marked as available for mappings or page-ins.
 *
 * This is useful for freeing up entire memory banks so that they may be
 * deactivated to save power.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param phys Page frame physical address
 * @retval 0 Success
 * @retval -ENOMEM Insufficient backing store space
 */
int k_mem_page_frame_evict(uintptr_t phys);
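
/*
 * Example sketch (illustrative only): evict every page frame belonging to a
 * hypothetical memory bank before powering it down. The bank boundaries and
 * function name here are assumptions for the example, not part of this API.
 *
 *	int bank_power_down(uintptr_t bank_start, size_t bank_size)
 *	{
 *		for (uintptr_t phys = bank_start;
 *		     phys < bank_start + bank_size;
 *		     phys += CONFIG_MMU_PAGE_SIZE) {
 *			int ret = k_mem_page_frame_evict(phys);
 *
 *			if (ret != 0) {
 *				return ret; // e.g. -ENOMEM from the backing store
 *			}
 *		}
 *		// ... hardware-specific code to turn the bank off ...
 *		return 0;
 *	}
 */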

/**
 * Handle a page fault for a virtual data page
 *
 * This is invoked from the architecture page fault handler.
 *
 * If a valid page fault, the core kernel will obtain a page frame,
 * populate it with the data page that was evicted to the backing store,
 * update page tables, and return so that the faulting instruction may be
 * re-tried.
 *
 * The architecture must not call this function if the page was mapped and
 * not paged out at the time the exception was triggered (i.e. a protection
 * violation for a mapped page).
 *
 * If the faulting context had interrupts disabled when the page fault was
 * triggered, the entire page fault handling path must have interrupts
 * disabled, including the invocation of this function.
 *
 * Otherwise, interrupts may be enabled and the page fault handler may be
 * preemptible. Races to page-in will be appropriately handled by the kernel.
 *
 * @param addr Faulting virtual address
 * @retval true Page fault successfully handled, or nothing needed to be done.
 *         The arch layer should retry the faulting instruction.
 * @retval false This page fault was from an un-mapped page, should
 *         be treated as an error, and not re-tried.
 */
bool k_mem_page_fault(void *addr);
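
/*
 * Illustrative sketch (not from the source) of how an architecture's page
 * fault handler might use this API; the handler name and structure are
 * assumptions for the example:
 *
 *	void arch_handle_data_abort(void *fault_addr)
 *	{
 *		if (k_mem_page_fault(fault_addr)) {
 *			// Page was brought in (or nothing was needed);
 *			// return and retry the faulting instruction.
 *			return;
 *		}
 *
 *		// Genuine fault on an un-mapped page: treat as an error and
 *		// fall through to the arch's normal fatal fault handling.
 *	}
 */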

#endif /* CONFIG_DEMAND_PAGING */
#endif /* CONFIG_MMU */
#endif /* KERNEL_INCLUDE_MMU_H */