/*
 * Copyright (c) 2020 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef KERNEL_INCLUDE_MMU_H
#define KERNEL_INCLUDE_MMU_H

#ifdef CONFIG_MMU

#include <stdint.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/mem_manage.h>
#include <zephyr/linker/linker-defs.h>

/*
 * At present, page frame management is only done for main system RAM,
 * and we generate paging structures based on CONFIG_SRAM_BASE_ADDRESS
 * and CONFIG_SRAM_SIZE.
 *
 * If we have other RAM regions (DCCM, etc.) these typically have special
 * properties and shouldn't be used generically for demand paging or
 * anonymous mappings. We don't currently maintain an ontology of these in the
 * core kernel.
 */
#define Z_PHYS_RAM_START ((uintptr_t)CONFIG_SRAM_BASE_ADDRESS)
#define Z_PHYS_RAM_SIZE ((size_t)KB(CONFIG_SRAM_SIZE))
#define Z_PHYS_RAM_END (Z_PHYS_RAM_START + Z_PHYS_RAM_SIZE)
#define Z_NUM_PAGE_FRAMES (Z_PHYS_RAM_SIZE / (size_t)CONFIG_MMU_PAGE_SIZE)
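
/* Illustrative sketch (hypothetical configuration values, not part of this
 * header): with a 4 MB SRAM region and 4 KB MMU pages, the arithmetic above
 * works out to:
 *
 *   Z_PHYS_RAM_SIZE   = KB(4096)        = 4194304 bytes
 *   Z_NUM_PAGE_FRAMES = 4194304 / 4096  = 1024 page frames
 */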

/** Boundaries of the kernel's virtual address space, as configured */
#define Z_VIRT_RAM_START ((uint8_t *)CONFIG_KERNEL_VM_BASE)
#define Z_VIRT_RAM_SIZE ((size_t)CONFIG_KERNEL_VM_SIZE)
#define Z_VIRT_RAM_END (Z_VIRT_RAM_START + Z_VIRT_RAM_SIZE)

/* Boot-time virtual location of the kernel image. */
#define Z_KERNEL_VIRT_START ((uint8_t *)(&z_mapped_start))
#define Z_KERNEL_VIRT_END ((uint8_t *)(&z_mapped_end))
#define Z_KERNEL_VIRT_SIZE (Z_KERNEL_VIRT_END - Z_KERNEL_VIRT_START)

/* Offset of the kernel image's virtual mapping from its physical load
 * address.
 */
#define Z_VM_OFFSET ((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
		     (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))

/* Only applies to boot RAM mappings within the Zephyr image that have never
 * been remapped or paged out. Never use this unless you know exactly what you
 * are doing.
 */
#define Z_BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)virt) - Z_VM_OFFSET))
#define Z_BOOT_PHYS_TO_VIRT(phys) ((uint8_t *)(((uintptr_t)phys) + Z_VM_OFFSET))
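
/* Illustrative sketch (an assumption about usage, not established API): for
 * an address inside the kernel image that is still covered by the boot
 * mapping, the two macros invert each other:
 *
 *   uintptr_t phys = Z_BOOT_VIRT_TO_PHYS(Z_KERNEL_VIRT_START);
 *
 *   __ASSERT(Z_BOOT_PHYS_TO_VIRT(phys) == Z_KERNEL_VIRT_START,
 *            "boot mapping conversion should round-trip");
 */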

#ifdef CONFIG_ARCH_MAPS_ALL_RAM
#define Z_FREE_VM_START Z_BOOT_PHYS_TO_VIRT(Z_PHYS_RAM_END)
#else
#define Z_FREE_VM_START Z_KERNEL_VIRT_END
#endif

/*
 * Macros and data structures for physical page frame accounting,
 * APIs for use by eviction and backing store algorithms. This code
 * is otherwise not application-facing.
 */

/*
 * z_page_frame flags bits
 */

/** This page contains critical kernel data and will never be swapped */
#define Z_PAGE_FRAME_PINNED BIT(0)

/** This physical page is reserved by hardware; we will never use it */
#define Z_PAGE_FRAME_RESERVED BIT(1)

/**
 * This physical page is mapped to some virtual memory address
 *
 * Currently, we only support one mapping per page frame. If a page frame
 * is mapped to multiple virtual pages, it must be pinned.
 */
#define Z_PAGE_FRAME_MAPPED BIT(2)

/**
 * This page frame is currently involved in a page-in/out operation
 */
#define Z_PAGE_FRAME_BUSY BIT(3)

/**
 * This page frame has a clean copy in the backing store
 */
#define Z_PAGE_FRAME_BACKED BIT(4)

/**
 * Data structure for physical page frames
 *
 * An array of these is instantiated, one element per physical RAM page.
 * Hence it's necessary to constrain its size as much as possible.
 */
struct z_page_frame {
	union {
		/* If mapped, virtual address this page is mapped to */
		void *addr;

		/* If unmapped and available, free pages list membership. */
		sys_snode_t node;
	};

	/* Z_PAGE_FRAME_* flags */
	uint8_t flags;

	/* TODO: Backing store and eviction algorithms may both need to
	 * introduce custom members for accounting purposes. Come up with
	 * a layer of abstraction for this. They may also want additional
	 * flags bits which shouldn't clobber each other. At all costs
	 * the total size of struct z_page_frame must be minimized.
	 */

/* On Xtensa we can't pack this struct because of memory alignment
 * requirements.
 */
#ifdef CONFIG_XTENSA
} __aligned(4);
#else
} __packed;
#endif

static inline bool z_page_frame_is_pinned(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_PINNED) != 0U;
}

static inline bool z_page_frame_is_reserved(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_RESERVED) != 0U;
}

static inline bool z_page_frame_is_mapped(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_MAPPED) != 0U;
}

static inline bool z_page_frame_is_busy(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_BUSY) != 0U;
}

static inline bool z_page_frame_is_backed(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_BACKED) != 0U;
}

static inline bool z_page_frame_is_evictable(struct z_page_frame *pf)
{
	return (!z_page_frame_is_reserved(pf) && z_page_frame_is_mapped(pf) &&
		!z_page_frame_is_pinned(pf) && !z_page_frame_is_busy(pf));
}
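
/* Illustrative sketch (an assumption about how an eviction algorithm might
 * use the predicate above, not an API defined here): linearly scan the frame
 * table, declared later in this header, for the first evictable frame.
 *
 *   struct z_page_frame *candidate = NULL;
 *
 *   for (size_t i = 0; i < Z_NUM_PAGE_FRAMES; i++) {
 *           if (z_page_frame_is_evictable(&z_page_frames[i])) {
 *                   candidate = &z_page_frames[i];
 *                   break;
 *           }
 *   }
 */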

/* If true, the page frame is not being used for anything: it is not reserved,
 * not mapped, not pinned, not busy, belongs to some free pages list, and is
 * available for new mappings or page-ins.
 */
static inline bool z_page_frame_is_available(struct z_page_frame *page)
{
	return page->flags == 0U;
}

static inline void z_assert_phys_aligned(uintptr_t phys)
{
	__ASSERT(phys % CONFIG_MMU_PAGE_SIZE == 0U,
		 "physical address 0x%lx is not page-aligned", phys);
	/* Silence the unused-parameter warning when assertions are off */
	(void)phys;
}

extern struct z_page_frame z_page_frames[Z_NUM_PAGE_FRAMES];

static inline uintptr_t z_page_frame_to_phys(struct z_page_frame *pf)
{
	return (uintptr_t)((pf - z_page_frames) * CONFIG_MMU_PAGE_SIZE) +
	       Z_PHYS_RAM_START;
}

/* Presumes the page frame is mapped to exactly one location in the virtual
 * address space.
 */
static inline void *z_page_frame_to_virt(struct z_page_frame *pf)
{
	return pf->addr;
}

static inline bool z_is_page_frame(uintptr_t phys)
{
	z_assert_phys_aligned(phys);
	return (phys >= Z_PHYS_RAM_START) && (phys < Z_PHYS_RAM_END);
}

static inline struct z_page_frame *z_phys_to_page_frame(uintptr_t phys)
{
	__ASSERT(z_is_page_frame(phys),
		 "0x%lx not an SRAM physical address", phys);

	return &z_page_frames[(phys - Z_PHYS_RAM_START) /
			      CONFIG_MMU_PAGE_SIZE];
}
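
/* Illustrative sketch (not part of this header): the two conversions above
 * invert each other for any managed, page-aligned physical address. Taking
 * the first page frame as an example:
 *
 *   uintptr_t phys = Z_PHYS_RAM_START;
 *   struct z_page_frame *pf = z_phys_to_page_frame(phys);
 *
 *   __ASSERT(z_page_frame_to_phys(pf) == phys,
 *            "frame/physical address conversion should round-trip");
 */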

static inline void z_mem_assert_virtual_region(uint8_t *addr, size_t size)
{
	__ASSERT((uintptr_t)addr % CONFIG_MMU_PAGE_SIZE == 0U,
		 "unaligned addr %p", addr);
	__ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0U,
		 "unaligned size %zu", size);
	__ASSERT(!Z_DETECT_POINTER_OVERFLOW(addr, size),
		 "region %p size %zu zero or wraps around", addr, size);
	/* The region's exclusive end may sit exactly at Z_VIRT_RAM_END,
	 * hence <= rather than <.
	 */
	__ASSERT(addr >= Z_VIRT_RAM_START && (addr + size) <= Z_VIRT_RAM_END,
		 "invalid virtual address region %p (%zu)", addr, size);
}
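
/* Illustrative sketch (hypothetical values, not part of this header):
 * validating a chosen mapping destination before programming page tables.
 *
 *   uint8_t *dest = Z_FREE_VM_START;
 *   size_t sz = 4 * CONFIG_MMU_PAGE_SIZE;
 *
 *   z_mem_assert_virtual_region(dest, sz);
 */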

/* Debug function, pretty-print page frame information for all frames
 * concisely to printk.
 */
void z_page_frames_dump(void);

/* Number of free page frames. This information may go stale immediately. */
extern size_t z_free_page_count;

/* Convenience macro for iterating over all page frames */
#define Z_PAGE_FRAME_FOREACH(_phys, _pageframe) \
	for (_phys = Z_PHYS_RAM_START, _pageframe = z_page_frames; \
	     _phys < Z_PHYS_RAM_END; \
	     _phys += CONFIG_MMU_PAGE_SIZE, _pageframe++)
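
/* Illustrative sketch (not part of this header): counting available frames
 * with the iterator above.
 *
 *   uintptr_t phys;
 *   struct z_page_frame *pf;
 *   size_t avail = 0;
 *
 *   Z_PAGE_FRAME_FOREACH(phys, pf) {
 *           if (z_page_frame_is_available(pf)) {
 *                   avail++;
 *           }
 *   }
 */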

#ifdef CONFIG_DEMAND_PAGING
/* We reserve a virtual page as a scratch area for page-ins/outs at the end
 * of the address space.
 */
#define Z_VM_RESERVED CONFIG_MMU_PAGE_SIZE
#define Z_SCRATCH_PAGE ((void *)((uintptr_t)CONFIG_KERNEL_VM_BASE + \
			(uintptr_t)CONFIG_KERNEL_VM_SIZE - \
			CONFIG_MMU_PAGE_SIZE))
#else
#define Z_VM_RESERVED 0
#endif
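
/* Illustrative sketch (an assumption about intended use; backing_store_buf is
 * a hypothetical buffer): during a page-out, the victim data page can be
 * mapped at Z_SCRATCH_PAGE so it stays readable at a stable virtual address
 * while its original mapping is torn down.
 *
 *   memcpy(backing_store_buf, Z_SCRATCH_PAGE, CONFIG_MMU_PAGE_SIZE);
 */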

#ifdef CONFIG_DEMAND_PAGING
/*
 * Core kernel demand paging APIs
 */

/**
 * Number of page faults since system startup
 *
 * Counts only page faults that were handled successfully by the demand
 * paging mechanism; faults that turned out to be errors are not included.
 *
 * @return Number of successful page faults
 */
unsigned long z_num_pagefaults_get(void);
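
/* Illustrative sketch (run_workload() is a hypothetical function): measuring
 * how many successful demand paging faults an operation triggers.
 *
 *   unsigned long before = z_num_pagefaults_get();
 *
 *   run_workload();
 *
 *   unsigned long faults = z_num_pagefaults_get() - before;
 */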

/**
 * Free a page frame by evicting its contents
 *
 * The indicated page frame, if it contains a data page, will have that
 * data page evicted to the backing store. The page frame will then be
 * marked as available for mappings or page-ins.
 *
 * This is useful for freeing up entire memory banks so that they may be
 * deactivated to save power.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs, as the backing store may be in use.
 *
 * @param phys Page frame physical address
 * @retval 0 Success
 * @retval -ENOMEM Insufficient backing store space
 */
int z_page_frame_evict(uintptr_t phys);
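
/* Illustrative sketch (BANK_START and BANK_END are hypothetical, page-aligned
 * bank bounds): evicting every frame in a physical memory bank before
 * powering it down, as the description above suggests.
 *
 *   for (uintptr_t phys = BANK_START; phys < BANK_END;
 *        phys += CONFIG_MMU_PAGE_SIZE) {
 *           int ret = z_page_frame_evict(phys);
 *
 *           if (ret != 0) {
 *                   return ret;
 *           }
 *   }
 */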

/**
 * Handle a page fault for a virtual data page
 *
 * This is invoked from the architecture page fault handler.
 *
 * If the page fault is valid, the core kernel will obtain a page frame,
 * populate it with the data page that was evicted to the backing store,
 * update page tables, and return so that the faulting instruction may be
 * re-tried.
 *
 * The architecture must not call this function if the page was mapped and
 * not paged out at the time the exception was triggered (i.e. a protection
 * violation for a mapped page).
 *
 * If the faulting context had interrupts disabled when the page fault was
 * triggered, the entire page fault handling path must have interrupts
 * disabled, including the invocation of this function.
 *
 * Otherwise, interrupts may be enabled and the page fault handler may be
 * preemptible. Races to page-in will be appropriately handled by the kernel.
 *
 * @param addr Faulting virtual address
 * @retval true Page fault successfully handled, or nothing needed to be done.
 *         The arch layer should retry the faulting instruction.
 * @retval false This page fault was from an un-mapped page and should be
 *         treated as an error, not re-tried.
 */
bool z_page_fault(void *addr);
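
/* Illustrative sketch (an assumption about the arch integration;
 * arch_data_abort_handler() is hypothetical): an architecture fault handler
 * might route data page faults here and escalate only genuine errors.
 *
 *   void arch_data_abort_handler(void *fault_addr)
 *   {
 *           if (z_page_fault(fault_addr)) {
 *                   return;
 *           }
 *
 *           k_fatal_halt(K_ERR_CPU_EXCEPTION);
 *   }
 */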
#endif /* CONFIG_DEMAND_PAGING */
#endif /* CONFIG_MMU */
#endif /* KERNEL_INCLUDE_MMU_H */