/*
 * Copyright (c) 2020 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef KERNEL_INCLUDE_MMU_H
#define KERNEL_INCLUDE_MMU_H

#ifdef CONFIG_MMU

#include <stdint.h>
#include <sys/slist.h>
#include <sys/__assert.h>
#include <sys/util.h>
#include <sys/mem_manage.h>
#include <linker/linker-defs.h>

/*
 * At present, page frame management is only done for main system RAM,
 * and we generate paging structures based on CONFIG_SRAM_BASE_ADDRESS
 * and CONFIG_SRAM_SIZE.
 *
 * If we have other RAM regions (DCCM, etc) these typically have special
 * properties and shouldn't be used generically for demand paging or
 * anonymous mappings. We don't currently maintain an ontology of these in the
 * core kernel.
 */
#define Z_PHYS_RAM_START	((uintptr_t)CONFIG_SRAM_BASE_ADDRESS)
#define Z_PHYS_RAM_SIZE		((size_t)KB(CONFIG_SRAM_SIZE))
#define Z_PHYS_RAM_END		(Z_PHYS_RAM_START + Z_PHYS_RAM_SIZE)
#define Z_NUM_PAGE_FRAMES	(Z_PHYS_RAM_SIZE / (size_t)CONFIG_MMU_PAGE_SIZE)
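/* For example, assuming a board with CONFIG_SRAM_SIZE=256 (KB) and
 * CONFIG_MMU_PAGE_SIZE=4096, Z_NUM_PAGE_FRAMES evaluates to
 * (256 * 1024) / 4096 = 64 page frames.
 */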

/** Start, size, and end of the kernel's virtual address space */
#define Z_VIRT_RAM_START	((uint8_t *)CONFIG_KERNEL_VM_BASE)
#define Z_VIRT_RAM_SIZE		((size_t)CONFIG_KERNEL_VM_SIZE)
#define Z_VIRT_RAM_END		(Z_VIRT_RAM_START + Z_VIRT_RAM_SIZE)
/* Boot-time virtual location of the kernel image. */
#define Z_KERNEL_VIRT_START	((uint8_t *)(&z_mapped_start))
#define Z_KERNEL_VIRT_END	((uint8_t *)(&z_mapped_end))
#define Z_KERNEL_VIRT_SIZE	(Z_KERNEL_VIRT_END - Z_KERNEL_VIRT_START)

#define Z_VM_OFFSET	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
			 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))

/* Only applies to boot RAM mappings within the Zephyr image that have never
 * been remapped or paged out. Never use this unless you know exactly what you
 * are doing.
 */
#define Z_BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)virt) - Z_VM_OFFSET))
#define Z_BOOT_PHYS_TO_VIRT(phys) ((uint8_t *)(((uintptr_t)phys) + Z_VM_OFFSET))
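
/* For example, assuming CONFIG_SRAM_BASE_ADDRESS=0x20000000,
 * CONFIG_KERNEL_VM_BASE=0x80000000, and both offsets zero, Z_VM_OFFSET is
 * 0x60000000, so a boot mapping at virtual 0x80001000 corresponds to
 * physical 0x20001000:
 *
 *	uintptr_t phys = Z_BOOT_VIRT_TO_PHYS((uint8_t *)0x80001000);
 *	uint8_t *virt  = Z_BOOT_PHYS_TO_VIRT(0x20001000);
 */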

#ifdef CONFIG_ARCH_MAPS_ALL_RAM
#define Z_FREE_VM_START	Z_BOOT_PHYS_TO_VIRT(Z_PHYS_RAM_END)
#else
#define Z_FREE_VM_START	Z_KERNEL_VIRT_END
#endif

/*
 * Macros and data structures for physical page frame accounting,
 * APIs for use by eviction and backing store algorithms. This code
 * is otherwise not application-facing.
 */

/*
 * z_page_frame flags bits
 */

/** This page contains critical kernel data and will never be swapped */
#define Z_PAGE_FRAME_PINNED		BIT(0)

/** This physical page is reserved by hardware; we will never use it */
#define Z_PAGE_FRAME_RESERVED		BIT(1)

/**
 * This physical page is mapped to some virtual memory address
 *
 * Currently, we only support one mapping per page frame. If a page frame
 * is mapped to multiple virtual pages then it must be pinned.
 */
#define Z_PAGE_FRAME_MAPPED		BIT(2)

/**
 * This page frame is currently involved in a page-in/out operation
 */
#define Z_PAGE_FRAME_BUSY		BIT(3)

/**
 * This page frame has a clean copy in the backing store
 */
#define Z_PAGE_FRAME_BACKED		BIT(4)

/**
 * Data structure for physical page frames
 *
 * An array of these is instantiated, one element per physical RAM page.
 * Hence it's necessary to constrain its size as much as possible.
 */
struct z_page_frame {
	union {
		/* If mapped, virtual address this page is mapped to */
		void *addr;

		/* If unmapped and available, free pages list membership. */
		sys_snode_t node;
	};

	/* Z_PAGE_FRAME_* flags */
	uint8_t flags;

	/* TODO: Backing store and eviction algorithms may both need to
	 * introduce custom members for accounting purposes. Come up with
	 * a layer of abstraction for this. They may also want additional
	 * flags bits which shouldn't clobber each other. At all costs
	 * the total size of struct z_page_frame must be minimized.
	 */
} __packed;
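
/* Illustrative sketch (not part of this header's API): the flag bits above
 * are combined bitwise in the flags field. A frame backing kernel text, for
 * instance, would typically be both mapped and pinned; "virt" here is a
 * hypothetical virtual address:
 *
 *	pf->addr = virt;
 *	pf->flags |= Z_PAGE_FRAME_MAPPED | Z_PAGE_FRAME_PINNED;
 */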

static inline bool z_page_frame_is_pinned(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_PINNED) != 0U;
}

static inline bool z_page_frame_is_reserved(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_RESERVED) != 0U;
}

static inline bool z_page_frame_is_mapped(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_MAPPED) != 0U;
}

static inline bool z_page_frame_is_busy(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_BUSY) != 0U;
}

static inline bool z_page_frame_is_backed(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_BACKED) != 0U;
}

static inline bool z_page_frame_is_evictable(struct z_page_frame *pf)
{
	return (!z_page_frame_is_reserved(pf) && z_page_frame_is_mapped(pf) &&
		!z_page_frame_is_pinned(pf) && !z_page_frame_is_busy(pf));
}

/* If true, the page frame is not being used for anything: it is not reserved,
 * it is a member of some free pages list, it isn't busy, and it is available
 * to be mapped
 */
static inline bool z_page_frame_is_available(struct z_page_frame *page)
{
	return page->flags == 0U;
}

static inline void z_assert_phys_aligned(uintptr_t phys)
{
	__ASSERT(phys % CONFIG_MMU_PAGE_SIZE == 0U,
		 "physical address 0x%lx is not page-aligned", phys);
	(void)phys;
}

extern struct z_page_frame z_page_frames[Z_NUM_PAGE_FRAMES];

static inline uintptr_t z_page_frame_to_phys(struct z_page_frame *pf)
{
	return (uintptr_t)((pf - z_page_frames) * CONFIG_MMU_PAGE_SIZE) +
	       Z_PHYS_RAM_START;
}

/* Presumes there is but one mapping in the virtual address space */
static inline void *z_page_frame_to_virt(struct z_page_frame *pf)
{
	return pf->addr;
}

static inline bool z_is_page_frame(uintptr_t phys)
{
	z_assert_phys_aligned(phys);
	return (phys >= Z_PHYS_RAM_START) && (phys < Z_PHYS_RAM_END);
}

static inline struct z_page_frame *z_phys_to_page_frame(uintptr_t phys)
{
	__ASSERT(z_is_page_frame(phys),
		 "0x%lx not an SRAM physical address", phys);

	return &z_page_frames[(phys - Z_PHYS_RAM_START) /
			      CONFIG_MMU_PAGE_SIZE];
}
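
/* Illustrative round trip (assuming Z_PHYS_RAM_START=0x20000000 and a 4K
 * page size): physical address 0x20003000 indexes z_page_frames[3], and
 * converting that frame back yields the original page-aligned address:
 *
 *	struct z_page_frame *pf = z_phys_to_page_frame(0x20003000);
 *	__ASSERT(z_page_frame_to_phys(pf) == 0x20003000, "round trip");
 */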

static inline void z_mem_assert_virtual_region(uint8_t *addr, size_t size)
{
	__ASSERT((uintptr_t)addr % CONFIG_MMU_PAGE_SIZE == 0U,
		 "unaligned addr %p", addr);
	__ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0U,
		 "unaligned size %zu", size);
	__ASSERT(addr + size > addr,
		 "region %p size %zu zero or wraps around", addr, size);
	__ASSERT(addr >= Z_VIRT_RAM_START && addr + size < Z_VIRT_RAM_END,
		 "invalid virtual address region %p (%zu)", addr, size);
}

/* Debug function, pretty-print page frame information for all frames
 * concisely to printk.
 */
void z_page_frames_dump(void);

/* Number of free page frames. This information may go stale immediately */
extern size_t z_free_page_count;

/* Convenience macro for iterating over all page frames */
#define Z_PAGE_FRAME_FOREACH(_phys, _pageframe) \
	for (_phys = Z_PHYS_RAM_START, _pageframe = z_page_frames; \
	     _phys < Z_PHYS_RAM_END; \
	     _phys += CONFIG_MMU_PAGE_SIZE, _pageframe++)
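
/* Typical usage sketch (illustrative only, e.g. how a dump routine might
 * walk the frame array):
 *
 *	uintptr_t phys;
 *	struct z_page_frame *pf;
 *
 *	Z_PAGE_FRAME_FOREACH(phys, pf) {
 *		if (z_page_frame_is_available(pf)) {
 *			printk("free frame at 0x%lx\n", phys);
 *		}
 *	}
 */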

#ifdef CONFIG_DEMAND_PAGING
/* We reserve a virtual page as a scratch area for page-ins/outs at the end
 * of the address space
 */
#define Z_VM_RESERVED	CONFIG_MMU_PAGE_SIZE
#define Z_SCRATCH_PAGE	((void *)((uintptr_t)CONFIG_KERNEL_VM_BASE + \
				  (uintptr_t)CONFIG_KERNEL_VM_SIZE - \
				  CONFIG_MMU_PAGE_SIZE))
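/* For example, assuming CONFIG_KERNEL_VM_BASE=0x80000000,
 * CONFIG_KERNEL_VM_SIZE=0x800000 (8MB), and a 4K page size, Z_SCRATCH_PAGE
 * is the last page of the virtual region:
 * 0x80000000 + 0x800000 - 0x1000 = 0x807FF000.
 */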
#else
#define Z_VM_RESERVED	0
#endif

#ifdef CONFIG_DEMAND_PAGING
/*
 * Core kernel demand paging APIs
 */

/**
 * Number of page faults since system startup
 *
 * Counts only those page faults that were handled successfully by the demand
 * paging mechanism and were not errors.
 *
 * @return Number of successful page faults
 */
unsigned long z_num_pagefaults_get(void);

/**
 * Free a page frame physical address by evicting its contents
 *
 * The indicated page frame, if it contains a data page, will have that
 * data page evicted to the backing store. The page frame will then be
 * marked as available for mappings or page-ins.
 *
 * This is useful for freeing up entire memory banks so that they may be
 * deactivated to save power.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param phys Page frame physical address
 * @retval 0 Success
 * @retval -ENOMEM Insufficient backing store space
 */
int z_page_frame_evict(uintptr_t phys);
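
/* Illustrative sketch only: a power-management layer could use this to drain
 * a RAM bank before powering it down. The bank boundaries here are
 * hypothetical placeholders:
 *
 *	for (uintptr_t phys = bank_start; phys < bank_end;
 *	     phys += CONFIG_MMU_PAGE_SIZE) {
 *		int ret = z_page_frame_evict(phys);
 *
 *		if (ret != 0) {
 *			return ret;
 *		}
 *	}
 */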

/**
 * Handle a page fault for a virtual data page
 *
 * This is invoked from the architecture page fault handler.
 *
 * If a valid page fault, the core kernel will obtain a page frame,
 * populate it with the data page that was evicted to the backing store,
 * update page tables, and return so that the faulting instruction may be
 * re-tried.
 *
 * The architecture must not call this function if the page was mapped and
 * not paged out at the time the exception was triggered (i.e. a protection
 * violation for a mapped page).
 *
 * If the faulting context had interrupts disabled when the page fault was
 * triggered, the entire page fault handling path must have interrupts
 * disabled, including the invocation of this function.
 *
 * Otherwise, interrupts may be enabled and the page fault handler may be
 * preemptible. Races to page-in will be appropriately handled by the kernel.
 *
 * @param addr Faulting virtual address
 * @retval true Page fault successfully handled, or nothing needed to be done.
 *         The arch layer should retry the faulting instruction.
 * @retval false This page fault was from an un-mapped page, should
 *         be treated as an error, and not re-tried.
 */
bool z_page_fault(void *addr);
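
/* Illustrative sketch of how an architecture's page fault handler might call
 * into this; the fault-address retrieval and fatal-error hook shown here are
 * hypothetical, arch-specific details:
 *
 *	void *fault_addr = read_fault_address_register();
 *
 *	if (!z_page_fault(fault_addr)) {
 *		// Not a recoverable demand-paging fault; treat as fatal
 *		handle_fatal_fault(fault_addr);
 *	}
 *	// Otherwise return from the exception and retry the instruction
 */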
#endif /* CONFIG_DEMAND_PAGING */
#endif /* CONFIG_MMU */
#endif /* KERNEL_INCLUDE_MMU_H */