/*
 * Copyright (c) 2020 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef KERNEL_INCLUDE_MMU_H
#define KERNEL_INCLUDE_MMU_H

#ifdef CONFIG_MMU

#include <stdint.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/linker/linker-defs.h>

/*
 * At present, page frame management is only done for main system RAM,
 * and we generate paging structures based on CONFIG_SRAM_BASE_ADDRESS
 * and CONFIG_SRAM_SIZE.
 *
 * If we have other RAM regions (DCCM, etc.), these typically have special
 * properties and shouldn't be used generically for demand paging or
 * anonymous mappings. We don't currently maintain an ontology of these in the
 * core kernel.
 */
#define Z_PHYS_RAM_START	((uintptr_t)CONFIG_SRAM_BASE_ADDRESS)
#define Z_PHYS_RAM_SIZE		((size_t)KB(CONFIG_SRAM_SIZE))
#define Z_PHYS_RAM_END		(Z_PHYS_RAM_START + Z_PHYS_RAM_SIZE)
#define Z_NUM_PAGE_FRAMES	(Z_PHYS_RAM_SIZE / (size_t)CONFIG_MMU_PAGE_SIZE)
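
/* Illustrative arithmetic, using assumed (not default) config values: with
 * CONFIG_SRAM_BASE_ADDRESS=0x80000000, CONFIG_SRAM_SIZE=4096 (kilobytes)
 * and CONFIG_MMU_PAGE_SIZE=4096, the above works out to:
 *
 *   Z_PHYS_RAM_START  = 0x80000000
 *   Z_PHYS_RAM_SIZE   = 4 MB
 *   Z_PHYS_RAM_END    = 0x80400000
 *   Z_NUM_PAGE_FRAMES = 1024
 */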

/** Bounds of the virtual address space managed by the kernel */
#define Z_VIRT_RAM_START	((uint8_t *)CONFIG_KERNEL_VM_BASE)
#define Z_VIRT_RAM_SIZE		((size_t)CONFIG_KERNEL_VM_SIZE)
#define Z_VIRT_RAM_END		(Z_VIRT_RAM_START + Z_VIRT_RAM_SIZE)

/* Boot-time virtual location of the kernel image. */
#define Z_KERNEL_VIRT_START	((uint8_t *)(&z_mapped_start))
#define Z_KERNEL_VIRT_END	((uint8_t *)(&z_mapped_end))
#define Z_KERNEL_VIRT_SIZE	(Z_KERNEL_VIRT_END - Z_KERNEL_VIRT_START)

#define Z_VM_OFFSET	 ((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
			  (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))

/* Only applies to boot RAM mappings within the Zephyr image that have never
 * been remapped or paged out. Never use this unless you know exactly what you
 * are doing.
 */
#define Z_BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)virt) - Z_VM_OFFSET))
#define Z_BOOT_PHYS_TO_VIRT(phys) ((uint8_t *)(((uintptr_t)phys) + Z_VM_OFFSET))
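
/* Sketch of how the boot-mapping translation behaves, assuming the example
 * values above plus CONFIG_KERNEL_VM_BASE=0xC0000000 and both offsets zero,
 * so Z_VM_OFFSET is 0x40000000:
 *
 *   uintptr_t phys = Z_BOOT_VIRT_TO_PHYS(Z_KERNEL_VIRT_START);
 *   uint8_t *virt = Z_BOOT_PHYS_TO_VIRT(phys);
 *
 * Here 'phys' is the physical load address of the kernel image and 'virt'
 * is Z_KERNEL_VIRT_START again. As noted above, this only holds for boot
 * RAM mappings that were never remapped or paged out.
 */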

#ifdef CONFIG_ARCH_MAPS_ALL_RAM
#define Z_FREE_VM_START	Z_BOOT_PHYS_TO_VIRT(Z_PHYS_RAM_END)
#else
#define Z_FREE_VM_START	Z_KERNEL_VIRT_END
#endif

/*
 * Macros and data structures for physical page frame accounting, plus
 * APIs for use by eviction and backing store algorithms. This code is
 * not otherwise application-facing.
 */

/*
 * z_page_frame flags bits
 */

/** This page contains critical kernel data and will never be swapped */
#define Z_PAGE_FRAME_PINNED		BIT(0)

/** This physical page is reserved by hardware; we will never use it */
#define Z_PAGE_FRAME_RESERVED		BIT(1)

/**
 * This physical page is mapped to some virtual memory address
 *
 * Currently, we just support one mapping per page frame. If a page frame
 * is mapped to multiple virtual pages then it must be pinned.
 */
#define Z_PAGE_FRAME_MAPPED		BIT(2)

/**
 * This page frame is currently involved in a page-in/out operation
 */
#define Z_PAGE_FRAME_BUSY		BIT(3)

/**
 * This page frame has a clean copy in the backing store
 */
#define Z_PAGE_FRAME_BACKED		BIT(4)

/**
 * Data structure for physical page frames
 *
 * An array of these is instantiated, one element per physical RAM page.
 * Hence it's necessary to constrain its size as much as possible.
 */
struct z_page_frame {
	union {
		/* If mapped, virtual address this page is mapped to */
		void *addr;

		/* If unmapped and available, free pages list membership. */
		sys_snode_t node;
	};

	/* Z_PAGE_FRAME_* flags */
	uint8_t flags;

	/* TODO: Backing store and eviction algorithms may both need to
	 * introduce custom members for accounting purposes. Come up with
	 * a layer of abstraction for this. They may also want additional
	 * flags bits which shouldn't clobber each other. At all costs
	 * the total size of struct z_page_frame must be minimized.
	 */

	/* On Xtensa we can't pack this struct because of memory alignment
	 * requirements.
	 */
#ifdef CONFIG_XTENSA
} __aligned(4);
#else
} __packed;
#endif

static inline bool z_page_frame_is_pinned(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_PINNED) != 0U;
}

static inline bool z_page_frame_is_reserved(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_RESERVED) != 0U;
}

static inline bool z_page_frame_is_mapped(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_MAPPED) != 0U;
}

static inline bool z_page_frame_is_busy(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_BUSY) != 0U;
}

static inline bool z_page_frame_is_backed(struct z_page_frame *pf)
{
	return (pf->flags & Z_PAGE_FRAME_BACKED) != 0U;
}

static inline bool z_page_frame_is_evictable(struct z_page_frame *pf)
{
	return (!z_page_frame_is_reserved(pf) && z_page_frame_is_mapped(pf) &&
		!z_page_frame_is_pinned(pf) && !z_page_frame_is_busy(pf));
}

/* If true, the page frame is not being used for anything: it is not reserved,
 * not mapped, not busy, is a member of some free pages list, and is available
 * to be mapped.
 */
static inline bool z_page_frame_is_available(struct z_page_frame *page)
{
	return page->flags == 0U;
}

static inline void z_assert_phys_aligned(uintptr_t phys)
{
	__ASSERT(phys % CONFIG_MMU_PAGE_SIZE == 0U,
		 "physical address 0x%lx is not page-aligned", phys);
	(void)phys;
}

extern struct z_page_frame z_page_frames[Z_NUM_PAGE_FRAMES];

static inline uintptr_t z_page_frame_to_phys(struct z_page_frame *pf)
{
	return (uintptr_t)((pf - z_page_frames) * CONFIG_MMU_PAGE_SIZE) +
			Z_PHYS_RAM_START;
}

/* Presumes there is but one mapping in the virtual address space */
static inline void *z_page_frame_to_virt(struct z_page_frame *pf)
{
	return pf->addr;
}

static inline bool z_is_page_frame(uintptr_t phys)
{
	z_assert_phys_aligned(phys);
	return IN_RANGE(phys, (uintptr_t)Z_PHYS_RAM_START,
			(uintptr_t)(Z_PHYS_RAM_END - 1));
}

static inline struct z_page_frame *z_phys_to_page_frame(uintptr_t phys)
{
	__ASSERT(z_is_page_frame(phys),
		 "0x%lx not an SRAM physical address", phys);

	return &z_page_frames[(phys - Z_PHYS_RAM_START) /
			      CONFIG_MMU_PAGE_SIZE];
}
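
/* Usage sketch (illustrative, not part of the API): for any page-aligned
 * physical address 'phys' inside system RAM, the two conversions above are
 * inverses of each other:
 *
 *   struct z_page_frame *pf = z_phys_to_page_frame(phys);
 *
 *   __ASSERT(z_page_frame_to_phys(pf) == phys, "round-trip mismatch");
 */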

static inline void z_mem_assert_virtual_region(uint8_t *addr, size_t size)
{
	__ASSERT((uintptr_t)addr % CONFIG_MMU_PAGE_SIZE == 0U,
		 "unaligned addr %p", addr);
	__ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0U,
		 "unaligned size %zu", size);
	__ASSERT(!Z_DETECT_POINTER_OVERFLOW(addr, size),
		 "region %p size %zu zero or wraps around", addr, size);
	__ASSERT(IN_RANGE((uintptr_t)addr,
			  (uintptr_t)Z_VIRT_RAM_START,
			  ((uintptr_t)Z_VIRT_RAM_END - 1)) &&
		 IN_RANGE(((uintptr_t)addr + size - 1),
			  (uintptr_t)Z_VIRT_RAM_START,
			  ((uintptr_t)Z_VIRT_RAM_END - 1)),
		 "invalid virtual address region %p (%zu)", addr, size);
}

/* Debug function, pretty-print page frame information for all frames
 * concisely to printk.
 */
void z_page_frames_dump(void);

/* Convenience macro for iterating over all page frames */
#define Z_PAGE_FRAME_FOREACH(_phys, _pageframe) \
	for (_phys = Z_PHYS_RAM_START, _pageframe = z_page_frames; \
	     _phys < Z_PHYS_RAM_END; \
	     _phys += CONFIG_MMU_PAGE_SIZE, _pageframe++)
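
/* Usage sketch: count how many page frames currently have a virtual mapping.
 * The local variable names are illustrative only.
 *
 *   uintptr_t phys;
 *   struct z_page_frame *pf;
 *   unsigned int mapped = 0;
 *
 *   Z_PAGE_FRAME_FOREACH(phys, pf) {
 *           if (z_page_frame_is_mapped(pf)) {
 *                   mapped++;
 *           }
 *   }
 */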

#ifdef CONFIG_DEMAND_PAGING
/* We reserve a virtual page as a scratch area for page-ins/outs at the end
 * of the address space
 */
#define Z_VM_RESERVED	CONFIG_MMU_PAGE_SIZE
#define Z_SCRATCH_PAGE	((void *)((uintptr_t)CONFIG_KERNEL_VM_BASE + \
				     (uintptr_t)CONFIG_KERNEL_VM_SIZE - \
				     CONFIG_MMU_PAGE_SIZE))
#else
#define Z_VM_RESERVED	0
#endif

#ifdef CONFIG_DEMAND_PAGING
/*
 * Core kernel demand paging APIs
 */

/**
 * Number of page faults since system startup
 *
 * Counts only those page faults that were handled successfully by the demand
 * paging mechanism and were not errors.
 *
 * @return Number of successful page faults
 */
unsigned long z_num_pagefaults_get(void);

/**
 * Free a page frame physical address by evicting its contents
 *
 * The indicated page frame, if it contains a data page, will have that
 * data page evicted to the backing store. The page frame will then be
 * marked as available for mappings or page-ins.
 *
 * This is useful for freeing up entire memory banks so that they may be
 * deactivated to save power.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in use.
 *
 * @param phys Page frame physical address
 * @retval 0 Success
 * @retval -ENOMEM Insufficient backing store space
 */
int z_page_frame_evict(uintptr_t phys);
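
/* Usage sketch for freeing an entire memory bank before powering it down.
 * The 'bank_base' and 'bank_size' symbols are hypothetical; the loop stops
 * at the first failure.
 *
 *   for (uintptr_t phys = bank_base; phys < bank_base + bank_size;
 *        phys += CONFIG_MMU_PAGE_SIZE) {
 *           int ret = z_page_frame_evict(phys);
 *
 *           if (ret != 0) {
 *                   break;
 *           }
 *   }
 */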

/**
 * Handle a page fault for a virtual data page
 *
 * This is invoked from the architecture page fault handler.
 *
 * If the page fault is valid, the core kernel will obtain a page frame,
 * populate it with the data page that was evicted to the backing store,
 * update page tables, and return so that the faulting instruction may be
 * re-tried.
 *
 * The architecture must not call this function if the page was mapped and
 * not paged out at the time the exception was triggered (i.e. a protection
 * violation for a mapped page).
 *
 * If the faulting context had interrupts disabled when the page fault was
 * triggered, the entire page fault handling path must have interrupts
 * disabled, including the invocation of this function.
 *
 * Otherwise, interrupts may be enabled and the page fault handler may be
 * preemptible. Races to page-in will be appropriately handled by the kernel.
 *
 * @param addr Faulting virtual address
 * @retval true Page fault successfully handled, or nothing needed to be done.
 *              The arch layer should retry the faulting instruction.
 * @retval false This page fault was from an unmapped page and should be
 *               treated as an error, not re-tried.
 */
bool z_page_fault(void *addr);
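
/* Usage sketch from a hypothetical arch-level fault handler; the handler
 * name and the k_panic() error path are illustrative, not a required
 * interface.
 *
 *   void arch_handle_data_abort(void *fault_addr)
 *   {
 *           if (!z_page_fault(fault_addr)) {
 *                   // Unmapped address: treat as a fatal access violation
 *                   k_panic();
 *           }
 *           // Otherwise, return and retry the faulting instruction
 *   }
 */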
#endif /* CONFIG_DEMAND_PAGING */
#endif /* CONFIG_MMU */
#endif /* KERNEL_INCLUDE_MMU_H */