1 /*
2  * Copyright (c) 2020 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #ifndef ZEPHYR_INCLUDE_KERNEL_MM_DEMAND_PAGING_H
8 #define ZEPHYR_INCLUDE_KERNEL_MM_DEMAND_PAGING_H
9 
10 #include <zephyr/kernel/mm.h>
11 
12 #include <zephyr/sys/util.h>
13 #include <zephyr/toolchain.h>
14 
15 /**
16  * @defgroup demand_paging Demand Paging
17  * @ingroup kernel_memory_management
18  */
19 
20 /**
21  * @defgroup mem-demand-paging Demand Paging APIs
22  * @ingroup demand_paging
23  * @{
24  */
25 
26 #ifndef _ASMLANGUAGE
27 #include <stdint.h>
28 #include <stddef.h>
29 #include <inttypes.h>
30 #include <zephyr/sys/__assert.h>
31 
32 /**
33  * Paging Statistics.
34  */
struct k_mem_paging_stats_t {
#if defined(CONFIG_DEMAND_PAGING_STATS) || defined(__DOXYGEN__)
	/** Page fault counters */
	struct {
		/** Total number of page faults */
		unsigned long			cnt;

		/** Number of page faults taken with interrupts locked */
		unsigned long			irq_locked;

		/** Number of page faults taken with interrupts unlocked */
		unsigned long			irq_unlocked;

#if !defined(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || defined(__DOXYGEN__)
		/** Number of page faults while in ISR
		 *
		 * Only tracked when CONFIG_DEMAND_PAGING_ALLOW_IRQ is
		 * disabled (faults from ISRs are only possible then).
		 */
		unsigned long			in_isr;
#endif
	} pagefaults;

	/** Eviction counters */
	struct {
		/** Number of clean pages selected for eviction */
		unsigned long			clean;

		/** Number of dirty pages selected for eviction */
		unsigned long			dirty;
	} eviction;
#endif /* CONFIG_DEMAND_PAGING_STATS */
};
62 
63 /**
64  * Paging Statistics Histograms.
65  */
struct k_mem_paging_histogram_t {
#if defined(CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM) || defined(__DOXYGEN__)
	/** Counts for each bin in the timing histogram */
	unsigned long	counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];

	/** Upper bounds for the bins in the timing histogram.
	 *
	 * NOTE(review): a previous comment said "excluding the first and
	 * last (hence, NUM_SLOTS - 1)", but the array is sized NUM_BINS,
	 * same as counts[]. Presumably the last entry serves as the
	 * catch-all upper bound -- confirm against the histogram
	 * implementation that fills these arrays.
	 */
	unsigned long	bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
};
77 
78 #ifdef __cplusplus
79 extern "C" {
80 #endif
81 
/**
 * Evict a page-aligned virtual memory region to the backing store
 *
 * Useful if it is known that a memory region will not be used for some time.
 * All the data pages within the specified region will be evicted to the
 * backing store if they weren't already, with their associated page frames
 * marked as available for mappings or page-ins.
 *
 * None of the associated page frames mapped to the provided region should
 * be pinned.
 *
 * Note that there are no guarantees how long these pages will stay evicted,
 * they could take page faults immediately.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 * @retval 0 Success
 * @retval -ENOMEM Insufficient space in backing store to satisfy request.
 *         The region may be partially paged out.
 */
int k_mem_page_out(void *addr, size_t size);
106 
/**
 * Load a virtual data region into memory
 *
 * After the function completes, all the page frames associated with this
 * region will be paged in. However, they are not guaranteed to stay there.
 * This is useful if the region is known to be used soon.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_page_in(void *addr, size_t size);
121 
/**
 * Pin an aligned virtual data region, paging in as necessary
 *
 * After the function completes, all the page frames associated with this
 * region will be resident in memory and pinned such that they stay that way.
 * This is a stronger version of k_mem_page_in().
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_pin(void *addr, size_t size);
136 
/**
 * Un-pin an aligned virtual data region
 *
 * After the function completes, all the page frames associated with this
 * region will be no longer marked as pinned. This does not evict the region,
 * follow this with k_mem_page_out() if you need that.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_unpin(void *addr, size_t size);
148 
/**
 * Get the paging statistics since system startup
 *
 * This populates the paging statistics struct being passed in
 * as argument. Note that the statistics fields only exist when
 * CONFIG_DEMAND_PAGING_STATS is enabled (see struct k_mem_paging_stats_t).
 *
 * @param[in,out] stats Paging statistics struct to be filled.
 */
__syscall void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats);
158 
struct k_thread;
/**
 * Get the paging statistics since system startup for a thread
 *
 * This populates the paging statistics struct being passed in
 * as argument for a particular thread. Note that the statistics fields
 * only exist when CONFIG_DEMAND_PAGING_STATS is enabled
 * (see struct k_mem_paging_stats_t).
 *
 * @param[in] thread Thread whose statistics are queried
 * @param[in,out] stats Paging statistics struct to be filled.
 */
__syscall
void k_mem_paging_thread_stats_get(struct k_thread *thread,
				   struct k_mem_paging_stats_t *stats);
172 
/**
 * Get the eviction timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument. Note that the histogram fields only exist when
 * CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM is enabled
 * (see struct k_mem_paging_histogram_t).
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist);
183 
/**
 * Get the backing store page-in timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument. Note that the histogram fields only exist when
 * CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM is enabled
 * (see struct k_mem_paging_histogram_t).
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist);
194 
/**
 * Get the backing store page-out timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument. Note that the histogram fields only exist when
 * CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM is enabled
 * (see struct k_mem_paging_histogram_t).
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist);
205 
206 #include <syscalls/demand_paging.h>
207 
208 /** @} */
209 
210 /**
211  * Eviction algorithm APIs
212  *
213  * @defgroup mem-demand-paging-eviction Eviction Algorithm APIs
214  * @ingroup demand_paging
215  * @{
216  */
217 
/**
 * Select a page frame for eviction
 *
 * The kernel will invoke this to choose a page frame to evict if there
 * are no free page frames. Implemented by the selected eviction algorithm.
 *
 * This function will never be called before the initial
 * k_mem_paging_eviction_init().
 *
 * This function is invoked with interrupts locked.
 *
 * @param [out] dirty Whether the page to evict is dirty
 * @return The page frame to evict
 */
struct z_page_frame *k_mem_paging_eviction_select(bool *dirty);
233 
/**
 * Eviction algorithm initialization function
 *
 * Called at POST_KERNEL to perform any necessary initialization tasks for the
 * eviction algorithm. k_mem_paging_eviction_select() is guaranteed to never be
 * called until this has returned, and this will only be called once.
 */
void k_mem_paging_eviction_init(void);
242 
243 /** @} */
244 
245 /**
246  * Backing store APIs
247  *
248  * @defgroup mem-demand-paging-backing-store Backing Store APIs
249  * @ingroup demand_paging
250  * @{
251  */
252 
/**
 * Reserve or fetch a storage location for a data page loaded into a page frame
 *
 * The returned location token must be unique to the mapped virtual address.
 * This location will be used in the backing store to page out data page
 * contents for later retrieval. The location value must be page-aligned.
 *
 * This function may be called multiple times on the same data page. If its
 * page frame has its Z_PAGE_FRAME_BACKED bit set, it is expected to return
 * the previous backing store location for the data page containing a cached
 * clean copy. This clean copy may be updated on page-out, or used to
 * discard clean pages without needing to write out their contents.
 *
 * If the backing store is full, some other backing store location which caches
 * a loaded data page may be selected, in which case its associated page frame
 * will have the Z_PAGE_FRAME_BACKED bit cleared (as it is no longer cached).
 *
 * pf->addr will indicate the virtual address the page is currently mapped to.
 * Large, sparse backing stores which can contain the entire address space
 * may simply generate location tokens purely as a function of pf->addr with no
 * other management necessary.
 *
 * This function distinguishes whether it was called on behalf of a page
 * fault. A free backing store location must always be reserved in order for
 * page faults to succeed. If the page_fault parameter is not set, this
 * function should return -ENOMEM even if one location is available.
 *
 * This function is invoked with interrupts locked.
 *
 * @param pf Page frame containing the data page to obtain a storage
 *           location for
 * @param [out] location storage location token
 * @param page_fault Whether this request was for a page fault
 * @retval 0 Success
 * @retval -ENOMEM Backing store is full
 */
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
					    uintptr_t *location,
					    bool page_fault);
291 
/**
 * Free a backing store location
 *
 * Any stored data may be discarded, and the location token associated with
 * this address may be re-used for some other data page.
 *
 * This function is invoked with interrupts locked.
 *
 * @param location Location token to free
 */
void k_mem_paging_backing_store_location_free(uintptr_t location);
303 
/**
 * Copy a data page from Z_SCRATCH_PAGE to the specified location
 *
 * Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
 * to the intended source page frame for the calling context.
 *
 * Calls to this and k_mem_paging_backing_store_page_in() will always be
 * serialized, but interrupts may be enabled.
 *
 * @param location Location token for the data page, for later retrieval
 */
void k_mem_paging_backing_store_page_out(uintptr_t location);
316 
/**
 * Copy a data page from the provided location to Z_SCRATCH_PAGE.
 *
 * Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
 * to the intended destination page frame for the calling context.
 *
 * Calls to this and k_mem_paging_backing_store_page_out() will always be
 * serialized, but interrupts may be enabled.
 *
 * @param location Location token for the data page
 */
void k_mem_paging_backing_store_page_in(uintptr_t location);
329 
/**
 * Update internal accounting after a page-in
 *
 * This is invoked after k_mem_paging_backing_store_page_in() and interrupts
 * have been re-locked, making it safe to access the z_page_frame data.
 * The location value will be the same passed to
 * k_mem_paging_backing_store_page_in().
 *
 * The primary use-case for this is to update custom fields for the backing
 * store in the page frame, to reflect where the data should be evicted to
 * if it is paged out again. This may be a no-op in some implementations.
 *
 * If the backing store caches paged-in data pages, this is the appropriate
 * time to set the Z_PAGE_FRAME_BACKED bit. The kernel only skips paging
 * out clean data pages if they are noted as clean in the page tables and the
 * Z_PAGE_FRAME_BACKED bit is set in their associated page frame.
 *
 * @param pf Page frame that was loaded in
 * @param location Location of where the loaded data page was retrieved
 */
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
					      uintptr_t location);
352 
/**
 * Backing store initialization function.
 *
 * The implementation may expect to receive page in/out calls as soon as this
 * returns, but not before that. Called at POST_KERNEL.
 *
 * This function is expected to do two things:
 * - Initialize any internal data structures and accounting for the backing
 *   store.
 * - If the backing store already contains all or some loaded kernel data pages
 *   at boot time, Z_PAGE_FRAME_BACKED should be appropriately set for their
 *   associated page frames, and any internal accounting set up appropriately.
 */
void k_mem_paging_backing_store_init(void);
367 
368 /** @} */
369 
370 #ifdef __cplusplus
371 }
372 #endif
373 
374 #endif /* !_ASMLANGUAGE */
375 #endif /* ZEPHYR_INCLUDE_KERNEL_MM_DEMAND_PAGING_H */
376