1 /*
2  * Copyright (c) 2020 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #ifndef ZEPHYR_INCLUDE_KERNEL_MM_DEMAND_PAGING_H
8 #define ZEPHYR_INCLUDE_KERNEL_MM_DEMAND_PAGING_H
9 
10 #include <zephyr/kernel/mm.h>
11 
12 #include <zephyr/sys/util.h>
13 #include <zephyr/toolchain.h>
14 
15 /**
16  * @defgroup demand_paging Demand Paging
17  * @ingroup kernel_memory_management
18  */
19 
20 /**
21  * @defgroup mem-demand-paging Demand Paging APIs
22  * @ingroup demand_paging
23  * @{
24  */
25 
26 #ifndef _ASMLANGUAGE
27 #include <stdint.h>
28 #include <stddef.h>
29 #include <inttypes.h>
30 #include <zephyr/sys/__assert.h>
31 
32 struct k_mem_page_frame;
33 
34 /**
35  * Paging Statistics.
36  */
37 struct k_mem_paging_stats_t {
38 #if defined(CONFIG_DEMAND_PAGING_STATS) || defined(__DOXYGEN__)
39 	struct {
40 		/** Number of page faults */
41 		unsigned long			cnt;
42 
43 		/** Number of page faults with IRQ locked */
44 		unsigned long			irq_locked;
45 
46 		/** Number of page faults with IRQ unlocked */
47 		unsigned long			irq_unlocked;
48 
49 #if !defined(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || defined(__DOXYGEN__)
50 		/** Number of page faults while in ISR */
51 		unsigned long			in_isr;
52 #endif /* !CONFIG_DEMAND_PAGING_ALLOW_IRQ */
53 	} pagefaults;
54 
55 	struct {
56 		/** Number of clean pages selected for eviction */
57 		unsigned long			clean;
58 
59 		/** Number of dirty pages selected for eviction */
60 		unsigned long			dirty;
61 	} eviction;
62 #endif /* CONFIG_DEMAND_PAGING_STATS */
63 };
64 
65 /**
66  * Paging Statistics Histograms.
67  */
68 struct k_mem_paging_histogram_t {
69 #if defined(CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM) || defined(__DOXYGEN__)
70 	/* Counts for each bin in timing histogram */
71 	unsigned long	counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
72 
73 	/* Bounds for the bins in timing histogram,
74 	 * excluding the first and last (hence, NUM_SLOTS - 1).
75 	 */
76 	unsigned long	bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
77 #endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
78 };
79 
80 #ifdef __cplusplus
81 extern "C" {
82 #endif
83 
84 /**
85  * Evict a page-aligned virtual memory region to the backing store
86  *
87  * Useful if it is known that a memory region will not be used for some time.
88  * All the data pages within the specified region will be evicted to the
89  * backing store if they weren't already, with their associated page frames
90  * marked as available for mappings or page-ins.
91  *
92  * None of the associated page frames mapped to the provided region should
93  * be pinned.
94  *
 * Note that there is no guarantee how long these pages will stay evicted;
 * they could take page faults and be paged back in immediately.
97  *
98  * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
99  * called by ISRs as the backing store may be in-use.
100  *
101  * @param addr Base page-aligned virtual address
102  * @param size Page-aligned data region size
103  * @retval 0 Success
104  * @retval -ENOMEM Insufficient space in backing store to satisfy request.
105  *         The region may be partially paged out.
106  */
107 int k_mem_page_out(void *addr, size_t size);
108 
109 /**
110  * Load a virtual data region into memory
111  *
 * After the function completes, all the page frames associated with this
 * region will be paged in. However, they are not guaranteed to stay there.
114  * This is useful if the region is known to be used soon.
115  *
116  * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
117  * called by ISRs as the backing store may be in-use.
118  *
119  * @param addr Base page-aligned virtual address
120  * @param size Page-aligned data region size
121  */
122 void k_mem_page_in(void *addr, size_t size);
123 
124 /**
125  * Pin an aligned virtual data region, paging in as necessary
126  *
127  * After the function completes, all the page frames associated with this
128  * region will be resident in memory and pinned such that they stay that way.
 * This is a stronger version of k_mem_page_in().
130  *
131  * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
132  * called by ISRs as the backing store may be in-use.
133  *
134  * @param addr Base page-aligned virtual address
135  * @param size Page-aligned data region size
136  */
137 void k_mem_pin(void *addr, size_t size);
138 
139 /**
140  * Un-pin an aligned virtual data region
141  *
 * After the function completes, all the page frames associated with this
 * region will no longer be marked as pinned. This does not evict the region;
 * follow this with k_mem_page_out() if you need that.
145  *
146  * @param addr Base page-aligned virtual address
147  * @param size Page-aligned data region size
148  */
149 void k_mem_unpin(void *addr, size_t size);
150 
151 /**
152  * Get the paging statistics since system startup
153  *
154  * This populates the paging statistics struct being passed in
155  * as argument.
156  *
157  * @param[in,out] stats Paging statistics struct to be filled.
158  */
159 __syscall void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats);
160 
161 struct k_thread;
162 /**
163  * Get the paging statistics since system startup for a thread
164  *
165  * This populates the paging statistics struct being passed in
166  * as argument for a particular thread.
167  *
168  * @param[in] thread Thread
169  * @param[in,out] stats Paging statistics struct to be filled.
170  */
171 __syscall
172 void k_mem_paging_thread_stats_get(struct k_thread *thread,
173 				   struct k_mem_paging_stats_t *stats);
174 
175 /**
176  * Get the eviction timing histogram
177  *
178  * This populates the timing histogram struct being passed in
179  * as argument.
180  *
181  * @param[in,out] hist Timing histogram struct to be filled.
182  */
183 __syscall void k_mem_paging_histogram_eviction_get(
184 	struct k_mem_paging_histogram_t *hist);
185 
186 /**
187  * Get the backing store page-in timing histogram
188  *
189  * This populates the timing histogram struct being passed in
190  * as argument.
191  *
192  * @param[in,out] hist Timing histogram struct to be filled.
193  */
194 __syscall void k_mem_paging_histogram_backing_store_page_in_get(
195 	struct k_mem_paging_histogram_t *hist);
196 
197 /**
198  * Get the backing store page-out timing histogram
199  *
200  * This populates the timing histogram struct being passed in
201  * as argument.
202  *
203  * @param[in,out] hist Timing histogram struct to be filled.
204  */
205 __syscall void k_mem_paging_histogram_backing_store_page_out_get(
206 	struct k_mem_paging_histogram_t *hist);
207 
208 #include <zephyr/syscalls/demand_paging.h>
209 
210 /** @} */
211 
212 /**
213  * Eviction algorithm APIs
214  *
215  * @defgroup mem-demand-paging-eviction Eviction Algorithm APIs
216  * @ingroup demand_paging
217  * @{
218  */
219 
220 /**
221  * Submit a page frame for eviction candidate tracking
222  *
223  * The kernel will invoke this to tell the eviction algorithm the provided
224  * page frame may be considered as a potential eviction candidate.
225  *
226  * This function will never be called before the initial
227  * k_mem_paging_eviction_init().
228  *
229  * This function is invoked with interrupts locked.
230  *
231  * @param [in] pf The page frame to add
232  */
233 void k_mem_paging_eviction_add(struct k_mem_page_frame *pf);
234 
235 /**
236  * Remove a page frame from potential eviction candidates
237  *
238  * The kernel will invoke this to tell the eviction algorithm the provided
239  * page frame may no longer be considered as a potential eviction candidate.
240  *
241  * This function will only be called with page frames that were submitted
242  * using k_mem_paging_eviction_add() beforehand.
243  *
244  * This function is invoked with interrupts locked.
245  *
246  * @param [in] pf The page frame to remove
247  */
248 void k_mem_paging_eviction_remove(struct k_mem_page_frame *pf);
249 
250 /**
251  * Process a page frame as being newly accessed
252  *
253  * The architecture-specific memory fault handler will invoke this to tell the
254  * eviction algorithm the provided physical address belongs to a page frame
255  * being accessed and such page frame should become unlikely to be
256  * considered as the next eviction candidate.
257  *
258  * This function is invoked with interrupts locked.
259  *
260  * @param [in] phys The physical address being accessed
261  */
262 void k_mem_paging_eviction_accessed(uintptr_t phys);
263 
264 /**
265  * Select a page frame for eviction
266  *
267  * The kernel will invoke this to choose a page frame to evict if there
268  * are no free page frames. It is not guaranteed that the returned page
269  * frame will actually be evicted. If it is then the kernel will call
270  * k_mem_paging_eviction_remove() with it.
271  *
272  * This function will never be called before the initial
273  * k_mem_paging_eviction_init().
274  *
275  * This function is invoked with interrupts locked.
276  *
277  * @param [out] dirty Whether the page to evict is dirty
278  * @return The page frame to evict
279  */
280 struct k_mem_page_frame *k_mem_paging_eviction_select(bool *dirty);
281 
282 /**
283  * Initialization function
284  *
285  * Called at POST_KERNEL to perform any necessary initialization tasks for the
286  * eviction algorithm. k_mem_paging_eviction_select() is guaranteed to never be
287  * called until this has returned, and this will only be called once.
288  */
289 void k_mem_paging_eviction_init(void);
290 
291 /** @} */
292 
293 /**
294  * Backing store APIs
295  *
296  * @defgroup mem-demand-paging-backing-store Backing Store APIs
297  * @ingroup demand_paging
298  * @{
299  */
300 
301 /**
302  * Reserve or fetch a storage location for a data page loaded into a page frame
303  *
304  * The returned location token must be unique to the mapped virtual address.
305  * This location will be used in the backing store to page out data page
306  * contents for later retrieval. The location value must be page-aligned.
307  *
308  * This function may be called multiple times on the same data page. If its
309  * page frame has its K_MEM_PAGE_FRAME_BACKED bit set, it is expected to return
310  * the previous backing store location for the data page containing a cached
311  * clean copy. This clean copy may be updated on page-out, or used to
312  * discard clean pages without needing to write out their contents.
313  *
314  * If the backing store is full, some other backing store location which caches
315  * a loaded data page may be selected, in which case its associated page frame
316  * will have the K_MEM_PAGE_FRAME_BACKED bit cleared (as it is no longer cached).
317  *
318  * k_mem_page_frame_to_virt(pf) will indicate the virtual address the page is
319  * currently mapped to. Large, sparse backing stores which can contain the
320  * entire address space may simply generate location tokens purely as a
321  * function of that virtual address with no other management necessary.
322  *
323  * This function distinguishes whether it was called on behalf of a page
324  * fault. A free backing store location must always be reserved in order for
325  * page faults to succeed. If the page_fault parameter is not set, this
326  * function should return -ENOMEM even if one location is available.
327  *
328  * This function is invoked with interrupts locked.
329  *
 * @param pf Page frame to obtain a storage location for
331  * @param [out] location storage location token
332  * @param page_fault Whether this request was for a page fault
333  * @return 0 Success
334  * @return -ENOMEM Backing store is full
335  */
336 int k_mem_paging_backing_store_location_get(struct k_mem_page_frame *pf,
337 					    uintptr_t *location,
338 					    bool page_fault);
339 
340 /**
341  * Free a backing store location
342  *
343  * Any stored data may be discarded, and the location token associated with
344  * this address may be re-used for some other data page.
345  *
346  * This function is invoked with interrupts locked.
347  *
348  * @param location Location token to free
349  */
350 void k_mem_paging_backing_store_location_free(uintptr_t location);
351 
352 /**
353  * Copy a data page from K_MEM_SCRATCH_PAGE to the specified location
354  *
355  * Immediately before this is called, K_MEM_SCRATCH_PAGE will be mapped read-write
356  * to the intended source page frame for the calling context.
357  *
358  * Calls to this and k_mem_paging_backing_store_page_in() will always be
359  * serialized, but interrupts may be enabled.
360  *
361  * @param location Location token for the data page, for later retrieval
362  */
363 void k_mem_paging_backing_store_page_out(uintptr_t location);
364 
365 /**
366  * Copy a data page from the provided location to K_MEM_SCRATCH_PAGE.
367  *
368  * Immediately before this is called, K_MEM_SCRATCH_PAGE will be mapped read-write
369  * to the intended destination page frame for the calling context.
370  *
371  * Calls to this and k_mem_paging_backing_store_page_out() will always be
372  * serialized, but interrupts may be enabled.
373  *
374  * @param location Location token for the data page
375  */
376 void k_mem_paging_backing_store_page_in(uintptr_t location);
377 
378 /**
379  * Update internal accounting after a page-in
380  *
 * This is invoked after k_mem_paging_backing_store_page_in() and interrupts
 * have been re-locked, making it safe to access the k_mem_page_frame data.
383  * The location value will be the same passed to
384  * k_mem_paging_backing_store_page_in().
385  *
386  * The primary use-case for this is to update custom fields for the backing
387  * store in the page frame, to reflect where the data should be evicted to
388  * if it is paged out again. This may be a no-op in some implementations.
389  *
390  * If the backing store caches paged-in data pages, this is the appropriate
391  * time to set the K_MEM_PAGE_FRAME_BACKED bit. The kernel only skips paging
392  * out clean data pages if they are noted as clean in the page tables and the
393  * K_MEM_PAGE_FRAME_BACKED bit is set in their associated page frame.
394  *
395  * @param pf Page frame that was loaded in
396  * @param location Location of where the loaded data page was retrieved
397  */
398 void k_mem_paging_backing_store_page_finalize(struct k_mem_page_frame *pf,
399 					      uintptr_t location);
400 
401 /**
402  * Backing store initialization function.
403  *
404  * The implementation may expect to receive page in/out calls as soon as this
405  * returns, but not before that. Called at POST_KERNEL.
406  *
407  * This function is expected to do two things:
408  * - Initialize any internal data structures and accounting for the backing
409  *   store.
410  * - If the backing store already contains all or some loaded kernel data pages
411  *   at boot time, K_MEM_PAGE_FRAME_BACKED should be appropriately set for their
412  *   associated page frames, and any internal accounting set up appropriately.
413  */
414 void k_mem_paging_backing_store_init(void);
415 
416 /** @} */
417 
418 #ifdef __cplusplus
419 }
420 #endif
421 
422 #endif /* !_ASMLANGUAGE */
423 #endif /* ZEPHYR_INCLUDE_KERNEL_MM_DEMAND_PAGING_H */
424