/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Not Recently Used (NRU) eviction algorithm for demand paging
 */
#include <zephyr/kernel.h>
#include <mmu.h>
#include <kernel_arch_interface.h>
#include <zephyr/init.h>

#include <zephyr/kernel/mm/demand_paging.h>

/* The accessed and dirty states of each page frame are used to create
 * a hierarchy with a numerical value. When evicting a page, try to evict
 * the page with the lowest value (we prefer clean, not accessed pages).
 *
 * In this ontology, "accessed" means "recently accessed" and gets cleared
 * during the periodic update.
 *
 * 0 not accessed, clean
 * 1 not accessed, dirty
 * 2 accessed, clean
 * 3 accessed, dirty
 */
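/* For example, a frame written to since the last periodic update is
 * "accessed, dirty" (value 3): it is the least preferred candidate,
 * since evicting it costs a write-back and it was recently in use.
 */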
static void nru_periodic_update(struct k_timer *timer)
{
	uintptr_t phys;
	struct k_mem_page_frame *pf;
	unsigned int key = irq_lock();

	K_MEM_PAGE_FRAME_FOREACH(phys, pf) {
		if (!k_mem_page_frame_is_evictable(pf)) {
			continue;
		}

		/* Clear the accessed bit in the page tables; pages touched
		 * before the next periodic update will set it again.
		 */
		(void)arch_page_info_get(k_mem_page_frame_to_virt(pf),
					 NULL, true);
	}

	irq_unlock(key);
}

struct k_mem_page_frame *k_mem_paging_eviction_select(bool *dirty_ptr)
{
	unsigned int last_prec = 4U;
	struct k_mem_page_frame *last_pf = NULL, *pf;
	bool accessed;
	bool last_dirty = false;
	bool dirty = false;
	uintptr_t flags;
	uint32_t pf_idx;
	static uint32_t last_pf_idx;

	/* similar to K_MEM_PAGE_FRAME_FOREACH except we don't always start at 0 */
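	/* Resuming the scan just past the previously selected frame spreads
	 * evictions across the whole frame array instead of repeatedly
	 * considering the same low-numbered frames first.
	 */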
	last_pf_idx = (last_pf_idx + 1) % ARRAY_SIZE(k_mem_page_frames);
	pf_idx = last_pf_idx;
	do {
		pf = &k_mem_page_frames[pf_idx];
		pf_idx = (pf_idx + 1) % ARRAY_SIZE(k_mem_page_frames);

		unsigned int prec;

		if (!k_mem_page_frame_is_evictable(pf)) {
			continue;
		}

		flags = arch_page_info_get(k_mem_page_frame_to_virt(pf), NULL, false);
		accessed = (flags & ARCH_DATA_PAGE_ACCESSED) != 0UL;
		dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0UL;

		/* A non-present page here implies a mismatch between the
		 * page frame ontology and the page tables.
		 */
		__ASSERT((flags & ARCH_DATA_PAGE_LOADED) != 0U,
			 "non-present page, %s",
			 ((flags & ARCH_DATA_PAGE_NOT_MAPPED) != 0U) ?
			 "un-mapped" : "paged out");

		prec = (dirty ? 1U : 0U) + (accessed ? 2U : 0U);
		if (prec == 0) {
			/* If we find a not accessed, clean page we're done */
			last_pf = pf;
			last_dirty = dirty;
			break;
		}

		if (prec < last_prec) {
			last_prec = prec;
			last_pf = pf;
			last_dirty = dirty;
		}
	} while (pf_idx != last_pf_idx);

	/* Shouldn't ever happen unless every page is pinned */
	__ASSERT(last_pf != NULL, "no page to evict");

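	/* Remember which frame was chosen so the next scan resumes just
	 * past it.
	 */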
	last_pf_idx = last_pf - k_mem_page_frames;
	*dirty_ptr = last_dirty;

	return last_pf;
}

static K_TIMER_DEFINE(nru_timer, nru_periodic_update, NULL);

void k_mem_paging_eviction_init(void)
{
	k_timer_start(&nru_timer, K_NO_WAIT,
		      K_MSEC(CONFIG_EVICTION_NRU_PERIOD));
}

#ifdef CONFIG_EVICTION_TRACKING
/*
 * Empty functions are defined here so that architectures which
 * unconditionally implement eviction tracking can still use this
 * algorithm for testing.
 */

void k_mem_paging_eviction_add(struct k_mem_page_frame *pf)
{
	ARG_UNUSED(pf);
}

void k_mem_paging_eviction_remove(struct k_mem_page_frame *pf)
{
	ARG_UNUSED(pf);
}

void k_mem_paging_eviction_accessed(uintptr_t phys)
{
	ARG_UNUSED(phys);
}

#endif /* CONFIG_EVICTION_TRACKING */