/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/kernel/mm/demand_paging.h>
#include <zephyr/timing/timing.h>
#include <mmu.h>
#include <zephyr/linker/sections.h>

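/*
 * The RAM backing store presumably keeps one page frame in reserve for
 * its own use, so only CONFIG_BACKING_STORE_RAM_PAGES - 1 frames are
 * available to the test.
 */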
#ifdef CONFIG_BACKING_STORE_RAM_PAGES
#define EXTRA_PAGES (CONFIG_BACKING_STORE_RAM_PAGES - 1)
#else
#error "Unsupported configuration"
#endif

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS

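/*
 * Each histogram bin counts samples no greater than its bound. Bounds
 * are expressed in timing cycles, in ascending order, with a final
 * ULONG_MAX bin catching all remaining samples.
 */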
#ifdef CONFIG_BOARD_QEMU_X86_TINY
unsigned long
k_mem_paging_eviction_histogram_bounds[
	CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
	10000,
	20000,
	30000,
	40000,
	50000,
	60000,
	70000,
	80000,
	100000,
	ULONG_MAX
};

unsigned long
k_mem_paging_backing_store_histogram_bounds[
	CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
	10000,
	50000,
	100000,
	150000,
	200000,
	250000,
	500000,
	750000,
	1000000,
	ULONG_MAX
};
#else
#error "Need to define paging histogram bounds"
#endif

#endif /* CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS */
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

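/* Anonymous memory arena mapped by test_map_anon_pages() and shared by
 * the test suites below.
 */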
size_t arena_size;
char *arena;

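/* Pinned so that reading the flag inside the fault handler can never
 * itself trigger a page fault.
 */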
__pinned_bss
static bool expect_fault;

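/* Treat a CPU exception (reason 0, i.e. K_ERR_CPU_EXCEPTION) as a pass
 * when a fault is expected; anything else ends the test run. Pinned so
 * the handler itself is never paged out.
 */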
__pinned_func
void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
{
	printk("Caught system error -- reason %d\n", reason);

	if (expect_fault && reason == 0) {
		expect_fault = false;
		ztest_test_pass();
	} else {
		printk("Unexpected fault during test\n");
		TC_END_REPORT(TC_FAIL);
		k_fatal_halt(reason);
	}
}

/* The mapped anonymous area will be free RAM plus half of the available
 * frames in the backing store.
 */
#define HALF_PAGES (EXTRA_PAGES / 2)
#define HALF_BYTES (HALF_PAGES * CONFIG_MMU_PAGE_SIZE)

/* Repeating fill pattern for the write/verify passes */
static const char *nums = "0123456789";

ZTEST(demand_paging, test_map_anon_pages)
{
	arena_size = k_mem_free_get() + HALF_BYTES;
	arena = k_mem_map(arena_size, K_MEM_PERM_RW);

	zassert_not_null(arena, "failed to map anonymous memory arena size %zu",
			 arena_size);
	printk("Anonymous memory arena %p size %zu\n", arena, arena_size);
	k_mem_page_frames_dump();
}

static void print_paging_stats(struct k_mem_paging_stats_t *stats, const char *scope)
{
	printk("* Page Faults (%s):\n", scope);
	printk("  - Total:        %lu\n", stats->pagefaults.cnt);
	printk("  - IRQ locked:   %lu\n", stats->pagefaults.irq_locked);
	printk("  - IRQ unlocked: %lu\n", stats->pagefaults.irq_unlocked);
#ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
	printk("  - in ISR:       %lu\n", stats->pagefaults.in_isr);
#endif

	printk("* Eviction (%s):\n", scope);
	printk("  - Total pages evicted: %lu\n",
	       stats->eviction.clean + stats->eviction.dirty);
	printk("  - Clean pages evicted: %lu\n",
	       stats->eviction.clean);
	printk("  - Dirty pages evicted: %lu\n",
	       stats->eviction.dirty);
}

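/* Walk the arena while collecting paging statistics. The zig/zag flags
 * reverse the traversal direction of the read and write loops
 * respectively, so the eviction algorithm sees different access patterns.
 */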
static void touch_anon_pages(bool zig, bool zag)
{
	void **arena_ptr = (void **)arena;
	size_t arena_ptr_size = arena_size / sizeof(void *);
	unsigned long faults;
	struct k_mem_paging_stats_t stats;
	k_tid_t tid = k_current_get();

	faults = k_mem_num_pagefaults_get();

	printk("checking zeroes\n");
	/* The mapped area should have started out zeroed. Check this. */
	for (size_t j = 0; j < arena_size; j++) {
		size_t i = zig ? (arena_size - 1 - j) : j;

		zassert_equal(arena[i], '\x00',
			      "page not zeroed got 0x%hhx at index %zu",
			      arena[i], i);
	}

	printk("writing data\n");
	/* Fill the whole arena with each location's own virtual address */
	for (size_t j = 0; j < arena_ptr_size; j++) {
		size_t i = zag ? (arena_ptr_size - 1 - j) : j;

		arena_ptr[i] = &arena_ptr[i];
	}

	/* And ensure it can be read back */
	printk("verify written data\n");
	for (size_t j = 0; j < arena_ptr_size; j++) {
		size_t i = zig ? (arena_ptr_size - 1 - j) : j;

		zassert_equal(arena_ptr[i], &arena_ptr[i],
			      "arena corrupted at index %zu: got %p expected %p",
			      i, arena_ptr[i], &arena_ptr[i]);
	}

	faults = k_mem_num_pagefaults_get() - faults;

	/* Specific number depends on how much RAM we have but shouldn't be 0 */
	zassert_not_equal(faults, 0UL, "no page faults handled?");
	printk("Kernel handled %lu page faults\n", faults);

	k_mem_paging_stats_get(&stats);
	print_paging_stats(&stats, "kernel");
	zassert_not_equal(stats.eviction.dirty, 0UL,
			  "there should be dirty pages being evicted.");

#ifdef CONFIG_EVICTION_NRU
	/* Let at least one NRU period elapse so pages age */
	k_msleep(CONFIG_EVICTION_NRU_PERIOD * 2);
#endif /* CONFIG_EVICTION_NRU */

	/* There should be some clean pages to be evicted now,
	 * since the arena is not modified.
	 */
	printk("reading unmodified data\n");
	for (size_t j = 0; j < arena_ptr_size; j++) {
		size_t i = zag ? (arena_ptr_size - 1 - j) : j;

		zassert_equal(arena_ptr[i], &arena_ptr[i],
			      "arena corrupted at index %zu: got %p expected %p",
			      i, arena_ptr[i], &arena_ptr[i]);
	}

	k_mem_paging_stats_get(&stats);
	print_paging_stats(&stats, "kernel");
	zassert_not_equal(stats.eviction.clean, 0UL,
			  "there should be clean pages being evicted.");

	/* per-thread statistics */
	printk("\nPaging stats for current thread (%p):\n", tid);
	k_mem_paging_thread_stats_get(tid, &stats);
	print_paging_stats(&stats, "thread");
	zassert_not_equal(stats.pagefaults.cnt, 0UL,
			  "no page faults handled in thread?");
	zassert_not_equal(stats.eviction.dirty, 0UL,
			  "test thread should have dirty pages evicted.");
	zassert_not_equal(stats.eviction.clean, 0UL,
			  "test thread should have clean pages evicted.");

	/* Reset arena to zero */
	for (size_t i = 0; i < arena_size; i++) {
		arena[i] = 0;
	}
}

ZTEST(demand_paging, test_touch_anon_pages)
{
	touch_anon_pages(false, false);
}

ZTEST(demand_paging, test_touch_anon_pages_zigzag1)
{
	touch_anon_pages(true, false);
}

ZTEST(demand_paging, test_touch_anon_pages_zigzag2)
{
	touch_anon_pages(false, true);
}

ZTEST(demand_paging, test_unmap_anon_pages)
{
	k_mem_unmap(arena, arena_size);

	/* memory should no longer be accessible */
	expect_fault = true;
	compiler_barrier();

	TC_PRINT("Accessing unmapped memory should fault\n");
	arena[0] = 'x';

	/* and execution should not reach this point */
	ztest_test_fail();
}

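/* Evict the first half of the arena, then write to it: exactly
 * HALF_PAGES faults should bring it back in. Paging out the entire
 * arena must fail with -ENOMEM since it exceeds the backing store.
 */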
static void test_k_mem_page_out(void)
{
	unsigned long faults;
	int key, ret;

	/* Lock IRQs to prevent other pagefaults from happening while we
	 * are measuring stuff
	 */
	key = irq_lock();
	faults = k_mem_num_pagefaults_get();
	ret = k_mem_page_out(arena, HALF_BYTES);
	zassert_equal(ret, 0, "k_mem_page_out failed with %d", ret);

	/* Write to the supposedly evicted region */
	for (size_t i = 0; i < HALF_BYTES; i++) {
		arena[i] = nums[i % 10];
	}
	faults = k_mem_num_pagefaults_get() - faults;
	irq_unlock(key);

	zassert_equal(faults, HALF_PAGES,
		      "unexpected number of pagefaults: expected %d got %lu",
		      HALF_PAGES, faults);

	ret = k_mem_page_out(arena, arena_size);
	zassert_equal(ret, -ENOMEM, "k_mem_page_out should have failed");
}

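/* Page the evicted region back in ahead of use; subsequent writes to it
 * should then complete without any page faults.
 */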
ZTEST(demand_paging_api, test_k_mem_page_in)
{
	unsigned long faults;
	int key, ret;

	/* Lock IRQs to prevent other pagefaults from happening while we
	 * are measuring stuff
	 */
	key = irq_lock();

	ret = k_mem_page_out(arena, HALF_BYTES);
	zassert_equal(ret, 0, "k_mem_page_out failed with %d", ret);

	k_mem_page_in(arena, HALF_BYTES);

	faults = k_mem_num_pagefaults_get();
	/* Write to the paged-in region; this should not fault */
	for (size_t i = 0; i < HALF_BYTES; i++) {
		arena[i] = nums[i % 10];
	}
	faults = k_mem_num_pagefaults_get() - faults;
	irq_unlock(key);

	zassert_equal(faults, 0UL, "%lu page faults when 0 expected",
		      faults);
}

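/* Pinned pages must stay resident: writing to the rest of the arena may
 * evict other pages, but touching the pinned region afterwards should
 * cause no page faults.
 */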
ZTEST(demand_paging_api, test_k_mem_pin)
{
	unsigned long faults;
	unsigned int key;

	k_mem_pin(arena, HALF_BYTES);

	/* Write to the rest of the arena */
	for (size_t i = HALF_BYTES; i < arena_size; i++) {
		arena[i] = nums[i % 10];
	}

	key = irq_lock();
	/* Show no faults writing to the pinned area */
	faults = k_mem_num_pagefaults_get();
	for (size_t i = 0; i < HALF_BYTES; i++) {
		arena[i] = nums[i % 10];
	}
	faults = k_mem_num_pagefaults_get() - faults;
	irq_unlock(key);

	zassert_equal(faults, 0UL, "%lu page faults when 0 expected",
		      faults);

	/* Clean up */
	k_mem_unpin(arena, HALF_BYTES);
}

ZTEST(demand_paging_api, test_k_mem_unpin)
{
	/* Pin the memory (which we know works from prior test) */
	k_mem_pin(arena, HALF_BYTES);

	/* Now un-pin it */
	k_mem_unpin(arena, HALF_BYTES);

	/* repeat the page_out scenario, which should work */
	test_k_mem_page_out();
}

/* Show that even if we map enough anonymous memory to fill the backing
 * store, we can still handle pagefaults.
 * This eats up memory so should be last in the suite.
 */
ZTEST(demand_paging_stat, test_backing_store_capacity)
{
	char *mem, *ret;
	unsigned int key;
	unsigned long faults;
	size_t size = (EXTRA_PAGES - HALF_PAGES) * CONFIG_MMU_PAGE_SIZE;

	/* Consume the rest of memory */
	mem = k_mem_map(size, K_MEM_PERM_RW);
	zassert_not_null(mem, "k_mem_map failed");

	/* Show no memory is left */
	ret = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
	zassert_is_null(ret, "k_mem_map shouldn't have succeeded");

	key = irq_lock();
	faults = k_mem_num_pagefaults_get();
	/* Poke all anonymous memory */
	for (size_t i = 0; i < HALF_BYTES; i++) {
		arena[i] = nums[i % 10];
	}
	for (size_t i = 0; i < size; i++) {
		mem[i] = nums[i % 10];
	}
	faults = k_mem_num_pagefaults_get() - faults;
	irq_unlock(key);

	zassert_not_equal(faults, 0UL, "should have had some pagefaults");
}

/* Test if we can get paging statistics under usermode */
ZTEST_USER(demand_paging_stat, test_user_get_stats)
{
	struct k_mem_paging_stats_t stats;
	k_tid_t tid = k_current_get();

	/* overall kernel statistics */
	printk("\nPaging stats for kernel:\n");
	k_mem_paging_stats_get(&stats);
	print_paging_stats(&stats, "kernel - usermode");
	zassert_not_equal(stats.pagefaults.cnt, 0UL,
			  "no page faults handled?");
	zassert_not_equal(stats.eviction.dirty, 0UL,
			  "there should have been dirty pages evicted.");
	zassert_not_equal(stats.eviction.clean, 0UL,
			  "there should have been clean pages evicted.");

	/* per-thread statistics */
	printk("\nPaging stats for current thread (%p):\n", tid);
	k_mem_paging_thread_stats_get(tid, &stats);
	print_paging_stats(&stats, "thread - usermode");
	zassert_not_equal(stats.pagefaults.cnt, 0UL,
			  "no page faults handled in thread?");
	zassert_not_equal(stats.eviction.dirty, 0UL,
			  "test thread should have dirty pages evicted.");
	zassert_not_equal(stats.eviction.clean, 0UL,
			  "test thread should have clean pages evicted.");
}

/* Print the histogram and return true if histogram has non-zero values
 * in one of its bins.
 */
static bool print_histogram(struct k_mem_paging_histogram_t *hist)
{
	bool has_non_zero;
	uint64_t time_ns;
	int idx;

	has_non_zero = false;
	for (idx = 0;
	     idx < CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS;
	     idx++) {
#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
		time_ns = timing_cycles_to_ns(hist->bounds[idx]);
#else
		time_ns = k_cyc_to_ns_ceil64(hist->bounds[idx]);
#endif
		printk("  <= %llu ns (%lu cycles): %lu\n", time_ns,
		       hist->bounds[idx], hist->counts[idx]);
		if (hist->counts[idx] > 0U) {
			has_non_zero = true;
		}
	}

	return has_non_zero;
}

/* Test if we can get paging timing histograms */
ZTEST_USER(demand_paging_stat, test_user_get_hist)
{
	struct k_mem_paging_histogram_t hist;

	printk("Eviction Timing Histogram:\n");
	k_mem_paging_histogram_eviction_get(&hist);
	zassert_true(print_histogram(&hist),
		     "should have non-zero counts in histogram.");
	printk("\n");

	printk("Backing Store Page-IN Histogram:\n");
	k_mem_paging_histogram_backing_store_page_in_get(&hist);
	zassert_true(print_histogram(&hist),
		     "should have non-zero counts in histogram.");
	printk("\n");

	printk("Backing Store Page-OUT Histogram:\n");
	k_mem_paging_histogram_backing_store_page_out_get(&hist);
	zassert_true(print_histogram(&hist),
		     "should have non-zero counts in histogram.");
	printk("\n");
}

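/* Suite setup for demand_paging_api: re-map the arena and establish the
 * paged-out baseline. This relies on arena_size having been set when the
 * demand_paging suite ran test_map_anon_pages earlier.
 */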
void *demand_paging_api_setup(void)
{
	arena = k_mem_map(arena_size, K_MEM_PERM_RW);
	test_k_mem_page_out();

	return NULL;
}

ZTEST_SUITE(demand_paging, NULL, NULL, NULL, NULL, NULL);

ZTEST_SUITE(demand_paging_api, NULL, demand_paging_api_setup,
	    NULL, NULL, NULL);

ZTEST_SUITE(demand_paging_stat, NULL, NULL, NULL, NULL, NULL);