/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * RAM-based memory buffer backing store implementation for demo purposes
 */
#include <mmu.h>
#include <string.h>
#include <kernel_arch_interface.h>
#include <zephyr/kernel/mm/demand_paging.h>

/*
 * TODO:
 *
 * This is a demonstration backing store for testing the kernel side of the
 * demand paging feature. In production there are basically two types of
 * backing stores:
 *
 * 1) A large, sparse backing store that is big enough to capture the entire
 *    address space. Implementation of these is very simple; the location
 *    token is just a function of the evicted virtual address and no space
 *    management is necessary. Clean copies of paged-in data pages may be kept
 *    indefinitely.
 *
 * 2) A backing store that has limited storage space, and is not sufficiently
 *    large to hold clean copies of all mapped memory.
 *
 * This backing store is an example of the latter case. However, locations
 * are freed as soon as pages are paged in, in
 * k_mem_paging_backing_store_page_finalize().
 * This implies that all data pages are treated as dirty, as
 * K_MEM_PAGE_FRAME_BACKED is never set, even if the data page was paged out
 * before and not modified since then.
 *
 * An optimization a real backing store will want is to have
 * k_mem_paging_backing_store_page_finalize() note the storage location of
 * a paged-in data page in a custom field of its associated k_mem_page_frame,
 * and set the K_MEM_PAGE_FRAME_BACKED bit. Invocations of
 * k_mem_paging_backing_store_location_get() will then have logic to return
 * the previous clean page location instead of allocating
 * a new one if K_MEM_PAGE_FRAME_BACKED is set.
 *
 * This will, however, require the implementation of a clean page
 * eviction algorithm, to free backing store locations for loaded data pages
 * as the backing store fills up, and clear the K_MEM_PAGE_FRAME_BACKED bit
 * appropriately.
 *
 * All of this logic is local to the backing store implementation; from the
 * core kernel's perspective the only change is that K_MEM_PAGE_FRAME_BACKED
 * starts getting set for certain page frames after a page-in (and possibly
 * cleared at a later time).
 */
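
/*
 * A minimal, illustrative sketch of the optimization described above. It
 * assumes a hypothetical "backing_location" field added to
 * struct k_mem_page_frame and a k_mem_page_frame_set() /
 * k_mem_page_frame_is_backed() style flag API; it is not compiled and is
 * not part of this demo implementation.
 */
#if 0
void k_mem_paging_backing_store_page_finalize(struct k_mem_page_frame *pf,
					      uintptr_t location)
{
	/* Keep the clean copy in the backing store and remember where it
	 * lives instead of freeing the location right away.
	 */
	pf->backing_location = location;	/* hypothetical custom field */
	k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_BACKED);
}

int k_mem_paging_backing_store_location_get(struct k_mem_page_frame *pf,
					    uintptr_t *location,
					    bool page_fault)
{
	if (k_mem_page_frame_is_backed(pf)) {
		/* The data page was evicted before and not modified since;
		 * reuse the clean copy already in the backing store.
		 */
		*location = pf->backing_location;
		return 0;
	}

	/* Otherwise allocate a fresh location as the real code below does;
	 * a clean page eviction algorithm would reclaim BACKED locations
	 * (and clear the bit) once the store fills up.
	 */
	return -ENOMEM;	/* placeholder for the allocation path */
}
#endif
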
#define BACKING_STORE_SIZE (CONFIG_BACKING_STORE_RAM_PAGES * CONFIG_MMU_PAGE_SIZE)
static char backing_store[BACKING_STORE_SIZE] __aligned(sizeof(void *));
static struct k_mem_slab backing_slabs;
static unsigned int free_slabs;

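/*
 * A location token in this backing store is simply the byte offset of a
 * page-sized slab within the backing_store buffer; these two helpers
 * convert between the representations.
 */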
static void *location_to_slab(uintptr_t location)
{
	__ASSERT(location % CONFIG_MMU_PAGE_SIZE == 0,
		 "unaligned location 0x%lx", location);
	__ASSERT(location <
		 (CONFIG_BACKING_STORE_RAM_PAGES * CONFIG_MMU_PAGE_SIZE),
		 "bad location 0x%lx, past bounds of backing store", location);

	return backing_store + location;
}

static uintptr_t slab_to_location(void *slab)
{
	char *pos = slab;
	uintptr_t offset;

	__ASSERT(pos >= backing_store &&
		 pos < backing_store + ARRAY_SIZE(backing_store),
		 "bad slab pointer %p", slab);
	offset = pos - backing_store;
	__ASSERT(offset % CONFIG_MMU_PAGE_SIZE == 0,
		 "unaligned slab pointer %p", slab);

	return offset;
}

int k_mem_paging_backing_store_location_get(struct k_mem_page_frame *pf,
					    uintptr_t *location,
					    bool page_fault)
{
	int ret;
	void *slab;

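	/*
	 * Reserve the last free location for page fault handling: a request
	 * that is not resolving a page fault must not consume it.
	 */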
	if ((!page_fault && free_slabs == 1) || free_slabs == 0) {
		return -ENOMEM;
	}

	ret = k_mem_slab_alloc(&backing_slabs, &slab, K_NO_WAIT);
	__ASSERT(ret == 0, "slab count mismatch");
	if (ret != 0) {
		return ret;
	}
	*location = slab_to_location(slab);
	free_slabs--;

	return 0;
}

void k_mem_paging_backing_store_location_free(uintptr_t location)
{
	void *slab = location_to_slab(location);

	k_mem_slab_free(&backing_slabs, slab);
	free_slabs++;
}

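/*
 * The core kernel maps the data page being evicted at K_MEM_SCRATCH_PAGE
 * before calling this, so paging out is a plain copy from the scratch page
 * into the backing store location.
 */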
void k_mem_paging_backing_store_page_out(uintptr_t location)
{
	(void)memcpy(location_to_slab(location), K_MEM_SCRATCH_PAGE,
		     CONFIG_MMU_PAGE_SIZE);
}

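/*
 * Conversely, paging in copies the stored contents back to
 * K_MEM_SCRATCH_PAGE, which the core kernel has mapped to the destination
 * page frame.
 */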
void k_mem_paging_backing_store_page_in(uintptr_t location)
{
	(void)memcpy(K_MEM_SCRATCH_PAGE, location_to_slab(location),
		     CONFIG_MMU_PAGE_SIZE);
}

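/*
 * As discussed in the TODO above, this demo backing store frees the
 * location as soon as the page-in completes, so every data page is treated
 * as dirty on its next eviction.
 */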
void k_mem_paging_backing_store_page_finalize(struct k_mem_page_frame *pf,
					      uintptr_t location)
{
#ifdef CONFIG_DEMAND_MAPPING
	/* Unpaged anonymous mappings have no backing store location to free */
	if (location == ARCH_UNPAGED_ANON_ZERO || location == ARCH_UNPAGED_ANON_UNINIT) {
		return;
	}
#endif
	k_mem_paging_backing_store_location_free(location);
}

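/*
 * Carve the RAM buffer into page-sized slabs; every slab starts out free.
 */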
void k_mem_paging_backing_store_init(void)
{
	k_mem_slab_init(&backing_slabs, backing_store, CONFIG_MMU_PAGE_SIZE,
			CONFIG_BACKING_STORE_RAM_PAGES);
	free_slabs = CONFIG_BACKING_STORE_RAM_PAGES;
}