/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/toolchain.h>
#include <zephyr/kernel/mm/demand_paging.h>

extern struct k_mem_paging_stats_t paging_stats;

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
struct k_mem_paging_histogram_t z_paging_histogram_eviction;
struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_in;
struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_out;

#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS

/*
 * The frequency of the timing functions is highly dependent on the
 * architecture, SoC or board, and is not available at build time.
 * Therefore, the bounds for the timing histograms need to be defined
 * externally to this file and must be tailored to the platform being
 * used.
 */
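
/*
 * A minimal sketch of such an external definition (the values below are
 * hypothetical and must be tuned to the platform's timing-function
 * frequency; the array length must match
 * CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS, i.e. the default
 * arrays further below have 10 entries):
 *
 *   unsigned long k_mem_paging_eviction_histogram_bounds[
 *           CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
 *           100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000,
 *           ULONG_MAX
 *   };
 *
 * k_mem_paging_backing_store_histogram_bounds is defined the same way.
 */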

extern unsigned long
k_mem_paging_eviction_histogram_bounds[
	CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];

extern unsigned long
k_mem_paging_backing_store_histogram_bounds[
	CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];

#else
#define NS_TO_CYC(ns)		(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / 1000000U * (ns))
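/*
 * For example, with a hypothetical 100 MHz cycle counter
 * (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 100000000), NS_TO_CYC(10)
 * evaluates to (100000000 / 1000000U) * 10 == 1000 cycles.
 */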

/*
 * This provides the upper bounds of the bins in the eviction timing histogram.
 */
__weak unsigned long
k_mem_paging_eviction_histogram_bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
	NS_TO_CYC(1),
	NS_TO_CYC(5),
	NS_TO_CYC(10),
	NS_TO_CYC(50),
	NS_TO_CYC(100),
	NS_TO_CYC(200),
	NS_TO_CYC(500),
	NS_TO_CYC(1000),
	NS_TO_CYC(2000),
	ULONG_MAX
};

/*
 * This provides the upper bounds of the bins in the backing store timing
 * histogram (both page-in and page-out).
 */
__weak unsigned long
k_mem_paging_backing_store_histogram_bounds[
	CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
	NS_TO_CYC(10),
	NS_TO_CYC(100),
	NS_TO_CYC(125),
	NS_TO_CYC(250),
	NS_TO_CYC(500),
	NS_TO_CYC(1000),
	NS_TO_CYC(2000),
	NS_TO_CYC(5000),
	NS_TO_CYC(10000),
	ULONG_MAX
};
#endif /* CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS */
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

unsigned long z_num_pagefaults_get(void)
{
	unsigned long ret;
	unsigned int key;

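	/*
	 * Read the counter under an IRQ lock so the value is consistent
	 * with updates made while servicing page faults.
	 */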
	key = irq_lock();
	ret = paging_stats.pagefaults.cnt;
	irq_unlock(key);

	return ret;
}

void z_impl_k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats)
{
	if (stats == NULL) {
		return;
	}

	/* Copy statistics */
	memcpy(stats, &paging_stats, sizeof(paging_stats));
}

#ifdef CONFIG_USERSPACE
static inline
void z_vrfy_k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(stats, sizeof(*stats)));
	z_impl_k_mem_paging_stats_get(stats);
}
#include <syscalls/k_mem_paging_stats_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
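
/*
 * Illustrative application-side usage (not part of this file's logic;
 * the field access mirrors paging_stats.pagefaults.cnt above):
 *
 *   struct k_mem_paging_stats_t stats;
 *
 *   k_mem_paging_stats_get(&stats);
 *   printk("page faults so far: %lu\n", stats.pagefaults.cnt);
 */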

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
void z_impl_k_mem_paging_thread_stats_get(struct k_thread *thread,
					  struct k_mem_paging_stats_t *stats)
{
	if ((thread == NULL) || (stats == NULL)) {
		return;
	}

	/* Copy statistics */
	memcpy(stats, &thread->paging_stats, sizeof(thread->paging_stats));
}

#ifdef CONFIG_USERSPACE
static inline
void z_vrfy_k_mem_paging_thread_stats_get(struct k_thread *thread,
					  struct k_mem_paging_stats_t *stats)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(stats, sizeof(*stats)));
	z_impl_k_mem_paging_thread_stats_get(thread, stats);
}
#include <syscalls/k_mem_paging_thread_stats_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
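
/*
 * Illustrative per-thread query (hypothetical usage; requires
 * CONFIG_DEMAND_PAGING_THREAD_STATS=y):
 *
 *   struct k_mem_paging_stats_t stats;
 *
 *   k_mem_paging_thread_stats_get(k_current_get(), &stats);
 */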

#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
void z_paging_histogram_init(void)
{
	/*
	 * Zero out the histogram structs and copy the bounds.
	 * The copying is done as the histogram structs need
	 * to be pinned in memory and never swapped out, while
	 * the source bound array may not be pinned.
	 */

	memset(&z_paging_histogram_eviction, 0, sizeof(z_paging_histogram_eviction));
	memcpy(z_paging_histogram_eviction.bounds,
	       k_mem_paging_eviction_histogram_bounds,
	       sizeof(z_paging_histogram_eviction.bounds));

	memset(&z_paging_histogram_backing_store_page_in, 0,
	       sizeof(z_paging_histogram_backing_store_page_in));
	memcpy(z_paging_histogram_backing_store_page_in.bounds,
	       k_mem_paging_backing_store_histogram_bounds,
	       sizeof(z_paging_histogram_backing_store_page_in.bounds));

	memset(&z_paging_histogram_backing_store_page_out, 0,
	       sizeof(z_paging_histogram_backing_store_page_out));
	memcpy(z_paging_histogram_backing_store_page_out.bounds,
	       k_mem_paging_backing_store_histogram_bounds,
	       sizeof(z_paging_histogram_backing_store_page_out.bounds));
}

/**
 * Increment the counter in the timing histogram.
 *
 * @param hist The timing histogram to be updated.
 * @param cycles Time spent in the measured operation, in cycles.
 */
void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
			    uint32_t cycles)
{
	int idx;

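	/*
	 * Find the first bin whose upper bound covers the sample. The
	 * default bounds end with ULONG_MAX, so every sample lands in
	 * some bin; with custom bounds lacking such a terminator,
	 * samples above the largest bound are silently dropped.
	 */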
	for (idx = 0;
	     idx < CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS;
	     idx++) {
		if (cycles <= hist->bounds[idx]) {
			hist->counts[idx]++;
			break;
		}
	}
}
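
/*
 * Illustrative call pattern (hypothetical; the real callers live in the
 * demand paging core and use the configured timing source). evict_page()
 * below is only a placeholder for the measured operation:
 *
 *   uint32_t t0 = k_cycle_get_32();
 *
 *   evict_page();
 *   z_paging_histogram_inc(&z_paging_histogram_eviction,
 *                          k_cycle_get_32() - t0);
 */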

void z_impl_k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist)
{
	if (hist == NULL) {
		return;
	}

	/* Copy histogram */
	memcpy(hist, &z_paging_histogram_eviction,
	       sizeof(z_paging_histogram_eviction));
}

void z_impl_k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist)
{
	if (hist == NULL) {
		return;
	}

	/* Copy histogram */
	memcpy(hist, &z_paging_histogram_backing_store_page_in,
	       sizeof(z_paging_histogram_backing_store_page_in));
}

void z_impl_k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist)
{
	if (hist == NULL) {
		return;
	}

	/* Copy histogram */
	memcpy(hist, &z_paging_histogram_backing_store_page_out,
	       sizeof(z_paging_histogram_backing_store_page_out));
}
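
/*
 * Example of walking a retrieved histogram (illustrative only; bin i
 * counts samples with cycles <= bounds[i] that did not fit an earlier bin):
 *
 *   struct k_mem_paging_histogram_t hist;
 *   int i;
 *
 *   k_mem_paging_histogram_eviction_get(&hist);
 *   for (i = 0; i < CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS; i++) {
 *           printk("<= %lu cycles: %lu\n", hist.bounds[i], hist.counts[i]);
 *   }
 */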

#ifdef CONFIG_USERSPACE
static inline
void z_vrfy_k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(hist, sizeof(*hist)));
	z_impl_k_mem_paging_histogram_eviction_get(hist);
}
#include <syscalls/k_mem_paging_histogram_eviction_get_mrsh.c>

static inline
void z_vrfy_k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(hist, sizeof(*hist)));
	z_impl_k_mem_paging_histogram_backing_store_page_in_get(hist);
}
#include <syscalls/k_mem_paging_histogram_backing_store_page_in_get_mrsh.c>

static inline
void z_vrfy_k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist)
{
	K_OOPS(K_SYSCALL_MEMORY_WRITE(hist, sizeof(*hist)));
	z_impl_k_mem_paging_histogram_backing_store_page_out_get(hist);
}
#include <syscalls/k_mem_paging_histogram_backing_store_page_out_get_mrsh.c>
#endif /* CONFIG_USERSPACE */

#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */