// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON-based LRU-lists Sorting
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-lru-sort: " fmt

#include <linux/damon.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

#include "modules-common.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_lru_sort."
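
/*
 * With this prefix, each of the parameters below can be set at boot time via
 * the kernel command line (e.g., damon_lru_sort.enabled=Y), or at runtime
 * under /sys/module/damon_lru_sort/parameters/.
 */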

/*
 * Enable or disable DAMON_LRU_SORT.
 *
 * You can enable DAMON_LRU_SORT by setting the value of this parameter as
 * ``Y``.  Setting it as ``N`` disables DAMON_LRU_SORT.  Note that even when
 * enabled, DAMON_LRU_SORT may do no real monitoring and LRU-lists sorting,
 * due to its watermarks-based activation condition.  Refer to the
 * descriptions of the watermarks parameters below for details.
 */
static bool enabled __read_mostly;

/*
 * Make DAMON_LRU_SORT read the input parameters again, except ``enabled``.
 *
 * Input parameters that are updated while DAMON_LRU_SORT is running are not
 * applied by default.  Once this parameter is set as ``Y``, DAMON_LRU_SORT
 * reads values of parameters except ``enabled`` again.  Once the re-reading
 * is done, this parameter is set as ``N``.  If invalid parameters are found
 * during the re-reading, DAMON_LRU_SORT will be disabled.
 */
static bool commit_inputs __read_mostly;
module_param(commit_inputs, bool, 0600);

/*
 * Access frequency threshold for hot memory regions identification in permil.
 *
 * If a memory region is accessed with this frequency or higher,
 * DAMON_LRU_SORT identifies the region as hot, and marks it as accessed on
 * the LRU list, so that it would not be reclaimed under memory pressure.
 * 50% by default.
 */
static unsigned long hot_thres_access_freq = 500;
module_param(hot_thres_access_freq, ulong, 0600);

/*
 * Time threshold for cold memory regions identification in microseconds.
 *
 * If a memory region is not accessed for this or a longer time,
 * DAMON_LRU_SORT identifies the region as cold, and marks it as unaccessed
 * on the LRU list, so that it would be reclaimed first under memory
 * pressure.  120 seconds by default.
 */
static unsigned long cold_min_age __read_mostly = 120000000;
module_param(cold_min_age, ulong, 0600);

static struct damos_quota damon_lru_sort_quota = {
        /* Use up to 10 ms per 1 sec, by default */
        .ms = 10,
        .sz = 0,
        .reset_interval = 1000,
        /* Within the quota, mark hotter regions accessed first. */
        .weight_sz = 0,
        .weight_nr_accesses = 1,
        .weight_age = 0,
};
DEFINE_DAMON_MODULES_DAMOS_TIME_QUOTA(damon_lru_sort_quota);
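
/*
 * The hot and cold schemes below each get half of this time quota, i.e.,
 * 5 ms per second by default; see damon_lru_sort_new_scheme().
 */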

static struct damos_watermarks damon_lru_sort_wmarks = {
        .metric = DAMOS_WMARK_FREE_MEM_RATE,
        .interval = 5000000,    /* 5 seconds */
        .high = 200,            /* 20 percent */
        .mid = 150,             /* 15 percent */
        .low = 50,              /* 5 percent */
};
DEFINE_DAMON_MODULES_WMARKS_PARAMS(damon_lru_sort_wmarks);
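
/*
 * With these defaults, the schemes activate when the system's free memory
 * rate falls below 15%, and deactivate when it rises above 20% or falls
 * below 5% (under such heavy pressure, ordinary reclaim is presumably the
 * better tool).
 */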

static struct damon_attrs damon_lru_sort_mon_attrs = {
        .sample_interval = 5000,        /* 5 ms */
        .aggr_interval = 100000,        /* 100 ms */
        .ops_update_interval = 0,
        .min_nr_regions = 10,
        .max_nr_regions = 1000,
};
DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(damon_lru_sort_mon_attrs);

/*
 * Start of the target memory region in physical address.
 *
 * The start physical address of the memory region that DAMON_LRU_SORT will
 * do its work against.  By default, the biggest System RAM is used as the
 * region.
 */
static unsigned long monitor_region_start __read_mostly;
module_param(monitor_region_start, ulong, 0600);

/*
 * End of the target memory region in physical address.
 *
 * The end physical address of the memory region that DAMON_LRU_SORT will do
 * its work against.  By default, the biggest System RAM is used as the
 * region.
 */
static unsigned long monitor_region_end __read_mostly;
module_param(monitor_region_end, ulong, 0600);

/*
 * PID of the DAMON thread
 *
 * If DAMON_LRU_SORT is enabled, this becomes the PID of the worker thread.
 * Else, -1.
 */
static int kdamond_pid __read_mostly = -1;
module_param(kdamond_pid, int, 0400);

static struct damos_stat damon_lru_sort_hot_stat;
DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_hot_stat,
                lru_sort_tried_hot_regions, lru_sorted_hot_regions,
                hot_quota_exceeds);

static struct damos_stat damon_lru_sort_cold_stat;
DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_cold_stat,
                lru_sort_tried_cold_regions, lru_sorted_cold_regions,
                cold_quota_exceeds);
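
/*
 * DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS() exposes each scheme's statistics
 * (numbers and bytes of tried and successfully applied regions, and the
 * number of times the quota limit was exceeded) as read-only module
 * parameters named after the tokens given above.
 */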

static struct damos_access_pattern damon_lru_sort_stub_pattern = {
        /* Find regions having PAGE_SIZE or larger size */
        .min_sz_region = PAGE_SIZE,
        .max_sz_region = ULONG_MAX,
        /* no matter its access frequency */
        .min_nr_accesses = 0,
        .max_nr_accesses = UINT_MAX,
        /* no matter its age */
        .min_age_region = 0,
        .max_age_region = UINT_MAX,
};
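
/*
 * The hot and cold schemes below start from a copy of this stub pattern and
 * override only the fields that define their respective access patterns.
 */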

static struct damon_ctx *ctx;
static struct damon_target *target;

static struct damos *damon_lru_sort_new_scheme(
                struct damos_access_pattern *pattern, enum damos_action action)
{
        struct damos_quota quota = damon_lru_sort_quota;

        /* Use half of total quota for hot/cold pages sorting */
        quota.ms = quota.ms / 2;

        return damon_new_scheme(
                        /* find the pattern, and */
                        pattern,
                        /* (de)prioritize on LRU-lists */
                        action,
                        /* under the quota. */
                        &quota,
                        /* (De)activate this according to the watermarks. */
                        &damon_lru_sort_wmarks);
}

/* Create a DAMON-based operation scheme for hot memory regions */
static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
{
        struct damos_access_pattern pattern = damon_lru_sort_stub_pattern;

        pattern.min_nr_accesses = hot_thres;
        return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_PRIO);
}

/* Create a DAMON-based operation scheme for cold memory regions */
static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
{
        struct damos_access_pattern pattern = damon_lru_sort_stub_pattern;

        pattern.max_nr_accesses = 0;
        pattern.min_age_region = cold_thres;
        return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
}

static int damon_lru_sort_apply_parameters(void)
{
        struct damos *scheme;
        unsigned int hot_thres, cold_thres;
        int err = 0;

        err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
        if (err)
                return err;

        /* aggr_interval / sample_interval is the maximum nr_accesses */
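        /* with the defaults: 100000 / 5000 * 500 / 1000 = 10 accesses */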
        hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
                damon_lru_sort_mon_attrs.sample_interval *
                hot_thres_access_freq / 1000;
        scheme = damon_lru_sort_new_hot_scheme(hot_thres);
        if (!scheme)
                return -ENOMEM;
        damon_set_schemes(ctx, &scheme, 1);

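        /* with the defaults: 120000000 / 100000 = 1200 aggregation intervals */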
        cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
        scheme = damon_lru_sort_new_cold_scheme(cold_thres);
        if (!scheme)
                return -ENOMEM;
        damon_add_scheme(ctx, scheme);

        return damon_set_region_biggest_system_ram_default(target,
                                        &monitor_region_start,
                                        &monitor_region_end);
}

static int damon_lru_sort_turn(bool on)
{
        int err;

        if (!on) {
                err = damon_stop(&ctx, 1);
                if (!err)
                        kdamond_pid = -1;
                return err;
        }

        err = damon_lru_sort_apply_parameters();
        if (err)
                return err;

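        /*
         * Start this single context.  The third argument sets damon_start()'s
         * 'exclusive' flag, so the context presumably will not run alongside
         * other exclusive DAMON context groups.
         */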
        err = damon_start(&ctx, 1, true);
        if (err)
                return err;
        kdamond_pid = ctx->kdamond->pid;
        return 0;
}

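/*
 * Turning DAMON_LRU_SORT on or off is done asynchronously via this delayed
 * work, since the 'enabled' parameter can also be set before
 * damon_lru_sort_init() has run (e.g., from the kernel command line).  In
 * that case the parameter store callback returns early, and
 * damon_lru_sort_init() schedules this work once the setup is done.
 */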
static struct delayed_work damon_lru_sort_timer;
static void damon_lru_sort_timer_fn(struct work_struct *work)
{
        static bool last_enabled;
        bool now_enabled;

        now_enabled = enabled;
        if (last_enabled != now_enabled) {
                if (!damon_lru_sort_turn(now_enabled))
                        last_enabled = now_enabled;
                else
                        enabled = last_enabled;
        }
}
static DECLARE_DELAYED_WORK(damon_lru_sort_timer, damon_lru_sort_timer_fn);

static bool damon_lru_sort_initialized;

static int damon_lru_sort_enabled_store(const char *val,
                const struct kernel_param *kp)
{
        int rc = param_set_bool(val, kp);

        if (rc < 0)
                return rc;

        if (!damon_lru_sort_initialized)
                return rc;

        schedule_delayed_work(&damon_lru_sort_timer, 0);

        return 0;
}

static const struct kernel_param_ops enabled_param_ops = {
        .set = damon_lru_sort_enabled_store,
        .get = param_get_bool,
};

module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled,
        "Enable or disable DAMON_LRU_SORT (default: disabled)");

static int damon_lru_sort_handle_commit_inputs(void)
{
        int err;

        if (!commit_inputs)
                return 0;

        err = damon_lru_sort_apply_parameters();
        commit_inputs = false;
        return err;
}

static int damon_lru_sort_after_aggregation(struct damon_ctx *c)
{
        struct damos *s;

        /* update the stats parameter */
        damon_for_each_scheme(s, c) {
                if (s->action == DAMOS_LRU_PRIO)
                        damon_lru_sort_hot_stat = s->stat;
                else if (s->action == DAMOS_LRU_DEPRIO)
                        damon_lru_sort_cold_stat = s->stat;
        }

        return damon_lru_sort_handle_commit_inputs();
}

static int damon_lru_sort_after_wmarks_check(struct damon_ctx *c)
{
        return damon_lru_sort_handle_commit_inputs();
}

static int __init damon_lru_sort_init(void)
{
        ctx = damon_new_ctx();
        if (!ctx)
                return -ENOMEM;

        if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
                damon_destroy_ctx(ctx);
                return -EINVAL;
        }

        ctx->callback.after_wmarks_check = damon_lru_sort_after_wmarks_check;
        ctx->callback.after_aggregation = damon_lru_sort_after_aggregation;

        target = damon_new_target();
        if (!target) {
                damon_destroy_ctx(ctx);
                return -ENOMEM;
        }
        damon_add_target(ctx, target);

        schedule_delayed_work(&damon_lru_sort_timer, 0);

        damon_lru_sort_initialized = true;
        return 0;
}

module_init(damon_lru_sort_init);