1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Data Access Monitor
4 *
5 * Author: SeongJae Park <sjpark@amazon.de>
6 */
7
8 #define pr_fmt(fmt) "damon: " fmt
9
10 #include <linux/damon.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mm.h>
14 #include <linux/slab.h>
15 #include <linux/string.h>
16
17 #define CREATE_TRACE_POINTS
18 #include <trace/events/damon.h>
19
20 #ifdef CONFIG_DAMON_KUNIT_TEST
21 #undef DAMON_MIN_REGION
22 #define DAMON_MIN_REGION 1
23 #endif
24
25 static DEFINE_MUTEX(damon_lock);
26 static int nr_running_ctxs;
27 static bool running_exclusive_ctxs;
28
29 static DEFINE_MUTEX(damon_ops_lock);
30 static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
31
32 static struct kmem_cache *damon_region_cache __ro_after_init;
33
34 /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
36 {
37 struct damon_operations empty_ops = {};
38
39 if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
40 return false;
41 return true;
42 }
43
44 /**
45 * damon_is_registered_ops() - Check if a given damon_operations is registered.
46 * @id: Id of the damon_operations to check if registered.
47 *
48 * Return: true if the ops is set, false otherwise.
49 */
bool damon_is_registered_ops(enum damon_ops_id id)
51 {
52 bool registered;
53
54 if (id >= NR_DAMON_OPS)
55 return false;
56 mutex_lock(&damon_ops_lock);
57 registered = __damon_is_registered_ops(id);
58 mutex_unlock(&damon_ops_lock);
59 return registered;
60 }
61
62 /**
63 * damon_register_ops() - Register a monitoring operations set to DAMON.
64 * @ops: monitoring operations set to register.
65 *
 * This function registers a monitoring operations set having a valid &struct
 * damon_operations->id so that others can find and use it later.
68 *
69 * Return: 0 on success, negative error code otherwise.
70 */
int damon_register_ops(struct damon_operations *ops)
72 {
73 int err = 0;
74
75 if (ops->id >= NR_DAMON_OPS)
76 return -EINVAL;
77 mutex_lock(&damon_ops_lock);
78 /* Fail for already registered ops */
79 if (__damon_is_registered_ops(ops->id)) {
80 err = -EINVAL;
81 goto out;
82 }
83 damon_registered_ops[ops->id] = *ops;
84 out:
85 mutex_unlock(&damon_ops_lock);
86 return err;
87 }
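
/*
 * Illustrative sketch (hypothetical provider, not part of this file): a
 * monitoring operations provider fills a &struct damon_operations with its
 * callbacks and a valid id (DAMON_OPS_PADDR below is only an example id),
 * then registers it, typically from an initcall:
 *
 *	static struct damon_operations example_ops = {
 *		.id = DAMON_OPS_PADDR,
 *		.prepare_access_checks = example_prepare_access_checks,
 *		.check_accesses = example_check_accesses,
 *	};
 *
 *	if (damon_register_ops(&example_ops))
 *		pr_err("ops registration failed\n");
 *
 * The callback names above are made up; the fields this file actually invokes
 * include init, update, prepare_access_checks, check_accesses,
 * reset_aggregated, get_scheme_score, apply_scheme, target_valid, and cleanup.
 */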
88
89 /**
90 * damon_select_ops() - Select a monitoring operations to use with the context.
91 * @ctx: monitoring context to use the operations.
92 * @id: id of the registered monitoring operations to select.
93 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
96 *
97 * Return: 0 on success, negative error code otherwise.
98 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
100 {
101 int err = 0;
102
103 if (id >= NR_DAMON_OPS)
104 return -EINVAL;
105
106 mutex_lock(&damon_ops_lock);
107 if (!__damon_is_registered_ops(id))
108 err = -EINVAL;
109 else
110 ctx->ops = damon_registered_ops[id];
111 mutex_unlock(&damon_ops_lock);
112 return err;
113 }
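
/*
 * Illustrative sketch (hypothetical caller): build a context and select one
 * of the registered operations sets by id.  DAMON_OPS_PADDR is only an
 * example; any id below NR_DAMON_OPS registered via damon_register_ops()
 * works.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
 *		damon_destroy_ctx(ctx);
 *		return -EINVAL;
 *	}
 */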
114
115 /*
116 * Construct a damon_region struct
117 *
 * Returns the pointer to the new struct on success, or NULL otherwise
119 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
121 {
122 struct damon_region *region;
123
124 region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
125 if (!region)
126 return NULL;
127
128 region->ar.start = start;
129 region->ar.end = end;
130 region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);
132
133 region->age = 0;
134 region->last_nr_accesses = 0;
135
136 return region;
137 }
138
void damon_add_region(struct damon_region *r, struct damon_target *t)
140 {
141 list_add_tail(&r->list, &t->regions_list);
142 t->nr_regions++;
143 }
144
static void damon_del_region(struct damon_region *r, struct damon_target *t)
146 {
147 list_del(&r->list);
148 t->nr_regions--;
149 }
150
static void damon_free_region(struct damon_region *r)
152 {
153 kmem_cache_free(damon_region_cache, r);
154 }
155
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
157 {
158 damon_del_region(r, t);
159 damon_free_region(r);
160 }
161
162 /*
163 * Check whether a region is intersecting an address range
164 *
165 * Returns true if it is.
166 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
169 {
170 return !(r->ar.end <= re->start || re->end <= r->ar.start);
171 }
172
173 /*
174 * Fill holes in regions with new regions.
175 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
178 {
179 struct damon_region *r = first;
180
181 damon_for_each_region_from(r, t) {
182 struct damon_region *next, *newr;
183
184 if (r == last)
185 break;
186 next = damon_next_region(r);
187 if (r->ar.end != next->ar.start) {
188 newr = damon_new_region(r->ar.end, next->ar.start);
189 if (!newr)
190 return -ENOMEM;
191 damon_insert_region(newr, r, next, t);
192 }
193 }
194 return 0;
195 }
196
197 /*
198 * damon_set_regions() - Set regions of a target for given address ranges.
199 * @t: the given target.
200 * @ranges: array of new monitoring target ranges.
201 * @nr_ranges: length of @ranges.
202 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in the given ranges.
205 *
206 * Return: 0 if success, or negative error code otherwise.
207 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
210 {
211 struct damon_region *r, *next;
212 unsigned int i;
213 int err;
214
215 /* Remove regions which are not in the new ranges */
216 damon_for_each_region_safe(r, next, t) {
217 for (i = 0; i < nr_ranges; i++) {
218 if (damon_intersect(r, &ranges[i]))
219 break;
220 }
221 if (i == nr_ranges)
222 damon_destroy_region(r, t);
223 }
224
225 r = damon_first_region(t);
226 /* Add new regions or resize existing regions to fit in the ranges */
227 for (i = 0; i < nr_ranges; i++) {
228 struct damon_region *first = NULL, *last, *newr;
229 struct damon_addr_range *range;
230
231 range = &ranges[i];
232 /* Get the first/last regions intersecting with the range */
233 damon_for_each_region_from(r, t) {
234 if (damon_intersect(r, range)) {
235 if (!first)
236 first = r;
237 last = r;
238 }
239 if (r->ar.start >= range->end)
240 break;
241 }
242 if (!first) {
243 /* no region intersects with this range */
244 newr = damon_new_region(
245 ALIGN_DOWN(range->start,
246 DAMON_MIN_REGION),
247 ALIGN(range->end, DAMON_MIN_REGION));
248 if (!newr)
249 return -ENOMEM;
250 damon_insert_region(newr, damon_prev_region(r), r, t);
251 } else {
252 /* resize intersecting regions to fit in this range */
253 first->ar.start = ALIGN_DOWN(range->start,
254 DAMON_MIN_REGION);
255 last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
256
257 /* fill possible holes in the range */
258 err = damon_fill_regions_holes(first, last, t);
259 if (err)
260 return err;
261 }
262 }
263 return 0;
264 }
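
/*
 * Illustrative sketch (hypothetical caller, arbitrary example addresses):
 * construct a target and set two monitoring ranges on it.
 *
 *	struct damon_addr_range ranges[2] = {
 *		{ .start = 0x100000, .end = 0x200000 },
 *		{ .start = 0x500000, .end = 0x600000 },
 *	};
 *	struct damon_target *t = damon_new_target();
 *
 *	if (!t)
 *		return -ENOMEM;
 *	if (damon_set_regions(t, ranges, 2))
 *		pr_warn("failed setting regions\n");
 *
 * As implemented above, existing regions outside the ranges are destroyed,
 * intersecting regions are resized, and holes within each range are filled
 * with new regions.
 */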
265
struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching)
268 {
269 struct damos_filter *filter;
270
271 filter = kmalloc(sizeof(*filter), GFP_KERNEL);
272 if (!filter)
273 return NULL;
274 filter->type = type;
275 filter->matching = matching;
276 INIT_LIST_HEAD(&filter->list);
277 return filter;
278 }
279
void damos_add_filter(struct damos *s, struct damos_filter *f)
281 {
282 list_add_tail(&f->list, &s->filters);
283 }
284
static void damos_del_filter(struct damos_filter *f)
286 {
287 list_del(&f->list);
288 }
289
static void damos_free_filter(struct damos_filter *f)
291 {
292 kfree(f);
293 }
294
void damos_destroy_filter(struct damos_filter *f)
296 {
297 damos_del_filter(f);
298 damos_free_filter(f);
299 }
300
301 /* initialize private fields of damos_quota and return the pointer */
static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
303 {
304 quota->total_charged_sz = 0;
305 quota->total_charged_ns = 0;
306 quota->esz = 0;
307 quota->charged_sz = 0;
308 quota->charged_from = 0;
309 quota->charge_target_from = NULL;
310 quota->charge_addr_from = 0;
311 return quota;
312 }
313
struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action, struct damos_quota *quota,
			struct damos_watermarks *wmarks)
317 {
318 struct damos *scheme;
319
320 scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
321 if (!scheme)
322 return NULL;
323 scheme->pattern = *pattern;
324 scheme->action = action;
325 INIT_LIST_HEAD(&scheme->filters);
326 scheme->stat = (struct damos_stat){};
327 INIT_LIST_HEAD(&scheme->list);
328
329 scheme->quota = *(damos_quota_init_priv(quota));
330
331 scheme->wmarks = *wmarks;
332 scheme->wmarks.activated = true;
333
334 return scheme;
335 }
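
/*
 * Illustrative sketch (hypothetical values): build a statistics-only scheme
 * that matches regions of any size, access frequency, and age, and add it to
 * a context.  The struct fields are those read by this file (e.g. in
 * __damos_valid_target() and damos_wmark_wait_us()); see
 * include/linux/damon.h for the full definitions.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = 0, .max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0, .max_nr_accesses = UINT_MAX,
 *		.min_age_region = 0, .max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
 *	struct damos *s;
 *
 *	s = damon_new_scheme(&pattern, DAMOS_STAT, &quota, &wmarks);
 *	if (!s)
 *		return -ENOMEM;
 *	damon_add_scheme(ctx, s);
 */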
336
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
338 {
339 list_add_tail(&s->list, &ctx->schemes);
340 }
341
static void damon_del_scheme(struct damos *s)
343 {
344 list_del(&s->list);
345 }
346
static void damon_free_scheme(struct damos *s)
348 {
349 kfree(s);
350 }
351
void damon_destroy_scheme(struct damos *s)
353 {
354 struct damos_filter *f, *next;
355
356 damos_for_each_filter_safe(f, next, s)
357 damos_destroy_filter(f);
358 damon_del_scheme(s);
359 damon_free_scheme(s);
360 }
361
362 /*
363 * Construct a damon_target struct
364 *
 * Returns the pointer to the new struct on success, or NULL otherwise
366 */
struct damon_target *damon_new_target(void)
368 {
369 struct damon_target *t;
370
371 t = kmalloc(sizeof(*t), GFP_KERNEL);
372 if (!t)
373 return NULL;
374
375 t->pid = NULL;
376 t->nr_regions = 0;
377 INIT_LIST_HEAD(&t->regions_list);
378 INIT_LIST_HEAD(&t->list);
379
380 return t;
381 }
382
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
384 {
385 list_add_tail(&t->list, &ctx->adaptive_targets);
386 }
387
bool damon_targets_empty(struct damon_ctx *ctx)
389 {
390 return list_empty(&ctx->adaptive_targets);
391 }
392
static void damon_del_target(struct damon_target *t)
394 {
395 list_del(&t->list);
396 }
397
void damon_free_target(struct damon_target *t)
399 {
400 struct damon_region *r, *next;
401
402 damon_for_each_region_safe(r, next, t)
403 damon_free_region(r);
404 kfree(t);
405 }
406
void damon_destroy_target(struct damon_target *t)
408 {
409 damon_del_target(t);
410 damon_free_target(t);
411 }
412
unsigned int damon_nr_regions(struct damon_target *t)
414 {
415 return t->nr_regions;
416 }
417
struct damon_ctx *damon_new_ctx(void)
419 {
420 struct damon_ctx *ctx;
421
422 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
423 if (!ctx)
424 return NULL;
425
426 ctx->attrs.sample_interval = 5 * 1000;
427 ctx->attrs.aggr_interval = 100 * 1000;
428 ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
429
430 ktime_get_coarse_ts64(&ctx->last_aggregation);
431 ctx->last_ops_update = ctx->last_aggregation;
432
433 mutex_init(&ctx->kdamond_lock);
434
435 ctx->attrs.min_nr_regions = 10;
436 ctx->attrs.max_nr_regions = 1000;
437
438 INIT_LIST_HEAD(&ctx->adaptive_targets);
439 INIT_LIST_HEAD(&ctx->schemes);
440
441 return ctx;
442 }
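
/*
 * Note on the defaults above: every interval is in microseconds, so a fresh
 * context samples accesses every 5ms, aggregates the results every 100ms,
 * updates the operations-related structures every 60s, and keeps between 10
 * and 1000 monitoring regions.
 */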
443
static void damon_destroy_targets(struct damon_ctx *ctx)
445 {
446 struct damon_target *t, *next_t;
447
448 if (ctx->ops.cleanup) {
449 ctx->ops.cleanup(ctx);
450 return;
451 }
452
453 damon_for_each_target_safe(t, next_t, ctx)
454 damon_destroy_target(t);
455 }
456
void damon_destroy_ctx(struct damon_ctx *ctx)
458 {
459 struct damos *s, *next_s;
460
461 damon_destroy_targets(ctx);
462
463 damon_for_each_scheme_safe(s, next_s, ctx)
464 damon_destroy_scheme(s);
465
466 kfree(ctx);
467 }
468
static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
471 {
472 return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
473 }
474
475 /* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
478 {
479 unsigned int max_nr_accesses =
480 attrs->aggr_interval / attrs->sample_interval;
481
482 return accesses_bp * max_nr_accesses / 10000;
483 }
484
485 /* convert nr_accesses to access ratio in bp (per 10,000) */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
488 {
489 unsigned int max_nr_accesses =
490 attrs->aggr_interval / attrs->sample_interval;
491
492 return nr_accesses * 10000 / max_nr_accesses;
493 }
494
static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
497 {
498 return damon_accesses_bp_to_nr_accesses(
499 damon_nr_accesses_to_accesses_bp(
500 nr_accesses, old_attrs),
501 new_attrs);
502 }
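
/*
 * Worked example of the conversion above (hypothetical numbers): with
 * sample_interval=5000us and aggr_interval=100000us, max_nr_accesses is 20,
 * so nr_accesses=10 maps to 10 * 10000 / 20 = 5000 bp (50%).  Converting back
 * for new attrs of sample_interval=10000us and aggr_interval=200000us,
 * max_nr_accesses is again 20 and the result is 5000 * 20 / 10000 = 10.  If
 * only aggr_interval doubled to 200000us, max_nr_accesses would be 40 and the
 * same 5000 bp would become 20.
 */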
503
static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
506 {
507 r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
508 old_attrs, new_attrs);
509 r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
510 }
511
/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region was found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  For this reason, the real meaning of the two
 * fields depends on the current sampling interval and aggregation interval.
 * This function updates ->nr_accesses and ->age of the given damon_ctx's
 * regions for the new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs)
522 {
523 struct damon_attrs *old_attrs = &ctx->attrs;
524 struct damon_target *t;
525 struct damon_region *r;
526
	/* if any interval is zero, simply skip the conversion */
528 if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
529 !new_attrs->sample_interval ||
530 !new_attrs->aggr_interval)
531 return;
532
533 damon_for_each_target(t, ctx)
534 damon_for_each_region(r, t)
535 damon_update_monitoring_result(
536 r, old_attrs, new_attrs);
537 }
538
539 /**
540 * damon_set_attrs() - Set attributes for the monitoring.
541 * @ctx: monitoring context
542 * @attrs: monitoring attributes
543 *
544 * This function should not be called while the kdamond is running.
545 * Every time interval is in micro-seconds.
546 *
547 * Return: 0 on success, negative error code otherwise.
548 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
550 {
551 if (attrs->min_nr_regions < 3)
552 return -EINVAL;
553 if (attrs->min_nr_regions > attrs->max_nr_regions)
554 return -EINVAL;
555 if (attrs->sample_interval > attrs->aggr_interval)
556 return -EINVAL;
557
558 damon_update_monitoring_results(ctx, attrs);
559 ctx->attrs = *attrs;
560 return 0;
561 }
562
563 /**
564 * damon_set_schemes() - Set data access monitoring based operation schemes.
565 * @ctx: monitoring context
566 * @schemes: array of the schemes
567 * @nr_schemes: number of entries in @schemes
568 *
569 * This function should not be called while the kdamond of the context is
570 * running.
571 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
574 {
575 struct damos *s, *next;
576 ssize_t i;
577
578 damon_for_each_scheme_safe(s, next, ctx)
579 damon_destroy_scheme(s);
580 for (i = 0; i < nr_schemes; i++)
581 damon_add_scheme(ctx, schemes[i]);
582 }
583
584 /**
585 * damon_nr_running_ctxs() - Return number of currently running contexts.
586 */
int damon_nr_running_ctxs(void)
588 {
589 int nr_ctxs;
590
591 mutex_lock(&damon_lock);
592 nr_ctxs = nr_running_ctxs;
593 mutex_unlock(&damon_lock);
594
595 return nr_ctxs;
596 }
597
598 /* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
600 {
601 struct damon_target *t;
602 struct damon_region *r;
603 unsigned long sz = 0;
604
605 damon_for_each_target(t, ctx) {
606 damon_for_each_region(r, t)
607 sz += damon_sz_region(r);
608 }
609
610 if (ctx->attrs.min_nr_regions)
611 sz /= ctx->attrs.min_nr_regions;
612 if (sz < DAMON_MIN_REGION)
613 sz = DAMON_MIN_REGION;
614
615 return sz;
616 }
617
618 static int kdamond_fn(void *data);
619
620 /*
621 * __damon_start() - Starts monitoring with given context.
622 * @ctx: monitoring context
623 *
 * This function should be called while damon_lock is held.
625 *
626 * Return: 0 on success, negative error code otherwise.
627 */
static int __damon_start(struct damon_ctx *ctx)
629 {
630 int err = -EBUSY;
631
632 mutex_lock(&ctx->kdamond_lock);
633 if (!ctx->kdamond) {
634 err = 0;
635 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
636 nr_running_ctxs);
637 if (IS_ERR(ctx->kdamond)) {
638 err = PTR_ERR(ctx->kdamond);
639 ctx->kdamond = NULL;
640 }
641 }
642 mutex_unlock(&ctx->kdamond_lock);
643
644 return err;
645 }
646
647 /**
 * damon_start() - Starts monitoring for a given group of contexts.
649 * @ctxs: an array of the pointers for contexts to start monitoring
650 * @nr_ctxs: size of @ctxs
651 * @exclusive: exclusiveness of this contexts group
652 *
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads created by another
 * 'damon_start()' call is currently running, this function does nothing but
 * returns -EBUSY.
659 *
660 * Return: 0 on success, negative error code otherwise.
661 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
663 {
664 int i;
665 int err = 0;
666
667 mutex_lock(&damon_lock);
668 if ((exclusive && nr_running_ctxs) ||
669 (!exclusive && running_exclusive_ctxs)) {
670 mutex_unlock(&damon_lock);
671 return -EBUSY;
672 }
673
674 for (i = 0; i < nr_ctxs; i++) {
675 err = __damon_start(ctxs[i]);
676 if (err)
677 break;
678 nr_running_ctxs++;
679 }
680 if (exclusive && nr_running_ctxs)
681 running_exclusive_ctxs = true;
682 mutex_unlock(&damon_lock);
683
684 return err;
685 }
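
/*
 * Illustrative sketch (hypothetical caller): start a single context
 * exclusively and stop it later.
 *
 *	struct damon_ctx *ctxs[1] = { ctx };
 *
 *	if (damon_start(ctxs, 1, true))
 *		return -EBUSY;
 *	...
 *	damon_stop(ctxs, 1);
 *
 * Because @exclusive is true here, the call fails with -EBUSY if any other
 * context group is already running, as checked above.
 */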
686
687 /*
688 * __damon_stop() - Stops monitoring of a given context.
689 * @ctx: monitoring context
690 *
691 * Return: 0 on success, negative error code otherwise.
692 */
static int __damon_stop(struct damon_ctx *ctx)
694 {
695 struct task_struct *tsk;
696
697 mutex_lock(&ctx->kdamond_lock);
698 tsk = ctx->kdamond;
699 if (tsk) {
700 get_task_struct(tsk);
701 mutex_unlock(&ctx->kdamond_lock);
702 kthread_stop(tsk);
703 put_task_struct(tsk);
704 return 0;
705 }
706 mutex_unlock(&ctx->kdamond_lock);
707
708 return -EPERM;
709 }
710
711 /**
 * damon_stop() - Stops monitoring for a given group of contexts.
713 * @ctxs: an array of the pointers for contexts to stop monitoring
714 * @nr_ctxs: size of @ctxs
715 *
716 * Return: 0 on success, negative error code otherwise.
717 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
719 {
720 int i, err = 0;
721
722 for (i = 0; i < nr_ctxs; i++) {
723 /* nr_running_ctxs is decremented in kdamond_fn */
724 err = __damon_stop(ctxs[i]);
725 if (err)
726 break;
727 }
728 return err;
729 }
730
731 /*
732 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
733 * @baseline: the time to check whether the interval has elapsed since
734 * @interval: the time interval (microseconds)
735 *
736 * See whether the given time interval has passed since the given baseline
737 * time. If so, it also updates the baseline to current time for next check.
738 *
739 * Return: true if the time interval has passed, or false otherwise.
740 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
743 {
744 struct timespec64 now;
745
746 ktime_get_coarse_ts64(&now);
747 if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
748 interval * 1000)
749 return false;
750 *baseline = now;
751 return true;
752 }
753
754 /*
755 * Check whether it is time to flush the aggregated information
756 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
758 {
759 return damon_check_reset_time_interval(&ctx->last_aggregation,
760 ctx->attrs.aggr_interval);
761 }
762
763 /*
764 * Reset the aggregated monitoring results ('nr_accesses' of each region).
765 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
767 {
768 struct damon_target *t;
769 unsigned int ti = 0; /* target's index */
770
771 damon_for_each_target(t, c) {
772 struct damon_region *r;
773
774 damon_for_each_region(r, t) {
775 trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
776 r->last_nr_accesses = r->nr_accesses;
777 r->nr_accesses = 0;
778 }
779 ti++;
780 }
781 }
782
783 static void damon_split_region_at(struct damon_target *t,
784 struct damon_region *r, unsigned long sz_r);
785
static bool __damos_valid_target(struct damon_region *r, struct damos *s)
787 {
788 unsigned long sz;
789
790 sz = damon_sz_region(r);
791 return s->pattern.min_sz_region <= sz &&
792 sz <= s->pattern.max_sz_region &&
793 s->pattern.min_nr_accesses <= r->nr_accesses &&
794 r->nr_accesses <= s->pattern.max_nr_accesses &&
795 s->pattern.min_age_region <= r->age &&
796 r->age <= s->pattern.max_age_region;
797 }
798
static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
801 {
802 bool ret = __damos_valid_target(r, s);
803
804 if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
805 return ret;
806
807 return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
808 }
809
810 /*
811 * damos_skip_charged_region() - Check if the given region or starting part of
812 * it is already charged for the DAMOS quota.
813 * @t: The target of the region.
814 * @rp: The pointer to the region.
815 * @s: The scheme to be applied.
816 *
 * If the quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the regions fulfilling
 * the target access pattern.  To avoid applying the scheme action only to the
 * regions that were already applied, DAMON skips applying the action to the
 * regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped for that reason.
 * If only the starting part of the region has previously been charged, this
 * function splits the region into two so that the second one covers the area
 * that was not charged in the previous charge window, saves the second region
 * in *rp, and returns false, so that the caller can apply the DAMON action to
 * the second one.
829 *
830 * Return: true if the region should be entirely skipped, false otherwise.
831 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
834 {
835 struct damon_region *r = *rp;
836 struct damos_quota *quota = &s->quota;
837 unsigned long sz_to_skip;
838
839 /* Skip previously charged regions */
840 if (quota->charge_target_from) {
841 if (t != quota->charge_target_from)
842 return true;
843 if (r == damon_last_region(t)) {
844 quota->charge_target_from = NULL;
845 quota->charge_addr_from = 0;
846 return true;
847 }
848 if (quota->charge_addr_from &&
849 r->ar.end <= quota->charge_addr_from)
850 return true;
851
852 if (quota->charge_addr_from && r->ar.start <
853 quota->charge_addr_from) {
854 sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
855 r->ar.start, DAMON_MIN_REGION);
856 if (!sz_to_skip) {
857 if (damon_sz_region(r) <= DAMON_MIN_REGION)
858 return true;
859 sz_to_skip = DAMON_MIN_REGION;
860 }
861 damon_split_region_at(t, r, sz_to_skip);
862 r = damon_next_region(r);
863 *rp = r;
864 }
865 quota->charge_target_from = NULL;
866 quota->charge_addr_from = 0;
867 }
868 return false;
869 }
870
static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied)
873 {
874 s->stat.nr_tried++;
875 s->stat.sz_tried += sz_tried;
876 if (sz_applied)
877 s->stat.nr_applied++;
878 s->stat.sz_applied += sz_applied;
879 }
880
static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter)
883 {
884 bool matched = false;
885 struct damon_target *ti;
886 int target_idx = 0;
887 unsigned long start, end;
888
889 switch (filter->type) {
890 case DAMOS_FILTER_TYPE_TARGET:
891 damon_for_each_target(ti, ctx) {
892 if (ti == t)
893 break;
894 target_idx++;
895 }
896 matched = target_idx == filter->target_idx;
897 break;
898 case DAMOS_FILTER_TYPE_ADDR:
899 start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
900 end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);
901
902 /* inside the range */
903 if (start <= r->ar.start && r->ar.end <= end) {
904 matched = true;
905 break;
906 }
907 /* outside of the range */
908 if (r->ar.end <= start || end <= r->ar.start) {
909 matched = false;
910 break;
911 }
912 /* start before the range and overlap */
913 if (r->ar.start < start) {
914 damon_split_region_at(t, r, start - r->ar.start);
915 matched = false;
916 break;
917 }
918 /* start inside the range */
919 damon_split_region_at(t, r, end - r->ar.start);
920 matched = true;
921 break;
922 default:
923 break;
924 }
925
926 return matched == filter->matching;
927 }
928
static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s)
931 {
932 struct damos_filter *filter;
933
934 damos_for_each_filter(filter, s) {
935 if (__damos_filter_out(ctx, t, r, filter))
936 return true;
937 }
938 return false;
939 }
940
static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
943 {
944 struct damos_quota *quota = &s->quota;
945 unsigned long sz = damon_sz_region(r);
946 struct timespec64 begin, end;
947 unsigned long sz_applied = 0;
948 int err = 0;
949
950 if (c->ops.apply_scheme) {
951 if (quota->esz && quota->charged_sz + sz > quota->esz) {
952 sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
953 DAMON_MIN_REGION);
954 if (!sz)
955 goto update_stat;
956 damon_split_region_at(t, r, sz);
957 }
958 if (damos_filter_out(c, t, r, s))
959 return;
960 ktime_get_coarse_ts64(&begin);
961 if (c->callback.before_damos_apply)
962 err = c->callback.before_damos_apply(c, t, r, s);
963 if (!err)
964 sz_applied = c->ops.apply_scheme(c, t, r, s);
965 ktime_get_coarse_ts64(&end);
966 quota->total_charged_ns += timespec64_to_ns(&end) -
967 timespec64_to_ns(&begin);
968 quota->charged_sz += sz;
969 if (quota->esz && quota->charged_sz >= quota->esz) {
970 quota->charge_target_from = t;
971 quota->charge_addr_from = r->ar.end + 1;
972 }
973 }
974 if (s->action != DAMOS_STAT)
975 r->age = 0;
976
977 update_stat:
978 damos_update_stat(s, sz, sz_applied);
979 }
980
static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
984 {
985 struct damos *s;
986
987 damon_for_each_scheme(s, c) {
988 struct damos_quota *quota = &s->quota;
989
990 if (!s->wmarks.activated)
991 continue;
992
993 /* Check the quota */
994 if (quota->esz && quota->charged_sz >= quota->esz)
995 continue;
996
997 if (damos_skip_charged_region(t, &r, s))
998 continue;
999
1000 if (!damos_valid_target(c, t, r, s))
1001 continue;
1002
1003 damos_apply_scheme(c, t, r, s);
1004 }
1005 }
1006
1007 /* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
1009 {
1010 unsigned long throughput;
1011 unsigned long esz;
1012
1013 if (!quota->ms) {
1014 quota->esz = quota->sz;
1015 return;
1016 }
1017
1018 if (quota->total_charged_ns)
1019 throughput = quota->total_charged_sz * 1000000 /
1020 quota->total_charged_ns;
1021 else
1022 throughput = PAGE_SIZE * 1024;
1023 esz = throughput * quota->ms;
1024
1025 if (quota->sz && quota->sz < esz)
1026 esz = quota->sz;
1027 quota->esz = esz;
1028 }
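
/*
 * Worked example of the effective quota estimation above (hypothetical
 * numbers): if 4 MiB was charged over 2,000,000 ns in previous windows, the
 * estimated throughput is 4 MiB * 1000000 / 2000000 = 2 MiB per millisecond.
 * With quota->ms of 10, esz becomes 20 MiB, further capped by quota->sz if
 * that is set and smaller.
 */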
1029
static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
1031 {
1032 struct damos_quota *quota = &s->quota;
1033 struct damon_target *t;
1034 struct damon_region *r;
1035 unsigned long cumulated_sz;
1036 unsigned int score, max_score = 0;
1037
1038 if (!quota->ms && !quota->sz)
1039 return;
1040
1041 /* New charge window starts */
1042 if (time_after_eq(jiffies, quota->charged_from +
1043 msecs_to_jiffies(quota->reset_interval))) {
1044 if (quota->esz && quota->charged_sz >= quota->esz)
1045 s->stat.qt_exceeds++;
1046 quota->total_charged_sz += quota->charged_sz;
1047 quota->charged_from = jiffies;
1048 quota->charged_sz = 0;
1049 damos_set_effective_quota(quota);
1050 }
1051
1052 if (!c->ops.get_scheme_score)
1053 return;
1054
1055 /* Fill up the score histogram */
1056 memset(quota->histogram, 0, sizeof(quota->histogram));
1057 damon_for_each_target(t, c) {
1058 damon_for_each_region(r, t) {
1059 if (!__damos_valid_target(r, s))
1060 continue;
1061 score = c->ops.get_scheme_score(c, t, r, s);
1062 quota->histogram[score] += damon_sz_region(r);
1063 if (score > max_score)
1064 max_score = score;
1065 }
1066 }
1067
1068 /* Set the min score limit */
1069 for (cumulated_sz = 0, score = max_score; ; score--) {
1070 cumulated_sz += quota->histogram[score];
1071 if (cumulated_sz >= quota->esz || !score)
1072 break;
1073 }
1074 quota->min_score = score;
1075 }
1076
static void kdamond_apply_schemes(struct damon_ctx *c)
1078 {
1079 struct damon_target *t;
1080 struct damon_region *r, *next_r;
1081 struct damos *s;
1082
1083 damon_for_each_scheme(s, c) {
1084 if (!s->wmarks.activated)
1085 continue;
1086
1087 damos_adjust_quota(c, s);
1088 }
1089
1090 damon_for_each_target(t, c) {
1091 damon_for_each_region_safe(r, next_r, t)
1092 damon_do_apply_schemes(c, t, r);
1093 }
1094 }
1095
1096 /*
1097 * Merge two adjacent regions into one region
1098 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
1101 {
1102 unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
1103
1104 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
1105 (sz_l + sz_r);
1106 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
1107 l->ar.end = r->ar.end;
1108 damon_destroy_region(r, t);
1109 }
1110
1111 /*
1112 * Merge adjacent regions having similar access frequencies
1113 *
1114 * t target affected by this merge operation
1115 * thres '->nr_accesses' diff threshold for the merge
1116 * sz_limit size upper limit of each region
1117 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
1120 {
1121 struct damon_region *r, *prev = NULL, *next;
1122
1123 damon_for_each_region_safe(r, next, t) {
1124 if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
1125 r->age = 0;
1126 else
1127 r->age++;
1128
1129 if (prev && prev->ar.end == r->ar.start &&
1130 abs(prev->nr_accesses - r->nr_accesses) <= thres &&
1131 damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
1132 damon_merge_two_regions(t, prev, r);
1133 else
1134 prev = r;
1135 }
1136 }
1137
1138 /*
1139 * Merge adjacent regions having similar access frequencies
1140 *
1141 * threshold '->nr_accesses' diff threshold for the merge
1142 * sz_limit size upper limit of each region
1143 *
 * This function merges monitoring target regions which are adjacent and have
 * similar access frequencies.  This is for minimizing the monitoring overhead
 * under dynamically changeable access patterns.  If a merge was unnecessarily
 * made, later 'kdamond_split_regions()' will revert it.
1148 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
1151 {
1152 struct damon_target *t;
1153
1154 damon_for_each_target(t, c)
1155 damon_merge_regions_of(t, threshold, sz_limit);
1156 }
1157
1158 /*
1159 * Split a region in two
1160 *
1161 * r the region to be split
1162 * sz_r size of the first sub-region that will be made
1163 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
1166 {
1167 struct damon_region *new;
1168
1169 new = damon_new_region(r->ar.start + sz_r, r->ar.end);
1170 if (!new)
1171 return;
1172
1173 r->ar.end = new->ar.start;
1174
1175 new->age = r->age;
1176 new->last_nr_accesses = r->last_nr_accesses;
1177
1178 damon_insert_region(new, r, damon_next_region(r), t);
1179 }
1180
1181 /* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
1183 {
1184 struct damon_region *r, *next;
1185 unsigned long sz_region, sz_sub = 0;
1186 int i;
1187
1188 damon_for_each_region_safe(r, next, t) {
1189 sz_region = damon_sz_region(r);
1190
1191 for (i = 0; i < nr_subs - 1 &&
1192 sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select the size of the left sub-region to
			 * be at least 10% and at most 90% of the original
			 * region.
			 */
1197 sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
1198 sz_region / 10, DAMON_MIN_REGION);
1199 /* Do not allow blank region */
1200 if (sz_sub == 0 || sz_sub >= sz_region)
1201 continue;
1202
1203 damon_split_region_at(t, r, sz_sub);
1204 sz_region = sz_sub;
1205 }
1206 }
1207 }
1208
1209 /*
1210 * Split every target region into randomly-sized small regions
1211 *
 * This function splits every target region into randomly-sized small regions
 * if the current total number of regions is equal to or smaller than half of
 * the user-specified maximum number of regions.  This is for maximizing the
1215 * monitoring accuracy under the dynamically changeable access patterns. If a
1216 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
1217 * it.
1218 */
static void kdamond_split_regions(struct damon_ctx *ctx)
1220 {
1221 struct damon_target *t;
1222 unsigned int nr_regions = 0;
1223 static unsigned int last_nr_regions;
1224 int nr_subregions = 2;
1225
1226 damon_for_each_target(t, ctx)
1227 nr_regions += damon_nr_regions(t);
1228
1229 if (nr_regions > ctx->attrs.max_nr_regions / 2)
1230 return;
1231
1232 /* Maybe the middle of the region has different access frequency */
1233 if (last_nr_regions == nr_regions &&
1234 nr_regions < ctx->attrs.max_nr_regions / 3)
1235 nr_subregions = 3;
1236
1237 damon_for_each_target(t, ctx)
1238 damon_split_regions_of(t, nr_subregions);
1239
1240 last_nr_regions = nr_regions;
1241 }
1242
1243 /*
1244 * Check whether it is time to check and apply the operations-related data
1245 * structures.
1246 *
1247 * Returns true if it is.
1248 */
static bool kdamond_need_update_operations(struct damon_ctx *ctx)
1250 {
1251 return damon_check_reset_time_interval(&ctx->last_ops_update,
1252 ctx->attrs.ops_update_interval);
1253 }
1254
1255 /*
1256 * Check whether current monitoring should be stopped
1257 *
1258 * The monitoring is stopped when either the user requested to stop, or all
1259 * monitoring targets are invalid.
1260 *
1261 * Returns true if need to stop current monitoring.
1262 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
1264 {
1265 struct damon_target *t;
1266
1267 if (kthread_should_stop())
1268 return true;
1269
1270 if (!ctx->ops.target_valid)
1271 return false;
1272
1273 damon_for_each_target(t, ctx) {
1274 if (ctx->ops.target_valid(t))
1275 return false;
1276 }
1277
1278 return true;
1279 }
1280
static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
1282 {
1283 struct sysinfo i;
1284
1285 switch (metric) {
1286 case DAMOS_WMARK_FREE_MEM_RATE:
1287 si_meminfo(&i);
1288 return i.freeram * 1000 / i.totalram;
1289 default:
1290 break;
1291 }
1292 return -EINVAL;
1293 }
1294
1295 /*
1296 * Returns zero if the scheme is active. Else, returns time to wait for next
1297 * watermark check in micro-seconds.
1298 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
1300 {
1301 unsigned long metric;
1302
1303 if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
1304 return 0;
1305
1306 metric = damos_wmark_metric_value(scheme->wmarks.metric);
1307 /* higher than high watermark or lower than low watermark */
1308 if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
1309 if (scheme->wmarks.activated)
1310 pr_debug("deactivate a scheme (%d) for %s wmark\n",
1311 scheme->action,
1312 metric > scheme->wmarks.high ?
1313 "high" : "low");
1314 scheme->wmarks.activated = false;
1315 return scheme->wmarks.interval;
1316 }
1317
1318 /* inactive and higher than middle watermark */
1319 if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
1320 !scheme->wmarks.activated)
1321 return scheme->wmarks.interval;
1322
1323 if (!scheme->wmarks.activated)
1324 pr_debug("activate a scheme (%d)\n", scheme->action);
1325 scheme->wmarks.activated = true;
1326 return 0;
1327 }
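
/*
 * Worked example of the watermark logic above (hypothetical numbers): with
 * DAMOS_WMARK_FREE_MEM_RATE, the metric is free memory in per-thousand of
 * total memory.  For high=500, mid=400, and low=300, a metric of 350 (between
 * low and mid) activates the scheme and returns zero; a metric of 450
 * (between mid and high) keeps an already active scheme active but leaves an
 * inactive one inactive; and a metric of 600 or 200 deactivates the scheme
 * and makes kdamond wait wmarks.interval microseconds before the next check.
 */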
1328
static void kdamond_usleep(unsigned long usecs)
1330 {
1331 /* See Documentation/timers/timers-howto.rst for the thresholds */
1332 if (usecs > 20 * USEC_PER_MSEC)
1333 schedule_timeout_idle(usecs_to_jiffies(usecs));
1334 else
1335 usleep_idle_range(usecs, usecs + 1);
1336 }
1337
/* Returns negative error code if kdamond should stop before any scheme gets activated */
static int kdamond_wait_activation(struct damon_ctx *ctx)
1340 {
1341 struct damos *s;
1342 unsigned long wait_time;
1343 unsigned long min_wait_time = 0;
1344 bool init_wait_time = false;
1345
1346 while (!kdamond_need_stop(ctx)) {
1347 damon_for_each_scheme(s, ctx) {
1348 wait_time = damos_wmark_wait_us(s);
1349 if (!init_wait_time || wait_time < min_wait_time) {
1350 init_wait_time = true;
1351 min_wait_time = wait_time;
1352 }
1353 }
1354 if (!min_wait_time)
1355 return 0;
1356
1357 kdamond_usleep(min_wait_time);
1358
1359 if (ctx->callback.after_wmarks_check &&
1360 ctx->callback.after_wmarks_check(ctx))
1361 break;
1362 }
1363 return -EBUSY;
1364 }
1365
1366 /*
1367 * The monitoring daemon that runs as a kernel thread
1368 */
static int kdamond_fn(void *data)
1370 {
1371 struct damon_ctx *ctx = data;
1372 struct damon_target *t;
1373 struct damon_region *r, *next;
1374 unsigned int max_nr_accesses = 0;
1375 unsigned long sz_limit = 0;
1376
1377 pr_debug("kdamond (%d) starts\n", current->pid);
1378
1379 if (ctx->ops.init)
1380 ctx->ops.init(ctx);
1381 if (ctx->callback.before_start && ctx->callback.before_start(ctx))
1382 goto done;
1383
1384 sz_limit = damon_region_sz_limit(ctx);
1385
1386 while (!kdamond_need_stop(ctx)) {
1387 if (kdamond_wait_activation(ctx))
1388 break;
1389
1390 if (ctx->ops.prepare_access_checks)
1391 ctx->ops.prepare_access_checks(ctx);
1392 if (ctx->callback.after_sampling &&
1393 ctx->callback.after_sampling(ctx))
1394 break;
1395
1396 kdamond_usleep(ctx->attrs.sample_interval);
1397
1398 if (ctx->ops.check_accesses)
1399 max_nr_accesses = ctx->ops.check_accesses(ctx);
1400
1401 if (kdamond_aggregate_interval_passed(ctx)) {
1402 kdamond_merge_regions(ctx,
1403 max_nr_accesses / 10,
1404 sz_limit);
1405 if (ctx->callback.after_aggregation &&
1406 ctx->callback.after_aggregation(ctx))
1407 break;
1408 if (!list_empty(&ctx->schemes))
1409 kdamond_apply_schemes(ctx);
1410 kdamond_reset_aggregated(ctx);
1411 kdamond_split_regions(ctx);
1412 if (ctx->ops.reset_aggregated)
1413 ctx->ops.reset_aggregated(ctx);
1414 }
1415
1416 if (kdamond_need_update_operations(ctx)) {
1417 if (ctx->ops.update)
1418 ctx->ops.update(ctx);
1419 sz_limit = damon_region_sz_limit(ctx);
1420 }
1421 }
1422 done:
1423 damon_for_each_target(t, ctx) {
1424 damon_for_each_region_safe(r, next, t)
1425 damon_destroy_region(r, t);
1426 }
1427
1428 if (ctx->callback.before_terminate)
1429 ctx->callback.before_terminate(ctx);
1430 if (ctx->ops.cleanup)
1431 ctx->ops.cleanup(ctx);
1432
1433 pr_debug("kdamond (%d) finishes\n", current->pid);
1434 mutex_lock(&ctx->kdamond_lock);
1435 ctx->kdamond = NULL;
1436 mutex_unlock(&ctx->kdamond_lock);
1437
1438 mutex_lock(&damon_lock);
1439 nr_running_ctxs--;
1440 if (!nr_running_ctxs && running_exclusive_ctxs)
1441 running_exclusive_ctxs = false;
1442 mutex_unlock(&damon_lock);
1443
1444 return 0;
1445 }
1446
1447 /*
1448 * struct damon_system_ram_region - System RAM resource address region of
1449 * [@start, @end).
1450 * @start: Start address of the region (inclusive).
1451 * @end: End address of the region (exclusive).
1452 */
1453 struct damon_system_ram_region {
1454 unsigned long start;
1455 unsigned long end;
1456 };
1457
static int walk_system_ram(struct resource *res, void *arg)
1459 {
1460 struct damon_system_ram_region *a = arg;
1461
1462 if (a->end - a->start < resource_size(res)) {
1463 a->start = res->start;
1464 a->end = res->end;
1465 }
1466 return 0;
1467 }
1468
1469 /*
 * Find the biggest 'System RAM' resource and store its start and end
 * addresses in @start and @end, respectively.  If no System RAM is found,
 * returns false.
1472 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
1475
1476 {
1477 struct damon_system_ram_region arg = {};
1478
1479 walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
1480 if (arg.end <= arg.start)
1481 return false;
1482
1483 *start = arg.start;
1484 *end = arg.end;
1485 return true;
1486 }
1487
1488 /**
1489 * damon_set_region_biggest_system_ram_default() - Set the region of the given
1490 * monitoring target as requested, or biggest 'System RAM'.
1491 * @t: The monitoring target to set the region.
1492 * @start: The pointer to the start address of the region.
1493 * @end: The pointer to the end address of the region.
1494 *
1495 * This function sets the region of @t as requested by @start and @end. If the
1496 * values of @start and @end are zero, however, this function finds the biggest
1497 * 'System RAM' resource and sets the region to cover the resource. In the
1498 * latter case, this function saves the start and end addresses of the resource
1499 * in @start and @end, respectively.
1500 *
1501 * Return: 0 on success, negative error code otherwise.
1502 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
1505 {
1506 struct damon_addr_range addr_range;
1507
1508 if (*start > *end)
1509 return -EINVAL;
1510
1511 if (!*start && !*end &&
1512 !damon_find_biggest_system_ram(start, end))
1513 return -EINVAL;
1514
1515 addr_range.start = *start;
1516 addr_range.end = *end;
1517 return damon_set_regions(t, &addr_range, 1);
1518 }
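
/*
 * Illustrative sketch (hypothetical caller): let DAMON pick the biggest
 * 'System RAM' resource as the monitoring region of a target.
 *
 *	unsigned long start = 0, end = 0;
 *
 *	if (damon_set_region_biggest_system_ram_default(t, &start, &end))
 *		return -EINVAL;
 *	pr_debug("monitoring [%lu, %lu)\n", start, end);
 *
 * Because both addresses are zero on entry, the function fills them with the
 * biggest System RAM range it finds, as implemented above.
 */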
1519
static int __init damon_init(void)
1521 {
1522 damon_region_cache = KMEM_CACHE(damon_region, 0);
1523 if (unlikely(!damon_region_cache)) {
1524 pr_err("creating damon_region_cache fails\n");
1525 return -ENOMEM;
1526 }
1527
1528 return 0;
1529 }
1530
1531 subsys_initcall(damon_init);
1532
1533 #include "core-test.h"
1534