// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux VM pressure
 *
 * Copyright 2012 Linaro Ltd.
 *                Anton Vorontsov <anton.vorontsov@linaro.org>
 *
 * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
 * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>

/*
 * The window size (vmpressure_win) is the number of scanned pages before
 * we try to analyze the scanned/reclaimed ratio. So the window is used as
 * a rate-limit tunable for the "low" level notification, and also for
 * averaging the ratio for the medium/critical levels. A small window size
 * can cause a lot of false positives, but a window that is too big will
 * delay the notifications.
 *
 * As the vmscan reclaimer logic works with chunks which are multiple of
 * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well.
 *
 * TODO: Make the window size depend on machine size, as we do for vmstat
 * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
 */
static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
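
/*
 * A worked example (a sketch, assuming the usual SWAP_CLUSTER_MAX of 32):
 * the window is 32 * 16 = 512 pages, i.e. 2 MiB with 4 KiB pages, so a
 * "low" notification can fire at most once per 512 scanned pages.
 */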

/*
 * These thresholds are used when we account memory pressure through
 * scanned/reclaimed ratio. The current values were chosen empirically. In
 * essence, they are percentages: the higher the value, the more
 * unsuccessful reclaims there were.
 */
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;
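
/*
 * In terms of reclaim efficiency the thresholds mean, roughly (see
 * vmpressure_calc_level() below for the exact integer arithmetic):
 *
 *      reclaimed/scanned >  40%  ->  "low"
 *      reclaimed/scanned <= 40%  ->  "medium"   (pressure >= 60)
 *      reclaimed/scanned <=  5%  ->  "critical" (pressure >= 95)
 */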

/*
 * When there are too few pages left to scan, vmpressure() may miss the
 * critical pressure as the number of pages will be less than the "window
 * size". However, in that case the vmscan priority will rise quickly, as
 * the reclaimer will try to scan the LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). The current value of vmpressure_level_critical_prio was chosen
 * empirically, but the number, in essence, means that we consider the
 * level critical when the scanning depth is ~10% of the lru size (vmscan
 * scans 'lru_size >> prio' pages, so it is actually 12.5%, or one
 * eighth).
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);
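
/*
 * Worked arithmetic: ilog2(100 / 10) == ilog2(10) == 3, so starting from
 * DEF_PRIORITY (12) the reclaimer has to drop prio nine times, down to 3,
 * before vmpressure_prio() starts reporting the critical level.
 */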

static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
        return container_of(work, struct vmpressure, work);
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
        struct mem_cgroup *memcg = vmpressure_to_memcg(vmpr);

        memcg = parent_mem_cgroup(memcg);
        if (!memcg)
                return NULL;
        return memcg_to_vmpressure(memcg);
}

enum vmpressure_levels {
        VMPRESSURE_LOW = 0,
        VMPRESSURE_MEDIUM,
        VMPRESSURE_CRITICAL,
        VMPRESSURE_NUM_LEVELS,
};

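/*
 * Event delivery modes, as implemented in vmpressure_event() below:
 *
 * VMPRESSURE_NO_PASSTHROUGH: events propagate up the hierarchy, but only
 *      if no descendant has already been signalled for the same event.
 * VMPRESSURE_HIERARCHY: events are always delivered, including those
 *      propagated from descendants.
 * VMPRESSURE_LOCAL: only events originating from the watched memcg
 *      itself are delivered.
 */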
enum vmpressure_modes {
        VMPRESSURE_NO_PASSTHROUGH = 0,
        VMPRESSURE_HIERARCHY,
        VMPRESSURE_LOCAL,
        VMPRESSURE_NUM_MODES,
};

static const char * const vmpressure_str_levels[] = {
        [VMPRESSURE_LOW] = "low",
        [VMPRESSURE_MEDIUM] = "medium",
        [VMPRESSURE_CRITICAL] = "critical",
};

static const char * const vmpressure_str_modes[] = {
        [VMPRESSURE_NO_PASSTHROUGH] = "default",
        [VMPRESSURE_HIERARCHY] = "hierarchy",
        [VMPRESSURE_LOCAL] = "local",
};

static enum vmpressure_levels vmpressure_level(unsigned long pressure)
{
        if (pressure >= vmpressure_level_critical)
                return VMPRESSURE_CRITICAL;
        else if (pressure >= vmpressure_level_med)
                return VMPRESSURE_MEDIUM;
        return VMPRESSURE_LOW;
}

static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
                                                    unsigned long reclaimed)
{
        unsigned long scale = scanned + reclaimed;
        unsigned long pressure = 0;

        /*
         * reclaimed can be greater than scanned for things such as reclaimed
         * slab pages. shrink_node() just adds reclaimed pages without a
         * related increment to scanned pages.
         */
        if (reclaimed >= scanned)
                goto out;
        /*
         * We calculate the ratio (in percent) of how many pages were
         * scanned vs. reclaimed in a given time frame (window). Note that
         * time is in VM reclaimer's "ticks", i.e. number of pages
         * scanned. This makes it possible to set the desired reaction time
         * and serves as a rate limit.
         */
        pressure = scale - (reclaimed * scale / scanned);
        pressure = pressure * 100 / scale;

out:
        pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
                 scanned, reclaimed);

        return vmpressure_level(pressure);
}
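
/*
 * A worked example with hypothetical numbers: scanned = 512 and
 * reclaimed = 128 give scale = 640, pressure = 640 - 128 * 640 / 512 = 480,
 * and 480 * 100 / 640 = 75, i.e. VMPRESSURE_MEDIUM. The result
 * approximates 100 * (1 - reclaimed/scanned).
 */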

struct vmpressure_event {
        struct eventfd_ctx *efd;
        enum vmpressure_levels level;
        enum vmpressure_modes mode;
        struct list_head node;
};

static bool vmpressure_event(struct vmpressure *vmpr,
                             const enum vmpressure_levels level,
                             bool ancestor, bool signalled)
{
        struct vmpressure_event *ev;
        bool ret = false;

        mutex_lock(&vmpr->events_lock);
        list_for_each_entry(ev, &vmpr->events, node) {
                if (ancestor && ev->mode == VMPRESSURE_LOCAL)
                        continue;
                if (signalled && ev->mode == VMPRESSURE_NO_PASSTHROUGH)
                        continue;
                if (level < ev->level)
                        continue;
                eventfd_signal(ev->efd, 1);
                ret = true;
        }
        mutex_unlock(&vmpr->events_lock);

        return ret;
}

static void vmpressure_work_fn(struct work_struct *work)
{
        struct vmpressure *vmpr = work_to_vmpressure(work);
        unsigned long scanned;
        unsigned long reclaimed;
        enum vmpressure_levels level;
        bool ancestor = false;
        bool signalled = false;

        spin_lock(&vmpr->sr_lock);
        /*
         * Several contexts might be calling vmpressure(), so it is
         * possible that the work was rescheduled again before the old
         * work context cleared the counters. In that case we will run
         * just after the old work returns, but then scanned might be zero
         * here. No need for any locks here since we don't care if
         * vmpr->reclaimed is in sync.
         */
        scanned = vmpr->tree_scanned;
        if (!scanned) {
                spin_unlock(&vmpr->sr_lock);
                return;
        }

        reclaimed = vmpr->tree_reclaimed;
        vmpr->tree_scanned = 0;
        vmpr->tree_reclaimed = 0;
        spin_unlock(&vmpr->sr_lock);

        level = vmpressure_calc_level(scanned, reclaimed);

        do {
                if (vmpressure_event(vmpr, level, ancestor, signalled))
                        signalled = true;
                ancestor = true;
        } while ((vmpr = vmpressure_parent(vmpr)));
}

/**
 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @tree: legacy subtree mode
 * @scanned: number of pages scanned
 * @reclaimed: number of pages reclaimed
 *
 * This function should be called from the vmscan reclaim path to account
 * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
 * pressure index is then further refined and averaged over time.
 *
 * If @tree is set, vmpressure is in traditional userspace reporting
 * mode: @memcg is considered the pressure root and userspace is
 * notified of the entire subtree's reclaim efficiency.
 *
 * If @tree is not set, reclaim efficiency is recorded for @memcg, and
 * only in-kernel users are notified.
 *
 * This function does not return any value.
 */
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
                unsigned long scanned, unsigned long reclaimed)
{
        struct vmpressure *vmpr;

        if (mem_cgroup_disabled())
                return;

        /*
         * The in-kernel users only care about the reclaim efficiency
         * for this @memcg rather than the whole subtree, and there
         * isn't and won't be any in-kernel user in a legacy cgroup.
         */
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !tree)
                return;

        vmpr = memcg_to_vmpressure(memcg);

        /*
         * Here we only want to account pressure that userland is able to
         * help us with. For example, suppose that DMA zone is under
         * pressure; if we notify userland about that kind of pressure,
         * then it will be mostly a waste as it will trigger unnecessary
         * freeing of memory by userland (since userland is more likely to
         * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
         * is why we include only movable, highmem and FS/IO pages.
         * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
         * we account it too.
         */
        if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
                return;

        /*
         * If we got here with no pages scanned, then that is an indicator
         * that the reclaimer was unable to find any shrinkable LRUs at the
         * current scanning depth. But it does not mean that we should
         * report the critical pressure, yet. If the scanning priority
         * (scanning depth) goes too high (deep), we will be notified
         * through vmpressure_prio(). But so far, keep calm.
         */
        if (!scanned)
                return;

        if (tree) {
                spin_lock(&vmpr->sr_lock);
                scanned = vmpr->tree_scanned += scanned;
                vmpr->tree_reclaimed += reclaimed;
                spin_unlock(&vmpr->sr_lock);

                if (scanned < vmpressure_win)
                        return;
                schedule_work(&vmpr->work);
        } else {
                enum vmpressure_levels level;

                /* For now, no users for root-level efficiency */
                if (!memcg || mem_cgroup_is_root(memcg))
                        return;

                spin_lock(&vmpr->sr_lock);
                scanned = vmpr->scanned += scanned;
                reclaimed = vmpr->reclaimed += reclaimed;
                if (scanned < vmpressure_win) {
                        spin_unlock(&vmpr->sr_lock);
                        return;
                }
                vmpr->scanned = vmpr->reclaimed = 0;
                spin_unlock(&vmpr->sr_lock);

                level = vmpressure_calc_level(scanned, reclaimed);

                if (level > VMPRESSURE_LOW) {
                        /*
                         * Let the socket buffer allocator know that
                         * we are having trouble reclaiming LRU pages.
                         *
                         * For hysteresis, keep the pressure state
                         * asserted for a second, during which subsequent
                         * pressure events can occur.
                         */
                        WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
                }
        }
}
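
/*
 * A sketch of how the reclaim path is expected to call this; the exact
 * call sites live in mm/vmscan.c and the variable names here are
 * illustrative, not verbatim:
 *
 *      // per-memcg ("local") accounting while walking the tree:
 *      vmpressure(sc->gfp_mask, memcg, false,
 *                 sc->nr_scanned - scanned, sc->nr_reclaimed - reclaimed);
 *      // subtree ("tree") accounting against the reclaim root:
 *      vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
 *                 sc->nr_scanned - nr_scanned, sc->nr_reclaimed - nr_reclaimed);
 */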

/**
 * vmpressure_prio() - Account memory pressure through reclaimer priority level
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @prio: reclaimer's priority
 *
 * This function should be called from the reclaim path every time the
 * vmscan reclaiming priority (scanning depth) changes.
 *
 * This function does not return any value.
 */
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
{
        /*
         * We only use prio for accounting the critical level. For more
         * info see the comment for the vmpressure_level_critical_prio
         * variable above.
         */
        if (prio > vmpressure_level_critical_prio)
                return;

        /*
         * OK, the prio is below the threshold, so update the vmpressure
         * information before the reclaimer dives into a long, deep-scanning
         * pass. Passing scanned = vmpressure_win, reclaimed = 0 to
         * vmpressure() basically means that we signal the 'critical' level.
         */
        vmpressure(gfp, memcg, true, vmpressure_win, 0);
}
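
/*
 * Expected caller sketch (mm/vmscan.c; the exact shape may vary across
 * kernel versions):
 *
 *      vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, sc->priority);
 */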

#define MAX_VMPRESSURE_ARGS_LEN (strlen("critical") + strlen("hierarchy") + 2)

/**
 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
 * @memcg: memcg that is interested in vmpressure notifications
 * @eventfd: eventfd context to link notifications with
 * @args: event arguments (pressure level threshold, optional mode)
 *
 * This function associates eventfd context with the vmpressure
 * infrastructure, so that the notifications will be delivered to the
 * @eventfd. The @args parameter is a comma-delimited string that denotes a
 * pressure level threshold (one of vmpressure_str_levels, i.e. "low", "medium",
 * or "critical") and an optional mode (one of vmpressure_str_modes, i.e.
 * "hierarchy" or "local").
 *
 * To be used as memcg event method.
 *
 * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
 * not be parsed.
 */
int vmpressure_register_event(struct mem_cgroup *memcg,
                              struct eventfd_ctx *eventfd, const char *args)
{
        struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
        struct vmpressure_event *ev;
        enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
        enum vmpressure_levels level;
        char *spec, *spec_orig;
        char *token;
        int ret = 0;

        spec_orig = spec = kstrndup(args, MAX_VMPRESSURE_ARGS_LEN, GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        /* Find required level */
        token = strsep(&spec, ",");
        ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
        if (ret < 0)
                goto out;
        level = ret;

        /* Find optional mode */
        token = strsep(&spec, ",");
        if (token) {
                ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
                if (ret < 0)
                        goto out;
                mode = ret;
        }

        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev) {
                ret = -ENOMEM;
                goto out;
        }

        ev->efd = eventfd;
        ev->level = level;
        ev->mode = mode;

        mutex_lock(&vmpr->events_lock);
        list_add(&ev->node, &vmpr->events);
        mutex_unlock(&vmpr->events_lock);
        ret = 0;
out:
        kfree(spec_orig);
        return ret;
}
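
/*
 * Userspace usage sketch (cgroup v1; paths assumed and error handling
 * omitted, see Documentation/admin-guide/cgroup-v1/memory.rst):
 *
 *      int efd = eventfd(0, 0);
 *      int lfd = open("/sys/fs/cgroup/memory/grp/memory.pressure_level",
 *                     O_RDONLY);
 *      int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *                     O_WRONLY);
 *      char buf[64];
 *      uint64_t cnt;
 *
 *      snprintf(buf, sizeof(buf), "%d %d low,hierarchy", efd, lfd);
 *      write(cfd, buf, strlen(buf) + 1);
 *      read(efd, &cnt, sizeof(cnt)); // blocks until a notification fires
 */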

/**
 * vmpressure_unregister_event() - Unbind eventfd from vmpressure
 * @memcg: memcg handle
 * @eventfd: eventfd context that was used to link vmpressure with the @memcg
 *
 * This function does internal manipulations to detach the @eventfd from
 * the vmpressure notifications, and then frees internal resources
 * associated with the @eventfd (but the @eventfd itself is not freed).
 *
 * To be used as memcg event method.
 */
void vmpressure_unregister_event(struct mem_cgroup *memcg,
                                 struct eventfd_ctx *eventfd)
{
        struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
        struct vmpressure_event *ev;

        mutex_lock(&vmpr->events_lock);
        list_for_each_entry(ev, &vmpr->events, node) {
                if (ev->efd != eventfd)
                        continue;
                list_del(&ev->node);
                kfree(ev);
                break;
        }
        mutex_unlock(&vmpr->events_lock);
}

/**
 * vmpressure_init() - Initialize vmpressure control structure
 * @vmpr: Structure to be initialized
 *
 * This function should be called on every allocated vmpressure structure
 * before any usage.
 */
void vmpressure_init(struct vmpressure *vmpr)
{
        spin_lock_init(&vmpr->sr_lock);
        mutex_init(&vmpr->events_lock);
        INIT_LIST_HEAD(&vmpr->events);
        INIT_WORK(&vmpr->work, vmpressure_work_fn);
}

/**
 * vmpressure_cleanup() - shuts down vmpressure control structure
 * @vmpr: Structure to be cleaned up
 *
 * This function should be called before the structure in which it is
 * embedded is cleaned up.
 */
void vmpressure_cleanup(struct vmpressure *vmpr)
{
        /*
         * Make sure there is no pending work before eventfd infrastructure
         * goes away.
         */
        flush_work(&vmpr->work);
}