/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT   0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
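
/*
 * Illustrative usage (a sketch, not code from this header): the __ variants
 * use raw_cpu ops and expect the caller either to have preemption disabled
 * or to tolerate a rare misattribution to the wrong CPU; the plain variants
 * are safe from any context. PGFAULT and PGALLOC_NORMAL are example event
 * items, and "order" stands in for an allocation order:
 *
 *	count_vm_event(PGFAULT);
 *	__count_vm_events(PGALLOC_NORMAL, 1 << order);
 */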

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
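
/*
 * __count_zid_vm_events relies on the per-zone event items being laid out
 * contiguously in zone order, so item##_NORMAL - ZONE_NORMAL + zid selects
 * the item for zone index zid. A sketch of a typical call (PGALLOC is an
 * example per-zone item; "order" is assumed to be an allocation order):
 *
 *	__count_zid_vm_events(PGALLOC, zone_idx(zone), 1 << order);
 */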

/*
 * Zone and node-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}
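
/*
 * The read-side helpers below clamp negative sums to zero on SMP: pending
 * per-CPU deltas that have not been folded back yet can transiently drive
 * a global counter below zero, while callers expect an unsigned page count.
 */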

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
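
/*
 * Some node stat items are tracked in bytes (see vmstat_item_in_bytes());
 * global_node_page_state() below warns if asked for one of those, since it
 * reports whole pages. Byte-based items should be read through
 * global_node_page_state_pages() explicitly.
 */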

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * A more accurate version that also folds in the currently pending per-CPU
 * deltas, which requires looping over all online CPUs. There is no
 * synchronization, so the result is still only approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
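
/*
 * Illustrative comparison (a sketch, not code from this header): slow paths
 * that must not act on a stale counter pay for the snapshot, while fast
 * paths settle for the cheaper racy read.
 *
 *	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
 *	unsigned long free_now = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */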

#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}

extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */
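
/*
 * Typical read-side usage (illustrative; NR_FILE_PAGES is just one example
 * node_stat_item):
 *
 *	unsigned long file_pages = node_page_state(pgdat, NR_FILE_PAGES);
 */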

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * No per-CPU differentials are maintained in a single-processor
 * configuration; these functions modify the zone and global counters
 * directly.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * Only atomic operations are used to update counters here, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif		/* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
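
/*
 * Sketch of a caller (illustrative, not code from this header): the buddy
 * allocator adjusts the free counters as pages enter or leave the free
 * lists, passing a negative nr_pages on allocation.
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 */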

extern const char * const vmstat_text[];

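/*
 * vmstat_text is laid out in the order the helpers below assume: zone stat
 * items, then NUMA event items, then node stat items, then writeback stat
 * items, then vm event items. The offsets in the *_name() helpers must
 * match that layout.
 */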
static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
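
/*
 * Example (illustrative): node_stat_name(NR_LRU_BASE + LRU_INACTIVE_ANON)
 * is "nr_inactive_anon", so lru_list_name(LRU_INACTIVE_ANON) yields
 * "inactive_anon". Every LRU stat name begins with "nr_", hence the +3.
 */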

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __mod_lruvec_page_state(struct page *page,
			     enum node_stat_item idx, int val);

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}
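
/*
 * As with the zone/node helpers above, the __ variants assume the caller
 * has interrupts disabled; the plain wrappers are usable from any context.
 */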

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

#endif /* _LINUX_VMSTAT_H */