Lines Matching +full:int +full:- +full:threshold
1 // SPDX-License-Identifier: GPL-2.0-only
11 * Copyright (C) 2008-2014 Christoph Lameter
35 int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
40 int item, cpu; in zero_zone_numa_counters()
43 atomic_long_set(&zone->vm_numa_event[item], 0); in zero_zone_numa_counters()
45 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item] in zero_zone_numa_counters()
63 int item; in zero_global_numa_counters()
77 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, in sysctl_vm_numa_stat_handler()
80 int ret, oldval; in sysctl_vm_numa_stat_handler()
112 int cpu; in sum_vm_events()
113 int i; in sum_vm_events()
121 ret[i] += this->event[i]; in sum_vm_events()
127 * The result is unavoidably approximate - it can change
144 void vm_events_fold_cpu(int cpu) in vm_events_fold_cpu()
147 int i; in vm_events_fold_cpu()
150 count_vm_events(i, fold_state->event[i]); in vm_events_fold_cpu()
151 fold_state->event[i] = 0; in vm_events_fold_cpu()
172 int cpu; in fold_vm_zone_numa_events()
178 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in fold_vm_zone_numa_events()
180 zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0); in fold_vm_zone_numa_events()
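fold_vm_zone_numa_events() takes each CPU's pending NUMA event counts with xchg(), so a concurrent updater can never be torn or double-counted. A minimal C11 model of that take-and-accumulate step (both names here are illustrative, not kernel API):

#include <stdatomic.h>

/* Atomically take whatever accumulated in a per-CPU event counter and
 * add it to a zone-wide sum, as fold_vm_zone_numa_events() does with
 * xchg(&pzstats->vm_numa_event[item], 0). */
static unsigned long fold_one_event(_Atomic unsigned long *percpu_event,
                                    unsigned long *zone_sum)
{
        unsigned long v = atomic_exchange(percpu_event, 0);

        *zone_sum += v;
        return v;
}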
198 int calculate_pressure_threshold(struct zone *zone) in calculate_pressure_threshold()
200 int threshold; in calculate_pressure_threshold()
201 int watermark_distance; in calculate_pressure_threshold()
207 * value looks fine. The pressure threshold is a reduced value such in calculate_pressure_threshold()
211 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); in calculate_pressure_threshold()
212 threshold = max(1, (int)(watermark_distance / num_online_cpus())); in calculate_pressure_threshold()
215 * Maximum threshold is 125 in calculate_pressure_threshold()
217 threshold = min(125, threshold); in calculate_pressure_threshold()
219 return threshold; in calculate_pressure_threshold()
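A stand-alone sketch of the same arithmetic, assuming invented watermark values (the kernel reads them from the zone): the pressure threshold is the min..low watermark gap divided across the online CPUs, clamped to [1, 125].

#include <stdio.h>

static int pressure_threshold(long low_wmark, long min_wmark, int online_cpus)
{
        long watermark_distance = low_wmark - min_wmark;
        int threshold = watermark_distance / online_cpus;

        if (threshold < 1)
                threshold = 1;          /* max(1, ...) as in the kernel */
        if (threshold > 125)
                threshold = 125;        /* same cap as the normal threshold */
        return threshold;
}

int main(void)
{
        /* 16 CPUs with 4096 pages between min and low: 4096/16 = 256,
         * which the cap reduces to 125. */
        printf("%d\n", pressure_threshold(8192, 4096, 16));
        return 0;
}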
222 int calculate_normal_threshold(struct zone *zone) in calculate_normal_threshold()
224 int threshold; in calculate_normal_threshold()
225 int mem; /* memory in 128 MB units */ in calculate_normal_threshold()
228 * The threshold scales with the number of processors and the amount in calculate_normal_threshold()
235 * Threshold   Processors   (fls)   Zonesize    fls(mem)+1 in calculate_normal_threshold()
236 * ------------------------------------------------------------------ in calculate_normal_threshold()
237 *       8          1          1     0.9-1 GB        4 in calculate_normal_threshold()
238 *      16          2          2     0.9-1 GB        4 in calculate_normal_threshold()
239 *      20          2          2     1-2 GB          5 in calculate_normal_threshold()
240 *      24          2          2     2-4 GB          6 in calculate_normal_threshold()
241 *      28          2          2     4-8 GB          7 in calculate_normal_threshold()
242 *      32          2          2     8-16 GB         8 in calculate_normal_threshold()
244 *      30          4          3     2-4 GB          5 in calculate_normal_threshold()
245 *      48          4          3     8-16 GB         8 in calculate_normal_threshold()
246 *      32          8          4     1-2 GB          4 in calculate_normal_threshold()
247 *      32          8          4     0.9-1 GB        4 in calculate_normal_threshold()
250 *      70         64          7     2-4 GB          5 in calculate_normal_threshold()
251 *      84         64          7     4-8 GB          6 in calculate_normal_threshold()
252 *     108        512          9     4-8 GB          6 in calculate_normal_threshold()
253 *     125       1024         10     8-16 GB         8 in calculate_normal_threshold()
254 *     125       1024         10     16-32 GB        9 in calculate_normal_threshold()
257 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT); in calculate_normal_threshold()
259 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)); in calculate_normal_threshold()
262 * Maximum threshold is 125 in calculate_normal_threshold()
264 threshold = min(125, threshold); in calculate_normal_threshold()
266 return threshold; in calculate_normal_threshold()
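The table above is generated by threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)), with mem counted in 128 MB units. A hypothetical userspace model that reproduces one table row (fls() here re-implements the kernel helper; the model takes bytes rather than pages, which is equivalent to the managed_pages >> (27 - PAGE_SHIFT) form):

#include <stdio.h>

/* 1-based index of the most significant set bit, like the kernel's fls(). */
static int fls(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

static int normal_threshold(unsigned long long managed_bytes, int online_cpus)
{
        int mem = managed_bytes >> 27;  /* zone size in 128 MB units */
        int threshold = 2 * fls(online_cpus) * (1 + fls(mem));

        return threshold > 125 ? 125 : threshold;
}

int main(void)
{
        /* 2 CPUs, 2 GB zone: mem = 16, so 2 * fls(2) * (1 + fls(16))
         * = 2 * 2 * 6 = 24 - the "24 2 2 2-4 GB 6" row. */
        printf("%d\n", normal_threshold(2ULL << 30, 2));
        return 0;
}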
276 int cpu; in refresh_zone_stat_thresholds()
277 int threshold; in refresh_zone_stat_thresholds()
282 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0; in refresh_zone_stat_thresholds()
287 struct pglist_data *pgdat = zone->zone_pgdat; in refresh_zone_stat_thresholds()
290 threshold = calculate_normal_threshold(zone); in refresh_zone_stat_thresholds()
293 int pgdat_threshold; in refresh_zone_stat_thresholds()
295 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold in refresh_zone_stat_thresholds()
296 = threshold; in refresh_zone_stat_thresholds()
298 /* Base nodestat threshold on the largest populated zone. */ in refresh_zone_stat_thresholds()
299 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold; in refresh_zone_stat_thresholds()
300 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold in refresh_zone_stat_thresholds()
301 = max(threshold, pgdat_threshold); in refresh_zone_stat_thresholds()
309 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); in refresh_zone_stat_thresholds()
310 max_drift = num_online_cpus() * threshold; in refresh_zone_stat_thresholds()
312 zone->percpu_drift_mark = high_wmark_pages(zone) + in refresh_zone_stat_thresholds()
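The drift mark caps how far the global free-page count may lag reality: with each of N online CPUs allowed to hold up to threshold uncounted pages, a cheap read can under-report by N * threshold. A sketch of that computation under the same assumptions (a zero return means no drift mark is needed; all parameters are illustrative):

static long percpu_drift_mark(long high_wmark, long low_wmark,
                              long min_wmark, int cpus, int threshold)
{
        long tolerate_drift = low_wmark - min_wmark;
        long max_drift = (long)cpus * threshold;

        /* Exact (summed) reads near the watermark are only worth the
         * expense if worst-case drift could actually breach the gap. */
        return max_drift > tolerate_drift ? high_wmark + max_drift : 0;
}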
318 int (*calculate_pressure)(struct zone *)) in set_pgdat_percpu_threshold()
321 int cpu; in set_pgdat_percpu_threshold()
322 int threshold; in set_pgdat_percpu_threshold()
323 int i; in set_pgdat_percpu_threshold()
325 for (i = 0; i < pgdat->nr_zones; i++) { in set_pgdat_percpu_threshold()
326 zone = &pgdat->node_zones[i]; in set_pgdat_percpu_threshold()
327 if (!zone->percpu_drift_mark) in set_pgdat_percpu_threshold()
330 threshold = (*calculate_pressure)(zone); in set_pgdat_percpu_threshold()
332 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold in set_pgdat_percpu_threshold()
333 = threshold; in set_pgdat_percpu_threshold()
345 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in __mod_zone_page_state()
346 s8 __percpu *p = pcp->vm_stat_diff + item; in __mod_zone_page_state()
352 * atomicity is provided by IRQs being disabled -- either explicitly in __mod_zone_page_state()
361 t = __this_cpu_read(pcp->stat_threshold); in __mod_zone_page_state()
376 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __mod_node_page_state()
377 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __mod_node_page_state()
386 * internally to keep the per-cpu counters compact. in __mod_node_page_state()
388 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); in __mod_node_page_state()
397 t = __this_cpu_read(pcp->stat_threshold); in __mod_node_page_state()
434 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in __inc_zone_state()
435 s8 __percpu *p = pcp->vm_stat_diff + item; in __inc_zone_state()
442 t = __this_cpu_read(pcp->stat_threshold); in __inc_zone_state()
447 __this_cpu_write(*p, -overstep); in __inc_zone_state()
455 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __inc_node_state()
456 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __inc_node_state()
465 t = __this_cpu_read(pcp->stat_threshold); in __inc_node_state()
470 __this_cpu_write(*p, -overstep); in __inc_node_state()
490 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in __dec_zone_state()
491 s8 __percpu *p = pcp->vm_stat_diff + item; in __dec_zone_state()
498 t = __this_cpu_read(pcp->stat_threshold); in __dec_zone_state()
499 if (unlikely(v < -t)) { in __dec_zone_state()
502 zone_page_state_add(v - overstep, zone, item); in __dec_zone_state()
511 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in __dec_node_state()
512 s8 __percpu *p = pcp->vm_node_stat_diff + item; in __dec_node_state()
521 t = __this_cpu_read(pcp->stat_threshold); in __dec_node_state()
522 if (unlikely(v < -t)) { in __dec_node_state()
525 node_page_state_add(v - overstep, pgdat, item); in __dec_node_state()
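All four __{inc,dec}_*_state() helpers share one idea: bump a small per-cpu s8 diff, and only when it oversteps the threshold fold it into the global atomic, parking the diff half a threshold on the other side so the next fold is a full threshold's worth of updates away. A single-CPU C11 model of the increment path (the kernel gets its atomicity from disabled IRQs rather than locks; the struct and names are illustrative):

#include <stdatomic.h>

struct counter_model {
        atomic_long global;     /* plays zone->vm_stat[item] */
        signed char diff;       /* plays the per-cpu vm_stat_diff[item] */
        signed char threshold;  /* plays the per-cpu stat_threshold */
};

static void counter_inc(struct counter_model *c)
{
        signed char v = ++c->diff;
        signed char t = c->threshold;

        if (v > t) {
                signed char overstep = t >> 1;

                /* Fold the diff plus half a threshold into the global
                 * counter, then park the diff at -t/2. */
                atomic_fetch_add(&c->global, v + overstep);
                c->diff = -overstep;
        }
}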
554 * 1 Overstepping half of threshold
555 * -1 Overstepping minus half of threshold
558 enum zone_stat_item item, long delta, int overstep_mode) in mod_zone_state()
560 struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats; in mod_zone_state()
561 s8 __percpu *p = pcp->vm_stat_diff + item; in mod_zone_state()
569 * a counter threshold to the wrong cpu if we get in mod_zone_state()
571 * counter update will apply the threshold again and in mod_zone_state()
572 * therefore bring the counter under the threshold again. in mod_zone_state()
577 t = this_cpu_read(pcp->stat_threshold); in mod_zone_state()
583 int os = overstep_mode * (t >> 1); in mod_zone_state()
587 n = -os; in mod_zone_state()
610 mod_zone_state(page_zone(page), item, -1, -1); in dec_zone_page_state()
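mod_zone_state() achieves the same folding without disabling IRQs: the per-cpu diff is updated in a cmpxchg loop, so a threshold read that races with migration is harmless - at worst the next update folds again, as the comment above notes. An illustrative C11 translation of that loop (the kernel uses this_cpu_cmpxchg() on the real per-cpu variable; overstep_mode is -1, 0 or 1):

#include <stdatomic.h>

static void mod_state(_Atomic signed char *diff, atomic_long *global,
                      signed char t, long delta, int overstep_mode)
{
        signed char o, n;
        long z;

        do {
                z = 0;
                o = atomic_load(diff);
                n = o + delta;
                if (n > t || n < -t) {
                        int os = overstep_mode * (t >> 1);

                        z = n + os;     /* overflow for the global counter */
                        n = -os;        /* park the diff at -/+ t/2 */
                }
        } while (!atomic_compare_exchange_weak(diff, &o, n));

        if (z)
                atomic_fetch_add(global, z);
}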
615 enum node_stat_item item, int delta, int overstep_mode) in mod_node_state()
617 struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats; in mod_node_state()
618 s8 __percpu *p = pcp->vm_node_stat_diff + item; in mod_node_state()
626 * internally to keep the per-cpu counters compact. in mod_node_state()
628 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); in mod_node_state()
637 * a counter threshold to the wrong cpu if we get in mod_node_state()
639 * counter update will apply the threshold again and in mod_node_state()
640 * therefore bring the counter under the threshold again. in mod_node_state()
645 t = this_cpu_read(pcp->stat_threshold); in mod_node_state()
651 int os = overstep_mode * (t >> 1); in mod_node_state()
655 n = -os; in mod_node_state()
683 mod_node_state(page_pgdat(page), item, -1, -1); in dec_node_page_state()
771 static int fold_diff(int *zone_diff, int *node_diff) in fold_diff()
773 int i; in fold_diff()
774 int changes = 0; in fold_diff()
806 static int refresh_cpu_vm_stats(bool do_pagesets) in refresh_cpu_vm_stats()
810 int i; in refresh_cpu_vm_stats()
811 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; in refresh_cpu_vm_stats()
812 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, }; in refresh_cpu_vm_stats()
813 int changes = 0; in refresh_cpu_vm_stats()
816 struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats; in refresh_cpu_vm_stats()
818 struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset; in refresh_cpu_vm_stats()
822 int v; in refresh_cpu_vm_stats()
824 v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0); in refresh_cpu_vm_stats()
827 atomic_long_add(v, &zone->vm_stat[i]); in refresh_cpu_vm_stats()
831 __this_cpu_write(pcp->expire, 3); in refresh_cpu_vm_stats()
846 if (!__this_cpu_read(pcp->expire) || in refresh_cpu_vm_stats()
847 !__this_cpu_read(pcp->count)) in refresh_cpu_vm_stats()
854 __this_cpu_write(pcp->expire, 0); in refresh_cpu_vm_stats()
858 if (__this_cpu_dec_return(pcp->expire)) in refresh_cpu_vm_stats()
861 if (__this_cpu_read(pcp->count)) { in refresh_cpu_vm_stats()
870 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats; in refresh_cpu_vm_stats()
873 int v; in refresh_cpu_vm_stats()
875 v = this_cpu_xchg(p->vm_node_stat_diff[i], 0); in refresh_cpu_vm_stats()
877 atomic_long_add(v, &pgdat->vm_stat[i]); in refresh_cpu_vm_stats()
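Besides folding diffs, refresh_cpu_vm_stats() ages remote per-cpu page lists through pcp->expire: stat activity re-arms a three-tick countdown, and a pageset whose countdown hits zero while it still caches pages gets drained. A toy model of that decision (it omits the kernel's extra rule that local-node pagesets have expire forced to 0 and are never drained this way):

#include <stdbool.h>

struct pcp_model {
        int expire;     /* ticks until this pageset counts as idle */
        int count;      /* pages currently cached */
};

static bool should_drain(struct pcp_model *pcp, bool stats_changed)
{
        if (stats_changed) {
                pcp->expire = 3;        /* recently used: keep its pages */
                return false;
        }
        if (!pcp->expire || !pcp->count)
                return false;           /* not armed, or nothing cached */
        if (--pcp->expire)
                return false;           /* still counting down */
        return pcp->count != 0;         /* re-check, as the kernel does */
}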
892 void cpu_vm_stats_fold(int cpu) in cpu_vm_stats_fold()
896 int i; in cpu_vm_stats_fold()
897 int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; in cpu_vm_stats_fold()
898 int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, }; in cpu_vm_stats_fold()
903 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in cpu_vm_stats_fold()
906 if (pzstats->vm_stat_diff[i]) { in cpu_vm_stats_fold()
907 int v; in cpu_vm_stats_fold()
909 v = pzstats->vm_stat_diff[i]; in cpu_vm_stats_fold()
910 pzstats->vm_stat_diff[i] = 0; in cpu_vm_stats_fold()
911 atomic_long_add(v, &zone->vm_stat[i]); in cpu_vm_stats_fold()
917 if (pzstats->vm_numa_event[i]) { in cpu_vm_stats_fold()
920 v = pzstats->vm_numa_event[i]; in cpu_vm_stats_fold()
921 pzstats->vm_numa_event[i] = 0; in cpu_vm_stats_fold()
931 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); in cpu_vm_stats_fold()
934 if (p->vm_node_stat_diff[i]) { in cpu_vm_stats_fold()
935 int v; in cpu_vm_stats_fold()
937 v = p->vm_node_stat_diff[i]; in cpu_vm_stats_fold()
938 p->vm_node_stat_diff[i] = 0; in cpu_vm_stats_fold()
939 atomic_long_add(v, &pgdat->vm_stat[i]); in cpu_vm_stats_fold()
949 * pset->vm_stat_diff[] exist.
954 int i; in drain_zonestat()
957 if (pzstats->vm_stat_diff[i]) { in drain_zonestat()
958 v = pzstats->vm_stat_diff[i]; in drain_zonestat()
959 pzstats->vm_stat_diff[i] = 0; in drain_zonestat()
966 if (pzstats->vm_numa_event[i]) { in drain_zonestat()
967 v = pzstats->vm_numa_event[i]; in drain_zonestat()
968 pzstats->vm_numa_event[i] = 0; in drain_zonestat()
982 unsigned long sum_zone_node_page_state(int node, in sum_zone_node_page_state()
985 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_node_page_state()
986 int i; in sum_zone_node_page_state()
996 unsigned long sum_zone_numa_event_state(int node, in sum_zone_numa_event_state()
999 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_numa_event_state()
1001 int i; in sum_zone_numa_event_state()
1015 long x = atomic_long_read(&pgdat->vm_stat[item]); in node_page_state_pages()
1049 unsigned int suitable_order, in fill_contig_page_info()
1052 unsigned int order; in fill_contig_page_info()
1054 info->free_pages = 0; in fill_contig_page_info()
1055 info->free_blocks_total = 0; in fill_contig_page_info()
1056 info->free_blocks_suitable = 0; in fill_contig_page_info()
1067 blocks = data_race(zone->free_area[order].nr_free); in fill_contig_page_info()
1068 info->free_blocks_total += blocks; in fill_contig_page_info()
1071 info->free_pages += blocks << order; in fill_contig_page_info()
1075 info->free_blocks_suitable += blocks << in fill_contig_page_info()
1076 (order - suitable_order); in fill_contig_page_info()
1087 static int __fragmentation_index(unsigned int order, struct contig_page_info *info) in __fragmentation_index()
1094 if (!info->free_blocks_total) in __fragmentation_index()
1098 if (info->free_blocks_suitable) in __fragmentation_index()
1099 return -1000; in __fragmentation_index()
1107 return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total); in __fragmentation_index()
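Plugging numbers into the index makes the two failure modes concrete: values near 0 mean not enough memory, values near 1000 mean enough pages but too fragmented. A worked example with invented numbers - an order-4 request (16 contiguous pages) against 160 free pages that are all isolated order-0 blocks:

#include <stdio.h>

int main(void)
{
        unsigned long requested = 1UL << 4;     /* 16 pages wanted */
        unsigned long free_pages = 160;         /* plenty of memory... */
        unsigned long free_blocks_total = 160;  /* ...in useless pieces */

        /* 1000 - (1000 + 160*1000/16) / 160 = 1000 - 11000/160 = 932:
         * failure would be almost entirely due to fragmentation. */
        printf("%lu\n", 1000 - (1000 + free_pages * 1000 / requested)
                        / free_blocks_total);
        return 0;
}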
1115 unsigned int extfrag_for_order(struct zone *zone, unsigned int order) in extfrag_for_order()
1123 return div_u64((info.free_pages - in extfrag_for_order()
1129 int fragmentation_index(struct zone *zone, unsigned int order) in fragmentation_index()
1425 --node; in frag_start()
1451 struct zone *node_zones = pgdat->node_zones; in walk_zones_in_node()
1454 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { in walk_zones_in_node()
1459 spin_lock_irqsave(&zone->lock, flags); in walk_zones_in_node()
1462 spin_unlock_irqrestore(&zone->lock, flags); in walk_zones_in_node()
1471 int order; in frag_show_print()
1473 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in frag_show_print()
1479 seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free)); in frag_show_print()
1486 static int frag_show(struct seq_file *m, void *arg) in frag_show()
1496 int order, mtype; in pagetypeinfo_showfree_print()
1500 pgdat->node_id, in pagetypeinfo_showfree_print()
1501 zone->name, in pagetypeinfo_showfree_print()
1509 area = &(zone->free_area[order]); in pagetypeinfo_showfree_print()
1511 list_for_each(curr, &area->free_list[mtype]) { in pagetypeinfo_showfree_print()
1527 spin_unlock_irq(&zone->lock); in pagetypeinfo_showfree_print()
1529 spin_lock_irq(&zone->lock); in pagetypeinfo_showfree_print()
1538 int order; in pagetypeinfo_showfree()
1542 seq_printf(m, "%-43s ", "Free pages count per migrate type at order"); in pagetypeinfo_showfree()
1553 int mtype; in pagetypeinfo_showblockcount_print()
1555 unsigned long start_pfn = zone->zone_start_pfn; in pagetypeinfo_showblockcount_print()
1576 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in pagetypeinfo_showblockcount_print()
1585 int mtype; in pagetypeinfo_showblockcount()
1588 seq_printf(m, "\n%-23s", "Number of blocks type "); in pagetypeinfo_showblockcount()
1605 int mtype; in pagetypeinfo_showmixedcount()
1612 seq_printf(m, "\n%-23s", "Number of mixed blocks "); in pagetypeinfo_showmixedcount()
1626 static int pagetypeinfo_show(struct seq_file *m, void *arg) in pagetypeinfo_show()
1631 if (!node_state(pgdat->node_id, N_MEMORY)) in pagetypeinfo_show()
1660 int zid; in is_zone_first_populated()
1663 struct zone *compare = &pgdat->node_zones[zid]; in is_zone_first_populated()
1675 int i; in zoneinfo_show_print()
1676 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name); in zoneinfo_show_print()
1678 seq_printf(m, "\n per-node stats"); in zoneinfo_show_print()
1684 seq_printf(m, "\n %-12s %lu", node_stat_name(i), in zoneinfo_show_print()
1699 zone->watermark_boost, in zoneinfo_show_print()
1703 zone->spanned_pages, in zoneinfo_show_print()
1704 zone->present_pages, in zoneinfo_show_print()
1710 zone->lowmem_reserve[0]); in zoneinfo_show_print()
1711 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) in zoneinfo_show_print()
1712 seq_printf(m, ", %ld", zone->lowmem_reserve[i]); in zoneinfo_show_print()
1722 seq_printf(m, "\n %-12s %lu", zone_stat_name(i), in zoneinfo_show_print()
1727 seq_printf(m, "\n %-12s %lu", numa_stat_name(i), in zoneinfo_show_print()
1736 pcp = per_cpu_ptr(zone->per_cpu_pageset, i); in zoneinfo_show_print()
1743 pcp->count, in zoneinfo_show_print()
1744 pcp->high, in zoneinfo_show_print()
1745 pcp->batch); in zoneinfo_show_print()
1747 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i); in zoneinfo_show_print()
1748 seq_printf(m, "\n vm stats threshold: %d", in zoneinfo_show_print()
1749 pzstats->stat_threshold); in zoneinfo_show_print()
1755 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES, in zoneinfo_show_print()
1756 zone->zone_start_pfn); in zoneinfo_show_print()
1766 static int zoneinfo_show(struct seq_file *m, void *arg) in zoneinfo_show()
1791 int i; in vmstat_start()
1799 m->private = v; in vmstat_start()
1801 return ERR_PTR(-ENOMEM); in vmstat_start()
1825 v[PGPGIN] /= 2; /* sectors -> kbytes */ in vmstat_start()
1828 return (unsigned long *)m->private + *pos; in vmstat_start()
1836 return (unsigned long *)m->private + *pos; in vmstat_next()
1839 static int vmstat_show(struct seq_file *m, void *arg) in vmstat_show()
1842 unsigned long off = l - (unsigned long *)m->private; in vmstat_show()
1848 if (off == NR_VMSTAT_ITEMS - 1) { in vmstat_show()
1850 * We've come to the end - add any deprecated counters to avoid in vmstat_show()
1860 kfree(m->private); in vmstat_stop()
1861 m->private = NULL; in vmstat_stop()
1874 int sysctl_stat_interval __read_mostly = HZ;
1882 int vmstat_refresh(struct ctl_table *table, int write, in vmstat_refresh()
1886 int err; in vmstat_refresh()
1887 int i; in vmstat_refresh()
1959 static bool need_update(int cpu) in need_update()
1965 struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in need_update()
1971 if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff))) in need_update()
1974 if (last_pgdat == zone->zone_pgdat) in need_update()
1976 last_pgdat = zone->zone_pgdat; in need_update()
1977 n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu); in need_update()
1978 if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff))) in need_update()
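need_update() answers "does this CPU have any pending diff?" without iterating the items one by one: memchr_inv(buf, 0, size) returns the first byte that differs from 0, so a non-NULL result means work is pending. A portable sketch of the same check:

#include <stdbool.h>
#include <stddef.h>

static bool any_nonzero(const signed char *diffs, size_t n)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (diffs[i])
                        return true;    /* memchr_inv() would be non-NULL */
        return false;
}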
2021 int cpu; in vmstat_shepherd()
2055 int cpu; in start_shepherd_timer()
2067 int node; in init_cpu_node_state()
2075 static int vmstat_cpu_online(unsigned int cpu) in vmstat_cpu_online()
2086 static int vmstat_cpu_down_prep(unsigned int cpu) in vmstat_cpu_down_prep()
2092 static int vmstat_cpu_dead(unsigned int cpu) in vmstat_cpu_dead()
2095 int node; in vmstat_cpu_dead()
2115 int ret __maybe_unused; in init_mm_internals()
2151 static int unusable_free_index(unsigned int order, in unusable_free_index()
2155 if (info->free_pages == 0) in unusable_free_index()
2165 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages); in unusable_free_index()
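A worked example with invented numbers: out of 100 free pages, four order-3 blocks (32 pages) could serve an order-3 request, so 680 parts in 1000 of free memory are unusable at that order.

#include <stdio.h>

int main(void)
{
        unsigned int order = 3;
        unsigned long free_pages = 100;
        unsigned long free_blocks_suitable = 4;

        /* (100 - (4 << 3)) * 1000 / 100 = 680 */
        printf("%lu\n", (free_pages - (free_blocks_suitable << order))
                        * 1000 / free_pages);
        return 0;
}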
2172 unsigned int order; in unusable_show_print()
2173 int index; in unusable_show_print()
2177 pgdat->node_id, in unusable_show_print()
2178 zone->name); in unusable_show_print()
2197 static int unusable_show(struct seq_file *m, void *arg) in unusable_show()
2202 if (!node_state(pgdat->node_id, N_MEMORY)) in unusable_show()
2222 unsigned int order; in extfrag_show_print()
2223 int index; in extfrag_show_print()
2229 pgdat->node_id, in extfrag_show_print()
2230 zone->name); in extfrag_show_print()
2243 static int extfrag_show(struct seq_file *m, void *arg) in extfrag_show()
2261 static int __init extfrag_debug_init(void) in extfrag_debug_init()