Lines matching refs:pgdat in mm/vmscan.c

273 static void set_memcg_congestion(pg_data_t *pgdat, in set_memcg_congestion() argument
282 mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id); in set_memcg_congestion()
286 static bool memcg_congested(pg_data_t *pgdat, in memcg_congested() argument
291 mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id); in memcg_congested()
315 static inline void set_memcg_congestion(struct pglist_data *pgdat, in set_memcg_congestion() argument
320 static inline bool memcg_congested(struct pglist_data *pgdat, in memcg_congested() argument
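
Lines 273-291 above are the CONFIG_MEMCG implementations; lines 315-320 are the no-op stubs compiled when memcg is disabled. The congestion state lives in the per-memcg, per-node structure reached through mem_cgroup_nodeinfo(). A minimal sketch of the accessor pair, assuming this kernel era's struct mem_cgroup_per_node carries a boolean "congested" field (the field name is inferred, not shown in the listing):

    /* per-memcg analogue of the node-wide PGDAT_CONGESTED bit */
    static void set_memcg_congestion(pg_data_t *pgdat,
                                     struct mem_cgroup *memcg, bool congested)
    {
        struct mem_cgroup_per_node *mn;

        if (!memcg)
            return;

        mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
        WRITE_ONCE(mn->congested, congested);
    }

    static bool memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
    {
        struct mem_cgroup_per_node *mn;

        mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
        return READ_ONCE(mn->congested);
    }
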
1120 struct pglist_data *pgdat, in shrink_page_list() argument
1237 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { in shrink_page_list()
1371 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { in shrink_page_list()
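
Inside shrink_page_list() (line 1120), the node flags set elsewhere are consumed: line 1237 gates stalling on a page under writeback, line 1371 gates writing dirty pages directly from reclaim. A distilled predicate for the writeback case; the helper name is invented for illustration:

    /*
     * Distillation of the test at line 1237: only kswapd may stall
     * on a writeback page, and only once the whole node has been
     * flagged as saturated with writeback (PGDAT_WRITEBACK).
     * Direct reclaimers skip the page and keep making progress.
     */
    static bool may_stall_on_writeback(struct page *page,
                                       struct pglist_data *pgdat)
    {
        return current_is_kswapd() &&
               PageReclaim(page) &&
               test_bit(PGDAT_WRITEBACK, &pgdat->flags);
    }
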
1804 pg_data_t *pgdat = page_pgdat(page); in isolate_lru_page() local
1807 spin_lock_irq(&pgdat->lru_lock); in isolate_lru_page()
1808 lruvec = mem_cgroup_page_lruvec(page, pgdat); in isolate_lru_page()
1816 spin_unlock_irq(&pgdat->lru_lock); in isolate_lru_page()
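
Lines 1804-1816 show the canonical single-page isolation pattern: resolve the owning node with page_pgdat(), take that node's lru_lock, and only then look up the page's lruvec, because the page-to-lruvec association is only stable under the lock. A minimal sketch with the error paths trimmed (the wrapper name is hypothetical):

    static int isolate_one_page(struct page *page)
    {
        pg_data_t *pgdat = page_pgdat(page);    /* owning node */
        struct lruvec *lruvec;
        int ret = -EBUSY;

        spin_lock_irq(&pgdat->lru_lock);
        if (PageLRU(page)) {
            int lru = page_lru(page);

            /* lruvec is per-memcg, per-node: resolve it under
             * lru_lock so the association cannot change */
            lruvec = mem_cgroup_page_lruvec(page, pgdat);
            get_page(page);
            ClearPageLRU(page);
            del_page_from_lru_list(page, lruvec, lru);
            ret = 0;
        }
        spin_unlock_irq(&pgdat->lru_lock);
        return ret;
    }
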
1828 static int too_many_isolated(struct pglist_data *pgdat, int file, in too_many_isolated() argument
1840 inactive = node_page_state(pgdat, NR_INACTIVE_FILE); in too_many_isolated()
1841 isolated = node_page_state(pgdat, NR_ISOLATED_FILE); in too_many_isolated()
1843 inactive = node_page_state(pgdat, NR_INACTIVE_ANON); in too_many_isolated()
1844 isolated = node_page_state(pgdat, NR_ISOLATED_ANON); in too_many_isolated()
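
too_many_isolated() (lines 1828-1844) throttles direct reclaim when too much of a node's inactive list is already sitting in other reclaimers' private lists. A sketch of the core comparison; the real function also exempts kswapd and relaxes the limit for __GFP_IO/__GFP_FS-constrained allocations, both omitted here, and the wrapper name is invented:

    static bool node_too_many_isolated(struct pglist_data *pgdat, int file)
    {
        unsigned long inactive, isolated;

        if (file) {
            inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
            isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
        } else {
            inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
            isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
        }

        /* back off once isolated pages rival the inactive list */
        return isolated > inactive;
    }
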
1881 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in move_pages_to_lru() local
1892 spin_unlock_irq(&pgdat->lru_lock); in move_pages_to_lru()
1894 spin_lock_irq(&pgdat->lru_lock); in move_pages_to_lru()
1897 lruvec = mem_cgroup_page_lruvec(page, pgdat); in move_pages_to_lru()
1912 spin_unlock_irq(&pgdat->lru_lock); in move_pages_to_lru()
1914 spin_lock_irq(&pgdat->lru_lock); in move_pages_to_lru()
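
move_pages_to_lru() (lines 1881-1914) shows the standard trick for calling a slow helper while walking a list under lru_lock: drop the lock, do the work, retake the lock, and re-derive the lruvec, since consecutive pages can belong to different memcgs. A condensed sketch; the wrapper name and the take/release at the edges are simplifications (the real function is entered with the lock already held):

    static void putback_survivors(struct lruvec *lruvec, struct list_head *list)
    {
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);

        spin_lock_irq(&pgdat->lru_lock);
        while (!list_empty(list)) {
            struct page *page = lru_to_page(list);

            list_del(&page->lru);
            if (unlikely(!page_evictable(page))) {
                /* cycle the lock around the slow path */
                spin_unlock_irq(&pgdat->lru_lock);
                putback_lru_page(page);
                spin_lock_irq(&pgdat->lru_lock);
                continue;
            }
            /* this page may sit in a different memcg */
            lruvec = mem_cgroup_page_lruvec(page, pgdat);
            SetPageLRU(page);
            add_page_to_lru_list(page, lruvec, page_lru(page));
        }
        spin_unlock_irq(&pgdat->lru_lock);
    }
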
1958 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_inactive_list() local
1962 while (unlikely(too_many_isolated(pgdat, file, sc))) { in shrink_inactive_list()
1977 spin_lock_irq(&pgdat->lru_lock); in shrink_inactive_list()
1982 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); in shrink_inactive_list()
1989 spin_unlock_irq(&pgdat->lru_lock); in shrink_inactive_list()
1994 nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0, in shrink_inactive_list()
1997 spin_lock_irq(&pgdat->lru_lock); in shrink_inactive_list()
2008 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
2010 spin_unlock_irq(&pgdat->lru_lock); in shrink_inactive_list()
2038 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in shrink_inactive_list()
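
shrink_inactive_list() (lines 1958-2038) strings the previous pieces together. The essential structure: lru_lock is held only around list surgery and the NR_ISOLATED accounting, never around the reclaim of the isolated batch. A condensed skeleton, assuming this kernel era's helper signatures (the wrapper name is hypothetical):

    static unsigned long shrink_inactive_sketch(unsigned long nr_to_scan,
                                                struct lruvec *lruvec,
                                                struct scan_control *sc,
                                                enum lru_list lru)
    {
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
        int file = is_file_lru(lru);
        unsigned long nr_taken, nr_scanned, nr_reclaimed;
        struct reclaim_stat stat = {};
        LIST_HEAD(page_list);

        /* 1. throttle while too many pages are already isolated */
        while (unlikely(too_many_isolated(pgdat, file, sc)))
            msleep(100);

        /* 2. isolate a batch and account it, under lru_lock */
        spin_lock_irq(&pgdat->lru_lock);
        nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
                                     &nr_scanned, sc, lru);
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
        spin_unlock_irq(&pgdat->lru_lock);

        /* 3. reclaim the private batch with no locks held */
        nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
                                        &stat, false);

        /* 4. put survivors back and drop the isolated count */
        spin_lock_irq(&pgdat->lru_lock);
        move_pages_to_lru(lruvec, &page_list);
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
        spin_unlock_irq(&pgdat->lru_lock);

        return nr_reclaimed;
    }

shrink_active_list() (lines 2059-2142 below) follows the same isolate/account/work/putback shape, except the unlocked middle step only demotes or re-activates pages rather than freeing them.
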
2059 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in shrink_active_list() local
2063 spin_lock_irq(&pgdat->lru_lock); in shrink_active_list()
2068 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); in shrink_active_list()
2074 spin_unlock_irq(&pgdat->lru_lock); in shrink_active_list()
2120 spin_lock_irq(&pgdat->lru_lock); in shrink_active_list()
2137 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
2138 spin_unlock_irq(&pgdat->lru_lock); in shrink_active_list()
2142 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, in shrink_active_list()
2234 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in inactive_list_is_low() local
2268 trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx, in inactive_list_is_low()
2312 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in get_scan_count() local
2362 pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); in get_scan_count()
2363 pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) + in get_scan_count()
2364 node_page_state(pgdat, NR_INACTIVE_FILE); in get_scan_count()
2367 struct zone *zone = &pgdat->node_zones[z]; in get_scan_count()
2430 spin_lock_irq(&pgdat->lru_lock); in get_scan_count()
2451 spin_unlock_irq(&pgdat->lru_lock); in get_scan_count()
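
The fragment at lines 2362-2367 is the heart of get_scan_count()'s file-versus-anon decision: if free memory plus page cache would not keep every eligible zone above its high watermark, the file cache is too thin to carry reclaim alone and anon pages must be scanned as well. A sketch of that check (the predicate name is invented):

    static bool file_cache_is_thin(struct pglist_data *pgdat,
                                   struct scan_control *sc)
    {
        unsigned long pgdatfree, pgdatfile, total_high_wmark = 0;
        int z;

        pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
        pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
                    node_page_state(pgdat, NR_INACTIVE_FILE);

        for (z = 0; z < MAX_NR_ZONES && z <= sc->reclaim_idx; z++) {
            struct zone *zone = &pgdat->node_zones[z];

            if (!managed_zone(zone))
                continue;
            total_high_wmark += high_wmark_pages(zone);
        }

        /* thin cache: force balanced anon+file scanning */
        return pgdatfree + pgdatfile <= total_high_wmark;
    }
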
2560 static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg, in shrink_node_memcg() argument
2563 struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg); in shrink_node_memcg()
2694 static inline bool should_continue_reclaim(struct pglist_data *pgdat, in should_continue_reclaim() argument
2721 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim()
2740 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); in should_continue_reclaim()
2742 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); in should_continue_reclaim()
2747 static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg) in pgdat_memcg_congested() argument
2749 return test_bit(PGDAT_CONGESTED, &pgdat->flags) || in pgdat_memcg_congested()
2750 (memcg && memcg_congested(pgdat, memcg)); in pgdat_memcg_congested()
2753 static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) in shrink_node() argument
2808 shrink_node_memcg(pgdat, memcg, sc, &lru_pages); in shrink_node()
2811 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node()
2853 set_bit(PGDAT_WRITEBACK, &pgdat->flags); in shrink_node()
2861 set_bit(PGDAT_CONGESTED, &pgdat->flags); in shrink_node()
2865 set_bit(PGDAT_DIRTY, &pgdat->flags); in shrink_node()
2883 set_memcg_congestion(pgdat, root, true); in shrink_node()
2892 current_may_throttle() && pgdat_memcg_congested(pgdat, root)) in shrink_node()
2895 } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed, in shrink_node()
2905 pgdat->kswapd_failures = 0; in shrink_node()
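
Lines 2853-2892 record back-pressure discovered during a reclaim pass: kswapd sets the node-wide PGDAT_* bits (consumed in shrink_page_list() at lines 1237/1371 and cleared in clear_pgdat_congested(), lines 3478-3480), line 2883 records per-memcg congestion for legacy hierarchies, and line 2892 stalls direct reclaimers on a congested node. A sketch of the kswapd-side bookkeeping, assuming the sc->nr counters of this kernel era:

    static void record_node_pressure(pg_data_t *pgdat, struct scan_control *sc)
    {
        /* every page taken this pass was already under writeback:
         * the node is saturated, allow stalling on writeback */
        if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
            set_bit(PGDAT_WRITEBACK, &pgdat->flags);

        /* dirty pages reach the LRU tail before the flusher
         * threads see them: let kswapd write pages itself */
        if (sc->nr.unqueued_dirty == sc->nr.file_taken)
            set_bit(PGDAT_DIRTY, &pgdat->flags);

        /* all dirty pages sat on a congested backing device */
        if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
            set_bit(PGDAT_CONGESTED, &pgdat->flags);
    }
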
3035 static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat) in snapshot_refaults() argument
3044 lruvec = mem_cgroup_lruvec(pgdat, memcg); in snapshot_refaults()
3129 static bool allow_direct_reclaim(pg_data_t *pgdat) in allow_direct_reclaim() argument
3137 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in allow_direct_reclaim()
3141 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
3159 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { in allow_direct_reclaim()
3160 pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx, in allow_direct_reclaim()
3162 wake_up_interruptible(&pgdat->kswapd_wait); in allow_direct_reclaim()
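
allow_direct_reclaim() (lines 3129-3162) implements the pfmemalloc throttle: direct reclaim may proceed only while free pages across the node's lower zones exceed half their combined min watermarks; otherwise the caller will be put to sleep, so kswapd is kicked (lines 3159-3162) to guarantee forward progress. A condensed sketch (the function name is changed to avoid clashing with the original):

    static bool direct_reclaim_allowed(pg_data_t *pgdat)
    {
        unsigned long reserve = 0, free_pages = 0;
        bool wmark_ok;
        int i;

        /* a hopeless node must not trap reclaimers forever */
        if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
            return true;

        for (i = 0; i <= ZONE_NORMAL; i++) {
            struct zone *zone = &pgdat->node_zones[i];

            if (!managed_zone(zone) || !zone_reclaimable_pages(zone))
                continue;
            reserve += min_wmark_pages(zone);
            free_pages += zone_page_state(zone, NR_FREE_PAGES);
        }

        if (!reserve)
            return true;

        wmark_ok = free_pages > reserve / 2;

        /* kswapd must be awake while tasks are throttled on it */
        if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
            pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
                                              (enum zone_type)ZONE_NORMAL);
            wake_up_interruptible(&pgdat->kswapd_wait);
        }
        return wmark_ok;
    }
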
3182 pg_data_t *pgdat = NULL; in throttle_direct_reclaim() local
3221 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
3222 if (allow_direct_reclaim(pgdat)) in throttle_direct_reclaim()
3228 if (!pgdat) in throttle_direct_reclaim()
3243 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
3244 allow_direct_reclaim(pgdat), HZ); in throttle_direct_reclaim()
3251 allow_direct_reclaim(pgdat)); in throttle_direct_reclaim()
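
The two wait sites at lines 3243-3251 differ by allocation context: a !__GFP_FS allocation, which may hold filesystem locks that writeback itself depends on, naps for at most a second and then proceeds, while an ordinary allocation sleeps killably until allow_direct_reclaim() passes. In sketch form (fragment, not a full function):

    if (!(gfp_mask & __GFP_FS)) {
        /* may hold fs locks: bounded nap, then proceed regardless */
        wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
                                         allow_direct_reclaim(pgdat), HZ);
    } else {
        /* normal context: sleep until kswapd restores the reserves,
         * waking only for fatal signals */
        wait_event_killable(pgdat->pfmemalloc_wait,
                            allow_direct_reclaim(pgdat));
    }
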
3309 pg_data_t *pgdat, in mem_cgroup_shrink_node() argument
3337 shrink_node_memcg(pgdat, memcg, &sc, &lru_pages); in mem_cgroup_shrink_node()
3395 static void age_active_anon(struct pglist_data *pgdat, in age_active_anon() argument
3405 struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg); in age_active_anon()
3415 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx) in pgdat_watermark_boosted() argument
3428 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
3443 static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx) in pgdat_balanced() argument
3454 zone = pgdat->node_zones + i; in pgdat_balanced()
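
pgdat_balanced() (lines 3443-3454) walks the node's zones bottom-up, since lower zones are the most likely to meet their watermarks, and declares the node balanced as soon as any eligible zone clears its high watermark for the requested order; a node with no managed zone at or below classzone_idx is balanced by definition. A sketch (the wrapper name is hypothetical):

    static bool node_is_balanced(pg_data_t *pgdat, int order, int classzone_idx)
    {
        unsigned long mark = -1;
        int i;

        for (i = 0; i <= classzone_idx; i++) {
            struct zone *zone = pgdat->node_zones + i;

            if (!managed_zone(zone))
                continue;

            mark = high_wmark_pages(zone);
            if (zone_watermark_ok_safe(zone, order, mark, classzone_idx))
                return true;
        }

        /* no managed zone at all: nothing to balance */
        return mark == (unsigned long)-1;
    }
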
3476 static void clear_pgdat_congested(pg_data_t *pgdat) in clear_pgdat_congested() argument
3478 clear_bit(PGDAT_CONGESTED, &pgdat->flags); in clear_pgdat_congested()
3479 clear_bit(PGDAT_DIRTY, &pgdat->flags); in clear_pgdat_congested()
3480 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); in clear_pgdat_congested()
3489 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx) in prepare_kswapd_sleep() argument
3504 if (waitqueue_active(&pgdat->pfmemalloc_wait)) in prepare_kswapd_sleep()
3505 wake_up_all(&pgdat->pfmemalloc_wait); in prepare_kswapd_sleep()
3508 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in prepare_kswapd_sleep()
3511 if (pgdat_balanced(pgdat, order, classzone_idx)) { in prepare_kswapd_sleep()
3512 clear_pgdat_congested(pgdat); in prepare_kswapd_sleep()
3527 static bool kswapd_shrink_node(pg_data_t *pgdat, in kswapd_shrink_node() argument
3536 zone = pgdat->node_zones + z; in kswapd_shrink_node()
3547 shrink_node(pgdat, sc); in kswapd_shrink_node()
3575 static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) in balance_pgdat() argument
3604 zone = pgdat->node_zones + i; in balance_pgdat()
3635 zone = pgdat->node_zones + i; in balance_pgdat()
3651 balanced = pgdat_balanced(pgdat, sc.order, classzone_idx); in balance_pgdat()
3684 age_active_anon(pgdat, &sc); in balance_pgdat()
3696 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order, in balance_pgdat()
3705 if (kswapd_shrink_node(pgdat, &sc)) in balance_pgdat()
3713 if (waitqueue_active(&pgdat->pfmemalloc_wait) && in balance_pgdat()
3714 allow_direct_reclaim(pgdat)) in balance_pgdat()
3715 wake_up_all(&pgdat->pfmemalloc_wait); in balance_pgdat()
3744 pgdat->kswapd_failures++; in balance_pgdat()
3756 zone = pgdat->node_zones + i; in balance_pgdat()
3766 wakeup_kcompactd(pgdat, pageblock_order, classzone_idx); in balance_pgdat()
3769 snapshot_refaults(NULL, pgdat); in balance_pgdat()
3790 static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat, in kswapd_classzone_idx() argument
3793 if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) in kswapd_classzone_idx()
3795 return pgdat->kswapd_classzone_idx; in kswapd_classzone_idx()
3798 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, in kswapd_try_to_sleep() argument
3807 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
3816 if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { in kswapd_try_to_sleep()
3823 reset_isolation_suitable(pgdat); in kswapd_try_to_sleep()
3829 wakeup_kcompactd(pgdat, alloc_order, classzone_idx); in kswapd_try_to_sleep()
3839 pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); in kswapd_try_to_sleep()
3840 pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order); in kswapd_try_to_sleep()
3843 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
3844 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
3852 prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { in kswapd_try_to_sleep()
3853 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); in kswapd_try_to_sleep()
3863 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); in kswapd_try_to_sleep()
3868 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); in kswapd_try_to_sleep()
3875 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
3895 pg_data_t *pgdat = (pg_data_t*)p; in kswapd() local
3897 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in kswapd()
3917 pgdat->kswapd_order = 0; in kswapd()
3918 pgdat->kswapd_classzone_idx = MAX_NR_ZONES; in kswapd()
3922 alloc_order = reclaim_order = pgdat->kswapd_order; in kswapd()
3923 classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); in kswapd()
3926 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, in kswapd()
3930 alloc_order = reclaim_order = pgdat->kswapd_order; in kswapd()
3931 classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); in kswapd()
3932 pgdat->kswapd_order = 0; in kswapd()
3933 pgdat->kswapd_classzone_idx = MAX_NR_ZONES; in kswapd()
3954 trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx, in kswapd()
3956 reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx); in kswapd()
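
The kswapd() body (lines 3895-3956) and wakeup_kswapd() (next cluster) form a producer/consumer pair over two request slots in the pgdat, kswapd_order and kswapd_classzone_idx, with MAX_NR_ZONES as the "no request pending" sentinel (see kswapd_classzone_idx(), lines 3790-3795). Condensed, the consumer loop looks roughly like this sketch:

    /* consumer side: drain the request slots, then balance the node */
    for ( ; ; ) {
        kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
                            classzone_idx);

        /* snapshot whatever wakeup_kswapd() posted while we slept */
        alloc_order = reclaim_order = pgdat->kswapd_order;
        classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
        pgdat->kswapd_order = 0;
        pgdat->kswapd_classzone_idx = MAX_NR_ZONES;   /* mark drained */

        reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
    }
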
3976 pg_data_t *pgdat; in wakeup_kswapd() local
3983 pgdat = zone->zone_pgdat; in wakeup_kswapd()
3985 if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) in wakeup_kswapd()
3986 pgdat->kswapd_classzone_idx = classzone_idx; in wakeup_kswapd()
3988 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, in wakeup_kswapd()
3990 pgdat->kswapd_order = max(pgdat->kswapd_order, order); in wakeup_kswapd()
3991 if (!waitqueue_active(&pgdat->kswapd_wait)) in wakeup_kswapd()
3995 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || in wakeup_kswapd()
3996 (pgdat_balanced(pgdat, order, classzone_idx) && in wakeup_kswapd()
3997 !pgdat_watermark_boosted(pgdat, classzone_idx))) { in wakeup_kswapd()
4006 wakeup_kcompactd(pgdat, order, classzone_idx); in wakeup_kswapd()
4010 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order, in wakeup_kswapd()
4012 wake_up_interruptible(&pgdat->kswapd_wait); in wakeup_kswapd()
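
The producer side, wakeup_kswapd() (lines 3976-4012), merges the new request into the slots with max() so no pending request is lost, then skips the wakeup when the node is hopeless or already balanced, falling back to kcompactd for order-only work. A condensed sketch; the gfp-flag gate on the kcompactd fallback is trimmed:

    /* producer side: merge the request, then decide whether to wake */
    pgdat = zone->zone_pgdat;
    if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
        pgdat->kswapd_classzone_idx = classzone_idx;
    else
        pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
                                          classzone_idx);
    pgdat->kswapd_order = max(pgdat->kswapd_order, order);

    if (!waitqueue_active(&pgdat->kswapd_wait))
        return;                     /* already awake and working */

    /* hopeless node, or balanced without a boosted watermark */
    if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
        (pgdat_balanced(pgdat, order, classzone_idx) &&
         !pgdat_watermark_boosted(pgdat, classzone_idx))) {
        wakeup_kcompactd(pgdat, order, classzone_idx);
        return;
    }

    wake_up_interruptible(&pgdat->kswapd_wait);
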
4063 pg_data_t *pgdat = NODE_DATA(nid); in kswapd_cpu_online() local
4066 mask = cpumask_of_node(pgdat->node_id); in kswapd_cpu_online()
4070 set_cpus_allowed_ptr(pgdat->kswapd, mask); in kswapd_cpu_online()
4081 pg_data_t *pgdat = NODE_DATA(nid); in kswapd_run() local
4084 if (pgdat->kswapd) in kswapd_run()
4087 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); in kswapd_run()
4088 if (IS_ERR(pgdat->kswapd)) { in kswapd_run()
4092 ret = PTR_ERR(pgdat->kswapd); in kswapd_run()
4093 pgdat->kswapd = NULL; in kswapd_run()
4161 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) in node_unmapped_file_pages() argument
4163 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); in node_unmapped_file_pages()
4164 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + in node_unmapped_file_pages()
4165 node_page_state(pgdat, NR_ACTIVE_FILE); in node_unmapped_file_pages()
4176 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) in node_pagecache_reclaimable() argument
4188 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES); in node_pagecache_reclaimable()
4190 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); in node_pagecache_reclaimable()
4194 delta += node_page_state(pgdat, NR_FILE_DIRTY); in node_pagecache_reclaimable()
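
node_pagecache_reclaimable() (lines 4176-4194) estimates how much page cache node-local reclaim could actually free, honouring the node_reclaim_mode policy bits: without RECLAIM_UNMAP only unmapped file pages count, and without RECLAIM_WRITE dirty pages are subtracted since they cannot be cleaned. A sketch (the wrapper name is hypothetical):

    static unsigned long pagecache_reclaimable_sketch(struct pglist_data *pgdat)
    {
        unsigned long reclaimable, delta = 0;

        if (node_reclaim_mode & RECLAIM_UNMAP)
            reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
        else
            reclaimable = node_unmapped_file_pages(pgdat);

        /* cannot write pages back: dirty cache is off the table */
        if (!(node_reclaim_mode & RECLAIM_WRITE))
            delta += node_page_state(pgdat, NR_FILE_DIRTY);

        /* guard against underflow of the estimate */
        if (unlikely(delta > reclaimable))
            delta = reclaimable;

        return reclaimable - delta;
    }
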
4206 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) in __node_reclaim() argument
4223 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, in __node_reclaim()
4237 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) { in __node_reclaim()
4243 shrink_node(pgdat, &sc); in __node_reclaim()
4257 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) in node_reclaim() argument
4271 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && in node_reclaim()
4272 node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages) in node_reclaim()
4287 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) in node_reclaim()
4290 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) in node_reclaim()
4293 ret = __node_reclaim(pgdat, gfp_mask, order); in node_reclaim()
4294 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in node_reclaim()
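
Lines 4290-4294 show the per-node reclaim exclusion in node_reclaim(): a flag bit in pgdat->flags acts as a trylock, so at most one task runs __node_reclaim() per node and everyone else bails out instead of queueing. A sketch of just that pattern (the wrapper name is invented):

    static int node_reclaim_sketch(struct pglist_data *pgdat,
                                   gfp_t gfp_mask, unsigned int order)
    {
        int ret;

        /* trylock: a set bit means another task is reclaiming here */
        if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
            return NODE_RECLAIM_NOSCAN;

        ret = __node_reclaim(pgdat, gfp_mask, order);
        clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);

        return ret;
    }
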
4338 struct pglist_data *pgdat = NULL; in check_move_unevictable_pages() local
4348 if (pagepgdat != pgdat) { in check_move_unevictable_pages()
4349 if (pgdat) in check_move_unevictable_pages()
4350 spin_unlock_irq(&pgdat->lru_lock); in check_move_unevictable_pages()
4351 pgdat = pagepgdat; in check_move_unevictable_pages()
4352 spin_lock_irq(&pgdat->lru_lock); in check_move_unevictable_pages()
4354 lruvec = mem_cgroup_page_lruvec(page, pgdat); in check_move_unevictable_pages()
4370 if (pgdat) { in check_move_unevictable_pages()
4373 spin_unlock_irq(&pgdat->lru_lock); in check_move_unevictable_pages()
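
check_move_unevictable_pages() (lines 4338-4373) batches lru_lock acquisition: the pages in one call may span nodes, so the lock is dropped and retaken only when the owning pgdat actually changes, rather than once per page. A minimal sketch of the batching idiom under that assumption (the walker and its per-page work are hypothetical):

    static void walk_pages_batched(struct page **pages, int nr)
    {
        struct pglist_data *pgdat = NULL;
        int i;

        for (i = 0; i < nr; i++) {
            struct page *page = pages[i];
            struct pglist_data *pagepgdat = page_pgdat(page);

            /* switch locks only on a node boundary */
            if (pagepgdat != pgdat) {
                if (pgdat)
                    spin_unlock_irq(&pgdat->lru_lock);
                pgdat = pagepgdat;
                spin_lock_irq(&pgdat->lru_lock);
            }
            /* ... operate on mem_cgroup_page_lruvec(page, pgdat) ... */
        }
        if (pgdat)
            spin_unlock_irq(&pgdat->lru_lock);
    }
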