
Searched refs:HPAGE_PMD_NR (Results 1 – 21 of 21) sorted by relevance

/Linux-v4.19/mm/
khugepaged.c
248 if (err || max_ptes_none > HPAGE_PMD_NR-1) in khugepaged_max_ptes_none_store()
274 if (err || max_ptes_swap > HPAGE_PMD_NR-1) in khugepaged_max_ptes_swap_store()
353 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8; in khugepaged_init()
354 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; in khugepaged_init()
355 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8; in khugepaged_init()
411 HPAGE_PMD_NR); in hugepage_vma_check()
529 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; in __collapse_huge_page_isolate()
640 for (_pte = pte; _pte < pte + HPAGE_PMD_NR; in __collapse_huge_page_copy()
894 if (referenced < HPAGE_PMD_NR/2) { in __collapse_huge_page_swapin()
899 for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE; in __collapse_huge_page_swapin()
[all …]
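
Note: the khugepaged_init() defaults above are all expressed in terms of HPAGE_PMD_NR. A minimal userspace sketch of the resulting numbers, assuming the common case of 4 KiB base pages and 2 MiB PMD huge pages (so HPAGE_PMD_NR is 512; other configurations differ):

/*
 * Hypothetical illustration only (not kernel code): the khugepaged
 * defaults from khugepaged_init() above, computed for an assumed
 * HPAGE_PMD_NR of 512.
 */
#include <stdio.h>

#define HPAGE_PMD_NR 512	/* assumed value for this sketch */

int main(void)
{
	printf("pages_to_scan = %d\n", HPAGE_PMD_NR * 8);	/* 4096 */
	printf("max_ptes_none = %d\n", HPAGE_PMD_NR - 1);	/* 511 */
	printf("max_ptes_swap = %d\n", HPAGE_PMD_NR / 8);	/* 64 */
	return 0;
}

These are the boot-time values exposed under /sys/kernel/mm/transparent_hugepage/khugepaged/.
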
huge_memory.c
125 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0; in shrink_huge_zero_page_count()
135 return HPAGE_PMD_NR; in shrink_huge_zero_page_scan()
567 clear_huge_page(page, vmf->address, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
605 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
936 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); in copy_huge_pmd()
972 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); in copy_huge_pmd()
1135 pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *), in do_huge_pmd_wp_page_fallback()
1142 for (i = 0; i < HPAGE_PMD_NR; i++) { in do_huge_pmd_wp_page_fallback()
1164 for (i = 0; i < HPAGE_PMD_NR; i++) { in do_huge_pmd_wp_page_fallback()
1193 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { in do_huge_pmd_wp_page_fallback()
[all …]
shmem.c
612 idx < index + HPAGE_PMD_NR) { in shmem_add_to_page_cache()
617 for (i = 0; i < HPAGE_PMD_NR; i++) { in shmem_add_to_page_cache()
846 if (index == round_down(end, HPAGE_PMD_NR)) { in shmem_undo_range()
855 index += HPAGE_PMD_NR - 1; in shmem_undo_range()
856 i += HPAGE_PMD_NR - 1; in shmem_undo_range()
947 if (index != round_down(end, HPAGE_PMD_NR)) in shmem_undo_range()
951 if (index == round_down(end, HPAGE_PMD_NR)) { in shmem_undo_range()
960 index += HPAGE_PMD_NR - 1; in shmem_undo_range()
961 i += HPAGE_PMD_NR - 1; in shmem_undo_range()
1465 hindex = round_down(index, HPAGE_PMD_NR); in shmem_alloc_hugepage()
[all …]
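
Note: the shmem_undo_range()/shmem_alloc_hugepage() hits above rely on the page-cache indices of one huge page being aligned to HPAGE_PMD_NR. A minimal sketch of that alignment, again assuming HPAGE_PMD_NR is 512 and using a simplified round_down() for illustration:

/*
 * Hypothetical illustration: round a page-cache index down to the start
 * of the PMD-sized huge page that covers it, as the shmem hits above do.
 * Assumes HPAGE_PMD_NR == 512; round_down() is simplified to the
 * power-of-two case.
 */
#include <stdio.h>

#define HPAGE_PMD_NR 512UL	/* assumed value for this sketch */
#define round_down(x, y) ((x) & ~((y) - 1))	/* y must be a power of two */

int main(void)
{
	unsigned long index = 1234;
	unsigned long hindex = round_down(index, HPAGE_PMD_NR);

	/* index 1234 lies in the huge page spanning indices 1024..1535 */
	printf("hindex = %lu\n", hindex);			/* 1024 */
	printf("next   = %lu\n", hindex + HPAGE_PMD_NR);	/* 1536 */
	return 0;
}
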
truncate.c
182 pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1; in truncate_cleanup_page()
580 index += HPAGE_PMD_NR - 1; in invalidate_mapping_pages()
581 i += HPAGE_PMD_NR - 1; in invalidate_mapping_pages()
mprotect.c
199 if (nr_ptes == HPAGE_PMD_NR) { in change_pmd_range()
200 pages += HPAGE_PMD_NR; in change_pmd_range()
rmap.c
1186 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { in page_add_file_rmap()
1226 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { in page_remove_file_rmap()
1273 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { in page_remove_anon_compound_rmap()
1278 nr = HPAGE_PMD_NR; in page_remove_anon_compound_rmap()
swap_slots.c
319 get_swap_pages(1, &entry, HPAGE_PMD_NR); in get_swap_page()
migrate.c
534 for (i = 1; i < HPAGE_PMD_NR; i++) { in migrate_page_move_mapping()
2023 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR); in migrate_misplaced_transhuge_page()
2065 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR); in migrate_misplaced_transhuge_page()
2066 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR); in migrate_misplaced_transhuge_page()
2070 -HPAGE_PMD_NR); in migrate_misplaced_transhuge_page()
2074 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); in migrate_misplaced_transhuge_page()
memcontrol.c
2675 for (i = 1; i < HPAGE_PMD_NR; i++) in mem_cgroup_split_huge_fixup()
2678 __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR); in mem_cgroup_split_huge_fixup()
4984 mc.precharge += HPAGE_PMD_NR; in mem_cgroup_count_precharge_pte_range()
5182 if (mc.precharge < HPAGE_PMD_NR) { in mem_cgroup_move_charge_pte_range()
5192 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
5193 mc.moved_charge += HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
5202 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
5203 mc.moved_charge += HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
swapfile.c
206 #define SWAPFILE_CLUSTER HPAGE_PMD_NR
1498 for (i = 0; i < HPAGE_PMD_NR; i++) { in page_trans_huge_map_swapcount()
1510 _total_mapcount -= HPAGE_PMD_NR; in page_trans_huge_map_swapcount()
filemap.c
279 page_ref_sub(page, HPAGE_PMD_NR); in page_cache_free_page()
351 tail_pages = HPAGE_PMD_NR - 1; in page_cache_tree_delete_batch()
vmscan.c
748 HPAGE_PMD_NR : 1; in is_page_cache_freeable()
911 refcount = 1 + HPAGE_PMD_NR; in __remove_mapping()
memory.c
3323 #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
3378 for (i = 0; i < HPAGE_PMD_NR; i++) in do_set_pmd()
3385 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); in do_set_pmd()
gup.c
314 *page_mask = HPAGE_PMD_NR - 1; in follow_pmd_mask()
page_alloc.c
4932 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR), in show_free_areas()
4934 * HPAGE_PMD_NR), in show_free_areas()
4935 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), in show_free_areas()
/Linux-v4.19/drivers/gpu/drm/ttm/
ttm_page_alloc.c
734 for (j = 0; j < HPAGE_PMD_NR; ++j) in ttm_put_pages()
738 if (j == HPAGE_PMD_NR) in ttm_put_pages()
769 for (j = 0; j < HPAGE_PMD_NR; ++j) in ttm_put_pages()
773 if (j != HPAGE_PMD_NR) in ttm_put_pages()
778 for (j = 0; j < HPAGE_PMD_NR; ++j) in ttm_put_pages()
785 max_size /= HPAGE_PMD_NR; in ttm_put_pages()
860 while (npages >= HPAGE_PMD_NR) { in ttm_get_pages()
871 for (j = 0; j < HPAGE_PMD_NR; ++j) in ttm_get_pages()
874 npages -= HPAGE_PMD_NR; in ttm_get_pages()
900 if (huge && npages >= HPAGE_PMD_NR) { in ttm_get_pages()
[all …]
ttm_page_alloc_dma.c
924 while (num_pages >= HPAGE_PMD_NR) { in ttm_dma_populate()
939 for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) { in ttm_dma_populate()
945 i += HPAGE_PMD_NR; in ttm_dma_populate()
946 num_pages -= HPAGE_PMD_NR; in ttm_dma_populate()
/Linux-v4.19/fs/proc/
meminfo.c
134 global_node_page_state(NR_ANON_THPS) * HPAGE_PMD_NR); in meminfo_proc_show()
136 global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR); in meminfo_proc_show()
138 global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR); in meminfo_proc_show()
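
Note: NR_ANON_THPS, NR_SHMEM_THPS and NR_SHMEM_PMDMAPPED count whole PMD-sized pages, so meminfo_proc_show() multiplies them by HPAGE_PMD_NR before printing in kB. A worked sketch of that conversion, assuming 4 KiB base pages and HPAGE_PMD_NR of 512:

/*
 * Hypothetical illustration of the scaling done for the meminfo hits
 * above: a THP counter of 3 becomes 3 * 512 base pages, printed as kB.
 * PAGE_SIZE and HPAGE_PMD_NR values are assumed.
 */
#include <stdio.h>

#define PAGE_SIZE    4096UL	/* assumed */
#define HPAGE_PMD_NR  512UL	/* assumed */

int main(void)
{
	unsigned long nr_anon_thps = 3;	/* example counter value */
	unsigned long kb = nr_anon_thps * HPAGE_PMD_NR * PAGE_SIZE / 1024;

	printf("AnonHugePages: %8lu kB\n", kb);	/* 6144 kB */
	return 0;
}
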
/Linux-v4.19/include/linux/
huge_mm.h
81 #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) macro
211 return HPAGE_PMD_NR; in hpage_nr_pages()
bio.h
42 #if HPAGE_PMD_NR > 256
43 #define BIO_MAX_PAGES HPAGE_PMD_NR
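
For reference, the huge_mm.h hit above is the definition itself: HPAGE_PMD_NR is derived from the PMD order. A sketch of the definition chain, with assumed x86-64-style values (4 KiB base pages, 2 MiB PMD entries; other architectures differ):

/*
 * Sketch of the definition chain behind HPAGE_PMD_NR; the numeric
 * values (PAGE_SHIFT, PMD_SHIFT) are assumptions for illustration.
 */
#define PAGE_SHIFT       12				/* assumed */
#define PMD_SHIFT        21				/* assumed */
#define HPAGE_PMD_SHIFT  PMD_SHIFT
#define HPAGE_PMD_ORDER  (HPAGE_PMD_SHIFT - PAGE_SHIFT)	/* 9 */
#define HPAGE_PMD_NR     (1 << HPAGE_PMD_ORDER)		/* 512 */

The bio.h hit raises BIO_MAX_PAGES from its default of 256 to HPAGE_PMD_NR so that a single bio can cover one whole huge page, which the THP swap path relies on.
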
/Linux-v4.19/drivers/base/
node.c
147 HPAGE_PMD_NR), in node_read_meminfo()
149 HPAGE_PMD_NR), in node_read_meminfo()
151 HPAGE_PMD_NR)); in node_read_meminfo()