/Linux-v5.4/fs/proc/
meminfo.c
    130  global_node_page_state(NR_ANON_THPS) * HPAGE_PMD_NR);  in meminfo_proc_show()
    132  global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);  in meminfo_proc_show()
    134  global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);  in meminfo_proc_show()
    136  global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR);  in meminfo_proc_show()
    138  global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR);  in meminfo_proc_show()
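The THP counters above count whole huge pages, so meminfo_proc_show() scales them by HPAGE_PMD_NR to get base pages before printing kB values. A minimal userspace sketch of the same arithmetic, assuming x86-64-style constants (4 KiB base pages, 2 MiB PMD huge pages) and a hypothetical counter value; nothing here is taken verbatim from fs/proc/meminfo.c beyond the scaling idea:

    #include <stdio.h>

    /* Assumed x86-64 values: one 2 MiB huge page = 512 base pages of 4 KiB. */
    #define PAGE_SHIFT      12
    #define HPAGE_PMD_SHIFT 21
    #define HPAGE_PMD_NR    (1 << (HPAGE_PMD_SHIFT - PAGE_SHIFT))

    int main(void)
    {
            unsigned long nr_anon_thps = 3;  /* hypothetical NR_ANON_THPS reading */

            /* Scale the THP count to base pages, then pages to kB. */
            unsigned long pages = nr_anon_thps * HPAGE_PMD_NR;
            printf("AnonHugePages:  %8lu kB\n", pages << (PAGE_SHIFT - 10));
            return 0;  /* prints 6144 kB */
    }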
|
/Linux-v5.4/drivers/gpu/drm/ttm/
ttm_page_alloc.c
    734  (npages - i) >= HPAGE_PMD_NR) {  in ttm_put_pages()
    735  for (j = 1; j < HPAGE_PMD_NR; ++j)  in ttm_put_pages()
    739  if (j == HPAGE_PMD_NR)  in ttm_put_pages()
    763  while ((npages - i) >= HPAGE_PMD_NR) {  in ttm_put_pages()
    770  for (j = 1; j < HPAGE_PMD_NR; ++j)  in ttm_put_pages()
    774  if (j != HPAGE_PMD_NR)  in ttm_put_pages()
    779  for (j = 0; j < HPAGE_PMD_NR; ++j)  in ttm_put_pages()
    786  max_size /= HPAGE_PMD_NR;  in ttm_put_pages()
    861  while (npages >= HPAGE_PMD_NR) {  in ttm_get_pages()
    872  for (j = 0; j < HPAGE_PMD_NR; ++j)  in ttm_get_pages()
    [all …]
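The loops at 734-739 and 763-774 implement run detection: when at least HPAGE_PMD_NR pages remain, ttm_put_pages() walks j across the candidate run and only treats it as a huge page if the loop reaches HPAGE_PMD_NR without hitting a discontinuity. A standalone sketch of that idiom, using an array of page frame numbers as a stand-in for struct page (the function name and types are illustrative assumptions):

    #include <stdbool.h>
    #include <stddef.h>

    #define HPAGE_PMD_NR 512  /* assumed: 2 MiB / 4 KiB */

    /*
     * True if the HPAGE_PMD_NR pfns starting at pfns[i] are physically
     * contiguous; mirrors the j-loop pattern in ttm_put_pages(), where
     * the run counts as huge only when j reaches HPAGE_PMD_NR.
     */
    static bool is_huge_run(const unsigned long *pfns, size_t npages, size_t i)
    {
            size_t j;

            if (npages - i < HPAGE_PMD_NR)
                    return false;
            for (j = 1; j < HPAGE_PMD_NR; ++j)
                    if (pfns[i + j] != pfns[i] + j)
                            break;
            return j == HPAGE_PMD_NR;
    }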
|
ttm_page_alloc_dma.c
    922  while (num_pages >= HPAGE_PMD_NR) {  in ttm_dma_populate()
    937  for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {  in ttm_dma_populate()
    943  i += HPAGE_PMD_NR;  in ttm_dma_populate()
    944  num_pages -= HPAGE_PMD_NR;  in ttm_dma_populate()
|
/Linux-v5.4/mm/
huge_memory.c
    140  return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;  in shrink_huge_zero_page_count()
    150  return HPAGE_PMD_NR;  in shrink_huge_zero_page_scan()
    596  clear_huge_page(page, vmf->address, HPAGE_PMD_NR);  in __do_huge_pmd_anonymous_page()
    634  add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);  in __do_huge_pmd_anonymous_page()
    1013  add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);  in copy_huge_pmd()
    1049  add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);  in copy_huge_pmd()
    1209  pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),  in do_huge_pmd_wp_page_fallback()
    1216  for (i = 0; i < HPAGE_PMD_NR; i++) {  in do_huge_pmd_wp_page_fallback()
    1238  for (i = 0; i < HPAGE_PMD_NR; i++) {  in do_huge_pmd_wp_page_fallback()
    1267  for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {  in do_huge_pmd_wp_page_fallback()
    [all …]
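Lines 1209-1267 show the write-protect fallback path: when a THP cannot be handled as one unit, do_huge_pmd_wp_page_fallback() allocates an array of HPAGE_PMD_NR page pointers and works through the region one PAGE_SIZE slice at a time. A userspace sketch of that split, with calloc standing in for kmalloc_array and all constants assumed:

    #include <stdio.h>
    #include <stdlib.h>

    #define HPAGE_PMD_NR 512     /* assumed */
    #define PAGE_SIZE    4096UL  /* assumed */

    int main(void)
    {
            /* One slot per base page, as kmalloc_array() sizes it at line 1209. */
            void **pages = calloc(HPAGE_PMD_NR, sizeof(*pages));
            unsigned long haddr = 0x200000;  /* hypothetical PMD-aligned address */
            int i;

            if (!pages)
                    return 1;
            /* Same i/haddr stride as the loop at line 1267. */
            for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE)
                    pages[i] = (void *)haddr;  /* stand-in for per-page copy work */
            printf("covered %d slices up to %#lx\n", i, haddr - PAGE_SIZE);
            free(pages);
            return 0;
    }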
|
khugepaged.c
    255  if (err || max_ptes_none > HPAGE_PMD_NR-1)  in khugepaged_max_ptes_none_store()
    281  if (err || max_ptes_swap > HPAGE_PMD_NR-1)  in khugepaged_max_ptes_swap_store()
    360  khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;  in khugepaged_init()
    361  khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;  in khugepaged_init()
    362  khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;  in khugepaged_init()
    422  HPAGE_PMD_NR);  in hugepage_vma_check()
    541  for (_pte = pte; _pte < pte+HPAGE_PMD_NR;  in __collapse_huge_page_isolate()
    652  for (_pte = pte; _pte < pte + HPAGE_PMD_NR;  in __collapse_huge_page_copy()
    906  if (referenced < HPAGE_PMD_NR/2) {  in __collapse_huge_page_swapin()
    911  for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;  in __collapse_huge_page_swapin()
    [all …]
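All of khugepaged's tunables derive from HPAGE_PMD_NR: the sysfs stores at 255 and 281 reject anything above HPAGE_PMD_NR - 1 (a PMD region has exactly HPAGE_PMD_NR PTEs, so at most HPAGE_PMD_NR - 1 of them can be none or swapped), and khugepaged_init() computes the defaults at 360-362. A sketch of the resulting values under the usual assumption HPAGE_PMD_NR = 512:

    #include <stdio.h>

    #define HPAGE_PMD_NR 512  /* assumed x86-64 value */

    int main(void)
    {
            /* Defaults as derived in khugepaged_init() (lines 360-362). */
            unsigned int pages_to_scan = HPAGE_PMD_NR * 8;  /* 4096 */
            unsigned int max_ptes_none = HPAGE_PMD_NR - 1;  /*  511 */
            unsigned int max_ptes_swap = HPAGE_PMD_NR / 8;  /*   64 */

            printf("pages_to_scan=%u max_ptes_none=%u max_ptes_swap=%u\n",
                   pages_to_scan, max_ptes_none, max_ptes_swap);
            return 0;
    }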
|
truncate.c
    180  pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;  in truncate_cleanup_page()
    584  index += HPAGE_PMD_NR - 1;  in invalidate_mapping_pages()
    585  i += HPAGE_PMD_NR - 1;  in invalidate_mapping_pages()
|
shmem.c
    847  if (index == round_down(end, HPAGE_PMD_NR)) {  in shmem_undo_range()
    856  index += HPAGE_PMD_NR - 1;  in shmem_undo_range()
    857  i += HPAGE_PMD_NR - 1;  in shmem_undo_range()
    948  if (index != round_down(end, HPAGE_PMD_NR))  in shmem_undo_range()
    952  if (index == round_down(end, HPAGE_PMD_NR)) {  in shmem_undo_range()
    961  index += HPAGE_PMD_NR - 1;  in shmem_undo_range()
    962  i += HPAGE_PMD_NR - 1;  in shmem_undo_range()
    1477  hindex = round_down(index, HPAGE_PMD_NR);  in shmem_alloc_hugepage()
    1478  if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,  in shmem_alloc_hugepage()
    1515  nr = huge ? HPAGE_PMD_NR : 1;  in shmem_alloc_and_acct_page()
    [all …]
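shmem_undo_range() keeps aligning page-cache indices to huge-page boundaries with round_down(index, HPAGE_PMD_NR) and then skips HPAGE_PMD_NR - 1 slots at a time. Because HPAGE_PMD_NR is a power of two, the kernel's round_down() reduces to a mask with HPAGE_PMD_NR - 1 (the same mask huge_mm.h names HPAGE_CACHE_INDEX_MASK, listed below). A small sketch with assumed constants:

    #include <stdio.h>

    #define HPAGE_PMD_NR 512  /* assumed; must be a power of two */

    /* Power-of-two round_down, as the kernel's round_down() macro computes it. */
    static unsigned long huge_round_down(unsigned long index)
    {
            return index & ~((unsigned long)HPAGE_PMD_NR - 1);
    }

    int main(void)
    {
            unsigned long index = 1337;  /* hypothetical page-cache index */

            /* First index covered by the same 512-page huge page: 1024. */
            printf("round_down(%lu, %d) = %lu\n",
                   index, HPAGE_PMD_NR, huge_round_down(index));
            return 0;
    }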
|
mprotect.c
    201  if (nr_ptes == HPAGE_PMD_NR) {  in change_pmd_range()
    202  pages += HPAGE_PMD_NR;  in change_pmd_range()
|
rmap.c
    1187  for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {  in page_add_file_rmap()
    1229  for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {  in page_remove_file_rmap()
    1278  for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {  in page_remove_anon_compound_rmap()
    1283  nr = HPAGE_PMD_NR;  in page_remove_anon_compound_rmap()
|
swap_slots.c
    319  get_swap_pages(1, &entry, HPAGE_PMD_NR);  in get_swap_page()
|
migrate.c
    461  for (i = 1; i < HPAGE_PMD_NR; i++) {  in migrate_page_move_mapping()
    2049  NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);  in migrate_misplaced_transhuge_page()
    2096  count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);  in migrate_misplaced_transhuge_page()
    2097  count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);  in migrate_misplaced_transhuge_page()
    2101  -HPAGE_PMD_NR);  in migrate_misplaced_transhuge_page()
    2105  count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);  in migrate_misplaced_transhuge_page()
|
memcontrol.c
    3057  for (i = 1; i < HPAGE_PMD_NR; i++)  in mem_cgroup_split_huge_fixup()
    3060  __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);  in mem_cgroup_split_huge_fixup()
    5655  mc.precharge += HPAGE_PMD_NR;  in mem_cgroup_count_precharge_pte_range()
    5852  if (mc.precharge < HPAGE_PMD_NR) {  in mem_cgroup_move_charge_pte_range()
    5862  mc.precharge -= HPAGE_PMD_NR;  in mem_cgroup_move_charge_pte_range()
    5863  mc.moved_charge += HPAGE_PMD_NR;  in mem_cgroup_move_charge_pte_range()
    5872  mc.precharge -= HPAGE_PMD_NR;  in mem_cgroup_move_charge_pte_range()
    5873  mc.moved_charge += HPAGE_PMD_NR;  in mem_cgroup_move_charge_pte_range()
|
swapfile.c
    254  #define SWAPFILE_CLUSTER HPAGE_PMD_NR
    1618  for (i = 0; i < HPAGE_PMD_NR; i++) {  in page_trans_huge_map_swapcount()
    1630  _total_mapcount -= HPAGE_PMD_NR;  in page_trans_huge_map_swapcount()
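Line 254 sizes a swap cluster to exactly one THP, which is what lets get_swap_page() (swap_slots.c above) request HPAGE_PMD_NR contiguous slots in a single get_swap_pages() call. A sketch of the slot-to-cluster arithmetic that falls out of this choice, with assumed constants and a hypothetical offset:

    #include <stdio.h>

    #define HPAGE_PMD_NR     512           /* assumed */
    #define SWAPFILE_CLUSTER HPAGE_PMD_NR  /* as defined at line 254 */

    int main(void)
    {
            unsigned long offset = 70000;  /* hypothetical swap slot */

            /* Each cluster holds one THP's worth of slots. */
            printf("slot %lu -> cluster %lu, index %lu within it\n",
                   offset, offset / SWAPFILE_CLUSTER, offset % SWAPFILE_CLUSTER);
            return 0;
    }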
|
gup.c
    419  ctx->page_mask = HPAGE_PMD_NR - 1;  in follow_pmd_mask()
|
page_alloc.c
    5329  K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),  in show_free_areas()
    5331  * HPAGE_PMD_NR),  in show_free_areas()
    5332  K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),  in show_free_areas()
|
memory.c
    3217  for (i = 0; i < HPAGE_PMD_NR; i++)  in do_set_pmd()
    3224  add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);  in do_set_pmd()
|
filemap.c
    251  page_ref_sub(page, HPAGE_PMD_NR);  in page_cache_free_page()
|
vmscan.c
    774  HPAGE_PMD_NR : 1;  in is_page_cache_freeable()
|
/Linux-v5.4/include/linux/
huge_mm.h
    79  #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)  (macro)
    129  #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
    237  return HPAGE_PMD_NR;  in hpage_nr_pages()
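This header is where the constant comes from: HPAGE_PMD_NR is 1 << HPAGE_PMD_ORDER, with HPAGE_PMD_ORDER defined just above line 79 as HPAGE_PMD_SHIFT - PAGE_SHIFT, and HPAGE_CACHE_INDEX_MASK is the matching low-bit mask. A minimal sketch of the macro chain under assumed x86-64 shifts:

    #include <stdio.h>

    /* Assumed x86-64 values: a PMD maps 2 MiB when base pages are 4 KiB. */
    #define PAGE_SHIFT      12
    #define HPAGE_PMD_SHIFT 21

    /* The macro chain from huge_mm.h. */
    #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT - PAGE_SHIFT)
    #define HPAGE_PMD_NR    (1 << HPAGE_PMD_ORDER)
    #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

    int main(void)
    {
            printf("order=%d nr=%d mask=%#x\n",
                   HPAGE_PMD_ORDER, HPAGE_PMD_NR, HPAGE_CACHE_INDEX_MASK);
            return 0;  /* order=9 nr=512 mask=0x1ff */
    }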
|
/Linux-v5.4/drivers/base/
node.c
    453  HPAGE_PMD_NR),  in node_read_meminfo()
    455  HPAGE_PMD_NR),  in node_read_meminfo()
    457  HPAGE_PMD_NR),  in node_read_meminfo()
    459  HPAGE_PMD_NR),  in node_read_meminfo()
    461  HPAGE_PMD_NR)  in node_read_meminfo()
|
/Linux-v5.4/drivers/gpu/drm/amd/amdgpu/
amdgpu_vram_mgr.c
    299  pages_per_node = HPAGE_PMD_NR;  in amdgpu_vram_mgr_new()
|