Searched refs: HPAGE_PMD_NR (results 1 – 16 of 16), sorted by relevance
/Linux-v6.1/mm/

khugepaged.c
    271  if (err || max_ptes_none > HPAGE_PMD_NR - 1)  in max_ptes_none_store()
    296  if (err || max_ptes_swap > HPAGE_PMD_NR - 1)  in max_ptes_swap_store()
    322  if (err || max_ptes_shared > HPAGE_PMD_NR - 1)  in max_ptes_shared_store()
    398  khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;  in khugepaged_init()
    399  khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;  in khugepaged_init()
    400  khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;  in khugepaged_init()
    401  khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;  in khugepaged_init()
    544  for (_pte = pte; _pte < pte + HPAGE_PMD_NR;  in __collapse_huge_page_isolate()
    681  for (_pte = pte; _pte < pte + HPAGE_PMD_NR;  in __collapse_huge_page_copy()
    904  unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);  in __collapse_huge_page_swapin()
    [all …]

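The sysfs store handlers above all clamp their tunable to at most HPAGE_PMD_NR - 1: a value of HPAGE_PMD_NR would let khugepaged collapse a range made up entirely of exempt PTEs, which would carry no data worth keeping. A minimal user-space sketch of that bound check, assuming the common x86-64 geometry of 512 PTEs per PMD; the helper name parse_max_ptes is hypothetical, not a kernel function:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Assumed geometry: x86-64 with 4 KiB base pages, 512 PTEs per PMD. */
    #define HPAGE_PMD_NR 512

    /*
     * Hypothetical stand-in for the store handlers above: a tunable that
     * counts PTEs inside one PMD range must stay below HPAGE_PMD_NR.
     */
    static int parse_max_ptes(const char *buf, unsigned long *out)
    {
        char *end;
        unsigned long val;

        errno = 0;
        val = strtoul(buf, &end, 10);
        if (errno || end == buf || val > HPAGE_PMD_NR - 1)
            return -EINVAL;   /* same bound as max_ptes_none_store() */
        *out = val;
        return 0;
    }

    int main(void)
    {
        unsigned long v;

        printf("\"511\" -> %d\n", parse_max_ptes("511", &v));   /* 0: accepted */
        printf("\"512\" -> %d\n", parse_max_ptes("512", &v));   /* -22: rejected */
        return 0;
    }
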
huge_memory.c
    214  return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;  in shrink_huge_zero_page_count()
    225  return HPAGE_PMD_NR;  in shrink_huge_zero_page_scan()
    676  clear_huge_page(page, vmf->address, HPAGE_PMD_NR);  in __do_huge_pmd_anonymous_page()
    711  add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);  in __do_huge_pmd_anonymous_page()
    1115  add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);  in copy_huge_pmd()
    1158  add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);  in copy_huge_pmd()
    1536  task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,  in do_huge_pmd_numa_page()
    1684  add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);  in zap_huge_pmd()
    1688  add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);  in zap_huge_pmd()
    1896  ret = HPAGE_PMD_NR;  in change_huge_pmd()
    [all …]

memfd.c
    44  cache_count = HPAGE_PMD_NR;  in memfd_tag_pins()
    103  cache_count = HPAGE_PMD_NR;  in memfd_wait_for_pins()

page_vma_mapped.c
    113  if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)  in check_pmd()
    246  (pvmw->nr_pages >= HPAGE_PMD_NR)) {  in page_vma_mapped_walk()

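check_pmd() above is an interval test: a PMD-mapped page occupies HPAGE_PMD_NR consecutive pfns, and the walk can only match when that span overlaps the target range. A standalone sketch of the same predicate, assuming HPAGE_PMD_NR = 512 (x86-64, 4 KiB base pages); the function name pmd_covers_target is hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    #define HPAGE_PMD_NR 512UL   /* assumed: x86-64, 4 KiB base pages */

    /*
     * A PMD-mapped page covers pfns [pmd_pfn, pmd_pfn + HPAGE_PMD_NR);
     * it can only map the target if that span overlaps
     * [target_pfn, target_pfn + nr_pages), as in check_pmd() above.
     */
    static bool pmd_covers_target(unsigned long pmd_pfn,
                                  unsigned long target_pfn,
                                  unsigned long nr_pages)
    {
        if (pmd_pfn + HPAGE_PMD_NR - 1 < target_pfn)  /* ends before target */
            return false;
        if (target_pfn + nr_pages - 1 < pmd_pfn)      /* starts after target */
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", pmd_covers_target(0, 511, 1));  /* 1: last subpage hits */
        printf("%d\n", pmd_covers_target(0, 512, 1));  /* 0: just past the PMD */
        return 0;
    }
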
mprotect.c
    407  if (nr_ptes == HPAGE_PMD_NR) {  in change_pmd_range()
    408  pages += HPAGE_PMD_NR;  in change_pmd_range()

vmstat.c
    1671  pages /= HPAGE_PMD_NR;  in zoneinfo_show_print()
    1803  v[i] /= HPAGE_PMD_NR;  in vmstat_start()

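Both hits above convert counters kept in base-page units into huge-page units for display by dividing by HPAGE_PMD_NR. A toy illustration of that unit conversion, assuming 4 KiB base pages and 512 base pages per huge page:

    #include <stdio.h>

    #define HPAGE_PMD_NR 512   /* assumed: x86-64, 4 KiB base pages */

    int main(void)
    {
        /* Counters are kept per base page; say three 2 MiB THPs. */
        unsigned long nr_base_pages = 3 * HPAGE_PMD_NR;

        /* Report in huge-page units, as zoneinfo_show_print() does. */
        printf("anon_transparent_hugepages: %lu\n",
               nr_base_pages / HPAGE_PMD_NR);   /* 3 */
        printf("AnonHugePages: %lu kB\n",
               nr_base_pages * 4);              /* 4 kB per base page */
        return 0;
    }
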
shmem.c
    486  index = round_up(index + 1, HPAGE_PMD_NR);  in shmem_is_huge()
    1552  hindex = round_down(index, HPAGE_PMD_NR);  in shmem_alloc_hugefolio()
    1553  if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,  in shmem_alloc_hugefolio()
    1588  nr = huge ? HPAGE_PMD_NR : 1;  in shmem_alloc_and_acct_folio()
    2573  for (i = 0; i < HPAGE_PMD_NR; i++) {  in shmem_write_end()

memcontrol.c
    5927  mc.precharge += HPAGE_PMD_NR;  in mem_cgroup_count_precharge_pte_range()
    6121  if (mc.precharge < HPAGE_PMD_NR) {  in mem_cgroup_move_charge_pte_range()
    6131  mc.precharge -= HPAGE_PMD_NR;  in mem_cgroup_move_charge_pte_range()
    6132  mc.moved_charge += HPAGE_PMD_NR;  in mem_cgroup_move_charge_pte_range()
    6141  mc.precharge -= HPAGE_PMD_NR;  in mem_cgroup_move_charge_pte_range()
    6142  mc.moved_charge += HPAGE_PMD_NR;  in mem_cgroup_move_charge_pte_range()

filemap.c
    2997  ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);  in do_sync_mmap_readahead()
    2998  ra->size = HPAGE_PMD_NR;  in do_sync_mmap_readahead()
    3005  ra->async_size = HPAGE_PMD_NR;  in do_sync_mmap_readahead()

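do_sync_mmap_readahead() rounds the fault index down to a huge-page boundary with a bitmask, which is valid only because HPAGE_PMD_NR is a power of two (1 << HPAGE_PMD_ORDER); the (unsigned long) cast widens the constant so the ~ is taken at full word width. A small sketch of that alignment, assuming HPAGE_PMD_NR = 512:

    #include <assert.h>
    #include <stdio.h>

    #define HPAGE_PMD_NR 512   /* assumed: x86-64, 4 KiB base pages */

    int main(void)
    {
        /*
         * Round a page-cache index down to a huge-page boundary;
         * the mask trick mirrors the kernel line above.
         */
        unsigned long index = 1234;
        unsigned long aligned = index & ~((unsigned long)HPAGE_PMD_NR - 1);

        assert(aligned == 1024);   /* 1234 lies in the block [1024, 1536) */
        printf("index %lu -> huge-page-aligned %lu\n", index, aligned);
        return 0;
    }
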
memory-failure.c
    659  if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {  in check_hwpoisoned_pmd_entry()

gup.c
    769  ctx->page_mask = HPAGE_PMD_NR - 1;  in follow_pmd_mask()

memory.c
    4285  for (i = 0; i < HPAGE_PMD_NR; i++)  in do_set_pmd()
    4292  add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);  in do_set_pmd()

swapfile.c
    271  #define SWAPFILE_CLUSTER HPAGE_PMD_NR

/Linux-v6.1/include/linux/

huge_mm.h
    106  #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)  macro
    146  HPAGE_PMD_NR))  in transhuge_vma_suitable()

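The definition above is the root of every other hit: HPAGE_PMD_NR is the number of base pages mapped by one PMD entry, 1 << HPAGE_PMD_ORDER. A self-contained mirror of the derivation, specialized to the common x86-64 geometry; the PAGE_SHIFT and PMD_SHIFT values are an assumption about the architecture, not part of the generic header:

    #include <stdio.h>

    /* Assumed geometry: x86-64, 4 KiB base pages. */
    #define PAGE_SHIFT      12
    #define PMD_SHIFT       21

    /* Same chain as huge_mm.h: shift -> order -> page count. */
    #define HPAGE_PMD_SHIFT PMD_SHIFT
    #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT - PAGE_SHIFT)
    #define HPAGE_PMD_NR    (1 << HPAGE_PMD_ORDER)

    int main(void)
    {
        printf("HPAGE_PMD_ORDER = %d\n", HPAGE_PMD_ORDER);   /* 9 */
        printf("HPAGE_PMD_NR    = %d\n", HPAGE_PMD_NR);      /* 512 */
        printf("huge page size  = %d KiB\n",
               HPAGE_PMD_NR << (PAGE_SHIFT - 10));           /* 2048 */
        return 0;
    }
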
/Linux-v6.1/drivers/gpu/drm/amd/amdgpu/

amdgpu_vram_mgr.c
    412  pages_per_block = HPAGE_PMD_NR;  in amdgpu_vram_mgr_new()

/Linux-v6.1/drivers/base/

node.c
    534  pages /= HPAGE_PMD_NR;  in node_read_vmstat()