Searched refs:HPAGE_PUD_SIZE (Results 1 – 12 of 12) sorted by relevance
113 #define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) macro
114 #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
337 #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) macro
622 ___haddr + HPAGE_PUD_SIZE); \
154 flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE); in pudp_huge_clear_flush()
372 flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE); in move_huge_pud()
511 if (extent == HPAGE_PUD_SIZE) { in move_page_tables()
1891 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); in __split_huge_pud_locked()
1907 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); in __split_huge_pud()
1182 VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma); in copy_pud_range()
1501 if (next - addr != HPAGE_PUD_SIZE) { in zap_pud_range()
80 align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT); in vmw_thp_get_node()
510 fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT; in vmw_bo_vm_huge_fault()
1036 len >= HPAGE_PUD_SIZE) in drm_get_unmapped_area()
1038 flags, HPAGE_PUD_SIZE); in drm_get_unmapped_area()
599 tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
232 else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT)) in ttm_bo_vm_insert_huge()
103 alignments[2] = HPAGE_PUD_SIZE; in nd_pfn_supported_alignments()
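
For orientation, here is a minimal user-space sketch (not taken from the results above) of how the HPAGE_PUD_* macros fit together and how callers such as __split_huge_pud() derive the huge-page-aligned range they flush. The 30-bit shift and the example address are assumptions chosen for illustration; in the kernel HPAGE_PUD_SHIFT is derived from PUD_SHIFT and varies by architecture and configuration.

    #include <stdio.h>

    /* Assumed value for illustration: 1 GiB PUD-sized huge pages (x86-64-like). */
    #define HPAGE_PUD_SHIFT 30
    #define HPAGE_PUD_SIZE  ((1UL) << HPAGE_PUD_SHIFT)
    #define HPAGE_PUD_MASK  (~(HPAGE_PUD_SIZE - 1))

    int main(void)
    {
            unsigned long address = 0x7f1234567890UL;       /* arbitrary example address */
            unsigned long haddr = address & HPAGE_PUD_MASK;  /* round down to huge page start */

            /* Callers operate on the whole [haddr, haddr + HPAGE_PUD_SIZE) range. */
            printf("HPAGE_PUD_SIZE = %#lx\n", HPAGE_PUD_SIZE);
            printf("range          = %#lx .. %#lx\n", haddr, haddr + HPAGE_PUD_SIZE);
            return 0;
    }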