Searched refs:hstate_vma (Results 1 – 18 of 18) sorted by relevance
| /Linux-v6.1/mm/damon/ |
| D | vaddr.c | 350 addr + huge_page_size(hstate_vma(vma)))) in damon_hugetlb_mkold()
|   |         | 365 struct hstate *h = hstate_vma(walk->vma); in damon_mkold_hugetlb_entry()
|   |         | 492 struct hstate *h = hstate_vma(walk->vma); in damon_young_hugetlb_entry()
|
| /Linux-v6.1/mm/ |
| D | hugetlb.c | 824 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index()
|   |           | 2867 struct hstate *h = hstate_vma(vma); in alloc_huge_page()
|   |           | 4649 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close()
|   |           | 4680 if (addr & ~(huge_page_mask(hstate_vma(vma)))) in hugetlb_vm_op_split()
|   |           | 4687 return huge_page_size(hstate_vma(vma)); in hugetlb_vm_op_pagesize()
|   |           | 4721 unsigned int shift = huge_page_shift(hstate_vma(vma)); in make_huge_pte()
|   |           | 4779 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); in hugetlb_install_page()
|   |           | 4792 struct hstate *h = hstate_vma(src_vma); in copy_hugetlb_page_range()
|   |           | 4970 struct hstate *h = hstate_vma(vma); in move_huge_pte()
|   |           | 4998 struct hstate *h = hstate_vma(vma); in move_hugetlb_page_tables()
|   |           | [all …]
|
| D | hmm.c | 486 ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte); in hmm_vma_walk_hugetlb_entry()
|   |       | 492 hmm_pfn_flags_order(huge_page_order(hstate_vma(vma))); in hmm_vma_walk_hugetlb_entry()
|
| D | page_vma_mapped.c | 166 struct hstate *hstate = hstate_vma(vma); in page_vma_mapped_walk()
|
| D | madvise.c | 813 if (start & ~huge_page_mask(hstate_vma(vma))) in madvise_dontneed_free_valid_vma()
|   |           | 822 *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma))); in madvise_dontneed_free_valid_vma()
|
| D | pagewalk.c | 297 struct hstate *h = hstate_vma(vma); in walk_hugetlb_range()
|
| D | mempolicy.c | 573 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in queue_pages_hugetlb()
|   |             | 1736 !hugepage_migration_supported(hstate_vma(vma))) in vma_migratable()
|   |             | 2028 huge_page_shift(hstate_vma(vma))); in huge_node()
|
| D | migrate.c | 238 unsigned int shift = huge_page_shift(hstate_vma(vma)); in remove_migration_pte()
|   |           | 338 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte); in migration_entry_wait_huge()
|
| D | mremap.c | 952 struct hstate *h __maybe_unused = hstate_vma(vma); in SYSCALL_DEFINE5()
|
| D | memory-failure.c | 713 struct hstate *h = hstate_vma(walk->vma); in hwpoison_hugetlb_range()
|
| /Linux-v6.1/arch/x86/include/asm/ |
| D | tlbflush.h | 229 ? huge_page_shift(hstate_vma(vma)) \
|
| /Linux-v6.1/arch/riscv/kvm/ |
| D | mmu.c | 643 vma_pageshift = huge_page_shift(hstate_vma(vma)); in kvm_riscv_gstage_map()
|   |       | 651 gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; in kvm_riscv_gstage_map()
|
| /Linux-v6.1/include/linux/ |
| D | hugetlb.h | 758 static inline struct hstate *hstate_vma(struct vm_area_struct *vma) in hstate_vma() function
|   |           | 1033 static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
|
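The two hugetlb.h hits above are the declaration sites of hstate_vma() itself. As a minimal usage sketch (not code from the tree; the helper name example_hugetlb_vma_pagesize is hypothetical), the pattern shared by most callers in these results, e.g. hugetlb_vm_op_pagesize() in hugetlb.c and smaps_hugetlb_range() in task_mmu.c, looks roughly like this:

    #include <linux/mm.h>
    #include <linux/hugetlb.h>

    /* Hypothetical helper mirroring the call pattern seen in the results above. */
    static unsigned long example_hugetlb_vma_pagesize(struct vm_area_struct *vma)
    {
            struct hstate *h;

            /* hstate_vma() is only meaningful for hugetlb-backed VMAs. */
            if (!is_vm_hugetlb_page(vma))
                    return PAGE_SIZE;

            h = hstate_vma(vma);            /* hstate describing this mapping */
            return huge_page_size(h);       /* e.g. 2 MiB or 1 GiB */
    }

Callers such as hugetlb_vm_op_split() and madvise_dontneed_free_valid_vma() in the results use the same hstate to check and enforce alignment via huge_page_mask()/huge_page_size().
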
| /Linux-v6.1/arch/powerpc/mm/ |
| D | pgtable.c | 260 struct hstate *h = hstate_vma(vma); in huge_ptep_set_access_flags()
|
| /Linux-v6.1/fs/proc/ |
| D | task_mmu.c | 743 mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
|   |            | 745 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
|
| /Linux-v6.1/arch/arm64/kvm/ |
| D | mmu.c | 1059 return huge_page_shift(hstate_vma(vma)); in get_vma_page_shift()
|
| /Linux-v6.1/fs/hugetlbfs/ |
| D | inode.c | 392 huge_page_size(hstate_vma(vma))); in hugetlb_vma_maps_page()
|
| /Linux-v6.1/arch/sparc/mm/ |
| D | init_64.c | 419 hugepage_size = huge_page_size(hstate_vma(vma)); in update_mmu_cache()
|