
Searched refs:hpage (Results 1 – 8 of 8) sorted by relevance

/Linux-v4.19/mm/
memory-failure.c
784 struct page *hpage = compound_head(p); in me_huge_page() local
787 if (!PageHuge(hpage)) in me_huge_page()
790 mapping = page_mapping(hpage); in me_huge_page()
792 res = truncate_error_page(hpage, pfn, mapping); in me_huge_page()
794 unlock_page(hpage); in me_huge_page()
800 if (PageAnon(hpage)) in me_huge_page()
801 put_page(hpage); in me_huge_page()
804 lock_page(hpage); in me_huge_page()
971 struct page *hpage = *hpagep; in hwpoison_user_mappings() local
972 bool mlocked = PageMlocked(hpage); in hwpoison_user_mappings()
[all …]
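
The me_huge_page() hits above all operate on the head page of a compound (huge) page: resolve it with compound_head(), then decide what to do based on PageHuge() and page_mapping(). A minimal userspace sketch of that pattern, using stub types in place of the kernel's struct page, compound_head(), PageHuge() and page_mapping() (all stand-ins, not the kernel definitions):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stub page descriptor: "head" points at the first page of a compound page. */
    struct page {
        struct page *head;   /* NULL if this page is itself the head */
        bool is_huge;        /* stands in for PageHuge()             */
        void *mapping;       /* stands in for page_mapping()         */
    };

    /* Any tail page resolves to its head, mirroring compound_head(). */
    static struct page *compound_head_sketch(struct page *p)
    {
        return p->head ? p->head : p;
    }

    /* Rough shape of me_huge_page(): look at the head page, then decide
     * whether the error can be handled through the page's mapping. */
    static int me_huge_page_sketch(struct page *p)
    {
        struct page *hpage = compound_head_sketch(p);

        if (!hpage->is_huge)
            return -1;       /* not a hugetlb page: bail out */
        if (hpage->mapping)
            return 0;        /* file-backed: the error page can be truncated */
        return 1;            /* anonymous or free: needs other handling */
    }

    int main(void)
    {
        struct page head = { .head = NULL, .is_huge = true, .mapping = NULL };
        struct page tail = { .head = &head };

        printf("%d\n", me_huge_page_sketch(&tail));
        return 0;
    }
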
khugepaged.c
751 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) in khugepaged_prealloc_page() argument
753 if (IS_ERR(*hpage)) { in khugepaged_prealloc_page()
758 *hpage = NULL; in khugepaged_prealloc_page()
760 } else if (*hpage) { in khugepaged_prealloc_page()
761 put_page(*hpage); in khugepaged_prealloc_page()
762 *hpage = NULL; in khugepaged_prealloc_page()
769 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) in khugepaged_alloc_page() argument
771 VM_BUG_ON_PAGE(*hpage, *hpage); in khugepaged_alloc_page()
773 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); in khugepaged_alloc_page()
774 if (unlikely(!*hpage)) { in khugepaged_alloc_page()
[all …]
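
khugepaged passes the preallocated huge page around through a struct page ** handle, so a failed allocation can be retried later and an unused page can be dropped with put_page(). A compile-able sketch of that handle pattern, using malloc()/free() and an ERR_PTR-style encoding in place of __alloc_pages_node() and the kernel's IS_ERR()/put_page() (illustrative stand-ins only, and the retry logic is simplified):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct page { int order; };

    /* ERR_PTR-style encoding: small negative errnos packed into a pointer. */
    #define ERR_PTR(err)  ((struct page *)(long)(err))
    #define IS_ERR(p)     ((unsigned long)(p) >= (unsigned long)-4095)

    /* Stand-in for __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER). */
    static struct page *alloc_huge_page_sketch(int node, int order)
    {
        struct page *p = malloc(sizeof(*p));

        (void)node;
        if (!p)
            return ERR_PTR(-ENOMEM);
        p->order = order;
        return p;
    }

    /* Simplified khugepaged_prealloc_page(): clear a failed allocation so it
     * can be retried, or drop a page left over from the previous scan. */
    static bool prealloc_page_sketch(struct page **hpage)
    {
        if (IS_ERR(*hpage)) {
            *hpage = NULL;        /* previous allocation failed: retry later */
            return false;
        }
        if (*hpage) {
            free(*hpage);         /* stands in for put_page(*hpage) */
            *hpage = NULL;
        }
        return true;
    }

    int main(void)
    {
        struct page *hpage = NULL;

        prealloc_page_sketch(&hpage);
        hpage = alloc_huge_page_sketch(0, 9);  /* 9 == HPAGE_PMD_ORDER with 4K pages */
        if (!IS_ERR(hpage))
            free(hpage);
        return 0;
    }
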
hwpoison-inject.c
17 struct page *hpage; in hwpoison_inject() local
27 hpage = compound_head(p); in hwpoison_inject()
37 shake_page(hpage, 0); in hwpoison_inject()
41 if (!PageLRU(hpage) && !PageHuge(p)) in hwpoison_inject()
49 err = hwpoison_filter(hpage); in hwpoison_inject()
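
hwpoison_inject() gates the injection on the state of the head page: it resolves the head with compound_head(), shakes the page onto an LRU list, and only proceeds if the head is on the LRU (or the page is huge) and hwpoison_filter() accepts it. A sketch of that gating order with stubbed predicates (the helpers below are placeholders, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    struct page {
        struct page *head;   /* NULL when this page is the head */
        bool on_lru;
        bool is_huge;
    };

    static struct page *head_of(struct page *p)
    {
        return p->head ? p->head : p;
    }

    /* Placeholder for hwpoison_filter(): lets a test setup restrict which
     * pages may be poisoned. */
    static bool filter_allows(struct page *hpage)
    {
        (void)hpage;
        return true;
    }

    /* Decide whether an injected error for page p should be processed,
     * mirroring the check order in hwpoison_inject(). */
    static bool should_inject(struct page *p)
    {
        struct page *hpage = head_of(p);

        if (!hpage->on_lru && !p->is_huge)
            return false;    /* page not managed via LRU and not huge: skip */
        return filter_allows(hpage);
    }

    int main(void)
    {
        struct page pg = { .head = NULL, .on_lru = true };

        printf("%d\n", should_inject(&pg));
        return 0;
    }
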
page_vma_mapped.c
33 static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn) in pfn_in_hpage() argument
35 unsigned long hpage_pfn = page_to_pfn(hpage); in pfn_in_hpage()
38 return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage); in pfn_in_hpage()
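
pfn_in_hpage() is a pure range check: a pfn belongs to the huge page iff it lies in [head_pfn, head_pfn + nr_pages). The same arithmetic as a standalone sketch, with plain integers replacing page_to_pfn() and hpage_nr_pages() (assumed stand-ins):

    #include <assert.h>
    #include <stdbool.h>

    /* pfn is inside the huge page iff 0 <= pfn - hpage_pfn < nr_pages.
     * Checking ">= hpage_pfn" before subtracting keeps the unsigned
     * arithmetic from wrapping. */
    static bool pfn_in_hpage_sketch(unsigned long hpage_pfn,
                                    unsigned long nr_pages,
                                    unsigned long pfn)
    {
        return pfn >= hpage_pfn && pfn - hpage_pfn < nr_pages;
    }

    int main(void)
    {
        /* A 2 MiB huge page starting at pfn 0x1000 spans 512 base pages. */
        assert(pfn_in_hpage_sketch(0x1000, 512, 0x1000));
        assert(pfn_in_hpage_sketch(0x1000, 512, 0x11ff));
        assert(!pfn_in_hpage_sketch(0x1000, 512, 0x1200));
        return 0;
    }
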
migrate.c
1266 struct page *hpage, int force, in unmap_and_move_huge_page() argument
1281 if (!hugepage_migration_supported(page_hstate(hpage))) { in unmap_and_move_huge_page()
1282 putback_active_hugepage(hpage); in unmap_and_move_huge_page()
1286 new_hpage = get_new_page(hpage, private); in unmap_and_move_huge_page()
1290 if (!trylock_page(hpage)) { in unmap_and_move_huge_page()
1300 lock_page(hpage); in unmap_and_move_huge_page()
1303 if (PageAnon(hpage)) in unmap_and_move_huge_page()
1304 anon_vma = page_get_anon_vma(hpage); in unmap_and_move_huge_page()
1309 if (page_mapped(hpage)) { in unmap_and_move_huge_page()
1310 try_to_unmap(hpage, in unmap_and_move_huge_page()
[all …]
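
unmap_and_move_huge_page() follows the usual migration shape visible in the excerpt: bail out if the architecture cannot migrate this hugepage size, allocate a target page, lock the source, unmap it from user page tables, move the contents, then unlock. A control-flow sketch with stubbed helpers (the helper names below are placeholders that only model the flow, not the kernel API):

    #include <stdbool.h>
    #include <stddef.h>

    struct page { bool locked; bool mapped; };

    /* Placeholders for hugepage_migration_supported(), get_new_page(),
     * lock_page()/unlock_page(), try_to_unmap() and the copy step. */
    static bool migration_supported(struct page *p) { (void)p; return true; }
    static struct page *alloc_target(void) { static struct page t; return &t; }
    static void lock_pg(struct page *p)   { p->locked = true; }
    static void unlock_pg(struct page *p) { p->locked = false; }
    static void unmap_pg(struct page *p)  { p->mapped = false; }
    static int  move_contents(struct page *dst, struct page *src)
    {
        (void)dst; (void)src;
        return 0;
    }

    static int unmap_and_move_huge_sketch(struct page *hpage)
    {
        struct page *new_hpage;
        int rc = -1;

        if (!migration_supported(hpage))
            return -1;            /* leave the page where it is */

        new_hpage = alloc_target();
        if (!new_hpage)
            return -1;

        lock_pg(hpage);
        if (hpage->mapped)
            unmap_pg(hpage);      /* clear user mappings first */
        if (!hpage->mapped)
            rc = move_contents(new_hpage, hpage);
        unlock_pg(hpage);
        return rc;
    }

    int main(void)
    {
        struct page src = { .mapped = true };

        return unmap_and_move_huge_sketch(&src);
    }
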
/Linux-v4.19/arch/powerpc/kvm/
book3s_pr.c
629 struct page *hpage; in kvmppc_patch_dcbz() local
634 hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); in kvmppc_patch_dcbz()
635 if (is_error_page(hpage)) in kvmppc_patch_dcbz()
642 get_page(hpage); in kvmppc_patch_dcbz()
643 page = kmap_atomic(hpage); in kvmppc_patch_dcbz()
651 put_page(hpage); in kvmppc_patch_dcbz()
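
kvmppc_patch_dcbz() shows a pin/map/patch/unmap/unpin sequence: take a reference on the guest page with get_page(), map it with kmap_atomic(), rewrite matching 32-bit instruction words in place, then kunmap_atomic() and put_page(). A userspace sketch of the "scan a mapped page and rewrite matching instruction words" step; the opcode mask and patch value below are placeholders, not the real dcbz patching:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_WORDS (4096 / sizeof(uint32_t))

    /* Placeholder opcode match/patch values (not the real dcbz encoding). */
    #define MATCH_MASK   0xfc0007feu
    #define MATCH_VALUE  0x7c0007ecu
    #define PATCH_CLEAR  0x00000008u

    /* Rewrite every instruction word on the page that matches MATCH_VALUE.
     * In the kernel this loop runs between kmap_atomic() and kunmap_atomic()
     * on a page pinned with get_page(). */
    static int patch_page_sketch(uint32_t *page)
    {
        int patched = 0;

        for (size_t i = 0; i < PAGE_WORDS; i++) {
            if ((page[i] & MATCH_MASK) == MATCH_VALUE) {
                page[i] &= ~PATCH_CLEAR;
                patched++;
            }
        }
        return patched;
    }

    int main(void)
    {
        static uint32_t page[PAGE_WORDS];

        page[10] = MATCH_VALUE;
        printf("patched %d word(s)\n", patch_page_sketch(page));
        return 0;
    }
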
/Linux-v4.19/arch/s390/kvm/
kvm-s390.c
176 static int hpage; variable
177 module_param(hpage, int, 0444);
178 MODULE_PARM_DESC(hpage, "1m huge page backing support");
484 if (hpage && !kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_check_extension()
694 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
4199 if (nested && hpage) { in kvm_s390_init()
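
The kvm-s390.c hits show hpage exposed as a read-only integer module parameter. A minimal module skeleton using the same module_param()/MODULE_PARM_DESC() idiom (a generic sketch, not the KVM module itself):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/init.h>
    #include <linux/module.h>

    /* Read-only (0444) integer parameter, visible under
     * /sys/module/<module name>/parameters/hpage once loaded. */
    static int hpage;
    module_param(hpage, int, 0444);
    MODULE_PARM_DESC(hpage, "1m huge page backing support");

    static int __init hpage_demo_init(void)
    {
        pr_info("hpage parameter = %d\n", hpage);
        return 0;
    }

    static void __exit hpage_demo_exit(void)
    {
    }

    module_init(hpage_demo_init);
    module_exit(hpage_demo_exit);
    MODULE_LICENSE("GPL");
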
/Linux-v4.19/Documentation/virtual/kvm/
api.txt
4512 Returns: 0 on success, -EINVAL if hpage module parameter was not set
4520 hpage module parameter is not set to 1, -EINVAL is returned.
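
The api.txt excerpt documents a VM capability that only succeeds when the s390 kvm module was loaded with hpage=1. A hedged userspace sketch of enabling a VM-wide capability through the KVM_ENABLE_CAP ioctl; KVM_CAP_S390_HPAGE_1M is assumed to be the capability this section describes, so the code guards on the header actually defining it:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0) {
            perror("open /dev/kvm");
            return 1;
        }

        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        if (vm < 0) {
            perror("KVM_CREATE_VM");
            close(kvm);
            return 1;
        }

    #ifdef KVM_CAP_S390_HPAGE_1M
        /* Ask for 1 MiB hugepage backing; per the documentation above this
         * fails with -EINVAL unless the module parameter hpage was set to 1. */
        struct kvm_enable_cap cap;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_S390_HPAGE_1M;
        if (ioctl(vm, KVM_ENABLE_CAP, &cap) < 0)
            perror("KVM_ENABLE_CAP");
    #endif

        close(vm);
        close(kvm);
        return 0;
    }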