Lines matching refs: vma — cross-reference hits for the identifier vma. Each entry gives the source line number, the matching line, the enclosing function, and whether vma is a function argument or a local there. All of the functions below live in the kernel's mm/huge_memory.c (transparent huge page implementation), in what looks like a ~v5.4-era tree.

65 bool transparent_hugepage_enabled(struct vm_area_struct *vma)  in transparent_hugepage_enabled()  argument
68 unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE; in transparent_hugepage_enabled()
70 if (!transhuge_vma_suitable(vma, addr)) in transparent_hugepage_enabled()
72 if (vma_is_anonymous(vma)) in transparent_hugepage_enabled()
73 return __transparent_hugepage_enabled(vma); in transparent_hugepage_enabled()
74 if (vma_is_shmem(vma)) in transparent_hugepage_enabled()
75 return shmem_huge_enabled(vma); in transparent_hugepage_enabled()
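
Stitched back together, the hits at 65-75 reconstruct the whole gate function. A sketch from a ~v5.4-era tree (the two return-false fallthroughs are inferred; they contain no "vma" and so don't appear among the hits):

    bool transparent_hugepage_enabled(struct vm_area_struct *vma)
    {
            /* The addr is used only to check whether the vma size fits. */
            unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;

            if (!transhuge_vma_suitable(vma, addr))
                    return false;   /* range can't hold an aligned PMD-sized page */
            if (vma_is_anonymous(vma))
                    return __transparent_hugepage_enabled(vma);
            if (vma_is_shmem(vma))
                    return shmem_huge_enabled(vma);
            return false;
    }
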
492 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
494 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
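
The helper at 492-494 is small enough to reconstruct in full; it grants write permission on a new entry only when the VMA itself is writable (sketch):

    pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
    {
            if (likely(vma->vm_flags & VM_WRITE))
                    pmd = pmd_mkwrite(pmd);
            return pmd;
    }
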
576 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page() local
584 if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) { in __do_huge_pmd_anonymous_page()
590 pgtable = pte_alloc_one(vma->vm_mm); in __do_huge_pmd_anonymous_page()
604 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
610 ret = check_stable_address_space(vma->vm_mm); in __do_huge_pmd_anonymous_page()
615 if (userfaultfd_missing(vma)) { in __do_huge_pmd_anonymous_page()
621 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
627 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
628 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in __do_huge_pmd_anonymous_page()
629 page_add_new_anon_rmap(page, vma, haddr, true); in __do_huge_pmd_anonymous_page()
631 lru_cache_add_active_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page()
632 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
633 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
634 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
635 mm_inc_nr_ptes(vma->vm_mm); in __do_huge_pmd_anonymous_page()
646 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
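
Read top to bottom, the hits at 576-646 trace the whole anonymous huge-page fault path. An annotated outline, hedged as a simplification (error unwinding elided):

    /* __do_huge_pmd_anonymous_page(), simplified:
     * 1. mem_cgroup_try_charge_delay()  - charge the new huge page to the memcg
     * 2. pte_alloc_one(vma->vm_mm)      - preallocate a PTE table to deposit,
     *                                     so a later split can't fail on alloc
     * 3. pmd_lock(vma->vm_mm, vmf->pmd) - take the PMD lock
     * 4. check_stable_address_space()   - bail out on an unstable (OOM'd) mm
     * 5. userfaultfd_missing(vma)?      - free page + table, hand fault to uffd
     * 6. mk_huge_pmd(page, vma->vm_page_prot), then
     *    maybe_pmd_mkwrite(pmd_mkdirty(entry), vma)
     * 7. page_add_new_anon_rmap() + lru_cache_add_active_or_unevictable()
     * 8. pgtable_trans_huge_deposit() + set_pmd_at()
     * 9. add_mm_counter(MM_ANONPAGES, HPAGE_PMD_NR), mm_inc_nr_ptes()
     */
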
662 static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) in alloc_hugepage_direct_gfpmask() argument
664 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); in alloc_hugepage_direct_gfpmask()
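
alloc_hugepage_direct_gfpmask() maps the sysfs defrag policy plus the per-VMA MADV_HUGEPAGE hint (VM_HUGEPAGE, captured as vma_madvised at 664) onto a gfp mask. This function was reshuffled repeatedly across releases, so take this as a condensed sketch with only the "always" branch spelled out:

    static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
    {
            const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);

            /* defrag=always: synchronous compaction, but only retry hard
             * when the mapping was explicitly madvise(MADV_HUGEPAGE)d. */
            if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
                         &transparent_hugepage_flags))
                    return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

            /* ... the defer / defer+madvise / madvise modes pick lighter
             * masks along the same lines ... */
            return GFP_TRANSHUGE_LIGHT;
    }
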
690 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
696 entry = mk_pmd(zero_page, vma->vm_page_prot); in set_huge_zero_page()
707 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_anonymous_page() local
712 if (!transhuge_vma_suitable(vma, haddr)) in do_huge_pmd_anonymous_page()
714 if (unlikely(anon_vma_prepare(vma))) in do_huge_pmd_anonymous_page()
716 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) in do_huge_pmd_anonymous_page()
719 !mm_forbids_zeropage(vma->vm_mm) && in do_huge_pmd_anonymous_page()
725 pgtable = pte_alloc_one(vma->vm_mm); in do_huge_pmd_anonymous_page()
728 zero_page = mm_get_huge_zero_page(vma->vm_mm); in do_huge_pmd_anonymous_page()
730 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
734 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
738 ret = check_stable_address_space(vma->vm_mm); in do_huge_pmd_anonymous_page()
741 } else if (userfaultfd_missing(vma)) { in do_huge_pmd_anonymous_page()
746 set_huge_zero_page(pgtable, vma->vm_mm, vma, in do_huge_pmd_anonymous_page()
754 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
757 gfp = alloc_hugepage_direct_gfpmask(vma); in do_huge_pmd_anonymous_page()
758 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_anonymous_page()
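
do_huge_pmd_anonymous_page() is the entry point that decides between the huge zero page and a real allocation; an outline of what the hits at 707-758 show:

    /* do_huge_pmd_anonymous_page(), per the hits above:
     * - transhuge_vma_suitable(), anon_vma_prepare() and
     *   khugepaged_enter(vma, vma->vm_flags) gate the fault and register
     *   the mm with khugepaged;
     * - a read fault on a fresh range (and !mm_forbids_zeropage()) takes
     *   the fast path: mm_get_huge_zero_page() + set_huge_zero_page(),
     *   still deferring to userfaultfd_missing() when a uffd is armed;
     * - otherwise alloc_hugepage_direct_gfpmask() picks the gfp mask and
     *   alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER) allocates
     *   the page that __do_huge_pmd_anonymous_page() then maps.
     */
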
767 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pmd() argument
771 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pmd()
783 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in insert_pfn_pmd()
784 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) in insert_pfn_pmd()
785 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
796 entry = maybe_pmd_mkwrite(entry, vma); in insert_pfn_pmd()
806 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
817 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pmd() local
818 pgprot_t pgprot = vma->vm_page_prot; in vmf_insert_pfn_pmd()
826 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pmd()
828 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pmd()
830 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pmd()
832 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pmd()
836 pgtable = pte_alloc_one(vma->vm_mm); in vmf_insert_pfn_pmd()
841 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pmd()
843 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd()
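
The three truncated BUG_ON hits at 826-830 expand to the sanity checks below (a reconstruction; the lead comment is from the same era of the source):

    /*
     * If we had pmd_special, we could avoid all these restrictions,
     * but we need to be consistent with PTEs and architectures that
     * can't support a 'special' bit.
     */
    BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
                    !pfn_t_devmap(pfn));
    BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                    (VM_PFNMAP|VM_MIXEDMAP));
    BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

That is: the caller must be a pfnmap or mixedmap VMA (or insert a devmap pfn), not both kinds at once, and a VM_PFNMAP VMA must not be a COW mapping.
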
849 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) in maybe_pud_mkwrite() argument
851 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pud_mkwrite()
856 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pud() argument
859 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pud()
871 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); in insert_pfn_pud()
872 if (pudp_set_access_flags(vma, addr, pud, entry, 1)) in insert_pfn_pud()
873 update_mmu_cache_pud(vma, addr, pud); in insert_pfn_pud()
883 entry = maybe_pud_mkwrite(entry, vma); in insert_pfn_pud()
886 update_mmu_cache_pud(vma, addr, pud); in insert_pfn_pud()
895 struct vm_area_struct *vma = vmf->vma; in vmf_insert_pfn_pud() local
896 pgprot_t pgprot = vma->vm_page_prot; in vmf_insert_pfn_pud()
903 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in vmf_insert_pfn_pud()
905 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pud()
907 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pud()
909 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pud()
912 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pud()
914 insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write); in vmf_insert_pfn_pud()
920 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, in touch_pmd() argument
928 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, in touch_pmd()
930 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
933 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pmd() argument
937 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pmd()
957 touch_pmd(vma, addr, pmd, flags); in follow_devmap_pmd()
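
touch_pmd() is the small accessed/dirty updater the follow_* paths call; a sketch:

    static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd, int flags)
    {
            pmd_t _pmd;

            _pmd = pmd_mkyoung(*pmd);
            if (flags & FOLL_WRITE)
                    _pmd = pmd_mkdirty(_pmd);
            if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
                                      pmd, _pmd, flags & FOLL_WRITE))
                    update_mmu_cache_pmd(vma, addr, pmd);
    }

touch_pud() at 1066-1076 below is the same pattern one level up.
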
978 struct vm_area_struct *vma) in copy_huge_pmd() argument
987 if (!vma_is_anonymous(vma)) in copy_huge_pmd()
1039 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, in copy_huge_pmd()
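
Worth noting from the copy_huge_pmd() hits: fork copies huge PMDs only for anonymous VMAs (the !vma_is_anonymous() check at 987 returns early, since a file-backed huge mapping can simply be re-faulted in the child), and a parent's huge zero page is repropagated into the child with set_huge_zero_page() at 1039.
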
1066 static void touch_pud(struct vm_area_struct *vma, unsigned long addr, in touch_pud() argument
1074 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, in touch_pud()
1076 update_mmu_cache_pud(vma, addr, pud); in touch_pud()
1079 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pud() argument
1083 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pud()
1097 touch_pud(vma, addr, pud, flags); in follow_devmap_pud()
1118 struct vm_area_struct *vma) in copy_huge_pud() argument
1159 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); in huge_pud_set_accessed()
1167 if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write)) in huge_pud_set_accessed()
1168 update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); in huge_pud_set_accessed()
1181 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1189 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) in huge_pmd_set_accessed()
1190 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); in huge_pmd_set_accessed()
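
huge_pmd_set_accessed() (hits 1181-1190) can be reconstructed almost completely; a sketch of the ~v5.4 version:

    void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
    {
            bool write = vmf->flags & FAULT_FLAG_WRITE;
            unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
            pmd_t entry;

            vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
            if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
                    goto unlock;    /* entry changed under us; retry the fault */

            entry = pmd_mkyoung(orig_pmd);
            if (write)
                    entry = pmd_mkdirty(entry);
            if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
                    update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
    unlock:
            spin_unlock(vmf->ptl);
    }

huge_pud_set_accessed() at 1159-1168 above mirrors this for PUD-sized entries.
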
1199 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_wp_page_fallback() local
1217 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma, in do_huge_pmd_wp_page_fallback()
1220 mem_cgroup_try_charge_delay(pages[i], vma->vm_mm, in do_huge_pmd_wp_page_fallback()
1240 haddr + PAGE_SIZE * i, vma); in do_huge_pmd_wp_page_fallback()
1245 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in do_huge_pmd_wp_page_fallback()
1249 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1262 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1264 pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1265 pmd_populate(vma->vm_mm, &_pmd, pgtable); in do_huge_pmd_wp_page_fallback()
1269 entry = mk_pte(pages[i], vma->vm_page_prot); in do_huge_pmd_wp_page_fallback()
1270 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in do_huge_pmd_wp_page_fallback()
1273 page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false); in do_huge_pmd_wp_page_fallback()
1275 lru_cache_add_active_or_unevictable(pages[i], vma); in do_huge_pmd_wp_page_fallback()
1278 set_pte_at(vma->vm_mm, haddr, vmf->pte, entry); in do_huge_pmd_wp_page_fallback()
1284 pmd_populate(vma->vm_mm, vmf->pmd, pgtable); in do_huge_pmd_wp_page_fallback()
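
do_huge_pmd_wp_page_fallback() handles copy-on-write when a replacement huge page cannot be had; an outline of the hits at 1199-1284:

    /* do_huge_pmd_wp_page_fallback(), simplified:
     * - allocate and memcg-charge HPAGE_PMD_NR order-0 pages
     *   (alloc_page_vma_node() + mem_cgroup_try_charge_delay());
     * - copy the old huge page one 4K chunk at a time
     *   (destination offsets haddr + PAGE_SIZE * i);
     * - under an MMU_NOTIFY_CLEAR range and the PMD lock:
     *   pmdp_huge_clear_flush_notify() the huge entry, withdraw the
     *   deposited PTE table, and pmd_populate() it with per-page
     *   entries built via mk_pte() + maybe_mkwrite(pte_mkdirty()),
     *   each added with page_add_new_anon_rmap() and set_pte_at().
     */
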
1315 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_wp_page() local
1323 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1324 VM_BUG_ON_VMA(!vma->anon_vma, vma); in do_huge_pmd_wp_page()
1352 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_huge_pmd_wp_page()
1353 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1354 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1363 if (__transparent_hugepage_enabled(vma) && in do_huge_pmd_wp_page()
1365 huge_gfp = alloc_hugepage_direct_gfpmask(vma); in do_huge_pmd_wp_page()
1366 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_wp_page()
1374 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1379 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1388 if (unlikely(mem_cgroup_try_charge_delay(new_page, vma->vm_mm, in do_huge_pmd_wp_page()
1391 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1406 vma, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1409 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in do_huge_pmd_wp_page()
1423 entry = mk_huge_pmd(new_page, vma->vm_page_prot); in do_huge_pmd_wp_page()
1424 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_huge_pmd_wp_page()
1425 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); in do_huge_pmd_wp_page()
1426 page_add_new_anon_rmap(new_page, vma, haddr, true); in do_huge_pmd_wp_page()
1428 lru_cache_add_active_or_unevictable(new_page, vma); in do_huge_pmd_wp_page()
1429 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_huge_pmd_wp_page()
1430 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1432 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
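
The hits at 1315-1432 cover all three outcomes of a huge COW fault; an outline:

    /* do_huge_pmd_wp_page(), simplified:
     * - page still exclusively mapped: reuse it in place via
     *   maybe_pmd_mkwrite(pmd_mkdirty(entry)) + pmdp_set_access_flags();
     * - else, if __transparent_hugepage_enabled(vma), try a fresh huge
     *   page: alloc_hugepage_direct_gfpmask() + alloc_hugepage_vma();
     *   on allocation or memcg-charge failure, split_huge_pmd() and
     *   retry as PTEs (or take the _fallback copy path above);
     * - on success: copy the page, then mk_huge_pmd() +
     *   maybe_pmd_mkwrite(pmd_mkdirty()), pmdp_huge_clear_flush_notify(),
     *   page_add_new_anon_rmap(), set_pmd_at(), update_mmu_cache_pmd(),
     *   and add_mm_counter(MM_ANONPAGES, HPAGE_PMD_NR) in the case where
     *   the huge zero page is what was COWed.
     */
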
1464 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, in follow_trans_huge_pmd() argument
1469 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1488 touch_pmd(vma, addr, pmd, flags); in follow_trans_huge_pmd()
1489 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { in follow_trans_huge_pmd()
1535 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_numa_page() local
1546 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1583 target_nid = mpol_misplaced(page, vma, haddr); in do_huge_pmd_numa_page()
1635 if (mm_tlb_flush_pending(vma->vm_mm)) { in do_huge_pmd_numa_page()
1636 flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); in do_huge_pmd_numa_page()
1646 mmu_notifier_invalidate_range(vma->vm_mm, haddr, in do_huge_pmd_numa_page()
1656 migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, in do_huge_pmd_numa_page()
1668 pmd = pmd_modify(pmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1672 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
1673 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
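
The NUMA hinting fault handler shows up in hits 1535-1673; an outline:

    /* do_huge_pmd_numa_page(), simplified:
     * - retake the PMD lock, locate the page, and ask
     *   mpol_misplaced(page, vma, haddr) for a target node;
     * - if the mm has a TLB flush pending (mm_tlb_flush_pending()),
     *   flush_tlb_range() first so no stale writable TLB entries
     *   survive into migration, and notify secondary MMUs via
     *   mmu_notifier_invalidate_range();
     * - migrate_misplaced_transhuge_page() attempts the actual move;
     * - if migration is skipped or fails, restore a present entry:
     *   pmd_modify(pmd, vma->vm_page_prot), set_pmd_at(),
     *   update_mmu_cache_pmd().
     */
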
1693 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in madvise_free_huge_pmd() argument
1704 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
1747 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
1772 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pmd() argument
1780 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
1792 if (vma_is_dax(vma)) { in zap_huge_pmd()
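
In zap_huge_pmd(), __pmd_trans_huge_lock() both takes the lock and verifies the entry is still huge; the vma_is_dax() branch at 1792 then, as best as can be read from this era of the code, simply drops the entry without the rmap and counter updates a normal THP needs, while regular huge pages continue through rmap removal and withdrawal of the deposited page table.
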
1840 struct vm_area_struct *vma) in pmd_move_must_withdraw() argument
1848 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); in pmd_move_must_withdraw()
1863 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, in move_huge_pmd() argument
1869 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1890 old_ptl = __pmd_trans_huge_lock(old_pmd, vma); in move_huge_pmd()
1900 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { in move_huge_pmd()
1908 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); in move_huge_pmd()
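
move_huge_pmd() relocates a whole huge entry during mremap; an outline, hedged as a reconstruction:

    /* move_huge_pmd(), simplified:
     * - take the old PMD lock via __pmd_trans_huge_lock() (the new lock
     *   nests inside it when the two differ);
     * - clear the old entry and, when pmd_move_must_withdraw() says the
     *   deposited PTE table travels with it (split page-table locks and
     *   an anonymous VMA, per the hit at 1848), withdraw it from the
     *   old PMD and deposit it at the new one;
     * - re-set the entry at the new address and, if the old entry was
     *   present, flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE).
     */
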
1923 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in change_huge_pmd() argument
1926 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1932 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
1992 entry = pmdp_invalidate(vma, addr, pmd); in change_huge_pmd()
1999 BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); in change_huge_pmd()
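
The detail worth calling out in change_huge_pmd() is the invalidate-first dance visible at 1992:

    /* mprotect on a huge PMD changes protections roughly in three steps:
     *
     *     entry = pmdp_invalidate(vma, addr, pmd);
     *     entry = pmd_modify(entry, newprot);
     *     ...
     *     set_pmd_at(mm, addr, pmd, entry);
     *
     * going through pmdp_invalidate() rather than rewriting the entry
     * directly keeps concurrent hardware accessed/dirty-bit updates
     * from being lost while the new protection is installed.
     */
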
2011 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
2014 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
2028 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) in __pud_trans_huge_lock() argument
2032 ptl = pud_lock(vma->vm_mm, pud); in __pud_trans_huge_lock()
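
Both lock helpers follow the same check-under-lock pattern; a sketch of the PMD one:

    spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
    {
            spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);

            /* Return the held lock only if the entry is still huge (or a
             * huge swap/devmap entry); otherwise back off so the caller
             * can fall through to the PTE-level code. */
            if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
                       pmd_devmap(*pmd)))
                    return ptl;
            spin_unlock(ptl);
            return NULL;
    }

__pud_trans_huge_lock() checks pud_trans_huge()/pud_devmap() the same way.
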
2040 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pud() argument
2045 ptl = __pud_trans_huge_lock(pud, vma); in zap_huge_pud()
2056 if (vma_is_dax(vma)) { in zap_huge_pud()
2066 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, in __split_huge_pud_locked() argument
2070 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pud_locked()
2071 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); in __split_huge_pud_locked()
2076 pudp_huge_clear_flush_notify(vma, haddr, pud); in __split_huge_pud_locked()
2079 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, in __split_huge_pud() argument
2085 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pud()
2089 ptl = pud_lock(vma->vm_mm, pud); in __split_huge_pud()
2092 __split_huge_pud_locked(vma, pud, range.start); in __split_huge_pud()
2104 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, in __split_huge_zero_page_pmd() argument
2107 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd()
2120 pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2127 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); in __split_huge_zero_page_pmd()
2138 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
2141 struct mm_struct *mm = vma->vm_mm; in __split_huge_pmd_locked()
2150 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pmd_locked()
2151 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); in __split_huge_pmd_locked()
2157 if (!vma_is_anonymous(vma)) { in __split_huge_pmd_locked()
2158 _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_pmd_locked()
2165 if (vma_is_dax(vma)) in __split_huge_pmd_locked()
2186 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
2209 old_pmd = pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
2252 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); in __split_huge_pmd_locked()
2253 entry = maybe_mkwrite(entry, vma); in __split_huge_pmd_locked()
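
__split_huge_pmd_locked() (hits 2138-2253) is the heart of the split; an outline:

    /* __split_huge_pmd_locked(), simplified:
     * - file-backed (!vma_is_anonymous()) and DAX mappings: just
     *   pmdp_huge_clear_flush_notify() the huge entry; no PTE table is
     *   populated (DAX returns immediately, files refault as PTEs);
     * - the huge zero page: __split_huge_zero_page_pmd() rewrites the
     *   range as HPAGE_PMD_NR zero-page PTEs built from
     *   pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
     * - a real THP: pmdp_invalidate(vma, haddr, pmd) the old entry,
     *   then fill the deposited PTE table with mk_pte(page + i, ...)
     *   entries, write-enabled per maybe_mkwrite(), and pmd_populate().
     */
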
2298 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
2304 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pmd()
2308 ptl = pmd_lock(vma->vm_mm, pmd); in __split_huge_pmd()
2324 __split_huge_pmd_locked(vma, pmd, range.start, freeze); in __split_huge_pmd()
2343 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, in split_huge_pmd_address() argument
2351 pgd = pgd_offset(vma->vm_mm, address); in split_huge_pmd_address()
2365 __split_huge_pmd(vma, pmd, address, freeze, page); in split_huge_pmd_address()
2368 void vma_adjust_trans_huge(struct vm_area_struct *vma, in vma_adjust_trans_huge() argument
2379 (start & HPAGE_PMD_MASK) >= vma->vm_start && in vma_adjust_trans_huge()
2380 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) in vma_adjust_trans_huge()
2381 split_huge_pmd_address(vma, start, false, NULL); in vma_adjust_trans_huge()
2389 (end & HPAGE_PMD_MASK) >= vma->vm_start && in vma_adjust_trans_huge()
2390 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) in vma_adjust_trans_huge()
2391 split_huge_pmd_address(vma, end, false, NULL); in vma_adjust_trans_huge()
2399 struct vm_area_struct *next = vma->vm_next; in vma_adjust_trans_huge()
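
vma_adjust_trans_huge() pre-splits any huge page that a moving VMA boundary would cut through. The check for the new start (hits 2379-2381) reads roughly as below; the first clause, testing that the address is not already PMD-aligned, contains no "vma" and is inferred:

    if (start & ~HPAGE_PMD_MASK &&
        (start & HPAGE_PMD_MASK) >= vma->vm_start &&
        (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
            split_huge_pmd_address(vma, start, false, NULL);

The same test runs for end (2389-2391) and, via vma->vm_next at 2399, for the neighbouring VMA whose start is shifted by adjust_next.
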
3022 struct vm_area_struct *vma = pvmw->vma; in set_pmd_migration_entry() local
3023 struct mm_struct *mm = vma->vm_mm; in set_pmd_migration_entry()
3032 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); in set_pmd_migration_entry()
3034 pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
3048 struct vm_area_struct *vma = pvmw->vma; in remove_migration_pmd() local
3049 struct mm_struct *mm = vma->vm_mm; in remove_migration_pmd()
3060 pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot)); in remove_migration_pmd()
3064 pmde = maybe_pmd_mkwrite(pmde, vma); in remove_migration_pmd()
3066 flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); in remove_migration_pmd()
3068 page_add_anon_rmap(new, vma, mmun_start, true); in remove_migration_pmd()
3072 if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new)) in remove_migration_pmd()
3074 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()
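
The last two groups are the THP migration-entry pair; an outline of what the hits show:

    /* set_pmd_migration_entry(): under page_vma_mapped_walk (pvmw)
     * state, flush_cache_range() over the huge page, then
     * pmdp_invalidate() the mapped entry before the migration swap
     * entry is written in its place.
     *
     * remove_migration_pmd(): rebuild a present entry with
     * pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot)), restore write
     * permission via maybe_pmd_mkwrite() when the migration entry was
     * writable, flush_cache_range(), page_add_anon_rmap(); if the VMA
     * is VM_LOCKED and the page is not PageDoubleMap(), mlock the new
     * page; finish with update_mmu_cache_pmd().
     */
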