Lines matching full:pmd — each entry gives the source line number, the matched text, and the enclosing function (the functions shown are from the Linux kernel's transparent hugepage code, mm/huge_memory.c).
464 * DAX PMD support. in hugepage_init()
554 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
557 pmd = pmd_mkwrite(pmd); in maybe_pmd_mkwrite()
558 return pmd; in maybe_pmd_mkwrite()
684 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
685 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
708 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
709 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
710 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in __do_huge_pmd_anonymous_page()
766 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
770 if (!pmd_none(*pmd)) in set_huge_zero_page()
774 pgtable_trans_huge_deposit(mm, pmd, pgtable); in set_huge_zero_page()
775 set_pmd_at(mm, haddr, pmd, entry); in set_huge_zero_page()
807 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
809 if (pmd_none(*vmf->pmd)) { in do_huge_pmd_anonymous_page()
821 haddr, vmf->pmd, zero_page); in do_huge_pmd_anonymous_page()
822 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_anonymous_page()
841 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, in insert_pfn_pmd() argument
848 ptl = pmd_lock(mm, pmd); in insert_pfn_pmd()
849 if (!pmd_none(*pmd)) { in insert_pfn_pmd()
851 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { in insert_pfn_pmd()
852 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); in insert_pfn_pmd()
855 entry = pmd_mkyoung(*pmd); in insert_pfn_pmd()
857 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) in insert_pfn_pmd()
858 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
873 pgtable_trans_huge_deposit(mm, pmd, pgtable); in insert_pfn_pmd()
878 set_pmd_at(mm, addr, pmd, entry); in insert_pfn_pmd()
879 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
888 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
894 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
929 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd_prot()
1020 pmd_t *pmd, bool write) in touch_pmd() argument
1024 _pmd = pmd_mkyoung(*pmd); in touch_pmd()
1028 pmd, _pmd, write)) in touch_pmd()
1029 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
1033 pmd_t *pmd, int flags, struct dev_pagemap **pgmap) in follow_devmap_pmd() argument
1035 unsigned long pfn = pmd_pfn(*pmd); in follow_devmap_pmd()
1039 assert_spin_locked(pmd_lockptr(mm, pmd)); in follow_devmap_pmd()
1046 if (flags & FOLL_WRITE && !pmd_write(*pmd)) in follow_devmap_pmd()
1049 if (pmd_present(*pmd) && pmd_devmap(*pmd)) in follow_devmap_pmd()
1055 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); in follow_devmap_pmd()
1081 pmd_t pmd; in copy_huge_pmd() local
1098 pmd = *src_pmd; in copy_huge_pmd()
1101 if (unlikely(is_swap_pmd(pmd))) { in copy_huge_pmd()
1102 swp_entry_t entry = pmd_to_swp_entry(pmd); in copy_huge_pmd()
1104 VM_BUG_ON(!is_pmd_migration_entry(pmd)); in copy_huge_pmd()
1108 pmd = swp_entry_to_pmd(entry); in copy_huge_pmd()
1110 pmd = pmd_swp_mksoft_dirty(pmd); in copy_huge_pmd()
1112 pmd = pmd_swp_mkuffd_wp(pmd); in copy_huge_pmd()
1113 set_pmd_at(src_mm, addr, src_pmd, pmd); in copy_huge_pmd()
1119 pmd = pmd_swp_clear_uffd_wp(pmd); in copy_huge_pmd()
1120 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
1126 if (unlikely(!pmd_trans_huge(pmd))) { in copy_huge_pmd()
1131 * When page table lock is held, the huge zero pmd should not be in copy_huge_pmd()
1132 * under splitting since we don't split the page itself, only pmd to in copy_huge_pmd()
1135 if (is_huge_zero_pmd(pmd)) { in copy_huge_pmd()
1145 src_page = pmd_page(pmd); in copy_huge_pmd()
1164 pmd = pmd_clear_uffd_wp(pmd); in copy_huge_pmd()
1165 pmd = pmd_mkold(pmd_wrprotect(pmd)); in copy_huge_pmd()
1166 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
1294 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1295 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) in huge_pmd_set_accessed()
1298 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); in huge_pmd_set_accessed()
1313 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1324 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1342 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1379 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1380 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1389 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); in do_huge_pmd_wp_page()
1394 static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, in can_follow_write_pmd() argument
1398 /* If the pmd is writable, we can write to the page. */ in can_follow_write_pmd()
1399 if (pmd_write(pmd)) in can_follow_write_pmd()
1426 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) in can_follow_write_pmd()
1428 return !userfaultfd_huge_pmd_wp(vma, pmd); in can_follow_write_pmd()
1433 pmd_t *pmd, in follow_trans_huge_pmd() argument
1439 assert_spin_locked(pmd_lockptr(mm, pmd)); in follow_trans_huge_pmd()
1441 page = pmd_page(*pmd); in follow_trans_huge_pmd()
1445 !can_follow_write_pmd(*pmd, page, vma, flags)) in follow_trans_huge_pmd()
1449 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) in follow_trans_huge_pmd()
1453 if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags)) in follow_trans_huge_pmd()
1456 if (!pmd_write(*pmd) && gup_must_unshare(flags, page)) in follow_trans_huge_pmd()
1466 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); in follow_trans_huge_pmd()
1479 pmd_t pmd; in do_huge_pmd_numa_page() local
1488 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1489 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { in do_huge_pmd_numa_page()
1494 pmd = pmd_modify(oldpmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1495 page = vm_normal_page_pmd(vma, haddr, pmd); in do_huge_pmd_numa_page()
1526 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1527 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { in do_huge_pmd_numa_page()
1542 /* Restore the PMD */ in do_huge_pmd_numa_page()
1543 pmd = pmd_modify(oldpmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1544 pmd = pmd_mkyoung(pmd); in do_huge_pmd_numa_page()
1546 pmd = pmd_mkwrite(pmd); in do_huge_pmd_numa_page()
1547 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
1548 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
1554 * Return true if we do MADV_FREE successfully on entire pmd page.
1558 pmd_t *pmd, unsigned long addr, unsigned long next) in madvise_free_huge_pmd() argument
1568 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
1572 orig_pmd = *pmd; in madvise_free_huge_pmd()
1611 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
1615 set_pmd_at(mm, addr, pmd, orig_pmd); in madvise_free_huge_pmd()
1616 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_free_huge_pmd()
1627 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) in zap_deposited_table() argument
1631 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in zap_deposited_table()
1637 pmd_t *pmd, unsigned long addr) in zap_huge_pmd() argument
1644 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
1653 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, in zap_huge_pmd()
1655 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in zap_huge_pmd()
1658 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1661 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1680 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); in zap_huge_pmd()
1683 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1687 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1704 * With split pmd lock we also need to move preallocated in pmd_move_must_withdraw()
1705 * PTE page table if new_pmd is on different PMD page table. in pmd_move_must_withdraw()
1713 static pmd_t move_soft_dirty_pmd(pmd_t pmd) in move_soft_dirty_pmd() argument
1716 if (unlikely(is_pmd_migration_entry(pmd))) in move_soft_dirty_pmd()
1717 pmd = pmd_swp_mksoft_dirty(pmd); in move_soft_dirty_pmd()
1718 else if (pmd_present(pmd)) in move_soft_dirty_pmd()
1719 pmd = pmd_mksoft_dirty(pmd); in move_soft_dirty_pmd()
1721 return pmd; in move_soft_dirty_pmd()
1728 pmd_t pmd; in move_huge_pmd() local
1733 * The destination pmd shouldn't be established, free_pgtables() in move_huge_pmd()
1750 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); in move_huge_pmd()
1751 if (pmd_present(pmd)) in move_huge_pmd()
1760 pmd = move_soft_dirty_pmd(pmd); in move_huge_pmd()
1761 set_pmd_at(mm, new_addr, new_pmd, pmd); in move_huge_pmd()
1774 * - 0 if PMD could not be locked
1775 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1780 pmd_t *pmd, unsigned long addr, pgprot_t newprot, in change_huge_pmd() argument
1797 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
1801 preserve_write = prot_numa && pmd_write(*pmd); in change_huge_pmd()
1805 if (is_swap_pmd(*pmd)) { in change_huge_pmd()
1806 swp_entry_t entry = pmd_to_swp_entry(*pmd); in change_huge_pmd()
1809 VM_BUG_ON(!is_pmd_migration_entry(*pmd)); in change_huge_pmd()
1821 if (pmd_swp_soft_dirty(*pmd)) in change_huge_pmd()
1823 if (pmd_swp_uffd_wp(*pmd)) in change_huge_pmd()
1825 set_pmd_at(mm, addr, pmd, newpmd); in change_huge_pmd()
1839 if (is_huge_zero_pmd(*pmd)) in change_huge_pmd()
1842 if (pmd_protnone(*pmd)) in change_huge_pmd()
1845 page = pmd_page(*pmd); in change_huge_pmd()
1861 * to not clear pmd intermittently to avoid race with MADV_DONTNEED in change_huge_pmd()
1869 * pmd_trans_huge(*pmd) == 0 (without ptl) in change_huge_pmd()
1870 * // skip the pmd in change_huge_pmd()
1872 * // pmd is re-established in change_huge_pmd()
1874 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it in change_huge_pmd()
1880 oldpmd = pmdp_invalidate_ad(vma, addr, pmd); in change_huge_pmd()
1897 set_pmd_at(mm, addr, pmd, entry); in change_huge_pmd()
1909 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1914 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
1917 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1918 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || in __pmd_trans_huge_lock()
1919 pmd_devmap(*pmd))) in __pmd_trans_huge_lock()
2003 unsigned long haddr, pmd_t *pmd) in __split_huge_zero_page_pmd() argument
2011 * Leave pmd empty until pte is filled note that it is fine to delay in __split_huge_zero_page_pmd()
2013 * replacing a zero pmd write protected page with a zero pte write in __split_huge_zero_page_pmd()
2018 pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2020 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_zero_page_pmd()
2032 smp_wmb(); /* make pte visible before pmd */ in __split_huge_zero_page_pmd()
2033 pmd_populate(mm, pmd, pgtable); in __split_huge_zero_page_pmd()
2036 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
2051 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) in __split_huge_pmd_locked()
2052 && !pmd_devmap(*pmd)); in __split_huge_pmd_locked()
2057 old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_pmd_locked()
2063 zap_deposited_table(mm, pmd); in __split_huge_pmd_locked()
2084 if (is_huge_zero_pmd(*pmd)) { in __split_huge_pmd_locked()
2094 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
2098 * Up to this point the pmd is present and huge and userland has the in __split_huge_pmd_locked()
2100 * place). If we overwrite the pmd with the not-huge version pointing in __split_huge_pmd_locked()
2112 * current pmd notpresent (atomically because here the pmd_trans_huge in __split_huge_pmd_locked()
2113 * must remain set at all times on the pmd until the split is complete in __split_huge_pmd_locked()
2114 * for this pmd), then we flush the SMP TLB and finally we write the in __split_huge_pmd_locked()
2115 * non-huge version of the pmd entry with pmd_populate. in __split_huge_pmd_locked()
2117 old_pmd = pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
2147 * Without "freeze", we'll simply split the PMD, propagating the in __split_huge_pmd_locked()
2156 * In case we cannot clear PageAnonExclusive(), split the PMD in __split_huge_pmd_locked()
2159 * See page_try_share_anon_rmap(): invalidate PMD first. in __split_huge_pmd_locked()
2167 * Withdraw the table only after we mark the pmd entry invalid. in __split_huge_pmd_locked()
2170 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_pmd_locked()
2256 smp_wmb(); /* make pte visible before pmd */ in __split_huge_pmd_locked()
2257 pmd_populate(mm, pmd, pgtable); in __split_huge_pmd_locked()
2267 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
2277 ptl = pmd_lock(vma->vm_mm, pmd); in __split_huge_pmd()
2281 * pmd against. Otherwise we can end up replacing wrong folio. in __split_huge_pmd()
2286 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || in __split_huge_pmd()
2287 is_pmd_migration_entry(*pmd)) { in __split_huge_pmd()
2290 * guaranteed that pmd is present. in __split_huge_pmd()
2292 if (folio && folio != page_folio(pmd_page(*pmd))) in __split_huge_pmd()
2294 __split_huge_pmd_locked(vma, pmd, range.start, freeze); in __split_huge_pmd()
2307 * 3) Split a huge pmd into pte pointing to the same page. No need in __split_huge_pmd()
2318 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); in split_huge_pmd_address() local
2320 if (!pmd) in split_huge_pmd_address()
2323 __split_huge_pmd(vma, pmd, address, freeze, folio); in split_huge_pmd_address()
2330 * contain an hugepage: check if we need to split an huge pmd. in split_huge_pmd_if_needed()
3203 if (!(pvmw->pmd && !pvmw->pte)) in set_pmd_migration_entry()
3207 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
3209 /* See page_try_share_anon_rmap(): invalidate PMD first. */ in set_pmd_migration_entry()
3212 set_pmd_at(mm, address, pvmw->pmd, pmdval); in set_pmd_migration_entry()
3231 set_pmd_at(mm, address, pvmw->pmd, pmdswp); in set_pmd_migration_entry()
3248 if (!(pvmw->pmd && !pvmw->pte)) in remove_migration_pmd()
3251 entry = pmd_to_swp_entry(*pvmw->pmd); in remove_migration_pmd()
3254 if (pmd_swp_soft_dirty(*pvmw->pmd)) in remove_migration_pmd()
3258 if (pmd_swp_uffd_wp(*pvmw->pmd)) in remove_migration_pmd()
3277 set_pmd_at(mm, haddr, pvmw->pmd, pmde); in remove_migration_pmd()
3280 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()
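Editorial aside, not part of the search output above: a minimal sketch of the lock/check/deposit/install pattern that repeats across the matches in __do_huge_pmd_anonymous_page(), set_huge_zero_page() and insert_pfn_pmd(). The helper name install_huge_pmd_sketch() and its argument list are hypothetical; only the calls it makes (pmd_lock(), pmd_none(), pgtable_trans_huge_deposit(), set_pmd_at(), update_mmu_cache_pmd()) are taken from the listing, and the code is meant only as an in-kernel-context illustration, not as the file's actual implementation.

        #include <linux/mm.h>
        #include <linux/pgtable.h>

        /* Hypothetical helper illustrating the pattern seen above; not kernel code. */
        static void install_huge_pmd_sketch(struct mm_struct *mm,
                                            struct vm_area_struct *vma,
                                            unsigned long haddr, pmd_t *pmd,
                                            pmd_t entry, pgtable_t pgtable)
        {
                spinlock_t *ptl;

                ptl = pmd_lock(mm, pmd);        /* take the per-PMD page table lock */
                if (!pmd_none(*pmd)) {          /* another thread already populated it */
                        spin_unlock(ptl);
                        return;
                }
                /* Stash a preallocated PTE page table for a possible later split. */
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
                set_pmd_at(mm, haddr, pmd, entry);      /* install the huge entry */
                update_mmu_cache_pmd(vma, haddr, pmd);  /* arch hook; a no-op on x86 */
                spin_unlock(ptl);
        }

The other recurring shape in the listing is the lock helper: per the comment above __pmd_trans_huge_lock() ("Returns page table lock pointer if a given pmd maps a thp, NULL otherwise"), callers such as madvise_free_huge_pmd() and zap_huge_pmd() take the lock first, bail out on a NULL return, and only then read orig_pmd.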