Lines Matching +full:compound +full:- +full:device

1 /* SPDX-License-Identifier: GPL-2.0 */
45 * have ->parent pointing to this one, including itself.
51 /* Count of VMAs whose ->anon_vma pointer points to this object. */
70 * The copy-on-write semantics of fork mean that an anon_vma
86 struct rb_node rb; /* locked by anon_vma->rwsem */
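
The fragments above come from the struct anon_vma and struct anon_vma_chain definitions: an anon_vma counts its children and the VMAs pointing at it, and each chain entry sits in an interval tree protected by the anon_vma's rwsem. A pared-down model of how those pieces relate; field names mirror the kernel's, but the types are simplified for illustration:

struct anon_vma_model {
	struct anon_vma_model *root;	/* root of this anon_vma tree */
	struct anon_vma_model *parent;	/* what the ->parent comment refers to */
	unsigned long num_children;	/* anon_vmas whose ->parent is this
					 * one, including itself */
	unsigned long num_active_vmas;	/* VMAs whose ->anon_vma points here */
};

struct anon_vma_chain_model {
	void *vma;			 /* the VMA this chain entry links... */
	struct anon_vma_model *anon_vma; /* ...to this anon_vma */
	/*
	 * The kernel also embeds the rb-tree node matched above, locked by
	 * anon_vma->rwsem; omitted in this sketch.
	 */
};
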
108 atomic_inc(&anon_vma->refcount); in get_anon_vma()
115 if (atomic_dec_and_test(&anon_vma->refcount)) in put_anon_vma()
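
get_anon_vma() and put_anon_vma() implement plain reference counting on anon_vma->refcount: the final put, detected by atomic_dec_and_test(), frees the structure. A minimal userspace model of that get/put discipline, using C11 atomics in place of the kernel's atomic_t (all names here are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct av_ref {
	atomic_int refcount;
};

static void av_get(struct av_ref *av)
{
	/* models get_anon_vma(): the caller already holds a reference */
	atomic_fetch_add_explicit(&av->refcount, 1, memory_order_relaxed);
}

static void av_put(struct av_ref *av)
{
	/*
	 * models put_anon_vma(): atomic_dec_and_test() semantics; the
	 * thread that drops the count to zero is the one that frees.
	 */
	if (atomic_fetch_sub_explicit(&av->refcount, 1,
				      memory_order_acq_rel) == 1)
		free(av);
}

int main(void)
{
	struct av_ref *av = calloc(1, sizeof(*av));

	if (!av)
		return 1;
	atomic_init(&av->refcount, 1);
	av_get(av);
	av_put(av);
	av_put(av);	/* last put frees */
	return 0;
}
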
121 down_write(&anon_vma->root->rwsem); in anon_vma_lock_write()
126 up_write(&anon_vma->root->rwsem); in anon_vma_unlock_write()
131 down_read(&anon_vma->root->rwsem); in anon_vma_lock_read()
136 return down_read_trylock(&anon_vma->root->rwsem); in anon_vma_trylock_read()
141 up_read(&anon_vma->root->rwsem); in anon_vma_unlock_read()
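
Every locking helper matched here takes anon_vma->root->rwsem, not a per-object lock: all anon_vmas in one tree share their root's rwsem, so locking any of them serializes against the whole tree. A sketch of that shared-root pattern with a POSIX rwlock standing in for the kernel's rw_semaphore (names invented for the example):

#include <pthread.h>

struct av_locked {
	struct av_locked *root;		/* every tree member points here */
	pthread_rwlock_t rwsem;		/* only the root's lock is taken */
};

static void av_lock_write(struct av_locked *av)
{
	pthread_rwlock_wrlock(&av->root->rwsem);	/* anon_vma_lock_write() */
}

static void av_unlock_write(struct av_locked *av)
{
	pthread_rwlock_unlock(&av->root->rwsem);
}

static void av_lock_read(struct av_locked *av)
{
	pthread_rwlock_rdlock(&av->root->rwsem);	/* anon_vma_lock_read() */
}

static int av_trylock_read(struct av_locked *av)
{
	/* anon_vma_trylock_read(): nonzero on success, like down_read_trylock() */
	return pthread_rwlock_tryrdlock(&av->root->rwsem) == 0;
}

static void av_unlock_read(struct av_locked *av)
{
	pthread_rwlock_unlock(&av->root->rwsem);
}
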
156 if (likely(vma->anon_vma)) in anon_vma_prepare()
165 VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); in anon_vma_merge()
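
anon_vma_prepare() is a lazy-initialization fast path: when vma->anon_vma is already set, the likely() branch returns immediately, and only the first anonymous fault on the VMA pays for allocation in __anon_vma_prepare(). A hedged sketch of the same pattern (userspace stand-ins, not kernel code):

#include <errno.h>
#include <stdlib.h>

struct vma_model {
	void *anon_vma;		/* NULL until the first anonymous fault */
};

/* models __anon_vma_prepare(): allocate and attach, may fail */
static int vma_prepare_slow(struct vma_model *vma)
{
	vma->anon_vma = calloc(1, 64);
	return vma->anon_vma ? 0 : -ENOMEM;
}

/* models anon_vma_prepare(): the common case is one predicted load */
static inline int vma_prepare(struct vma_model *vma)
{
	if (__builtin_expect(vma->anon_vma != NULL, 1))	/* likely() */
		return 0;
	return vma_prepare_slow(vma);
}
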
175 * No special request: if the page is a subpage of a compound page, it is
184 * The compound page is not mapped via PTEs, but instead via a single PMD and
200 bool compound);
202 struct vm_area_struct *, bool compound);
204 bool compound);
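
The two comment fragments (lines 175 and 184 of the file) describe the distinction that the bool compound parameter in the declarations above selects: a subpage mapped by its own PTE versus a whole compound page mapped by a single PMD, which must be accounted differently. A toy flag encoding of that distinction (the kernel expresses it with a __bitwise rmap_t; the names below are invented):

#include <stdio.h>

typedef unsigned int rmap_model_t;

#define RMAP_MODEL_NONE		0u	  /* subpage mapped via its own PTE */
#define RMAP_MODEL_EXCLUSIVE	(1u << 0) /* mapped exclusively by one process */
#define RMAP_MODEL_COMPOUND	(1u << 1) /* whole compound page via one PMD */

static void account(rmap_model_t flags)
{
	if (flags & RMAP_MODEL_COMPOUND)
		puts("bump the folio-wide entire mapcount once");
	else
		puts("bump the individual subpage's _mapcount");
}

int main(void)
{
	account(RMAP_MODEL_NONE);
	account(RMAP_MODEL_COMPOUND | RMAP_MODEL_EXCLUSIVE);
	return 0;
}
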
211 static inline void __page_dup_rmap(struct page *page, bool compound) in __page_dup_rmap() argument
213 if (compound) { in __page_dup_rmap()
216 VM_BUG_ON_PAGE(compound && !PageHead(page), page); in __page_dup_rmap()
217 atomic_inc(&folio->_entire_mapcount); in __page_dup_rmap()
219 atomic_inc(&page->_mapcount); in __page_dup_rmap()
223 static inline void page_dup_file_rmap(struct page *page, bool compound) in page_dup_file_rmap() argument
225 __page_dup_rmap(page, compound); in page_dup_file_rmap()
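
__page_dup_rmap() splits exactly along that line: for a compound mapping it casts the head page to its folio and bumps _entire_mapcount once, otherwise it bumps the subpage's _mapcount; page_dup_file_rmap() is a thin wrapper for file pages. A userspace model of the accounting split, with C11 atomics in place of atomic_t and simplified stand-in types:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct page_model {
	atomic_int _mapcount;		/* per-subpage PTE mappings */
	bool is_head;			/* head page of a compound page? */
};

struct folio_model {
	struct page_model head;		/* folio overlays its head page */
	atomic_int _entire_mapcount;	/* whole-folio (PMD) mappings */
};

static void dup_rmap_model(struct page_model *page, bool compound)
{
	if (compound) {
		/* same trick as the kernel's (struct folio *)page cast */
		struct folio_model *folio = (struct folio_model *)page;

		assert(page->is_head);	/* VM_BUG_ON_PAGE(!PageHead(page)) */
		atomic_fetch_add(&folio->_entire_mapcount, 1);
	} else {
		atomic_fetch_add(&page->_mapcount, 1);
	}
}
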
229 * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped
232 * @compound: the page is mapped as compound or as a small page
235 * The caller needs to hold the PT lock and the vma->vm_mm->write_protect_seq.
237 * Duplicating the mapping can only fail if the page may be pinned; device
243 * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
245 static inline int page_try_dup_anon_rmap(struct page *page, bool compound, in page_try_dup_anon_rmap() argument
266 return -EBUSY; in page_try_dup_anon_rmap()
274 __page_dup_rmap(page, compound); in page_try_dup_anon_rmap()
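
The surrounding comment explains when fork() cannot simply duplicate an anon rmap: if the page is exclusive and may be pinned, handing a shared copy to the child would break the pin's guarantee, so the helper returns -EBUSY and the caller copies the page instead; device-private pages cannot be GUP-pinned and never fail here. A hedged sketch of that decision (the fields below abstract the kernel's PageAnonExclusive() and pin heuristic; all names are illustrative):

#include <errno.h>
#include <stdbool.h>

struct anon_page_model {
	bool anon_exclusive;	/* models PageAnonExclusive() */
	bool device_private;	/* cannot be pinned via GUP */
	bool maybe_dma_pinned;	/* models the "may be pinned" heuristic */
};

/* models page_try_dup_anon_rmap(): 0 on success, -EBUSY means "copy instead" */
static int try_dup_anon_rmap_model(struct anon_page_model *page)
{
	if (page->anon_exclusive && !page->device_private &&
	    page->maybe_dma_pinned)
		return -EBUSY;

	/*
	 * The page is now shared between processes, so it can no longer be
	 * exclusive; clear the flag before duplicating the mapping.
	 */
	page->anon_exclusive = false;
	/* ... then bump the relevant mapcount, as in __page_dup_rmap() ... */
	return 0;
}
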
279 * page_try_share_anon_rmap - try marking an exclusive anonymous page possibly
290 * Marking the page shared can only fail if the page may be pinned; device
293 * Returns 0 if marking the page possibly shared succeeded. Returns -EBUSY
300 /* device private pages cannot get pinned via GUP. */ in page_try_share_anon_rmap()
308 * the page is not pinned and that concurrent GUP-fast won't succeed in in page_try_share_anon_rmap()
323 * Conceptually, GUP-fast pinning of an anon page consists of: in page_try_share_anon_rmap()
327 * (B4) Check if the PTE changed by re-reading it; back off if so. in page_try_share_anon_rmap()
331 * If the PTE was writable, we only have to make sure that GUP-fast in page_try_share_anon_rmap()
334 * If the PTE was not writable, we have to make sure that GUP-fast either in page_try_share_anon_rmap()
340 * order. In GUP-fast pinning code, we have to make sure that (B3),(B4) in page_try_share_anon_rmap()
353 return -EBUSY; in page_try_share_anon_rmap()
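
Most of this function is the ordering argument in the matched comment: the unshare side must rule out a concurrent pin before clearing exclusivity, and GUP-fast must take its pin before re-validating its snapshot (steps (B3)/(B4)). A toy two-thread model of that pairing using C11 seq_cst fences where the kernel uses smp_mb() (purely illustrative; it collapses the writable and non-writable PTE cases into a single recheck):

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static atomic_int pincount;			/* pins taken by GUP */
static atomic_bool anon_exclusive = true;	/* PageAnonExclusive() */
static atomic_uintptr_t pte;			/* the mapping's PTE */

/* unshare side, modeling page_try_share_anon_rmap() */
static int try_share_model(void)
{
	/* paired with the fence in gup_fast_pin_model() */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&pincount) != 0)
		return -EBUSY;			/* may be pinned: back off */
	atomic_store(&anon_exclusive, false);	/* now possibly shared */
	return 0;
}

/* GUP-fast side: pin first, then fence, then re-validate the snapshot */
static bool gup_fast_pin_model(void)
{
	uintptr_t snap = atomic_load(&pte);	/* (B1) read the PTE */

	atomic_fetch_add(&pincount, 1);		/* (B2) take the pin */
	atomic_thread_fence(memory_order_seq_cst);

	/* (B3)/(B4): back off on a PTE change or lost exclusivity */
	if (atomic_load(&pte) != snap || !atomic_load(&anon_exclusive)) {
		atomic_fetch_sub(&pincount, 1);
		return false;
	}
	return true;
}
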
418 if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma)) in page_vma_mapped_walk_done()
419 pte_unmap(pvmw->pte); in page_vma_mapped_walk_done()
420 if (pvmw->ptl) in page_vma_mapped_walk_done()
421 spin_unlock(pvmw->ptl); in page_vma_mapped_walk_done()
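
page_vma_mapped_walk_done() releases whatever the walk currently holds: the temporarily mapped PTE (skipped for hugetlb, whose PTEs are not mapped per step) and then the page-table spinlock. Callers only need it when they break out of a page_vma_mapped_walk() loop early; a normal loop exit has already cleaned up. A small userspace model of the conditional teardown (types and helpers invented for the sketch):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct walk_model {
	int *pte;		/* non-NULL while a PTE is mapped */
	pthread_mutex_t *ptl;	/* non-NULL while the PT lock is held */
	bool vma_is_hugetlb;	/* hugetlb skips the pte_unmap() step */
};

static void pte_unmap_model(int *pte)
{
	(void)pte;	/* stands in for the kernel's pte_unmap() */
}

/* models page_vma_mapped_walk_done(): unmap first, then drop the lock */
static void walk_done_model(struct walk_model *w)
{
	if (w->pte && !w->vma_is_hugetlb)
		pte_unmap_model(w->pte);
	if (w->ptl)
		pthread_mutex_unlock(w->ptl);
}
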