mm/rmap.c: lines matching +full:compound +full:-device (code-search excerpt)
2 * mm/rmap.c - physical to virtual reverse mappings
23 * inode->i_rwsem (while writing or truncating, not reading or faulting)
24 * mm->mmap_lock
25 * mapping->invalidate_lock (in filemap_fault)
26 * page->flags PG_locked (lock_page)
29 * mapping->i_mmap_rwsem
30 * anon_vma->rwsem
31 * mm->page_table_lock or pte_lock
34 * mapping->private_lock (in block_dirty_folio)
37 * lruvec->lru_lock (in folio_lruvec_lock_irq)
38 * inode->i_lock (in set_page_dirty's __mark_inode_dirty)
39 * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
40 * sb_lock (within inode_lock in fs/fs-writeback.c)
42 * in arch-dependent flush_dcache_mmap_lock,
43 * within bdi.wb->list_lock in __sync_single_inode)
45 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
46 * ->tasklist_lock
52 * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
53 * page->flags PG_locked (lock_page)
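As an editor's illustration of the ordering documented above (not code from rmap.c): a hypothetical helper that needs both rmap locks for one VMA must take mapping->i_mmap_rwsem before anon_vma->rwsem, mirroring the nesting in the comment. The function names below are invented for the example.

static void lock_both_rmaps(struct vm_area_struct *vma)
{
	/* mmap_lock is assumed to already be held (outer level above) */
	if (vma->vm_file)
		i_mmap_lock_read(vma->vm_file->f_mapping);	/* mapping->i_mmap_rwsem */
	if (vma->anon_vma)
		anon_vma_lock_read(vma->anon_vma);		/* anon_vma->rwsem nests inside */
}

static void unlock_both_rmaps(struct vm_area_struct *vma)
{
	/* release in the reverse order of acquisition */
	if (vma->anon_vma)
		anon_vma_unlock_read(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_read(vma->vm_file->f_mapping);
}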
73 #include <linux/backing-dev.h>
96 atomic_set(&anon_vma->refcount, 1); in anon_vma_alloc()
97 anon_vma->num_children = 0; in anon_vma_alloc()
98 anon_vma->num_active_vmas = 0; in anon_vma_alloc()
99 anon_vma->parent = anon_vma; in anon_vma_alloc()
104 anon_vma->root = anon_vma; in anon_vma_alloc()
112 VM_BUG_ON(atomic_read(&anon_vma->refcount)); in anon_vma_free()
132 if (rwsem_is_locked(&anon_vma->root->rwsem)) { in anon_vma_free()
154 avc->vma = vma; in anon_vma_chain_link()
155 avc->anon_vma = anon_vma; in anon_vma_chain_link()
156 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
157 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); in anon_vma_chain_link()
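Each anon_vma_chain is reachable from two sides: the VMA's own list (same_vma) and the anon_vma's interval tree used by rmap walks. A hypothetical debugging helper (invented name; assumes mmap_lock is held so the chain is stable) walking the VMA side would look like:

static void dump_vma_anon_chain(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	/* every avc on this list was inserted by anon_vma_chain_link() */
	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		pr_info("vma %p is indexed in anon_vma %p\n",
			avc->vma, avc->anon_vma);
}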
161 * __anon_vma_prepare - attach an anon_vma to a memory region
171 * can re-use the anon_vma from (very common when the only
175 * Anon-vma allocations are very subtle, because we may have
190 struct mm_struct *mm = vma->vm_mm; in __anon_vma_prepare()
206 anon_vma->num_children++; /* self-parent link for new root */ in __anon_vma_prepare()
212 spin_lock(&mm->page_table_lock); in __anon_vma_prepare()
213 if (likely(!vma->anon_vma)) { in __anon_vma_prepare()
214 vma->anon_vma = anon_vma; in __anon_vma_prepare()
216 anon_vma->num_active_vmas++; in __anon_vma_prepare()
220 spin_unlock(&mm->page_table_lock); in __anon_vma_prepare()
233 return -ENOMEM; in __anon_vma_prepare()
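Callers normally reach __anon_vma_prepare() through the anon_vma_prepare() wrapper in include/linux/rmap.h, which short-circuits when vma->anon_vma is already set. A sketch of the usual fault-path pattern (ensure_anon_vma() is an invented name):

static vm_fault_t ensure_anon_vma(struct vm_area_struct *vma)
{
	/* cheap check first; allocation (which may fail) only happens on the first anon fault */
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	return 0;
}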
238 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
246 struct anon_vma *new_root = anon_vma->root; in lock_anon_vma_root()
249 up_write(&root->rwsem); in lock_anon_vma_root()
251 down_write(&root->rwsem); in lock_anon_vma_root()
259 up_write(&root->rwsem); in unlock_anon_vma_root()
264 * Returns 0 on success, -ENOMEM on failure.
269 * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before
270 * the call, we can identify this case by checking (!dst->anon_vma &&
271 * src->anon_vma).
273 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
286 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { in anon_vma_clone()
297 anon_vma = pavc->anon_vma; in anon_vma_clone()
306 * it has a self-parent reference and at least one child. in anon_vma_clone()
308 if (!dst->anon_vma && src->anon_vma && in anon_vma_clone()
309 anon_vma->num_children < 2 && in anon_vma_clone()
310 anon_vma->num_active_vmas == 0) in anon_vma_clone()
311 dst->anon_vma = anon_vma; in anon_vma_clone()
313 if (dst->anon_vma) in anon_vma_clone()
314 dst->anon_vma->num_active_vmas++; in anon_vma_clone()
320 * dst->anon_vma is dropped here otherwise its num_active_vmas can in anon_vma_clone()
323 * about dst->anon_vma if anon_vma_clone() failed. in anon_vma_clone()
325 dst->anon_vma = NULL; in anon_vma_clone()
327 return -ENOMEM; in anon_vma_clone()
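The reuse condition above can be read as a predicate: an anon_vma may be handed to the forked VMA only if no live VMA uses it and it has at most its self-parent child, so reuse cannot make the hierarchy degenerate. A hypothetical helper expressing the same test:

static bool anon_vma_reusable(const struct anon_vma *anon_vma)
{
	/* no VMA currently maps through it, and only the self-parent child */
	return anon_vma->num_active_vmas == 0 &&
	       anon_vma->num_children < 2;
}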
333 * Returns 0 on success, non-zero on failure.
342 if (!pvma->anon_vma) in anon_vma_fork()
346 vma->anon_vma = NULL; in anon_vma_fork()
350 * so rmap can find non-COWed pages in child processes. in anon_vma_fork()
357 if (vma->anon_vma) in anon_vma_fork()
364 anon_vma->num_active_vmas++; in anon_vma_fork()
373 anon_vma->root = pvma->anon_vma->root; in anon_vma_fork()
374 anon_vma->parent = pvma->anon_vma; in anon_vma_fork()
380 get_anon_vma(anon_vma->root); in anon_vma_fork()
382 vma->anon_vma = anon_vma; in anon_vma_fork()
385 anon_vma->parent->num_children++; in anon_vma_fork()
394 return -ENOMEM; in anon_vma_fork()
406 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
407 struct anon_vma *anon_vma = avc->anon_vma; in unlink_anon_vmas()
410 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); in unlink_anon_vmas()
413 * Leave empty anon_vmas on the list - we'll need in unlink_anon_vmas()
416 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { in unlink_anon_vmas()
417 anon_vma->parent->num_children--; in unlink_anon_vmas()
421 list_del(&avc->same_vma); in unlink_anon_vmas()
424 if (vma->anon_vma) { in unlink_anon_vmas()
425 vma->anon_vma->num_active_vmas--; in unlink_anon_vmas()
431 vma->anon_vma = NULL; in unlink_anon_vmas()
438 * needing to write-acquire the anon_vma->root->rwsem. in unlink_anon_vmas()
440 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
441 struct anon_vma *anon_vma = avc->anon_vma; in unlink_anon_vmas()
443 VM_WARN_ON(anon_vma->num_children); in unlink_anon_vmas()
444 VM_WARN_ON(anon_vma->num_active_vmas); in unlink_anon_vmas()
447 list_del(&avc->same_vma); in unlink_anon_vmas()
456 init_rwsem(&anon_vma->rwsem); in anon_vma_ctor()
457 atomic_set(&anon_vma->refcount, 0); in anon_vma_ctor()
458 anon_vma->rb_root = RB_ROOT_CACHED; in anon_vma_ctor()
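For context, the constructor above is registered when the anon_vma slab caches are created (anon_vma_init() elsewhere in this file); the sketch below paraphrases that setup and the exact flags may differ. SLAB_TYPESAFE_BY_RCU is what makes the lockless refcount probe in folio_get_anon_vma() further down legitimate.

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU | SLAB_PANIC | SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC | SLAB_ACCOUNT);
}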
490 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
500 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_get_anon_vma()
506 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); in folio_get_anon_vma()
507 if (!atomic_inc_not_zero(&anon_vma->refcount)) { in folio_get_anon_vma()
516 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() in folio_get_anon_vma()
534 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
536 * on !rwc->try_lock case.
546 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_lock_anon_vma_read()
552 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); in folio_lock_anon_vma_read()
553 root_anon_vma = READ_ONCE(anon_vma->root); in folio_lock_anon_vma_read()
554 if (down_read_trylock(&root_anon_vma->rwsem)) { in folio_lock_anon_vma_read()
561 up_read(&root_anon_vma->rwsem); in folio_lock_anon_vma_read()
567 if (rwc && rwc->try_lock) { in folio_lock_anon_vma_read()
569 rwc->contended = true; in folio_lock_anon_vma_read()
574 if (!atomic_inc_not_zero(&anon_vma->refcount)) { in folio_lock_anon_vma_read()
589 if (atomic_dec_and_test(&anon_vma->refcount)) { in folio_lock_anon_vma_read()
592 * and bail -- can't simply use put_anon_vma() because in folio_lock_anon_vma_read()
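Illustrative caller shape (invented helper name): a walker using folio_lock_anon_vma_read() must tolerate a NULL return, which can mean "not anon", "anon_vma already freed", or, when rwc->try_lock is set, "lock contended" (reported via rwc->contended).

static bool walk_folio_anon(struct folio *folio, struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	anon_vma = folio_lock_anon_vma_read(folio, rwc);
	if (!anon_vma)
		return false;	/* check rwc->contended to distinguish the cases */

	/* ... scan anon_vma->rb_root with the root rwsem held for read ... */

	anon_vma_unlock_read(anon_vma);
	return true;
}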
616 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; in try_to_unmap_flush()
618 if (!tlb_ubc->flush_required) in try_to_unmap_flush()
621 arch_tlbbatch_flush(&tlb_ubc->arch); in try_to_unmap_flush()
622 tlb_ubc->flush_required = false; in try_to_unmap_flush()
623 tlb_ubc->writable = false; in try_to_unmap_flush()
629 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; in try_to_unmap_flush_dirty()
631 if (tlb_ubc->writable) in try_to_unmap_flush_dirty()
636 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
637 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
641 ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
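Decoding helpers for the packed counter, as an illustration only (the helper names are invented; the mask defined just above is named TLB_FLUSH_BATCH_PENDING_MASK in rmap.c, and the shift is TLB_FLUSH_BATCH_FLUSHED_SHIFT):

static inline unsigned int tlb_batch_pending_gen(int batch)
{
	/* bits 0-14: generations with a flush still outstanding */
	return batch & TLB_FLUSH_BATCH_PENDING_MASK;
}

static inline unsigned int tlb_batch_flushed_gen(int batch)
{
	/* bits 16-30: generations whose flush has completed */
	return (batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT) &
	       TLB_FLUSH_BATCH_PENDING_MASK;
}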
648 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; in set_tlb_ubc_flush_pending()
655 arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr); in set_tlb_ubc_flush_pending()
656 tlb_ubc->flush_required = true; in set_tlb_ubc_flush_pending()
659 * Ensure compiler does not re-order the setting of tlb_flush_batched in set_tlb_ubc_flush_pending()
663 batch = atomic_read(&mm->tlb_flush_batched); in set_tlb_ubc_flush_pending()
671 if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) in set_tlb_ubc_flush_pending()
674 atomic_inc(&mm->tlb_flush_batched); in set_tlb_ubc_flush_pending()
683 tlb_ubc->writable = true; in set_tlb_ubc_flush_pending()
715 int batch = atomic_read(&mm->tlb_flush_batched); in flush_tlb_batched_pending()
723 * mm->tlb_flush_batched as is, to avoid losing flushing. in flush_tlb_batched_pending()
725 atomic_cmpxchg(&mm->tlb_flush_batched, batch, in flush_tlb_batched_pending()
754 if (!vma->anon_vma || !page__anon_vma || in page_address_in_vma()
755 vma->anon_vma->root != page__anon_vma->root) in page_address_in_vma()
756 return -EFAULT; in page_address_in_vma()
757 } else if (!vma->vm_file) { in page_address_in_vma()
758 return -EFAULT; in page_address_in_vma()
759 } else if (vma->vm_file->f_mapping != folio->mapping) { in page_address_in_vma()
760 return -EFAULT; in page_address_in_vma()
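Typical caller pattern (sketch, invented helper name): the -EFAULT return is how callers such as KSM detect that the page is not mapped by this VMA at all.

static bool page_mapped_in_this_vma(struct page *page,
				    struct vm_area_struct *vma)
{
	unsigned long addr = page_address_in_vma(page, vma);

	return addr != -EFAULT;	/* -EFAULT: wrong anon_vma root or wrong file */
}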
814 if ((vma->vm_flags & VM_LOCKED) && in folio_referenced_one()
819 pra->vm_flags |= VM_LOCKED; in folio_referenced_one()
838 /* unexpected pmd-mapped folio? */ in folio_referenced_one()
842 pra->mapcount--; in folio_referenced_one()
851 pra->referenced++; in folio_referenced_one()
852 pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; in folio_referenced_one()
855 if (!pra->mapcount) in folio_referenced_one()
864 struct mem_cgroup *memcg = pra->memcg; in invalid_folio_referenced_vma()
879 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) in invalid_folio_referenced_vma()
886 * folio_referenced() - Test if the folio was referenced.
890 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
894 * Return: The number of mappings which referenced the folio. Return -1 if
932 return rwc.contended ? -1 : pra.referenced; in folio_referenced()
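Illustrative caller (sketch loosely modelled on the reclaim path; helper name invented): the -1 return means the rmap lock was contended under try_lock, which is distinct from "zero references" and is usually treated conservatively.

static bool folio_recently_used(struct folio *folio, struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int refs;

	refs = folio_referenced(folio, folio_test_locked(folio), memcg, &vm_flags);
	if (refs == -1)
		return true;	/* could not tell; assume referenced */
	return refs > 0 || (vm_flags & VM_LOCKED);
}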
938 struct vm_area_struct *vma = pvmw->vma; in page_vma_mkclean_one()
940 unsigned long address = pvmw->address; in page_vma_mkclean_one()
947 vma->vm_mm, address, vma_address_end(pvmw)); in page_vma_mkclean_one()
953 address = pvmw->address; in page_vma_mkclean_one()
954 if (pvmw->pte) { in page_vma_mkclean_one()
955 pte_t *pte = pvmw->pte; in page_vma_mkclean_one()
965 set_pte_at(vma->vm_mm, address, pte, entry); in page_vma_mkclean_one()
969 pmd_t *pmd = pvmw->pmd; in page_vma_mkclean_one()
980 set_pmd_at(vma->vm_mm, address, pmd, entry); in page_vma_mkclean_one()
983 /* unexpected pmd-mapped folio? */ in page_vma_mkclean_one()
1010 if (vma->vm_flags & VM_SHARED) in invalid_mkclean_vma()
1042 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
1068 VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); in pfn_mkclean_range()
1089 mapcount += atomic_read(&folio_page(folio, i)->_mapcount); in folio_total_mapcount()
1091 /* But each of those _mapcounts was based on -1 */ in folio_total_mapcount()
1097 * page_move_anon_rmap - move a page to our anon_vma
1108 void *anon_vma = vma->anon_vma; in page_move_anon_rmap()
1120 WRITE_ONCE(folio->mapping, anon_vma); in page_move_anon_rmap()
1125 * __page_set_anon_rmap - set up new anonymous rmap
1135 struct anon_vma *anon_vma = vma->anon_vma; in __page_set_anon_rmap()
1148 anon_vma = anon_vma->root; in __page_set_anon_rmap()
1151 * page_idle does a lockless/optimistic rmap scan on folio->mapping. in __page_set_anon_rmap()
1157 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); in __page_set_anon_rmap()
1158 folio->index = linear_page_index(vma, address); in __page_set_anon_rmap()
1165 * __page_check_anon_rmap - sanity check anonymous rmap addition
1175 * The page's anon-rmap details (mapping and index) are guaranteed to in __page_check_anon_rmap()
1185 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, in __page_check_anon_rmap()
1192 * page_add_anon_rmap - add pte mapping to an anonymous page
1207 atomic_t *mapped = &folio->_nr_pages_mapped; in page_add_anon_rmap()
1209 bool compound = flags & RMAP_COMPOUND; in page_add_anon_rmap() local
1213 if (likely(!compound)) { in page_add_anon_rmap()
1214 first = atomic_inc_and_test(&page->_mapcount); in page_add_anon_rmap()
1223 first = atomic_inc_and_test(&folio->_entire_mapcount); in page_add_anon_rmap()
1228 nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); in page_add_anon_rmap()
1256 mlock_vma_folio(folio, vma, compound); in page_add_anon_rmap()
1260 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
1266 * This means the inc-and-test can be bypassed.
1277 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in folio_add_new_anon_rmap()
1281 /* increment count (starts at -1) */ in folio_add_new_anon_rmap()
1282 atomic_set(&folio->_mapcount, 0); in folio_add_new_anon_rmap()
1285 /* increment count (starts at -1) */ in folio_add_new_anon_rmap()
1286 atomic_set(&folio->_entire_mapcount, 0); in folio_add_new_anon_rmap()
1287 atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); in folio_add_new_anon_rmap()
1293 __page_set_anon_rmap(folio, &folio->page, vma, address, 1); in folio_add_new_anon_rmap()
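A simplified sketch of the fault-path usage (invented helper name; omits the PTE lock, memcg charging, uffd-wp and write/dirty-bit handling): a freshly allocated folio is marked up to date, given its exclusive anon rmap, put on the LRU, and only then made visible through the PTE.

static vm_fault_t map_new_anon_folio(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *pte)
{
	struct folio *folio;

	folio = vma_alloc_zeroed_movable_folio(vma, addr);
	if (!folio)
		return VM_FAULT_OOM;

	__folio_mark_uptodate(folio);
	folio_add_new_anon_rmap(folio, vma, addr);	/* mapcount: -1 -> 0, exclusive */
	folio_add_lru_vma(folio, vma);
	set_pte_at(vma->vm_mm, addr, pte,
		   mk_pte(&folio->page, vma->vm_page_prot));
	return 0;
}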
1297 * folio_add_file_rmap_range - add pte mapping to page range of a folio
1302 * @compound: charge the page as compound or small page
1310 bool compound) in folio_add_file_rmap_range() argument
1312 atomic_t *mapped = &folio->_nr_pages_mapped; in folio_add_file_rmap_range()
1316 VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio); in folio_add_file_rmap_range()
1319 if (likely(!compound)) { in folio_add_file_rmap_range()
1321 first = atomic_inc_and_test(&page->_mapcount); in folio_add_file_rmap_range()
1329 } while (page++, --nr_pages > 0); in folio_add_file_rmap_range()
1333 first = atomic_inc_and_test(&folio->_entire_mapcount); in folio_add_file_rmap_range()
1338 nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); in folio_add_file_rmap_range()
1355 mlock_vma_folio(folio, vma, compound); in folio_add_file_rmap_range()
1359 * page_add_file_rmap - add pte mapping to a file page
1362 * @compound: charge the page as compound or small page
1367 bool compound) in page_add_file_rmap() argument
1372 VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page); in page_add_file_rmap()
1374 if (likely(!compound)) in page_add_file_rmap()
1379 folio_add_file_rmap_range(folio, page, nr_pages, vma, compound); in page_add_file_rmap()
1383 * page_remove_rmap - take down pte mapping from a page
1386 * @compound: uncharge the page as compound or small page
1391 bool compound) in page_remove_rmap() argument
1394 atomic_t *mapped = &folio->_nr_pages_mapped; in page_remove_rmap()
1399 VM_BUG_ON_PAGE(compound && !PageHead(page), page); in page_remove_rmap()
1404 atomic_dec(&folio->_entire_mapcount); in page_remove_rmap()
1409 if (likely(!compound)) { in page_remove_rmap()
1410 last = atomic_add_negative(-1, &page->_mapcount); in page_remove_rmap()
1419 last = atomic_add_negative(-1, &folio->_entire_mapcount); in page_remove_rmap()
1424 nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); in page_remove_rmap()
1442 __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); in page_remove_rmap()
1446 __lruvec_stat_mod_folio(folio, idx, -nr); in page_remove_rmap()
1454 if (!compound || nr < nr_pmdmapped) in page_remove_rmap()
1466 munlock_vma_folio(folio, vma, compound); in page_remove_rmap()
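Illustrative unmap-side pairing (sketch; helper name invented, and it deliberately omits TLB flushing/batching, mlock accounting and the final reference drop): clearing the PTE and removing the rmap go together, with dirty state transferred first.

static void unmap_one_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *pte, struct page *page)
{
	pte_t ptent = ptep_get_and_clear(vma->vm_mm, addr, pte);

	if (pte_dirty(ptent))
		set_page_dirty(page);
	page_remove_rmap(page, vma, false);	/* small page: compound == false */
	/* real callers also flush (or batch) the TLB and put the page */
}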
1475 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1506 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in try_to_unmap_one()
1522 /* Unexpected PMD-mapped THP? */ in try_to_unmap_one()
1529 (vma->vm_flags & VM_LOCKED)) { in try_to_unmap_one()
1538 subpage = folio_page(folio, pfn - folio_pfn(folio)); in try_to_unmap_one()
1605 * architecture must guarantee that a clear->dirty in try_to_unmap_one()
1618 * Now the pte is cleared. If this pte was uffd-wp armed, in try_to_unmap_one()
1620 * it's file-backed, so we don't lose the tracking info. in try_to_unmap_one()
1638 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_unmap_one()
1653 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_unmap_one()
1675 * - clear PTE; barrier; read refcount in try_to_unmap_one()
1676 * - inc refcount; barrier; read PTE in try_to_unmap_one()
1733 if (list_empty(&mm->mmlist)) { in try_to_unmap_one()
1735 if (list_empty(&mm->mmlist)) in try_to_unmap_one()
1736 list_add(&mm->mmlist, &init_mm.mmlist); in try_to_unmap_one()
1751 * This is a locked file-backed folio, in try_to_unmap_one()
1756 * to point at a new folio while a device is in try_to_unmap_one()
1761 dec_mm_counter(mm, mm_counter_file(&folio->page)); in try_to_unmap_one()
1765 if (vma->vm_flags & VM_LOCKED) in try_to_unmap_one()
1786 * try_to_unmap - Try to remove all page table mappings to a folio.
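Illustrative reclaim-side usage (sketch; helper name invented, folio assumed locked as try_to_unmap() requires): the function returns nothing, so callers re-check folio_mapped() to see whether every mapping was removed; with TTU_BATCH_FLUSH the caller must later run try_to_unmap_flush() before the pages can be freed.

static bool reclaim_unmap_folio(struct folio *folio)
{
	if (folio_mapped(folio))
		try_to_unmap(folio, TTU_BATCH_FLUSH);

	return !folio_mapped(folio);	/* true: all PTEs replaced or cleared */
}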
1820 struct mm_struct *mm = vma->vm_mm; in try_to_migrate_one()
1855 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in try_to_migrate_one()
1872 /* PMD-mapped THP migration entry */ in try_to_migrate_one()
1875 pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); in try_to_migrate_one()
1888 /* Unexpected PMD-mapped THP? */ in try_to_migrate_one()
1895 * Our PTE is a non-present device exclusive entry and in try_to_migrate_one()
1901 * changed when hugepage migrations to device private in try_to_migrate_one()
1905 subpage = &folio->page; in try_to_migrate_one()
1907 subpage = folio_page(folio, pfn - folio_pfn(folio)); in try_to_migrate_one()
1972 * architecture must guarantee that a clear->dirty in try_to_migrate_one()
2014 * pteval maps a zone device page and is therefore in try_to_migrate_one()
2023 compound_order(&folio->page)); in try_to_migrate_one()
2035 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_migrate_one()
2050 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_migrate_one()
2110 compound_order(&folio->page)); in try_to_migrate_one()
2118 if (vma->vm_flags & VM_LOCKED) in try_to_migrate_one()
2129 * try_to_migrate - try to replace all page table mappings with swap entries
2185 struct mm_struct *mm = vma->vm_mm; in page_make_device_exclusive_one()
2197 vma->vm_mm, address, min(vma->vm_end, in page_make_device_exclusive_one()
2199 args->owner); in page_make_device_exclusive_one()
2203 /* Unexpected PMD-mapped THP? */ in page_make_device_exclusive_one()
2214 pte_pfn(ptent) - folio_pfn(folio)); in page_make_device_exclusive_one()
2229 if (args->mm == mm && args->address == address && in page_make_device_exclusive_one()
2231 args->valid = true; in page_make_device_exclusive_one()
2265 * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
2272 * folio and replace them with special device exclusive swap entries to
2273 * grant a device exclusive access to the folio.
2308 * make_device_exclusive_range() - Mark a range for exclusive use by a device
2310 * @start: start of the region to mark for exclusive device access
2316 * exclusive access only if the page pointer is non-NULL.
2323 * A driver using this to program access from a device must use a mmu notifier
2324 * critical section to hold a device specific lock during programming. Once
2332 long npages = (end - start) >> PAGE_SHIFT; in make_device_exclusive_range()
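Driver-side usage sketch (invented helper; the helper takes mmap_lock for read around the call, as existing drivers do, and assumes the usual contract that a page converted for exclusive access comes back locked and with a reference held):

static int grab_exclusive_page(struct mm_struct *mm, unsigned long addr,
			       void *owner)
{
	struct page *page = NULL;
	int npages;

	mmap_read_lock(mm);
	npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					     &page, owner);
	mmap_read_unlock(mm);

	if (npages != 1 || !page)
		return -EBUSY;		/* exclusive access was not granted */

	/* ... program the device mapping for @addr here ... */

	unlock_page(page);		/* page was returned locked + referenced */
	put_page(page);
	return 0;
}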
2363 struct anon_vma *root = anon_vma->root; in __put_anon_vma()
2366 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) in __put_anon_vma()
2375 if (rwc->anon_lock) in rmap_walk_anon_lock()
2376 return rwc->anon_lock(folio, rwc); in rmap_walk_anon_lock()
2391 if (rwc->try_lock) { in rmap_walk_anon_lock()
2393 rwc->contended = true; in rmap_walk_anon_lock()
2403 * rmap_walk_anon - do something to anonymous page using the object-based
2430 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; in rmap_walk_anon()
2431 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, in rmap_walk_anon()
2433 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon()
2434 unsigned long address = vma_address(&folio->page, vma); in rmap_walk_anon()
2436 VM_BUG_ON_VMA(address == -EFAULT, vma); in rmap_walk_anon()
2439 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_anon()
2442 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_anon()
2444 if (rwc->done && rwc->done(folio)) in rmap_walk_anon()
2453 * rmap_walk_file - do something to file page using the object-based rmap method
2469 * The page lock not only makes sure that page->mapping cannot in rmap_walk_file()
2472 * so we can safely take mapping->i_mmap_rwsem. in rmap_walk_file()
2480 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; in rmap_walk_file()
2485 if (rwc->try_lock) { in rmap_walk_file()
2486 rwc->contended = true; in rmap_walk_file()
2493 vma_interval_tree_foreach(vma, &mapping->i_mmap, in rmap_walk_file()
2495 unsigned long address = vma_address(&folio->page, vma); in rmap_walk_file()
2497 VM_BUG_ON_VMA(address == -EFAULT, vma); in rmap_walk_file()
2500 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_file()
2503 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_file()
2505 if (rwc->done && rwc->done(folio)) in rmap_walk_file()
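Putting the two walkers together, a minimal rmap_walk() client looks like the sketch below (invented helper names; the folio must be locked, which the file walker asserts): rmap_one() is called once per mapping and returns false to stop early.

static bool count_one_mapping(struct folio *folio, struct vm_area_struct *vma,
			      unsigned long address, void *arg)
{
	int *count = arg;

	(*count)++;
	return true;			/* keep walking */
}

static int count_folio_mappings(struct folio *folio)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_one_mapping,
		.arg = &count,
	};

	rmap_walk(folio, &rwc);		/* dispatches to the anon, file or ksm walker */
	return count;
}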
2547 struct anon_vma *anon_vma = vma->anon_vma; in hugepage_add_anon_rmap()
2553 first = atomic_inc_and_test(&folio->_entire_mapcount); in hugepage_add_anon_rmap()
2564 BUG_ON(address < vma->vm_start || address >= vma->vm_end); in hugepage_add_new_anon_rmap()
2565 /* increment count (starts at -1) */ in hugepage_add_new_anon_rmap()
2566 atomic_set(&folio->_entire_mapcount, 0); in hugepage_add_new_anon_rmap()
2568 __page_set_anon_rmap(folio, &folio->page, vma, address, 1); in hugepage_add_new_anon_rmap()