Lines matching refs:vma

137 static void anon_vma_chain_link(struct vm_area_struct *vma,  in anon_vma_chain_link()  argument
141 avc->vma = vma; in anon_vma_chain_link()
143 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
175 int __anon_vma_prepare(struct vm_area_struct *vma) in __anon_vma_prepare() argument
177 struct mm_struct *mm = vma->vm_mm; in __anon_vma_prepare()
187 anon_vma = find_mergeable_anon_vma(vma); in __anon_vma_prepare()
199 if (likely(!vma->anon_vma)) { in __anon_vma_prepare()
200 vma->anon_vma = anon_vma; in __anon_vma_prepare()
201 anon_vma_chain_link(vma, avc, anon_vma); in __anon_vma_prepare()
315 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) in anon_vma_fork() argument
326 vma->anon_vma = NULL; in anon_vma_fork()
332 error = anon_vma_clone(vma, pvma); in anon_vma_fork()
337 if (vma->anon_vma) in anon_vma_fork()
361 vma->anon_vma = anon_vma; in anon_vma_fork()
363 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_fork()
372 unlink_anon_vmas(vma); in anon_vma_fork()
376 void unlink_anon_vmas(struct vm_area_struct *vma) in unlink_anon_vmas() argument
385 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
403 if (vma->anon_vma) in unlink_anon_vmas()
404 vma->anon_vma->degree--; in unlink_anon_vmas()
412 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
687 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
696 if (!vma->anon_vma || !page__anon_vma || in page_address_in_vma()
697 vma->anon_vma->root != page__anon_vma->root) in page_address_in_vma()
700 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma()
704 address = __vma_address(page, vma); in page_address_in_vma()
705 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in page_address_in_vma()
753 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
759 .vma = vma, in page_referenced_one()
767 if (vma->vm_flags & VM_LOCKED) { in page_referenced_one()
774 if (ptep_clear_flush_young_notify(vma, address, in page_referenced_one()
784 if (likely(!(vma->vm_flags & VM_SEQ_READ))) in page_referenced_one()
788 if (pmdp_clear_flush_young_notify(vma, address, in page_referenced_one()
806 pra->vm_flags |= vma->vm_flags; in page_referenced_one()
815 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) in invalid_page_referenced_vma() argument
820 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
883 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
888 .vma = vma, in page_mkclean_one()
899 end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page))); in page_mkclean_one()
900 mmu_notifier_invalidate_range_start(vma->vm_mm, start, end); in page_mkclean_one()
914 flush_cache_page(vma, address, pte_pfn(*pte)); in page_mkclean_one()
915 entry = ptep_clear_flush(vma, address, pte); in page_mkclean_one()
918 set_pte_at(vma->vm_mm, address, pte, entry); in page_mkclean_one()
928 flush_cache_page(vma, address, page_to_pfn(page)); in page_mkclean_one()
929 entry = pmdp_huge_clear_flush(vma, address, pmd); in page_mkclean_one()
932 set_pmd_at(vma->vm_mm, address, pmd, entry); in page_mkclean_one()
952 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); in page_mkclean_one()
957 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) in invalid_mkclean_vma() argument
959 if (vma->vm_flags & VM_SHARED) in invalid_mkclean_vma()
1000 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) in page_move_anon_rmap() argument
1002 struct anon_vma *anon_vma = vma->anon_vma; in page_move_anon_rmap()
1007 VM_BUG_ON_VMA(!anon_vma, vma); in page_move_anon_rmap()
1026 struct vm_area_struct *vma, unsigned long address, int exclusive) in __page_set_anon_rmap() argument
1028 struct anon_vma *anon_vma = vma->anon_vma; in __page_set_anon_rmap()
1045 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
1055 struct vm_area_struct *vma, unsigned long address) in __page_check_anon_rmap() argument
1070 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); in __page_check_anon_rmap()
1071 BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address)); in __page_check_anon_rmap()
1088 struct vm_area_struct *vma, unsigned long address, bool compound) in page_add_anon_rmap() argument
1090 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); in page_add_anon_rmap()
1099 struct vm_area_struct *vma, unsigned long address, int flags) in do_page_add_anon_rmap() argument
1133 __page_set_anon_rmap(page, vma, address, in do_page_add_anon_rmap()
1136 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
1151 struct vm_area_struct *vma, unsigned long address, bool compound) in page_add_new_anon_rmap() argument
1155 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in page_add_new_anon_rmap()
1169 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
1336 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1339 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1342 .vma = vma, in try_to_unmap_one()
1352 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) in try_to_unmap_one()
1360 split_huge_pmd_address(vma, address, in try_to_unmap_one()
1372 end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page))); in try_to_unmap_one()
1378 adjust_range_if_pmd_sharing_possible(vma, &start, &end); in try_to_unmap_one()
1380 mmu_notifier_invalidate_range_start(vma->vm_mm, start, end); in try_to_unmap_one()
1399 if (vma->vm_flags & VM_LOCKED) { in try_to_unmap_one()
1431 flush_cache_range(vma, start, end); in try_to_unmap_one()
1432 flush_tlb_range(vma, start, end); in try_to_unmap_one()
1475 if (ptep_clear_flush_young_notify(vma, address, in try_to_unmap_one()
1484 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); in try_to_unmap_one()
1498 pteval = ptep_clear_flush(vma, address, pvmw.pte); in try_to_unmap_one()
1515 vma_mmu_pagesize(vma)); in try_to_unmap_one()
1521 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { in try_to_unmap_one()
1541 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_unmap_one()
1607 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_unmap_one()
1660 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); in try_to_unmap_one()
1665 bool is_vma_temporary_stack(struct vm_area_struct *vma) in is_vma_temporary_stack() argument
1667 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); in is_vma_temporary_stack()
1672 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == in is_vma_temporary_stack()
1679 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) in invalid_migration_vma() argument
1681 return is_vma_temporary_stack(vma); in invalid_migration_vma()
1824 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon() local
1825 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
1829 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_anon()
1832 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_anon()
1860 struct vm_area_struct *vma; in rmap_walk_file() local
1877 vma_interval_tree_foreach(vma, &mapping->i_mmap, in rmap_walk_file()
1879 unsigned long address = vma_address(page, vma); in rmap_walk_file()
1883 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_file()
1886 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_file()
1925 struct vm_area_struct *vma, unsigned long address, int exclusive) in __hugepage_set_anon_rmap() argument
1927 struct anon_vma *anon_vma = vma->anon_vma; in __hugepage_set_anon_rmap()
1938 page->index = linear_page_index(vma, address); in __hugepage_set_anon_rmap()
1942 struct vm_area_struct *vma, unsigned long address) in hugepage_add_anon_rmap() argument
1944 struct anon_vma *anon_vma = vma->anon_vma; in hugepage_add_anon_rmap()
1952 __hugepage_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
1956 struct vm_area_struct *vma, unsigned long address) in hugepage_add_new_anon_rmap() argument
1958 BUG_ON(address < vma->vm_start || address >= vma->vm_end); in hugepage_add_new_anon_rmap()
1960 __hugepage_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()
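
The rmap_one-style callbacks listed above (page_referenced_one(), page_mkclean_one(), try_to_unmap_one()) and the invalid_*_vma() filters are not called directly; they are driven by rmap_walk(), which dispatches to rmap_walk_anon() or rmap_walk_file() and invokes the callback once for every vma that may map the page, skipping vmas rejected by ->invalid_vma(). As a rough, hedged sketch only, the snippet below shows how such a callback is wired into a struct rmap_walk_control, in the style of page_referenced() in the ~4.x mm/rmap.c this listing appears to come from. my_rmap_one() and count_mappings() are made-up illustration names, and real callers also observe rmap_walk()'s page-locking rules (page_referenced(), for instance, trylocks the page first), so treat this as a shape, not a drop-in.

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical callback: called once per vma that may map @page. */
static bool my_rmap_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	int *nr_mappings = arg;

	(*nr_mappings)++;	/* per-vma work would go here */
	return true;		/* true: keep walking; false: stop the walk early */
}

/* Hypothetical caller, mirroring the shape of page_referenced(). */
static int count_mappings(struct page *page)
{
	int nr_mappings = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = my_rmap_one,
		.arg = &nr_mappings,
		/* .invalid_vma, .anon_lock and .done are optional hooks */
	};

	rmap_walk(page, &rwc);
	return nr_mappings;
}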