Lines matching refs: vm_mm

522 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
749 set_pte_at(vma->vm_mm, address, ptep, pte); in restore_exclusive_pte()
933 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_page()
946 struct mm_struct *src_mm = src_vma->vm_mm; in copy_present_pte()
994 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_pte()
1022 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pte_range()
1023 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pte_range()
1146 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pmd_range()
1147 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pmd_range()
1183 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pud_range()
1184 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pud_range()
1220 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_p4d_range()
1278 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_page_range()
1279 struct mm_struct *src_mm = src_vma->vm_mm; in copy_page_range()
1634 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1720 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, in unmap_vmas()
1740 struct maple_tree *mt = &vma->vm_mm->mm_mt; in zap_page_range()
1747 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in zap_page_range()
1749 tlb_gather_mmu(&tlb, vma->vm_mm); in zap_page_range()
1750 update_hiwater_rss(vma->vm_mm); in zap_page_range()
1776 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in zap_page_range_single()
1781 tlb_gather_mmu(&tlb, vma->vm_mm); in zap_page_range_single()
1782 update_hiwater_rss(vma->vm_mm); in zap_page_range_single()
1862 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); in insert_page_into_pte_locked()
1864 set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot)); in insert_page_into_pte_locked()
1886 pte = get_locked_pte(vma->vm_mm, addr, &ptl); in insert_page()
1918 struct mm_struct *const mm = vma->vm_mm; in insert_pages()
1991 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_pages()
2049 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_page()
2141 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2487 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range_notrack()
2839 spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in pte_unmap_same()
2858 struct mm_struct *mm = vma->vm_mm; in __wp_page_copy_user()
3100 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
3274 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3388 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3626 if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags)) in remove_device_exclusive_entry()
3629 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
3633 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
3666 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
3673 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
3745 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3752 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3802 vma->vm_mm, GFP_KERNEL, in do_swap_page()
3833 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3843 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
3853 locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags); in do_swap_page()
3900 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3970 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_swap_page()
3971 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); in do_swap_page()
4008 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
4009 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
4081 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
4090 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
4093 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
4099 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4117 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) in do_anonymous_page()
4133 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
4140 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4151 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_anonymous_page()
4155 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
4197 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4237 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4242 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
4276 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
4281 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4292 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); in do_set_pmd()
4301 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
4341 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte()
4345 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); in do_set_pte()
4348 set_pte_at(vma->vm_mm, addr, vmf->pte, entry); in do_set_pte()
4391 ret = check_stable_address_space(vma->vm_mm); in finish_fault()
4404 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
4405 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
4416 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
4517 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
4576 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, in do_cow_fault()
4649 struct mm_struct *vm_mm = vma->vm_mm; in do_fault() local
4663 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, in do_fault()
4690 pte_free(vm_mm, vmf->prealloc_pte); in do_fault()
4726 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); in do_numa_page()
4963 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in handle_pte_fault()
5015 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
5195 count_memcg_event_mm(vma->vm_mm, PGFAULT); in handle_mm_fault()
5215 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
5393 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
5414 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
5458 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) in generic_access_phys()
5473 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) in generic_access_phys()
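
The hits above concentrate in the fault-handling and mapping paths (copy_page_range, zap_page_range, do_swap_page, do_anonymous_page, finish_fault, handle_mm_fault), all of which reach the owning address space through the vma->vm_mm back-pointer of struct vm_area_struct and then use it for PTE locking, counters, and set_pte_at(). The following is a minimal sketch of that recurring pattern, not code from the file itself: example_install_pte() is a hypothetical helper, written against the ~v6.1-era calls that appear in the listing (pte_offset_map_lock, mk_pte, inc_mm_counter, set_pte_at).

#include <linux/mm.h>

/*
 * Hypothetical helper, not part of mm/memory.c: illustrates the common
 * vma->vm_mm pattern seen in the hits above.
 */
static vm_fault_t example_install_pte(struct vm_fault *vmf, struct page *page)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mm_struct *mm = vma->vm_mm;	/* VMA -> owning mm_struct */
	pte_t entry;

	/* Map and lock the PTE, as do_anonymous_page()/finish_fault() do. */
	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
	if (!pte_none(*vmf->pte)) {
		/* Raced with another thread that already populated the PTE. */
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return VM_FAULT_NOPAGE;
	}

	/* Account the page against this mm and install the mapping. */
	entry = mk_pte(page, vma->vm_page_prot);
	inc_mm_counter(mm, mm_counter_file(page));
	set_pte_at(mm, vmf->address, vmf->pte, entry);

	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return 0;
}

Passing the same mm to pte_offset_map_lock(), the counter helpers, and set_pte_at() is the point of the pattern: the VMA only describes a range, while the mm_struct it points to owns the page tables, the RSS counters, and the locks.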