Lines matching refs: vm_mm — each entry gives the line number in the kernel's mm/memory.c, the matching source line, and the enclosing function.
491 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
730 set_pte_at(vma->vm_mm, address, ptep, pte); in restore_exclusive_pte()
914 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_page()
927 struct mm_struct *src_mm = src_vma->vm_mm; in copy_present_pte()
978 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_pte()
1005 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pte_range()
1006 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pte_range()
1142 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pmd_range()
1143 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pmd_range()
1179 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pud_range()
1180 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pud_range()
1216 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_p4d_range()
1274 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_page_range()
1275 struct mm_struct *src_mm = src_vma->vm_mm; in copy_page_range()
1639 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1727 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, in unmap_vmas()
1758 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in zap_page_range_single()
1761 tlb_gather_mmu(&tlb, vma->vm_mm); in zap_page_range_single()
1762 update_hiwater_rss(vma->vm_mm); in zap_page_range_single()
1843 inc_mm_counter(vma->vm_mm, mm_counter_file(page)); in insert_page_into_pte_locked()
1845 set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot)); in insert_page_into_pte_locked()
1867 pte = get_locked_pte(vma->vm_mm, addr, &ptl); in insert_page()
1898 struct mm_struct *const mm = vma->vm_mm; in insert_pages()
1973 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_pages()
2019 BUG_ON(mmap_read_trylock(vma->vm_mm)); in vm_insert_page()
2111 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2437 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range_notrack()
2812 struct mm_struct *mm = vma->vm_mm; in __wp_page_copy_user()
3057 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
3232 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
3356 mm_tlb_flush_pending(vmf->vma->vm_mm))) in do_wp_page()
3610 vma->vm_mm, vmf->address & PAGE_MASK, in remove_device_exclusive_entry()
3614 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in remove_device_exclusive_entry()
3649 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in pte_marker_clear()
3662 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); in pte_marker_clear()
3741 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
3758 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3806 vma->vm_mm, GFP_KERNEL, in do_swap_page()
3837 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
3848 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
3905 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3978 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in do_swap_page()
3979 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); in do_swap_page()
4013 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
4014 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
4083 if (pte_alloc(vma->vm_mm, vmf->pmd)) in do_anonymous_page()
4088 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
4091 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
4099 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4117 if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) in do_anonymous_page()
4133 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
4142 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
4153 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); in do_anonymous_page()
4159 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
4202 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in __do_fault()
4242 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
4247 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
4280 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); in do_set_pmd()
4285 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
4295 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); in do_set_pmd()
4304 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
4353 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr); in set_pte_range()
4358 add_mm_counter(vma->vm_mm, mm_counter_file(page), nr); in set_pte_range()
4361 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); in set_pte_range()
4407 ret = check_stable_address_space(vma->vm_mm); in finish_fault()
4420 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); in finish_fault()
4421 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) in finish_fault()
4425 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in finish_fault()
4522 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); in do_fault_around()
4600 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, in do_cow_fault()
4681 struct mm_struct *vm_mm = vma->vm_mm; in do_fault() local
4688 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, in do_fault()
4716 pte_free(vm_mm, vmf->prealloc_pte); in do_fault()
4825 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_numa_page()
4967 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, in handle_pte_fault()
5042 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
5259 struct mm_struct *mm = vma->vm_mm; in handle_mm_fault()
5285 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
5632 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
5653 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
5697 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) in generic_access_phys()
5712 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) in generic_access_phys()
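The hits above share one pattern: each function is handed a struct vm_area_struct and reaches the owning address space through vma->vm_mm, which is then passed to the page-table and accounting helpers (pte_offset_map_lock, set_pte_at, inc_mm_counter, and friends). The sketch below is not code from this file; my_install_pte() is a made-up, illustrative helper that strings those same calls together to show how vm_mm threads through a typical PTE update.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Illustrative sketch only: my_install_pte() is a hypothetical helper, not a
 * kernel function.  It mirrors the pattern visible in the listing above
 * (e.g. insert_page_into_pte_locked() and finish_fault()): derive the mm
 * from the VMA, lock the PTE, account against that mm, install the mapping.
 */
static int my_install_pte(struct vm_area_struct *vma, pmd_t *pmd,
			  unsigned long addr, struct page *page)
{
	struct mm_struct *mm = vma->vm_mm;	/* the address space this VMA belongs to */
	spinlock_t *ptl;
	pte_t *pte;

	/* Map and lock the PTE for this address, as the listed fault paths do. */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -EAGAIN;			/* PTE page went away under us */
	if (!pte_none(ptep_get(pte))) {
		pte_unmap_unlock(pte, ptl);
		return -EBUSY;			/* something is already mapped here */
	}

	/* Both the RSS counter and the PTE write name the same mm. */
	inc_mm_counter(mm, mm_counter_file(page));
	set_pte_at(mm, addr, pte, mk_pte(page, vma->vm_page_prot));
	/* Real kernel paths also update the rmap here; omitted in this sketch. */

	pte_unmap_unlock(pte, ptl);
	return 0;
}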