Lines matching refs: vm_mm (uses of vma->vm_mm in mm/memory.c)
729 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
1506 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1582 struct mm_struct *mm = vma->vm_mm; in unmap_vmas()
1601 struct mm_struct *mm = vma->vm_mm; in zap_page_range()
1627 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single()
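
The entries above (print_bad_pte(), unmap_page_range() and the zap_*() paths) all begin the same way: take the mm from vma->vm_mm and walk the page tables from there. Below is a minimal, hedged sketch of that descent (pgd -> p4d -> pud -> pmd -> pte), not the kernel's exact code; walk_example() is an invented name and everything beyond empty/bad-entry checks is omitted.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical helper: descend from vma->vm_mm to the pte for addr. */
static pte_t *walk_example(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;	/* page tables belong to the mm, not the vma */
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))	/* real code also handles huge pmds here */
		return NULL;
	return pte_offset_map(pmd, addr);	/* caller must pte_unmap() the result */
}
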
1695 struct mm_struct *mm = vma->vm_mm; in insert_page()
1762 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); in vm_insert_page()
1773 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
2085 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range()
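
insert_page(), insert_pfn() and remap_pfn_range() are the other direction: a driver hands in a vma, and the mm to install into is again recovered from vma->vm_mm. A hedged sketch of the usual caller side, a character device's mmap handler; example_mmap() and phys_base are invented for the example.

#include <linux/fs.h>
#include <linux/mm.h>

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long phys_base = 0xfd000000UL;		/* hypothetical MMIO base */
	unsigned long size = vma->vm_end - vma->vm_start;

	/* remap_pfn_range() digs vma->vm_mm out of the vma itself. */
	return remap_pfn_range(vma, vma->vm_start,
			       phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
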
2489 struct mm_struct *mm = vma->vm_mm; in wp_page_copy()
2637 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, in finish_mkwrite_fault()
2756 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_wp_page()
2911 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) in do_swap_page()
2917 migration_entry_wait(vma->vm_mm, vmf->pmd, in do_swap_page()
2967 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_swap_page()
2978 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); in do_swap_page()
2989 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); in do_swap_page()
3014 if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, in do_swap_page()
3023 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_swap_page()
3043 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_swap_page()
3044 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); in do_swap_page()
3055 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); in do_swap_page()
3056 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); in do_swap_page()
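
The fault paths above (do_wp_page(), finish_mkwrite_fault(), do_swap_page()) share one pattern: map and lock the pte through vma->vm_mm, re-check it against vmf->orig_pte in case another thread resolved the fault first, and only then write the new value. A simplified sketch of that pattern, assuming a caller that has already built the new pte; install_example_pte() is an invented name and all memcg/rmap work is left out.

#include <linux/mm.h>

static vm_fault_t install_example_pte(struct vm_fault *vmf, pte_t newpte)
{
	struct mm_struct *mm = vmf->vma->vm_mm;

	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
		/* Raced with another fault on the same pte: back out quietly. */
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;
	}
	set_pte_at(mm, vmf->address, vmf->pte, newpte);
	update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return 0;
}
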
3142 if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address)) in do_anonymous_page()
3151 !mm_forbids_zeropage(vma->vm_mm)) { in do_anonymous_page()
3154 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, in do_anonymous_page()
3158 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
3176 if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg, in do_anonymous_page()
3191 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in do_anonymous_page()
3196 ret = check_stable_address_space(vma->vm_mm); in do_anonymous_page()
3208 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_anonymous_page()
3213 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in do_anonymous_page()
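
Both do_swap_page() and do_anonymous_page() also keep the per-mm RSS counters on vma->vm_mm in sync: a newly installed anonymous page bumps MM_ANONPAGES, and a page pulled back from swap drops MM_SWAPENTS. The *_fast variants in the listing are internal to mm/memory.c; the sketch below uses the generic <linux/mm.h> helpers and an invented account_example() wrapper.

#include <linux/mm.h>

static void account_example(struct vm_area_struct *vma, bool from_swap)
{
	struct mm_struct *mm = vma->vm_mm;

	inc_mm_counter(mm, MM_ANONPAGES);		/* one more anonymous page charged to this mm */
	if (from_swap)
		dec_mm_counter(mm, MM_SWAPENTS);	/* and one fewer swap entry owed by it */
}
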
3279 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in pte_alloc_one_map()
3285 mm_inc_nr_ptes(vma->vm_mm); in pte_alloc_one_map()
3286 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in pte_alloc_one_map()
3289 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) { in pte_alloc_one_map()
3316 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, in pte_alloc_one_map()
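
pte_alloc_one_map() shows the allocation side: a pte page is preallocated from vma->vm_mm, and the pmd is populated under pmd_lock() only if no one else got there first, with mm_inc_nr_ptes() keeping the page-table accounting straight. A rough sketch under 4.19-era signatures (pte_alloc_one() still takes the address there); populate_example() is invented, and the huge-pmd and racing-fault handling is omitted (do_fault() later pte_free()s an unused preallocation, as line 3760 below shows).

#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>

static int populate_example(struct vm_fault *vmf)
{
	struct mm_struct *mm = vmf->vma->vm_mm;
	spinlock_t *ptl;

	if (!vmf->prealloc_pte) {
		vmf->prealloc_pte = pte_alloc_one(mm, vmf->address);
		if (!vmf->prealloc_pte)
			return -ENOMEM;
	}

	ptl = pmd_lock(mm, vmf->pmd);
	if (pmd_none(*vmf->pmd)) {
		mm_inc_nr_ptes(mm);			/* account the new page-table page */
		pmd_populate(mm, vmf->pmd, vmf->prealloc_pte);
		vmf->prealloc_pte = NULL;		/* now owned by the page table */
	}
	spin_unlock(ptl);
	return 0;
}
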
3339 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); in deposit_prealloc_pte()
3344 mm_inc_nr_ptes(vma->vm_mm); in deposit_prealloc_pte()
3368 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address); in do_set_pmd()
3374 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_set_pmd()
3385 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); in do_set_pmd()
3393 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_set_pmd()
3460 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in alloc_set_pte()
3465 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); in alloc_set_pte()
3468 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); in alloc_set_pte()
3508 ret = check_stable_address_space(vmf->vma->vm_mm); in finish_fault()
3606 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm, in do_fault_around()
3675 if (mem_cgroup_try_charge_delay(vmf->cow_page, vma->vm_mm, GFP_KERNEL, in do_cow_fault()
3760 pte_free(vma->vm_mm, vmf->prealloc_pte); in do_fault()
3798 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); in do_numa_page()
3809 pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte); in do_numa_page()
3814 ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte); in do_numa_page()
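
do_numa_page() edits a live pte transactionally: ptep_modify_prot_start() on vma->vm_mm detaches the pte, the flags are rebuilt, and ptep_modify_prot_commit() writes it back (these 4.19-era helpers take the mm; later kernels pass the vma). A hedged sketch of that sequence, which must run with the pte lock held; restore_prot_example() is an invented name.

#include <linux/mm.h>

/* Caller holds the pte lock for ptep. */
static void restore_prot_example(struct vm_area_struct *vma,
				 unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;

	pte = ptep_modify_prot_start(mm, addr, ptep);
	pte = pte_modify(pte, vma->vm_page_prot);	/* restore the vma's normal protection */
	pte = pte_mkyoung(pte);
	ptep_modify_prot_commit(mm, addr, ptep, pte);
	update_mmu_cache(vma, addr, ptep);
}
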
3992 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); in handle_pte_fault()
4038 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault()
4124 count_memcg_event_mm(vma->vm_mm, PGFAULT); in handle_mm_fault()
4142 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); in handle_mm_fault()
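
handle_mm_fault() is the entry point the architecture fault handlers call: they look the vma up in the faulting mm, and the vma then carries vm_mm back down into __handle_mm_fault() and everything listed above. A simplified sketch of that caller side with 4.19-era mmap_sem locking and an invented fault_example() wrapper; real handlers also deal with retries, stack growth and signals.

#include <linux/mm.h>

static vm_fault_t fault_example(struct mm_struct *mm, unsigned long address,
				unsigned int flags)
{
	struct vm_area_struct *vma;
	vm_fault_t ret;

	down_read(&mm->mmap_sem);		/* renamed mmap_lock in later kernels */
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {	/* no expand_stack() handling here */
		up_read(&mm->mmap_sem);
		return VM_FAULT_SIGSEGV;
	}
	ret = handle_mm_fault(vma, address, flags);
	up_read(&mm->mmap_sem);
	return ret;
}
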
4361 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
4382 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
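
follow_pfn() and follow_phys() close the list: both resolve a user address through follow_pte() on vma->vm_mm and hand back the raw pfn or physical address, which is only meaningful for VM_IO/VM_PFNMAP mappings. A small usage sketch with an invented lookup_example() caller.

#include <linux/mm.h>
#include <linux/printk.h>

static int lookup_example(struct vm_area_struct *vma, unsigned long address)
{
	unsigned long pfn;
	int ret;

	ret = follow_pfn(vma, address, &pfn);	/* walks vma->vm_mm's page tables internally */
	if (ret)
		return ret;

	pr_info("addr %#lx -> pfn %#lx\n", address, pfn);
	return 0;
}
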