Lines matching refs: vmf (mm/filemap.c)
2900 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, in lock_page_maybe_drop_mmap() argument
2911 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in lock_page_maybe_drop_mmap()
2914 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); in lock_page_maybe_drop_mmap()
2915 if (vmf->flags & FAULT_FLAG_KILLABLE) { in lock_page_maybe_drop_mmap()
2924 mmap_read_unlock(vmf->vma->vm_mm); in lock_page_maybe_drop_mmap()
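
The five matches above are in lock_page_maybe_drop_mmap(), the helper that tries to take the page lock during a fault and, if it would have to sleep, releases mmap_lock first (pinning the file via maybe_unlock_mmap_for_io() so the mapping stays alive across the retry). A condensed sketch, paraphrased from the v5.12-era mm/filemap.c these line numbers correspond to, not verbatim source:

/*
 * Sketch (paraphrased, comments added for this listing). Returns 1 with
 * the page locked, or 0 when the caller should bail out with
 * VM_FAULT_RETRY.
 */
static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
                                     struct file **fpin)
{
        if (trylock_page(page))
                return 1;

        /*
         * NOWAIT callers must not sleep: return VM_FAULT_RETRY with
         * mmap_lock still held, as FAULT_FLAG_RETRY_NOWAIT requires.
         */
        if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                return 0;

        /* About to block on the page lock: drop mmap_lock if permitted. */
        *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
        if (vmf->flags & FAULT_FLAG_KILLABLE) {
                if (__lock_page_killable(page)) {
                        /*
                         * Fatal signal arrived. Fault handlers only check
                         * signals on VM_FAULT_RETRY, so drop mmap_lock
                         * ourselves if maybe_unlock_mmap_for_io() could not.
                         */
                        if (*fpin == NULL)
                                mmap_read_unlock(vmf->vma->vm_mm);
                        return 0;
                }
        } else
                __lock_page(page);

        return 1;
}

If *fpin comes back non-NULL, it is the extra file reference the caller must fput() after returning VM_FAULT_RETRY.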
2940 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) in do_sync_mmap_readahead() argument
2942 struct file *file = vmf->vma->vm_file; in do_sync_mmap_readahead()
2945 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); in do_sync_mmap_readahead()
2950 if (vmf->vma->vm_flags & VM_RAND_READ) in do_sync_mmap_readahead()
2955 if (vmf->vma->vm_flags & VM_SEQ_READ) { in do_sync_mmap_readahead()
2956 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_sync_mmap_readahead()
2976 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_sync_mmap_readahead()
2977 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); in do_sync_mmap_readahead()
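
These matches cover do_sync_mmap_readahead(), which runs on a page-cache miss and decides how much read-around to issue before the fault blocks on I/O. A condensed sketch, paraphrased from the same era of mm/filemap.c; the readahead helper names and the DEFINE_READAHEAD signature vary between kernel versions, and this follows the five-argument form matched above:

static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;
        struct file_ra_state *ra = &file->f_ra;
        struct address_space *mapping = file->f_mapping;
        DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
        struct file *fpin = NULL;
        unsigned int mmap_miss;

        /* Random-access hint, or readahead disabled: don't bother. */
        if (vmf->vma->vm_flags & VM_RAND_READ)
                return fpin;
        if (!ra->ra_pages)
                return fpin;

        /* Sequential hint: classic synchronous readahead. */
        if (vmf->vma->vm_flags & VM_SEQ_READ) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                page_cache_sync_ra(&ractl, ra->ra_pages);
                return fpin;
        }

        /* Track misses; stop bothering if we miss far more than we hit. */
        mmap_miss = READ_ONCE(ra->mmap_miss);
        if (mmap_miss < MMAP_LOTSAMISS * 10)
                WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
        if (mmap_miss > MMAP_LOTSAMISS)
                return fpin;

        /* mmap read-around: center a ra_pages-sized window on the fault. */
        fpin = maybe_unlock_mmap_for_io(vmf, fpin);
        ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
        ra->size = ra->ra_pages;
        ra->async_size = ra->ra_pages / 4;
        ractl._index = ra->start;
        do_page_cache_ra(&ractl, ra->size, ra->async_size);
        return fpin;
}

The max_t(long, 0, ...) at line 2977 clamps the window start so a fault within the first ra_pages/2 pages of the file cannot underflow the unsigned page offset.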
2990 static struct file *do_async_mmap_readahead(struct vm_fault *vmf, in do_async_mmap_readahead() argument
2993 struct file *file = vmf->vma->vm_file; in do_async_mmap_readahead()
2998 pgoff_t offset = vmf->pgoff; in do_async_mmap_readahead()
3001 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) in do_async_mmap_readahead()
3007 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_async_mmap_readahead()
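
do_async_mmap_readahead() is the cache-hit counterpart: the page was already present, so it only kicks off further readahead if this page carries the readahead marker. A sketch, paraphrased from the same source:

static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
                                            struct page *page)
{
        struct file *file = vmf->vma->vm_file;
        struct file_ra_state *ra = &file->f_ra;
        struct address_space *mapping = file->f_mapping;
        struct file *fpin = NULL;
        unsigned int mmap_miss;
        pgoff_t offset = vmf->pgoff;

        /* Random access, or readahead disabled: nothing to do. */
        if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
                return fpin;

        /* A hit partially offsets the miss counter. */
        mmap_miss = READ_ONCE(ra->mmap_miss);
        if (mmap_miss)
                WRITE_ONCE(ra->mmap_miss, --mmap_miss);

        /* PG_readahead marks the trigger page of the previous window. */
        if (PageReadahead(page)) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                page_cache_async_readahead(mapping, ra, file,
                                           page, offset, ra->ra_pages);
        }
        return fpin;
}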
3037 vm_fault_t filemap_fault(struct vm_fault *vmf) in filemap_fault() argument
3040 struct file *file = vmf->vma->vm_file; in filemap_fault()
3044 pgoff_t offset = vmf->pgoff; in filemap_fault()
3063 if (!(vmf->flags & FAULT_FLAG_TRIED)) in filemap_fault()
3064 fpin = do_async_mmap_readahead(vmf, page); in filemap_fault()
3072 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); in filemap_fault()
3074 fpin = do_sync_mmap_readahead(vmf); in filemap_fault()
3086 vmf->gfp_mask); in filemap_fault()
3095 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) in filemap_fault()
3148 vmf->page = page; in filemap_fault()
3158 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in filemap_fault()
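
filemap_fault() ties the helpers above together: look up the page, pick async or sync readahead depending on hit or miss, lock the page (possibly dropping mmap_lock), and either hand it back in vmf->page or ask the caller to retry. A condensed control-flow sketch, paraphrased from the same era of mm/filemap.c, with the i_size re-checks and truncation handling trimmed for brevity:

vm_fault_t filemap_fault(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;
        struct file *fpin = NULL;
        struct address_space *mapping = file->f_mapping;
        pgoff_t offset = vmf->pgoff;
        vm_fault_t ret = 0;
        struct page *page;
        int error;

        /* Do we have something in the page cache already? */
        page = find_get_page(mapping, offset);
        if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
                /* Found it: try async readahead before taking the lock. */
                fpin = do_async_mmap_readahead(vmf, page);
        } else if (!page) {
                /* Nothing cached: this is a major fault. */
                count_vm_event(PGMAJFAULT);
                count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
                fpin = do_sync_mmap_readahead(vmf);
retry_find:
                page = pagecache_get_page(mapping, offset,
                                          FGP_CREAT|FGP_FOR_MMAP,
                                          vmf->gfp_mask);
                if (!page) {
                        if (fpin)
                                goto out_retry;
                        return VM_FAULT_OOM;
                }
        }

        if (!lock_page_maybe_drop_mmap(vmf, page, &fpin))
                goto out_retry;

        if (unlikely(!PageUptodate(page)))
                goto page_not_uptodate;

        /* If we dropped mmap_lock, let the caller redo the fault. */
        if (fpin) {
                unlock_page(page);
                goto out_retry;
        }

        vmf->page = page;
        return ret | VM_FAULT_LOCKED;

page_not_uptodate:
        /* Re-read the page once, synchronously, to catch I/O errors. */
        fpin = maybe_unlock_mmap_for_io(vmf, fpin);
        error = filemap_read_page(file, mapping, page);
        if (fpin)
                goto out_retry;
        put_page(page);
        if (!error || error == AOP_TRUNCATED_PAGE)
                goto retry_find;
        return VM_FAULT_SIGBUS;

out_retry:
        /* mmap_lock was dropped: return VM_FAULT_RETRY, releasing our pins. */
        if (page)
                put_page(page);
        if (fpin)
                fput(fpin);
        return ret | VM_FAULT_RETRY;
}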
3186 static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page) in filemap_map_pmd() argument
3188 struct mm_struct *mm = vmf->vma->vm_mm; in filemap_map_pmd()
3191 if (pmd_trans_huge(*vmf->pmd)) { in filemap_map_pmd()
3197 if (pmd_none(*vmf->pmd) && PageTransHuge(page)) { in filemap_map_pmd()
3198 vm_fault_t ret = do_set_pmd(vmf, page); in filemap_map_pmd()
3206 if (pmd_none(*vmf->pmd)) { in filemap_map_pmd()
3207 vmf->ptl = pmd_lock(mm, vmf->pmd); in filemap_map_pmd()
3208 if (likely(pmd_none(*vmf->pmd))) { in filemap_map_pmd()
3210 pmd_populate(mm, vmf->pmd, vmf->prealloc_pte); in filemap_map_pmd()
3211 vmf->prealloc_pte = NULL; in filemap_map_pmd()
3213 spin_unlock(vmf->ptl); in filemap_map_pmd()
3217 if (pmd_devmap_trans_unstable(vmf->pmd)) { in filemap_map_pmd()
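
filemap_map_pmd() is the PMD-level step of fault-around: map a whole compound page with a single PMD entry when possible, and otherwise make sure a PTE table is in place (populated from vmf->prealloc_pte) before the per-PTE loop runs. It returns true when the fault is already fully handled at PMD level. Sketch, paraphrased from the same source:

static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
{
        struct mm_struct *mm = vmf->vma->vm_mm;

        /* Huge page already mapped? No need to proceed. */
        if (pmd_trans_huge(*vmf->pmd)) {
                unlock_page(page);
                put_page(page);
                return true;
        }

        if (pmd_none(*vmf->pmd) && PageTransHuge(page)) {
                vm_fault_t ret = do_set_pmd(vmf, page);
                if (!ret) {
                        /* Mapped successfully; reference consumed. */
                        unlock_page(page);
                        return true;
                }
        }

        /* Fall back to PTEs: populate the preallocated page table. */
        if (pmd_none(*vmf->pmd)) {
                vmf->ptl = pmd_lock(mm, vmf->pmd);
                if (likely(pmd_none(*vmf->pmd))) {
                        mm_inc_nr_ptes(mm);
                        pmd_populate(mm, vmf->pmd, vmf->prealloc_pte);
                        vmf->prealloc_pte = NULL;
                }
                spin_unlock(vmf->ptl);
        }

        /* The PMD raced into an unstable state: let the fault retry. */
        if (pmd_devmap_trans_unstable(vmf->pmd)) {
                unlock_page(page);
                put_page(page);
                return true;
        }

        return false;
}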
3285 vm_fault_t filemap_map_pages(struct vm_fault *vmf, in filemap_map_pages() argument
3288 struct vm_area_struct *vma = vmf->vma; in filemap_map_pages()
3303 if (filemap_map_pmd(vmf, head)) { in filemap_map_pages()
3309 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in filemap_map_pages()
3319 vmf->pte += xas.xa_index - last_pgoff; in filemap_map_pages()
3322 if (!pte_none(*vmf->pte)) in filemap_map_pages()
3326 if (vmf->address == addr) in filemap_map_pages()
3329 do_set_pte(vmf, page, addr); in filemap_map_pages()
3331 update_mmu_cache(vma, addr, vmf->pte); in filemap_map_pages()
3338 pte_unmap_unlock(vmf->pte, vmf->ptl); in filemap_map_pages()
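
filemap_map_pages() implements fault-around: under RCU it walks the page cache from start_pgoff to end_pgoff and maps every suitable uptodate page into the page table in one pass, so touches on neighbouring pages never fault at all. A condensed sketch, paraphrased from the same era; first_map_page()/next_map_page() are that era's internal cache iterators, and the loop became folio-based in later kernels:

vm_fault_t filemap_map_pages(struct vm_fault *vmf,
                             pgoff_t start_pgoff, pgoff_t end_pgoff)
{
        struct vm_area_struct *vma = vmf->vma;
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        pgoff_t last_pgoff = start_pgoff;
        unsigned long addr;
        XA_STATE(xas, &mapping->i_pages, start_pgoff);
        struct page *head, *page;
        unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
        vm_fault_t ret = 0;

        rcu_read_lock();
        head = first_map_page(mapping, &xas, end_pgoff);
        if (!head)
                goto out;

        /* Try to map the whole range with one PMD first. */
        if (filemap_map_pmd(vmf, head)) {
                ret = VM_FAULT_NOPAGE;
                goto out;
        }

        addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
        do {
                page = find_subpage(head, xas.xa_index);
                if (PageHWPoison(page))
                        goto unlock;

                if (mmap_miss > 0)
                        mmap_miss--;

                /* Advance addr and the PTE pointer to this index. */
                addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
                vmf->pte += xas.xa_index - last_pgoff;
                last_pgoff = xas.xa_index;

                if (!pte_none(*vmf->pte))
                        goto unlock;

                /* The faulting address itself is handled under the PT lock. */
                if (vmf->address == addr)
                        ret = VM_FAULT_NOPAGE;

                do_set_pte(vmf, page, addr);
                /* No need to invalidate: a not-present page isn't cached. */
                update_mmu_cache(vma, addr, vmf->pte);
                unlock_page(head);
                continue;
unlock:
                unlock_page(head);
                put_page(head);
        } while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
        rcu_read_unlock();
        WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
        return ret;
}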
3346 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) in filemap_page_mkwrite() argument
3348 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in filemap_page_mkwrite()
3349 struct page *page = vmf->page; in filemap_page_mkwrite()
3353 file_update_time(vmf->vma->vm_file); in filemap_page_mkwrite()
3401 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) in filemap_page_mkwrite() argument
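
Both trailing matches are definitions of filemap_page_mkwrite(): the one at line 3346 is the real MMU implementation, while the second at line 3401 sits in the !CONFIG_MMU branch and is a stub that simply returns VM_FAULT_SIGBUS. A sketch of the MMU version, paraphrased from the same source:

vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        struct page *page = vmf->page;
        vm_fault_t ret = VM_FAULT_LOCKED;

        sb_start_pagefault(mapping->host->i_sb);
        file_update_time(vmf->vma->vm_file);
        lock_page(page);
        if (page->mapping != mapping) {
                /* Truncated under us: let the caller retry the fault. */
                unlock_page(page);
                ret = VM_FAULT_NOPAGE;
                goto out;
        }
        /*
         * Dirty the page here so that if a filesystem freeze is in
         * progress, writeback during freezing sees the page and
         * write-protects it again.
         */
        set_page_dirty(page);
        wait_for_stable_page(page);
out:
        sb_end_pagefault(mapping->host->i_sb);
        return ret;
}

The sb_start_pagefault()/sb_end_pagefault() pair brackets the dirtying so a write fault cannot slip past a superblock freeze, which is why the page is marked dirty before wait_for_stable_page().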