Lines matching refs:vmf (references to struct vm_fault *vmf; the functions below are from mm/filemap.c in the Linux kernel)

1678 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)  in __folio_lock_or_retry()  argument
1680 unsigned int flags = vmf->flags; in __folio_lock_or_retry()
1690 release_fault_lock(vmf); in __folio_lock_or_retry()
1702 release_fault_lock(vmf); in __folio_lock_or_retry()
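
The four matches above are the whole fault-retry protocol in __folio_lock_or_retry(): vmf->flags (line 1680) decides whether the handler may sleep at all, and release_fault_lock(vmf) drops the mmap or per-VMA lock on both paths that return VM_FAULT_RETRY (lines 1690 and 1702). A condensed paraphrase of the surrounding v6.6-era code, with comments added (a sketch, not the verbatim source):

    vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
    {
        unsigned int flags = vmf->flags;                /* line 1680 */

        if (fault_flag_allow_retry_first(flags)) {
            /* NOWAIT callers must not sleep; they keep the lock. */
            if (flags & FAULT_FLAG_RETRY_NOWAIT)
                return VM_FAULT_RETRY;

            release_fault_lock(vmf);                    /* line 1690 */
            if (flags & FAULT_FLAG_KILLABLE)
                folio_wait_locked_killable(folio);
            else
                folio_wait_locked(folio);
            return VM_FAULT_RETRY;
        }
        if (flags & FAULT_FLAG_KILLABLE) {
            if (__folio_lock_killable(folio)) {
                /* Interrupted by a fatal signal while waiting. */
                release_fault_lock(vmf);                /* line 1702 */
                return VM_FAULT_RETRY;
            }
        } else {
            __folio_lock(folio);
        }
        return 0;                                       /* folio is locked */
    }
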
3099 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, in lock_folio_maybe_drop_mmap() argument
3110 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in lock_folio_maybe_drop_mmap()
3113 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); in lock_folio_maybe_drop_mmap()
3114 if (vmf->flags & FAULT_FLAG_KILLABLE) { in lock_folio_maybe_drop_mmap()
3123 mmap_read_unlock(vmf->vma->vm_mm); in lock_folio_maybe_drop_mmap()
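
These references (lines 3099-3123) show the same pattern one level up: lock_folio_maybe_drop_mmap() first tries a non-blocking folio_trylock(), and only consults vmf->flags when it would have to sleep. A condensed paraphrase of the function around these lines (comments mine):

    static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf,
            struct folio *folio, struct file **fpin)
    {
        if (folio_trylock(folio))
            return 1;                                   /* got it, no sleeping */

        /* NOWAIT faults return VM_FAULT_RETRY with the lock still held. */
        if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)       /* line 3110 */
            return 0;

        *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);   /* line 3113 */
        if (vmf->flags & FAULT_FLAG_KILLABLE) {         /* line 3114 */
            if (__folio_lock_killable(folio)) {
                /* Fatal signal: drop mmap_lock ourselves if
                 * maybe_unlock_mmap_for_io() did not pin the file. */
                if (*fpin == NULL)
                    mmap_read_unlock(vmf->vma->vm_mm);  /* line 3123 */
                return 0;
            }
        } else {
            __folio_lock(folio);
        }
        return 1;
    }
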
3139 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) in do_sync_mmap_readahead() argument
3141 struct file *file = vmf->vma->vm_file; in do_sync_mmap_readahead()
3144 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); in do_sync_mmap_readahead()
3146 unsigned long vm_flags = vmf->vma->vm_flags; in do_sync_mmap_readahead()
3152 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_sync_mmap_readahead()
3174 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_sync_mmap_readahead()
3194 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_sync_mmap_readahead()
3195 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); in do_sync_mmap_readahead()
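
In do_sync_mmap_readahead() (lines 3139-3195) every branch that does I/O first calls maybe_unlock_mmap_for_io(); line 3195 builds the classic "mmap read-around" window, centered on the faulting page. A condensed paraphrase of that final branch (the VM_HUGEPAGE, VM_SEQ_READ and mmap_miss branches are elided, and the exact readahead call varies somewhat by kernel version):

    static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
    {
        struct file *file = vmf->vma->vm_file;          /* line 3141 */
        struct file_ra_state *ra = &file->f_ra;
        DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
        struct file *fpin = NULL;

        /* No read-ahead for explicitly random access, or if disabled. */
        if ((vmf->vma->vm_flags & VM_RAND_READ) || !ra->ra_pages)
            return fpin;

        /* ... VM_HUGEPAGE / VM_SEQ_READ / mmap_miss handling elided ... */

        /* mmap read-around: half the window before the fault, half after. */
        fpin = maybe_unlock_mmap_for_io(vmf, fpin);     /* line 3194 */
        ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
        ra->size = ra->ra_pages;
        ra->async_size = ra->ra_pages / 4;
        ractl._index = ra->start;
        page_cache_ra_order(&ractl, ra, 0);
        return fpin;
    }
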
3208 static struct file *do_async_mmap_readahead(struct vm_fault *vmf, in do_async_mmap_readahead() argument
3211 struct file *file = vmf->vma->vm_file; in do_async_mmap_readahead()
3213 DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff); in do_async_mmap_readahead()
3218 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) in do_async_mmap_readahead()
3226 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in do_async_mmap_readahead()
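
do_async_mmap_readahead() (lines 3208-3226) runs when the folio was already in the page cache: if the folio carries the readahead marker, the next window is kicked off asynchronously, again dropping mmap_lock around the I/O. A condensed paraphrase (mmap_miss bookkeeping elided):

    static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
            struct folio *folio)
    {
        struct file *file = vmf->vma->vm_file;          /* line 3211 */
        struct file_ra_state *ra = &file->f_ra;
        DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
        struct file *fpin = NULL;

        if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) /* line 3218 */
            return fpin;

        if (folio_test_readahead(folio)) {
            fpin = maybe_unlock_mmap_for_io(vmf, fpin); /* line 3226 */
            page_cache_async_ra(&ractl, folio, ra->ra_pages);
        }
        return fpin;
    }
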
3255 vm_fault_t filemap_fault(struct vm_fault *vmf) in filemap_fault() argument
3258 struct file *file = vmf->vma->vm_file; in filemap_fault()
3262 pgoff_t max_idx, index = vmf->pgoff; in filemap_fault()
3280 if (!(vmf->flags & FAULT_FLAG_TRIED)) in filemap_fault()
3281 fpin = do_async_mmap_readahead(vmf, folio); in filemap_fault()
3289 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); in filemap_fault()
3291 fpin = do_sync_mmap_readahead(vmf); in filemap_fault()
3303 vmf->gfp_mask); in filemap_fault()
3312 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) in filemap_fault()
3365 vmf->page = folio_file_page(folio, index); in filemap_fault()
3375 fpin = maybe_unlock_mmap_for_io(vmf, fpin); in filemap_fault()
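
The filemap_fault() references (lines 3255-3375) all hang off one skeleton: vmf->pgoff selects the folio, vmf->flags steers readahead and retry, and vmf->page carries the result back to the fault handler. A heavily condensed sketch of that skeleton (locking, truncate revalidation, uptodate retries and error paths elided; not usable as-is):

    vm_fault_t filemap_fault(struct vm_fault *vmf)
    {
        struct file *file = vmf->vma->vm_file;          /* line 3258 */
        struct address_space *mapping = file->f_mapping;
        pgoff_t index = vmf->pgoff;                     /* line 3262 */
        struct file *fpin = NULL;
        struct folio *folio;
        vm_fault_t ret = 0;

        folio = filemap_get_folio(mapping, index);
        if (!IS_ERR(folio)) {
            /* Cache hit: maybe start async readahead (lines 3280-3281). */
            if (!(vmf->flags & FAULT_FLAG_TRIED))
                fpin = do_async_mmap_readahead(vmf, folio);
        } else {
            /* Cache miss: count a major fault, do sync readahead, then
             * allocate with vmf->gfp_mask (lines 3289-3303). */
            count_vm_event(PGMAJFAULT);
            count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
            ret = VM_FAULT_MAJOR;
            fpin = do_sync_mmap_readahead(vmf);
            /* ... __filemap_get_folio(..., vmf->gfp_mask) + read ... */
        }

        if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) /* line 3312 */
            goto out_retry;
        /* ... revalidate folio->mapping, uptodate and i_size checks ... */

        vmf->page = folio_file_page(folio, index);      /* line 3365 */
        return ret | VM_FAULT_LOCKED;

    out_retry:
        if (fpin)
            fput(fpin);
        return ret | VM_FAULT_RETRY;
    }
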
3403 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, in filemap_map_pmd() argument
3406 struct mm_struct *mm = vmf->vma->vm_mm; in filemap_map_pmd()
3409 if (pmd_trans_huge(*vmf->pmd)) { in filemap_map_pmd()
3415 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { in filemap_map_pmd()
3417 vm_fault_t ret = do_set_pmd(vmf, page); in filemap_map_pmd()
3425 if (pmd_none(*vmf->pmd)) in filemap_map_pmd()
3426 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte); in filemap_map_pmd()
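
filemap_map_pmd() (lines 3403-3426) is the PMD-level gate for fault-around: it either finishes the fault with a huge mapping or guarantees a PTE table exists (using the preallocated vmf->prealloc_pte) so the helpers below can install individual PTEs. A condensed paraphrase:

    static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
            pgoff_t start)
    {
        struct mm_struct *mm = vmf->vma->vm_mm;         /* line 3406 */

        /* Raced with another fault: a huge mapping is already there. */
        if (pmd_trans_huge(*vmf->pmd)) {                /* line 3409 */
            folio_unlock(folio);
            folio_put(folio);
            return true;
        }

        if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
            struct page *page = folio_file_page(folio, start);

            if (do_set_pmd(vmf, page) == 0) {           /* line 3417 */
                /* Mapped the whole folio with one PMD entry. */
                folio_unlock(folio);
                return true;
            }
        }

        /* Fall back to PTEs: make sure a page table is installed. */
        if (pmd_none(*vmf->pmd))                        /* line 3425 */
            pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);

        return false;
    }
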
3476 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, in filemap_map_folio_range() argument
3484 pte_t *old_ptep = vmf->pte; in filemap_map_folio_range()
3497 if (!pte_none(ptep_get(&vmf->pte[count]))) in filemap_map_folio_range()
3504 set_pte_range(vmf, folio, page, count, addr); in filemap_map_folio_range()
3506 if (in_range(vmf->address, addr, count * PAGE_SIZE)) in filemap_map_folio_range()
3512 vmf->pte += count; in filemap_map_folio_range()
3518 set_pte_range(vmf, folio, page, count, addr); in filemap_map_folio_range()
3520 if (in_range(vmf->address, addr, count * PAGE_SIZE)) in filemap_map_folio_range()
3524 vmf->pte = old_ptep; in filemap_map_folio_range()
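
filemap_map_folio_range() (lines 3476-3524) maps a run of pages from one large folio. The loop batches consecutive empty PTEs and installs each batch with a single set_pte_range() call; vmf->pte is advanced across the batches and restored for the caller at the end (line 3524). A condensed paraphrase (HWPoison skips and the mmap_miss parameter/accounting elided):

    static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
            struct folio *folio, unsigned long start,
            unsigned long addr, unsigned int nr_pages)
    {
        struct page *page = folio_page(folio, start);
        pte_t *old_ptep = vmf->pte;                     /* line 3484 */
        unsigned int count = 0;
        vm_fault_t ret = 0;

        do {
            /* Leave populated PTEs (incl. markers) to the fault path. */
            if (!pte_none(ptep_get(&vmf->pte[count]))) /* line 3497 */
                goto skip;
            count++;
            continue;
    skip:
            if (count) {
                set_pte_range(vmf, folio, page, count, addr); /* line 3504 */
                folio_ref_add(folio, count);
                if (in_range(vmf->address, addr, count * PAGE_SIZE))
                    ret = VM_FAULT_NOPAGE;             /* faulting PTE done */
            }
            /* Skip the blocking slot and continue after it. */
            count++;
            page += count;
            vmf->pte += count;                          /* line 3512 */
            addr += count * PAGE_SIZE;
            count = 0;
        } while (--nr_pages > 0);

        if (count) {                                    /* trailing batch */
            set_pte_range(vmf, folio, page, count, addr); /* line 3518 */
            folio_ref_add(folio, count);
            if (in_range(vmf->address, addr, count * PAGE_SIZE))
                ret = VM_FAULT_NOPAGE;
        }

        vmf->pte = old_ptep;                            /* line 3524 */
        return ret;
    }
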
3529 static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, in filemap_map_order0_folio() argument
3546 if (!pte_none(ptep_get(vmf->pte))) in filemap_map_order0_folio()
3549 if (vmf->address == addr) in filemap_map_order0_folio()
3552 set_pte_range(vmf, folio, page, 1, addr); in filemap_map_order0_folio()
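
filemap_map_order0_folio() (lines 3529-3552) is the single-page fast path of the same idea: one pte_none() check, and VM_FAULT_NOPAGE only if the PTE being installed is the one that actually faulted (line 3549). A condensed paraphrase (HWPoison and mmap_miss handling elided):

    static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
            struct folio *folio, unsigned long addr)
    {
        struct page *page = &folio->page;
        vm_fault_t ret = 0;

        /* Leave populated PTEs (incl. markers) to the fault path. */
        if (!pte_none(ptep_get(vmf->pte)))              /* line 3546 */
            return ret;

        if (vmf->address == addr)                       /* line 3549 */
            ret = VM_FAULT_NOPAGE;

        set_pte_range(vmf, folio, page, 1, addr);       /* line 3552 */
        folio_ref_inc(folio);
        return ret;
    }
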
3558 vm_fault_t filemap_map_pages(struct vm_fault *vmf, in filemap_map_pages() argument
3561 struct vm_area_struct *vma = vmf->vma; in filemap_map_pages()
3576 if (filemap_map_pmd(vmf, folio, start_pgoff)) { in filemap_map_pages()
3582 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in filemap_map_pages()
3583 if (!vmf->pte) { in filemap_map_pages()
3592 vmf->pte += xas.xa_index - last_pgoff; in filemap_map_pages()
3598 ret |= filemap_map_order0_folio(vmf, in filemap_map_pages()
3601 ret |= filemap_map_folio_range(vmf, folio, in filemap_map_pages()
3608 pte_unmap_unlock(vmf->pte, vmf->ptl); in filemap_map_pages()
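
filemap_map_pages() (lines 3558-3608) ties the pieces together: it walks every uptodate folio in the fault-around window under RCU, tries the PMD shortcut once, then maps each folio's pages under a single PTE-table lock, stepping vmf->pte by the page-offset delta (line 3592). A condensed skeleton (statistics elided; next_uptodate_folio() is a local helper in the same file, and the helper calls use the mmap_miss-free signatures from the sketches above):

    vm_fault_t filemap_map_pages(struct vm_fault *vmf,
            pgoff_t start_pgoff, pgoff_t end_pgoff)
    {
        struct vm_area_struct *vma = vmf->vma;          /* line 3561 */
        struct address_space *mapping = vma->vm_file->f_mapping;
        XA_STATE(xas, &mapping->i_pages, start_pgoff);
        pgoff_t last_pgoff = start_pgoff, end;
        unsigned int nr_pages;
        unsigned long addr;
        struct folio *folio;
        vm_fault_t ret = 0;

        rcu_read_lock();
        folio = next_uptodate_folio(&xas, mapping, end_pgoff);
        if (!folio)
            goto out;

        if (filemap_map_pmd(vmf, folio, start_pgoff)) { /* line 3576 */
            ret = VM_FAULT_NOPAGE;
            goto out;
        }

        addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
        if (!vmf->pte) {                                /* line 3583 */
            folio_unlock(folio);
            folio_put(folio);
            goto out;
        }
        do {
            addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
            vmf->pte += xas.xa_index - last_pgoff;      /* line 3592 */
            last_pgoff = xas.xa_index;
            end = folio_next_index(folio) - 1;
            nr_pages = min(end, end_pgoff) - xas.xa_index + 1;

            if (!folio_test_large(folio))
                ret |= filemap_map_order0_folio(vmf, folio, addr);
            else
                ret |= filemap_map_folio_range(vmf, folio,
                        xas.xa_index - folio->index, addr, nr_pages);

            folio_unlock(folio);
            folio_put(folio);
        } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)));
        pte_unmap_unlock(vmf->pte, vmf->ptl);           /* line 3608 */
    out:
        rcu_read_unlock();
        return ret;
    }
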
3622 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) in filemap_page_mkwrite() argument
3624 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in filemap_page_mkwrite()
3625 struct folio *folio = page_folio(vmf->page); in filemap_page_mkwrite()
3629 file_update_time(vmf->vma->vm_file); in filemap_page_mkwrite()
3677 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) in filemap_page_mkwrite() argument
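
Two definitions are listed because mm/filemap.c builds one of two versions: the real implementation at line 3622 under CONFIG_MMU, and (as the second match at line 3677 suggests) a !CONFIG_MMU stub that simply returns VM_FAULT_SIGBUS. A condensed paraphrase of the MMU version:

    vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
    {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping; /* 3624 */
        struct folio *folio = page_folio(vmf->page);    /* line 3625 */
        vm_fault_t ret = VM_FAULT_LOCKED;

        sb_start_pagefault(mapping->host->i_sb);
        file_update_time(vmf->vma->vm_file);            /* line 3629 */
        folio_lock(folio);
        if (folio->mapping != mapping) {
            /* Truncated under us; tell the caller to retry the fault. */
            folio_unlock(folio);
            ret = VM_FAULT_NOPAGE;
            goto out;
        }
        /* Dirty the folio now so a concurrent fs freeze writes it back. */
        folio_mark_dirty(folio);
        folio_wait_stable(folio);
    out:
        sb_end_pagefault(mapping->host->i_sb);
        return ret;
    }

The three exported handlers in this listing (filemap_fault, filemap_map_pages, filemap_page_mkwrite) are normally consumed together as a filesystem's vm_operations_struct, mirroring the kernel's own generic_file_vm_ops; a minimal sketch of that wiring ("myfs" is a hypothetical filesystem name):

    #include <linux/fs.h>
    #include <linux/mm.h>

    static const struct vm_operations_struct myfs_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = filemap_page_mkwrite,
    };

    static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
    {
        file_accessed(file);
        vma->vm_ops = &myfs_file_vm_ops;
        return 0;
    }
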