Lines matching refs:vmf in fs/dax.c (the Linux kernel's DAX page-fault path)
745 struct address_space *mapping, struct vm_fault *vmf, in dax_insert_entry() argument
769 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); in dax_insert_entry()
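The two matches above are inside dax_insert_entry(), the helper that installs (or upgrades) the DAX entry in the mapping's XArray and, via dax_associate_entry(), ties the backing pages to the faulting VMA. From the call sites matched below (1061, 1089, 1408), its signature in this era of fs/dax.c looks like the following sketch; this is a hedged reconstruction from v5.16-era code, not verified against this exact tree:

/*
 * Sketch of the signature implied by the call sites:
 * (xas, mapping, vmf, entry, pfn, flags, dirty).
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty);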
1054 struct vm_fault *vmf) in dax_load_hole() argument
1057 unsigned long vaddr = vmf->address; in dax_load_hole()
1061 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1064 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); in dax_load_hole()
1065 trace_dax_load_hole(inode, vmf, ret); in dax_load_hole()
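dax_load_hole() services a PTE read fault over a hole by mapping the shared zero page and recording a zero-page entry so later faults find it. A hedged reconstruction from the matches above (v5.16-era; my_zero_pfn(), pfn_to_pfn_t() and the DAX_ZERO_PAGE flag come from that era and are assumptions here):

static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	/* assumed: the shared zero page's pfn, as in v5.16-era fs/dax.c */
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	/* remember the zero-page entry in the XArray (line 1061 above) */
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	/* map the zero page read-only at the faulting address */
	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}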
1070 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1073 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole()
1074 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_pmd_load_hole()
1075 struct vm_area_struct *vma = vmf->vma; in dax_pmd_load_hole()
1083 zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); in dax_pmd_load_hole()
1089 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
1098 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in dax_pmd_load_hole()
1099 if (!pmd_none(*(vmf->pmd))) { in dax_pmd_load_hole()
1105 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in dax_pmd_load_hole()
1108 pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); in dax_pmd_load_hole()
1110 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); in dax_pmd_load_hole()
1112 trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1118 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1122 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
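The matches at 1070-1118 are the real dax_pmd_load_hole(); the second declaration at 1122 is presumably the !CONFIG_FS_DAX_PMD stub that just returns VM_FAULT_FALLBACK. The real version maps the huge zero page, depositing a preallocated page table where the architecture requires one, and falls back to PTEs if the huge zero page is unavailable or the PMD slot was populated underneath it. A hedged reconstruction (v5.16-era; the DAX_PMD | DAX_ZERO_PAGE flags and the arch_needs_pgtable_deposit() branch are assumptions from that era):

static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	/* some architectures need a page table deposited for huge entries */
	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		/* lost a race: someone already populated this PMD */
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}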
1332 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, in dax_fault_cow_page() argument
1336 unsigned long vaddr = vmf->address; in dax_fault_cow_page()
1343 clear_user_highpage(vmf->cow_page, vaddr); in dax_fault_cow_page()
1347 sector, vmf->cow_page, vaddr); in dax_fault_cow_page()
1358 __SetPageUptodate(vmf->cow_page); in dax_fault_cow_page()
1359 ret = finish_fault(vmf); in dax_fault_cow_page()
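dax_fault_cow_page() handles write faults on MAP_PRIVATE mappings: the private COW page is either zeroed (hole/unwritten extent) or filled from the on-media block, then handed to finish_fault() to be wired into the page tables. A hedged reconstruction from the matches above; dax_iomap_sector(), copy_cow_page_dax() and dax_fault_return() are v5.16-era helper names and are assumptions here:

static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
		const struct iomap_iter *iter)
{
	vm_fault_t ret;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	unsigned long vaddr = vmf->address;
	sector_t sector = dax_iomap_sector(&iter->iomap, pos);
	int error = 0;

	switch (iter->iomap.type) {
	case IOMAP_HOLE:
	case IOMAP_UNWRITTEN:
		/* nothing on media yet: hand userspace a zeroed page */
		clear_user_highpage(vmf->cow_page, vaddr);
		break;
	case IOMAP_MAPPED:
		/* copy the existing block into the private COW page */
		error = copy_cow_page_dax(iter->iomap.bdev, iter->iomap.dax_dev,
					  sector, vmf->cow_page, vaddr);
		break;
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

	if (error)
		return dax_fault_return(error);

	__SetPageUptodate(vmf->cow_page);
	ret = finish_fault(vmf);
	if (!ret)
		return VM_FAULT_DONE_COW;
	return ret;
}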
1374 static vm_fault_t dax_fault_iter(struct vm_fault *vmf, in dax_fault_iter() argument
1378 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_fault_iter()
1382 bool write = vmf->flags & FAULT_FLAG_WRITE; in dax_fault_iter()
1383 bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap); in dax_fault_iter()
1388 if (!pmd && vmf->cow_page) in dax_fault_iter()
1389 return dax_fault_cow_page(vmf, iter); in dax_fault_iter()
1395 return dax_load_hole(xas, mapping, entry, vmf); in dax_fault_iter()
1396 return dax_pmd_load_hole(xas, vmf, iomap, entry); in dax_fault_iter()
1408 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags, in dax_fault_iter()
1416 return vmf_insert_pfn_pmd(vmf, pfn, write); in dax_fault_iter()
1420 return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); in dax_fault_iter()
1421 return vmf_insert_mixed(vmf->vma, vmf->address, pfn); in dax_fault_iter()
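dax_fault_iter() is the common per-extent worker for both PTE and PMD faults: COW pages are handled first, reads of holes and unwritten extents get zero pages, and mapped extents get their pfn inserted; for synchronous (MAP_SYNC) writes the pfn is returned via *pfnp instead of being made writable immediately. A hedged reconstruction from the matches above (v5.16-era; dax_iomap_pfn() and dax_fault_synchronous_pfnp() are that era's helpers and are assumptions here):

static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
		const struct iomap_iter *iter, pfn_t *pfnp,
		struct xa_state *xas, void **entry, bool pmd)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const struct iomap *iomap = &iter->iomap;
	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync = dax_fault_is_synchronous(iter->flags, vmf->vma, iomap);
	unsigned long entry_flags = pmd ? DAX_PMD : 0;
	int err = 0;
	pfn_t pfn;

	if (!pmd && vmf->cow_page)
		return dax_fault_cow_page(vmf, iter);

	/* reads of holes and unwritten extents are served by zero pages */
	if (!write &&
	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
		if (!pmd)
			return dax_load_hole(xas, mapping, entry, vmf);
		return dax_pmd_load_hole(xas, vmf, iomap, entry);
	}

	if (iomap->type != IOMAP_MAPPED) {
		WARN_ON_ONCE(1);
		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
	}

	err = dax_iomap_pfn(&iter->iomap, pos, size, &pfn);
	if (err)
		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
				  write && !sync);

	/* MAP_SYNC: hand the pfn back so the fs can fsync before mkwrite */
	if (sync)
		return dax_fault_synchronous_pfnp(pfnp, pfn);

	if (pmd)
		return vmf_insert_pfn_pmd(vmf, pfn, write);
	if (write)
		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}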
1424 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pte_fault() argument
1427 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pte_fault()
1428 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1431 .pos = (loff_t)vmf->pgoff << PAGE_SHIFT, in dax_iomap_pte_fault()
1439 trace_dax_pte_fault(iter.inode, vmf, ret); in dax_iomap_pte_fault()
1450 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) in dax_iomap_pte_fault()
1465 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { in dax_iomap_pte_fault()
1476 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); in dax_iomap_pte_fault()
1480 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); in dax_iomap_pte_fault()
1496 trace_dax_pte_fault_done(iter.inode, vmf, ret); in dax_iomap_pte_fault()
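dax_iomap_pte_fault() wraps dax_fault_iter() in the PTE-level bookkeeping: grab and lock the mapping entry, bail out if a racing PMD fault already covers the address, then loop over the iomap extents. A condensed, hedged sketch of its flow (v5.16-era; grab_mapping_entry(), dax_unlock_entry() and the IOMAP_F_NEW major-fault accounting are assumptions from that era):

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct iomap_iter iter = {
		.inode	= mapping->host,
		.pos	= (loff_t)vmf->pgoff << PAGE_SHIFT,
		.len	= PAGE_SIZE,
		.flags	= IOMAP_FAULT,
	};
	vm_fault_t ret = 0;
	void *entry;
	int error;

	trace_dax_pte_fault(iter.inode, vmf, ret);

	/* the caller serializes us against truncate, so this test is stable */
	if (iter.pos >= i_size_read(iter.inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		iter.flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/* a racing PMD fault may already cover this PTE; just retry */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
			iter.processed = -EIO;	/* fs corruption? */
			continue;
		}

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
		if (ret != VM_FAULT_SIGBUS &&
		    (iter.iomap.flags & IOMAP_F_NEW)) {
			/* newly allocated blocks count as a major fault */
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			ret |= VM_FAULT_MAJOR;
		}

		if (!(ret & VM_FAULT_ERROR))
			iter.processed = PAGE_SIZE;
	}

	if (iomap_errp)
		*iomap_errp = error;
	if (!ret && error)
		ret = dax_fault_return(error);

unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(iter.inode, vmf, ret);
	return ret;
}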
1501 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, in dax_fault_check_fallback() argument
1504 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_fault_check_fallback()
1505 bool write = vmf->flags & FAULT_FLAG_WRITE; in dax_fault_check_fallback()
1513 if ((vmf->pgoff & PG_PMD_COLOUR) != in dax_fault_check_fallback()
1514 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) in dax_fault_check_fallback()
1518 if (write && !(vmf->vma->vm_flags & VM_SHARED)) in dax_fault_check_fallback()
1522 if (pmd_addr < vmf->vma->vm_start) in dax_fault_check_fallback()
1524 if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end) in dax_fault_check_fallback()
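dax_fault_check_fallback() gathers the reasons a PMD fault must fall back to PTEs: the file offset's PMD colour must match the virtual address, writes to private mappings are COWed as PTEs, and the whole PMD must fit inside the VMA. All of these checks are visible in the matches above; a hedged reconstruction (the final file-size check against max_pgoff is an assumption from v5.16-era code):

static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
		pgoff_t max_pgoff)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/*
	 * The faulting address's PMD offset (colour) must match the PMD
	 * offset from the start of the file, so a PMD range in the page
	 * tables overlaps exactly with a PMD range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		return true;

	/* fall back to PTEs if we're going to COW */
	if (write && !(vmf->vma->vm_flags & VM_SHARED))
		return true;

	/* the PMD must lie entirely within the VMA */
	if (pmd_addr < vmf->vma->vm_start)
		return true;
	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return true;

	/* assumed (v5.16-era): the PMD must not extend beyond EOF */
	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
		return true;

	return false;
}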
1534 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pmd_fault() argument
1537 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pmd_fault()
1538 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1549 if (vmf->flags & FAULT_FLAG_WRITE) in dax_iomap_pmd_fault()
1559 trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); in dax_iomap_pmd_fault()
1566 if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) in dax_iomap_pmd_fault()
1587 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && in dax_iomap_pmd_fault()
1588 !pmd_devmap(*vmf->pmd)) { in dax_iomap_pmd_fault()
1598 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); in dax_iomap_pmd_fault()
1607 split_huge_pmd(vmf->vma, vmf->pmd, vmf->address); in dax_iomap_pmd_fault()
1611 trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret); in dax_iomap_pmd_fault()
1615 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pmd_fault() argument
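The matches at 1534-1611 are the CONFIG_FS_DAX_PMD implementation of dax_iomap_pmd_fault(); the second declaration at 1615 is presumably the !CONFIG_FS_DAX_PMD stub that simply returns VM_FAULT_FALLBACK. A condensed, hedged sketch of the real handler's flow (v5.16-era; some error-path detail elided and not confirmed by this listing):

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	struct iomap_iter iter = {
		.inode	= mapping->host,
		.len	= PMD_SIZE,
		.flags	= IOMAP_FAULT,
	};
	vm_fault_t ret = VM_FAULT_FALLBACK;
	pgoff_t max_pgoff;
	void *entry;
	int error;

	if (vmf->flags & FAULT_FLAG_WRITE)
		iter.flags |= IOMAP_WRITE;

	/* caller serializes us against truncate, so i_size is stable here */
	max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);

	trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);

	if (xas.xa_index >= max_pgoff) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
		goto fallback;

	/* an existing PTE entry in the XArray forces a fallback */
	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto fallback;
	}

	/* a racing PTE fault may already overlap this PMD; just retry */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
	    !pmd_devmap(*vmf->pmd)) {
		ret = 0;
		goto unlock_entry;
	}

	iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (iomap_length(&iter) < PMD_SIZE)
			continue;	/* not enough room for a huge mapping */

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
		if (ret != VM_FAULT_FALLBACK)
			iter.processed = PMD_SIZE;
	}

unlock_entry:
	dax_unlock_entry(&xas, entry);
fallback:
	if (ret == VM_FAULT_FALLBACK) {
		split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
	return ret;
}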
1635 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, in dax_iomap_fault() argument
1640 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); in dax_iomap_fault()
1642 return dax_iomap_pmd_fault(vmf, pfnp, ops); in dax_iomap_fault()
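dax_iomap_fault() is the exported entry point filesystems wire into their ->fault and ->huge_fault handlers; the two matches show it dispatching on the fault size. A hedged sketch of the dispatch (v5.16-era):

vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}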
1659 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) in dax_insert_pfn_mkwrite() argument
1661 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite()
1662 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1673 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
1681 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); in dax_insert_pfn_mkwrite()
1684 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); in dax_insert_pfn_mkwrite()
1689 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()
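dax_insert_pfn_mkwrite() re-validates the XArray entry for the faulted range, marks it dirty, and installs a writable mapping of the previously returned pfn at the right order. A hedged reconstruction from the matches above; the get_unlocked_entry()/put_unlocked_entry() names and arities come from v5.16-era fs/dax.c and vary slightly across releases:

static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, order);
	/* did we race with someone splitting the entry or suchlike? */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}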
1703 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, in dax_finish_sync_fault() argument
1707 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; in dax_finish_sync_fault()
1711 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); in dax_finish_sync_fault()
1714 return dax_insert_pfn_mkwrite(vmf, pfn, order); in dax_finish_sync_fault()
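dax_finish_sync_fault() completes a MAP_SYNC write fault: it fsyncs the faulted range so the block allocation is durable, then upgrades the still read-only mapping to writable via dax_insert_pfn_mkwrite(). A hedged sketch (pe_order() is the era's pe_size-to-order helper and an assumption here):

vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	/* make metadata durable before the mapping becomes writable */
	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}

Filesystems call it when dax_iomap_fault() returns VM_FAULT_NEEDDSYNC, roughly as below (a sketch modeled on the ext4/XFS pattern; fs_iomap_ops is a hypothetical ops table, not verbatim from either filesystem):

	/* in a filesystem's huge_fault handler */
	result = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &fs_iomap_ops);
	if ((vmf->flags & FAULT_FLAG_WRITE) && result == VM_FAULT_NEEDDSYNC)
		result = dax_finish_sync_fault(vmf, pe_size, pfn);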