Lines in fs/xfs/xfs_file.c matching the tokens "no", "unaligned", "direct" and "access", grouped by containing function; "..." marks elided lines.

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 */
#include <linux/backing-dev.h>

In xfs_is_falloc_aligned():
        struct xfs_mount        *mp = ip->i_mount;
        ...
                if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
                        ...
                        rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
                        ...
                }
                mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
        ...
                mask = mp->m_sb.sb_blocksize - 1;
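
The function above distils to one pattern: if the allocation granule (filesystem block size, or the realtime extent size) is a power of two, alignment can be tested with a cheap mask, otherwise a real remainder is needed, which is what div_u64_rem() provides in the kernel. A minimal userspace sketch of that logic, with illustrative names:

        #include <stdbool.h>
        #include <stdint.h>

        /* Check that pos and len are both multiples of granule. */
        static bool is_falloc_aligned(uint64_t pos, uint64_t len,
                                      uint64_t granule)
        {
                if ((granule & (granule - 1)) == 0) {
                        /* power of two: mask test, as in (pos | len) & mask */
                        uint64_t mask = granule - 1;

                        return ((pos | len) & mask) == 0;
                }
                /* arbitrary granule (e.g. odd rt extent size): true modulo */
                return pos % granule == 0 && len % granule == 0;
        }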

/*
 * ... as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * ...
 */

In xfs_dir_fsync():
        struct xfs_inode        *ip = XFS_I(file->f_mapping->host);
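
As the comment notes, a directory fsync only has to force the log. From userspace the call looks the same as for a regular file; a sketch of the classic create-then-persist sequence (error checking trimmed), where the second fsync() is the one that lands in xfs_dir_fsync():

        #include <fcntl.h>
        #include <unistd.h>

        static int create_durably(const char *dirpath, const char *filepath)
        {
                int fd = open(filepath, O_CREAT | O_WRONLY | O_TRUNC, 0644);

                if (fd < 0)
                        return -1;
                fsync(fd);              /* file data + inode */
                close(fd);

                fd = open(dirpath, O_RDONLY | O_DIRECTORY);
                if (fd < 0)
                        return -1;
                fsync(fd);              /* the directory entry itself */
                close(fd);
                return 0;
        }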

In xfs_fsync_seq():
        if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
                return 0;
        return ip->i_itemp->ili_commit_seq;
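
This is why fdatasync() can be cheaper than fsync() on XFS: with datasync set, the log force is skipped when nothing but timestamps (XFS_ILOG_TIMESTAMP) is dirty. Userspace picks between the two entry points:

        #include <unistd.h>

        static int persist(int fd, int need_timestamps)
        {
                /*
                 * fsync() always forces dirty metadata, timestamps included;
                 * fdatasync() may return without a log force if only
                 * timestamps changed, per xfs_fsync_seq() above.
                 */
                return need_timestamps ? fsync(fd) : fdatasync(fd);
        }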

In xfs_fsync_flush_log():
        error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, ...);
        ...
        spin_lock(&ip->i_itemp->ili_lock);
        ip->i_itemp->ili_fsync_fields = 0;
        spin_unlock(&ip->i_itemp->ili_lock);

In xfs_file_fsync():
        struct xfs_inode        *ip = XFS_I(file->f_mapping->host);
        struct xfs_mount        *mp = ip->i_mount;
        ...
                return -EIO;
        ...
                error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
        else if (mp->m_logdev_targp != mp->m_ddev_targp)
                error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
        ...
        /* ... a no-op we might have to flush the data device cache here. ... */
        if (... &&
            mp->m_logdev_targp == mp->m_ddev_targp) {
                err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
                ...
        }

In xfs_ilock_iocb():
        struct xfs_inode        *ip = XFS_I(file_inode(iocb->ki_filp));

        if (iocb->ki_flags & IOCB_NOWAIT) {
                ...
                        return -EAGAIN;
        }
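
IOCB_NOWAIT is set when userspace issues I/O with RWF_NOWAIT (or via io_uring); xfs_ilock_iocb() then trades sleeping on the inode lock for -EAGAIN. A sketch of the userspace side:

        #define _GNU_SOURCE
        #include <errno.h>
        #include <sys/uio.h>

        /* Returns bytes read, 0 at EOF, or -1 with errno == EAGAIN if the
         * read would have had to block (e.g. on the XFS iolock or on I/O). */
        static ssize_t read_nowait(int fd, void *buf, size_t len, off_t off)
        {
                struct iovec iov = { .iov_base = buf, .iov_len = len };

                return preadv2(fd, &iov, 1, off, RWF_NOWAIT);
        }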

In xfs_file_dio_read():
        struct xfs_inode        *ip = XFS_I(file_inode(iocb->ki_filp));
        ...
        file_accessed(iocb->ki_filp);

In xfs_file_dax_read():
        struct xfs_inode        *ip = XFS_I(iocb->ki_filp->f_mapping->host);
        ...
        file_accessed(iocb->ki_filp);

In xfs_file_buffered_read():
        struct xfs_inode        *ip = XFS_I(file_inode(iocb->ki_filp));

In xfs_file_read_iter():
        struct inode            *inode = file_inode(iocb->ki_filp);
        struct xfs_mount        *mp = XFS_I(inode)->i_mount;
        ...
                return -EIO;
        ...
        else if (iocb->ki_flags & IOCB_DIRECT)
                ...

/*
 * Common pre-write limit and setup checks.
 * ...
 * if called for a direct write beyond i_size.
 */

In xfs_file_write_checks():
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        ...
        if (iocb->ki_flags & IOCB_NOWAIT) {
                ...
                if (error == -EWOULDBLOCK)
                        error = -EAGAIN;
                ...
        }
        ...
        if (iocb->ki_pos <= i_size_read(inode))
                return 0;

        spin_lock(&ip->i_flags_lock);
        ...
        if (iocb->ki_pos > isize) {
                spin_unlock(&ip->i_flags_lock);

                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                ...
                /*
                 * ... we now need to wait for all of them to drain. Non-AIO
                 * ...
                 * no-op.
                 */
                ...
                trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
                error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
                ...
        } else
                spin_unlock(&ip->i_flags_lock);
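
The xfs_zero_range() call preserves the POSIX rule that the gap created by an extending write reads back as zeros; without it, a sub-block extension could expose stale on-disk data. A self-contained userspace check of that invariant (assumes /tmp is writable):

        #include <assert.h>
        #include <fcntl.h>
        #include <unistd.h>

        int main(void)
        {
                char    byte = 'x', gap[64];
                int     fd = open("/tmp/eof-zero-demo",
                                  O_CREAT | O_RDWR | O_TRUNC, 0644);

                pwrite(fd, &byte, 1, 0);        /* i_size is now 1 */
                pwrite(fd, &byte, 1, 4096);     /* extend far past old EOF */

                /* every byte between old EOF and the new write reads zero */
                pread(fd, gap, sizeof(gap), 1);
                for (int i = 0; i < (int)sizeof(gap); i++)
                        assert(gap[i] == 0);

                close(fd);
                unlink("/tmp/eof-zero-demo");
                return 0;
        }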

In xfs_dio_write_end_io():
        struct inode            *inode = file_inode(iocb->ki_filp);
        ...
        loff_t                  offset = iocb->ki_pos;
        ...
        if (xfs_is_shutdown(ip->i_mount))
                return -EIO;
        ...
        XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

        /*
         * ... task-wide nofs context for the following operations. ...
         */
        ...
        /*
         * Unwritten conversion updates the in-core isize after extent
         * conversion but before updating the on-disk size. Updating isize any
         * ...
         */
        ...
        /*
         * We need to update the in-core inode size here so that we don't end up
         * with the on-disk inode size being outside the in-core inode size. We
         * have no other method of updating EOF for AIO, so always do it here
         * ...
         */
        ...
        spin_lock(&ip->i_flags_lock);
        if (...) {
                ...
                spin_unlock(&ip->i_flags_lock);
                ...
        } else {
                spin_unlock(&ip->i_flags_lock);
        }
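
The lock dance at the end, rechecking the size under ip->i_flags_lock, exists because AIO completions can run out of order: EOF must only ever move forward. A generic userspace rendering of the pattern (names are illustrative, not kernel API):

        #include <pthread.h>
        #include <stdint.h>

        struct sz {
                pthread_mutex_t lock;
                uint64_t        isize;  /* in-core EOF */
        };

        /* Called from each I/O completion; completions may arrive out of
         * order, so only ever grow the size, never shrink it. */
        static void update_isize(struct sz *s, uint64_t io_end)
        {
                pthread_mutex_lock(&s->lock);
                if (io_end > s->isize)
                        s->isize = io_end;
                pthread_mutex_unlock(&s->lock);
        }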

/*
 * Handle block aligned direct I/O writes
 */

/*
 * Handle block unaligned direct I/O writes
 *
 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 * them to be done in parallel with reads and other direct I/O writes. However,
 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 * to do sub-block zeroing and that requires serialisation against other direct
 * I/O to the same block. In this case we need to serialise the submission of
 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 * In the case where sub-block zeroing is not required, we can do concurrent
 * sub-block dios to the same block successfully.
 *
 * Optimistically submit the I/O using the shared lock first, but use the
 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 * ...
 */

In xfs_file_dio_write_unaligned():
        /*
         * Extending writes need exclusivity because of the sub-block zeroing
         * ...
         */
        if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                ...
        }
        ...
        /*
         * We can't properly handle unaligned direct I/O to reflink files yet,
         * ...
         */
                ret = -ENOTBLK;
        ...
        /*
         * If we are doing exclusive unaligned I/O, this must be the only I/O
         * in-flight. Otherwise we risk data corruption due to unwritten extent
         * ...
         */
        ...
        /*
         * Retry unaligned I/O with exclusive blocking semantics if the DIO
         * ...
         */
        if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
                ...
        }
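
The control flow above is a classic optimistic pattern: attempt the I/O under the shared lock with IOMAP_DIO_OVERWRITE_ONLY, and only on -EAGAIN redo it under the exclusive lock. A userspace sketch with a pthread rwlock standing in for the XFS iolock and a hypothetical try_dio() callback:

        #include <errno.h>
        #include <pthread.h>

        static pthread_rwlock_t iolock = PTHREAD_RWLOCK_INITIALIZER;

        /* try_dio(overwrite_only): returns 0, or -EAGAIN when sub-block
         * zeroing would be required and overwrite_only was set. */
        static int dio_write_unaligned(int (*try_dio)(int overwrite_only))
        {
                int ret;

                pthread_rwlock_rdlock(&iolock); /* shared: writes in parallel */
                ret = try_dio(1);
                pthread_rwlock_unlock(&iolock);

                if (ret == -EAGAIN) {
                        pthread_rwlock_wrlock(&iolock); /* safe to zero now */
                        ret = try_dio(0);
                        pthread_rwlock_unlock(&iolock);
                }
                return ret;
        }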

In xfs_file_dio_write():
        struct xfs_inode        *ip = XFS_I(file_inode(iocb->ki_filp));
        ...
        /* direct I/O must be aligned to device logical sector size */
        if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
                return -EINVAL;
        if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
                return xfs_file_dio_write_unaligned(ip, iocb, from);
        return xfs_file_dio_write_aligned(ip, iocb, from);
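
The sector-mask check is what userspace O_DIRECT callers run into: buffer address, file offset and length must all be multiples of the device logical sector size or the I/O fails with EINVAL. A hedged sketch using 4096-byte alignment, which satisfies any common logical sector size (len must itself be a multiple of 4096 here, and fd must have been opened with O_DIRECT):

        #include <stdlib.h>
        #include <string.h>
        #include <unistd.h>

        static ssize_t dio_pwrite(int fd, const void *data, size_t len,
                                  off_t off)
        {
                void    *buf;
                ssize_t ret;

                /* aligned bounce buffer; misaligned user memory is the most
                 * common cause of O_DIRECT EINVAL failures */
                if (posix_memalign(&buf, 4096, len))
                        return -1;
                memcpy(buf, data, len);
                ret = pwrite(fd, buf, len, off);
                free(buf);
                return ret;
        }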

In xfs_file_dax_write():
        struct inode            *inode = iocb->ki_filp->f_mapping->host;
        ...
        pos = iocb->ki_pos;
        ...
        if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
                i_size_write(inode, iocb->ki_pos);
                ...
        }
        ...
        XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

        /* Handle various SYNC-type writes */
        ...

In xfs_file_buffered_write():
        struct inode            *inode = iocb->ki_filp->f_mapping->host;
        ...
        current->backing_dev_info = inode_to_bdi(inode);
        ...
        iocb->ki_pos += ret;
        ...
        if (ret == -EDQUOT && !cleared_space) {
                ...
        } else if (ret == -ENOSPC && !cleared_space) {
                ...
                xfs_flush_inodes(ip->i_mount);
                ...
                xfs_blockgc_free_space(ip->i_mount, &icw);
                ...
        }
        ...
        current->backing_dev_info = NULL;
        ...
        XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

        /* Handle various SYNC-type writes */
        ...
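
The error handling here retries the buffered write exactly once after trying to free space: quota errors trigger quota-specific garbage collection, while ENOSPC flushes dirty data (xfs_flush_inodes) and trims speculative preallocations (xfs_blockgc_free_space). The shape of the loop, reduced to a userspace sketch with hypothetical callbacks:

        #include <errno.h>
        #include <stdbool.h>
        #include <sys/types.h>

        static ssize_t write_retry_once(ssize_t (*do_write)(void),
                                        void (*free_space)(void))
        {
                bool    cleared_space = false;
                ssize_t ret;

        retry:
                ret = do_write();
                if ((ret == -EDQUOT || ret == -ENOSPC) && !cleared_space) {
                        cleared_space = true;   /* at most one reclaim pass */
                        free_space();           /* flush + trim preallocations */
                        goto retry;
                }
                return ret;
        }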

In xfs_file_write_iter():
        struct inode            *inode = iocb->ki_filp->f_mapping->host;
        ...
        XFS_STATS_INC(ip->i_mount, xs_write_calls);
        ...
        if (xfs_is_shutdown(ip->i_mount))
                return -EIO;
        ...
        if (iocb->ki_flags & IOCB_DIRECT) {
                ...
                if (ret != -ENOTBLK)
                        ...
        }

In xfs_break_dax_layouts():
        page = dax_layout_busy_page(inode->i_mapping);
        ...
        return ___wait_var_event(&page->_refcount,
                        atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
                        ...);

In xfs_break_layouts():
        ...
                error = -EINVAL;

In xfs_file_sync_writes():
        if (xfs_has_wsync(ip->i_mount))
                return true;
        if (filp->f_flags & (__O_SYNC | O_DSYNC))
                return true;

In xfs_file_fallocate():
        if (!S_ISREG(inode->i_mode))
                return -EINVAL;
        ...
                return -EOPNOTSUPP;
        ...
        /*
         * ... require the in-memory size to be fully up-to-date. ...
         */
        ...
                        error = -EINVAL;
        ...
                /*
                 * There is no need to overlap collapse range with EOF,
                 * ...
                 */
                        error = -EINVAL;
                ...
                new_size = i_size_read(inode) - len;
        ...
                        error = -EINVAL;
        ...
                /*
                 * New inode size must not exceed ->s_maxbytes, accounting for
                 * ...
                 */
                if (inode->i_sb->s_maxbytes - isize < len) {
                        error = -EFBIG;
                        ...
                }
        ...
                        error = -EINVAL;
        ...
                /* ... still zero-valued by virtue of the hole punch. ... */
                ...
                        len = round_up(offset + len, blksize) -
                              round_down(offset, blksize);
        ...
                        error = -EOPNOTSUPP;
        ...
                /*
                 * ... leave shifted extents past EOF and hence losing access to
                 * ...
                 */
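
From userspace, the operations checked above map onto fallocate(2) mode bits. A sketch of two of them; FALLOC_FL_COLLAPSE_RANGE demands a granule-aligned offset/len (the xfs_is_falloc_aligned() test) and a range that ends before EOF, while FALLOC_FL_PUNCH_HOLE must be paired with FALLOC_FL_KEEP_SIZE:

        #define _GNU_SOURCE
        #include <fcntl.h>

        /* Remove [offset, offset + len) and shift the tail down; fails with
         * EINVAL if the range is not block-aligned or reaches EOF. */
        static int collapse_range(int fd, off_t offset, off_t len)
        {
                return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
        }

        /* Deallocate the byte range without changing i_size. */
        static int punch_hole(int fd, off_t offset, off_t len)
        {
                return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                 offset, len);
        }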

In xfs_file_remap_range():
        struct xfs_mount        *mp = src->i_mount;
        ...
                return -EINVAL;
        ...
                return -EOPNOTSUPP;
        ...
                return -EIO;
        ...
        if (... &&
            (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
            ... &&
            !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
                cowextsize = src->i_cowextsize;

In xfs_file_open():
        if (xfs_is_shutdown(XFS_M(inode->i_sb)))
                return -EIO;
        file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC;

In xfs_dir_open():
        /*
         * If there are any blocks, read-ahead block 0 as we're almost
         * ...
         */
        if (ip->i_df.if_nextents > 0)
                ...

In xfs_file_readdir():
        /*
         * ... point we can change the ->readdir prototype to include the
         * ...
         */
        bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);

In xfs_file_llseek():
        struct inode            *inode = file->f_mapping->host;

        if (xfs_is_shutdown(XFS_I(inode)->i_mount))
                return -EIO;
        ...
        return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

In xfs_dax_fault():
        ...
                        (write_fault && !vmf->cow_page) ?
                        ...

/*
 * ... lock ordering during page faults:
 * ...
 *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */

In __xfs_filemap_fault():
        struct inode            *inode = file_inode(vmf->vma->vm_file);
        ...
        sb_start_pagefault(inode->i_sb);
        file_update_time(vmf->vma->vm_file);
        ...
        sb_end_pagefault(inode->i_sb);

In xfs_is_write_fault():
        return (vmf->flags & FAULT_FLAG_WRITE) &&
               (vmf->vma->vm_flags & VM_SHARED);

In xfs_filemap_fault():
        ...
                        IS_DAX(file_inode(vmf->vma->vm_file)) &&
        ...

In xfs_filemap_huge_fault():
        if (!IS_DAX(file_inode(vmf->vma->vm_file)))
                return VM_FAULT_FALLBACK;

In xfs_filemap_map_pages():
        struct inode            *inode = file_inode(vmf->vma->vm_file);

In xfs_file_mmap():
        /*
         * We don't support synchronous mappings for non-DAX files and
         * ...
         */
        if (!daxdev_mapping_supported(vma, target->bt_daxdev))
                return -EOPNOTSUPP;
        ...
        vma->vm_ops = &xfs_file_vm_ops;
        ...
                vma->vm_flags |= VM_HUGEPAGE;
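
daxdev_mapping_supported() is the hook behind MAP_SYNC: a synchronous shared mapping is only allowed when the backing device is DAX-capable, otherwise mmap() fails with EOPNOTSUPP. A userspace sketch; MAP_SYNC must be combined with MAP_SHARED_VALIDATE, and both flags assume reasonably recent kernel and glibc headers:

        #define _GNU_SOURCE
        #include <sys/mman.h>

        /* Map len bytes of fd so that CPU stores can be made durable with
         * cache-flush instructions alone (no fsync()); returns MAP_FAILED
         * with errno == EOPNOTSUPP on a non-DAX file. */
        static void *map_sync(int fd, size_t len)
        {
                return mmap(NULL, len, PROT_READ | PROT_WRITE,
                            MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
        }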