fs/zonefs/file.c (zonefs file operations), excerpts grouped by function:

// SPDX-License-Identifier: GPL-2.0

In zonefs_read_iomap_begin():

	struct super_block *sb = inode->i_sb;
	...
	mutex_lock(&zi->i_truncate_mutex);
	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
	isize = i_size_read(inode);
	if (iomap->offset >= isize) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->length = length;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
		iomap->length = isize - iomap->offset;
	}
	mutex_unlock(&zi->i_truncate_mutex);

In zonefs_write_iomap_begin():

	struct super_block *sb = inode->i_sb;
	...
	if (WARN_ON_ONCE(offset + length > z->z_capacity))
		return -EIO;

	/*
	 * Sequential zones can only accept direct writes. This is already
	 * ...
	 */
	...
		return -EIO;

	mutex_lock(&zi->i_truncate_mutex);
	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
	iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
	isize = i_size_read(inode);
	if (iomap->offset >= isize) {
		iomap->type = IOMAP_UNWRITTEN;
		iomap->length = z->z_capacity - iomap->offset;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->length = isize - iomap->offset;
	}
	mutex_unlock(&zi->i_truncate_mutex);

In zonefs_write_map_blocks():

	...
		return -EIO;
	...
		return -EIO;

	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;

	return zonefs_write_iomap_begin(inode, offset,
					z->z_capacity - offset,
					IOMAP_WRITE, &wpc->iomap, NULL);

In zonefs_swap_activate():

		zonefs_err(inode->i_sb,
			   ...);
		return -EINVAL;

In zonefs_file_truncate():

	...
		return -EPERM;
	...
	else if (isize == z->z_capacity)
		...
	else
		return -EPERM;
	...
	filemap_invalidate_lock(inode->i_mapping);
	...
	mutex_lock(&zi->i_truncate_mutex);
	...
	if (z->z_flags & ZONEFS_ZONE_OPEN) {
		/*
		 * ...
		 * re-open the zone to ensure new writes can be processed.
		 * ...
		 */
		...
			z->z_flags &= ~ZONEFS_ZONE_OPEN;
	}
	...
	z->z_wpoffset = isize;
	...
	mutex_unlock(&zi->i_truncate_mutex);
	filemap_invalidate_unlock(inode->i_mapping);
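
Read together with the checks at the top of zonefs_file_truncate(), the rule is: a sequential zone file may only be truncated to 0 (a zone reset) or to the zone capacity (a zone finish); any other size fails with -EPERM. A minimal user-space sketch of that behavior, assuming a hypothetical mount point and zone file path:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical path: zonefs exposes sequential zones under seq/ */
		int fd = open("/mnt/zonefs/seq/0", O_RDWR);

		if (fd < 0)
			return 1;

		if (ftruncate(fd, 0))		/* zone reset: allowed */
			perror("truncate to 0");

		if (ftruncate(fd, 4096))	/* neither 0 nor the zone capacity: expect EPERM */
			perror("truncate to 4096");

		close(fd);
		return 0;
	}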

In zonefs_file_fsync():

	...
		return -EPERM;

	/*
	 * Since only direct writes are allowed in sequential files, page cache
	 * ...
	 */
	...
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);

In zonefs_filemap_page_mkwrite():

	struct inode *inode = file_inode(vmf->vma->vm_file);
	...
	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	...
	filemap_invalidate_lock_shared(inode->i_mapping);
	...
	filemap_invalidate_unlock_shared(inode->i_mapping);
	...
	sb_end_pagefault(inode->i_sb);

In zonefs_file_mmap():

	/*
	 * ...
	 * mappings are possible since there are no guarantees for write
	 * ...
	 */
	...
	    (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	...
	vma->vm_ops = &zonefs_file_vm_ops;
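
The check above refuses shared writable mappings of sequential zone files, since the page cache gives no write-ordering guarantee; read-only mappings remain possible. A hedged user-space sketch (the path is an assumption):

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical sequential zone file, opened read-only. */
		int fd = open("/mnt/zonefs/seq/0", O_RDONLY);
		void *p;

		if (fd < 0)
			return 1;

		/* A read-only shared mapping is accepted... */
		p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
		if (p != MAP_FAILED)
			munmap(p, 4096);

		/*
		 * ...whereas mapping the file MAP_SHARED with write permission
		 * (O_RDWR + PROT_WRITE) would fail with EINVAL.
		 */
		close(fd);
		return 0;
	}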

In zonefs_file_write_dio_end_io():

	struct inode *inode = file_inode(iocb->ki_filp);
	...
	mutex_lock(&zi->i_truncate_mutex);
	if (i_size_read(inode) < iocb->ki_pos + size) {
		zonefs_update_stats(inode, iocb->ki_pos + size);
		zonefs_i_size_write(inode, iocb->ki_pos + size);
	}
	mutex_unlock(&zi->i_truncate_mutex);

/*
 * ...
 * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
 */
In zonefs_write_check_limits():

	loff_t max_size = z->z_capacity;
	...
		return -EFBIG;
	...
	count = min(count, limit - pos);
	...
	if (!(file->f_flags & O_LARGEFILE))
		...
	...
		return -EFBIG;

	return min(count, max_size - pos);
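
As an illustrative worked example of the limit logic (the numbers are assumptions): with a zone capacity of 256 MiB and no RLIMIT_FSIZE limit, a 4 MiB write starting at offset 255 MiB is shortened to min(count, max_size - pos) = 1 MiB, while a write starting at or beyond 256 MiB fails with -EFBIG.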

In zonefs_write_checks():

	struct file *file = iocb->ki_filp;
	...
		return -ETXTBSY;
	...
	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_APPEND) {
		...
			return -EINVAL;
		mutex_lock(&zi->i_truncate_mutex);
		iocb->ki_pos = z->z_wpoffset;
		mutex_unlock(&zi->i_truncate_mutex);
	}

	count = zonefs_write_check_limits(file, iocb->ki_pos,
					  ...);
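
The IOCB_APPEND branch above is what makes O_APPEND (or RWF_APPEND) convenient on sequential zone files: the kernel repositions the write at the current zone write pointer, so user space does not have to track the offset itself. A hedged sketch, assuming a hypothetical path and a 4096-byte block size:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Writes to sequential zone files must use O_DIRECT. */
		int fd = open("/mnt/zonefs/seq/0", O_WRONLY | O_DIRECT | O_APPEND);
		void *buf;

		if (fd < 0)
			return 1;

		/* Direct I/O buffer and count aligned to the (assumed) block size. */
		if (posix_memalign(&buf, 4096, 4096))
			return 1;
		memset(buf, 0, 4096);

		/* With O_APPEND, the write lands at the zone write pointer. */
		if (write(fd, buf, 4096) < 0)
			return 1;

		free(buf);
		close(fd);
		return 0;
	}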

/*
 * Handle direct writes. For sequential zone files, this is the only possible
 * ...
 * elevator feature is being used (e.g. mq-deadline). The block layer always
 * ...
 */
In zonefs_file_dio_write():

	struct inode *inode = file_inode(iocb->ki_filp);
	...
	struct super_block *sb = inode->i_sb;
	...

	/*
	 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
	 * ...
	 * on the inode lock but the second goes through but is now unaligned).
	 */
	if (... &&
	    (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		...
			return -EAGAIN;
	}
	...
	if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
		ret = -EINVAL;
		...
	}
	...
	mutex_lock(&zi->i_truncate_mutex);
	if (iocb->ki_pos != z->z_wpoffset) {
		mutex_unlock(&zi->i_truncate_mutex);
		ret = -EINVAL;
		...
	}
	mutex_unlock(&zi->i_truncate_mutex);
	...
	if (ret == -ENOTBLK)
		ret = -EBUSY;
	...
	if (... &&
	    (ret > 0 || ret == -EIOCBQUEUED)) {
		...
		mutex_lock(&zi->i_truncate_mutex);
		z->z_wpoffset += count;
		...
		mutex_unlock(&zi->i_truncate_mutex);
	}
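
Taken together, the checks above encode the two user-visible constraints on direct writes to a sequential zone file: the write position and byte count must be aligned to the filesystem block size, and the position must equal the current zone write pointer (which is also the file size); otherwise the write fails with EINVAL. A hedged sketch using an explicit offset instead of O_APPEND (path and block size are assumptions):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical sequential zone file. */
		int fd = open("/mnt/zonefs/seq/0", O_WRONLY | O_DIRECT);
		struct stat st;
		void *buf;

		if (fd < 0 || fstat(fd, &st))
			return 1;

		if (posix_memalign(&buf, 4096, 4096))	/* assumed 4096-byte block size */
			return 1;
		memset(buf, 0, 4096);

		/*
		 * Writing at the current file size (== zone write pointer) is
		 * accepted; any other offset, or an unaligned count, gets EINVAL.
		 */
		if (pwrite(fd, buf, 4096, st.st_size) < 0)
			return 1;

		/* Persist the data; zonefs_file_fsync() issues a device flush. */
		fdatasync(fd);

		free(buf);
		close(fd);
		return 0;
	}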

In zonefs_file_buffered_write():

	struct inode *inode = file_inode(iocb->ki_filp);
	...
	/*
	 * Direct IO writes are mandatory for sequential zone files so that the
	 * ...
	 */
	...
		return -EIO;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		...
			return -EAGAIN;
	}
	...
	if (ret == -EIO)
		...

In zonefs_file_write_iter():

	struct inode *inode = file_inode(iocb->ki_filp);
	...
		return -EPERM;

	if (sb_rdonly(inode->i_sb))
		return -EROFS;
	...
	if (iocb->ki_pos >= z->z_capacity)
		return -EFBIG;

	if (iocb->ki_flags & IOCB_DIRECT) {
		...
		if (ret != -ENOTBLK)
			...
	}

In zonefs_file_read_dio_end_io():

	...
		zonefs_io_error(file_inode(iocb->ki_filp), false);

In zonefs_file_read_iter():

	struct inode *inode = file_inode(iocb->ki_filp);
	...
	struct super_block *sb = inode->i_sb;
	...
	if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
		return -EPERM;

	if (iocb->ki_pos >= z->z_capacity)
		return 0;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		...
			return -EAGAIN;
	}
	...
	mutex_lock(&zi->i_truncate_mutex);
	...
	if (iocb->ki_pos >= isize) {
		mutex_unlock(&zi->i_truncate_mutex);
		...
	}
	iov_iter_truncate(to, isize - iocb->ki_pos);
	mutex_unlock(&zi->i_truncate_mutex);

	if (iocb->ki_flags & IOCB_DIRECT) {
		...
		if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
			ret = -EINVAL;
			...
		}
		file_accessed(iocb->ki_filp);
		...
	} else {
		...
		if (ret == -EIO)
			...
	}
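
On the read side, O_DIRECT imposes the same alignment rule as writes: the read position and count must be block aligned or the read fails with EINVAL, while reads at or past the written data simply return 0 (end of file). Buffered reads have no alignment constraint. A hedged sketch (path and block size are assumptions):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical sequential zone file. */
		int fd = open("/mnt/zonefs/seq/0", O_RDONLY | O_DIRECT);
		void *buf;
		ssize_t ret;

		if (fd < 0)
			return 1;

		if (posix_memalign(&buf, 4096, 4096))	/* assumed 4096-byte block size */
			return 1;

		/* Block-aligned read; returns 0 if nothing has been written yet. */
		ret = pread(fd, buf, 4096, 0);

		free(buf);
		close(fd);
		return (ret < 0);
	}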

In zonefs_file_splice_read():

	if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
		return -EPERM;

	if (*ppos >= z->z_capacity)
		return 0;
	...
	mutex_lock(&zi->i_truncate_mutex);
	...
	len = min_t(loff_t, len, isize - *ppos);
	mutex_unlock(&zi->i_truncate_mutex);
	...
	if (ret == -EIO)
		...

In zonefs_seq_file_need_wro():

	if (!(file->f_mode & FMODE_WRITE))
		return false;

In zonefs_seq_file_write_open():

	mutex_lock(&zi->i_truncate_mutex);

	if (!zi->i_wr_refcnt) {
		struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
		unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files);

		if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {

			if (sbi->s_max_wro_seq_files
			    && wro > sbi->s_max_wro_seq_files) {
				atomic_dec(&sbi->s_wro_seq_files);
				ret = -EBUSY;
				...
			}

			if (i_size_read(inode) < z->z_capacity) {
				...
				atomic_dec(&sbi->s_wro_seq_files);
				...
				z->z_flags |= ZONEFS_ZONE_OPEN;
				...
			}
		}
	}

	zi->i_wr_refcnt++;
	...
	mutex_unlock(&zi->i_truncate_mutex);
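
This open-time accounting backs the "explicit-open" mount option: the first writable open of a sequential zone file explicitly opens the zone on the device, and if that would exceed the number of zones the device allows to be open at once (s_max_wro_seq_files), the open fails with EBUSY. A hedged sketch of enabling the option from C, with a hypothetical device and mount point:

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* Hypothetical zoned block device and mount point. */
		if (mount("/dev/nvme0n1", "/mnt/zonefs", "zonefs", 0, "explicit-open")) {
			perror("mount");
			return 1;
		}

		/*
		 * With explicit-open, open(O_WRONLY) on more sequential zone
		 * files than the device can keep open at once returns EBUSY.
		 */
		return 0;
	}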

In zonefs_file_open():

	file->f_mode |= FMODE_CAN_ODIRECT;

In zonefs_seq_file_write_close():

	struct super_block *sb = inode->i_sb;
	...
	mutex_lock(&zi->i_truncate_mutex);

	zi->i_wr_refcnt--;
	if (zi->i_wr_refcnt)
		...

	if (z->z_flags & ZONEFS_ZONE_OPEN) {
		...
			/*
			 * ...
			 * read-only.
			 */
			if (z->z_flags & ZONEFS_ZONE_OPEN &&
			    !(sb->s_flags & SB_RDONLY)) {
				...
					    z->z_sector, ret);
				...
					    "remounting filesystem read-only\n");
				sb->s_flags |= SB_RDONLY;
			}
		...
		z->z_flags &= ~ZONEFS_ZONE_OPEN;
		...
	}

	atomic_dec(&sbi->s_wro_seq_files);
	...
	mutex_unlock(&zi->i_truncate_mutex);

In zonefs_file_release():

	/*
	 * ...
	 * the zone has gone offline or read-only). Make sure we don't fail the
	 * close(2) for user-space.
	 */