Lines matching "no-unaligned-direct-access"

1 // SPDX-License-Identifier: GPL-2.0
31 * Manage the active zone count. Called with zi->i_truncate_mutex held.
35 struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb); in zonefs_account_active()
38 lockdep_assert_held(&zi->i_truncate_mutex); in zonefs_account_active()
40 if (zi->i_ztype != ZONEFS_ZTYPE_SEQ) in zonefs_account_active()
47 if (zi->i_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY)) in zonefs_account_active()
54 if ((zi->i_flags & ZONEFS_ZONE_OPEN) || in zonefs_account_active()
55 (zi->i_wpoffset > 0 && zi->i_wpoffset < zi->i_max_size)) { in zonefs_account_active()
56 if (!(zi->i_flags & ZONEFS_ZONE_ACTIVE)) { in zonefs_account_active()
57 zi->i_flags |= ZONEFS_ZONE_ACTIVE; in zonefs_account_active()
58 atomic_inc(&sbi->s_active_seq_files); in zonefs_account_active()
65 if (zi->i_flags & ZONEFS_ZONE_ACTIVE) { in zonefs_account_active()
66 zi->i_flags &= ~ZONEFS_ZONE_ACTIVE; in zonefs_account_active()
67 atomic_dec(&sbi->s_active_seq_files); in zonefs_account_active()
76 lockdep_assert_held(&zi->i_truncate_mutex); in zonefs_zone_mgmt()
86 if (op == REQ_OP_ZONE_CLOSE && !zi->i_wpoffset) in zonefs_zone_mgmt()
90 ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector, in zonefs_zone_mgmt()
91 zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS); in zonefs_zone_mgmt()
93 zonefs_err(inode->i_sb, in zonefs_zone_mgmt()
95 blk_op_str(op), zi->i_zsector, ret); in zonefs_zone_mgmt()
108 * A full zone is no longer open/active and does not need in zonefs_i_size_write()
111 if (isize >= zi->i_max_size) { in zonefs_i_size_write()
112 struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb); in zonefs_i_size_write()
114 if (zi->i_flags & ZONEFS_ZONE_ACTIVE) in zonefs_i_size_write()
115 atomic_dec(&sbi->s_active_seq_files); in zonefs_i_size_write()
116 zi->i_flags &= ~(ZONEFS_ZONE_OPEN | ZONEFS_ZONE_ACTIVE); in zonefs_i_size_write()
125 struct super_block *sb = inode->i_sb; in zonefs_read_iomap_begin()
132 mutex_lock(&zi->i_truncate_mutex); in zonefs_read_iomap_begin()
133 iomap->bdev = inode->i_sb->s_bdev; in zonefs_read_iomap_begin()
134 iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); in zonefs_read_iomap_begin()
136 if (iomap->offset >= isize) { in zonefs_read_iomap_begin()
137 iomap->type = IOMAP_HOLE; in zonefs_read_iomap_begin()
138 iomap->addr = IOMAP_NULL_ADDR; in zonefs_read_iomap_begin()
139 iomap->length = length; in zonefs_read_iomap_begin()
141 iomap->type = IOMAP_MAPPED; in zonefs_read_iomap_begin()
142 iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset; in zonefs_read_iomap_begin()
143 iomap->length = isize - iomap->offset; in zonefs_read_iomap_begin()
145 mutex_unlock(&zi->i_truncate_mutex); in zonefs_read_iomap_begin()
161 struct super_block *sb = inode->i_sb; in zonefs_write_iomap_begin()
165 if (WARN_ON_ONCE(offset + length > zi->i_max_size)) in zonefs_write_iomap_begin()
166 return -EIO; in zonefs_write_iomap_begin()
169 * Sequential zones can only accept direct writes. This is already in zonefs_write_iomap_begin()
173 if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ && in zonefs_write_iomap_begin()
175 return -EIO; in zonefs_write_iomap_begin()
182 mutex_lock(&zi->i_truncate_mutex); in zonefs_write_iomap_begin()
183 iomap->bdev = inode->i_sb->s_bdev; in zonefs_write_iomap_begin()
184 iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize); in zonefs_write_iomap_begin()
185 iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset; in zonefs_write_iomap_begin()
187 if (iomap->offset >= isize) { in zonefs_write_iomap_begin()
188 iomap->type = IOMAP_UNWRITTEN; in zonefs_write_iomap_begin()
189 iomap->length = zi->i_max_size - iomap->offset; in zonefs_write_iomap_begin()
191 iomap->type = IOMAP_MAPPED; in zonefs_write_iomap_begin()
192 iomap->length = isize - iomap->offset; in zonefs_write_iomap_begin()
194 mutex_unlock(&zi->i_truncate_mutex); in zonefs_write_iomap_begin()
224 if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV)) in zonefs_write_map_blocks()
225 return -EIO; in zonefs_write_map_blocks()
227 return -EIO; in zonefs_write_map_blocks()
230 if (offset >= wpc->iomap.offset && in zonefs_write_map_blocks()
231 offset < wpc->iomap.offset + wpc->iomap.length) in zonefs_write_map_blocks()
234 return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset, in zonefs_write_map_blocks()
235 IOMAP_WRITE, &wpc->iomap, NULL); in zonefs_write_map_blocks()
256 if (zi->i_ztype != ZONEFS_ZTYPE_CNV) { in zonefs_swap_activate()
257 zonefs_err(inode->i_sb, in zonefs_swap_activate()
259 return -EINVAL; in zonefs_swap_activate()
282 struct super_block *sb = inode->i_sb; in zonefs_update_stats()
290 spin_lock(&sbi->s_lock); in zonefs_update_stats()
297 nr_blocks = (old_isize - new_isize) >> sb->s_blocksize_bits; in zonefs_update_stats()
298 if (sbi->s_used_blocks > nr_blocks) in zonefs_update_stats()
299 sbi->s_used_blocks -= nr_blocks; in zonefs_update_stats()
301 sbi->s_used_blocks = 0; in zonefs_update_stats()
303 sbi->s_used_blocks += in zonefs_update_stats()
304 (new_isize - old_isize) >> sb->s_blocksize_bits; in zonefs_update_stats()
305 if (sbi->s_used_blocks > sbi->s_blocks) in zonefs_update_stats()
306 sbi->s_used_blocks = sbi->s_blocks; in zonefs_update_stats()
309 spin_unlock(&sbi->s_lock); in zonefs_update_stats()
313 * Check a zone condition and adjust its file inode access permissions for
323 switch (zone->cond) { in zonefs_check_zone_condition()
330 zonefs_warn(inode->i_sb, "inode %lu: offline zone\n", in zonefs_check_zone_condition()
331 inode->i_ino); in zonefs_check_zone_condition()
332 inode->i_flags |= S_IMMUTABLE; in zonefs_check_zone_condition()
333 inode->i_mode &= ~0777; in zonefs_check_zone_condition()
334 zone->wp = zone->start; in zonefs_check_zone_condition()
335 zi->i_flags |= ZONEFS_ZONE_OFFLINE; in zonefs_check_zone_condition()
339 * The write pointer of read-only zones is invalid. If such a in zonefs_check_zone_condition()
347 zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n", in zonefs_check_zone_condition()
348 inode->i_ino); in zonefs_check_zone_condition()
349 inode->i_flags |= S_IMMUTABLE; in zonefs_check_zone_condition()
351 zone->cond = BLK_ZONE_COND_OFFLINE; in zonefs_check_zone_condition()
352 inode->i_mode &= ~0777; in zonefs_check_zone_condition()
353 zone->wp = zone->start; in zonefs_check_zone_condition()
354 zi->i_flags |= ZONEFS_ZONE_OFFLINE; in zonefs_check_zone_condition()
357 zi->i_flags |= ZONEFS_ZONE_READONLY; in zonefs_check_zone_condition()
358 inode->i_mode &= ~0222; in zonefs_check_zone_condition()
362 return zi->i_max_size; in zonefs_check_zone_condition()
364 if (zi->i_ztype == ZONEFS_ZTYPE_CNV) in zonefs_check_zone_condition()
365 return zi->i_max_size; in zonefs_check_zone_condition()
366 return (zone->wp - zone->start) << SECTOR_SHIFT; in zonefs_check_zone_condition()
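
The zonefs_check_zone_condition() fragments above clear all permission bits (mode &= ~0777) and set S_IMMUTABLE for offline zones, and clear the write bits (mode &= ~0222) for read-only zones. A minimal userspace sketch, assuming a zonefs mount at /mnt/zonefs and a file name that are illustrative only, can use those mode bits to classify zone files before touching them:

    /* Hedged sketch: classify a zonefs file by the permission bits that
     * zonefs_check_zone_condition() clears for bad zones. The path below
     * is an assumption, not part of this listing. */
    #include <stdio.h>
    #include <sys/stat.h>

    static const char *zone_file_state(const char *path)
    {
        struct stat st;

        if (stat(path, &st))
            return "stat failed";
        if ((st.st_mode & 0777) == 0)
            return "offline (all access bits cleared)";
        if ((st.st_mode & 0222) == 0)
            return "read-only (write bits cleared)";
        return "usable";
    }

    int main(void)
    {
        const char *path = "/mnt/zonefs/seq/0";    /* hypothetical */

        printf("%s: %s\n", path, zone_file_state(path));
        return 0;
    }
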
379 struct inode *inode = err->inode; in zonefs_io_error_cb()
381 struct super_block *sb = inode->i_sb; in zonefs_io_error_cb()
387 * read-only), read errors are simply signaled to the IO issuer as long in zonefs_io_error_cb()
388 * as there is no inconsistency between the inode size and the amount of in zonefs_io_error_cb()
393 if (zone->cond != BLK_ZONE_COND_OFFLINE && in zonefs_io_error_cb()
394 zone->cond != BLK_ZONE_COND_READONLY && in zonefs_io_error_cb()
395 !err->write && isize == data_size) in zonefs_io_error_cb()
405 * of the file. This can happen in the case of a large direct IO in zonefs_io_error_cb()
417 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size) in zonefs_io_error_cb()
419 inode->i_ino, isize, data_size); in zonefs_io_error_cb()
423 * errors=zone-ro and errors=zone-offline result in changing the in zonefs_io_error_cb()
424 * zone condition to read-only and offline respectively, as if the in zonefs_io_error_cb()
427 if (zone->cond == BLK_ZONE_COND_OFFLINE || in zonefs_io_error_cb()
428 sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL) { in zonefs_io_error_cb()
429 zonefs_warn(sb, "inode %lu: read/write access disabled\n", in zonefs_io_error_cb()
430 inode->i_ino); in zonefs_io_error_cb()
431 if (zone->cond != BLK_ZONE_COND_OFFLINE) { in zonefs_io_error_cb()
432 zone->cond = BLK_ZONE_COND_OFFLINE; in zonefs_io_error_cb()
436 } else if (zone->cond == BLK_ZONE_COND_READONLY || in zonefs_io_error_cb()
437 sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) { in zonefs_io_error_cb()
438 zonefs_warn(sb, "inode %lu: write access disabled\n", in zonefs_io_error_cb()
439 inode->i_ino); in zonefs_io_error_cb()
440 if (zone->cond != BLK_ZONE_COND_READONLY) { in zonefs_io_error_cb()
441 zone->cond = BLK_ZONE_COND_READONLY; in zonefs_io_error_cb()
448 * If the filesystem is mounted with the explicit-open mount option, we in zonefs_io_error_cb()
450 * the read-only or offline condition, to avoid attempting an explicit in zonefs_io_error_cb()
453 if ((sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) && in zonefs_io_error_cb()
454 (zone->cond == BLK_ZONE_COND_OFFLINE || in zonefs_io_error_cb()
455 zone->cond == BLK_ZONE_COND_READONLY)) in zonefs_io_error_cb()
456 zi->i_flags &= ~ZONEFS_ZONE_OPEN; in zonefs_io_error_cb()
459 * If errors=remount-ro was specified, any error results in remounting in zonefs_io_error_cb()
460 * the volume as read-only. in zonefs_io_error_cb()
462 if ((sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO) && !sb_rdonly(sb)) { in zonefs_io_error_cb()
463 zonefs_warn(sb, "remounting filesystem read-only\n"); in zonefs_io_error_cb()
464 sb->s_flags |= SB_RDONLY; in zonefs_io_error_cb()
468 * Update block usage stats and the inode size to prevent access to in zonefs_io_error_cb()
473 zi->i_wpoffset = data_size; in zonefs_io_error_cb()
481 * in the zone condition (e.g. offline or read-only). For a failed write to a
489 struct super_block *sb = inode->i_sb; in __zonefs_io_error()
504 if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev)) in __zonefs_io_error()
505 nr_zones = zi->i_zone_size >> in __zonefs_io_error()
506 (sbi->s_zone_sectors_shift + SECTOR_SHIFT); in __zonefs_io_error()
517 ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, nr_zones, in __zonefs_io_error()
521 inode->i_ino, ret); in __zonefs_io_error()
529 mutex_lock(&zi->i_truncate_mutex); in zonefs_io_error()
531 mutex_unlock(&zi->i_truncate_mutex); in zonefs_io_error()
546 if (zi->i_ztype != ZONEFS_ZTYPE_SEQ) in zonefs_file_truncate()
547 return -EPERM; in zonefs_file_truncate()
551 else if (isize == zi->i_max_size) in zonefs_file_truncate()
554 return -EPERM; in zonefs_file_truncate()
559 filemap_invalidate_lock(inode->i_mapping); in zonefs_file_truncate()
562 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_truncate()
576 if (zi->i_flags & ZONEFS_ZONE_OPEN) { in zonefs_file_truncate()
580 * re-open the zone to ensure new writes can be processed. in zonefs_file_truncate()
588 zi->i_flags &= ~ZONEFS_ZONE_OPEN; in zonefs_file_truncate()
593 zi->i_wpoffset = isize; in zonefs_file_truncate()
597 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_truncate()
598 filemap_invalidate_unlock(inode->i_mapping); in zonefs_file_truncate()
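
The zonefs_file_truncate() fragments above only accept sequential zone files and only the sizes 0 (zone reset) or the zone maximum (zone finish); any other size returns -EPERM. A minimal sketch, assuming a hypothetical mount point, resets a sequential file by truncating it to zero:

    /* Hedged sketch: reset a sequential zone file by truncating it to 0,
     * which zonefs_file_truncate() maps to a zone reset. Path is
     * illustrative only. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/mnt/zonefs/seq/0";    /* hypothetical */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* Sizes other than 0 or the zone maximum are rejected with EPERM. */
        if (ftruncate(fd, 0))
            fprintf(stderr, "truncate: %s\n", strerror(errno));

        close(fd);
        return 0;
    }
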
610 return -EPERM; in zonefs_inode_setattr()
618 * allow setting any write attributes on the sub-directories grouping in zonefs_inode_setattr()
621 if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) && in zonefs_inode_setattr()
622 (iattr->ia_mode & 0222)) in zonefs_inode_setattr()
623 return -EPERM; in zonefs_inode_setattr()
625 if (((iattr->ia_valid & ATTR_UID) && in zonefs_inode_setattr()
626 !uid_eq(iattr->ia_uid, inode->i_uid)) || in zonefs_inode_setattr()
627 ((iattr->ia_valid & ATTR_GID) && in zonefs_inode_setattr()
628 !gid_eq(iattr->ia_gid, inode->i_gid))) { in zonefs_inode_setattr()
634 if (iattr->ia_valid & ATTR_SIZE) { in zonefs_inode_setattr()
635 ret = zonefs_file_truncate(inode, iattr->ia_size); in zonefs_inode_setattr()
656 return -EPERM; in zonefs_file_fsync()
659 * Since only direct writes are allowed in sequential files, page cache in zonefs_file_fsync()
662 if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV) in zonefs_file_fsync()
665 ret = blkdev_issue_flush(inode->i_sb->s_bdev); in zonefs_file_fsync()
675 struct inode *inode = file_inode(vmf->vma->vm_file); in zonefs_filemap_page_mkwrite()
686 if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV)) in zonefs_filemap_page_mkwrite()
689 sb_start_pagefault(inode->i_sb); in zonefs_filemap_page_mkwrite()
690 file_update_time(vmf->vma->vm_file); in zonefs_filemap_page_mkwrite()
693 filemap_invalidate_lock_shared(inode->i_mapping); in zonefs_filemap_page_mkwrite()
695 filemap_invalidate_unlock_shared(inode->i_mapping); in zonefs_filemap_page_mkwrite()
697 sb_end_pagefault(inode->i_sb); in zonefs_filemap_page_mkwrite()
712 * mappings are possible since there are no guarantees for write in zonefs_file_mmap()
715 if (ZONEFS_I(file_inode(file))->i_ztype == ZONEFS_ZTYPE_SEQ && in zonefs_file_mmap()
716 (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) in zonefs_file_mmap()
717 return -EINVAL; in zonefs_file_mmap()
720 vma->vm_ops = &zonefs_file_vm_ops; in zonefs_file_mmap()
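
The zonefs_file_mmap() fragments above reject shared writable mappings of sequential zone files (VM_SHARED together with VM_MAYWRITE returns -EINVAL), while read mappings remain allowed. A minimal sketch, with an assumed file path, maps such a file read-only:

    /* Hedged sketch: map a sequential zone file read-only, which
     * zonefs_file_mmap() allows; a writable shared mapping of the same
     * file would fail with EINVAL. Path is illustrative only. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/mnt/zonefs/seq/0";    /* hypothetical */
        struct stat st;
        void *p;
        int fd;

        fd = open(path, O_RDONLY);
        if (fd < 0 || fstat(fd, &st) || !st.st_size)
            return 1;

        p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        printf("first byte: 0x%02x\n", ((unsigned char *)p)[0]);
        munmap(p, st.st_size);
        close(fd);
        return 0;
    }
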
740 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_write_dio_end_io()
748 if (size && zi->i_ztype != ZONEFS_ZTYPE_CNV) { in zonefs_file_write_dio_end_io()
756 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_write_dio_end_io()
757 if (i_size_read(inode) < iocb->ki_pos + size) { in zonefs_file_write_dio_end_io()
758 zonefs_update_stats(inode, iocb->ki_pos + size); in zonefs_file_write_dio_end_io()
759 zonefs_i_size_write(inode, iocb->ki_pos + size); in zonefs_file_write_dio_end_io()
761 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_write_dio_end_io()
773 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_dio_append()
775 struct block_device *bdev = inode->i_sb->s_bdev; in zonefs_file_dio_append()
782 max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize); in zonefs_file_dio_append()
791 bio->bi_iter.bi_sector = zi->i_zsector; in zonefs_file_dio_append()
792 bio->bi_ioprio = iocb->ki_ioprio; in zonefs_file_dio_append()
794 bio->bi_opf |= REQ_FUA; in zonefs_file_dio_append()
800 size = bio->bi_iter.bi_size; in zonefs_file_dio_append()
803 if (iocb->ki_flags & IOCB_HIPRI) in zonefs_file_dio_append()
816 iocb->ki_pos += size; in zonefs_file_dio_append()
825 * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
833 loff_t max_size = zi->i_max_size; in zonefs_write_check_limits()
838 return -EFBIG; in zonefs_write_check_limits()
840 count = min(count, limit - pos); in zonefs_write_check_limits()
843 if (!(file->f_flags & O_LARGEFILE)) in zonefs_write_check_limits()
847 return -EFBIG; in zonefs_write_check_limits()
849 return min(count, max_size - pos); in zonefs_write_check_limits()
854 struct file *file = iocb->ki_filp; in zonefs_write_checks()
860 return -ETXTBSY; in zonefs_write_checks()
865 if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) in zonefs_write_checks()
866 return -EINVAL; in zonefs_write_checks()
868 if (iocb->ki_flags & IOCB_APPEND) { in zonefs_write_checks()
869 if (zi->i_ztype != ZONEFS_ZTYPE_SEQ) in zonefs_write_checks()
870 return -EINVAL; in zonefs_write_checks()
871 mutex_lock(&zi->i_truncate_mutex); in zonefs_write_checks()
872 iocb->ki_pos = zi->i_wpoffset; in zonefs_write_checks()
873 mutex_unlock(&zi->i_truncate_mutex); in zonefs_write_checks()
876 count = zonefs_write_check_limits(file, iocb->ki_pos, in zonefs_write_checks()
886 * Handle direct writes. For sequential zone files, this is the only possible
891 * elevator feature is being used (e.g. mq-deadline). The block layer always
897 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_dio_write()
899 struct super_block *sb = inode->i_sb; in zonefs_file_dio_write()
905 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT in zonefs_file_dio_write()
907 * on the inode lock while the second goes through and is now unaligned). in zonefs_file_dio_write()
909 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync && in zonefs_file_dio_write()
910 (iocb->ki_flags & IOCB_NOWAIT)) in zonefs_file_dio_write()
911 return -EOPNOTSUPP; in zonefs_file_dio_write()
913 if (iocb->ki_flags & IOCB_NOWAIT) { in zonefs_file_dio_write()
915 return -EAGAIN; in zonefs_file_dio_write()
926 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) { in zonefs_file_dio_write()
927 ret = -EINVAL; in zonefs_file_dio_write()
932 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) { in zonefs_file_dio_write()
933 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_dio_write()
934 if (iocb->ki_pos != zi->i_wpoffset) { in zonefs_file_dio_write()
935 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_dio_write()
936 ret = -EINVAL; in zonefs_file_dio_write()
939 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_dio_write()
948 if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && in zonefs_file_dio_write()
949 (ret > 0 || ret == -EIOCBQUEUED)) { in zonefs_file_dio_write()
958 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_dio_write()
959 zi->i_wpoffset += count; in zonefs_file_dio_write()
961 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_dio_write()
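
The zonefs_file_dio_write() fragments above require direct I/O for sequential zone files, a position and count aligned to the filesystem block size, and a write offset equal to the zone write pointer (the current file size). A minimal userspace sketch, assuming a hypothetical path and a 4096-byte block size, appends one block accordingly:

    /* Hedged sketch: append one block to a sequential zone file. The
     * write must be O_DIRECT, block aligned, and issued at the current
     * file size (the zone write pointer). Path and block size are
     * assumptions for illustration. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/mnt/zonefs/seq/0";    /* hypothetical */
        const size_t blksz = 4096;                 /* assumed block size */
        struct stat st;
        void *buf;
        int fd;

        fd = open(path, O_WRONLY | O_DIRECT);
        if (fd < 0 || fstat(fd, &st)) {
            perror("open/fstat");
            return 1;
        }

        if (posix_memalign(&buf, blksz, blksz))
            return 1;
        memset(buf, 0xab, blksz);

        /* st.st_size is the zone write pointer; unaligned or
         * out-of-place writes would fail with EINVAL. */
        if (pwrite(fd, buf, blksz, st.st_size) < 0)
            perror("pwrite");

        free(buf);
        close(fd);
        return 0;
    }
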
973 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_buffered_write()
978 * Direct IO writes are mandatory for sequential zone files so that the in zonefs_file_buffered_write()
981 if (zi->i_ztype != ZONEFS_ZTYPE_CNV) in zonefs_file_buffered_write()
982 return -EIO; in zonefs_file_buffered_write()
984 if (iocb->ki_flags & IOCB_NOWAIT) { in zonefs_file_buffered_write()
986 return -EAGAIN; in zonefs_file_buffered_write()
997 iocb->ki_pos += ret; in zonefs_file_buffered_write()
998 else if (ret == -EIO) in zonefs_file_buffered_write()
1011 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_write_iter()
1014 return -EPERM; in zonefs_file_write_iter()
1016 if (sb_rdonly(inode->i_sb)) in zonefs_file_write_iter()
1017 return -EROFS; in zonefs_file_write_iter()
1020 if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size) in zonefs_file_write_iter()
1021 return -EFBIG; in zonefs_file_write_iter()
1023 if (iocb->ki_flags & IOCB_DIRECT) { in zonefs_file_write_iter()
1025 if (ret != -ENOTBLK) in zonefs_file_write_iter()
1036 zonefs_io_error(file_inode(iocb->ki_filp), false); in zonefs_file_read_dio_end_io()
1049 struct inode *inode = file_inode(iocb->ki_filp); in zonefs_file_read_iter()
1051 struct super_block *sb = inode->i_sb; in zonefs_file_read_iter()
1056 if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777))) in zonefs_file_read_iter()
1057 return -EPERM; in zonefs_file_read_iter()
1059 if (iocb->ki_pos >= zi->i_max_size) in zonefs_file_read_iter()
1062 if (iocb->ki_flags & IOCB_NOWAIT) { in zonefs_file_read_iter()
1064 return -EAGAIN; in zonefs_file_read_iter()
1070 mutex_lock(&zi->i_truncate_mutex); in zonefs_file_read_iter()
1072 if (iocb->ki_pos >= isize) { in zonefs_file_read_iter()
1073 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_read_iter()
1077 iov_iter_truncate(to, isize - iocb->ki_pos); in zonefs_file_read_iter()
1078 mutex_unlock(&zi->i_truncate_mutex); in zonefs_file_read_iter()
1080 if (iocb->ki_flags & IOCB_DIRECT) { in zonefs_file_read_iter()
1083 if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) { in zonefs_file_read_iter()
1084 ret = -EINVAL; in zonefs_file_read_iter()
1087 file_accessed(iocb->ki_filp); in zonefs_file_read_iter()
1092 if (ret == -EIO) in zonefs_file_read_iter()
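
The zonefs_file_read_iter() fragments above truncate reads at the inode size (the write pointer for sequential files), return 0 past it, and require block alignment for direct reads. A minimal sketch under the same assumptions as the write example reads back the first block:

    /* Hedged sketch: direct read from a sequential zone file. Reads are
     * truncated at the file size and must be block aligned. Path and
     * block size are assumptions. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/mnt/zonefs/seq/0";    /* hypothetical */
        const size_t blksz = 4096;                 /* assumed block size */
        void *buf;
        ssize_t n;
        int fd;

        fd = open(path, O_RDONLY | O_DIRECT);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (posix_memalign(&buf, blksz, blksz))
            return 1;

        /* A read at or past the write pointer simply returns 0 (EOF). */
        n = pread(fd, buf, blksz, 0);
        printf("read %zd bytes\n", n);

        free(buf);
        close(fd);
        return 0;
    }
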
1110 if (zi->i_ztype != ZONEFS_ZTYPE_SEQ) in zonefs_seq_file_need_wro()
1113 if (!(file->f_mode & FMODE_WRITE)) in zonefs_seq_file_need_wro()
1124 mutex_lock(&zi->i_truncate_mutex); in zonefs_seq_file_write_open()
1126 if (!zi->i_wr_refcnt) { in zonefs_seq_file_write_open()
1127 struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb); in zonefs_seq_file_write_open()
1128 unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files); in zonefs_seq_file_write_open()
1130 if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) { in zonefs_seq_file_write_open()
1132 if (sbi->s_max_wro_seq_files in zonefs_seq_file_write_open()
1133 && wro > sbi->s_max_wro_seq_files) { in zonefs_seq_file_write_open()
1134 atomic_dec(&sbi->s_wro_seq_files); in zonefs_seq_file_write_open()
1135 ret = -EBUSY; in zonefs_seq_file_write_open()
1139 if (i_size_read(inode) < zi->i_max_size) { in zonefs_seq_file_write_open()
1142 atomic_dec(&sbi->s_wro_seq_files); in zonefs_seq_file_write_open()
1145 zi->i_flags |= ZONEFS_ZONE_OPEN; in zonefs_seq_file_write_open()
1151 zi->i_wr_refcnt++; in zonefs_seq_file_write_open()
1154 mutex_unlock(&zi->i_truncate_mutex); in zonefs_seq_file_write_open()
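
The zonefs_seq_file_write_open() fragments above show that, with the explicit-open mount option, the first writable open of a sequential file explicitly opens the zone and fails with -EBUSY once the device's open zone limit is reached. A minimal sketch, with an assumed path and back-off, retries on EBUSY:

    /* Hedged sketch: with explicit-open, opening more sequential files
     * for writing than the device's open zone limit fails with EBUSY.
     * Retry after other writers close. Path is illustrative only. */
    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int open_seq_for_write(const char *path)
    {
        int fd, retries = 5;

        do {
            fd = open(path, O_WRONLY | O_DIRECT);
            if (fd >= 0 || errno != EBUSY)
                return fd;
            usleep(100000);    /* wait for another writer to close */
        } while (--retries);

        return -1;
    }

    int main(void)
    {
        int fd = open_seq_for_write("/mnt/zonefs/seq/0");    /* hypothetical */

        if (fd < 0) {
            perror("open");
            return 1;
        }
        close(fd);
        return 0;
    }
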
1176 struct super_block *sb = inode->i_sb; in zonefs_seq_file_write_close()
1180 mutex_lock(&zi->i_truncate_mutex); in zonefs_seq_file_write_close()
1182 zi->i_wr_refcnt--; in zonefs_seq_file_write_close()
1183 if (zi->i_wr_refcnt) in zonefs_seq_file_write_close()
1191 if (zi->i_flags & ZONEFS_ZONE_OPEN) { in zonefs_seq_file_write_close()
1199 * read-only. in zonefs_seq_file_write_close()
1201 if (zi->i_flags & ZONEFS_ZONE_OPEN && in zonefs_seq_file_write_close()
1202 !(sb->s_flags & SB_RDONLY)) { in zonefs_seq_file_write_close()
1205 zi->i_zsector, ret); in zonefs_seq_file_write_close()
1207 "remounting filesystem read-only\n"); in zonefs_seq_file_write_close()
1208 sb->s_flags |= SB_RDONLY; in zonefs_seq_file_write_close()
1213 zi->i_flags &= ~ZONEFS_ZONE_OPEN; in zonefs_seq_file_write_close()
1217 atomic_dec(&sbi->s_wro_seq_files); in zonefs_seq_file_write_close()
1220 mutex_unlock(&zi->i_truncate_mutex); in zonefs_seq_file_write_close()
1228 * the zone has gone offline or read-only). Make sure we don't fail the in zonefs_file_release()
1229 * close(2) for user-space. in zonefs_file_release()
1260 inode_init_once(&zi->i_vnode); in zonefs_alloc_inode()
1261 mutex_init(&zi->i_truncate_mutex); in zonefs_alloc_inode()
1262 zi->i_wr_refcnt = 0; in zonefs_alloc_inode()
1263 zi->i_flags = 0; in zonefs_alloc_inode()
1265 return &zi->i_vnode; in zonefs_alloc_inode()
1278 struct super_block *sb = dentry->d_sb; in zonefs_statfs()
1282 buf->f_type = ZONEFS_MAGIC; in zonefs_statfs()
1283 buf->f_bsize = sb->s_blocksize; in zonefs_statfs()
1284 buf->f_namelen = ZONEFS_NAME_MAX; in zonefs_statfs()
1286 spin_lock(&sbi->s_lock); in zonefs_statfs()
1288 buf->f_blocks = sbi->s_blocks; in zonefs_statfs()
1289 if (WARN_ON(sbi->s_used_blocks > sbi->s_blocks)) in zonefs_statfs()
1290 buf->f_bfree = 0; in zonefs_statfs()
1292 buf->f_bfree = buf->f_blocks - sbi->s_used_blocks; in zonefs_statfs()
1293 buf->f_bavail = buf->f_bfree; in zonefs_statfs()
1296 if (sbi->s_nr_files[t]) in zonefs_statfs()
1297 buf->f_files += sbi->s_nr_files[t] + 1; in zonefs_statfs()
1299 buf->f_ffree = 0; in zonefs_statfs()
1301 spin_unlock(&sbi->s_lock); in zonefs_statfs()
1303 buf->f_fsid = uuid_to_fsid(sbi->s_uuid.b); in zonefs_statfs()
1314 { Opt_errors_ro, "errors=remount-ro"},
1315 { Opt_errors_zro, "errors=zone-ro"},
1316 { Opt_errors_zol, "errors=zone-offline"},
1318 { Opt_explicit_open, "explicit-open" },
1340 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK; in zonefs_parse_options()
1341 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_RO; in zonefs_parse_options()
1344 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK; in zonefs_parse_options()
1345 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZRO; in zonefs_parse_options()
1348 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK; in zonefs_parse_options()
1349 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZOL; in zonefs_parse_options()
1352 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK; in zonefs_parse_options()
1353 sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_REPAIR; in zonefs_parse_options()
1356 sbi->s_mount_opts |= ZONEFS_MNTOPT_EXPLICIT_OPEN; in zonefs_parse_options()
1359 return -EINVAL; in zonefs_parse_options()
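
The option table and zonefs_parse_options() fragments above accept errors=remount-ro, errors=zone-ro, errors=zone-offline, errors=repair and explicit-open. A minimal sketch, with assumed device and mount point paths, passes two of them through mount(2):

    /* Hedged sketch: mount a zoned block device with zonefs, selecting
     * one of the errors= behaviors and explicit-open parsed above.
     * Device and mount point paths are assumptions for illustration. */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        if (mount("/dev/nullb0", "/mnt/zonefs", "zonefs", 0,
                  "errors=zone-ro,explicit-open")) {
            perror("mount");
            return 1;
        }
        return 0;
    }
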
1368 struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb); in zonefs_show_options()
1370 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO) in zonefs_show_options()
1371 seq_puts(seq, ",errors=remount-ro"); in zonefs_show_options()
1372 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) in zonefs_show_options()
1373 seq_puts(seq, ",errors=zone-ro"); in zonefs_show_options()
1374 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL) in zonefs_show_options()
1375 seq_puts(seq, ",errors=zone-offline"); in zonefs_show_options()
1376 if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR) in zonefs_show_options()
1405 struct super_block *sb = parent->i_sb; in zonefs_init_dir_inode()
1407 inode->i_ino = bdev_nr_zones(sb->s_bdev) + type + 1; in zonefs_init_dir_inode()
1409 inode->i_op = &zonefs_dir_inode_operations; in zonefs_init_dir_inode()
1410 inode->i_fop = &simple_dir_operations; in zonefs_init_dir_inode()
1418 struct super_block *sb = inode->i_sb; in zonefs_init_file_inode()
1423 inode->i_ino = zone->start >> sbi->s_zone_sectors_shift; in zonefs_init_file_inode()
1424 inode->i_mode = S_IFREG | sbi->s_perm; in zonefs_init_file_inode()
1426 zi->i_ztype = type; in zonefs_init_file_inode()
1427 zi->i_zsector = zone->start; in zonefs_init_file_inode()
1428 zi->i_zone_size = zone->len << SECTOR_SHIFT; in zonefs_init_file_inode()
1429 if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT && in zonefs_init_file_inode()
1430 !(sbi->s_features & ZONEFS_F_AGGRCNV)) { in zonefs_init_file_inode()
1433 zi->i_zone_size, in zonefs_init_file_inode()
1434 bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT); in zonefs_init_file_inode()
1435 return -EINVAL; in zonefs_init_file_inode()
1438 zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE, in zonefs_init_file_inode()
1439 zone->capacity << SECTOR_SHIFT); in zonefs_init_file_inode()
1440 zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true); in zonefs_init_file_inode()
1442 inode->i_uid = sbi->s_uid; in zonefs_init_file_inode()
1443 inode->i_gid = sbi->s_gid; in zonefs_init_file_inode()
1444 inode->i_size = zi->i_wpoffset; in zonefs_init_file_inode()
1445 inode->i_blocks = zi->i_max_size >> SECTOR_SHIFT; in zonefs_init_file_inode()
1447 inode->i_op = &zonefs_file_inode_operations; in zonefs_init_file_inode()
1448 inode->i_fop = &zonefs_file_operations; in zonefs_init_file_inode()
1449 inode->i_mapping->a_ops = &zonefs_file_aops; in zonefs_init_file_inode()
1451 sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes); in zonefs_init_file_inode()
1452 sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits; in zonefs_init_file_inode()
1453 sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits; in zonefs_init_file_inode()
1455 mutex_lock(&zi->i_truncate_mutex); in zonefs_init_file_inode()
1464 (zone->cond == BLK_ZONE_COND_IMP_OPEN || in zonefs_init_file_inode()
1465 zone->cond == BLK_ZONE_COND_EXP_OPEN)) { in zonefs_init_file_inode()
1474 mutex_unlock(&zi->i_truncate_mutex); in zonefs_init_file_inode()
1486 int ret = -ENOMEM; in zonefs_create_inode()
1492 inode = new_inode(parent->d_sb); in zonefs_create_inode()
1496 inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime; in zonefs_create_inode()
1508 dir->i_size++; in zonefs_create_inode()
1530 struct super_block *sb = zd->sb; in zonefs_create_zgroup()
1540 if (!zd->nr_zones[type]) in zonefs_create_zgroup()
1545 return -ENOMEM; in zonefs_create_zgroup()
1552 dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type); in zonefs_create_zgroup()
1561 end = zd->zones + bdev_nr_zones(sb->s_bdev); in zonefs_create_zgroup()
1562 for (zone = &zd->zones[1]; zone < end; zone = next) { in zonefs_create_zgroup()
1572 * aggregated together. If one offline or read-only zone is in zonefs_create_zgroup()
1577 (sbi->s_features & ZONEFS_F_AGGRCNV)) { in zonefs_create_zgroup()
1581 zone->len += next->len; in zonefs_create_zgroup()
1582 zone->capacity += next->capacity; in zonefs_create_zgroup()
1583 if (next->cond == BLK_ZONE_COND_READONLY && in zonefs_create_zgroup()
1584 zone->cond != BLK_ZONE_COND_OFFLINE) in zonefs_create_zgroup()
1585 zone->cond = BLK_ZONE_COND_READONLY; in zonefs_create_zgroup()
1586 else if (next->cond == BLK_ZONE_COND_OFFLINE) in zonefs_create_zgroup()
1587 zone->cond = BLK_ZONE_COND_OFFLINE; in zonefs_create_zgroup()
1589 if (zone->capacity != zone->len) { in zonefs_create_zgroup()
1591 ret = -EINVAL; in zonefs_create_zgroup()
1599 snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n); in zonefs_create_zgroup()
1612 sbi->s_nr_files[type] = n; in zonefs_create_zgroup()
1630 switch (zone->type) { in zonefs_get_zone_info_cb()
1632 zone->wp = zone->start + zone->len; in zonefs_get_zone_info_cb()
1634 zd->nr_zones[ZONEFS_ZTYPE_CNV]++; in zonefs_get_zone_info_cb()
1639 zd->nr_zones[ZONEFS_ZTYPE_SEQ]++; in zonefs_get_zone_info_cb()
1642 zonefs_err(zd->sb, "Unsupported zone type 0x%x\n", in zonefs_get_zone_info_cb()
1643 zone->type); in zonefs_get_zone_info_cb()
1644 return -EIO; in zonefs_get_zone_info_cb()
1647 memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone)); in zonefs_get_zone_info_cb()
1654 struct block_device *bdev = zd->sb->s_bdev; in zonefs_get_zone_info()
1657 zd->zones = kvcalloc(bdev_nr_zones(bdev), sizeof(struct blk_zone), in zonefs_get_zone_info()
1659 if (!zd->zones) in zonefs_get_zone_info()
1660 return -ENOMEM; in zonefs_get_zone_info()
1666 zonefs_err(zd->sb, "Zone report failed %d\n", ret); in zonefs_get_zone_info()
1671 zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n", in zonefs_get_zone_info()
1673 return -EIO; in zonefs_get_zone_info()
1681 kvfree(zd->zones); in zonefs_cleanup_zone_info()
1699 return -ENOMEM; in zonefs_read_super()
1701 bio_init(&bio, sb->s_bdev, &bio_vec, 1, REQ_OP_READ); in zonefs_read_super()
1711 ret = -EINVAL; in zonefs_read_super()
1712 if (le32_to_cpu(super->s_magic) != ZONEFS_MAGIC) in zonefs_read_super()
1715 stored_crc = le32_to_cpu(super->s_crc); in zonefs_read_super()
1716 super->s_crc = 0; in zonefs_read_super()
1724 sbi->s_features = le64_to_cpu(super->s_features); in zonefs_read_super()
1725 if (sbi->s_features & ~ZONEFS_F_DEFINED_FEATURES) { in zonefs_read_super()
1727 sbi->s_features); in zonefs_read_super()
1731 if (sbi->s_features & ZONEFS_F_UID) { in zonefs_read_super()
1732 sbi->s_uid = make_kuid(current_user_ns(), in zonefs_read_super()
1733 le32_to_cpu(super->s_uid)); in zonefs_read_super()
1734 if (!uid_valid(sbi->s_uid)) { in zonefs_read_super()
1740 if (sbi->s_features & ZONEFS_F_GID) { in zonefs_read_super()
1741 sbi->s_gid = make_kgid(current_user_ns(), in zonefs_read_super()
1742 le32_to_cpu(super->s_gid)); in zonefs_read_super()
1743 if (!gid_valid(sbi->s_gid)) { in zonefs_read_super()
1749 if (sbi->s_features & ZONEFS_F_PERM) in zonefs_read_super()
1750 sbi->s_perm = le32_to_cpu(super->s_perm); in zonefs_read_super()
1752 if (memchr_inv(super->s_reserved, 0, sizeof(super->s_reserved))) { in zonefs_read_super()
1757 import_uuid(&sbi->s_uuid, super->s_uuid); in zonefs_read_super()
1768 * sub-directories and files according to the device zone configuration and
1779 if (!bdev_is_zoned(sb->s_bdev)) { in zonefs_fill_super()
1781 return -EINVAL; in zonefs_fill_super()
1792 return -ENOMEM; in zonefs_fill_super()
1794 spin_lock_init(&sbi->s_lock); in zonefs_fill_super()
1795 sb->s_fs_info = sbi; in zonefs_fill_super()
1796 sb->s_magic = ZONEFS_MAGIC; in zonefs_fill_super()
1797 sb->s_maxbytes = 0; in zonefs_fill_super()
1798 sb->s_op = &zonefs_sops; in zonefs_fill_super()
1799 sb->s_time_gran = 1; in zonefs_fill_super()
1806 sb_set_blocksize(sb, bdev_zone_write_granularity(sb->s_bdev)); in zonefs_fill_super()
1807 sbi->s_zone_sectors_shift = ilog2(bdev_zone_sectors(sb->s_bdev)); in zonefs_fill_super()
1808 sbi->s_uid = GLOBAL_ROOT_UID; in zonefs_fill_super()
1809 sbi->s_gid = GLOBAL_ROOT_GID; in zonefs_fill_super()
1810 sbi->s_perm = 0640; in zonefs_fill_super()
1811 sbi->s_mount_opts = ZONEFS_MNTOPT_ERRORS_RO; in zonefs_fill_super()
1813 atomic_set(&sbi->s_wro_seq_files, 0); in zonefs_fill_super()
1814 sbi->s_max_wro_seq_files = bdev_max_open_zones(sb->s_bdev); in zonefs_fill_super()
1815 atomic_set(&sbi->s_active_seq_files, 0); in zonefs_fill_super()
1816 sbi->s_max_active_seq_files = bdev_max_active_zones(sb->s_bdev); in zonefs_fill_super()
1836 zonefs_info(sb, "Mounting %u zones", bdev_nr_zones(sb->s_bdev)); in zonefs_fill_super()
1838 if (!sbi->s_max_wro_seq_files && in zonefs_fill_super()
1839 !sbi->s_max_active_seq_files && in zonefs_fill_super()
1840 sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) { in zonefs_fill_super()
1842 "No open and active zone limits. Ignoring explicit_open mount option\n"); in zonefs_fill_super()
1843 sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN; in zonefs_fill_super()
1847 ret = -ENOMEM; in zonefs_fill_super()
1852 inode->i_ino = bdev_nr_zones(sb->s_bdev); in zonefs_fill_super()
1853 inode->i_mode = S_IFDIR | 0555; in zonefs_fill_super()
1854 inode->i_ctime = inode->i_mtime = inode->i_atime = current_time(inode); in zonefs_fill_super()
1855 inode->i_op = &zonefs_dir_inode_operations; in zonefs_fill_super()
1856 inode->i_fop = &simple_dir_operations; in zonefs_fill_super()
1859 sb->s_root = d_make_root(inode); in zonefs_fill_super()
1860 if (!sb->s_root) in zonefs_fill_super()
1886 if (sb->s_root) in zonefs_kill_super()
1887 d_genocide(sb->s_root); in zonefs_kill_super()
1912 return -ENOMEM; in zonefs_init_inodecache()