Lines Matching refs:sbi in fs/f2fs/super.c (each entry lists the source line number, the matching code, and the enclosing function)
67 void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, in f2fs_build_fault_attr() argument
70 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; in f2fs_build_fault_attr()
251 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...) in f2fs_printk() argument
263 KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf); in f2fs_printk()
307 static inline void limit_reserve_root(struct f2fs_sb_info *sbi) in limit_reserve_root() argument
309 block_t limit = min((sbi->user_block_count >> 3), in limit_reserve_root()
310 sbi->user_block_count - sbi->reserved_blocks); in limit_reserve_root()
313 if (test_opt(sbi, RESERVE_ROOT) && in limit_reserve_root()
314 F2FS_OPTION(sbi).root_reserved_blocks > limit) { in limit_reserve_root()
315 F2FS_OPTION(sbi).root_reserved_blocks = limit; in limit_reserve_root()
316 f2fs_info(sbi, "Reduce reserved blocks for root = %u", in limit_reserve_root()
317 F2FS_OPTION(sbi).root_reserved_blocks); in limit_reserve_root()
319 if (!test_opt(sbi, RESERVE_ROOT) && in limit_reserve_root()
320 (!uid_eq(F2FS_OPTION(sbi).s_resuid, in limit_reserve_root()
322 !gid_eq(F2FS_OPTION(sbi).s_resgid, in limit_reserve_root()
324 f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", in limit_reserve_root()
326 F2FS_OPTION(sbi).s_resuid), in limit_reserve_root()
328 F2FS_OPTION(sbi).s_resgid)); in limit_reserve_root()
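
The limit_reserve_root() entries above clamp the reserve_root mount option to min(user_block_count / 8, user_block_count - reserved_blocks). Below is a minimal stand-alone sketch of that clamping, using simplified stand-in fields rather than the real struct f2fs_sb_info / F2FS_OPTION() layout:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t block_t;

/* Simplified stand-ins for the fields the listed lines touch. */
struct reserve_sketch {
	block_t user_block_count;
	block_t reserved_blocks;	/* filesystem-internal reservation */
	block_t root_reserved_blocks;	/* reserve_root= mount option */
};

/* Cap root's reservation at 12.5% of user blocks, and never above what is
 * left once the filesystem's own reserved blocks are excluded. */
static void limit_reserve_root_sketch(struct reserve_sketch *s)
{
	block_t limit = s->user_block_count >> 3;

	if (limit > s->user_block_count - s->reserved_blocks)
		limit = s->user_block_count - s->reserved_blocks;

	if (s->root_reserved_blocks > limit) {
		s->root_reserved_blocks = limit;
		printf("Reduce reserved blocks for root = %u\n",
		       (unsigned)s->root_reserved_blocks);
	}
}

int main(void)
{
	struct reserve_sketch s = {
		.user_block_count = 1000000,
		.reserved_blocks = 2048,
		.root_reserved_blocks = 500000,	/* asks for more than the cap */
	};

	limit_reserve_root_sketch(&s);	/* clamps to 125000 (12.5%) */
	return 0;
}
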
331 static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi) in adjust_reserved_segment() argument
333 unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec; in adjust_reserved_segment()
338 if (!F2FS_IO_ALIGNED(sbi)) in adjust_reserved_segment()
342 avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi); in adjust_reserved_segment()
347 wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) * in adjust_reserved_segment()
348 reserved_segments(sbi); in adjust_reserved_segment()
349 wanted_reserved_segments -= reserved_segments(sbi); in adjust_reserved_segment()
351 avail_user_block_count = sbi->user_block_count - in adjust_reserved_segment()
352 sbi->current_reserved_blocks - in adjust_reserved_segment()
353 F2FS_OPTION(sbi).root_reserved_blocks; in adjust_reserved_segment()
355 if (wanted_reserved_segments * sbi->blocks_per_seg > in adjust_reserved_segment()
357 …f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u… in adjust_reserved_segment()
359 avail_user_block_count >> sbi->log_blocks_per_seg); in adjust_reserved_segment()
363 SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments; in adjust_reserved_segment()
365 f2fs_info(sbi, "IO align feature needs additional reserved segment: %u", in adjust_reserved_segment()
371 static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi) in adjust_unusable_cap_perc() argument
373 if (!F2FS_OPTION(sbi).unusable_cap_perc) in adjust_unusable_cap_perc()
376 if (F2FS_OPTION(sbi).unusable_cap_perc == 100) in adjust_unusable_cap_perc()
377 F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count; in adjust_unusable_cap_perc()
379 F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) * in adjust_unusable_cap_perc()
380 F2FS_OPTION(sbi).unusable_cap_perc; in adjust_unusable_cap_perc()
382 f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%", in adjust_unusable_cap_perc()
383 F2FS_OPTION(sbi).unusable_cap, in adjust_unusable_cap_perc()
384 F2FS_OPTION(sbi).unusable_cap_perc); in adjust_unusable_cap_perc()
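
adjust_unusable_cap_perc() turns the unusable_cap_perc percentage tied to checkpoint=disable into an absolute block budget: 100 maps to the whole user_block_count, any other value to (user_block_count / 100) * perc, and an unset percentage leaves the cap alone. A small sketch of that arithmetic, with simplified names in place of the real option structures:

#include <stdint.h>

typedef uint32_t block_t;

/*
 * Convert unusable_cap_perc into an absolute block count. In the listed
 * code an unset percentage simply returns early without touching the cap;
 * here that case is reported back as "no change" via the return value.
 */
static int unusable_cap_from_perc(block_t user_block_count,
				  unsigned int unusable_cap_perc,
				  block_t *unusable_cap)
{
	if (!unusable_cap_perc)
		return 0;			/* option not set: leave cap as is */

	if (unusable_cap_perc == 100)
		*unusable_cap = user_block_count;
	else
		/* divide first, matching the listed code, to avoid overflow */
		*unusable_cap = (user_block_count / 100) * unusable_cap_perc;

	return 1;				/* cap was (re)computed */
}

Dividing before multiplying trades a little precision for overflow safety on 32-bit block counts, which is the same ordering the listed code uses.
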
400 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_set_qf_name() local
404 if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_set_qf_name()
405 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); in f2fs_set_qf_name()
408 if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_set_qf_name()
409 f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name"); in f2fs_set_qf_name()
415 f2fs_err(sbi, "Not enough memory for storing quotafile name"); in f2fs_set_qf_name()
418 if (F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_set_qf_name()
419 if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0) in f2fs_set_qf_name()
422 f2fs_err(sbi, "%s quota file already specified", in f2fs_set_qf_name()
427 f2fs_err(sbi, "quotafile must be on filesystem root"); in f2fs_set_qf_name()
430 F2FS_OPTION(sbi).s_qf_names[qtype] = qname; in f2fs_set_qf_name()
431 set_opt(sbi, QUOTA); in f2fs_set_qf_name()
440 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_clear_qf_name() local
442 if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_clear_qf_name()
443 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); in f2fs_clear_qf_name()
446 kfree(F2FS_OPTION(sbi).s_qf_names[qtype]); in f2fs_clear_qf_name()
447 F2FS_OPTION(sbi).s_qf_names[qtype] = NULL; in f2fs_clear_qf_name()
451 static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) in f2fs_check_quota_options() argument
458 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) { in f2fs_check_quota_options()
459 f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement."); in f2fs_check_quota_options()
462 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || in f2fs_check_quota_options()
463 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || in f2fs_check_quota_options()
464 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) { in f2fs_check_quota_options()
465 if (test_opt(sbi, USRQUOTA) && in f2fs_check_quota_options()
466 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) in f2fs_check_quota_options()
467 clear_opt(sbi, USRQUOTA); in f2fs_check_quota_options()
469 if (test_opt(sbi, GRPQUOTA) && in f2fs_check_quota_options()
470 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) in f2fs_check_quota_options()
471 clear_opt(sbi, GRPQUOTA); in f2fs_check_quota_options()
473 if (test_opt(sbi, PRJQUOTA) && in f2fs_check_quota_options()
474 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) in f2fs_check_quota_options()
475 clear_opt(sbi, PRJQUOTA); in f2fs_check_quota_options()
477 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) || in f2fs_check_quota_options()
478 test_opt(sbi, PRJQUOTA)) { in f2fs_check_quota_options()
479 f2fs_err(sbi, "old and new quota format mixing"); in f2fs_check_quota_options()
483 if (!F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_check_quota_options()
484 f2fs_err(sbi, "journaled quota format not specified"); in f2fs_check_quota_options()
489 if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_check_quota_options()
490 f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt"); in f2fs_check_quota_options()
491 F2FS_OPTION(sbi).s_jquota_fmt = 0; in f2fs_check_quota_options()
502 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_set_test_dummy_encryption() local
508 &F2FS_OPTION(sbi).dummy_enc_policy; in f2fs_set_test_dummy_encryption()
512 f2fs_warn(sbi, "test_dummy_encryption option not supported"); in f2fs_set_test_dummy_encryption()
516 if (!f2fs_sb_has_encrypt(sbi)) { in f2fs_set_test_dummy_encryption()
517 f2fs_err(sbi, "Encrypt feature is off"); in f2fs_set_test_dummy_encryption()
528 f2fs_warn(sbi, "Can't set test_dummy_encryption on remount"); in f2fs_set_test_dummy_encryption()
535 f2fs_warn(sbi, in f2fs_set_test_dummy_encryption()
538 f2fs_warn(sbi, "Value of option \"%s\" is unrecognized", in f2fs_set_test_dummy_encryption()
541 f2fs_warn(sbi, "Error processing option \"%s\" [%d]", in f2fs_set_test_dummy_encryption()
545 f2fs_warn(sbi, "Test dummy encryption mode enabled"); in f2fs_set_test_dummy_encryption()
557 static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi) in f2fs_test_compress_extension() argument
563 ext = F2FS_OPTION(sbi).extensions; in f2fs_test_compress_extension()
564 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; in f2fs_test_compress_extension()
565 noext = F2FS_OPTION(sbi).noextensions; in f2fs_test_compress_extension()
566 noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; in f2fs_test_compress_extension()
573 f2fs_info(sbi, "Don't allow the nocompress extension specifies all files"); in f2fs_test_compress_extension()
578 …f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension… in f2fs_test_compress_extension()
588 static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str) in f2fs_set_lz4hc_level() argument
594 F2FS_OPTION(sbi).compress_level = 0; in f2fs_set_lz4hc_level()
601 f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>"); in f2fs_set_lz4hc_level()
608 f2fs_info(sbi, "invalid lz4hc compress level: %d", level); in f2fs_set_lz4hc_level()
612 F2FS_OPTION(sbi).compress_level = level; in f2fs_set_lz4hc_level()
616 F2FS_OPTION(sbi).compress_level = 0; in f2fs_set_lz4hc_level()
619 f2fs_info(sbi, "kernel doesn't support lz4hc compression"); in f2fs_set_lz4hc_level()
626 static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str) in f2fs_set_zstd_level() argument
632 F2FS_OPTION(sbi).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL; in f2fs_set_zstd_level()
639 f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>"); in f2fs_set_zstd_level()
646 f2fs_info(sbi, "invalid zstd compress level: %d", level); in f2fs_set_zstd_level()
650 F2FS_OPTION(sbi).compress_level = level; in f2fs_set_zstd_level()
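
f2fs_set_lz4hc_level() and f2fs_set_zstd_level() share one parsing shape for compress_algorithm=<alg_name>:<compr_level>: no ':' means "use the default level", otherwise the integer after ':' is parsed and range-checked. A userspace sketch of that shape follows; the bounds below are only illustrative, and the real code uses the kernel's kstrto*() helpers rather than strtol():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Parse "<alg_name>" or "<alg_name>:<compr_level>". Returns 0 on success
 * and fills *level (0 meaning "algorithm default"), -1 on malformed input.
 * The 1..22 range is an example bound, not the exact kernel check.
 */
static int parse_compress_level(const char *str, int *level)
{
	const char *colon = strchr(str, ':');
	char *end;
	long val;

	if (!colon) {
		*level = 0;
		return 0;
	}

	val = strtol(colon + 1, &end, 10);
	if (end == colon + 1 || *end != '\0') {
		fprintf(stderr, "wrong format, e.g. <alg_name>:<compr_level>\n");
		return -1;
	}
	if (val < 1 || val > 22) {
		fprintf(stderr, "invalid compress level: %ld\n", val);
		return -1;
	}

	*level = (int)val;
	return 0;
}
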
658 struct f2fs_sb_info *sbi = F2FS_SB(sb); in parse_options() local
693 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; in parse_options()
695 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF; in parse_options()
697 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC; in parse_options()
705 set_opt(sbi, DISABLE_ROLL_FORWARD); in parse_options()
709 set_opt(sbi, NORECOVERY); in parse_options()
714 if (!f2fs_hw_support_discard(sbi)) { in parse_options()
715 f2fs_warn(sbi, "device does not support discard"); in parse_options()
718 set_opt(sbi, DISCARD); in parse_options()
721 if (f2fs_hw_should_discard(sbi)) { in parse_options()
722 f2fs_warn(sbi, "discard is required for zoned block devices"); in parse_options()
725 clear_opt(sbi, DISCARD); in parse_options()
728 set_opt(sbi, NOHEAP); in parse_options()
731 clear_opt(sbi, NOHEAP); in parse_options()
735 set_opt(sbi, XATTR_USER); in parse_options()
738 clear_opt(sbi, XATTR_USER); in parse_options()
741 set_opt(sbi, INLINE_XATTR); in parse_options()
744 clear_opt(sbi, INLINE_XATTR); in parse_options()
749 set_opt(sbi, INLINE_XATTR_SIZE); in parse_options()
750 F2FS_OPTION(sbi).inline_xattr_size = arg; in parse_options()
754 f2fs_info(sbi, "user_xattr options not supported"); in parse_options()
757 f2fs_info(sbi, "nouser_xattr options not supported"); in parse_options()
760 f2fs_info(sbi, "inline_xattr options not supported"); in parse_options()
763 f2fs_info(sbi, "noinline_xattr options not supported"); in parse_options()
768 set_opt(sbi, POSIX_ACL); in parse_options()
771 clear_opt(sbi, POSIX_ACL); in parse_options()
775 f2fs_info(sbi, "acl options not supported"); in parse_options()
778 f2fs_info(sbi, "noacl options not supported"); in parse_options()
787 F2FS_OPTION(sbi).active_logs = arg; in parse_options()
790 set_opt(sbi, DISABLE_EXT_IDENTIFY); in parse_options()
793 set_opt(sbi, INLINE_DATA); in parse_options()
796 set_opt(sbi, INLINE_DENTRY); in parse_options()
799 clear_opt(sbi, INLINE_DENTRY); in parse_options()
802 set_opt(sbi, FLUSH_MERGE); in parse_options()
805 clear_opt(sbi, FLUSH_MERGE); in parse_options()
808 set_opt(sbi, NOBARRIER); in parse_options()
811 clear_opt(sbi, NOBARRIER); in parse_options()
814 set_opt(sbi, FASTBOOT); in parse_options()
817 set_opt(sbi, READ_EXTENT_CACHE); in parse_options()
820 clear_opt(sbi, READ_EXTENT_CACHE); in parse_options()
823 clear_opt(sbi, INLINE_DATA); in parse_options()
826 set_opt(sbi, DATA_FLUSH); in parse_options()
831 if (test_opt(sbi, RESERVE_ROOT)) { in parse_options()
832 f2fs_info(sbi, "Preserve previous reserve_root=%u", in parse_options()
833 F2FS_OPTION(sbi).root_reserved_blocks); in parse_options()
835 F2FS_OPTION(sbi).root_reserved_blocks = arg; in parse_options()
836 set_opt(sbi, RESERVE_ROOT); in parse_options()
844 f2fs_err(sbi, "Invalid uid value %d", arg); in parse_options()
847 F2FS_OPTION(sbi).s_resuid = uid; in parse_options()
854 f2fs_err(sbi, "Invalid gid value %d", arg); in parse_options()
857 F2FS_OPTION(sbi).s_resgid = gid; in parse_options()
865 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; in parse_options()
867 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; in parse_options()
869 F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG; in parse_options()
871 F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK; in parse_options()
882 f2fs_warn(sbi, "Not support %ld, larger than %d", in parse_options()
886 F2FS_OPTION(sbi).write_io_size_bits = arg; in parse_options()
892 f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE); in parse_options()
893 set_opt(sbi, FAULT_INJECTION); in parse_options()
899 f2fs_build_fault_attr(sbi, 0, arg); in parse_options()
900 set_opt(sbi, FAULT_INJECTION); in parse_options()
904 f2fs_info(sbi, "fault_injection options not supported"); in parse_options()
908 f2fs_info(sbi, "fault_type options not supported"); in parse_options()
920 set_opt(sbi, USRQUOTA); in parse_options()
923 set_opt(sbi, GRPQUOTA); in parse_options()
926 set_opt(sbi, PRJQUOTA); in parse_options()
959 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD; in parse_options()
962 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0; in parse_options()
965 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1; in parse_options()
968 clear_opt(sbi, QUOTA); in parse_options()
969 clear_opt(sbi, USRQUOTA); in parse_options()
970 clear_opt(sbi, GRPQUOTA); in parse_options()
971 clear_opt(sbi, PRJQUOTA); in parse_options()
988 f2fs_info(sbi, "quota operations not supported"); in parse_options()
997 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; in parse_options()
999 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; in parse_options()
1011 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; in parse_options()
1013 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT; in parse_options()
1015 F2FS_OPTION(sbi).fsync_mode = in parse_options()
1033 f2fs_info(sbi, "inline encryption not supported"); in parse_options()
1041 F2FS_OPTION(sbi).unusable_cap_perc = arg; in parse_options()
1042 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1047 F2FS_OPTION(sbi).unusable_cap = arg; in parse_options()
1048 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1051 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1054 clear_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1057 set_opt(sbi, MERGE_CHECKPOINT); in parse_options()
1060 clear_opt(sbi, MERGE_CHECKPOINT); in parse_options()
1064 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1065 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1073 F2FS_OPTION(sbi).compress_level = 0; in parse_options()
1074 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1077 f2fs_info(sbi, "kernel doesn't support lzo compression"); in parse_options()
1081 ret = f2fs_set_lz4hc_level(sbi, name); in parse_options()
1086 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1089 f2fs_info(sbi, "kernel doesn't support lz4 compression"); in parse_options()
1093 ret = f2fs_set_zstd_level(sbi, name); in parse_options()
1098 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1101 f2fs_info(sbi, "kernel doesn't support zstd compression"); in parse_options()
1105 F2FS_OPTION(sbi).compress_level = 0; in parse_options()
1106 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1109 f2fs_info(sbi, "kernel doesn't support lzorle compression"); in parse_options()
1118 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1119 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1126 f2fs_err(sbi, in parse_options()
1130 F2FS_OPTION(sbi).compress_log_size = arg; in parse_options()
1133 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1134 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1141 ext = F2FS_OPTION(sbi).extensions; in parse_options()
1142 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; in parse_options()
1146 f2fs_err(sbi, in parse_options()
1153 F2FS_OPTION(sbi).compress_ext_cnt++; in parse_options()
1157 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1158 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1165 noext = F2FS_OPTION(sbi).noextensions; in parse_options()
1166 noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; in parse_options()
1170 f2fs_err(sbi, in parse_options()
1177 F2FS_OPTION(sbi).nocompress_ext_cnt++; in parse_options()
1181 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1182 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1185 F2FS_OPTION(sbi).compress_chksum = true; in parse_options()
1188 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1189 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1196 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS; in parse_options()
1198 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER; in parse_options()
1206 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1207 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1210 set_opt(sbi, COMPRESS_CACHE); in parse_options()
1220 f2fs_info(sbi, "compression options not supported"); in parse_options()
1224 set_opt(sbi, ATGC); in parse_options()
1227 set_opt(sbi, GC_MERGE); in parse_options()
1230 clear_opt(sbi, GC_MERGE); in parse_options()
1237 F2FS_OPTION(sbi).discard_unit = in parse_options()
1240 F2FS_OPTION(sbi).discard_unit = in parse_options()
1243 F2FS_OPTION(sbi).discard_unit = in parse_options()
1256 F2FS_OPTION(sbi).memory_mode = in parse_options()
1259 F2FS_OPTION(sbi).memory_mode = in parse_options()
1268 set_opt(sbi, AGE_EXTENT_CACHE); in parse_options()
1275 F2FS_OPTION(sbi).errors = in parse_options()
1278 F2FS_OPTION(sbi).errors = in parse_options()
1281 F2FS_OPTION(sbi).errors = in parse_options()
1290 f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value", in parse_options()
1297 if (f2fs_check_quota_options(sbi)) in parse_options()
1300 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) { in parse_options()
1301 f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA"); in parse_options()
1304 if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) { in parse_options()
1305 …f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA"); in parse_options()
1310 if (f2fs_sb_has_casefold(sbi)) { in parse_options()
1311 f2fs_err(sbi, in parse_options()
1321 if (f2fs_sb_has_blkzoned(sbi)) { in parse_options()
1323 if (F2FS_OPTION(sbi).discard_unit != in parse_options()
1325 …f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default… in parse_options()
1326 F2FS_OPTION(sbi).discard_unit = in parse_options()
1330 if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) { in parse_options()
1331 f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature"); in parse_options()
1335 f2fs_err(sbi, "Zoned block device support is not enabled"); in parse_options()
1341 if (f2fs_test_compress_extension(sbi)) { in parse_options()
1342 f2fs_err(sbi, "invalid compress or nocompress extension"); in parse_options()
1347 if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) { in parse_options()
1348 f2fs_err(sbi, "Should set mode=lfs with %luKB-sized IO", in parse_options()
1349 F2FS_IO_SIZE_KB(sbi)); in parse_options()
1353 if (test_opt(sbi, INLINE_XATTR_SIZE)) { in parse_options()
1356 if (!f2fs_sb_has_extra_attr(sbi) || in parse_options()
1357 !f2fs_sb_has_flexible_inline_xattr(sbi)) { in parse_options()
1358 f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off"); in parse_options()
1361 if (!test_opt(sbi, INLINE_XATTR)) { in parse_options()
1362 f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option"); in parse_options()
1369 if (F2FS_OPTION(sbi).inline_xattr_size < min_size || in parse_options()
1370 F2FS_OPTION(sbi).inline_xattr_size > max_size) { in parse_options()
1371 f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d", in parse_options()
1377 if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) { in parse_options()
1378 f2fs_err(sbi, "LFS is not compatible with checkpoint=disable"); in parse_options()
1382 if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) { in parse_options()
1383 f2fs_err(sbi, "LFS is not compatible with ATGC"); in parse_options()
1387 if (f2fs_is_readonly(sbi) && test_opt(sbi, FLUSH_MERGE)) { in parse_options()
1388 f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode"); in parse_options()
1392 if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) { in parse_options()
1393 f2fs_err(sbi, "Allow to mount readonly mode only"); in parse_options()
1431 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_drop_inode() local
1438 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in f2fs_drop_inode()
1439 if (inode->i_ino == F2FS_NODE_INO(sbi) || in f2fs_drop_inode()
1440 inode->i_ino == F2FS_META_INO(sbi)) { in f2fs_drop_inode()
1489 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_inode_dirtied() local
1492 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_dirtied()
1497 stat_inc_dirty_inode(sbi, DIRTY_META); in f2fs_inode_dirtied()
1501 &sbi->inode_list[DIRTY_META]); in f2fs_inode_dirtied()
1502 inc_page_count(sbi, F2FS_DIRTY_IMETA); in f2fs_inode_dirtied()
1504 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_dirtied()
1510 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_inode_synced() local
1512 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1514 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1519 dec_page_count(sbi, F2FS_DIRTY_IMETA); in f2fs_inode_synced()
1524 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1534 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_dirty_inode() local
1536 if (inode->i_ino == F2FS_NODE_INO(sbi) || in f2fs_dirty_inode()
1537 inode->i_ino == F2FS_META_INO(sbi)) in f2fs_dirty_inode()
1552 static void destroy_percpu_info(struct f2fs_sb_info *sbi) in destroy_percpu_info() argument
1554 percpu_counter_destroy(&sbi->total_valid_inode_count); in destroy_percpu_info()
1555 percpu_counter_destroy(&sbi->rf_node_block_count); in destroy_percpu_info()
1556 percpu_counter_destroy(&sbi->alloc_valid_block_count); in destroy_percpu_info()
1559 static void destroy_device_list(struct f2fs_sb_info *sbi) in destroy_device_list() argument
1563 for (i = 0; i < sbi->s_ndevs; i++) { in destroy_device_list()
1565 blkdev_put(FDEV(i).bdev, sbi->sb); in destroy_device_list()
1570 kvfree(sbi->devs); in destroy_device_list()
1575 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_put_super() local
1581 f2fs_unregister_sysfs(sbi); in f2fs_put_super()
1586 mutex_lock(&sbi->umount_mutex); in f2fs_put_super()
1592 f2fs_stop_ckpt_thread(sbi); in f2fs_put_super()
1599 if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) || in f2fs_put_super()
1600 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) { in f2fs_put_super()
1604 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_put_super()
1605 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_put_super()
1609 done = f2fs_issue_discard_timeout(sbi); in f2fs_put_super()
1610 if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) { in f2fs_put_super()
1614 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_put_super()
1615 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_put_super()
1622 f2fs_release_ino_entry(sbi, true); in f2fs_put_super()
1624 f2fs_leave_shrinker(sbi); in f2fs_put_super()
1625 mutex_unlock(&sbi->umount_mutex); in f2fs_put_super()
1628 f2fs_flush_merged_writes(sbi); in f2fs_put_super()
1630 f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); in f2fs_put_super()
1633 truncate_inode_pages_final(NODE_MAPPING(sbi)); in f2fs_put_super()
1634 truncate_inode_pages_final(META_MAPPING(sbi)); in f2fs_put_super()
1638 if (!get_pages(sbi, i)) in f2fs_put_super()
1640 f2fs_err(sbi, "detect filesystem reference count leak during " in f2fs_put_super()
1641 "umount, type: %d, count: %lld", i, get_pages(sbi, i)); in f2fs_put_super()
1642 f2fs_bug_on(sbi, 1); in f2fs_put_super()
1645 f2fs_bug_on(sbi, sbi->fsync_node_num); in f2fs_put_super()
1647 f2fs_destroy_compress_inode(sbi); in f2fs_put_super()
1649 iput(sbi->node_inode); in f2fs_put_super()
1650 sbi->node_inode = NULL; in f2fs_put_super()
1652 iput(sbi->meta_inode); in f2fs_put_super()
1653 sbi->meta_inode = NULL; in f2fs_put_super()
1659 f2fs_destroy_stats(sbi); in f2fs_put_super()
1662 f2fs_destroy_node_manager(sbi); in f2fs_put_super()
1663 f2fs_destroy_segment_manager(sbi); in f2fs_put_super()
1666 flush_work(&sbi->s_error_work); in f2fs_put_super()
1668 f2fs_destroy_post_read_wq(sbi); in f2fs_put_super()
1670 kvfree(sbi->ckpt); in f2fs_put_super()
1673 if (sbi->s_chksum_driver) in f2fs_put_super()
1674 crypto_free_shash(sbi->s_chksum_driver); in f2fs_put_super()
1675 kfree(sbi->raw_super); in f2fs_put_super()
1677 destroy_device_list(sbi); in f2fs_put_super()
1678 f2fs_destroy_page_array_cache(sbi); in f2fs_put_super()
1679 f2fs_destroy_xattr_caches(sbi); in f2fs_put_super()
1680 mempool_destroy(sbi->write_io_dummy); in f2fs_put_super()
1683 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_put_super()
1685 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy); in f2fs_put_super()
1686 destroy_percpu_info(sbi); in f2fs_put_super()
1687 f2fs_destroy_iostat(sbi); in f2fs_put_super()
1689 kvfree(sbi->write_io[i]); in f2fs_put_super()
1693 kfree(sbi); in f2fs_put_super()
1698 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_sync_fs() local
1701 if (unlikely(f2fs_cp_error(sbi))) in f2fs_sync_fs()
1703 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) in f2fs_sync_fs()
1708 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_sync_fs()
1712 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_sync_fs()
1713 err = f2fs_issue_checkpoint(sbi); in f2fs_sync_fs()
1794 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_statfs() local
1800 total_count = le64_to_cpu(sbi->raw_super->block_count); in f2fs_statfs()
1801 start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr); in f2fs_statfs()
1803 buf->f_bsize = sbi->blocksize; in f2fs_statfs()
1807 spin_lock(&sbi->stat_lock); in f2fs_statfs()
1809 user_block_count = sbi->user_block_count; in f2fs_statfs()
1810 total_valid_node_count = valid_node_count(sbi); in f2fs_statfs()
1811 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; in f2fs_statfs()
1812 buf->f_bfree = user_block_count - valid_user_blocks(sbi) - in f2fs_statfs()
1813 sbi->current_reserved_blocks; in f2fs_statfs()
1815 if (unlikely(buf->f_bfree <= sbi->unusable_block_count)) in f2fs_statfs()
1818 buf->f_bfree -= sbi->unusable_block_count; in f2fs_statfs()
1819 spin_unlock(&sbi->stat_lock); in f2fs_statfs()
1821 if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks) in f2fs_statfs()
1823 F2FS_OPTION(sbi).root_reserved_blocks; in f2fs_statfs()
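
The f2fs_statfs() entries compute free and available space in three steps: free blocks are user blocks minus valid blocks and the filesystem's current reservation; blocks made unusable by checkpoint=disable are then subtracted (or the count is zeroed if they exceed it); and the space reported as available to unprivileged users further loses the reserve_root pool. A stand-alone sketch of that arithmetic, with simplified fields in place of the real superblock info:

#include <stdint.h>

typedef uint64_t block_t;	/* wide enough for the subtraction sketch */

struct space_sketch {
	block_t user_block_count;
	block_t valid_user_blocks;
	block_t current_reserved_blocks;
	block_t unusable_block_count;	/* non-zero only with checkpoint=disable */
	block_t root_reserved_blocks;	/* reserve_root= mount option */
};

static void statfs_free_blocks(const struct space_sketch *s,
			       block_t *bfree, block_t *bavail)
{
	*bfree = s->user_block_count - s->valid_user_blocks -
		 s->current_reserved_blocks;

	/* Blocks held back while checkpointing is disabled are not free. */
	if (*bfree <= s->unusable_block_count)
		*bfree = 0;
	else
		*bfree -= s->unusable_block_count;

	/* Unprivileged users additionally lose the root reservation. */
	if (*bfree > s->root_reserved_blocks)
		*bavail = *bfree - s->root_reserved_blocks;
	else
		*bavail = 0;
}
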
1852 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_show_quota_options() local
1854 if (F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_show_quota_options()
1857 switch (F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_show_quota_options()
1871 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) in f2fs_show_quota_options()
1873 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]); in f2fs_show_quota_options()
1875 if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) in f2fs_show_quota_options()
1877 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]); in f2fs_show_quota_options()
1879 if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) in f2fs_show_quota_options()
1881 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]); in f2fs_show_quota_options()
1889 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_show_compress_options() local
1893 if (!f2fs_sb_has_compression(sbi)) in f2fs_show_compress_options()
1896 switch (F2FS_OPTION(sbi).compress_algorithm) { in f2fs_show_compress_options()
1912 if (F2FS_OPTION(sbi).compress_level) in f2fs_show_compress_options()
1913 seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level); in f2fs_show_compress_options()
1916 F2FS_OPTION(sbi).compress_log_size); in f2fs_show_compress_options()
1918 for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) { in f2fs_show_compress_options()
1920 F2FS_OPTION(sbi).extensions[i]); in f2fs_show_compress_options()
1923 for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) { in f2fs_show_compress_options()
1925 F2FS_OPTION(sbi).noextensions[i]); in f2fs_show_compress_options()
1928 if (F2FS_OPTION(sbi).compress_chksum) in f2fs_show_compress_options()
1931 if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS) in f2fs_show_compress_options()
1933 else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER) in f2fs_show_compress_options()
1936 if (test_opt(sbi, COMPRESS_CACHE)) in f2fs_show_compress_options()
1943 struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb); in f2fs_show_options() local
1945 if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) in f2fs_show_options()
1947 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON) in f2fs_show_options()
1949 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) in f2fs_show_options()
1952 if (test_opt(sbi, GC_MERGE)) in f2fs_show_options()
1957 if (test_opt(sbi, DISABLE_ROLL_FORWARD)) in f2fs_show_options()
1959 if (test_opt(sbi, NORECOVERY)) in f2fs_show_options()
1961 if (test_opt(sbi, DISCARD)) { in f2fs_show_options()
1963 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK) in f2fs_show_options()
1965 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT) in f2fs_show_options()
1967 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) in f2fs_show_options()
1972 if (test_opt(sbi, NOHEAP)) in f2fs_show_options()
1977 if (test_opt(sbi, XATTR_USER)) in f2fs_show_options()
1981 if (test_opt(sbi, INLINE_XATTR)) in f2fs_show_options()
1985 if (test_opt(sbi, INLINE_XATTR_SIZE)) in f2fs_show_options()
1987 F2FS_OPTION(sbi).inline_xattr_size); in f2fs_show_options()
1990 if (test_opt(sbi, POSIX_ACL)) in f2fs_show_options()
1995 if (test_opt(sbi, DISABLE_EXT_IDENTIFY)) in f2fs_show_options()
1997 if (test_opt(sbi, INLINE_DATA)) in f2fs_show_options()
2001 if (test_opt(sbi, INLINE_DENTRY)) in f2fs_show_options()
2005 if (test_opt(sbi, FLUSH_MERGE)) in f2fs_show_options()
2009 if (test_opt(sbi, NOBARRIER)) in f2fs_show_options()
2013 if (test_opt(sbi, FASTBOOT)) in f2fs_show_options()
2015 if (test_opt(sbi, READ_EXTENT_CACHE)) in f2fs_show_options()
2019 if (test_opt(sbi, AGE_EXTENT_CACHE)) in f2fs_show_options()
2021 if (test_opt(sbi, DATA_FLUSH)) in f2fs_show_options()
2025 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE) in f2fs_show_options()
2027 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS) in f2fs_show_options()
2029 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG) in f2fs_show_options()
2031 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) in f2fs_show_options()
2033 seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs); in f2fs_show_options()
2034 if (test_opt(sbi, RESERVE_ROOT)) in f2fs_show_options()
2036 F2FS_OPTION(sbi).root_reserved_blocks, in f2fs_show_options()
2038 F2FS_OPTION(sbi).s_resuid), in f2fs_show_options()
2040 F2FS_OPTION(sbi).s_resgid)); in f2fs_show_options()
2041 if (F2FS_IO_SIZE_BITS(sbi)) in f2fs_show_options()
2043 F2FS_OPTION(sbi).write_io_size_bits); in f2fs_show_options()
2045 if (test_opt(sbi, FAULT_INJECTION)) { in f2fs_show_options()
2047 F2FS_OPTION(sbi).fault_info.inject_rate); in f2fs_show_options()
2049 F2FS_OPTION(sbi).fault_info.inject_type); in f2fs_show_options()
2053 if (test_opt(sbi, QUOTA)) in f2fs_show_options()
2055 if (test_opt(sbi, USRQUOTA)) in f2fs_show_options()
2057 if (test_opt(sbi, GRPQUOTA)) in f2fs_show_options()
2059 if (test_opt(sbi, PRJQUOTA)) in f2fs_show_options()
2062 f2fs_show_quota_options(seq, sbi->sb); in f2fs_show_options()
2064 fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb); in f2fs_show_options()
2066 if (sbi->sb->s_flags & SB_INLINECRYPT) in f2fs_show_options()
2069 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) in f2fs_show_options()
2071 else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) in f2fs_show_options()
2074 if (test_opt(sbi, DISABLE_CHECKPOINT)) in f2fs_show_options()
2076 F2FS_OPTION(sbi).unusable_cap); in f2fs_show_options()
2077 if (test_opt(sbi, MERGE_CHECKPOINT)) in f2fs_show_options()
2081 if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX) in f2fs_show_options()
2083 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) in f2fs_show_options()
2085 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER) in f2fs_show_options()
2089 f2fs_show_compress_options(seq, sbi->sb); in f2fs_show_options()
2092 if (test_opt(sbi, ATGC)) in f2fs_show_options()
2095 if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL) in f2fs_show_options()
2097 else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW) in f2fs_show_options()
2100 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY) in f2fs_show_options()
2102 else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE) in f2fs_show_options()
2104 else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC) in f2fs_show_options()
2110 static void default_options(struct f2fs_sb_info *sbi, bool remount) in default_options() argument
2114 set_opt(sbi, READ_EXTENT_CACHE); in default_options()
2115 clear_opt(sbi, DISABLE_CHECKPOINT); in default_options()
2117 if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) in default_options()
2118 set_opt(sbi, DISCARD); in default_options()
2120 if (f2fs_sb_has_blkzoned(sbi)) in default_options()
2121 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION; in default_options()
2123 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK; in default_options()
2126 if (f2fs_sb_has_readonly(sbi)) in default_options()
2127 F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE; in default_options()
2129 F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE; in default_options()
2131 F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; in default_options()
2132 if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <= in default_options()
2134 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; in default_options()
2136 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; in default_options()
2137 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; in default_options()
2138 F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); in default_options()
2139 F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID); in default_options()
2140 if (f2fs_sb_has_compression(sbi)) { in default_options()
2141 F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4; in default_options()
2142 F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE; in default_options()
2143 F2FS_OPTION(sbi).compress_ext_cnt = 0; in default_options()
2144 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS; in default_options()
2146 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; in default_options()
2147 F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL; in default_options()
2148 F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE; in default_options()
2150 sbi->sb->s_flags &= ~SB_INLINECRYPT; in default_options()
2152 set_opt(sbi, INLINE_XATTR); in default_options()
2153 set_opt(sbi, INLINE_DATA); in default_options()
2154 set_opt(sbi, INLINE_DENTRY); in default_options()
2155 set_opt(sbi, NOHEAP); in default_options()
2156 set_opt(sbi, MERGE_CHECKPOINT); in default_options()
2157 F2FS_OPTION(sbi).unusable_cap = 0; in default_options()
2158 sbi->sb->s_flags |= SB_LAZYTIME; in default_options()
2159 if (!f2fs_is_readonly(sbi)) in default_options()
2160 set_opt(sbi, FLUSH_MERGE); in default_options()
2161 if (f2fs_sb_has_blkzoned(sbi)) in default_options()
2162 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; in default_options()
2164 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; in default_options()
2167 set_opt(sbi, XATTR_USER); in default_options()
2170 set_opt(sbi, POSIX_ACL); in default_options()
2173 f2fs_build_fault_attr(sbi, 0, 0); in default_options()
2180 static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) in f2fs_disable_checkpoint() argument
2182 unsigned int s_flags = sbi->sb->s_flags; in f2fs_disable_checkpoint()
2184 unsigned int gc_mode = sbi->gc_mode; in f2fs_disable_checkpoint()
2190 f2fs_err(sbi, "checkpoint=disable on readonly fs"); in f2fs_disable_checkpoint()
2193 sbi->sb->s_flags |= SB_ACTIVE; in f2fs_disable_checkpoint()
2196 unusable = f2fs_get_unusable_blocks(sbi); in f2fs_disable_checkpoint()
2197 if (!f2fs_disable_cp_again(sbi, unusable)) in f2fs_disable_checkpoint()
2200 f2fs_update_time(sbi, DISABLE_TIME); in f2fs_disable_checkpoint()
2202 sbi->gc_mode = GC_URGENT_HIGH; in f2fs_disable_checkpoint()
2204 while (!f2fs_time_over(sbi, DISABLE_TIME)) { in f2fs_disable_checkpoint()
2212 f2fs_down_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
2213 stat_inc_gc_call_count(sbi, FOREGROUND); in f2fs_disable_checkpoint()
2214 err = f2fs_gc(sbi, &gc_control); in f2fs_disable_checkpoint()
2223 ret = sync_filesystem(sbi->sb); in f2fs_disable_checkpoint()
2229 unusable = f2fs_get_unusable_blocks(sbi); in f2fs_disable_checkpoint()
2230 if (f2fs_disable_cp_again(sbi, unusable)) { in f2fs_disable_checkpoint()
2236 f2fs_down_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
2238 set_sbi_flag(sbi, SBI_CP_DISABLED); in f2fs_disable_checkpoint()
2239 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_disable_checkpoint()
2240 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_disable_checkpoint()
2244 spin_lock(&sbi->stat_lock); in f2fs_disable_checkpoint()
2245 sbi->unusable_block_count = unusable; in f2fs_disable_checkpoint()
2246 spin_unlock(&sbi->stat_lock); in f2fs_disable_checkpoint()
2249 f2fs_up_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
2251 sbi->gc_mode = gc_mode; in f2fs_disable_checkpoint()
2252 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ in f2fs_disable_checkpoint()
2256 static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) in f2fs_enable_checkpoint() argument
2262 sync_inodes_sb(sbi->sb); in f2fs_enable_checkpoint()
2264 } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--); in f2fs_enable_checkpoint()
2267 f2fs_warn(sbi, "checkpoint=enable has some unwritten data."); in f2fs_enable_checkpoint()
2269 f2fs_down_write(&sbi->gc_lock); in f2fs_enable_checkpoint()
2270 f2fs_dirty_to_prefree(sbi); in f2fs_enable_checkpoint()
2272 clear_sbi_flag(sbi, SBI_CP_DISABLED); in f2fs_enable_checkpoint()
2273 set_sbi_flag(sbi, SBI_IS_DIRTY); in f2fs_enable_checkpoint()
2274 f2fs_up_write(&sbi->gc_lock); in f2fs_enable_checkpoint()
2276 f2fs_sync_fs(sbi->sb, 1); in f2fs_enable_checkpoint()
2279 f2fs_flush_ckpt_thread(sbi); in f2fs_enable_checkpoint()
2284 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_remount() local
2292 bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE); in f2fs_remount()
2293 bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE); in f2fs_remount()
2294 bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT); in f2fs_remount()
2295 bool no_io_align = !F2FS_IO_ALIGNED(sbi); in f2fs_remount()
2296 bool no_atgc = !test_opt(sbi, ATGC); in f2fs_remount()
2297 bool no_discard = !test_opt(sbi, DISCARD); in f2fs_remount()
2298 bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE); in f2fs_remount()
2299 bool block_unit_discard = f2fs_block_unit_discard(sbi); in f2fs_remount()
2308 org_mount_opt = sbi->mount_opt; in f2fs_remount()
2312 org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt; in f2fs_remount()
2314 if (F2FS_OPTION(sbi).s_qf_names[i]) { in f2fs_remount()
2316 kstrdup(F2FS_OPTION(sbi).s_qf_names[i], in f2fs_remount()
2330 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { in f2fs_remount()
2331 err = f2fs_commit_super(sbi, false); in f2fs_remount()
2332 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d", in f2fs_remount()
2335 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE); in f2fs_remount()
2338 default_options(sbi, true); in f2fs_remount()
2346 flush_work(&sbi->s_error_work); in f2fs_remount()
2355 if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) { in f2fs_remount()
2370 } else if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_remount()
2377 if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) { in f2fs_remount()
2379 f2fs_warn(sbi, "LFS is not compatible with IPU"); in f2fs_remount()
2384 if (no_atgc == !!test_opt(sbi, ATGC)) { in f2fs_remount()
2386 f2fs_warn(sbi, "switch atgc option is not allowed"); in f2fs_remount()
2391 if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) { in f2fs_remount()
2393 f2fs_warn(sbi, "switch extent_cache option is not allowed"); in f2fs_remount()
2397 if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) { in f2fs_remount()
2399 f2fs_warn(sbi, "switch age_extent_cache option is not allowed"); in f2fs_remount()
2403 if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) { in f2fs_remount()
2405 f2fs_warn(sbi, "switch io_bits option is not allowed"); in f2fs_remount()
2409 if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) { in f2fs_remount()
2411 f2fs_warn(sbi, "switch compress_cache option is not allowed"); in f2fs_remount()
2415 if (block_unit_discard != f2fs_block_unit_discard(sbi)) { in f2fs_remount()
2417 f2fs_warn(sbi, "switch discard_unit option is not allowed"); in f2fs_remount()
2421 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_remount()
2423 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only"); in f2fs_remount()
2433 (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF && in f2fs_remount()
2434 !test_opt(sbi, GC_MERGE))) { in f2fs_remount()
2435 if (sbi->gc_thread) { in f2fs_remount()
2436 f2fs_stop_gc_thread(sbi); in f2fs_remount()
2439 } else if (!sbi->gc_thread) { in f2fs_remount()
2440 err = f2fs_start_gc_thread(sbi); in f2fs_remount()
2449 set_sbi_flag(sbi, SBI_IS_DIRTY); in f2fs_remount()
2450 set_sbi_flag(sbi, SBI_IS_CLOSE); in f2fs_remount()
2452 clear_sbi_flag(sbi, SBI_IS_CLOSE); in f2fs_remount()
2455 if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) || in f2fs_remount()
2456 !test_opt(sbi, MERGE_CHECKPOINT)) { in f2fs_remount()
2457 f2fs_stop_ckpt_thread(sbi); in f2fs_remount()
2461 f2fs_flush_ckpt_thread(sbi); in f2fs_remount()
2463 err = f2fs_start_ckpt_thread(sbi); in f2fs_remount()
2465 f2fs_err(sbi, in f2fs_remount()
2477 if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) { in f2fs_remount()
2478 clear_opt(sbi, FLUSH_MERGE); in f2fs_remount()
2479 f2fs_destroy_flush_cmd_control(sbi, false); in f2fs_remount()
2482 err = f2fs_create_flush_cmd_control(sbi); in f2fs_remount()
2488 if (no_discard == !!test_opt(sbi, DISCARD)) { in f2fs_remount()
2489 if (test_opt(sbi, DISCARD)) { in f2fs_remount()
2490 err = f2fs_start_discard_thread(sbi); in f2fs_remount()
2495 f2fs_stop_discard_thread(sbi); in f2fs_remount()
2496 f2fs_issue_discard_timeout(sbi); in f2fs_remount()
2501 if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_remount()
2502 if (test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_remount()
2503 err = f2fs_disable_checkpoint(sbi); in f2fs_remount()
2507 f2fs_enable_checkpoint(sbi); in f2fs_remount()
2519 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); in f2fs_remount()
2521 limit_reserve_root(sbi); in f2fs_remount()
2522 adjust_unusable_cap_perc(sbi); in f2fs_remount()
2527 if (f2fs_start_discard_thread(sbi)) in f2fs_remount()
2528 f2fs_warn(sbi, "discard has been stopped"); in f2fs_remount()
2530 f2fs_stop_discard_thread(sbi); in f2fs_remount()
2534 if (f2fs_create_flush_cmd_control(sbi)) in f2fs_remount()
2535 f2fs_warn(sbi, "background flush thread has stopped"); in f2fs_remount()
2537 clear_opt(sbi, FLUSH_MERGE); in f2fs_remount()
2538 f2fs_destroy_flush_cmd_control(sbi, false); in f2fs_remount()
2542 if (f2fs_start_ckpt_thread(sbi)) in f2fs_remount()
2543 f2fs_warn(sbi, "background ckpt thread has stopped"); in f2fs_remount()
2545 f2fs_stop_ckpt_thread(sbi); in f2fs_remount()
2549 if (f2fs_start_gc_thread(sbi)) in f2fs_remount()
2550 f2fs_warn(sbi, "background gc thread has stopped"); in f2fs_remount()
2552 f2fs_stop_gc_thread(sbi); in f2fs_remount()
2556 F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt; in f2fs_remount()
2558 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_remount()
2559 F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i]; in f2fs_remount()
2562 sbi->mount_opt = org_mount_opt; in f2fs_remount()
2568 static bool f2fs_need_recovery(struct f2fs_sb_info *sbi) in f2fs_need_recovery() argument
2571 if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG)) in f2fs_need_recovery()
2574 if (test_opt(sbi, DISABLE_ROLL_FORWARD)) in f2fs_need_recovery()
2576 if (test_opt(sbi, NORECOVERY)) in f2fs_need_recovery()
2578 return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG); in f2fs_need_recovery()
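
f2fs_need_recovery() is a short chain of boolean checks: pending orphan inodes always force recovery, the disable_roll_forward and norecovery options suppress it, and otherwise recovery is needed exactly when the last checkpoint was not a clean umount. The listing elides the early return values, so the sketch below follows the usual reading that roll-forward recovery is skipped when those options are set, with plain booleans standing in for the checkpoint flags and mount options:

#include <stdbool.h>

/* Stand-in state; the real code reads checkpoint flags and mount options. */
struct recovery_state {
	bool orphan_present;		/* CP_ORPHAN_PRESENT_FLAG */
	bool disable_roll_forward;	/* disable_roll_forward option */
	bool norecovery;		/* norecovery option */
	bool clean_umount;		/* CP_UMOUNT_FLAG */
};

static bool need_recovery_sketch(const struct recovery_state *s)
{
	/* Orphan inodes always need cleanup. */
	if (s->orphan_present)
		return true;

	/* Roll-forward recovery was explicitly disabled. */
	if (s->disable_roll_forward)
		return false;
	if (s->norecovery)
		return false;

	/* Otherwise, recover exactly when the last umount was not clean. */
	return !s->clean_umount;
}
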
2581 static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi) in f2fs_recover_quota_begin() argument
2583 bool readonly = f2fs_readonly(sbi->sb); in f2fs_recover_quota_begin()
2585 if (!f2fs_need_recovery(sbi)) in f2fs_recover_quota_begin()
2589 if (f2fs_hw_is_readonly(sbi)) in f2fs_recover_quota_begin()
2593 sbi->sb->s_flags &= ~SB_RDONLY; in f2fs_recover_quota_begin()
2594 set_sbi_flag(sbi, SBI_IS_WRITABLE); in f2fs_recover_quota_begin()
2601 return f2fs_enable_quota_files(sbi, readonly); in f2fs_recover_quota_begin()
2604 static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi, in f2fs_recover_quota_end() argument
2608 f2fs_quota_off_umount(sbi->sb); in f2fs_recover_quota_end()
2610 if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) { in f2fs_recover_quota_end()
2611 clear_sbi_flag(sbi, SBI_IS_WRITABLE); in f2fs_recover_quota_end()
2612 sbi->sb->s_flags |= SB_RDONLY; in f2fs_recover_quota_end()
2736 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) in f2fs_quota_on_mount() argument
2738 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) { in f2fs_quota_on_mount()
2739 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it"); in f2fs_quota_on_mount()
2743 return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type], in f2fs_quota_on_mount()
2744 F2FS_OPTION(sbi).s_jquota_fmt, type); in f2fs_quota_on_mount()
2747 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) in f2fs_enable_quota_files() argument
2752 if (f2fs_sb_has_quota_ino(sbi) && rdonly) { in f2fs_enable_quota_files()
2753 err = f2fs_enable_quotas(sbi->sb); in f2fs_enable_quota_files()
2755 f2fs_err(sbi, "Cannot turn on quota_ino: %d", err); in f2fs_enable_quota_files()
2762 if (F2FS_OPTION(sbi).s_qf_names[i]) { in f2fs_enable_quota_files()
2763 err = f2fs_quota_on_mount(sbi, i); in f2fs_enable_quota_files()
2768 f2fs_err(sbi, "Cannot turn on quotas: %d on %d", in f2fs_enable_quota_files()
2812 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_enable_quotas() local
2816 test_opt(sbi, USRQUOTA), in f2fs_enable_quotas()
2817 test_opt(sbi, GRPQUOTA), in f2fs_enable_quotas()
2818 test_opt(sbi, PRJQUOTA), in f2fs_enable_quotas()
2822 f2fs_err(sbi, "quota file may be corrupted, skip loading it"); in f2fs_enable_quotas()
2835 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.", in f2fs_enable_quotas()
2848 static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type) in f2fs_quota_sync_file() argument
2850 struct quota_info *dqopt = sb_dqopt(sbi->sb); in f2fs_quota_sync_file()
2854 ret = dquot_writeback_dquots(sbi->sb, type); in f2fs_quota_sync_file()
2863 if (is_journalled_quota(sbi)) in f2fs_quota_sync_file()
2871 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_quota_sync_file()
2877 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_quota_sync() local
2894 if (!f2fs_sb_has_quota_ino(sbi)) in f2fs_quota_sync()
2906 f2fs_lock_op(sbi); in f2fs_quota_sync()
2907 f2fs_down_read(&sbi->quota_sem); in f2fs_quota_sync()
2909 ret = f2fs_quota_sync_file(sbi, cnt); in f2fs_quota_sync()
2911 f2fs_up_read(&sbi->quota_sem); in f2fs_quota_sync()
2912 f2fs_unlock_op(sbi); in f2fs_quota_sync()
2914 if (!f2fs_sb_has_quota_ino(sbi)) in f2fs_quota_sync()
2993 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_quota_off() local
3003 if (is_journalled_quota(sbi)) in f2fs_quota_off()
3004 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_quota_off()
3045 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_commit() local
3048 f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING); in f2fs_dquot_commit()
3051 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_commit()
3052 f2fs_up_read(&sbi->quota_sem); in f2fs_dquot_commit()
3058 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_acquire() local
3061 f2fs_down_read(&sbi->quota_sem); in f2fs_dquot_acquire()
3064 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_acquire()
3065 f2fs_up_read(&sbi->quota_sem); in f2fs_dquot_acquire()
3071 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_release() local
3075 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_release()
3082 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_dquot_mark_dquot_dirty() local
3086 if (is_journalled_quota(sbi)) in f2fs_dquot_mark_dquot_dirty()
3087 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); in f2fs_dquot_mark_dquot_dirty()
3094 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_dquot_commit_info() local
3098 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_commit_info()
3179 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_set_context() local
3187 if (f2fs_sb_has_lost_found(sbi) && in f2fs_set_context()
3188 inode->i_ino == F2FS_ROOT_INO(sbi)) in f2fs_set_context()
3216 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_get_devices() local
3220 if (!f2fs_is_multi_device(sbi)) in f2fs_get_devices()
3223 devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL); in f2fs_get_devices()
3227 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_get_devices()
3229 *num_devs = sbi->s_ndevs; in f2fs_get_devices()
3248 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_nfs_get_inode() local
3251 if (f2fs_check_nid_range(sbi, ino)) in f2fs_nfs_get_inode()
3334 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, in sanity_check_area_boundary() argument
3339 struct super_block *sb = sbi->sb; in sanity_check_area_boundary()
3359 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)", in sanity_check_area_boundary()
3366 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3374 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3382 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3390 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3397 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)", in sanity_check_area_boundary()
3409 if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) { in sanity_check_area_boundary()
3410 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); in sanity_check_area_boundary()
3416 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)", in sanity_check_area_boundary()
3425 static int sanity_check_raw_super(struct f2fs_sb_info *sbi, in sanity_check_raw_super() argument
3436 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", in sanity_check_raw_super()
3446 f2fs_info(sbi, "Invalid SB checksum offset: %zu", in sanity_check_raw_super()
3451 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { in sanity_check_raw_super()
3452 f2fs_info(sbi, "Invalid SB checksum value: %u", crc); in sanity_check_raw_super()
3459 f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u", in sanity_check_raw_super()
3467 f2fs_info(sbi, "Invalid log blocks per segment (%u)", in sanity_check_raw_super()
3477 f2fs_info(sbi, "Invalid log sectorsize (%u)", in sanity_check_raw_super()
3484 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)", in sanity_check_raw_super()
3501 f2fs_info(sbi, "Invalid segment count (%u)", segment_count); in sanity_check_raw_super()
3507 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)", in sanity_check_raw_super()
3513 f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)", in sanity_check_raw_super()
3519 f2fs_info(sbi, "Small segment_count (%u < %u * %u)", in sanity_check_raw_super()
3525 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)", in sanity_check_raw_super()
3539 f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)", in sanity_check_raw_super()
3545 !bdev_is_zoned(sbi->sb->s_bdev)) { in sanity_check_raw_super()
3546 f2fs_info(sbi, "Zoned block device path is missing"); in sanity_check_raw_super()
3552 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)", in sanity_check_raw_super()
3560 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)", in sanity_check_raw_super()
3570 f2fs_info(sbi, "Insane cp_payload (%u >= %u)", in sanity_check_raw_super()
3581 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)", in sanity_check_raw_super()
3589 if (sanity_check_area_boundary(sbi, bh)) in sanity_check_raw_super()
3595 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi) in f2fs_sanity_check_ckpt() argument
3598 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_sanity_check_ckpt()
3599 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in f2fs_sanity_check_ckpt()
3627 if (!f2fs_sb_has_readonly(sbi) && in f2fs_sanity_check_ckpt()
3630 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version"); in f2fs_sanity_check_ckpt()
3635 (f2fs_sb_has_readonly(sbi) ? 1 : 0); in f2fs_sanity_check_ckpt()
3639 f2fs_err(sbi, "Wrong user_block_count: %u", in f2fs_sanity_check_ckpt()
3646 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u", in f2fs_sanity_check_ckpt()
3652 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; in f2fs_sanity_check_ckpt()
3654 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u", in f2fs_sanity_check_ckpt()
3660 blocks_per_seg = sbi->blocks_per_seg; in f2fs_sanity_check_ckpt()
3667 if (f2fs_sb_has_readonly(sbi)) in f2fs_sanity_check_ckpt()
3673 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3686 if (f2fs_sb_has_readonly(sbi)) in f2fs_sanity_check_ckpt()
3692 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3703 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3716 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u", in f2fs_sanity_check_ckpt()
3721 cp_pack_start_sum = __start_sum_addr(sbi); in f2fs_sanity_check_ckpt()
3722 cp_payload = __cp_payload(sbi); in f2fs_sanity_check_ckpt()
3726 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u", in f2fs_sanity_check_ckpt()
3733 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, " in f2fs_sanity_check_ckpt()
3746 f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)", in f2fs_sanity_check_ckpt()
3751 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_sanity_check_ckpt()
3752 f2fs_err(sbi, "A bug case: need to run fsck"); in f2fs_sanity_check_ckpt()
3758 static void init_sb_info(struct f2fs_sb_info *sbi) in init_sb_info() argument
3760 struct f2fs_super_block *raw_super = sbi->raw_super; in init_sb_info()
3763 sbi->log_sectors_per_block = in init_sb_info()
3765 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize); in init_sb_info()
3766 sbi->blocksize = BIT(sbi->log_blocksize); in init_sb_info()
3767 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); in init_sb_info()
3768 sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg); in init_sb_info()
3769 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec); in init_sb_info()
3770 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone); in init_sb_info()
3771 sbi->total_sections = le32_to_cpu(raw_super->section_count); in init_sb_info()
3772 sbi->total_node_count = in init_sb_info()
3774 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK; in init_sb_info()
3775 F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino); in init_sb_info()
3776 F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino); in init_sb_info()
3777 F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino); in init_sb_info()
3778 sbi->cur_victim_sec = NULL_SECNO; in init_sb_info()
3779 sbi->gc_mode = GC_NORMAL; in init_sb_info()
3780 sbi->next_victim_seg[BG_GC] = NULL_SEGNO; in init_sb_info()
3781 sbi->next_victim_seg[FG_GC] = NULL_SEGNO; in init_sb_info()
3782 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH; in init_sb_info()
3783 sbi->migration_granularity = sbi->segs_per_sec; in init_sb_info()
3784 sbi->seq_file_ra_mul = MIN_RA_MUL; in init_sb_info()
3785 sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE; in init_sb_info()
3786 sbi->max_fragment_hole = DEF_FRAGMENT_SIZE; in init_sb_info()
3787 spin_lock_init(&sbi->gc_remaining_trials_lock); in init_sb_info()
3788 atomic64_set(&sbi->current_atomic_write, 0); in init_sb_info()
3790 sbi->dir_level = DEF_DIR_LEVEL; in init_sb_info()
3791 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL; in init_sb_info()
3792 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3793 sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3794 sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3795 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL; in init_sb_info()
3796 sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] = in init_sb_info()
3798 clear_sbi_flag(sbi, SBI_NEED_FSCK); in init_sb_info()
3801 atomic_set(&sbi->nr_pages[i], 0); in init_sb_info()
3804 atomic_set(&sbi->wb_sync_req[i], 0); in init_sb_info()
3806 INIT_LIST_HEAD(&sbi->s_list); in init_sb_info()
3807 mutex_init(&sbi->umount_mutex); in init_sb_info()
3808 init_f2fs_rwsem(&sbi->io_order_lock); in init_sb_info()
3809 spin_lock_init(&sbi->cp_lock); in init_sb_info()
3811 sbi->dirty_device = 0; in init_sb_info()
3812 spin_lock_init(&sbi->dev_lock); in init_sb_info()
3814 init_f2fs_rwsem(&sbi->sb_lock); in init_sb_info()
3815 init_f2fs_rwsem(&sbi->pin_sem); in init_sb_info()
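
init_sb_info() derives all of the runtime geometry from log2 fields stored in the raw superblock (BIT(log_blocksize), BIT(log_blocks_per_seg), segments per section, sections per zone) before initializing locks and default intervals. A self-contained sketch of that derivation; the struct is an illustrative stand-in and the little-endian conversions done by the real code are omitted.

/* Deriving runtime geometry from the log2 fields kept in the raw
 * superblock, as init_sb_info() does (le32_to_cpu() conversions are
 * omitted).  struct toy_raw_super is a stand-in, not the real
 * struct f2fs_super_block. */
#include <stdint.h>
#include <stdio.h>

struct toy_raw_super {
	uint32_t log_blocksize;		/* e.g. 12 -> 4 KiB blocks   */
	uint32_t log_blocks_per_seg;	/* e.g. 9  -> 512 blocks/seg */
	uint32_t segs_per_sec;
	uint32_t secs_per_zone;
};

int main(void)
{
	struct toy_raw_super raw = { 12, 9, 4, 2 };

	uint32_t blocksize       = 1u << raw.log_blocksize;
	uint32_t blocks_per_seg  = 1u << raw.log_blocks_per_seg;
	uint32_t blocks_per_sec  = blocks_per_seg * raw.segs_per_sec;
	uint32_t blocks_per_zone = blocks_per_sec * raw.secs_per_zone;

	printf("blocksize=%u blocks/seg=%u blocks/sec=%u blocks/zone=%u\n",
	       blocksize, blocks_per_seg, blocks_per_sec, blocks_per_zone);
	return 0;
}
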
3818 static int init_percpu_info(struct f2fs_sb_info *sbi) in init_percpu_info() argument
3822 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL); in init_percpu_info()
3826 err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL); in init_percpu_info()
3830 err = percpu_counter_init(&sbi->total_valid_inode_count, 0, in init_percpu_info()
3837 percpu_counter_destroy(&sbi->rf_node_block_count); in init_percpu_info()
3839 percpu_counter_destroy(&sbi->alloc_valid_block_count); in init_percpu_info()
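
init_percpu_info() follows the usual kernel pattern: initialize several counters in order and, on failure, destroy only the ones already set up, in reverse order. Since percpu counters need the kernel, the sketch below reproduces the same goto-unwind shape with plain heap allocations; the names are illustrative only.

/* The goto-unwind error handling shape used by init_percpu_info():
 * each step that succeeds gets exactly one matching cleanup on the
 * error path, in reverse order.  calloc() stands in for
 * percpu_counter_init(). */
#include <stdlib.h>

struct toy_counters {
	long *alloc_valid_blocks;	/* stands in for a percpu counter */
	long *rf_node_blocks;
	long *valid_inodes;
};

static int toy_init_counters(struct toy_counters *c)
{
	c->alloc_valid_blocks = calloc(1, sizeof(long));
	if (!c->alloc_valid_blocks)
		return -1;

	c->rf_node_blocks = calloc(1, sizeof(long));
	if (!c->rf_node_blocks)
		goto err_alloc_valid;

	c->valid_inodes = calloc(1, sizeof(long));
	if (!c->valid_inodes)
		goto err_rf_node;

	return 0;

err_rf_node:
	free(c->rf_node_blocks);
err_alloc_valid:
	free(c->alloc_valid_blocks);
	return -1;
}

int main(void)
{
	struct toy_counters c;

	if (toy_init_counters(&c))
		return 1;
	free(c.valid_inodes);
	free(c.rf_node_blocks);
	free(c.alloc_valid_blocks);
	return 0;
}
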
3846 struct f2fs_sb_info *sbi; member
3861 if (!rz_args->sbi->unusable_blocks_per_sec) { in f2fs_report_zone_cb()
3862 rz_args->sbi->unusable_blocks_per_sec = unusable_blocks; in f2fs_report_zone_cb()
3865 if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) { in f2fs_report_zone_cb()
3866 f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n"); in f2fs_report_zone_cb()
3872 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) in init_blkz_info() argument
3880 if (!f2fs_sb_has_blkzoned(sbi)) in init_blkz_info()
3885 f2fs_err(sbi, "F2FS does not support non power of 2 zone sizes\n"); in init_blkz_info()
3889 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz != in init_blkz_info()
3892 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors); in init_blkz_info()
3894 sbi->blocks_per_blkz); in init_blkz_info()
3898 FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi, in init_blkz_info()
3905 rep_zone_arg.sbi = sbi; in init_blkz_info()
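
init_blkz_info() rejects zoned devices whose zone size is not a power of two, converts the zone size from 512-byte sectors to filesystem blocks, and requires every device in the array to agree on that value (the report-zone callback above enforces a single zone capacity the same way). A tiny sketch of those checks; the 4 KiB block size is an assumption.

/* Zone-size checks in the spirit of init_blkz_info(): the zone size
 * must be a power of two, and every device must report the same
 * blocks-per-zone value.  4 KiB blocks are assumed for the
 * sector-to-block conversion. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTORS_PER_BLOCK	8	/* 4096 / 512, an assumption */

static bool is_pow2(uint64_t v)
{
	return v && !(v & (v - 1));
}

static int check_zone(uint64_t zone_sectors, uint64_t *blocks_per_blkz)
{
	if (!is_pow2(zone_sectors))
		return -1;			/* unsupported zone size */

	uint64_t blocks = zone_sectors / SECTORS_PER_BLOCK;

	if (*blocks_per_blkz && *blocks_per_blkz != blocks)
		return -1;			/* devices disagree */
	*blocks_per_blkz = blocks;
	return 0;
}

int main(void)
{
	uint64_t blocks_per_blkz = 0;

	/* two devices with 256 MiB zones (524288 sectors each) */
	if (check_zone(524288, &blocks_per_blkz) ||
	    check_zone(524288, &blocks_per_blkz))
		return 1;
	printf("blocks per zone: %llu\n",
	       (unsigned long long)blocks_per_blkz);
	return 0;
}
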
3922 static int read_raw_super_block(struct f2fs_sb_info *sbi, in read_raw_super_block() argument
3926 struct super_block *sb = sbi->sb; in read_raw_super_block()
3939 f2fs_err(sbi, "Unable to read %dth superblock", in read_raw_super_block()
3947 err = sanity_check_raw_super(sbi, bh); in read_raw_super_block()
3949 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock", in read_raw_super_block()
3974 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) in f2fs_commit_super() argument
3980 if ((recover && f2fs_readonly(sbi->sb)) || in f2fs_commit_super()
3981 f2fs_hw_is_readonly(sbi)) { in f2fs_commit_super()
3982 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); in f2fs_commit_super()
3987 if (!recover && f2fs_sb_has_sb_chksum(sbi)) { in f2fs_commit_super()
3988 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi), in f2fs_commit_super()
3990 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc); in f2fs_commit_super()
3994 bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1); in f2fs_commit_super()
3997 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); in f2fs_commit_super()
4005 bh = sb_bread(sbi->sb, sbi->valid_super_block); in f2fs_commit_super()
4008 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); in f2fs_commit_super()
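
f2fs_commit_super() optionally stamps a CRC into the in-memory superblock and then writes the other copy first (block 1 if block 0 is the valid one, and vice versa), so a torn write never clobbers the copy currently trusted. The sketch below mirrors that ordering against an ordinary file; the slot size, offsets, and the zlib crc32() call are illustrative assumptions, not the on-disk format.

/* Write-order sketch for f2fs_commit_super(): checksum the in-memory
 * copy, write the backup slot first, then the currently valid slot.
 * A plain file with two 4 KiB slots stands in for the block device;
 * crc32() comes from zlib (link with -lz). */
#include <stdint.h>
#include <stdio.h>
#include <zlib.h>

#define SB_SIZE		4096
#define SB_SLOT(valid)	((valid) ? 0 : 1)	/* the "other" slot */

struct toy_super {
	char payload[SB_SIZE - sizeof(uint32_t)];
	uint32_t crc;				/* covers payload only */
};

static int write_slot(FILE *f, int slot, const struct toy_super *sb)
{
	if (fseek(f, (long)slot * SB_SIZE, SEEK_SET))
		return -1;
	return fwrite(sb, sizeof(*sb), 1, f) == 1 ? 0 : -1;
}

static int commit_super(FILE *f, struct toy_super *sb, int valid_slot)
{
	sb->crc = (uint32_t)crc32(0, (const Bytef *)sb->payload,
				  sizeof(sb->payload));

	/* backup copy first ... */
	if (write_slot(f, SB_SLOT(valid_slot), sb))
		return -1;
	/* ... then the copy we are actually mounted from */
	return write_slot(f, valid_slot, sb);
}

int main(void)
{
	struct toy_super sb = { .payload = "toy superblock" };
	FILE *f = fopen("toy_sb.img", "w+b");
	int err;

	if (!f)
		return 1;
	err = commit_super(f, &sb, 0);
	fclose(f);
	return err ? 1 : 0;
}
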
4013 static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason) in save_stop_reason() argument
4017 spin_lock_irqsave(&sbi->error_lock, flags); in save_stop_reason()
4018 if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0)) in save_stop_reason()
4019 sbi->stop_reason[reason]++; in save_stop_reason()
4020 spin_unlock_irqrestore(&sbi->error_lock, flags); in save_stop_reason()
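
save_stop_reason() bumps a per-reason byte counter under an IRQ-safe spinlock and clamps it at 255 (GENMASK over one byte) so repeated shutdown causes never wrap. A userspace analogue with a pthread mutex standing in for the spinlock:

/* Saturating per-reason counters in the spirit of save_stop_reason():
 * each 8-bit slot is incremented under a lock and capped at 0xff so it
 * never wraps.  A pthread mutex stands in for the kernel spinlock and
 * MAX_STOP_REASON is an illustrative size. */
#include <limits.h>
#include <pthread.h>
#include <stdio.h>

#define MAX_STOP_REASON	16

static unsigned char stop_reason[MAX_STOP_REASON];
static pthread_mutex_t stop_lock = PTHREAD_MUTEX_INITIALIZER;

static void record_stop_reason(unsigned int reason)
{
	if (reason >= MAX_STOP_REASON)
		return;
	pthread_mutex_lock(&stop_lock);
	if (stop_reason[reason] < UCHAR_MAX)	/* saturate, don't wrap */
		stop_reason[reason]++;
	pthread_mutex_unlock(&stop_lock);
}

int main(void)
{
	for (int i = 0; i < 300; i++)
		record_stop_reason(3);
	printf("reason 3 recorded %u times (capped)\n",
	       (unsigned)stop_reason[3]);
	return 0;
}
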
4023 static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi) in f2fs_record_stop_reason() argument
4025 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_record_stop_reason()
4029 f2fs_down_write(&sbi->sb_lock); in f2fs_record_stop_reason()
4031 spin_lock_irqsave(&sbi->error_lock, flags); in f2fs_record_stop_reason()
4032 if (sbi->error_dirty) { in f2fs_record_stop_reason()
4033 memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors, in f2fs_record_stop_reason()
4035 sbi->error_dirty = false; in f2fs_record_stop_reason()
4037 memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON); in f2fs_record_stop_reason()
4038 spin_unlock_irqrestore(&sbi->error_lock, flags); in f2fs_record_stop_reason()
4040 err = f2fs_commit_super(sbi, false); in f2fs_record_stop_reason()
4042 f2fs_up_write(&sbi->sb_lock); in f2fs_record_stop_reason()
4044 f2fs_err(sbi, "f2fs_commit_super fails to record err:%d", err); in f2fs_record_stop_reason()
4047 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag) in f2fs_save_errors() argument
4051 spin_lock_irqsave(&sbi->error_lock, flags); in f2fs_save_errors()
4052 if (!test_bit(flag, (unsigned long *)sbi->errors)) { in f2fs_save_errors()
4053 set_bit(flag, (unsigned long *)sbi->errors); in f2fs_save_errors()
4054 sbi->error_dirty = true; in f2fs_save_errors()
4056 spin_unlock_irqrestore(&sbi->error_lock, flags); in f2fs_save_errors()
4059 static bool f2fs_update_errors(struct f2fs_sb_info *sbi) in f2fs_update_errors() argument
4064 spin_lock_irqsave(&sbi->error_lock, flags); in f2fs_update_errors()
4065 if (sbi->error_dirty) { in f2fs_update_errors()
4066 memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors, in f2fs_update_errors()
4068 sbi->error_dirty = false; in f2fs_update_errors()
4071 spin_unlock_irqrestore(&sbi->error_lock, flags); in f2fs_update_errors()
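
f2fs_save_errors() sets a bit in an in-memory error bitmap and marks it dirty; f2fs_update_errors() later copies the bitmap into the raw superblock image and clears the dirty flag, all under the same lock, so the superblock is only rewritten when something actually changed. A condensed userspace analogue of that dirty-tracking (sizes and names are illustrative, and a pthread mutex replaces the spinlock):

/* Dirty-tracked error bitmap, as in f2fs_save_errors() and
 * f2fs_update_errors(): record bits under a lock, and only flush to
 * the "on-disk" copy when the dirty flag is set. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_ERRORS	16			/* bytes of error bits */

static unsigned char errors[MAX_ERRORS];	/* in-memory bitmap  */
static unsigned char disk_errors[MAX_ERRORS];	/* "raw superblock"  */
static bool error_dirty;
static pthread_mutex_t error_lock = PTHREAD_MUTEX_INITIALIZER;

static void save_error(unsigned int flag)
{
	if (flag >= MAX_ERRORS * 8)
		return;
	pthread_mutex_lock(&error_lock);
	if (!(errors[flag / 8] & (1u << (flag % 8)))) {
		errors[flag / 8] |= 1u << (flag % 8);
		error_dirty = true;		/* something new to persist */
	}
	pthread_mutex_unlock(&error_lock);
}

static bool update_errors(void)
{
	bool need_commit = false;

	pthread_mutex_lock(&error_lock);
	if (error_dirty) {
		memcpy(disk_errors, errors, MAX_ERRORS);
		error_dirty = false;
		need_commit = true;		/* caller rewrites the SB */
	}
	pthread_mutex_unlock(&error_lock);
	return need_commit;
}

int main(void)
{
	save_error(5);
	printf("commit needed: %d\n", update_errors());	/* 1 */
	printf("commit needed: %d\n", update_errors());	/* 0: nothing new */
	return 0;
}
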
4076 static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error) in f2fs_record_errors() argument
4080 f2fs_down_write(&sbi->sb_lock); in f2fs_record_errors()
4082 if (!f2fs_update_errors(sbi)) in f2fs_record_errors()
4085 err = f2fs_commit_super(sbi, false); in f2fs_record_errors()
4087 f2fs_err(sbi, "f2fs_commit_super fails to record errors:%u, err:%d", in f2fs_record_errors()
4090 f2fs_up_write(&sbi->sb_lock); in f2fs_record_errors()
4093 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error) in f2fs_handle_error() argument
4095 f2fs_save_errors(sbi, error); in f2fs_handle_error()
4096 f2fs_record_errors(sbi, error); in f2fs_handle_error()
4099 void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error) in f2fs_handle_error_async() argument
4101 f2fs_save_errors(sbi, error); in f2fs_handle_error_async()
4103 if (!sbi->error_dirty) in f2fs_handle_error_async()
4105 if (!test_bit(error, (unsigned long *)sbi->errors)) in f2fs_handle_error_async()
4107 schedule_work(&sbi->s_error_work); in f2fs_handle_error_async()
4116 void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason, in f2fs_handle_critical_error() argument
4119 struct super_block *sb = sbi->sb; in f2fs_handle_critical_error()
4122 F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE; in f2fs_handle_critical_error()
4124 set_ckpt_flags(sbi, CP_ERROR_FLAG); in f2fs_handle_critical_error()
4126 if (!f2fs_hw_is_readonly(sbi)) { in f2fs_handle_critical_error()
4127 save_stop_reason(sbi, reason); in f2fs_handle_critical_error()
4130 schedule_work(&sbi->s_error_work); in f2fs_handle_critical_error()
4132 f2fs_record_stop_reason(sbi); in f2fs_handle_critical_error()
4140 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC && in f2fs_handle_critical_error()
4142 !is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) in f2fs_handle_critical_error()
4147 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); in f2fs_handle_critical_error()
4153 f2fs_warn(sbi, "Remounting filesystem read-only"); in f2fs_handle_critical_error()
4164 struct f2fs_sb_info *sbi = container_of(work, in f2fs_record_error_work() local
4167 f2fs_record_stop_reason(sbi); in f2fs_record_error_work()
4170 static int f2fs_scan_devices(struct f2fs_sb_info *sbi) in f2fs_scan_devices() argument
4172 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_scan_devices()
4175 blk_mode_t mode = sb_open_mode(sbi->sb->s_flags); in f2fs_scan_devices()
4180 if (!bdev_is_zoned(sbi->sb->s_bdev)) in f2fs_scan_devices()
4189 sbi->devs = f2fs_kzalloc(sbi, in f2fs_scan_devices()
4193 if (!sbi->devs) in f2fs_scan_devices()
4196 logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev); in f2fs_scan_devices()
4197 sbi->aligned_blksize = true; in f2fs_scan_devices()
4201 FDEV(0).bdev = sbi->sb->s_bdev; in f2fs_scan_devices()
4214 sbi->log_blocks_per_seg) - 1 + in f2fs_scan_devices()
4220 sbi->log_blocks_per_seg) - 1; in f2fs_scan_devices()
4222 mode, sbi->sb, NULL); in f2fs_scan_devices()
4229 sbi->s_ndevs = i + 1; in f2fs_scan_devices()
4232 sbi->aligned_blksize = false; in f2fs_scan_devices()
4236 !f2fs_sb_has_blkzoned(sbi)) { in f2fs_scan_devices()
4237 f2fs_err(sbi, "Zoned block device feature not enabled"); in f2fs_scan_devices()
4241 if (init_blkz_info(sbi, i)) { in f2fs_scan_devices()
4242 f2fs_err(sbi, "Failed to initialize F2FS blkzone information"); in f2fs_scan_devices()
4247 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)", in f2fs_scan_devices()
4256 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x", in f2fs_scan_devices()
4261 f2fs_info(sbi, in f2fs_scan_devices()
4262 "IO Block Size: %8ld KB", F2FS_IO_SIZE_KB(sbi)); in f2fs_scan_devices()
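
f2fs_scan_devices() turns each device's segment count into a contiguous block-address range (start_blk..end_blk) by shifting by log_blocks_per_seg, placing each device immediately after the previous one, and remembers whether all devices share the primary device's logical block size. A sketch of just the range layout math; the device table and constants are stand-ins for FDEV() and the real superblock fields.

/* Laying out per-device block ranges the way f2fs_scan_devices()
 * does: each device covers total_segments << log_blocks_per_seg
 * blocks, placed immediately after the previous device. */
#include <stdint.h>
#include <stdio.h>

#define LOG_BLOCKS_PER_SEG	9	/* 512 blocks per 2 MiB segment */

struct toy_dev {
	const char *path;
	uint32_t total_segments;
	uint32_t start_blk, end_blk;
};

int main(void)
{
	struct toy_dev devs[] = {
		{ "/dev/sda", 1024 },	/* illustrative devices */
		{ "/dev/sdb", 2048 },
	};
	uint32_t next = 0;

	for (unsigned i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		uint32_t blks = devs[i].total_segments << LOG_BLOCKS_PER_SEG;

		devs[i].start_blk = next;
		devs[i].end_blk = next + blks - 1;
		next += blks;
		printf("Mount Device [%2u]: %20s, %8x - %8x\n", i,
		       devs[i].path, (unsigned)devs[i].start_blk,
		       (unsigned)devs[i].end_blk);
	}
	return 0;
}
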
4266 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) in f2fs_setup_casefold() argument
4269 if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) { in f2fs_setup_casefold()
4274 encoding_info = f2fs_sb_read_encoding(sbi->raw_super); in f2fs_setup_casefold()
4276 f2fs_err(sbi, in f2fs_setup_casefold()
4281 encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags); in f2fs_setup_casefold()
4284 f2fs_err(sbi, in f2fs_setup_casefold()
4294 f2fs_info(sbi, "Using encoding defined by superblock: " in f2fs_setup_casefold()
4301 sbi->sb->s_encoding = encoding; in f2fs_setup_casefold()
4302 sbi->sb->s_encoding_flags = encoding_flags; in f2fs_setup_casefold()
4305 if (f2fs_sb_has_casefold(sbi)) { in f2fs_setup_casefold()
4306 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE"); in f2fs_setup_casefold()
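
f2fs_setup_casefold() reads the encoding magic and flags recorded in the superblock, refuses the mount if this build cannot supply that encoding (or if casefold is enabled without CONFIG_UNICODE), and otherwise attaches the loaded table to sb->s_encoding. A stand-alone sketch of the lookup-or-refuse shape; the table entries below are invented for illustration and are not the real Unicode version list.

/* Lookup-or-refuse pattern used by f2fs_setup_casefold(): the
 * superblock names an encoding; if this build doesn't know it, the
 * mount fails rather than guessing. */
#include <stdio.h>

struct encoding_info {
	unsigned short magic;
	const char *name;
};

static const struct encoding_info known_encodings[] = {
	{ 0x1, "utf8-12.1.0" },		/* illustrative entries */
	{ 0x2, "utf8-15.0.0" },
};

static const struct encoding_info *find_encoding(unsigned short magic)
{
	for (unsigned i = 0;
	     i < sizeof(known_encodings) / sizeof(known_encodings[0]); i++)
		if (known_encodings[i].magic == magic)
			return &known_encodings[i];
	return NULL;
}

int main(void)
{
	unsigned short sb_magic = 0x2;	/* pretend this came from the SB */
	const struct encoding_info *enc = find_encoding(sb_magic);

	if (!enc) {
		fprintf(stderr, "unsupported encoding magic 0x%x\n",
			(unsigned)sb_magic);
		return 1;
	}
	printf("Using encoding defined by superblock: %s\n", enc->name);
	return 0;
}
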
4313 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi) in f2fs_tuning_parameters() argument
4316 if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) { in f2fs_tuning_parameters()
4317 if (f2fs_block_unit_discard(sbi)) in f2fs_tuning_parameters()
4318 SM_I(sbi)->dcc_info->discard_granularity = in f2fs_tuning_parameters()
4320 if (!f2fs_lfs_mode(sbi)) in f2fs_tuning_parameters()
4321 SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) | in f2fs_tuning_parameters()
4325 sbi->readdir_ra = true; in f2fs_tuning_parameters()
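
f2fs_tuning_parameters() applies different defaults when the volume is small (at most SMALL_VOLUME_SEGMENTS main segments): a finer discard granularity and a forced in-place-update policy, plus readdir readahead on every volume. A compact sketch of that threshold-based tuning; all constants here are placeholders rather than the real f2fs values.

/* Threshold-based tuning in the spirit of f2fs_tuning_parameters():
 * small volumes get different defaults than large ones. */
#include <stdbool.h>
#include <stdio.h>

#define SMALL_VOLUME_SEGMENTS	(10 * 1024)	/* placeholder threshold */

struct toy_tuning {
	unsigned int discard_granularity;
	bool force_ipu;
	bool readdir_ra;
};

static void tune(struct toy_tuning *t, unsigned int main_segs)
{
	if (main_segs <= SMALL_VOLUME_SEGMENTS) {
		t->discard_granularity = 1;	/* discard aggressively */
		t->force_ipu = true;		/* avoid segment churn  */
	} else {
		t->discard_granularity = 16;
		t->force_ipu = false;
	}
	t->readdir_ra = true;			/* always read ahead dirs */
}

int main(void)
{
	struct toy_tuning t;

	tune(&t, 4096);
	printf("granularity=%u force_ipu=%d readdir_ra=%d\n",
	       t.discard_granularity, t.force_ipu, t.readdir_ra);
	return 0;
}
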
4330 struct f2fs_sb_info *sbi; in f2fs_fill_super() local
4350 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL); in f2fs_fill_super()
4351 if (!sbi) in f2fs_fill_super()
4354 sbi->sb = sb; in f2fs_fill_super()
4357 init_f2fs_rwsem(&sbi->gc_lock); in f2fs_fill_super()
4358 mutex_init(&sbi->writepages); in f2fs_fill_super()
4359 init_f2fs_rwsem(&sbi->cp_global_sem); in f2fs_fill_super()
4360 init_f2fs_rwsem(&sbi->node_write); in f2fs_fill_super()
4361 init_f2fs_rwsem(&sbi->node_change); in f2fs_fill_super()
4362 spin_lock_init(&sbi->stat_lock); in f2fs_fill_super()
4363 init_f2fs_rwsem(&sbi->cp_rwsem); in f2fs_fill_super()
4364 init_f2fs_rwsem(&sbi->quota_sem); in f2fs_fill_super()
4365 init_waitqueue_head(&sbi->cp_wait); in f2fs_fill_super()
4366 spin_lock_init(&sbi->error_lock); in f2fs_fill_super()
4369 INIT_LIST_HEAD(&sbi->inode_list[i]); in f2fs_fill_super()
4370 spin_lock_init(&sbi->inode_lock[i]); in f2fs_fill_super()
4372 mutex_init(&sbi->flush_lock); in f2fs_fill_super()
4375 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0); in f2fs_fill_super()
4376 if (IS_ERR(sbi->s_chksum_driver)) { in f2fs_fill_super()
4377 f2fs_err(sbi, "Cannot load crc32 driver."); in f2fs_fill_super()
4378 err = PTR_ERR(sbi->s_chksum_driver); in f2fs_fill_super()
4379 sbi->s_chksum_driver = NULL; in f2fs_fill_super()
4385 f2fs_err(sbi, "unable to set blocksize"); in f2fs_fill_super()
4389 err = read_raw_super_block(sbi, &raw_super, &valid_super_block, in f2fs_fill_super()
4394 sb->s_fs_info = sbi; in f2fs_fill_super()
4395 sbi->raw_super = raw_super; in f2fs_fill_super()
4397 INIT_WORK(&sbi->s_error_work, f2fs_record_error_work); in f2fs_fill_super()
4398 memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS); in f2fs_fill_super()
4399 memcpy(sbi->stop_reason, raw_super->s_stop_reason, MAX_STOP_REASON); in f2fs_fill_super()
4402 if (f2fs_sb_has_inode_chksum(sbi)) in f2fs_fill_super()
4403 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid, in f2fs_fill_super()
4406 default_options(sbi, false); in f2fs_fill_super()
4422 err = f2fs_setup_casefold(sbi); in f2fs_fill_super()
4431 if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_fill_super()
4433 if (f2fs_qf_ino(sbi->sb, i)) in f2fs_fill_super()
4434 sbi->nquota_files++; in f2fs_fill_super()
4451 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); in f2fs_fill_super()
4456 sbi->valid_super_block = valid_super_block; in f2fs_fill_super()
4459 set_sbi_flag(sbi, SBI_POR_DOING); in f2fs_fill_super()
4461 err = f2fs_init_write_merge_io(sbi); in f2fs_fill_super()
4465 init_sb_info(sbi); in f2fs_fill_super()
4467 err = f2fs_init_iostat(sbi); in f2fs_fill_super()
4471 err = init_percpu_info(sbi); in f2fs_fill_super()
4475 if (F2FS_IO_ALIGNED(sbi)) { in f2fs_fill_super()
4476 sbi->write_io_dummy = in f2fs_fill_super()
4477 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0); in f2fs_fill_super()
4478 if (!sbi->write_io_dummy) { in f2fs_fill_super()
4485 err = f2fs_init_xattr_caches(sbi); in f2fs_fill_super()
4488 err = f2fs_init_page_array_cache(sbi); in f2fs_fill_super()
4493 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); in f2fs_fill_super()
4494 if (IS_ERR(sbi->meta_inode)) { in f2fs_fill_super()
4495 f2fs_err(sbi, "Failed to read F2FS meta data inode"); in f2fs_fill_super()
4496 err = PTR_ERR(sbi->meta_inode); in f2fs_fill_super()
4500 err = f2fs_get_valid_checkpoint(sbi); in f2fs_fill_super()
4502 f2fs_err(sbi, "Failed to get valid F2FS checkpoint"); in f2fs_fill_super()
4506 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG)) in f2fs_fill_super()
4507 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_fill_super()
4508 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) { in f2fs_fill_super()
4509 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); in f2fs_fill_super()
4510 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL; in f2fs_fill_super()
4513 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG)) in f2fs_fill_super()
4514 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_fill_super()
4517 err = f2fs_scan_devices(sbi); in f2fs_fill_super()
4519 f2fs_err(sbi, "Failed to find devices"); in f2fs_fill_super()
4523 err = f2fs_init_post_read_wq(sbi); in f2fs_fill_super()
4525 f2fs_err(sbi, "Failed to initialize post read workqueue"); in f2fs_fill_super()
4529 sbi->total_valid_node_count = in f2fs_fill_super()
4530 le32_to_cpu(sbi->ckpt->valid_node_count); in f2fs_fill_super()
4531 percpu_counter_set(&sbi->total_valid_inode_count, in f2fs_fill_super()
4532 le32_to_cpu(sbi->ckpt->valid_inode_count)); in f2fs_fill_super()
4533 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count); in f2fs_fill_super()
4534 sbi->total_valid_block_count = in f2fs_fill_super()
4535 le64_to_cpu(sbi->ckpt->valid_block_count); in f2fs_fill_super()
4536 sbi->last_valid_block_count = sbi->total_valid_block_count; in f2fs_fill_super()
4537 sbi->reserved_blocks = 0; in f2fs_fill_super()
4538 sbi->current_reserved_blocks = 0; in f2fs_fill_super()
4539 limit_reserve_root(sbi); in f2fs_fill_super()
4540 adjust_unusable_cap_perc(sbi); in f2fs_fill_super()
4542 f2fs_init_extent_cache_info(sbi); in f2fs_fill_super()
4544 f2fs_init_ino_entry_info(sbi); in f2fs_fill_super()
4546 f2fs_init_fsync_node_info(sbi); in f2fs_fill_super()
4549 f2fs_init_ckpt_req_control(sbi); in f2fs_fill_super()
4550 if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) && in f2fs_fill_super()
4551 test_opt(sbi, MERGE_CHECKPOINT)) { in f2fs_fill_super()
4552 err = f2fs_start_ckpt_thread(sbi); in f2fs_fill_super()
4554 f2fs_err(sbi, in f2fs_fill_super()
4562 err = f2fs_build_segment_manager(sbi); in f2fs_fill_super()
4564 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)", in f2fs_fill_super()
4568 err = f2fs_build_node_manager(sbi); in f2fs_fill_super()
4570 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)", in f2fs_fill_super()
4575 err = adjust_reserved_segment(sbi); in f2fs_fill_super()
4580 sbi->sectors_written_start = f2fs_get_sectors_written(sbi); in f2fs_fill_super()
4583 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); in f2fs_fill_super()
4584 if (__exist_node_summaries(sbi)) in f2fs_fill_super()
4585 sbi->kbytes_written = in f2fs_fill_super()
4588 f2fs_build_gc_manager(sbi); in f2fs_fill_super()
4590 err = f2fs_build_stats(sbi); in f2fs_fill_super()
4595 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); in f2fs_fill_super()
4596 if (IS_ERR(sbi->node_inode)) { in f2fs_fill_super()
4597 f2fs_err(sbi, "Failed to read node inode"); in f2fs_fill_super()
4598 err = PTR_ERR(sbi->node_inode); in f2fs_fill_super()
4603 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); in f2fs_fill_super()
4605 f2fs_err(sbi, "Failed to read root inode"); in f2fs_fill_super()
4622 err = f2fs_init_compress_inode(sbi); in f2fs_fill_super()
4626 err = f2fs_register_sysfs(sbi); in f2fs_fill_super()
4632 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) { in f2fs_fill_super()
4635 f2fs_err(sbi, "Cannot turn on quotas: error %d", err); in f2fs_fill_super()
4638 quota_enabled = f2fs_recover_quota_begin(sbi); in f2fs_fill_super()
4641 err = f2fs_recover_orphan_inodes(sbi); in f2fs_fill_super()
4645 if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))) in f2fs_fill_super()
4649 if (!test_opt(sbi, DISABLE_ROLL_FORWARD) && in f2fs_fill_super()
4650 !test_opt(sbi, NORECOVERY)) { in f2fs_fill_super()
4655 if (f2fs_hw_is_readonly(sbi)) { in f2fs_fill_super()
4656 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { in f2fs_fill_super()
4657 err = f2fs_recover_fsync_data(sbi, true); in f2fs_fill_super()
4660 f2fs_err(sbi, "Need to recover fsync data, but " in f2fs_fill_super()
4667 f2fs_info(sbi, "write access unavailable, skipping recovery"); in f2fs_fill_super()
4672 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_fill_super()
4677 err = f2fs_recover_fsync_data(sbi, false); in f2fs_fill_super()
4682 f2fs_err(sbi, "Cannot recover all fsync data errno=%d", in f2fs_fill_super()
4687 err = f2fs_recover_fsync_data(sbi, true); in f2fs_fill_super()
4691 f2fs_err(sbi, "Need to recover fsync data"); in f2fs_fill_super()
4697 f2fs_recover_quota_end(sbi, quota_enabled); in f2fs_fill_super()
4704 if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) { in f2fs_fill_super()
4705 err = f2fs_check_write_pointer(sbi); in f2fs_fill_super()
4711 f2fs_init_inmem_curseg(sbi); in f2fs_fill_super()
4714 clear_sbi_flag(sbi, SBI_POR_DOING); in f2fs_fill_super()
4716 if (test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_fill_super()
4717 err = f2fs_disable_checkpoint(sbi); in f2fs_fill_super()
4720 } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) { in f2fs_fill_super()
4721 f2fs_enable_checkpoint(sbi); in f2fs_fill_super()
4728 if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF || in f2fs_fill_super()
4729 test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) { in f2fs_fill_super()
4731 err = f2fs_start_gc_thread(sbi); in f2fs_fill_super()
4739 err = f2fs_commit_super(sbi, true); in f2fs_fill_super()
4740 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d", in f2fs_fill_super()
4741 sbi->valid_super_block ? 1 : 2, err); in f2fs_fill_super()
4744 f2fs_join_shrinker(sbi); in f2fs_fill_super()
4746 f2fs_tuning_parameters(sbi); in f2fs_fill_super()
4748 f2fs_notice(sbi, "Mounted with checkpoint version = %llx", in f2fs_fill_super()
4749 cur_cp_version(F2FS_CKPT(sbi))); in f2fs_fill_super()
4750 f2fs_update_time(sbi, CP_TIME); in f2fs_fill_super()
4751 f2fs_update_time(sbi, REQ_TIME); in f2fs_fill_super()
4752 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); in f2fs_fill_super()
4757 sync_filesystem(sbi->sb); in f2fs_fill_super()
4763 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) in f2fs_fill_super()
4764 f2fs_quota_off_umount(sbi->sb); in f2fs_fill_super()
4772 truncate_inode_pages_final(META_MAPPING(sbi)); in f2fs_fill_super()
4775 f2fs_unregister_sysfs(sbi); in f2fs_fill_super()
4777 f2fs_destroy_compress_inode(sbi); in f2fs_fill_super()
4782 f2fs_release_ino_entry(sbi, true); in f2fs_fill_super()
4783 truncate_inode_pages_final(NODE_MAPPING(sbi)); in f2fs_fill_super()
4784 iput(sbi->node_inode); in f2fs_fill_super()
4785 sbi->node_inode = NULL; in f2fs_fill_super()
4787 f2fs_destroy_stats(sbi); in f2fs_fill_super()
4790 f2fs_stop_discard_thread(sbi); in f2fs_fill_super()
4791 f2fs_destroy_node_manager(sbi); in f2fs_fill_super()
4793 f2fs_destroy_segment_manager(sbi); in f2fs_fill_super()
4795 f2fs_stop_ckpt_thread(sbi); in f2fs_fill_super()
4797 flush_work(&sbi->s_error_work); in f2fs_fill_super()
4798 f2fs_destroy_post_read_wq(sbi); in f2fs_fill_super()
4800 destroy_device_list(sbi); in f2fs_fill_super()
4801 kvfree(sbi->ckpt); in f2fs_fill_super()
4803 make_bad_inode(sbi->meta_inode); in f2fs_fill_super()
4804 iput(sbi->meta_inode); in f2fs_fill_super()
4805 sbi->meta_inode = NULL; in f2fs_fill_super()
4807 f2fs_destroy_page_array_cache(sbi); in f2fs_fill_super()
4809 f2fs_destroy_xattr_caches(sbi); in f2fs_fill_super()
4811 mempool_destroy(sbi->write_io_dummy); in f2fs_fill_super()
4813 destroy_percpu_info(sbi); in f2fs_fill_super()
4815 f2fs_destroy_iostat(sbi); in f2fs_fill_super()
4818 kvfree(sbi->write_io[i]); in f2fs_fill_super()
4827 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_fill_super()
4829 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy); in f2fs_fill_super()
4834 if (sbi->s_chksum_driver) in f2fs_fill_super()
4835 crypto_free_shash(sbi->s_chksum_driver); in f2fs_fill_super()
4836 kfree(sbi); in f2fs_fill_super()
4856 struct f2fs_sb_info *sbi = F2FS_SB(sb); in kill_f2fs_super() local
4858 set_sbi_flag(sbi, SBI_IS_CLOSE); in kill_f2fs_super()
4859 f2fs_stop_gc_thread(sbi); in kill_f2fs_super()
4860 f2fs_stop_discard_thread(sbi); in kill_f2fs_super()
4867 if (test_opt(sbi, COMPRESS_CACHE)) in kill_f2fs_super()
4868 truncate_inode_pages_final(COMPRESS_MAPPING(sbi)); in kill_f2fs_super()
4871 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) || in kill_f2fs_super()
4872 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { in kill_f2fs_super()
4876 stat_inc_cp_call_count(sbi, TOTAL_CALL); in kill_f2fs_super()
4877 f2fs_write_checkpoint(sbi, &cpc); in kill_f2fs_super()
4880 if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb)) in kill_f2fs_super()