Lines matching refs:sbi: uses of the sbi pointer in fs/f2fs/checkpoint.c
26 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io) in f2fs_stop_checkpoint() argument
28 f2fs_build_fault_attr(sbi, 0, 0); in f2fs_stop_checkpoint()
29 set_ckpt_flags(sbi, CP_ERROR_FLAG); in f2fs_stop_checkpoint()
31 f2fs_flush_merged_writes(sbi); in f2fs_stop_checkpoint()
37 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_grab_meta_page() argument
39 struct address_space *mapping = META_MAPPING(sbi); in f2fs_grab_meta_page()
56 static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, in __get_meta_page() argument
59 struct address_space *mapping = META_MAPPING(sbi); in __get_meta_page()
62 .sbi = sbi, in __get_meta_page()
106 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_get_meta_page() argument
108 return __get_meta_page(sbi, index, true); in f2fs_get_meta_page()
111 struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_get_meta_page_nofail() argument
117 page = __get_meta_page(sbi, index, true); in f2fs_get_meta_page_nofail()
122 f2fs_stop_checkpoint(sbi, false); in f2fs_get_meta_page_nofail()
128 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_get_tmp_page() argument
130 return __get_meta_page(sbi, index, false); in f2fs_get_tmp_page()
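
The four getters above are thin wrappers around __get_meta_page(): f2fs_get_meta_page() reads through the meta address space with validity checking (is_meta = true), f2fs_get_tmp_page() skips that check for recovery-time blocks (is_meta = false), and the _nofail variant keeps retrying, stopping the checkpoint via f2fs_stop_checkpoint() if the error persists. A minimal caller sketch, assuming standard f2fs headers; read_one_meta_block() is a hypothetical name:

    /* Hypothetical caller: fetch one meta block and release it. */
    static int read_one_meta_block(struct f2fs_sb_info *sbi, pgoff_t index)
    {
            struct page *page;

            page = f2fs_get_meta_page(sbi, index);
            if (IS_ERR(page))
                    return PTR_ERR(page);   /* typically -EIO */

            /* ... consume page_address(page) here ... */

            f2fs_put_page(page, 1);         /* unlock and drop the reference */
            return 0;
    }
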
133 static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr, in __is_bitmap_valid() argument
143 segno = GET_SEGNO(sbi, blkaddr); in __is_bitmap_valid()
144 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); in __is_bitmap_valid()
145 se = get_seg_entry(sbi, segno); in __is_bitmap_valid()
149 f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d", in __is_bitmap_valid()
151 set_sbi_flag(sbi, SBI_NEED_FSCK); in __is_bitmap_valid()
157 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, in f2fs_is_valid_blkaddr() argument
164 if (unlikely(blkaddr >= SIT_BLK_CNT(sbi))) in f2fs_is_valid_blkaddr()
168 if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) || in f2fs_is_valid_blkaddr()
169 blkaddr < SM_I(sbi)->ssa_blkaddr)) in f2fs_is_valid_blkaddr()
173 if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr || in f2fs_is_valid_blkaddr()
174 blkaddr < __start_cp_addr(sbi))) in f2fs_is_valid_blkaddr()
178 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || in f2fs_is_valid_blkaddr()
179 blkaddr < MAIN_BLKADDR(sbi))) in f2fs_is_valid_blkaddr()
185 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || in f2fs_is_valid_blkaddr()
186 blkaddr < MAIN_BLKADDR(sbi))) { in f2fs_is_valid_blkaddr()
187 f2fs_warn(sbi, "access invalid blkaddr:%u", in f2fs_is_valid_blkaddr()
189 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_is_valid_blkaddr()
193 return __is_bitmap_valid(sbi, blkaddr, type); in f2fs_is_valid_blkaddr()
197 if (unlikely(blkaddr < SEG0_BLKADDR(sbi) || in f2fs_is_valid_blkaddr()
198 blkaddr >= MAIN_BLKADDR(sbi))) in f2fs_is_valid_blkaddr()
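
f2fs_is_valid_blkaddr() range-checks blkaddr against the region implied by type (NAT, SIT, SSA, the CP pack, or the main area), and for the DATA_GENERIC case additionally consults the SIT bitmap through __is_bitmap_valid(), setting SBI_NEED_FSCK when the two disagree. A hedged guard sketch; the error value is what newer f2fs commonly returns, older kernels used -EFAULT:

    /* Validate an on-disk data pointer before dereferencing it. */
    if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
            return -EFSCORRUPTED;   /* outside the main area, or its
                                     * SIT validity bit is clear */
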
211 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, in f2fs_ra_meta_pages() argument
217 .sbi = sbi, in f2fs_ra_meta_pages()
233 if (!f2fs_is_valid_blkaddr(sbi, blkno, type)) in f2fs_ra_meta_pages()
239 NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))) in f2fs_ra_meta_pages()
242 fio.new_blkaddr = current_nat_addr(sbi, in f2fs_ra_meta_pages()
247 fio.new_blkaddr = current_sit_addr(sbi, in f2fs_ra_meta_pages()
259 page = f2fs_grab_cache_page(META_MAPPING(sbi), in f2fs_ra_meta_pages()
277 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_ra_meta_pages_cond() argument
282 page = find_get_page(META_MAPPING(sbi), index); in f2fs_ra_meta_pages_cond()
288 f2fs_ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true); in f2fs_ra_meta_pages_cond()
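
f2fs_ra_meta_pages() issues readahead for up to nrpages meta blocks of the given type, mapping NAT/SIT indexes to their current on-disk addresses via current_nat_addr()/current_sit_addr(); f2fs_ra_meta_pages_cond() only kicks it off when the target page is not already cached. The orphan-recovery call at line 701 (in f2fs_recover_orphan_inodes()) is the typical pattern:

    /* Warm the cache before walking the orphan blocks one by one. */
    f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
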
295 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in __f2fs_write_meta_page() local
299 if (unlikely(f2fs_cp_error(sbi))) in __f2fs_write_meta_page()
301 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in __f2fs_write_meta_page()
303 if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0)) in __f2fs_write_meta_page()
306 f2fs_do_write_meta_page(sbi, page, io_type); in __f2fs_write_meta_page()
307 dec_page_count(sbi, F2FS_DIRTY_META); in __f2fs_write_meta_page()
310 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META); in __f2fs_write_meta_page()
314 if (unlikely(f2fs_cp_error(sbi))) in __f2fs_write_meta_page()
315 f2fs_submit_merged_write(sbi, META); in __f2fs_write_meta_page()
333 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); in f2fs_write_meta_pages() local
336 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_write_meta_pages()
341 get_pages(sbi, F2FS_DIRTY_META) < in f2fs_write_meta_pages()
342 nr_pages_to_skip(sbi, META)) in f2fs_write_meta_pages()
346 if (!mutex_trylock(&sbi->cp_mutex)) in f2fs_write_meta_pages()
350 diff = nr_pages_to_write(sbi, META, wbc); in f2fs_write_meta_pages()
351 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO); in f2fs_write_meta_pages()
352 mutex_unlock(&sbi->cp_mutex); in f2fs_write_meta_pages()
357 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META); in f2fs_write_meta_pages()
362 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, in f2fs_sync_meta_pages() argument
365 struct address_space *mapping = META_MAPPING(sbi); in f2fs_sync_meta_pages()
424 f2fs_submit_merged_write(sbi, type); in f2fs_sync_meta_pages()
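
__f2fs_write_meta_page() and f2fs_write_meta_pages() implement the address_space writeback side, while f2fs_sync_meta_pages() is the synchronous push used by the checkpoint itself; note that f2fs_write_meta_pages() bails out while dirty meta stays below nr_pages_to_skip() and serializes against checkpoints with cp_mutex. The checkpoint-side flush, as it appears twice in do_checkpoint() below:

    /* Flush all dirty meta and assert none remain, unless a CP error
     * is already pending. */
    f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
    f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) && !f2fs_cp_error(sbi));
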
458 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, in __add_ino_entry() argument
461 struct inode_management *im = &sbi->im[type]; in __add_ino_entry()
473 f2fs_bug_on(sbi, 1); in __add_ino_entry()
493 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) in __remove_ino_entry() argument
495 struct inode_management *im = &sbi->im[type]; in __remove_ino_entry()
511 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) in f2fs_add_ino_entry() argument
514 __add_ino_entry(sbi, ino, 0, type); in f2fs_add_ino_entry()
517 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) in f2fs_remove_ino_entry() argument
520 __remove_ino_entry(sbi, ino, type); in f2fs_remove_ino_entry()
524 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode) in f2fs_exist_written_data() argument
526 struct inode_management *im = &sbi->im[mode]; in f2fs_exist_written_data()
535 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all) in f2fs_release_ino_entry() argument
541 struct inode_management *im = &sbi->im[i]; in f2fs_release_ino_entry()
554 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, in f2fs_set_dirty_device() argument
557 __add_ino_entry(sbi, ino, devidx, type); in f2fs_set_dirty_device()
560 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, in f2fs_is_dirty_device() argument
563 struct inode_management *im = &sbi->im[type]; in f2fs_is_dirty_device()
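
sbi->im[] holds one inode_management tree per entry type; __add_ino_entry()/__remove_ino_entry() maintain them, and f2fs_exist_written_data() lets fsync ask whether an inode was appended to or updated since the last checkpoint. An illustrative query; APPEND_INO is the real enum value, the flag name is hypothetical:

    /* Has anything been appended to this inode since the last CP? */
    if (f2fs_exist_written_data(sbi, inode->i_ino, APPEND_INO))
            needs_node_writeback = true;    /* hypothetical caller state */
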
575 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi) in f2fs_acquire_orphan_inode() argument
577 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_acquire_orphan_inode()
582 if (time_to_inject(sbi, FAULT_ORPHAN)) { in f2fs_acquire_orphan_inode()
588 if (unlikely(im->ino_num >= sbi->max_orphans)) in f2fs_acquire_orphan_inode()
597 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi) in f2fs_release_orphan_inode() argument
599 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_release_orphan_inode()
602 f2fs_bug_on(sbi, im->ino_num == 0); in f2fs_release_orphan_inode()
614 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) in f2fs_remove_orphan_inode() argument
617 __remove_ino_entry(sbi, ino, ORPHAN_INO); in f2fs_remove_orphan_inode()
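
Orphan bookkeeping is a two-step reservation protocol: f2fs_acquire_orphan_inode() reserves a slot (bounded by sbi->max_orphans and subject to FAULT_ORPHAN injection), after which the ino is recorded under ORPHAN_INO, or the slot is returned with f2fs_release_orphan_inode() on failure. A sketch of the unlink-side pattern; in the real code a small f2fs_add_orphan_inode() wrapper does the recording:

    err = f2fs_acquire_orphan_inode(sbi);   /* reserve a slot */
    if (err)
            return err;     /* orphan area full, or fault injected */
    f2fs_add_ino_entry(sbi, ino, ORPHAN_INO);       /* durable at next CP */
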
620 static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) in recover_orphan_inode() argument
626 inode = f2fs_iget_retry(sbi->sb, ino); in recover_orphan_inode()
632 f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT); in recover_orphan_inode()
647 err = f2fs_get_node_info(sbi, ino, &ni); in recover_orphan_inode()
659 set_sbi_flag(sbi, SBI_NEED_FSCK); in recover_orphan_inode()
660 f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.", in recover_orphan_inode()
665 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi) in f2fs_recover_orphan_inodes() argument
668 unsigned int s_flags = sbi->sb->s_flags; in f2fs_recover_orphan_inodes()
674 if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG)) in f2fs_recover_orphan_inodes()
677 if (bdev_read_only(sbi->sb->s_bdev)) { in f2fs_recover_orphan_inodes()
678 f2fs_info(sbi, "write access unavailable, skipping orphan cleanup"); in f2fs_recover_orphan_inodes()
683 f2fs_info(sbi, "orphan cleanup on readonly fs"); in f2fs_recover_orphan_inodes()
684 sbi->sb->s_flags &= ~SB_RDONLY; in f2fs_recover_orphan_inodes()
689 sbi->sb->s_flags |= SB_ACTIVE; in f2fs_recover_orphan_inodes()
695 quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY); in f2fs_recover_orphan_inodes()
698 start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi); in f2fs_recover_orphan_inodes()
699 orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi); in f2fs_recover_orphan_inodes()
701 f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true); in f2fs_recover_orphan_inodes()
707 page = f2fs_get_meta_page(sbi, start_blk + i); in f2fs_recover_orphan_inodes()
716 err = recover_orphan_inode(sbi, ino); in f2fs_recover_orphan_inodes()
725 clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG); in f2fs_recover_orphan_inodes()
727 set_sbi_flag(sbi, SBI_IS_RECOVERED); in f2fs_recover_orphan_inodes()
732 f2fs_quota_off_umount(sbi->sb); in f2fs_recover_orphan_inodes()
734 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ in f2fs_recover_orphan_inodes()
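
The two address computations at lines 698-699 pin down where the orphan blocks live inside the CP pack: they begin right after the checkpoint block and its payload, and end where the summary blocks start. Restated with the implied layout:

    /* CP pack layout implied by lines 698-699:
     *   [cp block][cp_payload blocks][orphan blocks...][summary blocks...]
     */
    start_blk     = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
    orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
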
739 static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) in write_orphan_inodes() argument
748 struct inode_management *im = &sbi->im[ORPHAN_INO]; in write_orphan_inodes()
762 page = f2fs_grab_meta_page(sbi, start_blk++); in write_orphan_inodes()
796 static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi, in f2fs_checkpoint_chksum() argument
802 chksum = f2fs_crc32(sbi, ckpt, chksum_ofs); in f2fs_checkpoint_chksum()
805 chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs, in f2fs_checkpoint_chksum()
811 static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr, in get_checkpoint_version() argument
818 *cp_page = f2fs_get_meta_page(sbi, cp_addr); in get_checkpoint_version()
828 f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset); in get_checkpoint_version()
832 crc = f2fs_checkpoint_chksum(sbi, *cp_block); in get_checkpoint_version()
835 f2fs_warn(sbi, "invalid crc value"); in get_checkpoint_version()
843 static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, in validate_checkpoint() argument
851 err = get_checkpoint_version(sbi, cp_addr, &cp_block, in validate_checkpoint()
857 sbi->blocks_per_seg) { in validate_checkpoint()
858 f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u", in validate_checkpoint()
865 err = get_checkpoint_version(sbi, cp_addr, &cp_block, in validate_checkpoint()
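
get_checkpoint_version() reads one CP block, bounds-checks the checksum offset, verifies the CRC, and hands back the version; validate_checkpoint() applies it to the first and the last block of a pack and accepts the pack only when both carry the same version, which proves the pack was written to completion. A condensed sketch (pointer parameters as in the source):

    /* Trust a pack only if its header and footer versions match,
     * each block having passed its own CRC check. */
    err = get_checkpoint_version(sbi, cp_addr, &cp_block, &cp_page_1, &pre_version);
    if (!err) {
            cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
            err = get_checkpoint_version(sbi, cp_addr, &cp_block, &cp_page_2, &cur_version);
    }
    if (!err && cur_version == pre_version)
            *version = cur_version;         /* pack accepted */
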
882 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) in f2fs_get_valid_checkpoint() argument
885 struct f2fs_super_block *fsb = sbi->raw_super; in f2fs_get_valid_checkpoint()
887 unsigned long blk_size = sbi->blocksize; in f2fs_get_valid_checkpoint()
890 unsigned int cp_blks = 1 + __cp_payload(sbi); in f2fs_get_valid_checkpoint()
895 sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks), in f2fs_get_valid_checkpoint()
897 if (!sbi->ckpt) in f2fs_get_valid_checkpoint()
904 cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version); in f2fs_get_valid_checkpoint()
909 cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version); in f2fs_get_valid_checkpoint()
926 memcpy(sbi->ckpt, cp_block, blk_size); in f2fs_get_valid_checkpoint()
929 sbi->cur_cp_pack = 1; in f2fs_get_valid_checkpoint()
931 sbi->cur_cp_pack = 2; in f2fs_get_valid_checkpoint()
934 if (f2fs_sanity_check_ckpt(sbi)) { in f2fs_get_valid_checkpoint()
948 unsigned char *ckpt = (unsigned char *)sbi->ckpt; in f2fs_get_valid_checkpoint()
950 cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i); in f2fs_get_valid_checkpoint()
968 kvfree(sbi->ckpt); in f2fs_get_valid_checkpoint()
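
f2fs_get_valid_checkpoint() validates both packs and keeps whichever carries the higher version, recording the winner in sbi->cur_cp_pack so the next checkpoint overwrites the other pack (a classic A/B scheme). A hedged sketch of the selection; ver_after() is the real version-comparison macro, fail_no_cp the error label:

    if (cp1 && cp2)
            cur_page = ver_after(cp2_version, cp1_version) ? cp2 : cp1;
    else
            cur_page = cp1 ? cp1 : cp2;     /* the surviving pack, if any */
    if (!cur_page)
            goto fail_no_cp;                /* neither pack validated */
    sbi->cur_cp_pack = (cur_page == cp1) ? 1 : 2;
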
974 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __add_dirty_inode() local
983 &sbi->inode_list[type]); in __add_dirty_inode()
984 stat_inc_dirty_inode(sbi, type); in __add_dirty_inode()
1001 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_update_dirty_page() local
1008 spin_lock(&sbi->inode_lock[type]); in f2fs_update_dirty_page()
1009 if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH)) in f2fs_update_dirty_page()
1012 spin_unlock(&sbi->inode_lock[type]); in f2fs_update_dirty_page()
1020 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_remove_dirty_inode() local
1027 if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH)) in f2fs_remove_dirty_inode()
1030 spin_lock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
1032 spin_unlock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
1035 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) in f2fs_sync_dirty_inodes() argument
1043 trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1044 get_pages(sbi, is_dir ? in f2fs_sync_dirty_inodes()
1047 if (unlikely(f2fs_cp_error(sbi))) in f2fs_sync_dirty_inodes()
1050 spin_lock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1052 head = &sbi->inode_list[type]; in f2fs_sync_dirty_inodes()
1054 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1055 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1056 get_pages(sbi, is_dir ? in f2fs_sync_dirty_inodes()
1062 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1083 f2fs_submit_merged_write(sbi, DATA); in f2fs_sync_dirty_inodes()
1089 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi) in f2fs_sync_inode_meta() argument
1091 struct list_head *head = &sbi->inode_list[DIRTY_META]; in f2fs_sync_inode_meta()
1094 s64 total = get_pages(sbi, F2FS_DIRTY_IMETA); in f2fs_sync_inode_meta()
1097 if (unlikely(f2fs_cp_error(sbi))) in f2fs_sync_inode_meta()
1100 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1102 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1108 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1121 static void __prepare_cp_block(struct f2fs_sb_info *sbi) in __prepare_cp_block() argument
1123 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in __prepare_cp_block()
1124 struct f2fs_nm_info *nm_i = NM_I(sbi); in __prepare_cp_block()
1127 next_free_nid(sbi, &last_nid); in __prepare_cp_block()
1128 ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); in __prepare_cp_block()
1129 ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi)); in __prepare_cp_block()
1130 ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi)); in __prepare_cp_block()
1134 static bool __need_flush_quota(struct f2fs_sb_info *sbi) in __need_flush_quota() argument
1138 if (!is_journalled_quota(sbi)) in __need_flush_quota()
1141 down_write(&sbi->quota_sem); in __need_flush_quota()
1142 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) { in __need_flush_quota()
1144 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) { in __need_flush_quota()
1146 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) { in __need_flush_quota()
1147 clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); in __need_flush_quota()
1149 } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) { in __need_flush_quota()
1152 up_write(&sbi->quota_sem); in __need_flush_quota()
1159 static int block_operations(struct f2fs_sb_info *sbi) in block_operations() argument
1172 f2fs_lock_all(sbi); in block_operations()
1173 if (__need_flush_quota(sbi)) { in block_operations()
1177 set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH); in block_operations()
1178 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); in block_operations()
1181 f2fs_unlock_all(sbi); in block_operations()
1184 locked = down_read_trylock(&sbi->sb->s_umount); in block_operations()
1185 f2fs_quota_sync(sbi->sb, -1); in block_operations()
1187 up_read(&sbi->sb->s_umount); in block_operations()
1194 if (get_pages(sbi, F2FS_DIRTY_DENTS)) { in block_operations()
1195 f2fs_unlock_all(sbi); in block_operations()
1196 err = f2fs_sync_dirty_inodes(sbi, DIR_INODE); in block_operations()
1207 down_write(&sbi->node_change); in block_operations()
1209 if (get_pages(sbi, F2FS_DIRTY_IMETA)) { in block_operations()
1210 up_write(&sbi->node_change); in block_operations()
1211 f2fs_unlock_all(sbi); in block_operations()
1212 err = f2fs_sync_inode_meta(sbi); in block_operations()
1220 down_write(&sbi->node_write); in block_operations()
1222 if (get_pages(sbi, F2FS_DIRTY_NODES)) { in block_operations()
1223 up_write(&sbi->node_write); in block_operations()
1224 atomic_inc(&sbi->wb_sync_req[NODE]); in block_operations()
1225 err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO); in block_operations()
1226 atomic_dec(&sbi->wb_sync_req[NODE]); in block_operations()
1228 up_write(&sbi->node_change); in block_operations()
1229 f2fs_unlock_all(sbi); in block_operations()
1240 __prepare_cp_block(sbi); in block_operations()
1241 up_write(&sbi->node_change); in block_operations()
1247 static void unblock_operations(struct f2fs_sb_info *sbi) in unblock_operations() argument
1249 up_write(&sbi->node_write); in unblock_operations()
1250 f2fs_unlock_all(sbi); in unblock_operations()
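
block_operations() freezes dirty state in stages (quota, dentries, inode meta, node pages), dropping and retaking f2fs_lock_all() whenever it has to write something back, and ends up holding cp_rwsem, node_change, and node_write; unblock_operations() releases node_write and cp_rwsem. Every checkpoint therefore runs inside this bracket (labels illustrative):

    err = block_operations(sbi);    /* flush dirty state, take the locks */
    if (err)
            goto out;
    /* ... f2fs_flush_nat_entries(), f2fs_flush_sit_entries(),
     *     do_checkpoint() ... */
    unblock_operations(sbi);        /* up_write(node_write) + f2fs_unlock_all() */
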
1253 void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi) in f2fs_wait_on_all_pages_writeback() argument
1258 prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); in f2fs_wait_on_all_pages_writeback()
1260 if (!get_pages(sbi, F2FS_WB_CP_DATA)) in f2fs_wait_on_all_pages_writeback()
1263 if (unlikely(f2fs_cp_error(sbi))) in f2fs_wait_on_all_pages_writeback()
1268 finish_wait(&sbi->cp_wait, &wait); in f2fs_wait_on_all_pages_writeback()
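
f2fs_wait_on_all_pages_writeback() parks on sbi->cp_wait until the F2FS_WB_CP_DATA counter drains or a checkpoint error aborts the wait. The listing omits the sleep in the middle of the loop; a minimal reconstruction, where the io_schedule_timeout() interval is my assumption for this kernel version:

    for (;;) {
            prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
            if (!get_pages(sbi, F2FS_WB_CP_DATA))
                    break;
            if (unlikely(f2fs_cp_error(sbi)))
                    break;
            io_schedule_timeout(5 * HZ);    /* assumed sleep interval */
    }
    finish_wait(&sbi->cp_wait, &wait);
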
1271 static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) in update_ckpt_flags() argument
1273 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num; in update_ckpt_flags()
1274 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in update_ckpt_flags()
1277 spin_lock_irqsave(&sbi->cp_lock, flags); in update_ckpt_flags()
1281 sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks) in update_ckpt_flags()
1282 disable_nat_bits(sbi, false); in update_ckpt_flags()
1304 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || in update_ckpt_flags()
1305 is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) in update_ckpt_flags()
1308 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) in update_ckpt_flags()
1313 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK)) in update_ckpt_flags()
1318 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) in update_ckpt_flags()
1323 if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) in update_ckpt_flags()
1330 spin_unlock_irqrestore(&sbi->cp_lock, flags); in update_ckpt_flags()
1333 static void commit_checkpoint(struct f2fs_sb_info *sbi, in commit_checkpoint() argument
1345 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); in commit_checkpoint()
1354 f2fs_bug_on(sbi, 1); in commit_checkpoint()
1358 if (unlikely(err && f2fs_cp_error(sbi))) { in commit_checkpoint()
1363 f2fs_bug_on(sbi, err); in commit_checkpoint()
1367 f2fs_submit_merged_write(sbi, META_FLUSH); in commit_checkpoint()
1370 static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) in do_checkpoint() argument
1372 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in do_checkpoint()
1373 struct f2fs_nm_info *nm_i = NM_I(sbi); in do_checkpoint()
1374 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags; in do_checkpoint()
1379 int cp_payload_blks = __cp_payload(sbi); in do_checkpoint()
1380 struct super_block *sb = sbi->sb; in do_checkpoint()
1381 struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); in do_checkpoint()
1386 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); in do_checkpoint()
1387 f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) && in do_checkpoint()
1388 !f2fs_cp_error(sbi)); in do_checkpoint()
1394 ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true)); in do_checkpoint()
1395 ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); in do_checkpoint()
1398 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE)); in do_checkpoint()
1400 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE)); in do_checkpoint()
1402 curseg_alloc_type(sbi, i + CURSEG_HOT_NODE); in do_checkpoint()
1406 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA)); in do_checkpoint()
1408 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA)); in do_checkpoint()
1410 curseg_alloc_type(sbi, i + CURSEG_HOT_DATA); in do_checkpoint()
1414 data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false); in do_checkpoint()
1415 spin_lock_irqsave(&sbi->cp_lock, flags); in do_checkpoint()
1420 spin_unlock_irqrestore(&sbi->cp_lock, flags); in do_checkpoint()
1436 update_ckpt_flags(sbi, cpc); in do_checkpoint()
1439 get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP)); in do_checkpoint()
1440 get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); in do_checkpoint()
1442 crc32 = f2fs_checkpoint_chksum(sbi, ckpt); in do_checkpoint()
1447 start_blk = __start_cp_next_addr(sbi); in do_checkpoint()
1450 if (enabled_nat_bits(sbi, cpc)) { in do_checkpoint()
1457 blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks; in do_checkpoint()
1459 f2fs_update_meta_page(sbi, nm_i->nat_bits + in do_checkpoint()
1464 f2fs_update_meta_page(sbi, ckpt, start_blk++); in do_checkpoint()
1467 f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE, in do_checkpoint()
1471 write_orphan_inodes(sbi, start_blk); in do_checkpoint()
1475 f2fs_write_data_summaries(sbi, start_blk); in do_checkpoint()
1479 kbytes_written = sbi->kbytes_written; in do_checkpoint()
1481 kbytes_written += BD_PART_WRITTEN(sbi); in do_checkpoint()
1486 f2fs_write_node_summaries(sbi, start_blk); in do_checkpoint()
1491 sbi->last_valid_block_count = sbi->total_valid_block_count; in do_checkpoint()
1492 percpu_counter_set(&sbi->alloc_valid_block_count, 0); in do_checkpoint()
1495 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); in do_checkpoint()
1496 f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) && in do_checkpoint()
1497 !f2fs_cp_error(sbi)); in do_checkpoint()
1500 f2fs_wait_on_all_pages_writeback(sbi); in do_checkpoint()
1503 err = f2fs_flush_device_cache(sbi); in do_checkpoint()
1508 commit_checkpoint(sbi, ckpt, start_blk); in do_checkpoint()
1509 f2fs_wait_on_all_pages_writeback(sbi); in do_checkpoint()
1515 if (f2fs_sb_has_encrypt(sbi)) in do_checkpoint()
1516 invalidate_mapping_pages(META_MAPPING(sbi), in do_checkpoint()
1517 MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1); in do_checkpoint()
1519 f2fs_release_ino_entry(sbi, false); in do_checkpoint()
1521 f2fs_reset_fsync_node_info(sbi); in do_checkpoint()
1523 clear_sbi_flag(sbi, SBI_IS_DIRTY); in do_checkpoint()
1524 clear_sbi_flag(sbi, SBI_NEED_CP); in do_checkpoint()
1525 clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH); in do_checkpoint()
1527 spin_lock(&sbi->stat_lock); in do_checkpoint()
1528 sbi->unusable_block_count = 0; in do_checkpoint()
1529 spin_unlock(&sbi->stat_lock); in do_checkpoint()
1531 __set_cp_next_pack(sbi); in do_checkpoint()
1537 if (get_pages(sbi, F2FS_DIRTY_NODES) || in do_checkpoint()
1538 get_pages(sbi, F2FS_DIRTY_IMETA)) in do_checkpoint()
1539 set_sbi_flag(sbi, SBI_IS_DIRTY); in do_checkpoint()
1541 f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS)); in do_checkpoint()
1543 return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0; in do_checkpoint()
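
Reading the f2fs_update_meta_page()/write calls above in order gives the on-disk shape of the pack that do_checkpoint() emits; a reconstruction as a comment, with the footer written last so the pack only becomes valid once everything before it is durable:

    /* CP pack layout emitted by do_checkpoint(), in write order:
     *
     *   start_blk:  checkpoint block (ckpt, crc32 embedded)
     *               cp_payload blocks (extra NAT bitmap)
     *               orphan blocks              write_orphan_inodes()
     *               data summaries             f2fs_write_data_summaries()
     *               node summaries (UMOUNT)    f2fs_write_node_summaries()
     *   footer:     checkpoint block again     commit_checkpoint(), sent
     *               with META_FLUSH after all writeback has drained
     *
     * When nat_bits are enabled, they land at the end of the segment,
     * blocks_per_seg - nat_bits_blocks past start_blk.
     */
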
1549 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) in f2fs_write_checkpoint() argument
1551 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in f2fs_write_checkpoint()
1555 if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi)) in f2fs_write_checkpoint()
1558 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in f2fs_write_checkpoint()
1561 f2fs_warn(sbi, "Start checkpoint disabled!"); in f2fs_write_checkpoint()
1563 mutex_lock(&sbi->cp_mutex); in f2fs_write_checkpoint()
1565 if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && in f2fs_write_checkpoint()
1567 ((cpc->reason & CP_DISCARD) && !sbi->discard_blks))) in f2fs_write_checkpoint()
1569 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_write_checkpoint()
1574 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops"); in f2fs_write_checkpoint()
1576 err = block_operations(sbi); in f2fs_write_checkpoint()
1580 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops"); in f2fs_write_checkpoint()
1582 f2fs_flush_merged_writes(sbi); in f2fs_write_checkpoint()
1586 if (!f2fs_exist_trim_candidates(sbi, cpc)) { in f2fs_write_checkpoint()
1587 unblock_operations(sbi); in f2fs_write_checkpoint()
1591 if (NM_I(sbi)->dirty_nat_cnt == 0 && in f2fs_write_checkpoint()
1592 SIT_I(sbi)->dirty_sentries == 0 && in f2fs_write_checkpoint()
1593 prefree_segments(sbi) == 0) { in f2fs_write_checkpoint()
1594 f2fs_flush_sit_entries(sbi, cpc); in f2fs_write_checkpoint()
1595 f2fs_clear_prefree_segments(sbi, cpc); in f2fs_write_checkpoint()
1596 unblock_operations(sbi); in f2fs_write_checkpoint()
1610 err = f2fs_flush_nat_entries(sbi, cpc); in f2fs_write_checkpoint()
1614 f2fs_flush_sit_entries(sbi, cpc); in f2fs_write_checkpoint()
1617 err = do_checkpoint(sbi, cpc); in f2fs_write_checkpoint()
1619 f2fs_release_discard_addrs(sbi); in f2fs_write_checkpoint()
1621 f2fs_clear_prefree_segments(sbi, cpc); in f2fs_write_checkpoint()
1623 unblock_operations(sbi); in f2fs_write_checkpoint()
1624 stat_inc_cp_count(sbi->stat_info); in f2fs_write_checkpoint()
1627 f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver); in f2fs_write_checkpoint()
1630 f2fs_update_time(sbi, CP_TIME); in f2fs_write_checkpoint()
1631 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); in f2fs_write_checkpoint()
1633 mutex_unlock(&sbi->cp_mutex); in f2fs_write_checkpoint()
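
Callers drive all of the above through a cp_control whose reason flags (CP_SYNC, CP_UMOUNT, CP_FASTBOOT, CP_DISCARD, ...) steer the checks seen in f2fs_write_checkpoint(). A sketch of the plain-sync case, grounded in the cpc->reason uses above; the exact fields initialized vary by call site:

    struct cp_control cpc = {
            .reason = CP_SYNC,      /* periodic/explicit sync checkpoint */
    };
    int err = f2fs_write_checkpoint(sbi, &cpc);
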
1637 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi) in f2fs_init_ino_entry_info() argument
1642 struct inode_management *im = &sbi->im[i]; in f2fs_init_ino_entry_info()
1650 sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS - in f2fs_init_ino_entry_info()
1651 NR_CURSEG_TYPE - __cp_payload(sbi)) * in f2fs_init_ino_entry_info()