Lines matching refs:sbi in fs/f2fs/checkpoint.c (GNU GLOBAL-style cross-reference). Each entry gives the source line number, the matching line, and the enclosing function; the trailing "argument" or "local" notes how sbi is bound in that function. Lines that do not mention sbi are elided, so function bodies appear with gaps.
29 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io) in f2fs_stop_checkpoint() argument
31 f2fs_build_fault_attr(sbi, 0, 0); in f2fs_stop_checkpoint()
32 set_ckpt_flags(sbi, CP_ERROR_FLAG); in f2fs_stop_checkpoint()
34 f2fs_flush_merged_writes(sbi); in f2fs_stop_checkpoint()
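f2fs_stop_checkpoint() is the fail-stop switch: it disables fault injection, latches CP_ERROR_FLAG so f2fs_cp_error() returns true from then on, and (the elided line 33 guards this) flushes merged writes only when end_io is false, i.e. when not running inside a bio completion handler. A minimal caller sketch; the wrapper name is hypothetical:

	/* Sketch: fail-stop on an unrecoverable I/O error. */
	static void handle_fatal_io(struct f2fs_sb_info *sbi, bool in_end_io)
	{
		/* Pass true from bio end_io context so the flush is skipped. */
		f2fs_stop_checkpoint(sbi, in_end_io);
		/* All later checkpoints now fail fast with -EIO (see the
		 * f2fs_cp_error() checks throughout this listing). */
	}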
40 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_grab_meta_page() argument
42 struct address_space *mapping = META_MAPPING(sbi); in f2fs_grab_meta_page()
59 static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, in __get_meta_page() argument
62 struct address_space *mapping = META_MAPPING(sbi); in __get_meta_page()
65 .sbi = sbi, in __get_meta_page()
109 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_get_meta_page() argument
111 return __get_meta_page(sbi, index, true); in f2fs_get_meta_page()
114 struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_get_meta_page_nofail() argument
120 page = __get_meta_page(sbi, index, true); in f2fs_get_meta_page_nofail()
126 f2fs_stop_checkpoint(sbi, false); in f2fs_get_meta_page_nofail()
127 f2fs_bug_on(sbi, 1); in f2fs_get_meta_page_nofail()
134 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_get_tmp_page() argument
136 return __get_meta_page(sbi, index, false); in f2fs_get_tmp_page()
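f2fs_grab_meta_page() returns a locked meta page, allocating it if uncached, for callers about to overwrite the whole block; f2fs_get_meta_page() reads and verifies an existing block; the _nofail variant retries transient failures and otherwise fail-stops via f2fs_stop_checkpoint() (lines 126 and 127); f2fs_get_tmp_page() reads without long-term caching. The canonical write cycle, modeled on f2fs's own meta-update helper (sketch; the function name here is illustrative):

	/* Sketch: rewrite one meta block in place. */
	static void update_one_meta_block(struct f2fs_sb_info *sbi,
					  const void *src, block_t blk_addr)
	{
		struct page *page = f2fs_grab_meta_page(sbi, blk_addr);

		memcpy(page_address(page), src, F2FS_BLKSIZE);
		set_page_dirty(page);	/* picked up by f2fs_sync_meta_pages() */
		f2fs_put_page(page, 1);	/* unlock and drop our reference */
	}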
139 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, in f2fs_is_valid_blkaddr() argument
146 if (unlikely(blkaddr >= SIT_BLK_CNT(sbi))) in f2fs_is_valid_blkaddr()
150 if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) || in f2fs_is_valid_blkaddr()
151 blkaddr < SM_I(sbi)->ssa_blkaddr)) in f2fs_is_valid_blkaddr()
155 if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr || in f2fs_is_valid_blkaddr()
156 blkaddr < __start_cp_addr(sbi))) in f2fs_is_valid_blkaddr()
161 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || in f2fs_is_valid_blkaddr()
162 blkaddr < MAIN_BLKADDR(sbi))) { in f2fs_is_valid_blkaddr()
164 f2fs_msg(sbi->sb, KERN_WARNING, in f2fs_is_valid_blkaddr()
172 if (unlikely(blkaddr < SEG0_BLKADDR(sbi) || in f2fs_is_valid_blkaddr()
173 blkaddr >= MAIN_BLKADDR(sbi))) in f2fs_is_valid_blkaddr()
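f2fs_is_valid_blkaddr() range-checks blkaddr against the region the caller claims it belongs to: the SIT block count for META_SIT, the SSA window for META_SSA, the CP area for META_CP, the main area for META_POR (with an f2fs_msg() warning when a generic data address falls outside it, line 164), and the seg0-to-main window in the final branch. Raw on-disk addresses should pass through it before any read; a hedged sketch of the recovery-style guard (wrapper name and the -EINVAL choice are illustrative):

	/* Sketch: validate an on-disk address before reading it. */
	static int read_por_block(struct f2fs_sb_info *sbi, block_t blkaddr,
				  struct page **pagep)
	{
		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return -EINVAL;	/* outside the main area */

		/* Uncached read: recovery blocks need not stay resident. */
		*pagep = f2fs_get_tmp_page(sbi, blkaddr);
		return IS_ERR(*pagep) ? PTR_ERR(*pagep) : 0;
	}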
186 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, in f2fs_ra_meta_pages() argument
192 .sbi = sbi, in f2fs_ra_meta_pages()
208 if (!f2fs_is_valid_blkaddr(sbi, blkno, type)) in f2fs_ra_meta_pages()
214 NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))) in f2fs_ra_meta_pages()
217 fio.new_blkaddr = current_nat_addr(sbi, in f2fs_ra_meta_pages()
222 fio.new_blkaddr = current_sit_addr(sbi, in f2fs_ra_meta_pages()
234 page = f2fs_grab_cache_page(META_MAPPING(sbi), in f2fs_ra_meta_pages()
252 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_ra_meta_pages_cond() argument
257 page = find_get_page(META_MAPPING(sbi), index); in f2fs_ra_meta_pages_cond()
263 f2fs_ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true); in f2fs_ra_meta_pages_cond()
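f2fs_ra_meta_pages() issues one merged readahead for up to nrpages consecutive meta blocks (NAT and SIT indices are translated per block at lines 217 and 222); f2fs_ra_meta_pages_cond() first peeks at the page cache (line 257) and only readaheads on a miss. The orphan-recovery code at lines 673 and 679 below pairs readahead with per-page reads exactly as in this sketch (wrapper name illustrative):

	/* Sketch: readahead a run of checkpoint blocks, then walk them. */
	static int walk_cp_blocks(struct f2fs_sb_info *sbi, block_t start_blk,
				  int nblocks)
	{
		int i;

		/* One merged, synchronous readahead for the whole run. */
		f2fs_ra_meta_pages(sbi, start_blk, nblocks, META_CP, true);

		for (i = 0; i < nblocks; i++) {
			struct page *page = f2fs_get_meta_page(sbi,
							start_blk + i);
			if (IS_ERR(page))
				return PTR_ERR(page);
			/* ... consume page_address(page) ... */
			f2fs_put_page(page, 1);
		}
		return 0;
	}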
270 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in __f2fs_write_meta_page() local
274 if (unlikely(f2fs_cp_error(sbi))) in __f2fs_write_meta_page()
276 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in __f2fs_write_meta_page()
278 if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0)) in __f2fs_write_meta_page()
281 f2fs_do_write_meta_page(sbi, page, io_type); in __f2fs_write_meta_page()
282 dec_page_count(sbi, F2FS_DIRTY_META); in __f2fs_write_meta_page()
285 f2fs_submit_merged_write_cond(sbi, page->mapping->host, in __f2fs_write_meta_page()
290 if (unlikely(f2fs_cp_error(sbi))) in __f2fs_write_meta_page()
291 f2fs_submit_merged_write(sbi, META); in __f2fs_write_meta_page()
309 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); in f2fs_write_meta_pages() local
312 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_write_meta_pages()
317 get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META)) in f2fs_write_meta_pages()
321 if (!mutex_trylock(&sbi->cp_mutex)) in f2fs_write_meta_pages()
325 diff = nr_pages_to_write(sbi, META, wbc); in f2fs_write_meta_pages()
326 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO); in f2fs_write_meta_pages()
327 mutex_unlock(&sbi->cp_mutex); in f2fs_write_meta_pages()
332 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META); in f2fs_write_meta_pages()
337 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, in f2fs_sync_meta_pages() argument
340 struct address_space *mapping = META_MAPPING(sbi); in f2fs_sync_meta_pages()
400 f2fs_submit_merged_write(sbi, type); in f2fs_sync_meta_pages()
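f2fs_sync_meta_pages() writes back up to nr_to_write dirty META pages and submits them as merged bios (line 400). Callers that need the meta area fully clean, such as do_checkpoint() at lines 1278 to 1280 below, drain the F2FS_DIRTY_META counter in a loop; the same loop standalone (wrapper name illustrative):

	/* Sketch: flush all dirty meta pages, bailing out on CP error. */
	static int drain_dirty_meta(struct f2fs_sb_info *sbi)
	{
		while (get_pages(sbi, F2FS_DIRTY_META)) {
			f2fs_sync_meta_pages(sbi, META, LONG_MAX,
							FS_CP_META_IO);
			if (unlikely(f2fs_cp_error(sbi)))
				return -EIO;
		}
		return 0;
	}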
434 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, in __add_ino_entry() argument
437 struct inode_management *im = &sbi->im[type]; in __add_ino_entry()
449 f2fs_bug_on(sbi, 1); in __add_ino_entry()
469 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) in __remove_ino_entry() argument
471 struct inode_management *im = &sbi->im[type]; in __remove_ino_entry()
487 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) in f2fs_add_ino_entry() argument
490 __add_ino_entry(sbi, ino, 0, type); in f2fs_add_ino_entry()
493 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) in f2fs_remove_ino_entry() argument
496 __remove_ino_entry(sbi, ino, type); in f2fs_remove_ino_entry()
500 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode) in f2fs_exist_written_data() argument
502 struct inode_management *im = &sbi->im[mode]; in f2fs_exist_written_data()
511 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all) in f2fs_release_ino_entry() argument
517 struct inode_management *im = &sbi->im[i]; in f2fs_release_ino_entry()
530 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, in f2fs_set_dirty_device() argument
533 __add_ino_entry(sbi, ino, devidx, type); in f2fs_set_dirty_device()
536 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, in f2fs_is_dirty_device() argument
539 struct inode_management *im = &sbi->im[type]; in f2fs_is_dirty_device()
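sbi->im[] holds one inode_management tree per entry type: ORPHAN_INO for orphan tracking, plus fsync bookkeeping types such as APPEND_INO and UPDATE_INO that f2fs_exist_written_data() queries; f2fs_set_dirty_device()/f2fs_is_dirty_device() additionally record which device of a multi-device volume an inode dirtied. A hedged sketch of the fsync-side pattern (both wrapper names are hypothetical; the type constants are real):

	/* Sketch: fsync bookkeeping built on the im[] trees. */
	static bool fsync_needs_wait(struct f2fs_sb_info *sbi, nid_t ino)
	{
		/* True if an entry was recorded and not yet dropped. */
		return f2fs_exist_written_data(sbi, ino, APPEND_INO);
	}

	static void fsync_done(struct f2fs_sb_info *sbi, nid_t ino)
	{
		/* The checkpoint persisted everything for this inode. */
		f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
	}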
551 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi) in f2fs_acquire_orphan_inode() argument
553 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_acquire_orphan_inode()
558 if (time_to_inject(sbi, FAULT_ORPHAN)) { in f2fs_acquire_orphan_inode()
564 if (unlikely(im->ino_num >= sbi->max_orphans)) in f2fs_acquire_orphan_inode()
573 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi) in f2fs_release_orphan_inode() argument
575 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_release_orphan_inode()
578 f2fs_bug_on(sbi, im->ino_num == 0); in f2fs_release_orphan_inode()
590 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) in f2fs_remove_orphan_inode() argument
593 __remove_ino_entry(sbi, ino, ORPHAN_INO); in f2fs_remove_orphan_inode()
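An orphan slot is reserved up front with f2fs_acquire_orphan_inode(), which can fail against sbi->max_orphans (line 564) or injected faults (line 558), so the operation that actually orphans the inode cannot run out of checkpoint space afterwards. Reservation pattern, loosely after f2fs's unlink/rename paths (sketch; f2fs_add_orphan_inode() is a companion helper not captured by this sbi listing):

	/* Sketch: reserve, then either consume or return an orphan slot. */
	static int make_orphan(struct f2fs_sb_info *sbi, struct inode *inode)
	{
		int err = f2fs_acquire_orphan_inode(sbi);

		if (err)
			return err;	/* table full or fault injected */

		if (inode->i_nlink == 0)
			f2fs_add_orphan_inode(inode);	/* consumes slot */
		else
			f2fs_release_orphan_inode(sbi);	/* returns slot */
		return 0;
	}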
596 static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) in recover_orphan_inode() argument
602 inode = f2fs_iget_retry(sbi->sb, ino); in recover_orphan_inode()
608 f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT); in recover_orphan_inode()
623 err = f2fs_get_node_info(sbi, ino, &ni); in recover_orphan_inode()
635 set_sbi_flag(sbi, SBI_NEED_FSCK); in recover_orphan_inode()
636 f2fs_msg(sbi->sb, KERN_WARNING, in recover_orphan_inode()
642 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi) in f2fs_recover_orphan_inodes() argument
645 unsigned int s_flags = sbi->sb->s_flags; in f2fs_recover_orphan_inodes()
651 if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG)) in f2fs_recover_orphan_inodes()
655 f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs"); in f2fs_recover_orphan_inodes()
656 sbi->sb->s_flags &= ~SB_RDONLY; in f2fs_recover_orphan_inodes()
661 sbi->sb->s_flags |= SB_ACTIVE; in f2fs_recover_orphan_inodes()
667 quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY); in f2fs_recover_orphan_inodes()
670 start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi); in f2fs_recover_orphan_inodes()
671 orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi); in f2fs_recover_orphan_inodes()
673 f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true); in f2fs_recover_orphan_inodes()
679 page = f2fs_get_meta_page(sbi, start_blk + i); in f2fs_recover_orphan_inodes()
688 err = recover_orphan_inode(sbi, ino); in f2fs_recover_orphan_inodes()
697 clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG); in f2fs_recover_orphan_inodes()
702 f2fs_quota_off_umount(sbi->sb); in f2fs_recover_orphan_inodes()
704 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ in f2fs_recover_orphan_inodes()
709 static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) in write_orphan_inodes() argument
718 struct inode_management *im = &sbi->im[ORPHAN_INO]; in write_orphan_inodes()
732 page = f2fs_grab_meta_page(sbi, start_blk++); in write_orphan_inodes()
766 static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr, in get_checkpoint_version() argument
770 unsigned long blk_size = sbi->blocksize; in get_checkpoint_version()
774 *cp_page = f2fs_get_meta_page(sbi, cp_addr); in get_checkpoint_version()
783 f2fs_msg(sbi->sb, KERN_WARNING, in get_checkpoint_version()
789 if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) { in get_checkpoint_version()
791 f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value"); in get_checkpoint_version()
799 static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, in validate_checkpoint() argument
807 err = get_checkpoint_version(sbi, cp_addr, &cp_block, in validate_checkpoint()
813 sbi->blocks_per_seg) { in validate_checkpoint()
814 f2fs_msg(sbi->sb, KERN_WARNING, in validate_checkpoint()
822 err = get_checkpoint_version(sbi, cp_addr, &cp_block, in validate_checkpoint()
839 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) in f2fs_get_valid_checkpoint() argument
842 struct f2fs_super_block *fsb = sbi->raw_super; in f2fs_get_valid_checkpoint()
844 unsigned long blk_size = sbi->blocksize; in f2fs_get_valid_checkpoint()
847 unsigned int cp_blks = 1 + __cp_payload(sbi); in f2fs_get_valid_checkpoint()
851 sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks), in f2fs_get_valid_checkpoint()
853 if (!sbi->ckpt) in f2fs_get_valid_checkpoint()
860 cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version); in f2fs_get_valid_checkpoint()
865 cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version); in f2fs_get_valid_checkpoint()
881 memcpy(sbi->ckpt, cp_block, blk_size); in f2fs_get_valid_checkpoint()
884 sbi->cur_cp_pack = 1; in f2fs_get_valid_checkpoint()
886 sbi->cur_cp_pack = 2; in f2fs_get_valid_checkpoint()
889 if (f2fs_sanity_check_ckpt(sbi)) in f2fs_get_valid_checkpoint()
901 unsigned char *ckpt = (unsigned char *)sbi->ckpt; in f2fs_get_valid_checkpoint()
903 cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i); in f2fs_get_valid_checkpoint()
919 kfree(sbi->ckpt); in f2fs_get_valid_checkpoint()
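f2fs_get_valid_checkpoint() loads both checkpoint packs via validate_checkpoint(), keeps the one with the higher version (each pack is version-stamped at both ends, lines 807 and 822, so a torn write is detectable), copies the winner into sbi->ckpt, records it in sbi->cur_cp_pack, and pulls in any extra payload blocks (lines 901 to 903). Mount-time sequence, condensed (sketch; the wrapper name is illustrative):

	/* Sketch: checkpoint load during mount, then orphan replay. */
	static int load_checkpoint(struct f2fs_sb_info *sbi)
	{
		int err = f2fs_get_valid_checkpoint(sbi);

		if (err)
			return err;	/* neither pack validated */

		/* sbi->ckpt is populated; orphans may now be replayed. */
		return f2fs_recover_orphan_inodes(sbi);
	}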
925 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __add_dirty_inode() local
934 &sbi->inode_list[type]); in __add_dirty_inode()
935 stat_inc_dirty_inode(sbi, type); in __add_dirty_inode()
952 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_update_dirty_page() local
959 spin_lock(&sbi->inode_lock[type]); in f2fs_update_dirty_page()
960 if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH)) in f2fs_update_dirty_page()
963 spin_unlock(&sbi->inode_lock[type]); in f2fs_update_dirty_page()
971 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_remove_dirty_inode() local
978 if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH)) in f2fs_remove_dirty_inode()
981 spin_lock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
983 spin_unlock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
986 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) in f2fs_sync_dirty_inodes() argument
994 trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
995 get_pages(sbi, is_dir ? in f2fs_sync_dirty_inodes()
998 if (unlikely(f2fs_cp_error(sbi))) in f2fs_sync_dirty_inodes()
1001 spin_lock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1003 head = &sbi->inode_list[type]; in f2fs_sync_dirty_inodes()
1005 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1006 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1007 get_pages(sbi, is_dir ? in f2fs_sync_dirty_inodes()
1013 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1036 f2fs_submit_merged_write(sbi, DATA); in f2fs_sync_dirty_inodes()
1042 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi) in f2fs_sync_inode_meta() argument
1044 struct list_head *head = &sbi->inode_list[DIRTY_META]; in f2fs_sync_inode_meta()
1047 s64 total = get_pages(sbi, F2FS_DIRTY_IMETA); in f2fs_sync_inode_meta()
1050 if (unlikely(f2fs_cp_error(sbi))) in f2fs_sync_inode_meta()
1053 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1055 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1061 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1074 static void __prepare_cp_block(struct f2fs_sb_info *sbi) in __prepare_cp_block() argument
1076 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in __prepare_cp_block()
1077 struct f2fs_nm_info *nm_i = NM_I(sbi); in __prepare_cp_block()
1080 next_free_nid(sbi, &last_nid); in __prepare_cp_block()
1081 ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); in __prepare_cp_block()
1082 ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi)); in __prepare_cp_block()
1083 ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi)); in __prepare_cp_block()
1090 static int block_operations(struct f2fs_sb_info *sbi) in block_operations() argument
1103 f2fs_lock_all(sbi); in block_operations()
1105 if (get_pages(sbi, F2FS_DIRTY_DENTS)) { in block_operations()
1106 f2fs_unlock_all(sbi); in block_operations()
1107 err = f2fs_sync_dirty_inodes(sbi, DIR_INODE); in block_operations()
1118 down_write(&sbi->node_change); in block_operations()
1120 if (get_pages(sbi, F2FS_DIRTY_IMETA)) { in block_operations()
1121 up_write(&sbi->node_change); in block_operations()
1122 f2fs_unlock_all(sbi); in block_operations()
1123 err = f2fs_sync_inode_meta(sbi); in block_operations()
1131 down_write(&sbi->node_write); in block_operations()
1133 if (get_pages(sbi, F2FS_DIRTY_NODES)) { in block_operations()
1134 up_write(&sbi->node_write); in block_operations()
1135 atomic_inc(&sbi->wb_sync_req[NODE]); in block_operations()
1136 err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO); in block_operations()
1137 atomic_dec(&sbi->wb_sync_req[NODE]); in block_operations()
1139 up_write(&sbi->node_change); in block_operations()
1140 f2fs_unlock_all(sbi); in block_operations()
1151 __prepare_cp_block(sbi); in block_operations()
1152 up_write(&sbi->node_change); in block_operations()
1158 static void unblock_operations(struct f2fs_sb_info *sbi) in unblock_operations() argument
1160 up_write(&sbi->node_write); in unblock_operations()
1161 f2fs_unlock_all(sbi); in unblock_operations()
1164 void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi) in f2fs_wait_on_all_pages_writeback() argument
1169 prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); in f2fs_wait_on_all_pages_writeback()
1171 if (!get_pages(sbi, F2FS_WB_CP_DATA)) in f2fs_wait_on_all_pages_writeback()
1174 if (unlikely(f2fs_cp_error(sbi))) in f2fs_wait_on_all_pages_writeback()
1179 finish_wait(&sbi->cp_wait, &wait); in f2fs_wait_on_all_pages_writeback()
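Only the sbi-bearing lines of this wait loop survive the filter above; the elided body is the standard waitqueue idiom: re-arm with prepare_to_wait(), exit once F2FS_WB_CP_DATA drains or a checkpoint error latches, otherwise sleep. A hedged reconstruction (the io_schedule_timeout() interval is an assumption):

	/* Reconstructed shape of f2fs_wait_on_all_pages_writeback(). */
	void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
	{
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(&sbi->cp_wait, &wait,
						TASK_UNINTERRUPTIBLE);

			if (!get_pages(sbi, F2FS_WB_CP_DATA))
				break;		/* CP data writeback done */

			if (unlikely(f2fs_cp_error(sbi)))
				break;		/* give up on fatal error */

			io_schedule_timeout(5 * HZ);	/* assumed interval */
		}
		finish_wait(&sbi->cp_wait, &wait);
	}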
1182 static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) in update_ckpt_flags() argument
1184 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num; in update_ckpt_flags()
1185 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in update_ckpt_flags()
1188 spin_lock_irqsave(&sbi->cp_lock, flags); in update_ckpt_flags()
1192 sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks) in update_ckpt_flags()
1193 disable_nat_bits(sbi, false); in update_ckpt_flags()
1215 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) in update_ckpt_flags()
1222 spin_unlock_irqrestore(&sbi->cp_lock, flags); in update_ckpt_flags()
1225 static void commit_checkpoint(struct f2fs_sb_info *sbi, in commit_checkpoint() argument
1237 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); in commit_checkpoint()
1244 f2fs_bug_on(sbi, PageWriteback(page)); in commit_checkpoint()
1246 f2fs_bug_on(sbi, 1); in commit_checkpoint()
1250 if (unlikely(err && f2fs_cp_error(sbi))) { in commit_checkpoint()
1255 f2fs_bug_on(sbi, err); in commit_checkpoint()
1259 f2fs_submit_merged_write(sbi, META_FLUSH); in commit_checkpoint()
1262 static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) in do_checkpoint() argument
1264 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in do_checkpoint()
1265 struct f2fs_nm_info *nm_i = NM_I(sbi); in do_checkpoint()
1266 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags; in do_checkpoint()
1271 int cp_payload_blks = __cp_payload(sbi); in do_checkpoint()
1272 struct super_block *sb = sbi->sb; in do_checkpoint()
1273 struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); in do_checkpoint()
1278 while (get_pages(sbi, F2FS_DIRTY_META)) { in do_checkpoint()
1279 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); in do_checkpoint()
1280 if (unlikely(f2fs_cp_error(sbi))) in do_checkpoint()
1288 ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true)); in do_checkpoint()
1289 ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); in do_checkpoint()
1292 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE)); in do_checkpoint()
1294 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE)); in do_checkpoint()
1296 curseg_alloc_type(sbi, i + CURSEG_HOT_NODE); in do_checkpoint()
1300 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA)); in do_checkpoint()
1302 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA)); in do_checkpoint()
1304 curseg_alloc_type(sbi, i + CURSEG_HOT_DATA); in do_checkpoint()
1308 data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false); in do_checkpoint()
1309 spin_lock_irqsave(&sbi->cp_lock, flags); in do_checkpoint()
1314 spin_unlock_irqrestore(&sbi->cp_lock, flags); in do_checkpoint()
1330 update_ckpt_flags(sbi, cpc); in do_checkpoint()
1333 get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP)); in do_checkpoint()
1334 get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); in do_checkpoint()
1336 crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset)); in do_checkpoint()
1341 start_blk = __start_cp_next_addr(sbi); in do_checkpoint()
1344 if (enabled_nat_bits(sbi, cpc)) { in do_checkpoint()
1351 blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks; in do_checkpoint()
1353 f2fs_update_meta_page(sbi, nm_i->nat_bits + in do_checkpoint()
1357 while (get_pages(sbi, F2FS_DIRTY_META)) { in do_checkpoint()
1358 f2fs_sync_meta_pages(sbi, META, LONG_MAX, in do_checkpoint()
1360 if (unlikely(f2fs_cp_error(sbi))) in do_checkpoint()
1366 f2fs_update_meta_page(sbi, ckpt, start_blk++); in do_checkpoint()
1369 f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE, in do_checkpoint()
1373 write_orphan_inodes(sbi, start_blk); in do_checkpoint()
1377 f2fs_write_data_summaries(sbi, start_blk); in do_checkpoint()
1381 kbytes_written = sbi->kbytes_written; in do_checkpoint()
1383 kbytes_written += BD_PART_WRITTEN(sbi); in do_checkpoint()
1388 f2fs_write_node_summaries(sbi, start_blk); in do_checkpoint()
1393 sbi->last_valid_block_count = sbi->total_valid_block_count; in do_checkpoint()
1394 percpu_counter_set(&sbi->alloc_valid_block_count, 0); in do_checkpoint()
1397 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); in do_checkpoint()
1400 f2fs_wait_on_all_pages_writeback(sbi); in do_checkpoint()
1403 err = f2fs_flush_device_cache(sbi); in do_checkpoint()
1408 commit_checkpoint(sbi, ckpt, start_blk); in do_checkpoint()
1409 f2fs_wait_on_all_pages_writeback(sbi); in do_checkpoint()
1415 if (f2fs_sb_has_encrypt(sbi->sb)) in do_checkpoint()
1416 invalidate_mapping_pages(META_MAPPING(sbi), in do_checkpoint()
1417 MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1); in do_checkpoint()
1419 f2fs_release_ino_entry(sbi, false); in do_checkpoint()
1421 f2fs_reset_fsync_node_info(sbi); in do_checkpoint()
1423 clear_sbi_flag(sbi, SBI_IS_DIRTY); in do_checkpoint()
1424 clear_sbi_flag(sbi, SBI_NEED_CP); in do_checkpoint()
1425 __set_cp_next_pack(sbi); in do_checkpoint()
1431 if (get_pages(sbi, F2FS_DIRTY_NODES) || in do_checkpoint()
1432 get_pages(sbi, F2FS_DIRTY_IMETA)) in do_checkpoint()
1433 set_sbi_flag(sbi, SBI_IS_DIRTY); in do_checkpoint()
1435 f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS)); in do_checkpoint()
1437 return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0; in do_checkpoint()
1443 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) in f2fs_write_checkpoint() argument
1445 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in f2fs_write_checkpoint()
1449 mutex_lock(&sbi->cp_mutex); in f2fs_write_checkpoint()
1451 if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && in f2fs_write_checkpoint()
1453 ((cpc->reason & CP_DISCARD) && !sbi->discard_blks))) in f2fs_write_checkpoint()
1455 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_write_checkpoint()
1459 if (f2fs_readonly(sbi->sb)) { in f2fs_write_checkpoint()
1464 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops"); in f2fs_write_checkpoint()
1466 err = block_operations(sbi); in f2fs_write_checkpoint()
1470 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops"); in f2fs_write_checkpoint()
1472 f2fs_flush_merged_writes(sbi); in f2fs_write_checkpoint()
1476 if (!f2fs_exist_trim_candidates(sbi, cpc)) { in f2fs_write_checkpoint()
1477 unblock_operations(sbi); in f2fs_write_checkpoint()
1481 if (NM_I(sbi)->dirty_nat_cnt == 0 && in f2fs_write_checkpoint()
1482 SIT_I(sbi)->dirty_sentries == 0 && in f2fs_write_checkpoint()
1483 prefree_segments(sbi) == 0) { in f2fs_write_checkpoint()
1484 f2fs_flush_sit_entries(sbi, cpc); in f2fs_write_checkpoint()
1485 f2fs_clear_prefree_segments(sbi, cpc); in f2fs_write_checkpoint()
1486 unblock_operations(sbi); in f2fs_write_checkpoint()
1500 f2fs_flush_nat_entries(sbi, cpc); in f2fs_write_checkpoint()
1501 f2fs_flush_sit_entries(sbi, cpc); in f2fs_write_checkpoint()
1504 err = do_checkpoint(sbi, cpc); in f2fs_write_checkpoint()
1506 f2fs_release_discard_addrs(sbi); in f2fs_write_checkpoint()
1508 f2fs_clear_prefree_segments(sbi, cpc); in f2fs_write_checkpoint()
1510 unblock_operations(sbi); in f2fs_write_checkpoint()
1511 stat_inc_cp_count(sbi->stat_info); in f2fs_write_checkpoint()
1514 f2fs_msg(sbi->sb, KERN_NOTICE, in f2fs_write_checkpoint()
1518 f2fs_update_time(sbi, CP_TIME); in f2fs_write_checkpoint()
1519 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); in f2fs_write_checkpoint()
1521 mutex_unlock(&sbi->cp_mutex); in f2fs_write_checkpoint()
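f2fs_write_checkpoint() is the sole entry point for taking a checkpoint: it serializes on sbi->cp_mutex (line 1449), quiesces the filesystem with block_operations(), flushes the NAT and SIT journals (lines 1500 and 1501), and hands off to do_checkpoint(). Callers just describe their intent in a cp_control; a sync(2)-style trigger looks roughly like this (sketch; the wrapper name is illustrative, CP_SYNC and friends are the real reason flags):

	/* Sketch: request a checkpoint on behalf of sync(2). */
	static int sync_fs_checkpoint(struct f2fs_sb_info *sbi)
	{
		struct cp_control cpc = {
			.reason = CP_SYNC,	/* or CP_UMOUNT, CP_DISCARD, ... */
		};

		/* Takes and releases sbi->cp_mutex internally. */
		return f2fs_write_checkpoint(sbi, &cpc);
	}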
1525 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi) in f2fs_init_ino_entry_info() argument
1530 struct inode_management *im = &sbi->im[i]; in f2fs_init_ino_entry_info()
1538 sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS - in f2fs_init_ino_entry_info()
1539 NR_CURSEG_TYPE - __cp_payload(sbi)) * in f2fs_init_ino_entry_info()
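The expression at lines 1538 and 1539 is cut off because its last line does not mention sbi; it sizes the orphan table from the checkpoint-pack layout: of the blocks_per_seg blocks in a pack, F2FS_CP_PACKS header/footer blocks, NR_CURSEG_TYPE summary blocks, and __cp_payload() bitmap-overflow blocks are reserved, and each remaining block stores F2FS_ORPHANS_PER_BLOCK entries. Reconstructed from mainline f2fs:

	/* Reconstruction of the full statement at line 1538 (mainline). */
	sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
				NR_CURSEG_TYPE - __cp_payload(sbi)) *
					F2FS_ORPHANS_PER_BLOCK;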