Lines matching refs: sbi (cross-reference listing; the function names correspond to fs/f2fs/node.c)
33 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_check_nid_range() argument
35 if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) { in f2fs_check_nid_range()
36 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_check_nid_range()
37 f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.", in f2fs_check_nid_range()
39 f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE); in f2fs_check_nid_range()
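The matched lines above cover almost all of f2fs_check_nid_range(). A minimal reassembly of the pattern they show; the -EFSCORRUPTED/0 return values and the __func__ argument to f2fs_warn() are assumptions not visible in the listing:

int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);	/* __func__ argument assumed */
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;		/* return values assumed */
	}
	return 0;
}

Callers in the listing (lines 1389, 1412, 2282, 2868) treat a nonzero result as failure, which is consistent with this shape.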
45 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) in f2fs_available_free_memory() argument
47 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_available_free_memory()
48 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_available_free_memory()
73 if (excess_cached_nats(sbi)) in f2fs_available_free_memory()
76 if (sbi->sb->s_bdi->wb.dirty_exceeded) in f2fs_available_free_memory()
78 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); in f2fs_available_free_memory()
84 mem_size += sbi->im[i].ino_num * in f2fs_available_free_memory()
89 mem_size = (atomic_read(&sbi->total_ext_tree) * in f2fs_available_free_memory()
91 atomic_read(&sbi->total_ext_node) * in f2fs_available_free_memory()
106 res = (free_ram > avail_ram * sbi->compress_watermark / 100) && in f2fs_available_free_memory()
107 (COMPRESS_MAPPING(sbi)->nrpages < in f2fs_available_free_memory()
108 free_ram * sbi->compress_percent / 100); in f2fs_available_free_memory()
113 if (!sbi->sb->s_bdi->wb.dirty_exceeded) in f2fs_available_free_memory()
129 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_current_nat_page() argument
131 return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid)); in get_current_nat_page()
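Lines 129-131 show this one-line helper in full: the NAT page for a nid is simply the meta page at current_nat_addr().

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}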
134 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_next_nat_page() argument
141 struct f2fs_nm_info *nm_i = NM_I(sbi); in get_next_nat_page()
143 dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid)); in get_next_nat_page()
146 src_page = get_current_nat_page(sbi, nid); in get_next_nat_page()
149 dst_page = f2fs_grab_meta_page(sbi, dst_off); in get_next_nat_page()
150 f2fs_bug_on(sbi, PageDirty(src_page)); in get_next_nat_page()
163 static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi, in __alloc_nat_entry() argument
169 GFP_F2FS_ZERO, no_fail, sbi); in __alloc_nat_entry()
309 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page) in f2fs_in_warm_node_list() argument
311 return NODE_MAPPING(sbi) == page->mapping && in f2fs_in_warm_node_list()
315 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi) in f2fs_init_fsync_node_info() argument
317 spin_lock_init(&sbi->fsync_node_lock); in f2fs_init_fsync_node_info()
318 INIT_LIST_HEAD(&sbi->fsync_node_list); in f2fs_init_fsync_node_info()
319 sbi->fsync_seg_id = 0; in f2fs_init_fsync_node_info()
320 sbi->fsync_node_num = 0; in f2fs_init_fsync_node_info()
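Lines 317-320 give the complete body of the fsync-node-list initializer; only the signature and braces join what the listing already shows:

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}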
323 static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi, in f2fs_add_fsync_node_entry() argument
337 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_add_fsync_node_entry()
338 list_add_tail(&fn->list, &sbi->fsync_node_list); in f2fs_add_fsync_node_entry()
339 fn->seq_id = sbi->fsync_seg_id++; in f2fs_add_fsync_node_entry()
341 sbi->fsync_node_num++; in f2fs_add_fsync_node_entry()
342 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_add_fsync_node_entry()
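The add path links the entry under fsync_node_lock with interrupts saved and hands out a monotonically increasing seq_id. A hedged sketch; the f2fs_kmem_cache_alloc() call (its four-argument form is suggested by line 169) and the fn->page/get_page() handling are assumptions beyond the matched lines:

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
					struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	/* allocation and page pinning assumed, not in the listing */
	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS, true, NULL);
	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}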
347 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page) in f2fs_del_fsync_node_entry() argument
352 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_del_fsync_node_entry()
353 list_for_each_entry(fn, &sbi->fsync_node_list, list) { in f2fs_del_fsync_node_entry()
356 sbi->fsync_node_num--; in f2fs_del_fsync_node_entry()
357 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_del_fsync_node_entry()
363 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_del_fsync_node_entry()
364 f2fs_bug_on(sbi, 1); in f2fs_del_fsync_node_entry()
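Deletion walks the list under the same lock, drops the lock before freeing, and treats a miss as a bug (line 364). A hedged sketch; the fn->page comparison, list_del(), kmem_cache_free(), and put_page() steps are assumptions not present in the matched lines:

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {		/* match test assumed */
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);	/* the entry must have existed */
}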
367 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi) in f2fs_reset_fsync_node_info() argument
371 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_reset_fsync_node_info()
372 sbi->fsync_seg_id = 0; in f2fs_reset_fsync_node_info()
373 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_reset_fsync_node_info()
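Reset takes the same lock but only rewinds the sequence counter; everything except the flags local is visible at lines 371-373:

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}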
376 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_need_dentry_mark() argument
378 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_need_dentry_mark()
393 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_is_checkpointed_node() argument
395 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_is_checkpointed_node()
407 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) in f2fs_need_inode_block_update() argument
409 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_need_inode_block_update()
424 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, in cache_nat_entry() argument
427 struct f2fs_nm_info *nm_i = NM_I(sbi); in cache_nat_entry()
431 if (f2fs_rwsem_is_locked(&sbi->cp_global_sem)) in cache_nat_entry()
434 new = __alloc_nat_entry(sbi, nid, false); in cache_nat_entry()
443 f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) || in cache_nat_entry()
452 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, in set_node_addr() argument
455 struct f2fs_nm_info *nm_i = NM_I(sbi); in set_node_addr()
457 struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true); in set_node_addr()
464 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); in set_node_addr()
472 f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR); in set_node_addr()
479 f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr); in set_node_addr()
480 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR && in set_node_addr()
482 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR && in set_node_addr()
484 f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) && in set_node_addr()
511 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) in f2fs_try_to_free_nats() argument
513 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_try_to_free_nats()
542 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, in f2fs_get_node_info() argument
545 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_get_node_info()
546 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); in f2fs_get_node_info()
576 if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) { in f2fs_get_node_info()
596 index = current_nat_addr(sbi, nid); in f2fs_get_node_info()
599 page = f2fs_get_meta_page(sbi, index); in f2fs_get_node_info()
610 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) in f2fs_get_node_info()
614 cache_nat_entry(sbi, nid, &ne); in f2fs_get_node_info()
623 struct f2fs_sb_info *sbi = F2FS_P_SB(parent); in f2fs_ra_node_pages() local
635 f2fs_ra_node_page(sbi, nid); in f2fs_ra_node_pages()
759 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_get_dnode_of_data() local
776 npage[0] = f2fs_get_node_page(sbi, nids[0]); in f2fs_get_dnode_of_data()
800 if (!f2fs_alloc_nid(sbi, &(nids[i]))) { in f2fs_get_dnode_of_data()
808 f2fs_alloc_nid_failed(sbi, nids[i]); in f2fs_get_dnode_of_data()
814 f2fs_alloc_nid_done(sbi, nids[i]); in f2fs_get_dnode_of_data()
832 npage[i] = f2fs_get_node_page(sbi, nids[i]); in f2fs_get_dnode_of_data()
850 f2fs_sb_has_readonly(sbi)) { in f2fs_get_dnode_of_data()
887 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in truncate_node() local
892 err = f2fs_get_node_info(sbi, dn->nid, &ni, false); in truncate_node()
897 f2fs_invalidate_blocks(sbi, ni.blk_addr); in truncate_node()
898 dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino); in truncate_node()
899 set_node_addr(sbi, &ni, NULL_ADDR, false); in truncate_node()
902 f2fs_remove_orphan_inode(sbi, dn->nid); in truncate_node()
903 dec_valid_inode_count(sbi); in truncate_node()
908 set_sbi_flag(sbi, SBI_IS_DIRTY); in truncate_node()
913 invalidate_mapping_pages(NODE_MAPPING(sbi), in truncate_node()
1092 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_truncate_inode_blocks() local
1108 page = f2fs_get_node_page(sbi, inode->i_ino); in f2fs_truncate_inode_blocks()
1171 BUG_ON(page->mapping != NODE_MAPPING(sbi)); in f2fs_truncate_inode_blocks()
1190 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_truncate_xattr_node() local
1199 npage = f2fs_get_node_page(sbi, nid); in f2fs_truncate_xattr_node()
1275 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_new_node_page() local
1283 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false); in f2fs_new_node_page()
1287 if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs)))) in f2fs_new_node_page()
1291 err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false); in f2fs_new_node_page()
1293 dec_valid_node_count(sbi, dn->inode, !ofs); in f2fs_new_node_page()
1298 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_new_node_page()
1299 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); in f2fs_new_node_page()
1308 set_node_addr(sbi, &new_ni, NEW_ADDR, false); in f2fs_new_node_page()
1322 inc_valid_inode_count(sbi); in f2fs_new_node_page()
1338 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in read_node_page() local
1341 .sbi = sbi, in read_node_page()
1351 if (!f2fs_inode_chksum_verify(sbi, page)) { in read_node_page()
1358 err = f2fs_get_node_info(sbi, page->index, &ni, false); in read_node_page()
1364 is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) { in read_node_page()
1374 f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE); in read_node_page()
1382 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_ra_node_page() argument
1389 if (f2fs_check_nid_range(sbi, nid)) in f2fs_ra_node_page()
1392 apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid); in f2fs_ra_node_page()
1396 apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); in f2fs_ra_node_page()
1404 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, in __get_node_page() argument
1412 if (f2fs_check_nid_range(sbi, nid)) in __get_node_page()
1415 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); in __get_node_page()
1432 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { in __get_node_page()
1442 if (!f2fs_inode_chksum_verify(sbi, page)) { in __get_node_page()
1450 …f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blka… in __get_node_page()
1454 set_sbi_flag(sbi, SBI_NEED_FSCK); in __get_node_page()
1461 f2fs_handle_page_eio(sbi, page->index, NODE); in __get_node_page()
1466 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid) in f2fs_get_node_page() argument
1468 return __get_node_page(sbi, nid, NULL, 0); in f2fs_get_node_page()
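Lines 1466-1468 show that the public lookup is a thin wrapper: a NULL parent and start 0 mean no readahead, while f2fs_get_node_page_ra() below passes a parent page so siblings can be prefetched.

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}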
1473 struct f2fs_sb_info *sbi = F2FS_P_SB(parent); in f2fs_get_node_page_ra() local
1476 return __get_node_page(sbi, nid, parent, start); in f2fs_get_node_page_ra()
1479 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) in flush_inline_data() argument
1486 inode = ilookup(sbi->sb, ino); in flush_inline_data()
1515 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino) in last_fsync_dnode() argument
1525 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, in last_fsync_dnode()
1532 if (unlikely(f2fs_cp_error(sbi))) { in last_fsync_dnode()
1545 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { in last_fsync_dnode()
1575 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in __write_node_page() local
1579 .sbi = sbi, in __write_node_page()
1594 if (unlikely(f2fs_cp_error(sbi))) { in __write_node_page()
1596 dec_page_count(sbi, F2FS_DIRTY_NODES); in __write_node_page()
1601 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in __write_node_page()
1604 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) && in __write_node_page()
1611 f2fs_bug_on(sbi, page->index != nid); in __write_node_page()
1613 if (f2fs_get_node_info(sbi, nid, &ni, !do_balance)) in __write_node_page()
1617 if (!f2fs_down_read_trylock(&sbi->node_write)) in __write_node_page()
1620 f2fs_down_read(&sbi->node_write); in __write_node_page()
1626 dec_page_count(sbi, F2FS_DIRTY_NODES); in __write_node_page()
1627 f2fs_up_read(&sbi->node_write); in __write_node_page()
1633 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, in __write_node_page()
1635 f2fs_up_read(&sbi->node_write); in __write_node_page()
1639 if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi)) in __write_node_page()
1643 if (f2fs_in_warm_node_list(sbi, page)) { in __write_node_page()
1644 seq = f2fs_add_fsync_node_entry(sbi, page); in __write_node_page()
1654 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); in __write_node_page()
1655 dec_page_count(sbi, F2FS_DIRTY_NODES); in __write_node_page()
1656 f2fs_up_read(&sbi->node_write); in __write_node_page()
1659 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE); in __write_node_page()
1665 if (unlikely(f2fs_cp_error(sbi))) { in __write_node_page()
1666 f2fs_submit_merged_write(sbi, NODE); in __write_node_page()
1673 f2fs_balance_fs(sbi, false); in __write_node_page()
1726 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, in f2fs_fsync_node_pages() argument
1740 last_page = last_fsync_dnode(sbi, ino); in f2fs_fsync_node_pages()
1748 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, in f2fs_fsync_node_pages()
1756 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_fsync_node_pages()
1770 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { in f2fs_fsync_node_pages()
1790 percpu_counter_inc(&sbi->rf_node_block_count); in f2fs_fsync_node_pages()
1796 f2fs_need_dentry_mark(sbi, ino)); in f2fs_fsync_node_pages()
1831 f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx", in f2fs_fsync_node_pages()
1841 f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE); in f2fs_fsync_node_pages()
1847 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_match_ino() local
1856 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_match_ino()
1858 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_match_ino()
1871 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in flush_dirty_inode() local
1875 inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL); in flush_dirty_inode()
1886 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi) in f2fs_flush_inline_data() argument
1895 NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) { in f2fs_flush_inline_data()
1906 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { in f2fs_flush_inline_data()
1921 flush_inline_data(sbi, ino_of_node(page)); in f2fs_flush_inline_data()
1931 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, in f2fs_sync_node_pages() argument
1948 NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) { in f2fs_sync_node_pages()
1956 if (atomic_read(&sbi->wb_sync_req[NODE]) && in f2fs_sync_node_pages()
1982 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { in f2fs_sync_node_pages()
2001 flush_inline_data(sbi, ino_of_node(page)); in f2fs_sync_node_pages()
2037 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) && in f2fs_sync_node_pages()
2045 f2fs_submit_merged_write(sbi, NODE); in f2fs_sync_node_pages()
2047 if (unlikely(f2fs_cp_error(sbi))) in f2fs_sync_node_pages()
2052 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, in f2fs_wait_on_node_pages_writeback() argument
2057 struct list_head *head = &sbi->fsync_node_list; in f2fs_wait_on_node_pages_writeback()
2063 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2065 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2070 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2076 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2088 ret2 = filemap_check_errors(NODE_MAPPING(sbi)); in f2fs_wait_on_node_pages_writeback()
2098 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); in f2fs_write_node_pages() local
2102 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_write_node_pages()
2106 f2fs_balance_fs_bg(sbi, true); in f2fs_write_node_pages()
2110 get_pages(sbi, F2FS_DIRTY_NODES) < in f2fs_write_node_pages()
2111 nr_pages_to_skip(sbi, NODE)) in f2fs_write_node_pages()
2115 atomic_inc(&sbi->wb_sync_req[NODE]); in f2fs_write_node_pages()
2116 else if (atomic_read(&sbi->wb_sync_req[NODE])) { in f2fs_write_node_pages()
2125 diff = nr_pages_to_write(sbi, NODE, wbc); in f2fs_write_node_pages()
2127 f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO); in f2fs_write_node_pages()
2132 atomic_dec(&sbi->wb_sync_req[NODE]); in f2fs_write_node_pages()
2136 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES); in f2fs_write_node_pages()
2178 static int __insert_free_nid(struct f2fs_sb_info *sbi, in __insert_free_nid() argument
2181 struct f2fs_nm_info *nm_i = NM_I(sbi); in __insert_free_nid()
2192 static void __remove_free_nid(struct f2fs_sb_info *sbi, in __remove_free_nid() argument
2195 struct f2fs_nm_info *nm_i = NM_I(sbi); in __remove_free_nid()
2197 f2fs_bug_on(sbi, state != i->state); in __remove_free_nid()
2204 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i, in __move_free_nid() argument
2207 struct f2fs_nm_info *nm_i = NM_I(sbi); in __move_free_nid()
2209 f2fs_bug_on(sbi, org_state != i->state); in __move_free_nid()
2226 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi) in f2fs_nat_bitmap_enabled() argument
2228 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_nat_bitmap_enabled()
2244 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, in update_free_nid_bitmap() argument
2247 struct f2fs_nm_info *nm_i = NM_I(sbi); in update_free_nid_bitmap()
2269 static bool add_free_nid(struct f2fs_sb_info *sbi, in add_free_nid() argument
2272 struct f2fs_nm_info *nm_i = NM_I(sbi); in add_free_nid()
2282 if (unlikely(f2fs_check_nid_range(sbi, nid))) in add_free_nid()
2328 err = __insert_free_nid(sbi, i); in add_free_nid()
2331 update_free_nid_bitmap(sbi, nid, ret, build); in add_free_nid()
2343 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) in remove_free_nid() argument
2345 struct f2fs_nm_info *nm_i = NM_I(sbi); in remove_free_nid()
2352 __remove_free_nid(sbi, i, FREE_NID); in remove_free_nid()
2361 static int scan_nat_page(struct f2fs_sb_info *sbi, in scan_nat_page() argument
2364 struct f2fs_nm_info *nm_i = NM_I(sbi); in scan_nat_page()
2384 add_free_nid(sbi, start_nid, true, true); in scan_nat_page()
2386 spin_lock(&NM_I(sbi)->nid_list_lock); in scan_nat_page()
2387 update_free_nid_bitmap(sbi, start_nid, false, true); in scan_nat_page()
2388 spin_unlock(&NM_I(sbi)->nid_list_lock); in scan_nat_page()
2395 static void scan_curseg_cache(struct f2fs_sb_info *sbi) in scan_curseg_cache() argument
2397 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); in scan_curseg_cache()
2409 add_free_nid(sbi, nid, true, false); in scan_curseg_cache()
2411 remove_free_nid(sbi, nid); in scan_curseg_cache()
2416 static void scan_free_nid_bits(struct f2fs_sb_info *sbi) in scan_free_nid_bits() argument
2418 struct f2fs_nm_info *nm_i = NM_I(sbi); in scan_free_nid_bits()
2436 add_free_nid(sbi, nid, true, false); in scan_free_nid_bits()
2443 scan_curseg_cache(sbi); in scan_free_nid_bits()
2448 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, in __f2fs_build_free_nids() argument
2451 struct f2fs_nm_info *nm_i = NM_I(sbi); in __f2fs_build_free_nids()
2465 if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS)) in __f2fs_build_free_nids()
2470 scan_free_nid_bits(sbi); in __f2fs_build_free_nids()
2477 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, in __f2fs_build_free_nids()
2485 struct page *page = get_current_nat_page(sbi, nid); in __f2fs_build_free_nids()
2490 ret = scan_nat_page(sbi, page, nid); in __f2fs_build_free_nids()
2496 f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); in __f2fs_build_free_nids()
2513 scan_curseg_cache(sbi); in __f2fs_build_free_nids()
2517 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), in __f2fs_build_free_nids()
2523 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) in f2fs_build_free_nids() argument
2527 mutex_lock(&NM_I(sbi)->build_lock); in f2fs_build_free_nids()
2528 ret = __f2fs_build_free_nids(sbi, sync, mount); in f2fs_build_free_nids()
2529 mutex_unlock(&NM_I(sbi)->build_lock); in f2fs_build_free_nids()
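Lines 2527-2529 show that build_lock serializes free-nid scanning; only the ret local and the return statement join the matched lines:

int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	int ret;

	mutex_lock(&NM_I(sbi)->build_lock);
	ret = __f2fs_build_free_nids(sbi, sync, mount);
	mutex_unlock(&NM_I(sbi)->build_lock);

	return ret;
}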
2539 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) in f2fs_alloc_nid() argument
2541 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_alloc_nid()
2544 if (time_to_inject(sbi, FAULT_ALLOC_NID)) { in f2fs_alloc_nid()
2545 f2fs_show_injection_info(sbi, FAULT_ALLOC_NID); in f2fs_alloc_nid()
2558 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list)); in f2fs_alloc_nid()
2563 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID); in f2fs_alloc_nid()
2566 update_free_nid_bitmap(sbi, *nid, false, false); in f2fs_alloc_nid()
2574 if (!f2fs_build_free_nids(sbi, true, false)) in f2fs_alloc_nid()
2582 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_alloc_nid_done() argument
2584 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_alloc_nid_done()
2589 f2fs_bug_on(sbi, !i); in f2fs_alloc_nid_done()
2590 __remove_free_nid(sbi, i, PREALLOC_NID); in f2fs_alloc_nid_done()
2599 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) in f2fs_alloc_nid_failed() argument
2601 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_alloc_nid_failed()
2610 f2fs_bug_on(sbi, !i); in f2fs_alloc_nid_failed()
2612 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) { in f2fs_alloc_nid_failed()
2613 __remove_free_nid(sbi, i, PREALLOC_NID); in f2fs_alloc_nid_failed()
2616 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID); in f2fs_alloc_nid_failed()
2621 update_free_nid_bitmap(sbi, nid, true, false); in f2fs_alloc_nid_failed()
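On allocation failure the nid is returned to the free pool unless memory pressure says to drop it (lines 2612-2616). A hedged sketch; the nid_list_lock section, the __lookup_free_nid_list() helper, and the deferred kmem_cache_free() are assumptions beyond the matched lines:

void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);		/* locking assumed */
	i = __lookup_free_nid_list(nm_i, nid);		/* helper assumed */
	f2fs_bug_on(sbi, !i);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
		__remove_free_nid(sbi, i, PREALLOC_NID);
		need_free = true;
	} else {
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
	}

	update_free_nid_bitmap(sbi, nid, true, false);
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);	/* freeing assumed */
}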
2629 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) in f2fs_try_to_free_nids() argument
2631 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_try_to_free_nids()
2649 __remove_free_nid(sbi, i, FREE_NID); in f2fs_try_to_free_nids()
2701 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_recover_xattr_data() local
2713 err = f2fs_get_node_info(sbi, prev_xnid, &ni, false); in f2fs_recover_xattr_data()
2717 f2fs_invalidate_blocks(sbi, ni.blk_addr); in f2fs_recover_xattr_data()
2718 dec_valid_node_count(sbi, inode, false); in f2fs_recover_xattr_data()
2719 set_node_addr(sbi, &ni, NULL_ADDR, false); in f2fs_recover_xattr_data()
2723 if (!f2fs_alloc_nid(sbi, &new_xnid)) in f2fs_recover_xattr_data()
2729 f2fs_alloc_nid_failed(sbi, new_xnid); in f2fs_recover_xattr_data()
2733 f2fs_alloc_nid_done(sbi, new_xnid); in f2fs_recover_xattr_data()
2745 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) in f2fs_recover_inode_page() argument
2753 err = f2fs_get_node_info(sbi, ino, &old_ni, false); in f2fs_recover_inode_page()
2760 ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false); in f2fs_recover_inode_page()
2767 remove_free_nid(sbi, ino); in f2fs_recover_inode_page()
2786 if (f2fs_sb_has_flexible_inline_xattr(sbi) && in f2fs_recover_inode_page()
2791 if (f2fs_sb_has_project_quota(sbi) && in f2fs_recover_inode_page()
2796 if (f2fs_sb_has_inode_crtime(sbi) && in f2fs_recover_inode_page()
2807 if (unlikely(inc_valid_node_count(sbi, NULL, true))) in f2fs_recover_inode_page()
2809 set_node_addr(sbi, &new_ni, NEW_ADDR, false); in f2fs_recover_inode_page()
2810 inc_valid_inode_count(sbi); in f2fs_recover_inode_page()
2816 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, in f2fs_restore_node_summary() argument
2825 last_offset = sbi->blocks_per_seg; in f2fs_restore_node_summary()
2826 addr = START_BLOCK(sbi, segno); in f2fs_restore_node_summary()
2833 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true); in f2fs_restore_node_summary()
2836 struct page *page = f2fs_get_tmp_page(sbi, idx); in f2fs_restore_node_summary()
2849 invalidate_mapping_pages(META_MAPPING(sbi), addr, in f2fs_restore_node_summary()
2855 static void remove_nats_in_journal(struct f2fs_sb_info *sbi) in remove_nats_in_journal() argument
2857 struct f2fs_nm_info *nm_i = NM_I(sbi); in remove_nats_in_journal()
2858 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); in remove_nats_in_journal()
2868 if (f2fs_check_nid_range(sbi, nid)) in remove_nats_in_journal()
2875 ne = __alloc_nat_entry(sbi, nid, true); in remove_nats_in_journal()
2931 static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, in update_nat_bits() argument
2934 struct f2fs_nm_info *nm_i = NM_I(sbi); in update_nat_bits()
2940 if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) in update_nat_bits()
2955 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi) in f2fs_enable_nat_bits() argument
2957 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_enable_nat_bits()
2983 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi, in __flush_nat_entry_set() argument
2986 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); in __flush_nat_entry_set()
3006 page = get_next_nat_page(sbi, start_nid); in __flush_nat_entry_set()
3011 f2fs_bug_on(sbi, !nat_blk); in __flush_nat_entry_set()
3020 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR); in __flush_nat_entry_set()
3025 f2fs_bug_on(sbi, offset < 0); in __flush_nat_entry_set()
3033 __clear_nat_cache_dirty(NM_I(sbi), set, ne); in __flush_nat_entry_set()
3035 add_free_nid(sbi, nid, false, true); in __flush_nat_entry_set()
3037 spin_lock(&NM_I(sbi)->nid_list_lock); in __flush_nat_entry_set()
3038 update_free_nid_bitmap(sbi, nid, false, false); in __flush_nat_entry_set()
3039 spin_unlock(&NM_I(sbi)->nid_list_lock); in __flush_nat_entry_set()
3046 update_nat_bits(sbi, start_nid, page); in __flush_nat_entry_set()
3052 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set); in __flush_nat_entry_set()
3061 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) in f2fs_flush_nat_entries() argument
3063 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_flush_nat_entries()
3064 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); in f2fs_flush_nat_entries()
3079 remove_nats_in_journal(sbi); in f2fs_flush_nat_entries()
3096 remove_nats_in_journal(sbi); in f2fs_flush_nat_entries()
3110 err = __flush_nat_entry_set(sbi, set, cpc); in f2fs_flush_nat_entries()
3121 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) in __get_nat_bitmaps() argument
3123 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in __get_nat_bitmaps()
3124 struct f2fs_nm_info *nm_i = NM_I(sbi); in __get_nat_bitmaps()
3131 nm_i->nat_bits = f2fs_kvzalloc(sbi, in __get_nat_bitmaps()
3139 if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) in __get_nat_bitmaps()
3142 nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg - in __get_nat_bitmaps()
3147 page = f2fs_get_meta_page(sbi, nat_bits_addr++); in __get_nat_bitmaps()
3158 clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG); in __get_nat_bitmaps()
3159 f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)", in __get_nat_bitmaps()
3164 f2fs_notice(sbi, "Found nat_bits in checkpoint"); in __get_nat_bitmaps()
3168 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) in load_free_nid_bitmap() argument
3170 struct f2fs_nm_info *nm_i = NM_I(sbi); in load_free_nid_bitmap()
3174 if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) in load_free_nid_bitmap()
3187 spin_lock(&NM_I(sbi)->nid_list_lock); in load_free_nid_bitmap()
3189 update_free_nid_bitmap(sbi, nid, true, true); in load_free_nid_bitmap()
3190 spin_unlock(&NM_I(sbi)->nid_list_lock); in load_free_nid_bitmap()
3202 static int init_node_manager(struct f2fs_sb_info *sbi) in init_node_manager() argument
3204 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); in init_node_manager()
3205 struct f2fs_nm_info *nm_i = NM_I(sbi); in init_node_manager()
3218 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - in init_node_manager()
3238 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); in init_node_manager()
3239 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); in init_node_manager()
3240 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP); in init_node_manager()
3246 err = __get_nat_bitmaps(sbi); in init_node_manager()
3260 static int init_free_nid_cache(struct f2fs_sb_info *sbi) in init_free_nid_cache() argument
3262 struct f2fs_nm_info *nm_i = NM_I(sbi); in init_free_nid_cache()
3266 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *), in init_free_nid_cache()
3273 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi, in init_free_nid_cache()
3279 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8, in init_free_nid_cache()
3285 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short), in init_free_nid_cache()
3293 int f2fs_build_node_manager(struct f2fs_sb_info *sbi) in f2fs_build_node_manager() argument
3297 sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info), in f2fs_build_node_manager()
3299 if (!sbi->nm_info) in f2fs_build_node_manager()
3302 err = init_node_manager(sbi); in f2fs_build_node_manager()
3306 err = init_free_nid_cache(sbi); in f2fs_build_node_manager()
3311 load_free_nid_bitmap(sbi); in f2fs_build_node_manager()
3313 return f2fs_build_free_nids(sbi, true, true); in f2fs_build_node_manager()
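Lines 3297-3313 lay out the node manager's bring-up order: allocate nm_info, initialize it, build the free-nid cache, load the free-nid bitmap, then scan for free nids. The error handling between steps is an assumption; the listing shows only the calls themselves:

int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
					GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;	/* unwinding assumed */

	err = init_free_nid_cache(sbi);
	if (err)
		return err;	/* unwinding assumed */

	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}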
3316 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi) in f2fs_destroy_node_manager() argument
3318 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_destroy_node_manager()
3331 __remove_free_nid(sbi, i, FREE_NID); in f2fs_destroy_node_manager()
3336 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]); in f2fs_destroy_node_manager()
3337 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]); in f2fs_destroy_node_manager()
3338 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list)); in f2fs_destroy_node_manager()
3356 f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]); in f2fs_destroy_node_manager()
3367 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list)); in f2fs_destroy_node_manager()
3389 sbi->nm_info = NULL; in f2fs_destroy_node_manager()