Lines matching "im" in fs/f2fs/checkpoint.c (each entry: source line number, matched code, enclosing function)

1 // SPDX-License-Identifier: GPL-2.0
92 if (unlikely(page->mapping != mapping)) { in __get_meta_page()
99 return ERR_PTR(-EIO); in __get_meta_page()
118 if (PTR_ERR(page) == -EIO && in f2fs_get_meta_page_retry()
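Note: the fragments above form f2fs's bounded retry for meta-page reads. __get_meta_page() returns ERR_PTR(-EIO) when the page cannot be read, and f2fs_get_meta_page_retry() loops only on that error before giving up. A minimal userspace sketch of the same retry-on-EIO shape; read_block_once() and MAX_RETRIES are invented for the demo and are not f2fs APIs:

/* cc -o retry retry.c && ./retry */
#include <errno.h>
#include <stdio.h>

#define MAX_RETRIES 3           /* hypothetical bound; f2fs has its own */

static int attempts;

/* Stub that fails with -EIO twice, then succeeds, to exercise the loop. */
static int read_block_once(void)
{
        return (++attempts < 3) ? -EIO : 0;
}

/* Retry only on -EIO; any other error goes straight back to the caller. */
static int read_block_retry(void)
{
        int i, err = -EIO;

        for (i = 0; i < MAX_RETRIES && err == -EIO; i++)
                err = read_block_once();
        return err;
}

int main(void)
{
        printf("result %d after %d attempts\n", read_block_retry(), attempts);
        return 0;
}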
146 exist = f2fs_test_bit(offset, se->cur_valid_map); in __is_bitmap_valid()
168 blkaddr < SM_I(sbi)->ssa_blkaddr)) in f2fs_is_valid_blkaddr()
172 if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr || in f2fs_is_valid_blkaddr()
231 for (; nrpages-- > 0; blkno++) { in f2fs_ra_meta_pages()
239 NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))) in f2fs_ra_meta_pages()
279 return blkno - start; in f2fs_ra_meta_pages()
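Note: f2fs_ra_meta_pages() walks nrpages consecutive meta blocks, stops once an address leaves the valid range (the NAT bound check above is one such cut-off), and reports the number of blocks it actually issued as blkno - start. A compressed model of that counting loop; the bound and names are illustrative only:

#include <stdio.h>

#define MAX_VALID_BLK 1000      /* hypothetical end of the meta area */

/* Issue readahead for up to nrpages blocks from start; stop at the
 * first out-of-range address and return how many were issued. */
static long ra_blocks(long start, long nrpages)
{
        long blkno = start;

        for (; nrpages-- > 0; blkno++) {
                if (blkno >= MAX_VALID_BLK)
                        break;          /* past the valid range */
                /* submit the readahead for blkno here */
        }
        return blkno - start;
}

int main(void)
{
        printf("issued %ld of 8 requested\n", ra_blocks(996, 8));
        return 0;
}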
308 if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0)) in __f2fs_write_meta_page()
314 if (wbc->for_reclaim) in __f2fs_write_meta_page()
345 if (wbc->sync_mode != WB_SYNC_ALL && in f2fs_write_meta_pages()
351 if (!mutex_trylock(&sbi->cp_mutex)) in f2fs_write_meta_pages()
354 trace_f2fs_writepages(mapping->host, wbc, META); in f2fs_write_meta_pages()
356 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO); in f2fs_write_meta_pages()
357 mutex_unlock(&sbi->cp_mutex); in f2fs_write_meta_pages()
358 wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff); in f2fs_write_meta_pages()
362 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META); in f2fs_write_meta_pages()
363 trace_f2fs_writepages(mapping->host, wbc, META); in f2fs_write_meta_pages()
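Note: f2fs_write_meta_pages() throttles background writeback: if the request is not WB_SYNC_ALL and the dirty-meta count is below threshold, it skips the flush and adds the dirty count to wbc->pages_skipped; otherwise it flushes under cp_mutex (taken with trylock, so writeback skips rather than stalls behind a running checkpoint) and charges the result against the writeback budget, clamped at zero. The clamp in isolation:

#include <stdio.h>

/* Mirror of wbc->nr_to_write = max((long)0, nr_to_write - written - diff):
 * charge the flushed pages plus a rounding adjustment against the budget,
 * never letting it go negative. */
static long charge_writeback(long nr_to_write, long written, long diff)
{
        long left = nr_to_write - written - diff;

        return left > 0 ? left : 0;
}

int main(void)
{
        /* budget of 16 pages, 14 flushed, 4-page batching adjustment */
        printf("budget left: %ld\n", charge_writeback(16, 14, 4));
        return 0;
}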
392 prev = page->index - 1; in f2fs_sync_meta_pages()
393 if (nr_to_write != LONG_MAX && page->index != prev + 1) { in f2fs_sync_meta_pages()
400 if (unlikely(page->mapping != mapping)) { in f2fs_sync_meta_pages()
420 prev = page->index; in f2fs_sync_meta_pages()
466 struct inode_management *im = &sbi->im[type]; in __add_ino_entry() local
473 spin_lock(&im->ino_lock); in __add_ino_entry()
474 e = radix_tree_lookup(&im->ino_root, ino); in __add_ino_entry()
477 if (unlikely(radix_tree_insert(&im->ino_root, ino, e))) in __add_ino_entry()
481 e->ino = ino; in __add_ino_entry()
483 list_add_tail(&e->list, &im->ino_list); in __add_ino_entry()
485 im->ino_num++; in __add_ino_entry()
489 f2fs_set_bit(devidx, (char *)&e->dirty_device); in __add_ino_entry()
491 spin_unlock(&im->ino_lock); in __add_ino_entry()
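Note: __add_ino_entry() is the canonical insert path for the per-type inode_management sets: under im->ino_lock, look the ino up in im->ino_root (a radix tree), insert a fresh entry only if absent, queue it on im->ino_list, and bump im->ino_num; a per-device dirty bit may also be set. A simplified model with a linear table standing in for the radix tree (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

#define MAX_ENTRIES 64                  /* hypothetical capacity */

struct ino_entry { unsigned long ino; };

struct inode_management {
        pthread_mutex_t ino_lock;       /* spinlock in the kernel */
        struct ino_entry ino_list[MAX_ENTRIES];
        int ino_num;
};

/* Insert ino if not already tracked; returns 1 if it was present. */
static int add_ino_entry(struct inode_management *im, unsigned long ino)
{
        int i, found = 0;

        pthread_mutex_lock(&im->ino_lock);
        for (i = 0; i < im->ino_num; i++)
                if (im->ino_list[i].ino == ino)
                        found = 1;
        if (!found && im->ino_num < MAX_ENTRIES)
                im->ino_list[im->ino_num++].ino = ino;
        pthread_mutex_unlock(&im->ino_lock);
        return found;
}

int main(void)
{
        struct inode_management im = { PTHREAD_MUTEX_INITIALIZER, {{0}}, 0 };

        add_ino_entry(&im, 7);
        add_ino_entry(&im, 7);          /* duplicate: count stays at 1 */
        printf("ino_num = %d\n", im.ino_num);
        return 0;
}

The same lock/lookup/delete shape appears in __remove_ino_entry() and f2fs_release_ino_entry() below; only the list membership and im->ino_num move in the opposite direction.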
500 struct inode_management *im = &sbi->im[type]; in __remove_ino_entry() local
503 spin_lock(&im->ino_lock); in __remove_ino_entry()
504 e = radix_tree_lookup(&im->ino_root, ino); in __remove_ino_entry()
506 list_del(&e->list); in __remove_ino_entry()
507 radix_tree_delete(&im->ino_root, ino); in __remove_ino_entry()
508 im->ino_num--; in __remove_ino_entry()
509 spin_unlock(&im->ino_lock); in __remove_ino_entry()
513 spin_unlock(&im->ino_lock); in __remove_ino_entry()
531 struct inode_management *im = &sbi->im[mode]; in f2fs_exist_written_data() local
534 spin_lock(&im->ino_lock); in f2fs_exist_written_data()
535 e = radix_tree_lookup(&im->ino_root, ino); in f2fs_exist_written_data()
536 spin_unlock(&im->ino_lock); in f2fs_exist_written_data()
546 struct inode_management *im = &sbi->im[i]; in f2fs_release_ino_entry() local
548 spin_lock(&im->ino_lock); in f2fs_release_ino_entry()
549 list_for_each_entry_safe(e, tmp, &im->ino_list, list) { in f2fs_release_ino_entry()
550 list_del(&e->list); in f2fs_release_ino_entry()
551 radix_tree_delete(&im->ino_root, e->ino); in f2fs_release_ino_entry()
553 im->ino_num--; in f2fs_release_ino_entry()
555 spin_unlock(&im->ino_lock); in f2fs_release_ino_entry()
568 struct inode_management *im = &sbi->im[type]; in f2fs_is_dirty_device() local
572 spin_lock(&im->ino_lock); in f2fs_is_dirty_device()
573 e = radix_tree_lookup(&im->ino_root, ino); in f2fs_is_dirty_device()
574 if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device)) in f2fs_is_dirty_device()
576 spin_unlock(&im->ino_lock); in f2fs_is_dirty_device()
582 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_acquire_orphan_inode() local
585 spin_lock(&im->ino_lock); in f2fs_acquire_orphan_inode()
588 spin_unlock(&im->ino_lock); in f2fs_acquire_orphan_inode()
590 return -ENOSPC; in f2fs_acquire_orphan_inode()
593 if (unlikely(im->ino_num >= sbi->max_orphans)) in f2fs_acquire_orphan_inode()
594 err = -ENOSPC; in f2fs_acquire_orphan_inode()
596 im->ino_num++; in f2fs_acquire_orphan_inode()
597 spin_unlock(&im->ino_lock); in f2fs_acquire_orphan_inode()
604 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_release_orphan_inode() local
606 spin_lock(&im->ino_lock); in f2fs_release_orphan_inode()
607 f2fs_bug_on(sbi, im->ino_num == 0); in f2fs_release_orphan_inode()
608 im->ino_num--; in f2fs_release_orphan_inode()
609 spin_unlock(&im->ino_lock); in f2fs_release_orphan_inode()
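Note: the orphan counter doubles as a disk-space reservation. f2fs_acquire_orphan_inode() fails with -ENOSPC once im->ino_num reaches sbi->max_orphans (the number of ino slots the checkpoint pack can persist), and f2fs_release_orphan_inode() gives the slot back behind an underflow guard. The accounting in miniature, locking elided for brevity:

#include <errno.h>
#include <stdio.h>

static int ino_num;
static const int max_orphans = 2;       /* tiny limit for the demo */

static int acquire_orphan_slot(void)
{
        if (ino_num >= max_orphans)
                return -ENOSPC;         /* orphan area would overflow */
        ino_num++;
        return 0;
}

static void release_orphan_slot(void)
{
        if (ino_num > 0)                /* f2fs_bug_on() guards this */
                ino_num--;
}

int main(void)
{
        printf("%d %d %d\n", acquire_orphan_slot(),
               acquire_orphan_slot(), acquire_orphan_slot());
        release_orphan_slot();
        return 0;
}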
615 __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO); in f2fs_add_orphan_inode()
631 inode = f2fs_iget_retry(sbi->sb, ino); in recover_orphan_inode()
637 f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT); in recover_orphan_inode()
658 err = -EIO; in recover_orphan_inode()
673 unsigned int s_flags = sbi->sb->s_flags; in f2fs_recover_orphan_inodes()
682 if (bdev_read_only(sbi->sb->s_bdev)) { in f2fs_recover_orphan_inodes()
689 sbi->sb->s_flags &= ~SB_RDONLY; in f2fs_recover_orphan_inodes()
694 sbi->sb->s_flags |= SB_ACTIVE; in f2fs_recover_orphan_inodes()
697 * Turn on quotas which were not enabled for read-only mounts if in f2fs_recover_orphan_inodes()
704 orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi); in f2fs_recover_orphan_inodes()
719 for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) { in f2fs_recover_orphan_inodes()
720 nid_t ino = le32_to_cpu(orphan_blk->ino[j]); in f2fs_recover_orphan_inodes()
737 f2fs_quota_off_umount(sbi->sb); in f2fs_recover_orphan_inodes()
739 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ in f2fs_recover_orphan_inodes()
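Note: orphan recovery needs write access, because each recovered orphan is dropped and its blocks truncated, so f2fs_recover_orphan_inodes() saves s_flags, clears SB_RDONLY for the duration, and restores the saved flags on the way out, as the last fragment shows. The save/flip/restore pattern in miniature (SB_RDONLY's value is mirrored from the kernel):

#include <stdio.h>

#define SB_RDONLY 0x1

static unsigned long s_flags = SB_RDONLY;       /* mounted read-only */

int main(void)
{
        unsigned long saved = s_flags;

        s_flags &= ~SB_RDONLY;          /* permit the orphans' truncation */
        /* ... iget + iput each orphan ino here ... */
        s_flags = saved;                /* restore SB_RDONLY status */
        printf("rdonly bit after recovery: %lu\n", s_flags & SB_RDONLY);
        return 0;
}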
753 struct inode_management *im = &sbi->im[ORPHAN_INO]; in write_orphan_inodes() local
755 orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num); in write_orphan_inodes()
758 * we don't need to do spin_lock(&im->ino_lock) here, since all the in write_orphan_inodes()
762 head = &im->ino_list; in write_orphan_inodes()
773 orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino); in write_orphan_inodes()
781 orphan_blk->blk_addr = cpu_to_le16(index); in write_orphan_inodes()
782 orphan_blk->blk_count = cpu_to_le16(orphan_blocks); in write_orphan_inodes()
783 orphan_blk->entry_count = cpu_to_le32(nentries); in write_orphan_inodes()
793 orphan_blk->blk_addr = cpu_to_le16(index); in write_orphan_inodes()
794 orphan_blk->blk_count = cpu_to_le16(orphan_blocks); in write_orphan_inodes()
795 orphan_blk->entry_count = cpu_to_le32(nentries); in write_orphan_inodes()
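Note: write_orphan_inodes() serializes the in-memory orphan list into fixed-size on-disk blocks: inos are appended until a block fills, the block header is stamped with its index, the total block count, and its entry count, and the final partial block is flushed the same way, which is why the header stores appear twice in the listing. A self-contained model; the struct is a simplified stand-in for f2fs_orphan_block:

#include <stdio.h>
#include <string.h>

#define ORPHANS_PER_BLOCK 4     /* 1020 in f2fs with 4 KB blocks */

struct orphan_block {
        unsigned int ino[ORPHANS_PER_BLOCK];
        unsigned short blk_addr;        /* 1-based index of this block */
        unsigned short blk_count;       /* total orphan blocks in the pack */
        unsigned int entry_count;       /* valid inos in this block */
};

static void flush_block(struct orphan_block *blk)
{
        printf("block %u/%u holds %u inos\n",
               blk->blk_addr, blk->blk_count, blk->entry_count);
        memset(blk, 0, sizeof(*blk));
}

int main(void)
{
        unsigned int orphans[] = { 3, 5, 8, 13, 21, 34 };
        unsigned int n = sizeof(orphans) / sizeof(orphans[0]);
        unsigned short nblocks = (n + ORPHANS_PER_BLOCK - 1) / ORPHANS_PER_BLOCK;
        struct orphan_block blk = { {0} };
        unsigned short blk_index = 1;
        unsigned int i, nentries = 0;

        for (i = 0; i < n; i++) {
                blk.ino[nentries++] = orphans[i];
                if (nentries == ORPHANS_PER_BLOCK) {    /* block is full */
                        blk.blk_addr = blk_index++;
                        blk.blk_count = nblocks;
                        blk.entry_count = nentries;
                        flush_block(&blk);
                        nentries = 0;
                }
        }
        if (nentries) {                                 /* final partial block */
                blk.blk_addr = blk_index;
                blk.blk_count = nblocks;
                blk.entry_count = nentries;
                flush_block(&blk);
        }
        return 0;
}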
804 unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset); in f2fs_checkpoint_chksum()
811 F2FS_BLKSIZE - chksum_ofs); in f2fs_checkpoint_chksum()
829 crc_offset = le32_to_cpu((*cp_block)->checksum_offset); in get_checkpoint_version()
834 return -EINVAL; in get_checkpoint_version()
841 return -EINVAL; in get_checkpoint_version()
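Note: f2fs_checkpoint_chksum() covers the entire checkpoint block except the 4-byte checksum field itself: it checksums [0, checksum_offset) and, when the field is not at the block tail, resumes just past it up to F2FS_BLKSIZE. get_checkpoint_version() then rejects blocks whose stored offset or checksum do not verify (-EINVAL). A sketch of the two-span computation with a trivial byte sum standing in for f2fs's CRC32:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLKSIZE 4096

static uint32_t byte_sum(uint32_t acc, const uint8_t *p, size_t len)
{
        while (len--)
                acc += *p++;
        return acc;
}

/* Sum everything except the 4-byte field at chksum_ofs. */
static uint32_t cp_chksum(const uint8_t *blk, size_t chksum_ofs)
{
        uint32_t c = byte_sum(0, blk, chksum_ofs);

        return byte_sum(c, blk + chksum_ofs + sizeof(uint32_t),
                        BLKSIZE - chksum_ofs - sizeof(uint32_t));
}

int main(void)
{
        static uint8_t blk[BLKSIZE];
        size_t ofs = BLKSIZE - sizeof(uint32_t);        /* tail position */

        memset(blk, 1, BLKSIZE);
        printf("%u\n", cp_chksum(blk, ofs));
        memset(blk + ofs, 0xff, sizeof(uint32_t));      /* scribble the field */
        printf("%u\n", cp_chksum(blk, ofs));            /* unchanged */
        return 0;
}

Excluding the field from its own coverage is what lets the checksum be written in place after it is computed.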
861 if (le32_to_cpu(cp_block->cp_pack_total_block_count) > in validate_checkpoint()
862 sbi->blocks_per_seg) { in validate_checkpoint()
864 le32_to_cpu(cp_block->cp_pack_total_block_count)); in validate_checkpoint()
869 cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1; in validate_checkpoint()
890 struct f2fs_super_block *fsb = sbi->raw_super; in f2fs_get_valid_checkpoint()
892 unsigned long blk_size = sbi->blocksize; in f2fs_get_valid_checkpoint()
900 sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks), in f2fs_get_valid_checkpoint()
902 if (!sbi->ckpt) in f2fs_get_valid_checkpoint()
903 return -ENOMEM; in f2fs_get_valid_checkpoint()
908 cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr); in f2fs_get_valid_checkpoint()
913 le32_to_cpu(fsb->log_blocks_per_seg); in f2fs_get_valid_checkpoint()
926 err = -EFSCORRUPTED; in f2fs_get_valid_checkpoint()
931 memcpy(sbi->ckpt, cp_block, blk_size); in f2fs_get_valid_checkpoint()
934 sbi->cur_cp_pack = 1; in f2fs_get_valid_checkpoint()
936 sbi->cur_cp_pack = 2; in f2fs_get_valid_checkpoint()
940 err = -EFSCORRUPTED; in f2fs_get_valid_checkpoint()
947 cp_blk_no = le32_to_cpu(fsb->cp_blkaddr); in f2fs_get_valid_checkpoint()
949 cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg); in f2fs_get_valid_checkpoint()
953 unsigned char *ckpt = (unsigned char *)sbi->ckpt; in f2fs_get_valid_checkpoint()
973 kvfree(sbi->ckpt); in f2fs_get_valid_checkpoint()
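Note: f2fs_get_valid_checkpoint() reads both checkpoint packs (cp1 at fsb->cp_blkaddr, cp2 one segment later) and keeps whichever valid pack carries the newer version, recording the winner in sbi->cur_cp_pack. validate_checkpoint() only trusts a pack whose first and last blocks store the same version, which filters out packs torn by a crash mid-write. A simplified model (the kernel compares versions with a wraparound-safe ver_after(), reduced to >= here):

#include <stdio.h>

struct cp_pack {
        unsigned long long hdr_ver;     /* version in the first block */
        unsigned long long ftr_ver;     /* version in the last block */
};

/* Usable only if header and footer agree; returns 0 when invalid. */
static unsigned long long validate_pack(const struct cp_pack *cp)
{
        return cp->hdr_ver == cp->ftr_ver ? cp->hdr_ver : 0;
}

static int pick_cur_cp_pack(const struct cp_pack *cp1,
                            const struct cp_pack *cp2)
{
        unsigned long long v1 = validate_pack(cp1);
        unsigned long long v2 = validate_pack(cp2);

        if (!v1 && !v2)
                return 0;               /* both corrupt: mount fails */
        return v1 >= v2 ? 1 : 2;        /* prefer the newer pack */
}

int main(void)
{
        struct cp_pack cp1 = { 41, 41 };        /* older but complete */
        struct cp_pack cp2 = { 42, 41 };        /* torn write: rejected */

        printf("cur_cp_pack = %d\n", pick_cur_cp_pack(&cp1, &cp2));
        return 0;
}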
987 list_add_tail(&F2FS_I(inode)->dirty_list, in __add_dirty_inode()
988 &sbi->inode_list[type]); in __add_dirty_inode()
999 list_del_init(&F2FS_I(inode)->dirty_list); in __remove_dirty_inode()
1007 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE; in f2fs_update_dirty_page()
1009 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && in f2fs_update_dirty_page()
1010 !S_ISLNK(inode->i_mode)) in f2fs_update_dirty_page()
1013 spin_lock(&sbi->inode_lock[type]); in f2fs_update_dirty_page()
1017 spin_unlock(&sbi->inode_lock[type]); in f2fs_update_dirty_page()
1026 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE; in f2fs_remove_dirty_inode()
1028 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && in f2fs_remove_dirty_inode()
1029 !S_ISLNK(inode->i_mode)) in f2fs_remove_dirty_inode()
1035 spin_lock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
1037 spin_unlock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
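Note: the dirty-inode tracking above buckets inodes by type: directories go on the DIR_INODE list, regular files and symlinks on FILE_INODE, and anything else (devices, fifos, sockets) is not tracked at all, since only those three kinds can carry dirty data pages f2fs must flush before a checkpoint. Just the classification:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

enum inode_type { DIR_INODE, FILE_INODE };

/* Returns the dirty-list bucket for mode, or -1 when untracked. */
static int dirty_list_type(mode_t mode)
{
        if (!S_ISDIR(mode) && !S_ISREG(mode) && !S_ISLNK(mode))
                return -1;
        return S_ISDIR(mode) ? DIR_INODE : FILE_INODE;
}

int main(void)
{
        printf("dir:%d reg:%d fifo:%d\n",
               dirty_list_type(S_IFDIR), dirty_list_type(S_IFREG),
               dirty_list_type(S_IFIFO));
        return 0;
}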
1048 trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1053 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1056 return -EIO; in f2fs_sync_dirty_inodes()
1059 spin_lock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1061 head = &sbi->inode_list[type]; in f2fs_sync_dirty_inodes()
1063 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1064 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1070 inode = igrab(&fi->vfs_inode); in f2fs_sync_dirty_inodes()
1071 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1073 unsigned long cur_ino = inode->i_ino; in f2fs_sync_dirty_inodes()
1075 F2FS_I(inode)->cp_task = current; in f2fs_sync_dirty_inodes()
1077 filemap_fdatawrite(inode->i_mapping); in f2fs_sync_dirty_inodes()
1079 F2FS_I(inode)->cp_task = NULL; in f2fs_sync_dirty_inodes()
1100 struct list_head *head = &sbi->inode_list[DIRTY_META]; in f2fs_sync_inode_meta()
1105 while (total--) { in f2fs_sync_inode_meta()
1107 return -EIO; in f2fs_sync_inode_meta()
1109 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1111 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1116 inode = igrab(&fi->vfs_inode); in f2fs_sync_inode_meta()
1117 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
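Note: both sync loops above use the same reference dance: take the inode_lock spinlock, pin the first listed inode with igrab(), drop the lock, and only then do the slow writeback, so the lock is never held across I/O. A userspace model with a mutex and a manual refcount in place of the spinlock and igrab()/iput() (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

struct obj { int refs; int dirty; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj pool[3] = { {1, 1}, {1, 1}, {1, 1} };

/* Lock, pin one dirty object, unlock. NULL means nothing left. */
static struct obj *pick_dirty(void)
{
        struct obj *o = NULL;
        int i;

        pthread_mutex_lock(&list_lock);
        for (i = 0; i < 3 && !o; i++)
                if (pool[i].dirty) {
                        o = &pool[i];
                        o->refs++;      /* igrab(): pin before unlocking */
                }
        pthread_mutex_unlock(&list_lock);
        return o;
}

int main(void)
{
        struct obj *o;

        while ((o = pick_dirty())) {
                o->dirty = 0;           /* slow writeback runs unlocked */
                o->refs--;              /* iput() */
        }
        printf("all clean\n");
        return 0;
}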
1134 nid_t last_nid = nm_i->next_scan_nid; in __prepare_cp_block()
1137 ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); in __prepare_cp_block()
1138 ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi)); in __prepare_cp_block()
1139 ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi)); in __prepare_cp_block()
1140 ckpt->next_free_nid = cpu_to_le32(last_nid); in __prepare_cp_block()
1150 down_write(&sbi->quota_sem); in __need_flush_quota()
1161 up_write(&sbi->quota_sem); in __need_flush_quota()
1166 * Freeze all the FS-operations for checkpoint.
1195 locked = down_read_trylock(&sbi->sb->s_umount); in block_operations()
1196 f2fs_quota_sync(sbi->sb, -1); in block_operations()
1198 up_read(&sbi->sb->s_umount); in block_operations()
1216 * until finishing nat/sit flush. inode->i_blocks can be updated. in block_operations()
1218 down_write(&sbi->node_change); in block_operations()
1221 up_write(&sbi->node_change); in block_operations()
1231 down_write(&sbi->node_write); in block_operations()
1234 up_write(&sbi->node_write); in block_operations()
1235 atomic_inc(&sbi->wb_sync_req[NODE]); in block_operations()
1237 atomic_dec(&sbi->wb_sync_req[NODE]); in block_operations()
1239 up_write(&sbi->node_change); in block_operations()
1248 * sbi->node_change is used only for AIO write_begin path which produces in block_operations()
1252 up_write(&sbi->node_change); in block_operations()
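Note: block_operations() freezes the filesystem in stages (dirty inodes, inode metadata, then node pages, taking node_change and node_write along the way), and each stage loops: flush, take the lock, and if new dirt raced in before the lock landed, unlock and flush again until the system is quiescent under the lock. The drain-until-quiescent retry in miniature, with the locking reduced to comments:

#include <stdio.h>

static int dirty = 5;

static void flush_all(void)             /* writeback outside the lock */
{
        dirty = 0;
}

static int raced_in(void)               /* dirt created before the lock */
{
        static int races = 2;

        return races-- > 0;
}

int main(void)
{
        int passes = 0;

        do {
                flush_all();
                /* lock(); recheck the dirty state under the lock */
                if (raced_in())
                        dirty++;        /* unlock() and go around again */
                passes++;
        } while (dirty);
        printf("quiescent after %d passes\n", passes);
        return 0;
}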
1258 up_write(&sbi->node_write); in unblock_operations()
1279 prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); in f2fs_wait_on_all_pages()
1282 finish_wait(&sbi->cp_wait, &wait); in f2fs_wait_on_all_pages()
1287 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num; in update_ckpt_flags()
1291 spin_lock_irqsave(&sbi->cp_lock, flags); in update_ckpt_flags()
1293 if ((cpc->reason & CP_UMOUNT) && in update_ckpt_flags()
1294 le32_to_cpu(ckpt->cp_pack_total_block_count) > in update_ckpt_flags()
1295 sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks) in update_ckpt_flags()
1298 if (cpc->reason & CP_TRIMMED) in update_ckpt_flags()
1303 if (cpc->reason & CP_UMOUNT) in update_ckpt_flags()
1308 if (cpc->reason & CP_FASTBOOT) in update_ckpt_flags()
1348 spin_unlock_irqrestore(&sbi->cp_lock, flags); in update_ckpt_flags()
1392 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags; in do_checkpoint()
1398 struct super_block *sb = sbi->sb; in do_checkpoint()
1407 ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true)); in do_checkpoint()
1408 ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); in do_checkpoint()
1410 ckpt->cur_node_segno[i] = in do_checkpoint()
1412 ckpt->cur_node_blkoff[i] = in do_checkpoint()
1414 ckpt->alloc_type[i + CURSEG_HOT_NODE] = in do_checkpoint()
1418 ckpt->cur_data_segno[i] = in do_checkpoint()
1420 ckpt->cur_data_blkoff[i] = in do_checkpoint()
1422 ckpt->alloc_type[i + CURSEG_HOT_DATA] = in do_checkpoint()
1428 spin_lock_irqsave(&sbi->cp_lock, flags); in do_checkpoint()
1433 spin_unlock_irqrestore(&sbi->cp_lock, flags); in do_checkpoint()
1436 ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks + in do_checkpoint()
1439 if (__remain_node_summaries(cpc->reason)) in do_checkpoint()
1440 ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS + in do_checkpoint()
1444 ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS + in do_checkpoint()
1457 le32_to_cpu(ckpt->checksum_offset))) in do_checkpoint()
1468 *(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver); in do_checkpoint()
1470 blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks; in do_checkpoint()
1471 for (i = 0; i < nm_i->nat_bits_blocks; i++) in do_checkpoint()
1472 f2fs_update_meta_page(sbi, nm_i->nat_bits + in do_checkpoint()
1492 kbytes_written = sbi->kbytes_written; in do_checkpoint()
1493 if (sb->s_bdev->bd_part) in do_checkpoint()
1496 seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written); in do_checkpoint()
1498 if (__remain_node_summaries(cpc->reason)) { in do_checkpoint()
1504 sbi->last_valid_block_count = sbi->total_valid_block_count; in do_checkpoint()
1505 percpu_counter_set(&sbi->alloc_valid_block_count, 0); in do_checkpoint()
1531 MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1); in do_checkpoint()
1541 spin_lock(&sbi->stat_lock); in do_checkpoint()
1542 sbi->unusable_block_count = 0; in do_checkpoint()
1543 spin_unlock(&sbi->stat_lock); in do_checkpoint()
1557 return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0; in do_checkpoint()
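Note: do_checkpoint() lays the pack out as [cp header][cp payload][orphan blocks][summaries][cp footer], so cp_pack_start_sum is 1 + payload + orphan blocks, and cp_pack_total_block_count adds the two header/footer blocks (F2FS_CP_PACKS) plus, when node summaries must survive (umount/fastboot), NR_CURSEG_NODE_TYPE more. The arithmetic on its own, with illustrative inputs:

#include <stdio.h>

#define F2FS_CP_PACKS           2       /* cp header + cp footer block */
#define NR_CURSEG_NODE_TYPE     3       /* hot/warm/cold node summaries */

static void pack_layout(unsigned payload, unsigned orphans,
                        unsigned data_sum, int keep_node_sums)
{
        unsigned start_sum = 1 + payload + orphans;
        unsigned total = F2FS_CP_PACKS + payload + orphans + data_sum;

        if (keep_node_sums)
                total += NR_CURSEG_NODE_TYPE;
        printf("summaries at blk %u, pack is %u blocks\n", start_sum, total);
}

int main(void)
{
        pack_layout(0, 1, 3, 1);        /* e.g. umount with one orphan block */
        return 0;
}

Everything between header and footer is written first; the footer going out last, after f2fs_wait_on_all_pages(), is what turns the matching version pair in header and footer into an atomic commit record.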
1566 if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi)) in f2fs_write_checkpoint()
1567 return -EROFS; in f2fs_write_checkpoint()
1570 if (cpc->reason != CP_PAUSE) in f2fs_write_checkpoint()
1574 if (cpc->reason != CP_RESIZE) in f2fs_write_checkpoint()
1575 mutex_lock(&sbi->cp_mutex); in f2fs_write_checkpoint()
1578 ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) || in f2fs_write_checkpoint()
1579 ((cpc->reason & CP_DISCARD) && !sbi->discard_blks))) in f2fs_write_checkpoint()
1582 err = -EIO; in f2fs_write_checkpoint()
1586 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops"); in f2fs_write_checkpoint()
1592 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops"); in f2fs_write_checkpoint()
1597 if (cpc->reason & CP_DISCARD) { in f2fs_write_checkpoint()
1603 if (NM_I(sbi)->dirty_nat_cnt == 0 && in f2fs_write_checkpoint()
1604 SIT_I(sbi)->dirty_sentries == 0 && in f2fs_write_checkpoint()
1619 ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver); in f2fs_write_checkpoint()
1640 stat_inc_cp_count(sbi->stat_info); in f2fs_write_checkpoint()
1642 if (cpc->reason & CP_RECOVERY) in f2fs_write_checkpoint()
1647 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); in f2fs_write_checkpoint()
1649 if (cpc->reason != CP_RESIZE) in f2fs_write_checkpoint()
1650 mutex_unlock(&sbi->cp_mutex); in f2fs_write_checkpoint()
1659 struct inode_management *im = &sbi->im[i]; in f2fs_init_ino_entry_info() local
1661 INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC); in f2fs_init_ino_entry_info()
1662 spin_lock_init(&im->ino_lock); in f2fs_init_ino_entry_info()
1663 INIT_LIST_HEAD(&im->ino_list); in f2fs_init_ino_entry_info()
1664 im->ino_num = 0; in f2fs_init_ino_entry_info()
1667 sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS - in f2fs_init_ino_entry_info()
1668 NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) * in f2fs_init_ino_entry_info()
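Note: the max_orphans bound set above is everything left in the checkpoint segment once the cp header/footer (F2FS_CP_PACKS), the persisted curseg summaries, and the cp payload are subtracted, multiplied by the ino capacity of one orphan block (the multiplier is truncated out of the listing; in the kernel it is F2FS_ORPHANS_PER_BLOCK, 1020 with 4 KB blocks). As a standalone calculation:

#include <stdio.h>

#define F2FS_CP_PACKS            2      /* cp header + footer blocks */
#define NR_CURSEG_PERSIST_TYPE   6      /* hot/warm/cold data + node */
#define F2FS_ORPHANS_PER_BLOCK   1020   /* ino slots per 4 KB orphan block */

static unsigned long max_orphans(unsigned blocks_per_seg, unsigned cp_payload)
{
        return (unsigned long)(blocks_per_seg - F2FS_CP_PACKS -
                NR_CURSEG_PERSIST_TYPE - cp_payload) * F2FS_ORPHANS_PER_BLOCK;
}

int main(void)
{
        /* default 512-block segments, no extra cp payload */
        printf("max_orphans = %lu\n", max_orphans(512, 0));
        return 0;
}

With the defaults that is 504 * 1020 = 514080 orphan inodes, matching the "1020*504" note in the kernel source's comment.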
1677 return -ENOMEM; in f2fs_create_checkpoint_caches()
1682 return -ENOMEM; in f2fs_create_checkpoint_caches()