Lines matching "im" in fs/f2fs/checkpoint.c (Linux f2fs checkpoint code); each hit shows the source line number, the matching line, and the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0
99 if (unlikely(page->mapping != mapping)) { in __get_meta_page()
105 f2fs_handle_page_eio(sbi, page->index, META); in __get_meta_page()
107 return ERR_PTR(-EIO); in __get_meta_page()
126 if (PTR_ERR(page) == -EIO && in f2fs_get_meta_page_retry()
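The __get_meta_page() and f2fs_get_meta_page_retry() hits above show the usual kernel error-pointer convention: a failed meta read comes back as ERR_PTR(-EIO), and the retry wrapper re-issues the read a bounded number of times when it sees exactly that code. Below is a minimal userspace sketch of that pattern; the stub, the retry limit and the helper names are illustrative assumptions, not the f2fs implementation.

/*
 * Userspace sketch of the ERR_PTR()/PTR_ERR() retry pattern seen in
 * f2fs_get_meta_page_retry(); stub, retry limit and main() are illustrative.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define MAX_RETRIES	8	/* assumption: plays the role of the kernel's retry bound */

static char fake_page[4096];

/* pretend the first two reads fail with -EIO, then succeed */
static void *get_meta_page_stub(int attempt)
{
	return attempt < 2 ? ERR_PTR(-EIO) : (void *)fake_page;
}

static void *get_meta_page_retry(void)
{
	void *page;
	int count = 0;

retry:
	page = get_meta_page_stub(count);
	if (IS_ERR(page) && PTR_ERR(page) == -EIO && ++count <= MAX_RETRIES)
		goto retry;
	return page;
}

int main(void)
{
	void *page = get_meta_page_retry();

	if (IS_ERR(page))
		printf("meta read failed: %ld\n", PTR_ERR(page));
	else
		printf("meta read succeeded after retries\n");
	return 0;
}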
154 exist = f2fs_test_bit(offset, se->cur_valid_map); in __is_bitmap_valid()
183 blkaddr < SM_I(sbi)->ssa_blkaddr)) in f2fs_is_valid_blkaddr()
187 if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr || in f2fs_is_valid_blkaddr()
247 for (; nrpages-- > 0; blkno++) { in f2fs_ra_meta_pages()
255 NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))) in f2fs_ra_meta_pages()
296 return blkno - start; in f2fs_ra_meta_pages()
329 if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0)) in __f2fs_write_meta_page()
335 if (wbc->for_reclaim) in __f2fs_write_meta_page()
366 if (wbc->sync_mode != WB_SYNC_ALL && in f2fs_write_meta_pages()
372 if (!f2fs_down_write_trylock(&sbi->cp_global_sem)) in f2fs_write_meta_pages()
375 trace_f2fs_writepages(mapping->host, wbc, META); in f2fs_write_meta_pages()
377 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO); in f2fs_write_meta_pages()
378 f2fs_up_write(&sbi->cp_global_sem); in f2fs_write_meta_pages()
379 wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff); in f2fs_write_meta_pages()
383 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META); in f2fs_write_meta_pages()
384 trace_f2fs_writepages(mapping->host, wbc, META); in f2fs_write_meta_pages()
413 prev = page->index - 1; in f2fs_sync_meta_pages()
414 if (nr_to_write != LONG_MAX && page->index != prev + 1) { in f2fs_sync_meta_pages()
421 if (unlikely(page->mapping != mapping)) { in f2fs_sync_meta_pages()
441 prev = page->index; in f2fs_sync_meta_pages()
460 trace_f2fs_set_page_dirty(&folio->page, META); in f2fs_dirty_meta_folio()
466 set_page_private_reference(&folio->page); in f2fs_dirty_meta_folio()
484 struct inode_management *im = &sbi->im[type]; in __add_ino_entry() local
489 e = radix_tree_lookup(&im->ino_root, ino); in __add_ino_entry()
500 spin_lock(&im->ino_lock); in __add_ino_entry()
501 e = radix_tree_lookup(&im->ino_root, ino); in __add_ino_entry()
504 spin_unlock(&im->ino_lock); in __add_ino_entry()
508 if (unlikely(radix_tree_insert(&im->ino_root, ino, e))) in __add_ino_entry()
512 e->ino = ino; in __add_ino_entry()
514 list_add_tail(&e->list, &im->ino_list); in __add_ino_entry()
516 im->ino_num++; in __add_ino_entry()
520 f2fs_set_bit(devidx, (char *)&e->dirty_device); in __add_ino_entry()
522 spin_unlock(&im->ino_lock); in __add_ino_entry()
531 struct inode_management *im = &sbi->im[type]; in __remove_ino_entry() local
534 spin_lock(&im->ino_lock); in __remove_ino_entry()
535 e = radix_tree_lookup(&im->ino_root, ino); in __remove_ino_entry()
537 list_del(&e->list); in __remove_ino_entry()
538 radix_tree_delete(&im->ino_root, ino); in __remove_ino_entry()
539 im->ino_num--; in __remove_ino_entry()
540 spin_unlock(&im->ino_lock); in __remove_ino_entry()
544 spin_unlock(&im->ino_lock); in __remove_ino_entry()
562 struct inode_management *im = &sbi->im[mode]; in f2fs_exist_written_data() local
565 spin_lock(&im->ino_lock); in f2fs_exist_written_data()
566 e = radix_tree_lookup(&im->ino_root, ino); in f2fs_exist_written_data()
567 spin_unlock(&im->ino_lock); in f2fs_exist_written_data()
577 struct inode_management *im = &sbi->im[i]; in f2fs_release_ino_entry() local
579 spin_lock(&im->ino_lock); in f2fs_release_ino_entry()
580 list_for_each_entry_safe(e, tmp, &im->ino_list, list) { in f2fs_release_ino_entry()
581 list_del(&e->list); in f2fs_release_ino_entry()
582 radix_tree_delete(&im->ino_root, e->ino); in f2fs_release_ino_entry()
584 im->ino_num--; in f2fs_release_ino_entry()
586 spin_unlock(&im->ino_lock); in f2fs_release_ino_entry()
599 struct inode_management *im = &sbi->im[type]; in f2fs_is_dirty_device() local
603 spin_lock(&im->ino_lock); in f2fs_is_dirty_device()
604 e = radix_tree_lookup(&im->ino_root, ino); in f2fs_is_dirty_device()
605 if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device)) in f2fs_is_dirty_device()
607 spin_unlock(&im->ino_lock); in f2fs_is_dirty_device()
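The __add_ino_entry(), __remove_ino_entry(), f2fs_exist_written_data() and f2fs_is_dirty_device() hits above all operate on one inode_management instance per tracking type: a radix tree for lookup by inode number, a list for ordered traversal and an entry count, all serialized by ino_lock, with __add_ino_entry() inserting only when the lookup taken under the lock comes back empty. The userspace sketch below models that lookup-or-insert discipline with a mutex and a linked list in place of the radix tree; struct ino_mgmt and the helper names are illustrative.

#include <pthread.h>
#include <stdlib.h>

struct ino_entry {
	unsigned long ino;
	struct ino_entry *next;
};

struct ino_mgmt {			/* models struct inode_management */
	pthread_mutex_t lock;		/* models im->ino_lock */
	struct ino_entry *head;		/* models im->ino_root + im->ino_list */
	unsigned long num;		/* models im->ino_num */
};

static struct ino_entry *lookup(struct ino_mgmt *im, unsigned long ino)
{
	for (struct ino_entry *e = im->head; e; e = e->next)
		if (e->ino == ino)
			return e;
	return NULL;
}

/* add ino only if it is not tracked yet, like __add_ino_entry() */
static void add_ino_entry(struct ino_mgmt *im, unsigned long ino)
{
	struct ino_entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->ino = ino;
	pthread_mutex_lock(&im->lock);
	if (lookup(im, ino)) {		/* already present: drop the new node */
		pthread_mutex_unlock(&im->lock);
		free(e);
		return;
	}
	e->next = im->head;
	im->head = e;
	im->num++;
	pthread_mutex_unlock(&im->lock);
}

/* drop ino if present, like __remove_ino_entry() */
static void remove_ino_entry(struct ino_mgmt *im, unsigned long ino)
{
	pthread_mutex_lock(&im->lock);
	for (struct ino_entry **p = &im->head; *p; p = &(*p)->next) {
		if ((*p)->ino == ino) {
			struct ino_entry *e = *p;

			*p = e->next;
			im->num--;
			free(e);
			break;
		}
	}
	pthread_mutex_unlock(&im->lock);
}

int main(void)
{
	struct ino_mgmt im = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	add_ino_entry(&im, 7);
	add_ino_entry(&im, 7);		/* duplicate: ignored, num stays 1 */
	remove_ino_entry(&im, 7);
	return (int)im.num;		/* 0 */
}

In the kernel the entry is allocated (and the radix tree preloaded) before the spinlock is taken, so nothing inside the critical section can sleep.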
613 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_acquire_orphan_inode() local
616 spin_lock(&im->ino_lock); in f2fs_acquire_orphan_inode()
619 spin_unlock(&im->ino_lock); in f2fs_acquire_orphan_inode()
621 return -ENOSPC; in f2fs_acquire_orphan_inode()
624 if (unlikely(im->ino_num >= sbi->max_orphans)) in f2fs_acquire_orphan_inode()
625 err = -ENOSPC; in f2fs_acquire_orphan_inode()
627 im->ino_num++; in f2fs_acquire_orphan_inode()
628 spin_unlock(&im->ino_lock); in f2fs_acquire_orphan_inode()
635 struct inode_management *im = &sbi->im[ORPHAN_INO]; in f2fs_release_orphan_inode() local
637 spin_lock(&im->ino_lock); in f2fs_release_orphan_inode()
638 f2fs_bug_on(sbi, im->ino_num == 0); in f2fs_release_orphan_inode()
639 im->ino_num--; in f2fs_release_orphan_inode()
640 spin_unlock(&im->ino_lock); in f2fs_release_orphan_inode()
646 __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO); in f2fs_add_orphan_inode()
662 inode = f2fs_iget_retry(sbi->sb, ino); in recover_orphan_inode()
668 f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT); in recover_orphan_inode()
689 err = -EIO; in recover_orphan_inode()
704 unsigned int s_flags = sbi->sb->s_flags; in f2fs_recover_orphan_inodes()
713 if (bdev_read_only(sbi->sb->s_bdev)) { in f2fs_recover_orphan_inodes()
720 sbi->sb->s_flags &= ~SB_RDONLY; in f2fs_recover_orphan_inodes()
725 * Turn on quotas which were not enabled for read-only mounts if in f2fs_recover_orphan_inodes()
732 orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi); in f2fs_recover_orphan_inodes()
747 for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) { in f2fs_recover_orphan_inodes()
748 nid_t ino = le32_to_cpu(orphan_blk->ino[j]); in f2fs_recover_orphan_inodes()
766 f2fs_quota_off_umount(sbi->sb); in f2fs_recover_orphan_inodes()
768 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ in f2fs_recover_orphan_inodes()
782 struct inode_management *im = &sbi->im[ORPHAN_INO]; in write_orphan_inodes() local
784 orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num); in write_orphan_inodes()
787 * we don't need to do spin_lock(&im->ino_lock) here, since all the in write_orphan_inodes()
791 head = &im->ino_list; in write_orphan_inodes()
802 orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino); in write_orphan_inodes()
810 orphan_blk->blk_addr = cpu_to_le16(index); in write_orphan_inodes()
811 orphan_blk->blk_count = cpu_to_le16(orphan_blocks); in write_orphan_inodes()
812 orphan_blk->entry_count = cpu_to_le32(nentries); in write_orphan_inodes()
822 orphan_blk->blk_addr = cpu_to_le16(index); in write_orphan_inodes()
823 orphan_blk->blk_count = cpu_to_le16(orphan_blocks); in write_orphan_inodes()
824 orphan_blk->entry_count = cpu_to_le32(nentries); in write_orphan_inodes()
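write_orphan_inodes() above walks im->ino_list for ORPHAN_INO and packs the inode numbers into orphan metadata blocks, stamping each block's blk_addr (its index within the run), blk_count (the total number of orphan blocks) and entry_count. The sketch below shows the capacity arithmetic behind GET_ORPHAN_BLOCKS(); the per-block constant assumes 4 KiB blocks and should be treated as an assumption for other block sizes.

#include <stdio.h>

/*
 * With 4 KiB blocks an orphan block holds 1020 little-endian inode numbers
 * plus a small footer (blk_addr, blk_count, entry_count, checksum); treat
 * the constant as an assumption for other block sizes.
 */
#define ORPHANS_PER_BLOCK	1020

/* mirrors GET_ORPHAN_BLOCKS(): blocks needed for n orphan inodes */
static unsigned int orphan_blocks(unsigned int nr_orphans)
{
	return (nr_orphans + ORPHANS_PER_BLOCK - 1) / ORPHANS_PER_BLOCK;
}

int main(void)
{
	unsigned int n = 2500;

	/* 2500 orphans -> 3 blocks: 1020 + 1020 + 460 entries */
	printf("%u orphans need %u orphan blocks\n", n, orphan_blocks(n));
	return 0;
}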
833 unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset); in f2fs_checkpoint_chksum()
840 F2FS_BLKSIZE - chksum_ofs); in f2fs_checkpoint_chksum()
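f2fs_checkpoint_chksum() covers the whole checkpoint block while excluding the stored checksum itself: it checksums everything before checksum_offset, then everything after the 4-byte checksum field up to the end of the block. The sketch below reproduces that split; the 4 KiB block size and crc_update() are stand-ins (the kernel uses its own crc32 helpers), so only the skip-the-field structure is the point here.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLKSIZE		4096	/* assumption: 4 KiB checkpoint block */

/* stand-in for the kernel crc32 helper; any rolling checksum shows the split */
static uint32_t crc_update(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--)
		crc = (crc << 1) ^ (crc >> 31) ^ *p++;
	return crc;
}

/* checksum everything except the 4-byte field at chksum_ofs */
static uint32_t checkpoint_chksum(const uint8_t *blk, uint32_t chksum_ofs)
{
	uint32_t crc = crc_update(0, blk, chksum_ofs);

	if (chksum_ofs + sizeof(uint32_t) < BLKSIZE) {
		uint32_t after = chksum_ofs + sizeof(uint32_t);

		crc = crc_update(crc, blk + after, BLKSIZE - after);
	}
	return crc;
}

int main(void)
{
	uint8_t blk[BLKSIZE];
	uint32_t ofs = 200;	/* illustrative checksum_offset */

	memset(blk, 0xab, sizeof(blk));
	printf("crc = %#x\n", checkpoint_chksum(blk, ofs));
	return 0;
}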
858 crc_offset = le32_to_cpu((*cp_block)->checksum_offset); in get_checkpoint_version()
863 return -EINVAL; in get_checkpoint_version()
870 return -EINVAL; in get_checkpoint_version()
891 cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count); in validate_checkpoint()
893 if (cp_blocks > sbi->blocks_per_seg || cp_blocks <= F2FS_CP_PACKS) { in validate_checkpoint()
895 le32_to_cpu(cp_block->cp_pack_total_block_count)); in validate_checkpoint()
900 cp_addr += cp_blocks - 1; in validate_checkpoint()
921 struct f2fs_super_block *fsb = sbi->raw_super; in f2fs_get_valid_checkpoint()
923 unsigned long blk_size = sbi->blocksize; in f2fs_get_valid_checkpoint()
931 sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks), in f2fs_get_valid_checkpoint()
933 if (!sbi->ckpt) in f2fs_get_valid_checkpoint()
934 return -ENOMEM; in f2fs_get_valid_checkpoint()
939 cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr); in f2fs_get_valid_checkpoint()
944 le32_to_cpu(fsb->log_blocks_per_seg); in f2fs_get_valid_checkpoint()
957 err = -EFSCORRUPTED; in f2fs_get_valid_checkpoint()
962 memcpy(sbi->ckpt, cp_block, blk_size); in f2fs_get_valid_checkpoint()
965 sbi->cur_cp_pack = 1; in f2fs_get_valid_checkpoint()
967 sbi->cur_cp_pack = 2; in f2fs_get_valid_checkpoint()
971 err = -EFSCORRUPTED; in f2fs_get_valid_checkpoint()
978 cp_blk_no = le32_to_cpu(fsb->cp_blkaddr); in f2fs_get_valid_checkpoint()
980 cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg); in f2fs_get_valid_checkpoint()
984 unsigned char *ckpt = (unsigned char *)sbi->ckpt; in f2fs_get_valid_checkpoint()
1004 kvfree(sbi->ckpt); in f2fs_get_valid_checkpoint()
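f2fs_get_valid_checkpoint() reads both checkpoint packs (the second pack begins one segment after cp_blkaddr, which is what the 1 << log_blocks_per_seg offset computes), validates each one, and keeps whichever carries the newer version, recording the winner in cur_cp_pack. validate_checkpoint() trusts a pack only if the version in its first block matches the version in its last block (cp_addr + cp_blocks - 1), which is how a torn checkpoint write is detected. The toy model below reduces each pack to its two version copies; all names are illustrative and version wraparound is ignored.

#include <stdint.h>
#include <stdio.h>

struct cp_pack {
	uint64_t head_ver;	/* version in the pack's first block */
	uint64_t tail_ver;	/* version in the pack's last block */
};

/* a pack is usable only when both copies of the version agree */
static int pack_version(const struct cp_pack *cp, uint64_t *ver)
{
	if (cp->head_ver != cp->tail_ver)
		return -1;	/* torn write: reject this pack */
	*ver = cp->head_ver;
	return 0;
}

/* pick the newer of the two packs, like f2fs_get_valid_checkpoint() */
static int pick_pack(const struct cp_pack *cp1, const struct cp_pack *cp2)
{
	uint64_t v1, v2;
	int ok1 = pack_version(cp1, &v1) == 0;
	int ok2 = pack_version(cp2, &v2) == 0;

	if (ok1 && ok2)
		return v1 >= v2 ? 1 : 2;	/* cur_cp_pack = 1 or 2 */
	if (ok1)
		return 1;
	if (ok2)
		return 2;
	return 0;				/* no valid checkpoint */
}

int main(void)
{
	struct cp_pack cp1 = { .head_ver = 41, .tail_ver = 41 };
	struct cp_pack cp2 = { .head_ver = 42, .tail_ver = 7 };	/* torn */

	printf("use pack %d\n", pick_pack(&cp1, &cp2));	/* -> 1 */
	return 0;
}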
1017 list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]); in __add_dirty_inode()
1028 list_del_init(&F2FS_I(inode)->dirty_list); in __remove_dirty_inode()
1036 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE; in f2fs_update_dirty_folio()
1038 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && in f2fs_update_dirty_folio()
1039 !S_ISLNK(inode->i_mode)) in f2fs_update_dirty_folio()
1042 spin_lock(&sbi->inode_lock[type]); in f2fs_update_dirty_folio()
1046 spin_unlock(&sbi->inode_lock[type]); in f2fs_update_dirty_folio()
1048 set_page_private_reference(&folio->page); in f2fs_update_dirty_folio()
1054 enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE; in f2fs_remove_dirty_inode()
1056 if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && in f2fs_remove_dirty_inode()
1057 !S_ISLNK(inode->i_mode)) in f2fs_remove_dirty_inode()
1063 spin_lock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
1065 spin_unlock(&sbi->inode_lock[type]); in f2fs_remove_dirty_inode()
1077 trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1082 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1085 return -EIO; in f2fs_sync_dirty_inodes()
1088 spin_lock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1090 head = &sbi->inode_list[type]; in f2fs_sync_dirty_inodes()
1092 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1093 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, in f2fs_sync_dirty_inodes()
1099 inode = igrab(&fi->vfs_inode); in f2fs_sync_dirty_inodes()
1100 spin_unlock(&sbi->inode_lock[type]); in f2fs_sync_dirty_inodes()
1102 unsigned long cur_ino = inode->i_ino; in f2fs_sync_dirty_inodes()
1105 F2FS_I(inode)->cp_task = current; in f2fs_sync_dirty_inodes()
1106 F2FS_I(inode)->wb_task = current; in f2fs_sync_dirty_inodes()
1108 filemap_fdatawrite(inode->i_mapping); in f2fs_sync_dirty_inodes()
1110 F2FS_I(inode)->wb_task = NULL; in f2fs_sync_dirty_inodes()
1112 F2FS_I(inode)->cp_task = NULL; in f2fs_sync_dirty_inodes()
1133 struct list_head *head = &sbi->inode_list[DIRTY_META]; in f2fs_sync_inode_meta()
1138 while (total--) { in f2fs_sync_inode_meta()
1140 return -EIO; in f2fs_sync_inode_meta()
1142 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1144 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1149 inode = igrab(&fi->vfs_inode); in f2fs_sync_inode_meta()
1150 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_sync_inode_meta()
1167 nid_t last_nid = nm_i->next_scan_nid; in __prepare_cp_block()
1170 ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); in __prepare_cp_block()
1171 ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi)); in __prepare_cp_block()
1172 ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi)); in __prepare_cp_block()
1173 ckpt->next_free_nid = cpu_to_le32(last_nid); in __prepare_cp_block()
1183 if (!f2fs_down_write_trylock(&sbi->quota_sem)) in __need_flush_quota()
1195 f2fs_up_write(&sbi->quota_sem); in __need_flush_quota()
1200 * Freeze all the FS-operations for checkpoint.
1229 locked = down_read_trylock(&sbi->sb->s_umount); in block_operations()
1230 f2fs_quota_sync(sbi->sb, -1); in block_operations()
1232 up_read(&sbi->sb->s_umount); in block_operations()
1250 * until finishing nat/sit flush. inode->i_blocks can be updated. in block_operations()
1252 f2fs_down_write(&sbi->node_change); in block_operations()
1255 f2fs_up_write(&sbi->node_change); in block_operations()
1265 f2fs_down_write(&sbi->node_write); in block_operations()
1268 f2fs_up_write(&sbi->node_write); in block_operations()
1269 atomic_inc(&sbi->wb_sync_req[NODE]); in block_operations()
1271 atomic_dec(&sbi->wb_sync_req[NODE]); in block_operations()
1273 f2fs_up_write(&sbi->node_change); in block_operations()
1282 * sbi->node_change is used only for AIO write_begin path which produces in block_operations()
1286 f2fs_up_write(&sbi->node_change); in block_operations()
1292 f2fs_up_write(&sbi->node_write); in unblock_operations()
1313 prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); in f2fs_wait_on_all_pages()
1316 finish_wait(&sbi->cp_wait, &wait); in f2fs_wait_on_all_pages()
1321 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num; in update_ckpt_flags()
1325 if (cpc->reason & CP_UMOUNT) { in update_ckpt_flags()
1326 if (le32_to_cpu(ckpt->cp_pack_total_block_count) + in update_ckpt_flags()
1327 NM_I(sbi)->nat_bits_blocks > sbi->blocks_per_seg) { in update_ckpt_flags()
1338 spin_lock_irqsave(&sbi->cp_lock, flags); in update_ckpt_flags()
1340 if (cpc->reason & CP_TRIMMED) in update_ckpt_flags()
1345 if (cpc->reason & CP_UMOUNT) in update_ckpt_flags()
1350 if (cpc->reason & CP_FASTBOOT) in update_ckpt_flags()
1390 spin_unlock_irqrestore(&sbi->cp_lock, flags); in update_ckpt_flags()
1441 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_get_sectors_written()
1447 return get_sectors_written(sbi->sb->s_bdev); in f2fs_get_sectors_written()
1454 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags; in do_checkpoint()
1468 ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true)); in do_checkpoint()
1469 ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); in do_checkpoint()
1471 ckpt->cur_node_segno[i] = in do_checkpoint()
1473 ckpt->cur_node_blkoff[i] = in do_checkpoint()
1475 ckpt->alloc_type[i + CURSEG_HOT_NODE] = in do_checkpoint()
1479 ckpt->cur_data_segno[i] = in do_checkpoint()
1481 ckpt->cur_data_blkoff[i] = in do_checkpoint()
1483 ckpt->alloc_type[i + CURSEG_HOT_DATA] = in do_checkpoint()
1489 spin_lock_irqsave(&sbi->cp_lock, flags); in do_checkpoint()
1494 spin_unlock_irqrestore(&sbi->cp_lock, flags); in do_checkpoint()
1497 ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks + in do_checkpoint()
1500 if (__remain_node_summaries(cpc->reason)) in do_checkpoint()
1501 ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS + in do_checkpoint()
1505 ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS + in do_checkpoint()
1518 le32_to_cpu(ckpt->checksum_offset))) in do_checkpoint()
1524 if ((cpc->reason & CP_UMOUNT) && in do_checkpoint()
1530 *(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver); in do_checkpoint()
1532 blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks; in do_checkpoint()
1533 for (i = 0; i < nm_i->nat_bits_blocks; i++) in do_checkpoint()
1534 f2fs_update_meta_page(sbi, nm_i->nat_bits + in do_checkpoint()
1554 kbytes_written = sbi->kbytes_written; in do_checkpoint()
1555 kbytes_written += (f2fs_get_sectors_written(sbi) - in do_checkpoint()
1556 sbi->sectors_written_start) >> 1; in do_checkpoint()
1557 seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written); in do_checkpoint()
1559 if (__remain_node_summaries(cpc->reason)) { in do_checkpoint()
1565 sbi->last_valid_block_count = sbi->total_valid_block_count; in do_checkpoint()
1566 percpu_counter_set(&sbi->alloc_valid_block_count, 0); in do_checkpoint()
1567 percpu_counter_set(&sbi->rf_node_block_count, 0); in do_checkpoint()
1593 MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1); in do_checkpoint()
1603 spin_lock(&sbi->stat_lock); in do_checkpoint()
1604 sbi->unusable_block_count = 0; in do_checkpoint()
1605 spin_unlock(&sbi->stat_lock); in do_checkpoint()
1619 return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0; in do_checkpoint()
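The kbytes_written lines a few entries above add the 512-byte sectors written since mount, shifted right by one, to the total carried over from the previous checkpoint before storing it in a current-segment summary journal; the shift is simply the sector-to-KiB conversion. A tiny example with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long start = 1000, now = 9192;	/* 512-byte sectors */
	unsigned long long kb = (now - start) >> 1;	/* 8192 sectors = 4096 KiB */

	printf("%llu KiB written since mount\n", kb);
	return 0;
}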
1628 if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi)) in f2fs_write_checkpoint()
1629 return -EROFS; in f2fs_write_checkpoint()
1632 if (cpc->reason != CP_PAUSE) in f2fs_write_checkpoint()
1636 if (cpc->reason != CP_RESIZE) in f2fs_write_checkpoint()
1637 f2fs_down_write(&sbi->cp_global_sem); in f2fs_write_checkpoint()
1640 ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) || in f2fs_write_checkpoint()
1641 ((cpc->reason & CP_DISCARD) && !sbi->discard_blks))) in f2fs_write_checkpoint()
1644 err = -EIO; in f2fs_write_checkpoint()
1648 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops"); in f2fs_write_checkpoint()
1654 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops"); in f2fs_write_checkpoint()
1659 if (cpc->reason & CP_DISCARD) { in f2fs_write_checkpoint()
1665 if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 && in f2fs_write_checkpoint()
1666 SIT_I(sbi)->dirty_sentries == 0 && in f2fs_write_checkpoint()
1681 ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver); in f2fs_write_checkpoint()
1708 stat_inc_cp_count(sbi->stat_info); in f2fs_write_checkpoint()
1710 if (cpc->reason & CP_RECOVERY) in f2fs_write_checkpoint()
1715 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); in f2fs_write_checkpoint()
1717 if (cpc->reason != CP_RESIZE) in f2fs_write_checkpoint()
1718 f2fs_up_write(&sbi->cp_global_sem); in f2fs_write_checkpoint()
1727 struct inode_management *im = &sbi->im[i]; in f2fs_init_ino_entry_info() local
1729 INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC); in f2fs_init_ino_entry_info()
1730 spin_lock_init(&im->ino_lock); in f2fs_init_ino_entry_info()
1731 INIT_LIST_HEAD(&im->ino_list); in f2fs_init_ino_entry_info()
1732 im->ino_num = 0; in f2fs_init_ino_entry_info()
1735 sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS - in f2fs_init_ino_entry_info()
1736 NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) * in f2fs_init_ino_entry_info()
1745 return -ENOMEM; in f2fs_create_checkpoint_caches()
1750 return -ENOMEM; in f2fs_create_checkpoint_caches()
1766 f2fs_down_write(&sbi->gc_lock); in __write_checkpoint_sync()
1768 f2fs_up_write(&sbi->gc_lock); in __write_checkpoint_sync()
1775 struct ckpt_req_control *cprc = &sbi->cprc_info; in __checkpoint_and_complete_reqs()
1781 dispatch_list = llist_del_all(&cprc->issue_list); in __checkpoint_and_complete_reqs()
1787 atomic_inc(&cprc->issued_ckpt); in __checkpoint_and_complete_reqs()
1790 diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time); in __checkpoint_and_complete_reqs()
1791 req->ret = ret; in __checkpoint_and_complete_reqs()
1792 complete(&req->wait); in __checkpoint_and_complete_reqs()
1797 atomic_sub(count, &cprc->queued_ckpt); in __checkpoint_and_complete_reqs()
1798 atomic_add(count, &cprc->total_ckpt); in __checkpoint_and_complete_reqs()
1800 spin_lock(&cprc->stat_lock); in __checkpoint_and_complete_reqs()
1801 cprc->cur_time = (unsigned int)div64_u64(sum_diff, count); in __checkpoint_and_complete_reqs()
1802 if (cprc->peak_time < cprc->cur_time) in __checkpoint_and_complete_reqs()
1803 cprc->peak_time = cprc->cur_time; in __checkpoint_and_complete_reqs()
1804 spin_unlock(&cprc->stat_lock); in __checkpoint_and_complete_reqs()
1810 struct ckpt_req_control *cprc = &sbi->cprc_info; in issue_checkpoint_thread()
1811 wait_queue_head_t *q = &cprc->ckpt_wait_queue; in issue_checkpoint_thread()
1816 if (!llist_empty(&cprc->issue_list)) in issue_checkpoint_thread()
1820 kthread_should_stop() || !llist_empty(&cprc->issue_list)); in issue_checkpoint_thread()
1827 struct ckpt_req_control *cprc = &sbi->cprc_info; in flush_remained_ckpt_reqs()
1829 if (!llist_empty(&cprc->issue_list)) { in flush_remained_ckpt_reqs()
1834 wait_for_completion(&wait_req->wait); in flush_remained_ckpt_reqs()
1842 init_completion(&req->wait); in init_ckpt_req()
1843 req->queue_time = ktime_get(); in init_ckpt_req()
1848 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_issue_checkpoint()
1856 f2fs_down_write(&sbi->gc_lock); in f2fs_issue_checkpoint()
1858 f2fs_up_write(&sbi->gc_lock); in f2fs_issue_checkpoint()
1863 if (!cprc->f2fs_issue_ckpt) in f2fs_issue_checkpoint()
1868 llist_add(&req.llnode, &cprc->issue_list); in f2fs_issue_checkpoint()
1869 atomic_inc(&cprc->queued_ckpt); in f2fs_issue_checkpoint()
1878 if (waitqueue_active(&cprc->ckpt_wait_queue)) in f2fs_issue_checkpoint()
1879 wake_up(&cprc->ckpt_wait_queue); in f2fs_issue_checkpoint()
1881 if (cprc->f2fs_issue_ckpt) in f2fs_issue_checkpoint()
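f2fs_issue_checkpoint() and issue_checkpoint_thread() implement checkpoint request merging: each caller pushes a ckpt_req onto the lock-free issue_list, wakes the checkpoint kthread, and sleeps on a completion; the thread takes the whole list at once with llist_del_all(), runs a single checkpoint, and then completes every queued request with the same return code, so concurrent callers share the cost of one checkpoint. The userspace pthread model below shows the same merge-and-complete shape, with a mutex and condition variables standing in for the llist, waitqueue and completion; all names are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ckpt_req {
	int ret;
	bool done;
	struct ckpt_req *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t more_work = PTHREAD_COND_INITIALIZER;	/* ~ ckpt_wait_queue */
static pthread_cond_t work_done = PTHREAD_COND_INITIALIZER;	/* ~ struct completion */
static struct ckpt_req *issue_list;				/* ~ cprc->issue_list */
static bool stop;

static int write_checkpoint_once(void)
{
	return 0;	/* stand-in for the single expensive checkpoint */
}

static void *ckpt_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		struct ckpt_req *batch = issue_list;

		if (!batch) {
			pthread_cond_wait(&more_work, &lock);
			continue;
		}
		issue_list = NULL;	/* grab every queued request, like llist_del_all() */
		pthread_mutex_unlock(&lock);

		int ret = write_checkpoint_once();	/* one checkpoint for the whole batch */

		pthread_mutex_lock(&lock);
		for (struct ckpt_req *r = batch; r; r = r->next) {
			r->ret = ret;
			r->done = true;		/* like complete(&req->wait) */
		}
		pthread_cond_broadcast(&work_done);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int issue_checkpoint(void)
{
	struct ckpt_req req = { .ret = 0, .done = false, .next = NULL };

	pthread_mutex_lock(&lock);
	req.next = issue_list;			/* like llist_add() */
	issue_list = &req;
	pthread_cond_signal(&more_work);	/* like wake_up(&cprc->ckpt_wait_queue) */
	while (!req.done)
		pthread_cond_wait(&work_done, &lock);	/* like wait_for_completion() */
	pthread_mutex_unlock(&lock);
	return req.ret;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, ckpt_thread, NULL);
	printf("checkpoint returned %d\n", issue_checkpoint());

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&more_work);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}

This batching is the machinery behind the checkpoint=merge mount option, where many concurrent checkpoint requests collapse into a single on-disk checkpoint.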
1891 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_start_ckpt_thread()
1892 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_start_ckpt_thread()
1894 if (cprc->f2fs_issue_ckpt) in f2fs_start_ckpt_thread()
1897 cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi, in f2fs_start_ckpt_thread()
1898 "f2fs_ckpt-%u:%u", MAJOR(dev), MINOR(dev)); in f2fs_start_ckpt_thread()
1899 if (IS_ERR(cprc->f2fs_issue_ckpt)) { in f2fs_start_ckpt_thread()
1900 cprc->f2fs_issue_ckpt = NULL; in f2fs_start_ckpt_thread()
1901 return -ENOMEM; in f2fs_start_ckpt_thread()
1904 set_task_ioprio(cprc->f2fs_issue_ckpt, cprc->ckpt_thread_ioprio); in f2fs_start_ckpt_thread()
1911 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_stop_ckpt_thread()
1914 if (!cprc->f2fs_issue_ckpt) in f2fs_stop_ckpt_thread()
1917 ckpt_task = cprc->f2fs_issue_ckpt; in f2fs_stop_ckpt_thread()
1918 cprc->f2fs_issue_ckpt = NULL; in f2fs_stop_ckpt_thread()
1926 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_flush_ckpt_thread()
1931 while (atomic_read(&cprc->queued_ckpt)) in f2fs_flush_ckpt_thread()
1937 struct ckpt_req_control *cprc = &sbi->cprc_info; in f2fs_init_ckpt_req_control()
1939 atomic_set(&cprc->issued_ckpt, 0); in f2fs_init_ckpt_req_control()
1940 atomic_set(&cprc->total_ckpt, 0); in f2fs_init_ckpt_req_control()
1941 atomic_set(&cprc->queued_ckpt, 0); in f2fs_init_ckpt_req_control()
1942 cprc->ckpt_thread_ioprio = DEFAULT_CHECKPOINT_IOPRIO; in f2fs_init_ckpt_req_control()
1943 init_waitqueue_head(&cprc->ckpt_wait_queue); in f2fs_init_ckpt_req_control()
1944 init_llist_head(&cprc->issue_list); in f2fs_init_ckpt_req_control()
1945 spin_lock_init(&cprc->stat_lock); in f2fs_init_ckpt_req_control()