Lines matching the full-text query "if" (all hits are in the Linux kernel's fs/f2fs/data.c)
40 if (!mapping) in __is_cp_guaranteed()
46 if (inode->i_ino == F2FS_META_INO(sbi) || in __is_cp_guaranteed()
60 if (mapping) { in __read_io_type()
64 if (inode->i_ino == F2FS_META_INO(sbi)) in __read_io_type()
67 if (inode->i_ino == F2FS_NODE_INO(sbi)) in __read_io_type()
96 /* PG_error was set if any post_read step failed */ in __read_end_io()
97 if (bio->bi_status || PageError(page)) { in __read_end_io()
107 if (bio->bi_private) in __read_end_io()
143 if (ctx->enabled_steps & (1 << STEP_DECRYPT)) { in bio_post_read_processing()
151 if (ctx->enabled_steps & (1 << STEP_VERITY)) { in bio_post_read_processing()
170 if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)), in f2fs_read_end_io()
176 if (f2fs_bio_post_read_required(bio)) { in f2fs_read_end_io()
193 if (time_to_inject(sbi, FAULT_WRITE_IO)) { in f2fs_write_end_io()
202 if (IS_DUMMY_WRITTEN_PAGE(page)) { in f2fs_write_end_io()
208 if (unlikely(bio->bi_status)) in f2fs_write_end_io()
215 if (unlikely(bio->bi_status)) { in f2fs_write_end_io()
217 if (type == F2FS_WB_CP_DATA) in f2fs_write_end_io()
225 if (f2fs_in_warm_node_list(sbi, page)) in f2fs_write_end_io()
230 if (!get_pages(sbi, F2FS_WB_CP_DATA) && in f2fs_write_end_io()
238 * Return true, if pre_bio's bdev is same as its target device.
246 if (f2fs_is_multi_device(sbi)) { in f2fs_target_device()
248 if (FDEV(i).start_blk <= blk_addr && in f2fs_target_device()
256 if (bio) { in f2fs_target_device()
267 if (!f2fs_is_multi_device(sbi)) in f2fs_target_device_index()
271 if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr) in f2fs_target_device_index()
294 if (is_read_io(fio->op)) { in __bio_alloc()
303 if (fio->io_wbc) in __bio_alloc()
312 if (!is_read_io(bio_op(bio))) { in __submit_bio()
315 if (type != DATA && type != NODE) in __submit_bio()
318 if (test_opt(sbi, LFS) && current->plug) in __submit_bio()
321 if (F2FS_IO_ALIGNED(sbi)) in __submit_bio()
327 if (start == 0) in __submit_bio()
341 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) in __submit_bio()
348 if (type == NODE) in __submit_bio()
352 if (is_read_io(bio_op(bio))) in __submit_bio()
363 if (!io->bio) in __submit_merged_bio()
368 if (is_read_io(fio->op)) in __submit_merged_bio()
384 if (!bio) in __has_merged_page()
387 if (!inode && !page && !ino) in __has_merged_page()
393 if (fscrypt_is_bounce_page(target)) in __has_merged_page()
396 if (inode && inode == target->mapping->host) in __has_merged_page()
398 if (page && page == target) in __has_merged_page()
400 if (ino && ino == ino_of_node(target)) in __has_merged_page()
416 if (type >= META_FLUSH) { in __f2fs_submit_merged_write()
420 if (!test_opt(sbi, NOBARRIER)) in __f2fs_submit_merged_write()
435 if (!force) { in __submit_merged_write_cond()
443 if (ret) in __submit_merged_write_cond()
447 if (type >= META) in __submit_merged_write_cond()
481 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, in f2fs_submit_page_bio()
492 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_submit_page_bio()
497 if (fio->io_wbc && !is_read_io(fio->op)) in f2fs_submit_page_bio()
512 if (last_blkaddr + 1 != cur_blkaddr) in page_is_mergeable()
520 if (io->fio.op != fio->op) in io_type_is_mergeable()
531 if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) { in io_is_mergeable()
538 if (!(filled_blocks % io_size) && left_vecs < io_size) in io_is_mergeable()
541 if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr)) in io_is_mergeable()
552 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, in f2fs_merge_page_bio()
559 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, in f2fs_merge_page_bio()
565 if (!bio) { in f2fs_merge_page_bio()
570 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_merge_page_bio()
576 if (fio->io_wbc) in f2fs_merge_page_bio()
590 if (!bio) in f2fs_submit_ipu_bio()
593 if (!__has_merged_page(*bio, NULL, page, 0)) in f2fs_submit_ipu_bio()
611 if (fio->in_list) { in f2fs_submit_page_write()
613 if (list_empty(&io->io_list)) { in f2fs_submit_page_write()
632 if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio, in f2fs_submit_page_write()
636 if (io->bio == NULL) { in f2fs_submit_page_write()
637 if (F2FS_IO_ALIGNED(sbi) && in f2fs_submit_page_write()
648 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_submit_page_write()
653 if (fio->io_wbc) in f2fs_submit_page_write()
661 if (fio->in_list) in f2fs_submit_page_write()
664 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || in f2fs_submit_page_write()
686 if (!bio) in f2fs_grab_read_bio()
692 if (f2fs_encrypted_file(inode)) in f2fs_grab_read_bio()
695 if (f2fs_need_verity(inode, first_idx)) in f2fs_grab_read_bio()
698 if (post_read_steps) { in f2fs_grab_read_bio()
700 if (!ctx) { in f2fs_grab_read_bio()
720 if (IS_ERR(bio)) in f2fs_submit_page_read()
726 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_submit_page_read()
742 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode)) in __set_data_blkaddr()
760 if (set_page_dirty(dn->node_page)) in f2fs_set_data_blkaddr()
777 if (!count) in f2fs_reserve_new_blocks()
780 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) in f2fs_reserve_new_blocks()
782 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count)))) in f2fs_reserve_new_blocks()
793 if (blkaddr == NULL_ADDR) { in f2fs_reserve_new_blocks()
800 if (set_page_dirty(dn->node_page)) in f2fs_reserve_new_blocks()
822 if (err) in f2fs_reserve_block()
825 if (dn->data_blkaddr == NULL_ADDR) in f2fs_reserve_block()
827 if (err || need_put) in f2fs_reserve_block()
837 if (f2fs_lookup_extent_cache(inode, index, &ei)) { in f2fs_get_block()
855 if (!page) in f2fs_get_read_data_page()
858 if (f2fs_lookup_extent_cache(inode, index, &ei)) { in f2fs_get_read_data_page()
860 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr, in f2fs_get_read_data_page()
870 if (err) in f2fs_get_read_data_page()
874 if (unlikely(dn.data_blkaddr == NULL_ADDR)) { in f2fs_get_read_data_page()
878 if (dn.data_blkaddr != NEW_ADDR && in f2fs_get_read_data_page()
886 if (PageUptodate(page)) { in f2fs_get_read_data_page()
898 if (dn.data_blkaddr == NEW_ADDR) { in f2fs_get_read_data_page()
900 if (!PageUptodate(page)) in f2fs_get_read_data_page()
907 if (err) in f2fs_get_read_data_page()
922 if (page && PageUptodate(page)) in f2fs_find_data_page()
927 if (IS_ERR(page)) in f2fs_find_data_page()
930 if (PageUptodate(page)) in f2fs_find_data_page()
934 if (unlikely(!PageUptodate(page))) { in f2fs_find_data_page()
942 * If it tries to access a hole, return an error.
953 if (IS_ERR(page)) in f2fs_get_lock_data_page()
958 if (unlikely(page->mapping != mapping)) { in f2fs_get_lock_data_page()
962 if (unlikely(!PageUptodate(page))) { in f2fs_get_lock_data_page()
975 * Note that, ipage is set only by make_empty_dir, and if any error occur,
987 if (!page) { in f2fs_get_new_data_page()
990 * if any error occur. in f2fs_get_new_data_page()
998 if (err) { in f2fs_get_new_data_page()
1002 if (!ipage) in f2fs_get_new_data_page()
1005 if (PageUptodate(page)) in f2fs_get_new_data_page()
1008 if (dn.data_blkaddr == NEW_ADDR) { in f2fs_get_new_data_page()
1010 if (!PageUptodate(page)) in f2fs_get_new_data_page()
1015 /* if ipage exists, blkaddr should be NEW_ADDR */ in f2fs_get_new_data_page()
1018 if (IS_ERR(page)) in f2fs_get_new_data_page()
1022 if (new_i_size && i_size_read(inode) < in f2fs_get_new_data_page()
1037 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) in __allocate_data_block()
1041 if (err) in __allocate_data_block()
1046 if (dn->data_blkaddr != NULL_ADDR) in __allocate_data_block()
1049 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count)))) in __allocate_data_block()
1057 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) in __allocate_data_block()
1078 if (direct_io) { in f2fs_preallocate_blocks()
1080 if (err) in f2fs_preallocate_blocks()
1084 if (direct_io && allow_outplace_dio(inode, iocb, from)) in f2fs_preallocate_blocks()
1087 if (is_inode_flag_set(inode, FI_NO_PREALLOC)) in f2fs_preallocate_blocks()
1092 if (map.m_len > map.m_lblk) in f2fs_preallocate_blocks()
1102 if (direct_io) { in f2fs_preallocate_blocks()
1109 if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) { in f2fs_preallocate_blocks()
1111 if (err) in f2fs_preallocate_blocks()
1114 if (f2fs_has_inline_data(inode)) in f2fs_preallocate_blocks()
1121 if (map.m_len > 0 && err == -ENOSPC) { in f2fs_preallocate_blocks()
1122 if (!direct_io) in f2fs_preallocate_blocks()
1131 if (flag == F2FS_GET_BLOCK_PRE_AIO) { in __do_map_lock()
1132 if (lock) in __do_map_lock()
1137 if (lock) in __do_map_lock()
1147 * If original data blocks are allocated, then give them to blockdev.
1168 if (!maxblocks) in f2fs_map_blocks()
1178 if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) { in f2fs_map_blocks()
1179 if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO && in f2fs_map_blocks()
1186 if (map->m_next_extent) in f2fs_map_blocks()
1190 if (flag == F2FS_GET_BLOCK_DIO) in f2fs_map_blocks()
1197 if (map->m_may_create) in f2fs_map_blocks()
1203 if (err) { in f2fs_map_blocks()
1204 if (flag == F2FS_GET_BLOCK_BMAP) in f2fs_map_blocks()
1206 if (err == -ENOENT) { in f2fs_map_blocks()
1208 if (map->m_next_pgofs) in f2fs_map_blocks()
1211 if (map->m_next_extent) in f2fs_map_blocks()
1226 if (__is_valid_data_blkaddr(blkaddr) && in f2fs_map_blocks()
1232 if (__is_valid_data_blkaddr(blkaddr)) { in f2fs_map_blocks()
1234 if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO && in f2fs_map_blocks()
1237 if (err) in f2fs_map_blocks()
1243 if (create) { in f2fs_map_blocks()
1244 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_map_blocks()
1248 if (flag == F2FS_GET_BLOCK_PRE_AIO) { in f2fs_map_blocks()
1249 if (blkaddr == NULL_ADDR) { in f2fs_map_blocks()
1258 if (!err) in f2fs_map_blocks()
1261 if (err) in f2fs_map_blocks()
1266 if (flag == F2FS_GET_BLOCK_BMAP) { in f2fs_map_blocks()
1270 if (flag == F2FS_GET_BLOCK_PRECACHE) in f2fs_map_blocks()
1272 if (flag == F2FS_GET_BLOCK_FIEMAP && in f2fs_map_blocks()
1274 if (map->m_next_pgofs) in f2fs_map_blocks()
1278 if (flag != F2FS_GET_BLOCK_FIEMAP) { in f2fs_map_blocks()
1280 if (map->m_next_pgofs) in f2fs_map_blocks()
1287 if (flag == F2FS_GET_BLOCK_PRE_AIO) in f2fs_map_blocks()
1290 if (map->m_len == 0) { in f2fs_map_blocks()
1292 if (blkaddr == NEW_ADDR) in f2fs_map_blocks()
1298 } else if ((map->m_pblk != NEW_ADDR && in f2fs_map_blocks()
1313 if (flag == F2FS_GET_BLOCK_PRE_AIO && in f2fs_map_blocks()
1318 if (err) in f2fs_map_blocks()
1322 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) { in f2fs_map_blocks()
1329 if (pgofs >= end) in f2fs_map_blocks()
1331 else if (dn.ofs_in_node < end_offset) in f2fs_map_blocks()
1334 if (flag == F2FS_GET_BLOCK_PRECACHE) { in f2fs_map_blocks()
1335 if (map->m_flags & F2FS_MAP_MAPPED) { in f2fs_map_blocks()
1346 if (map->m_may_create) { in f2fs_map_blocks()
1355 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) in f2fs_map_blocks()
1359 if (flag == F2FS_GET_BLOCK_PRECACHE) { in f2fs_map_blocks()
1360 if (map->m_flags & F2FS_MAP_MAPPED) { in f2fs_map_blocks()
1367 if (map->m_next_extent) in f2fs_map_blocks()
1372 if (map->m_may_create) { in f2fs_map_blocks()
1387 if (pos + len > i_size_read(inode)) in f2fs_overwrite_io()
1400 if (err || map.m_len == 0) in f2fs_overwrite_io()
1422 if (!err) { in __get_data_block()
1461 if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks)) in get_data_block_bmap()
1490 if (f2fs_has_inline_xattr(inode)) { in f2fs_xattr_fiemap()
1495 if (!page) in f2fs_xattr_fiemap()
1499 if (err) { in f2fs_xattr_fiemap()
1516 if (!xnid) in f2fs_xattr_fiemap()
1520 if (err || err == 1) in f2fs_xattr_fiemap()
1524 if (xnid) { in f2fs_xattr_fiemap()
1526 if (!page) in f2fs_xattr_fiemap()
1530 if (err) { in f2fs_xattr_fiemap()
1543 if (phys) in f2fs_xattr_fiemap()
1559 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { in f2fs_fiemap()
1561 if (ret) in f2fs_fiemap()
1566 if (ret) in f2fs_fiemap()
1571 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { in f2fs_fiemap()
1576 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) { in f2fs_fiemap()
1578 if (ret != -EAGAIN) in f2fs_fiemap()
1582 if (logical_to_blk(inode, len) == 0) in f2fs_fiemap()
1594 if (ret) in f2fs_fiemap()
1598 if (!buffer_mapped(&map_bh)) { in f2fs_fiemap()
1601 if (blk_to_logical(inode, start_blk) < blk_to_logical(inode, in f2fs_fiemap()
1608 if (size) { in f2fs_fiemap()
1609 if (IS_ENCRYPTED(inode)) in f2fs_fiemap()
1616 if (start_blk > last_blk || ret) in f2fs_fiemap()
1623 if (buffer_unwritten(&map_bh)) in f2fs_fiemap()
1630 if (fatal_signal_pending(current)) in f2fs_fiemap()
1635 if (ret == 1) in f2fs_fiemap()
1644 if (IS_ENABLED(CONFIG_FS_VERITY) && in f2fs_readpage_limit()
1671 if (last_block > last_block_in_file) in f2fs_read_single_page()
1675 if (block_in_file >= last_block) in f2fs_read_single_page()
1680 if ((map->m_flags & F2FS_MAP_MAPPED) && in f2fs_read_single_page()
1693 if (ret) in f2fs_read_single_page()
1696 if ((map->m_flags & F2FS_MAP_MAPPED)) { in f2fs_read_single_page()
1700 if (!PageUptodate(page) && (!PageSwapCache(page) && in f2fs_read_single_page()
1706 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr, in f2fs_read_single_page()
1714 if (f2fs_need_verity(inode, page->index) && in f2fs_read_single_page()
1719 if (!PageUptodate(page)) in f2fs_read_single_page()
1729 if (bio && !page_is_mergeable(F2FS_I_SB(inode), bio, in f2fs_read_single_page()
1735 if (bio == NULL) { in f2fs_read_single_page()
1738 if (IS_ERR(bio)) { in f2fs_read_single_page()
1746 * If the page is under writeback, we need to wait for in f2fs_read_single_page()
1751 if (bio_add_page(bio, page, blocksize, 0) < blocksize) in f2fs_read_single_page()
1759 if (bio) { in f2fs_read_single_page()
1773 * Note that the aops->readpages() function is ONLY used for read-ahead. If
1798 if (pages) { in f2fs_mpage_readpages()
1803 if (add_to_page_cache_lru(page, mapping, in f2fs_mpage_readpages()
1811 if (ret) { in f2fs_mpage_readpages()
1817 if (pages) in f2fs_mpage_readpages()
1821 if (bio) in f2fs_mpage_readpages()
1833 /* If the file has inline data, try to read it directly */ in f2fs_read_data_page()
1834 if (f2fs_has_inline_data(inode)) in f2fs_read_data_page()
1836 if (ret == -EAGAIN) in f2fs_read_data_page()
1851 /* If the file has inline data, skip readpages */ in f2fs_read_data_pages()
1852 if (f2fs_has_inline_data(inode)) in f2fs_read_data_pages()
1864 if (!f2fs_encrypted_file(inode)) in encrypt_one_page()
1874 if (IS_ERR(fio->encrypted_page)) { in encrypt_one_page()
1876 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) { in encrypt_one_page()
1886 if (mpage) { in encrypt_one_page()
1887 if (PageUptodate(mpage)) in encrypt_one_page()
1901 if (policy & (0x1 << F2FS_IPU_FORCE)) in check_inplace_update_policy()
1903 if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi)) in check_inplace_update_policy()
1905 if (policy & (0x1 << F2FS_IPU_UTIL) && in check_inplace_update_policy()
1908 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) && in check_inplace_update_policy()
1915 if (policy & (0x1 << F2FS_IPU_ASYNC) && in check_inplace_update_policy()
1922 if (policy & (0x1 << F2FS_IPU_FSYNC) && in check_inplace_update_policy()
1926 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) && in check_inplace_update_policy()
1935 if (f2fs_is_pinned_file(inode)) in f2fs_should_update_inplace()
1938 /* if this is cold file, we should overwrite to avoid fragmentation */ in f2fs_should_update_inplace()
1939 if (file_is_cold(inode)) in f2fs_should_update_inplace()
1949 if (test_opt(sbi, LFS)) in f2fs_should_update_outplace()
1951 if (S_ISDIR(inode->i_mode)) in f2fs_should_update_outplace()
1953 if (IS_NOQUOTA(inode)) in f2fs_should_update_outplace()
1955 if (f2fs_is_atomic_file(inode)) in f2fs_should_update_outplace()
1957 if (fio) { in f2fs_should_update_outplace()
1958 if (is_cold_data(fio->page)) in f2fs_should_update_outplace()
1960 if (IS_ATOMIC_WRITTEN_PAGE(fio->page)) in f2fs_should_update_outplace()
1962 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) && in f2fs_should_update_outplace()
1973 if (f2fs_should_update_outplace(inode, fio)) in need_inplace_update()
1990 if (need_inplace_update(fio) && in f2fs_do_write_data_page()
1994 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, in f2fs_do_write_data_page()
2004 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi)) in f2fs_do_write_data_page()
2008 if (err) in f2fs_do_write_data_page()
2014 if (fio->old_blkaddr == NULL_ADDR) { in f2fs_do_write_data_page()
2020 if (__is_valid_data_blkaddr(fio->old_blkaddr) && in f2fs_do_write_data_page()
2027 * If current allocation needs SSR, in f2fs_do_write_data_page()
2030 if (ipu_force || in f2fs_do_write_data_page()
2034 if (err) in f2fs_do_write_data_page()
2040 if (fio->need_lock == LOCK_REQ) in f2fs_do_write_data_page()
2043 if (err) { in f2fs_do_write_data_page()
2044 if (f2fs_encrypted_file(inode)) in f2fs_do_write_data_page()
2046 if (PageWriteback(page)) in f2fs_do_write_data_page()
2055 if (fio->need_lock == LOCK_RETRY) { in f2fs_do_write_data_page()
2056 if (!f2fs_trylock_op(fio->sbi)) { in f2fs_do_write_data_page()
2064 if (err) in f2fs_do_write_data_page()
2070 if (err) in f2fs_do_write_data_page()
2080 if (page->index == 0) in f2fs_do_write_data_page()
2085 if (fio->need_lock == LOCK_REQ) in f2fs_do_write_data_page()
2125 if (unlikely(f2fs_cp_error(sbi))) { in __write_data_page()
2131 if (S_ISDIR(inode->i_mode)) in __write_data_page()
2136 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in __write_data_page()
2139 if (page->index < end_index || f2fs_verity_in_progress(inode)) in __write_data_page()
2143 * If the offset is out-of-range of file size, in __write_data_page()
2147 if ((page->index >= end_index + 1) || !offset) in __write_data_page()
2152 if (f2fs_is_drop_cache(inode)) in __write_data_page()
2155 if (f2fs_is_volatile_file(inode) && (!page->index || in __write_data_page()
2161 if (S_ISDIR(inode->i_mode)) { in __write_data_page()
2167 if (!wbc->for_reclaim) in __write_data_page()
2169 else if (has_not_enough_free_secs(sbi, 0, 0)) in __write_data_page()
2175 if (f2fs_has_inline_data(inode)) { in __write_data_page()
2177 if (!err) in __write_data_page()
2181 if (err == -EAGAIN) { in __write_data_page()
2183 if (err == -EAGAIN) { in __write_data_page()
2189 if (err) { in __write_data_page()
2193 if (F2FS_I(inode)->last_disk_size < psize) in __write_data_page()
2199 if (err && err != -ENOENT) in __write_data_page()
2204 if (err) { in __write_data_page()
2209 if (wbc->for_reclaim) { in __write_data_page()
2217 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) && in __write_data_page()
2223 if (unlikely(f2fs_cp_error(sbi))) { in __write_data_page()
2229 if (submitted) in __write_data_page()
2242 if (!err || wbc->for_reclaim) in __write_data_page()
2281 if (get_dirty_pages(mapping->host) <= in f2fs_write_cache_pages()
2287 if (wbc->range_cyclic) { in f2fs_write_cache_pages()
2290 if (index == 0) in f2fs_write_cache_pages()
2298 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) in f2fs_write_cache_pages()
2302 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in f2fs_write_cache_pages()
2307 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in f2fs_write_cache_pages()
2315 if (nr_pages == 0) in f2fs_write_cache_pages()
2323 if (atomic_read(&sbi->wb_sync_req[DATA]) && in f2fs_write_cache_pages()
2333 if (unlikely(page->mapping != mapping)) { in f2fs_write_cache_pages()
2339 if (!PageDirty(page)) { in f2fs_write_cache_pages()
2344 if (PageWriteback(page)) { in f2fs_write_cache_pages()
2345 if (wbc->sync_mode != WB_SYNC_NONE) { in f2fs_write_cache_pages()
2354 if (!clear_page_dirty_for_io(page)) in f2fs_write_cache_pages()
2359 if (unlikely(ret)) { in f2fs_write_cache_pages()
2364 if (ret == AOP_WRITEPAGE_ACTIVATE) { in f2fs_write_cache_pages()
2368 } else if (ret == -EAGAIN) { in f2fs_write_cache_pages()
2370 if (wbc->sync_mode == WB_SYNC_ALL) { in f2fs_write_cache_pages()
2381 } else if (submitted) { in f2fs_write_cache_pages()
2385 if (--wbc->nr_to_write <= 0 && in f2fs_write_cache_pages()
2395 if (!cycled && !done) { in f2fs_write_cache_pages()
2401 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) in f2fs_write_cache_pages()
2404 if (nwritten) in f2fs_write_cache_pages()
2408 if (bio) in f2fs_write_cache_pages()
2417 if (!S_ISREG(inode->i_mode)) in __should_serialize_io()
2419 if (IS_NOQUOTA(inode)) in __should_serialize_io()
2422 if (F2FS_I(inode)->cp_task) in __should_serialize_io()
2424 if (wbc->sync_mode != WB_SYNC_ALL) in __should_serialize_io()
2426 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks) in __should_serialize_io()
2442 if (!mapping->a_ops->writepage) in __f2fs_write_data_pages()
2445 /* skip writing if there is no dirty page in this inode */ in __f2fs_write_data_pages()
2446 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE) in __f2fs_write_data_pages()
2450 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in __f2fs_write_data_pages()
2453 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) && in __f2fs_write_data_pages()
2460 if (is_inode_flag_set(inode, FI_DO_DEFRAG)) in __f2fs_write_data_pages()
2466 if (wbc->sync_mode == WB_SYNC_ALL) in __f2fs_write_data_pages()
2468 else if (atomic_read(&sbi->wb_sync_req[DATA])) in __f2fs_write_data_pages()
2471 if (__should_serialize_io(inode, wbc)) { in __f2fs_write_data_pages()
2480 if (locked) in __f2fs_write_data_pages()
2483 if (wbc->sync_mode == WB_SYNC_ALL) in __f2fs_write_data_pages()
2486 * if some pages were truncated, we cannot guarantee its mapping->host in __f2fs_write_data_pages()
2515 if (to > i_size && !f2fs_verity_in_progress(inode)) { in f2fs_write_failed()
2520 if (!IS_NOQUOTA(inode)) in f2fs_write_failed()
2545 if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE && in prepare_write_begin()
2551 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode)) in prepare_write_begin()
2556 if (f2fs_has_inline_data(inode) || in prepare_write_begin()
2564 if (IS_ERR(ipage)) { in prepare_write_begin()
2571 if (f2fs_has_inline_data(inode)) { in prepare_write_begin()
2572 if (pos + len <= MAX_INLINE_DATA(inode)) { in prepare_write_begin()
2575 if (inode->i_nlink) in prepare_write_begin()
2579 if (err) in prepare_write_begin()
2581 if (dn.data_blkaddr == NULL_ADDR) in prepare_write_begin()
2584 } else if (locked) { in prepare_write_begin()
2587 if (f2fs_lookup_extent_cache(inode, index, &ei)) { in prepare_write_begin()
2592 if (err || dn.data_blkaddr == NULL_ADDR) { in prepare_write_begin()
2609 if (locked) in prepare_write_begin()
2628 if (!f2fs_is_checkpoint_ready(sbi)) { in f2fs_write_begin()
2633 if ((f2fs_is_atomic_file(inode) && in f2fs_write_begin()
2646 if (index != 0) { in f2fs_write_begin()
2648 if (err) in f2fs_write_begin()
2658 if (!page) { in f2fs_write_begin()
2667 if (err) in f2fs_write_begin()
2670 if (need_balance && !IS_NOQUOTA(inode) && in f2fs_write_begin()
2675 if (page->mapping != mapping) { in f2fs_write_begin()
2684 if (len == PAGE_SIZE || PageUptodate(page)) in f2fs_write_begin()
2687 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) && in f2fs_write_begin()
2693 if (blkaddr == NEW_ADDR) { in f2fs_write_begin()
2697 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, in f2fs_write_begin()
2703 if (err) in f2fs_write_begin()
2707 if (unlikely(page->mapping != mapping)) { in f2fs_write_begin()
2711 if (unlikely(!PageUptodate(page))) { in f2fs_write_begin()
2721 if (drop_atomic) in f2fs_write_begin()
2740 if (!PageUptodate(page)) { in f2fs_write_end()
2741 if (unlikely(copied != len)) in f2fs_write_end()
2746 if (!copied) in f2fs_write_end()
2751 if (pos + copied > i_size_read(inode) && in f2fs_write_end()
2769 if (align & blocksize_mask) { in check_direct_IO()
2770 if (bdev) in check_direct_IO()
2773 if (align & blocksize_mask) in check_direct_IO()
2803 if (!dio) in f2fs_dio_submit_bio()
2839 if (err) in f2fs_direct_IO()
2842 if (f2fs_force_buffered_io(inode, iocb, iter)) in f2fs_direct_IO()
2849 if (rw == WRITE && whint_mode == WHINT_MODE_OFF) in f2fs_direct_IO()
2852 if (iocb->ki_flags & IOCB_NOWAIT) { in f2fs_direct_IO()
2853 if (!down_read_trylock(&fi->i_gc_rwsem[rw])) { in f2fs_direct_IO()
2858 if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) { in f2fs_direct_IO()
2866 if (do_opu) in f2fs_direct_IO()
2875 if (do_opu) in f2fs_direct_IO()
2880 if (rw == WRITE) { in f2fs_direct_IO()
2881 if (whint_mode == WHINT_MODE_OFF) in f2fs_direct_IO()
2883 if (err > 0) { in f2fs_direct_IO()
2886 if (!do_opu) in f2fs_direct_IO()
2888 } else if (err < 0) { in f2fs_direct_IO()
2905 if (inode->i_ino >= F2FS_ROOT_INO(sbi) && in f2fs_invalidate_page()
2909 if (PageDirty(page)) { in f2fs_invalidate_page()
2910 if (inode->i_ino == F2FS_META_INO(sbi)) { in f2fs_invalidate_page()
2912 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) { in f2fs_invalidate_page()
2922 if (IS_ATOMIC_WRITTEN_PAGE(page)) in f2fs_invalidate_page()
2930 /* If this is dirty page, keep PagePrivate */ in f2fs_release_page()
2931 if (PageDirty(page)) in f2fs_release_page()
2935 if (IS_ATOMIC_WRITTEN_PAGE(page)) in f2fs_release_page()
2949 if (!PageUptodate(page)) in f2fs_set_data_page_dirty()
2951 if (PageSwapCache(page)) in f2fs_set_data_page_dirty()
2954 if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) { in f2fs_set_data_page_dirty()
2955 if (!IS_ATOMIC_WRITTEN_PAGE(page)) { in f2fs_set_data_page_dirty()
2966 if (!PageDirty(page)) { in f2fs_set_data_page_dirty()
2978 if (f2fs_has_inline_data(inode)) in f2fs_bmap()
2982 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) in f2fs_bmap()
3001 if (atomic_written) { in f2fs_migrate_page()
3002 if (mode != MIGRATE_SYNC) in f2fs_migrate_page()
3004 if (!mutex_trylock(&fi->inmem_lock)) in f2fs_migrate_page()
3012 if (rc != MIGRATEPAGE_SUCCESS) { in f2fs_migrate_page()
3013 if (atomic_written) in f2fs_migrate_page()
3018 if (atomic_written) { in f2fs_migrate_page()
3021 if (cur->page == page) { in f2fs_migrate_page()
3030 if (PagePrivate(page)) { in f2fs_migrate_page()
3035 if (mode != MIGRATE_SYNC_NO_COPY) in f2fs_migrate_page()
3075 if (first_block == 0) in check_swap_activate()
3081 if (first_block & (blocks_per_page - 1)) { in check_swap_activate()
3091 if (block == 0) in check_swap_activate()
3093 if (block != first_block + block_in_page) { in check_swap_activate()
3101 if (page_no) { /* exclude the header page */ in check_swap_activate()
3102 if (first_block < lowest_block) in check_swap_activate()
3104 if (first_block > highest_block) in check_swap_activate()
3126 if (!S_ISREG(inode->i_mode)) in f2fs_swap_activate()
3129 if (f2fs_readonly(F2FS_I_SB(inode)->sb)) in f2fs_swap_activate()
3133 if (ret) in f2fs_swap_activate()
3137 if (ret) in f2fs_swap_activate()
3199 if (!bio_post_read_ctx_cache) in f2fs_init_post_read_processing()
3204 if (!bio_post_read_ctx_pool) in f2fs_init_post_read_processing()
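
The hits in bio_post_read_processing() and f2fs_init_post_read_processing() above revolve around the post-read context and its enabled_steps bitmask (STEP_DECRYPT, STEP_VERITY). As a rough illustration of that pattern only, here is a standalone sketch; the struct, step handlers, and main() below are simplified hypothetical stand-ins, not the kernel's actual definitions.

/* Minimal userspace sketch of the "enabled_steps" bitmask pattern seen in
 * bio_post_read_processing(); the types and step handlers are hypothetical
 * simplifications, not the kernel's real structures. */
#include <stdio.h>

enum post_read_step { STEP_DECRYPT, STEP_VERITY };

struct post_read_ctx {
	unsigned int enabled_steps;	/* bitmask of (1 << STEP_*) */
};

static void do_decrypt(void) { puts("decrypt pages"); }
static void do_verity(void)  { puts("verify pages"); }

static void post_read_processing(struct post_read_ctx *ctx)
{
	/* Each step runs only if its bit was set when the ctx was built. */
	if (ctx->enabled_steps & (1 << STEP_DECRYPT))
		do_decrypt();
	if (ctx->enabled_steps & (1 << STEP_VERITY))
		do_verity();
}

int main(void)
{
	struct post_read_ctx ctx = {
		.enabled_steps = (1 << STEP_DECRYPT) | (1 << STEP_VERITY),
	};
	post_read_processing(&ctx);
	return 0;
}

In the listing itself, the same kind of bit test (ctx->enabled_steps & (1 << STEP_DECRYPT)) decides whether decryption and fs-verity verification are run on a completed read bio, with the real context objects coming from the bio_post_read_ctx_cache and bio_post_read_ctx_pool allocated in f2fs_init_post_read_processing(); the sketch only reproduces that flag-dispatch shape.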