Lines Matching +full:data +full:- +full:mapping
1 // SPDX-License-Identifier: GPL-2.0
3 * fs/f2fs/data.c
13 #include <linux/backing-dev.h>
36 struct address_space *mapping = page->mapping; in __is_cp_guaranteed() local
40 if (!mapping) in __is_cp_guaranteed()
43 inode = mapping->host; in __is_cp_guaranteed()
46 if (inode->i_ino == F2FS_META_INO(sbi) || in __is_cp_guaranteed()
47 inode->i_ino == F2FS_NODE_INO(sbi) || in __is_cp_guaranteed()
48 S_ISDIR(inode->i_mode) || in __is_cp_guaranteed()
49 (S_ISREG(inode->i_mode) && in __is_cp_guaranteed()
58 struct address_space *mapping = page_file_mapping(page); in __read_io_type() local
60 if (mapping) { in __read_io_type()
61 struct inode *inode = mapping->host; in __read_io_type()
64 if (inode->i_ino == F2FS_META_INO(sbi)) in __read_io_type()
67 if (inode->i_ino == F2FS_NODE_INO(sbi)) in __read_io_type()
94 page = bv->bv_page; in __read_end_io()
97 if (bio->bi_status || PageError(page)) { in __read_end_io()
99 /* will be re-read later */ in __read_end_io()
107 if (bio->bi_private) in __read_end_io()
108 mempool_free(bio->bi_private, bio_post_read_ctx_pool); in __read_end_io()
119 fscrypt_decrypt_bio(ctx->bio); in decrypt_work()
129 fsverity_verify_bio(ctx->bio); in verity_work()
141 switch (++ctx->cur_step) { in bio_post_read_processing()
143 if (ctx->enabled_steps & (1 << STEP_DECRYPT)) { in bio_post_read_processing()
144 INIT_WORK(&ctx->work, decrypt_work); in bio_post_read_processing()
145 fscrypt_enqueue_decrypt_work(&ctx->work); in bio_post_read_processing()
148 ctx->cur_step++; in bio_post_read_processing()
149 /* fall-through */ in bio_post_read_processing()
151 if (ctx->enabled_steps & (1 << STEP_VERITY)) { in bio_post_read_processing()
152 INIT_WORK(&ctx->work, verity_work); in bio_post_read_processing()
153 fsverity_enqueue_verify_work(&ctx->work); in bio_post_read_processing()
156 ctx->cur_step++; in bio_post_read_processing()
157 /* fall-through */ in bio_post_read_processing()
159 __read_end_io(ctx->bio); in bio_post_read_processing()
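The bio_post_read_processing() fragments above step a completed read bio through the optional decrypt and verity stages before __read_end_io(), using ++ctx->cur_step and a bitmask of enabled steps. A minimal userspace sketch of the same step machine, with hypothetical step names and plain function calls standing in for the fscrypt/fsverity workqueues:

#include <stdio.h>

/* Hypothetical post-read steps, mirroring STEP_INITIAL/STEP_DECRYPT/STEP_VERITY. */
enum post_read_step { STEP_INITIAL = 0, STEP_DECRYPT, STEP_VERITY };

struct post_read_ctx {
	unsigned int enabled_steps;	/* bitmask of (1 << STEP_x) */
	int cur_step;
};

static void decrypt_step(struct post_read_ctx *ctx);
static void verity_step(struct post_read_ctx *ctx);

static void read_end_io(struct post_read_ctx *ctx)
{
	(void)ctx;
	printf("I/O complete\n");
}

/* Advance to the next enabled step; skip over disabled ones by falling through. */
static void post_read_processing(struct post_read_ctx *ctx)
{
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			decrypt_step(ctx);	/* the kernel enqueues a work item here */
			return;
		}
		ctx->cur_step++;
		/* fall through */
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			verity_step(ctx);
			return;
		}
		ctx->cur_step++;
		/* fall through */
	default:
		read_end_io(ctx);
	}
}

/* Each step re-enters the state machine when it finishes. */
static void decrypt_step(struct post_read_ctx *ctx)
{
	printf("decrypt pages\n");
	post_read_processing(ctx);
}

static void verity_step(struct post_read_ctx *ctx)
{
	printf("verify pages\n");
	post_read_processing(ctx);
}

int main(void)
{
	struct post_read_ctx ctx = {
		.enabled_steps = (1 << STEP_DECRYPT) | (1 << STEP_VERITY),
		.cur_step = STEP_INITIAL,
	};
	post_read_processing(&ctx);
	return 0;
}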
165 return bio->bi_private && !bio->bi_status; in f2fs_bio_post_read_required()
173 bio->bi_status = BLK_STS_IOERR; in f2fs_read_end_io()
177 struct bio_post_read_ctx *ctx = bio->bi_private; in f2fs_read_end_io()
179 ctx->cur_step = STEP_INITIAL; in f2fs_read_end_io()
189 struct f2fs_sb_info *sbi = bio->bi_private; in f2fs_write_end_io()
195 bio->bi_status = BLK_STS_IOERR; in f2fs_write_end_io()
199 struct page *page = bvec->bv_page; in f2fs_write_end_io()
206 mempool_free(page, sbi->write_io_dummy); in f2fs_write_end_io()
208 if (unlikely(bio->bi_status)) in f2fs_write_end_io()
215 if (unlikely(bio->bi_status)) { in f2fs_write_end_io()
216 mapping_set_error(page->mapping, -EIO); in f2fs_write_end_io()
221 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) && in f2fs_write_end_io()
222 page->index != nid_of_node(page)); in f2fs_write_end_io()
231 wq_has_sleeper(&sbi->cp_wait)) in f2fs_write_end_io()
232 wake_up(&sbi->cp_wait); in f2fs_write_end_io()
243 struct block_device *bdev = sbi->sb->s_bdev; in f2fs_target_device()
247 for (i = 0; i < sbi->s_ndevs; i++) { in f2fs_target_device()
250 blk_addr -= FDEV(i).start_blk; in f2fs_target_device()
258 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); in f2fs_target_device()
270 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_target_device_index()
280 return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno; in __same_bdev()
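f2fs_target_device() above walks the multi-device table, rebases blk_addr against the matching device's start_blk, and converts the result to a 512-byte sector for bi_sector. A simplified standalone sketch of that lookup, assuming 4 KiB blocks; the device table layout and helper names are stand-ins, not the kernel structures:

#include <stdio.h>
#include <stdint.h>

#define FS_BLKSIZE_BITS		12				/* 4 KiB blocks */
#define SECTORS_PER_BLOCK	(1 << (FS_BLKSIZE_BITS - 9))	/* 512-byte sectors */

/* Stand-in for one entry of the f2fs multi-device table (FDEV(i)). */
struct fs_device {
	const char *name;
	uint64_t start_blk;	/* first filesystem block on this device */
	uint64_t end_blk;	/* last filesystem block on this device */
};

/*
 * Find the device covering blk_addr, rebase the block address to that
 * device, and return the corresponding sector; -1 if out of range.
 */
static int64_t target_device(const struct fs_device *devs, int ndevs,
			     uint64_t blk_addr, const struct fs_device **out)
{
	for (int i = 0; i < ndevs; i++) {
		if (blk_addr >= devs[i].start_blk && blk_addr <= devs[i].end_blk) {
			blk_addr -= devs[i].start_blk;	/* device-relative block */
			*out = &devs[i];
			return (int64_t)(blk_addr * SECTORS_PER_BLOCK);
		}
	}
	return -1;
}

int main(void)
{
	const struct fs_device devs[] = {
		{ "sda", 0,      99999  },
		{ "sdb", 100000, 199999 },
	};
	const struct fs_device *dev;
	int64_t sector = target_device(devs, 2, 123456, &dev);

	if (sector >= 0)
		printf("block 123456 -> %s, sector %lld\n", dev->name, (long long)sector);
	return 0;
}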
284 * Low-level block read/write IO operations.
288 struct f2fs_sb_info *sbi = fio->sbi; in __bio_alloc()
293 f2fs_target_device(sbi, fio->new_blkaddr, bio); in __bio_alloc()
294 if (is_read_io(fio->op)) { in __bio_alloc()
295 bio->bi_end_io = f2fs_read_end_io; in __bio_alloc()
296 bio->bi_private = NULL; in __bio_alloc()
298 bio->bi_end_io = f2fs_write_end_io; in __bio_alloc()
299 bio->bi_private = sbi; in __bio_alloc()
300 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, in __bio_alloc()
301 fio->type, fio->temp); in __bio_alloc()
303 if (fio->io_wbc) in __bio_alloc()
304 wbc_init_bio(fio->io_wbc, bio); in __bio_alloc()
315 if (type != DATA && type != NODE) in __submit_bio()
318 if (test_opt(sbi, LFS) && current->plug) in __submit_bio()
319 blk_finish_plug(current->plug); in __submit_bio()
324 start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS; in __submit_bio()
333 mempool_alloc(sbi->write_io_dummy, in __submit_bio()
353 trace_f2fs_submit_read_bio(sbi->sb, type, bio); in __submit_bio()
355 trace_f2fs_submit_write_bio(sbi->sb, type, bio); in __submit_bio()
361 struct f2fs_io_info *fio = &io->fio; in __submit_merged_bio()
363 if (!io->bio) in __submit_merged_bio()
366 bio_set_op_attrs(io->bio, fio->op, fio->op_flags); in __submit_merged_bio()
368 if (is_read_io(fio->op)) in __submit_merged_bio()
369 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio); in __submit_merged_bio()
371 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio); in __submit_merged_bio()
373 __submit_bio(io->sbi, io->bio, fio->type); in __submit_merged_bio()
374 io->bio = NULL; in __submit_merged_bio()
392 target = bvec->bv_page; in __has_merged_page()
396 if (inode && inode == target->mapping->host) in __has_merged_page()
411 struct f2fs_bio_info *io = sbi->write_io[btype] + temp; in __f2fs_submit_merged_write()
413 down_write(&io->io_rwsem); in __f2fs_submit_merged_write()
417 io->fio.type = META_FLUSH; in __f2fs_submit_merged_write()
418 io->fio.op = REQ_OP_WRITE; in __f2fs_submit_merged_write()
419 io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC; in __f2fs_submit_merged_write()
421 io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA; in __f2fs_submit_merged_write()
424 up_write(&io->io_rwsem); in __f2fs_submit_merged_write()
437 struct f2fs_bio_info *io = sbi->write_io[btype] + temp; in __submit_merged_write_cond()
439 down_read(&io->io_rwsem); in __submit_merged_write_cond()
440 ret = __has_merged_page(io->bio, inode, page, ino); in __submit_merged_write_cond()
441 up_read(&io->io_rwsem); in __submit_merged_write_cond()
466 f2fs_submit_merged_write(sbi, DATA); in f2fs_flush_merged_writes()
472 * Fill the locked page with data located at the given block address.
478 struct page *page = fio->encrypted_page ? in f2fs_submit_page_bio()
479 fio->encrypted_page : fio->page; in f2fs_submit_page_bio()
481 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, in f2fs_submit_page_bio()
482 fio->is_por ? META_POR : (__is_meta_io(fio) ? in f2fs_submit_page_bio()
484 return -EFSCORRUPTED; in f2fs_submit_page_bio()
494 return -EFAULT; in f2fs_submit_page_bio()
497 if (fio->io_wbc && !is_read_io(fio->op)) in f2fs_submit_page_bio()
498 wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE); in f2fs_submit_page_bio()
500 bio_set_op_attrs(bio, fio->op, fio->op_flags); in f2fs_submit_page_bio()
502 inc_page_count(fio->sbi, is_read_io(fio->op) ? in f2fs_submit_page_bio()
503 __read_io_type(page): WB_DATA_TYPE(fio->page)); in f2fs_submit_page_bio()
505 __submit_bio(fio->sbi, bio, fio->type); in f2fs_submit_page_bio()
520 if (io->fio.op != fio->op) in io_type_is_mergeable()
522 return io->fio.op_flags == fio->op_flags; in io_type_is_mergeable()
531 if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) { in io_is_mergeable()
533 F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size); in io_is_mergeable()
535 unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt; in io_is_mergeable()
548 struct bio *bio = *fio->bio; in f2fs_merge_page_bio()
549 struct page *page = fio->encrypted_page ? in f2fs_merge_page_bio()
550 fio->encrypted_page : fio->page; in f2fs_merge_page_bio()
552 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, in f2fs_merge_page_bio()
554 return -EFSCORRUPTED; in f2fs_merge_page_bio()
559 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, in f2fs_merge_page_bio()
560 fio->new_blkaddr)) { in f2fs_merge_page_bio()
561 __submit_bio(fio->sbi, bio, fio->type); in f2fs_merge_page_bio()
567 bio_set_op_attrs(bio, fio->op, fio->op_flags); in f2fs_merge_page_bio()
571 __submit_bio(fio->sbi, bio, fio->type); in f2fs_merge_page_bio()
576 if (fio->io_wbc) in f2fs_merge_page_bio()
577 wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE); in f2fs_merge_page_bio()
579 inc_page_count(fio->sbi, WB_DATA_TYPE(page)); in f2fs_merge_page_bio()
581 *fio->last_block = fio->new_blkaddr; in f2fs_merge_page_bio()
582 *fio->bio = bio; in f2fs_merge_page_bio()
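f2fs_merge_page_bio() above keeps one open write bio per caller and adds the new page only when it is mergeable with the run ending at *fio->last_block (contiguity being the main test); otherwise the bio is submitted and a fresh one is started, and last_block is updated either way. A hedged, allocator-free sketch of that merge-or-flush decision; the types and submit() helper are illustrative only:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for an in-flight bio: just the block run it covers. */
struct open_bio {
	uint64_t first_blk;
	unsigned int nr_blks;
	bool in_use;
};

static void submit(struct open_bio *bio)
{
	printf("submit bio: blocks %llu..%llu\n",
	       (unsigned long long)bio->first_blk,
	       (unsigned long long)(bio->first_blk + bio->nr_blks - 1));
	bio->in_use = false;
}

/*
 * Append new_blk to the open bio if it directly follows last_blk;
 * otherwise flush the bio and start a new one at new_blk.
 */
static void merge_or_flush(struct open_bio *bio, uint64_t *last_blk, uint64_t new_blk)
{
	if (bio->in_use && new_blk != *last_blk + 1)
		submit(bio);			/* not contiguous: flush first */

	if (!bio->in_use) {
		bio->first_blk = new_blk;	/* start a new run */
		bio->nr_blks = 0;
		bio->in_use = true;
	}

	bio->nr_blks++;				/* "add the page" to the bio */
	*last_blk = new_blk;
}

int main(void)
{
	struct open_bio bio = { 0 };
	uint64_t last_blk = 0;
	uint64_t blocks[] = { 100, 101, 102, 200, 201 };

	for (unsigned int i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
		merge_or_flush(&bio, &last_blk, blocks[i]);
	if (bio.in_use)
		submit(&bio);			/* flush whatever is left */
	return 0;
}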
596 __submit_bio(sbi, *bio, DATA); in f2fs_submit_ipu_bio()
602 struct f2fs_sb_info *sbi = fio->sbi; in f2fs_submit_page_write()
603 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); in f2fs_submit_page_write()
604 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp; in f2fs_submit_page_write()
607 f2fs_bug_on(sbi, is_read_io(fio->op)); in f2fs_submit_page_write()
609 down_write(&io->io_rwsem); in f2fs_submit_page_write()
611 if (fio->in_list) { in f2fs_submit_page_write()
612 spin_lock(&io->io_lock); in f2fs_submit_page_write()
613 if (list_empty(&io->io_list)) { in f2fs_submit_page_write()
614 spin_unlock(&io->io_lock); in f2fs_submit_page_write()
617 fio = list_first_entry(&io->io_list, in f2fs_submit_page_write()
619 list_del(&fio->list); in f2fs_submit_page_write()
620 spin_unlock(&io->io_lock); in f2fs_submit_page_write()
625 bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; in f2fs_submit_page_write()
628 fio->submitted = true; in f2fs_submit_page_write()
632 if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio, in f2fs_submit_page_write()
633 io->last_block_in_bio, fio->new_blkaddr)) in f2fs_submit_page_write()
636 if (io->bio == NULL) { in f2fs_submit_page_write()
638 (fio->type == DATA || fio->type == NODE) && in f2fs_submit_page_write()
639 fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) { in f2fs_submit_page_write()
641 fio->retry = true; in f2fs_submit_page_write()
644 io->bio = __bio_alloc(fio, BIO_MAX_PAGES); in f2fs_submit_page_write()
645 io->fio = *fio; in f2fs_submit_page_write()
648 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_submit_page_write()
653 if (fio->io_wbc) in f2fs_submit_page_write()
654 wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE); in f2fs_submit_page_write()
656 io->last_block_in_bio = fio->new_blkaddr; in f2fs_submit_page_write()
659 trace_f2fs_submit_page_write(fio->page, fio); in f2fs_submit_page_write()
661 if (fio->in_list) in f2fs_submit_page_write()
667 up_write(&io->io_rwsem); in f2fs_submit_page_write()
673 idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); in f2fs_need_verity()
687 return ERR_PTR(-ENOMEM); in f2fs_grab_read_bio()
689 bio->bi_end_io = f2fs_read_end_io; in f2fs_grab_read_bio()
702 return ERR_PTR(-ENOMEM); in f2fs_grab_read_bio()
704 ctx->bio = bio; in f2fs_grab_read_bio()
705 ctx->enabled_steps = post_read_steps; in f2fs_grab_read_bio()
706 bio->bi_private = ctx; in f2fs_grab_read_bio()
719 bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index); in f2fs_submit_page_read()
728 return -EFAULT; in f2fs_submit_page_read()
732 __submit_bio(sbi, bio, DATA); in f2fs_submit_page_read()
738 struct f2fs_node *rn = F2FS_NODE(dn->node_page); in __set_data_blkaddr()
742 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode)) in __set_data_blkaddr()
743 base = get_extra_isize(dn->inode); in __set_data_blkaddr()
745 /* Get physical address of data block */ in __set_data_blkaddr()
747 addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr); in __set_data_blkaddr()
751 * Lock ordering for the change of data block address:
752 * ->data_page
753 * ->node_page
758 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true); in f2fs_set_data_blkaddr()
760 if (set_page_dirty(dn->node_page)) in f2fs_set_data_blkaddr()
761 dn->node_changed = true; in f2fs_set_data_blkaddr()
766 dn->data_blkaddr = blkaddr; in f2fs_update_data_blkaddr()
771 /* dn->ofs_in_node will be returned with up-to-date last block pointer */
774 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_reserve_new_blocks()
780 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) in f2fs_reserve_new_blocks()
781 return -EPERM; in f2fs_reserve_new_blocks()
782 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count)))) in f2fs_reserve_new_blocks()
785 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid, in f2fs_reserve_new_blocks()
786 dn->ofs_in_node, count); in f2fs_reserve_new_blocks()
788 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true); in f2fs_reserve_new_blocks()
790 for (; count > 0; dn->ofs_in_node++) { in f2fs_reserve_new_blocks()
791 block_t blkaddr = datablock_addr(dn->inode, in f2fs_reserve_new_blocks()
792 dn->node_page, dn->ofs_in_node); in f2fs_reserve_new_blocks()
794 dn->data_blkaddr = NEW_ADDR; in f2fs_reserve_new_blocks()
796 count--; in f2fs_reserve_new_blocks()
800 if (set_page_dirty(dn->node_page)) in f2fs_reserve_new_blocks()
801 dn->node_changed = true; in f2fs_reserve_new_blocks()
805 /* Should keep dn->ofs_in_node unchanged */
808 unsigned int ofs_in_node = dn->ofs_in_node; in f2fs_reserve_new_block()
812 dn->ofs_in_node = ofs_in_node; in f2fs_reserve_new_block()
818 bool need_put = dn->inode_page ? false : true; in f2fs_reserve_block()
825 if (dn->data_blkaddr == NULL_ADDR) in f2fs_reserve_block()
835 struct inode *inode = dn->inode; in f2fs_get_block()
838 dn->data_blkaddr = ei.blk + index - ei.fofs; in f2fs_get_block()
848 struct address_space *mapping = inode->i_mapping; in f2fs_get_read_data_page() local
854 page = f2fs_grab_cache_page(mapping, index, for_write); in f2fs_get_read_data_page()
856 return ERR_PTR(-ENOMEM); in f2fs_get_read_data_page()
859 dn.data_blkaddr = ei.blk + index - ei.fofs; in f2fs_get_read_data_page()
862 err = -EFSCORRUPTED; in f2fs_get_read_data_page()
875 err = -ENOENT; in f2fs_get_read_data_page()
882 err = -EFSCORRUPTED; in f2fs_get_read_data_page()
893 * new inode page couldn't be allocated due to -ENOSPC. in f2fs_get_read_data_page()
895 * see, f2fs_add_link -> f2fs_get_new_data_page -> in f2fs_get_read_data_page()
918 struct address_space *mapping = inode->i_mapping; in f2fs_find_data_page() local
921 page = find_get_page(mapping, index); in f2fs_find_data_page()
936 return ERR_PTR(-EIO); in f2fs_find_data_page()
949 struct address_space *mapping = inode->i_mapping; in f2fs_get_lock_data_page() local
958 if (unlikely(page->mapping != mapping)) { in f2fs_get_lock_data_page()
964 return ERR_PTR(-EIO); in f2fs_get_lock_data_page()
970 * Caller ensures that this data page is never allocated.
971 * A new zero-filled data page is allocated in the page cache.
981 struct address_space *mapping = inode->i_mapping; in f2fs_get_new_data_page() local
986 page = f2fs_grab_cache_page(mapping, index, true); in f2fs_get_new_data_page()
993 return ERR_PTR(-ENOMEM); in f2fs_get_new_data_page()
1030 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in __allocate_data_block()
1037 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) in __allocate_data_block()
1038 return -EPERM; in __allocate_data_block()
1040 err = f2fs_get_node_info(sbi, dn->nid, &ni); in __allocate_data_block()
1044 dn->data_blkaddr = datablock_addr(dn->inode, in __allocate_data_block()
1045 dn->node_page, dn->ofs_in_node); in __allocate_data_block()
1046 if (dn->data_blkaddr != NULL_ADDR) in __allocate_data_block()
1049 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count)))) in __allocate_data_block()
1053 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); in __allocate_data_block()
1054 old_blkaddr = dn->data_blkaddr; in __allocate_data_block()
1055 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr, in __allocate_data_block()
1060 f2fs_update_data_blkaddr(dn, dn->data_blkaddr); in __allocate_data_block()
1064 * data from unwritten block via dio_read. in __allocate_data_block()
1071 struct inode *inode = file_inode(iocb->ki_filp); in f2fs_preallocate_blocks()
1075 bool direct_io = iocb->ki_flags & IOCB_DIRECT; in f2fs_preallocate_blocks()
1077 /* convert inline data for Direct I/O */ in f2fs_preallocate_blocks()
1090 map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos); in f2fs_preallocate_blocks()
1091 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from)); in f2fs_preallocate_blocks()
1093 map.m_len -= map.m_lblk; in f2fs_preallocate_blocks()
1103 map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint); in f2fs_preallocate_blocks()
1109 if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) { in f2fs_preallocate_blocks()
1121 if (map.m_len > 0 && err == -ENOSPC) { in f2fs_preallocate_blocks()
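f2fs_preallocate_blocks() above converts the write's byte range (iocb->ki_pos plus iov_iter_count()) into a block-aligned m_lblk/m_len pair before calling the mapping code: the start is rounded up to a block boundary and the length is the number of whole blocks covered. A small standalone sketch of that conversion, assuming 4 KiB blocks and hypothetical helper names:

#include <stdio.h>
#include <stdint.h>

#define BLKSIZE		4096ULL
#define BYTES_TO_BLK(b)	((b) / BLKSIZE)				/* round down */
#define BLK_ALIGN(b)	(BYTES_TO_BLK((b) + BLKSIZE - 1))	/* round up */

/* Compute the whole-block range touched by a write of len bytes at pos. */
static void prealloc_range(uint64_t pos, uint64_t len,
			   uint64_t *lblk, uint64_t *nblks)
{
	uint64_t first = BLK_ALIGN(pos);		/* first full block at/after pos */
	uint64_t end = BYTES_TO_BLK(pos + len);		/* blocks fully covered */

	*lblk = first;
	*nblks = end > first ? end - first : 0;
}

int main(void)
{
	uint64_t lblk, nblks;

	/* A 10000-byte write starting at byte offset 6000. */
	prealloc_range(6000, 10000, &lblk, &nblks);
	printf("preallocate %llu block(s) starting at block %llu\n",
	       (unsigned long long)nblks, (unsigned long long)lblk);
	return 0;
}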
1133 down_read(&sbi->node_change); in __do_map_lock()
1135 up_read(&sbi->node_change); in __do_map_lock()
1147 * If original data blocks are allocated, then give them to blockdev.
1156 unsigned int maxblocks = map->m_len; in f2fs_map_blocks()
1159 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE; in f2fs_map_blocks()
1171 map->m_len = 0; in f2fs_map_blocks()
1172 map->m_flags = 0; in f2fs_map_blocks()
1175 pgofs = (pgoff_t)map->m_lblk; in f2fs_map_blocks()
1180 map->m_may_create) in f2fs_map_blocks()
1183 map->m_pblk = ei.blk + pgofs - ei.fofs; in f2fs_map_blocks()
1184 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs); in f2fs_map_blocks()
1185 map->m_flags = F2FS_MAP_MAPPED; in f2fs_map_blocks()
1186 if (map->m_next_extent) in f2fs_map_blocks()
1187 *map->m_next_extent = pgofs + map->m_len; in f2fs_map_blocks()
1192 map->m_pblk, map->m_len); in f2fs_map_blocks()
1197 if (map->m_may_create) in f2fs_map_blocks()
1205 map->m_pblk = 0; in f2fs_map_blocks()
1206 if (err == -ENOENT) { in f2fs_map_blocks()
1208 if (map->m_next_pgofs) in f2fs_map_blocks()
1209 *map->m_next_pgofs = in f2fs_map_blocks()
1211 if (map->m_next_extent) in f2fs_map_blocks()
1212 *map->m_next_extent = in f2fs_map_blocks()
1228 err = -EFSCORRUPTED; in f2fs_map_blocks()
1233 /* use out-of-place update for direct IO under LFS mode */ in f2fs_map_blocks()
1235 map->m_may_create) { in f2fs_map_blocks()
1236 err = __allocate_data_block(&dn, map->m_seg_type); in f2fs_map_blocks()
1245 err = -EIO; in f2fs_map_blocks()
1257 map->m_seg_type); in f2fs_map_blocks()
1263 map->m_flags |= F2FS_MAP_NEW; in f2fs_map_blocks()
1267 map->m_pblk = 0; in f2fs_map_blocks()
1274 if (map->m_next_pgofs) in f2fs_map_blocks()
1275 *map->m_next_pgofs = pgofs + 1; in f2fs_map_blocks()
1280 if (map->m_next_pgofs) in f2fs_map_blocks()
1281 *map->m_next_pgofs = pgofs + 1; in f2fs_map_blocks()
1290 if (map->m_len == 0) { in f2fs_map_blocks()
1293 map->m_flags |= F2FS_MAP_UNWRITTEN; in f2fs_map_blocks()
1294 map->m_flags |= F2FS_MAP_MAPPED; in f2fs_map_blocks()
1296 map->m_pblk = blkaddr; in f2fs_map_blocks()
1297 map->m_len = 1; in f2fs_map_blocks()
1298 } else if ((map->m_pblk != NEW_ADDR && in f2fs_map_blocks()
1299 blkaddr == (map->m_pblk + ofs)) || in f2fs_map_blocks()
1300 (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) || in f2fs_map_blocks()
1303 map->m_len++; in f2fs_map_blocks()
1321 map->m_len += dn.ofs_in_node - ofs_in_node; in f2fs_map_blocks()
1323 err = -ENOSPC; in f2fs_map_blocks()
1335 if (map->m_flags & F2FS_MAP_MAPPED) { in f2fs_map_blocks()
1336 unsigned int ofs = start_pgofs - map->m_lblk; in f2fs_map_blocks()
1339 start_pgofs, map->m_pblk + ofs, in f2fs_map_blocks()
1340 map->m_len - ofs); in f2fs_map_blocks()
1346 if (map->m_may_create) { in f2fs_map_blocks()
1355 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) in f2fs_map_blocks()
1357 map->m_pblk, map->m_len); in f2fs_map_blocks()
1360 if (map->m_flags & F2FS_MAP_MAPPED) { in f2fs_map_blocks()
1361 unsigned int ofs = start_pgofs - map->m_lblk; in f2fs_map_blocks()
1364 start_pgofs, map->m_pblk + ofs, in f2fs_map_blocks()
1365 map->m_len - ofs); in f2fs_map_blocks()
1367 if (map->m_next_extent) in f2fs_map_blocks()
1368 *map->m_next_extent = pgofs + 1; in f2fs_map_blocks()
1372 if (map->m_may_create) { in f2fs_map_blocks()
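The f2fs_map_blocks() fragments above grow the returned mapping one block at a time: the first looked-up block seeds m_pblk/m_len, and each following block extends m_len only while it stays physically contiguous with the run. A standalone sketch of that run-building loop over a precomputed array of block addresses (a stand-in for the per-dnode lookups):

#include <stdio.h>
#include <stdint.h>

struct block_map {
	uint64_t pblk;		/* first physical block of the run */
	unsigned int len;	/* number of contiguous blocks */
};

/*
 * Build the longest physically contiguous run starting at index 0 of
 * blkaddrs[], mirroring how f2fs_map_blocks() extends map->m_len.
 */
static void map_blocks(const uint64_t *blkaddrs, unsigned int maxblocks,
		       struct block_map *map)
{
	map->len = 0;

	for (unsigned int ofs = 0; ofs < maxblocks; ofs++) {
		uint64_t blkaddr = blkaddrs[ofs];

		if (map->len == 0) {
			map->pblk = blkaddr;		/* seed the run */
			map->len = 1;
		} else if (blkaddr == map->pblk + ofs) {
			map->len++;			/* still contiguous */
		} else {
			break;				/* run ends here */
		}
	}
}

int main(void)
{
	/* Physical addresses for logical blocks 0..4 of some file. */
	const uint64_t blkaddrs[] = { 5000, 5001, 5002, 9000, 9001 };
	struct block_map map;

	map_blocks(blkaddrs, 5, &map);
	printf("mapped %u block(s) starting at pblk %llu\n",
	       map.len, (unsigned long long)map.pblk);
	return 0;
}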
1398 map.m_len = last_lblk - map.m_lblk; in f2fs_overwrite_io()
1415 map.m_len = bh->b_size >> inode->i_blkbits; in __get_data_block()
1423 map_bh(bh, inode->i_sb, map.m_pblk); in __get_data_block()
1424 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags; in __get_data_block()
1425 bh->b_size = (u64)map.m_len << inode->i_blkbits; in __get_data_block()
1444 f2fs_rw_hint_to_seg_type(inode->i_write_hint), in get_data_block_dio_write()
1453 f2fs_rw_hint_to_seg_type(inode->i_write_hint), in get_data_block_dio()
1461 if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks)) in get_data_block_bmap()
1462 return -EFBIG; in get_data_block_bmap()
1471 return (offset >> inode->i_blkbits); in logical_to_blk()
1476 return (blk << inode->i_blkbits); in blk_to_logical()
1487 nid_t xnid = F2FS_I(inode)->i_xattr_nid; in f2fs_xattr_fiemap()
1494 inode->i_ino, false); in f2fs_xattr_fiemap()
1496 return -ENOMEM; in f2fs_xattr_fiemap()
1498 err = f2fs_get_node_info(sbi, inode->i_ino, &ni); in f2fs_xattr_fiemap()
1506 sizeof(__le32) * (DEF_ADDRS_PER_INODE - in f2fs_xattr_fiemap()
1527 return -ENOMEM; in f2fs_xattr_fiemap()
1536 len = inode->i_sb->s_blocksize; in f2fs_xattr_fiemap()
1559 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { in f2fs_fiemap()
1571 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { in f2fs_fiemap()
1578 if (ret != -EAGAIN) in f2fs_fiemap()
1586 last_blk = logical_to_blk(inode, start + len - 1); in f2fs_fiemap()
1602 F2FS_I_SB(inode)->max_file_blocks)) in f2fs_fiemap()
1631 ret = -EINTR; in f2fs_fiemap()
1646 return inode->i_sb->s_maxbytes; in f2fs_readpage_limit()
1659 const unsigned blkbits = inode->i_blkbits; in f2fs_read_single_page()
1669 last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >> in f2fs_read_single_page()
1680 if ((map->m_flags & F2FS_MAP_MAPPED) && in f2fs_read_single_page()
1681 block_in_file > map->m_lblk && in f2fs_read_single_page()
1682 block_in_file < (map->m_lblk + map->m_len)) in f2fs_read_single_page()
1689 map->m_lblk = block_in_file; in f2fs_read_single_page()
1690 map->m_len = last_block - block_in_file; in f2fs_read_single_page()
1696 if ((map->m_flags & F2FS_MAP_MAPPED)) { in f2fs_read_single_page()
1697 block_nr = map->m_pblk + block_in_file - map->m_lblk; in f2fs_read_single_page()
1708 ret = -EFSCORRUPTED; in f2fs_read_single_page()
1714 if (f2fs_need_verity(inode, page->index) && in f2fs_read_single_page()
1716 ret = -EIO; in f2fs_read_single_page()
1732 __submit_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_read_single_page()
1737 is_readahead ? REQ_RAHEAD : 0, page->index); in f2fs_read_single_page()
1747 * its completion to see the correct decrypted data. in f2fs_read_single_page()
1760 __submit_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_read_single_page()
1773 * Note that the aops->readpages() function is ONLY used for read-ahead. If
1774 * this function ever deviates from doing just read-ahead, it should either
1775 * use ->readpage() or do the necessary surgery to decouple ->readpages()
1776 * from read-ahead.
1778 static int f2fs_mpage_readpages(struct address_space *mapping, in f2fs_mpage_readpages() argument
1784 struct inode *inode = mapping->host; in f2fs_mpage_readpages()
1797 for (; nr_pages; nr_pages--) { in f2fs_mpage_readpages()
1801 prefetchw(&page->flags); in f2fs_mpage_readpages()
1802 list_del(&page->lru); in f2fs_mpage_readpages()
1803 if (add_to_page_cache_lru(page, mapping, in f2fs_mpage_readpages()
1805 readahead_gfp_mask(mapping))) in f2fs_mpage_readpages()
1822 __submit_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_mpage_readpages()
1828 struct inode *inode = page_file_mapping(page)->host; in f2fs_read_data_page()
1829 int ret = -EAGAIN; in f2fs_read_data_page()
1831 trace_f2fs_readpage(page, DATA); in f2fs_read_data_page()
1833 /* If the file has inline data, try to read it directly */ in f2fs_read_data_page()
1836 if (ret == -EAGAIN) in f2fs_read_data_page()
1843 struct address_space *mapping, in f2fs_read_data_pages() argument
1846 struct inode *inode = mapping->host; in f2fs_read_data_pages()
1851 /* If the file has inline data, skip readpages */ in f2fs_read_data_pages()
1855 return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true); in f2fs_read_data_pages()
1860 struct inode *inode = fio->page->mapping->host; in encrypt_one_page()
1868 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); in encrypt_one_page()
1871 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page, in encrypt_one_page()
1874 if (IS_ERR(fio->encrypted_page)) { in encrypt_one_page()
1876 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) { in encrypt_one_page()
1877 f2fs_flush_merged_writes(fio->sbi); in encrypt_one_page()
1882 return PTR_ERR(fio->encrypted_page); in encrypt_one_page()
1885 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr); in encrypt_one_page()
1889 page_address(fio->encrypted_page), PAGE_SIZE); in encrypt_one_page()
1899 unsigned int policy = SM_I(sbi)->ipu_policy; in check_inplace_update_policy()
1906 utilization(sbi) > SM_I(sbi)->min_ipu_util) in check_inplace_update_policy()
1909 utilization(sbi) > SM_I(sbi)->min_ipu_util) in check_inplace_update_policy()
1916 fio && fio->op == REQ_OP_WRITE && in check_inplace_update_policy()
1917 !(fio->op_flags & REQ_SYNC) && in check_inplace_update_policy()
1927 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) in check_inplace_update_policy()
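check_inplace_update_policy() above treats ipu_policy as a bitmask of independently enabled triggers (utilization thresholds, async non-REQ_SYNC writes, and so on); any satisfied trigger permits an in-place update. A reduced sketch of that pattern with made-up policy bits and inputs, not the real F2FS_IPU_* definitions:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative policy bits, loosely modelled on F2FS_IPU_FORCE/UTIL/ASYNC. */
enum ipu_policy_bit { IPU_FORCE, IPU_UTIL, IPU_ASYNC };

struct write_ctx {
	unsigned int policy;		/* bitmask of (1 << IPU_x) */
	unsigned int utilization;	/* % of filesystem space in use */
	unsigned int min_ipu_util;	/* threshold for IPU_UTIL */
	bool async_write;		/* write without REQ_SYNC */
};

/* Return true if any enabled policy bit allows an in-place update. */
static bool allow_inplace_update(const struct write_ctx *ctx)
{
	if (ctx->policy & (1 << IPU_FORCE))
		return true;
	if ((ctx->policy & (1 << IPU_UTIL)) &&
	    ctx->utilization > ctx->min_ipu_util)
		return true;
	if ((ctx->policy & (1 << IPU_ASYNC)) && ctx->async_write)
		return true;
	return false;
}

int main(void)
{
	struct write_ctx ctx = {
		.policy = (1 << IPU_UTIL) | (1 << IPU_ASYNC),
		.utilization = 85,
		.min_ipu_util = 70,
		.async_write = false,
	};

	printf("in-place update %s\n",
	       allow_inplace_update(&ctx) ? "allowed" : "denied");
	return 0;
}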
1951 if (S_ISDIR(inode->i_mode)) in f2fs_should_update_outplace()
1958 if (is_cold_data(fio->page)) in f2fs_should_update_outplace()
1960 if (IS_ATOMIC_WRITTEN_PAGE(fio->page)) in f2fs_should_update_outplace()
1963 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) in f2fs_should_update_outplace()
1971 struct inode *inode = fio->page->mapping->host; in need_inplace_update()
1981 struct page *page = fio->page; in f2fs_do_write_data_page()
1982 struct inode *inode = page->mapping->host; in f2fs_do_write_data_page()
1991 f2fs_lookup_extent_cache(inode, page->index, &ei)) { in f2fs_do_write_data_page()
1992 fio->old_blkaddr = ei.blk + page->index - ei.fofs; in f2fs_do_write_data_page()
1994 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, in f2fs_do_write_data_page()
1996 return -EFSCORRUPTED; in f2fs_do_write_data_page()
1999 fio->need_lock = LOCK_DONE; in f2fs_do_write_data_page()
2003 /* Avoid deadlock between page->lock and f2fs_lock_op */ in f2fs_do_write_data_page()
2004 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi)) in f2fs_do_write_data_page()
2005 return -EAGAIN; in f2fs_do_write_data_page()
2007 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE); in f2fs_do_write_data_page()
2011 fio->old_blkaddr = dn.data_blkaddr; in f2fs_do_write_data_page()
2014 if (fio->old_blkaddr == NULL_ADDR) { in f2fs_do_write_data_page()
2020 if (__is_valid_data_blkaddr(fio->old_blkaddr) && in f2fs_do_write_data_page()
2021 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, in f2fs_do_write_data_page()
2023 err = -EFSCORRUPTED; in f2fs_do_write_data_page()
2028 * it is better to write updated data in place. in f2fs_do_write_data_page()
2031 (__is_valid_data_blkaddr(fio->old_blkaddr) && in f2fs_do_write_data_page()
2040 if (fio->need_lock == LOCK_REQ) in f2fs_do_write_data_page()
2041 f2fs_unlock_op(fio->sbi); in f2fs_do_write_data_page()
2045 fscrypt_finalize_bounce_page(&fio->encrypted_page); in f2fs_do_write_data_page()
2051 trace_f2fs_do_write_data_page(fio->page, IPU); in f2fs_do_write_data_page()
2055 if (fio->need_lock == LOCK_RETRY) { in f2fs_do_write_data_page()
2056 if (!f2fs_trylock_op(fio->sbi)) { in f2fs_do_write_data_page()
2057 err = -EAGAIN; in f2fs_do_write_data_page()
2060 fio->need_lock = LOCK_REQ; in f2fs_do_write_data_page()
2063 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni); in f2fs_do_write_data_page()
2067 fio->version = ni.version; in f2fs_do_write_data_page()
2080 if (page->index == 0) in f2fs_do_write_data_page()
2085 if (fio->need_lock == LOCK_REQ) in f2fs_do_write_data_page()
2086 f2fs_unlock_op(fio->sbi); in f2fs_do_write_data_page()
2096 struct inode *inode = page->mapping->host; in __write_data_page()
2101 loff_t psize = (page->index + 1) << PAGE_SHIFT; in __write_data_page()
2107 .ino = inode->i_ino, in __write_data_page()
2108 .type = DATA, in __write_data_page()
2122 trace_f2fs_writepage(page, DATA); in __write_data_page()
2124 /* we should bypass data pages to proceed with the kworker jobs */ in __write_data_page()
2126 mapping_set_error(page->mapping, -EIO); in __write_data_page()
2131 if (S_ISDIR(inode->i_mode)) in __write_data_page()
2139 if (page->index < end_index || f2fs_verity_in_progress(inode)) in __write_data_page()
2143 * If the offset is out of range of the file size, in __write_data_page()
2146 offset = i_size & (PAGE_SIZE - 1); in __write_data_page()
2147 if ((page->index >= end_index + 1) || !offset) in __write_data_page()
2155 if (f2fs_is_volatile_file(inode) && (!page->index || in __write_data_page()
2156 (!wbc->for_reclaim && in __write_data_page()
2161 if (S_ISDIR(inode->i_mode)) { in __write_data_page()
2167 if (!wbc->for_reclaim) in __write_data_page()
2174 err = -EAGAIN; in __write_data_page()
2181 if (err == -EAGAIN) { in __write_data_page()
2183 if (err == -EAGAIN) { in __write_data_page()
2192 down_write(&F2FS_I(inode)->i_sem); in __write_data_page()
2193 if (F2FS_I(inode)->last_disk_size < psize) in __write_data_page()
2194 F2FS_I(inode)->last_disk_size = psize; in __write_data_page()
2195 up_write(&F2FS_I(inode)->i_sem); in __write_data_page()
2199 if (err && err != -ENOENT) in __write_data_page()
2209 if (wbc->for_reclaim) { in __write_data_page()
2210 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA); in __write_data_page()
2217 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) && in __write_data_page()
2218 !F2FS_I(inode)->cp_task) { in __write_data_page()
2225 f2fs_submit_merged_write(sbi, DATA); in __write_data_page()
2238 * -> mapping_set_error() -> set_bit(AS_EIO, ...). in __write_data_page()
2242 if (!err || wbc->for_reclaim) in __write_data_page()
2255 * This function was copied from write_cache_pages in mm/page-writeback.c.
2256 * The major change is that cold data pages are written in a separate step from
2257 * warm/hot data pages.
2259 static int f2fs_write_cache_pages(struct address_space *mapping, in f2fs_write_cache_pages() argument
2266 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); in f2fs_write_cache_pages()
2281 if (get_dirty_pages(mapping->host) <= in f2fs_write_cache_pages()
2282 SM_I(F2FS_M_SB(mapping))->min_hot_blocks) in f2fs_write_cache_pages()
2283 set_inode_flag(mapping->host, FI_HOT_DATA); in f2fs_write_cache_pages()
2285 clear_inode_flag(mapping->host, FI_HOT_DATA); in f2fs_write_cache_pages()
2287 if (wbc->range_cyclic) { in f2fs_write_cache_pages()
2288 writeback_index = mapping->writeback_index; /* prev offset */ in f2fs_write_cache_pages()
2294 end = -1; in f2fs_write_cache_pages()
2296 index = wbc->range_start >> PAGE_SHIFT; in f2fs_write_cache_pages()
2297 end = wbc->range_end >> PAGE_SHIFT; in f2fs_write_cache_pages()
2298 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) in f2fs_write_cache_pages()
2302 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in f2fs_write_cache_pages()
2307 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in f2fs_write_cache_pages()
2308 tag_pages_for_writeback(mapping, index, end); in f2fs_write_cache_pages()
2313 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in f2fs_write_cache_pages()
2323 if (atomic_read(&sbi->wb_sync_req[DATA]) && in f2fs_write_cache_pages()
2324 wbc->sync_mode == WB_SYNC_NONE) { in f2fs_write_cache_pages()
2329 done_index = page->index; in f2fs_write_cache_pages()
2333 if (unlikely(page->mapping != mapping)) { in f2fs_write_cache_pages()
2345 if (wbc->sync_mode != WB_SYNC_NONE) { in f2fs_write_cache_pages()
2347 DATA, true, true); in f2fs_write_cache_pages()
2368 } else if (ret == -EAGAIN) { in f2fs_write_cache_pages()
2370 if (wbc->sync_mode == WB_SYNC_ALL) { in f2fs_write_cache_pages()
2378 done_index = page->index + 1; in f2fs_write_cache_pages()
2385 if (--wbc->nr_to_write <= 0 && in f2fs_write_cache_pages()
2386 wbc->sync_mode == WB_SYNC_NONE) { in f2fs_write_cache_pages()
2398 end = writeback_index - 1; in f2fs_write_cache_pages()
2401 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) in f2fs_write_cache_pages()
2402 mapping->writeback_index = done_index; in f2fs_write_cache_pages()
2405 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host, in f2fs_write_cache_pages()
2406 NULL, 0, DATA); in f2fs_write_cache_pages()
2409 __submit_bio(sbi, bio, DATA); in f2fs_write_cache_pages()
2417 if (!S_ISREG(inode->i_mode)) in __should_serialize_io()
2421 /* to avoid deadlock in the data flush path */ in __should_serialize_io()
2422 if (F2FS_I(inode)->cp_task) in __should_serialize_io()
2424 if (wbc->sync_mode != WB_SYNC_ALL) in __should_serialize_io()
2426 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks) in __should_serialize_io()
2431 static int __f2fs_write_data_pages(struct address_space *mapping, in __f2fs_write_data_pages() argument
2435 struct inode *inode = mapping->host; in __f2fs_write_data_pages()
2442 if (!mapping->a_ops->writepage) in __f2fs_write_data_pages()
2446 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE) in __f2fs_write_data_pages()
2453 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) && in __f2fs_write_data_pages()
2454 wbc->sync_mode == WB_SYNC_NONE && in __f2fs_write_data_pages()
2455 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) && in __f2fs_write_data_pages()
2463 trace_f2fs_writepages(mapping->host, wbc, DATA); in __f2fs_write_data_pages()
2466 if (wbc->sync_mode == WB_SYNC_ALL) in __f2fs_write_data_pages()
2467 atomic_inc(&sbi->wb_sync_req[DATA]); in __f2fs_write_data_pages()
2468 else if (atomic_read(&sbi->wb_sync_req[DATA])) in __f2fs_write_data_pages()
2472 mutex_lock(&sbi->writepages); in __f2fs_write_data_pages()
2477 ret = f2fs_write_cache_pages(mapping, wbc, io_type); in __f2fs_write_data_pages()
2481 mutex_unlock(&sbi->writepages); in __f2fs_write_data_pages()
2483 if (wbc->sync_mode == WB_SYNC_ALL) in __f2fs_write_data_pages()
2484 atomic_dec(&sbi->wb_sync_req[DATA]); in __f2fs_write_data_pages()
2486 * if some pages were truncated, we cannot guarantee their mapping->host in __f2fs_write_data_pages()
2494 wbc->pages_skipped += get_dirty_pages(inode); in __f2fs_write_data_pages()
2495 trace_f2fs_writepages(mapping->host, wbc, DATA); in __f2fs_write_data_pages()
2499 static int f2fs_write_data_pages(struct address_space *mapping, in f2fs_write_data_pages() argument
2502 struct inode *inode = mapping->host; in f2fs_write_data_pages()
2504 return __f2fs_write_data_pages(mapping, wbc, in f2fs_write_data_pages()
2505 F2FS_I(inode)->cp_task == current ? in f2fs_write_data_pages()
2509 static void f2fs_write_failed(struct address_space *mapping, loff_t to) in f2fs_write_failed() argument
2511 struct inode *inode = mapping->host; in f2fs_write_failed()
2514 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */ in f2fs_write_failed()
2516 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_write_failed()
2517 down_write(&F2FS_I(inode)->i_mmap_sem); in f2fs_write_failed()
2523 up_write(&F2FS_I(inode)->i_mmap_sem); in f2fs_write_failed()
2524 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_write_failed()
2532 struct inode *inode = page->mapping->host; in prepare_write_begin()
2533 pgoff_t index = page->index; in prepare_write_begin()
2563 ipage = f2fs_get_node_page(sbi, inode->i_ino); in prepare_write_begin()
2575 if (inode->i_nlink) in prepare_write_begin()
2588 dn.data_blkaddr = ei.blk + index - ei.fofs; in prepare_write_begin()
2614 static int f2fs_write_begin(struct file *file, struct address_space *mapping, in f2fs_write_begin() argument
2618 struct inode *inode = mapping->host; in f2fs_write_begin()
2629 err = -ENOSPC; in f2fs_write_begin()
2636 err = -ENOMEM; in f2fs_write_begin()
2644 * lock_page(page #0) -> lock_page(inode_page) in f2fs_write_begin()
2656 page = f2fs_pagecache_get_page(mapping, index, in f2fs_write_begin()
2659 err = -ENOMEM; in f2fs_write_begin()
2675 if (page->mapping != mapping) { in f2fs_write_begin()
2682 f2fs_wait_on_page_writeback(page, DATA, false, true); in f2fs_write_begin()
2687 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) && in f2fs_write_begin()
2699 err = -EFSCORRUPTED; in f2fs_write_begin()
2707 if (unlikely(page->mapping != mapping)) { in f2fs_write_begin()
2712 err = -EIO; in f2fs_write_begin()
2720 f2fs_write_failed(mapping, pos + len); in f2fs_write_begin()
2727 struct address_space *mapping, in f2fs_write_end() argument
2731 struct inode *inode = page->mapping->host; in f2fs_write_end()
2738 * let generic_perform_write() try to copy data again through copied=0. in f2fs_write_end()
2763 unsigned i_blkbits = READ_ONCE(inode->i_blkbits); in check_direct_IO()
2765 unsigned blocksize_mask = (1 << blkbits) - 1; in check_direct_IO()
2767 struct block_device *bdev = inode->i_sb->s_bdev; in check_direct_IO()
2772 blocksize_mask = (1 << blkbits) - 1; in check_direct_IO()
2774 return -EINVAL; in check_direct_IO()
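check_direct_IO() above tests the I/O's alignment against a power-of-two block size using a mask of (1 << blkbits) - 1 and returns -EINVAL on a mismatch (the listed lines also show it rebuilding the mask from the block device when needed). A minimal sketch of that alignment test, with the device fallback omitted:

#include <stdio.h>
#include <stdint.h>

/* Return 0 if offset and length are both blocksize-aligned, -1 otherwise. */
static int check_direct_io(uint64_t offset, uint64_t length, unsigned int blkbits)
{
	uint64_t blocksize_mask = (1ULL << blkbits) - 1;	/* e.g. 0xfff for 4 KiB */

	if ((offset & blocksize_mask) || (length & blocksize_mask))
		return -1;		/* misaligned: caller falls back to buffered I/O */
	return 0;
}

int main(void)
{
	printf("aligned write:    %d\n", check_direct_io(8192, 4096, 12));	/* accepted */
	printf("misaligned write: %d\n", check_direct_io(8192, 1000, 12));	/* rejected */
	return 0;
}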
2782 struct f2fs_private_dio *dio = bio->bi_private; in f2fs_dio_end_io()
2784 dec_page_count(F2FS_I_SB(dio->inode), in f2fs_dio_end_io()
2785 dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ); in f2fs_dio_end_io()
2787 bio->bi_private = dio->orig_private; in f2fs_dio_end_io()
2788 bio->bi_end_io = dio->orig_end_io; in f2fs_dio_end_io()
2806 dio->inode = inode; in f2fs_dio_submit_bio()
2807 dio->orig_end_io = bio->bi_end_io; in f2fs_dio_submit_bio()
2808 dio->orig_private = bio->bi_private; in f2fs_dio_submit_bio()
2809 dio->write = write; in f2fs_dio_submit_bio()
2811 bio->bi_end_io = f2fs_dio_end_io; in f2fs_dio_submit_bio()
2812 bio->bi_private = dio; in f2fs_dio_submit_bio()
2820 bio->bi_status = BLK_STS_IOERR; in f2fs_dio_submit_bio()
2826 struct address_space *mapping = iocb->ki_filp->f_mapping; in f2fs_direct_IO() local
2827 struct inode *inode = mapping->host; in f2fs_direct_IO()
2831 loff_t offset = iocb->ki_pos; in f2fs_direct_IO()
2834 enum rw_hint hint = iocb->ki_hint; in f2fs_direct_IO()
2850 iocb->ki_hint = WRITE_LIFE_NOT_SET; in f2fs_direct_IO()
2852 if (iocb->ki_flags & IOCB_NOWAIT) { in f2fs_direct_IO()
2853 if (!down_read_trylock(&fi->i_gc_rwsem[rw])) { in f2fs_direct_IO()
2854 iocb->ki_hint = hint; in f2fs_direct_IO()
2855 err = -EAGAIN; in f2fs_direct_IO()
2858 if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) { in f2fs_direct_IO()
2859 up_read(&fi->i_gc_rwsem[rw]); in f2fs_direct_IO()
2860 iocb->ki_hint = hint; in f2fs_direct_IO()
2861 err = -EAGAIN; in f2fs_direct_IO()
2865 down_read(&fi->i_gc_rwsem[rw]); in f2fs_direct_IO()
2867 down_read(&fi->i_gc_rwsem[READ]); in f2fs_direct_IO()
2870 err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, in f2fs_direct_IO()
2876 up_read(&fi->i_gc_rwsem[READ]); in f2fs_direct_IO()
2878 up_read(&fi->i_gc_rwsem[rw]); in f2fs_direct_IO()
2882 iocb->ki_hint = hint; in f2fs_direct_IO()
2889 f2fs_write_failed(mapping, offset + count); in f2fs_direct_IO()
2902 struct inode *inode = page->mapping->host; in f2fs_invalidate_page()
2905 if (inode->i_ino >= F2FS_ROOT_INO(sbi) && in f2fs_invalidate_page()
2910 if (inode->i_ino == F2FS_META_INO(sbi)) { in f2fs_invalidate_page()
2912 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) { in f2fs_invalidate_page()
2945 struct inode *inode = page_file_mapping(page)->host; in f2fs_set_data_page_dirty()
2947 trace_f2fs_set_page_dirty(page, DATA); in f2fs_set_data_page_dirty()
2974 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) in f2fs_bmap() argument
2976 struct inode *inode = mapping->host; in f2fs_bmap()
2982 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) in f2fs_bmap()
2983 filemap_write_and_wait(mapping); in f2fs_bmap()
2985 return generic_block_bmap(mapping, block, get_data_block_bmap); in f2fs_bmap()
2991 int f2fs_migrate_page(struct address_space *mapping, in f2fs_migrate_page() argument
2995 struct f2fs_inode_info *fi = F2FS_I(mapping->host); in f2fs_migrate_page()
3003 return -EBUSY; in f2fs_migrate_page()
3004 if (!mutex_trylock(&fi->inmem_lock)) in f2fs_migrate_page()
3005 return -EAGAIN; in f2fs_migrate_page()
3010 rc = migrate_page_move_mapping(mapping, newpage, in f2fs_migrate_page()
3014 mutex_unlock(&fi->inmem_lock); in f2fs_migrate_page()
3020 list_for_each_entry(cur, &fi->inmem_pages, list) in f2fs_migrate_page()
3021 if (cur->page == page) { in f2fs_migrate_page()
3022 cur->page = newpage; in f2fs_migrate_page()
3025 mutex_unlock(&fi->inmem_lock); in f2fs_migrate_page()
3048 struct address_space *mapping = swap_file->f_mapping; in check_swap_activate() local
3049 struct inode *inode = mapping->host; in check_swap_activate()
3055 sector_t lowest_block = -1; in check_swap_activate()
3058 blkbits = inode->i_blkbits; in check_swap_activate()
3079 * It must be PAGE_SIZE-aligned on disk in check_swap_activate()
3081 if (first_block & (blocks_per_page - 1)) { in check_swap_activate()
3100 first_block >>= (PAGE_SHIFT - blkbits); in check_swap_activate()
3117 return -EINVAL; in check_swap_activate()
3126 if (!S_ISREG(inode->i_mode)) in f2fs_swap_activate()
3127 return -EINVAL; in f2fs_swap_activate()
3129 if (f2fs_readonly(F2FS_I_SB(inode)->sb)) in f2fs_swap_activate()
3130 return -EROFS; in f2fs_swap_activate()
3136 ret = check_swap_activate(file, sis->max); in f2fs_swap_activate()
3156 return -EOPNOTSUPP; in f2fs_swap_activate()
3185 struct address_space *mapping = page_mapping(page); in f2fs_clear_page_cache_dirty_tag() local
3188 xa_lock_irqsave(&mapping->i_pages, flags); in f2fs_clear_page_cache_dirty_tag()
3189 __xa_clear_mark(&mapping->i_pages, page_index(page), in f2fs_clear_page_cache_dirty_tag()
3191 xa_unlock_irqrestore(&mapping->i_pages, flags); in f2fs_clear_page_cache_dirty_tag()
3211 return -ENOMEM; in f2fs_init_post_read_processing()