Lines matching +full:data +full:-mapping: selected excerpts from fs/xfs/xfs_aops.c (XFS address_space operations and buffered writeback) in the Linux kernel.
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
/*
 * From xfs_ioend_is_append():
 * Fast and loose check if this write could update the on-disk inode size.
 */
        return ioend->io_offset + ioend->io_size >
                XFS_I(ioend->io_inode)->i_disk_size;
/*
 * From xfs_setfilesize():
 * Update the on-disk file size now that data has been written to disk.
 */
        struct xfs_mount        *mp = ip->i_mount;
        ...
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
        ...
        ip->i_disk_size = isize;
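
The rule being implemented is simple: the on-disk size only ever moves forward, to the end of the just-completed write, and never past the in-core size (which a racing truncate may have pulled back). Below is a minimal userspace sketch of that clamping logic, with purely illustrative names (new_disk_size is not a kernel function):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative analogue of the size-update rule: after a write of `size`
 * bytes at `offset` completes, the on-disk size may advance to the end of
 * that write, but never beyond the in-core size and never backward.
 */
static uint64_t
new_disk_size(uint64_t disk_size, uint64_t incore_size,
              uint64_t offset, uint64_t size)
{
        uint64_t end = offset + size;

        if (end > incore_size)          /* clamp to the in-core EOF */
                end = incore_size;
        if (end <= disk_size)           /* no extension: nothing to do */
                return disk_size;
        return end;
}

int main(void)
{
        /* a 4096-byte write at 8192 into a file with in-core size 12288 */
        printf("%llu\n", (unsigned long long)
               new_disk_size(8192, 12288, 8192, 4096));  /* prints 12288 */
        return 0;
}
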
/* From xfs_end_ioend(): I/O completion handling for buffered writeback. */
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
        ...
        /* To avoid memory allocation deadlocks, set a task-wide nofs
         * context for the following operations. */
        ...
        /* Just clean up the in-memory structures if the fs has been shut down. */
                error = -EIO;
        ...
        /*
         * Clean up all COW blocks and underlying data fork delalloc blocks on
         * I/O error.
         */
        error = blk_status_to_errno(ioend->io_bio->bi_status);
        if (unlikely(error)) {
                if (ioend->io_flags & IOMAP_F_SHARED) {
        ...
        if (ioend->io_flags & IOMAP_F_SHARED)
                error = xfs_reflink_end_cow(ip, offset, size);
        else if (ioend->io_type == IOMAP_UNWRITTEN)
                error = xfs_iomap_write_unwritten(ip, offset, size, false);
        ...
                error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
/* From xfs_end_io(): worker that drains the inode's pending ioend list. */
        spin_lock_irqsave(&ip->i_ioend_lock, flags);
        list_replace_init(&ip->i_ioend_list, &tmp);
        spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
        ...
                list_del_init(&ioend->io_list);
/* From xfs_end_bio(): bio completion; defers ioend processing to a worker. */
        struct iomap_ioend      *ioend = bio->bi_private;
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        ...
        spin_lock_irqsave(&ip->i_ioend_lock, flags);
        if (list_empty(&ip->i_ioend_list))
                WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
                                &ip->i_ioend_work));
        list_add_tail(&ioend->io_list, &ip->i_ioend_list);
        spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
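
Two things are worth noting in the excerpt above: the completion side queues the work item only when the list transitions from empty to non-empty, so one worker pass drains an arbitrary batch of ioends, and the worker (xfs_end_io above) detaches the entire list under the lock so the heavyweight processing runs unlocked. Here is a self-contained pthread sketch of the same batching pattern; every name in it (item, complete, worker) is illustrative, not kernel API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
        struct item     *next;
        int             id;
};

static pthread_mutex_t  lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t   kick = PTHREAD_COND_INITIALIZER;
static struct item      *pending;       /* per-"inode" completion list */
static int              done;

/* Worker: detach the whole batch under the lock, process it unlocked.
 * Note: items come out LIFO here; the kernel uses list_add_tail for FIFO. */
static void *worker(void *arg)
{
        (void)arg;
        for (;;) {
                struct item *list, *next;

                pthread_mutex_lock(&lock);
                while (!pending && !done)
                        pthread_cond_wait(&kick, &lock);
                if (!pending && done) {
                        pthread_mutex_unlock(&lock);
                        return NULL;
                }
                list = pending;
                pending = NULL;
                pthread_mutex_unlock(&lock);

                for (; list; list = next) {
                        next = list->next;
                        printf("completed %d\n", list->id);
                        free(list);
                }
        }
}

/* Completion side: queue the item; kick the worker only on empty->non-empty. */
static void complete(int id)
{
        struct item *it = malloc(sizeof(*it));

        it->id = id;
        pthread_mutex_lock(&lock);
        it->next = pending;
        if (!pending)
                pthread_cond_signal(&kick);
        pending = it;
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, worker, NULL);
        for (int i = 1; i <= 4; i++)
                complete(i);            /* later items batch into one pass */

        pthread_mutex_lock(&lock);
        done = 1;
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return 0;
}
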
/* From xfs_imap_valid(): fast revalidation of the cached writeback mapping.
 * Return true if the current mapping is valid, false otherwise. */
        if (offset < wpc->iomap.offset ||
            offset >= wpc->iomap.offset + wpc->iomap.length)
                return false;
        /*
         * If this is a COW mapping, it is sufficient to check that the mapping
         * covers the offset: the caller can revalidate a COW mapping without
         * updating the data seqno.
         */
        if (wpc->iomap.flags & IOMAP_F_SHARED)
                return true;
        /*
         * This is not a COW mapping. Check the sequence number of the data
         * fork and, if the inode has COW data, of the COW fork as well.
         */
        if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
                return false;
        if (xfs_inode_has_cow_data(ip) &&
            XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
                return false;
        return true;
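
The revalidation relies on a sequence counter: when the mapping is cached, the fork's if_seq is sampled, and any later change to the extent tree bumps the counter, so a plain comparison detects staleness without re-walking the tree. A minimal single-threaded sketch of that idea, with illustrative types and names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A cached [offset, offset+length) mapping plus the seq it was built under. */
struct cached_map {
        uint64_t        offset;
        uint64_t        length;
        uint32_t        seq;            /* sampled at lookup time */
};

static uint32_t fork_seq;               /* bumped on every extent-tree change */

static void modify_extents(void)
{
        fork_seq++;                     /* any change invalidates cached maps */
}

static bool map_valid(const struct cached_map *map, uint64_t offset)
{
        if (offset < map->offset || offset >= map->offset + map->length)
                return false;           /* offset not covered at all */
        return map->seq == fork_seq;    /* stale if the tree changed since */
}

int main(void)
{
        struct cached_map map = { .offset = 0, .length = 4096, .seq = fork_seq };

        printf("%d\n", map_valid(&map, 100));   /* 1: covered and fresh */
        modify_extents();
        printf("%d\n", map_valid(&map, 100));   /* 0: seq mismatch, stale */
        return 0;
}
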
/*
 * From xfs_convert_blocks(): convert a delalloc extent to real blocks and
 * return the real extent that maps offset_fsb in wpc->iomap. The page is
 * held locked, so nothing could have removed the block backing offset_fsb,
 * although it could have moved from the COW to the data fork.
 */
        if (whichfork == XFS_COW_FORK)
                seq = &XFS_WPC(wpc)->cow_seq;
        else
                seq = &XFS_WPC(wpc)->data_seq;
        /* Allocate in a loop and put the result into wpc->iomap. */
        do {
                error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
                                &wpc->iomap, seq);
                if (error)
                        return error;
        } while (wpc->iomap.offset + wpc->iomap.length <= offset);
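
The loop exists because free space may be fragmented: one conversion call can return a real extent that still ends at or before the block being written back, so the code keeps converting until the returned range covers the target. The shape of that retry loop in a runnable, illustrative form (convert_some stands in for the conversion primitive and is not a kernel function):

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, len; };

/*
 * Stand-in for the conversion primitive: each call converts at most two
 * blocks past what has been converted so far, modelling an allocator
 * limited by free-space fragmentation.
 */
static uint64_t converted_end;

static int convert_some(struct range *out)
{
        out->start = converted_end;
        out->len = 2;
        converted_end += out->len;
        return 0;
}

int main(void)
{
        uint64_t target = 5;    /* block that must end up covered */
        struct range r;

        do {
                if (convert_some(&r))
                        return 1;
                printf("converted [%llu, %llu)\n",
                       (unsigned long long)r.start,
                       (unsigned long long)(r.start + r.len));
        } while (r.start + r.len <= target);    /* not covered yet: retry */
        return 0;
}
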
/* From xfs_map_blocks(): build the writeback mapping for the given offset. */
        struct xfs_mount        *mp = ip->i_mount;
        ...
        if (xfs_is_shutdown(mp))
                return -EIO;
        /*
         * COW fork blocks can overlap data fork blocks even if the blocks
         * aren't shared.  COW I/O always takes precedence, so we must always
         * check for overlap on reflink inodes unless the mapping is already
         * a COW one.
         */
        ...
        ASSERT(!xfs_need_iread_extents(&ip->i_df));
        /* Use a COW extent covering this offset directly, if there is one,
         * instead of looking up anything in the data fork. */
        if (xfs_inode_has_cow_data(ip) &&
            xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
        ...
                XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
        /*
         * No COW extent overlap. Revalidate now that we may have updated
         * ->cow_seq. If the data mapping is still valid, we're done.
         */
        ...
        if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
        ...
        XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
        ...
                imap.br_blockcount = imap.br_startoff - offset_fsb;
        /*
         * Truncate to the next COW extent if there is one: COW fork lookups
         * are skipped for subsequent blocks in the mapping; however, the
         * requirement to treat the COW range separately remains.
         */
                imap.br_blockcount = cow_fsb - imap.br_startoff;
        ...
        xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0);
        ...
        /*
         * If we failed to find the extent in the COW fork we might have
         * raced with a COW to data fork conversion or truncate.
         * Restart the lookup to catch the extent in the data fork for
         * the former case, but avoid looping forever for the latter.
         */
        if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
                goto retry;
        ASSERT(error != -EAGAIN);
        /*
         * Due to merging, the returned real extent might be larger than the
         * original delalloc one.  Trim it back to the next COW boundary
         * again to force a re-lookup.
         */
        if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
                wpc->iomap.length = cow_offset - wpc->iomap.offset;
        ...
        ASSERT(wpc->iomap.offset <= offset);
        ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
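
The ordering above is the important part: a COW extent covering the offset always wins over the data fork, and a data-fork mapping must be trimmed so it never crosses the next COW extent, because later blocks in the mapping will not repeat the COW lookup. A toy two-array version of that precedence-and-trim logic follows; it is entirely illustrative, with fixed example extents:

#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t start, len; };

/* Toy forks: sorted, non-overlapping within each array, in blocks. */
static const struct extent cow[]  = { { 8, 4 } };
static const struct extent data[] = { { 0, 16 } };

/* Return the extent covering offset, honouring COW-fork precedence. */
static struct extent map_offset(uint64_t offset)
{
        struct extent ret = { 0, 0 };
        uint64_t next_cow = UINT64_MAX;

        for (unsigned i = 0; i < sizeof(cow) / sizeof(cow[0]); i++) {
                if (offset >= cow[i].start &&
                    offset < cow[i].start + cow[i].len)
                        return cow[i];          /* COW always wins */
                if (cow[i].start > offset && cow[i].start < next_cow)
                        next_cow = cow[i].start; /* nearest COW boundary ahead */
        }
        for (unsigned i = 0; i < sizeof(data) / sizeof(data[0]); i++) {
                if (offset >= data[i].start &&
                    offset < data[i].start + data[i].len) {
                        ret = data[i];
                        /* trim so the mapping stops at the next COW extent */
                        if (ret.start + ret.len > next_cow)
                                ret.len = next_cow - ret.start;
                        break;
                }
        }
        return ret;
}

int main(void)
{
        struct extent e = map_offset(2);        /* data, trimmed at block 8 */

        printf("[%llu, %llu)\n", (unsigned long long)e.start,
               (unsigned long long)(e.start + e.len)); /* -> [0, 8) */
        e = map_offset(9);                      /* inside the COW extent */
        printf("[%llu, %llu)\n", (unsigned long long)e.start,
               (unsigned long long)(e.start + e.len)); /* -> [8, 12) */
        return 0;
}
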
/* From xfs_prepare_ioend(): runs at bio submission time. */
        /* To avoid memory allocation deadlocks, set a task-wide nofs
         * context for the following operations. */
        ...
        /* Convert CoW extents to regular. */
        if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
                status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
                                ioend->io_offset, ioend->io_size);
        }
        ...
        /* Send ioends that might require a transaction to the completion wq. */
        if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN ||
            (ioend->io_flags & IOMAP_F_SHARED))
                ioend->io_bio->bi_end_io = xfs_end_bio;
/*
 * From xfs_discard_folio(): punch out any delalloc blocks backing the folio
 * before invalidating it, so no stale delalloc mapping is left behind on the
 * inode. Because they are delalloc, this needs no transaction. Indeed - on
 * ENOSPC we must be able to do this truncation without a transaction, as
 * there is no space left for a block reservation.
 */
        struct inode            *inode = folio->mapping->host;
        struct xfs_mount        *mp = ip->i_mount;
        ...
                        folio, ip->i_ino, pos); /* trailing args of an xfs_alert */
        error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
                        i_blocks_per_folio(inode, folio) - pageoff_fsb);
        if (error && !xfs_is_shutdown(mp))
                xfs_alert(mp, "page discard unable to remove delalloc mapping.");
/* From xfs_vm_writepages(): the ->writepages address_space operation. */
        struct address_space    *mapping,
        struct writeback_control *wbc)
        ...
        /*
         * Writing back data in a transaction context can result in recursive
         * transactions. This is bad, so issue a warning and get out of here.
         */
        if (WARN_ON_ONCE(current->journal_info))
                return 0;
        xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
        return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
/* From xfs_dax_writepages(): the DAX variant of ->writepages. */
        struct address_space    *mapping,
        ...
        struct xfs_inode        *ip = XFS_I(mapping->host);

        xfs_iflags_clear(ip, XFS_ITRUNCATED);
        return dax_writeback_mapping_range(mapping,
                        xfs_inode_buftarg(ip)->bt_daxdev, wbc);
/* From xfs_vm_bmap(): the legacy ->bmap address_space operation. */
        struct address_space    *mapping,
        ...
        struct xfs_inode        *ip = XFS_I(mapping->host);
        /*
         * The swap code (ab-)uses ->bmap to get a block mapping and then
         * bypasses the file system for actual I/O, so it is refused for
         * reflink and realtime inodes before falling through to iomap_bmap().
         */
        return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
/* From xfs_iomap_swapfile_activate(): */
        sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;