Sampled lines from fs/direct-io.c (Linux kernel direct I/O support)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * fs/direct-io.c
16 * added support for non-aligned IO.
59 * is determined on a per-invocation basis. When talking to the filesystem
61 * down by dio->blkfactor. Similarly, fs-blocksize quantities are converted
76 finer. blkfactor=2 means 1/4-block
78 unsigned start_zero_done; /* flag: sub-blocksize zeroing has
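The comment fragments above define the unit system the whole file runs on: dio_blocks (the granularity of the caller's alignment) versus fs_blocks, related by blkfactor = i_blkbits - blkbits. A minimal standalone check of that arithmetic, with illustrative sizes matching the "blkfactor=2 means 1/4-block" remark:

#include <assert.h>

int main(void)
{
	unsigned blkbits = 10;		/* 1024-byte dio blocks */
	unsigned i_blkbits = 12;	/* 4096-byte fs blocks */
	unsigned blkfactor = i_blkbits - blkbits; /* 2: "1/4-block" alignment */

	unsigned long long dio_block = 21;
	/* fs-blocksize quantities: scale dio blocks DOWN by blkfactor */
	unsigned long long fs_block = dio_block >> blkfactor;	    /* 5 */
	/* and scale fs blocks back UP to get the first dio block */
	unsigned long long first_dio_block = fs_block << blkfactor; /* 20 */

	assert(blkfactor == 2 && fs_block == 5 && first_dio_block == 20);
	return 0;
}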
163 return sdio->tail - sdio->head; in dio_pages_present()
173 ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, in dio_refill_pages()
174 &sdio->from); in dio_refill_pages()
176 if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) { in dio_refill_pages()
183 if (dio->page_errors == 0) in dio_refill_pages()
184 dio->page_errors = ret; in dio_refill_pages()
186 dio->pages[0] = page; in dio_refill_pages()
187 sdio->head = 0; in dio_refill_pages()
188 sdio->tail = 1; in dio_refill_pages()
189 sdio->from = 0; in dio_refill_pages()
190 sdio->to = PAGE_SIZE; in dio_refill_pages()
195 iov_iter_advance(sdio->iter, ret); in dio_refill_pages()
196 ret += sdio->from; in dio_refill_pages()
197 sdio->head = 0; in dio_refill_pages()
198 sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE; in dio_refill_pages()
199 sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1; in dio_refill_pages()
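The head/tail/from/to bookkeeping above turns the byte count returned by iov_iter_get_pages() into a window over the pinned pages. A self-contained sketch of the same arithmetic (PAGE_SIZE fixed at 4096 for illustration):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* iov_iter_get_pages() mapped `got` bytes starting at offset `from`
	 * within the first pinned page */
	size_t from = 100, got = 5000;
	size_t span = got + from;	/* bytes covered, counted from page 0 */

	size_t head = 0;
	size_t tail = (span + PAGE_SIZE - 1) / PAGE_SIZE; /* pages pinned */
	size_t to   = ((span - 1) & (PAGE_SIZE - 1)) + 1; /* valid bytes in last page */

	assert(head == 0 && tail == 2 && to == 1004);
	return 0;
}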
222 return dio->pages[sdio->head]; in dio_get_page()
226 * dio_complete() - called when all DIO BIO I/O has been completed
238 loff_t offset = dio->iocb->ki_pos; in dio_complete()
245 * In that case -EIOCBQUEUED is in fact not an error we want in dio_complete()
248 if (ret == -EIOCBQUEUED) in dio_complete()
251 if (dio->result) { in dio_complete()
252 transferred = dio->result; in dio_complete()
255 if ((dio->op == REQ_OP_READ) && in dio_complete()
256 ((offset + transferred) > dio->i_size)) in dio_complete()
257 transferred = dio->i_size - offset; in dio_complete()
259 if (unlikely(ret == -EFAULT) && transferred) in dio_complete()
264 ret = dio->page_errors; in dio_complete()
266 ret = dio->io_error; in dio_complete()
270 if (dio->end_io) { in dio_complete()
272 err = dio->end_io(dio->iocb, offset, ret, dio->private); in dio_complete()
279 * non-direct readahead, or faulted in by get_user_pages() if the source in dio_complete()
284 * And this page cache invalidation has to be after dio->end_io(), as in dio_complete()
290 ret > 0 && dio->op == REQ_OP_WRITE && in dio_complete()
291 dio->inode->i_mapping->nrpages) { in dio_complete()
292 err = invalidate_inode_pages2_range(dio->inode->i_mapping, in dio_complete()
294 (offset + ret - 1) >> PAGE_SHIFT); in dio_complete()
296 dio_warn_stale_pagecache(dio->iocb->ki_filp); in dio_complete()
299 inode_dio_end(dio->inode); in dio_complete()
307 dio->iocb->ki_pos += transferred; in dio_complete()
309 if (ret > 0 && dio->op == REQ_OP_WRITE) in dio_complete()
310 ret = generic_write_sync(dio->iocb, ret); in dio_complete()
311 dio->iocb->ki_complete(dio->iocb, ret, 0); in dio_complete()
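The clamp sampled at lines 255-257 keeps a read from reporting bytes past the i_size that was sampled at setup, even if the filesystem instantiated blocks beyond it. Worked through with illustrative numbers:

#include <assert.h>

int main(void)
{
	long long offset = 4096, i_size = 6000, transferred = 4096;

	/* a READ must never report more bytes than existed at i_size */
	if (offset + transferred > i_size)
		transferred = i_size - offset;

	assert(transferred == 1904);
	return 0;
}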
332 struct dio *dio = bio->bi_private; in dio_bio_end_aio()
340 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_aio()
341 remaining = --dio->refcount; in dio_bio_end_aio()
342 if (remaining == 1 && dio->waiter) in dio_bio_end_aio()
343 wake_up_process(dio->waiter); in dio_bio_end_aio()
344 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_aio()
355 if (dio->result) in dio_bio_end_aio()
356 defer_completion = dio->defer_completion || in dio_bio_end_aio()
357 (dio->op == REQ_OP_WRITE && in dio_bio_end_aio()
358 dio->inode->i_mapping->nrpages); in dio_bio_end_aio()
360 INIT_WORK(&dio->complete_work, dio_aio_complete_work); in dio_bio_end_aio()
361 queue_work(dio->inode->i_sb->s_dio_done_wq, in dio_bio_end_aio()
362 &dio->complete_work); in dio_bio_end_aio()
370 * The BIO completion handler simply queues the BIO up for the process-context
374 * implement a singly-linked list of completed BIOs, at dio->bio_list.
378 struct dio *dio = bio->bi_private; in dio_bio_end_io()
381 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_io()
382 bio->bi_private = dio->bio_list; in dio_bio_end_io()
383 dio->bio_list = bio; in dio_bio_end_io()
384 if (--dio->refcount == 1 && dio->waiter) in dio_bio_end_io()
385 wake_up_process(dio->waiter); in dio_bio_end_io()
386 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_io()
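dio_bio_end_io() threads completed bios onto dio->bio_list by reusing bi_private as the link pointer, so completion needs no allocation. A small userspace model of that push/pop idiom (struct fake_bio and the helpers are stand-ins; the real code does this under dio->bio_lock):

#include <assert.h>
#include <stddef.h>

struct fake_bio {
	void *bi_private;	/* after completion: points at the next done bio */
};

/* push at head, as dio_bio_end_io() does */
static void push(struct fake_bio **list, struct fake_bio *bio)
{
	bio->bi_private = *list;
	*list = bio;
}

/* pop from head, as dio_await_one() and dio_bio_reap() do */
static struct fake_bio *pop(struct fake_bio **list)
{
	struct fake_bio *bio = *list;
	if (bio)
		*list = bio->bi_private;
	return bio;
}

int main(void)
{
	struct fake_bio a = {0}, b = {0};
	struct fake_bio *list = NULL;

	push(&list, &a);
	push(&list, &b);
	assert(pop(&list) == &b && pop(&list) == &a && !pop(&list));
	return 0;
}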
403 bio->bi_iter.bi_sector = first_sector; in dio_bio_alloc()
404 bio_set_op_attrs(bio, dio->op, dio->op_flags); in dio_bio_alloc()
405 if (dio->is_async) in dio_bio_alloc()
406 bio->bi_end_io = dio_bio_end_aio; in dio_bio_alloc()
408 bio->bi_end_io = dio_bio_end_io; in dio_bio_alloc()
410 bio->bi_write_hint = dio->iocb->ki_hint; in dio_bio_alloc()
412 sdio->bio = bio; in dio_bio_alloc()
413 sdio->logical_offset_in_bio = sdio->cur_page_fs_offset; in dio_bio_alloc()
421 * bios hold a dio reference between submit_bio and ->end_io.
425 struct bio *bio = sdio->bio; in dio_bio_submit()
428 bio->bi_private = dio; in dio_bio_submit()
432 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_submit()
433 dio->refcount++; in dio_bio_submit()
434 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_submit()
436 if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) in dio_bio_submit()
439 dio->bio_disk = bio->bi_bdev->bd_disk; in dio_bio_submit()
441 if (sdio->submit_io) { in dio_bio_submit()
442 sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); in dio_bio_submit()
443 dio->bio_cookie = BLK_QC_T_NONE; in dio_bio_submit()
445 dio->bio_cookie = submit_bio(bio); in dio_bio_submit()
447 sdio->bio = NULL; in dio_bio_submit()
448 sdio->boundary = 0; in dio_bio_submit()
449 sdio->logical_offset_in_bio = 0; in dio_bio_submit()
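The refcount taken here pairs with the decrements in the two completion handlers sampled earlier: one reference belongs to the submitter, one to each in-flight bio, and the sleeper can be woken once only its own reference remains. A plain-int model of that protocol (the real counter is guarded by dio->bio_lock):

#include <assert.h>

int main(void)
{
	int refcount = 1;		/* submitter's reference */

	refcount++;			/* dio_bio_submit(): bio #1 */
	refcount++;			/* dio_bio_submit(): bio #2 */

	int remaining = --refcount;	/* bio #2 completes */
	assert(remaining == 2);		/* waiter must keep sleeping */

	remaining = --refcount;		/* bio #1 completes */
	assert(remaining == 1);		/* time for wake_up_process(dio->waiter) */
	return 0;
}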
457 while (sdio->head < sdio->tail) in dio_cleanup()
458 put_page(dio->pages[sdio->head++]); in dio_cleanup()
464 * all bios have been issued so that dio->refcount can only decrease. This
472 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
480 while (dio->refcount > 1 && dio->bio_list == NULL) { in dio_await_one()
482 dio->waiter = current; in dio_await_one()
483 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
484 if (!(dio->iocb->ki_flags & IOCB_HIPRI) || in dio_await_one()
485 !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true)) in dio_await_one()
488 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
489 dio->waiter = NULL; in dio_await_one()
491 if (dio->bio_list) { in dio_await_one()
492 bio = dio->bio_list; in dio_await_one()
493 dio->bio_list = bio->bi_private; in dio_await_one()
495 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
504 blk_status_t err = bio->bi_status; in dio_bio_complete()
505 bool should_dirty = dio->op == REQ_OP_READ && dio->should_dirty; in dio_bio_complete()
508 if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT)) in dio_bio_complete()
509 dio->io_error = -EAGAIN; in dio_bio_complete()
511 dio->io_error = -EIO; in dio_bio_complete()
514 if (dio->is_async && should_dirty) { in dio_bio_complete()
524 * Wait on and process all in-flight BIOs. This must only be called once
527 * errors are propagated through dio->io_error and should be propagated via
551 if (sdio->reap_counter++ >= 64) { in dio_bio_reap()
552 while (dio->bio_list) { in dio_bio_reap()
557 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_reap()
558 bio = dio->bio_list; in dio_bio_reap()
559 dio->bio_list = bio->bi_private; in dio_bio_reap()
560 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_reap()
565 sdio->reap_counter = 0; in dio_bio_reap()
581 sb->s_id); in sb_init_dio_done_wq()
583 return -ENOMEM; in sb_init_dio_done_wq()
587 old = cmpxchg(&sb->s_dio_done_wq, NULL, wq); in sb_init_dio_done_wq()
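The cmpxchg publishes the workqueue exactly once under concurrency; the loser of the race destroys its own allocation, which is why the full function checks `old`. The same install-once idiom in portable C11, with a heap buffer standing in for the workqueue:

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(void *) slot;	/* stands in for sb->s_dio_done_wq */

static int install_once(void)
{
	void *mine = malloc(64);	/* stands in for workqueue creation */
	void *expected = NULL;

	if (!mine)
		return -1;		/* the kernel returns -ENOMEM here */
	if (!atomic_compare_exchange_strong(&slot, &expected, mine))
		free(mine);		/* someone else won the race */
	return 0;
}

int main(void)
{
	assert(install_once() == 0);
	void *first = atomic_load(&slot);
	assert(install_once() == 0);	/* second call must not replace it */
	assert(atomic_load(&slot) == first);
	return 0;
}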
596 struct super_block *sb = dio->inode->i_sb; in dio_set_defer_completion()
598 if (dio->defer_completion) in dio_set_defer_completion()
600 dio->defer_completion = true; in dio_set_defer_completion()
601 if (!sb->s_dio_done_wq) in dio_set_defer_completion()
608 * of available blocks at sdio->blocks_available. These are in units of the
612 * it uses the passed inode-relative block number as the file offset, as usual.
614 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
617 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
619 * bh->b_blocknr.
624 * In the case of filesystem holes: the fs may return an arbitrarily-large
626 * buffer_mapped(). However the direct-io code will only process holes one
627 * block at a time - it will repeatedly call get_block() as it walks the hole.
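The calling convention described above can be sketched with stand-in types: the caller passes the wanted byte count in through b_size, and the filesystem may map fewer blocks, must set b_blocknr for the first one, or leaves the buffer unmapped for a hole. example_get_block() and its toy extent map are hypothetical, not from any real filesystem:

#include <assert.h>

struct fake_bh {			/* stand-in for struct buffer_head */
	unsigned long long b_blocknr;	/* out: first mapped fs block */
	unsigned long	   b_size;	/* in: bytes wanted, out: bytes mapped */
	int		   b_mapped;	/* stand-in for buffer_mapped() */
};

/* hypothetical fs: file blocks 0..99 map 1:1 to disk blocks 1000.. */
static int example_get_block(unsigned long long iblock, struct fake_bh *bh,
			     unsigned blkbits)
{
	unsigned long wanted = bh->b_size >> blkbits;
	unsigned long avail  = iblock < 100 ? 100 - iblock : 0;

	if (!avail)
		return 0;	/* hole: leave bh unmapped, caller walks it */
	if (avail < wanted)
		bh->b_size = avail << blkbits;	/* map less than asked for */
	bh->b_blocknr = 1000 + iblock;
	bh->b_mapped = 1;
	return 0;
}

int main(void)
{
	struct fake_bh bh = { .b_size = 16 << 12 };	/* want 16 4k blocks */

	example_get_block(96, &bh, 12);			/* only 4 remain */
	assert(bh.b_mapped && bh.b_blocknr == 1096 && bh.b_size == 4 << 12);
	return 0;
}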
633 sector_t fs_startblk; /* Into file, in filesystem-sized blocks */ in get_more_blocks()
634 sector_t fs_endblk; /* Into file, in filesystem-sized blocks */ in get_more_blocks()
635 unsigned long fs_count; /* Number of filesystem-sized blocks */ in get_more_blocks()
637 unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor; in get_more_blocks()
644 ret = dio->page_errors; in get_more_blocks()
646 BUG_ON(sdio->block_in_file >= sdio->final_block_in_request); in get_more_blocks()
647 fs_startblk = sdio->block_in_file >> sdio->blkfactor; in get_more_blocks()
648 fs_endblk = (sdio->final_block_in_request - 1) >> in get_more_blocks()
649 sdio->blkfactor; in get_more_blocks()
650 fs_count = fs_endblk - fs_startblk + 1; in get_more_blocks()
652 map_bh->b_state = 0; in get_more_blocks()
653 map_bh->b_size = fs_count << i_blkbits; in get_more_blocks()
666 create = dio->op == REQ_OP_WRITE; in get_more_blocks()
667 if (dio->flags & DIO_SKIP_HOLES) { in get_more_blocks()
668 i_size = i_size_read(dio->inode); in get_more_blocks()
669 if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits) in get_more_blocks()
673 ret = (*sdio->get_block)(dio->inode, fs_startblk, in get_more_blocks()
677 dio->private = map_bh->b_private; in get_more_blocks()
697 sector = start_sector << (sdio->blkbits - 9); in dio_new_bio()
698 nr_pages = bio_max_segs(sdio->pages_in_io); in dio_new_bio()
700 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); in dio_new_bio()
701 sdio->boundary = 0; in dio_new_bio()
709 * the just-added page.
711 * Return zero on success. Non-zero means the caller needs to start a new BIO.
717 ret = bio_add_page(sdio->bio, sdio->cur_page, in dio_bio_add_page()
718 sdio->cur_page_len, sdio->cur_page_offset); in dio_bio_add_page()
719 if (ret == sdio->cur_page_len) { in dio_bio_add_page()
723 if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE) in dio_bio_add_page()
724 sdio->pages_in_io--; in dio_bio_add_page()
725 get_page(sdio->cur_page); in dio_bio_add_page()
726 sdio->final_block_in_bio = sdio->cur_page_block + in dio_bio_add_page()
727 (sdio->cur_page_len >> sdio->blkbits); in dio_bio_add_page()
738 * starts on-disk at cur_page_block.
750 if (sdio->bio) { in dio_send_cur_page()
751 loff_t cur_offset = sdio->cur_page_fs_offset; in dio_send_cur_page()
752 loff_t bio_next_offset = sdio->logical_offset_in_bio + in dio_send_cur_page()
753 sdio->bio->bi_iter.bi_size; in dio_send_cur_page()
758 * Btrfs cannot handle having logically non-contiguous requests in dio_send_cur_page()
761 * Logical: [0-4095][HOLE][8192-12287] in dio_send_cur_page()
762 * Physical: [0-4095] [4096-8191] in dio_send_cur_page()
769 if (sdio->final_block_in_bio != sdio->cur_page_block || in dio_send_cur_page()
774 if (sdio->bio == NULL) { in dio_send_cur_page()
775 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
782 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
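The btrfs example above in numbers: both file extents land on adjacent disk blocks, so a purely physical contiguity test would merge them into one bio across the hole. The logical-offset comparison is what forces the split (illustrative offsets):

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	/* the example above: [0-4095][HOLE][8192-12287] maps to adjacent
	 * disk blocks, so the physical check alone would merge the pages */
	long long logical_offset_in_bio = 0;	/* bio began at file offset 0 */
	long long bio_size = 4096;		/* one page in the bio so far */
	long long cur_page_fs_offset = 8192;	/* next page is past the hole */

	long long bio_next_offset = logical_offset_in_bio + bio_size;
	bool physically_contiguous = true;	/* disk block 1 follows block 0 */

	/* dio_send_cur_page() must still submit: logically discontiguous */
	assert(physically_contiguous && cur_page_fs_offset != bio_next_offset);
	return 0;
}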
800 * The chunk of page starts on-disk at blocknr.
802 * We perform deferred IO, by recording the last-submitted page inside our
815 int boundary = sdio->boundary; /* dio_send_cur_page may clear it */ in submit_page_section()
817 if (dio->op == REQ_OP_WRITE) { in submit_page_section()
827 if (sdio->cur_page == page && in submit_page_section()
828 sdio->cur_page_offset + sdio->cur_page_len == offset && in submit_page_section()
829 sdio->cur_page_block + in submit_page_section()
830 (sdio->cur_page_len >> sdio->blkbits) == blocknr) { in submit_page_section()
831 sdio->cur_page_len += len; in submit_page_section()
838 if (sdio->cur_page) { in submit_page_section()
840 put_page(sdio->cur_page); in submit_page_section()
841 sdio->cur_page = NULL; in submit_page_section()
847 sdio->cur_page = page; in submit_page_section()
848 sdio->cur_page_offset = offset; in submit_page_section()
849 sdio->cur_page_len = len; in submit_page_section()
850 sdio->cur_page_block = blocknr; in submit_page_section()
851 sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits; in submit_page_section()
859 if (sdio->bio) in submit_page_section()
861 put_page(sdio->cur_page); in submit_page_section()
862 sdio->cur_page = NULL; in submit_page_section()
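The merge test sampled at lines 827-830 lets consecutive sub-page chunks accumulate into cur_page before anything is submitted. A standalone model of the two adjacency conditions (the same-struct-page check is elided, since the model only has one page):

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	unsigned blkbits = 9;
	/* queued chunk: 512 bytes of the page at offset 0, disk block 100 */
	unsigned cur_page_offset = 0, cur_page_len = 512;
	unsigned long long cur_page_block = 100;

	/* candidate chunk: the next 512 bytes, disk block 101 */
	unsigned offset = 512, len = 512;
	unsigned long long blocknr = 101;

	bool mergeable =
		cur_page_offset + cur_page_len == offset &&	   /* adjacent in page */
		cur_page_block + (cur_page_len >> blkbits) == blocknr; /* adjacent on disk */

	if (mergeable)
		cur_page_len += len;	/* just extend, nothing submitted yet */

	assert(mergeable && cur_page_len == 1024);
	return 0;
}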
869 * the block for us, we need to fill in the unused portion of the
870 * block with zeros. This happens only if the user buffer, file offset or
871 * IO length is not a multiple of the filesystem block size.
884 sdio->start_zero_done = 1; in dio_zero_block()
885 if (!sdio->blkfactor || !buffer_new(map_bh)) in dio_zero_block()
888 dio_blocks_per_fs_block = 1 << sdio->blkfactor; in dio_zero_block()
889 this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1); in dio_zero_block()
899 this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks; in dio_zero_block()
901 this_chunk_bytes = this_chunk_blocks << sdio->blkbits; in dio_zero_block()
905 sdio->next_block_for_io, map_bh)) in dio_zero_block()
908 sdio->next_block_for_io += this_chunk_blocks; in dio_zero_block()
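Worked through: with blkfactor = 2 there are four dio blocks per fs block, and block_in_file & 3 locates the IO inside the newly allocated block; dio_zero_block()'s `end` flag picks whether the front or the back of the block is zeroed. A check of that split with illustrative values:

#include <assert.h>

int main(void)
{
	unsigned blkfactor = 2;			/* 4 dio blocks per fs block */
	unsigned per_fs = 1u << blkfactor;
	unsigned long long block_in_file = 13;	/* IO boundary mid fs-block */

	unsigned this_chunk = block_in_file & (per_fs - 1);	/* 1 */

	/* front of the block (end == 0): blocks before the IO start */
	unsigned front = this_chunk;				/* 1 */
	/* back of the block (end != 0): blocks after the IO end */
	unsigned back = per_fs - this_chunk;			/* 3 */

	assert(front == 1 && back == 3);
	return 0;
}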
917 * happily perform page-sized but 512-byte aligned IOs. It is important that
920 * So we permit the ->get_block function to populate bh.b_size
923 * For best results, the blockdev should be set up with 512-byte i_blkbits and
930 const unsigned blkbits = sdio->blkbits; in do_direct_IO()
931 const unsigned i_blkbits = blkbits + sdio->blkfactor; in do_direct_IO()
934 while (sdio->block_in_file < sdio->final_block_in_request) { in do_direct_IO()
943 from = sdio->head ? 0 : sdio->from; in do_direct_IO()
944 to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE; in do_direct_IO()
945 sdio->head++; in do_direct_IO()
952 if (sdio->blocks_available == 0) { in do_direct_IO()
967 sdio->blocks_available = in do_direct_IO()
968 map_bh->b_size >> blkbits; in do_direct_IO()
969 sdio->next_block_for_io = in do_direct_IO()
970 map_bh->b_blocknr << sdio->blkfactor; in do_direct_IO()
973 map_bh->b_bdev, in do_direct_IO()
974 map_bh->b_blocknr, in do_direct_IO()
975 map_bh->b_size >> i_blkbits); in do_direct_IO()
978 if (!sdio->blkfactor) in do_direct_IO()
981 blkmask = (1 << sdio->blkfactor) - 1; in do_direct_IO()
982 dio_remainder = (sdio->block_in_file & blkmask); in do_direct_IO()
986 * starts partway into a fs-block, in do_direct_IO()
987 * dio_remainder will be non-zero. If the IO in do_direct_IO()
993 * on-disk in do_direct_IO()
996 sdio->next_block_for_io += dio_remainder; in do_direct_IO()
997 sdio->blocks_available -= dio_remainder; in do_direct_IO()
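The remainder handling in numbers: the filesystem mapped a whole fs block, but the IO begins partway into it, so the freshly computed next_block_for_io and blocks_available are both adjusted before any pages are queued. Illustrative values; b_blocknr is a made-up disk block:

#include <assert.h>

int main(void)
{
	unsigned blkfactor = 3;			/* 8 dio blocks per 4k fs block */
	unsigned long long block_in_file = 21;	/* IO starts mid fs-block */
	unsigned long long b_blocknr = 500;	/* fs block the fs just mapped */
	unsigned blkmask = (1u << blkfactor) - 1;

	unsigned dio_remainder = block_in_file & blkmask;	/* 5 */

	unsigned long long next_block_for_io = b_blocknr << blkfactor; /* 4000 */
	unsigned blocks_available = 1u << blkfactor;	/* whole block: 8 */

	/* a READ skips the already-populated front of the fs block */
	next_block_for_io += dio_remainder;
	blocks_available -= dio_remainder;

	assert(next_block_for_io == 4005 && blocks_available == 3);
	return 0;
}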
1004 /* AKPM: eargh, -ENOTBLK is a hack */ in do_direct_IO()
1005 if (dio->op == REQ_OP_WRITE) { in do_direct_IO()
1007 return -ENOTBLK; in do_direct_IO()
1014 i_size_aligned = ALIGN(i_size_read(dio->inode), in do_direct_IO()
1016 if (sdio->block_in_file >= in do_direct_IO()
1023 sdio->block_in_file++; in do_direct_IO()
1025 dio->result += 1 << blkbits; in do_direct_IO()
1034 if (unlikely(sdio->blkfactor && !sdio->start_zero_done)) in do_direct_IO()
1041 this_chunk_blocks = sdio->blocks_available; in do_direct_IO()
1042 u = (to - from) >> blkbits; in do_direct_IO()
1045 u = sdio->final_block_in_request - sdio->block_in_file; in do_direct_IO()
1051 if (this_chunk_blocks == sdio->blocks_available) in do_direct_IO()
1052 sdio->boundary = buffer_boundary(map_bh); in do_direct_IO()
1056 sdio->next_block_for_io, in do_direct_IO()
1062 sdio->next_block_for_io += this_chunk_blocks; in do_direct_IO()
1064 sdio->block_in_file += this_chunk_blocks; in do_direct_IO()
1066 dio->result += this_chunk_bytes; in do_direct_IO()
1067 sdio->blocks_available -= this_chunk_blocks; in do_direct_IO()
1069 BUG_ON(sdio->block_in_file > sdio->final_block_in_request); in do_direct_IO()
1070 if (sdio->block_in_file == sdio->final_block_in_request) in do_direct_IO()
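The two elided comparisons after lines 1041-1045 clamp u into this_chunk_blocks, so the chunk is the minimum of what the filesystem mapped, what fits in the current page, and what the request still needs. Restated as a three-way minimum (min3u is a local helper, not the kernel's min3() macro):

#include <assert.h>

static unsigned long min3u(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	unsigned blkbits = 9;
	unsigned long blocks_available = 64;	/* what get_block() mapped */
	unsigned long from = 0, to = 4096;	/* bytes left in this page */
	unsigned long request_left = 5;		/* blocks left in the request */

	/* this_chunk_blocks = min(mapped, page room, request remainder) */
	unsigned long this_chunk_blocks =
		min3u(blocks_available, (to - from) >> blkbits, request_left);

	assert(this_chunk_blocks == 5);
	return 0;
}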
1091 * return code that the caller will hand to ->complete(). in drop_refcount()
1097 spin_lock_irqsave(&dio->bio_lock, flags); in drop_refcount()
1098 ret2 = --dio->refcount; in drop_refcount()
1099 spin_unlock_irqrestore(&dio->bio_lock, flags); in drop_refcount()
1107 * - if the flags value contains DIO_LOCKING we use a fancy locking
1112 * - if the flags value does NOT contain DIO_LOCKING we don't use any
1134 unsigned i_blkbits = READ_ONCE(inode->i_blkbits); in do_blockdev_direct_IO()
1136 unsigned blocksize_mask = (1 << blkbits) - 1; in do_blockdev_direct_IO()
1137 ssize_t retval = -EINVAL; in do_blockdev_direct_IO()
1139 loff_t offset = iocb->ki_pos; in do_blockdev_direct_IO()
1158 return -ENOMEM; in do_blockdev_direct_IO()
1166 dio->flags = flags; in do_blockdev_direct_IO()
1167 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) { in do_blockdev_direct_IO()
1172 /* Once i_size has been sampled, check for reads beyond EOF */ in do_blockdev_direct_IO()
1173 dio->i_size = i_size_read(inode); in do_blockdev_direct_IO()
1174 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { in do_blockdev_direct_IO()
1182 blocksize_mask = (1 << blkbits) - 1; in do_blockdev_direct_IO()
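blocksize_mask is computed twice because a misaligned request gets a second chance against the block device's (usually smaller) logical block size before being rejected. A sketch of that fallback, with illustrative sizes:

#include <assert.h>

int main(void)
{
	unsigned blkbits = 12;			/* 4k fs blocks */
	unsigned blocksize_mask = (1u << blkbits) - 1;
	long long offset = 512;			/* caller's 512-byte-aligned IO */

	if (offset & blocksize_mask) {
		/* misaligned for the fs block size: retry against the
		 * device's logical block size before giving up */
		blkbits = 9;			/* e.g. a 512-byte-sector bdev */
		blocksize_mask = (1u << blkbits) - 1;
	}
	assert((offset & blocksize_mask) == 0);	/* now acceptable */
	return 0;
}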
1187 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) { in do_blockdev_direct_IO()
1188 struct address_space *mapping = iocb->ki_filp->f_mapping; in do_blockdev_direct_IO()
1190 retval = filemap_write_and_wait_range(mapping, offset, end - 1); in do_blockdev_direct_IO()
1202 dio->is_async = false; in do_blockdev_direct_IO()
1204 dio->is_async = false; in do_blockdev_direct_IO()
1206 dio->is_async = true; in do_blockdev_direct_IO()
1208 dio->inode = inode; in do_blockdev_direct_IO()
1210 dio->op = REQ_OP_WRITE; in do_blockdev_direct_IO()
1211 dio->op_flags = REQ_SYNC | REQ_IDLE; in do_blockdev_direct_IO()
1212 if (iocb->ki_flags & IOCB_NOWAIT) in do_blockdev_direct_IO()
1213 dio->op_flags |= REQ_NOWAIT; in do_blockdev_direct_IO()
1215 dio->op = REQ_OP_READ; in do_blockdev_direct_IO()
1217 if (iocb->ki_flags & IOCB_HIPRI) in do_blockdev_direct_IO()
1218 dio->op_flags |= REQ_HIPRI; in do_blockdev_direct_IO()
1222 * so that we can call ->fsync. in do_blockdev_direct_IO()
1224 if (dio->is_async && iov_iter_rw(iter) == WRITE) { in do_blockdev_direct_IO()
1226 if (iocb->ki_flags & IOCB_DSYNC) in do_blockdev_direct_IO()
1228 else if (!dio->inode->i_sb->s_dio_done_wq) { in do_blockdev_direct_IO()
1234 retval = sb_init_dio_done_wq(dio->inode->i_sb); in do_blockdev_direct_IO()
1247 sdio.blkfactor = i_blkbits - blkbits; in do_blockdev_direct_IO()
1251 dio->end_io = end_io; in do_blockdev_direct_IO()
1253 sdio.final_block_in_bio = -1; in do_blockdev_direct_IO()
1254 sdio.next_block_for_io = -1; in do_blockdev_direct_IO()
1256 dio->iocb = iocb; in do_blockdev_direct_IO()
1258 spin_lock_init(&dio->bio_lock); in do_blockdev_direct_IO()
1259 dio->refcount = 1; in do_blockdev_direct_IO()
1261 dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ; in do_blockdev_direct_IO()
1266 * In case of non-aligned buffers, we may need 2 more in do_blockdev_direct_IO()
1280 if (retval == -ENOTBLK) { in do_blockdev_direct_IO()
1288 * There may be some unwritten disk at the end of a part-written in do_blockdev_direct_IO()
1289 * fs-block-sized block. Go zero that now. in do_blockdev_direct_IO()
1318 if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) in do_blockdev_direct_IO()
1319 inode_unlock(dio->inode); in do_blockdev_direct_IO()
1325 * call aio_complete is when we return -EIOCBQUEUED, so we key on that. in do_blockdev_direct_IO()
1326 * This had *better* be the only place that raises -EIOCBQUEUED. in do_blockdev_direct_IO()
1328 BUG_ON(retval == -EIOCBQUEUED); in do_blockdev_direct_IO()
1329 if (dio->is_async && retval == 0 && dio->result && in do_blockdev_direct_IO()
1330 (iov_iter_rw(iter) == READ || dio->result == count)) in do_blockdev_direct_IO()
1331 retval = -EIOCBQUEUED; in do_blockdev_direct_IO()
1338 BUG_ON(retval != -EIOCBQUEUED); in do_blockdev_direct_IO()
1343 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) in do_blockdev_direct_IO()
1364 prefetch(&bdev->bd_disk->part_tbl); in __blockdev_direct_IO()
1365 prefetch(bdev->bd_disk->queue); in __blockdev_direct_IO()
1366 prefetch((char *)bdev->bd_disk->queue + SMP_CACHE_BYTES); in __blockdev_direct_IO()