Lines Matching refs:dio
118 struct dio { struct
169 static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) in dio_refill_pages() argument
173 ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, in dio_refill_pages()
176 if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) { in dio_refill_pages()
183 if (dio->page_errors == 0) in dio_refill_pages()
184 dio->page_errors = ret; in dio_refill_pages()
186 dio->pages[0] = page; in dio_refill_pages()
211 static inline struct page *dio_get_page(struct dio *dio, in dio_get_page() argument
217 ret = dio_refill_pages(dio, sdio); in dio_get_page()
222 return dio->pages[sdio->head]; in dio_get_page()
236 static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags) in dio_complete() argument
238 loff_t offset = dio->iocb->ki_pos; in dio_complete()
251 if (dio->result) { in dio_complete()
252 transferred = dio->result; in dio_complete()
255 if ((dio->op == REQ_OP_READ) && in dio_complete()
256 ((offset + transferred) > dio->i_size)) in dio_complete()
257 transferred = dio->i_size - offset; in dio_complete()
264 ret = dio->page_errors; in dio_complete()
266 ret = dio->io_error; in dio_complete()
270 if (dio->end_io) { in dio_complete()
272 err = dio->end_io(dio->iocb, offset, ret, dio->private); in dio_complete()
290 ret > 0 && dio->op == REQ_OP_WRITE && in dio_complete()
291 dio->inode->i_mapping->nrpages) { in dio_complete()
292 err = invalidate_inode_pages2_range(dio->inode->i_mapping, in dio_complete()
296 dio_warn_stale_pagecache(dio->iocb->ki_filp); in dio_complete()
299 inode_dio_end(dio->inode); in dio_complete()
307 dio->iocb->ki_pos += transferred; in dio_complete()
309 if (ret > 0 && dio->op == REQ_OP_WRITE) in dio_complete()
310 ret = generic_write_sync(dio->iocb, ret); in dio_complete()
311 dio->iocb->ki_complete(dio->iocb, ret, 0); in dio_complete()
314 kmem_cache_free(dio_cache, dio); in dio_complete()
320 struct dio *dio = container_of(work, struct dio, complete_work); in dio_aio_complete_work() local
322 dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE); in dio_aio_complete_work()
325 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
332 struct dio *dio = bio->bi_private; in dio_bio_end_aio() local
338 dio_bio_complete(dio, bio); in dio_bio_end_aio()
340 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_aio()
341 remaining = --dio->refcount; in dio_bio_end_aio()
342 if (remaining == 1 && dio->waiter) in dio_bio_end_aio()
343 wake_up_process(dio->waiter); in dio_bio_end_aio()
344 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_aio()
355 if (dio->result) in dio_bio_end_aio()
356 defer_completion = dio->defer_completion || in dio_bio_end_aio()
357 (dio->op == REQ_OP_WRITE && in dio_bio_end_aio()
358 dio->inode->i_mapping->nrpages); in dio_bio_end_aio()
360 INIT_WORK(&dio->complete_work, dio_aio_complete_work); in dio_bio_end_aio()
361 queue_work(dio->inode->i_sb->s_dio_done_wq, in dio_bio_end_aio()
362 &dio->complete_work); in dio_bio_end_aio()
364 dio_complete(dio, 0, DIO_COMPLETE_ASYNC); in dio_bio_end_aio()
378 struct dio *dio = bio->bi_private; in dio_bio_end_io() local
381 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_io()
382 bio->bi_private = dio->bio_list; in dio_bio_end_io()
383 dio->bio_list = bio; in dio_bio_end_io()
384 if (--dio->refcount == 1 && dio->waiter) in dio_bio_end_io()
385 wake_up_process(dio->waiter); in dio_bio_end_io()
386 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_io()
390 dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, in dio_bio_alloc() argument
404 bio_set_op_attrs(bio, dio->op, dio->op_flags); in dio_bio_alloc()
405 if (dio->is_async) in dio_bio_alloc()
410 bio->bi_write_hint = dio->iocb->ki_hint; in dio_bio_alloc()
423 static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) in dio_bio_submit() argument
428 bio->bi_private = dio; in dio_bio_submit()
430 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_submit()
431 dio->refcount++; in dio_bio_submit()
432 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_submit()
434 if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) in dio_bio_submit()
437 dio->bio_disk = bio->bi_disk; in dio_bio_submit()
440 sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); in dio_bio_submit()
441 dio->bio_cookie = BLK_QC_T_NONE; in dio_bio_submit()
443 dio->bio_cookie = submit_bio(bio); in dio_bio_submit()
453 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio) in dio_cleanup() argument
456 put_page(dio->pages[sdio->head++]); in dio_cleanup()
465 static struct bio *dio_await_one(struct dio *dio) in dio_await_one() argument
470 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
478 while (dio->refcount > 1 && dio->bio_list == NULL) { in dio_await_one()
480 dio->waiter = current; in dio_await_one()
481 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
482 if (!(dio->iocb->ki_flags & IOCB_HIPRI) || in dio_await_one()
483 !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true)) in dio_await_one()
486 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
487 dio->waiter = NULL; in dio_await_one()
489 if (dio->bio_list) { in dio_await_one()
490 bio = dio->bio_list; in dio_await_one()
491 dio->bio_list = bio->bi_private; in dio_await_one()
493 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
500 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio) in dio_bio_complete() argument
503 bool should_dirty = dio->op == REQ_OP_READ && dio->should_dirty; in dio_bio_complete()
507 dio->io_error = -EAGAIN; in dio_bio_complete()
509 dio->io_error = -EIO; in dio_bio_complete()
512 if (dio->is_async && should_dirty) { in dio_bio_complete()
528 static void dio_await_completion(struct dio *dio) in dio_await_completion() argument
532 bio = dio_await_one(dio); in dio_await_completion()
534 dio_bio_complete(dio, bio); in dio_await_completion()
545 static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio) in dio_bio_reap() argument
550 while (dio->bio_list) { in dio_bio_reap()
555 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_reap()
556 bio = dio->bio_list; in dio_bio_reap()
557 dio->bio_list = bio->bi_private; in dio_bio_reap()
558 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_reap()
559 ret2 = blk_status_to_errno(dio_bio_complete(dio, bio)); in dio_bio_reap()
592 static int dio_set_defer_completion(struct dio *dio) in dio_set_defer_completion() argument
594 struct super_block *sb = dio->inode->i_sb; in dio_set_defer_completion()
596 if (dio->defer_completion) in dio_set_defer_completion()
598 dio->defer_completion = true; in dio_set_defer_completion()
627 static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, in get_more_blocks() argument
642 ret = dio->page_errors; in get_more_blocks()
664 create = dio->op == REQ_OP_WRITE; in get_more_blocks()
665 if (dio->flags & DIO_SKIP_HOLES) { in get_more_blocks()
666 i_size = i_size_read(dio->inode); in get_more_blocks()
671 ret = (*sdio->get_block)(dio->inode, fs_startblk, in get_more_blocks()
675 dio->private = map_bh->b_private; in get_more_blocks()
678 ret = dio_set_defer_completion(dio); in get_more_blocks()
686 static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio, in dio_new_bio() argument
692 ret = dio_bio_reap(dio, sdio); in dio_new_bio()
698 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); in dio_new_bio()
743 static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio, in dio_send_cur_page() argument
769 dio_bio_submit(dio, sdio); in dio_send_cur_page()
773 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
779 dio_bio_submit(dio, sdio); in dio_send_cur_page()
780 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
808 submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, in submit_page_section() argument
814 if (dio->op == REQ_OP_WRITE) { in submit_page_section()
836 ret = dio_send_cur_page(dio, sdio, map_bh); in submit_page_section()
855 ret = dio_send_cur_page(dio, sdio, map_bh); in submit_page_section()
857 dio_bio_submit(dio, sdio); in submit_page_section()
873 static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio, in dio_zero_block() argument
901 if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes, in dio_zero_block()
924 static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, in do_direct_IO() argument
935 page = dio_get_page(dio, sdio); in do_direct_IO()
956 ret = get_more_blocks(dio, sdio, map_bh); in do_direct_IO()
1002 if (dio->op == REQ_OP_WRITE) { in do_direct_IO()
1011 i_size_aligned = ALIGN(i_size_read(dio->inode), in do_direct_IO()
1022 dio->result += 1 << blkbits; in do_direct_IO()
1032 dio_zero_block(dio, sdio, 0, map_bh); in do_direct_IO()
1050 ret = submit_page_section(dio, sdio, page, in do_direct_IO()
1063 dio->result += this_chunk_bytes; in do_direct_IO()
1078 static inline int drop_refcount(struct dio *dio) in drop_refcount() argument
1094 spin_lock_irqsave(&dio->bio_lock, flags); in drop_refcount()
1095 ret2 = --dio->refcount; in drop_refcount()
1096 spin_unlock_irqrestore(&dio->bio_lock, flags); in drop_refcount()
1138 struct dio *dio; in do_blockdev_direct_IO() local
1153 dio = kmem_cache_alloc(dio_cache, GFP_KERNEL); in do_blockdev_direct_IO()
1154 if (!dio) in do_blockdev_direct_IO()
1161 memset(dio, 0, offsetof(struct dio, pages)); in do_blockdev_direct_IO()
1163 dio->flags = flags; in do_blockdev_direct_IO()
1164 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) { in do_blockdev_direct_IO()
1170 dio->i_size = i_size_read(inode); in do_blockdev_direct_IO()
1171 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { in do_blockdev_direct_IO()
1184 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) { in do_blockdev_direct_IO()
1199 dio->is_async = false; in do_blockdev_direct_IO()
1201 dio->is_async = false; in do_blockdev_direct_IO()
1203 dio->is_async = true; in do_blockdev_direct_IO()
1205 dio->inode = inode; in do_blockdev_direct_IO()
1207 dio->op = REQ_OP_WRITE; in do_blockdev_direct_IO()
1208 dio->op_flags = REQ_SYNC | REQ_IDLE; in do_blockdev_direct_IO()
1210 dio->op_flags |= REQ_NOWAIT; in do_blockdev_direct_IO()
1212 dio->op = REQ_OP_READ; in do_blockdev_direct_IO()
1215 dio->op_flags |= REQ_HIPRI; in do_blockdev_direct_IO()
1221 if (dio->is_async && iov_iter_rw(iter) == WRITE) { in do_blockdev_direct_IO()
1224 retval = dio_set_defer_completion(dio); in do_blockdev_direct_IO()
1225 else if (!dio->inode->i_sb->s_dio_done_wq) { in do_blockdev_direct_IO()
1231 retval = sb_init_dio_done_wq(dio->inode->i_sb); in do_blockdev_direct_IO()
1248 dio->end_io = end_io; in do_blockdev_direct_IO()
1253 dio->iocb = iocb; in do_blockdev_direct_IO()
1255 spin_lock_init(&dio->bio_lock); in do_blockdev_direct_IO()
1256 dio->refcount = 1; in do_blockdev_direct_IO()
1258 dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ; in do_blockdev_direct_IO()
1273 retval = do_direct_IO(dio, &sdio, &map_bh); in do_blockdev_direct_IO()
1275 dio_cleanup(dio, &sdio); in do_blockdev_direct_IO()
1288 dio_zero_block(dio, &sdio, 1, &map_bh); in do_blockdev_direct_IO()
1293 ret2 = dio_send_cur_page(dio, &sdio, &map_bh); in do_blockdev_direct_IO()
1300 dio_bio_submit(dio, &sdio); in do_blockdev_direct_IO()
1308 dio_cleanup(dio, &sdio); in do_blockdev_direct_IO()
1315 if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) in do_blockdev_direct_IO()
1316 inode_unlock(dio->inode); in do_blockdev_direct_IO()
1326 if (dio->is_async && retval == 0 && dio->result && in do_blockdev_direct_IO()
1327 (iov_iter_rw(iter) == READ || dio->result == count)) in do_blockdev_direct_IO()
1330 dio_await_completion(dio); in do_blockdev_direct_IO()
1332 if (drop_refcount(dio) == 0) { in do_blockdev_direct_IO()
1333 retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE); in do_blockdev_direct_IO()
1340 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) in do_blockdev_direct_IO()
1343 kmem_cache_free(dio_cache, dio); in do_blockdev_direct_IO()
1373 dio_cache = KMEM_CACHE(dio, SLAB_PANIC); in dio_init()
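
One pattern recurs throughout the references above: dio->refcount is manipulated only under dio->bio_lock, the submitter takes a reference per submitted bio (dio_bio_submit(), lines 430-432), each completion drops one and wakes dio->waiter when only the submitter's initial reference remains (dio_bio_end_io(), lines 381-386), and the submitter sleeps in dio_await_one()/drop_refcount() until that happens. The sketch below is a minimal userspace analogue of that reference-counted completion handshake, assuming POSIX threads in place of the kernel's spinlock/wake_up_process primitives; the names dio_like, submit_one, complete_one and await_all are hypothetical and this is an illustration of the pattern only, not the kernel code.

    /*
     * Userspace analogue (assumes pthreads) of the dio->refcount /
     * dio->bio_lock / dio->waiter handshake referenced above.
     * All names here are hypothetical.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    struct dio_like {
            pthread_mutex_t lock;     /* plays the role of dio->bio_lock */
            pthread_cond_t  done;     /* stands in for wake_up_process() */
            int             refcount; /* plays the role of dio->refcount */
    };

    /* Take one reference per in-flight "bio", as dio_bio_submit() does. */
    static void submit_one(struct dio_like *d)
    {
            pthread_mutex_lock(&d->lock);
            d->refcount++;
            pthread_mutex_unlock(&d->lock);
    }

    /* Drop a reference and wake the waiter once only the submitter's
     * initial reference remains, as dio_bio_end_io() does. */
    static void complete_one(struct dio_like *d)
    {
            pthread_mutex_lock(&d->lock);
            if (--d->refcount == 1)
                    pthread_cond_signal(&d->done);
            pthread_mutex_unlock(&d->lock);
    }

    /* Sleep until every submitted "bio" has completed, in the spirit of
     * dio_await_one() and drop_refcount(). */
    static void await_all(struct dio_like *d)
    {
            pthread_mutex_lock(&d->lock);
            while (d->refcount > 1)
                    pthread_cond_wait(&d->done, &d->lock);
            pthread_mutex_unlock(&d->lock);
    }

    static void *worker(void *arg)
    {
            usleep(1000);             /* pretend to do I/O */
            complete_one(arg);
            return NULL;
    }

    int main(void)
    {
            struct dio_like d = {
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .done = PTHREAD_COND_INITIALIZER,
                    .refcount = 1,    /* submitter's own reference, like
                                         dio->refcount = 1 at line 1256 */
            };
            pthread_t t[4];

            for (int i = 0; i < 4; i++) {
                    submit_one(&d);
                    pthread_create(&t[i], NULL, worker, &d);
            }
            await_all(&d);
            printf("all completions seen, refcount=%d\n", d.refcount);
            for (int i = 0; i < 4; i++)
                    pthread_join(t[i], NULL);
            return 0;
    }

The initial reference held by the submitter (refcount = 1) is what keeps the structure alive while bios are still being submitted, so a fast completion can never free or signal final completion before submission has finished; the kernel listing shows the same idea with drop_refcount() releasing that last reference only after do_direct_IO() returns.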