Lines matching refs: dio — fs/direct-io.c
117 struct dio { struct
167 static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) in dio_refill_pages() argument
169 struct page **pages = dio->pages; in dio_refill_pages()
170 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_refill_pages()
182 if (dio->page_errors == 0) in dio_refill_pages()
183 dio->page_errors = ret; in dio_refill_pages()
184 dio->pages[0] = ZERO_PAGE(0); in dio_refill_pages()
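In the dio_refill_pages() hits above, a failed attempt to pull in user pages does not abort the request: the first error is latched in dio->page_errors (lines 182-183) and ZERO_PAGE(0) is substituted (line 184) so a read can keep moving, with the error only surfacing later in dio_complete(). A minimal userspace restatement of that latch-and-substitute pattern; all names below are illustrative, not kernel API:

    static char zero_page[4096];          /* stands in for ZERO_PAGE(0) */

    struct pager {
        int page_errors;                  /* first failure, as in struct dio */
        char *pages[1];
    };

    /* On failure, record only the first error and point the slot at a
     * shared page of zeroes so the caller can continue regardless. */
    static void refill(struct pager *p, char *page, int err)
    {
        if (err) {
            if (p->page_errors == 0)
                p->page_errors = err;
            p->pages[0] = zero_page;
            return;
        }
        p->pages[0] = page;
    }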
208 static inline struct page *dio_get_page(struct dio *dio, in dio_get_page() argument
214 ret = dio_refill_pages(dio, sdio); in dio_get_page()
219 return dio->pages[sdio->head]; in dio_get_page()
222 static void dio_pin_page(struct dio *dio, struct page *page) in dio_pin_page() argument
224 if (dio->is_pinned) in dio_pin_page()
228 static void dio_unpin_page(struct dio *dio, struct page *page) in dio_unpin_page() argument
230 if (dio->is_pinned) in dio_unpin_page()
245 static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags) in dio_complete() argument
247 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_complete()
248 loff_t offset = dio->iocb->ki_pos; in dio_complete()
261 if (dio->result) { in dio_complete()
262 transferred = dio->result; in dio_complete()
266 ((offset + transferred) > dio->i_size)) in dio_complete()
267 transferred = dio->i_size - offset; in dio_complete()
274 ret = dio->page_errors; in dio_complete()
276 ret = dio->io_error; in dio_complete()
280 if (dio->end_io) { in dio_complete()
282 err = dio->end_io(dio->iocb, offset, ret, dio->private); in dio_complete()
301 kiocb_invalidate_post_direct_write(dio->iocb, ret); in dio_complete()
303 inode_dio_end(dio->inode); in dio_complete()
311 dio->iocb->ki_pos += transferred; in dio_complete()
314 ret = generic_write_sync(dio->iocb, ret); in dio_complete()
315 dio->iocb->ki_complete(dio->iocb, ret); in dio_complete()
318 kmem_cache_free(dio_cache, dio); in dio_complete()
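The dio_complete() hits show the result-resolution order on the teardown path: clamp a read that raced with truncate back to i_size (lines 266-267), then report the first latched pinning error, then any bio error, and only then the byte count (lines 274-276). A compile-able restatement of just that visible logic, with field names following struct dio; this is a sketch, not the kernel function itself:

    #include <sys/types.h>

    static ssize_t resolve_result(ssize_t ret, ssize_t result,
                                  off_t offset, off_t i_size,
                                  int page_errors, int io_error, int is_read)
    {
        ssize_t transferred = result;

        /* Reads must not report bytes beyond end of file. */
        if (is_read && offset + transferred > i_size)
            transferred = i_size - offset;

        if (ret == 0)
            ret = page_errors;        /* page-pinning errors win first */
        if (ret == 0)
            ret = io_error;           /* then errors from completed bios */
        if (ret == 0)
            ret = transferred;        /* otherwise, bytes transferred */
        return ret;
    }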
324 struct dio *dio = container_of(work, struct dio, complete_work); in dio_aio_complete_work() local
326 dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE); in dio_aio_complete_work()
329 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
336 struct dio *dio = bio->bi_private; in dio_bio_end_aio() local
337 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_bio_end_aio()
343 dio_bio_complete(dio, bio); in dio_bio_end_aio()
345 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_aio()
346 remaining = --dio->refcount; in dio_bio_end_aio()
347 if (remaining == 1 && dio->waiter) in dio_bio_end_aio()
348 wake_up_process(dio->waiter); in dio_bio_end_aio()
349 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_aio()
360 if (dio->result) in dio_bio_end_aio()
361 defer_completion = dio->defer_completion || in dio_bio_end_aio()
363 dio->inode->i_mapping->nrpages); in dio_bio_end_aio()
365 INIT_WORK(&dio->complete_work, dio_aio_complete_work); in dio_bio_end_aio()
366 queue_work(dio->inode->i_sb->s_dio_done_wq, in dio_bio_end_aio()
367 &dio->complete_work); in dio_bio_end_aio()
369 dio_complete(dio, 0, DIO_COMPLETE_ASYNC); in dio_bio_end_aio()
383 struct dio *dio = bio->bi_private; in dio_bio_end_io() local
386 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_end_io()
387 bio->bi_private = dio->bio_list; in dio_bio_end_io()
388 dio->bio_list = bio; in dio_bio_end_io()
389 if (--dio->refcount == 1 && dio->waiter) in dio_bio_end_io()
390 wake_up_process(dio->waiter); in dio_bio_end_io()
391 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_end_io()
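dio_bio_end_io() chains each completed bio onto dio->bio_list by reusing bio->bi_private as the next pointer (lines 387-388): that field held the dio back-pointer while the bio was in flight and is free once it completes, so no allocation is needed in interrupt context. The same intrusive trick in self-contained C, with a pthread mutex standing in for bio_lock (names are illustrative; the matching consumer is sketched after the dio_await_one() hits below):

    #include <pthread.h>
    #include <stddef.h>

    struct fake_bio {
        void *bi_private;           /* owner pointer while in flight,
                                       reused as the 'next' link when done */
    };

    struct fake_dio {
        pthread_mutex_t bio_lock;
        struct fake_bio *bio_list;  /* completed bios, linked via bi_private */
    };

    static void push_completed(struct fake_dio *dio, struct fake_bio *bio)
    {
        pthread_mutex_lock(&dio->bio_lock);
        bio->bi_private = dio->bio_list;   /* link in at the head */
        dio->bio_list = bio;
        pthread_mutex_unlock(&dio->bio_lock);
    }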
395 dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, in dio_bio_alloc() argument
405 bio = bio_alloc(bdev, nr_vecs, dio->opf, GFP_KERNEL); in dio_bio_alloc()
407 if (dio->is_async) in dio_bio_alloc()
411 if (dio->is_pinned) in dio_bio_alloc()
424 static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) in dio_bio_submit() argument
426 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_bio_submit()
430 bio->bi_private = dio; in dio_bio_submit()
432 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_submit()
433 dio->refcount++; in dio_bio_submit()
434 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_submit()
436 if (dio->is_async && dio_op == REQ_OP_READ && dio->should_dirty) in dio_bio_submit()
439 dio->bio_disk = bio->bi_bdev->bd_disk; in dio_bio_submit()
451 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio) in dio_cleanup() argument
453 if (dio->is_pinned) in dio_cleanup()
454 unpin_user_pages(dio->pages + sdio->head, in dio_cleanup()
465 static struct bio *dio_await_one(struct dio *dio) in dio_await_one() argument
470 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
478 while (dio->refcount > 1 && dio->bio_list == NULL) { in dio_await_one()
480 dio->waiter = current; in dio_await_one()
481 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
484 spin_lock_irqsave(&dio->bio_lock, flags); in dio_await_one()
485 dio->waiter = NULL; in dio_await_one()
487 if (dio->bio_list) { in dio_await_one()
488 bio = dio->bio_list; in dio_await_one()
489 dio->bio_list = bio->bi_private; in dio_await_one()
491 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_await_one()
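dio_await_one() sleeps until either only the submitter's own reference remains (refcount drops to 1, so nothing further can arrive) or a completed bio shows up on bio_list, then pops one entry (lines 478-489). A userspace analogue of that wait loop, continuing the fake_bio/fake_dio sketch above but using a condition variable where the kernel uses the dio->waiter task pointer and wake_up_process():

    #include <pthread.h>
    #include <stddef.h>

    struct fake_bio { void *bi_private; };      /* reused 'next' link */

    struct fake_dio {
        pthread_mutex_t bio_lock;
        pthread_cond_t  wake;       /* replaces waiter + wake_up_process() */
        int refcount;               /* submitter's ref + one per bio in flight */
        struct fake_bio *bio_list;
    };

    static struct fake_bio *await_one(struct fake_dio *dio)
    {
        struct fake_bio *bio = NULL;

        pthread_mutex_lock(&dio->bio_lock);
        /* refcount > 1 means bios beyond our own reference are still
         * outstanding; keep waiting until one of them is queued. */
        while (dio->refcount > 1 && dio->bio_list == NULL)
            pthread_cond_wait(&dio->wake, &dio->bio_lock);
        if (dio->bio_list) {
            bio = dio->bio_list;
            dio->bio_list = bio->bi_private;    /* pop the head */
        }
        pthread_mutex_unlock(&dio->bio_lock);
        return bio;
    }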
498 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio) in dio_bio_complete() argument
501 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_bio_complete()
502 bool should_dirty = dio_op == REQ_OP_READ && dio->should_dirty; in dio_bio_complete()
506 dio->io_error = -EAGAIN; in dio_bio_complete()
508 dio->io_error = -EIO; in dio_bio_complete()
511 if (dio->is_async && should_dirty) { in dio_bio_complete()
527 static void dio_await_completion(struct dio *dio) in dio_await_completion() argument
531 bio = dio_await_one(dio); in dio_await_completion()
533 dio_bio_complete(dio, bio); in dio_await_completion()
544 static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio) in dio_bio_reap() argument
549 while (dio->bio_list) { in dio_bio_reap()
554 spin_lock_irqsave(&dio->bio_lock, flags); in dio_bio_reap()
555 bio = dio->bio_list; in dio_bio_reap()
556 dio->bio_list = bio->bi_private; in dio_bio_reap()
557 spin_unlock_irqrestore(&dio->bio_lock, flags); in dio_bio_reap()
558 ret2 = blk_status_to_errno(dio_bio_complete(dio, bio)); in dio_bio_reap()
567 static int dio_set_defer_completion(struct dio *dio) in dio_set_defer_completion() argument
569 struct super_block *sb = dio->inode->i_sb; in dio_set_defer_completion()
571 if (dio->defer_completion) in dio_set_defer_completion()
573 dio->defer_completion = true; in dio_set_defer_completion()
602 static int get_more_blocks(struct dio *dio, struct dio_submit *sdio, in get_more_blocks() argument
605 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in get_more_blocks()
618 ret = dio->page_errors; in get_more_blocks()
641 if (dio->flags & DIO_SKIP_HOLES) { in get_more_blocks()
642 i_size = i_size_read(dio->inode); in get_more_blocks()
647 ret = (*sdio->get_block)(dio->inode, fs_startblk, in get_more_blocks()
651 dio->private = map_bh->b_private; in get_more_blocks()
654 ret = dio_set_defer_completion(dio); in get_more_blocks()
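get_more_blocks() defers all file-to-disk mapping to the filesystem through sdio->get_block (line 647) and latches map_bh->b_private into dio->private for the eventual end_io callback (line 651). The get_block_t contract takes the inode, a file-relative block number, a buffer_head to fill in, and a create flag. A kernel-style skeleton of an implementation, not buildable outside a kernel tree; the contiguous layout and EXAMPLE_FILE_START_BLOCK are invented purely for illustration:

    static int example_get_block(struct inode *inode, sector_t iblock,
                                 struct buffer_head *bh_result, int create)
    {
        /* A real filesystem looks iblock up in its block-mapping
         * metadata (allocating if 'create'); pretend the file is laid
         * out contiguously from a known start block. */
        sector_t disk_block = EXAMPLE_FILE_START_BLOCK + iblock;

        /* map_bh() marks bh_result mapped and records the device,
         * block number, and (one fs block of) size. */
        map_bh(bh_result, inode->i_sb, disk_block);
        return 0;
    }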
662 static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio, in dio_new_bio() argument
668 ret = dio_bio_reap(dio, sdio); in dio_new_bio()
674 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); in dio_new_bio()
687 static inline int dio_bio_add_page(struct dio *dio, struct dio_submit *sdio) in dio_bio_add_page() argument
699 dio_pin_page(dio, sdio->cur_page); in dio_bio_add_page()
719 static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio, in dio_send_cur_page() argument
745 dio_bio_submit(dio, sdio); in dio_send_cur_page()
749 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
754 if (dio_bio_add_page(dio, sdio) != 0) { in dio_send_cur_page()
755 dio_bio_submit(dio, sdio); in dio_send_cur_page()
756 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh); in dio_send_cur_page()
758 ret = dio_bio_add_page(dio, sdio); in dio_send_cur_page()
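dio_send_cur_page() shows a submit-and-retry shape: try to add the cached page to the open bio; if the bio is full, submit it, open a fresh one, and retry, where the second attempt is guaranteed room (lines 754-758). The same pattern in self-contained C, modeling a bio as a fixed-capacity batch; everything here is illustrative:

    #include <stdio.h>

    #define BATCH_CAP 4

    struct batch { int items[BATCH_CAP]; int n; };

    static int try_add(struct batch *b, int item)
    {
        if (b->n == BATCH_CAP)
            return -1;                 /* full: caller must flush first */
        b->items[b->n++] = item;
        return 0;
    }

    static void submit(struct batch *b)
    {
        printf("submitting %d items\n", b->n);
        b->n = 0;                      /* a real bio would be reallocated */
    }

    static void send(struct batch *b, int item)
    {
        if (try_add(b, item) != 0) {   /* mirrors dio_bio_add_page() */
            submit(b);                 /* mirrors dio_bio_submit() */
            try_add(b, item);          /* cannot fail into an empty batch */
        }
    }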
784 submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, in submit_page_section() argument
788 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in submit_page_section()
814 ret = dio_send_cur_page(dio, sdio, map_bh); in submit_page_section()
815 dio_unpin_page(dio, sdio->cur_page); in submit_page_section()
821 dio_pin_page(dio, page); /* It is in dio */ in submit_page_section()
833 ret = dio_send_cur_page(dio, sdio, map_bh); in submit_page_section()
835 dio_bio_submit(dio, sdio); in submit_page_section()
836 dio_unpin_page(dio, sdio->cur_page); in submit_page_section()
851 static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio, in dio_zero_block() argument
879 if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes, in dio_zero_block()
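dio_zero_block() pads an unaligned write out to a filesystem-block boundary when the block was newly allocated, zeroing either from the block start up to the write position (end == 0) or from the position to the block end (end == 1), and feeds the zero chunk through submit_page_section() like any other data (line 879). A small runnable function with what I believe is the same span arithmetic, assuming blocks_per_fs_block is a power of two as in the kernel:

    #include <stdint.h>

    static uint64_t zero_span(uint64_t block_in_file,
                              uint64_t blocks_per_fs_block, int end)
    {
        /* Offset of the current sub-block within its fs block. */
        uint64_t off = block_in_file & (blocks_per_fs_block - 1);

        if (off == 0)
            return 0;                  /* already aligned: nothing to zero */
        /* Pad back to the block start, or forward to the block end. */
        return end ? blocks_per_fs_block - off : off;
    }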
902 static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, in do_direct_IO() argument
905 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in do_direct_IO()
914 page = dio_get_page(dio, sdio); in do_direct_IO()
935 ret = get_more_blocks(dio, sdio, map_bh); in do_direct_IO()
937 dio_unpin_page(dio, page); in do_direct_IO()
982 dio_unpin_page(dio, page); in do_direct_IO()
990 i_size_aligned = ALIGN(i_size_read(dio->inode), in do_direct_IO()
995 dio_unpin_page(dio, page); in do_direct_IO()
1001 dio->result += 1 << blkbits; in do_direct_IO()
1011 dio_zero_block(dio, sdio, 0, map_bh); in do_direct_IO()
1029 ret = submit_page_section(dio, sdio, page, in do_direct_IO()
1035 dio_unpin_page(dio, page); in do_direct_IO()
1042 dio->result += this_chunk_bytes; in do_direct_IO()
1051 dio_unpin_page(dio, page); in do_direct_IO()
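do_direct_IO() advances the request in chunks, accumulating progress into dio->result (lines 1001 and 1042); each step is bounded by what remains in the current page, in the current disk mapping, and in the request overall. A hedged restatement of that three-way clamp; the decomposition into three "remaining" counts is mine, while the kernel tracks them across sdio fields:

    #include <stddef.h>

    static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

    /* One step of the copy loop can cover no more than the smallest of
     * the three limits in play. */
    static size_t next_chunk(size_t left_in_page, size_t left_in_mapping,
                             size_t left_in_request)
    {
        return min_size(left_in_page,
                        min_size(left_in_mapping, left_in_request));
    }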
1057 static inline int drop_refcount(struct dio *dio) in drop_refcount() argument
1073 spin_lock_irqsave(&dio->bio_lock, flags); in drop_refcount()
1074 ret2 = --dio->refcount; in drop_refcount()
1075 spin_unlock_irqrestore(&dio->bio_lock, flags); in drop_refcount()
1116 struct dio *dio; in __blockdev_direct_IO() local
1131 dio = kmem_cache_alloc(dio_cache, GFP_KERNEL); in __blockdev_direct_IO()
1132 if (!dio) in __blockdev_direct_IO()
1139 memset(dio, 0, offsetof(struct dio, pages)); in __blockdev_direct_IO()
1141 dio->flags = flags; in __blockdev_direct_IO()
1142 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) { in __blockdev_direct_IO()
1146 dio->is_pinned = iov_iter_extract_will_pin(iter); in __blockdev_direct_IO()
1149 dio->i_size = i_size_read(inode); in __blockdev_direct_IO()
1150 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { in __blockdev_direct_IO()
1163 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) { in __blockdev_direct_IO()
1178 dio->is_async = false; in __blockdev_direct_IO()
1180 dio->is_async = false; in __blockdev_direct_IO()
1182 dio->is_async = true; in __blockdev_direct_IO()
1184 dio->inode = inode; in __blockdev_direct_IO()
1186 dio->opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; in __blockdev_direct_IO()
1188 dio->opf |= REQ_NOWAIT; in __blockdev_direct_IO()
1190 dio->opf = REQ_OP_READ; in __blockdev_direct_IO()
1197 if (dio->is_async && iov_iter_rw(iter) == WRITE) { in __blockdev_direct_IO()
1200 retval = dio_set_defer_completion(dio); in __blockdev_direct_IO()
1201 else if (!dio->inode->i_sb->s_dio_done_wq) { in __blockdev_direct_IO()
1207 retval = sb_init_dio_done_wq(dio->inode->i_sb); in __blockdev_direct_IO()
1224 dio->end_io = end_io; in __blockdev_direct_IO()
1228 dio->iocb = iocb; in __blockdev_direct_IO()
1230 spin_lock_init(&dio->bio_lock); in __blockdev_direct_IO()
1231 dio->refcount = 1; in __blockdev_direct_IO()
1233 dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ; in __blockdev_direct_IO()
1248 retval = do_direct_IO(dio, &sdio, &map_bh); in __blockdev_direct_IO()
1250 dio_cleanup(dio, &sdio); in __blockdev_direct_IO()
1263 dio_zero_block(dio, &sdio, 1, &map_bh); in __blockdev_direct_IO()
1268 ret2 = dio_send_cur_page(dio, &sdio, &map_bh); in __blockdev_direct_IO()
1271 dio_unpin_page(dio, sdio.cur_page); in __blockdev_direct_IO()
1275 dio_bio_submit(dio, &sdio); in __blockdev_direct_IO()
1283 dio_cleanup(dio, &sdio); in __blockdev_direct_IO()
1290 if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) in __blockdev_direct_IO()
1291 inode_unlock(dio->inode); in __blockdev_direct_IO()
1301 if (dio->is_async && retval == 0 && dio->result && in __blockdev_direct_IO()
1302 (iov_iter_rw(iter) == READ || dio->result == count)) in __blockdev_direct_IO()
1305 dio_await_completion(dio); in __blockdev_direct_IO()
1307 if (drop_refcount(dio) == 0) { in __blockdev_direct_IO()
1308 retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE); in __blockdev_direct_IO()
1315 if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) in __blockdev_direct_IO()
1318 kmem_cache_free(dio_cache, dio); in __blockdev_direct_IO()
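__blockdev_direct_IO() is normally reached through the blockdev_direct_IO() inline wrapper, which fills in the inode's own block device and the default DIO_LOCKING | DIO_SKIP_HOLES flags. A kernel-style sketch of a typical ->direct_IO method built on it, reusing the hypothetical example_get_block from the get_more_blocks() note above; not buildable outside a kernel tree:

    static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
    {
        struct inode *inode = iocb->ki_filp->f_mapping->host;

        /* Approximately expands to __blockdev_direct_IO(iocb, inode,
         * inode->i_sb->s_bdev, iter, example_get_block, NULL,
         * DIO_LOCKING | DIO_SKIP_HOLES). */
        return blockdev_direct_IO(iocb, inode, iter, example_get_block);
    }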
1325 dio_cache = KMEM_CACHE(dio, SLAB_PANIC); in dio_init()
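dio_init() sets up the slab cache that every struct dio comes from: kmem_cache_alloc() at line 1131 and kmem_cache_free() at lines 318 and 1318 above. KMEM_CACHE() derives the cache name, object size, and alignment from the type itself; from include/linux/slab.h it expands to approximately:

    /* KMEM_CACHE(dio, SLAB_PANIC) ~= */
    dio_cache = kmem_cache_create("dio", sizeof(struct dio),
                                  __alignof__(struct dio),
                                  SLAB_PANIC, NULL);

SLAB_PANIC makes cache creation failure fatal at boot, which is why dio_init() needs no error handling around it.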