Lines matching full:io (identifier search over drivers/md/dm-io.c)

18 #include <linux/dm-io.h>
20 #define DM_MSG_PREFIX "io"
30 * Aligning 'struct io' reduces the number of bits required to store
33 struct io { struct
84 * We need to keep track of which region a bio is doing io for.
86 * ensure the 'struct io' pointer is aligned so enough low bits are
90 static void store_io_and_region_in_bio(struct bio *bio, struct io *io, in store_io_and_region_in_bio() argument
93 if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) { in store_io_and_region_in_bio()
94 DMCRIT("Unaligned struct io pointer %p", io); in store_io_and_region_in_bio()
98 bio->bi_private = (void *)((unsigned long)io | region); in store_io_and_region_in_bio()
101 static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io, in retrieve_io_and_region_from_bio() argument
106 *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS); in retrieve_io_and_region_from_bio()
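
The fragments above describe dm-io's pointer-tagging trick: every 'struct io' is allocated aligned to DM_IO_MAX_REGIONS (a power of two), so the low bits of its address are guaranteed zero and the region index can ride along in bio->bi_private. Below is a minimal userspace sketch of the same idea, not the kernel code itself; MAX_REGIONS and the tagged_io type are stand-ins.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_REGIONS 8U                /* stand-in for DM_IO_MAX_REGIONS; must be a power of two */

struct tagged_io { int payload; };    /* placeholder for the real struct io */

/* Pack an aligned pointer and a small region index into one word,
 * as store_io_and_region_in_bio() does with bio->bi_private. */
static void *pack(struct tagged_io *io, unsigned region)
{
	uintptr_t p = (uintptr_t)io;

	assert((p & (MAX_REGIONS - 1)) == 0);   /* alignment guarantees the low bits are free */
	assert(region < MAX_REGIONS);
	return (void *)(p | region);
}

/* Reverse the packing; masking with -MAX_REGIONS clears exactly the
 * low bits the region index occupies (for a power-of-two value). */
static void unpack(void *priv, struct tagged_io **io, unsigned *region)
{
	uintptr_t val = (uintptr_t)priv;

	*io = (struct tagged_io *)(val & -(uintptr_t)MAX_REGIONS);
	*region = val & (MAX_REGIONS - 1);
}

int main(void)
{
	static _Alignas(MAX_REGIONS) struct tagged_io the_io;
	struct tagged_io *out;
	unsigned region;

	unpack(pack(&the_io, 5), &out, &region);
	printf("pointer ok: %d, region: %u\n", out == &the_io, region);
	return 0;
}
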
111 * We need an io object to keep track of the number of bios that
112 * have been dispatched for a particular io.
114 static void complete_io(struct io *io) in complete_io() argument
116 unsigned long error_bits = io->error_bits; in complete_io()
117 io_notify_fn fn = io->callback; in complete_io()
118 void *context = io->context; in complete_io()
120 if (io->vma_invalidate_size) in complete_io()
121 invalidate_kernel_vmap_range(io->vma_invalidate_address, in complete_io()
122 io->vma_invalidate_size); in complete_io()
124 mempool_free(io, &io->client->pool); in complete_io()
128 static void dec_count(struct io *io, unsigned int region, blk_status_t error) in dec_count() argument
131 set_bit(region, &io->error_bits); in dec_count()
133 if (atomic_dec_and_test(&io->count)) in dec_count()
134 complete_io(io); in dec_count()
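
complete_io() and dec_count() above implement a plain reference count: each in-flight bio holds one count on its parent io, a failed region sets its bit in error_bits, and whoever drops the last count runs the caller's callback and returns the object to the pool (after invalidating any vmap'd range). A rough userspace sketch of that pattern follows, using hypothetical mini_* names; the kernel uses set_bit(), atomic_dec_and_test() and a mempool.

#include <stdatomic.h>
#include <stdlib.h>

/* Miniature stand-in for struct io: a count of outstanding sub-I/Os,
 * a bitmask of failed regions, and a completion callback. */
struct mini_io {
	atomic_uint count;
	atomic_ulong error_bits;
	void (*callback)(unsigned long error_bits, void *context);
	void *context;
};

/* Analogue of dec_count(): record an error for the region, drop one
 * reference, and complete the whole io when the last reference goes. */
static void mini_dec_count(struct mini_io *io, unsigned region, int error)
{
	if (error)
		atomic_fetch_or(&io->error_bits, 1UL << region);

	if (atomic_fetch_sub(&io->count, 1) == 1) {
		io->callback(atomic_load(&io->error_bits), io->context);
		free(io);                 /* kernel: mempool_free(io, &io->client->pool) */
	}
}
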
139 struct io *io; in endio() local
147 * The bio destructor in bio_put() may use the io object. in endio()
149 retrieve_io_and_region_from_bio(bio, &io, &region); in endio()
154 dec_count(io, region, error); in endio()
159 * destination page for io.
294 * IO routines that accept a list of pages.
298 struct io *io) in do_region() argument
322 atomic_inc(&io->count); in do_region()
323 dec_count(io, region, BLK_STS_NOTSUPP); in do_region()
348 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios); in do_region()
353 store_io_and_region_in_bio(bio, io, region); in do_region()
385 atomic_inc(&io->count); in do_region()
392 struct io *io, int sync) in dispatch_io() argument
409 do_region(op, op_flags, i, where + i, dp, io); in dispatch_io()
414 * the io being completed too early. in dispatch_io()
416 dec_count(io, 0, 0); in dispatch_io()
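
do_region() and dispatch_io() show the other half of the counting scheme: the io starts with count = 1 held by the dispatcher, every submitted bio takes one more reference, and the dispatcher's own reference is only dropped by the final dec_count(io, 0, 0), so the completion callback cannot fire while regions are still being submitted. Building on the mini_io sketch above, a hedged illustration:

/* Hypothetical dispatcher over the mini_io type from the previous sketch.
 * The io arrives with count = 1 (the dispatcher's own reference); each
 * submitted region adds one, and the final mini_dec_count() drops the
 * dispatcher's reference so completion cannot happen before the loop ends. */
static void mini_dispatch(struct mini_io *io, unsigned num_regions,
			  void (*submit)(struct mini_io *io, unsigned region))
{
	for (unsigned region = 0; region < num_regions; region++) {
		atomic_fetch_add(&io->count, 1);    /* reference for this region's I/O */
		submit(io, region);                 /* its completion later calls mini_dec_count() */
	}
	mini_dec_count(io, 0, 0);                   /* matches dec_count(io, 0, 0) in dispatch_io() */
}

The unsupported-operation case in do_region() fits the same shape: take the reference, then immediately drop it with BLK_STS_NOTSUPP, so the error lands in error_bits for that region without special-casing the count.
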
436 struct io *io; in sync_io() local
446 io = mempool_alloc(&client->pool, GFP_NOIO); in sync_io()
447 io->error_bits = 0; in sync_io()
448 atomic_set(&io->count, 1); /* see dispatch_io() */ in sync_io()
449 io->client = client; in sync_io()
450 io->callback = sync_io_complete; in sync_io()
451 io->context = &sio; in sync_io()
453 io->vma_invalidate_address = dp->vma_invalidate_address; in sync_io()
454 io->vma_invalidate_size = dp->vma_invalidate_size; in sync_io()
456 dispatch_io(op, op_flags, num_regions, where, dp, io, 1); in sync_io()
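
sync_io() wraps the same machinery for callers that want to block: the io comes from the client's mempool, count starts at 1 for dispatch_io(), the callback is an internal sync_io_complete() that fills an on-stack context, and the caller then waits for it. A single-threaded userspace sketch of that shape, again on the hypothetical mini_* helpers; the kernel waits on a struct completion at the marked point.

#include <assert.h>

/* On-stack result that stands in for struct sync_io. */
struct mini_sync {
	unsigned long error_bits;
	int done;
};

static void mini_sync_done(unsigned long error_bits, void *context)
{
	struct mini_sync *sio = context;

	sio->error_bits = error_bits;
	sio->done = 1;               /* kernel: complete(&sio->wait) */
}

static int mini_sync_io(unsigned num_regions,
			void (*submit)(struct mini_io *io, unsigned region))
{
	struct mini_sync sio = { 0, 0 };
	struct mini_io *io = malloc(sizeof(*io));  /* kernel: mempool_alloc(&client->pool, GFP_NOIO) */

	atomic_init(&io->count, 1);                /* see dispatch_io() */
	atomic_init(&io->error_bits, 0);
	io->callback = mini_sync_done;
	io->context = &sio;

	mini_dispatch(io, num_regions, submit);
	assert(sio.done);                          /* kernel: wait_for_completion_io(&sio.wait) */
	return sio.error_bits ? -1 : 0;            /* kernel returns -EIO if any error bit is set */
}
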
470 struct io *io; in async_io() local
478 io = mempool_alloc(&client->pool, GFP_NOIO); in async_io()
479 io->error_bits = 0; in async_io()
480 atomic_set(&io->count, 1); /* see dispatch_io() */ in async_io()
481 io->client = client; in async_io()
482 io->callback = fn; in async_io()
483 io->context = context; in async_io()
485 io->vma_invalidate_address = dp->vma_invalidate_address; in async_io()
486 io->vma_invalidate_size = dp->vma_invalidate_size; in async_io()
488 dispatch_io(op, op_flags, num_regions, where, dp, io, 0); in async_io()
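
async_io() differs only in whom it notifies: the caller's fn/context pair goes straight into the io, dispatch_io() is called with sync = 0, and the function returns immediately; fn later runs from whichever completion drops the last reference. A matching sketch over the same mini_* helpers:

/* Hypothetical asynchronous front end: the caller's callback and context
 * are stored in the io and the call returns right after dispatch; the
 * callback fires when the last outstanding region completes. */
static int mini_async_io(unsigned num_regions,
			 void (*submit)(struct mini_io *io, unsigned region),
			 void (*fn)(unsigned long error_bits, void *context),
			 void *context)
{
	struct mini_io *io = malloc(sizeof(*io));  /* kernel: mempool_alloc() from the client pool */

	atomic_init(&io->count, 1);                /* see dispatch_io() */
	atomic_init(&io->error_bits, 0);
	io->callback = fn;
	io->context = context;

	mini_dispatch(io, num_regions, submit);    /* kernel passes sync = 0 here */
	return 0;
}
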
532 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
534 * io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
560 _dm_io_cache = KMEM_CACHE(io, 0); in dm_io_init()