drivers/md/dm-rq.c: Lines Matching refs:clone

21 	struct request *orig, *clone;  member
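Line 21 is the orig/clone pair at the heart of request-based DM: each original request is tracked beside the clone that is actually dispatched to the underlying device. An abridged sketch of the enclosing struct (dm_rq_target_io from dm-rq.h in the matching kernel tree; the fields beyond the matched line are recalled from that tree and may differ by version):

struct dm_rq_target_io {
	struct mapped_device *md;	/* owning DM device */
	struct dm_target *ti;		/* target that mapped the request */
	struct request *orig, *clone;	/* matched line 21 */
	blk_status_t error;		/* stashed status, consumed by dm_softirq_done() */
	union map_info info;		/* per-target private mapping data */
	/* ... statistics and deferred-work fields omitted ... */
};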
79 static void end_clone_bio(struct bio *clone) in end_clone_bio() argument
82 container_of(clone, struct dm_rq_clone_bio_info, clone); in end_clone_bio()
85 blk_status_t error = clone->bi_status; in end_clone_bio()
86 bool is_last = !clone->bi_next; in end_clone_bio()
88 bio_put(clone); in end_clone_bio()
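Lines 79-88 show the embedded-member trick: struct dm_rq_clone_bio_info embeds the clone bio itself (see also line 327), so container_of() can step from the bio pointer the block layer hands back to the enclosing info struct with no extra lookup. A minimal, runnable userspace illustration of the same pattern; the struct and function names here are hypothetical stand-ins, not the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Same arithmetic the kernel macro performs, minus its type checking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-ins for struct bio / struct dm_rq_clone_bio_info. */
struct bio { int bi_status; };

struct clone_bio_info {
	struct bio *orig;	/* the original bio being cloned */
	struct bio clone;	/* embedded clone, as in dm-rq */
};

/* What end_clone_bio() does: given only the embedded member,
 * step back to the enclosing info structure. */
static void end_clone(struct bio *clone)
{
	struct clone_bio_info *info =
		container_of(clone, struct clone_bio_info, clone);

	printf("recovered info %p from clone %p\n",
	       (void *)info, (void *)clone);
}

int main(void)
{
	struct clone_bio_info info = { 0 };

	end_clone(&info.clone);	/* the callback sees only the member */
	return 0;
}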
157 static void dm_end_request(struct request *clone, blk_status_t error) in dm_end_request() argument
159 struct dm_rq_target_io *tio = clone->end_io_data; in dm_end_request()
163 blk_rq_unprep_clone(clone); in dm_end_request()
164 tio->ti->type->release_clone_rq(clone, NULL); in dm_end_request()
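Lines 163-164 are the unwind pair that the error paths below repeat: blk_rq_unprep_clone() frees the bios that blk_rq_prep_clone() attached, and only then does the target's release_clone_rq() hook dispose of the clone request itself; dm_requeue_original_request() (lines 195-197) runs the same two calls before requeueing. A hedged reconstruction of the completion path, with the file's stats and suspend-accounting helpers elided:

static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	/* Unwind in reverse order of setup_clone(): detach the cloned
	 * bios first, then let the target free the clone request. */
	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	/* Complete the original request toward its submitter. */
	blk_mq_end_request(rq, error);
}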
195 if (tio->clone) { in dm_requeue_original_request()
196 blk_rq_unprep_clone(tio->clone); in dm_requeue_original_request()
197 tio->ti->type->release_clone_rq(tio->clone, NULL); in dm_requeue_original_request()
204 static void dm_done(struct request *clone, blk_status_t error, bool mapped) in dm_done() argument
207 struct dm_rq_target_io *tio = clone->end_io_data; in dm_done()
214 r = rq_end_io(tio->ti, clone, error, &tio->info); in dm_done()
218 if (req_op(clone) == REQ_OP_DISCARD && in dm_done()
219 !clone->q->limits.max_discard_sectors) in dm_done()
221 else if (req_op(clone) == REQ_OP_WRITE_SAME && in dm_done()
222 !clone->q->limits.max_write_same_sectors) in dm_done()
224 else if (req_op(clone) == REQ_OP_WRITE_ZEROES && in dm_done()
225 !clone->q->limits.max_write_zeroes_sectors) in dm_done()
232 dm_end_request(clone, error); in dm_done()
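Lines 204-232 give dm_done() its shape: the target's optional rq_end_io hook (line 214) gets first look at the completion, and lines 218-225 implement the "stop issuing what the device stopped supporting" policy: if the op that failed now has a zero limit on the underlying queue, DM disables that capability on the mapped device instead of failing forever. A hedged reconstruction of that branch; the BLK_STS_TARGET guard and the disable_*() helpers come from the surrounding kernel tree, not from this listing:

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
			 !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}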
258 struct request *clone = tio->clone; in dm_softirq_done() local
260 if (!clone) { in dm_softirq_done()
272 dm_done(clone, tio->error, mapped); in dm_softirq_done()
300 static void end_clone_request(struct request *clone, blk_status_t error) in end_clone_request() argument
302 struct dm_rq_target_io *tio = clone->end_io_data; in end_clone_request()
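Lines 258-272 and 300-302 split completion in two stages: end_clone_request(), installed as the clone's ->end_io in setup_clone() (line 346), merely forwards status to the original request, while dm_softirq_done() later does the real work, treating a NULL tio->clone (line 260) as a request that was never mapped and can be ended directly. A hedged reconstruction of the callback; dm_complete_request() is a same-file helper outside this listing:

static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	/* No teardown here; defer to dm_softirq_done() via the original. */
	dm_complete_request(tio->orig, error);
}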
307 static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq) in dm_dispatch_clone_request() argument
311 if (blk_queue_io_stat(clone->q)) in dm_dispatch_clone_request()
312 clone->rq_flags |= RQF_IO_STAT; in dm_dispatch_clone_request()
314 clone->start_time_ns = ktime_get_ns(); in dm_dispatch_clone_request()
315 r = blk_insert_cloned_request(clone->q, clone); in dm_dispatch_clone_request()
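Lines 307-315 reassemble into a short dispatcher: propagate the I/O-statistics flag from the underlying queue, timestamp the clone, and hand it to the block core with blk_insert_cloned_request(). A hedged reconstruction; the hard-failure handling and dm_complete_request() are assumed from the surrounding file:

static blk_status_t dm_dispatch_clone_request(struct request *clone,
					      struct request *rq)
{
	blk_status_t r;

	/* Keep per-partition accounting if the underlying queue has it. */
	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time_ns = ktime_get_ns();
	r = blk_insert_cloned_request(clone->q, clone);
	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE &&
	    r != BLK_STS_DEV_RESOURCE)
		/* Hard failure: complete the original request with it. */
		dm_complete_request(rq, r);
	return r;
}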
327 container_of(bio, struct dm_rq_clone_bio_info, clone); in dm_rq_bio_constructor()
336 static int setup_clone(struct request *clone, struct request *rq, in setup_clone() argument
341 r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask, in setup_clone()
346 clone->end_io = end_clone_request; in setup_clone()
347 clone->end_io_data = tio; in setup_clone()
349 tio->clone = clone; in setup_clone()
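Lines 336-349 reassemble almost completely from the matches: blk_rq_prep_clone() copies the original request's bios into the clone, invoking dm_rq_bio_constructor() (line 327) for each one so that the owning tio lands in every cloned bio's info struct, and the clone's completion is then routed back through DM. A reconstruction under those assumptions:

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	/* Clone rq's bios out of the mapped device's bio_set; the
	 * constructor records tio in each cloned bio's info struct. */
	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	/* Route the clone's completion back through DM. */
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;
	tio->clone = clone;

	return 0;
}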
359 tio->clone = NULL; in init_tio()
384 struct request *clone = NULL; in map_request() local
387 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); in map_request()
393 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { in map_request()
395 ti->type->release_clone_rq(clone, &tio->info); in map_request()
400 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), in map_request()
402 ret = dm_dispatch_clone_request(clone, rq); in map_request()
404 blk_rq_unprep_clone(clone); in map_request()
405 blk_mq_cleanup_rq(clone); in map_request()
406 tio->ti->type->release_clone_rq(clone, &tio->info); in map_request()
407 tio->clone = NULL; in map_request()
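Lines 384-407 are the mapping fast path: the target's clone_and_map_rq() hook either submits the I/O itself or hands back a clone to dispatch, and a busy underlying queue triggers the full unwind of setup_clone() before the original request is requeued. A hedged reconstruction of the remapped branch; the DM_MAPIO_* and BLK_STS_* constants come from the kernel headers, not from this listing:

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target took ownership and will dispatch on its own. */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM: hand the clone back, retry later. */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			/* Underlying queue is busy: unwind setup_clone()
			 * in reverse order, then requeue the original. */
			blk_rq_unprep_clone(clone);
			blk_mq_cleanup_rq(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		}
		break;
	}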