Searched refs: REQ_OP_DISCARD  (Results 1 – 25 of 56, sorted by relevance)

314  if (op == REQ_OP_DISCARD)   in do_region()
320  if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||   in do_region()
336  case REQ_OP_DISCARD:   in do_region()
355  if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {   in do_region()

622   if (bio_op(bio) == REQ_OP_DISCARD) {   in write_callback()
661   if (bio_op(bio) == REQ_OP_DISCARD) {   in do_write()
662   io_req.bi_op = REQ_OP_DISCARD;   in do_write()
701   (bio_op(bio) == REQ_OP_DISCARD)) {   in do_writes()
1248  bio_op(bio) != REQ_OP_DISCARD)   in mirror_end_io()

408  if (bio_op(bio) == REQ_OP_DISCARD)   in dm_rh_mark_nosync()
531  if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)   in dm_rh_inc_pending()

279  if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&   in linear_make_request()

413   bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);   in end_discard()
749   if (bio_op(bio) == REQ_OP_DISCARD)   in inc_all_io_entry()
912   if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)   in __inc_remap_and_issue_cell()
1826  bio_op(bio) == REQ_OP_DISCARD)   in __remap_and_issue_shared_cell()
2216  if (bio_op(bio) == REQ_OP_DISCARD)   in process_thin_deferred_bios()
2303  if (bio_op(cell->holder) == REQ_OP_DISCARD)   in process_thin_deferred_cells()
2730  if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {   in thin_bio_map()

1979  if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {   in snapshot_map()
1998  if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&   in snapshot_map()
2009  if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {   in snapshot_map()
2126  if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {   in snapshot_merge_map()

225  if (req_op(clone) == REQ_OP_DISCARD &&   in dm_done()

298  if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||   in stripe_map()

835   bio_op(bio) != REQ_OP_DISCARD) {   in check_if_tick_bio_needed()
885   return bio_op(bio) != REQ_OP_DISCARD;   in accountable_bio()
1098  return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);   in discard_or_flush()
1908  else if (bio_op(bio) == REQ_OP_DISCARD)   in process_deferred_bios()

583  if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {   in raid0_make_request()

282  REQ_OP_DISCARD = 3,   enumerator
417  return (op & REQ_OP_MASK) == REQ_OP_DISCARD;   in op_is_discard()

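The two hits above are the definition sites: the request op lives in the low REQ_OP_BITS of bi_opf and is tested by masking. Below is a minimal user-space sketch of that encoding, with constants copied from this kernel generation's blk_types.h; the main() harness is purely illustrative, not kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Values as in include/linux/blk_types.h of this era. */
    #define REQ_OP_BITS  8
    #define REQ_OP_MASK  ((1 << REQ_OP_BITS) - 1)

    enum req_opf {
            REQ_OP_READ         = 0,
            REQ_OP_WRITE        = 1,
            REQ_OP_DISCARD      = 3,  /* odd value, so op_is_write() is true */
            REQ_OP_SECURE_ERASE = 5,
            REQ_OP_WRITE_ZEROES = 9,
    };

    /* Mirrors op_is_discard() and op_is_write() from blk_types.h. */
    static bool op_is_discard(unsigned int op)
    {
            return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
    }

    static bool op_is_write(unsigned int op)
    {
            return op & 1;  /* write-type ops have odd op numbers */
    }

    int main(void)
    {
            unsigned int bi_opf = REQ_OP_DISCARD;  /* op bits of a discard bio */

            printf("discard? %d  write-type? %d\n",
                   op_is_discard(bi_opf), op_is_write(bi_opf));
            return 0;
    }
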
65   bio_op(bio) != REQ_OP_DISCARD &&   in bio_has_data()
75   return bio_op(bio) == REQ_OP_DISCARD ||   in bio_no_advance_iter()
186  case REQ_OP_DISCARD:   in bio_segments()

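These bio.h hits all special-case discard because a discard bio describes a sector range without carrying payload pages, so nothing may walk its bvec iterator. A hedged sketch of the classification these helpers perform, reusing the enum from the previous sketch; op_carries_payload() is a hypothetical name, and the real bio_has_data() also checks bi_iter.bi_size:

    /* Hypothetical stand-in for the bio_has_data()/bio_no_advance_iter()
     * decision: data-less ops have no pages to iterate over. */
    static bool op_carries_payload(unsigned int op)
    {
            switch (op) {
            case REQ_OP_DISCARD:
            case REQ_OP_SECURE_ERASE:
            case REQ_OP_WRITE_ZEROES:
                    return false;  /* sector range only, no data pages */
            default:
                    return true;
            }
    }
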
996   if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))   in blk_queue_get_max_sectors()
1032  req_op(rq) == REQ_OP_DISCARD ||   in blk_rq_get_max_sectors()

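These two hits show that discard size is capped by its own queue limit rather than max_sectors. A simplified kernel-context sketch of the dispatch blk_queue_get_max_sectors() performs; it compiles only against kernel headers, the function name here is illustrative, and the field names follow struct queue_limits of this era:

    static unsigned int max_sectors_for(struct request_queue *q,
                                        unsigned int op)
    {
            if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
                    return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
            if (op == REQ_OP_WRITE_ZEROES)
                    return q->limits.max_write_zeroes_sectors;
            return q->limits.max_sectors;  /* ordinary reads and writes */
    }
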
74  if (bio_op(bio) == REQ_OP_DISCARD)   in should_writeback()

385   (bio_op(bio) == REQ_OP_DISCARD))   in check_should_bypass()
1002  if (bio_op(bio) == REQ_OP_DISCARD)   in cached_dev_write()
1016  if (bio_op(bio) == REQ_OP_DISCARD &&   in cached_dev_write()
1122  if ((bio_op(bio) == REQ_OP_DISCARD) &&   in detached_dev_do_request()
1344  s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;   in flash_dev_make_request()

469  if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)   in get_limit()
540  case REQ_OP_DISCARD:   in wbt_should_throttle()
559  if (bio_op(bio) == REQ_OP_DISCARD)   in bio_to_wbt_flags()

299  case REQ_OP_DISCARD:   in __blk_queue_split()
367  case REQ_OP_DISCARD:   in blk_recalc_rq_segments()
703  if (req_op(req) == REQ_OP_DISCARD &&   in blk_discard_mergable()

47  op = REQ_OP_DISCARD;   in __blkdev_issue_discard()

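__blkdev_issue_discard() is where the block layer chops a range into discard bios; most callers go through the synchronous wrapper instead. A hedged kernel-context usage sketch, assuming the five-argument blkdev_issue_discard() signature of this kernel generation; discard_example() and the chosen range are illustrative:

    #include <linux/blkdev.h>

    /* Discard 1 MiB starting at sector 2048 of an already-opened device. */
    static int discard_example(struct block_device *bdev)
    {
            sector_t start = 2048;             /* 512-byte sectors */
            sector_t nr = (1024 * 1024) >> 9;  /* 1 MiB in sectors */

            return blkdev_issue_discard(bdev, start, nr, GFP_KERNEL, 0);
    }
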
257  case REQ_OP_DISCARD:   in bounce_clone_bio()

522   if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {   in ubd_handler()
1395  case REQ_OP_DISCARD:   in ubd_queue_rq()
1552  case REQ_OP_DISCARD:   in do_io()

55    | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);   in drbd_req_new()
1182  else if (bio_op(bio) == REQ_OP_DISCARD)   in drbd_submit_req_private_bio()
1237  bio_op(bio) == REQ_OP_DISCARD)   in drbd_request_prepare()

410  case REQ_OP_DISCARD:   in btrfs_op()

42  Any REQ_OP_DISCARD requests are treated like WRITE requests. Otherwise we would

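That documentation hit records the reasoning: a discard mutates on-disk state, so a write-ordering log must serialize it with writes. A hedged sketch of that classification; the helper name is illustrative and not taken from dm-log-writes, and note that REQ_OP_DISCARD's odd op number already makes op_is_write() true:

    /* Illustrative helper: ops that must be ordered on the write path. */
    static bool needs_write_ordering(struct bio *bio)
    {
            switch (bio_op(bio)) {
            case REQ_OP_WRITE:
            case REQ_OP_DISCARD:
            case REQ_OP_WRITE_ZEROES:
                    return true;
            default:
                    return false;
            }
    }
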
51  case REQ_OP_DISCARD:   in mmc_cqe_issue_type()

86  case REQ_OP_DISCARD:   in do_blktrans_request()