Searched refs: REQ_OP_DISCARD (Results 1 – 25 of 48), sorted by relevance
314 if (op == REQ_OP_DISCARD) in do_region()
320 if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || in do_region()
336 case REQ_OP_DISCARD: in do_region()
355 if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) { in do_region()
622 if (bio_op(bio) == REQ_OP_DISCARD) { in write_callback()
661 if (bio_op(bio) == REQ_OP_DISCARD) { in do_write()
662 io_req.bi_op = REQ_OP_DISCARD; in do_write()
701 (bio_op(bio) == REQ_OP_DISCARD)) { in do_writes()
1250 bio_op(bio) != REQ_OP_DISCARD) in mirror_end_io()
408 if (bio_op(bio) == REQ_OP_DISCARD) in dm_rh_mark_nosync()
531 if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD) in dm_rh_inc_pending()
282 if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && in linear_make_request()
383 bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0); in end_discard()
719 if (bio_op(bio) == REQ_OP_DISCARD) in inc_all_io_entry()
882 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) in __inc_remap_and_issue_cell()
1763 bio_op(bio) == REQ_OP_DISCARD) in __remap_and_issue_shared_cell()
2153 if (bio_op(bio) == REQ_OP_DISCARD) in process_thin_deferred_bios()
2240 if (bio_op(cell->holder) == REQ_OP_DISCARD) in process_thin_deferred_cells()
2684 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) { in thin_bio_map()
298 if (unlikely(bio_op(bio) == REQ_OP_DISCARD) || in stripe_map()
840 bio_op(bio) != REQ_OP_DISCARD) { in check_if_tick_bio_needed()
890 return bio_op(bio) != REQ_OP_DISCARD; in accountable_bio()
1103 return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf); in discard_or_flush()
1927 else if (bio_op(bio) == REQ_OP_DISCARD) in process_deferred_bios()
569 if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) { in raid0_make_request()
432 case REQ_OP_DISCARD: in dmz_handle_bio()
286 REQ_OP_DISCARD = 3, enumerator
407 return (op & REQ_OP_MASK) == REQ_OP_DISCARD; in op_is_discard()
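REQ_OP_DISCARD is defined as one of the request operation codes, and op_is_discard() simply masks the op with REQ_OP_MASK before comparing. A minimal sketch of the equivalent driver-side test, not taken from any hit above (the helper name is made up):

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Same check op_is_discard() performs: bio_op() strips the REQ_* flag
 * bits from bi_opf, leaving only the REQ_OP_* code to compare. */
static bool bio_is_discard_sketch(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD;
}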
90 bio_op(bio) != REQ_OP_DISCARD && in bio_has_data()
100 return bio_op(bio) == REQ_OP_DISCARD || in bio_no_advance_iter()
219 case REQ_OP_DISCARD: in bio_segments()
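The bio.h hits record that a discard bio reports no data (bio_has_data()) and that its iterator is not advanced by a payload (bio_no_advance_iter()). A hedged sketch of what that means for a driver, assuming a hypothetical my_do_discard() handler: the bio only describes a sector range, so there are no bvecs to walk.

#include <linux/bio.h>
#include <linux/printk.h>

/* Hypothetical handler: use the sector range carried in bi_iter instead
 * of iterating segments, since bio_has_data() is false for discards. */
static void my_do_discard(struct bio *bio)
{
	sector_t start = bio->bi_iter.bi_sector;
	sector_t nr    = bio_sectors(bio);

	pr_debug("discard %llu+%llu\n",
		 (unsigned long long)start, (unsigned long long)nr);
}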
1096 if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) in blk_queue_get_max_sectors()
1132 req_op(rq) == REQ_OP_DISCARD || in blk_rq_get_max_sectors()
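blk_queue_get_max_sectors() sizes REQ_OP_DISCARD against the queue's separate discard limits, so a driver has to advertise them. A sketch of a queue-setup path (my_setup_queue() is hypothetical; QUEUE_FLAG_DISCARD matches kernels of this vintage):

#include <linux/kernel.h>
#include <linux/blkdev.h>

static void my_setup_queue(struct request_queue *q, unsigned int lbs)
{
	/* limits consulted by blk_queue_get_max_sectors() for discards */
	blk_queue_max_discard_sectors(q, UINT_MAX);
	q->limits.discard_granularity = lbs;
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}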
189 case REQ_OP_DISCARD: in blk_queue_split()
245 case REQ_OP_DISCARD: in __blk_recalc_rq_segments()
717 if (req_op(req) == REQ_OP_DISCARD) { in attempt_merge()
749 if (req_op(req) != REQ_OP_DISCARD) in attempt_merge()
845 if (req_op(rq) == REQ_OP_DISCARD && in blk_try_merge()
466 if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD) in get_limit()
588 case REQ_OP_DISCARD: in wbt_should_throttle()
607 if (bio_op(bio) == REQ_OP_DISCARD) in bio_to_wbt_flags()
237 case REQ_OP_DISCARD: in bounce_clone_bio()
48 op = REQ_OP_DISCARD; in __blkdev_issue_discard()
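Most callers reach __blkdev_issue_discard() through the blkdev_issue_discard() wrapper rather than setting the op themselves. A sketch under that assumption (my_trim_range() is made up; error handling reduced to the return code):

#include <linux/blkdev.h>
#include <linux/gfp.h>

static int my_trim_range(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects)
{
	/* flags = 0 is a plain discard; BLKDEV_DISCARD_SECURE asks for
	 * a secure erase instead */
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
}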
385 (bio_op(bio) == REQ_OP_DISCARD)) in check_should_bypass()
984 if (bio_op(bio) == REQ_OP_DISCARD) in cached_dev_write()
998 if (bio_op(bio) == REQ_OP_DISCARD && in cached_dev_write()
1104 if ((bio_op(bio) == REQ_OP_DISCARD) && in detached_dev_do_request()
1323 s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0; in flash_dev_make_request()
479 bio_set_op_attrs(bio, REQ_OP_DISCARD, 0); in do_journal_discard()
67 | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0); in drbd_req_new()
1193 bio_op(bio) == REQ_OP_DISCARD) in drbd_submit_req_private_bio()
1248 bio_op(bio) == REQ_OP_DISCARD) in drbd_request_prepare()
52 case REQ_OP_DISCARD: in mmc_cqe_issue_type()
377 case REQ_OP_DISCARD: in btrfs_op()
97 case REQ_OP_DISCARD: in do_blktrans_request()
237 if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE) in __blk_add_trace()
1892 case REQ_OP_DISCARD: in blk_fill_rwbs()
568 if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_ZEROES) in dasd_fba_build_cp()
1077 if (req_op(rq) == REQ_OP_DISCARD) { in null_handle_rq()
1111 if (bio_op(bio) == REQ_OP_DISCARD) { in null_handle_bio()
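null_blk handles the operation on both the request and bio paths; a request-based driver typically dispatches on req_op() the same way. A reduced sketch (everything except the blk_* helpers is hypothetical, and the data path is elided):

#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/printk.h>

static blk_status_t my_dispatch(struct request *rq)
{
	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
		/* only the sector range matters; no segment walk needed */
		pr_debug("discard %llu+%u\n",
			 (unsigned long long)blk_rq_pos(rq),
			 blk_rq_sectors(rq));
		return BLK_STS_OK;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		return BLK_STS_OK;	/* real data handling elided */
	default:
		return BLK_STS_NOTSUPP;
	}
}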