/Linux-v5.15/block/

blk-merge.c
   556  if (req_op(rq) == REQ_OP_DISCARD)   in blk_rq_get_max_segments()
   568  if (req_op(req) == REQ_OP_DISCARD)   in ll_new_hw_segment()
   705  part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);   in blk_account_io_merge_request()
   731  if (req_op(req) != req_op(next))   in attempt_merge()
   738  if (req_op(req) == REQ_OP_WRITE_SAME &&   in attempt_merge()
   855  if (req_op(rq) != bio_op(bio))   in blk_rq_merge_ok()
   875  if (req_op(rq) == REQ_OP_WRITE_SAME &&   in blk_rq_merge_ok()
   909  part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);   in blk_account_io_merge_bio()

blk-core.c
   230  blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),   in print_req_error()
   247  if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {   in req_bio_endio()
  1125  unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));   in blk_cloned_rq_check_limits()
  1252  const int sgrp = op_stat_group(req_op(req));   in blk_account_io_completion()
  1269  const int sgrp = op_stat_group(req_op(req));   in blk_account_io_done()
  1412  if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&   in blk_update_request()

blk-map.c
   158  bio->bi_opf |= req_op(rq);   in bio_copy_user_iov()
   247  bio->bi_opf |= req_op(rq);   in bio_map_user_iov()
   648  bio->bi_opf |= req_op(rq);   in blk_rq_map_kern()

blk-zoned.c
    66  switch (req_op(rq)) {   in blk_req_needs_zone_write_lock()

blk-wbt.c
   670  const int op = req_op(rq);   in wbt_data_dir()

/Linux-v5.15/drivers/block/rnbd/

rnbd-proto.h
   274  switch (req_op(rq)) {   in rq_to_rnbd_flags()
   295  req_op(rq), (unsigned long long)rq->cmd_flags);   in rq_to_rnbd_flags()

/Linux-v5.15/include/linux/

blkdev.h
   246  return blk_op_is_passthrough(req_op(rq));   in blk_rq_is_passthrough()
   641  #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
   644  (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
   771  if (req_op(rq) == REQ_OP_FLUSH)   in rq_mergeable()
   774  if (req_op(rq) == REQ_OP_WRITE_ZEROES)   in rq_mergeable()
   777  if (req_op(rq) == REQ_OP_ZONE_APPEND)   in rq_mergeable()
  1060  req_op(rq) == REQ_OP_DISCARD ||   in blk_rq_get_max_sectors()
  1061  req_op(rq) == REQ_OP_SECURE_ERASE)   in blk_rq_get_max_sectors()
  1062  return blk_queue_get_max_sectors(q, req_op(rq));   in blk_rq_get_max_sectors()
  1065  blk_queue_get_max_sectors(q, req_op(rq)));   in blk_rq_get_max_sectors()
  [all …]

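The blkdev.h hits above are mostly wrapper macros that drivers use instead of testing req_op() directly. As an illustration only, a sketch of how those wrappers are typically consumed follows; my_prep_rq() is a placeholder, not a kernel function, and the expression shown at line 644 is what recent kernels wrap as rq_dma_dir().

#include <linux/blkdev.h>

/* Hypothetical helper: consumes the blkdev.h wrappers quoted above.
 * rq_data_dir() (line 641) collapses req_op() to READ/WRITE, and
 * rq_dma_dir() yields the matching dma_data_direction (line 644). */
static void my_prep_rq(struct request *rq)
{
	int dir = rq_data_dir(rq);			/* READ or WRITE */
	enum dma_data_direction dma_dir = rq_dma_dir(rq);

	if (blk_rq_is_passthrough(rq))			/* line 246 above */
		pr_debug("passthrough rq, dir=%d dma_dir=%d\n", dir, dma_dir);
}
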
blk_types.h
   434  #define req_op(req) \   (macro definition)

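req_op() itself is the macro defined here; it conventionally extracts the REQ_OP_* value from the REQ_OP_MASK bits of rq->cmd_flags. Most of the drivers listed below (ubd_queue_rq, null_queue_rq, ubiblock_queue_rq, do_blktrans_request, ...) follow the same pattern: switch on req_op() in their ->queue_rq() or request handler. A minimal sketch of that pattern, with my_queue_rq() and the my_*() handlers as hypothetical placeholders rather than real kernel code:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Sketch of the dispatch-on-req_op() pattern used by the drivers in
 * this listing; all my_*() names are placeholders to be supplied by
 * the driver. */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);

	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
		return my_flush(rq);
	case REQ_OP_DISCARD:
		return my_discard(rq);
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		return my_read_write(rq);
	default:
		return BLK_STS_IOERR;	/* operation not supported by this driver */
	}
}
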
/Linux-v5.15/drivers/scsi/

sd_zbc.c
   418  switch (req_op(rq)) {   in sd_zbc_need_zone_wp_update()
   448  enum req_opf op = req_op(rq);   in sd_zbc_zone_wp_update()
   521  if (op_is_zone_mgmt(req_op(rq)) &&   in sd_zbc_complete()
   534  if (req_op(rq) == REQ_OP_ZONE_APPEND)   in sd_zbc_complete()

/Linux-v5.15/arch/um/drivers/

ubd_kern.c
   482  if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {   in ubd_handler()
  1242  if (req_op(req->req) == REQ_OP_READ) {   in cowify_req()
  1262  int op = req_op(req);   in ubd_map_req()
  1325  int op = req_op(req);   in ubd_submit_request()
  1362  switch (req_op(req)) {   in ubd_queue_rq()
  1480  if (req_op(req->req) == REQ_OP_FLUSH) {   in do_io()
  1501  switch (req_op(req->req)) {   in do_io()

/Linux-v5.15/drivers/block/null_blk/

trace.h
    44  __entry->op = req_op(cmd->rq);

main.c
  1175  op_is_write(req_op(rq)), sector,   in null_handle_rq()
  1294  } else if (req_op(cmd->rq) == REQ_OP_READ) {   in nullb_zero_read_cmd_buffer()
  1514  return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));   in null_queue_rq()

/Linux-v5.15/drivers/mmc/core/

queue.c
    46  switch (req_op(req)) {   in mmc_cqe_issue_type()
    66  if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)   in mmc_issue_type()

/Linux-v5.15/drivers/crypto/hisilicon/sec2/

sec_crypto.c
   258  ctx->req_op->buf_unmap(ctx, req);   in sec_req_cb()
   260  ctx->req_op->callback(ctx, req, err);   in sec_req_cb()
  1196  ret = ctx->req_op->buf_map(ctx, req);   in sec_request_transfer()
  1200  ctx->req_op->do_transfer(ctx, req);   in sec_request_transfer()
  1202  ret = ctx->req_op->bd_fill(ctx, req);   in sec_request_transfer()
  1209  ctx->req_op->buf_unmap(ctx, req);   in sec_request_transfer()
  1215  ctx->req_op->buf_unmap(ctx, req);   in sec_request_untransfer()
  1738  ret = ctx->req_op->bd_send(ctx, req);   in sec_process()
  1815  ctx->req_op = &sec_skcipher_req_ops;   in sec_skcipher_ctx_init()
  1818  ctx->req_op = &sec_skcipher_req_ops_v3;   in sec_skcipher_ctx_init()
  [all …]

sec.h
   137  const struct sec_req_op *req_op;   (struct member)

/Linux-v5.15/drivers/md/

dm-rq.c
   218  if (req_op(clone) == REQ_OP_DISCARD &&   in dm_done()
   221  else if (req_op(clone) == REQ_OP_WRITE_SAME &&   in dm_done()
   224  else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&   in dm_done()

/Linux-v5.15/drivers/block/

xen-blkfront.c
   557  if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)   in blkif_queue_discard_req()
   762  BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);   in blkif_queue_rw_req()
   774  if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {   in blkif_queue_rw_req()
   863  if (unlikely(req_op(req) == REQ_OP_DISCARD ||   in blkif_queue_request()
   864  req_op(req) == REQ_OP_SECURE_ERASE))   in blkif_queue_request()
   884  ((req_op(req) == REQ_OP_FLUSH) &&   in blkif_request_flush_invalid()
  2074  if (req_op(shadow[j].request) == REQ_OP_FLUSH ||   in blkfront_resume()
  2075  req_op(shadow[j].request) == REQ_OP_DISCARD ||   in blkfront_resume()
  2076  req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||   in blkfront_resume()

ps3disk.c
   168  switch (req_op(req)) {   in ps3disk_do_request()
   231  if (req_op(req) == REQ_OP_FLUSH) {   in ps3disk_interrupt()

loop.c
   516  req_op(rq) != REQ_OP_READ) {   in lo_complete_rq()
   648  switch (req_op(rq)) {   in do_req_filebacked()
  2153  switch (req_op(rq)) {   in loop_queue_rq()
  2185  const bool write = op_is_write(req_op(rq));   in loop_handle_cmd()

/Linux-v5.15/drivers/mtd/

mtd_blkdevs.c
    55  if (req_op(req) == REQ_OP_FLUSH) {   in do_blktrans_request()
    65  switch (req_op(req)) {   in do_blktrans_request()

/Linux-v5.15/drivers/nvme/host/

zns.c
   241  if (req_op(req) == REQ_OP_ZONE_RESET_ALL)   in nvme_setup_zone_mgmt_send()

/Linux-v5.15/drivers/block/paride/

pd.c
   487  switch (req_op(pd_req)) {   in do_pd_io_start()
   500  if (req_op(pd_req) == REQ_OP_READ)   in do_pd_io_start()

/Linux-v5.15/drivers/s390/block/

dasd_fba.c
   560  if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_ZEROES)   in dasd_fba_build_cp()

/Linux-v5.15/drivers/nvme/target/

passthru.c
   214  bio->bi_opf = req_op(rq);   in nvmet_passthru_map_sg()

/Linux-v5.15/drivers/mtd/ubi/

block.c
   320  switch (req_op(req)) {   in ubiblock_queue_rq()