Searched refs: req_op (Results 1 – 25 of 42) sorted by relevance

/Linux-v5.15/block/
blk-merge.c:556 if (req_op(rq) == REQ_OP_DISCARD) in blk_rq_get_max_segments()
568 if (req_op(req) == REQ_OP_DISCARD) in ll_new_hw_segment()
705 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]); in blk_account_io_merge_request()
731 if (req_op(req) != req_op(next)) in attempt_merge()
738 if (req_op(req) == REQ_OP_WRITE_SAME && in attempt_merge()
855 if (req_op(rq) != bio_op(bio)) in blk_rq_merge_ok()
875 if (req_op(rq) == REQ_OP_WRITE_SAME && in blk_rq_merge_ok()
909 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]); in blk_account_io_merge_bio()
blk-core.c:230 blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)), in print_req_error()
247 if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) { in req_bio_endio()
1125 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); in blk_cloned_rq_check_limits()
1252 const int sgrp = op_stat_group(req_op(req)); in blk_account_io_completion()
1269 const int sgrp = op_stat_group(req_op(req)); in blk_account_io_done()
1412 if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ && in blk_update_request()
blk-map.c:158 bio->bi_opf |= req_op(rq); in bio_copy_user_iov()
247 bio->bi_opf |= req_op(rq); in bio_map_user_iov()
648 bio->bi_opf |= req_op(rq); in blk_rq_map_kern()
blk-zoned.c:66 switch (req_op(rq)) { in blk_req_needs_zone_write_lock()
blk-wbt.c:670 const int op = req_op(rq); in wbt_data_dir()
/Linux-v5.15/drivers/block/rnbd/
rnbd-proto.h:274 switch (req_op(rq)) { in rq_to_rnbd_flags()
295 req_op(rq), (unsigned long long)rq->cmd_flags); in rq_to_rnbd_flags()
/Linux-v5.15/include/linux/
blkdev.h:246 return blk_op_is_passthrough(req_op(rq)); in blk_rq_is_passthrough()
641 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
644 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
771 if (req_op(rq) == REQ_OP_FLUSH) in rq_mergeable()
774 if (req_op(rq) == REQ_OP_WRITE_ZEROES) in rq_mergeable()
777 if (req_op(rq) == REQ_OP_ZONE_APPEND) in rq_mergeable()
1060 req_op(rq) == REQ_OP_DISCARD || in blk_rq_get_max_sectors()
1061 req_op(rq) == REQ_OP_SECURE_ERASE) in blk_rq_get_max_sectors()
1062 return blk_queue_get_max_sectors(q, req_op(rq)); in blk_rq_get_max_sectors()
1065 blk_queue_get_max_sectors(q, req_op(rq))); in blk_rq_get_max_sectors()
[all …]
blk_types.h:434 #define req_op(req) \ macro
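
The blk_types.h hit above is the definition site of the block-layer req_op() macro. As a point of reference, a minimal sketch of what it is expected to expand to in v5.15 follows (the exact body should be confirmed at blk_types.h:434; this assumes the long-standing cmd_flags/REQ_OP_MASK form):

/* Extract the REQ_OP_* value from a request, masking off modifier flags
 * such as REQ_SYNC and REQ_FUA (sketch of the expected v5.15 definition). */
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)
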
/Linux-v5.15/drivers/scsi/
sd_zbc.c:418 switch (req_op(rq)) { in sd_zbc_need_zone_wp_update()
448 enum req_opf op = req_op(rq); in sd_zbc_zone_wp_update()
521 if (op_is_zone_mgmt(req_op(rq)) && in sd_zbc_complete()
534 if (req_op(rq) == REQ_OP_ZONE_APPEND) in sd_zbc_complete()
/Linux-v5.15/arch/um/drivers/
ubd_kern.c:482 if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) { in ubd_handler()
1242 if (req_op(req->req) == REQ_OP_READ) { in cowify_req()
1262 int op = req_op(req); in ubd_map_req()
1325 int op = req_op(req); in ubd_submit_request()
1362 switch (req_op(req)) { in ubd_queue_rq()
1480 if (req_op(req->req) == REQ_OP_FLUSH) { in do_io()
1501 switch (req_op(req->req)) { in do_io()
/Linux-v5.15/drivers/block/null_blk/
trace.h:44 __entry->op = req_op(cmd->rq);
main.c:1175 op_is_write(req_op(rq)), sector, in null_handle_rq()
1294 } else if (req_op(cmd->rq) == REQ_OP_READ) { in nullb_zero_read_cmd_buffer()
1514 return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq)); in null_queue_rq()
/Linux-v5.15/drivers/mmc/core/
queue.c:46 switch (req_op(req)) { in mmc_cqe_issue_type()
66 if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE) in mmc_issue_type()
/Linux-v5.15/drivers/crypto/hisilicon/sec2/
sec_crypto.c:258 ctx->req_op->buf_unmap(ctx, req); in sec_req_cb()
260 ctx->req_op->callback(ctx, req, err); in sec_req_cb()
1196 ret = ctx->req_op->buf_map(ctx, req); in sec_request_transfer()
1200 ctx->req_op->do_transfer(ctx, req); in sec_request_transfer()
1202 ret = ctx->req_op->bd_fill(ctx, req); in sec_request_transfer()
1209 ctx->req_op->buf_unmap(ctx, req); in sec_request_transfer()
1215 ctx->req_op->buf_unmap(ctx, req); in sec_request_untransfer()
1738 ret = ctx->req_op->bd_send(ctx, req); in sec_process()
1815 ctx->req_op = &sec_skcipher_req_ops; in sec_skcipher_ctx_init()
1818 ctx->req_op = &sec_skcipher_req_ops_v3; in sec_skcipher_ctx_init()
[all …]
sec.h:137 const struct sec_req_op *req_op; member
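
Note that the drivers/crypto/hisilicon/sec2 hits refer to a driver-private operations table named req_op, not the block-layer macro. A sketch of that callback table, with member names taken from the call sites listed above and signatures inferred from them (the real layout lives in sec.h):

struct sec_ctx;
struct sec_req;

/* Illustrative subset of the sec2 request-op callback table; return types
 * not implied by the listed call sites are assumptions. */
struct sec_req_op {
	int  (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int  (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int  (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
	void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
};
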
/Linux-v5.15/drivers/md/
dm-rq.c:218 if (req_op(clone) == REQ_OP_DISCARD && in dm_done()
221 else if (req_op(clone) == REQ_OP_WRITE_SAME && in dm_done()
224 else if (req_op(clone) == REQ_OP_WRITE_ZEROES && in dm_done()
/Linux-v5.15/drivers/block/
xen-blkfront.c:557 if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard) in blkif_queue_discard_req()
762 BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA); in blkif_queue_rw_req()
774 if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) { in blkif_queue_rw_req()
863 if (unlikely(req_op(req) == REQ_OP_DISCARD || in blkif_queue_request()
864 req_op(req) == REQ_OP_SECURE_ERASE)) in blkif_queue_request()
884 ((req_op(req) == REQ_OP_FLUSH) && in blkif_request_flush_invalid()
2074 if (req_op(shadow[j].request) == REQ_OP_FLUSH || in blkfront_resume()
2075 req_op(shadow[j].request) == REQ_OP_DISCARD || in blkfront_resume()
2076 req_op(shadow[j].request) == REQ_OP_SECURE_ERASE || in blkfront_resume()
ps3disk.c:168 switch (req_op(req)) { in ps3disk_do_request()
231 if (req_op(req) == REQ_OP_FLUSH) { in ps3disk_interrupt()
loop.c:516 req_op(rq) != REQ_OP_READ) { in lo_complete_rq()
648 switch (req_op(rq)) { in do_req_filebacked()
2153 switch (req_op(rq)) { in loop_queue_rq()
2185 const bool write = op_is_write(req_op(rq)); in loop_handle_cmd()
/Linux-v5.15/drivers/mtd/
mtd_blkdevs.c:55 if (req_op(req) == REQ_OP_FLUSH) { in do_blktrans_request()
65 switch (req_op(req)) { in do_blktrans_request()
/Linux-v5.15/drivers/nvme/host/
zns.c:241 if (req_op(req) == REQ_OP_ZONE_RESET_ALL) in nvme_setup_zone_mgmt_send()
/Linux-v5.15/drivers/block/paride/
pd.c:487 switch (req_op(pd_req)) { in do_pd_io_start()
500 if (req_op(pd_req) == REQ_OP_READ) in do_pd_io_start()
/Linux-v5.15/drivers/s390/block/
dasd_fba.c:560 if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_ZEROES) in dasd_fba_build_cp()
/Linux-v5.15/drivers/nvme/target/
passthru.c:214 bio->bi_opf = req_op(rq); in nvmet_passthru_map_sg()
/Linux-v5.15/drivers/mtd/ubi/
block.c:320 switch (req_op(req)) { in ubiblock_queue_rq()
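
Most of the block-driver hits above share the same shape: the driver's ->queue_rq() handler switches on req_op() to dispatch reads, writes, flushes, and discards, and rejects anything else. A generic, hypothetical sketch of that pattern (mydrv_queue_rq and the immediate-completion behaviour are placeholders, not taken from any file listed here):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical ->queue_rq() handler showing the common req_op() dispatch.
 * A real driver would start a transfer instead of completing immediately. */
static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	blk_status_t sts;

	blk_mq_start_request(rq);

	switch (req_op(rq)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		/* transfer blk_rq_pos(rq) / blk_rq_sectors(rq) here */
		sts = BLK_STS_OK;
		break;
	case REQ_OP_FLUSH:
	case REQ_OP_DISCARD:
		sts = BLK_STS_OK;
		break;
	default:
		sts = BLK_STS_NOTSUPP;	/* operation not supported by this device */
		break;
	}

	blk_mq_end_request(rq, sts);
	return BLK_STS_OK;
}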
