/Linux-v6.6/drivers/block/null_blk/ |
D | null_blk.h |
  146 blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
  155 blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
  174 enum req_op op, sector_t sector, sector_t nr_sectors) in null_process_zoned_cmd()
|
D | trace.h |
  39 __field(enum req_op, op)
  44 __entry->op = req_op(cmd->rq);
|
/Linux-v6.6/block/ |
D | blk.h |
  129 if (req_op(rq) == REQ_OP_FLUSH) in rq_mergeable()
  132 if (req_op(rq) == REQ_OP_WRITE_ZEROES) in rq_mergeable()
  135 if (req_op(rq) == REQ_OP_ZONE_APPEND) in rq_mergeable()
  156 if (req_op(req) == REQ_OP_DISCARD && in blk_discard_mergable()
  164 if (req_op(rq) == REQ_OP_DISCARD) in blk_rq_get_max_segments()
  170 enum req_op op) in blk_queue_get_max_sectors()
|
D | blk-merge.c |
  599 max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); in blk_rq_get_max_sectors()
  601 req_op(rq) == REQ_OP_DISCARD || in blk_rq_get_max_sectors()
  602 req_op(rq) == REQ_OP_SECURE_ERASE) in blk_rq_get_max_sectors()
  618 if (req_op(req) == REQ_OP_DISCARD) in ll_new_hw_segment()
  785 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]); in blk_account_io_merge_request()
  811 if (req_op(req) != req_op(next)) in attempt_merge()
  925 if (req_op(rq) != bio_op(bio)) in blk_rq_merge_ok()
  967 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]); in blk_account_io_merge_bio()
|
D | blk-stat.c |
  60 if (req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE) in blk_stat_add()
|
D | blk-map.c |
  160 bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq)); in bio_copy_user_iov()
  260 bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq)); in blk_rq_map_bio_alloc()
  798 bio->bi_opf |= req_op(rq); in blk_rq_map_kern()
|
D | blk-zoned.c |
  250 int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, in blkdev_zone_mgmt()
  384 enum req_op op; in blkdev_zone_mgmt_ioctl()
|
/Linux-v6.6/include/linux/ |
D | blk_types.h |
  370 enum req_op { enum
  474 static inline enum req_op bio_op(const struct bio *bio) in bio_op()
  515 static inline bool op_is_zone_mgmt(enum req_op op) in op_is_zone_mgmt()
  528 static inline int op_stat_group(enum req_op op) in op_stat_group()
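The blk_types.h hits are the core definitions behind every other match in this listing: enum req_op itself (line 370), bio_op() to recover the operation from a bio, and the op_is_zone_mgmt() / op_stat_group() classifiers. A minimal sketch of how these helpers compose, assuming a kernel build context; classify_bio() is an invented name for illustration:

    #include <linux/blk_types.h>
    #include <linux/printk.h>

    /* Hypothetical helper built only from the blk_types.h accessors
     * indexed above: bio_op() masks REQ_OP_MASK out of bio->bi_opf. */
    static void classify_bio(struct bio *bio)
    {
        enum req_op op = bio_op(bio);

        if (op_is_zone_mgmt(op))        /* zone open/close/finish/reset */
            pr_debug("zone mgmt op %d\n", op);
        else
            pr_debug("stat group %d\n", op_stat_group(op));
    }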
|
D | blk-mq.h |
  193 static inline enum req_op req_op(const struct request *req) in req_op() function
  208 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
  211 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
  1167 return op_needs_zoned_write_locking(req_op(rq)) && in blk_rq_is_seq_zoned_write()
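blk-mq.h is where req_op() itself lives (line 193: rq->cmd_flags masked with REQ_OP_MASK) and where rq_data_dir() is derived from it via op_is_write() (line 208). A short sketch of the request-side idiom; mydrv_rq_is_write() is an invented name:

    #include <linux/blk-mq.h>

    /* Hypothetical helper: rq_data_dir() expands to
     * op_is_write(req_op(rq)) ? WRITE : READ, per line 208 above. */
    static bool mydrv_rq_is_write(struct request *rq)
    {
        return rq_data_dir(rq) == WRITE;
    }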
|
D | blkdev.h |
  339 extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
  846 extern const char *blk_op_str(enum req_op op);
  1278 static inline bool op_needs_zoned_write_locking(enum req_op op) in op_needs_zoned_write_locking()
  1284 enum req_op op) in bdev_op_is_zoned_write()
  1430 unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
  1432 void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
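blkdev.h declares the zoned-device entry points, including blkdev_zone_mgmt() (implemented in blk-zoned.c, line 250 above). A minimal sketch of resetting a single zone through that API, assuming v6.6's five-argument signature and an already-opened bdev; mydrv_reset_zone() is invented and error handling is elided:

    #include <linux/blkdev.h>

    /* Hypothetical wrapper: issue REQ_OP_ZONE_RESET for one zone.
     * zone_sector and zone_sectors must match the device's zone layout. */
    static int mydrv_reset_zone(struct block_device *bdev,
                                sector_t zone_sector, sector_t zone_sectors)
    {
        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_sector,
                                zone_sectors, GFP_KERNEL);
    }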
|
/Linux-v6.6/drivers/block/rnbd/ |
D | rnbd-proto.h |
  258 switch (req_op(rq)) { in rq_to_rnbd_flags()
  276 (__force u32)req_op(rq), in rq_to_rnbd_flags()
|
/Linux-v6.6/fs/zonefs/ |
D | trace.h |
  24 enum req_op op),
  29 __field(enum req_op, op)
|
D | zonefs.h |
  260 int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op);
|
/Linux-v6.6/fs/ |
D | direct-io.c |
  170 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_refill_pages()
  247 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_complete()
  337 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_bio_end_aio()
  426 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_bio_submit()
  501 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_bio_complete()
  605 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in get_more_blocks()
  788 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in submit_page_section()
  905 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in do_direct_IO()
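Every fs/direct-io.c hit is the same idiom: the dio keeps the full blk_opf_t (operation plus REQ_* modifier flags) in dio->opf, and each function recovers the bare operation by masking. Isolated as a sketch; opf_to_op() is an invented name:

    #include <linux/blk_types.h>

    /* The operation and its modifier flags (REQ_SYNC, REQ_FUA, ...) share
     * one blk_opf_t word; masking with REQ_OP_MASK yields enum req_op,
     * the same pattern req_op() and bio_op() use internally. */
    static enum req_op opf_to_op(blk_opf_t opf)
    {
        return opf & REQ_OP_MASK;
    }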
|
/Linux-v6.6/drivers/block/ |
D | ublk_drv.c |
  381 switch (req_op(req)) { in ublk_setup_iod_zoned()
  871 return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE; in ublk_need_map_req()
  877 (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN); in ublk_need_unmap_req()
  961 enum req_op op = req_op(req); in ublk_setup_iod()
  968 switch (req_op(req)) { in ublk_setup_iod()
  1025 if (!io->res && req_op(req) == REQ_OP_READ) in __ublk_complete_rq()
  1039 if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE && in __ublk_complete_rq()
  1040 req_op(req) != REQ_OP_DRV_IN) in __ublk_complete_rq()
  1406 if (req_op(req) == REQ_OP_ZONE_APPEND) in ublk_commit_completion()
  1755 req_op(req) == REQ_OP_READ)) in __ublk_ch_uring_cmd()
  [all …]
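The ublk hits, like the xen-blkfront, ubd and sd_zbc ones that follow, show the canonical driver pattern: a switch (req_op(req)) that dispatches each supported operation and rejects the rest. A generic sketch of that shape; every mydrv_* name is invented:

    #include <linux/blk-mq.h>

    /* Hypothetical queue_rq-style dispatch keyed on the request operation. */
    static blk_status_t mydrv_handle_rq(struct request *req)
    {
        switch (req_op(req)) {
        case REQ_OP_READ:
        case REQ_OP_WRITE:
            return mydrv_do_rw(req);        /* hypothetical */
        case REQ_OP_FLUSH:
            return mydrv_do_flush(req);     /* hypothetical */
        case REQ_OP_DISCARD:
            return mydrv_do_discard(req);   /* hypothetical */
        default:
            return BLK_STS_NOTSUPP;         /* not supported here */
        }
    }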
|
D | xen-blkfront.c |
  566 if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard) in blkif_queue_discard_req()
  771 BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA); in blkif_queue_rw_req()
  783 if (req_op(req) == REQ_OP_FLUSH || in blkif_queue_rw_req()
  784 (req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_FUA))) { in blkif_queue_rw_req()
  873 if (unlikely(req_op(req) == REQ_OP_DISCARD || in blkif_queue_request()
  874 req_op(req) == REQ_OP_SECURE_ERASE)) in blkif_queue_request()
  894 ((req_op(req) == REQ_OP_FLUSH) && in blkif_request_flush_invalid()
  2083 if (req_op(shadow[j].request) == REQ_OP_FLUSH || in blkfront_resume()
  2084 req_op(shadow[j].request) == REQ_OP_DISCARD || in blkfront_resume()
  2085 req_op(shadow[j].request) == REQ_OP_SECURE_ERASE || in blkfront_resume()
|
/Linux-v6.6/include/trace/events/ |
D | nilfs2.h |
  195 enum req_op mode),
  203 __field(enum req_op, mode)
|
/Linux-v6.6/arch/um/drivers/ |
D | ubd_kern.c |
  483 if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) { in ubd_handler()
  1237 if (req_op(req->req) == REQ_OP_READ) { in cowify_req()
  1257 enum req_op op = req_op(req); in ubd_map_req()
  1320 enum req_op op = req_op(req); in ubd_submit_request()
  1357 switch (req_op(req)) { in ubd_queue_rq()
  1475 if (req_op(req->req) == REQ_OP_FLUSH) { in do_io()
  1496 switch (req_op(req->req)) { in do_io()
|
/Linux-v6.6/drivers/scsi/ |
D | sd_zbc.c |
  509 switch (req_op(rq)) { in sd_zbc_need_zone_wp_update()
  538 enum req_op op = req_op(rq); in sd_zbc_zone_wp_update()
  612 if (op_is_zone_mgmt(req_op(rq)) && in sd_zbc_complete()
  625 if (req_op(rq) == REQ_OP_ZONE_APPEND) in sd_zbc_complete()
|
/Linux-v6.6/fs/xfs/ |
D | xfs_bio_io.c |
  18 enum req_op op) in xfs_rw_bdev()
|
D | xfs_linux.h |
  202 char *data, enum req_op op);
|
/Linux-v6.6/drivers/mmc/core/ |
D | queue.c |
  46 switch (req_op(req)) { in mmc_cqe_issue_type()
  67 if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE) in mmc_issue_type()
|
/Linux-v6.6/drivers/crypto/hisilicon/sec2/ |
D | sec_crypto.c |
  269 ctx->req_op->buf_unmap(ctx, req); in sec_req_cb()
  271 ctx->req_op->callback(ctx, req, err); in sec_req_cb()
  1244 ret = ctx->req_op->buf_map(ctx, req); in sec_request_transfer()
  1248 ctx->req_op->do_transfer(ctx, req); in sec_request_transfer()
  1250 ret = ctx->req_op->bd_fill(ctx, req); in sec_request_transfer()
  1257 ctx->req_op->buf_unmap(ctx, req); in sec_request_transfer()
  1263 ctx->req_op->buf_unmap(ctx, req); in sec_request_untransfer()
  1787 ret = ctx->req_op->bd_send(ctx, req); in sec_process()
  1864 ctx->req_op = &sec_skcipher_req_ops; in sec_skcipher_ctx_init()
  1867 ctx->req_op = &sec_skcipher_req_ops_v3; in sec_skcipher_ctx_init()
  [all …]
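Note that the sec_crypto.c and sec.h hits match a different identifier: ctx->req_op here is the const struct sec_req_op *req_op ops-table member flagged as "member" below, not the block-layer accessor. A sketch of that dispatch-table shape, with the hook list reconstructed only from the call sites above; the real struct in sec.h may differ:

    /* Illustrative only: per-context ops table behind the calls above. */
    struct sec_req_op_sketch {
        int  (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
        void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
        void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
        int  (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
        int  (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
        void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
    };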
|
D | sec.h |
  138 const struct sec_req_op *req_op; member
|
/Linux-v6.6/drivers/md/ |
D | dm-ebs-target.c |
  65 static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv, in __ebs_rw_bvec()
  122 static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio) in __ebs_rw_bio()
|