Searched refs:se_cmd (Results 1 – 25 of 59) sorted by relevance

/Linux-v6.6/include/target/
target_core_fabric.h
66 int (*check_stop_free)(struct se_cmd *);
67 void (*release_cmd)(struct se_cmd *);
76 int (*write_pending)(struct se_cmd *);
78 int (*get_cmd_state)(struct se_cmd *);
79 int (*queue_data_in)(struct se_cmd *);
80 int (*queue_status)(struct se_cmd *);
81 void (*queue_tm_rsp)(struct se_cmd *);
82 void (*aborted_task)(struct se_cmd *);
157 void __target_init_cmd(struct se_cmd *cmd,
162 int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
[all …]
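
The callbacks matched above are members of struct target_core_fabric_ops, the table a fabric driver registers with the target core. A minimal sketch of how a hypothetical fabric might wire up two of them, assuming the usual pattern of embedding struct se_cmd in a driver-private wrapper (all my_* names are illustrative, not from the tree):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Per-command wrapper the fabric embeds struct se_cmd in. */
struct my_cmd {
        struct se_cmd se_cmd;
        /* ... fabric-private state: tags, queue pointers, ... */
};

/* ->check_stop_free(): drop the core's reference; the final put
 * ends up in ->release_cmd() below. */
static int my_check_stop_free(struct se_cmd *se_cmd)
{
        return target_put_sess_cmd(se_cmd);
}

/* ->release_cmd(): recover the wrapper and free it. */
static void my_release_cmd(struct se_cmd *se_cmd)
{
        struct my_cmd *cmd = container_of(se_cmd, struct my_cmd, se_cmd);

        kfree(cmd);
}

static const struct target_core_fabric_ops my_fabric_ops = {
        /* partial initializer; a real fabric also sets fabric_name,
         * write_pending, queue_data_in, queue_status, queue_tm_rsp,
         * aborted_task and the configfs hooks */
        .check_stop_free        = my_check_stop_free,
        .release_cmd            = my_release_cmd,
};

release_cmd() runs once the last reference taken through target_get_sess_cmd() is dropped, which is why check_stop_free() can simply put its reference and let the core decide when to free.
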
target_core_backend.h
45 sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
55 unsigned char *(*get_sense_buffer)(struct se_cmd *);
66 sense_reason_t (*execute_rw)(struct se_cmd *cmd, struct scatterlist *,
68 sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
69 sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
70 sense_reason_t (*execute_unmap)(struct se_cmd *cmd,
72 sense_reason_t (*execute_pr_out)(struct se_cmd *cmd, u8 sa, u64 key,
74 sense_reason_t (*execute_pr_in)(struct se_cmd *cmd, u8 sa,
81 void target_complete_cmd(struct se_cmd *, u8);
82 void target_set_cmd_data_length(struct se_cmd *, int);
[all …]
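
target_complete_cmd() and target_set_cmd_data_length(), declared here, are how a backend hands a finished I/O back to the core. A hedged sketch of an asynchronous completion path (struct my_backend_io and my_io_done are illustrative, not kernel code):

#include <linux/types.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>

/* Illustrative per-I/O context a backend might carry while the
 * request is in flight. */
struct my_backend_io {
        struct se_cmd *cmd;
        bool failed;
        u32 bytes_done;
};

static void my_io_done(struct my_backend_io *io)
{
        struct se_cmd *cmd = io->cmd;

        if (io->failed) {
                target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                return;
        }

        /* If the device returned less than requested, shrink the
         * length the core will report back to the fabric. */
        if (io->bytes_done < cmd->data_length)
                target_set_cmd_data_length(cmd, io->bytes_done);

        target_complete_cmd(cmd, SAM_STAT_GOOD);
}
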
/Linux-v6.6/drivers/target/
target_core_xcopy.c
115 static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, in target_xcopy_parse_tiddesc_e4() argument
175 xop->src_dev = se_cmd->se_dev; in target_xcopy_parse_tiddesc_e4()
192 xop->dst_dev = se_cmd->se_dev; in target_xcopy_parse_tiddesc_e4()
201 static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, in target_xcopy_parse_target_descriptors() argument
205 struct se_device *local_dev = se_cmd->se_dev; in target_xcopy_parse_target_descriptors()
241 rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, in target_xcopy_parse_target_descriptors()
259 rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, in target_xcopy_parse_target_descriptors()
265 rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, in target_xcopy_parse_target_descriptors()
391 struct se_cmd se_cmd; member
400 static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) in xcopy_pt_get_cmd_state() argument
[all …]
target_core_tmr.c
27 struct se_cmd *se_cmd, in core_tmr_alloc_req() argument
40 se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB; in core_tmr_alloc_req()
41 se_cmd->se_tmr_req = tmr; in core_tmr_alloc_req()
42 tmr->task_cmd = se_cmd; in core_tmr_alloc_req()
57 struct se_cmd *cmd) in target_check_cdb_and_preempt()
71 static bool __target_check_io_state(struct se_cmd *se_cmd, in __target_check_io_state() argument
74 struct se_session *sess = se_cmd->se_sess; in __target_check_io_state()
88 spin_lock(&se_cmd->t_state_lock); in __target_check_io_state()
89 if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) { in __target_check_io_state()
91 " fabric stop, skipping\n", se_cmd->tag); in __target_check_io_state()
[all …]
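
__target_check_io_state() above decides whether a TMR may still touch a command by inspecting transport_state under t_state_lock. A condensed sketch of just that check (my_cmd_abortable is an illustrative name; the real helper also validates the session and takes a reference on the command):

#include <linux/spinlock.h>
#include <linux/types.h>
#include <target/target_core_base.h>

/* A command is only worth aborting if it has neither completed nor
 * already been stopped by the fabric. */
static bool my_cmd_abortable(struct se_cmd *se_cmd)
{
        bool abortable;

        spin_lock(&se_cmd->t_state_lock);
        abortable = !(se_cmd->transport_state &
                      (CMD_T_COMPLETE | CMD_T_FABRIC_STOP));
        spin_unlock(&se_cmd->t_state_lock);

        return abortable;
}
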
target_core_transport.c
54 static void transport_complete_task_attr(struct se_cmd *cmd);
55 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
56 static void transport_handle_queue_full(struct se_cmd *cmd,
696 static void target_remove_from_state_list(struct se_cmd *cmd) in target_remove_from_state_list()
712 static void target_remove_from_tmr_list(struct se_cmd *cmd) in target_remove_from_tmr_list()
734 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) in transport_cmd_check_stop_to_fabric()
765 static void transport_lun_remove_cmd(struct se_cmd *cmd) in transport_lun_remove_cmd()
786 struct se_cmd *cmd = container_of(work, struct se_cmd, work); in target_complete_failure_work()
795 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) in transport_get_sense_buffer()
814 void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) in transport_copy_sense_to_cmd()
[all …]
target_core_device.c
48 transport_lookup_cmd_lun(struct se_cmd *se_cmd) in transport_lookup_cmd_lun() argument
51 struct se_session *se_sess = se_cmd->se_sess; in transport_lookup_cmd_lun()
57 deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun); in transport_lookup_cmd_lun()
61 if (se_cmd->data_direction == DMA_TO_DEVICE) in transport_lookup_cmd_lun()
62 atomic_long_add(se_cmd->data_length, in transport_lookup_cmd_lun()
64 else if (se_cmd->data_direction == DMA_FROM_DEVICE) in transport_lookup_cmd_lun()
65 atomic_long_add(se_cmd->data_length, in transport_lookup_cmd_lun()
68 if ((se_cmd->data_direction == DMA_TO_DEVICE) && in transport_lookup_cmd_lun()
72 se_cmd->se_tfo->fabric_name, in transport_lookup_cmd_lun()
73 se_cmd->orig_fe_lun); in transport_lookup_cmd_lun()
[all …]
target_core_user.c
177 struct se_cmd *se_cmd; member
581 struct se_cmd *se_cmd = cmd->se_cmd; in tcmu_cmd_set_block_cnts() local
584 cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size); in tcmu_cmd_set_block_cnts()
586 if (se_cmd->se_cmd_flags & SCF_BIDI) { in tcmu_cmd_set_block_cnts()
587 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); in tcmu_cmd_set_block_cnts()
588 for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++) in tcmu_cmd_set_block_cnts()
589 len += se_cmd->t_bidi_data_sg[i].length; in tcmu_cmd_set_block_cnts()
633 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) in tcmu_alloc_cmd() argument
635 struct se_device *se_dev = se_cmd->se_dev; in tcmu_alloc_cmd()
644 tcmu_cmd->se_cmd = se_cmd; in tcmu_alloc_cmd()
[all …]
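
tcmu_cmd_set_block_cnts() above sizes a command in data-area blocks and counts the BIDI scatterlist separately when SCF_BIDI is set. A simplified sketch of that arithmetic (my_count_blocks and blk_size are illustrative):

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <target/target_core_base.h>

/* Round the payload up to whole blocks, then add the BIDI
 * scatterlist when the command is bidirectional. */
static u32 my_count_blocks(struct se_cmd *se_cmd, u32 blk_size)
{
        u32 blocks = DIV_ROUND_UP(se_cmd->data_length, blk_size);
        struct scatterlist *sg;
        u32 bidi_len = 0;
        int i;

        if (se_cmd->se_cmd_flags & SCF_BIDI) {
                for_each_sg(se_cmd->t_bidi_data_sg, sg,
                            se_cmd->t_bidi_data_nents, i)
                        bidi_len += sg->length;
                blocks += DIV_ROUND_UP(bidi_len, blk_size);
        }

        return blocks;
}
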
target_core_pr.h
62 extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
63 extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
76 extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
77 extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
78 extern sense_reason_t target_check_reservation(struct se_cmd *);
target_core_alua.h
85 extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
86 extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
87 extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
88 extern int core_alua_check_nonop_delay(struct se_cmd *);
149 extern sense_reason_t target_alua_state_check(struct se_cmd *cmd);
target_core_ua.h
36 extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
40 extern bool core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *,
42 extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
/Linux-v6.6/drivers/target/tcm_fc/
tfc_cmd.c
35 struct se_cmd *se_cmd; in _ft_dump_cmd() local
39 se_cmd = &cmd->se_cmd; in _ft_dump_cmd()
41 caller, cmd, cmd->sess, cmd->seq, se_cmd); in _ft_dump_cmd()
44 caller, cmd, se_cmd->t_data_nents, in _ft_dump_cmd()
45 se_cmd->data_length, se_cmd->se_cmd_flags); in _ft_dump_cmd()
47 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) in _ft_dump_cmd()
81 target_free_tag(sess->se_sess, &cmd->se_cmd); in ft_free_cmd()
85 void ft_release_cmd(struct se_cmd *se_cmd) in ft_release_cmd() argument
87 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); in ft_release_cmd()
92 int ft_check_stop_free(struct se_cmd *se_cmd) in ft_check_stop_free() argument
[all …]
tfc_io.c
41 int ft_queue_data_in(struct se_cmd *se_cmd) in ft_queue_data_in() argument
43 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); in ft_queue_data_in()
67 if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL) in ft_queue_data_in()
74 remaining = se_cmd->data_length; in ft_queue_data_in()
79 BUG_ON(remaining && !se_cmd->t_data_sg); in ft_queue_data_in()
81 sg = se_cmd->t_data_sg; in ft_queue_data_in()
177 se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL; in ft_queue_data_in()
182 return ft_queue_status(se_cmd); in ft_queue_data_in()
189 target_execute_cmd(&cmd->se_cmd); in ft_execute_work()
197 struct se_cmd *se_cmd = &cmd->se_cmd; in ft_recv_write_data() local
[all …]
tcm_fc.h
109 struct se_cmd se_cmd; /* Local TCM I/O descriptor */ member
144 int ft_check_stop_free(struct se_cmd *);
145 void ft_release_cmd(struct se_cmd *);
146 int ft_queue_status(struct se_cmd *);
147 int ft_queue_data_in(struct se_cmd *);
148 int ft_write_pending(struct se_cmd *);
149 void ft_queue_tm_resp(struct se_cmd *);
150 void ft_aborted_task(struct se_cmd *);
/Linux-v6.6/drivers/scsi/qla2xxx/
tcm_qla2xxx.c
239 transport_generic_free_cmd(&mcmd->se_cmd, 0); in tcm_qla2xxx_complete_mcmd()
273 transport_generic_free_cmd(&cmd->se_cmd, 0); in tcm_qla2xxx_complete_free()
288 cmd->se_cmd.map_tag = tag; in tcm_qla2xxx_get_cmd()
289 cmd->se_cmd.map_cpu = cpu; in tcm_qla2xxx_get_cmd()
296 target_free_tag(cmd->sess->se_sess, &cmd->se_cmd); in tcm_qla2xxx_rel_cmd()
319 static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) in tcm_qla2xxx_check_stop_free() argument
323 if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) { in tcm_qla2xxx_check_stop_free()
324 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); in tcm_qla2xxx_check_stop_free()
328 return target_put_sess_cmd(se_cmd); in tcm_qla2xxx_check_stop_free()
334 static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) in tcm_qla2xxx_release_cmd() argument
[all …]
/Linux-v6.6/drivers/target/loopback/
tcm_loop.c
46 static int tcm_loop_queue_status(struct se_cmd *se_cmd);
60 static int tcm_loop_check_stop_free(struct se_cmd *se_cmd) in tcm_loop_check_stop_free() argument
62 return transport_generic_free_cmd(se_cmd, 0); in tcm_loop_check_stop_free()
65 static void tcm_loop_release_cmd(struct se_cmd *se_cmd) in tcm_loop_release_cmd() argument
67 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, in tcm_loop_release_cmd()
71 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) in tcm_loop_release_cmd()
103 struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd; in tcm_loop_target_queue_cmd() local
137 se_cmd->prot_pto = true; in tcm_loop_target_queue_cmd()
146 se_cmd->tag = tl_cmd->sc_cmd_tag; in tcm_loop_target_queue_cmd()
147 target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0], in tcm_loop_target_queue_cmd()
[all …]
/Linux-v6.6/drivers/usb/gadget/function/
f_tcm.c
64 transport_generic_free_cmd(&cmd->se_cmd, 0); in bot_status_complete()
152 if (cmd->se_cmd.scsi_status == SAM_STAT_GOOD) { in bot_send_status()
205 struct se_cmd *se_cmd = &cmd->se_cmd; in bot_send_read_response() local
216 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC); in bot_send_read_response()
220 sg_copy_to_buffer(se_cmd->t_data_sg, in bot_send_read_response()
221 se_cmd->t_data_nents, in bot_send_read_response()
223 se_cmd->data_length); in bot_send_read_response()
228 fu->bot_req_in->num_sgs = se_cmd->t_data_nents; in bot_send_read_response()
229 fu->bot_req_in->sg = se_cmd->t_data_sg; in bot_send_read_response()
233 fu->bot_req_in->length = se_cmd->data_length; in bot_send_read_response()
[all …]
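
The f_tcm hits show the bounce-buffer pattern: the command's whole scatterlist is copied into one linear buffer before it is handed to the UDC request. A small stand-alone sketch of that step (my_linearize_data is illustrative; bot_send_read_response() does this inline into cmd->data_buf):

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <target/target_core_base.h>

/* Linearize the command's data scatterlist for hardware that cannot
 * take an SG list; the caller frees the returned buffer. */
static void *my_linearize_data(struct se_cmd *se_cmd, gfp_t gfp)
{
        void *buf = kmalloc(se_cmd->data_length, gfp);

        if (!buf)
                return NULL;

        sg_copy_to_buffer(se_cmd->t_data_sg, se_cmd->t_data_nents,
                          buf, se_cmd->data_length);
        return buf;
}
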
/Linux-v6.6/drivers/infiniband/ulp/isert/
ib_isert.c
76 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) in isert_prot_cmd()
1065 data_len = cmd->se_cmd.data_length; in isert_handle_scsi_cmd()
1068 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; in isert_handle_scsi_cmd()
1082 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, in isert_handle_scsi_cmd()
1088 cmd->se_cmd.t_data_sg = &isert_cmd->sg; in isert_handle_scsi_cmd()
1089 cmd->se_cmd.t_data_nents = 1; in isert_handle_scsi_cmd()
1098 if (cmd->write_data_done == cmd->se_cmd.data_length) { in isert_handle_scsi_cmd()
1111 target_put_sess_cmd(&cmd->se_cmd); in isert_handle_scsi_cmd()
1144 cmd->se_cmd.data_length); in isert_handle_iscsi_dataout()
1147 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; in isert_handle_iscsi_dataout()
[all …]
/Linux-v6.6/drivers/target/iscsi/
iscsi_target_tmr.c
37 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; in iscsit_tmr_abort_task()
113 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; in iscsit_tmr_task_reassign()
157 if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) { in iscsit_tmr_task_reassign()
160 ref_lun, ref_cmd->se_cmd.orig_fe_lun); in iscsit_tmr_task_reassign()
233 struct se_cmd *se_cmd = &cmd->se_cmd; in iscsit_task_reassign_complete_write() local
252 if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) { in iscsit_task_reassign_complete_write()
255 cmd->init_task_tag, cmd->se_cmd.t_state); in iscsit_task_reassign_complete_write()
256 target_execute_cmd(se_cmd); in iscsit_task_reassign_complete_write()
275 cmd->se_cmd.data_length) { in iscsit_task_reassign_complete_write()
277 length = (cmd->se_cmd.data_length - offset); in iscsit_task_reassign_complete_write()
[all …]
iscsi_target_seq_pdu_list.c
213 if (cmd->se_cmd.data_direction == DMA_TO_DEVICE) in iscsit_determine_counts_for_list()
224 unsolicited_data_length = min(cmd->se_cmd.data_length, in iscsit_determine_counts_for_list()
227 while (offset < cmd->se_cmd.data_length) { in iscsit_determine_counts_for_list()
240 if ((offset + mdsl) >= cmd->se_cmd.data_length) { in iscsit_determine_counts_for_list()
242 (cmd->se_cmd.data_length - offset); in iscsit_determine_counts_for_list()
243 offset += (cmd->se_cmd.data_length - offset); in iscsit_determine_counts_for_list()
262 if ((offset + mdsl) >= cmd->se_cmd.data_length) { in iscsit_determine_counts_for_list()
263 offset += (cmd->se_cmd.data_length - offset); in iscsit_determine_counts_for_list()
296 if (cmd->se_cmd.data_direction == DMA_TO_DEVICE) in iscsit_do_build_pdu_and_seq_lists()
310 unsolicited_data_length = min(cmd->se_cmd.data_length, in iscsit_do_build_pdu_and_seq_lists()
[all …]
iscsi_target_configfs.c
1359 static int iscsi_get_cmd_state(struct se_cmd *se_cmd) in iscsi_get_cmd_state() argument
1361 struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd); in iscsi_get_cmd_state()
1385 static int lio_queue_data_in(struct se_cmd *se_cmd) in lio_queue_data_in() argument
1387 struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd); in lio_queue_data_in()
1394 static int lio_write_pending(struct se_cmd *se_cmd) in lio_write_pending() argument
1396 struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd); in lio_write_pending()
1405 static int lio_queue_status(struct se_cmd *se_cmd) in lio_queue_status() argument
1407 struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd); in lio_queue_status()
1412 if (cmd->se_cmd.scsi_status || cmd->sense_reason) { in lio_queue_status()
1418 static void lio_queue_tm_rsp(struct se_cmd *se_cmd) in lio_queue_tm_rsp() argument
[all …]
iscsi_target.c
836 const bool do_put = cmd->se_cmd.se_tfo != NULL; in iscsit_add_reject_from_cmd()
869 target_put_sess_cmd(&cmd->se_cmd); in iscsit_add_reject_from_cmd()
905 if (ent >= cmd->se_cmd.t_data_nents) { in iscsit_map_iovec()
910 sg = &cmd->se_cmd.t_data_sg[ent]; in iscsit_map_iovec()
940 for_each_sg(cmd->se_cmd.t_data_sg, sg, in iscsit_map_iovec()
941 cmd->se_cmd.t_data_nents, i) { in iscsit_map_iovec()
991 u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE)); in iscsit_allocate_iovecs()
1191 __target_init_cmd(&cmd->se_cmd, &iscsi_ops, in iscsit_setup_scsi_cmd()
1202 target_get_sess_cmd(&cmd->se_cmd, true); in iscsit_setup_scsi_cmd()
1204 cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; in iscsit_setup_scsi_cmd()
[all …]
/Linux-v6.6/drivers/xen/
xen-scsiback.c
135 struct se_cmd se_cmd; member
404 resid = pending_req->se_cmd.residual_count; in scsiback_cmd_done()
418 target_put_sess_cmd(&pending_req->se_cmd); in scsiback_cmd_done()
423 struct se_cmd *se_cmd = &pending_req->se_cmd; in scsiback_cmd_exec() local
427 se_cmd->tag = pending_req->rqid; in scsiback_cmd_exec()
428 target_init_cmd(se_cmd, sess, pending_req->sense_buffer, in scsiback_cmd_exec()
432 if (target_submit_prep(se_cmd, pending_req->cmnd, pending_req->sgl, in scsiback_cmd_exec()
436 target_submit(se_cmd); in scsiback_cmd_exec()
620 struct se_cmd *se_cmd = &pending_req->se_cmd; in scsiback_device_action() local
626 rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess, in scsiback_device_action()
[all …]
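
The xen-scsiback entry shows the three-step submission path a modern fabric uses: target_init_cmd() attaches the command to the session, target_submit_prep() supplies the CDB and data scatterlist, and target_submit() queues it for execution. A rough sketch of that flow, assuming the v6.6 signatures from target_core_fabric.h (struct my_req, its fields, and the TCM_SIMPLE_TAG/TARGET_SCF_ACK_KREF choices are illustrative):

#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Illustrative per-request state a fabric fills from its wire protocol. */
struct my_req {
        struct se_cmd se_cmd;
        unsigned char cdb[16];
        unsigned char sense[TRANSPORT_SENSE_BUFFER];
        struct scatterlist *sgl;
        u32 sgl_count;
        u64 unpacked_lun;
        u32 data_len;
        enum dma_data_direction dir;
        u64 tag;
};

static void my_submit(struct my_req *req, struct se_session *sess)
{
        struct se_cmd *se_cmd = &req->se_cmd;

        se_cmd->tag = req->tag;
        if (target_init_cmd(se_cmd, sess, req->sense, req->unpacked_lun,
                            req->data_len, TCM_SIMPLE_TAG, req->dir,
                            TARGET_SCF_ACK_KREF))
                return;         /* session is being torn down */

        if (target_submit_prep(se_cmd, req->cdb, req->sgl, req->sgl_count,
                               NULL, 0, NULL, 0, GFP_KERNEL))
                return;         /* prep failed, do not call target_submit() */

        target_submit(se_cmd);
}
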
/Linux-v6.6/drivers/scsi/ibmvscsi_tgt/
ibmvscsi_tgt.c
58 static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, in ibmvscsis_determine_resid() argument
61 u32 residual_count = se_cmd->residual_count; in ibmvscsis_determine_resid()
66 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { in ibmvscsis_determine_resid()
67 if (se_cmd->data_direction == DMA_TO_DEVICE) { in ibmvscsis_determine_resid()
71 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { in ibmvscsis_determine_resid()
76 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { in ibmvscsis_determine_resid()
77 if (se_cmd->data_direction == DMA_TO_DEVICE) { in ibmvscsis_determine_resid()
81 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { in ibmvscsis_determine_resid()
1290 memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd)); in ibmvscsis_get_free_cmd()
1906 if (cmd->se_cmd.transport_state & CMD_T_ABORTED && in ibmvscsis_send_messages()
[all …]
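
ibmvscsis_determine_resid() above translates the core's residual bookkeeping into the fields the driver puts on the wire. A simplified sketch of the same idea (my_fill_residuals is illustrative and collapses the under/overflow cases the driver handles separately):

#include <linux/dma-direction.h>
#include <target/target_core_base.h>

/* The core flags under/overflow on the command and keeps the byte
 * difference in residual_count; the fabric only picks which
 * direction's counter to report to the initiator. */
static void my_fill_residuals(struct se_cmd *se_cmd,
                              u32 *read_resid, u32 *write_resid)
{
        *read_resid = 0;
        *write_resid = 0;

        if (!(se_cmd->se_cmd_flags & (SCF_UNDERFLOW_BIT | SCF_OVERFLOW_BIT)))
                return;

        if (se_cmd->data_direction == DMA_TO_DEVICE)
                *write_resid = se_cmd->residual_count;
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                *read_resid = se_cmd->residual_count;
}
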
/Linux-v6.6/drivers/target/sbp/
sbp_target.c
923 req->se_cmd.map_tag = tag; in sbp_mgt_get_req()
924 req->se_cmd.map_cpu = cpu; in sbp_mgt_get_req()
925 req->se_cmd.tag = next_orb; in sbp_mgt_get_req()
1220 req->se_cmd.tag = req->orb_pointer; in sbp_handle_command()
1221 target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, in sbp_handle_command()
1250 if (req->se_cmd.data_direction == DMA_FROM_DEVICE) { in sbp_rw_data()
1283 length = req->se_cmd.data_length; in sbp_rw_data()
1286 sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents, in sbp_rw_data()
1351 target_put_sess_cmd(&req->se_cmd); in sbp_send_status()
1357 struct se_cmd *se_cmd = &req->se_cmd; in sbp_sense_mangle() local
[all …]
/Linux-v6.6/drivers/vhost/
scsi.c
96 struct se_cmd tvc_se_cmd;
216 struct se_cmd se_cmd; member
326 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd) in vhost_scsi_release_cmd_res() argument
328 struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd, in vhost_scsi_release_cmd_res()
349 sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag); in vhost_scsi_release_cmd_res()
361 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) in vhost_scsi_release_cmd() argument
363 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { in vhost_scsi_release_cmd()
364 struct vhost_scsi_tmf *tmf = container_of(se_cmd, in vhost_scsi_release_cmd()
365 struct vhost_scsi_tmf, se_cmd); in vhost_scsi_release_cmd()
370 struct vhost_scsi_cmd *cmd = container_of(se_cmd, in vhost_scsi_release_cmd()
[all …]
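
vhost_scsi_release_cmd() above shows a fabric that embeds struct se_cmd in two different wrapper types and uses SCF_SCSI_TMR_CDB to tell them apart in ->release_cmd(). A stripped-down sketch of that dispatch (my_scsi_* names are illustrative; vhost-scsi itself returns I/O commands to a per-queue tag pool instead of kfree()ing them):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <target/target_core_base.h>

/* Two wrappers around the same embedded se_cmd: regular I/O and
 * task-management requests. */
struct my_scsi_io  { struct se_cmd se_cmd; /* ... */ };
struct my_scsi_tmf { struct se_cmd se_cmd; /* ... */ };

/* ->release_cmd(): SCF_SCSI_TMR_CDB tells us which wrapper the
 * se_cmd lives in, so the right container_of()/free path is taken. */
static void my_scsi_release_cmd(struct se_cmd *se_cmd)
{
        if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
                struct my_scsi_tmf *tmf =
                        container_of(se_cmd, struct my_scsi_tmf, se_cmd);

                kfree(tmf);
        } else {
                struct my_scsi_io *cmd =
                        container_of(se_cmd, struct my_scsi_io, se_cmd);

                kfree(cmd);
        }
}
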
