| /Linux-v5.4/tools/io_uring/ |
| D | liburing.h |
     97  static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
     99          sqe->user_data = (unsigned long) data;
    107  static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
    111          memset(sqe, 0, sizeof(*sqe));
    112          sqe->opcode = op;
    113          sqe->fd = fd;
    114          sqe->off = offset;
    115          sqe->addr = (unsigned long) addr;
    116          sqe->len = len;
    119  static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
    [all …]
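These prep helpers only fill in the SQE; a caller still has to obtain the slot, attach its context, and submit. A minimal userspace sketch of that flow, assuming the companion setup/submit/wait helpers from the same directory (io_uring_queue_init(), io_uring_submit(), io_uring_wait_cqe(), io_uring_cqe_get_data()); the file name and buffer size are illustrative:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/uio.h>
    #include "liburing.h"

    int main(void)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            struct iovec iov;
            static char buf[4096];
            int fd = open("file.txt", O_RDONLY);        /* illustrative input */

            if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
                    return 1;

            sqe = io_uring_get_sqe(&ring);              /* next free submission slot */
            if (!sqe)
                    return 1;
            iov.iov_base = buf;
            iov.iov_len  = sizeof(buf);
            io_uring_prep_readv(sqe, fd, &iov, 1, 0);   /* fills opcode/fd/addr/len/off */
            io_uring_sqe_set_data(sqe, buf);            /* user_data round-trips to the CQE */

            io_uring_submit(&ring);
            io_uring_wait_cqe(&ring, &cqe);
            printf("read %d bytes, data=%p\n", cqe->res, io_uring_cqe_get_data(cqe));
            return 0;
    }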
|
| D | io_uring-cp.c |
    in queue_prepped():
     71          struct io_uring_sqe *sqe;
     73          sqe = io_uring_get_sqe(ring);
     74          assert(sqe);
     77          io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset);
     79          io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset);
     81          io_uring_sqe_set_data(sqe, data);
    in queue_read():
     86          struct io_uring_sqe *sqe;
     93          sqe = io_uring_get_sqe(ring);
     94          if (!sqe) {
    106          io_uring_prep_readv(sqe, infd, &data->iov, 1, offset);
    [all …]
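queue_prepped() preps either a readv or a writev over the same buffer and offset, which is the core of the copy loop: a completed read is resubmitted as a write. A hypothetical sketch of that completion-side flip, with struct io_data abbreviated to the fields used here and error paths omitted:

    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include "liburing.h"

    struct io_data {
            int read;                /* 1 = this request was the read half */
            off_t offset;
            struct iovec iov;
    };

    /* provided by io_uring-cp.c: preps readv when data->read, else writev */
    void queue_prepped(struct io_uring *ring, struct io_data *data);

    static void handle_cqe(struct io_uring *ring, struct io_uring_cqe *cqe)
    {
            struct io_data *data = io_uring_cqe_get_data(cqe);

            if (data->read) {
                    /* read finished: reuse buffer and offset for the write half */
                    data->read = 0;
                    data->iov.iov_len = cqe->res;   /* write exactly what was read */
                    queue_prepped(ring, data);
                    io_uring_submit(ring);
            } else {
                    free(data);                     /* this block is fully copied */
            }
    }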
|
| D | io_uring-bench.c |
    in init_io():
    145          struct io_uring_sqe *sqe = &s->sqes[index];
    151          sqe->opcode = IORING_OP_NOP;
    172          sqe->flags = IOSQE_FIXED_FILE;
    173          sqe->fd = f->fixed_fd;
    175          sqe->flags = 0;
    176          sqe->fd = f->real_fd;
    179          sqe->opcode = IORING_OP_READ_FIXED;
    180          sqe->addr = (unsigned long) s->iovecs[index].iov_base;
    181          sqe->len = BS;
    182          sqe->buf_index = index;
    [all …]
|
| D | queue.c |
    in io_uring_get_sqe():
    145          struct io_uring_sqe *sqe;
    153          sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
    155          return sqe;
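Line 153 is the classic power-of-two ring trick: sqe_tail is a free-running counter that is masked only on array access, so wrap-around is free and fullness is a simple subtraction. A self-contained illustration (ring size and element type are arbitrary):

    #include <stdio.h>

    #define RING_ENTRIES 8                     /* must be a power of two */
    #define RING_MASK    (RING_ENTRIES - 1)

    struct ring {
            unsigned head, tail;               /* free-running counters */
            int slots[RING_ENTRIES];
    };

    static int *ring_get_slot(struct ring *r)
    {
            if (r->tail - r->head == RING_ENTRIES)
                    return NULL;                       /* full, like io_uring_get_sqe() failing */
            return &r->slots[r->tail++ & RING_MASK];   /* mask only on access */
    }

    int main(void)
    {
            struct ring r = { 0 };
            for (int i = 0; i < 10; i++) {
                    int *slot = ring_get_slot(&r);
                    printf("%d -> %s\n", i, slot ? "got slot" : "ring full");
            }
            return 0;
    }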
|
| /Linux-v5.4/drivers/infiniband/sw/siw/ |
| D | siw_qp.c |
    in siw_qp_mpa_rts():
    275          wqe->sqe.flags = 0;
    276          wqe->sqe.num_sge = 1;
    277          wqe->sqe.sge[0].length = 0;
    278          wqe->sqe.sge[0].laddr = 0;
    279          wqe->sqe.sge[0].lkey = 0;
    284          wqe->sqe.rkey = 1;
    285          wqe->sqe.raddr = 0;
    289          wqe->sqe.opcode = SIW_OP_WRITE;
    293          wqe->sqe.opcode = SIW_OP_READ;
    299          siw_read_to_orq(rreq, &wqe->sqe);
    [all …]
|
| D | siw_qp_tx.c |
    in siw_try_1seg():
     43          struct siw_sge *sge = &wqe->sqe.sge[0];
     46          if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
     53          memcpy(paddr, &wqe->sqe.sge[1], bytes);
    in siw_qp_prepare_tx():
    137          c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
    139                  cpu_to_be64(wqe->sqe.sge[0].laddr);
    140          c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
    141          c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
    142          c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);
    185          c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);
    197          c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);
    [all …]
|
| D | siw_verbs.c |
    in siw_copy_inline_sgl():
    657                                  struct siw_sqe *sqe)
    660          void *kbuf = &sqe->sge[1];
    663          sqe->sge[0].laddr = (uintptr_t)kbuf;
    664          sqe->sge[0].lkey = 0;
    682          sqe->sge[0].length = bytes > 0 ? bytes : 0;
    683          sqe->num_sge = bytes > 0 ? 1 : 0;
    in siw_post_send():
    731          struct siw_sqe *sqe = &qp->sendq[idx];
    733          if (sqe->flags) {
    743          sqe->id = wr->wr_id;
    747          sqe->flags |= SIW_WQE_SIGNALLED;
    [all …]
|
| D | siw.h |
    204          struct siw_sqe sqe;    (member)
    496  #define tx_type(wqe)   ((wqe)->sqe.opcode)
    498  #define tx_flags(wqe)  ((wqe)->sqe.flags)
    538  void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
    539  int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
    in siw_sq_empty():
    641          struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
    643          return READ_ONCE(sqe->flags) == 0;
    in sq_get_next():
    648          struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
    650          if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
    651                  return sqe;
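siw_sq_empty() and sq_get_next() poll the send queue locklessly: the poster writes the whole SQE first and publishes it by setting a flags bit, and the consumer reads flags with READ_ONCE() so it never consumes a half-written entry. A userspace approximation of that handshake, with C11 acquire/release standing in for the kernel's READ_ONCE() plus barrier discipline (the flag value is illustrative, not the real SIW_WQE_VALID):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WQE_VALID 1u

    struct slot {
            uint64_t id;               /* payload, written before the flag */
            _Atomic uint32_t flags;    /* 0 = free, WQE_VALID = posted */
    };

    /* producer side: publish payload, then set the flag last (release) */
    static void post_wqe(struct slot *s, uint64_t id)
    {
            s->id = id;
            atomic_store_explicit(&s->flags, WQE_VALID, memory_order_release);
    }

    /* consumer side: check the flag first (acquire), only then touch payload */
    static struct slot *sq_next(struct slot *s)
    {
            if (atomic_load_explicit(&s->flags, memory_order_acquire) & WQE_VALID)
                    return s;
            return NULL;               /* nothing posted yet, like siw_sq_empty() */
    }

    int main(void)
    {
            struct slot s = { 0 };
            printf("before post: %s\n", sq_next(&s) ? "valid" : "empty");
            post_wqe(&s, 42);
            printf("after post:  id=%llu\n", (unsigned long long)sq_next(&s)->id);
            return 0;
    }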
|
| D | siw_qp_rx.c |
    in siw_rresp_check_ntoh():
    175          srx->ddp_stag = wqe->sqe.sge[0].lkey;
    176          srx->ddp_to = wqe->sqe.sge[0].laddr;
    in siw_init_rresp():
    690          resp = &tx_work->sqe;
    in siw_orqe_start_rx():
    749          wqe->sqe.id = orqe->id;
    750          wqe->sqe.opcode = orqe->opcode;
    751          wqe->sqe.sge[0].laddr = orqe->sge[0].laddr;
    752          wqe->sqe.sge[0].lkey = orqe->sge[0].lkey;
    753          wqe->sqe.sge[0].length = orqe->sge[0].length;
    754          wqe->sqe.flags = orqe->flags;
    755          wqe->sqe.num_sge = 1;
    [all …]
|
| /Linux-v5.4/fs/ |
| D | io_uring.c |
    269          const struct io_uring_sqe *sqe;    (member)
    in io_queue_async_work():
    489          if (req->submit.sqe) {
    490                  switch (req->submit.sqe->opcode) {
    in io_prep_rw():
   1078          const struct io_uring_sqe *sqe = s->sqe;
   1099          kiocb->ki_pos = READ_ONCE(sqe->off);
   1103          ioprio = READ_ONCE(sqe->ioprio);
   1113          ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
    in io_import_fixed():
   1163                                  const struct io_uring_sqe *sqe,
   1166          size_t len = READ_ONCE(sqe->len);
   1176          buf_index = READ_ONCE(sqe->buf_index);
    [all …]
|
| /Linux-v5.4/drivers/crypto/hisilicon/zip/ |
| D | zip_crypto.c |
     81  static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
     85          val = (sqe->dw9) & ~HZIP_BUF_TYPE_M;
     87          sqe->dw9 = val;
     90  static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
     92          sqe->tag = tag;
     95  static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
     99          memset(sqe, 0, sizeof(struct hisi_zip_sqe));
    101          sqe->input_data_length = slen;
    102          sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
    103          sqe->dest_avail_out = dlen;
    [all …]
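The dw9 update at lines 85-87 is a read-modify-write on a packed descriptor word: clear the field with its mask, then OR in the new value shifted into position, which is what the kernel's FIELD_PREP() does in one step. A standalone sketch with illustrative mask and shift values (not the real HZIP_BUF_TYPE_M layout):

    #include <stdint.h>
    #include <stdio.h>

    #define BUF_TYPE_MASK  0x00F00000u      /* illustrative field position */
    #define BUF_TYPE_SHIFT 20

    static uint32_t set_buf_type(uint32_t dw9, uint8_t buf_type)
    {
            dw9 &= ~BUF_TYPE_MASK;          /* clear the old field */
            dw9 |= ((uint32_t)buf_type << BUF_TYPE_SHIFT) & BUF_TYPE_MASK;
            return dw9;
    }

    int main(void)
    {
            uint32_t dw9 = 0xDEAD0123u;
            printf("0x%08X -> 0x%08X\n", dw9, set_buf_type(dw9, 0x3));
            return 0;
    }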
|
| /Linux-v5.4/drivers/scsi/qedf/ |
| D | drv_fcoe_fw_funcs.c |
    in init_common_sqe():
     13          memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
     14          SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
     16          task_params->sqe->task_id = task_params->itid;
    in init_initiator_midpath_unsolicited_fcoe_task():
    167          task_params->sqe->additional_info_union.burst_length =
    169          SET_FIELD(task_params->sqe->flags,
    171          SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
    in init_initiator_sequence_recovery_fcoe_task():
    193          task_params->sqe->additional_info_union.seq_rec_updated_offset =
|
| D | qedf_io.c |
    in qedf_init_task():
    596                                  struct fcoe_wqe *sqe)
    632          io_req->task_params->sqe = sqe;
    in qedf_init_mp_task():
    685          struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
    711          io_req->task_params->sqe = sqe;
    in qedf_post_io_req():
    864          struct fcoe_wqe *sqe;
    913          sqe = &fcport->sq[sqe_idx];
    914          memset(sqe, 0, sizeof(struct fcoe_wqe));
    927          qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
    in qedf_initiate_abts():
   1841          struct fcoe_wqe *sqe;
   1920          sqe = &fcport->sq[sqe_idx];
    [all …]
|
| D | qedf_els.c |
    in qedf_initiate_els():
     23          struct fcoe_wqe *sqe;
    120          sqe = &fcport->sq[sqe_idx];
    121          memset(sqe, 0, sizeof(struct fcoe_wqe));
    125          qedf_init_mp_task(els_req, task, sqe);
    in qedf_initiate_seq_cleanup():
    687          struct fcoe_wqe *sqe;
    717          sqe = &fcport->sq[sqe_idx];
    718          memset(sqe, 0, sizeof(struct fcoe_wqe));
    719          orig_io_req->task_params->sqe = sqe;
|
| D | drv_fcoe_fw_funcs.h |
     16          struct fcoe_wqe *sqe;    (member)
|
| /Linux-v5.4/drivers/scsi/qedi/ |
| D | qedi_fw_api.c |
    in init_sqe():
     98          if (!task_params->sqe)
    101          memset(task_params->sqe, 0, sizeof(*task_params->sqe));
    102          task_params->sqe->task_id = cpu_to_le16(task_params->itid);
    104          SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
    115          init_dif_context_flags(&task_params->sqe->prot_flags,
    118          SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
    134          SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
    136          SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
    141          SET_FIELD(task_params->sqe->contlen_cdbsize,
    147          SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
    [all …]
|
| D | qedi_fw_iscsi.h |
     14          struct iscsi_wqe *sqe;    (member)
|
| D | qedi_fw.c |
    in qedi_send_iscsi_login():
   1089          task_params.sqe = &ep->sq[sq_idx];
   1091          memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
    in qedi_send_iscsi_logout():
   1163          task_params.sqe = &ep->sq[sq_idx];
   1164          memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
    in qedi_send_iscsi_tmf():
   1534          task_params.sqe = &ep->sq[sq_idx];
   1536          memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
    in qedi_send_iscsi_text():
   1674          task_params.sqe = &ep->sq[sq_idx];
   1676          memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
    in qedi_send_iscsi_nopout():
   1791          task_params.sqe = &ep->sq[sq_idx];
   1793          memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
    [all …]
|
| /Linux-v5.4/drivers/infiniband/hw/bnxt_re/ |
| D | qplib_fp.c |
    in bnxt_qplib_post_send():
   1601          struct sq_send_raweth_qp1 *sqe =
   1604          sqe->wqe_type = wqe->type;
   1605          sqe->flags = wqe->flags;
   1606          sqe->wqe_size = wqe_size16 +
   1607                  ((offsetof(typeof(*sqe), data) + 15) >> 4);
   1608          sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
   1609          sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
   1610          sqe->length = cpu_to_le32(data_len);
   1611          sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
   1621          struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
    [all …]
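Lines 1606-1607 express the WQE size in 16-byte units: the header length up to the data member is rounded up to the next multiple of 16 by the +15, divided by 16 via the shift, and added to wqe_size16, which already counts the data segments in the same units. A standalone illustration with a made-up header layout:

    #include <stddef.h>
    #include <stdio.h>

    struct raw_wqe {
            unsigned char hdr[26];     /* illustrative header, not the real layout */
            unsigned char data[];      /* payload starts here */
    };

    int main(void)
    {
            size_t hdr_units = (offsetof(struct raw_wqe, data) + 15) >> 4;
            printf("header = %zu bytes -> %zu units of 16B\n",
                   offsetof(struct raw_wqe, data), hdr_units);   /* 26 -> 2 */
            return 0;
    }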
|
| /Linux-v5.4/drivers/infiniband/hw/cxgb4/ |
| D | restrack.c |
    in fill_swsqe():
     96                          struct t4_swsqe *sqe)
    100          if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
    102          if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
    104          if (sqe->complete &&
    105              rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
    107          if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
    109          if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
|
| /Linux-v5.4/drivers/scsi/bnx2i/ |
| D | bnx2i.h |
    498  struct sqe {
    634          struct sqe *sq_virt;
    638          struct sqe *sq_prod_qe;
    639          struct sqe *sq_cons_qe;
    640          struct sqe *sq_first_qe;
    641          struct sqe *sq_last_qe;
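These pointers implement the send queue as a ring over a contiguous array of SQEs: the producer and consumer pointers chase each other and wrap from sq_last_qe back to sq_first_qe. A sketch of the advance step, with illustrative names and element size (not the real bnx2i layout):

    #include <stdio.h>
    #include <stdlib.h>

    struct sqe { unsigned char wqe[64]; };   /* illustrative element size */

    struct sq_ring {
            struct sqe *sq_virt;        /* base of the ring array */
            struct sqe *sq_prod_qe;     /* next slot the driver fills */
            struct sqe *sq_cons_qe;     /* next slot to consume */
            struct sqe *sq_first_qe;    /* == sq_virt */
            struct sqe *sq_last_qe;     /* last valid slot in the array */
    };

    /* advance the producer pointer, wrapping past the end of the array */
    static void sq_advance_prod(struct sq_ring *q)
    {
            if (q->sq_prod_qe == q->sq_last_qe)
                    q->sq_prod_qe = q->sq_first_qe;
            else
                    q->sq_prod_qe++;
    }

    int main(void)
    {
            struct sqe *ring = calloc(8, sizeof(*ring));
            struct sq_ring q = { ring, ring, ring, ring, &ring[7] };
            for (int i = 0; i < 9; i++)
                    sq_advance_prod(&q);     /* 8th advance wraps to slot 0 */
            printf("prod slot after 9 advances: %td\n", q.sq_prod_qe - q.sq_virt);
            return 0;
    }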
|
| /Linux-v5.4/drivers/nvme/host/ |
| D | rdma.c |
     55          struct nvme_rdma_qe sqe;    (member)
    in nvme_rdma_exit_request():
    284          kfree(req->sqe.data);
    in nvme_rdma_init_request():
    297          req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
    298          if (!req->sqe.data)
    in nvme_rdma_send_done():
   1328                  container_of(qe, struct nvme_rdma_request, sqe);
    in nvme_rdma_submit_async_event():
   1417          struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
   1418          struct nvme_command *cmd = sqe->data;
   1422          ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
   1430          sqe->cqe.done = nvme_rdma_async_done;
   1432          ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
    [all …]
|
| D | fc.c |
    in nvme_fc_fcpio_done():
   1562          struct nvme_command *sqe = &op->cmd_iu.sqe;
   1666          sqe->common.command_id != cqe->command_id)) {
   1676          sqe->common.command_id,
    in nvme_fc_init_aen_ops():
   1785          struct nvme_command *sqe;
   1797          sqe = &cmdiu->sqe;
   1809          memset(sqe, 0, sizeof(*sqe));
   1810          sqe->common.opcode = nvme_admin_async_event;
   1812          sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
    in nvme_fc_start_fcp_op():
   2212          struct nvme_command *sqe = &cmdiu->sqe;
   2250          WARN_ON_ONCE(sqe->common.metadata);
    [all …]
|
| /Linux-v5.4/drivers/crypto/hisilicon/ |
| D | qm.h |
    174          int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
    183          void *sqe;    (member)
|
| /Linux-v5.4/drivers/nvme/target/ |
| D | fc.c |
    in nvmet_fc_prep_fcp_rsp():
   1777          struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
   1809              nvme_is_fabrics((struct nvme_command *) sqe) ||
   1812              (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
    in __nvmet_fc_fcp_nvme_cmd_done():
   2083          struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
   2107          cqe->command_id = sqe->command_id;
    in nvmet_fc_handle_fcp_rqst():
   2174          if (!nvme_is_write(&cmdiu->sqe))
   2178          if (nvme_is_write(&cmdiu->sqe))
   2186          fod->req.cmd = &fod->cmdiubuf.sqe;
|