
Searched refs:wqe (Results 1 – 25 of 96) sorted by relevance

/Linux-v4.19/drivers/infiniband/sw/rxe/
rxe_req.c
41 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
45 struct rxe_send_wqe *wqe, in retry_first_write_send() argument
51 int to_send = (wqe->dma.resid > qp->mtu) ? in retry_first_write_send()
52 qp->mtu : wqe->dma.resid; in retry_first_write_send()
54 qp->req.opcode = next_opcode(qp, wqe, in retry_first_write_send()
55 wqe->wr.opcode); in retry_first_write_send()
57 if (wqe->wr.send_flags & IB_SEND_INLINE) { in retry_first_write_send()
58 wqe->dma.resid -= to_send; in retry_first_write_send()
59 wqe->dma.sge_offset += to_send; in retry_first_write_send()
61 advance_dma_data(&wqe->dma, to_send); in retry_first_write_send()
[all …]
rxe_comp.c
166 struct rxe_send_wqe *wqe; in get_wqe() local
171 wqe = queue_head(qp->sq.queue); in get_wqe()
172 *wqe_p = wqe; in get_wqe()
175 if (!wqe || wqe->state == wqe_state_posted) in get_wqe()
179 if (wqe->state == wqe_state_done) in get_wqe()
183 if (wqe->state == wqe_state_error) in get_wqe()
198 struct rxe_send_wqe *wqe) in check_psn() argument
205 diff = psn_compare(pkt->psn, wqe->last_psn); in check_psn()
207 if (wqe->state == wqe_state_pending) { in check_psn()
208 if (wqe->mask & WR_ATOMIC_OR_READ_MASK) in check_psn()
[all …]
/Linux-v4.19/drivers/infiniband/hw/cxgb3/
iwch_qp.c
42 static int build_rdma_send(union t3_wr *wqe, const struct ib_send_wr *wr, in build_rdma_send() argument
51 wqe->send.rdmaop = T3_SEND_WITH_SE; in build_rdma_send()
53 wqe->send.rdmaop = T3_SEND; in build_rdma_send()
54 wqe->send.rem_stag = 0; in build_rdma_send()
58 wqe->send.rdmaop = T3_SEND_WITH_SE_INV; in build_rdma_send()
60 wqe->send.rdmaop = T3_SEND_WITH_INV; in build_rdma_send()
61 wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send()
68 wqe->send.reserved[0] = 0; in build_rdma_send()
69 wqe->send.reserved[1] = 0; in build_rdma_send()
70 wqe->send.reserved[2] = 0; in build_rdma_send()
[all …]
cxio_hal.c
139 struct t3_modify_qp_wr *wqe; in cxio_hal_clear_qp_ctx() local
140 struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL); in cxio_hal_clear_qp_ctx()
145 wqe = skb_put_zero(skb, sizeof(*wqe)); in cxio_hal_clear_qp_ctx()
146 build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, in cxio_hal_clear_qp_ctx()
149 wqe->flags = cpu_to_be32(MODQP_WRITE_EC); in cxio_hal_clear_qp_ctx()
151 wqe->sge_cmd = cpu_to_be64(sge_cmd); in cxio_hal_clear_qp_ctx()
515 struct t3_modify_qp_wr *wqe; in cxio_hal_init_ctrl_qp() local
518 skb = alloc_skb(sizeof(*wqe), GFP_KERNEL); in cxio_hal_init_ctrl_qp()
561 wqe = skb_put_zero(skb, sizeof(*wqe)); in cxio_hal_init_ctrl_qp()
562 build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0, in cxio_hal_init_ctrl_qp()
[all …]
/Linux-v4.19/drivers/infiniband/hw/i40iw/
i40iw_ctrl.c
51 void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) in i40iw_insert_wqe_hdr() argument
54 set_64bit_val(wqe, 24, header); in i40iw_insert_wqe_hdr()
607 u64 *wqe = NULL; in i40iw_sc_cqp_get_next_send_wqe_idx() local
627 wqe = cqp->sq_base[*wqe_idx].elem; in i40iw_sc_cqp_get_next_send_wqe_idx()
629 I40IW_CQP_INIT_WQE(wqe); in i40iw_sc_cqp_get_next_send_wqe_idx()
631 return wqe; in i40iw_sc_cqp_get_next_send_wqe_idx()
835 u64 *wqe; in i40iw_sc_manage_push_page() local
841 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); in i40iw_sc_manage_push_page()
842 if (!wqe) in i40iw_sc_manage_push_page()
845 set_64bit_val(wqe, 16, info->qs_handle); in i40iw_sc_manage_push_page()
[all …]
i40iw_uk.c
49 u64 header, *wqe; in i40iw_nop_1() local
58 wqe = qp->sq_base[wqe_idx].elem; in i40iw_nop_1()
69 set_64bit_val(wqe, 0, 0); in i40iw_nop_1()
70 set_64bit_val(wqe, 8, 0); in i40iw_nop_1()
71 set_64bit_val(wqe, 16, 0); in i40iw_nop_1()
79 set_64bit_val(wqe, 24, header); in i40iw_nop_1()
141 u64 *wqe = NULL; in i40iw_qp_get_next_send_wqe() local
183 wqe = qp->sq_base[*wqe_idx].elem; in i40iw_qp_get_next_send_wqe()
196 return wqe; in i40iw_qp_get_next_send_wqe()
205 static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge) in i40iw_set_fragment() argument
[all …]
i40iw_vf.c
56 u64 *wqe; in i40iw_manage_vf_pble_bp() local
59 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); in i40iw_manage_vf_pble_bp()
60 if (!wqe) in i40iw_manage_vf_pble_bp()
66 set_64bit_val(wqe, 16, temp); in i40iw_manage_vf_pble_bp()
71 set_64bit_val(wqe, 24, header); in i40iw_manage_vf_pble_bp()
74 set_64bit_val(wqe, 32, pd_pl_pba); in i40iw_manage_vf_pble_bp()
76 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8); in i40iw_manage_vf_pble_bp()
i40iw_puda.c
112 u64 *wqe; in i40iw_puda_post_recvbuf() local
117 wqe = qp->qp_uk.rq_base[wqe_idx].elem; in i40iw_puda_post_recvbuf()
120 wqe_idx, buf, wqe); in i40iw_puda_post_recvbuf()
122 get_64bit_val(wqe, 24, &offset24); in i40iw_puda_post_recvbuf()
126 set_64bit_val(wqe, 0, buf->mem.pa); in i40iw_puda_post_recvbuf()
127 set_64bit_val(wqe, 8, in i40iw_puda_post_recvbuf()
129 i40iw_insert_wqe_hdr(wqe, offset24); in i40iw_puda_post_recvbuf()
208 u64 *wqe = NULL; in i40iw_puda_get_next_send_wqe() local
216 return wqe; in i40iw_puda_get_next_send_wqe()
217 wqe = qp->sq_base[*wqe_idx].elem; in i40iw_puda_get_next_send_wqe()
[all …]
/Linux-v4.19/drivers/infiniband/hw/qib/
qib_rc.c
42 static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, in restart_sge() argument
47 len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu; in restart_sge()
48 ss->sge = wqe->sg_list[0]; in restart_sge()
49 ss->sg_list = wqe->sg_list + 1; in restart_sge()
50 ss->num_sge = wqe->wr.num_sge; in restart_sge()
51 ss->total_len = wqe->length; in restart_sge()
53 return wqe->length - len; in restart_sge()
226 struct rvt_swqe *wqe; in qib_make_rc_req() local
256 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in qib_make_rc_req()
257 qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ? in qib_make_rc_req()
[all …]
qib_uc.c
52 struct rvt_swqe *wqe; in qib_make_uc_req() local
70 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in qib_make_uc_req()
71 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in qib_make_uc_req()
84 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); in qib_make_uc_req()
97 qp->s_psn = wqe->psn; in qib_make_uc_req()
98 qp->s_sge.sge = wqe->sg_list[0]; in qib_make_uc_req()
99 qp->s_sge.sg_list = wqe->sg_list + 1; in qib_make_uc_req()
100 qp->s_sge.num_sge = wqe->wr.num_sge; in qib_make_uc_req()
101 qp->s_sge.total_len = wqe->length; in qib_make_uc_req()
102 len = wqe->length; in qib_make_uc_req()
[all …]
qib_ruc.c
191 struct rvt_swqe *wqe; in qib_ruc_loopback() local
222 wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); in qib_ruc_loopback()
262 sqp->s_sge.sge = wqe->sg_list[0]; in qib_ruc_loopback()
263 sqp->s_sge.sg_list = wqe->sg_list + 1; in qib_ruc_loopback()
264 sqp->s_sge.num_sge = wqe->wr.num_sge; in qib_ruc_loopback()
265 sqp->s_len = wqe->length; in qib_ruc_loopback()
266 switch (wqe->wr.opcode) { in qib_ruc_loopback()
269 wc.ex.imm_data = wqe->wr.ex.imm_data; in qib_ruc_loopback()
283 wc.ex.imm_data = wqe->wr.ex.imm_data; in qib_ruc_loopback()
293 if (wqe->length == 0) in qib_ruc_loopback()
[all …]
qib_ud.c
242 struct rvt_swqe *wqe; in qib_make_ud_req() local
262 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in qib_make_ud_req()
263 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in qib_make_ud_req()
271 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); in qib_make_ud_req()
279 ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr; in qib_make_ud_req()
304 qib_ud_loopback(qp, wqe); in qib_make_ud_req()
307 qib_send_complete(qp, wqe, IB_WC_SUCCESS); in qib_make_ud_req()
313 extra_bytes = -wqe->length & 3; in qib_make_ud_req()
314 nwords = (wqe->length + extra_bytes) >> 2; in qib_make_ud_req()
318 qp->s_cur_size = wqe->length; in qib_make_ud_req()
[all …]
/Linux-v4.19/drivers/infiniband/sw/rdmavt/
trace_tx.h
91 TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge),
92 TP_ARGS(qp, wqe, wr_num_sge),
96 __field(struct rvt_swqe *, wqe)
115 __entry->wqe = wqe;
116 __entry->wr_id = wqe->wr.wr_id;
119 __entry->psn = wqe->psn;
120 __entry->lpsn = wqe->lpsn;
121 __entry->length = wqe->length;
122 __entry->opcode = wqe->wr.opcode;
128 __entry->ssn = wqe->ssn;
[all …]
/Linux-v4.19/drivers/infiniband/hw/hfi1/
rc.c
60 static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, in restart_sge() argument
65 len = delta_psn(psn, wqe->psn) * pmtu; in restart_sge()
66 ss->sge = wqe->sg_list[0]; in restart_sge()
67 ss->sg_list = wqe->sg_list + 1; in restart_sge()
68 ss->num_sge = wqe->wr.num_sge; in restart_sge()
69 ss->total_len = wqe->length; in restart_sge()
71 return wqe->length - len; in restart_sge()
262 struct rvt_swqe *wqe; in hfi1_make_rc_req() local
311 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in hfi1_make_rc_req()
312 hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ? in hfi1_make_rc_req()
[all …]
uc.c
67 struct rvt_swqe *wqe; in hfi1_make_uc_req() local
90 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in hfi1_make_uc_req()
91 hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in hfi1_make_uc_req()
113 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); in hfi1_make_uc_req()
129 if (wqe->wr.opcode == IB_WR_REG_MR || in hfi1_make_uc_req()
130 wqe->wr.opcode == IB_WR_LOCAL_INV) { in hfi1_make_uc_req()
138 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { in hfi1_make_uc_req()
140 qp, wqe->wr.ex.invalidate_rkey); in hfi1_make_uc_req()
143 hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR in hfi1_make_uc_req()
152 qp->s_psn = wqe->psn; in hfi1_make_uc_req()
[all …]
ruc.c
173 struct rvt_swqe *wqe; in ruc_loopback() local
206 wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); in ruc_loopback()
246 sqp->s_sge.sge = wqe->sg_list[0]; in ruc_loopback()
247 sqp->s_sge.sg_list = wqe->sg_list + 1; in ruc_loopback()
248 sqp->s_sge.num_sge = wqe->wr.num_sge; in ruc_loopback()
249 sqp->s_len = wqe->length; in ruc_loopback()
250 switch (wqe->wr.opcode) { in ruc_loopback()
255 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { in ruc_loopback()
257 wqe->wr.ex.invalidate_rkey)) in ruc_loopback()
264 if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) { in ruc_loopback()
[all …]
ud.c
286 static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe, in hfi1_make_bth_deth() argument
294 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { in hfi1_make_bth_deth()
295 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data; in hfi1_make_bth_deth()
301 if (wqe->wr.send_flags & IB_SEND_SOLICITED) in hfi1_make_bth_deth()
305 *pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index); in hfi1_make_bth_deth()
311 ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn); in hfi1_make_bth_deth()
312 ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn)); in hfi1_make_bth_deth()
317 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ? in hfi1_make_bth_deth()
318 qp->qkey : wqe->ud_wr.remote_qkey); in hfi1_make_bth_deth()
323 struct rvt_swqe *wqe) in hfi1_make_ud_req_9B() argument
[all …]
/Linux-v4.19/drivers/scsi/lpfc/
lpfc_nvme.c
76 union lpfc_wqe128 *wqe; in lpfc_nvme_cmd_template() local
79 wqe = &lpfc_iread_cmd_template; in lpfc_nvme_cmd_template()
80 memset(wqe, 0, sizeof(union lpfc_wqe128)); in lpfc_nvme_cmd_template()
93 bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE); in lpfc_nvme_cmd_template()
94 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK); in lpfc_nvme_cmd_template()
95 bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3); in lpfc_nvme_cmd_template()
96 bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI); in lpfc_nvme_cmd_template()
103 bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0); in lpfc_nvme_cmd_template()
104 bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1); in lpfc_nvme_cmd_template()
105 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); in lpfc_nvme_cmd_template()
[all …]
lpfc_nvmet.c
85 union lpfc_wqe128 *wqe; in lpfc_nvmet_cmd_template() local
88 wqe = &lpfc_tsend_cmd_template; in lpfc_nvmet_cmd_template()
89 memset(wqe, 0, sizeof(union lpfc_wqe128)); in lpfc_nvmet_cmd_template()
102 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE); in lpfc_nvmet_cmd_template()
103 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF); in lpfc_nvmet_cmd_template()
104 bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3); in lpfc_nvmet_cmd_template()
105 bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI); in lpfc_nvmet_cmd_template()
106 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1); in lpfc_nvmet_cmd_template()
113 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); in lpfc_nvmet_cmd_template()
114 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); in lpfc_nvmet_cmd_template()
[all …]
/Linux-v4.19/drivers/infiniband/hw/cxgb4/
qp.c
487 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, in build_rdma_send() argument
499 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
502 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
504 wqe->send.stag_inv = 0; in build_rdma_send()
508 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
511 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
513 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send()
519 wqe->send.r3 = 0; in build_rdma_send()
520 wqe->send.r4 = 0; in build_rdma_send()
525 ret = build_immd(sq, wqe->send.u.immd_src, wr, in build_rdma_send()
[all …]
/Linux-v4.19/drivers/infiniband/hw/bnxt_re/
ib_verbs.c
419 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; in bnxt_re_create_fence_wqe() local
421 memset(wqe, 0, sizeof(*wqe)); in bnxt_re_create_fence_wqe()
422 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; in bnxt_re_create_fence_wqe()
423 wqe->wr_id = BNXT_QPLIB_FENCE_WRID; in bnxt_re_create_fence_wqe()
424 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; in bnxt_re_create_fence_wqe()
425 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; in bnxt_re_create_fence_wqe()
426 wqe->bind.zero_based = false; in bnxt_re_create_fence_wqe()
427 wqe->bind.parent_l_key = ib_mr->lkey; in bnxt_re_create_fence_wqe()
428 wqe->bind.va = (u64)(unsigned long)fence->va; in bnxt_re_create_fence_wqe()
429 wqe->bind.length = fence->size; in bnxt_re_create_fence_wqe()
[all …]
qplib_fp.c
655 struct bnxt_qplib_swqe *wqe) in bnxt_qplib_post_srq_recv() argument
681 i < wqe->num_sge; i++, hw_sge++) { in bnxt_qplib_post_srq_recv()
682 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr); in bnxt_qplib_post_srq_recv()
683 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey); in bnxt_qplib_post_srq_recv()
684 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size); in bnxt_qplib_post_srq_recv()
686 srqe->wqe_type = wqe->type; in bnxt_qplib_post_srq_recv()
687 srqe->flags = wqe->flags; in bnxt_qplib_post_srq_recv()
688 srqe->wqe_size = wqe->num_sge + in bnxt_qplib_post_srq_recv()
691 srq->swq[next].wr_id = wqe->wr_id; in bnxt_qplib_post_srq_recv()
1514 struct bnxt_qplib_swqe *wqe) in bnxt_qplib_post_send() argument
[all …]
/Linux-v4.19/drivers/infiniband/hw/mthca/
mthca_srq.c
90 static inline int *wqe_to_link(void *wqe) in wqe_to_link() argument
92 return (int *) (wqe + offsetof(struct mthca_next_seg, imm)); in wqe_to_link()
151 void *wqe; in mthca_alloc_srq_buf() local
178 next = wqe = get_wqe(srq, i); in mthca_alloc_srq_buf()
181 *wqe_to_link(wqe) = i + 1; in mthca_alloc_srq_buf()
184 *wqe_to_link(wqe) = -1; in mthca_alloc_srq_buf()
188 for (scatter = wqe + sizeof (struct mthca_next_seg); in mthca_alloc_srq_buf()
189 (void *) scatter < wqe + (1 << srq->wqe_shift); in mthca_alloc_srq_buf()
487 void *wqe; in mthca_tavor_post_srq_recv() local
496 wqe = get_wqe(srq, ind); in mthca_tavor_post_srq_recv()
[all …]
mthca_qp.c
1619 void *wqe; in mthca_tavor_post_send() local
1655 wqe = get_send_wqe(qp, ind); in mthca_tavor_post_send()
1657 qp->sq.last = wqe; in mthca_tavor_post_send()
1659 ((struct mthca_next_seg *) wqe)->nda_op = 0; in mthca_tavor_post_send()
1660 ((struct mthca_next_seg *) wqe)->ee_nds = 0; in mthca_tavor_post_send()
1661 ((struct mthca_next_seg *) wqe)->flags = in mthca_tavor_post_send()
1669 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; in mthca_tavor_post_send()
1671 wqe += sizeof (struct mthca_next_seg); in mthca_tavor_post_send()
1679 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, in mthca_tavor_post_send()
1681 wqe += sizeof (struct mthca_raddr_seg); in mthca_tavor_post_send()
[all …]
/Linux-v4.19/drivers/net/ethernet/huawei/hinic/
hinic_hw_cmdq.c
63 #define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe)) argument
191 static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped, in cmdq_prepare_wqe_ctrl() argument
205 wqe_lcmd = &wqe->wqe_lcmd; in cmdq_prepare_wqe_ctrl()
211 wqe_scmd = &wqe->direct_wqe.wqe_scmd; in cmdq_prepare_wqe_ctrl()
223 CMDQ_WQE_HEADER(wqe)->header_info = in cmdq_prepare_wqe_ctrl()
232 saved_data = CMDQ_WQE_HEADER(wqe)->saved_data; in cmdq_prepare_wqe_ctrl()
236 CMDQ_WQE_HEADER(wqe)->saved_data |= in cmdq_prepare_wqe_ctrl()
239 CMDQ_WQE_HEADER(wqe)->saved_data = saved_data; in cmdq_prepare_wqe_ctrl()
248 static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe, in cmdq_set_direct_wqe_data() argument
251 struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; in cmdq_set_direct_wqe_data()
[all …]