/Linux-v5.4/drivers/infiniband/sw/rxe/

rxe_req.c
    41  static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
    45  struct rxe_send_wqe *wqe,  in retry_first_write_send() argument
    51  int to_send = (wqe->dma.resid > qp->mtu) ?  in retry_first_write_send()
    52  qp->mtu : wqe->dma.resid;  in retry_first_write_send()
    54  qp->req.opcode = next_opcode(qp, wqe,  in retry_first_write_send()
    55  wqe->wr.opcode);  in retry_first_write_send()
    57  if (wqe->wr.send_flags & IB_SEND_INLINE) {  in retry_first_write_send()
    58  wqe->dma.resid -= to_send;  in retry_first_write_send()
    59  wqe->dma.sge_offset += to_send;  in retry_first_write_send()
    61  advance_dma_data(&wqe->dma, to_send);  in retry_first_write_send()
    [all …]
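
The retry_first_write_send() lines above show the usual requester pattern: clamp the next chunk to the path MTU, then advance the DMA cursor by however much was sent. Below is a minimal standalone C sketch of that pattern; the dma_state struct and send_one_chunk() helper are simplified stand-ins for illustration, not the rxe driver's real types.

#include <stdio.h>

/* Simplified stand-in for the DMA state carried in a send WQE. */
struct dma_state {
    unsigned int resid;      /* bytes still to be sent */
    unsigned int sge_offset; /* offset into the current SGE */
};

/* Send one chunk of at most mtu bytes and advance the cursor. */
static void send_one_chunk(struct dma_state *dma, unsigned int mtu)
{
    unsigned int to_send = (dma->resid > mtu) ? mtu : dma->resid;

    /* ... build and post a packet carrying to_send bytes here ... */

    dma->resid -= to_send;
    dma->sge_offset += to_send;
}

int main(void)
{
    struct dma_state dma = { .resid = 10000, .sge_offset = 0 };

    while (dma.resid)
        send_one_chunk(&dma, 4096);

    printf("final sge_offset: %u\n", dma.sge_offset); /* prints 10000 */
    return 0;
}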

rxe_comp.c
   166  struct rxe_send_wqe *wqe;  in get_wqe() local
   171  wqe = queue_head(qp->sq.queue);  in get_wqe()
   172  *wqe_p = wqe;  in get_wqe()
   175  if (!wqe || wqe->state == wqe_state_posted)  in get_wqe()
   179  if (wqe->state == wqe_state_done)  in get_wqe()
   183  if (wqe->state == wqe_state_error)  in get_wqe()
   199  struct rxe_send_wqe *wqe)  in check_psn() argument
   206  diff = psn_compare(pkt->psn, wqe->last_psn);  in check_psn()
   208  if (wqe->state == wqe_state_pending) {  in check_psn()
   209  if (wqe->mask & WR_ATOMIC_OR_READ_MASK)  in check_psn()
   [all …]
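
The check_psn() lines compare the packet's PSN with the WQE's last PSN via psn_compare(), which has to tolerate wraparound because PSNs are 24-bit counters. The sketch below shows the usual wrap-aware comparison; it only illustrates the idea and is not a copy of rxe's actual psn_compare().

#include <assert.h>
#include <stdint.h>

/* Compare two 24-bit PSNs with wraparound: > 0 if a is after b,
 * < 0 if before, 0 if equal.  Relies on the common two's-complement
 * arithmetic-shift behaviour when sign-extending the difference. */
static int psn_cmp(uint32_t a, uint32_t b)
{
    int32_t diff = (int32_t)((a - b) << 8); /* drop bits above the 24-bit PSN */
    return diff >> 8;
}

int main(void)
{
    assert(psn_cmp(5, 3) > 0);
    assert(psn_cmp(3, 5) < 0);
    assert(psn_cmp(7, 7) == 0);
    /* Across the wrap point, 0x000001 counts as "after" 0xfffffe. */
    assert(psn_cmp(0x000001, 0xfffffe) > 0);
    return 0;
}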

/Linux-v5.4/drivers/infiniband/hw/cxgb3/

iwch_qp.c
    42  static int build_rdma_send(union t3_wr *wqe, const struct ib_send_wr *wr,  in build_rdma_send() argument
    51  wqe->send.rdmaop = T3_SEND_WITH_SE;  in build_rdma_send()
    53  wqe->send.rdmaop = T3_SEND;  in build_rdma_send()
    54  wqe->send.rem_stag = 0;  in build_rdma_send()
    58  wqe->send.rdmaop = T3_SEND_WITH_SE_INV;  in build_rdma_send()
    60  wqe->send.rdmaop = T3_SEND_WITH_INV;  in build_rdma_send()
    61  wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);  in build_rdma_send()
    68  wqe->send.reserved[0] = 0;  in build_rdma_send()
    69  wqe->send.reserved[1] = 0;  in build_rdma_send()
    70  wqe->send.reserved[2] = 0;  in build_rdma_send()
    [all …]

cxio_hal.c
   139  struct t3_modify_qp_wr *wqe;  in cxio_hal_clear_qp_ctx() local
   140  struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);  in cxio_hal_clear_qp_ctx()
   145  wqe = skb_put_zero(skb, sizeof(*wqe));  in cxio_hal_clear_qp_ctx()
   146  build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,  in cxio_hal_clear_qp_ctx()
   149  wqe->flags = cpu_to_be32(MODQP_WRITE_EC);  in cxio_hal_clear_qp_ctx()
   151  wqe->sge_cmd = cpu_to_be64(sge_cmd);  in cxio_hal_clear_qp_ctx()
   497  struct t3_modify_qp_wr *wqe;  in cxio_hal_init_ctrl_qp() local
   500  skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);  in cxio_hal_init_ctrl_qp()
   541  wqe = skb_put_zero(skb, sizeof(*wqe));  in cxio_hal_init_ctrl_qp()
   542  build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,  in cxio_hal_init_ctrl_qp()
   [all …]

/Linux-v5.4/drivers/infiniband/sw/siw/

siw_qp_tx.c
    42  struct siw_wqe *wqe = &c_tx->wqe_active;  in siw_try_1seg() local
    43  struct siw_sge *sge = &wqe->sqe.sge[0];  in siw_try_1seg()
    46  if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)  in siw_try_1seg()
    52  if (tx_flags(wqe) & SIW_WQE_INLINE) {  in siw_try_1seg()
    53  memcpy(paddr, &wqe->sqe.sge[1], bytes);  in siw_try_1seg()
    55  struct siw_mem *mem = wqe->mem[0];  in siw_try_1seg()
   121  struct siw_wqe *wqe = &c_tx->wqe_active;  in siw_qp_prepare_tx() local
   125  switch (tx_type(wqe)) {  in siw_qp_prepare_tx()
   137  c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);  in siw_qp_prepare_tx()
   139  cpu_to_be64(wqe->sqe.sge[0].laddr);  in siw_qp_prepare_tx()
   [all …]

siw_qp.c
   262  struct siw_wqe *wqe = tx_wqe(qp);  in siw_qp_mpa_rts() local
   268  if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {  in siw_qp_mpa_rts()
   272  memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);  in siw_qp_mpa_rts()
   274  wqe->wr_status = SIW_WR_QUEUED;  in siw_qp_mpa_rts()
   275  wqe->sqe.flags = 0;  in siw_qp_mpa_rts()
   276  wqe->sqe.num_sge = 1;  in siw_qp_mpa_rts()
   277  wqe->sqe.sge[0].length = 0;  in siw_qp_mpa_rts()
   278  wqe->sqe.sge[0].laddr = 0;  in siw_qp_mpa_rts()
   279  wqe->sqe.sge[0].lkey = 0;  in siw_qp_mpa_rts()
   284  wqe->sqe.rkey = 1;  in siw_qp_mpa_rts()
   [all …]

siw_qp_rx.c
   168  struct siw_wqe *wqe = &frx->wqe_active;  in siw_rresp_check_ntoh() local
   175  srx->ddp_stag = wqe->sqe.sge[0].lkey;  in siw_rresp_check_ntoh()
   176  srx->ddp_to = wqe->sqe.sge[0].laddr;  in siw_rresp_check_ntoh()
   203  (wqe->processed + srx->fpdu_part_rem != wqe->bytes))) {  in siw_rresp_check_ntoh()
   206  wqe->processed + srx->fpdu_part_rem, wqe->bytes);  in siw_rresp_check_ntoh()
   280  struct siw_wqe *wqe = &frx->wqe_active;  in siw_send_check_ntoh() local
   300  if (unlikely(ddp_mo != wqe->processed)) {  in siw_send_check_ntoh()
   302  qp_id(rx_qp(srx)), ddp_mo, wqe->processed);  in siw_send_check_ntoh()
   315  if (unlikely(wqe->bytes < wqe->processed + srx->fpdu_part_rem)) {  in siw_send_check_ntoh()
   317  wqe->bytes, wqe->processed, srx->fpdu_part_rem);  in siw_send_check_ntoh()
   [all …]

/Linux-v5.4/drivers/infiniband/sw/rdmavt/

trace_tx.h
    91  TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge),
    92  TP_ARGS(qp, wqe, wr_num_sge),
    96  __field(struct rvt_swqe *, wqe)
   115  __entry->wqe = wqe;
   116  __entry->wr_id = wqe->wr.wr_id;
   119  __entry->psn = wqe->psn;
   120  __entry->lpsn = wqe->lpsn;
   121  __entry->length = wqe->length;
   122  __entry->opcode = wqe->wr.opcode;
   128  __entry->ssn = wqe->ssn;
   [all …]

qp.c
   627  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in rvt_clear_mr_refs() local
   629  rvt_put_qp_swqe(qp, wqe);  in rvt_clear_mr_refs()
   657  static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)  in rvt_swqe_has_lkey() argument
   661  for (i = 0; i < wqe->wr.num_sge; i++) {  in rvt_swqe_has_lkey()
   662  struct rvt_sge *sge = &wqe->sg_list[i];  in rvt_swqe_has_lkey()
   680  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);  in rvt_qp_sends_has_lkey() local
   682  if (rvt_swqe_has_lkey(wqe, lkey))  in rvt_qp_sends_has_lkey()
   990  struct rvt_swqe *wqe;  in free_ud_wq_attr() local
   994  wqe = rvt_get_swqe_ptr(qp, i);  in free_ud_wq_attr()
   995  kfree(wqe->ud_wr.attr);  in free_ud_wq_attr()
   [all …]

/Linux-v5.4/drivers/infiniband/hw/hfi1/

rc.c
   435  struct rvt_swqe *wqe;  in hfi1_make_rc_req() local
   490  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in hfi1_make_rc_req()
   491  hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?  in hfi1_make_rc_req()
   510  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  in hfi1_make_rc_req()
   536  if ((wqe->wr.send_flags & IB_SEND_FENCE) &&  in hfi1_make_rc_req()
   538  (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||  in hfi1_make_rc_req()
   547  if (wqe->wr.opcode == IB_WR_REG_MR ||  in hfi1_make_rc_req()
   548  wqe->wr.opcode == IB_WR_LOCAL_INV) {  in hfi1_make_rc_req()
   558  if (!(wqe->wr.send_flags &  in hfi1_make_rc_req()
   562  wqe->wr.ex.invalidate_rkey);  in hfi1_make_rc_req()
   [all …]

uc.c
    67  struct rvt_swqe *wqe;  in hfi1_make_uc_req() local
    90  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in hfi1_make_uc_req()
    91  rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);  in hfi1_make_uc_req()
   113  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  in hfi1_make_uc_req()
   129  if (wqe->wr.opcode == IB_WR_REG_MR ||  in hfi1_make_uc_req()
   130  wqe->wr.opcode == IB_WR_LOCAL_INV) {  in hfi1_make_uc_req()
   138  if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {  in hfi1_make_uc_req()
   140  qp, wqe->wr.ex.invalidate_rkey);  in hfi1_make_uc_req()
   143  rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR  in hfi1_make_uc_req()
   152  qp->s_psn = wqe->psn;  in hfi1_make_uc_req()
   [all …]

tid_rdma.h
   214  void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
   221  static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)  in trdma_clean_swqe() argument
   223  if (!wqe->priv)  in trdma_clean_swqe()
   225  __trdma_clean_swqe(qp, wqe);  in trdma_clean_swqe()
   244  u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
   247  u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
   258  void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
   261  bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);
   263  void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
   265  struct rvt_swqe *wqe)  in hfi1_setup_tid_rdma_wqe() argument
   [all …]

tid_rdma.c
   377  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);  in hfi1_qp_priv_init() local
   385  priv->tid_req.e.swqe = wqe;  in hfi1_qp_priv_init()
   386  wqe->priv = priv;  in hfi1_qp_priv_init()
   415  struct rvt_swqe *wqe;  in hfi1_qp_priv_tid_free() local
   420  wqe = rvt_get_swqe_ptr(qp, i);  in hfi1_qp_priv_tid_free()
   421  kfree(wqe->priv);  in hfi1_qp_priv_tid_free()
   422  wqe->priv = NULL;  in hfi1_qp_priv_tid_free()
  1614  void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)  in __trdma_clean_swqe()
  1616  struct hfi1_swqe_priv *p = wqe->priv;  in __trdma_clean_swqe()
  1696  u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,  in hfi1_build_tid_rdma_read_packet()
   [all …]

/Linux-v5.4/drivers/infiniband/hw/i40iw/

i40iw_ctrl.c
    51  void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)  in i40iw_insert_wqe_hdr() argument
    54  set_64bit_val(wqe, 24, header);  in i40iw_insert_wqe_hdr()
   607  u64 *wqe = NULL;  in i40iw_sc_cqp_get_next_send_wqe_idx() local
   627  wqe = cqp->sq_base[*wqe_idx].elem;  in i40iw_sc_cqp_get_next_send_wqe_idx()
   629  I40IW_CQP_INIT_WQE(wqe);  in i40iw_sc_cqp_get_next_send_wqe_idx()
   631  return wqe;  in i40iw_sc_cqp_get_next_send_wqe_idx()
   835  u64 *wqe;  in i40iw_sc_manage_push_page() local
   841  wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);  in i40iw_sc_manage_push_page()
   842  if (!wqe)  in i40iw_sc_manage_push_page()
   845  set_64bit_val(wqe, 16, info->qs_handle);  in i40iw_sc_manage_push_page()
   [all …]

i40iw_uk.c
    49  u64 header, *wqe;  in i40iw_nop_1() local
    58  wqe = qp->sq_base[wqe_idx].elem;  in i40iw_nop_1()
    69  set_64bit_val(wqe, 0, 0);  in i40iw_nop_1()
    70  set_64bit_val(wqe, 8, 0);  in i40iw_nop_1()
    71  set_64bit_val(wqe, 16, 0);  in i40iw_nop_1()
    79  set_64bit_val(wqe, 24, header);  in i40iw_nop_1()
   141  u64 *wqe = NULL;  in i40iw_qp_get_next_send_wqe() local
   183  wqe = qp->sq_base[*wqe_idx].elem;  in i40iw_qp_get_next_send_wqe()
   196  return wqe;  in i40iw_qp_get_next_send_wqe()
   205  static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)  in i40iw_set_fragment() argument
   [all …]
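
Both i40iw entries build WQEs by writing 64-bit values at fixed byte offsets with set_64bit_val(), and the header quadword at offset 24 is written last. The sketch below shows a standalone helper in that style; the real driver also deals with device endianness and write ordering, which this version glosses over by assuming a little-endian host.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Write a 64-bit value at a byte offset into a descriptor buffer.
 * Sketch of a set_64bit_val()-style helper; offsets are quadword-aligned. */
static void set_64bit_val(uint64_t *wqe, uint32_t byte_off, uint64_t val)
{
    memcpy((uint8_t *)wqe + byte_off, &val, sizeof(val)); /* little-endian host assumed */
}

int main(void)
{
    uint64_t wqe[4] = { 0 };
    uint64_t header = 0x1234;

    set_64bit_val(wqe, 0, 0);
    set_64bit_val(wqe, 8, 0);
    set_64bit_val(wqe, 16, 0);
    set_64bit_val(wqe, 24, header); /* header goes into the last quadword */

    printf("wqe[3] = 0x%llx\n", (unsigned long long)wqe[3]);
    return 0;
}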

i40iw_vf.c
    56  u64 *wqe;  in i40iw_manage_vf_pble_bp() local
    59  wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);  in i40iw_manage_vf_pble_bp()
    60  if (!wqe)  in i40iw_manage_vf_pble_bp()
    66  set_64bit_val(wqe, 16, temp);  in i40iw_manage_vf_pble_bp()
    71  set_64bit_val(wqe, 24, header);  in i40iw_manage_vf_pble_bp()
    74  set_64bit_val(wqe, 32, pd_pl_pba);  in i40iw_manage_vf_pble_bp()
    76  i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8);  in i40iw_manage_vf_pble_bp()

/Linux-v5.4/drivers/scsi/lpfc/

lpfc_nvme.c
    76  union lpfc_wqe128 *wqe;  in lpfc_nvme_cmd_template() local
    79  wqe = &lpfc_iread_cmd_template;  in lpfc_nvme_cmd_template()
    80  memset(wqe, 0, sizeof(union lpfc_wqe128));  in lpfc_nvme_cmd_template()
    93  bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);  in lpfc_nvme_cmd_template()
    94  bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);  in lpfc_nvme_cmd_template()
    95  bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);  in lpfc_nvme_cmd_template()
    96  bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);  in lpfc_nvme_cmd_template()
   103  bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);  in lpfc_nvme_cmd_template()
   104  bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);  in lpfc_nvme_cmd_template()
   105  bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);  in lpfc_nvme_cmd_template()
   [all …]

lpfc_nvmet.c
    88  union lpfc_wqe128 *wqe;  in lpfc_nvmet_cmd_template() local
    91  wqe = &lpfc_tsend_cmd_template;  in lpfc_nvmet_cmd_template()
    92  memset(wqe, 0, sizeof(union lpfc_wqe128));  in lpfc_nvmet_cmd_template()
   105  bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);  in lpfc_nvmet_cmd_template()
   106  bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);  in lpfc_nvmet_cmd_template()
   107  bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);  in lpfc_nvmet_cmd_template()
   108  bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);  in lpfc_nvmet_cmd_template()
   109  bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);  in lpfc_nvmet_cmd_template()
   116  bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);  in lpfc_nvmet_cmd_template()
   117  bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);  in lpfc_nvmet_cmd_template()
   [all …]
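
The lpfc templates above are filled in field by field with bf_set(), which updates one named bitfield inside a 32-bit word of the WQE. The sketch below shows the generic shift-and-mask pattern behind such helpers; the SHIFT/MASK/WORD constants and the plain array-of-words layout are hypothetical, not lpfc's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field description: an 8-bit command field in word 7. */
#define wqe_cmnd_SHIFT 0
#define wqe_cmnd_MASK  0xffu
#define wqe_cmnd_WORD  7

/* Clear the field in its word, then OR in the masked, shifted value. */
#define bf_set(name, words, value)                                     \
    ((words)[name##_WORD] =                                            \
         ((words)[name##_WORD] & ~(name##_MASK << name##_SHIFT)) |     \
         (((uint32_t)(value) & name##_MASK) << name##_SHIFT))

int main(void)
{
    uint32_t wqe_com[16] = { 0 };

    bf_set(wqe_cmnd, wqe_com, 0x9a); /* e.g. a command opcode */
    printf("word 7 = 0x%08x\n", (unsigned)wqe_com[7]);
    return 0;
}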

/Linux-v5.4/drivers/infiniband/hw/qib/

qib_rc.c
    42  static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,  in restart_sge() argument
    47  len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;  in restart_sge()
    48  return rvt_restart_sge(ss, wqe, len);  in restart_sge()
   221  struct rvt_swqe *wqe;  in qib_make_rc_req() local
   251  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in qib_make_rc_req()
   252  rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?  in qib_make_rc_req()
   275  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  in qib_make_rc_req()
   296  if ((wqe->wr.send_flags & IB_SEND_FENCE) &&  in qib_make_rc_req()
   302  qp->s_psn = wqe->psn;  in qib_make_rc_req()
   309  len = wqe->length;  in qib_make_rc_req()
   [all …]

qib_uc.c
    52  struct rvt_swqe *wqe;  in qib_make_uc_req() local
    70  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in qib_make_uc_req()
    71  rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);  in qib_make_uc_req()
    84  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  in qib_make_uc_req()
    97  qp->s_psn = wqe->psn;  in qib_make_uc_req()
    98  qp->s_sge.sge = wqe->sg_list[0];  in qib_make_uc_req()
    99  qp->s_sge.sg_list = wqe->sg_list + 1;  in qib_make_uc_req()
   100  qp->s_sge.num_sge = wqe->wr.num_sge;  in qib_make_uc_req()
   101  qp->s_sge.total_len = wqe->length;  in qib_make_uc_req()
   102  len = wqe->length;  in qib_make_uc_req()
   [all …]

qib_ud.c
   237  struct rvt_swqe *wqe;  in qib_make_ud_req() local
   257  wqe = rvt_get_swqe_ptr(qp, qp->s_last);  in qib_make_ud_req()
   258  rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);  in qib_make_ud_req()
   266  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);  in qib_make_ud_req()
   274  ah_attr = rvt_get_swqe_ah_attr(wqe);  in qib_make_ud_req()
   299  qib_ud_loopback(qp, wqe);  in qib_make_ud_req()
   302  rvt_send_complete(qp, wqe, IB_WC_SUCCESS);  in qib_make_ud_req()
   308  extra_bytes = -wqe->length & 3;  in qib_make_ud_req()
   309  nwords = (wqe->length + extra_bytes) >> 2;  in qib_make_ud_req()
   313  qp->s_cur_size = wqe->length;  in qib_make_ud_req()
   [all …]
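
Lines 308-309 above pad the UD payload to a 4-byte boundary and convert it to a count of 32-bit words: "-length & 3" gives the number of pad bytes needed. A short standalone check of that identity:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    for (unsigned int len = 0; len < 64; len++) {
        unsigned int extra_bytes = -len & 3;            /* pad bytes to reach a 4-byte boundary */
        unsigned int nwords = (len + extra_bytes) >> 2; /* padded length in 32-bit words */

        assert(extra_bytes < 4);
        assert((len + extra_bytes) % 4 == 0);
        if (len <= 8)
            printf("len=%u pad=%u words=%u\n", len, extra_bytes, nwords);
    }
    return 0;
}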

/Linux-v5.4/drivers/infiniband/hw/cxgb4/

qp.c
   488  static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,  in build_rdma_send() argument
   500  wqe->send.sendop_pkd = cpu_to_be32(  in build_rdma_send()
   503  wqe->send.sendop_pkd = cpu_to_be32(  in build_rdma_send()
   505  wqe->send.stag_inv = 0;  in build_rdma_send()
   509  wqe->send.sendop_pkd = cpu_to_be32(  in build_rdma_send()
   512  wqe->send.sendop_pkd = cpu_to_be32(  in build_rdma_send()
   514  wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);  in build_rdma_send()
   520  wqe->send.r3 = 0;  in build_rdma_send()
   521  wqe->send.r4 = 0;  in build_rdma_send()
   526  ret = build_immd(sq, wqe->send.u.immd_src, wr,  in build_rdma_send()
   [all …]

/Linux-v5.4/drivers/infiniband/hw/bnxt_re/

ib_verbs.c
   409  struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;  in bnxt_re_create_fence_wqe() local
   411  memset(wqe, 0, sizeof(*wqe));  in bnxt_re_create_fence_wqe()
   412  wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;  in bnxt_re_create_fence_wqe()
   413  wqe->wr_id = BNXT_QPLIB_FENCE_WRID;  in bnxt_re_create_fence_wqe()
   414  wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;  in bnxt_re_create_fence_wqe()
   415  wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;  in bnxt_re_create_fence_wqe()
   416  wqe->bind.zero_based = false;  in bnxt_re_create_fence_wqe()
   417  wqe->bind.parent_l_key = ib_mr->lkey;  in bnxt_re_create_fence_wqe()
   418  wqe->bind.va = (u64)(unsigned long)fence->va;  in bnxt_re_create_fence_wqe()
   419  wqe->bind.length = fence->size;  in bnxt_re_create_fence_wqe()
   [all …]

/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/en_accel/

ktls_tx.c
    52  build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,  in build_static_params() argument
    56  struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;  in build_static_params()
    57  struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;  in build_static_params()
    72  fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);  in build_static_params()
    86  build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,  in build_progress_params() argument
    90  struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;  in build_progress_params()
   102  fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);  in build_progress_params()
   151  struct mlx5e_tx_wqe *wqe;  in post_progress_params() local
   154  wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);  in post_progress_params()
   155  build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);  in post_progress_params()
   [all …]

/Linux-v5.4/drivers/infiniband/hw/mthca/

mthca_srq.c
    92  static inline int *wqe_to_link(void *wqe)  in wqe_to_link() argument
    94  return (int *) (wqe + offsetof(struct mthca_next_seg, imm));  in wqe_to_link()
   158  void *wqe;  in mthca_alloc_srq_buf() local
   185  next = wqe = get_wqe(srq, i);  in mthca_alloc_srq_buf()
   188  *wqe_to_link(wqe) = i + 1;  in mthca_alloc_srq_buf()
   191  *wqe_to_link(wqe) = -1;  in mthca_alloc_srq_buf()
   195  for (scatter = wqe + sizeof (struct mthca_next_seg);  in mthca_alloc_srq_buf()
   196  (void *) scatter < wqe + (1 << srq->wqe_shift);  in mthca_alloc_srq_buf()
   495  void *wqe;  in mthca_tavor_post_srq_recv() local
   504  wqe = get_wqe(srq, ind);  in mthca_tavor_post_srq_recv()
   [all …]
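
mthca keeps free SRQ WQEs on a free list threaded through the WQE buffers themselves: wqe_to_link() points at an otherwise-unused field in each entry that stores the index of the next free entry, with -1 terminating the list. The standalone sketch below shows the same scheme with a simplified layout; here the link lives in the first word of each entry rather than in struct mthca_next_seg's imm field.

#include <stdio.h>

#define NUM_WQE   8
#define WQE_WORDS 16 /* 64-byte entries, viewed as 32-bit words */

static int srq_buf[NUM_WQE][WQE_WORDS];

/* Where the free-list link is stored inside an entry (first word here). */
static int *wqe_to_link(void *wqe)
{
    return (int *)wqe;
}

int main(void)
{
    int first_free = 0;

    /* Thread the free list through the entries; -1 marks the end. */
    for (int i = 0; i < NUM_WQE; i++)
        *wqe_to_link(srq_buf[i]) = (i < NUM_WQE - 1) ? i + 1 : -1;

    /* Pop two entries, as posting two receives would. */
    for (int n = 0; n < 2; n++) {
        int ind = first_free;
        first_free = *wqe_to_link(srq_buf[ind]);
        printf("using wqe %d, next free is %d\n", ind, first_free);
    }
    return 0;
}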