
Searched refs:cqe (Results 1 – 25 of 156) sorted by relevance


/Linux-v4.19/drivers/infiniband/hw/mthca/
mthca_cq.c
174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) in cqe_sw() argument
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; in cqe_sw()
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) in set_cqe_hw() argument
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; in set_cqe_hw()
191 __be32 *cqe = cqe_ptr; in dump_cqe() local
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */ in dump_cqe()
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), in dump_cqe()
196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), in dump_cqe()
197 be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); in dump_cqe()
[all …]
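
The mthca hits above show the classic ownership-bit handshake: a CQE belongs to software only while MTHCA_CQ_ENTRY_OWNER_HW is clear, and set_cqe_hw() hands the slot back by setting the bit again; next_cqe_sw() can mask the consumer index with ibcq.cqe because the ring size is a power of two. A minimal standalone sketch of the pattern (the 0x80 bit position follows the driver; the struct is illustrative):

    #include <stddef.h>
    #include <stdint.h>

    #define OWNER_HW 0x80              /* models MTHCA_CQ_ENTRY_OWNER_HW */

    struct cqe {
        uint8_t owner;                 /* top bit set: hardware owns the entry */
        /* ... completion payload ... */
    };

    /* Return the entry if software owns it, NULL while hardware still does. */
    static struct cqe *cqe_sw(struct cqe *cqe)
    {
        return (cqe->owner & OWNER_HW) ? NULL : cqe;
    }

    /* Give the slot back to hardware once the completion is consumed. */
    static void set_cqe_hw(struct cqe *cqe)
    {
        cqe->owner = OWNER_HW;
    }
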
/Linux-v4.19/drivers/infiniband/hw/cxgb3/
iwch_ev.c
52 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); in post_qp_event()
56 __func__, CQE_STATUS(rsp_msg->cqe), in post_qp_event()
57 CQE_QPID(rsp_msg->cqe)); in post_qp_event()
67 CQE_STATUS(rsp_msg->cqe)); in post_qp_event()
74 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe), in post_qp_event()
75 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe), in post_qp_event()
76 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe)); in post_qp_event()
119 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); in iwch_ev_dispatch()
122 cqid, CQE_QPID(rsp_msg->cqe), in iwch_ev_dispatch()
123 CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe), in iwch_ev_dispatch()
[all …]
iwch_cq.c
39 struct t3_cqe cqe; in __iwch_poll_cq_one() local
45 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, in __iwch_poll_cq_one()
61 wc->vendor_err = CQE_STATUS(cqe); in __iwch_poll_cq_one()
66 CQE_QPID(cqe), CQE_TYPE(cqe), in __iwch_poll_cq_one()
67 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe), in __iwch_poll_cq_one()
68 CQE_WRID_LOW(cqe), (unsigned long long)cookie); in __iwch_poll_cq_one()
70 if (CQE_TYPE(cqe) == 0) { in __iwch_poll_cq_one()
71 if (!CQE_STATUS(cqe)) in __iwch_poll_cq_one()
72 wc->byte_len = CQE_LEN(cqe); in __iwch_poll_cq_one()
76 if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV || in __iwch_poll_cq_one()
[all …]
cxio_hal.c
75 struct t3_cqe *cqe; in cxio_hal_cq_op() local
109 cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2); in cxio_hal_cq_op()
110 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { in cxio_hal_cq_op()
350 struct t3_cqe cqe; in insert_recv_cqe() local
354 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
355 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) | in insert_recv_cqe()
362 *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe; in insert_recv_cqe()
387 struct t3_cqe cqe; in insert_sq_cqe() local
391 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
392 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) | in insert_sq_cqe()
[all …]
cxio_wr.h
675 struct t3_cqe cqe; member
728 #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \ argument
729 CQE_GENBIT(*cqe))
769 struct t3_cqe *cqe; in cxio_next_hw_cqe() local
771 cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2)); in cxio_next_hw_cqe()
772 if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe)) in cxio_next_hw_cqe()
773 return cqe; in cxio_next_hw_cqe()
779 struct t3_cqe *cqe; in cxio_next_sw_cqe() local
782 cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2)); in cxio_next_sw_cqe()
783 return cqe; in cxio_next_sw_cqe()
[all …]
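
cxio_next_hw_cqe() and the CQ_VLD_ENTRY macro rest on generation-bit validity: the bit stamped into each CQE must match the generation implied by the read pointer, and that bit flips every time the ring wraps, so stale entries from the previous lap never test valid. The arithmetic, assuming a power-of-two ring (a sketch, not the driver's macros):

    #include <stdbool.h>
    #include <stdint.h>

    struct cqe { uint8_t gen; /* ... */ };

    /* Slot index within a ring of 2^size_log2 entries. */
    static inline uint32_t ptr2idx(uint32_t ptr, unsigned size_log2)
    {
        return ptr & ((1u << size_log2) - 1);
    }

    /* Generation bit: flips each time ptr crosses a ring boundary. */
    static inline int genbit(uint32_t ptr, unsigned size_log2)
    {
        return (ptr >> size_log2) & 1;
    }

    static inline bool cq_vld_entry(uint32_t rptr, unsigned size_log2,
                                    const struct cqe *cqe)
    {
        return genbit(rptr, size_log2) == cqe->gen;
    }
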
/Linux-v4.19/drivers/infiniband/hw/mlx4/
cq.c
80 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
81 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe); in get_sw_cqe()
84 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
132 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) in mlx4_ib_free_cq_buf() argument
134 mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf); in mlx4_ib_free_cq_buf()
139 u64 buf_addr, int cqe) in mlx4_ib_get_cq_umem() argument
146 *umem = ib_umem_get(context, buf_addr, cqe * cqe_size, in mlx4_ib_get_cq_umem()
179 int entries = attr->cqe; in mlx4_ib_create_cq()
197 cq->ibcq.cqe = entries - 1; in mlx4_ib_create_cq()
280 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mlx4_ib_create_cq()
[all …]
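
mlx4 avoids writing an owner bit back after consuming an entry: get_sw_cqe() compares the bit hardware wrote against the lap parity of the consumer index n. Because ibcq.cqe stores nent - 1 for a power-of-two ring, n & ibcq.cqe is the slot and n & (ibcq.cqe + 1) is the parity; the tcqe line handles 64-byte entries, where the ownership byte sits in the second 32-byte half. A simplified model (owner-bit position assumed):

    #include <stddef.h>
    #include <stdint.h>

    #define OWNER_MASK 0x80            /* assumed owner-bit position */

    struct cqe { uint8_t op_own; /* ... */ };

    /* ibcq_cqe == nent - 1, with nent a power of two. */
    static struct cqe *get_sw_cqe(struct cqe *ring, int ibcq_cqe, uint32_t n)
    {
        struct cqe *cqe = &ring[n & ibcq_cqe];
        int owner  = !!(cqe->op_own & OWNER_MASK);
        int parity = !!(n & (ibcq_cqe + 1));

        /* Software owns the entry when the owner bit matches the lap parity. */
        return (owner ^ parity) ? NULL : cqe;
    }
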
/Linux-v4.19/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
82 cq->ibcq.cqe, &head); in pvrdma_req_notify_cq()
107 int entries = attr->cqe; in pvrdma_create_cq()
135 cq->ibcq.cqe = entries; in pvrdma_create_cq()
190 cmd->cqe = entries; in pvrdma_create_cq()
199 cq->ibcq.cqe = resp->cqe; in pvrdma_create_cq()
299 cq->ibcq.cqe, &head); in _pvrdma_flush_cqe()
304 cq->ibcq.cqe); in _pvrdma_flush_cqe()
305 struct pvrdma_cqe *cqe; in _pvrdma_flush_cqe() local
309 (cq->ibcq.cqe - head + tail); in _pvrdma_flush_cqe()
313 curr = cq->ibcq.cqe - 1; in _pvrdma_flush_cqe()
[all …]
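
The expression at line 309 of _pvrdma_flush_cqe() is plain ring occupancy: when the tail has wrapped past the end, the count is size - head + tail. As a sketch, with indices assumed already reduced modulo the ring size:

    /* Occupied slots in a ring of `size` entries, given head, tail < size. */
    static unsigned ring_count(unsigned head, unsigned tail, unsigned size)
    {
        return (tail >= head) ? tail - head : size - head + tail;
    }
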
/Linux-v4.19/drivers/infiniband/sw/rxe/
rxe_cq.c
39 int cqe, int comp_vector) in rxe_cq_chk_attr() argument
43 if (cqe <= 0) { in rxe_cq_chk_attr()
44 pr_warn("cqe(%d) <= 0\n", cqe); in rxe_cq_chk_attr()
48 if (cqe > rxe->attr.max_cqe) { in rxe_cq_chk_attr()
50 cqe, rxe->attr.max_cqe); in rxe_cq_chk_attr()
56 if (cqe < count) { in rxe_cq_chk_attr()
58 cqe, count); in rxe_cq_chk_attr()
84 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, in rxe_cq_from_init() argument
90 cq->queue = rxe_queue_init(rxe, &cqe, in rxe_cq_from_init()
113 cq->ibcq.cqe = cqe; in rxe_cq_from_init()
[all …]
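
rxe_cq_chk_attr() is the validation gate for both create and resize: the requested depth must be positive, within the device's advertised max_cqe, and on resize no smaller than the completions still queued. A standalone rendering of the same checks:

    #include <stdio.h>

    /* Returns 0 if the requested CQ depth is acceptable, -1 otherwise. */
    static int cq_chk_attr(int cqe, int max_cqe, int queued)
    {
        if (cqe <= 0) {
            fprintf(stderr, "cqe(%d) <= 0\n", cqe);
            return -1;
        }
        if (cqe > max_cqe) {
            fprintf(stderr, "cqe(%d) > max_cqe(%d)\n", cqe, max_cqe);
            return -1;
        }
        /* On resize, don't shrink below what is still sitting in the queue. */
        if (cqe < queued) {
            fprintf(stderr, "cqe(%d) < current count(%d)\n", cqe, queued);
            return -1;
        }
        return 0;
    }
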
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
88 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci); in mlx5e_cqes_update_owner() local
90 cqe->op_own = op_own; in mlx5e_cqes_update_owner()
96 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci); in mlx5e_cqes_update_owner() local
98 cqe->op_own = op_own; in mlx5e_cqes_update_owner()
549 struct mlx5_cqe64 *cqe) in mlx5e_poll_ico_single_cqe() argument
552 u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); in mlx5e_poll_ico_single_cqe()
557 if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) { in mlx5e_poll_ico_single_cqe()
559 "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own); in mlx5e_poll_ico_single_cqe()
576 struct mlx5_cqe64 *cqe; in mlx5e_poll_ico_cq() local
581 cqe = mlx5_cqwq_get_cqe(&cq->wq); in mlx5e_poll_ico_cq()
[all …]
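
The ICOSQ poll above pulls the opcode out of the top nibble of op_own ((cqe->op_own >> 4) != MLX5_CQE_REQ); the same byte carries the ownership bit that mlx5e_cqes_update_owner() rewrites when recycling entries. Minimal extractors for that packing (the MLX5_CQE_REQ value here is assumed for illustration):

    #include <stdint.h>

    #define CQE_REQ 0                  /* assumed opcode value, for illustration */

    /* op_own packs the opcode in bits 7:4 and the owner bit in bit 0. */
    static inline uint8_t cqe_opcode(uint8_t op_own) { return op_own >> 4; }
    static inline uint8_t cqe_owner(uint8_t op_own)  { return op_own & 1; }

    static inline int cqe_is_req(uint8_t op_own)
    {
        return cqe_opcode(op_own) == CQE_REQ;
    }
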
/Linux-v4.19/drivers/infiniband/hw/cxgb4/
cq.c
187 struct t4_cqe cqe; in insert_recv_cqe() local
191 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
192 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_recv_cqe()
197 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); in insert_recv_cqe()
199 cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx); in insert_recv_cqe()
200 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe()
221 struct t4_cqe cqe; in insert_sq_cqe() local
225 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
226 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_sq_cqe()
231 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; in insert_sq_cqe()
[all …]
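
insert_recv_cqe()/insert_sq_cqe() synthesize completions entirely in software: when a QP is flushed, the driver fabricates CQEs with status SWFLUSH, stamps them with the CQ's current generation bit, and appends them to the software queue so poll reports every outstanding WR as flushed. A reduced model (the field packing below is illustrative, not the t4 layout):

    #include <stdint.h>
    #include <string.h>

    #define SW_QUEUE_DEPTH 64
    #define ERR_SWFLUSH    5           /* illustrative status code */

    struct cqe {
        uint32_t header;               /* status | qpid, packed (illustrative) */
        uint64_t bits_type_ts;         /* carries the generation bit */
    };

    struct cq {
        struct cqe sw_queue[SW_QUEUE_DEPTH];
        uint16_t sw_pidx;
        int gen;
    };

    /* Fabricate a "flushed" completion so poll can complete a dead WR. */
    static void insert_flush_cqe(struct cq *cq, uint32_t qpid)
    {
        struct cqe cqe;

        memset(&cqe, 0, sizeof(cqe));
        cqe.header = (ERR_SWFLUSH << 24) | (qpid & 0xffffff);
        cqe.bits_type_ts = (uint64_t)(cq->gen & 1) << 63;  /* genbit stamp */
        cq->sw_queue[cq->sw_pidx] = cqe;
        cq->sw_pidx = (cq->sw_pidx + 1) % SW_QUEUE_DEPTH;
    }
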
/Linux-v4.19/drivers/net/ethernet/mellanox/mlxsw/
pci_hw.h
107 static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
112 return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
114 return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
116 return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
120 char *cqe, u32 val) \
125 mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
128 mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
131 mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
149 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
160 MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
[all …]
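
pci_hw.h generates, per field, a version dispatcher: MLXSW_ITEM32 emits fixed-layout accessors for each CQE format (v0/v1/v2), and the macro above wraps them in one function that switches on the format actually negotiated. A reduced model of the token-pasting trick (offsets are purely illustrative):

    #include <stdint.h>

    enum cqe_v { CQE_V0, CQE_V1, CQE_V2 };

    /* Per-version accessors; each format stores the field elsewhere. */
    static inline uint32_t cqe0_wqe_counter_get(const char *cqe)
    { return ((uint8_t)cqe[4] << 8) | (uint8_t)cqe[5]; }
    static inline uint32_t cqe1_wqe_counter_get(const char *cqe)
    { return ((uint8_t)cqe[6] << 8) | (uint8_t)cqe[7]; }
    static inline uint32_t cqe2_wqe_counter_get(const char *cqe)
    { return ((uint8_t)cqe[8] << 8) | (uint8_t)cqe[9]; }

    /* One dispatcher per field, generated by token pasting. */
    #define CQE_ITEM_GET(name)                                             \
    static inline uint32_t cqe_##name##_get(enum cqe_v v, const char *cqe) \
    {                                                                      \
        switch (v) {                                                       \
        case CQE_V0: return cqe0_##name##_get(cqe);                        \
        case CQE_V1: return cqe1_##name##_get(cqe);                        \
        default:     return cqe2_##name##_get(cqe);                        \
        }                                                                  \
    }

    CQE_ITEM_GET(wqe_counter)   /* defines cqe_wqe_counter_get() */
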
/Linux-v4.19/drivers/infiniband/hw/mlx5/
cq.c
79 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
82 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
85 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
86 return cqe; in get_sw_cqe()
115 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req() argument
119 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { in handle_good_req()
135 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_good_req()
164 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_responder() argument
181 be32_to_cpu(cqe->srqn)); in handle_responder()
187 wqe_ctr = be16_to_cpu(cqe->wqe_counter); in handle_responder()
[all …]
/Linux-v4.19/drivers/infiniband/sw/rdmavt/
cq.c
79 if (head >= (unsigned)cq->ibcq.cqe) { in rvt_cq_enter()
80 head = cq->ibcq.cqe; in rvt_cq_enter()
189 unsigned int entries = attr->cqe; in rvt_create_cq()
278 cq->ibcq.cqe = entries; in rvt_create_cq()
364 int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in rvt_resize_cq() argument
374 if (cqe < 1 || cqe > rdi->dparms.props.max_cqe) in rvt_resize_cq()
382 sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); in rvt_resize_cq()
384 sz += sizeof(struct ib_wc) * (cqe + 1); in rvt_resize_cq()
407 if (head > (u32)cq->ibcq.cqe) in rvt_resize_cq()
408 head = (u32)cq->ibcq.cqe; in rvt_resize_cq()
[all …]
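
Two details are visible in the rdmavt hits: the queue is sized with one slot more than the requested depth (sizeof(wc) * (cqe + 1)), the standard trick that lets head == tail mean "empty" while still holding cqe completions, and head is clamped before use because it may be read back from user-mapped memory. A sketch of the producer side under those conventions:

    #include <stdint.h>

    struct sw_cq {
        uint32_t head;   /* next slot to fill */
        uint32_t tail;   /* next slot to drain */
        uint32_t cqe;    /* requested depth; ring has cqe + 1 slots */
    };

    /* Returns the slot to write, or -1 if the CQ is full (overflow). */
    static int cq_enter_slot(struct sw_cq *cq)
    {
        uint32_t head = cq->head;
        uint32_t next;

        if (head > cq->cqe)       /* defensive clamp, as in rvt_cq_enter() */
            head = cq->cqe;
        next = (head == cq->cqe) ? 0 : head + 1;
        if (next == cq->tail)     /* full: would catch up with the consumer */
            return -1;
        cq->head = next;
        return (int)head;
    }
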
/Linux-v4.19/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
1380 struct cq_req *cqe = (struct cq_req *)hw_cqe; in __clean_cq() local
1382 if (qp == le64_to_cpu(cqe->qp_handle)) in __clean_cq()
1383 cqe->qp_handle = 0; in __clean_cq()
1390 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe; in __clean_cq() local
1392 if (qp == le64_to_cpu(cqe->qp_handle)) in __clean_cq()
1393 cqe->qp_handle = 0; in __clean_cq()
2005 struct bnxt_qplib_cqe *cqe; in __flush_sq() local
2010 cqe = *pcqe; in __flush_sq()
2021 memset(cqe, 0, sizeof(*cqe)); in __flush_sq()
2022 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR; in __flush_sq()
[all …]
/Linux-v4.19/drivers/net/ethernet/qlogic/qede/
qede_fp.c
635 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_set_gro_params() argument
637 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); in qede_set_gro_params()
645 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - in qede_set_gro_params()
646 cqe->header_len; in qede_set_gro_params()
816 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_tpa_start() argument
818 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_start()
823 pad = cqe->placement_offset + rxq->rx_headroom; in qede_tpa_start()
826 le16_to_cpu(cqe->len_on_first_bd), in qede_tpa_start()
847 if ((le16_to_cpu(cqe->pars_flags.flags) >> in qede_tpa_start()
850 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in qede_tpa_start()
[all …]
qede_ptp.h
48 union eth_rx_cqe *cqe, in qede_ptp_record_rx_ts() argument
52 if (unlikely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) & in qede_ptp_record_rx_ts()
54 if (likely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) in qede_ptp_record_rx_ts()
/Linux-v4.19/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c
330 cqe_size = wq->q_depth * sizeof(*rq->cqe); in alloc_rq_cqe()
331 rq->cqe = vzalloc(cqe_size); in alloc_rq_cqe()
332 if (!rq->cqe) in alloc_rq_cqe()
341 rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, in alloc_rq_cqe()
342 sizeof(*rq->cqe[i]), in alloc_rq_cqe()
344 if (!rq->cqe[i]) in alloc_rq_cqe()
352 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], in alloc_rq_cqe()
358 vfree(rq->cqe); in alloc_rq_cqe()
374 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i], in free_rq_cqe()
378 vfree(rq->cqe); in free_rq_cqe()
[all …]
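
alloc_rq_cqe() shows the standard two-level allocation with goto-style unwind: a table of per-CQE DMA buffers, and on any per-entry failure everything allocated so far is released in reverse before the table itself. A userspace model of the same shape (plain calloc/free standing in for the DMA-coherent kernel calls):

    #include <stdlib.h>

    struct rq {
        void **cqe;
        int depth;
    };

    static int alloc_rq_cqe(struct rq *rq, int depth, size_t cqe_size)
    {
        int i, j;

        rq->cqe = calloc(depth, sizeof(*rq->cqe));
        if (!rq->cqe)
            return -1;

        for (i = 0; i < depth; i++) {
            rq->cqe[i] = calloc(1, cqe_size);   /* kernel: dma_zalloc_coherent() */
            if (!rq->cqe[i])
                goto err_unwind;
        }
        rq->depth = depth;
        return 0;

    err_unwind:
        for (j = 0; j < i; j++)
            free(rq->cqe[j]);                   /* kernel: dma_free_coherent() */
        free(rq->cqe);
        rq->cqe = NULL;
        return -1;
    }
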
/Linux-v4.19/drivers/scsi/qedi/
qedi_fw.c
34 union iscsi_cqe *cqe, in qedi_process_logout_resp() argument
45 cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response; in qedi_process_logout_resp()
53 resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); in qedi_process_logout_resp()
84 union iscsi_cqe *cqe, in qedi_process_text_resp() argument
99 cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response; in qedi_process_text_resp()
111 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, in qedi_process_text_resp()
186 union iscsi_cqe *cqe, in qedi_process_tmf_resp() argument
198 cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response; in qedi_process_tmf_resp()
222 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, in qedi_process_tmf_resp()
257 union iscsi_cqe *cqe, in qedi_process_login_resp() argument
[all …]
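
qedi's handlers all begin the same way: the ring carries one union iscsi_cqe, each handler overlays the flavor named by the common header (logout, text, TMF, login), and the task id comes from the cqe_solicited view. The union-overlay shape, with illustrative fields:

    #include <stdint.h>

    /* One ring entry type, several overlaid layouts; a common header
     * identifies which member is live (fields are illustrative). */
    union iscsi_cqe {
        struct { uint8_t type; } cqe_common;
        struct { uint8_t type; uint32_t itid; } cqe_solicited;
    };

    static uint32_t cqe_task_id(const union iscsi_cqe *cqe)
    {
        /* Valid only when cqe_common.type marks a solicited completion. */
        return cqe->cqe_solicited.itid;
    }
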
/Linux-v4.19/drivers/infiniband/ulp/iser/
iscsi_iser.h
261 struct ib_cqe cqe; member
293 struct ib_cqe cqe; member
313 struct ib_cqe cqe; member
699 iser_rx(struct ib_cqe *cqe) in iser_rx() argument
701 return container_of(cqe, struct iser_rx_desc, cqe); in iser_rx()
705 iser_tx(struct ib_cqe *cqe) in iser_tx() argument
707 return container_of(cqe, struct iser_tx_desc, cqe); in iser_tx()
711 iser_login(struct ib_cqe *cqe) in iser_login() argument
713 return container_of(cqe, struct iser_login_desc, cqe); in iser_login()
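
The iser header shows the idiom behind wc->wr_cqe: each descriptor embeds a struct ib_cqe, and the completion handler recovers the enclosing descriptor with container_of, so no lookup table is needed. A self-contained illustration of the recovery step:

    #include <stddef.h>

    struct ib_cqe {
        void (*done)(struct ib_cqe *cqe);
    };

    struct rx_desc {
        char data[64];
        struct ib_cqe cqe;   /* embedded completion cookie */
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* From the cqe hardware handed back, find the descriptor it lives in. */
    static struct rx_desc *to_rx_desc(struct ib_cqe *cqe)
    {
        return container_of(cqe, struct rx_desc, cqe);
    }
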
/Linux-v4.19/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
1034 int entries = attr->cqe; in ocrdma_create_cq()
1094 ibcq->cqe = new_cnt; in ocrdma_resize_cq()
1105 struct ocrdma_cqe *cqe = NULL; in ocrdma_flush_cq() local
1107 cqe = cq->va; in ocrdma_flush_cq()
1115 if (is_cqe_valid(cq, cqe)) in ocrdma_flush_cq()
1117 cqe++; in ocrdma_flush_cq()
1668 struct ocrdma_cqe *cqe; in ocrdma_discard_cqes() local
1687 cqe = cq->va + cur_getp; in ocrdma_discard_cqes()
1692 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK; in ocrdma_discard_cqes()
1698 if (is_cqe_for_sq(cqe)) { in ocrdma_discard_cqes()
[all …]
ocrdma.h
498 static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) in is_cqe_valid() argument
501 cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; in is_cqe_valid()
505 static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) in is_cqe_for_sq() argument
507 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_for_sq()
511 static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) in is_cqe_invalidated() argument
513 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_invalidated()
517 static inline int is_cqe_imm(struct ocrdma_cqe *cqe) in is_cqe_imm() argument
519 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_imm()
523 static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) in is_cqe_wr_imm() argument
525 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_wr_imm()
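
ocrdma keeps all of a CQE's flag bits in a single little-endian word and wraps every test in a small predicate, so the le32_to_cpu conversion happens exactly once per check. The shape of those helpers, with assumed bit positions:

    #include <stdbool.h>
    #include <stdint.h>

    #define CQE_VALID  (1u << 31)   /* illustrative bit positions */
    #define CQE_FOR_SQ (1u << 30)

    /* Stand-in for the kernel's le32_to_cpu() on a little-endian host. */
    static inline uint32_t le32_to_cpu_(uint32_t v) { return v; }

    struct cqe { uint32_t flags_status_srcqpn; };

    static inline bool cqe_valid(const struct cqe *cqe)
    {
        return le32_to_cpu_(cqe->flags_status_srcqpn) & CQE_VALID;
    }

    static inline bool cqe_for_sq(const struct cqe *cqe)
    {
        return le32_to_cpu_(cqe->flags_status_srcqpn) & CQE_FOR_SQ;
    }
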
/Linux-v4.19/include/linux/mlx5/
device.h
760 static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) in mlx5_get_cqe_format() argument
762 return (cqe->op_own >> 2) & 0x3; in mlx5_get_cqe_format()
765 static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) in get_cqe_lro_tcppsh() argument
767 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; in get_cqe_lro_tcppsh()
770 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) in get_cqe_l4_hdr_type() argument
772 return (cqe->l4_l3_hdr_type >> 4) & 0x7; in get_cqe_l4_hdr_type()
775 static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) in get_cqe_l3_hdr_type() argument
777 return (cqe->l4_l3_hdr_type >> 2) & 0x3; in get_cqe_l3_hdr_type()
780 static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe) in cqe_is_tunneled() argument
782 return cqe->outer_l3_tunneled & 0x1; in cqe_is_tunneled()
[all …]
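
device.h gathers the mlx5 CQE bit layout into one set of inline extractors, so the shift-and-mask knowledge lives in a single header. Reproducing the pattern with the shifts shown above (the struct is reduced to the bytes involved):

    #include <stdbool.h>
    #include <stdint.h>

    struct mlx5_cqe64_view {
        uint8_t op_own;
        uint8_t lro_tcppsh_abort_dupack;
        uint8_t l4_l3_hdr_type;
        uint8_t outer_l3_tunneled;
    };

    static inline uint8_t cqe_format(const struct mlx5_cqe64_view *c)
    {
        return (c->op_own >> 2) & 0x3;
    }

    static inline uint8_t l4_hdr_type(const struct mlx5_cqe64_view *c)
    {
        return (c->l4_l3_hdr_type >> 4) & 0x7;
    }

    static inline bool is_tunneled(const struct mlx5_cqe64_view *c)
    {
        return c->outer_l3_tunneled & 0x1;
    }
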
/Linux-v4.19/drivers/scsi/qedf/
qedf.h
231 struct fcoe_cqe cqe; member
456 extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
459 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
461 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
466 extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
479 extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
486 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
488 extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
490 extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
499 struct fcoe_cqe *cqe);
[all …]
/Linux-v4.19/drivers/scsi/bnx2i/
bnx2i.h
506 struct cqe { struct
650 struct cqe *cq_virt;
654 struct cqe *cq_prod_qe;
655 struct cqe *cq_cons_qe;
656 struct cqe *cq_first_qe;
657 struct cqe *cq_last_qe;
774 struct cqe cqe; member
881 struct cqe *cqe);
/Linux-v4.19/include/trace/events/
rpcrdma.h
533 const struct ib_cqe *cqe
536 TP_ARGS(cqe),
539 __field(const void *, cqe)
543 __entry->cqe = cqe;
547 __entry->cqe
625 __field(const void *, cqe)
632 __entry->cqe = wc->wr_cqe;
644 __entry->cqe, __entry->byte_len,
1268 __field(const void *, cqe)
1274 __entry->cqe = wc->wr_cqe;
[all …]
