
Searched refs:cqe (Results 1 – 25 of 207) sorted by relevance


/Linux-v5.15/drivers/infiniband/hw/mthca/
mthca_cq.c
174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) in cqe_sw() argument
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; in cqe_sw()
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) in set_cqe_hw() argument
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; in set_cqe_hw()
191 __be32 *cqe = cqe_ptr; in dump_cqe() local
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */ in dump_cqe()
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), in dump_cqe()
196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), in dump_cqe()
197 be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); in dump_cqe()
[all …]
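
The mthca snippet shows the simplest CQE handoff: a single owner bit that hardware clears when it posts a completion and software sets when it is done with the slot. A minimal sketch of that handshake, with hypothetical names (struct my_cqe, OWNER_HW) standing in for the mthca ones:

#include <stdint.h>
#include <stddef.h>

#define OWNER_HW 0x80	/* top bit of the owner byte, as in the snippet */

struct my_cqe {
	uint8_t owner;
	/* ... completion fields ... */
};

/* Return the entry only if software owns it, like cqe_sw() above. */
static struct my_cqe *cqe_if_sw_owned(struct my_cqe *cqe)
{
	return (cqe->owner & OWNER_HW) ? NULL : cqe;
}

/* Hand the slot back to hardware, like set_cqe_hw() above. */
static void cqe_return_to_hw(struct my_cqe *cqe)
{
	cqe->owner = OWNER_HW;
}
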
/Linux-v5.15/drivers/infiniband/hw/mlx4/
cq.c
81 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
82 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe); in get_sw_cqe()
85 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
133 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) in mlx4_ib_free_cq_buf() argument
135 mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf); in mlx4_ib_free_cq_buf()
140 struct ib_umem **umem, u64 buf_addr, int cqe) in mlx4_ib_get_cq_umem() argument
147 *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size, in mlx4_ib_get_cq_umem()
178 int entries = attr->cqe; in mlx4_ib_create_cq()
195 cq->ibcq.cqe = entries - 1; in mlx4_ib_create_cq()
281 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mlx4_ib_create_cq()
[all …]
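
mlx4's get_sw_cqe() uses a subtler scheme: the owner bit toggles on every pass through the ring, so an entry is software-owned when the bit's value matches the wrap parity of the consumer index. A sketch of that test, assuming a hypothetical power-of-two ring (note the snippet's cq->ibcq.cqe holds entries - 1, so ibcq.cqe + 1 is the ring size):

#include <stdint.h>
#include <stddef.h>

#define CQE_OWNER 0x1

struct ring_cqe {
	uint8_t owner_flags;
};

static struct ring_cqe *sw_owned(struct ring_cqe *ring,
				 uint32_t n, uint32_t size)
{
	struct ring_cqe *cqe = &ring[n & (size - 1)];

	/* Bit `size` of the free-running index n flips on every
	 * wrap; the owner bit must have the same parity. */
	if (!!(cqe->owner_flags & CQE_OWNER) ^ !!(n & size))
		return NULL;	/* still owned by hardware */
	return cqe;
}
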
/Linux-v5.15/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
83 cq->ibcq.cqe, &head); in pvrdma_req_notify_cq()
105 int entries = attr->cqe; in pvrdma_create_cq()
132 cq->ibcq.cqe = entries; in pvrdma_create_cq()
186 cmd->cqe = entries; in pvrdma_create_cq()
195 cq->ibcq.cqe = resp->cqe; in pvrdma_create_cq()
288 cq->ibcq.cqe, &head); in _pvrdma_flush_cqe()
293 cq->ibcq.cqe); in _pvrdma_flush_cqe()
294 struct pvrdma_cqe *cqe; in _pvrdma_flush_cqe() local
298 (cq->ibcq.cqe - head + tail); in _pvrdma_flush_cqe()
302 curr = cq->ibcq.cqe - 1; in _pvrdma_flush_cqe()
[all …]
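
The head/tail arithmetic in _pvrdma_flush_cqe() is the standard circular-buffer occupancy count: with `size` slots, the number of pending entries depends on whether the tail index has wrapped past the head. A small sketch with a hypothetical helper name:

#include <stdint.h>

static uint32_t ring_pending(uint32_t head, uint32_t tail, uint32_t size)
{
	if (tail >= head)
		return tail - head;
	return size - head + tail;	/* tail wrapped around */
}
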
/Linux-v5.15/drivers/infiniband/sw/siw/
siw_cq.c
50 struct siw_cqe *cqe; in siw_reap_cqe() local
55 cqe = &cq->queue[cq->cq_get % cq->num_cqe]; in siw_reap_cqe()
56 if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) { in siw_reap_cqe()
58 wc->wr_id = cqe->id; in siw_reap_cqe()
59 wc->status = map_cqe_status[cqe->status].ib; in siw_reap_cqe()
60 wc->opcode = map_wc_opcode[cqe->opcode]; in siw_reap_cqe()
61 wc->byte_len = cqe->bytes; in siw_reap_cqe()
69 if (cqe->flags & SIW_WQE_REM_INVAL) { in siw_reap_cqe()
70 wc->ex.invalidate_rkey = cqe->inval_stag; in siw_reap_cqe()
73 wc->qp = cqe->base_qp; in siw_reap_cqe()
[all …]
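
siw_reap_cqe() is a lock-free consumer: it checks the VALID flag with READ_ONCE, copies the entry's fields into the ib_wc, and clears the flag to hand the slot back to the producer. A portable C11 sketch of the same pattern (names hypothetical; the kernel uses READ_ONCE/WRITE_ONCE rather than C11 atomics):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define MY_CQE_VALID 0x1

struct my_cqe {
	_Atomic uint32_t flags;		/* MY_CQE_VALID when ready */
	uint64_t id;
	uint32_t bytes;
};

struct my_wc {
	uint64_t wr_id;
	uint32_t byte_len;
};

static bool reap_cqe(struct my_cqe *queue, uint32_t *get,
		     uint32_t num_cqe, struct my_wc *wc)
{
	struct my_cqe *cqe = &queue[*get % num_cqe];

	if (!(atomic_load_explicit(&cqe->flags, memory_order_acquire)
	      & MY_CQE_VALID))
		return false;		/* nothing to reap */

	wc->wr_id = cqe->id;
	wc->byte_len = cqe->bytes;

	/* Release the slot back to the producer. */
	atomic_store_explicit(&cqe->flags, 0, memory_order_release);
	(*get)++;
	return true;
}
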
/Linux-v5.15/tools/io_uring/
io_uring-cp.c
126 struct io_uring_cqe *cqe; in copy_file() local
174 ret = io_uring_wait_cqe(ring, &cqe); in copy_file()
177 ret = io_uring_peek_cqe(ring, &cqe); in copy_file()
179 cqe = NULL; in copy_file()
188 if (!cqe) in copy_file()
191 data = io_uring_cqe_get_data(cqe); in copy_file()
192 if (cqe->res < 0) { in copy_file()
193 if (cqe->res == -EAGAIN) { in copy_file()
195 io_uring_cqe_seen(ring, cqe); in copy_file()
199 strerror(-cqe->res)); in copy_file()
[all …]
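
io_uring-cp.c drives completions with liburing: io_uring_wait_cqe() blocks for the next CQE (io_uring_peek_cqe() is the non-blocking variant), cqe->res carries the syscall-style result, and io_uring_cqe_seen() retires the entry. A minimal self-contained consumer in the same vein (compile with -luring; error handling trimmed):

#include <liburing.h>
#include <stdio.h>
#include <string.h>

static int drain_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret = io_uring_wait_cqe(ring, &cqe);

	if (ret < 0) {
		fprintf(stderr, "wait_cqe: %s\n", strerror(-ret));
		return ret;
	}
	if (cqe->res < 0)	/* per-request error, e.g. -EAGAIN */
		fprintf(stderr, "request failed: %s\n", strerror(-cqe->res));

	void *data = io_uring_cqe_get_data(cqe);	/* user cookie */
	(void)data;
	io_uring_cqe_seen(ring, cqe);	/* advance the CQ ring */
	return 0;
}
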
/Linux-v5.15/drivers/infiniband/sw/rxe/
rxe_cq.c
12 int cqe, int comp_vector) in rxe_cq_chk_attr() argument
16 if (cqe <= 0) { in rxe_cq_chk_attr()
17 pr_warn("cqe(%d) <= 0\n", cqe); in rxe_cq_chk_attr()
21 if (cqe > rxe->attr.max_cqe) { in rxe_cq_chk_attr()
23 cqe, rxe->attr.max_cqe); in rxe_cq_chk_attr()
33 if (cqe < count) { in rxe_cq_chk_attr()
35 cqe, count); in rxe_cq_chk_attr()
61 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, in rxe_cq_from_init() argument
69 cq->queue = rxe_queue_init(rxe, &cqe, in rxe_cq_from_init()
92 cq->ibcq.cqe = cqe; in rxe_cq_from_init()
[all …]
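
rxe_cq_chk_attr() is plain parameter validation: the requested depth must be positive, within the device's max_cqe, and (when resizing) at least the number of completions still queued. A userspace sketch with hypothetical names:

#include <errno.h>
#include <stdio.h>

static int check_cq_depth(int cqe, int max_cqe, int in_use)
{
	if (cqe <= 0) {
		fprintf(stderr, "cqe(%d) <= 0\n", cqe);
		return -EINVAL;
	}
	if (cqe > max_cqe) {
		fprintf(stderr, "cqe(%d) > max_cqe(%d)\n", cqe, max_cqe);
		return -EINVAL;
	}
	if (cqe < in_use) {
		fprintf(stderr, "cqe(%d) < queued count(%d)\n", cqe, in_use);
		return -EINVAL;
	}
	return 0;
}
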
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
63 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
64 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
114 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5e_cqes_update_owner() local
116 cqe->op_own = op_own; in mlx5e_cqes_update_owner()
122 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5e_cqes_update_owner() local
124 cqe->op_own = op_own; in mlx5e_cqes_update_owner()
635 struct mlx5_cqe64 *cqe; in mlx5e_poll_ico_cq() local
642 cqe = mlx5_cqwq_get_cqe(&cq->wq); in mlx5e_poll_ico_cq()
643 if (likely(!cqe)) in mlx5e_poll_ico_cq()
658 wqe_counter = be16_to_cpu(cqe->wqe_counter); in mlx5e_poll_ico_cq()
[all …]
/Linux-v5.15/drivers/infiniband/hw/cxgb4/
cq.c
186 struct t4_cqe cqe; in insert_recv_cqe() local
190 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
191 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_recv_cqe()
196 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); in insert_recv_cqe()
198 cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx); in insert_recv_cqe()
199 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe()
220 struct t4_cqe cqe; in insert_sq_cqe() local
224 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
225 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_sq_cqe()
230 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; in insert_sq_cqe()
[all …]
/Linux-v5.15/drivers/net/ethernet/mellanox/mlxsw/
pci_hw.h
114 static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
119 return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
121 return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
123 return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
127 char *cqe, u32 val) \
132 mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
135 mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
138 mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
156 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
167 MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
[all …]
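
pci_hw.h generates its accessors with a token-pasting macro: one public getter per field that dispatches on the CQE version to the per-version readers. A reduced sketch of the same idea, with two versions instead of three and invented field offsets:

#include <stdint.h>
#include <string.h>

enum cqe_v { CQE_V0, CQE_V1 };

/* Per-version field readers; offsets are invented for the sketch. */
static inline uint32_t cqe_v0_wqe_counter_get(const char *cqe)
{
	uint16_t v; memcpy(&v, cqe + 0, sizeof(v)); return v;
}
static inline uint32_t cqe_v1_wqe_counter_get(const char *cqe)
{
	uint16_t v; memcpy(&v, cqe + 4, sizeof(v)); return v;
}

/* Stamp out one version-dispatching getter per field, like
 * mlxsw_pci_cqe_##name##_get() above. */
#define DEFINE_CQE_GETTER(name)						\
static inline uint32_t cqe_##name##_get(enum cqe_v v, const char *cqe)	\
{									\
	return v == CQE_V0 ? cqe_v0_##name##_get(cqe)			\
			   : cqe_v1_##name##_get(cqe);			\
}

DEFINE_CQE_GETTER(wqe_counter)
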
/Linux-v5.15/drivers/infiniband/hw/mlx5/
cq.c
81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
87 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
88 return cqe; in get_sw_cqe()
117 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req() argument
121 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { in handle_good_req()
137 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_good_req()
166 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_responder() argument
182 msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn)); in handle_responder()
189 wqe_ctr = be16_to_cpu(cqe->wqe_counter); in handle_responder()
[all …]
/Linux-v5.15/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
1434 struct cq_req *cqe = (struct cq_req *)hw_cqe; in __clean_cq() local
1436 if (qp == le64_to_cpu(cqe->qp_handle)) in __clean_cq()
1437 cqe->qp_handle = 0; in __clean_cq()
1444 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe; in __clean_cq() local
1446 if (qp == le64_to_cpu(cqe->qp_handle)) in __clean_cq()
1447 cqe->qp_handle = 0; in __clean_cq()
2125 struct bnxt_qplib_cqe *cqe; in __flush_sq() local
2131 cqe = *pcqe; in __flush_sq()
2141 memset(cqe, 0, sizeof(*cqe)); in __flush_sq()
2142 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR; in __flush_sq()
[all …]
/Linux-v5.15/drivers/infiniband/sw/rdmavt/
cq.c
54 if (head >= (unsigned)cq->ibcq.cqe) { in rvt_cq_enter()
55 head = cq->ibcq.cqe; in rvt_cq_enter()
167 unsigned int entries = attr->cqe; in rvt_create_cq()
249 cq->ibcq.cqe = entries; in rvt_create_cq()
338 int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in rvt_resize_cq() argument
350 if (cqe < 1 || cqe > rdi->dparms.props.max_cqe) in rvt_resize_cq()
357 sz = sizeof(struct ib_uverbs_wc) * (cqe + 1); in rvt_resize_cq()
363 sz = sizeof(struct ib_wc) * (cqe + 1); in rvt_resize_cq()
393 if (head > (u32)cq->ibcq.cqe) in rvt_resize_cq()
394 head = (u32)cq->ibcq.cqe; in rvt_resize_cq()
[all …]
/Linux-v5.15/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
79 struct nix_cqe_tx_s *cqe, in otx2_snd_pkt_handler() argument
82 struct nix_send_comp_s *snd_comp = &cqe->comp; in otx2_snd_pkt_handler()
165 struct nix_cqe_rx_s *cqe, struct sk_buff *skb) in otx2_set_rxhash() argument
181 hash = cqe->hdr.flow_tag; in otx2_set_rxhash()
186 static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, in otx2_free_rcv_seg() argument
189 struct nix_rx_sg_s *sg = &cqe->sg; in otx2_free_rcv_seg()
195 end = start + ((cqe->parse.desc_sizem1 + 1) * 16); in otx2_free_rcv_seg()
207 struct nix_cqe_rx_s *cqe, int qidx) in otx2_check_rcv_errors() argument
210 struct nix_rx_parse_s *parse = &cqe->parse; in otx2_check_rcv_errors()
265 if (cqe->sg.segs) in otx2_check_rcv_errors()
[all …]
/Linux-v5.15/drivers/infiniband/ulp/iser/
iscsi_iser.h
248 struct ib_cqe cqe; member
275 struct ib_cqe cqe; member
295 struct ib_cqe cqe; member
577 iser_rx(struct ib_cqe *cqe) in iser_rx() argument
579 return container_of(cqe, struct iser_rx_desc, cqe); in iser_rx()
583 iser_tx(struct ib_cqe *cqe) in iser_tx() argument
585 return container_of(cqe, struct iser_tx_desc, cqe); in iser_tx()
589 iser_login(struct ib_cqe *cqe) in iser_login() argument
591 return container_of(cqe, struct iser_login_desc, cqe); in iser_login()
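
The iser helpers all follow the embedded-cqe idiom: each descriptor embeds a struct ib_cqe, the completion handler receives a pointer to that member, and container_of() recovers the enclosing descriptor. A self-contained sketch (generic container_of; struct names hypothetical):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_cqe {
	void (*done)(struct my_cqe *cqe);	/* completion callback */
};

struct rx_desc {
	char buf[256];
	struct my_cqe cqe;	/* embedded, like iser_rx_desc above */
};

static struct rx_desc *to_rx_desc(struct my_cqe *cqe)
{
	return container_of(cqe, struct rx_desc, cqe);
}

static void rx_done(struct my_cqe *cqe)
{
	struct rx_desc *desc = to_rx_desc(cqe);
	(void)desc;	/* process desc->buf ... */
}
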
/Linux-v5.15/drivers/net/ethernet/qlogic/qede/
qede_fp.c
651 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_set_gro_params() argument
653 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); in qede_set_gro_params()
661 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - in qede_set_gro_params()
662 cqe->header_len; in qede_set_gro_params()
831 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_tpa_start() argument
833 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_start()
838 pad = cqe->placement_offset + rxq->rx_headroom; in qede_tpa_start()
841 le16_to_cpu(cqe->len_on_first_bd), in qede_tpa_start()
862 if ((le16_to_cpu(cqe->pars_flags.flags) >> in qede_tpa_start()
865 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in qede_tpa_start()
[all …]
qede_ptp.h
23 union eth_rx_cqe *cqe, in qede_ptp_record_rx_ts() argument
27 if (unlikely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) & in qede_ptp_record_rx_ts()
29 if (likely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) in qede_ptp_record_rx_ts()
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
tls_rxtx.h
68 struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) in mlx5e_tls_handle_rx_skb() argument
70 if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */ in mlx5e_tls_handle_rx_skb()
71 return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt); in mlx5e_tls_handle_rx_skb()
80 mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; } in mlx5e_accel_is_tls() argument
83 struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {} in mlx5e_tls_handle_rx_skb() argument
ipsec_rxtx.h
72 struct mlx5_cqe64 *cqe);
78 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) in mlx5_ipsec_is_rx_flow() argument
80 return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata)); in mlx5_ipsec_is_rx_flow()
134 struct mlx5_cqe64 *cqe) in mlx5e_ipsec_offload_handle_rx_skb() argument
142 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; } in mlx5_ipsec_is_rx_flow() argument
/Linux-v5.15/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c
324 cqe_size = wq->q_depth * sizeof(*rq->cqe); in alloc_rq_cqe()
325 rq->cqe = vzalloc(cqe_size); in alloc_rq_cqe()
326 if (!rq->cqe) in alloc_rq_cqe()
335 rq->cqe[i] = dma_alloc_coherent(&pdev->dev, in alloc_rq_cqe()
336 sizeof(*rq->cqe[i]), in alloc_rq_cqe()
338 if (!rq->cqe[i]) in alloc_rq_cqe()
346 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], in alloc_rq_cqe()
352 vfree(rq->cqe); in alloc_rq_cqe()
368 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i], in free_rq_cqe()
372 vfree(rq->cqe); in free_rq_cqe()
[all …]
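
alloc_rq_cqe() allocates a pointer array plus one DMA-coherent buffer per entry, and on failure unwinds exactly the entries already allocated (indices 0..i-1) before freeing the array itself. A userspace sketch of the same unwind shape, with malloc standing in for dma_alloc_coherent():

#include <stdlib.h>

struct rq_cqe { char data[32]; };

static struct rq_cqe **alloc_cqes(int depth)
{
	struct rq_cqe **cqe = calloc(depth, sizeof(*cqe));
	if (!cqe)
		return NULL;

	for (int i = 0; i < depth; i++) {
		cqe[i] = calloc(1, sizeof(*cqe[i]));
		if (!cqe[i]) {
			for (int j = 0; j < i; j++)	/* unwind */
				free(cqe[j]);
			free(cqe);
			return NULL;
		}
	}
	return cqe;
}
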
/Linux-v5.15/drivers/scsi/qedi/
qedi_fw.c
31 union iscsi_cqe *cqe, in qedi_process_logout_resp() argument
42 cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response; in qedi_process_logout_resp()
50 resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); in qedi_process_logout_resp()
82 union iscsi_cqe *cqe, in qedi_process_text_resp() argument
97 cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response; in qedi_process_text_resp()
109 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, in qedi_process_text_resp()
178 union iscsi_cqe *cqe, in qedi_process_tmf_resp() argument
190 cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response; in qedi_process_tmf_resp()
214 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, in qedi_process_tmf_resp()
258 union iscsi_cqe *cqe, in qedi_process_login_resp() argument
[all …]
/Linux-v5.15/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
967 int entries = attr->cqe; in ocrdma_create_cq()
1021 ibcq->cqe = new_cnt; in ocrdma_resize_cq()
1032 struct ocrdma_cqe *cqe = NULL; in ocrdma_flush_cq() local
1034 cqe = cq->va; in ocrdma_flush_cq()
1042 if (is_cqe_valid(cq, cqe)) in ocrdma_flush_cq()
1044 cqe++; in ocrdma_flush_cq()
1592 struct ocrdma_cqe *cqe; in ocrdma_discard_cqes() local
1611 cqe = cq->va + cur_getp; in ocrdma_discard_cqes()
1616 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK; in ocrdma_discard_cqes()
1622 if (is_cqe_for_sq(cqe)) { in ocrdma_discard_cqes()
[all …]
/Linux-v5.15/drivers/nvme/target/
fabrics-cmd.c
81 req->cqe->result.u64 = cpu_to_le64(val); in nvmet_execute_prop_get()
118 req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); in nvmet_install_queue()
133 req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); in nvmet_install_queue()
150 req->cqe->sq_head = cpu_to_le16(0xffff); in nvmet_install_queue()
191 req->cqe->result.u32 = 0; in nvmet_execute_admin_connect()
205 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); in nvmet_execute_admin_connect()
227 req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid); in nvmet_execute_admin_connect()
257 req->cqe->result.u32 = 0; in nvmet_execute_io_connect()
276 req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid); in nvmet_execute_io_connect()
285 req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid); in nvmet_execute_io_connect()
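
The nvmet fabrics handlers report status through the queued completion entry: the result union carries command-specific data such as the controller ID, and sq_head = 0xffff is the "no SQ head report" sentinel. A sketch against a reduced, hypothetical CQE layout (the real struct nvme_completion lives in include/linux/nvme.h and uses __le16/__le32/__le64 fields):

#include <stdint.h>

struct my_nvme_cqe {
	union {
		uint16_t u16;
		uint32_t u32;
		uint64_t u64;
	} result;
	uint16_t sq_head;
	uint16_t status;
};

static void connect_reply(struct my_nvme_cqe *cqe, uint16_t cntlid)
{
	/* The kernel wraps these in cpu_to_le16(); omitted here. */
	cqe->result.u16 = cntlid;	/* like line 227 above */
	cqe->sq_head = 0xffff;		/* "no SQ head report" sentinel */
}
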
/Linux-v5.15/include/linux/mlx5/
device.h
859 static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) in mlx5_get_cqe_format() argument
861 return (cqe->op_own >> 2) & 0x3; in mlx5_get_cqe_format()
864 static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe) in get_cqe_opcode() argument
866 return cqe->op_own >> 4; in get_cqe_opcode()
869 static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) in get_cqe_lro_tcppsh() argument
871 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; in get_cqe_lro_tcppsh()
874 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) in get_cqe_l4_hdr_type() argument
876 return (cqe->l4_l3_hdr_type >> 4) & 0x7; in get_cqe_l4_hdr_type()
879 static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) in get_cqe_l3_hdr_type() argument
881 return (cqe->l4_l3_hdr_type >> 2) & 0x3; in get_cqe_l3_hdr_type()
[all …]
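
The device.h helpers decode fields packed into the CQE's op_own byte: opcode in the high nibble, format in bits 3:2, plus single-bit flags held in other bytes of the entry. A tiny self-checking sketch of that packing:

#include <stdint.h>
#include <assert.h>

static inline uint8_t cqe_opcode(uint8_t op_own) { return op_own >> 4; }
static inline uint8_t cqe_format(uint8_t op_own) { return (op_own >> 2) & 0x3; }

int main(void)
{
	uint8_t op_own = 0x2c;	/* opcode 2, format 3, owner bit 0 */
	assert(cqe_opcode(op_own) == 0x2);
	assert(cqe_format(op_own) == 0x3);
	return 0;
}
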
/Linux-v5.15/drivers/scsi/qedf/
qedf.h
250 struct fcoe_cqe cqe; member
486 extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
489 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
491 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
496 extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
509 extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
516 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
518 extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
520 extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
529 struct fcoe_cqe *cqe);
[all …]
/Linux-v5.15/drivers/scsi/bnx2i/
bnx2i.h
506 struct cqe { struct
650 struct cqe *cq_virt;
654 struct cqe *cq_prod_qe;
655 struct cqe *cq_cons_qe;
656 struct cqe *cq_first_qe;
657 struct cqe *cq_last_qe;
774 struct cqe cqe; member
881 struct cqe *cqe);
