
Searched defs:cqe (Results 1 – 25 of 128) sorted by relevance


/Linux-v5.4/drivers/infiniband/sw/rxe/
rxe_cq.c 39 int cqe, int comp_vector) in rxe_cq_chk_attr()
84 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, in rxe_cq_from_init()
117 int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, in rxe_cq_resize_queue()
132 int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) in rxe_cq_post()
rxe_comp.c 403 struct rxe_cqe *cqe) in make_send_cqe()
443 struct rxe_cqe cqe; in do_complete() local
/Linux-v5.4/drivers/infiniband/hw/mlx4/
cq.c 81 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
133 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) in mlx4_ib_free_cq_buf()
140 struct ib_umem **umem, u64 buf_addr, int cqe) in mlx4_ib_get_cq_umem()
357 struct mlx4_cqe *cqe, *new_cqe; in mlx4_ib_cq_resize_copy_cqes() local
500 static void dump_cqe(void *cqe) in dump_cqe()
510 static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe, in mlx4_ib_handle_error_cqe()
586 unsigned tail, struct mlx4_cqe *cqe, int is_eth) in use_tunnel_data()
665 struct mlx4_cqe *cqe; in mlx4_ib_poll_one() local
923 struct mlx4_cqe *cqe, *dest; in __mlx4_ib_cq_clean() local
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c 96 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5e_cqes_update_owner() local
104 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5e_cqes_update_owner() local
591 struct mlx5_cqe64 *cqe; in mlx5e_poll_ico_cq() local
710 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) in mlx5e_lro_update_tcp_hdr()
726 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, in mlx5e_lro_update_hdr()
780 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, in mlx5e_skb_set_hash()
906 struct mlx5_cqe64 *cqe, in mlx5e_handle_csum()
975 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, in mlx5e_build_rx_skb()
1027 struct mlx5_cqe64 *cqe, in mlx5e_complete_rx_cqe()
1057 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, in mlx5e_skb_from_cqe_linear()
[all …]
/Linux-v5.4/drivers/scsi/qedi/
qedi_fw.c 31 union iscsi_cqe *cqe, in qedi_process_logout_resp()
81 union iscsi_cqe *cqe, in qedi_process_text_resp()
181 union iscsi_cqe *cqe, in qedi_process_tmf_resp()
252 union iscsi_cqe *cqe, in qedi_process_login_resp()
313 struct iscsi_cqe_unsolicited *cqe, in qedi_get_rq_bdq_buf()
352 struct iscsi_cqe_unsolicited *cqe, in qedi_put_rq_bdq_buf()
391 struct iscsi_cqe_unsolicited *cqe, in qedi_unsol_pdu_adjust_bdq()
403 union iscsi_cqe *cqe, in qedi_process_nopin_mesg()
475 union iscsi_cqe *cqe, in qedi_process_async_mesg()
529 union iscsi_cqe *cqe, in qedi_process_reject_mesg()
[all …]
/Linux-v5.4/drivers/scsi/bnx2i/
bnx2i_hwi.c 1337 struct cqe *cqe) in bnx2i_process_scsi_cmd_resp()
1434 struct cqe *cqe) in bnx2i_process_login_resp()
1502 struct cqe *cqe) in bnx2i_process_text_resp()
1563 struct cqe *cqe) in bnx2i_process_tmf_resp()
1602 struct cqe *cqe) in bnx2i_process_logout_resp()
1648 struct cqe *cqe) in bnx2i_process_nopin_local_cmpl()
1689 struct cqe *cqe) in bnx2i_process_nopin_mesg()
1741 struct cqe *cqe) in bnx2i_process_async_mesg()
1791 struct cqe *cqe) in bnx2i_process_reject_mesg()
1828 struct cqe *cqe) in bnx2i_process_cmd_cleanup_resp()
[all …]
/Linux-v5.4/drivers/infiniband/hw/mlx5/
cq.c 80 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
116 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req()
165 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_responder()
268 static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe) in dump_cqe()
275 struct mlx5_err_cqe *cqe, in mlx5_handle_error_cqe()
338 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, in get_sig_err_item()
432 void *cqe; in mlx5_poll_one() local
812 void *cqe; in init_cq_frag_buf() local
1014 void *cqe, *dest; in __mlx5_ib_cq_clean() local
/Linux-v5.4/drivers/infiniband/hw/cxgb3/
cxio_hal.c 75 struct t3_cqe *cqe; in cxio_hal_cq_op() local
332 struct t3_cqe cqe; in insert_recv_cqe() local
369 struct t3_cqe cqe; in insert_sq_cqe() local
408 struct t3_cqe *cqe, *swcqe; in cxio_flush_hw_cq() local
424 static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq) in cqe_completes_wr()
444 struct t3_cqe *cqe; in cxio_count_scqes() local
462 struct t3_cqe *cqe; in cxio_count_rcqes() local
1116 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, in cxio_poll_cq()
iwch_cq.c 39 struct t3_cqe cqe; in __iwch_poll_cq_one() local
/Linux-v5.4/drivers/infiniband/hw/mthca/
mthca_cq.c 174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) in cqe_sw()
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) in set_cqe_hw()
191 __be32 *cqe = cqe_ptr; in dump_cqe() local
264 static inline int is_recv_cqe(struct mthca_cqe *cqe) in is_recv_cqe()
276 struct mthca_cqe *cqe; in mthca_cq_clean() local
366 void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe) in mthca_free_cq_buf()
374 struct mthca_err_cqe *cqe, in handle_error_cqe()
485 struct mthca_cqe *cqe; in mthca_poll_one() local
/Linux-v5.4/net/sunrpc/xprtrdma/
frwr_ops.c 402 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_fastreg() local
484 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_localinv() local
503 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_localinv_wake() local
605 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_localinv_done() local
svc_rdma_rw.c 204 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_write_done() local
262 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_read_done() local
305 struct ib_cqe *cqe; in svc_rdma_post_chunk_ctxt() local
/Linux-v5.4/drivers/infiniband/hw/cxgb4/
cq.c 186 struct t4_cqe cqe; in insert_recv_cqe() local
220 struct t4_cqe cqe; in insert_sq_cqe() local
422 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) in cqe_completes_wr()
445 struct t4_cqe *cqe; in c4iw_count_rcqes() local
544 static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, in poll_cq()
/Linux-v5.4/drivers/net/ethernet/qlogic/qede/
qede_fp.c 627 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_set_gro_params()
807 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_tpa_start()
933 struct eth_fast_path_rx_tpa_cont_cqe *cqe) in qede_tpa_cont()
948 struct eth_fast_path_rx_tpa_end_cqe *cqe) in qede_tpa_end()
1038 static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, in qede_pkt_is_ip_fragmented()
1058 struct eth_fast_path_rx_reg_cqe *cqe, in qede_rx_xdp()
1127 struct eth_fast_path_rx_reg_cqe *cqe, in qede_rx_build_jumbo()
1184 union eth_rx_cqe *cqe, in qede_rx_process_tpa_cqe()
1209 union eth_rx_cqe *cqe; in qede_rx_process_cqe() local
qede_ptp.h 48 union eth_rx_cqe *cqe, in qede_ptp_record_rx_ts()
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.c 251 struct mlx5_cqe64 *cqe, u8 status) in mlx5_fpga_conn_rq_cqe()
291 struct mlx5_cqe64 *cqe, u8 status) in mlx5_fpga_conn_sq_cqe()
333 struct mlx5_cqe64 *cqe) in mlx5_fpga_conn_handle_cqe()
385 struct mlx5_cqe64 *cqe; in mlx5_fpga_conn_cqes() local
435 struct mlx5_cqe64 *cqe; in mlx5_fpga_conn_create_cq() local
/Linux-v5.4/drivers/infiniband/hw/bnxt_re/
qplib_fp.c 1391 struct cq_req *cqe = (struct cq_req *)hw_cqe; in __clean_cq() local
1401 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe; in __clean_cq() local
2019 struct bnxt_qplib_cqe *cqe; in __flush_sq() local
2058 struct bnxt_qplib_cqe *cqe; in __flush_rq() local
2222 struct bnxt_qplib_cqe *cqe; in bnxt_qplib_cq_process_req() local
2337 struct bnxt_qplib_cqe *cqe; in bnxt_qplib_cq_process_res_rc() local
2412 struct bnxt_qplib_cqe *cqe; in bnxt_qplib_cq_process_res_ud() local
2509 struct bnxt_qplib_cqe *cqe; in bnxt_qplib_cq_process_res_raweth_qp1() local
2596 struct bnxt_qplib_cqe *cqe; in bnxt_qplib_cq_process_terminal() local
2714 struct bnxt_qplib_cqe *cqe, in bnxt_qplib_process_flush_list()
[all …]
/Linux-v5.4/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c 1048 struct ocrdma_cqe *cqe = NULL; in ocrdma_flush_cq() local
1607 struct ocrdma_cqe *cqe; in ocrdma_discard_cqes() local
2454 struct ocrdma_cqe *cqe) in ocrdma_set_cqe_status_flushed()
2485 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, in ocrdma_update_err_cqe()
2507 static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, in ocrdma_update_err_rcqe()
2517 static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, in ocrdma_update_err_scqe()
2528 struct ocrdma_cqe *cqe, struct ib_wc *ibwc, in ocrdma_poll_err_scqe()
2572 struct ocrdma_cqe *cqe, in ocrdma_poll_success_scqe()
2597 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, in ocrdma_poll_scqe()
2614 struct ocrdma_cqe *cqe) in ocrdma_update_ud_rcqe()
[all …]
/Linux-v5.4/drivers/scsi/qedf/
qedf_io.c 1126 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, in qedf_scsi_completion()
1432 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, in qedf_process_warning_compl()
1502 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, in qedf_process_error_detect()
1935 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, in qedf_process_abts_compl()
2256 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, in qedf_process_cleanup_compl()
2488 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, in qedf_process_tmf_compl()
2503 struct fcoe_cqe *cqe) in qedf_process_unsol_compl()
/Linux-v5.4/drivers/infiniband/ulp/iser/
iscsi_iser.h 252 struct ib_cqe cqe; member
278 struct ib_cqe cqe; member
298 struct ib_cqe cqe; member
656 iser_rx(struct ib_cqe *cqe) in iser_rx()
662 iser_tx(struct ib_cqe *cqe) in iser_tx()
668 iser_login(struct ib_cqe *cqe) in iser_login()
iser_memory.c 368 struct ib_cqe *cqe, in iser_inv_rkey()
387 struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; in iser_reg_sig_mr() local
444 struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; in iser_fast_reg_mr() local
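
The iSER hits above show both halves of the kernel's ib_cqe completion pattern: a struct ib_cqe is embedded as a member of a per-request context, the work request's wr_cqe field points at it, and the completion handler recovers the enclosing context via container_of(), which is what helpers like iser_rx()/iser_tx()/iser_login() do. Below is a minimal sketch of that pattern; struct my_rx_desc, my_rx_done() and my_post_recv() are hypothetical names rather than iSER code, and only the RDMA core API (struct ib_cqe, wc->wr_cqe, ib_post_recv) is assumed.

#include <rdma/ib_verbs.h>

/* Hypothetical per-receive descriptor embedding the ib_cqe. */
struct my_rx_desc {
	struct ib_cqe	cqe;	/* wc->wr_cqe points back here on completion */
	void		*buf;
};

/* Completion handler: recover the descriptor from wc->wr_cqe. */
static void my_rx_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_rx_desc *desc = container_of(wc->wr_cqe,
						struct my_rx_desc, cqe);

	if (wc->status != IB_WC_SUCCESS)
		return;		/* flush or error: drop the buffer here */
	/* ... hand desc->buf to the upper layer ... */
}

/* Posting side: point the receive WR at the embedded cqe. */
static int my_post_recv(struct ib_qp *qp, struct my_rx_desc *desc,
			struct ib_sge *sge)
{
	struct ib_recv_wr wr = {
		.wr_cqe	 = &desc->cqe,
		.sg_list = sge,
		.num_sge = 1,
	};

	desc->cqe.done = my_rx_done;
	return ib_post_recv(qp, &wr, NULL);
}
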
/Linux-v5.4/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c 290 struct pvrdma_cqe *cqe; in _pvrdma_flush_cqe() local
325 struct pvrdma_cqe *cqe; in pvrdma_poll_one() local
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/en/
health.h 9 #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) argument
/Linux-v5.4/drivers/infiniband/sw/siw/
siw_cq.c 50 struct siw_cqe *cqe; in siw_reap_cqe() local
/Linux-v5.4/tools/io_uring/
liburing.h 80 struct io_uring_cqe *cqe) in io_uring_cqe_seen()
102 static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe) in io_uring_cqe_get_data()
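
The liburing hits show the userspace side of completion handling: the CQE carries back the user_data attached to the SQE, io_uring_cqe_get_data() reads it, and io_uring_cqe_seen() marks the entry consumed so the kernel can reuse the CQ slot. A minimal sketch of reaping one completion follows; only io_uring_cqe_seen() and io_uring_cqe_get_data() appear in the listing above, and the other entry points (io_uring_queue_init, io_uring_get_sqe, io_uring_prep_nop, io_uring_sqe_set_data, io_uring_submit, io_uring_wait_cqe, io_uring_queue_exit) are assumed from the same liburing header.

#include <stdio.h>
#include "liburing.h"

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Submit a no-op request, tagged so its CQE can be matched later. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_sqe_set_data(sqe, (void *)0x1234);
	io_uring_submit(&ring);

	/* Reap the completion: read the tag, then mark the CQE as seen. */
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("nop done: res=%d data=%p\n",
		       cqe->res, io_uring_cqe_get_data(cqe));
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}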
