/Linux-v5.4/drivers/infiniband/hw/mthca/ |
D | mthca_cq.c |
    174  static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)  in cqe_sw() argument
    176  return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;  in cqe_sw()
    181  return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));  in next_cqe_sw()
    184  static inline void set_cqe_hw(struct mthca_cqe *cqe)  in set_cqe_hw() argument
    186  cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;  in set_cqe_hw()
    191  __be32 *cqe = cqe_ptr;  in dump_cqe() local
    193  (void) cqe; /* avoid warning if mthca_dbg compiled away... */  in dump_cqe()
    195  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),  in dump_cqe()
    196  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),  in dump_cqe()
    197  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));  in dump_cqe()
    [all …]
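The mthca entries above show the owner-bit handshake at its simplest: cqe_sw() yields an entry only when the owner bit says software may read it, and set_cqe_hw() hands the slot back to hardware once it has been polled. A minimal user-space model of that protocol (struct layout, flag value, and ring indexing are illustrative, not mthca's):

    #include <stddef.h>
    #include <stdint.h>

    #define CQ_ENTRY_OWNER_HW 0x80   /* stand-in for MTHCA_CQ_ENTRY_OWNER_HW */

    struct cqe {
        uint8_t owner;               /* top bit set: hardware still owns the slot */
        uint8_t payload[31];
    };

    /* Return the entry if software owns it, NULL while hardware is writing it. */
    static struct cqe *cqe_sw(struct cqe *cqe)
    {
        return (cqe->owner & CQ_ENTRY_OWNER_HW) ? NULL : cqe;
    }

    /* Give the slot back to hardware after the completion is consumed. */
    static void set_cqe_hw(struct cqe *cqe)
    {
        cqe->owner = CQ_ENTRY_OWNER_HW;
    }

    /* Poll one entry from a power-of-two ring via a free-running consumer count. */
    static struct cqe *next_cqe_sw(struct cqe *ring, uint32_t *cons_index, uint32_t mask)
    {
        struct cqe *cqe = cqe_sw(&ring[*cons_index & mask]);

        if (cqe)
            (*cons_index)++;   /* mthca advances the index in its caller */
        return cqe;
    }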
|
/Linux-v5.4/drivers/infiniband/hw/cxgb3/ |
D | iwch_ev.c |
    52   qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));  in post_qp_event()
    56   __func__, CQE_STATUS(rsp_msg->cqe),  in post_qp_event()
    57   CQE_QPID(rsp_msg->cqe));  in post_qp_event()
    67   CQE_STATUS(rsp_msg->cqe));  in post_qp_event()
    74   CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),  in post_qp_event()
    75   CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),  in post_qp_event()
    76   CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));  in post_qp_event()
    119  qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));  in iwch_ev_dispatch()
    122  cqid, CQE_QPID(rsp_msg->cqe),  in iwch_ev_dispatch()
    123  CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),  in iwch_ev_dispatch()
    [all …]
|
D | iwch_cq.c |
    39   struct t3_cqe cqe;  in __iwch_poll_cq_one() local
    45   ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,  in __iwch_poll_cq_one()
    61   wc->vendor_err = CQE_STATUS(cqe);  in __iwch_poll_cq_one()
    66   CQE_QPID(cqe), CQE_TYPE(cqe),  in __iwch_poll_cq_one()
    67   CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),  in __iwch_poll_cq_one()
    68   CQE_WRID_LOW(cqe), (unsigned long long)cookie);  in __iwch_poll_cq_one()
    70   if (CQE_TYPE(cqe) == 0) {  in __iwch_poll_cq_one()
    71   if (!CQE_STATUS(cqe))  in __iwch_poll_cq_one()
    72   wc->byte_len = CQE_LEN(cqe);  in __iwch_poll_cq_one()
    76   if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||  in __iwch_poll_cq_one()
    [all …]
|
D | cxio_hal.c |
    75   struct t3_cqe *cqe;  in cxio_hal_cq_op() local
    109  cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);  in cxio_hal_cq_op()
    110  while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {  in cxio_hal_cq_op()
    332  struct t3_cqe cqe;  in insert_recv_cqe() local
    336  memset(&cqe, 0, sizeof(cqe));  in insert_recv_cqe()
    337  cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |  in insert_recv_cqe()
    344  *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;  in insert_recv_cqe()
    369  struct t3_cqe cqe;  in insert_sq_cqe() local
    373  memset(&cqe, 0, sizeof(cqe));  in insert_sq_cqe()
    374  cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |  in insert_sq_cqe()
    [all …]
|
D | cxio_wr.h |
    675  struct t3_cqe cqe;  member
    728  #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \  argument
    729  CQE_GENBIT(*cqe))
    769  struct t3_cqe *cqe;  in cxio_next_hw_cqe() local
    771  cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));  in cxio_next_hw_cqe()
    772  if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))  in cxio_next_hw_cqe()
    773  return cqe;  in cxio_next_hw_cqe()
    779  struct t3_cqe *cqe;  in cxio_next_sw_cqe() local
    782  cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));  in cxio_next_sw_cqe()
    783  return cqe;  in cxio_next_sw_cqe()
    [all …]
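CQ_VLD_ENTRY() above is the generation-bit variant of the same handshake: the read pointer runs freely, and an entry is consumable only while the generation bit hardware wrote into it matches the generation implied by the pointer, which flips on every wrap of the 2^size_log2 ring. A reduced model (names and layout are illustrative, not the t3 ones):

    #include <stdbool.h>
    #include <stdint.h>

    /* Slot index within a ring of 2^size_log2 entries. */
    static uint32_t q_ptr2idx(uint32_t ptr, uint32_t size_log2)
    {
        return ptr & ((1u << size_log2) - 1);
    }

    /* Generation expected at this pointer: toggles 0/1 on each ring wrap. */
    static uint32_t q_genbit(uint32_t ptr, uint32_t size_log2)
    {
        return (ptr >> size_log2) & 1u;
    }

    struct cqe {
        uint32_t genbit;   /* written by "hardware" together with the entry */
        uint32_t data;
    };

    /* The entry at rptr is valid only while its genbit matches the expected one. */
    static bool cq_vld_entry(const struct cqe *ring, uint32_t rptr, uint32_t size_log2)
    {
        return ring[q_ptr2idx(rptr, size_log2)].genbit == q_genbit(rptr, size_log2);
    }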
|
/Linux-v5.4/drivers/infiniband/hw/mlx4/ |
D | cq.c |
    81   struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);  in get_sw_cqe() local
    82   struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);  in get_sw_cqe()
    85   !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;  in get_sw_cqe()
    133  static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)  in mlx4_ib_free_cq_buf() argument
    135  mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);  in mlx4_ib_free_cq_buf()
    140  struct ib_umem **umem, u64 buf_addr, int cqe)  in mlx4_ib_get_cq_umem() argument
    147  *umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,  in mlx4_ib_get_cq_umem()
    179  int entries = attr->cqe;  in mlx4_ib_create_cq()
    196  cq->ibcq.cqe = entries - 1;  in mlx4_ib_create_cq()
    282  mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);  in mlx4_ib_create_cq()
    [all …]
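mlx4's get_sw_cqe() (and its mlx5 twin under hw/mlx5 below) folds the generation into a single ownership bit compared against the wrap parity of the consumer index: since ibcq.cqe is stored as size - 1 for a power-of-two ring, n & (ibcq.cqe + 1) isolates exactly the bit that flips on each wrap. A distilled sketch (field names are simplified and the 64-byte-stride handling is dropped):

    #include <stddef.h>
    #include <stdint.h>

    #define CQE_OWNER_MASK 0x80   /* stands in for MLX4_CQE_OWNER_MASK */

    struct cqe {
        uint8_t owner_sr_opcode;
        uint8_t payload[31];
    };

    /*
     * n is the free-running consumer counter; ibcq_cqe = ring_size - 1.
     * The owner bit hardware wrote must equal the wrap parity of n,
     * i.e. bit log2(ring_size) of the counter, for software to own it.
     */
    static struct cqe *get_sw_cqe(struct cqe *ring, uint32_t n, uint32_t ibcq_cqe)
    {
        struct cqe *cqe = &ring[n & ibcq_cqe];
        int hw_gen = !!(cqe->owner_sr_opcode & CQE_OWNER_MASK);
        int sw_gen = !!(n & (ibcq_cqe + 1));

        return (hw_gen ^ sw_gen) ? NULL : cqe;
    }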
|
/Linux-v5.4/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_cq.c |
    83   cq->ibcq.cqe, &head);  in pvrdma_req_notify_cq()
    105  int entries = attr->cqe;  in pvrdma_create_cq()
    129  cq->ibcq.cqe = entries;  in pvrdma_create_cq()
    183  cmd->cqe = entries;  in pvrdma_create_cq()
    192  cq->ibcq.cqe = resp->cqe;  in pvrdma_create_cq()
    284  cq->ibcq.cqe, &head);  in _pvrdma_flush_cqe()
    289  cq->ibcq.cqe);  in _pvrdma_flush_cqe()
    290  struct pvrdma_cqe *cqe;  in _pvrdma_flush_cqe() local
    294  (cq->ibcq.cqe - head + tail);  in _pvrdma_flush_cqe()
    298  curr = cq->ibcq.cqe - 1;  in _pvrdma_flush_cqe()
    [all …]
|
/Linux-v5.4/drivers/infiniband/sw/siw/ |
D | siw_cq.c |
    50   struct siw_cqe *cqe;  in siw_reap_cqe() local
    55   cqe = &cq->queue[cq->cq_get % cq->num_cqe];  in siw_reap_cqe()
    56   if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {  in siw_reap_cqe()
    58   wc->wr_id = cqe->id;  in siw_reap_cqe()
    59   wc->status = map_cqe_status[cqe->status].ib;  in siw_reap_cqe()
    60   wc->opcode = map_wc_opcode[cqe->opcode];  in siw_reap_cqe()
    61   wc->byte_len = cqe->bytes;  in siw_reap_cqe()
    69   if (cqe->flags & SIW_WQE_REM_INVAL) {  in siw_reap_cqe()
    70   wc->ex.invalidate_rkey = cqe->inval_stag;  in siw_reap_cqe()
    73   wc->qp = cqe->base_qp;  in siw_reap_cqe()
    [all …]
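siw_reap_cqe() consumes a ring shared with in-kernel producers: the VALID flag is read with READ_ONCE() before any other field is trusted, and the slot is released once the completion has been copied out. A compact model using C11 atomics in place of the kernel's READ_ONCE/WRITE_ONCE (all names illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define WQE_VALID 1u

    struct cqe {
        _Atomic uint32_t flags;
        uint64_t id;
        uint32_t bytes;
    };

    struct cq {
        struct cqe *queue;
        uint32_t num_cqe;
        uint32_t cq_get;   /* free-running consumer counter */
    };

    /* Copy one completion out of the ring; false if none is ready. */
    static bool reap_cqe(struct cq *cq, uint64_t *wr_id, uint32_t *byte_len)
    {
        struct cqe *cqe = &cq->queue[cq->cq_get % cq->num_cqe];

        if (!(atomic_load_explicit(&cqe->flags, memory_order_acquire) & WQE_VALID))
            return false;

        *wr_id = cqe->id;       /* safe only after the acquire load above */
        *byte_len = cqe->bytes;

        /* Hand the slot back to the producer side. */
        atomic_store_explicit(&cqe->flags, 0, memory_order_release);
        cq->cq_get++;
        return true;
    }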
|
/Linux-v5.4/drivers/infiniband/sw/rxe/ |
D | rxe_cq.c |
    39   int cqe, int comp_vector)  in rxe_cq_chk_attr() argument
    43   if (cqe <= 0) {  in rxe_cq_chk_attr()
    44   pr_warn("cqe(%d) <= 0\n", cqe);  in rxe_cq_chk_attr()
    48   if (cqe > rxe->attr.max_cqe) {  in rxe_cq_chk_attr()
    50   cqe, rxe->attr.max_cqe);  in rxe_cq_chk_attr()
    56   if (cqe < count) {  in rxe_cq_chk_attr()
    58   cqe, count);  in rxe_cq_chk_attr()
    84   int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,  in rxe_cq_from_init() argument
    90   cq->queue = rxe_queue_init(rxe, &cqe,  in rxe_cq_from_init()
    113  cq->ibcq.cqe = cqe;  in rxe_cq_from_init()
    [all …]
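rxe_cq_chk_attr() is plain bounds checking on the requested depth: reject non-positive counts, counts above the device cap, and, on a resize, anything smaller than the completions still queued. Distilled into a standalone function (hypothetical signature):

    #include <stdio.h>

    /* 0 if the requested depth is usable, -1 otherwise. */
    static int cq_chk_attr(int cqe, int max_cqe, int queued)
    {
        if (cqe <= 0) {
            fprintf(stderr, "cqe(%d) <= 0\n", cqe);
            return -1;
        }
        if (cqe > max_cqe) {
            fprintf(stderr, "cqe(%d) > max_cqe(%d)\n", cqe, max_cqe);
            return -1;
        }
        if (cqe < queued) {   /* resize must keep everything not yet polled */
            fprintf(stderr, "cqe(%d) < current # elements in queue (%d)\n",
                    cqe, queued);
            return -1;
        }
        return 0;
    }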
|
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_rx.c |
    96   struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);  in mlx5e_cqes_update_owner() local
    98   cqe->op_own = op_own;  in mlx5e_cqes_update_owner()
    104  struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);  in mlx5e_cqes_update_owner() local
    106  cqe->op_own = op_own;  in mlx5e_cqes_update_owner()
    591  struct mlx5_cqe64 *cqe;  in mlx5e_poll_ico_cq() local
    598  cqe = mlx5_cqwq_get_cqe(&cq->wq);  in mlx5e_poll_ico_cq()
    599  if (likely(!cqe))  in mlx5e_poll_ico_cq()
    614  wqe_counter = be16_to_cpu(cqe->wqe_counter);  in mlx5e_poll_ico_cq()
    616  if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {  in mlx5e_poll_ico_cq()
    618  "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));  in mlx5e_poll_ico_cq()
    [all …]
|
/Linux-v5.4/tools/io_uring/ |
D | io_uring-cp.c |
    126  struct io_uring_cqe *cqe;  in copy_file() local
    175  ret = io_uring_wait_cqe(ring, &cqe);  in copy_file()
    178  ret = io_uring_peek_cqe(ring, &cqe);  in copy_file()
    184  if (!cqe)  in copy_file()
    187  data = io_uring_cqe_get_data(cqe);  in copy_file()
    188  if (cqe->res < 0) {  in copy_file()
    189  if (cqe->res == -EAGAIN) {  in copy_file()
    191  io_uring_cqe_seen(ring, cqe);  in copy_file()
    195  strerror(-cqe->res));  in copy_file()
    197  } else if ((size_t) cqe->res != data->iov.iov_len) {  in copy_file()
    [all …]
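io_uring-cp drains completions with the standard liburing pattern: block in io_uring_wait_cqe() for the first entry, opportunistically io_uring_peek_cqe() for the rest, resubmit on -EAGAIN, and retire every entry with io_uring_cqe_seen(). A minimal self-contained read using the same calls (error handling mostly trimmed; the file path is arbitrary):

    /* build with: cc demo.c -luring */
    #include <fcntl.h>
    #include <liburing.h>
    #include <stdio.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        static char buf[4096];
        int fd = open("/etc/hostname", O_RDONLY);

        io_uring_queue_init(8, &ring, 0);

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
        io_uring_sqe_set_data(sqe, buf);        /* round-trips through the CQE */
        io_uring_submit(&ring);

        if (io_uring_wait_cqe(&ring, &cqe) == 0) {   /* blocks for a completion */
            if (cqe->res < 0)
                fprintf(stderr, "read failed: %d\n", cqe->res);
            else
                printf("read %d bytes into %p\n", cqe->res,
                       io_uring_cqe_get_data(cqe));
            io_uring_cqe_seen(&ring, cqe);      /* advance the CQ ring head */
        }

        io_uring_queue_exit(&ring);
        return 0;
    }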
|
/Linux-v5.4/drivers/infiniband/hw/cxgb4/ |
D | cq.c |
    186  struct t4_cqe cqe;  in insert_recv_cqe() local
    190  memset(&cqe, 0, sizeof(cqe));  in insert_recv_cqe()
    191  cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |  in insert_recv_cqe()
    196  cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));  in insert_recv_cqe()
    198  cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);  in insert_recv_cqe()
    199  cq->sw_queue[cq->sw_pidx] = cqe;  in insert_recv_cqe()
    220  struct t4_cqe cqe;  in insert_sq_cqe() local
    224  memset(&cqe, 0, sizeof(cqe));  in insert_sq_cqe()
    225  cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |  in insert_sq_cqe()
    230  CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;  in insert_sq_cqe()
    [all …]
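Both the T3 code earlier and this T4 version synthesize completions at teardown: a zeroed CQE is stamped with a software-flush status and the current generation bit, then appended to a software queue that the poll path drains ahead of the hardware ring. In outline (types, queue size, and the header packing are all illustrative):

    #include <stdint.h>
    #include <string.h>

    #define STATUS_SWFLUSH 5u   /* stand-in for T4_ERR_SWFLUSH */
    #define SW_QUEUE_DEPTH 256u

    struct cqe {
        uint32_t header;         /* status/opcode/qpid packed together */
        uint64_t bits_type_ts;   /* carries the generation bit, among others */
    };

    struct cq {
        struct cqe sw_queue[SW_QUEUE_DEPTH];
        uint32_t sw_pidx;        /* software producer index */
        uint32_t sw_in_use;
        uint32_t gen;
    };

    static void insert_flush_cqe(struct cq *cq, uint32_t qpid)
    {
        struct cqe cqe;

        memset(&cqe, 0, sizeof(cqe));
        cqe.header = (STATUS_SWFLUSH << 24) | qpid;     /* packing is made up */
        cqe.bits_type_ts = (uint64_t)(cq->gen & 1) << 63;

        cq->sw_queue[cq->sw_pidx] = cqe;
        cq->sw_pidx = (cq->sw_pidx + 1) % SW_QUEUE_DEPTH;
        cq->sw_in_use++;
    }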
|
/Linux-v5.4/drivers/net/ethernet/mellanox/mlxsw/ |
D | pci_hw.h |
    111  static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
    116  return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
    118  return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
    120  return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
    124  char *cqe, u32 val) \
    129  mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
    132  mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
    135  mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
    153  MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
    164  MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
    [all …]
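pci_hw.h hides three hardware CQE layouts (v0/v1/v2) behind one accessor per field: a macro emits get/set wrappers that switch on the runtime CQE version and forward to the per-version helpers that MLXSW_ITEM32() generates. The shape of that dispatch, cut down to a sketch (names, offsets, and the two-layout simplification are hypothetical):

    #include <stdint.h>
    #include <string.h>

    enum cqe_v { CQE_V0, CQE_V1, CQE_V2 };

    /* Per-version field helpers, as MLXSW_ITEM32() would generate them. */
    static uint32_t cqe0_wqe_counter_get(const char *cqe)
    {
        uint16_t v;
        memcpy(&v, cqe + 4, sizeof(v));   /* offset is illustrative */
        return v;
    }

    static uint32_t cqe12_wqe_counter_get(const char *cqe)
    {
        uint16_t v;
        memcpy(&v, cqe + 6, sizeof(v));   /* offset is illustrative */
        return v;
    }

    /* One dispatcher per field, switching on the runtime CQE version. */
    #define CQE_FIELD_GET(name, v0_fn, v12_fn)                          \
    static uint32_t cqe_##name##_get(enum cqe_v v, const char *cqe)     \
    {                                                                   \
        switch (v) {                                                    \
        case CQE_V0:                                                    \
            return v0_fn(cqe);                                          \
        default:  /* v1 and v2 share this field's layout here */        \
            return v12_fn(cqe);                                         \
        }                                                               \
    }

    CQE_FIELD_GET(wqe_counter, cqe0_wqe_counter_get, cqe12_wqe_counter_get)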
|
/Linux-v5.4/drivers/infiniband/hw/mlx5/ |
D | cq.c |
    80   void *cqe = get_cqe(cq, n & cq->ibcq.cqe);  in get_sw_cqe() local
    83   cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;  in get_sw_cqe()
    86   !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {  in get_sw_cqe()
    87   return cqe;  in get_sw_cqe()
    116  static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  in handle_good_req() argument
    120  switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {  in handle_good_req()
    136  wc->byte_len = be32_to_cpu(cqe->byte_cnt);  in handle_good_req()
    165  static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,  in handle_responder() argument
    181  msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));  in handle_responder()
    187  wqe_ctr = be16_to_cpu(cqe->wqe_counter);  in handle_responder()
    [all …]
|
/Linux-v5.4/drivers/infiniband/hw/bnxt_re/ |
D | qplib_fp.c |
    1391  struct cq_req *cqe = (struct cq_req *)hw_cqe;  in __clean_cq() local
    1393  if (qp == le64_to_cpu(cqe->qp_handle))  in __clean_cq()
    1394  cqe->qp_handle = 0;  in __clean_cq()
    1401  struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;  in __clean_cq() local
    1403  if (qp == le64_to_cpu(cqe->qp_handle))  in __clean_cq()
    1404  cqe->qp_handle = 0;  in __clean_cq()
    2019  struct bnxt_qplib_cqe *cqe;  in __flush_sq() local
    2024  cqe = *pcqe;  in __flush_sq()
    2035  memset(cqe, 0, sizeof(*cqe));  in __flush_sq()
    2036  cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;  in __flush_sq()
    [all …]
|
/Linux-v5.4/drivers/infiniband/sw/rdmavt/ |
D | cq.c |
    96   if (head >= (unsigned)cq->ibcq.cqe) {  in rvt_cq_enter()
    97   head = cq->ibcq.cqe;  in rvt_cq_enter()
    209  unsigned int entries = attr->cqe;  in rvt_create_cq()
    291  cq->ibcq.cqe = entries;  in rvt_create_cq()
    379  int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)  in rvt_resize_cq() argument
    391  if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)  in rvt_resize_cq()
    398  sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);  in rvt_resize_cq()
    404  sz = sizeof(struct ib_wc) * (cqe + 1);  in rvt_resize_cq()
    434  if (head > (u32)cq->ibcq.cqe)  in rvt_resize_cq()
    435  head = (u32)cq->ibcq.cqe;  in rvt_resize_cq()
    [all …]
|
/Linux-v5.4/drivers/net/ethernet/qlogic/qede/ |
D | qede_fp.c |
    627  struct eth_fast_path_rx_tpa_start_cqe *cqe)  in qede_set_gro_params() argument
    629  u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);  in qede_set_gro_params()
    637  skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -  in qede_set_gro_params()
    638  cqe->header_len;  in qede_set_gro_params()
    807  struct eth_fast_path_rx_tpa_start_cqe *cqe)  in qede_tpa_start() argument
    809  struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];  in qede_tpa_start()
    814  pad = cqe->placement_offset + rxq->rx_headroom;  in qede_tpa_start()
    817  le16_to_cpu(cqe->len_on_first_bd),  in qede_tpa_start()
    838  if ((le16_to_cpu(cqe->pars_flags.flags) >>  in qede_tpa_start()
    841  tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);  in qede_tpa_start()
    [all …]
|
D | qede_ptp.h |
    48   union eth_rx_cqe *cqe,  in qede_ptp_record_rx_ts() argument
    52   if (unlikely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) &  in qede_ptp_record_rx_ts()
    54   if (likely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags)  in qede_ptp_record_rx_ts()
|
/Linux-v5.4/drivers/scsi/qedi/ |
D | qedi_fw.c |
    31   union iscsi_cqe *cqe,  in qedi_process_logout_resp() argument
    42   cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;  in qedi_process_logout_resp()
    50   resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);  in qedi_process_logout_resp()
    81   union iscsi_cqe *cqe,  in qedi_process_text_resp() argument
    96   cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;  in qedi_process_text_resp()
    108  resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,  in qedi_process_text_resp()
    181  union iscsi_cqe *cqe,  in qedi_process_tmf_resp() argument
    193  cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;  in qedi_process_tmf_resp()
    217  resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,  in qedi_process_tmf_resp()
    252  union iscsi_cqe *cqe,  in qedi_process_login_resp() argument
    [all …]
|
/Linux-v5.4/drivers/net/ethernet/huawei/hinic/ |
D | hinic_hw_qp.c |
    319  cqe_size = wq->q_depth * sizeof(*rq->cqe);  in alloc_rq_cqe()
    320  rq->cqe = vzalloc(cqe_size);  in alloc_rq_cqe()
    321  if (!rq->cqe)  in alloc_rq_cqe()
    330  rq->cqe[i] = dma_alloc_coherent(&pdev->dev,  in alloc_rq_cqe()
    331  sizeof(*rq->cqe[i]),  in alloc_rq_cqe()
    333  if (!rq->cqe[i])  in alloc_rq_cqe()
    341  dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],  in alloc_rq_cqe()
    347  vfree(rq->cqe);  in alloc_rq_cqe()
    363  dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],  in free_rq_cqe()
    367  vfree(rq->cqe);  in free_rq_cqe()
    [all …]
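alloc_rq_cqe() shows the standard partial-failure unwind: a zeroed pointer table is filled with one DMA-coherent buffer per queue entry, and on any failure everything allocated so far is freed before the table itself. The same shape in portable C, with malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent() (names illustrative):

    #include <stdlib.h>

    struct rq {
        void **cqe;   /* table of per-entry completion buffers */
        int depth;
    };

    static int alloc_rq_cqe(struct rq *rq, int depth, size_t entry_size)
    {
        int i, j;

        rq->cqe = calloc(depth, sizeof(*rq->cqe));
        if (!rq->cqe)
            return -1;

        for (i = 0; i < depth; i++) {
            rq->cqe[i] = malloc(entry_size);   /* dma_alloc_coherent() in the driver */
            if (!rq->cqe[i])
                goto err_unwind;
        }
        rq->depth = depth;
        return 0;

    err_unwind:
        for (j = 0; j < i; j++)   /* free only what was actually allocated */
            free(rq->cqe[j]);
        free(rq->cqe);
        rq->cqe = NULL;
        return -1;
    }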
|
/Linux-v5.4/drivers/infiniband/ulp/iser/ |
D | iscsi_iser.h |
    252  struct ib_cqe cqe;  member
    278  struct ib_cqe cqe;  member
    298  struct ib_cqe cqe;  member
    656  iser_rx(struct ib_cqe *cqe)  in iser_rx() argument
    658  return container_of(cqe, struct iser_rx_desc, cqe);  in iser_rx()
    662  iser_tx(struct ib_cqe *cqe)  in iser_tx() argument
    664  return container_of(cqe, struct iser_tx_desc, cqe);  in iser_tx()
    668  iser_login(struct ib_cqe *cqe)  in iser_login() argument
    670  return container_of(cqe, struct iser_login_desc, cqe);  in iser_login()
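iser embeds a struct ib_cqe in each descriptor type and recovers the containing descriptor inside the completion handler with container_of(); the one pointer carried by the work completion is all the dispatch needs. The idiom, self-contained (the done() signature is simplified; the kernel's takes an ib_cq and an ib_wc):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ib_cqe {
        void (*done)(struct ib_cqe *cqe);   /* completion callback, simplified */
    };

    struct rx_desc {
        char data[128];
        struct ib_cqe cqe;   /* embedded; its address is what the CQ hands back */
    };

    static struct rx_desc *to_rx(struct ib_cqe *cqe)
    {
        return container_of(cqe, struct rx_desc, cqe);
    }

    static void rx_done(struct ib_cqe *cqe)
    {
        struct rx_desc *desc = to_rx(cqe);

        printf("rx completion for desc %p\n", (void *)desc);
    }

    int main(void)
    {
        struct rx_desc d = { .cqe = { .done = rx_done } };

        d.cqe.done(&d.cqe);   /* what the CQ polling core effectively does */
        return 0;
    }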
|
/Linux-v5.4/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_verbs.c |
    983   int entries = attr->cqe;  in ocrdma_create_cq()
    1037  ibcq->cqe = new_cnt;  in ocrdma_resize_cq()
    1048  struct ocrdma_cqe *cqe = NULL;  in ocrdma_flush_cq() local
    1050  cqe = cq->va;  in ocrdma_flush_cq()
    1058  if (is_cqe_valid(cq, cqe))  in ocrdma_flush_cq()
    1060  cqe++;  in ocrdma_flush_cq()
    1607  struct ocrdma_cqe *cqe;  in ocrdma_discard_cqes() local
    1626  cqe = cq->va + cur_getp;  in ocrdma_discard_cqes()
    1631  qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;  in ocrdma_discard_cqes()
    1637  if (is_cqe_for_sq(cqe)) {  in ocrdma_discard_cqes()
    [all …]
|
/Linux-v5.4/include/linux/mlx5/ |
D | device.h |
    810  static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)  in mlx5_get_cqe_format() argument
    812  return (cqe->op_own >> 2) & 0x3;  in mlx5_get_cqe_format()
    815  static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)  in get_cqe_opcode() argument
    817  return cqe->op_own >> 4;  in get_cqe_opcode()
    820  static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)  in get_cqe_lro_tcppsh() argument
    822  return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;  in get_cqe_lro_tcppsh()
    825  static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)  in get_cqe_l4_hdr_type() argument
    827  return (cqe->l4_l3_hdr_type >> 4) & 0x7;  in get_cqe_l4_hdr_type()
    830  static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)  in get_cqe_l3_hdr_type() argument
    832  return (cqe->l4_l3_hdr_type >> 2) & 0x3;  in get_cqe_l3_hdr_type()
    [all …]
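These mlx5 helpers pack several fields into single CQE bytes and expose them as shift-and-mask accessors: the format sits in bits 3:2 of op_own, the opcode in the top nibble, and so on. The same accessors in isolation (shifts copied from the snippets; the struct is cut down to the three bytes they touch):

    #include <stdint.h>

    struct cqe64 {
        uint8_t op_own;                    /* opcode[7:4] | format[3:2] | owner[0] */
        uint8_t l4_l3_hdr_type;
        uint8_t lro_tcppsh_abort_dupack;
    };

    static inline uint8_t cqe_format(const struct cqe64 *cqe)
    {
        return (cqe->op_own >> 2) & 0x3;
    }

    static inline uint8_t cqe_opcode(const struct cqe64 *cqe)
    {
        return cqe->op_own >> 4;
    }

    static inline uint8_t cqe_l4_hdr_type(const struct cqe64 *cqe)
    {
        return (cqe->l4_l3_hdr_type >> 4) & 0x7;
    }

    static inline uint8_t cqe_l3_hdr_type(const struct cqe64 *cqe)
    {
        return (cqe->l4_l3_hdr_type >> 2) & 0x3;
    }

    static inline uint8_t cqe_lro_tcppsh(const struct cqe64 *cqe)
    {
        return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
    }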
|
/Linux-v5.4/drivers/nvme/target/ |
D | fabrics-cmd.c |
    75   req->cqe->result.u64 = cpu_to_le64(val);  in nvmet_execute_prop_get()
    127  req->cqe->sq_head = cpu_to_le16(0xffff);  in nvmet_install_queue()
    161  req->cqe->result.u32 = 0;  in nvmet_execute_admin_connect()
    175  req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);  in nvmet_execute_admin_connect()
    198  req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);  in nvmet_execute_admin_connect()
    225  req->cqe->result.u32 = 0;  in nvmet_execute_io_connect()
    243  req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);  in nvmet_execute_io_connect()
    250  req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);  in nvmet_execute_io_connect()
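Here cqe is the NVMe completion the target posts back to the host: connect and property-get handlers place their payload in its result union, little-endian, before the transport sends it. A toy version of that fill step (struct reduced to the fields these handlers touch; byte swapping omitted, so a little-endian host is assumed):

    #include <stdint.h>

    /* Reduced NVMe CQE: just the fields the fabrics handlers set. */
    struct nvme_completion {
        union {
            uint16_t u16;
            uint32_t u32;
            uint64_t u64;
        } result;          /* command-specific result, little-endian on the wire */
        uint16_t sq_head;
        uint16_t status;
    };

    /* Property-get: return a 64-bit register value through the CQE. */
    static void prop_get_done(struct nvme_completion *cqe, uint64_t val)
    {
        cqe->result.u64 = val;     /* cpu_to_le64() in the kernel */
    }

    /* Connect: hand the allocated controller ID back to the host. */
    static void connect_done(struct nvme_completion *cqe, uint16_t cntlid)
    {
        cqe->result.u16 = cntlid;  /* cpu_to_le16() in the kernel */
        cqe->sq_head = 0xffff;     /* "SQ head not reported", as at line 127 */
    }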
|
/Linux-v5.4/drivers/scsi/qedf/ |
D | qedf.h |
    253  struct fcoe_cqe cqe;  member
    483  extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
    486  struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
    488  struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
    493  extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
    506  extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
    513  struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
    515  extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
    517  struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
    526  struct fcoe_cqe *cqe);
    [all …]
|