/Linux-v4.19/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
    187  priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,  in ipoib_transport_dev_init()
    189  if (IS_ERR(priv->send_cq)) {  in ipoib_transport_dev_init()
    197  init_attr.send_cq = priv->send_cq;  in ipoib_transport_dev_init()
    215  if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))  in ipoib_transport_dev_init()
    241  ib_destroy_cq(priv->send_cq);  in ipoib_transport_dev_init()
    263  if (ib_destroy_cq(priv->send_cq))  in ipoib_transport_dev_cleanup()

/Linux-v4.19/drivers/infiniband/ulp/ipoib/ipoib_ib.c
    436  n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);  in poll_tx()
    504  n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);  in ipoib_tx_poll()
    516  if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |  in ipoib_tx_poll()
    645  if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |  in ipoib_send()

/Linux-v4.19/drivers/infiniband/ulp/ipoib/ipoib_cm.c
    255  .send_cq = priv->recv_cq, /* For drain WR */  in ipoib_cm_create_rx_qp()
    769  rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |  in ipoib_cm_send()
   1059  .send_cq = priv->send_cq,  in ipoib_cm_create_tx_qp()
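The ipoib hits above show the whole send-CQ life cycle: create the CQ with ib_create_cq(), hand it to the QP through init_attr.send_cq, arm it with ib_req_notify_cq(), reap completions with ib_poll_cq(), and destroy it on cleanup. The poll sites also show the standard re-arm idiom: asking for IB_CQ_REPORT_MISSED_EVENTS makes ib_req_notify_cq() return a positive value when completions arrived after the last poll, in which case the caller must poll again instead of waiting for the next event. A minimal sketch of that drain-and-rearm loop, assuming a hypothetical handle_tx_wc() helper (priv->send_cq, priv->send_wc, and MAX_SEND_CQE are taken from the hits above):

    /* Sketch only; not the verbatim ipoib code. */
    static void drain_send_cq(struct ipoib_dev_priv *priv)
    {
            int n, i;

    again:
            /* Reap up to MAX_SEND_CQE completions per ib_poll_cq() call. */
            while ((n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc)) > 0)
                    for (i = 0; i < n; ++i)
                            handle_tx_wc(priv, &priv->send_wc[i]);  /* hypothetical */

            /* Re-arm; a positive return means a completion slipped in
             * between the final poll and the re-arm, so poll once more. */
            if (ib_req_notify_cq(priv->send_cq,
                                 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) > 0)
                    goto again;
    }

Without the IB_CQ_REPORT_MISSED_EVENTS recheck, a completion landing between the final ib_poll_cq() and the re-arm would sit unprocessed until a later send generated the next event.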
/Linux-v4.19/drivers/infiniband/hw/mthca/mthca_qp.c
    731  qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);  in __mthca_modify_qp()
    833  if (qp->ibqp.send_cq != qp->ibqp.recv_cq)  in __mthca_modify_qp()
    834  mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);  in __mthca_modify_qp()
   1155  struct mthca_cq *send_cq,  in mthca_alloc_qp_common()  (argument)
   1283  struct mthca_cq *send_cq,  in mthca_alloc_qp()  (argument)
   1310  err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,  in mthca_alloc_qp()
   1325  static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)  in mthca_lock_cqs()  (argument)
   1326  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)  in mthca_lock_cqs()
   1328  if (send_cq == recv_cq) {  in mthca_lock_cqs()
   1329  spin_lock_irq(&send_cq->lock);  in mthca_lock_cqs()
    [all …]

/Linux-v4.19/drivers/infiniband/hw/mthca/mthca_dev.h
    545  struct mthca_cq *send_cq,
    553  struct mthca_cq *send_cq,
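mthca_lock_cqs() above and hns_roce_lock_cqs()/mlx4_ib_lock_cqs() below all encode the same deadlock-avoidance rule: when a QP's send and receive CQs are distinct, take the lock of the CQ with the lower CQN first, and mark the inner acquisition with SINGLE_DEPTH_NESTING so lockdep accepts the nesting. A condensed sketch of the idea (struct my_cq is illustrative; each driver uses its own CQ struct, and mlx4 takes the plain non-irq lock variants):

    /* Sketch of the CQN-ordered dual-CQ locking used by mthca and hns;
     * illustrative, not copied from either driver. */
    struct my_cq {
            spinlock_t lock;
            u32 cqn;
    };

    static void lock_cqs(struct my_cq *send_cq, struct my_cq *recv_cq)
    {
            if (send_cq == recv_cq) {
                    spin_lock_irq(&send_cq->lock);
            } else if (send_cq->cqn < recv_cq->cqn) {
                    spin_lock_irq(&send_cq->lock);
                    spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
            } else {
                    spin_lock_irq(&recv_cq->lock);
                    spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
            }
    }

Because every path agrees on the CQN order, two CPUs tearing down QPs that share the same pair of CQs can never acquire the two locks in opposite orders; the destroy paths (e.g. hns_roce_v2_destroy_qp_common() below) rely on this while holding both locks to purge the dying QPN from each CQ.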
/Linux-v4.19/drivers/infiniband/hw/hns/hns_roce_qp.c
   1028  void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)  in hns_roce_lock_cqs()  (argument)
   1029  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)  in hns_roce_lock_cqs()
   1031  if (send_cq == recv_cq) {  in hns_roce_lock_cqs()
   1032  spin_lock_irq(&send_cq->lock);  in hns_roce_lock_cqs()
   1034  } else if (send_cq->cqn < recv_cq->cqn) {  in hns_roce_lock_cqs()
   1035  spin_lock_irq(&send_cq->lock);  in hns_roce_lock_cqs()
   1039  spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);  in hns_roce_lock_cqs()
   1044  void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,  in hns_roce_unlock_cqs()  (argument)
   1045  struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)  in hns_roce_unlock_cqs()
   1048  if (send_cq == recv_cq) {  in hns_roce_unlock_cqs()
    [all …]

/Linux-v4.19/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
     93  if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {  in hns_roce_v1_post_send()
    807  hr_qp->ibqp.send_cq = cq;  in hns_roce_v1_rsv_lp_qp()
   2690  to_hr_cq(ibqp->send_cq)->cqn);  in hns_roce_v1_m_sqp()
   2733  if (ibqp->send_cq != ibqp->recv_cq)  in hns_roce_v1_m_sqp()
   2734  hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),  in hns_roce_v1_m_sqp()
   2836  to_hr_cq(ibqp->send_cq)->cqn);  in hns_roce_v1_m_qp()
   2902  to_hr_cq(ibqp->send_cq)->cqn);  in hns_roce_v1_m_qp()
   3347  if (ibqp->send_cq != ibqp->recv_cq)  in hns_roce_v1_m_qp()
   3348  hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),  in hns_roce_v1_m_qp()
   3928  struct hns_roce_cq *send_cq, *recv_cq;  in hns_roce_v1_destroy_qp()  (local)
    [all …]

/Linux-v4.19/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
    218  if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {  in hns_roce_v2_post_send()
   2822  V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);  in modify_qp_reset_to_init()
   2926  V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);  in modify_qp_init_to_init()
   3637  if (ibqp->send_cq != ibqp->recv_cq)  in hns_roce_v2_modify_qp()
   3638  hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),  in hns_roce_v2_modify_qp()
   3833  struct hns_roce_cq *send_cq, *recv_cq;  in hns_roce_v2_destroy_qp_common()  (local)
   3848  send_cq = to_hr_cq(hr_qp->ibqp.send_cq);  in hns_roce_v2_destroy_qp_common()
   3851  hns_roce_lock_cqs(send_cq, recv_cq);  in hns_roce_v2_destroy_qp_common()
   3856  if (send_cq != recv_cq)  in hns_roce_v2_destroy_qp_common()
   3857  __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);  in hns_roce_v2_destroy_qp_common()
    [all …]

/Linux-v4.19/drivers/infiniband/hw/hns/hns_roce_device.h
    996  void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
    998  void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
/Linux-v4.19/drivers/infiniband/hw/mlx5/qp.c
     98  struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
    628  static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
    630  static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
   1417  if (init_attr->create_flags || init_attr->send_cq)  in create_rss_raw_qp_tir()
   1611  struct mlx5_ib_cq *send_cq;  in create_qp_common()  (local)
   1818  scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);  in create_qp_common()
   1873  if (init_attr->send_cq)  in create_qp_common()
   1874  MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);  in create_qp_common()
   1929  get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,  in create_qp_common()
   1930  &send_cq, &recv_cq);  in create_qp_common()
    [all …]

/Linux-v4.19/drivers/infiniband/hw/mlx5/gsi.c
     75  struct ib_cq *gsi_cq = gsi->ibqp.send_cq;  in generate_completions()
    181  hw_init_attr.send_cq = gsi->cq;  in mlx5_ib_gsi_create_qp()
    257  .send_cq = gsi->cq,  in create_gsi_ud_qp()
/Linux-v4.19/drivers/infiniband/hw/mlx4/qp.c
     51  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
     53  static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
    750  if (init_attr->send_cq || init_attr->cap.max_send_wr) {  in _mlx4_ib_create_qp_rss()
   1160  mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),  in create_qp_common()
   1169  mcq = to_mcq(init_attr->send_cq);  in create_qp_common()
   1173  mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),  in create_qp_common()
   1235  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)  in mlx4_ib_lock_cqs()  (argument)
   1236  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)  in mlx4_ib_lock_cqs()
   1238  if (send_cq == recv_cq) {  in mlx4_ib_lock_cqs()
   1239  spin_lock(&send_cq->lock);  in mlx4_ib_lock_cqs()
    [all …]
/Linux-v4.19/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
     55  static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,  in get_cqs()  (argument)
     58  *send_cq = to_vcq(qp->ibqp.send_cq);  in get_cqs()
    352  cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;  in pvrdma_create_qp()
    992  init_attr->send_cq = qp->ibqp.send_cq;  in pvrdma_query_qp()
/Linux-v4.19/drivers/net/ethernet/ibm/ehea/ehea_main.c
    209  arr[i++].fwh = pr->send_cq->fw_handle;  in ehea_update_firmware_handles()
    815  struct ehea_cq *send_cq = pr->send_cq;  in ehea_proc_cqes()  (local)
    824  cqe = ehea_poll_cq(send_cq);  in ehea_proc_cqes()
    826  ehea_inc_cq(send_cq);  in ehea_proc_cqes()
    866  cqe = ehea_poll_cq(send_cq);  in ehea_proc_cqes()
    869  ehea_update_feca(send_cq, cqe_counter);  in ehea_proc_cqes()
    904  ehea_reset_cq_ep(pr->send_cq);  in ehea_poll()
    906  ehea_reset_cq_n1(pr->send_cq);  in ehea_poll()
    909  cqe_skb = ehea_poll_cq(pr->send_cq);  in ehea_poll()
   1488  pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,  in ehea_init_port_res()
    [all …]

/Linux-v4.19/drivers/net/ethernet/ibm/ehea/ehea.h
    364  struct ehea_cq *send_cq;  (member)
/Linux-v4.19/drivers/infiniband/sw/rxe/rxe_qp.c
     90  if (!init->recv_cq || !init->send_cq) {  in rxe_qp_chk_init()
    332  struct rxe_cq *scq = to_rcq(init->send_cq);  in rxe_qp_from_init()
    379  init->send_cq = qp->ibqp.send_cq;  in rxe_qp_to_init()
/Linux-v4.19/net/sunrpc/xprtrdma/verbs.c
    453  ib_free_cq(ep->rep_attr.send_cq);  in rpcrdma_ia_remove()
    454  ep->rep_attr.send_cq = NULL;  in rpcrdma_ia_remove()
    567  ep->rep_attr.send_cq = sendcq;  in rpcrdma_ep_create()
    628  if (ep->rep_attr.send_cq)  in rpcrdma_ep_destroy()
    629  ib_free_cq(ep->rep_attr.send_cq);  in rpcrdma_ep_destroy()
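verbs.c here (and smbdirect.c further down) free the send CQ defensively: the pointer is cleared as soon as the CQ is released, and ib_free_cq() is only called on a non-NULL pointer, so the teardown path is safe to run from any partially-completed setup. A tiny sketch of the guard (the ep/rep_attr naming follows the hits above; treat this as illustrative rather than the exact rpcrdma code):

    if (ep->rep_attr.send_cq) {
            ib_free_cq(ep->rep_attr.send_cq);
            ep->rep_attr.send_cq = NULL;    /* a second teardown becomes a no-op */
    }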
/Linux-v4.19/drivers/infiniband/ulp/srp/ib_srp.h
    142  struct ib_cq *send_cq;  (member)

/Linux-v4.19/drivers/infiniband/ulp/srp/ib_srp.c
    534  ib_process_cq_direct(ch->send_cq, -1);  in srp_destroy_qp()
    546  struct ib_cq *recv_cq, *send_cq;  in srp_create_ch_ib()  (local)
    565  send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,  in srp_create_ch_ib()
    567  if (IS_ERR(send_cq)) {  in srp_create_ch_ib()
    568  ret = PTR_ERR(send_cq);  in srp_create_ch_ib()
    579  init_attr->send_cq = send_cq;  in srp_create_ch_ib()
    623  if (ch->send_cq)  in srp_create_ch_ib()
    624  ib_free_cq(ch->send_cq);  in srp_create_ch_ib()
    628  ch->send_cq = send_cq;  in srp_create_ch_ib()
    650  ib_free_cq(send_cq);  in srp_create_ch_ib()
    [all …]
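ib_srp allocates its send CQ for direct polling and, in srp_destroy_qp(), flushes whatever is still outstanding by polling the CQ synchronously before releasing it. A sketch of that pairing, assuming IB_POLL_DIRECT as the poll context (the hit at line 565 is truncated before the poll-context argument) and an illustrative nr_cqe:

    struct ib_cq *send_cq;

    /* A CQ created with IB_POLL_DIRECT is never driven by interrupts
     * or softirq; the owner must call ib_process_cq_direct() itself. */
    send_cq = ib_alloc_cq(dev, ch, nr_cqe, 0 /* comp_vector */, IB_POLL_DIRECT);
    if (IS_ERR(send_cq))
            return PTR_ERR(send_cq);

    /* ... wire send_cq into the QP's init_attr and do I/O ... */

    /* At QP teardown: reap every pending completion, then release. */
    ib_process_cq_direct(send_cq, -1);      /* -1 = poll until empty */
    ib_free_cq(send_cq);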
/Linux-v4.19/fs/cifs/smbdirect.c
    234  ib_free_cq(info->send_cq);  in smbd_destroy_rdma_work()
   1678  info->send_cq = NULL;  in _smbd_get_connection()
   1680  info->send_cq = ib_alloc_cq(info->id->device, info,  in _smbd_get_connection()
   1682  if (IS_ERR(info->send_cq)) {  in _smbd_get_connection()
   1683  info->send_cq = NULL;  in _smbd_get_connection()
   1704  qp_attr.send_cq = info->send_cq;  in _smbd_get_connection()
   1831  if (info->send_cq)  in _smbd_get_connection()
   1832  ib_free_cq(info->send_cq);  in _smbd_get_connection()

/Linux-v4.19/fs/cifs/smbdirect.h
     68  struct ib_cq *send_cq, *recv_cq;  (member)
/Linux-v4.19/drivers/infiniband/core/verbs.c
   1098  qp->send_cq = qp->recv_cq = NULL;  in ib_create_xrc_qp()
   1173  qp->send_cq = qp_init_attr->send_cq;  in ib_create_qp()
   1177  if (qp_init_attr->send_cq)  in ib_create_qp()
   1178  atomic_inc(&qp_init_attr->send_cq->usecnt);  in ib_create_qp()
   1831  scq = qp->send_cq;  in ib_destroy_qp()
   2478  struct ib_cq *cq = qp->send_cq;  in __ib_drain_sq()
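The core/verbs.c hits are where the binding happens: ib_create_qp() copies qp_init_attr->send_cq into qp->send_cq and bumps the CQ's usecnt, which is what stops a CQ from being destroyed while any QP still references it (ib_destroy_qp() drops the count again, and __ib_drain_sq() reaches the CQ through the same pointer). A minimal consumer-side sketch, with illustrative capacities (pd, send_cq, and recv_cq assumed already created):

    struct ib_qp_init_attr init_attr = {
            .send_cq     = send_cq,          /* may legally equal recv_cq */
            .recv_cq     = recv_cq,
            .qp_type     = IB_QPT_RC,
            .sq_sig_type = IB_SIGNAL_REQ_WR, /* signal only flagged sends */
            .cap = {
                    .max_send_wr  = 64,      /* illustrative sizing */
                    .max_recv_wr  = 64,
                    .max_send_sge = 1,
                    .max_recv_sge = 1,
            },
    };
    struct ib_qp *qp = ib_create_qp(pd, &init_attr);

    if (IS_ERR(qp))
            return PTR_ERR(qp);
    /* qp->send_cq now points at send_cq and the CQ's usecnt is
     * elevated, so the CQ must outlive the QP. */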
/Linux-v4.19/Documentation/infiniband/tag_matching.txt
     28  processed by the sender. A completion send is received in the send_cq
/Linux-v4.19/drivers/infiniband/hw/hfi1/qp.c
    605  ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,  in qp_iter_print()
    606  ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,  in qp_iter_print()
/Linux-v4.19/drivers/infiniband/hw/qedr/qedr_roce_cm.c
     62  dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);  in qedr_store_gsi_qp_cq()
    139  cq = get_qedr_cq(attrs->send_cq);  in qedr_destroy_gsi_cq()