| /Linux-v5.10/drivers/infiniband/core/ |
| D | uverbs_std_types_qp.c |
|    95  struct ib_cq *send_cq = NULL;                in UVERBS_HANDLER() local |
|   168  send_cq = uverbs_attr_get_obj(attrs,         in UVERBS_HANDLER() |
|   170  if (IS_ERR(send_cq))                         in UVERBS_HANDLER() |
|   171  return PTR_ERR(send_cq);                     in UVERBS_HANDLER() |
|   175  send_cq = uverbs_attr_get_obj(attrs,         in UVERBS_HANDLER() |
|   177  if (IS_ERR(send_cq))                         in UVERBS_HANDLER() |
|   178  return PTR_ERR(send_cq);                     in UVERBS_HANDLER() |
|   234  attr.send_cq = send_cq;                      in UVERBS_HANDLER() |
|   264  if (attr.send_cq)                            in UVERBS_HANDLER() |
|   265  atomic_inc(&attr.send_cq->usecnt);           in UVERBS_HANDLER() |
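The hits above show the standard kernel idiom for fallible pointer lookups: uverbs_attr_get_obj() returns either a valid object or an errno encoded with ERR_PTR(), so the handler tests IS_ERR() and propagates PTR_ERR(). A minimal sketch of the same idiom outside uverbs, where lookup_send_cq() is a hypothetical stand-in:

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical lookup: returns a CQ pointer or ERR_PTR(-errno). */
    struct ib_cq *lookup_send_cq(u32 handle);

    static int bind_send_cq(u32 handle, struct ib_qp_init_attr *attr)
    {
            struct ib_cq *send_cq = lookup_send_cq(handle);

            if (IS_ERR(send_cq))            /* encoded errno, not a CQ */
                    return PTR_ERR(send_cq);  /* hand the error code back */

            attr->send_cq = send_cq;
            return 0;
    }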
|
| D | core_priv.h |
|   345  qp->send_cq = attr->send_cq;    in _ib_create_qp() |
|
| D | verbs.c |
|  1170  qp->send_cq = qp->recv_cq = NULL;              in create_xrc_qp_user() |
|  1259  qp->send_cq = qp_init_attr->send_cq;           in ib_create_qp() |
|  1263  if (qp_init_attr->send_cq)                     in ib_create_qp() |
|  1264  atomic_inc(&qp_init_attr->send_cq->usecnt);    in ib_create_qp() |
|  1938  scq = qp->send_cq;                             in ib_destroy_qp_user() |
|  2696  struct ib_cq *cq = qp->send_cq;                in __ib_drain_sq() |
|  2789  trace_cq_drain_complete(qp->send_cq);          in ib_drain_sq() |
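ib_create_qp() copies qp_init_attr->send_cq onto the new QP and bumps the CQ's usecnt, which keeps the CQ alive for as long as any QP references it. A hedged sketch of the consumer side of that contract, assuming a device and PD already exist; the queue depths are illustrative:

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    /* Sketch: wire a send CQ into a new RC QP. */
    static struct ib_qp *create_rc_qp(struct ib_device *dev, struct ib_pd *pd)
    {
            struct ib_qp_init_attr init_attr = {};
            struct ib_cq *scq;
            struct ib_qp *qp;

            scq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
            if (IS_ERR(scq))
                    return ERR_CAST(scq);

            init_attr.send_cq = scq;
            init_attr.recv_cq = scq;        /* send and recv may share one CQ */
            init_attr.qp_type = IB_QPT_RC;
            init_attr.cap.max_send_wr = 128;
            init_attr.cap.max_recv_wr = 128;
            init_attr.cap.max_send_sge = 1;
            init_attr.cap.max_recv_sge = 1;

            qp = ib_create_qp(pd, &init_attr);  /* bumps scq->usecnt on success */
            if (IS_ERR(qp))
                    ib_free_cq(scq);
            return qp;
    }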
|
| /Linux-v5.10/drivers/infiniband/hw/hns/ |
| D | hns_roce_qp.c |
|   209  struct ib_cq *send_cq, struct ib_cq *recv_cq)                in add_qp_to_list() argument |
|   214  hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;             in add_qp_to_list() |
|   245  add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,            in hns_roce_qp_store() |
|  1193  void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)  in hns_roce_lock_cqs() argument |
|  1194  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)        in hns_roce_lock_cqs() |
|  1196  if (unlikely(send_cq == NULL && recv_cq == NULL)) {          in hns_roce_lock_cqs() |
|  1197  __acquire(&send_cq->lock);                                   in hns_roce_lock_cqs() |
|  1199  } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {   in hns_roce_lock_cqs() |
|  1200  spin_lock_irq(&send_cq->lock);                               in hns_roce_lock_cqs() |
|  1202  } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {   in hns_roce_lock_cqs() |
|  [all …] |
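hns_roce_lock_cqs() must cope with QPs that have no CQ at all, with one CQ doing double duty, and with two distinct CQs, taking the two locks in a fixed order so concurrent QP teardown cannot deadlock. A simplified sketch of that branch structure; the sparse __acquires()/__acquire() annotations are omitted, and ordering by CQN follows the convention the mthca/mlx4 equivalents below also use:

    #include <linux/spinlock.h>

    static void lock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq)
    {
            if (!send_cq && !recv_cq) {
                    /* nothing to lock */
            } else if (send_cq && !recv_cq) {
                    spin_lock_irq(&send_cq->lock);
            } else if (!send_cq && recv_cq) {
                    spin_lock_irq(&recv_cq->lock);
            } else if (send_cq == recv_cq) {
                    spin_lock_irq(&send_cq->lock);  /* one CQ, one lock */
            } else if (send_cq->cqn < recv_cq->cqn) {
                    /* fixed order by CQN prevents ABBA deadlock */
                    spin_lock_irq(&send_cq->lock);
                    spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
            } else {
                    spin_lock_irq(&recv_cq->lock);
                    spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
            }
    }

Taking the second lock with spin_lock_nested() tells lockdep that nesting two locks of the same class is intentional here.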
|
| D | hns_roce_hw_v1.c |
|    93  if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {   in hns_roce_v1_post_send() |
|   793  hr_qp->ibqp.send_cq = cq;                                      in hns_roce_v1_rsv_lp_qp() |
|  2578  to_hr_cq(ibqp->send_cq)->cqn);                                 in hns_roce_v1_m_sqp() |
|  2621  if (ibqp->send_cq != ibqp->recv_cq)                            in hns_roce_v1_m_sqp() |
|  2622  hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),                  in hns_roce_v1_m_sqp() |
|  2748  to_hr_cq(ibqp->send_cq)->cqn);                                 in hns_roce_v1_m_qp() |
|  2814  to_hr_cq(ibqp->send_cq)->cqn);                                 in hns_roce_v1_m_qp() |
|  3245  if (ibqp->send_cq != ibqp->recv_cq)                            in hns_roce_v1_m_qp() |
|  3246  hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),                  in hns_roce_v1_m_qp() |
|  3544  struct hns_roce_cq *send_cq, *recv_cq;                         in hns_roce_v1_destroy_qp() local |
|  [all …] |
|
| D | hns_roce_hw_v2.c |
|   667  if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {            in hns_roce_v2_post_send() |
|  3856  V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);                in modify_qp_reset_to_init() |
|  3933  V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);                in modify_qp_init_to_init() |
|  4832  if (ibqp->send_cq != ibqp->recv_cq)                                     in hns_roce_v2_modify_qp() |
|  4833  hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),                           in hns_roce_v2_modify_qp() |
|  5025  struct hns_roce_cq *send_cq, *recv_cq;                                  in hns_roce_v2_destroy_qp_common() local |
|  5039  send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;   in hns_roce_v2_destroy_qp_common() |
|  5043  hns_roce_lock_cqs(send_cq, recv_cq);                                    in hns_roce_v2_destroy_qp_common() |
|  5052  if (send_cq && send_cq != recv_cq)                                      in hns_roce_v2_destroy_qp_common() |
|  5053  __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);                      in hns_roce_v2_destroy_qp_common() |
|  [all …] |
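The destroy path visible here follows a pattern shared by several drivers: resolve both ib CQs to driver CQs (tolerating NULL), take both CQ locks, then purge completions still queued for the dying QPN, cleaning the send CQ separately only when it is distinct from the recv CQ. A schematic sketch stitched together from the hits above; the recv-side clean and hns_roce_unlock_cqs() are assumed symmetric counterparts rather than quoted lines:

    /* Sketch: CQ cleanup during QP destroy (hns_roce v2 style). */
    static void destroy_qp_clean_cqs(struct hns_roce_qp *hr_qp)
    {
            struct hns_roce_cq *send_cq, *recv_cq;

            send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
            recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;

            hns_roce_lock_cqs(send_cq, recv_cq);
            if (recv_cq)    /* assumed symmetric to the send-side clean */
                    __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, NULL);
            if (send_cq && send_cq != recv_cq)
                    __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
            hns_roce_unlock_cqs(send_cq, recv_cq);  /* assumed counterpart */
    }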
|
| /Linux-v5.10/drivers/infiniband/ulp/ipoib/ |
| D | ipoib_verbs.c |
|   187  priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,   in ipoib_transport_dev_init() |
|   189  if (IS_ERR(priv->send_cq)) {                                           in ipoib_transport_dev_init() |
|   197  init_attr.send_cq = priv->send_cq;                                     in ipoib_transport_dev_init() |
|   218  if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))                  in ipoib_transport_dev_init() |
|   244  ib_destroy_cq(priv->send_cq);                                          in ipoib_transport_dev_init() |
|   266  ib_destroy_cq(priv->send_cq);                                          in ipoib_transport_dev_cleanup() |
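ipoib still uses the older ib_create_cq()/ib_destroy_cq() interface and arms the send CQ once at init time with ib_req_notify_cq(). A hedged sketch of that lifecycle, with the completion handler body omitted:

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    static void tx_done(struct ib_cq *cq, void *ctx)
    {
            /* kick the TX completion polling path */
    }

    static struct ib_cq *setup_send_cq(struct ib_device *ca, void *ctx,
                                       int entries)
    {
            struct ib_cq_init_attr cq_attr = { .cqe = entries };
            struct ib_cq *cq;
            int ret;

            cq = ib_create_cq(ca, tx_done, NULL, ctx, &cq_attr);
            if (IS_ERR(cq))
                    return cq;

            /* arm once: ask for an event on the next completion */
            ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
            if (ret) {
                    ib_destroy_cq(cq);
                    return ERR_PTR(ret);
            }
            return cq;
    }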
|
| D | ipoib_ib.c |
|   439  n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);      in poll_tx() |
|   507  n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);      in ipoib_tx_poll() |
|   519  if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |   in ipoib_tx_poll() |
|   649  if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |            in ipoib_send() |
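The drain-then-rearm idiom behind ipoib_tx_poll(): poll until the CQ is empty, re-arm it, and poll again if completions slipped in between, which IB_CQ_REPORT_MISSED_EVENTS signals with a positive return value. A hedged sketch (batch size and TX bookkeeping are placeholders):

    #include <linux/kernel.h>
    #include <rdma/ib_verbs.h>

    static void drain_send_cq(struct ib_cq *cq)
    {
            struct ib_wc wc[16];
            int n;

    again:
            while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
                    int i;

                    for (i = 0; i < n; i++)
                            ;  /* complete the TX descriptor for wc[i] */
            }

            /* > 0 means completions may have arrived while unarmed */
            if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                 IB_CQ_REPORT_MISSED_EVENTS) > 0)
                    goto again;
    }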
|
| /Linux-v5.10/drivers/infiniband/hw/mthca/ |
| D | mthca_qp.c |
|   735  qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);   in __mthca_modify_qp() |
|   837  if (qp->ibqp.send_cq != qp->ibqp.recv_cq)                        in __mthca_modify_qp() |
|   838  mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);    in __mthca_modify_qp() |
|  1161  struct mthca_cq *send_cq,                                        in mthca_alloc_qp_common() argument |
|  1290  struct mthca_cq *send_cq,                                        in mthca_alloc_qp() argument |
|  1318  err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,           in mthca_alloc_qp() |
|  1333  static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)  in mthca_lock_cqs() argument |
|  1334  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)            in mthca_lock_cqs() |
|  1336  if (send_cq == recv_cq) {                                        in mthca_lock_cqs() |
|  1337  spin_lock_irq(&send_cq->lock);                                   in mthca_lock_cqs() |
|  [all …] |
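mthca_lock_cqs() special-cases send_cq == recv_cq (one lock, taken once) and otherwise locks in a fixed CQN order; its unlock counterpart must release in exactly the reverse order. A hedged sketch of what that counterpart looks like, with the sparse __releases() annotations omitted:

    /* Sketch: release both CQ locks in reverse of the locking order. */
    static void unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
    {
            if (send_cq == recv_cq) {
                    spin_unlock_irq(&send_cq->lock);
            } else if (send_cq->cqn < recv_cq->cqn) {
                    spin_unlock(&recv_cq->lock);      /* inner lock first */
                    spin_unlock_irq(&send_cq->lock);  /* then the irq lock */
            } else {
                    spin_unlock(&send_cq->lock);
                    spin_unlock_irq(&recv_cq->lock);
            }
    }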
|
| D | mthca_dev.h |
|   536  struct mthca_cq *send_cq, |
|   545  struct mthca_cq *send_cq, |
|
| /Linux-v5.10/include/rdma/ |
| D | rdmavt_qp.h |
|   805  struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);   in rvt_send_cq() |
|   920  static inline u32 ib_cq_tail(struct ib_cq *send_cq)    in ib_cq_tail() argument |
|   922  struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);            in ib_cq_tail() |
|   924  return ibcq_to_rvtcq(send_cq)->ip ?                    in ib_cq_tail() |
|   926  ibcq_to_rvtcq(send_cq)->kqueue->tail;                  in ib_cq_tail() |
|   936  static inline u32 ib_cq_head(struct ib_cq *send_cq)    in ib_cq_head() argument |
|   938  struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);            in ib_cq_head() |
|   940  return ibcq_to_rvtcq(send_cq)->ip ?                    in ib_cq_head() |
|   942  ibcq_to_rvtcq(send_cq)->kqueue->head;                  in ib_cq_head() |
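rdmavt keeps two completion-queue layouts, a user-mmapped queue when cq->ip is set and a kernel-only kqueue otherwise, which is why ib_cq_head()/ib_cq_tail() exist as accessors rather than direct field reads. Given those accessors, occupancy follows the usual ring arithmetic. A hedged sketch; it assumes indices stay in [0, nentries), which is how rvt maintains them, and nentries is caller-supplied:

    /* Sketch: how many completions sit between tail and head. */
    static inline u32 cq_pending(struct ib_cq *send_cq, u32 nentries)
    {
            u32 head = ib_cq_head(send_cq);
            u32 tail = ib_cq_tail(send_cq);

            return head >= tail ? head - tail : nentries - (tail - head);
    }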
|
| /Linux-v5.10/drivers/infiniband/hw/mlx5/ |
| D | qp.c |
|    75  struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq); |
|   744  static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, |
|   746  static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, |
|  1784  scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);                 in configure_requester_scat_cqe() |
|  1932  struct mlx5_ib_cq *send_cq;                                         in create_user_qp() local |
|  2050  if (init_attr->send_cq)                                             in create_user_qp() |
|  2051  MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);   in create_user_qp() |
|  2088  get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,           in create_user_qp() |
|  2089  &send_cq, &recv_cq);                                                in create_user_qp() |
|  2091  mlx5_ib_lock_cqs(send_cq, recv_cq);                                 in create_user_qp() |
|  [all …] |
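get_cqs() at line 2088 translates the caller's ib_cq pointers into driver CQs based on QP type, because some types legitimately carry no CQ at all. A hedged sketch of a helper with that shape; the exact set of special-cased types is an assumption, so consult the real mlx5 source:

    /* Sketch: map ib CQs to driver CQs, honoring CQ-less QP types. */
    static void get_cqs_sketch(enum ib_qp_type qp_type,
                               struct ib_cq *ib_send_cq,
                               struct ib_cq *ib_recv_cq,
                               struct mlx5_ib_cq **send_cq,
                               struct mlx5_ib_cq **recv_cq)
    {
            switch (qp_type) {
            case IB_QPT_XRC_TGT:
                    *send_cq = NULL;   /* XRC targets own neither CQ */
                    *recv_cq = NULL;
                    break;
            case IB_QPT_XRC_INI:
                    *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
                    *recv_cq = NULL;   /* initiator side has no recv CQ */
                    break;
            default:
                    *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
                    *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
                    break;
            }
    }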
|
| D | gsi.c |
|    50  struct ib_cq *gsi_cq = mqp->ibqp.send_cq;      in generate_completions() |
|   143  hw_init_attr.send_cq = gsi->cq;                 in mlx5_ib_create_gsi() |
|   162  gsi->rx_qp->send_cq = hw_init_attr.send_cq;     in mlx5_ib_create_gsi() |
|   227  .send_cq = gsi->cq,                             in create_gsi_ud_qp() |
|
| D | mem.c |
|   343  qp_init_attr.send_cq = cq;    in mlx5_ib_test_wc() |
|
| /Linux-v5.10/drivers/infiniband/hw/mlx4/ |
| D | qp.c |
|    52  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, |
|    54  static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, |
|   723  if (init_attr->send_cq || init_attr->cap.max_send_wr) {   in _mlx4_ib_create_qp_rss() |
|   921  mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),              in create_rq() |
|   930  mcq = to_mcq(init_attr->send_cq);                          in create_rq() |
|   934  mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),            in create_rq() |
|  1193  mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),              in create_qp_common() |
|  1202  mcq = to_mcq(init_attr->send_cq);                          in create_qp_common() |
|  1206  mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),            in create_qp_common() |
|  1261  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)  in mlx4_ib_lock_cqs() argument |
|  [all …] |
|
| /Linux-v5.10/net/sunrpc/xprtrdma/ |
| D | verbs.c |
|   369  if (ep->re_attr.send_cq)                                 in rpcrdma_ep_destroy() |
|   370  ib_free_cq(ep->re_attr.send_cq);                         in rpcrdma_ep_destroy() |
|   371  ep->re_attr.send_cq = NULL;                              in rpcrdma_ep_destroy() |
|   447  ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,    in rpcrdma_ep_create() |
|   450  if (IS_ERR(ep->re_attr.send_cq)) {                       in rpcrdma_ep_create() |
|   451  rc = PTR_ERR(ep->re_attr.send_cq);                       in rpcrdma_ep_create() |
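xprtrdma shows the modern pairing: ib_alloc_cq_any() picks a completion vector for you, ib_free_cq() releases it, and NULL-ing the cached pointer makes teardown idempotent. A hedged sketch of that create/destroy discipline, with the poll context and depth as illustrative choices:

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    static int ep_create_send_cq(struct ib_device *device, void *ctx,
                                 struct ib_qp_init_attr *attr, int depth)
    {
            attr->send_cq = ib_alloc_cq_any(device, ctx, depth,
                                            IB_POLL_WORKQUEUE);
            if (IS_ERR(attr->send_cq)) {
                    int rc = PTR_ERR(attr->send_cq);

                    attr->send_cq = NULL;   /* teardown stays idempotent */
                    return rc;
            }
            return 0;
    }

    static void ep_destroy_send_cq(struct ib_qp_init_attr *attr)
    {
            if (attr->send_cq)
                    ib_free_cq(attr->send_cq);
            attr->send_cq = NULL;
    }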
|
| /Linux-v5.10/drivers/net/ethernet/ibm/ehea/ |
| D | ehea_main.c |
|   195  arr[i++].fwh = pr->send_cq->fw_handle;     in ehea_update_firmware_handles() |
|   800  struct ehea_cq *send_cq = pr->send_cq;     in ehea_proc_cqes() local |
|   809  cqe = ehea_poll_cq(send_cq);               in ehea_proc_cqes() |
|   811  ehea_inc_cq(send_cq);                      in ehea_proc_cqes() |
|   851  cqe = ehea_poll_cq(send_cq);               in ehea_proc_cqes() |
|   854  ehea_update_feca(send_cq, cqe_counter);    in ehea_proc_cqes() |
|   889  ehea_reset_cq_ep(pr->send_cq);             in ehea_poll() |
|   891  ehea_reset_cq_n1(pr->send_cq);             in ehea_poll() |
|   894  cqe_skb = ehea_poll_cq(pr->send_cq);       in ehea_poll() |
|  1473  pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,   in ehea_init_port_res() |
|  [all …] |
|
| D | ehea.h |
|   350  struct ehea_cq *send_cq;    member |
|
| /Linux-v5.10/drivers/infiniband/hw/vmw_pvrdma/ |
| D | pvrdma_qp.c |
|    58  static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,   in get_cqs() argument |
|    61  *send_cq = to_vcq(qp->ibqp.send_cq);                                           in get_cqs() |
|   366  cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;                   in pvrdma_create_qp() |
|  1050  init_attr->send_cq = qp->ibqp.send_cq;                                         in pvrdma_query_qp() |
|
| /Linux-v5.10/drivers/infiniband/sw/rxe/ |
| D | rxe_qp.c |
|    65  if (!init->recv_cq || !init->send_cq) {        in rxe_qp_chk_init() |
|   315  struct rxe_cq *scq = to_rcq(init->send_cq);    in rxe_qp_from_init() |
|   361  init->send_cq = qp->ibqp.send_cq;              in rxe_qp_to_init() |
|
| /Linux-v5.10/fs/cifs/ |
| D | smbdirect.c |
|  1380  ib_free_cq(info->send_cq);          in smbd_destroy() |
|  1567  info->send_cq = NULL;               in _smbd_get_connection() |
|  1569  info->send_cq =                     in _smbd_get_connection() |
|  1572  if (IS_ERR(info->send_cq)) {        in _smbd_get_connection() |
|  1573  info->send_cq = NULL;               in _smbd_get_connection() |
|  1595  qp_attr.send_cq = info->send_cq;    in _smbd_get_connection() |
|  1711  if (info->send_cq)                  in _smbd_get_connection() |
|  1712  ib_free_cq(info->send_cq);          in _smbd_get_connection() |
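_smbd_get_connection() pre-NULLs the CQ pointer, resets it to NULL again when allocation fails, and lets a single error path free only what actually exists; smbd_destroy() relies on the same non-NULL invariant. A sketch of that unwind discipline with a stand-in struct (smbd_connection itself carries many more fields, and the allocator/poll context here are assumptions):

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    struct conn_cqs {    /* stand-in for smbd_connection's CQ members */
            struct ib_cq *send_cq, *recv_cq;
    };

    /* Sketch: keep pointers NULL unless valid so one error path can
     * free exactly what was created. */
    static int alloc_conn_cqs(struct conn_cqs *info, struct ib_device *dev,
                              int send_depth, int recv_depth)
    {
            info->send_cq = NULL;
            info->recv_cq = NULL;

            info->send_cq = ib_alloc_cq_any(dev, info, send_depth,
                                            IB_POLL_SOFTIRQ);
            if (IS_ERR(info->send_cq)) {
                    info->send_cq = NULL;
                    goto err;
            }

            info->recv_cq = ib_alloc_cq_any(dev, info, recv_depth,
                                            IB_POLL_SOFTIRQ);
            if (IS_ERR(info->recv_cq)) {
                    info->recv_cq = NULL;
                    goto err;
            }
            return 0;

    err:
            if (info->send_cq)
                    ib_free_cq(info->send_cq);
            return -ENOMEM;
    }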
|
| D | smbdirect.h |
|    59  struct ib_cq *send_cq, *recv_cq;    member |
|
| /Linux-v5.10/drivers/infiniband/ulp/srp/ |
| D | ib_srp.h |
|   147  struct ib_cq *send_cq;    member |
|
| D | ib_srp.c |
|   521  ib_process_cq_direct(ch->send_cq, -1);                         in srp_destroy_qp() |
|   534  struct ib_cq *recv_cq, *send_cq;                               in srp_create_ch_ib() local |
|   552  send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,    in srp_create_ch_ib() |
|   554  if (IS_ERR(send_cq)) {                                         in srp_create_ch_ib() |
|   555  ret = PTR_ERR(send_cq);                                        in srp_create_ch_ib() |
|   566  init_attr->send_cq = send_cq;                                  in srp_create_ch_ib() |
|   604  if (ch->send_cq)                                               in srp_create_ch_ib() |
|   605  ib_free_cq(ch->send_cq);                                       in srp_create_ch_ib() |
|   609  ch->send_cq = send_cq;                                         in srp_create_ch_ib() |
|   627  ib_free_cq(send_cq);                                           in srp_create_ch_ib() |
|  [all …] |
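srp_destroy_qp() drains its send CQ synchronously with ib_process_cq_direct() before tearing the QP down. That call is only legal on CQs allocated with IB_POLL_DIRECT, and a budget of -1 means "process everything currently queued". A minimal hedged sketch:

    /* Sketch: reap every outstanding send completion in the caller's
     * context. Valid only for CQs created with IB_POLL_DIRECT. */
    static void reap_send_completions(struct ib_cq *send_cq)
    {
            ib_process_cq_direct(send_cq, -1);  /* -1: no budget cap */
    }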
|
| /Linux-v5.10/Documentation/infiniband/ |
| D | tag_matching.rst |
|    32  processed by the sender. A completion send is received in the send_cq |
|