/Linux-v6.1/drivers/infiniband/hw/hns/ |
D | hns_roce_qp.c |
     245  struct ib_cq *send_cq, struct ib_cq *recv_cq)  in add_qp_to_list() argument
     251  hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;  in add_qp_to_list()
     282  init_attr->recv_cq);  in hns_roce_qp_store()
    1348  void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)  in hns_roce_lock_cqs() argument
    1349  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)  in hns_roce_lock_cqs()
    1351  if (unlikely(send_cq == NULL && recv_cq == NULL)) {  in hns_roce_lock_cqs()
    1353  __acquire(&recv_cq->lock);  in hns_roce_lock_cqs()
    1354  } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {  in hns_roce_lock_cqs()
    1356  __acquire(&recv_cq->lock);  in hns_roce_lock_cqs()
    1357  } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {  in hns_roce_lock_cqs()
    [all …]
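The hns_roce_lock_cqs() hits above (and the mthca_lock_cqs()/mlx4_ib_lock_cqs() entries further down) all implement the same idea: a QP's send and receive CQ locks are taken together, in a fixed order, so that two teardown/modify paths sharing a CQ pair cannot deadlock, and __acquire() is used purely to keep sparse's annotation checking balanced when only one real lock is taken. The sketch below is a condensed, hypothetical version of that pattern (demo_cq and demo_lock_cqs are illustrative names; the NULL-CQ special cases hns_roce adds are omitted):

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-in for the driver CQ types seen in the hits. */
struct demo_cq {
	spinlock_t lock;
	u32 cqn;
};

/*
 * Lock a send/recv CQ pair in a stable order (lower CQN first) so two
 * callers locking the same pair from different QPs cannot deadlock.
 * The __acquires()/__acquire() annotations only inform sparse; when both
 * pointers name the same CQ, a single real lock is taken.
 */
static void demo_lock_cqs(struct demo_cq *send_cq, struct demo_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);	/* same lock; tell sparse */
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void demo_unlock_cqs(struct demo_cq *send_cq, struct demo_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}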
|
D | hns_roce_hw_v2.c |
     866  hr_qp->ibqp.recv_cq))) {  in hns_roce_v2_post_recv()
    2695  qp_init_attr.recv_cq = free_mr->rsv_cq;  in free_mr_alloc_res()
    4335  hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));  in modify_qp_reset_to_init()
    4370  hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));  in modify_qp_init_to_init()
    5171  if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)  in clear_qp()
    5172  hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),  in clear_qp()
    5407  qp_init_attr->recv_cq = ibqp->recv_cq;  in hns_roce_v2_query_qp()
    5432  struct hns_roce_cq *send_cq, *recv_cq;  in hns_roce_v2_destroy_qp_common() local
    5447  recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;  in hns_roce_v2_destroy_qp_common()
    5450  hns_roce_lock_cqs(send_cq, recv_cq);  in hns_roce_v2_destroy_qp_common()
    [all …]
|
/Linux-v6.1/drivers/infiniband/ulp/ipoib/ |
D | ipoib_verbs.c |
    178  priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,  in ipoib_transport_dev_init()
    180  if (IS_ERR(priv->recv_cq)) {  in ipoib_transport_dev_init()
    194  if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))  in ipoib_transport_dev_init()
    198  init_attr.recv_cq = priv->recv_cq;  in ipoib_transport_dev_init()
    247  ib_destroy_cq(priv->recv_cq);  in ipoib_transport_dev_init()
    267  ib_destroy_cq(priv->recv_cq);  in ipoib_transport_dev_cleanup()
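The ipoib_transport_dev_init() hits trace the usual lifecycle of a receive CQ built with the older ib_create_cq() interface: create it with a completion handler, arm it with ib_req_notify_cq() so the handler fires on the first completion, point ib_qp_init_attr.recv_cq at it, and destroy it on the error and cleanup paths. A minimal sketch under those assumptions (the demo_* names and the -EIO error code are invented for illustration):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static void demo_rx_completion(struct ib_cq *cq, void *ctx)
{
	/* A real driver would schedule NAPI or a work item here. */
}

static struct ib_cq *demo_create_rx_cq(struct ib_device *ca, void *ctx,
				       int depth,
				       struct ib_qp_init_attr *init_attr)
{
	struct ib_cq_init_attr cq_attr = { .cqe = depth, .comp_vector = 0 };
	struct ib_cq *recv_cq;

	recv_cq = ib_create_cq(ca, demo_rx_completion, NULL, ctx, &cq_attr);
	if (IS_ERR(recv_cq))
		return recv_cq;

	/* Arm the CQ so the handler fires on the next completion. */
	if (ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP)) {
		ib_destroy_cq(recv_cq);
		return ERR_PTR(-EIO);
	}

	init_attr->recv_cq = recv_cq;	/* consumed later by ib_create_qp() */
	return recv_cq;
}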
|
D | ipoib_ethtool.c |
    103  ret = rdma_set_cq_moderation(priv->recv_cq,  in ipoib_set_coalesce()
|
D | ipoib_ib.c |
    466  n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);  in ipoib_rx_poll()
    488  if (unlikely(ib_req_notify_cq(priv->recv_cq,  in ipoib_rx_poll()
    843  ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);  in ipoib_ib_dev_stop_default()
    976  n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);  in ipoib_drain_cq()
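ipoib_rx_poll() and ipoib_drain_cq() show the consumption side: ib_poll_cq() pulls batches of work completions from recv_cq, and when the budget is not exhausted the CQ is re-armed with IB_CQ_REPORT_MISSED_EVENTS so completions that raced with the re-arm are not lost. A hedged sketch of that loop (demo_poll_rx and the 16-entry batch size are placeholders, not IPoIB's budget handling):

#include <rdma/ib_verbs.h>

static int demo_poll_rx(struct ib_cq *recv_cq, int budget)
{
	struct ib_wc wc[16];
	int done = 0;

	while (done < budget) {
		int batch = budget - done;
		int n;

		if (batch > 16)
			batch = 16;
		n = ib_poll_cq(recv_cq, batch, wc);
		if (n <= 0)
			break;
		done += n;
		/* ... hand each wc[i] to the receive path here ... */
	}

	if (done < budget &&
	    ib_req_notify_cq(recv_cq,
			     IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) > 0)
		return budget;	/* completions raced the re-arm: keep polling */

	return done;
}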
|
D | ipoib_cm.c |
     255  .send_cq = priv->recv_cq, /* For drain WR */  in ipoib_cm_create_rx_qp()
     256  .recv_cq = priv->recv_cq,  in ipoib_cm_create_rx_qp()
    1062  .recv_cq = priv->recv_cq,  in ipoib_cm_create_tx_qp()
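The .send_cq = priv->recv_cq assignment in ipoib_cm_create_rx_qp() is deliberate: the connected-mode receive QP never transmits payload, but its send queue is used to post a drain WR, so both queues complete into the same receive CQ. A hypothetical attr block in the same spirit (capacities and QP type below are placeholders, not IPoIB's exact values):

#include <rdma/ib_verbs.h>

static struct ib_qp *demo_create_rx_qp(struct ib_pd *pd,
				       struct ib_cq *recv_cq,
				       struct ib_srq *srq)
{
	struct ib_qp_init_attr attr = {
		.send_cq = recv_cq,	/* send queue only carries a drain WR */
		.recv_cq = recv_cq,
		.srq = srq,
		.cap = {
			.max_send_wr = 1,	/* room for the drain WR */
			.max_send_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
	};

	return ib_create_qp(pd, &attr);
}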
|
/Linux-v6.1/drivers/infiniband/hw/mlx5/ |
D | qp.c |
      76  struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
     747  struct mlx5_ib_cq *recv_cq);
     749  struct mlx5_ib_cq *recv_cq);
    1207  static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *recv_cq)  in get_rq_ts_format() argument
    1211  return get_ts_format(dev, recv_cq, fr_supported(ts_cap),  in get_rq_ts_format()
    1224  struct mlx5_ib_cq *recv_cq)  in get_qp_ts_format() argument
    1235  recv_cq ? get_ts_format(dev, recv_cq, fr_sup, rt_sup) :  in get_qp_ts_format()
    1532  to_mcq(init_attr->recv_cq));  in create_raw_packet_qp()
    1997  struct mlx5_ib_cq *recv_cq;  in create_dci() local
    2033  to_mcq(init_attr->recv_cq));  in create_dci()
    [all …]
|
D | gsi.c |
    206  .recv_cq = gsi->rx_qp->recv_cq,  in create_gsi_ud_qp()
|
D | mem.c |
    245  qp_init_attr.recv_cq = cq;  in mlx5_ib_test_wc()
|
/Linux-v6.1/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c |
     774  qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);  in __mthca_modify_qp()
     835  mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,  in __mthca_modify_qp()
     837  if (qp->ibqp.send_cq != qp->ibqp.recv_cq)  in __mthca_modify_qp()
    1165  struct mthca_cq *recv_cq,  in mthca_alloc_qp_common() argument
    1294  struct mthca_cq *recv_cq,  in mthca_alloc_qp() argument
    1321  err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,  in mthca_alloc_qp()
    1336  static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)  in mthca_lock_cqs() argument
    1337  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)  in mthca_lock_cqs()
    1339  if (send_cq == recv_cq) {  in mthca_lock_cqs()
    1341  __acquire(&recv_cq->lock);  in mthca_lock_cqs()
    [all …]
|
D | mthca_dev.h |
    536  struct mthca_cq *recv_cq,
    545  struct mthca_cq *recv_cq,
|
/Linux-v6.1/drivers/infiniband/core/ |
D | uverbs_std_types_qp.c |
     94  struct ib_cq *recv_cq = NULL;  in UVERBS_HANDLER() local
    181  recv_cq = uverbs_attr_get_obj(attrs,  in UVERBS_HANDLER()
    183  if (IS_ERR(recv_cq))  in UVERBS_HANDLER()
    184  return PTR_ERR(recv_cq);  in UVERBS_HANDLER()
    235  attr.recv_cq = recv_cq;  in UVERBS_HANDLER()
|
D | verbs.c |
    1175  qp->send_cq = qp->recv_cq = NULL;  in create_xrc_qp_user()
    1228  qp->recv_cq = attr->recv_cq;  in create_qp()
    1242  qp->recv_cq = attr->recv_cq;  in create_qp()
    1303  if (qp->recv_cq)  in ib_qp_usecnt_inc()
    1304  atomic_inc(&qp->recv_cq->usecnt);  in ib_qp_usecnt_inc()
    1318  if (qp->recv_cq)  in ib_qp_usecnt_dec()
    1319  atomic_dec(&qp->recv_cq->usecnt);  in ib_qp_usecnt_dec()
    2786  struct ib_cq *cq = qp->recv_cq;  in __ib_drain_rq()
    2869  trace_cq_drain_complete(qp->recv_cq);  in ib_drain_rq()
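The core verbs.c hits cover the bookkeeping the IB core does on attr->recv_cq (copying it into the QP and reference counting it in ib_qp_usecnt_inc()/ib_qp_usecnt_dec()) plus the drain machinery behind ib_drain_rq(). From a ULP's point of view the contract is: drain before destroying the QP, and free the CQ only once nothing can complete into it. A minimal, hypothetical teardown helper showing that order:

#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper; assumes the QP's CQs were allocated with
 * ib_alloc_cq(), which the drain helpers rely on to wait for the
 * flush completions.
 */
static void demo_teardown_qp(struct ib_qp *qp, struct ib_cq *recv_cq)
{
	ib_drain_qp(qp);	/* flushes both the send and receive queues */
	ib_destroy_qp(qp);	/* drops the usecnt taken on recv_cq at create time */
	ib_free_cq(recv_cq);	/* safe only once no QP still points at the CQ */
}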
|
/Linux-v6.1/drivers/infiniband/hw/mlx4/ |
D | qp.c |
      53  struct mlx4_ib_cq *recv_cq);
      55  struct mlx4_ib_cq *recv_cq);
     922  to_mcq(init_attr->recv_cq));  in create_rq()
     932  mcq = to_mcq(init_attr->recv_cq);  in create_rq()
     935  to_mcq(init_attr->recv_cq));  in create_rq()
    1196  to_mcq(init_attr->recv_cq));  in create_qp_common()
    1206  mcq = to_mcq(init_attr->recv_cq);  in create_qp_common()
    1209  to_mcq(init_attr->recv_cq));  in create_qp_common()
    1263  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)  in mlx4_ib_lock_cqs() argument
    1264  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)  in mlx4_ib_lock_cqs()
    [all …]
|
/Linux-v6.1/net/sunrpc/xprtrdma/ |
D | verbs.c |
     338  if (ep->re_attr.recv_cq)  in rpcrdma_ep_destroy()
     339  ib_free_cq(ep->re_attr.recv_cq);  in rpcrdma_ep_destroy()
     340  ep->re_attr.recv_cq = NULL;  in rpcrdma_ep_destroy()
     419  ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt,  in rpcrdma_ep_create()
     422  if (IS_ERR(ep->re_attr.recv_cq)) {  in rpcrdma_ep_create()
     423  rc = PTR_ERR(ep->re_attr.recv_cq);  in rpcrdma_ep_create()
     424  ep->re_attr.recv_cq = NULL;  in rpcrdma_ep_create()
    1367  rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id;  in rpcrdma_post_recvs()
|
/Linux-v6.1/fs/ksmbd/ |
D | transport_rdma.c |
     105  struct ib_cq *recv_cq;  member
     448  if (t->recv_cq)  in free_transport()
     449  ib_free_cq(t->recv_cq);  in free_transport()
    1885  t->recv_cq = ib_alloc_cq(t->cm_id->device, t,  in smb_direct_create_qpair()
    1887  if (IS_ERR(t->recv_cq)) {  in smb_direct_create_qpair()
    1889  ret = PTR_ERR(t->recv_cq);  in smb_direct_create_qpair()
    1890  t->recv_cq = NULL;  in smb_direct_create_qpair()
    1901  qp_attr.recv_cq = t->recv_cq;  in smb_direct_create_qpair()
    1931  if (t->recv_cq) {  in smb_direct_create_qpair()
    1932  ib_destroy_cq(t->recv_cq);  in smb_direct_create_qpair()
    [all …]
|
/Linux-v6.1/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_qp.c |
      59  struct pvrdma_cq **recv_cq)  in get_cqs() argument
      62  *recv_cq = to_vcq(qp->ibqp.recv_cq);  in get_cqs()
     360  cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;  in pvrdma_create_qp()
    1043  init_attr->recv_cq = qp->ibqp.recv_cq;  in pvrdma_query_qp()
|
/Linux-v6.1/drivers/infiniband/sw/rxe/ |
D | rxe_qp.c |
     75  if (!init->recv_cq || !init->send_cq) {  in rxe_qp_chk_init()
    313  struct rxe_cq *rcq = to_rcq(init->recv_cq);  in rxe_qp_from_init()
    373  init->recv_cq = qp->ibqp.recv_cq;  in rxe_qp_to_init()
|
/Linux-v6.1/fs/cifs/ |
D | smbdirect.c |
    1392  ib_free_cq(info->recv_cq);  in smbd_destroy()
    1577  info->recv_cq = NULL;  in _smbd_get_connection()
    1586  info->recv_cq =  in _smbd_get_connection()
    1589  if (IS_ERR(info->recv_cq)) {  in _smbd_get_connection()
    1590  info->recv_cq = NULL;  in _smbd_get_connection()
    1605  qp_attr.recv_cq = info->recv_cq;  in _smbd_get_connection()
    1722  if (info->recv_cq)  in _smbd_get_connection()
    1723  ib_free_cq(info->recv_cq);  in _smbd_get_connection()
|
D | smbdirect.h |
    59  struct ib_cq *send_cq, *recv_cq;  member
|
/Linux-v6.1/drivers/infiniband/ulp/srp/ |
D | ib_srp.h |
    156  struct ib_cq *recv_cq;  member
|
D | ib_srp.c |
    534  struct ib_cq *recv_cq, *send_cq;  in srp_create_ch_ib() local
    545  recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,  in srp_create_ch_ib()
    547  if (IS_ERR(recv_cq)) {  in srp_create_ch_ib()
    548  ret = PTR_ERR(recv_cq);  in srp_create_ch_ib()
    567  init_attr->recv_cq = recv_cq;  in srp_create_ch_ib()
    602  if (ch->recv_cq)  in srp_create_ch_ib()
    603  ib_free_cq(ch->recv_cq);  in srp_create_ch_ib()
    608  ch->recv_cq = recv_cq;  in srp_create_ch_ib()
    630  ib_free_cq(recv_cq);  in srp_create_ch_ib()
    673  ib_free_cq(ch->recv_cq);  in srp_free_ch_ib()
    [all …]
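srp_create_ch_ib() is a representative end-to-end setup: allocate the receive and send CQs with ib_alloc_cq(), plug them into ib_qp_init_attr, create the QP, and unwind with ib_free_cq() in reverse order on failure. A condensed sketch under those assumptions (function name, queue sizes, poll contexts, and capability values below are placeholders, not SRP's):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int demo_create_channel(struct ib_device *dev, struct ib_pd *pd,
			       void *ctx, int queue_size, int comp_vector)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	int ret;

	recv_cq = ib_alloc_cq(dev, ctx, queue_size + 1, comp_vector,
			      IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq))
		return PTR_ERR(recv_cq);

	send_cq = ib_alloc_cq(dev, ctx, queue_size, comp_vector,
			      IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_free_recv_cq;
	}

	init_attr.send_cq = send_cq;
	init_attr.recv_cq = recv_cq;
	init_attr.qp_type = IB_QPT_RC;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.cap.max_send_wr = queue_size;
	init_attr.cap.max_recv_wr = queue_size;
	init_attr.cap.max_send_sge = 1;
	init_attr.cap.max_recv_sge = 1;

	qp = ib_create_qp(pd, &init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_free_send_cq;
	}

	/* ... stash qp/send_cq/recv_cq in the channel structure ... */
	return 0;

err_free_send_cq:
	ib_free_cq(send_cq);
err_free_recv_cq:
	ib_free_cq(recv_cq);
	return ret;
}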
|
/Linux-v6.1/drivers/net/ethernet/ibm/ehea/ |
D | ehea_main.c |
     200  arr[i++].fwh = pr->recv_cq->fw_handle;  in ehea_update_firmware_handles()
     891  ehea_reset_cq_ep(pr->recv_cq);  in ehea_poll()
     893  ehea_reset_cq_n1(pr->recv_cq);  in ehea_poll()
    1468  pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,  in ehea_init_port_res()
    1471  if (!pr->recv_cq) {  in ehea_init_port_res()
    1487  pr->recv_cq->attr.act_nr_of_cqes);  in ehea_init_port_res()
    1512  init_attr->recv_cq_handle = pr->recv_cq->fw_handle;  in ehea_init_port_res()
    1562  ehea_destroy_cq(pr->recv_cq);  in ehea_init_port_res()
    1579  ehea_destroy_cq(pr->recv_cq);  in ehea_clean_portres()
|
D | ehea.h |
    352  struct ehea_cq *recv_cq;  member
|
/Linux-v6.1/drivers/infiniband/sw/siw/ |
D | siw_verbs.c |
    348  if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {  in siw_create_qp()
    400  qp->rcq = to_siw_cq(attrs->recv_cq);  in siw_create_qp()
    533  qp_init_attr->recv_cq = base_qp->recv_cq;  in siw_query_qp()
|