// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"
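
/*
 * Create a completion queue on behalf of a user context: copy the
 * mana_ib_create_cq request from udata, pin the user-supplied CQ buffer
 * with ib_umem_get(), and register it as a GDMA DMA region. The CQ ID
 * itself is assigned later, when the queue pair is created.
 */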
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_create_cq ucmd = {};
	struct mana_ib_dev *mdev;
	int err;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	if (udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to copy from udata for create cq, %d\n", err);
		return err;
	}

	if (attr->cqe > MAX_SEND_BUFFERS_PER_QUEUE) {
		ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
		return -EINVAL;
	}

	cq->cqe = attr->cqe;
	cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
			       IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(cq->umem)) {
		err = PTR_ERR(cq->umem);
		ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
			  err);
		return err;
	}

	err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to create dma region for create cq, %d\n",
			  err);
		goto err_release_umem;
	}

	ibdev_dbg(ibdev,
		  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
		  err, cq->gdma_region);

	/*
	 * The CQ ID is not known at this time. The ID is generated at create_qp
	 */

	return 0;

err_release_umem:
	ib_umem_release(cq->umem);
	return err;
}
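
/*
 * Destroy the completion queue: tear down the GDMA DMA region that backs
 * it and release the pinned user memory.
 */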
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_dev *mdev;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
	ib_umem_release(cq->umem);

	return 0;
}