// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include "erdma_verbs.h"

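/*
 * Return the CQE at the current consumer index if the device has posted it,
 * or NULL if the CQ is empty. The check compares the owner bit in the CQE
 * header against the phase bit of the consumer index (ci & depth, with depth
 * a power of two); the entry counts as new only when the two differ, which
 * assumes the device flips the owner bit it writes on each pass through the
 * ring.
 */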
static void *get_next_valid_cqe(struct erdma_cq *cq)
{
	__be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
				      cq->depth, CQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
			      __be32_to_cpu(READ_ONCE(*cqe)));

	return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
}

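/*
 * Arm the CQ: mirror the doorbell value into the host-memory doorbell record
 * and write it to the device doorbell, requesting a completion event
 * (restricted to solicited completions when @solicited is set).
 */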
static void notify_cq(struct erdma_cq *cq, u8 solicited)
{
	u64 db_data =
		FIELD_PREP(ERDMA_CQDB_IDX_MASK, (cq->kern_cq.notify_cnt)) |
		FIELD_PREP(ERDMA_CQDB_CQN_MASK, cq->cqn) |
		FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
		FIELD_PREP(ERDMA_CQDB_SOL_MASK, solicited) |
		FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
		FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);

	*cq->kern_cq.db_record = db_data;
	writeq(db_data, cq->kern_cq.db);
}

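/*
 * ->req_notify_cq verb: arm the CQ for the requested notification type and,
 * if IB_CQ_REPORT_MISSED_EVENTS was asked for, return 1 when a CQE is
 * already waiting to be polled.
 */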
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->kern_cq.lock, irq_flags);

	notify_cq(cq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && get_next_valid_cqe(cq))
		ret = 1;

	cq->kern_cq.notify_cnt++;

	spin_unlock_irqrestore(&cq->kern_cq.lock, irq_flags);

	return ret;
}

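/* Map device CQE opcodes to the corresponding ib_wc opcodes. */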
static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
	[ERDMA_OP_WRITE] = IB_WC_RDMA_WRITE,
	[ERDMA_OP_READ] = IB_WC_RDMA_READ,
	[ERDMA_OP_SEND] = IB_WC_SEND,
	[ERDMA_OP_SEND_WITH_IMM] = IB_WC_SEND,
	[ERDMA_OP_RECEIVE] = IB_WC_RECV,
	[ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
	[ERDMA_OP_RECV_INV] = IB_WC_RECV,
	[ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
	[ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
	[ERDMA_OP_REG_MR] = IB_WC_REG_MR,
	[ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
	[ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
};

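/*
 * Map device completion syndromes to ib_wc_status values, plus the
 * driver-specific code reported in wc->vendor_err.
 */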
static const struct {
	enum erdma_wc_status erdma;
	enum ib_wc_status base;
	enum erdma_vendor_err vendor;
} map_cqe_status[ERDMA_NUM_WC_STATUS] = {
	{ ERDMA_WC_SUCCESS, IB_WC_SUCCESS, ERDMA_WC_VENDOR_NO_ERR },
	{ ERDMA_WC_GENERAL_ERR, IB_WC_GENERAL_ERR, ERDMA_WC_VENDOR_NO_ERR },
	{ ERDMA_WC_RECV_WQE_FORMAT_ERR, IB_WC_GENERAL_ERR,
	  ERDMA_WC_VENDOR_INVALID_RQE },
	{ ERDMA_WC_RECV_STAG_INVALID_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_INVALID_STAG },
	{ ERDMA_WC_RECV_ADDR_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION },
	{ ERDMA_WC_RECV_RIGHT_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR },
	{ ERDMA_WC_RECV_PDID_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_INVALID_PD },
	{ ERDMA_WC_RECV_WARRPING_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_WRAP_ERR },
	{ ERDMA_WC_SEND_WQE_FORMAT_ERR, IB_WC_LOC_QP_OP_ERR,
	  ERDMA_WC_VENDOR_INVALID_SQE },
	{ ERDMA_WC_SEND_WQE_ORD_EXCEED, IB_WC_GENERAL_ERR,
	  ERDMA_WC_VENDOR_ZERO_ORD },
	{ ERDMA_WC_SEND_STAG_INVALID_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_INVALID_STAG },
	{ ERDMA_WC_SEND_ADDR_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION },
	{ ERDMA_WC_SEND_RIGHT_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_ACCESS_ERR },
	{ ERDMA_WC_SEND_PDID_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_INVALID_PD },
	{ ERDMA_WC_SEND_WARRPING_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_WARP_ERR },
	{ ERDMA_WC_FLUSH_ERR, IB_WC_WR_FLUSH_ERR, ERDMA_WC_VENDOR_NO_ERR },
	{ ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
};

#define ERDMA_POLLCQ_NO_QP 1

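/*
 * Consume one CQE and fill @wc. Returns 0 on success, -EAGAIN if no valid
 * CQE is available, or ERDMA_POLLCQ_NO_QP if the CQE refers to a QP that no
 * longer exists; in the latter case the CQE is consumed but not reported.
 */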
static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
{
	struct erdma_dev *dev = to_edev(cq->ibcq.device);
	u8 opcode, syndrome, qtype;
	struct erdma_kqp *kern_qp;
	struct erdma_cqe *cqe;
	struct erdma_qp *qp;
	u16 wqe_idx, depth;
	u32 qpn, cqe_hdr;
	u64 *id_table;
	u64 *wqe_hdr;

	cqe = get_next_valid_cqe(cq);
	if (!cqe)
		return -EAGAIN;

	cq->kern_cq.ci++;

	/* Read the rest of the CQE only after it is seen as valid. */
	dma_rmb();

	qpn = be32_to_cpu(cqe->qpn);
	wqe_idx = be32_to_cpu(cqe->qe_idx);
	cqe_hdr = be32_to_cpu(cqe->hdr);

	qp = find_qp_by_qpn(dev, qpn);
	if (!qp)
		return ERDMA_POLLCQ_NO_QP;

	kern_qp = &qp->kern_qp;

	qtype = FIELD_GET(ERDMA_CQE_HDR_QTYPE_MASK, cqe_hdr);
	syndrome = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, cqe_hdr);
	opcode = FIELD_GET(ERDMA_CQE_HDR_OPCODE_MASK, cqe_hdr);

	if (qtype == ERDMA_CQE_QTYPE_SQ) {
		id_table = kern_qp->swr_tbl;
		depth = qp->attrs.sq_size;
		wqe_hdr = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
					  qp->attrs.sq_size, SQEBB_SHIFT);
		kern_qp->sq_ci =
			FIELD_GET(ERDMA_SQE_HDR_WQEBB_CNT_MASK, *wqe_hdr) +
			wqe_idx + 1;
	} else {
		id_table = kern_qp->rwr_tbl;
		depth = qp->attrs.rq_size;
	}
	wc->wr_id = id_table[wqe_idx & (depth - 1)];
	wc->byte_len = be32_to_cpu(cqe->size);

	wc->wc_flags = 0;

	wc->opcode = wc_mapping_table[opcode];
	if (opcode == ERDMA_OP_RECV_IMM || opcode == ERDMA_OP_RSP_SEND_IMM) {
		wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->imm_data));
		wc->wc_flags |= IB_WC_WITH_IMM;
	} else if (opcode == ERDMA_OP_RECV_INV) {
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inv_rkey);
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}

	if (syndrome >= ERDMA_NUM_WC_STATUS)
		syndrome = ERDMA_WC_GENERAL_ERR;

	wc->status = map_cqe_status[syndrome].base;
	wc->vendor_err = map_cqe_status[syndrome].vendor;
	wc->qp = &qp->ibqp;

	return 0;
}

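/*
 * ->poll_cq verb: drain up to @num_entries completions into @wc while
 * holding the kernel CQ lock and return the number actually polled.
 */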
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	unsigned long flags;
	int npolled, ret;

	spin_lock_irqsave(&cq->kern_cq.lock, flags);

	for (npolled = 0; npolled < num_entries;) {
		ret = erdma_poll_one_cqe(cq, wc + npolled);

		if (ret == -EAGAIN) /* no new CQEs received. */
			break;
		else if (ret) /* skip CQEs whose QP no longer exists. */
			continue;

		npolled++;
	}

	spin_unlock_irqrestore(&cq->kern_cq.lock, flags);

	return npolled;
}