/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf_trace.h>
#include "en/xdp.h"

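/* Convert the RX xdp_buff into an xdp_frame, sync its payload for device
 * access, and queue it on the XDP SQ. Returns false if the conversion or
 * the transmit fails, in which case the caller counts the packet as an
 * XDP drop.
 */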
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
		    struct xdp_buff *xdp)
{
	struct mlx5e_xdp_info xdpi;

	xdpi.xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpi.xdpf))
		return false;
	xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
	dma_sync_single_for_device(sq->pdev, xdpi.dma_addr,
				   xdpi.xdpf->len, DMA_TO_DEVICE);
	xdpi.di = *di;

	return mlx5e_xmit_xdp_frame(sq, &xdpi);
}

/* returns true if packet was consumed by xdp */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
		      void *va, u16 *rx_headroom, u32 *len)
{
	struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
	struct xdp_buff xdp;
	u32 act;
	int err;

	if (!prog)
		return false;

	xdp.data = va + *rx_headroom;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = xdp.data + *len;
	xdp.data_hard_start = va;
	xdp.rxq = &rq->xdp_rxq;

	act = bpf_prog_run_xdp(prog, &xdp);
	switch (act) {
	case XDP_PASS:
		*rx_headroom = xdp.data - xdp.data_hard_start;
		*len = xdp.data_end - xdp.data;
		return false;
	case XDP_TX:
		if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp)))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
		return true;
	case XDP_REDIRECT:
		/* When XDP is enabled, the page refcount is 1 here */
		err = xdp_do_redirect(rq->netdev, &xdp, prog);
		if (unlikely(err))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
		rq->xdpsq.redirect_flush = true;
		mlx5e_page_dma_unmap(rq, di);
		rq->stats->xdp_redirect++;
		return true;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
xdp_abort:
		trace_xdp_exception(rq->netdev, prog, act);
		/* fall through */
	case XDP_DROP:
		rq->stats->xdp_drop++;
		return true;
	}
}

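/* Post a single-packet SEND WQE on the XDP SQ: validate the frame length
 * against MLX5E_XDP_MIN_INLINE and the HW MTU, check for ring space
 * (ringing the doorbell first if the SQ is full), optionally inline the
 * first MLX5E_XDP_MIN_INLINE bytes, and fill the data segment. The
 * xdp_info is stashed per WQE so the completion path can release its
 * resources; the doorbell itself is only requested here (sq->doorbell)
 * and rung later in a batch.
 */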
bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg = wqe->data;

	struct xdp_frame *xdpf = xdpi->xdpf;
	dma_addr_t dma_addr = xdpi->dma_addr;
	unsigned int dma_len = xdpf->len;

	struct mlx5e_xdpsq_stats *stats = sq->stats;

	prefetchw(wqe);

	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
		stats->err++;
		return false;
	}

	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
		if (sq->doorbell) {
			/* SQ is full, ring doorbell */
			mlx5e_xmit_xdp_doorbell(sq);
			sq->doorbell = false;
		}
		stats->full++;
		return false;
	}

	cseg->fm_ce_se = 0;

	/* copy the inline part if required */
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE);
		eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
		dma_len -= MLX5E_XDP_MIN_INLINE;
		dma_addr += MLX5E_XDP_MIN_INLINE;
		dseg++;
	}

	/* write the dma part */
	dseg->addr = cpu_to_be64(dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);

	/* move page reference to sq responsibility,
	 * and mark it so it's not put back in the page-cache.
	 */
	sq->db.xdpi[pi] = *xdpi;
	sq->pc++;

	sq->doorbell = true;

	stats->xmit++;
	return true;
}

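/* Poll completions of the XDP SQ, up to MLX5E_TX_CQ_POLL_BUDGET CQEs.
 * For a redirect SQ the completed frames are unmapped and returned to
 * their owner; for an RQ-attached XDP_TX SQ the RX pages are recycled.
 * Returns true if the budget was exhausted, i.e. more work may remain.
 */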
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_xdpsq *sq;
	struct mlx5_cqe64 *cqe;
	struct mlx5e_rq *rq;
	bool is_redirect;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_xdpsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
	rq = container_of(sq, struct mlx5e_rq, xdpsq);

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	i = 0;
	do {
		u16 wqe_counter;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];

			last_wqe = (sqcc == wqe_counter);
			sqcc++;

			if (is_redirect) {
				/* Unmap before returning the frame: the
				 * unmap length is read from the frame.
				 */
				dma_unmap_single(sq->pdev, xdpi->dma_addr,
						 xdpi->xdpf->len, DMA_TO_DEVICE);
				xdp_return_frame(xdpi->xdpf);
			} else {
				/* Recycle RX page */
				mlx5e_page_release(rq, &xdpi->di, true);
			}
		} while (!last_wqe);
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	sq->stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;
	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

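/* Release all descriptors still outstanding on the XDP SQ (e.g. on SQ
 * teardown): unmap and free pending redirected frames, or release the RX
 * pages (without recycling) for an XDP_TX SQ.
 */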
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_rq *rq;
	bool is_redirect;

	is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
	rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq);

	while (sq->cc != sq->pc) {
		u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
		struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];

		sq->cc++;

		if (is_redirect) {
			/* Unmap before returning the frame: the unmap
			 * length is read from the frame.
			 */
			dma_unmap_single(sq->pdev, xdpi->dma_addr,
					 xdpi->xdpf->len, DMA_TO_DEVICE);
			xdp_return_frame(xdpi->xdpf);
		} else {
			/* Release RX page, no recycling */
			mlx5e_page_release(rq, &xdpi->di, false);
		}
	}
}

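/* .ndo_xdp_xmit callback: transmit a batch of xdp_frames on the XDP SQ of
 * the channel matching the current CPU. Frames that cannot be DMA-mapped
 * or queued are freed and counted as drops; the doorbell is rung only
 * when XDP_XMIT_FLUSH is set. Returns the number of frames sent.
 */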
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_xdpsq *sq;
	int drops = 0;
	int sq_num;
	int i;

	if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	sq_num = smp_processor_id();

	if (unlikely(sq_num >= priv->channels.num))
		return -ENXIO;

	sq = &priv->channels.c[sq_num]->xdpsq;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return -ENETDOWN;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct mlx5e_xdp_info xdpi;

		xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len,
					       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
			continue;
		}

		xdpi.xdpf = xdpf;

		if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) {
			dma_unmap_single(sq->pdev, xdpi.dma_addr,
					 xdpf->len, DMA_TO_DEVICE);
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (flags & XDP_XMIT_FLUSH)
		mlx5e_xmit_xdp_doorbell(sq);

	return n - drops;
}