// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <net/xdp_sock.h>
#include "umem.h"
#include "setup.h"
#include "en/params.h"

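/* DMA-map every page of the UMEM so the device can access it. On failure,
 * unwind the mappings created so far.
 */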
static int mlx5e_xsk_map_umem(struct mlx5e_priv *priv,
			      struct xdp_umem *umem)
{
	struct device *dev = priv->mdev->device;
	u32 i;

	for (i = 0; i < umem->npgs; i++) {
		dma_addr_t dma = dma_map_page(dev, umem->pgs[i], 0, PAGE_SIZE,
					      DMA_BIDIRECTIONAL);

		if (unlikely(dma_mapping_error(dev, dma)))
			goto err_unmap;
		umem->pages[i].dma = dma;
	}

	return 0;

err_unmap:
	while (i--) {
		dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		umem->pages[i].dma = 0;
	}

	return -ENOMEM;
}

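/* Release the DMA mappings created by mlx5e_xsk_map_umem. */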
static void mlx5e_xsk_unmap_umem(struct mlx5e_priv *priv,
				 struct xdp_umem *umem)
{
	struct device *dev = priv->mdev->device;
	u32 i;

	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		umem->pages[i].dma = 0;
	}
}

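/* Take a reference on the UMEM bookkeeping, lazily allocating the per-channel
 * array of UMEM pointers on first use. ever_used is never cleared; it appears
 * intended to record that XSK has been enabled at least once on this netdev.
 */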
static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk)
{
	if (!xsk->umems) {
		xsk->umems = kcalloc(MLX5E_MAX_NUM_CHANNELS,
				     sizeof(*xsk->umems), GFP_KERNEL);
		if (unlikely(!xsk->umems))
			return -ENOMEM;
	}

	xsk->refcnt++;
	xsk->ever_used = true;

	return 0;
}

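/* Drop a reference; free the pointer array when the last UMEM is removed. */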
static void mlx5e_xsk_put_umems(struct mlx5e_xsk *xsk)
{
	if (!--xsk->refcnt) {
		kfree(xsk->umems);
		xsk->umems = NULL;
	}
}

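/* Store the UMEM at channel index ix, allocating the array if needed. */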
static int mlx5e_xsk_add_umem(struct mlx5e_xsk *xsk, struct xdp_umem *umem, u16 ix)
{
	int err;

	err = mlx5e_xsk_get_umems(xsk);
	if (unlikely(err))
		return err;

	xsk->umems[ix] = umem;
	return 0;
}

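/* Clear the slot and drop the reference taken by mlx5e_xsk_add_umem. */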
static void mlx5e_xsk_remove_umem(struct mlx5e_xsk *xsk, u16 ix)
{
	xsk->umems[ix] = NULL;

	mlx5e_xsk_put_umems(xsk);
}

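/* The headroom and the chunk size are stored in u16 fields of
 * struct mlx5e_xsk_param, so reject a UMEM whose values wouldn't fit.
 */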
static bool mlx5e_xsk_is_umem_sane(struct xdp_umem *umem)
{
	return umem->headroom <= 0xffff && umem->chunk_size_nohr <= 0xffff;
}

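/* Convert UMEM parameters to the driver's representation. chunk_size_nohr
 * excludes the headroom, while the driver works with the full chunk size.
 */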
void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk)
{
	xsk->headroom = umem->headroom;
	xsk->chunk_size = umem->chunk_size_nohr + umem->headroom;
}

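/* Attach a UMEM to channel ix: DMA-map the pages, register the UMEM and, if
 * the channels are open and an XDP program is set, open and activate the XSK
 * queues and redirect the XSK RQT to this channel. Otherwise only validate
 * the parameters now, so that a bad configuration doesn't surface later as a
 * failure to reopen the channels. Called with priv->state_lock held.
 */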
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
				   struct xdp_umem *umem, u16 ix)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5e_xsk_param xsk;
	struct mlx5e_channel *c;
	int err;

	if (unlikely(mlx5e_xsk_get_umem(&priv->channels.params, &priv->xsk, ix)))
		return -EBUSY;

	if (unlikely(!mlx5e_xsk_is_umem_sane(umem)))
		return -EINVAL;

	err = mlx5e_xsk_map_umem(priv, umem);
	if (unlikely(err))
		return err;

	err = mlx5e_xsk_add_umem(&priv->xsk, umem, ix);
	if (unlikely(err))
		goto err_unmap_umem;

	mlx5e_build_xsk_param(umem, &xsk);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		/* XSK objects will be created on open. */
		goto validate_closed;
	}

	if (!params->xdp_prog) {
		/* XSK objects will be created when an XDP program is set,
		 * and the channels are reopened.
		 */
		goto validate_closed;
	}

	c = priv->channels.c[ix];

	err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
	if (unlikely(err))
		goto err_remove_umem;

	mlx5e_activate_xsk(c);

	/* Don't wait for WQEs, because the newer xdpsock sample doesn't provide
	 * any Fill Ring entries at the setup stage.
	 */

	err = mlx5e_xsk_redirect_rqt_to_channel(priv, priv->channels.c[ix]);
	if (unlikely(err))
		goto err_deactivate;

	return 0;

err_deactivate:
	mlx5e_deactivate_xsk(c);
	mlx5e_close_xsk(c);

err_remove_umem:
	mlx5e_xsk_remove_umem(&priv->xsk, ix);

err_unmap_umem:
	mlx5e_xsk_unmap_umem(priv, umem);

	return err;

validate_closed:
	/* Check the configuration in advance, rather than fail at a later stage
	 * (in mlx5e_xdp_set or on open) and end up with no channels.
	 */
	if (!mlx5e_validate_xsk_param(params, &xsk, priv->mdev)) {
		err = -EINVAL;
		goto err_remove_umem;
	}

	return 0;
}

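/* Detach the UMEM from channel ix: if the XSK queues exist (channels open and
 * XDP program set), redirect the XSK RQT to the drop RQ and tear the queues
 * down, then unregister and DMA-unmap the UMEM. Called with priv->state_lock
 * held.
 */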
static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
{
	struct xdp_umem *umem = mlx5e_xsk_get_umem(&priv->channels.params,
						   &priv->xsk, ix);
	struct mlx5e_channel *c;

	if (unlikely(!umem))
		return -EINVAL;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto remove_umem;

	/* XSK RQ and SQ are only created if XDP program is set. */
	if (!priv->channels.params.xdp_prog)
		goto remove_umem;

	c = priv->channels.c[ix];
	mlx5e_xsk_redirect_rqt_to_drop(priv, ix);
	mlx5e_deactivate_xsk(c);
	mlx5e_close_xsk(c);

remove_umem:
	mlx5e_xsk_remove_umem(&priv->xsk, ix);
	mlx5e_xsk_unmap_umem(priv, umem);

	return 0;
}

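/* Lock-taking wrappers around the _locked variants above. */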
static int mlx5e_xsk_enable_umem(struct mlx5e_priv *priv, struct xdp_umem *umem,
				 u16 ix)
{
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_xsk_enable_locked(priv, umem, ix);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_xsk_disable_umem(struct mlx5e_priv *priv, u16 ix)
{
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_xsk_disable_locked(priv, ix);
	mutex_unlock(&priv->state_lock);

	return err;
}

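/* Handler for the XDP_SETUP_XSK_UMEM command of ndo_bpf: a non-NULL umem
 * enables XSK on the given queue, a NULL umem disables it. qid is first
 * translated to a channel index, and it must fall into the XSK RQ group.
 */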
int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_params *params = &priv->channels.params;
	u16 ix;

	if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
		return -EINVAL;

	return umem ? mlx5e_xsk_enable_umem(priv, umem, ix) :
		      mlx5e_xsk_disable_umem(priv, ix);
}

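/* Resize the Fill Ring reuse queue of the UMEM to nentries. xsk_reuseq_swap
 * installs the new queue and returns the one to be freed (the old queue, or
 * the new one if it turned out not to be needed).
 */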
int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries)
{
	struct xdp_umem_fq_reuse *reuseq;

	reuseq = xsk_reuseq_prepare(nentries);
	if (unlikely(!reuseq))
		return -ENOMEM;
	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	return 0;
}

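/* Return the first channel index starting from which no channel has a UMEM,
 * i.e. one past the highest index with a UMEM attached (0 if no UMEMs are in
 * use). Callers can use it as a lower bound when reducing the number of
 * channels, so that active XSK sockets keep their channel.
 */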
u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk)
{
	u16 res = xsk->refcnt ? params->num_channels : 0;

	while (res) {
		if (mlx5e_xsk_get_umem(params, xsk, res - 1))
			break;
		--res;
	}

	return res;
}