// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "en.h"
#include "en/fs.h"
#include "eswitch.h"
#include "ipsec.h"
#include "fs_core.h"
#include "lib/ipsec_fs_roce.h"
#include "lib/fs_chains.h"
#include "esw/ipsec_fs.h"
#include "en_rep.h"

#define NUM_IPSEC_FTE BIT(15)
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40

struct mlx5e_ipsec_fc {
	struct mlx5_fc *cnt;
	struct mlx5_fc *drop;
};

struct mlx5e_ipsec_tx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_rule status;
	struct mlx5_flow_namespace *ns;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	u8 allow_tunnel_mode : 1;
};

/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
	if (family == AF_INET)
		return MLX5_TT_IPV4_IPSEC_ESP;
	return MLX5_TT_IPV6_IPSEC_ESP;
}

static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->rx_esw;

	if (family == AF_INET)
		return ipsec->rx_ipv4;

	return ipsec->rx_ipv6;
}

static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->tx_esw;

	return ipsec->tx;
}

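/* Wrap mlx5_fs_chains so that each user-visible policy priority gets its
 * own chained flow table, with @miss_ft as the miss destination. Chain 0,
 * prio 1, level 0 is taken up front and returned through @root_ft so the
 * caller can connect it to the previous stage in fs_core.
 */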
static struct mlx5_fs_chains *
ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
		    enum mlx5_flow_namespace_type ns, int base_prio,
		    int base_level, struct mlx5_flow_table **root_ft)
{
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;
	int err;

	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
		     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.max_grp_num = 2;
	attr.default_ft = miss_ft;
	attr.ns = ns;
	attr.fs_base_prio = base_prio;
	attr.fs_base_level = base_level;
	chains = mlx5_chains_create(mdev, &attr);
	if (IS_ERR(chains))
		return chains;

	/* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_chains_get;
	}

	*root_ft = ft;
	return chains;

err_chains_get:
	mlx5_chains_destroy(chains);
	return ERR_PTR(err);
}

static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_destroy(chains);
}

static struct mlx5_flow_table *
ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
{
	return mlx5_chains_get_table(chains, 0, prio + 1, 0);
}

static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
{
	mlx5_chains_put_table(chains, 0, prio + 1, 0);
}

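/* Every IPsec table is auto-grouped and sized to NUM_IPSEC_FTE entries.
 * One entry is reserved so that ipsec_miss_create() can install its
 * catch-all rule on the last FTE of the table.
 */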
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
					       int level, int prio,
					       int max_num_groups, u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = max_num_groups;
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = level;
	ft_attr.prio = prio;
	ft_attr.flags = flags;

	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}

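/* Catch-all rule of the RX status table: copy the 7-bit IPsec syndrome
 * reported by the device into metadata register B, count the packet and
 * forward it to its default destination, so the syndrome can be inspected
 * later in the RX datapath.
 */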
static int ipsec_status_rule(struct mlx5_core_dev *mdev,
			     struct mlx5e_ipsec_rx *rx,
			     struct mlx5_flow_destination *dest)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 7);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, dst_offset, 24);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);

	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		mlx5_core_err(mdev,
			      "fail to alloc ipsec copy modify_header_id err=%d\n", err);
		goto out_spec;
	}

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.modify_hdr = modify_hdr;
	fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
		goto out;
	}

	kvfree(spec);
	rx->status.rule = fte;
	rx->status.modify_hdr = modify_hdr;
	return 0;

out:
	mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
	kvfree(spec);
	return err;
}

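/* Install a catch-all group and rule on the last FTE of @ft that forwards
 * anything which missed the table's regular rules to @dest.
 */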
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
			     struct mlx5_flow_table *ft,
			     struct mlx5e_ipsec_miss *miss,
			     struct mlx5_flow_destination *dest)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss->group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss->group)) {
		err = PTR_ERR(miss->group);
		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
			      err);
		goto out;
	}

	/* Create miss rule */
	miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(miss->rule)) {
		mlx5_destroy_flow_group(miss->group);
		err = PTR_ERR(miss->rule);
		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
			      err);
		goto out;
	}
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);

	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}

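/* Tear down the RX pipeline in reverse order of rx_create(). The TTC
 * table is detached first so no traffic is steered into tables that are
 * about to be destroyed.
 */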
static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		       struct mlx5e_ipsec_rx *rx, u32 family)
{
	/* disconnect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_disconnect(ipsec, family);

	if (rx->chains) {
		ipsec_chains_destroy(rx->chains);
	} else {
		mlx5_del_flow_rules(rx->pol.rule);
		mlx5_destroy_flow_group(rx->pol.group);
		mlx5_destroy_flow_table(rx->ft.pol);
	}

	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
	mlx5_destroy_flow_table(rx->ft.sa);
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	if (rx == ipsec->rx_esw) {
		mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
	} else {
		mlx5_del_flow_rules(rx->status.rule);
		mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
	}
	mlx5_destroy_flow_table(rx->ft.status);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
}

static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_rx *rx,
				     u32 family,
				     struct mlx5e_ipsec_rx_create_attr *attr)
{
	if (rx == ipsec->rx_esw) {
		/* For packet offload in switchdev mode, RX & TX use FDB namespace */
		attr->ns = ipsec->tx_esw->ns;
		mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
		return;
	}

	attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
	attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	attr->family = family;
	attr->prio = MLX5E_NIC_PRIO;
	attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
	attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
}

static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx,
					 struct mlx5e_ipsec_rx_create_attr *attr,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	int err;

	if (rx == ipsec->rx_esw)
		return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);

	*dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
	err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
					   attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
					   attr->prio);
	if (err)
		return err;

	ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family);
	if (ft) {
		dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest->ft = ft;
	}

	return 0;
}

static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
				struct mlx5e_ipsec_rx *rx,
				struct mlx5e_ipsec_rx_create_attr *attr)
{
	struct mlx5_flow_destination dest = {};

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rx->ft.pol;
	mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
}

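/* Build the RX pipeline for one address family: TTC -> policy FT ->
 * SA FT -> status FT -> RoCE/default destination. Tables are created in
 * reverse order so each one can point at the next; the TTC redirect is
 * done last, once the whole pipeline is in place. With the PRIO
 * capability the policy step is an fs_chains instance instead of a
 * single table.
 */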
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		     struct mlx5e_ipsec_rx *rx, u32 family)
{
	struct mlx5e_ipsec_rx_create_attr attr;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_rx_create_attr_set(ipsec, rx, family, &attr);

	err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]);
	if (err)
		return err;

	ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft_status;
	}
	rx->ft.status = ft;

	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
	if (rx == ipsec->rx_esw)
		err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
	else
		err = ipsec_status_rule(mdev, rx, dest);
	if (err)
		goto err_add;

	/* Create FT */
	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
	if (rx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft;
	}
	rx->ft.sa = ft;

	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
	if (err)
		goto err_fs;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
						 attr.chains_ns,
						 attr.prio,
						 attr.pol_level,
						 &rx->ft.pol);
		if (IS_ERR(rx->chains)) {
			err = PTR_ERR(rx->chains);
			goto err_pol_ft;
		}

		goto connect;
	}

	ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	rx->ft.pol = ft;
	memset(dest, 0x00, 2 * sizeof(*dest));
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = rx->ft.sa;
	err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
	if (err)
		goto err_pol_miss;

connect:
	/* connect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_connect(ipsec, rx, &attr);
	return 0;

err_pol_miss:
	mlx5_destroy_flow_table(rx->ft.pol);
err_pol_ft:
	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
err_fs:
	mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_del_flow_rules(rx->status.rule);
	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
	mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
	return err;
}

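/* rx_get()/rx_put() refcount the per-family RX pipeline under ft.mutex:
 * the first SA or policy creates it, the last one destroys it. Eswitch
 * mode changes are blocked while any IPsec tables exist.
 */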
static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_rx *rx, u32 family)
{
	int err;

	if (rx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = rx_create(mdev, ipsec, rx, family);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

skip:
	rx->ft.refcnt++;
	return 0;
}

static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
		   u32 family)
{
	if (--rx->ft.refcnt)
		return;

	rx_destroy(ipsec->mdev, ipsec, rx, family);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, u32 family,
					int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return rx;
}

static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	if (err)
		goto err_get;

	ft = rx->chains ? ipsec_chains_get_table(rx->chains, prio) : rx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&rx->ft.mutex);
	return ft;

err_get_ft:
	rx_put(ipsec, rx, family);
err_get:
	mutex_unlock(&rx->ft.mutex);
	return ERR_PTR(err);
}

static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	if (rx->chains)
		ipsec_chains_put_table(rx->chains, prio);

	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(tx->fc->cnt);
	fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
		goto err_rule;
	}

	kvfree(spec);
	tx->status.rule = fte;
	return 0;

err_rule:
	kvfree(spec);
	return err;
}

/* IPsec TX flow steering */
static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		       struct mlx5_ipsec_fs *roce)
{
	mlx5_ipsec_fs_roce_tx_destroy(roce);
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}

	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
	mlx5_destroy_flow_table(tx->ft.sa);
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(ipsec->mdev);
	mlx5_del_flow_rules(tx->status.rule);
	mlx5_destroy_flow_table(tx->ft.status);
}

static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_tx *tx,
				     struct mlx5e_ipsec_tx_create_attr *attr)
{
	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
		return;
	}

	attr->prio = 0;
	attr->pol_level = 0;
	attr->sa_level = 1;
	attr->cnt_level = 2;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
}

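/* Build the TX pipeline: policy FT -> SA FT -> status FT (allow +
 * counter). In switchdev mode the SA table also gets a miss rule that
 * sends non-IPsec traffic straight to the uplink vport. As on RX, the
 * PRIO capability replaces the policy table with an fs_chains instance.
 */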
static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		     struct mlx5_ipsec_fs *roce)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx_create_attr attr;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_tx_create_attr_set(ipsec, tx, &attr);
	ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
	if (IS_ERR(ft))
		return PTR_ERR(ft);
	tx->ft.status = ft;

	err = ipsec_counter_rule_tx(mdev, tx);
	if (err)
		goto err_status_rule;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
	if (tx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_sa_ft;
	}
	tx->ft.sa = ft;

	if (tx == ipsec->tx_esw) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest.vport.num = MLX5_VPORT_UPLINK;
		err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
		if (err)
			goto err_sa_miss;
		memset(&dest, 0, sizeof(dest));
	}

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		tx->chains = ipsec_chains_create(
			mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
			&tx->ft.pol);
		if (IS_ERR(tx->chains)) {
			err = PTR_ERR(tx->chains);
			goto err_pol_ft;
		}

		goto connect_roce;
	}

	ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	tx->ft.pol = ft;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = tx->ft.sa;
	err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
	if (err) {
		mlx5_destroy_flow_table(tx->ft.pol);
		goto err_pol_ft;
	}

connect_roce:
	err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol);
	if (err)
		goto err_roce;
	return 0;

err_roce:
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}
err_pol_ft:
	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
err_sa_miss:
	mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
	mlx5_destroy_flow_table(tx->ft.status);
	return err;
}

static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
				       struct mlx5_flow_table *ft)
{
#ifdef CONFIG_MLX5_ESWITCH
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	esw->offloads.ft_ipsec_tx_pol = ft;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	priv = netdev_priv(uplink_rpriv->netdev);
	if (!priv->channels.num)
		return;

	mlx5e_rep_deactivate_channels(priv);
	mlx5e_rep_activate_channels(priv);
#endif
}

static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_tx *tx)
{
	int err;

	if (tx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = tx_create(ipsec, tx, ipsec->roce);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

	if (tx == ipsec->tx_esw)
		ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);

skip:
	tx->ft.refcnt++;
	return 0;
}

static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
{
	if (--tx->ft.refcnt)
		return;

	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
		ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
	}

	tx_destroy(ipsec, tx, ipsec->roce);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	if (err)
		goto err_get;

	ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&tx->ft.mutex);
	return ft;

err_get_ft:
	tx_put(ipsec, tx);
err_get:
	mutex_unlock(&tx->ft.mutex);
	return ERR_PTR(err);
}

static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return tx;
}

static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	if (tx->chains)
		ipsec_chains_put_table(tx->chains, prio);

	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

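/* The setup_fte_* helpers below fill in match criteria/values on a flow
 * spec. A zero (or all-zero) address acts as a wildcard and adds no match.
 */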
static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	if (!*saddr && !*daddr)
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

	if (*saddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
	}

	if (*daddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	}
}

static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

	if (!addr6_all_zero(saddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
	}

	if (!addr6_all_zero(daddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
	}
}

static void setup_fte_esp(struct mlx5_flow_spec *spec)
{
	/* ESP header */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}

static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
{
	/* SPI number */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
}

static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
{
	/* Non fragmented */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
}

static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
{
	/* Add IPsec indicator in metadata_reg_a */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
}

static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
{
	/* Pass policy check before choosing this SA */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_4, reqid);
}

static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
{
	switch (upspec->proto) {
	case IPPROTO_UDP:
		if (upspec->dport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 udp_dport, upspec->dport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 udp_dport, upspec->dport);
		}
		if (upspec->sport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 udp_sport, upspec->sport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 udp_sport, upspec->sport);
		}
		break;
	case IPPROTO_TCP:
		if (upspec->dport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 tcp_dport, upspec->dport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 tcp_dport, upspec->dport);
		}
		if (upspec->sport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 tcp_sport, upspec->sport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 tcp_sport, upspec->sport);
		}
		break;
	default:
		return;
	}

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
}

static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
						     int type, u8 dir)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return MLX5_FLOW_NAMESPACE_FDB;

	if (dir == XFRM_DEV_OFFLOAD_IN)
		return MLX5_FLOW_NAMESPACE_KERNEL;

	return MLX5_FLOW_NAMESPACE_EGRESS;
}

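/* Program a modify-header action carrying IPsec metadata: on RX the SA
 * handle is written to metadata register B, on TX the policy reqid is
 * written to reg_c_4 so the SA table can verify that the packet already
 * passed policy lookup (see setup_fte_reg_c4()).
 */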
static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
			       struct mlx5_flow_act *flow_act)
{
	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_modify_hdr *modify_hdr;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	switch (dir) {
	case XFRM_DEV_OFFLOAD_IN:
		MLX5_SET(set_action_in, action, field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		MLX5_SET(set_action_in, action, field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
		break;
	default:
		return -EINVAL;
	}

	MLX5_SET(set_action_in, action, data, val);
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
	if (IS_ERR(modify_hdr)) {
		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
			      PTR_ERR(modify_hdr));
		return PTR_ERR(modify_hdr);
	}

	flow_act->modify_hdr = modify_hdr;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	return 0;
}

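/* Tunnel mode reformat: build the raw header blob used by the HW
 * encap/decap action. RX only needs the L2 header to restore after
 * decapsulation; TX prepends Ethernet + outer IPv4/IPv6 + ESP (SPI),
 * with 8 extra bytes reserved after the ESP header, presumably for
 * the IV.
 */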
static int
setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
			  struct mlx5_accel_esp_xfrm_attrs *attrs,
			  struct mlx5_pkt_reformat_params *reformat_params)
{
	struct ip_esp_hdr *esp_hdr;
	struct ipv6hdr *ipv6hdr;
	struct ethhdr *eth_hdr;
	struct iphdr *iphdr;
	char *reformatbf;
	size_t bfflen;
	void *hdr;

	bfflen = sizeof(*eth_hdr);

	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
		bfflen += sizeof(*esp_hdr) + 8;

		switch (attrs->family) {
		case AF_INET:
			bfflen += sizeof(*iphdr);
			break;
		case AF_INET6:
			bfflen += sizeof(*ipv6hdr);
			break;
		default:
			return -EINVAL;
		}
	}

	reformatbf = kzalloc(bfflen, GFP_KERNEL);
	if (!reformatbf)
		return -ENOMEM;

	eth_hdr = (struct ethhdr *)reformatbf;
	switch (attrs->family) {
	case AF_INET:
		eth_hdr->h_proto = htons(ETH_P_IP);
		break;
	case AF_INET6:
		eth_hdr->h_proto = htons(ETH_P_IPV6);
		break;
	default:
		goto free_reformatbf;
	}

	ether_addr_copy(eth_hdr->h_dest, attrs->dmac);
	ether_addr_copy(eth_hdr->h_source, attrs->smac);

	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		reformat_params->type = MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2;
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		reformat_params->type = MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL;
		reformat_params->param_0 = attrs->authsize;

		hdr = reformatbf + sizeof(*eth_hdr);
		switch (attrs->family) {
		case AF_INET:
			iphdr = (struct iphdr *)hdr;
			memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
			memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
			iphdr->version = 4;
			iphdr->ihl = 5;
			iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
			iphdr->protocol = IPPROTO_ESP;
			hdr += sizeof(*iphdr);
			break;
		case AF_INET6:
			ipv6hdr = (struct ipv6hdr *)hdr;
			memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
			memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
			ipv6hdr->nexthdr = IPPROTO_ESP;
			ipv6hdr->version = 6;
			ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
			hdr += sizeof(*ipv6hdr);
			break;
		default:
			goto free_reformatbf;
		}

		esp_hdr = (struct ip_esp_hdr *)hdr;
		esp_hdr->spi = htonl(attrs->spi);
		break;
	default:
		goto free_reformatbf;
	}

	reformat_params->size = bfflen;
	reformat_params->data = reformatbf;
	return 0;

free_reformatbf:
	kfree(reformatbf);
	return -EINVAL;
}

static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		if (attrs->encap)
			return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
		return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
	case XFRM_DEV_OFFLOAD_OUT:
		if (attrs->family == AF_INET) {
			if (attrs->encap)
				return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
		}

		if (attrs->encap)
			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
		return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
	default:
		WARN_ON(true);
	}

	return -EINVAL;
}

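/* Transport mode reformat: decap carries no data, while encap data is a
 * 16-byte blob with the SPI filled in at the start, optionally preceded
 * by a UDP header when the SA uses UDP encapsulation (NAT-T).
 */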
static int
setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
			     struct mlx5_pkt_reformat_params *reformat_params)
{
	struct udphdr *udphdr;
	char *reformatbf;
	size_t bfflen;
	__be32 spi;
	void *hdr;

	reformat_params->type = get_reformat_type(attrs);
	if (reformat_params->type < 0)
		return reformat_params->type;

	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
		if (attrs->encap)
			bfflen += sizeof(*udphdr);

		reformatbf = kzalloc(bfflen, GFP_KERNEL);
		if (!reformatbf)
			return -ENOMEM;

		hdr = reformatbf;
		if (attrs->encap) {
			udphdr = (struct udphdr *)reformatbf;
			udphdr->source = attrs->sport;
			udphdr->dest = attrs->dport;
			hdr += sizeof(*udphdr);
		}

		/* convert to network format */
		spi = htonl(attrs->spi);
		memcpy(hdr, &spi, sizeof(spi));

		reformat_params->param_0 = attrs->authsize;
		reformat_params->size = bfflen;
		reformat_params->data = reformatbf;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
			      struct mlx5_accel_esp_xfrm_attrs *attrs,
			      struct mlx5_flow_act *flow_act)
{
	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
								attrs->dir);
	struct mlx5_pkt_reformat_params reformat_params = {};
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_pkt_reformat *pkt_reformat;
	int ret;

	switch (attrs->mode) {
	case XFRM_MODE_TRANSPORT:
		ret = setup_pkt_transport_reformat(attrs, &reformat_params);
		break;
	case XFRM_MODE_TUNNEL:
		ret = setup_pkt_tunnel_reformat(mdev, attrs, &reformat_params);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	pkt_reformat =
		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
	kfree(reformat_params.data);
	if (IS_ERR(pkt_reformat))
		return PTR_ERR(pkt_reformat);

	flow_act->pkt_reformat = pkt_reformat;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	return 0;
}

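/* Add the RX SA rule: match addresses/SPI/ESP, decrypt, stamp the SA
 * handle into metadata, count, and forward to the status table (or drop,
 * if the SA requests it). Holds a reference on the RX pipeline for the
 * lifetime of the rule.
 */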
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_rx *rx;
	struct mlx5_fc *counter;
	int err = 0;

	rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
	if (IS_ERR(rx))
		return PTR_ERR(rx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_spi(spec, attrs->spi);
	setup_fte_esp(spec);
	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	if (rx != ipsec->rx_esw)
		err = setup_modify_header(ipsec, attrs->type,
					  sa_entry->ipsec_obj_id | BIT(31),
					  XFRM_DEV_OFFLOAD_IN, &flow_act);
	else
		err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);

	if (err)
		goto err_mod_header;

	switch (attrs->type) {
	case XFRM_DEV_OFFLOAD_PACKET:
		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
		if (err)
			goto err_pkt_reformat;
		break;
	default:
		break;
	}

	counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_add_cnt;
	}
	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
	if (attrs->drop)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
	else
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = rx->ft.status;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(counter);
	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
		goto err_add_flow;
	}
	kvfree(spec);

	sa_entry->ipsec_rule.rule = rule;
	sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	sa_entry->ipsec_rule.fc = counter;
	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_flow:
	mlx5_fc_destroy(mdev, counter);
err_add_cnt:
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	rx_ft_put(ipsec, attrs->family, attrs->type);
	return err;
}

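/* Add the TX SA rule. Crypto mode matches SPI/ESP plus the reg_a IPsec
 * marker set by the TX datapath; packet offload matches the reqid in
 * reg_c_4 and adds the encap reformat. The packet is encrypted, counted
 * and forwarded to the status table.
 */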
static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_tx *tx;
	struct mlx5_fc *counter;
	int err;

	tx = tx_ft_get(mdev, ipsec, attrs->type);
	if (IS_ERR(tx))
		return PTR_ERR(tx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		setup_fte_spi(spec, attrs->spi);
		setup_fte_esp(spec);
		setup_fte_reg_a(spec);
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (attrs->reqid)
			setup_fte_reg_c4(spec, attrs->reqid);
		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
		if (err)
			goto err_pkt_reformat;
		break;
	default:
		break;
	}

	counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_add_cnt;
	}

	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
	if (attrs->drop)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
	else
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	dest[0].ft = tx->ft.status;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(counter);
	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
		goto err_add_flow;
	}

	kvfree(spec);
	sa_entry->ipsec_rule.rule = rule;
	sa_entry->ipsec_rule.fc = counter;
	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_flow:
	mlx5_fc_destroy(mdev, counter);
err_add_cnt:
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	kvfree(spec);
err_alloc:
	tx_ft_put(ipsec, attrs->type);
	return err;
}

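/* Add a TX policy rule: ALLOW stamps the reqid into reg_c_4 and forwards
 * to the SA table, BLOCK counts and drops.
 */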
static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	struct mlx5e_ipsec_tx *tx;
	int err, dstn = 0;

	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	tx = ipsec_tx(ipsec, attrs->type);
	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->action) {
	case XFRM_POLICY_ALLOW:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		if (!attrs->reqid)
			break;

		err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
					  XFRM_DEV_OFFLOAD_OUT, &flow_act);
		if (err)
			goto err_mod_header;
		break;
	case XFRM_POLICY_BLOCK:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
		dstn++;
		break;
	default:
		WARN_ON(true);
		err = -EINVAL;
		goto err_mod_header;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	if (tx == ipsec->tx_esw && tx->chains)
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[dstn].ft = tx->ft.sa;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dstn++;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	return 0;

err_action:
	if (flow_act.modify_hdr)
		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
	return err;
}

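/* Add an RX policy rule: ALLOW forwards to the SA table, BLOCK counts
 * and drops.
 */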
static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	struct mlx5e_ipsec_rx *rx;
	int err, dstn = 0;

	ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
			      attrs->type);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->action) {
	case XFRM_POLICY_ALLOW:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case XFRM_POLICY_BLOCK:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
		dstn++;
		break;
	default:
		WARN_ON(true);
		err = -EINVAL;
		goto err_action;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	if (rx == ipsec->rx_esw && rx->chains)
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[dstn].ft = rx->ft.sa;
	dstn++;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	return 0;

err_action:
	kvfree(spec);
err_alloc:
	rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
	return err;
}

static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
					    struct mlx5e_ipsec_fc *fc)
{
	mlx5_fc_destroy(mdev, fc->drop);
	mlx5_fc_destroy(mdev, fc->cnt);
	kfree(fc);
}

static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;

	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
	if (ipsec->is_uplink_rep) {
		ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
		ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
	}
}

static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
{
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fc *counter;
	int err;

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return ERR_PTR(-ENOMEM);

	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_cnt;
	}
	fc->cnt = counter;

	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_drop;
	}
	fc->drop = counter;

	return fc;

err_drop:
	mlx5_fc_destroy(mdev, fc->cnt);
err_cnt:
	kfree(fc);
	return ERR_PTR(err);
}

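/* Allocate the cnt/drop counter pairs for TX, RX and, on the uplink
 * representor, their eswitch variants. IPv4 and IPv6 RX share one pair.
 */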
ipsec_fs_init_counters(struct mlx5e_ipsec * ipsec)1622 static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
1623 {
1624 	struct mlx5_core_dev *mdev = ipsec->mdev;
1625 	struct mlx5e_ipsec_fc *fc;
1626 	int err;
1627 
1628 	fc = ipsec_fs_init_single_counter(mdev);
1629 	if (IS_ERR(fc)) {
1630 		err = PTR_ERR(fc);
1631 		goto err_rx_cnt;
1632 	}
1633 	ipsec->rx_ipv4->fc = fc;
1634 
1635 	fc = ipsec_fs_init_single_counter(mdev);
1636 	if (IS_ERR(fc)) {
1637 		err = PTR_ERR(fc);
1638 		goto err_tx_cnt;
1639 	}
1640 	ipsec->tx->fc = fc;
1641 
1642 	if (ipsec->is_uplink_rep) {
1643 		fc = ipsec_fs_init_single_counter(mdev);
1644 		if (IS_ERR(fc)) {
1645 			err = PTR_ERR(fc);
1646 			goto err_rx_esw_cnt;
1647 		}
1648 		ipsec->rx_esw->fc = fc;
1649 
1650 		fc = ipsec_fs_init_single_counter(mdev);
1651 		if (IS_ERR(fc)) {
1652 			err = PTR_ERR(fc);
1653 			goto err_tx_esw_cnt;
1654 		}
1655 		ipsec->tx_esw->fc = fc;
1656 	}
1657 
1658 	/* Both IPv4 and IPv6 point to same flow counters struct. */
1659 	ipsec->rx_ipv6->fc = ipsec->rx_ipv4->fc;
1660 	return 0;
1661 
1662 err_tx_esw_cnt:
1663 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
1664 err_rx_esw_cnt:
1665 	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
1666 err_tx_cnt:
1667 	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
1668 err_rx_cnt:
1669 	return err;
1670 }
1671 
void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5e_ipsec_hw_stats *stats;
	struct mlx5e_ipsec_fc *fc;
	u64 packets, bytes;

	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;

	stats->ipsec_rx_pkts = 0;
	stats->ipsec_rx_bytes = 0;
	stats->ipsec_rx_drop_pkts = 0;
	stats->ipsec_rx_drop_bytes = 0;
	stats->ipsec_tx_pkts = 0;
	stats->ipsec_tx_bytes = 0;
	stats->ipsec_tx_drop_pkts = 0;
	stats->ipsec_tx_drop_bytes = 0;

	fc = ipsec->rx_ipv4->fc;
	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
		      &stats->ipsec_rx_drop_bytes);

	fc = ipsec->tx->fc;
	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
		      &stats->ipsec_tx_drop_bytes);

	if (ipsec->is_uplink_rep) {
		fc = ipsec->rx_esw->fc;
		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
			stats->ipsec_rx_pkts += packets;
			stats->ipsec_rx_bytes += bytes;
		}

		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
			stats->ipsec_rx_drop_pkts += packets;
			stats->ipsec_rx_drop_bytes += bytes;
		}

		fc = ipsec->tx_esw->fc;
		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
			stats->ipsec_tx_pkts += packets;
			stats->ipsec_tx_bytes += bytes;
		}

		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
			stats->ipsec_tx_drop_pkts += packets;
			stats->ipsec_tx_drop_bytes += bytes;
		}
	}
}

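/* IPsec packet offload and TC offload are mutually exclusive on a device:
 * taking a "block TC" reference fails with -EBUSY while num_block_ipsec
 * is held (presumably by the TC side). When an eswitch exists, its
 * mode_lock serializes this check against eswitch mode changes.
 */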
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int err = 0;

	if (esw)
		down_write(&esw->mode_lock);

	if (mdev->num_block_ipsec) {
		err = -EBUSY;
		goto unlock;
	}

	mdev->num_block_tc++;

unlock:
	if (esw)
		up_write(&esw->mode_lock);

	return err;
}
#else
static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
{
	if (mdev->num_block_ipsec)
		return -EBUSY;

	mdev->num_block_tc++;
	return 0;
}
#endif

/* Drop the "block TC" reference taken by mlx5e_ipsec_block_tc_offload().
 * This must decrement; incrementing here would leak the reference and
 * leave TC offload blocked forever.
 */
static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
{
	mdev->num_block_tc--;
}

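/* Offload a single SA. Packet offload must first take a "block TC"
 * reference, since it cannot coexist with TC offload on this device;
 * the reference is dropped again if rule installation fails.
 */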
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	int err;

	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
		err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
		if (err)
			return err;
	}

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		err = tx_add_rule(sa_entry);
	else
		err = rx_add_rule(sa_entry);

	if (err)
		goto err_out;

	return 0;

err_out:
	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
	return err;
}

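/* Tear down a single SA: release the flow rule, its counter and any
 * packet reformat/modify header resources, then drop the flow table
 * references taken at add time.
 */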
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);
	mlx5_fc_destroy(mdev, ipsec_rule->fc);
	if (ipsec_rule->pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);

	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_unblock_tc_offload(mdev);

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
		tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
		return;
	}

	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}

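/* Offload an XFRM policy. Unlike SAs, policies always take the
 * "block TC" reference, regardless of offload type.
 */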
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	int err;

	err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
	if (err)
		return err;

	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		err = tx_add_policy(pol_entry);
	else
		err = rx_add_policy(pol_entry);

	if (err)
		goto err_out;

	return 0;

err_out:
	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
	return err;
}

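/* Remove an offloaded XFRM policy and drop the policy flow table
 * reference. Only TX policies may own a modify header.
 */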
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);

	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);

	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
				 pol_entry->attrs.prio, pol_entry->attrs.type);
		return;
	}

	if (ipsec_rule->modify_hdr)
		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);

	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
}

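/* Release everything created by mlx5e_accel_ipsec_fs_init(). A NULL
 * ipsec->tx means init never completed, so there is nothing to undo.
 * All flow table refcounts are expected to have dropped to zero by now.
 */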
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
	if (!ipsec->tx)
		return;

	if (ipsec->roce)
		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);

	ipsec_fs_destroy_counters(ipsec);
	mutex_destroy(&ipsec->tx->ft.mutex);
	WARN_ON(ipsec->tx->ft.refcnt);
	kfree(ipsec->tx);

	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
	kfree(ipsec->rx_ipv4);

	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
	kfree(ipsec->rx_ipv6);

	if (ipsec->is_uplink_rep) {
		xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);

		mutex_destroy(&ipsec->tx_esw->ft.mutex);
		WARN_ON(ipsec->tx_esw->ft.refcnt);
		kfree(ipsec->tx_esw);

		mutex_destroy(&ipsec->rx_esw->ft.mutex);
		WARN_ON(ipsec->rx_esw->ft.refcnt);
		kfree(ipsec->rx_esw);
	}
}

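/* Allocate the TX/RX steering contexts and their counters. The eswitch
 * (FDB) contexts are created only for the uplink representor; otherwise
 * RoCE-over-IPsec steering is set up when the device supports it.
 */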
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_namespace *ns, *ns_esw;
	int err = -ENOMEM;

	ns = mlx5_get_flow_namespace(ipsec->mdev,
				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!ns)
		return -EOPNOTSUPP;

	if (ipsec->is_uplink_rep) {
		ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
		if (!ns_esw)
			return -EOPNOTSUPP;

		ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
		if (!ipsec->tx_esw)
			return -ENOMEM;

		ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
		if (!ipsec->rx_esw)
			goto err_rx_esw;
	}

	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
	if (!ipsec->tx)
		goto err_tx;

	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
	if (!ipsec->rx_ipv4)
		goto err_rx_ipv4;

	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
	if (!ipsec->rx_ipv6)
		goto err_rx_ipv6;

	err = ipsec_fs_init_counters(ipsec);
	if (err)
		goto err_counters;

	mutex_init(&ipsec->tx->ft.mutex);
	mutex_init(&ipsec->rx_ipv4->ft.mutex);
	mutex_init(&ipsec->rx_ipv6->ft.mutex);
	ipsec->tx->ns = ns;

	if (ipsec->is_uplink_rep) {
		mutex_init(&ipsec->tx_esw->ft.mutex);
		mutex_init(&ipsec->rx_esw->ft.mutex);
		ipsec->tx_esw->ns = ns_esw;
		xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
	}

	return 0;

err_counters:
	kfree(ipsec->rx_ipv6);
err_rx_ipv6:
	kfree(ipsec->rx_ipv4);
err_rx_ipv4:
	kfree(ipsec->tx);
err_tx:
	kfree(ipsec->rx_esw);
err_rx_esw:
	kfree(ipsec->tx_esw);
	return err;
}

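/* Rebuild the SA's steering rules in make-before-break fashion: install
 * new rules from a shadow copy first, then delete the old ones, so
 * traffic is not dropped while the SA is being modified. On failure the
 * original rules are left untouched.
 */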
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
	int err;

	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));

	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
	if (err)
		return;

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}

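/* Report whether the TX or RX steering context serving this SA was
 * created with tunnel mode support enabled.
 */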
bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec_rx *rx;
	struct mlx5e_ipsec_tx *tx;

	rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		return tx->allow_tunnel_mode;

	return rx->allow_tunnel_mode;
}