1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
39 #include "eswitch.h"
40 
41 enum {
42 	FDB_FAST_PATH = 0,
43 	FDB_SLOW_PATH
44 };
45 
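/*
 * Build an offloaded flow rule: collect the forward-to-vport destinations and
 * an optional flow counter, match on the ingress vport (and on the source
 * eswitch vhca_id when the eswitches are merged), apply vlan push/pop,
 * encap/decap and header-modify actions as requested by the attributes, and
 * install the rule in the fast-path FDB (or in the fwd FDB for mirror rules).
 */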
46 struct mlx5_flow_handle *
47 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
48 				struct mlx5_flow_spec *spec,
49 				struct mlx5_esw_flow_attr *attr)
50 {
51 	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
52 	struct mlx5_flow_act flow_act = {0};
53 	struct mlx5_flow_table *ft = NULL;
54 	struct mlx5_fc *counter = NULL;
55 	struct mlx5_flow_handle *rule;
56 	int j, i = 0;
57 	void *misc;
58 
59 	if (esw->mode != SRIOV_OFFLOADS)
60 		return ERR_PTR(-EOPNOTSUPP);
61 
62 	if (attr->mirror_count)
63 		ft = esw->fdb_table.offloads.fwd_fdb;
64 	else
65 		ft = esw->fdb_table.offloads.fast_fdb;
66 
67 	flow_act.action = attr->action;
68 	/* if per-flow vlan push/pop is emulated, don't program it into the firmware */
69 	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
70 		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
71 				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
72 	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
73 		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
74 		flow_act.vlan[0].vid = attr->vlan_vid[0];
75 		flow_act.vlan[0].prio = attr->vlan_prio[0];
76 		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
77 			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
78 			flow_act.vlan[1].vid = attr->vlan_vid[1];
79 			flow_act.vlan[1].prio = attr->vlan_prio[1];
80 		}
81 	}
82 
83 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
84 		for (j = attr->mirror_count; j < attr->out_count; j++) {
85 			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
86 			dest[i].vport.num = attr->out_rep[j]->vport;
87 			dest[i].vport.vhca_id =
88 				MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
89 			dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
90 			i++;
91 		}
92 	}
93 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
94 		counter = mlx5_fc_create(esw->dev, true);
95 		if (IS_ERR(counter)) {
96 			rule = ERR_CAST(counter);
97 			goto err_counter_alloc;
98 		}
99 		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
100 		dest[i].counter = counter;
101 		i++;
102 	}
103 
104 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
105 	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
106 
107 	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
108 		MLX5_SET(fte_match_set_misc, misc,
109 			 source_eswitch_owner_vhca_id,
110 			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
111 
112 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
113 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
114 	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
115 		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
116 				 source_eswitch_owner_vhca_id);
117 
118 	if (attr->match_level == MLX5_MATCH_NONE)
119 		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
120 	else
121 		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
122 					      MLX5_MATCH_MISC_PARAMETERS;
123 
124 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
125 		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
126 
127 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
128 		flow_act.modify_id = attr->mod_hdr_id;
129 
130 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
131 		flow_act.encap_id = attr->encap_id;
132 
133 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
134 	if (IS_ERR(rule))
135 		goto err_add_rule;
136 	else
137 		esw->offloads.num_flows++;
138 
139 	return rule;
140 
141 err_add_rule:
142 	mlx5_fc_destroy(esw->dev, counter);
143 err_counter_alloc:
144 	return rule;
145 }
146 
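/*
 * First stage of a mirrored flow: forward to each mirror vport and chain to
 * the fwd FDB table, where the rule built by mlx5_eswitch_add_offloaded_rule()
 * completes the forwarding to the remaining destinations.
 */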
147 struct mlx5_flow_handle *
148 mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
149 			  struct mlx5_flow_spec *spec,
150 			  struct mlx5_esw_flow_attr *attr)
151 {
152 	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
153 	struct mlx5_flow_act flow_act = {0};
154 	struct mlx5_flow_handle *rule;
155 	void *misc;
156 	int i;
157 
158 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
159 	for (i = 0; i < attr->mirror_count; i++) {
160 		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
161 		dest[i].vport.num = attr->out_rep[i]->vport;
162 		dest[i].vport.vhca_id =
163 			MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
164 		dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
165 	}
166 	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
167 	dest[i].ft = esw->fdb_table.offloads.fwd_fdb;
168 	i++;
169 
170 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
171 	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
172 
173 	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
174 		MLX5_SET(fte_match_set_misc, misc,
175 			 source_eswitch_owner_vhca_id,
176 			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
177 
178 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
179 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
180 	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
181 		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
182 				 source_eswitch_owner_vhca_id);
183 
184 	if (attr->match_level == MLX5_MATCH_NONE)
185 		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
186 	else
187 		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
188 					      MLX5_MATCH_MISC_PARAMETERS;
189 
190 	rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);
191 
192 	if (!IS_ERR(rule))
193 		esw->offloads.num_flows++;
194 
195 	return rule;
196 }
197 
198 void
199 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
200 				struct mlx5_flow_handle *rule,
201 				struct mlx5_esw_flow_attr *attr)
202 {
203 	struct mlx5_fc *counter = NULL;
204 
205 	counter = mlx5_flow_rule_counter(rule);
206 	mlx5_del_flow_rules(rule);
207 	mlx5_fc_destroy(esw->dev, counter);
208 	esw->offloads.num_flows--;
209 }
210 
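/*
 * Apply (or clear) the global vlan pop policy: program vlan stripping on every
 * enabled VF vport that has a registered ethernet representor.
 */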
211 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
212 {
213 	struct mlx5_eswitch_rep *rep;
214 	int vf_vport, err = 0;
215 
216 	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
217 	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
218 		rep = &esw->offloads.vport_reps[vf_vport];
219 		if (!rep->rep_if[REP_ETH].valid)
220 			continue;
221 
222 		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
223 		if (err)
224 			goto out;
225 	}
226 
227 out:
228 	return err;
229 }
230 
231 static struct mlx5_eswitch_rep *
232 esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
233 {
234 	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
235 
236 	in_rep  = attr->in_rep;
237 	out_rep = attr->out_rep[0];
238 
239 	if (push)
240 		vport = in_rep;
241 	else if (pop)
242 		vport = out_rep;
243 	else
244 		vport = in_rep;
245 
246 	return vport;
247 }
248 
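/*
 * Validate an emulated vlan push/pop request: push/pop only make sense with
 * forwarding, push is not allowed from the uplink, pop is not allowed towards
 * the uplink, and a vport may only have a single vlan pushed at a time.
 */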
249 static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
250 				     bool push, bool pop, bool fwd)
251 {
252 	struct mlx5_eswitch_rep *in_rep, *out_rep;
253 
254 	if ((push || pop) && !fwd)
255 		goto out_notsupp;
256 
257 	in_rep  = attr->in_rep;
258 	out_rep = attr->out_rep[0];
259 
260 	if (push && in_rep->vport == FDB_UPLINK_VPORT)
261 		goto out_notsupp;
262 
263 	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
264 		goto out_notsupp;
265 
266 	/* vport has vlan push configured, can't offload VF --> wire rules without it */
267 	if (!push && !pop && fwd)
268 		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
269 			goto out_notsupp;
270 
271 	/* protects against (1) setting rules that push different vlans and
272 	 * (2) mixing rules without a vlan to push (attr->vlan == 0) with rules that push one (!= 0)
273 	 */
274 	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
275 		goto out_notsupp;
276 
277 	return 0;
278 
279 out_notsupp:
280 	return -EOPNOTSUPP;
281 }
282 
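/*
 * Emulate per-flow vlan push/pop on devices without native support: the first
 * vlan rule enables the global pop policy, push rules program vlan insertion
 * on the ingress vport, and VF->wire rules without a push are refcounted so
 * the vport vlan cannot change underneath them.
 */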
283 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
284 				 struct mlx5_esw_flow_attr *attr)
285 {
286 	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
287 	struct mlx5_eswitch_rep *vport = NULL;
288 	bool push, pop, fwd;
289 	int err = 0;
290 
291 	/* no-op when vlan push/pop is supported natively rather than emulated */
292 	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
293 		return 0;
294 
295 	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
296 	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
297 	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
298 
299 	err = esw_add_vlan_action_check(attr, push, pop, fwd);
300 	if (err)
301 		return err;
302 
303 	attr->vlan_handled = false;
304 
305 	vport = esw_vlan_action_get_vport(attr, push, pop);
306 
307 	if (!push && !pop && fwd) {
308 		/* tracks VF --> wire rules without vlan push action */
309 		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
310 			vport->vlan_refcount++;
311 			attr->vlan_handled = true;
312 		}
313 
314 		return 0;
315 	}
316 
317 	if (!push && !pop)
318 		return 0;
319 
320 	if (!(offloads->vlan_push_pop_refcount)) {
321 		/* it's the 1st vlan rule, apply global vlan pop policy */
322 		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
323 		if (err)
324 			goto out;
325 	}
326 	offloads->vlan_push_pop_refcount++;
327 
328 	if (push) {
329 		if (vport->vlan_refcount)
330 			goto skip_set_push;
331 
332 		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
333 						    SET_VLAN_INSERT | SET_VLAN_STRIP);
334 		if (err)
335 			goto out;
336 		vport->vlan = attr->vlan_vid[0];
337 skip_set_push:
338 		vport->vlan_refcount++;
339 	}
340 out:
341 	if (!err)
342 		attr->vlan_handled = true;
343 	return err;
344 }
345 
346 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
347 				 struct mlx5_esw_flow_attr *attr)
348 {
349 	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
350 	struct mlx5_eswitch_rep *vport = NULL;
351 	bool push, pop, fwd;
352 	int err = 0;
353 
354 	/* no-op when vlan push/pop is supported natively rather than emulated */
355 	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
356 		return 0;
357 
358 	if (!attr->vlan_handled)
359 		return 0;
360 
361 	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
362 	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
363 	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
364 
365 	vport = esw_vlan_action_get_vport(attr, push, pop);
366 
367 	if (!push && !pop && fwd) {
368 		/* tracks VF --> wire rules without vlan push action */
369 		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
370 			vport->vlan_refcount--;
371 
372 		return 0;
373 	}
374 
375 	if (push) {
376 		vport->vlan_refcount--;
377 		if (vport->vlan_refcount)
378 			goto skip_unset_push;
379 
380 		vport->vlan = 0;
381 		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
382 						    0, 0, SET_VLAN_STRIP);
383 		if (err)
384 			goto out;
385 	}
386 
387 skip_unset_push:
388 	offloads->vlan_push_pop_refcount--;
389 	if (offloads->vlan_push_pop_refcount)
390 		return 0;
391 
392 	/* no more vlan rules, stop global vlan pop policy */
393 	err = esw_set_global_vlan_pop(esw, 0);
394 
395 out:
396 	return err;
397 }
398 
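/*
 * Add a slow-path FDB rule that steers traffic sent from the given send queue
 * on vport 0 to the specified vport; typically used so that packets a
 * representor transmits reach the vport it represents.
 */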
399 struct mlx5_flow_handle *
400 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
401 {
402 	struct mlx5_flow_act flow_act = {0};
403 	struct mlx5_flow_destination dest = {};
404 	struct mlx5_flow_handle *flow_rule;
405 	struct mlx5_flow_spec *spec;
406 	void *misc;
407 
408 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
409 	if (!spec) {
410 		flow_rule = ERR_PTR(-ENOMEM);
411 		goto out;
412 	}
413 
414 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
415 	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
416 	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
417 
418 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
419 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
420 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
421 
422 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
423 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
424 	dest.vport.num = vport;
425 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
426 
427 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
428 					&flow_act, &dest, 1);
429 	if (IS_ERR(flow_rule))
430 		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
431 out:
432 	kvfree(spec);
433 	return flow_rule;
434 }
435 EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
436 
437 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
438 {
439 	mlx5_del_flow_rules(rule);
440 }
441 
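/*
 * Install the slow-path miss rules, one for unicast and one for multicast
 * destination MACs (dmac bit 0 set); traffic that misses the offloaded rules
 * is forwarded to vport 0.
 */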
442 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
443 {
444 	struct mlx5_flow_act flow_act = {0};
445 	struct mlx5_flow_destination dest = {};
446 	struct mlx5_flow_handle *flow_rule = NULL;
447 	struct mlx5_flow_spec *spec;
448 	void *headers_c;
449 	void *headers_v;
450 	int err = 0;
451 	u8 *dmac_c;
452 	u8 *dmac_v;
453 
454 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
455 	if (!spec) {
456 		err = -ENOMEM;
457 		goto out;
458 	}
459 
460 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
461 	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
462 				 outer_headers);
463 	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
464 			      outer_headers.dmac_47_16);
465 	dmac_c[0] = 0x01;
466 
467 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
468 	dest.vport.num = 0;
469 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
470 
471 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
472 					&flow_act, &dest, 1);
473 	if (IS_ERR(flow_rule)) {
474 		err = PTR_ERR(flow_rule);
475 		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
476 		goto out;
477 	}
478 
479 	esw->fdb_table.offloads.miss_rule_uni = flow_rule;
480 
481 	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
482 				 outer_headers);
483 	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
484 			      outer_headers.dmac_47_16);
485 	dmac_v[0] = 0x01;
486 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
487 					&flow_act, &dest, 1);
488 	if (IS_ERR(flow_rule)) {
489 		err = PTR_ERR(flow_rule);
490 		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
491 		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
492 		goto out;
493 	}
494 
495 	esw->fdb_table.offloads.miss_rule_multi = flow_rule;
496 
497 out:
498 	kvfree(spec);
499 	return err;
500 }
501 
502 #define ESW_OFFLOADS_NUM_GROUPS  4
503 
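/*
 * Create the auto-grouped fast-path FDB table.  Its size is bounded by both
 * the maximum flow table size and the number of flow counters; when the
 * device also supports a fwd FDB, a second table is created at the next level
 * of the same priority and each table gets half of the computed size.
 */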
504 static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
505 {
506 	struct mlx5_core_dev *dev = esw->dev;
507 	struct mlx5_flow_namespace *root_ns;
508 	struct mlx5_flow_table *fdb = NULL;
509 	int esw_size, err = 0;
510 	u32 flags = 0;
511 	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
512 				MLX5_CAP_GEN(dev, max_flow_counter_15_0);
513 
514 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
515 	if (!root_ns) {
516 		esw_warn(dev, "Failed to get FDB flow namespace\n");
517 		err = -EOPNOTSUPP;
518 		goto out_namespace;
519 	}
520 
521 	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
522 		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
523 		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);
524 
525 	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
526 			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
527 
528 	if (mlx5_esw_has_fwd_fdb(dev))
529 		esw_size >>= 1;
530 
531 	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
532 		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
533 
534 	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
535 						  esw_size,
536 						  ESW_OFFLOADS_NUM_GROUPS, 0,
537 						  flags);
538 	if (IS_ERR(fdb)) {
539 		err = PTR_ERR(fdb);
540 		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
541 		goto out_namespace;
542 	}
543 	esw->fdb_table.offloads.fast_fdb = fdb;
544 
545 	if (!mlx5_esw_has_fwd_fdb(dev))
546 		goto out_namespace;
547 
548 	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
549 						  esw_size,
550 						  ESW_OFFLOADS_NUM_GROUPS, 1,
551 						  flags);
552 	if (IS_ERR(fdb)) {
553 		err = PTR_ERR(fdb);
554 		esw_warn(dev, "Failed to create fwd table err %d\n", err);
555 		goto out_ft;
556 	}
557 	esw->fdb_table.offloads.fwd_fdb = fdb;
558 
559 	return err;
560 
561 out_ft:
562 	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
563 out_namespace:
564 	return err;
565 }
566 
567 static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
568 {
569 	if (mlx5_esw_has_fwd_fdb(esw->dev))
570 		mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
571 	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
572 }
573 
574 #define MAX_PF_SQ 256
575 #define MAX_SQ_NVPORTS 32
576 
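/*
 * Create the offloads FDB tables: the fast-path table(s) plus a slow-path
 * table sized for the send-to-vport rules (MAX_SQ_NVPORTS per vport and
 * MAX_PF_SQ for the PF) and the miss rules, with one flow group for each of
 * the two rule types.
 */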
577 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
578 {
579 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
580 	struct mlx5_flow_table_attr ft_attr = {};
581 	struct mlx5_core_dev *dev = esw->dev;
582 	struct mlx5_flow_namespace *root_ns;
583 	struct mlx5_flow_table *fdb = NULL;
584 	int table_size, ix, err = 0;
585 	struct mlx5_flow_group *g;
586 	void *match_criteria;
587 	u32 *flow_group_in;
588 	u8 *dmac;
589 
590 	esw_debug(esw->dev, "Create offloads FDB Tables\n");
591 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
592 	if (!flow_group_in)
593 		return -ENOMEM;
594 
595 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
596 	if (!root_ns) {
597 		esw_warn(dev, "Failed to get FDB flow namespace\n");
598 		err = -EOPNOTSUPP;
599 		goto ns_err;
600 	}
601 
602 	err = esw_create_offloads_fast_fdb_table(esw);
603 	if (err)
604 		goto fast_fdb_err;
605 
606 	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;
607 
608 	ft_attr.max_fte = table_size;
609 	ft_attr.prio = FDB_SLOW_PATH;
610 
611 	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
612 	if (IS_ERR(fdb)) {
613 		err = PTR_ERR(fdb);
614 		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
615 		goto slow_fdb_err;
616 	}
617 	esw->fdb_table.offloads.slow_fdb = fdb;
618 
619 	/* create send-to-vport group */
620 	memset(flow_group_in, 0, inlen);
621 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
622 		 MLX5_MATCH_MISC_PARAMETERS);
623 
624 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
625 
626 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
627 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
628 
629 	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
630 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
631 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
632 
633 	g = mlx5_create_flow_group(fdb, flow_group_in);
634 	if (IS_ERR(g)) {
635 		err = PTR_ERR(g);
636 		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
637 		goto send_vport_err;
638 	}
639 	esw->fdb_table.offloads.send_to_vport_grp = g;
640 
641 	/* create miss group */
642 	memset(flow_group_in, 0, inlen);
643 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
644 		 MLX5_MATCH_OUTER_HEADERS);
645 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
646 				      match_criteria);
647 	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
648 			    outer_headers.dmac_47_16);
649 	dmac[0] = 0x01;
650 
651 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
652 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);
653 
654 	g = mlx5_create_flow_group(fdb, flow_group_in);
655 	if (IS_ERR(g)) {
656 		err = PTR_ERR(g);
657 		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
658 		goto miss_err;
659 	}
660 	esw->fdb_table.offloads.miss_grp = g;
661 
662 	err = esw_add_fdb_miss_rule(esw);
663 	if (err)
664 		goto miss_rule_err;
665 
666 	kvfree(flow_group_in);
667 	return 0;
668 
669 miss_rule_err:
670 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
671 miss_err:
672 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
673 send_vport_err:
674 	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
675 slow_fdb_err:
676 	esw_destroy_offloads_fast_fdb_table(esw);
677 fast_fdb_err:
678 ns_err:
679 	kvfree(flow_group_in);
680 	return err;
681 }
682 
683 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
684 {
685 	if (!esw->fdb_table.offloads.fast_fdb)
686 		return;
687 
688 	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
689 	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
690 	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
691 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
692 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
693 
694 	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
695 	esw_destroy_offloads_fast_fdb_table(esw);
696 }
697 
698 static int esw_create_offloads_table(struct mlx5_eswitch *esw)
699 {
700 	struct mlx5_flow_table_attr ft_attr = {};
701 	struct mlx5_core_dev *dev = esw->dev;
702 	struct mlx5_flow_table *ft_offloads;
703 	struct mlx5_flow_namespace *ns;
704 	int err = 0;
705 
706 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
707 	if (!ns) {
708 		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
709 		return -EOPNOTSUPP;
710 	}
711 
712 	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;
713 
714 	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
715 	if (IS_ERR(ft_offloads)) {
716 		err = PTR_ERR(ft_offloads);
717 		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
718 		return err;
719 	}
720 
721 	esw->offloads.ft_offloads = ft_offloads;
722 	return 0;
723 }
724 
725 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
726 {
727 	struct mlx5_esw_offload *offloads = &esw->offloads;
728 
729 	mlx5_destroy_flow_table(offloads->ft_offloads);
730 }
731 
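/*
 * Create the single flow group of the offloads table; its rules match only on
 * the source vport, one entry per vport.
 */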
732 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
733 {
734 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
735 	struct mlx5_flow_group *g;
736 	struct mlx5_priv *priv = &esw->dev->priv;
737 	u32 *flow_group_in;
738 	void *match_criteria, *misc;
739 	int err = 0;
740 	int nvports = priv->sriov.num_vfs + 2;
741 
742 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
743 	if (!flow_group_in)
744 		return -ENOMEM;
745 
746 	/* create vport rx group */
747 	memset(flow_group_in, 0, inlen);
748 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
749 		 MLX5_MATCH_MISC_PARAMETERS);
750 
751 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
752 	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
753 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
754 
755 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
756 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
757 
758 	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
759 
760 	if (IS_ERR(g)) {
761 		err = PTR_ERR(g);
762 		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
763 		goto out;
764 	}
765 
766 	esw->offloads.vport_rx_group = g;
767 out:
768 	kvfree(flow_group_in);
769 	return err;
770 }
771 
772 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
773 {
774 	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
775 }
776 
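/*
 * Add an offloads-table rule that steers traffic received from the given
 * vport to the supplied TIR; typically used to deliver a vport's packets to
 * its representor's receive queues.
 */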
777 struct mlx5_flow_handle *
778 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
779 {
780 	struct mlx5_flow_act flow_act = {0};
781 	struct mlx5_flow_destination dest = {};
782 	struct mlx5_flow_handle *flow_rule;
783 	struct mlx5_flow_spec *spec;
784 	void *misc;
785 
786 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
787 	if (!spec) {
788 		flow_rule = ERR_PTR(-ENOMEM);
789 		goto out;
790 	}
791 
792 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
793 	MLX5_SET(fte_match_set_misc, misc, source_port, vport);
794 
795 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
796 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
797 
798 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
799 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
800 	dest.tir_num = tirn;
801 
802 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
803 	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
804 					&flow_act, &dest, 1);
805 	if (IS_ERR(flow_rule)) {
806 		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
807 		goto out;
808 	}
809 
810 out:
811 	kvfree(spec);
812 	return flow_rule;
813 }
814 
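/*
 * Switch from legacy to offloads (switchdev) mode: SRIOV is disabled and
 * re-enabled in SRIOV_OFFLOADS mode, falling back to legacy on failure, and
 * the inline mode is taken from the vports if it was not configured yet.
 */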
815 static int esw_offloads_start(struct mlx5_eswitch *esw)
816 {
817 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
818 
819 	if (esw->mode != SRIOV_LEGACY) {
820 		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
821 		return -EINVAL;
822 	}
823 
824 	mlx5_eswitch_disable_sriov(esw);
825 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
826 	if (err) {
827 		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
828 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
829 		if (err1)
830 			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
831 	}
832 	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
833 		if (mlx5_eswitch_inline_mode_get(esw,
834 						 num_vfs,
835 						 &esw->offloads.inline_mode)) {
836 			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
837 			esw_warn(esw->dev, "Inline mode is different between vports\n");
838 		}
839 	}
840 	return err;
841 }
842 
843 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
844 {
845 	kfree(esw->offloads.vport_reps);
846 }
847 
848 int esw_offloads_init_reps(struct mlx5_eswitch *esw)
849 {
850 	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
851 	struct mlx5_core_dev *dev = esw->dev;
852 	struct mlx5_esw_offload *offloads;
853 	struct mlx5_eswitch_rep *rep;
854 	u8 hw_id[ETH_ALEN];
855 	int vport;
856 
857 	esw->offloads.vport_reps = kcalloc(total_vfs,
858 					   sizeof(struct mlx5_eswitch_rep),
859 					   GFP_KERNEL);
860 	if (!esw->offloads.vport_reps)
861 		return -ENOMEM;
862 
863 	offloads = &esw->offloads;
864 	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
865 
866 	for (vport = 0; vport < total_vfs; vport++) {
867 		rep = &offloads->vport_reps[vport];
868 
869 		rep->vport = vport;
870 		ether_addr_copy(rep->hw_id, hw_id);
871 	}
872 
873 	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
874 
875 	return 0;
876 }
877 
878 static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
879 					  u8 rep_type)
880 {
881 	struct mlx5_eswitch_rep *rep;
882 	int vport;
883 
884 	for (vport = nvports - 1; vport >= 0; vport--) {
885 		rep = &esw->offloads.vport_reps[vport];
886 		if (!rep->rep_if[rep_type].valid)
887 			continue;
888 
889 		rep->rep_if[rep_type].unload(rep);
890 	}
891 }
892 
893 static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
894 {
895 	u8 rep_type = NUM_REP_TYPES;
896 
897 	while (rep_type-- > 0)
898 		esw_offloads_unload_reps_type(esw, nvports, rep_type);
899 }
900 
901 static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
902 				       u8 rep_type)
903 {
904 	struct mlx5_eswitch_rep *rep;
905 	int vport;
906 	int err;
907 
908 	for (vport = 0; vport < nvports; vport++) {
909 		rep = &esw->offloads.vport_reps[vport];
910 		if (!rep->rep_if[rep_type].valid)
911 			continue;
912 
913 		err = rep->rep_if[rep_type].load(esw->dev, rep);
914 		if (err)
915 			goto err_reps;
916 	}
917 
918 	return 0;
919 
920 err_reps:
921 	esw_offloads_unload_reps_type(esw, vport, rep_type);
922 	return err;
923 }
924 
925 static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
926 {
927 	u8 rep_type = 0;
928 	int err;
929 
930 	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
931 		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
932 		if (err)
933 			goto err_reps;
934 	}
935 
936 	return err;
937 
938 err_reps:
939 	while (rep_type-- > 0)
940 		esw_offloads_unload_reps_type(esw, nvports, rep_type);
941 	return err;
942 }
943 
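/*
 * Bring up offloads mode: create the FDB tables, the offloads table and its
 * vport rx group, then load the registered representors.
 */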
944 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
945 {
946 	int err;
947 
948 	err = esw_create_offloads_fdb_tables(esw, nvports);
949 	if (err)
950 		return err;
951 
952 	err = esw_create_offloads_table(esw);
953 	if (err)
954 		goto create_ft_err;
955 
956 	err = esw_create_vport_rx_group(esw);
957 	if (err)
958 		goto create_fg_err;
959 
960 	err = esw_offloads_load_reps(esw, nvports);
961 	if (err)
962 		goto err_reps;
963 
964 	return 0;
965 
966 err_reps:
967 	esw_destroy_vport_rx_group(esw);
968 
969 create_fg_err:
970 	esw_destroy_offloads_table(esw);
971 
972 create_ft_err:
973 	esw_destroy_offloads_fdb_tables(esw);
974 
975 	return err;
976 }
977 
978 static int esw_offloads_stop(struct mlx5_eswitch *esw)
979 {
980 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
981 
982 	mlx5_eswitch_disable_sriov(esw);
983 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
984 	if (err) {
985 		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
986 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
987 		if (err1)
988 			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
989 	}
990 
991 	/* re-enable PF RoCE */
992 	mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
993 
994 	return err;
995 }
996 
997 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
998 {
999 	esw_offloads_unload_reps(esw, nvports);
1000 	esw_destroy_vport_rx_group(esw);
1001 	esw_destroy_offloads_table(esw);
1002 	esw_destroy_offloads_fdb_tables(esw);
1003 }
1004 
1005 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
1006 {
1007 	switch (mode) {
1008 	case DEVLINK_ESWITCH_MODE_LEGACY:
1009 		*mlx5_mode = SRIOV_LEGACY;
1010 		break;
1011 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
1012 		*mlx5_mode = SRIOV_OFFLOADS;
1013 		break;
1014 	default:
1015 		return -EINVAL;
1016 	}
1017 
1018 	return 0;
1019 }
1020 
1021 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
1022 {
1023 	switch (mlx5_mode) {
1024 	case SRIOV_LEGACY:
1025 		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
1026 		break;
1027 	case SRIOV_OFFLOADS:
1028 		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
1029 		break;
1030 	default:
1031 		return -EINVAL;
1032 	}
1033 
1034 	return 0;
1035 }
1036 
1037 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
1038 {
1039 	switch (mode) {
1040 	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
1041 		*mlx5_mode = MLX5_INLINE_MODE_NONE;
1042 		break;
1043 	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
1044 		*mlx5_mode = MLX5_INLINE_MODE_L2;
1045 		break;
1046 	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
1047 		*mlx5_mode = MLX5_INLINE_MODE_IP;
1048 		break;
1049 	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
1050 		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
1051 		break;
1052 	default:
1053 		return -EINVAL;
1054 	}
1055 
1056 	return 0;
1057 }
1058 
1059 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
1060 {
1061 	switch (mlx5_mode) {
1062 	case MLX5_INLINE_MODE_NONE:
1063 		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
1064 		break;
1065 	case MLX5_INLINE_MODE_L2:
1066 		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
1067 		break;
1068 	case MLX5_INLINE_MODE_IP:
1069 		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
1070 		break;
1071 	case MLX5_INLINE_MODE_TCP_UDP:
1072 		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
1073 		break;
1074 	default:
1075 		return -EINVAL;
1076 	}
1077 
1078 	return 0;
1079 }
1080 
1081 static int mlx5_devlink_eswitch_check(struct devlink *devlink)
1082 {
1083 	struct mlx5_core_dev *dev = devlink_priv(devlink);
1084 
1085 	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1086 		return -EOPNOTSUPP;
1087 
1088 	if (!MLX5_ESWITCH_MANAGER(dev))
1089 		return -EPERM;
1090 
1091 	if (dev->priv.eswitch->mode == SRIOV_NONE)
1092 		return -EOPNOTSUPP;
1093 
1094 	return 0;
1095 }
1096 
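/*
 * devlink eswitch mode callback (e.g. "devlink dev eswitch set <dev> mode
 * switchdev"); switches between legacy and offloads mode when the requested
 * mode differs from the current one.
 */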
1097 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
1098 {
1099 	struct mlx5_core_dev *dev = devlink_priv(devlink);
1100 	u16 cur_mlx5_mode, mlx5_mode = 0;
1101 	int err;
1102 
1103 	err = mlx5_devlink_eswitch_check(devlink);
1104 	if (err)
1105 		return err;
1106 
1107 	cur_mlx5_mode = dev->priv.eswitch->mode;
1108 
1109 	if (esw_mode_from_devlink(mode, &mlx5_mode))
1110 		return -EINVAL;
1111 
1112 	if (cur_mlx5_mode == mlx5_mode)
1113 		return 0;
1114 
1115 	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
1116 		return esw_offloads_start(dev->priv.eswitch);
1117 	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
1118 		return esw_offloads_stop(dev->priv.eswitch);
1119 	else
1120 		return -EINVAL;
1121 }
1122 
1123 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
1124 {
1125 	struct mlx5_core_dev *dev = devlink_priv(devlink);
1126 	int err;
1127 
1128 	err = mlx5_devlink_eswitch_check(devlink);
1129 	if (err)
1130 		return err;
1131 
1132 	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
1133 }
1134 
1135 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
1136 {
1137 	struct mlx5_core_dev *dev = devlink_priv(devlink);
1138 	struct mlx5_eswitch *esw = dev->priv.eswitch;
1139 	int err, vport;
1140 	u8 mlx5_mode;
1141 
1142 	err = mlx5_devlink_eswitch_check(devlink);
1143 	if (err)
1144 		return err;
1145 
1146 	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
1147 	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1148 		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
1149 			return 0;
1150 		/* fall through */
1151 	case MLX5_CAP_INLINE_MODE_L2:
1152 		esw_warn(dev, "Inline mode can't be set\n");
1153 		return -EOPNOTSUPP;
1154 	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1155 		break;
1156 	}
1157 
1158 	if (esw->offloads.num_flows > 0) {
1159 		esw_warn(dev, "Can't set inline mode when flows are configured\n");
1160 		return -EOPNOTSUPP;
1161 	}
1162 
1163 	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
1164 	if (err)
1165 		goto out;
1166 
1167 	for (vport = 1; vport < esw->enabled_vports; vport++) {
1168 		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
1169 		if (err) {
1170 			esw_warn(dev, "Failed to set min inline on vport %d\n",
1171 				 vport);
1172 			goto revert_inline_mode;
1173 		}
1174 	}
1175 
1176 	esw->offloads.inline_mode = mlx5_mode;
1177 	return 0;
1178 
1179 revert_inline_mode:
1180 	while (--vport > 0)
1181 		mlx5_modify_nic_vport_min_inline(dev,
1182 						 vport,
1183 						 esw->offloads.inline_mode);
1184 out:
1185 	return err;
1186 }
1187 
1188 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
1189 {
1190 	struct mlx5_core_dev *dev = devlink_priv(devlink);
1191 	struct mlx5_eswitch *esw = dev->priv.eswitch;
1192 	int err;
1193 
1194 	err = mlx5_devlink_eswitch_check(devlink);
1195 	if (err)
1196 		return err;
1197 
1198 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
1199 }
1200 
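/*
 * Resolve the effective min-inline mode: fixed modes come straight from the
 * device capability, while in VPORT_CONTEXT mode each VF vport is queried and
 * all of them must agree.
 */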
1201 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
1202 {
1203 	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
1204 	struct mlx5_core_dev *dev = esw->dev;
1205 	int vport;
1206 
1207 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
1208 		return -EOPNOTSUPP;
1209 
1210 	if (esw->mode == SRIOV_NONE)
1211 		return -EOPNOTSUPP;
1212 
1213 	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
1214 	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1215 		mlx5_mode = MLX5_INLINE_MODE_NONE;
1216 		goto out;
1217 	case MLX5_CAP_INLINE_MODE_L2:
1218 		mlx5_mode = MLX5_INLINE_MODE_L2;
1219 		goto out;
1220 	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1221 		goto query_vports;
1222 	}
1223 
1224 query_vports:
1225 	for (vport = 1; vport <= nvfs; vport++) {
1226 		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
1227 		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
1228 			return -EINVAL;
1229 		prev_mlx5_mode = mlx5_mode;
1230 	}
1231 
1232 out:
1233 	*mode = mlx5_mode;
1234 	return 0;
1235 }
1236 
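/*
 * Change the encap setting.  In offloads mode this means recreating the
 * fast-path FDB table with or without MLX5_FLOW_TABLE_TUNNEL_EN, which is
 * only allowed while no offloaded flows are installed.
 */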
1237 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
1238 {
1239 	struct mlx5_core_dev *dev = devlink_priv(devlink);
1240 	struct mlx5_eswitch *esw = dev->priv.eswitch;
1241 	int err;
1242 
1243 	err = mlx5_devlink_eswitch_check(devlink);
1244 	if (err)
1245 		return err;
1246 
1247 	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
1248 	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
1249 	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
1250 		return -EOPNOTSUPP;
1251 
1252 	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
1253 		return -EOPNOTSUPP;
1254 
1255 	if (esw->mode == SRIOV_LEGACY) {
1256 		esw->offloads.encap = encap;
1257 		return 0;
1258 	}
1259 
1260 	if (esw->offloads.encap == encap)
1261 		return 0;
1262 
1263 	if (esw->offloads.num_flows > 0) {
1264 		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
1265 		return -EOPNOTSUPP;
1266 	}
1267 
1268 	esw_destroy_offloads_fast_fdb_table(esw);
1269 
1270 	esw->offloads.encap = encap;
1271 	err = esw_create_offloads_fast_fdb_table(esw);
1272 	if (err) {
1273 		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
1274 		esw->offloads.encap = !encap;
1275 		(void)esw_create_offloads_fast_fdb_table(esw);
1276 	}
1277 	return err;
1278 }
1279 
1280 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
1281 {
1282 	struct mlx5_core_dev *dev = devlink_priv(devlink);
1283 	struct mlx5_eswitch *esw = dev->priv.eswitch;
1284 	int err;
1285 
1286 	err = mlx5_devlink_eswitch_check(devlink);
1287 	if (err)
1288 		return err;
1289 
1290 	*encap = esw->offloads.encap;
1291 	return 0;
1292 }
1293 
1294 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
1295 				     int vport_index,
1296 				     struct mlx5_eswitch_rep_if *__rep_if,
1297 				     u8 rep_type)
1298 {
1299 	struct mlx5_esw_offload *offloads = &esw->offloads;
1300 	struct mlx5_eswitch_rep_if *rep_if;
1301 
1302 	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
1303 
1304 	rep_if->load   = __rep_if->load;
1305 	rep_if->unload = __rep_if->unload;
1306 	rep_if->get_proto_dev = __rep_if->get_proto_dev;
1307 	rep_if->priv = __rep_if->priv;
1308 
1309 	rep_if->valid = true;
1310 }
1311 EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);
1312 
1313 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
1314 				       int vport_index, u8 rep_type)
1315 {
1316 	struct mlx5_esw_offload *offloads = &esw->offloads;
1317 	struct mlx5_eswitch_rep *rep;
1318 
1319 	rep = &offloads->vport_reps[vport_index];
1320 
1321 	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
1322 		rep->rep_if[rep_type].unload(rep);
1323 
1324 	rep->rep_if[rep_type].valid = false;
1325 }
1326 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);
1327 
1328 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
1329 {
1330 #define UPLINK_REP_INDEX 0
1331 	struct mlx5_esw_offload *offloads = &esw->offloads;
1332 	struct mlx5_eswitch_rep *rep;
1333 
1334 	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
1335 	return rep->rep_if[rep_type].priv;
1336 }
1337 
1338 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
1339 				 int vport,
1340 				 u8 rep_type)
1341 {
1342 	struct mlx5_esw_offload *offloads = &esw->offloads;
1343 	struct mlx5_eswitch_rep *rep;
1344 
1345 	if (vport == FDB_UPLINK_VPORT)
1346 		vport = UPLINK_REP_INDEX;
1347 
1348 	rep = &offloads->vport_reps[vport];
1349 
1350 	if (rep->rep_if[rep_type].valid &&
1351 	    rep->rep_if[rep_type].get_proto_dev)
1352 		return rep->rep_if[rep_type].get_proto_dev(rep);
1353 	return NULL;
1354 }
1355 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
1356 
1357 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
1358 {
1359 	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
1360 }
1361 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
1362 
1363 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
1364 						int vport)
1365 {
1366 	return &esw->offloads.vport_reps[vport];
1367 }
1368 EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
1369