1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
39 #include "lib/eq.h"
40 #include "eswitch.h"
41 #include "fs_core.h"
42 #include "ecpf.h"
43 
44 enum {
45 	MLX5_ACTION_NONE = 0,
46 	MLX5_ACTION_ADD  = 1,
47 	MLX5_ACTION_DEL  = 2,
48 };
49 
50 /* Vport UC/MC hash node */
51 struct vport_addr {
52 	struct l2addr_node     node;
53 	u8                     action;
54 	u16                    vport;
55 	struct mlx5_flow_handle *flow_rule;
56 	bool mpfs; /* UC MAC was added to MPFS */
57 	/* A flag indicating that mac was added due to mc promiscuous vport */
58 	bool mc_promisc;
59 };
60 
61 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
62 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
63 
64 struct mlx5_vport *__must_check
65 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
66 {
67 	u16 idx;
68 
69 	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
70 		return ERR_PTR(-EPERM);
71 
72 	idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
73 
74 	if (idx > esw->total_vports - 1) {
75 		esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
76 			  vport_num, idx);
77 		return ERR_PTR(-EINVAL);
78 	}
79 
80 	return &esw->vports[idx];
81 }
82 
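/* Arm the NIC vport context change events for @vport via
 * MODIFY_NIC_VPORT_CONTEXT. Only the event types present in
 * @events_mask (UC/MC address change, promisc change) are enabled,
 * so the device reports subsequent changes of those fields for this
 * vport.
 */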
83 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
84 					u32 events_mask)
85 {
86 	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]   = {0};
87 	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
88 	void *nic_vport_ctx;
89 
90 	MLX5_SET(modify_nic_vport_context_in, in,
91 		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
92 	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
93 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
94 	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
95 	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
96 				     in, nic_vport_context);
97 
98 	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
99 
100 	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
101 		MLX5_SET(nic_vport_context, nic_vport_ctx,
102 			 event_on_uc_address_change, 1);
103 	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
104 		MLX5_SET(nic_vport_context, nic_vport_ctx,
105 			 event_on_mc_address_change, 1);
106 	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
107 		MLX5_SET(nic_vport_context, nic_vport_ctx,
108 			 event_on_promisc_change, 1);
109 
110 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
111 }
112 
113 /* E-Switch vport context HW commands */
114 static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
115 					void *in, int inlen)
116 {
117 	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
118 
119 	MLX5_SET(modify_esw_vport_context_in, in, opcode,
120 		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
121 	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
122 	MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
123 	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
124 }
125 
126 int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
127 					  void *in, int inlen)
128 {
129 	return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen);
130 }
131 
132 static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
133 				       void *out, int outlen)
134 {
135 	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
136 
137 	MLX5_SET(query_esw_vport_context_in, in, opcode,
138 		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
139 	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
140 	MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
141 	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
142 }
143 
144 int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
145 					 void *out, int outlen)
146 {
147 	return query_esw_vport_context_cmd(esw->dev, vport, out, outlen);
148 }
149 
150 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
151 				  u16 vlan, u8 qos, u8 set_flags)
152 {
153 	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
154 
155 	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
156 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
157 		return -EOPNOTSUPP;
158 
159 	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
160 		  vport, vlan, qos, set_flags);
161 
162 	if (set_flags & SET_VLAN_STRIP)
163 		MLX5_SET(modify_esw_vport_context_in, in,
164 			 esw_vport_context.vport_cvlan_strip, 1);
165 
166 	if (set_flags & SET_VLAN_INSERT) {
167 		/* insert only if no vlan in packet */
168 		MLX5_SET(modify_esw_vport_context_in, in,
169 			 esw_vport_context.vport_cvlan_insert, 1);
170 
171 		MLX5_SET(modify_esw_vport_context_in, in,
172 			 esw_vport_context.cvlan_pcp, qos);
173 		MLX5_SET(modify_esw_vport_context_in, in,
174 			 esw_vport_context.cvlan_id, vlan);
175 	}
176 
177 	MLX5_SET(modify_esw_vport_context_in, in,
178 		 field_select.vport_cvlan_strip, 1);
179 	MLX5_SET(modify_esw_vport_context_in, in,
180 		 field_select.vport_cvlan_insert, 1);
181 
182 	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
183 }
184 
185 /* E-Switch FDB */
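/* Install a legacy FDB steering rule that forwards matching traffic to
 * @vport. When @mac_c is non-zero the rule matches the destination MAC
 * (@mac_v under mask @mac_c); when @rx_rule is set it additionally
 * matches traffic whose source port is the uplink.
 */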
186 static struct mlx5_flow_handle *
187 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
188 			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
189 {
190 	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
191 			    MLX5_MATCH_OUTER_HEADERS);
192 	struct mlx5_flow_handle *flow_rule = NULL;
193 	struct mlx5_flow_act flow_act = {0};
194 	struct mlx5_flow_destination dest = {};
195 	struct mlx5_flow_spec *spec;
196 	void *mv_misc = NULL;
197 	void *mc_misc = NULL;
198 	u8 *dmac_v = NULL;
199 	u8 *dmac_c = NULL;
200 
201 	if (rx_rule)
202 		match_header |= MLX5_MATCH_MISC_PARAMETERS;
203 
204 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
205 	if (!spec)
206 		return NULL;
207 
208 	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
209 			      outer_headers.dmac_47_16);
210 	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
211 			      outer_headers.dmac_47_16);
212 
213 	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
214 		ether_addr_copy(dmac_v, mac_v);
215 		ether_addr_copy(dmac_c, mac_c);
216 	}
217 
218 	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
219 		mv_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_value,
220 					misc_parameters);
221 		mc_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
222 					misc_parameters);
223 		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
224 		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
225 	}
226 
227 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
228 	dest.vport.num = vport;
229 
230 	esw_debug(esw->dev,
231 		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
232 		  dmac_v, dmac_c, vport);
233 	spec->match_criteria_enable = match_header;
234 	flow_act.action =  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
235 	flow_rule =
236 		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
237 				    &flow_act, &dest, 1);
238 	if (IS_ERR(flow_rule)) {
239 		esw_warn(esw->dev,
240 			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
241 			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
242 		flow_rule = NULL;
243 	}
244 
245 	kvfree(spec);
246 	return flow_rule;
247 }
248 
249 static struct mlx5_flow_handle *
250 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
251 {
252 	u8 mac_c[ETH_ALEN];
253 
254 	eth_broadcast_addr(mac_c);
255 	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
256 }
257 
258 static struct mlx5_flow_handle *
259 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
260 {
261 	u8 mac_c[ETH_ALEN];
262 	u8 mac_v[ETH_ALEN];
263 
264 	eth_zero_addr(mac_c);
265 	eth_zero_addr(mac_v);
266 	mac_c[0] = 0x01;
267 	mac_v[0] = 0x01;
268 	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
269 }
270 
271 static struct mlx5_flow_handle *
272 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
273 {
274 	u8 mac_c[ETH_ALEN];
275 	u8 mac_v[ETH_ALEN];
276 
277 	eth_zero_addr(mac_c);
278 	eth_zero_addr(mac_v);
279 	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
280 }
281 
282 enum {
283 	LEGACY_VEPA_PRIO = 0,
284 	LEGACY_FDB_PRIO,
285 };
286 
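/* Create the small legacy VEPA FDB table (2 FTEs, 2 auto groups) at
 * LEGACY_VEPA_PRIO. Only the table is created here; the VEPA steering
 * rules themselves are added and removed separately (see
 * esw_cleanup_vepa_rules()).
 */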
287 static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
288 {
289 	struct mlx5_core_dev *dev = esw->dev;
290 	struct mlx5_flow_namespace *root_ns;
291 	struct mlx5_flow_table *fdb;
292 	int err;
293 
294 	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
295 	if (!root_ns) {
296 		esw_warn(dev, "Failed to get FDB flow namespace\n");
297 		return -EOPNOTSUPP;
298 	}
299 
300 	/* num FTE 2, num FG 2 */
301 	fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
302 						  2, 2, 0, 0);
303 	if (IS_ERR(fdb)) {
304 		err = PTR_ERR(fdb);
305 		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
306 		return err;
307 	}
308 	esw->fdb_table.legacy.vepa_fdb = fdb;
309 
310 	return 0;
311 }
312 
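/* Create the legacy mode FDB table and its three flow groups:
 *   - addr_grp:     full DMAC match, entries [0, table_size - 3]
 *   - allmulti_grp: multicast bit match, entry table_size - 2
 *   - promisc_grp:  source-port (uplink) match, entry table_size - 1
 * The table size is taken from the FDB log_max_ft_size capability.
 */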
313 static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
314 {
315 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
316 	struct mlx5_flow_table_attr ft_attr = {};
317 	struct mlx5_core_dev *dev = esw->dev;
318 	struct mlx5_flow_namespace *root_ns;
319 	struct mlx5_flow_table *fdb;
320 	struct mlx5_flow_group *g;
321 	void *match_criteria;
322 	int table_size;
323 	u32 *flow_group_in;
324 	u8 *dmac;
325 	int err = 0;
326 
327 	esw_debug(dev, "Create FDB log_max_size(%d)\n",
328 		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
329 
330 	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
331 	if (!root_ns) {
332 		esw_warn(dev, "Failed to get FDB flow namespace\n");
333 		return -EOPNOTSUPP;
334 	}
335 
336 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
337 	if (!flow_group_in)
338 		return -ENOMEM;
339 
340 	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
341 	ft_attr.max_fte = table_size;
342 	ft_attr.prio = LEGACY_FDB_PRIO;
343 	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
344 	if (IS_ERR(fdb)) {
345 		err = PTR_ERR(fdb);
346 		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
347 		goto out;
348 	}
349 	esw->fdb_table.legacy.fdb = fdb;
350 
351 	/* Addresses group : Full match unicast/multicast addresses */
352 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
353 		 MLX5_MATCH_OUTER_HEADERS);
354 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
355 	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
356 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
357 	/* Preserve 2 entries for allmulti and promisc rules */
358 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
359 	eth_broadcast_addr(dmac);
360 	g = mlx5_create_flow_group(fdb, flow_group_in);
361 	if (IS_ERR(g)) {
362 		err = PTR_ERR(g);
363 		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
364 		goto out;
365 	}
366 	esw->fdb_table.legacy.addr_grp = g;
367 
368 	/* Allmulti group : One rule that forwards any mcast traffic */
369 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
370 		 MLX5_MATCH_OUTER_HEADERS);
371 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
372 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
373 	eth_zero_addr(dmac);
374 	dmac[0] = 0x01;
375 	g = mlx5_create_flow_group(fdb, flow_group_in);
376 	if (IS_ERR(g)) {
377 		err = PTR_ERR(g);
378 		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
379 		goto out;
380 	}
381 	esw->fdb_table.legacy.allmulti_grp = g;
382 
383 	/* Promiscuous group :
384 	 * One rule that forwards all unmatched traffic from previous groups
385 	 */
386 	eth_zero_addr(dmac);
387 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
388 		 MLX5_MATCH_MISC_PARAMETERS);
389 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
390 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
391 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
392 	g = mlx5_create_flow_group(fdb, flow_group_in);
393 	if (IS_ERR(g)) {
394 		err = PTR_ERR(g);
395 		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
396 		goto out;
397 	}
398 	esw->fdb_table.legacy.promisc_grp = g;
399 
400 out:
401 	if (err)
402 		esw_destroy_legacy_fdb_table(esw);
403 
404 	kvfree(flow_group_in);
405 	return err;
406 }
407 
408 static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
409 {
410 	esw_debug(esw->dev, "Destroy VEPA Table\n");
411 	if (!esw->fdb_table.legacy.vepa_fdb)
412 		return;
413 
414 	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
415 	esw->fdb_table.legacy.vepa_fdb = NULL;
416 }
417 
418 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
419 {
420 	esw_debug(esw->dev, "Destroy FDB Table\n");
421 	if (!esw->fdb_table.legacy.fdb)
422 		return;
423 
424 	if (esw->fdb_table.legacy.promisc_grp)
425 		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
426 	if (esw->fdb_table.legacy.allmulti_grp)
427 		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
428 	if (esw->fdb_table.legacy.addr_grp)
429 		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
430 	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
431 
432 	esw->fdb_table.legacy.fdb = NULL;
433 	esw->fdb_table.legacy.addr_grp = NULL;
434 	esw->fdb_table.legacy.allmulti_grp = NULL;
435 	esw->fdb_table.legacy.promisc_grp = NULL;
436 }
437 
438 static int esw_create_legacy_table(struct mlx5_eswitch *esw)
439 {
440 	int err;
441 
442 	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
443 
444 	err = esw_create_legacy_vepa_table(esw);
445 	if (err)
446 		return err;
447 
448 	err = esw_create_legacy_fdb_table(esw);
449 	if (err)
450 		esw_destroy_legacy_vepa_table(esw);
451 
452 	return err;
453 }
454 
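/* Vport context events armed on every PF/VF vport while the eswitch
 * runs in legacy SR-IOV mode.
 */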
455 #define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
456 					MLX5_VPORT_MC_ADDR_CHANGE | \
457 					MLX5_VPORT_PROMISC_CHANGE)
458 
459 static int esw_legacy_enable(struct mlx5_eswitch *esw)
460 {
461 	int ret;
462 
463 	ret = esw_create_legacy_table(esw);
464 	if (ret)
465 		return ret;
466 
467 	mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
468 	return 0;
469 }
470 
471 static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
472 {
473 	esw_cleanup_vepa_rules(esw);
474 	esw_destroy_legacy_fdb_table(esw);
475 	esw_destroy_legacy_vepa_table(esw);
476 }
477 
478 static void esw_legacy_disable(struct mlx5_eswitch *esw)
479 {
480 	struct esw_mc_addr *mc_promisc;
481 
482 	mlx5_eswitch_disable_pf_vf_vports(esw);
483 
484 	mc_promisc = &esw->mc_promisc;
485 	if (mc_promisc->uplink_rule)
486 		mlx5_del_flow_rules(mc_promisc->uplink_rule);
487 
488 	esw_destroy_legacy_table(esw);
489 }
490 
491 /* E-Switch vport UC/MC lists management */
492 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
493 				 struct vport_addr *vaddr);
494 
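/* Add a unicast MAC on behalf of @vaddr->vport: program it into the
 * MPFS L2 table (skipped for the eswitch manager vport, whose netdev
 * already did so) and, in legacy mode, install an FDB rule forwarding
 * that MAC to the vport.
 */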
495 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
496 {
497 	u8 *mac = vaddr->node.addr;
498 	u16 vport = vaddr->vport;
499 	int err;
500 
501 	/* Skip mlx5_mpfs_add_mac for eswitch_managers,
502 	 * it is already done by its netdev in mlx5e_execute_l2_action
503 	 */
504 	if (esw->manager_vport == vport)
505 		goto fdb_add;
506 
507 	err = mlx5_mpfs_add_mac(esw->dev, mac);
508 	if (err) {
509 		esw_warn(esw->dev,
510 			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
511 			 mac, vport, err);
512 		return err;
513 	}
514 	vaddr->mpfs = true;
515 
516 fdb_add:
517 	/* SRIOV is enabled: Forward UC MAC to vport */
518 	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
519 		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
520 
521 	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
522 		  vport, mac, vaddr->flow_rule);
523 
524 	return 0;
525 }
526 
527 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
528 {
529 	u8 *mac = vaddr->node.addr;
530 	u16 vport = vaddr->vport;
531 	int err = 0;
532 
533 	/* Skip mlx5_mpfs_del_mac for eswitch managers,
534 	 * it is already done by its netdev in mlx5e_execute_l2_action
535 	 */
536 	if (!vaddr->mpfs || esw->manager_vport == vport)
537 		goto fdb_del;
538 
539 	err = mlx5_mpfs_del_mac(esw->dev, mac);
540 	if (err)
541 		esw_warn(esw->dev,
542 			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
543 			 mac, vport, err);
544 	vaddr->mpfs = false;
545 
546 fdb_del:
547 	if (vaddr->flow_rule)
548 		mlx5_del_flow_rules(vaddr->flow_rule);
549 	vaddr->flow_rule = NULL;
550 
551 	return 0;
552 }
553 
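/* Propagate a multicast MAC add/delete to every other vport that has
 * an allmulti rule installed, so mc-promiscuous vports keep a matching
 * FDB forwarding rule for this multicast group.
 */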
554 static void update_allmulti_vports(struct mlx5_eswitch *esw,
555 				   struct vport_addr *vaddr,
556 				   struct esw_mc_addr *esw_mc)
557 {
558 	u8 *mac = vaddr->node.addr;
559 	struct mlx5_vport *vport;
560 	u16 i, vport_num;
561 
562 	mlx5_esw_for_all_vports(esw, i, vport) {
563 		struct hlist_head *vport_hash = vport->mc_list;
564 		struct vport_addr *iter_vaddr =
565 					l2addr_hash_find(vport_hash,
566 							 mac,
567 							 struct vport_addr);
568 		vport_num = vport->vport;
569 		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
570 		    vaddr->vport == vport_num)
571 			continue;
572 		switch (vaddr->action) {
573 		case MLX5_ACTION_ADD:
574 			if (iter_vaddr)
575 				continue;
576 			iter_vaddr = l2addr_hash_add(vport_hash, mac,
577 						     struct vport_addr,
578 						     GFP_KERNEL);
579 			if (!iter_vaddr) {
580 				esw_warn(esw->dev,
581 					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
582 					 mac, vport_num);
583 				continue;
584 			}
585 			iter_vaddr->vport = vport_num;
586 			iter_vaddr->flow_rule =
587 					esw_fdb_set_vport_rule(esw,
588 							       mac,
589 							       vport_num);
590 			iter_vaddr->mc_promisc = true;
591 			break;
592 		case MLX5_ACTION_DEL:
593 			if (!iter_vaddr)
594 				continue;
595 			mlx5_del_flow_rules(iter_vaddr->flow_rule);
596 			l2addr_hash_del(iter_vaddr);
597 			break;
598 		}
599 	}
600 }
601 
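/* Add a multicast MAC for @vaddr->vport. The MAC is reference counted
 * in esw->mc_table: the first add creates the hash entry, installs the
 * uplink forwarding rule and fans the MAC out to all allmulti vports.
 * The refcount is only bumped for MACs requested by the vport itself
 * (not mc-promisc additions), and every add installs the per-vport FDB
 * forwarding rule.
 */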
602 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
603 {
604 	struct hlist_head *hash = esw->mc_table;
605 	struct esw_mc_addr *esw_mc;
606 	u8 *mac = vaddr->node.addr;
607 	u16 vport = vaddr->vport;
608 
609 	if (!esw->fdb_table.legacy.fdb)
610 		return 0;
611 
612 	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
613 	if (esw_mc)
614 		goto add;
615 
616 	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
617 	if (!esw_mc)
618 		return -ENOMEM;
619 
620 	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
621 		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
622 
623 	/* Add this multicast mac to all the mc promiscuous vports */
624 	update_allmulti_vports(esw, vaddr, esw_mc);
625 
626 add:
627 	/* If the multicast mac is added as a result of mc promiscuous vport,
628 	 * don't increment the multicast ref count
629 	 */
630 	if (!vaddr->mc_promisc)
631 		esw_mc->refcnt++;
632 
633 	/* Forward MC MAC to vport */
634 	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
635 	esw_debug(esw->dev,
636 		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
637 		  vport, mac, vaddr->flow_rule,
638 		  esw_mc->refcnt, esw_mc->uplink_rule);
639 	return 0;
640 }
641 
642 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
643 {
644 	struct hlist_head *hash = esw->mc_table;
645 	struct esw_mc_addr *esw_mc;
646 	u8 *mac = vaddr->node.addr;
647 	u16 vport = vaddr->vport;
648 
649 	if (!esw->fdb_table.legacy.fdb)
650 		return 0;
651 
652 	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
653 	if (!esw_mc) {
654 		esw_warn(esw->dev,
655 			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
656 			 mac, vport);
657 		return -EINVAL;
658 	}
659 	esw_debug(esw->dev,
660 		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
661 		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
662 		  esw_mc->uplink_rule);
663 
664 	if (vaddr->flow_rule)
665 		mlx5_del_flow_rules(vaddr->flow_rule);
666 	vaddr->flow_rule = NULL;
667 
668 	/* If the multicast mac is added as a result of mc promiscuous vport,
669 	 * don't decrement the multicast ref count.
670 	 */
671 	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
672 		return 0;
673 
674 	/* Remove this multicast mac from all the mc promiscuous vports */
675 	update_allmulti_vports(esw, vaddr, esw_mc);
676 
677 	if (esw_mc->uplink_rule)
678 		mlx5_del_flow_rules(esw_mc->uplink_rule);
679 
680 	l2addr_hash_del(esw_mc);
681 	return 0;
682 }
683 
684 /* Apply vport UC/MC list to HW l2 table and FDB table */
685 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
686 				      struct mlx5_vport *vport, int list_type)
687 {
688 	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
689 	vport_addr_action vport_addr_add;
690 	vport_addr_action vport_addr_del;
691 	struct vport_addr *addr;
692 	struct l2addr_node *node;
693 	struct hlist_head *hash;
694 	struct hlist_node *tmp;
695 	int hi;
696 
697 	vport_addr_add = is_uc ? esw_add_uc_addr :
698 				 esw_add_mc_addr;
699 	vport_addr_del = is_uc ? esw_del_uc_addr :
700 				 esw_del_mc_addr;
701 
702 	hash = is_uc ? vport->uc_list : vport->mc_list;
703 	for_each_l2hash_node(node, tmp, hash, hi) {
704 		addr = container_of(node, struct vport_addr, node);
705 		switch (addr->action) {
706 		case MLX5_ACTION_ADD:
707 			vport_addr_add(esw, addr);
708 			addr->action = MLX5_ACTION_NONE;
709 			break;
710 		case MLX5_ACTION_DEL:
711 			vport_addr_del(esw, addr);
712 			l2addr_hash_del(addr);
713 			break;
714 		}
715 	}
716 }
717 
718 /* Sync vport UC/MC list from vport context */
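/* Mark-and-sweep: existing hash entries are first marked for deletion,
 * the current address list is then queried from the vport NIC context,
 * and entries found there are re-marked as NONE (kept) or ADD (new).
 * esw_apply_vport_addr_list() later pushes the result to hardware.
 */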
719 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
720 				       struct mlx5_vport *vport, int list_type)
721 {
722 	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
723 	u8 (*mac_list)[ETH_ALEN];
724 	struct l2addr_node *node;
725 	struct vport_addr *addr;
726 	struct hlist_head *hash;
727 	struct hlist_node *tmp;
728 	int size;
729 	int err;
730 	int hi;
731 	int i;
732 
733 	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
734 		       MLX5_MAX_MC_PER_VPORT(esw->dev);
735 
736 	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
737 	if (!mac_list)
738 		return;
739 
740 	hash = is_uc ? vport->uc_list : vport->mc_list;
741 
742 	for_each_l2hash_node(node, tmp, hash, hi) {
743 		addr = container_of(node, struct vport_addr, node);
744 		addr->action = MLX5_ACTION_DEL;
745 	}
746 
747 	if (!vport->enabled)
748 		goto out;
749 
750 	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
751 					    mac_list, &size);
752 	if (err)
753 		goto out;
754 	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
755 		  vport->vport, is_uc ? "UC" : "MC", size);
756 
757 	for (i = 0; i < size; i++) {
758 		if (is_uc && !is_valid_ether_addr(mac_list[i]))
759 			continue;
760 
761 		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
762 			continue;
763 
764 		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
765 		if (addr) {
766 			addr->action = MLX5_ACTION_NONE;
767 			/* If this mac was previously added because of allmulti
768 			 * promiscuous rx mode, it's now converted to be the original
769 			 * vport mac.
770 			 */
771 			if (addr->mc_promisc) {
772 				struct esw_mc_addr *esw_mc =
773 					l2addr_hash_find(esw->mc_table,
774 							 mac_list[i],
775 							 struct esw_mc_addr);
776 				if (!esw_mc) {
777 					esw_warn(esw->dev,
778 						 "Failed to find MAC(%pM) in mcast DB\n",
779 						 mac_list[i]);
780 					continue;
781 				}
782 				esw_mc->refcnt++;
783 				addr->mc_promisc = false;
784 			}
785 			continue;
786 		}
787 
788 		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
789 				       GFP_KERNEL);
790 		if (!addr) {
791 			esw_warn(esw->dev,
792 				 "Failed to add MAC(%pM) to vport[%d] DB\n",
793 				 mac_list[i], vport->vport);
794 			continue;
795 		}
796 		addr->vport = vport->vport;
797 		addr->action = MLX5_ACTION_ADD;
798 	}
799 out:
800 	kfree(mac_list);
801 }
802 
803 /* Sync the vport MC promisc list with the eswitch multicast table
804  * Must be called after esw_update_vport_addr_list
805  */
806 static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
807 					struct mlx5_vport *vport)
808 {
809 	struct l2addr_node *node;
810 	struct vport_addr *addr;
811 	struct hlist_head *hash;
812 	struct hlist_node *tmp;
813 	int hi;
814 
815 	hash = vport->mc_list;
816 
817 	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
818 		u8 *mac = node->addr;
819 
820 		addr = l2addr_hash_find(hash, mac, struct vport_addr);
821 		if (addr) {
822 			if (addr->action == MLX5_ACTION_DEL)
823 				addr->action = MLX5_ACTION_NONE;
824 			continue;
825 		}
826 		addr = l2addr_hash_add(hash, mac, struct vport_addr,
827 				       GFP_KERNEL);
828 		if (!addr) {
829 			esw_warn(esw->dev,
830 				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
831 				 mac, vport->vport);
832 			continue;
833 		}
834 		addr->vport = vport->vport;
835 		addr->action = MLX5_ACTION_ADD;
836 		addr->mc_promisc = true;
837 	}
838 }
839 
840 /* Apply vport rx mode to HW FDB table */
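/* The uplink allmulti rule is shared by all mc-promiscuous vports and
 * reference counted in esw->mc_promisc; per-vport allmulti/promisc
 * rules are added or removed only when the requested state differs
 * from the currently installed one.
 */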
841 static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
842 				    struct mlx5_vport *vport,
843 				    bool promisc, bool mc_promisc)
844 {
845 	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
846 
847 	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
848 		goto promisc;
849 
850 	if (mc_promisc) {
851 		vport->allmulti_rule =
852 			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
853 		if (!allmulti_addr->uplink_rule)
854 			allmulti_addr->uplink_rule =
855 				esw_fdb_set_vport_allmulti_rule(esw,
856 								MLX5_VPORT_UPLINK);
857 		allmulti_addr->refcnt++;
858 	} else if (vport->allmulti_rule) {
859 		mlx5_del_flow_rules(vport->allmulti_rule);
860 		vport->allmulti_rule = NULL;
861 
862 		if (--allmulti_addr->refcnt > 0)
863 			goto promisc;
864 
865 		if (allmulti_addr->uplink_rule)
866 			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
867 		allmulti_addr->uplink_rule = NULL;
868 	}
869 
870 promisc:
871 	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
872 		return;
873 
874 	if (promisc) {
875 		vport->promisc_rule =
876 			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
877 	} else if (vport->promisc_rule) {
878 		mlx5_del_flow_rules(vport->promisc_rule);
879 		vport->promisc_rule = NULL;
880 	}
881 }
882 
883 /* Sync vport rx mode from vport context */
884 static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
885 				     struct mlx5_vport *vport)
886 {
887 	int promisc_all = 0;
888 	int promisc_uc = 0;
889 	int promisc_mc = 0;
890 	int err;
891 
892 	err = mlx5_query_nic_vport_promisc(esw->dev,
893 					   vport->vport,
894 					   &promisc_uc,
895 					   &promisc_mc,
896 					   &promisc_all);
897 	if (err)
898 		return;
899 	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
900 		  vport->vport, promisc_all, promisc_mc);
901 
902 	if (!vport->info.trusted || !vport->enabled) {
903 		promisc_uc = 0;
904 		promisc_mc = 0;
905 		promisc_all = 0;
906 	}
907 
908 	esw_apply_vport_rx_mode(esw, vport, promisc_all,
909 				(promisc_all || promisc_mc));
910 }
911 
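/* Handle a NIC vport context change event. Called with esw->state_lock
 * held; re-reads the UC/MC address lists and rx mode according to the
 * armed event types and re-arms the change event at the end.
 */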
912 static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
913 {
914 	struct mlx5_core_dev *dev = vport->dev;
915 	struct mlx5_eswitch *esw = dev->priv.eswitch;
916 	u8 mac[ETH_ALEN];
917 
918 	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
919 	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
920 		  vport->vport, mac);
921 
922 	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
923 		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
924 		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
925 	}
926 
927 	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
928 		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
929 
930 	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
931 		esw_update_vport_rx_mode(esw, vport);
932 		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
933 			esw_update_vport_mc_promisc(esw, vport);
934 	}
935 
936 	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
937 		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
938 
939 	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
940 	if (vport->enabled)
941 		arm_vport_context_events_cmd(dev, vport->vport,
942 					     vport->enabled_events);
943 }
944 
945 static void esw_vport_change_handler(struct work_struct *work)
946 {
947 	struct mlx5_vport *vport =
948 		container_of(work, struct mlx5_vport, vport_change_handler);
949 	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
950 
951 	mutex_lock(&esw->state_lock);
952 	esw_vport_change_handle_locked(vport);
953 	mutex_unlock(&esw->state_lock);
954 }
955 
956 int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
957 				struct mlx5_vport *vport)
958 {
959 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
960 	struct mlx5_flow_group *vlan_grp = NULL;
961 	struct mlx5_flow_group *drop_grp = NULL;
962 	struct mlx5_core_dev *dev = esw->dev;
963 	struct mlx5_flow_namespace *root_ns;
964 	struct mlx5_flow_table *acl;
965 	void *match_criteria;
966 	u32 *flow_group_in;
967 	/* The egress acl table contains 2 rules:
968 	 * 1)Allow traffic with vlan_tag=vst_vlan_id
969 	 * 2)Drop all other traffic.
970 	 */
971 	int table_size = 2;
972 	int err = 0;
973 
974 	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
975 		return -EOPNOTSUPP;
976 
977 	if (!IS_ERR_OR_NULL(vport->egress.acl))
978 		return 0;
979 
980 	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
981 		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
982 
983 	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
984 			mlx5_eswitch_vport_num_to_index(esw, vport->vport));
985 	if (!root_ns) {
986 		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
987 		return -EOPNOTSUPP;
988 	}
989 
990 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
991 	if (!flow_group_in)
992 		return -ENOMEM;
993 
994 	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
995 	if (IS_ERR(acl)) {
996 		err = PTR_ERR(acl);
997 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
998 			 vport->vport, err);
999 		goto out;
1000 	}
1001 
1002 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1003 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1004 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1005 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
1006 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1007 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
1008 
1009 	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
1010 	if (IS_ERR(vlan_grp)) {
1011 		err = PTR_ERR(vlan_grp);
1012 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
1013 			 vport->vport, err);
1014 		goto out;
1015 	}
1016 
1017 	memset(flow_group_in, 0, inlen);
1018 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
1019 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
1020 	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
1021 	if (IS_ERR(drop_grp)) {
1022 		err = PTR_ERR(drop_grp);
1023 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
1024 			 vport->vport, err);
1025 		goto out;
1026 	}
1027 
1028 	vport->egress.acl = acl;
1029 	vport->egress.drop_grp = drop_grp;
1030 	vport->egress.allowed_vlans_grp = vlan_grp;
1031 out:
1032 	kvfree(flow_group_in);
1033 	if (err && !IS_ERR_OR_NULL(vlan_grp))
1034 		mlx5_destroy_flow_group(vlan_grp);
1035 	if (err && !IS_ERR_OR_NULL(acl))
1036 		mlx5_destroy_flow_table(acl);
1037 	return err;
1038 }
1039 
1040 void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
1041 				    struct mlx5_vport *vport)
1042 {
1043 	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
1044 		mlx5_del_flow_rules(vport->egress.allowed_vlan);
1045 
1046 	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
1047 		mlx5_del_flow_rules(vport->egress.drop_rule);
1048 
1049 	vport->egress.allowed_vlan = NULL;
1050 	vport->egress.drop_rule = NULL;
1051 }
1052 
1053 void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
1054 				  struct mlx5_vport *vport)
1055 {
1056 	if (IS_ERR_OR_NULL(vport->egress.acl))
1057 		return;
1058 
1059 	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
1060 
1061 	esw_vport_cleanup_egress_rules(esw, vport);
1062 	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
1063 	mlx5_destroy_flow_group(vport->egress.drop_grp);
1064 	mlx5_destroy_flow_table(vport->egress.acl);
1065 	vport->egress.allowed_vlans_grp = NULL;
1066 	vport->egress.drop_grp = NULL;
1067 	vport->egress.acl = NULL;
1068 }
1069 
1070 int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1071 				 struct mlx5_vport *vport)
1072 {
1073 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1074 	struct mlx5_core_dev *dev = esw->dev;
1075 	struct mlx5_flow_namespace *root_ns;
1076 	struct mlx5_flow_table *acl;
1077 	struct mlx5_flow_group *g;
1078 	void *match_criteria;
1079 	u32 *flow_group_in;
1080 	/* The ingress acl table contains 4 groups
1081 	 * (2 active rules at the same time -
1082 	 *      1 allow rule from one of the first 3 groups.
1083 	 *      1 drop rule from the last group):
1084 	 * 1)Allow untagged traffic with smac=original mac.
1085 	 * 2)Allow untagged traffic.
1086 	 * 3)Allow traffic with smac=original mac.
1087 	 * 4)Drop all other traffic.
1088 	 */
1089 	int table_size = 4;
1090 	int err = 0;
1091 
1092 	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
1093 		return -EOPNOTSUPP;
1094 
1095 	if (!IS_ERR_OR_NULL(vport->ingress.acl))
1096 		return 0;
1097 
1098 	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
1099 		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
1100 
1101 	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1102 			mlx5_eswitch_vport_num_to_index(esw, vport->vport));
1103 	if (!root_ns) {
1104 		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
1105 		return -EOPNOTSUPP;
1106 	}
1107 
1108 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1109 	if (!flow_group_in)
1110 		return -ENOMEM;
1111 
1112 	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
1113 	if (IS_ERR(acl)) {
1114 		err = PTR_ERR(acl);
1115 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
1116 			 vport->vport, err);
1117 		goto out;
1118 	}
1119 	vport->ingress.acl = acl;
1120 
1121 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1122 
1123 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1124 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1125 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1126 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1127 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1128 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
1129 
1130 	g = mlx5_create_flow_group(acl, flow_group_in);
1131 	if (IS_ERR(g)) {
1132 		err = PTR_ERR(g);
1133 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
1134 			 vport->vport, err);
1135 		goto out;
1136 	}
1137 	vport->ingress.allow_untagged_spoofchk_grp = g;
1138 
1139 	memset(flow_group_in, 0, inlen);
1140 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1141 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1142 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
1143 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
1144 
1145 	g = mlx5_create_flow_group(acl, flow_group_in);
1146 	if (IS_ERR(g)) {
1147 		err = PTR_ERR(g);
1148 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
1149 			 vport->vport, err);
1150 		goto out;
1151 	}
1152 	vport->ingress.allow_untagged_only_grp = g;
1153 
1154 	memset(flow_group_in, 0, inlen);
1155 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1156 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1157 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1158 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
1159 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
1160 
1161 	g = mlx5_create_flow_group(acl, flow_group_in);
1162 	if (IS_ERR(g)) {
1163 		err = PTR_ERR(g);
1164 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
1165 			 vport->vport, err);
1166 		goto out;
1167 	}
1168 	vport->ingress.allow_spoofchk_only_grp = g;
1169 
1170 	memset(flow_group_in, 0, inlen);
1171 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
1172 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
1173 
1174 	g = mlx5_create_flow_group(acl, flow_group_in);
1175 	if (IS_ERR(g)) {
1176 		err = PTR_ERR(g);
1177 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
1178 			 vport->vport, err);
1179 		goto out;
1180 	}
1181 	vport->ingress.drop_grp = g;
1182 
1183 out:
1184 	if (err) {
1185 		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
1186 			mlx5_destroy_flow_group(
1187 					vport->ingress.allow_spoofchk_only_grp);
1188 		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
1189 			mlx5_destroy_flow_group(
1190 					vport->ingress.allow_untagged_only_grp);
1191 		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
1192 			mlx5_destroy_flow_group(
1193 				vport->ingress.allow_untagged_spoofchk_grp);
1194 		if (!IS_ERR_OR_NULL(vport->ingress.acl))
1195 			mlx5_destroy_flow_table(vport->ingress.acl);
1196 	}
1197 
1198 	kvfree(flow_group_in);
1199 	return err;
1200 }
1201 
1202 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
1203 				     struct mlx5_vport *vport)
1204 {
1205 	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
1206 		mlx5_del_flow_rules(vport->ingress.drop_rule);
1207 
1208 	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
1209 		mlx5_del_flow_rules(vport->ingress.allow_rule);
1210 
1211 	vport->ingress.drop_rule = NULL;
1212 	vport->ingress.allow_rule = NULL;
1213 
1214 	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
1215 }
1216 
1217 void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
1218 				   struct mlx5_vport *vport)
1219 {
1220 	if (IS_ERR_OR_NULL(vport->ingress.acl))
1221 		return;
1222 
1223 	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
1224 
1225 	esw_vport_cleanup_ingress_rules(esw, vport);
1226 	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
1227 	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
1228 	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
1229 	mlx5_destroy_flow_group(vport->ingress.drop_grp);
1230 	mlx5_destroy_flow_table(vport->ingress.acl);
1231 	vport->ingress.acl = NULL;
1232 	vport->ingress.drop_grp = NULL;
1233 	vport->ingress.allow_spoofchk_only_grp = NULL;
1234 	vport->ingress.allow_untagged_only_grp = NULL;
1235 	vport->ingress.allow_untagged_spoofchk_grp = NULL;
1236 }
1237 
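/* Program the legacy ingress ACL according to VST vlan/qos and
 * spoofchk: an allow rule matching untagged traffic and/or the vport's
 * configured MAC, followed by a drop-all rule optionally attached to
 * the ingress drop counter. The ACL is removed entirely when neither
 * vlan, qos nor spoofchk is configured.
 */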
1238 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1239 				    struct mlx5_vport *vport)
1240 {
1241 	struct mlx5_fc *counter = vport->ingress.drop_counter;
1242 	struct mlx5_flow_destination drop_ctr_dst = {0};
1243 	struct mlx5_flow_destination *dst = NULL;
1244 	struct mlx5_flow_act flow_act = {0};
1245 	struct mlx5_flow_spec *spec;
1246 	int dest_num = 0;
1247 	int err = 0;
1248 	u8 *smac_v;
1249 
1250 	esw_vport_cleanup_ingress_rules(esw, vport);
1251 
1252 	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
1253 		esw_vport_disable_ingress_acl(esw, vport);
1254 		return 0;
1255 	}
1256 
1257 	err = esw_vport_enable_ingress_acl(esw, vport);
1258 	if (err) {
1259 		mlx5_core_warn(esw->dev,
1260 			       "failed to enable ingress acl (%d) on vport[%d]\n",
1261 			       err, vport->vport);
1262 		return err;
1263 	}
1264 
1265 	esw_debug(esw->dev,
1266 		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
1267 		  vport->vport, vport->info.vlan, vport->info.qos);
1268 
1269 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1270 	if (!spec) {
1271 		err = -ENOMEM;
1272 		goto out;
1273 	}
1274 
1275 	if (vport->info.vlan || vport->info.qos)
1276 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1277 
1278 	if (vport->info.spoofchk) {
1279 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
1280 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
1281 		smac_v = MLX5_ADDR_OF(fte_match_param,
1282 				      spec->match_value,
1283 				      outer_headers.smac_47_16);
1284 		ether_addr_copy(smac_v, vport->info.mac);
1285 	}
1286 
1287 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1288 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1289 	vport->ingress.allow_rule =
1290 		mlx5_add_flow_rules(vport->ingress.acl, spec,
1291 				    &flow_act, NULL, 0);
1292 	if (IS_ERR(vport->ingress.allow_rule)) {
1293 		err = PTR_ERR(vport->ingress.allow_rule);
1294 		esw_warn(esw->dev,
1295 			 "vport[%d] configure ingress allow rule, err(%d)\n",
1296 			 vport->vport, err);
1297 		vport->ingress.allow_rule = NULL;
1298 		goto out;
1299 	}
1300 
1301 	memset(spec, 0, sizeof(*spec));
1302 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1303 
1304 	/* Attach drop flow counter */
1305 	if (counter) {
1306 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1307 		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1308 		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
1309 		dst = &drop_ctr_dst;
1310 		dest_num++;
1311 	}
1312 	vport->ingress.drop_rule =
1313 		mlx5_add_flow_rules(vport->ingress.acl, spec,
1314 				    &flow_act, dst, dest_num);
1315 	if (IS_ERR(vport->ingress.drop_rule)) {
1316 		err = PTR_ERR(vport->ingress.drop_rule);
1317 		esw_warn(esw->dev,
1318 			 "vport[%d] configure ingress drop rule, err(%d)\n",
1319 			 vport->vport, err);
1320 		vport->ingress.drop_rule = NULL;
1321 		goto out;
1322 	}
1323 
1324 out:
1325 	if (err)
1326 		esw_vport_cleanup_ingress_rules(esw, vport);
1327 	kvfree(spec);
1328 	return err;
1329 }
1330 
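/* Program the legacy egress ACL for VST: allow frames tagged with the
 * configured vlan id, drop (and optionally count) everything else.
 * The ACL is removed entirely when neither vlan nor qos is configured.
 */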
1331 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1332 				   struct mlx5_vport *vport)
1333 {
1334 	struct mlx5_fc *counter = vport->egress.drop_counter;
1335 	struct mlx5_flow_destination drop_ctr_dst = {0};
1336 	struct mlx5_flow_destination *dst = NULL;
1337 	struct mlx5_flow_act flow_act = {0};
1338 	struct mlx5_flow_spec *spec;
1339 	int dest_num = 0;
1340 	int err = 0;
1341 
1342 	esw_vport_cleanup_egress_rules(esw, vport);
1343 
1344 	if (!vport->info.vlan && !vport->info.qos) {
1345 		esw_vport_disable_egress_acl(esw, vport);
1346 		return 0;
1347 	}
1348 
1349 	err = esw_vport_enable_egress_acl(esw, vport);
1350 	if (err) {
1351 		mlx5_core_warn(esw->dev,
1352 			       "failed to enable egress acl (%d) on vport[%d]\n",
1353 			       err, vport->vport);
1354 		return err;
1355 	}
1356 
1357 	esw_debug(esw->dev,
1358 		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
1359 		  vport->vport, vport->info.vlan, vport->info.qos);
1360 
1361 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1362 	if (!spec) {
1363 		err = -ENOMEM;
1364 		goto out;
1365 	}
1366 
1367 	/* Allowed vlan rule */
1368 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1369 	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
1370 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1371 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
1372 
1373 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1374 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1375 	vport->egress.allowed_vlan =
1376 		mlx5_add_flow_rules(vport->egress.acl, spec,
1377 				    &flow_act, NULL, 0);
1378 	if (IS_ERR(vport->egress.allowed_vlan)) {
1379 		err = PTR_ERR(vport->egress.allowed_vlan);
1380 		esw_warn(esw->dev,
1381 			 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
1382 			 vport->vport, err);
1383 		vport->egress.allowed_vlan = NULL;
1384 		goto out;
1385 	}
1386 
1387 	/* Drop others rule (star rule) */
1388 	memset(spec, 0, sizeof(*spec));
1389 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1390 
1391 	/* Attach egress drop flow counter */
1392 	if (counter) {
1393 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1394 		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1395 		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
1396 		dst = &drop_ctr_dst;
1397 		dest_num++;
1398 	}
1399 	vport->egress.drop_rule =
1400 		mlx5_add_flow_rules(vport->egress.acl, spec,
1401 				    &flow_act, dst, dest_num);
1402 	if (IS_ERR(vport->egress.drop_rule)) {
1403 		err = PTR_ERR(vport->egress.drop_rule);
1404 		esw_warn(esw->dev,
1405 			 "vport[%d] configure egress drop rule failed, err(%d)\n",
1406 			 vport->vport, err);
1407 		vport->egress.drop_rule = NULL;
1408 	}
1409 out:
1410 	kvfree(spec);
1411 	return err;
1412 }
1413 
1414 static bool element_type_supported(struct mlx5_eswitch *esw, int type)
1415 {
1416 	const struct mlx5_core_dev *dev = esw->dev;
1417 
1418 	switch (type) {
1419 	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
1420 		return MLX5_CAP_QOS(dev, esw_element_type) &
1421 		       ELEMENT_TYPE_CAP_MASK_TASR;
1422 	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
1423 		return MLX5_CAP_QOS(dev, esw_element_type) &
1424 		       ELEMENT_TYPE_CAP_MASK_VPORT;
1425 	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
1426 		return MLX5_CAP_QOS(dev, esw_element_type) &
1427 		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
1428 	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
1429 		return MLX5_CAP_QOS(dev, esw_element_type) &
1430 		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
1431 	}
1432 	return false;
1433 }
1434 
1435 /* Vport QoS management */
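/* Create the root DWRR TSAR of the E-Switch scheduling hierarchy.
 * Per-vport scheduling elements created in esw_vport_enable_qos() are
 * attached under this TSAR to enforce bw_share and max rate settings.
 */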
1436 static void esw_create_tsar(struct mlx5_eswitch *esw)
1437 {
1438 	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1439 	struct mlx5_core_dev *dev = esw->dev;
1440 	__be32 *attr;
1441 	int err;
1442 
1443 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
1444 		return;
1445 
1446 	if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
1447 		return;
1448 
1449 	if (esw->qos.enabled)
1450 		return;
1451 
1452 	MLX5_SET(scheduling_context, tsar_ctx, element_type,
1453 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
1454 
1455 	attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
1456 	*attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
1457 
1458 	err = mlx5_create_scheduling_element_cmd(dev,
1459 						 SCHEDULING_HIERARCHY_E_SWITCH,
1460 						 tsar_ctx,
1461 						 &esw->qos.root_tsar_id);
1462 	if (err) {
1463 		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
1464 		return;
1465 	}
1466 
1467 	esw->qos.enabled = true;
1468 }
1469 
1470 static void esw_destroy_tsar(struct mlx5_eswitch *esw)
1471 {
1472 	int err;
1473 
1474 	if (!esw->qos.enabled)
1475 		return;
1476 
1477 	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
1478 						  SCHEDULING_HIERARCHY_E_SWITCH,
1479 						  esw->qos.root_tsar_id);
1480 	if (err)
1481 		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);
1482 
1483 	esw->qos.enabled = false;
1484 }
1485 
1486 static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
1487 				struct mlx5_vport *vport,
1488 				u32 initial_max_rate, u32 initial_bw_share)
1489 {
1490 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1491 	struct mlx5_core_dev *dev = esw->dev;
1492 	void *vport_elem;
1493 	int err = 0;
1494 
1495 	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
1496 	    !MLX5_CAP_QOS(dev, esw_scheduling))
1497 		return 0;
1498 
1499 	if (vport->qos.enabled)
1500 		return -EEXIST;
1501 
1502 	MLX5_SET(scheduling_context, sched_ctx, element_type,
1503 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
1504 	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
1505 				  element_attributes);
1506 	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
1507 	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
1508 		 esw->qos.root_tsar_id);
1509 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
1510 		 initial_max_rate);
1511 	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
1512 
1513 	err = mlx5_create_scheduling_element_cmd(dev,
1514 						 SCHEDULING_HIERARCHY_E_SWITCH,
1515 						 sched_ctx,
1516 						 &vport->qos.esw_tsar_ix);
1517 	if (err) {
1518 		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
1519 			 vport->vport, err);
1520 		return err;
1521 	}
1522 
1523 	vport->qos.enabled = true;
1524 	return 0;
1525 }
1526 
1527 static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
1528 				  struct mlx5_vport *vport)
1529 {
1530 	int err;
1531 
1532 	if (!vport->qos.enabled)
1533 		return;
1534 
1535 	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
1536 						  SCHEDULING_HIERARCHY_E_SWITCH,
1537 						  vport->qos.esw_tsar_ix);
1538 	if (err)
1539 		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
1540 			 vport->vport, err);
1541 
1542 	vport->qos.enabled = false;
1543 }
1544 
1545 static int esw_vport_qos_config(struct mlx5_eswitch *esw,
1546 				struct mlx5_vport *vport,
1547 				u32 max_rate, u32 bw_share)
1548 {
1549 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1550 	struct mlx5_core_dev *dev = esw->dev;
1551 	void *vport_elem;
1552 	u32 bitmask = 0;
1553 	int err = 0;
1554 
1555 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
1556 		return -EOPNOTSUPP;
1557 
1558 	if (!vport->qos.enabled)
1559 		return -EIO;
1560 
1561 	MLX5_SET(scheduling_context, sched_ctx, element_type,
1562 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
1563 	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
1564 				  element_attributes);
1565 	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
1566 	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
1567 		 esw->qos.root_tsar_id);
1568 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
1569 		 max_rate);
1570 	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
1571 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
1572 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
1573 
1574 	err = mlx5_modify_scheduling_element_cmd(dev,
1575 						 SCHEDULING_HIERARCHY_E_SWITCH,
1576 						 sched_ctx,
1577 						 vport->qos.esw_tsar_ix,
1578 						 bitmask);
1579 	if (err) {
1580 		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
1581 			 vport->vport, err);
1582 		return err;
1583 	}
1584 
1585 	return 0;
1586 }
1587 
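/* Set only the max_average_bw (rate in Mbps) of the vport scheduling
 * element, leaving its bw_share untouched.
 */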
1588 int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
1589 			       u32 rate_mbps)
1590 {
1591 	u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
1592 	struct mlx5_vport *vport;
1593 
1594 	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

1595 	MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
1596 
1597 	return mlx5_modify_scheduling_element_cmd(esw->dev,
1598 						  SCHEDULING_HIERARCHY_E_SWITCH,
1599 						  ctx,
1600 						  vport->qos.esw_tsar_ix,
1601 						  MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
1602 }
1603 
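/* Build a node GUID from a MAC address by inserting 0xFFFE between the
 * first and last three MAC bytes (EUI-64 style layout).
 */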
1604 static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
1605 {
1606 	((u8 *)node_guid)[7] = mac[0];
1607 	((u8 *)node_guid)[6] = mac[1];
1608 	((u8 *)node_guid)[5] = mac[2];
1609 	((u8 *)node_guid)[4] = 0xff;
1610 	((u8 *)node_guid)[3] = 0xfe;
1611 	((u8 *)node_guid)[2] = mac[3];
1612 	((u8 *)node_guid)[1] = mac[4];
1613 	((u8 *)node_guid)[0] = mac[5];
1614 }
1615 
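/* Re-apply the stored administrative configuration (link state, MAC,
 * node GUID, VLAN/QoS and, in legacy mode, ingress/egress ACLs) to the
 * vport. The eswitch manager vport is left untouched.
 */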
1616 static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
1617 				 struct mlx5_vport *vport)
1618 {
1619 	u16 vport_num = vport->vport;
1620 	int flags;
1621 
1622 	if (esw->manager_vport == vport_num)
1623 		return;
1624 
1625 	mlx5_modify_vport_admin_state(esw->dev,
1626 				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1627 				      vport_num, 1,
1628 				      vport->info.link_state);
1629 
1630 	/* Host PF has its own mac/guid. */
1631 	if (vport_num) {
1632 		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
1633 						  vport->info.mac);
1634 		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
1635 						vport->info.node_guid);
1636 	}
1637 
1638 	flags = (vport->info.vlan || vport->info.qos) ?
1639 		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
1640 	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
1641 			       flags);
1642 
1643 	/* Only legacy mode needs ACLs */
1644 	if (esw->mode == MLX5_ESWITCH_LEGACY) {
1645 		esw_vport_ingress_config(esw, vport);
1646 		esw_vport_egress_config(esw, vport);
1647 	}
1648 }
1649 
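/* Allocate flow counters for the ingress and egress ACL drop rules when
 * the firmware supports counters on those ACL tables. Allocation failure
 * is only logged and the counter pointer is left NULL.
 */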
1650 static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
1651 {
1652 	struct mlx5_core_dev *dev = vport->dev;
1653 
1654 	if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
1655 		vport->ingress.drop_counter = mlx5_fc_create(dev, false);
1656 		if (IS_ERR(vport->ingress.drop_counter)) {
1657 			esw_warn(dev,
1658 				 "vport[%d] configure ingress drop rule counter failed\n",
1659 				 vport->vport);
1660 			vport->ingress.drop_counter = NULL;
1661 		}
1662 	}
1663 
1664 	if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
1665 		vport->egress.drop_counter = mlx5_fc_create(dev, false);
1666 		if (IS_ERR(vport->egress.drop_counter)) {
1667 			esw_warn(dev,
1668 				 "vport[%d] configure egress drop rule counter failed\n",
1669 				 vport->vport);
1670 			vport->egress.drop_counter = NULL;
1671 		}
1672 	}
1673 }
1674 
1675 static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
1676 {
1677 	struct mlx5_core_dev *dev = vport->dev;
1678 
1679 	if (vport->ingress.drop_counter)
1680 		mlx5_fc_destroy(dev, vport->ingress.drop_counter);
1681 	if (vport->egress.drop_counter)
1682 		mlx5_fc_destroy(dev, vport->egress.drop_counter);
1683 }
1684 
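/* Bring a vport into service under esw->state_lock: create legacy drop
 * counters, restore the saved vport configuration, attach the vport to the
 * eswitch rate limiter, mark it enabled and run the change handler once to
 * sync with the current vport context.
 */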
1685 static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
1686 			     enum mlx5_eswitch_vport_event enabled_events)
1687 {
1688 	u16 vport_num = vport->vport;
1689 
1690 	mutex_lock(&esw->state_lock);
1691 	WARN_ON(vport->enabled);
1692 
1693 	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
1694 
1695 	/* Create steering drop counters for ingress and egress ACLs */
1696 	if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
1697 		esw_vport_create_drop_counters(vport);
1698 
1699 	/* Restore old vport configuration */
1700 	esw_apply_vport_conf(esw, vport);
1701 
1702 	/* Attach vport to the eswitch rate limiter */
1703 	if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
1704 				 vport->qos.bw_share))
1705 		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter\n", vport_num);
1706 
1707 	/* Sync with current vport context */
1708 	vport->enabled_events = enabled_events;
1709 	vport->enabled = true;
1710 
1711 	/* The eswitch manager vport is trusted by default. On a SmartNIC, the
1712 	 * host PF (vport 0) is trusted as well since it is the vport group manager.
1713 	 */
1714 	if (esw->manager_vport == vport_num ||
1715 	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
1716 		vport->info.trusted = true;
1717 
1718 	esw_vport_change_handle_locked(vport);
1719 
1720 	esw->enabled_vports++;
1721 	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
1722 	mutex_unlock(&esw->state_lock);
1723 }
1724 
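/* Take a vport out of service: mark it disabled, flush pending change
 * events, disarm vport context events and let the change handler tear down
 * remaining resources. In legacy mode, non-manager vports also get their
 * admin state set to DOWN and their ACLs and drop counters removed.
 */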
1725 static void esw_disable_vport(struct mlx5_eswitch *esw,
1726 			      struct mlx5_vport *vport)
1727 {
1728 	u16 vport_num = vport->vport;
1729 
1730 	if (!vport->enabled)
1731 		return;
1732 
1733 	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
1734 	/* Mark this vport as disabled to discard new events */
1735 	vport->enabled = false;
1736 
1737 	/* Wait for current already scheduled events to complete */
1738 	flush_workqueue(esw->work_queue);
1739 	/* Disable events from this vport */
1740 	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
1741 	mutex_lock(&esw->state_lock);
1742 	/* We don't assume VFs will clean up after themselves.
1743 	 * Calling the vport change handler while the vport is disabled will
1744 	 * clean up the vport resources.
1745 	 */
1746 	esw_vport_change_handle_locked(vport);
1747 	vport->enabled_events = 0;
1748 	esw_vport_disable_qos(esw, vport);
1749 	if (esw->manager_vport != vport_num &&
1750 	    esw->mode == MLX5_ESWITCH_LEGACY) {
1751 		mlx5_modify_vport_admin_state(esw->dev,
1752 					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1753 					      vport_num, 1,
1754 					      MLX5_VPORT_ADMIN_STATE_DOWN);
1755 		esw_vport_disable_egress_acl(esw, vport);
1756 		esw_vport_disable_ingress_acl(esw, vport);
1757 		esw_vport_destroy_drop_counters(vport);
1758 	}
1759 	esw->enabled_vports--;
1760 	mutex_unlock(&esw->state_lock);
1761 }
1762 
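/* NIC_VPORT_CHANGE EQ notifier: queue the per-vport change handler work if
 * the vport is still enabled.
 */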
1763 static int eswitch_vport_event(struct notifier_block *nb,
1764 			       unsigned long type, void *data)
1765 {
1766 	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
1767 	struct mlx5_eqe *eqe = data;
1768 	struct mlx5_vport *vport;
1769 	u16 vport_num;
1770 
1771 	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
1772 	vport = mlx5_eswitch_get_vport(esw, vport_num);
1773 	if (IS_ERR(vport))
1774 		return NOTIFY_OK;
1775 
1776 	if (vport->enabled)
1777 		queue_work(esw->work_queue, &vport->vport_change_handler);
1778 
1779 	return NOTIFY_OK;
1780 }
1781 
1782 /**
1783  * mlx5_esw_query_functions - Returns raw output about functions state
1784  * @dev:	Pointer to device to query
1785  *
1786  * mlx5_esw_query_functions() queries the device and returns the raw
1787  * QUERY_ESW_FUNCTIONS command output on success, or an ERR_PTR on failure.
1788  * The caller must free the returned buffer using kvfree().
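 *
 * Typical use (see mlx5_eswitch_update_num_of_vfs() below):
 *
 *	out = mlx5_esw_query_functions(dev);
 *	if (IS_ERR(out))
 *		return;
 *	num_vfs = MLX5_GET(query_esw_functions_out, out,
 *			   host_params_context.host_num_of_vfs);
 *	kvfree(out);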
1789  */
1790 const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
1791 {
1792 	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
1793 	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
1794 	u32 *out;
1795 	int err;
1796 
1797 	out = kvzalloc(outlen, GFP_KERNEL);
1798 	if (!out)
1799 		return ERR_PTR(-ENOMEM);
1800 
1801 	MLX5_SET(query_esw_functions_in, in, opcode,
1802 		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
1803 
1804 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
1805 	if (!err)
1806 		return out;
1807 
1808 	kvfree(out);
1809 	return ERR_PTR(err);
1810 }
1811 
1812 static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
1813 {
1814 	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
1815 	mlx5_eq_notifier_register(esw->dev, &esw->nb);
1816 
1817 	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
1818 		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
1819 			     ESW_FUNCTIONS_CHANGED);
1820 		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
1821 	}
1822 }
1823 
1824 static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
1825 {
1826 	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
1827 		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
1828 
1829 	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
1830 
1831 	flush_workqueue(esw->work_queue);
1832 }
1833 
1834 /* Public E-Switch API */
1835 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
1836 
1837 /* mlx5_eswitch_enable_pf_vf_vports() enables the PF, ECPF and VF vports,
1838  * whichever of them are present on the eswitch.
1839  */
1840 void
1841 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
1842 				 enum mlx5_eswitch_vport_event enabled_events)
1843 {
1844 	struct mlx5_vport *vport;
1845 	int i;
1846 
1847 	/* Enable PF vport */
1848 	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1849 	esw_enable_vport(esw, vport, enabled_events);
1850 
1851 	/* Enable ECPF vports */
1852 	if (mlx5_ecpf_vport_exists(esw->dev)) {
1853 		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1854 		esw_enable_vport(esw, vport, enabled_events);
1855 	}
1856 
1857 	/* Enable VF vports */
1858 	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
1859 		esw_enable_vport(esw, vport, enabled_events);
1860 }
1861 
1862 /* mlx5_eswitch_disable_pf_vf_vports() disables the PF, ECPF and VF vports,
1863  * whichever of them were previously enabled on the eswitch.
1864  */
1865 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
1866 {
1867 	struct mlx5_vport *vport;
1868 	int i;
1869 
1870 	mlx5_esw_for_all_vports_reverse(esw, i, vport)
1871 		esw_disable_vport(esw, vport);
1872 }
1873 
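/* Switch the eswitch into the requested mode (LEGACY or OFFLOADS): create
 * the TSAR, enable the per-mode steering and register the vport-change (and,
 * for offloads, ESW functions changed) event handlers. On failure the mode
 * reverts to MLX5_ESWITCH_NONE.
 */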
1874 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
1875 {
1876 	int err;
1877 
1878 	if (!ESW_ALLOWED(esw) ||
1879 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1880 		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
1881 		return -EOPNOTSUPP;
1882 	}
1883 
1884 	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
1885 		esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
1886 
1887 	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
1888 		esw_warn(esw->dev, "egress ACL is not supported by FW\n");
1889 
1890 	esw_create_tsar(esw);
1891 
1892 	esw->mode = mode;
1893 
1894 	mlx5_lag_update(esw->dev);
1895 
1896 	if (mode == MLX5_ESWITCH_LEGACY) {
1897 		err = esw_legacy_enable(esw);
1898 	} else {
1899 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
1900 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1901 		err = esw_offloads_enable(esw);
1902 	}
1903 
1904 	if (err)
1905 		goto abort;
1906 
1907 	mlx5_eswitch_event_handlers_register(esw);
1908 
1909 	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
1910 		 mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
1911 		 esw->esw_funcs.num_vfs, esw->enabled_vports);
1912 
1913 	return 0;
1914 
1915 abort:
1916 	esw->mode = MLX5_ESWITCH_NONE;
1917 
1918 	if (mode == MLX5_ESWITCH_OFFLOADS) {
1919 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1920 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
1921 	}
1922 
1923 	return err;
1924 }
1925 
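/* Tear down the current eswitch mode: unregister event handlers, disable the
 * legacy or offloads steering, destroy the TSAR and return the eswitch to
 * MLX5_ESWITCH_NONE.
 */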
1926 void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
1927 {
1928 	int old_mode;
1929 
1930 	if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
1931 		return;
1932 
1933 	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
1934 		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
1935 		 esw->esw_funcs.num_vfs, esw->enabled_vports);
1936 
1937 	mlx5_eswitch_event_handlers_unregister(esw);
1938 
1939 	if (esw->mode == MLX5_ESWITCH_LEGACY)
1940 		esw_legacy_disable(esw);
1941 	else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
1942 		esw_offloads_disable(esw);
1943 
1944 	esw_destroy_tsar(esw);
1945 
1946 	old_mode = esw->mode;
1947 	esw->mode = MLX5_ESWITCH_NONE;
1948 
1949 	mlx5_lag_update(esw->dev);
1950 
1951 	if (old_mode == MLX5_ESWITCH_OFFLOADS) {
1952 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1953 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
1954 	}
1955 }
1956 
1957 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1958 {
1959 	struct mlx5_eswitch *esw;
1960 	struct mlx5_vport *vport;
1961 	int total_vports;
1962 	int err, i;
1963 
1964 	if (!MLX5_VPORT_MANAGER(dev))
1965 		return 0;
1966 
1967 	total_vports = mlx5_eswitch_get_total_vports(dev);
1968 
1969 	esw_info(dev,
1970 		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
1971 		 total_vports,
1972 		 MLX5_MAX_UC_PER_VPORT(dev),
1973 		 MLX5_MAX_MC_PER_VPORT(dev));
1974 
1975 	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
1976 	if (!esw)
1977 		return -ENOMEM;
1978 
1979 	esw->dev = dev;
1980 	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
1981 	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
1982 
1983 	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
1984 	if (!esw->work_queue) {
1985 		err = -ENOMEM;
1986 		goto abort;
1987 	}
1988 
1989 	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
1990 			      GFP_KERNEL);
1991 	if (!esw->vports) {
1992 		err = -ENOMEM;
1993 		goto abort;
1994 	}
1995 
1996 	esw->total_vports = total_vports;
1997 
1998 	err = esw_offloads_init_reps(esw);
1999 	if (err)
2000 		goto abort;
2001 
2002 	mutex_init(&esw->offloads.encap_tbl_lock);
2003 	hash_init(esw->offloads.encap_tbl);
2004 	mutex_init(&esw->offloads.mod_hdr.lock);
2005 	hash_init(esw->offloads.mod_hdr.hlist);
2006 	atomic64_set(&esw->offloads.num_flows, 0);
2007 	mutex_init(&esw->state_lock);
2008 
2009 	mlx5_esw_for_all_vports(esw, i, vport) {
2010 		vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
2011 		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
2012 		vport->dev = dev;
2013 		INIT_WORK(&vport->vport_change_handler,
2014 			  esw_vport_change_handler);
2015 	}
2016 
2017 	esw->enabled_vports = 0;
2018 	esw->mode = MLX5_ESWITCH_NONE;
2019 	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
2020 
2021 	dev->priv.eswitch = esw;
2022 	return 0;
2023 abort:
2024 	if (esw->work_queue)
2025 		destroy_workqueue(esw->work_queue);
2026 	esw_offloads_cleanup_reps(esw);
2027 	kfree(esw->vports);
2028 	kfree(esw);
2029 	return err;
2030 }
2031 
2032 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
2033 {
2034 	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
2035 		return;
2036 
2037 	esw_info(esw->dev, "cleanup\n");
2038 
2039 	esw->dev->priv.eswitch = NULL;
2040 	destroy_workqueue(esw->work_queue);
2041 	esw_offloads_cleanup_reps(esw);
2042 	mutex_destroy(&esw->offloads.mod_hdr.lock);
2043 	mutex_destroy(&esw->offloads.encap_tbl_lock);
2044 	kfree(esw->vports);
2045 	kfree(esw);
2046 }
2047 
2048 /* Vport Administration */
2049 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
2050 			       u16 vport, u8 mac[ETH_ALEN])
2051 {
2052 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2053 	u64 node_guid;
2054 	int err = 0;
2055 
2056 	if (IS_ERR(evport))
2057 		return PTR_ERR(evport);
2058 	if (is_multicast_ether_addr(mac))
2059 		return -EINVAL;
2060 
2061 	mutex_lock(&esw->state_lock);
2062 
2063 	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
2064 		mlx5_core_warn(esw->dev,
2065 			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
2066 			       vport);
2067 
2068 	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
2069 	if (err) {
2070 		mlx5_core_warn(esw->dev,
2071 			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
2072 			       vport, err);
2073 		goto unlock;
2074 	}
2075 
2076 	node_guid_gen_from_mac(&node_guid, mac);
2077 	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
2078 	if (err)
2079 		mlx5_core_warn(esw->dev,
2080 			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
2081 			       vport, err);
2082 
2083 	ether_addr_copy(evport->info.mac, mac);
2084 	evport->info.node_guid = node_guid;
2085 	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
2086 		err = esw_vport_ingress_config(esw, evport);
2087 
2088 unlock:
2089 	mutex_unlock(&esw->state_lock);
2090 	return err;
2091 }
2092 
2093 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
2094 				 u16 vport, int link_state)
2095 {
2096 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2097 	int err = 0;
2098 
2099 	if (!ESW_ALLOWED(esw))
2100 		return -EPERM;
2101 	if (IS_ERR(evport))
2102 		return PTR_ERR(evport);
2103 
2104 	mutex_lock(&esw->state_lock);
2105 
2106 	err = mlx5_modify_vport_admin_state(esw->dev,
2107 					    MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
2108 					    vport, 1, link_state);
2109 	if (err) {
2110 		mlx5_core_warn(esw->dev,
2111 			       "Failed to set vport %d link state, err = %d\n",
2112 			       vport, err);
2113 		goto unlock;
2114 	}
2115 
2116 	evport->info.link_state = link_state;
2117 
2118 unlock:
2119 	mutex_unlock(&esw->state_lock);
2120 	return err;
2121 }
2122 
2123 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
2124 				  u16 vport, struct ifla_vf_info *ivi)
2125 {
2126 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2127 
2128 	if (IS_ERR(evport))
2129 		return PTR_ERR(evport);
2130 
2131 	memset(ivi, 0, sizeof(*ivi));
2132 	ivi->vf = vport - 1;
2133 
2134 	mutex_lock(&esw->state_lock);
2135 	ether_addr_copy(ivi->mac, evport->info.mac);
2136 	ivi->linkstate = evport->info.link_state;
2137 	ivi->vlan = evport->info.vlan;
2138 	ivi->qos = evport->info.qos;
2139 	ivi->spoofchk = evport->info.spoofchk;
2140 	ivi->trusted = evport->info.trusted;
2141 	ivi->min_tx_rate = evport->info.min_rate;
2142 	ivi->max_tx_rate = evport->info.max_rate;
2143 	mutex_unlock(&esw->state_lock);
2144 
2145 	return 0;
2146 }
2147 
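/* Program VLAN stripping/insertion for a vport and, in legacy mode, refresh
 * its ingress and egress ACLs. The mlx5_eswitch_set_vport_vlan() wrapper
 * below takes esw->state_lock around this call.
 */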
2148 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
2149 				  u16 vport, u16 vlan, u8 qos, u8 set_flags)
2150 {
2151 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2152 	int err = 0;
2153 
2154 	if (!ESW_ALLOWED(esw))
2155 		return -EPERM;
2156 	if (IS_ERR(evport))
2157 		return PTR_ERR(evport);
2158 	if (vlan > 4095 || qos > 7)
2159 		return -EINVAL;
2160 
2161 	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
2162 	if (err)
2163 		return err;
2164 
2165 	evport->info.vlan = vlan;
2166 	evport->info.qos = qos;
2167 	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
2168 		err = esw_vport_ingress_config(esw, evport);
2169 		if (err)
2170 			return err;
2171 		err = esw_vport_egress_config(esw, evport);
2172 	}
2173 
2174 	return err;
2175 }
2176 
2177 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
2178 				u16 vport, u16 vlan, u8 qos)
2179 {
2180 	u8 set_flags = 0;
2181 	int err;
2182 
2183 	if (vlan || qos)
2184 		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
2185 
2186 	mutex_lock(&esw->state_lock);
2187 	err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
2188 	mutex_unlock(&esw->state_lock);
2189 
2190 	return err;
2191 }
2192 
2193 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
2194 				    u16 vport, bool spoofchk)
2195 {
2196 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2197 	bool pschk;
2198 	int err = 0;
2199 
2200 	if (!ESW_ALLOWED(esw))
2201 		return -EPERM;
2202 	if (IS_ERR(evport))
2203 		return PTR_ERR(evport);
2204 
2205 	mutex_lock(&esw->state_lock);
2206 	pschk = evport->info.spoofchk;
2207 	evport->info.spoofchk = spoofchk;
2208 	if (pschk && !is_valid_ether_addr(evport->info.mac))
2209 		mlx5_core_warn(esw->dev,
2210 			       "Spoofchk is set while MAC is invalid, vport(%d)\n",
2211 			       evport->vport);
2212 	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
2213 		err = esw_vport_ingress_config(esw, evport);
2214 	if (err)
2215 		evport->info.spoofchk = pschk;
2216 	mutex_unlock(&esw->state_lock);
2217 
2218 	return err;
2219 }
2220 
2221 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
2222 {
2223 	if (esw->fdb_table.legacy.vepa_uplink_rule)
2224 		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
2225 
2226 	if (esw->fdb_table.legacy.vepa_star_rule)
2227 		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
2228 
2229 	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
2230 	esw->fdb_table.legacy.vepa_star_rule = NULL;
2231 }
2232 
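/* Install (setting != 0) or remove (setting == 0) the two VEPA rules in the
 * legacy FDB: one that forwards traffic arriving from the uplink to the
 * regular FDB, and a catch-all rule that forwards everything else to the
 * uplink vport.
 */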
2233 static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
2234 					 u8 setting)
2235 {
2236 	struct mlx5_flow_destination dest = {};
2237 	struct mlx5_flow_act flow_act = {};
2238 	struct mlx5_flow_handle *flow_rule;
2239 	struct mlx5_flow_spec *spec;
2240 	int err = 0;
2241 	void *misc;
2242 
2243 	if (!setting) {
2244 		esw_cleanup_vepa_rules(esw);
2245 		return 0;
2246 	}
2247 
2248 	if (esw->fdb_table.legacy.vepa_uplink_rule)
2249 		return 0;
2250 
2251 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2252 	if (!spec)
2253 		return -ENOMEM;
2254 
2255 	/* Uplink rule forwards traffic arriving from the uplink vport to the FDB */
2256 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
2257 	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
2258 
2259 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2260 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2261 
2262 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2263 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2264 	dest.ft = esw->fdb_table.legacy.fdb;
2265 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2266 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
2267 					&flow_act, &dest, 1);
2268 	if (IS_ERR(flow_rule)) {
2269 		err = PTR_ERR(flow_rule);
2270 		goto out;
2271 	} else {
2272 		esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
2273 	}
2274 
2275 	/* Star rule to forward all traffic to uplink vport */
2276 	memset(spec, 0, sizeof(*spec));
2277 	memset(&dest, 0, sizeof(dest));
2278 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2279 	dest.vport.num = MLX5_VPORT_UPLINK;
2280 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2281 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
2282 					&flow_act, &dest, 1);
2283 	if (IS_ERR(flow_rule)) {
2284 		err = PTR_ERR(flow_rule);
2285 		goto out;
2286 	} else {
2287 		esw->fdb_table.legacy.vepa_star_rule = flow_rule;
2288 	}
2289 
2290 out:
2291 	kvfree(spec);
2292 	if (err)
2293 		esw_cleanup_vepa_rules(esw);
2294 	return err;
2295 }
2296 
2297 int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
2298 {
2299 	int err = 0;
2300 
2301 	if (!esw)
2302 		return -EOPNOTSUPP;
2303 
2304 	if (!ESW_ALLOWED(esw))
2305 		return -EPERM;
2306 
2307 	mutex_lock(&esw->state_lock);
2308 	if (esw->mode != MLX5_ESWITCH_LEGACY) {
2309 		err = -EOPNOTSUPP;
2310 		goto out;
2311 	}
2312 
2313 	err = _mlx5_eswitch_set_vepa_locked(esw, setting);
2314 
2315 out:
2316 	mutex_unlock(&esw->state_lock);
2317 	return err;
2318 }
2319 
2320 int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
2321 {
2322 	int err = 0;
2323 
2324 	if (!esw)
2325 		return -EOPNOTSUPP;
2326 
2327 	if (!ESW_ALLOWED(esw))
2328 		return -EPERM;
2329 
2330 	mutex_lock(&esw->state_lock);
2331 	if (esw->mode != MLX5_ESWITCH_LEGACY) {
2332 		err = -EOPNOTSUPP;
2333 		goto out;
2334 	}
2335 
2336 	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
2337 
2338 out:
2339 	mutex_unlock(&esw->state_lock);
2340 	return err;
2341 }
2342 
2343 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
2344 				 u16 vport, bool setting)
2345 {
2346 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2347 
2348 	if (!ESW_ALLOWED(esw))
2349 		return -EPERM;
2350 	if (IS_ERR(evport))
2351 		return PTR_ERR(evport);
2352 
2353 	mutex_lock(&esw->state_lock);
2354 	evport->info.trusted = setting;
2355 	if (evport->enabled)
2356 		esw_vport_change_handle_locked(evport);
2357 	mutex_unlock(&esw->state_lock);
2358 
2359 	return 0;
2360 }
2361 
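/* Compute the divider that maps the largest configured min_rate among the
 * enabled vports onto the firmware's maximum TSAR bw_share value; at least 1.
 */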
2362 static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
2363 {
2364 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2365 	struct mlx5_vport *evport;
2366 	u32 max_guarantee = 0;
2367 	int i;
2368 
2369 	mlx5_esw_for_all_vports(esw, i, evport) {
2370 		if (!evport->enabled || evport->info.min_rate < max_guarantee)
2371 			continue;
2372 		max_guarantee = evport->info.min_rate;
2373 	}
2374 
2375 	return max_t(u32, max_guarantee / fw_max_bw_share, 1);
2376 }
2377 
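/* Recompute and apply the TSAR bw_share of every enabled vport from its
 * min_rate and the given divider, so the relative guarantees stay
 * proportional after a min_rate change.
 */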
2378 static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
2379 {
2380 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2381 	struct mlx5_vport *evport;
2382 	u32 vport_max_rate;
2383 	u32 vport_min_rate;
2384 	u32 bw_share;
2385 	int err;
2386 	int i;
2387 
2388 	mlx5_esw_for_all_vports(esw, i, evport) {
2389 		if (!evport->enabled)
2390 			continue;
2391 		vport_min_rate = evport->info.min_rate;
2392 		vport_max_rate = evport->info.max_rate;
2393 		bw_share = MLX5_MIN_BW_SHARE;
2394 
2395 		if (vport_min_rate)
2396 			bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
2397 							 divider,
2398 							 fw_max_bw_share);
2399 
2400 		if (bw_share == evport->qos.bw_share)
2401 			continue;
2402 
2403 		err = esw_vport_qos_config(esw, evport, vport_max_rate,
2404 					   bw_share);
2405 		if (!err)
2406 			evport->qos.bw_share = bw_share;
2407 		else
2408 			return err;
2409 	}
2410 
2411 	return 0;
2412 }
2413 
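/* min_rate is a guarantee expressed through the bw_share of all enabled
 * vports; max_rate is an absolute limit applied to this vport's scheduling
 * element.
 */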
2414 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
2415 				u32 max_rate, u32 min_rate)
2416 {
2417 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2418 	u32 fw_max_bw_share;
2419 	u32 previous_min_rate;
2420 	u32 divider;
2421 	bool min_rate_supported;
2422 	bool max_rate_supported;
2423 	int err = 0;
2424 
2425 	if (!ESW_ALLOWED(esw))
2426 		return -EPERM;
2427 	if (IS_ERR(evport))
2428 		return PTR_ERR(evport);
2429 
2430 	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2431 	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
2432 				fw_max_bw_share >= MLX5_MIN_BW_SHARE;
2433 	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
2434 
2435 	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
2436 		return -EOPNOTSUPP;
2437 
2438 	mutex_lock(&esw->state_lock);
2439 
2440 	if (min_rate == evport->info.min_rate)
2441 		goto set_max_rate;
2442 
2443 	previous_min_rate = evport->info.min_rate;
2444 	evport->info.min_rate = min_rate;
2445 	divider = calculate_vports_min_rate_divider(esw);
2446 	err = normalize_vports_min_rate(esw, divider);
2447 	if (err) {
2448 		evport->info.min_rate = previous_min_rate;
2449 		goto unlock;
2450 	}
2451 
2452 set_max_rate:
2453 	if (max_rate == evport->info.max_rate)
2454 		goto unlock;
2455 
2456 	err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
2457 	if (!err)
2458 		evport->info.max_rate = max_rate;
2459 
2460 unlock:
2461 	mutex_unlock(&esw->state_lock);
2462 	return err;
2463 }
2464 
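/* Gather drop statistics for a legacy-mode vport. The egress ACL drop
 * counter feeds the VF's rx_dropped and the ingress ACL drop counter its
 * tx_dropped (ACL direction is relative to the eswitch, not the VF);
 * firmware discard_vport_down counters are added on top when supported.
 */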
2465 static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
2466 					       struct mlx5_vport *vport,
2467 					       struct mlx5_vport_drop_stats *stats)
2468 {
2469 	struct mlx5_eswitch *esw = dev->priv.eswitch;
2470 	u64 rx_discard_vport_down, tx_discard_vport_down;
2471 	u64 bytes = 0;
2472 	int err = 0;
2473 
2474 	if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
2475 		return 0;
2476 
2477 	if (vport->egress.drop_counter)
2478 		mlx5_fc_query(dev, vport->egress.drop_counter,
2479 			      &stats->rx_dropped, &bytes);
2480 
2481 	if (vport->ingress.drop_counter)
2482 		mlx5_fc_query(dev, vport->ingress.drop_counter,
2483 			      &stats->tx_dropped, &bytes);
2484 
2485 	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
2486 	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
2487 		return 0;
2488 
2489 	err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
2490 					  &rx_discard_vport_down,
2491 					  &tx_discard_vport_down);
2492 	if (err)
2493 		return err;
2494 
2495 	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
2496 		stats->rx_dropped += rx_discard_vport_down;
2497 	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
2498 		stats->tx_dropped += tx_discard_vport_down;
2499 
2500 	return 0;
2501 }
2502 
2503 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
2504 				 u16 vport_num,
2505 				 struct ifla_vf_stats *vf_stats)
2506 {
2507 	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
2508 	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
2509 	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
2510 	struct mlx5_vport_drop_stats stats = {0};
2511 	int err = 0;
2512 	u32 *out;
2513 
2514 	if (IS_ERR(vport))
2515 		return PTR_ERR(vport);
2516 
2517 	out = kvzalloc(outlen, GFP_KERNEL);
2518 	if (!out)
2519 		return -ENOMEM;
2520 
2521 	MLX5_SET(query_vport_counter_in, in, opcode,
2522 		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
2523 	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
2524 	MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
2525 	MLX5_SET(query_vport_counter_in, in, other_vport, 1);
2526 
2527 	err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
2528 	if (err)
2529 		goto free_out;
2530 
2531 	#define MLX5_GET_CTR(p, x) \
2532 		MLX5_GET64(query_vport_counter_out, p, x)
2533 
2534 	memset(vf_stats, 0, sizeof(*vf_stats));
2535 	vf_stats->rx_packets =
2536 		MLX5_GET_CTR(out, received_eth_unicast.packets) +
2537 		MLX5_GET_CTR(out, received_ib_unicast.packets) +
2538 		MLX5_GET_CTR(out, received_eth_multicast.packets) +
2539 		MLX5_GET_CTR(out, received_ib_multicast.packets) +
2540 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
2541 
2542 	vf_stats->rx_bytes =
2543 		MLX5_GET_CTR(out, received_eth_unicast.octets) +
2544 		MLX5_GET_CTR(out, received_ib_unicast.octets) +
2545 		MLX5_GET_CTR(out, received_eth_multicast.octets) +
2546 		MLX5_GET_CTR(out, received_ib_multicast.octets) +
2547 		MLX5_GET_CTR(out, received_eth_broadcast.octets);
2548 
2549 	vf_stats->tx_packets =
2550 		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
2551 		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
2552 		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
2553 		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
2554 		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
2555 
2556 	vf_stats->tx_bytes =
2557 		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
2558 		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
2559 		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
2560 		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
2561 		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
2562 
2563 	vf_stats->multicast =
2564 		MLX5_GET_CTR(out, received_eth_multicast.packets) +
2565 		MLX5_GET_CTR(out, received_ib_multicast.packets);
2566 
2567 	vf_stats->broadcast =
2568 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
2569 
2570 	err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
2571 	if (err)
2572 		goto free_out;
2573 	vf_stats->rx_dropped = stats.rx_dropped;
2574 	vf_stats->tx_dropped = stats.tx_dropped;
2575 
2576 free_out:
2577 	kvfree(out);
2578 	return err;
2579 }
2580 
2581 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
2582 {
2583 	return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
2584 }
2585 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
2586 
2587 enum devlink_eswitch_encap_mode
2588 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
2589 {
2590 	struct mlx5_eswitch *esw;
2591 
2592 	esw = dev->priv.eswitch;
2593 	return ESW_ALLOWED(esw) ? esw->offloads.encap :
2594 		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2595 }
2596 EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
2597 
2598 bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
2599 {
2600 	if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
2601 	     dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
2602 	    (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
2603 	     dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
2604 		return true;
2605 
2606 	return false;
2607 }
2608 
2609 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
2610 			       struct mlx5_core_dev *dev1)
2611 {
2612 	return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
2613 		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
2614 }
2615 
2616 void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
2617 {
2618 	const u32 *out;
2619 
2620 	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
2621 
2622 	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
2623 		esw->esw_funcs.num_vfs = num_vfs;
2624 		return;
2625 	}
2626 
2627 	out = mlx5_esw_query_functions(esw->dev);
2628 	if (IS_ERR(out))
2629 		return;
2630 
2631 	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
2632 					  host_params_context.host_num_of_vfs);
2633 	kvfree(out);
2634 }
2635