/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include <linux/debugfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "esw/qos.h"
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
#include "devlink.h"
#include "ecpf.h"
#include "en/mod_hdr.h"

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u16                    vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFS */
	/* A flag indicating that the mac was added due to an mc promiscuous vport */
	bool mc_promisc;
};

static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EOPNOTSUPP;

	return 0;
}

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return ERR_PTR(err);

	return dev->priv.eswitch;
}

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return ERR_PTR(-EPERM);

	vport = xa_load(&esw->vports, vport_num);
	if (!vport) {
		esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}
	return vport;
}

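/* Arm the device to deliver NIC vport context change events for @vport.
 * Events appear to be one-shot per arming, so the change handler re-arms
 * after each processed event (see esw_vport_change_handle_locked()).
 */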
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
}

/* E-Switch vport context HW commands */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in)
{
	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
	return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}

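/* Note: the strip and insert field_select bits are set unconditionally
 * below, so each call rewrites the vport's complete cvlan state; callers
 * must pass the full desired configuration, not a delta.
 */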
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}

/* E-Switch FDB */
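/* Core legacy-FDB rule builder. The mac mask/value pair chosen by the
 * wrappers below selects the match flavor:
 *  - all-ones mask with a full DMAC: exact UC/MC steering rule
 *  - mask/value of 01:00:00:00:00:00: match only the multicast bit (allmulti)
 *  - all-zero mask with rx_rule set: match only the uplink source port
 *    (promiscuous rx)
 */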
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					misc_parameters);
		mc_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}

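/* Replicate a multicast address add/del to every other vport that
 * currently holds an allmulti rule, so mc-promiscuous vports keep
 * receiving traffic for every multicast MAC known to the eswitch.
 */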
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	mlx5_esw_for_each_vport(esw, i, vport) {
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		vport_num = vport->vport;
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_num)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_num);
				continue;
			}
			iter_vaddr->vport = vport_num;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_num);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac was added as a result of an mc promiscuous
	 * vport, don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac was added as a result of an mc promiscuous
	 * vport, don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

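/* The UC/MC lists are kept in sync with the vport context by a
 * mark-and-sweep pass: esw_update_vport_addr_list() first marks every
 * cached entry MLX5_ACTION_DEL, then flips entries still reported by
 * the device back to NONE (or inserts new ones as ADD);
 * esw_apply_vport_addr_list() below then executes the pending actions
 * against the MPFS L2 table and the legacy FDB.
 */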
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport->vport, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it's now converted to be an
			 * original vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

/* Sync the vport mc list with the set of multicast MACs known to the
 * eswitch, for mc promiscuous vports.
 * Must be called after esw_update_vport_addr_list
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

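/* The uplink allmulti rule is shared by all mc-promiscuous vports and
 * reference counted via esw->mc_promisc: it is installed when the first
 * vport turns allmulti on and removed when the last one turns it off.
 */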
/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								MLX5_VPORT_UPLINK);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule =
			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport->vport,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport->vport, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport, promisc_all,
				(promisc_all || promisc_mc));
}
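
/* Process a NIC vport context change under esw->state_lock: re-read the
 * UC/MC address lists and rx mode from the device, apply the deltas to
 * the L2 table and FDB, and re-arm change events while the vport is
 * still enabled.
 */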
void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport);
	}

	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

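/* Build an IB node GUID from a MAC address EUI-64 style, splitting the
 * OUI and NIC-specific halves with ff:fe: e.g. 00:11:22:33:44:55 yields
 * the GUID 00:11:22:ff:fe:33:44:55 (stored below in reverse byte order
 * within the u64).
 */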
static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}

static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		return esw_legacy_vport_acl_setup(esw, vport);
	else
		return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_legacy_vport_acl_cleanup(esw, vport);
	else
		esw_vport_destroy_offloads_acl_tables(esw, vport);
}

static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;
	int flags;
	int err;

	err = esw_vport_setup_acl(esw, vport);
	if (err)
		return err;

	if (mlx5_esw_is_manager_vport(esw, vport_num))
		return 0;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      vport_num, 1,
				      vport->info.link_state);

	/* Host PF has its own mac/guid. */
	if (vport_num) {
		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
						  vport->info.mac);
		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
						vport->info.node_guid);
	}

	flags = (vport->info.vlan || vport->info.qos) ?
		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
			       vport->info.qos, flags);

	return 0;
}

/* Don't clean up vport->info, it's needed to restore vport configuration */
static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;

	if (!mlx5_esw_is_manager_vport(esw, vport_num))
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					      vport_num, 1,
					      MLX5_VPORT_ADMIN_STATE_DOWN);

	mlx5_esw_qos_vport_disable(esw, vport);
	esw_vport_cleanup_acl(esw, vport);
}

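/* Enable the vport and subscribe it to the requested context-change
 * events (a bitmask of MLX5_VPORT_UC_ADDR_CHANGE and friends). Must not
 * be called on an already enabled vport.
 */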
int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	int ret;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	ret = esw_vport_setup(esw, vport);
	if (ret)
		goto done;

	/* Sync with current vport context */
	vport->enabled_events = enabled_events;
	vport->enabled = true;

	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
	 * in smartNIC as it's a vport group manager.
	 */
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
		vport->info.trusted = true;

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
		if (ret)
			goto err_vhca_mapping;
	}

	/* External controller host PF has factory programmed MAC.
	 * Read it from the device.
	 */
	if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
		mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
done:
	mutex_unlock(&esw->state_lock);
	return ret;

err_vhca_mapping:
	esw_vport_cleanup(esw, vport);
	mutex_unlock(&esw->state_lock);
	return ret;
}

void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto done;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		mlx5_esw_vport_vhca_id_clear(esw, vport_num);

	/* We don't assume VFs will clean up after themselves.
	 * Calling the vport change handler while the vport is disabled will
	 * clean up the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	esw_vport_cleanup(esw, vport);
	esw->enabled_vports--;

done:
	mutex_unlock(&esw->state_lock);
}

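/* EQ notifier callback for NIC_VPORT_CHANGE; it may run in atomic
 * context, so it only queues the per-vport work and the actual
 * processing happens in esw_vport_change_handler() under
 * esw->state_lock.
 */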
static int eswitch_vport_event(struct notifier_block *nb,
			       unsigned long type, void *data)
{
	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
	struct mlx5_eqe *eqe = data;
	struct mlx5_vport *vport;
	u16 vport_num;

	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (!IS_ERR(vport))
		queue_work(esw->work_queue, &vport->vport_change_handler);
	return NOTIFY_OK;
}

/**
 * mlx5_esw_query_functions - Returns raw output about functions state
 * @dev:	Pointer to device to query
 *
 * mlx5_esw_query_functions() allocates and returns the raw "functions
 * changed" output queried from the device on success; otherwise it
 * returns an ERR_PTR. The caller must free the memory using kvfree()
 * when a valid pointer is returned.
 */
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(query_esw_functions_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
	if (!err)
		return out;

	kvfree(out);
	return ERR_PTR(err);
}

static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
{
	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
	mlx5_eq_notifier_register(esw->dev, &esw->nb);

	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
			     ESW_FUNCTIONS_CHANGED);
		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
	}
}

static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
{
	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);

	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

	flush_workqueue(esw->work_queue);
}

static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		memset(&vport->qos, 0, sizeof(vport->qos));
		memset(&vport->info, 0, sizeof(vport->info));
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	}
}

/* Public E-Switch API */
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
	if (err)
		return err;

	mlx5_esw_vport_debugfs_create(esw, vport_num, false, 0);
	err = esw_offloads_load_rep(esw, vport_num);
	if (err)
		goto err_rep;

	return err;

err_rep:
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	esw_offloads_unload_rep(esw, vport_num);
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}

void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		if (!vport->enabled)
			continue;
		mlx5_eswitch_unload_vport(esw, vport->vport);
	}
}

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
		if (err)
			goto vf_err;
	}

	return 0;

vf_err:
	mlx5_eswitch_unload_vf_vports(esw, num_vfs);
	return err;
}

static int host_pf_enable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return 0;

	/* Once the vport and representor are ready, take the external host
	 * PF out of its initializing state. Enabling the HCA clears the
	 * iser->initializing bit and host PF driver loading can progress.
	 */
	return mlx5_cmd_host_pf_enable_hca(dev);
}

static void host_pf_disable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return;

	mlx5_cmd_host_pf_disable_hca(dev);
}

/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
 * whichever are present on the eswitch.
 */
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events)
{
	int ret;

	/* Enable PF vport */
	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
	if (ret)
		return ret;

	/* Enable external host PF HCA */
	ret = host_pf_enable_hca(esw->dev);
	if (ret)
		goto pf_hca_err;

	/* Enable ECPF vport */
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
		if (ret)
			goto ecpf_err;
	}

	/* Enable VF vports */
	ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
					  enabled_events);
	if (ret)
		goto vf_err;
	return 0;

vf_err:
	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
	host_pf_disable_hca(esw->dev);
pf_hca_err:
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
	return ret;
}

/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
 * whichever are previously enabled on the eswitch.
 */
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);

	host_pf_disable_hca(esw->dev);
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}

static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{
	struct devlink *devlink = priv_to_devlink(esw->dev);
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(devlink,
						 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
						 &val);
	if (!err) {
		esw->params.large_group_num = val.vu32;
	} else {
		esw_warn(esw->dev,
			 "Devlink can't get param fdb_large_groups, uses default (%d).\n",
			 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
		esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
	}
}

static void
mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
	const u32 *out;

	if (num_vfs < 0)
		return;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw->esw_funcs.num_vfs = num_vfs;
		return;
	}

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		return;

	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_num_of_vfs);
	kvfree(out);
}

static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
{
	struct mlx5_esw_event_info info = {};

	info.new_mode = mode;

	blocking_notifier_call_chain(&esw->n_head, 0, &info);
}

static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	int total_vports;
	int err;

	if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
		return 0;

	total_vports = mlx5_eswitch_get_total_vports(dev);

	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_egress_acls_init(dev, total_vports);
		if (err)
			return err;
	} else {
		esw_warn(dev, "egress ACL is not supported by FW\n");
	}

	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_ingress_acls_init(dev, total_vports);
		if (err)
			goto err;
	} else {
		esw_warn(dev, "ingress ACL is not supported by FW\n");
	}
	esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
	return 0;

err:
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
	return err;
}

static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;

	esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		mlx5_fs_ingress_acls_cleanup(dev);
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
}

/**
 * mlx5_eswitch_enable_locked - Enable eswitch
 * @esw:	Pointer to eswitch
 * @num_vfs:	Enable eswitch for given number of VFs. This is optional.
 *		Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
 *		Caller should pass num_vfs > 0 when enabling eswitch for
 *		vf vports. Caller should pass num_vfs = 0 when eswitch
 *		is enabled without sriov VFs or when the caller
 *		is unaware of the sriov state of the host PF on an ECPF
 *		based eswitch. Caller should pass < 0 when num_vfs should
 *		be completely ignored. This is typically the case when
 *		eswitch is enabled without sriov regardless of PF/ECPF
 *		system.
 * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or
 * offloads mode. If num_vfs >= 0 is provided, it sets up the VF related
 * eswitch vports. It returns 0 on success or an error code on failure.
 */
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
	int err;

	lockdep_assert_held(&esw->mode_lock);

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
		return -EOPNOTSUPP;
	}

	mlx5_eswitch_get_devlink_param(esw);

	err = mlx5_esw_acls_ns_init(esw);
	if (err)
		return err;

	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		err = esw_legacy_enable(esw);
	} else {
		mlx5_rescan_drivers(esw->dev);
		err = esw_offloads_enable(esw);
	}

	if (err)
		goto abort;

	esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;

	mlx5_eswitch_event_handlers_register(esw);

	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	mlx5_esw_mode_change_notify(esw, esw->mode);

	return 0;

abort:
	mlx5_esw_acls_ns_cleanup(esw);
	return err;
}

/**
 * mlx5_eswitch_enable - Enable eswitch
 * @esw:	Pointer to eswitch
 * @num_vfs:	Enable eswitch for the given number of VFs.
 *		Caller must pass num_vfs > 0 when enabling eswitch for
 *		vf vports.
 * mlx5_eswitch_enable() returns 0 on success or an error code on failure.
 */
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
	bool toggle_lag;
	int ret;

	if (!mlx5_esw_allowed(esw))
		return 0;

	devl_assert_locked(priv_to_devlink(esw->dev));

	toggle_lag = !mlx5_esw_is_fdb_created(esw);

	if (toggle_lag)
		mlx5_lag_disable_change(esw->dev);

	down_write(&esw->mode_lock);
	if (!mlx5_esw_is_fdb_created(esw)) {
		ret = mlx5_eswitch_enable_locked(esw, num_vfs);
	} else {
		enum mlx5_eswitch_vport_event vport_events;

		vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
					MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
		ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
		if (!ret)
			esw->esw_funcs.num_vfs = num_vfs;
	}
	up_write(&esw->mode_lock);

	if (toggle_lag)
		mlx5_lag_enable_change(esw->dev);

	return ret;
}

/* When disabling sriov, free driver level resources. */
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
{
	if (!mlx5_esw_allowed(esw))
		return;

	devl_assert_locked(priv_to_devlink(esw->dev));
	down_write(&esw->mode_lock);
	/* If driver is unloaded, this function is called twice by remove_one()
	 * and mlx5_unload(). Prevent the second call.
	 */
	if (!esw->esw_funcs.num_vfs && !clear_vf)
		goto unlock;

	esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	if (clear_vf)
		mlx5_eswitch_clear_vf_vports_info(esw);
	/* If disabling sriov in switchdev mode, free meta rules here
	 * because it depends on num_vfs.
	 */
	if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
		struct devlink *devlink = priv_to_devlink(esw->dev);

		devl_rate_nodes_destroy(devlink);
	}
	/* Destroy legacy fdb when disabling sriov in legacy mode. */
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		mlx5_eswitch_disable_locked(esw);

	esw->esw_funcs.num_vfs = 0;

unlock:
	up_write(&esw->mode_lock);
}

/* Free resources for corresponding eswitch mode. It is called by devlink
 * when changing eswitch mode or modprobe when unloading driver.
 */
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
{
	struct devlink *devlink = priv_to_devlink(esw->dev);

	/* Notify eswitch users that it is exiting from the current mode,
	 * so that they can do any necessary cleanup before the eswitch is
	 * disabled.
	 */
	mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);

	mlx5_eswitch_event_handlers_unregister(esw);

	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) {
		esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
		if (esw->mode == MLX5_ESWITCH_OFFLOADS)
			esw_offloads_disable(esw);
		else if (esw->mode == MLX5_ESWITCH_LEGACY)
			esw_legacy_disable(esw);
		mlx5_esw_acls_ns_cleanup(esw);
	}

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		devl_rate_nodes_destroy(devlink);
}

void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
	if (!mlx5_esw_allowed(esw))
		return;

	devl_assert_locked(priv_to_devlink(esw->dev));
	mlx5_lag_disable_change(esw->dev);
	down_write(&esw->mode_lock);
	mlx5_eswitch_disable_locked(esw);
	up_write(&esw->mode_lock);
	mlx5_lag_enable_change(esw->dev);
}
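
/* Query the maximal HCA caps of the external host PF (other_function
 * with function_id MLX5_VPORT_PF), used below to size the SF range
 * owned by the host PF on an ECPF based device.
 */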
static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
{
	u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
	MLX5_SET(query_hca_cap_in, in, other_function, true);
	return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	if (!mlx5_core_is_ecpf(dev)) {
		*max_sfs = 0;
		return 0;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_query_hca_cap_host_pf(dev, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf);
	*sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id);

out_free:
	kfree(query_ctx);
	return err;
}

static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev,
				int index, u16 vport_num)
{
	struct mlx5_vport *vport;
	int err;

	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	vport->dev = esw->dev;
	vport->vport = vport_num;
	vport->index = index;
	vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
	err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
	if (err)
		goto insert_err;

	esw->total_vports++;
	return 0;

insert_err:
	kfree(vport);
	return err;
}

static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	xa_erase(&esw->vports, vport->vport);
	kfree(vport);
}

static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vport(esw, i, vport)
		mlx5_esw_vport_free(esw, vport);
	xa_destroy(&esw->vports);
}

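/* Populate esw->vports, keyed by vport number: host PF first, then VFs
 * 1..max_vfs, then SF vports at their HW function ids (the local range
 * followed by the external host PF range), then ECPF and uplink when
 * present. The xarray marks tag VF, SF and host-function entries.
 */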
static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	u16 max_host_pf_sfs;
	u16 base_sf_num;
	int idx = 0;
	int err;
	int i;

	xa_init(&esw->vports);

	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF);
	if (err)
		goto err;
	if (esw->first_host_vport == MLX5_VPORT_PF)
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
	idx++;

	for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, idx);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
		idx++;
	}
	base_sf_num = mlx5_sf_start_function_id(dev);
	for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
	if (err)
		goto err;
	for (i = 0; i < max_host_pf_sfs; i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	if (mlx5_ecpf_vport_exists(dev)) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF);
		if (err)
			goto err;
		idx++;
	}
	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK);
	if (err)
		goto err;
	return 0;

err:
	mlx5_esw_vports_cleanup(esw);
	return err;
}

int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;
	int err;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;
	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	err = mlx5_esw_vports_init(esw);
	if (err)
		goto abort;

	err = esw_offloads_init_reps(esw);
	if (err)
		goto reps_err;

	mutex_init(&esw->offloads.encap_tbl_lock);
	hash_init(esw->offloads.encap_tbl);
	mutex_init(&esw->offloads.decap_tbl_lock);
	hash_init(esw->offloads.decap_tbl);
	mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
	atomic64_set(&esw->offloads.num_flows, 0);
	ida_init(&esw->offloads.vport_metadata_ida);
	xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
	mutex_init(&esw->state_lock);
	init_rwsem(&esw->mode_lock);
	refcount_set(&esw->qos.refcnt, 0);

	esw->enabled_vports = 0;
	esw->mode = MLX5_ESWITCH_LEGACY;
	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
	if (MLX5_ESWITCH_MANAGER(dev) &&
	    mlx5_esw_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	dev->priv.eswitch = esw;
	BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);

	esw->dbgfs = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(esw->dev));
	esw_info(dev,
		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
		 esw->total_vports,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));
	return 0;

reps_err:
	mlx5_esw_vports_cleanup(esw);
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw);
	return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
		return;

	esw_info(esw->dev, "cleanup\n");

	debugfs_remove_recursive(esw->dbgfs);
	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	WARN_ON(refcount_read(&esw->qos.refcnt));
	mutex_destroy(&esw->state_lock);
	WARN_ON(!xa_empty(&esw->offloads.vhca_map));
	xa_destroy(&esw->offloads.vhca_map);
	ida_destroy(&esw->offloads.vport_metadata_ida);
	mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
	mutex_destroy(&esw->offloads.encap_tbl_lock);
	mutex_destroy(&esw->offloads.decap_tbl_lock);
	esw_offloads_cleanup_reps(esw);
	mlx5_esw_vports_cleanup(esw);
	kfree(esw);
}

/* Vport Administration */
static int
mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
			      struct mlx5_vport *evport, const u8 *mac)
{
	u16 vport_num = evport->vport;
	u64 node_guid;
	int err = 0;

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
		mlx5_core_warn(esw->dev,
			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
			       vport_num);

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport_num, err);
		return err;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
	if (err)
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
			       vport_num, err);

	ether_addr_copy(evport->info.mac, mac);
	evport->info.node_guid = node_guid;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);

	return err;
}

int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	err = mlx5_esw_set_vport_mac_locked(esw, evport, mac);
	mutex_unlock(&esw->state_lock);
	return err;
}
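
/* A typical caller is the PF netdev's .ndo_set_vf_mac callback. A
 * hedged sketch (the callback name and how mdev is obtained are
 * illustrative):
 *
 *	static int sketch_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 *	{
 *		struct mlx5_core_dev *mdev = ...; // from netdev private data
 *
 *		// VF index is zero-based; VF vport numbers start at 1.
 *		return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch,
 *						  vf + 1, mac);
 *	}
 */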

static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return false;

	return xa_get_mark(&esw->vports, vport_num, mark);
}

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
}

bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
}
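
/* Port types are tracked as xarray search marks on esw->vports: each
 * vport entry is tagged when the vports table is built (e.g. with
 * MLX5_ESW_VPT_VF or MLX5_ESW_VPT_SF), so classifying a vport is a
 * single xa_get_mark() lookup rather than a vport-range comparison.
 */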

int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
	int other_vport = 1;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	if (vport == MLX5_VPORT_UPLINK) {
		opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
		other_vport = 0;
		vport = 0;
	}
	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
	if (err) {
		mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
			       vport, opmod, err);
		goto unlock;
	}

	evport->info.link_state = link_state;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
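
/* Callers typically translate the IFLA_VF_LINK_STATE_* policy from
 * "ip link set ... vf N state auto|enable|disable" into the matching
 * vport admin state before calling mlx5_eswitch_set_vport_state().
 * A hedged sketch of that mapping:
 *
 *	switch (ifla_state) {
 *	case IFLA_VF_LINK_STATE_AUTO:
 *		link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
 *		break;
 *	case IFLA_VF_LINK_STATE_ENABLE:
 *		link_state = MLX5_VPORT_ADMIN_STATE_UP;
 *		break;
 *	case IFLA_VF_LINK_STATE_DISABLE:
 *		link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
 *		break;
 *	default:
 *		return -EINVAL;
 *	}
 */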

int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	memset(ivi, 0, sizeof(*ivi));
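	/* ifla_vf_info uses a zero-based VF index, while VF vport
	 * numbers start at 1 (vport 0 is the eswitch manager/PF).
	 */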
	ivi->vf = vport - 1;

	mutex_lock(&esw->state_lock);
	ether_addr_copy(ivi->mac, evport->info.mac);
	ivi->linkstate = evport->info.link_state;
	ivi->vlan = evport->info.vlan;
	ivi->qos = evport->info.qos;
	ivi->spoofchk = evport->info.spoofchk;
	ivi->trusted = evport->info.trusted;
	if (evport->qos.enabled) {
		ivi->min_tx_rate = evport->qos.min_rate;
		ivi->max_tx_rate = evport->qos.max_rate;
	}
	mutex_unlock(&esw->state_lock);

	return 0;
}

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (IS_ERR(evport))
		return PTR_ERR(evport);
	if (vlan > 4095 || qos > 7)
		return -EINVAL;

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
	if (err)
		return err;

	evport->info.vlan = vlan;
	evport->info.qos = qos;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
		err = esw_acl_ingress_lgcy_setup(esw, evport);
		if (err)
			return err;
		err = esw_acl_egress_lgcy_setup(esw, evport);
	}

	return err;
}
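
/* The bounds above follow the 802.1Q tag layout: the VLAN ID is a
 * 12-bit field (0-4095) and qos maps to the 3-bit PCP field (0-7).
 * On success in legacy mode, the ingress/egress ACLs are rebuilt so
 * the new C-VLAN rules take effect on an enabled vport.
 */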

int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport_num,
				 struct ifla_vf_stats *vf_stats)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_vport_drop_stats stats = {};
	int err = 0;
	u32 *out;

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
	MLX5_SET(query_vport_counter_in, in, other_vport, 1);

	err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
	if (err)
		goto free_out;

	#define MLX5_GET_CTR(p, x) \
		MLX5_GET64(query_vport_counter_out, p, x)

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_ib_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	vf_stats->rx_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_ib_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_ib_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	vf_stats->tx_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	vf_stats->tx_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	vf_stats->multicast =
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets);

	vf_stats->broadcast =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats);
	if (err)
		goto free_out;
	vf_stats->rx_dropped = stats.rx_dropped;
	vf_stats->tx_dropped = stats.tx_dropped;

free_out:
	kvfree(out);
	return err;
}
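
/* ifla_vf_stats has no per-transport breakdown, so each field above
 * aggregates the Ethernet and IB unicast/multicast/broadcast counters
 * from QUERY_VPORT_COUNTER; the rx/tx drop counts come separately from
 * mlx5_esw_query_vport_drop_stats().
 */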

u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
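
/* Exported so other mlx5 consumers can branch on the current eswitch
 * mode without touching its internals. A minimal sketch (the helper
 * name is illustrative):
 *
 *	if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS)
 *		setup_representor_datapath();
 */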

enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap :
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1)
{
	return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&esw->n_head, nb);
}

void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&esw->n_head, nb);
}
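
/* Consumers subscribe to eswitch events with a standard notifier_block
 * on the blocking chain initialized above. A hedged sketch (callback
 * name and event handling are illustrative):
 *
 *	static int sketch_esw_event(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		// react to the eswitch event carried in event/data
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block sketch_nb = {
 *		.notifier_call = sketch_esw_event,
 *	};
 *	...
 *	mlx5_esw_event_notifier_register(esw, &sketch_nb);
 *	...
 *	mlx5_esw_event_notifier_unregister(esw, &sketch_nb);
 */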

/**
 * mlx5_esw_hold() - Try to take a read lock on esw mode lock.
 * @mdev: mlx5 core device.
 *
 * Should be called by esw resources callers.
 *
 * Return: true on success, false otherwise.
 */
bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	/* e.g. a VF has no eswitch, so there is nothing to do */
	if (!mlx5_esw_allowed(esw))
		return true;

	return down_read_trylock(&esw->mode_lock) != 0;
}

/**
 * mlx5_esw_release() - Release a read lock on esw mode lock.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_release(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		up_read(&esw->mode_lock);
}

/**
 * mlx5_esw_get() - Increase esw user count.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_get(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		atomic64_inc(&esw->user_count);
}

/**
 * mlx5_esw_put() - Decrease esw user count.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_put(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		atomic64_dec_if_positive(&esw->user_count);
}
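
/* The hold/release and get/put pairs cooperate to keep the eswitch
 * mode stable: a caller briefly read-locks mode_lock to safely bump
 * the user count, then drops the lock while it uses the resource.
 * A hedged sketch of that lifecycle:
 *
 *	if (!mlx5_esw_hold(mdev))
 *		return -EBUSY;
 *	mlx5_esw_get(mdev);	// pin the current mode
 *	mlx5_esw_release(mdev);	// lock no longer needed
 *	...			// use eswitch resources
 *	mlx5_esw_put(mdev);	// unpin when done
 */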

/**
 * mlx5_esw_try_lock() - Take a write lock on esw mode lock.
 * @esw: eswitch device.
 *
 * Should be called by esw mode change routine.
 *
 * Return:
 * * esw->mode - if the lock was taken and the user count is 0.
 * * -EBUSY    - the user count is not 0.
 * * -EINVAL   - the lock could not be taken, e.g. a mode change is
 *               already in progress or the lock is otherwise held.
 */
int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
{
	if (down_write_trylock(&esw->mode_lock) == 0)
		return -EINVAL;

	if (atomic64_read(&esw->user_count) > 0) {
		up_write(&esw->mode_lock);
		return -EBUSY;
	}

	return esw->mode;
}

/**
 * mlx5_esw_unlock() - Release write lock on esw mode lock
 * @esw: eswitch device.
 */
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
	up_write(&esw->mode_lock);
}
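
/* A mode change routine is expected to pair these as follows (hedged
 * sketch; the mode switching helper is illustrative):
 *
 *	cur_mode = mlx5_esw_try_lock(esw);
 *	if (cur_mode < 0)
 *		return cur_mode;	// -EBUSY or -EINVAL
 *	err = do_switch_mode(esw, cur_mode, new_mode);
 *	mlx5_esw_unlock(esw);
 *	return err;
 */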

/**
 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
 *
 * @dev: Pointer to core device
 *
 * mlx5_eswitch_get_total_vports returns the total number of eswitch
 * vports, or 0 when the eswitch is not available.
 */
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);

/**
 * mlx5_eswitch_get_core_dev - Get the mdev device
 * @esw: eswitch device.
 *
 * Return the Mellanox core device which manages the eswitch.
 */
struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
{
	return mlx5_esw_allowed(esw) ? esw->dev : NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_core_dev);