/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"

#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

struct mlx5e_rep_indr_block_priv {
	struct net_device *netdev;
	struct mlx5e_rep_priv *rpriv;

	struct list_head list;
};

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
					    struct net_device *netdev);
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%04d (%.16s)",
		 fw_rev_maj(mdev), fw_rev_min(mdev),
		 fw_rev_sub(mdev), mdev->board_id);
}

static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
					 struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_rep_get_drvinfo(dev, drvinfo);
	strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
		sizeof(drvinfo->bus_info));
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       sw_rep_stats_desc[i].format);
		for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       vport_rep_stats_desc[j].format);
		break;
	}
}

static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes   = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes   = vf_stats.rx_bytes;
}

static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct rtnl_link_stats64 *vport_stats;

	mlx5e_grp_802_3_update_stats(priv);

	vport_stats = &priv->stats.vf_vport;

	vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
	vport_stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
	vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
	vport_stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes   = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes   = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i, j;

	if (!data)
		return;

	mutex_lock(&priv->state_lock);
	mlx5e_rep_update_sw_counters(priv);
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);

	for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
					       vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx5e_rep_get_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}

static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
					   struct mlx5_flow_destination *dest)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	u16 curr_channels_amount = priv->channels.params.num_channels;
	u32 new_channels_amount = ch->combined_count;
	struct mlx5_flow_destination new_dest;
	int err = 0;

	err = mlx5e_ethtool_set_channels(priv, ch);
	if (err)
		return err;

	if (curr_channels_amount == 1 && new_channels_amount > 1) {
		new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		new_dest.ft = priv->fs.ttc.ft.t;
	} else if (new_channels_amount == 1 && curr_channels_amount > 1) {
		new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		new_dest.tir_num = priv->direct_tir[0].tirn;
	} else {
		return 0;
	}

	err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
	if (err) {
		netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n",
			    curr_channels_amount, new_channels_amount);
		return err;
	}

	return 0;
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
					    struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
					   struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
					       struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}

static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
					       const struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
	.get_drvinfo	   = mlx5e_uplink_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
	.set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
	.get_pauseparam    = mlx5e_uplink_rep_get_pauseparam,
	.set_pauseparam    = mlx5e_uplink_rep_set_pauseparam,
};

static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
					 struct netdev_phys_item_id *ppid)
{
	struct mlx5e_priv *priv;
	u64 parent_id;

	priv = netdev_priv(dev);

	parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
	ppid->id_len = sizeof(parent_id);
	memcpy(ppid->id, &parent_id, sizeof(parent_id));
}

static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}

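/* Install an e-switch "send to vport" (re-inject) rule for each of the
 * representor's SQs, so packets transmitted on the representor netdev
 * egress through the vport it represents. On failure, roll back any
 * rules added so far via mlx5e_sqs2vport_stop().
 */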
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);
	return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}

static unsigned long mlx5e_rep_ipv6_interval(void)
{
	if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl)
		return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME);

	return ~0UL;
}

static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
	unsigned long ipv6_interval = mlx5e_rep_ipv6_interval();
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	mlx5_fc_queue_stats_work(priv->mdev,
				 &neigh_update->neigh_stats_work,
				 neigh_update->min_interval);
}

static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
	return refcount_inc_not_zero(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
	if (refcount_dec_and_test(&nhe->refcnt)) {
		mlx5e_rep_neigh_entry_remove(nhe);
		kfree_rcu(nhe, rcu);
	}
}

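/* Walk the neigh hash entry list under RCU. A reference is taken on the
 * entry that is returned and the reference on the entry passed in is
 * released; entries whose refcount already dropped to zero are skipped,
 * so the caller always receives a live entry, or NULL at the end of the
 * list.
 */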
static struct mlx5e_neigh_hash_entry *
mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
		   struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh_hash_entry *next = NULL;

	rcu_read_lock();

	for (next = nhe ?
		     list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
					   &nhe->neigh_list,
					   struct mlx5e_neigh_hash_entry,
					   neigh_list) :
		     list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
					    struct mlx5e_neigh_hash_entry,
					    neigh_list);
	     next;
	     next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
					  &next->neigh_list,
					  struct mlx5e_neigh_hash_entry,
					  neigh_list))
		if (mlx5e_rep_neigh_entry_hold(next))
			break;

	rcu_read_unlock();

	if (nhe)
		mlx5e_rep_neigh_entry_release(nhe);

	return next;
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
						    neigh_update.neigh_stats_work.work);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;

	rtnl_lock();
	if (!list_empty(&rpriv->neigh_update.neigh_list))
		mlx5e_rep_queue_neigh_stats_work(priv);

	while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
		mlx5e_tc_update_neigh_used_value(nhe);

	rtnl_unlock();
}

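/* Bring the offloaded encap flows of this encap entry in sync with the
 * neighbour state: delete the flows when the neighbour became invalid or
 * its MAC address changed, and re-offload them once it is valid again.
 */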
static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   bool neigh_connected,
				   unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool encap_connected;
	LIST_HEAD(flow_list);

	ASSERT_RTNL();

	/* wait for encap to be fully initialized */
	wait_for_completion(&e->res_ready);

	mutex_lock(&esw->offloads.encap_tbl_lock);
	encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
	if (e->compl_result < 0 || (encap_connected == neigh_connected &&
				    ether_addr_equal(e->h_dest, ha)))
		goto unlock;

	mlx5e_take_all_encap_flows(e, &flow_list);

	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
		mlx5e_tc_encap_flows_del(priv, e, &flow_list);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);
		/* Update the encap source mac, in case the flows were
		 * deleted because the encap source mac changed.
		 */
		ether_addr_copy(eth->h_source, e->route_dev->dev_addr);

		mlx5e_tc_encap_flows_add(priv, e, &flow_list);
	}
unlock:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	mlx5e_put_encap_flow_list(priv, &flow_list);
}

static void mlx5e_rep_neigh_update(struct work_struct *work)
{
	struct mlx5e_neigh_hash_entry *nhe =
		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
	struct neighbour *n = nhe->n;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	bool neigh_connected;
	u8 nud_state, dead;

	rtnl_lock();

	/* If these parameters are changed after we release the lock,
	 * we'll receive another event letting us know about it.
	 * We use this lock to avoid inconsistency between the neigh validity
	 * and its hw address.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	neigh_connected = (nud_state & NUD_VALID) && !dead;

	trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!mlx5e_encap_take(e))
			continue;

		priv = netdev_priv(e->out_dev);
		mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
		mlx5e_encap_put(priv, e);
	}
	mlx5e_rep_neigh_entry_release(nhe);
	rtnl_unlock();
	neigh_release(n);
}

static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
				 struct net_device *netdev)
{
	struct mlx5e_rep_indr_block_priv *cb_priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv,
			    &rpriv->uplink_priv.tc_indr_block_priv_list,
			    list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
	struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;

	list_for_each_entry_safe(cb_priv, temp, head, list) {
		mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
		kfree(cb_priv);
	}
}

static int
mlx5e_rep_indr_offload(struct net_device *netdev,
		       struct flow_cls_offload *flower,
		       struct mlx5e_rep_indr_block_priv *indr_priv)
{
	unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
	int err = 0;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		err = mlx5e_configure_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_DESTROY:
		err = mlx5e_delete_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_STATS:
		err = mlx5e_stats_flower(netdev, priv, flower, flags);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
					 void *type_data, void *indr_priv)
{
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
{
	struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;

	list_del(&indr_priv->list);
	kfree(indr_priv);
}

static LIST_HEAD(mlx5e_block_cb_list);

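/* Bind or unbind the ingress TC flow block of a foreign device (a tunnel
 * device, or a VLAN upper of the uplink) to this representor, so that
 * classifier rules installed on that device can be offloaded into the
 * e-switch.
 */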
static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
			      struct mlx5e_rep_priv *rpriv,
			      struct flow_block_offload *f)
{
	struct mlx5e_rep_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->unlocked_driver_cb = true;
	f->driver_block_list = &mlx5e_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->rpriv = rpriv;
		list_add(&indr_priv->list,
			 &rpriv->uplink_priv.tc_indr_block_priv_list);

		block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
					       indr_priv, indr_priv,
					       mlx5e_rep_indr_tc_block_unbind);
		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);

		return 0;
	case FLOW_BLOCK_UNBIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						mlx5e_rep_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static
int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			       enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
						     type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
					 struct net_device *netdev)
{
	int err;

	err = __flow_indr_block_cb_register(netdev, rpriv,
					    mlx5e_rep_indr_setup_tc_cb,
					    rpriv);
	if (err) {
		struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

		mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
			      netdev_name(netdev), err);
	}
	return err;
}

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
					    struct net_device *netdev)
{
	__flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
					rpriv);
}

static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    uplink_priv.netdevice_nb);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
	    !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_REGISTER:
		mlx5e_rep_indr_register_block(rpriv, netdev);
		break;
	case NETDEV_UNREGISTER:
		mlx5e_rep_indr_unregister_block(rpriv, netdev);
		break;
	}
	return NOTIFY_OK;
}

static void
mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
				  struct mlx5e_neigh_hash_entry *nhe,
				  struct neighbour *n)
{
	/* Take a reference to ensure the neighbour and mlx5 encap
	 * entry won't be destructed until we drop the reference in
	 * delayed work.
	 */
	neigh_hold(n);

	/* This assignment is valid as long as the neigh reference
	 * is taken
	 */
	nhe->n = n;

	if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
		mlx5e_rep_neigh_entry_release(nhe);
		neigh_release(n);
	}
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh);

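/* Netevent handler: on a neighbour update for a tracked ARP/ND entry,
 * queue work to refresh the relevant encap flows; on a per-device
 * DELAY_PROBE_TIME change, lower the flow counter sampling interval
 * accordingly.
 */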
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		rcu_read_lock();
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		rcu_read_unlock();
		if (!nhe)
			return NOTIFY_DONE;

		mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check that the device is present since we don't care
		 * about changes in the default table, only about changes to
		 * the per-device delay probe time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		rcu_read_lock();
		list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
					neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		if (!found)
			return NOTIFY_DONE;

		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	mutex_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	mutex_destroy(&neigh_update->encap_lock);
	rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
					struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
				     &nhe->rhash_node,
				     mlx5e_neigh_ht_params);
	if (err)
		return err;

	list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

	return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;

	mutex_lock(&rpriv->neigh_update.encap_lock);

	list_del_rcu(&nhe->neigh_list);

	rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
			       &nhe->rhash_node,
			       mlx5e_neigh_ht_params);
	mutex_unlock(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under the representor's encap_lock or
 * inside rcu read lock section.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_neigh_hash_entry *nhe;

	nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
				     mlx5e_neigh_ht_params);
	return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
					struct mlx5e_encap_entry *e,
					struct mlx5e_neigh_hash_entry **nhe)
{
	int err;

	*nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
	if (!*nhe)
		return -ENOMEM;

	(*nhe)->priv = priv;
	memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
	INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
	spin_lock_init(&(*nhe)->encap_list_lock);
	INIT_LIST_HEAD(&(*nhe)->encap_list);
	refcount_set(&(*nhe)->refcnt, 1);

	err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
	if (err)
		goto out_free;
	return 0;

out_free:
	kfree(*nhe);
	return err;
}

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
	if (err)
		return err;

	mutex_lock(&rpriv->neigh_update.encap_lock);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
		if (err) {
			mutex_unlock(&rpriv->neigh_update.encap_lock);
			mlx5_tun_entropy_refcount_dec(tun_entropy,
						      e->reformat_type);
			return err;
		}
	}

	e->nhe = nhe;
	spin_lock(&nhe->encap_list_lock);
	list_add_rcu(&e->encap_list, &nhe->encap_list);
	spin_unlock(&nhe->encap_list_lock);

	mutex_unlock(&rpriv->neigh_update.encap_lock);

	return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;

	if (!e->nhe)
		return;

	spin_lock(&e->nhe->encap_list_lock);
	list_del_rcu(&e->encap_list);
	spin_unlock(&e->nhe->encap_list_lock);

	mlx5e_rep_neigh_entry_release(e->nhe);
	e->nhe = NULL;
	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
			      struct flow_cls_offload *cls_flower, int flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
				    struct tc_cls_matchall_offload *ma)
{
	switch (ma->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlx5e_tc_configure_matchall(priv, ma);
	case TC_CLSMATCHALL_DESTROY:
		return mlx5e_tc_delete_matchall(priv, ma);
	case TC_CLSMATCHALL_STATS:
		mlx5e_tc_stats_matchall(priv, ma);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
	case TC_SETUP_CLSMATCHALL:
		return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(mlx5e_rep_block_cb_list);

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct flow_block_offload *f = type_data;

	switch (type) {
	case TC_SETUP_BLOCK:
		f->unlocked_driver_cb = true;
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_cb_list,
						  mlx5e_rep_setup_tc_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
		return false;

	rep = rpriv->rep;
	return (rep->vport == MLX5_VPORT_UPLINK);
}

static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}

static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
				       void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
}

static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
{
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	return 0;
}

static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
					__be16 vlan_proto)
{
	netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");

	if (vlan != 0)
		return -EOPNOTSUPP;

	/* allow setting 0-vid for compatibility with libvirt */
	return 0;
}

static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	return &rpriv->dl_port;
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_get_devlink_port,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
};

static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_set_mac_address     = mlx5e_uplink_rep_set_mac,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_get_devlink_port,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_uplink_rep_change_mtu,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_set_vf_vlan         = mlx5e_uplink_rep_set_vf_vlan,
	.ndo_set_features        = mlx5e_set_features,
};

bool mlx5e_eswitch_rep(struct net_device *netdev)
{
	if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
	    netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
		return true;

	return false;
}

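/* Note: non-uplink representors get a small default SQ size, since rep
 * netdevs are expected to carry mostly slow-path (e-switch miss) traffic;
 * the uplink representor keeps the regular default SQ size.
 */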
static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;
	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
	params->sw_mtu      = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->num_tc                = 1;
	params->tunneled_offload_en = false;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

	/* RSS */
	mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		SET_NETDEV_DEV(netdev, mdev->device);
		netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
		/* we want a persistent mac for the uplink rep */
		mlx5_query_mac_address(mdev, netdev->dev_addr);
		netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		if (MLX5_CAP_GEN(mdev, qos))
			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_rep;
		eth_hw_addr_random(netdev);
		netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
	}

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->features       |= NETIF_F_NETNS_LOCAL;

	netdev->hw_features    |= NETIF_F_HW_TC;
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;

	if (rep->vport == MLX5_VPORT_UPLINK)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	else
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	netdev->features |= netdev->hw_features;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev,
			  const struct mlx5e_profile *profile,
			  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
	if (err)
		return err;

	priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

	mlx5e_build_rep_params(netdev);
	mlx5e_build_rep_netdev(netdev);

	mlx5e_timestamp_init(priv);

	return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_netdev_cleanup(priv->netdev, priv);
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
		return err;
	}
	return 0;
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = priv->direct_tir[0].tirn;
	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

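/* RX side init: open the drop RQ, create the RQTs and TIRs, build the
 * representor TTC table and finally the e-switch vport rx rule that
 * steers traffic arriving from the represented vport into this netdev.
 * The error path unwinds in reverse order of creation.
 */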
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, false);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_direct_tirs;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_ttc_table;

	return 0;

err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
	mlx5e_destroy_indirect_tirs(priv, false);
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
}

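/* Create the TISes used for transmit. The uplink rep additionally owns
 * the shared TC flow table, tunnel entropy state, and the netdevice
 * notifier that drives indirect (tunnel device) TC block offloads.
 */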
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv;
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		uplink_priv = &rpriv->uplink_priv;

		mutex_init(&uplink_priv->unready_flows_lock);
		INIT_LIST_HEAD(&uplink_priv->unready_flows);

		/* init shared tc flow table */
		err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
		if (err)
			goto destroy_tises;

		mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

		/* init indirect block notifications */
		INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
		uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
		err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
		if (err) {
			mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
			goto tc_esw_cleanup;
		}
	}

	return 0;

tc_esw_cleanup:
	mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
	mlx5e_destroy_tises(priv);
	return err;
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_destroy_tises(priv);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		/* clean indirect TC block notifications */
		unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
		mlx5e_rep_indr_clean_block_privs(rpriv);

		/* delete shared tc flow table */
		mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
		mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
	}
}

static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	mlx5e_set_netdev_mtu_boundaries(priv);
}

static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}

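/* Firmware async event handler for the uplink rep: port up/down updates
 * the carrier, and a port-affinity change re-offloads flows; both are
 * deferred to the priv workqueue.
 */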
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
		struct mlx5e_rep_priv *rpriv = priv->ppriv;

		queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

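/* Uplink rep enable: derive the MTU range from the port capabilities,
 * hook the device into the LAG logic, register for firmware events, and
 * bring up DCB when the kernel is built with CONFIG_MLX5_CORE_EN_DCB.
 */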
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	u16 max_mtu;

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
		  mlx5e_tc_reoffload_flows_work);

	mlx5_lag_add(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
#endif
}

static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_delete_app(priv);
#endif
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
	mlx5_lag_remove(mdev);
}

static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable			= mlx5e_rep_enable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats		= mlx5e_rep_update_hw_counters,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc			= 1,
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(REGULAR),
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable			= mlx5e_uplink_rep_enable,
	.disable		= mlx5e_uplink_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats		= mlx5e_uplink_rep_update_hw_counters,
	.update_carrier		= mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc			= MLX5E_MAX_NUM_TC,
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(REGULAR),
};

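/* devlink ports are exposed only for uplink, PF and VF vports. */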
static bool
is_devlink_port_supported(const struct mlx5_core_dev *dev,
			  const struct mlx5e_rep_priv *rpriv)
{
	return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
	       rpriv->rep->vport == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
}

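/* Make the devlink port index unique across functions by folding the
 * vhca_id into the bits above the vport number.
 */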
static unsigned int
vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

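/* Register the rep's devlink port with a flavour matching the vport:
 * physical for the uplink, PCI PF or PCI VF otherwise. VF vport numbers
 * start at 1, hence the rep->vport - 1 VF index below.
 */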
static int register_devlink_port(struct mlx5_core_dev *dev,
				 struct mlx5e_rep_priv *rpriv)
{
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct netdev_phys_item_id ppid = {};
	unsigned int dl_port_index = 0;

	if (!is_devlink_port_supported(dev, rpriv))
		return 0;

	mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);

	if (rep->vport == MLX5_VPORT_UPLINK) {
		devlink_port_attrs_set(&rpriv->dl_port,
				       DEVLINK_PORT_FLAVOUR_PHYSICAL,
				       PCI_FUNC(dev->pdev->devfn), false, 0,
				       &ppid.id[0], ppid.id_len);
		dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
	} else if (rep->vport == MLX5_VPORT_PF) {
		devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
					      &ppid.id[0], ppid.id_len,
					      dev->pdev->devfn);
		dl_port_index = rep->vport;
	} else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
					    rpriv->rep->vport)) {
		devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
					      &ppid.id[0], ppid.id_len,
					      dev->pdev->devfn,
					      rep->vport - 1);
		dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
	}

	return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}

static void unregister_devlink_port(struct mlx5_core_dev *dev,
				    struct mlx5e_rep_priv *rpriv)
{
	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_unregister(&rpriv->dl_port);
}

/* e-Switch vport representors */
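/* Load callback, invoked per vport rep: allocate the rep private data,
 * create a netdev from the matching profile, attach it, and register
 * neighbour handling, the devlink port and the netdev itself.
 */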
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	const struct mlx5e_profile *profile;
	struct mlx5e_rep_priv *rpriv;
	struct net_device *netdev;
	int nch, err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	/* rpriv->rep to be looked up when profile->init() is called */
	rpriv->rep = rep;

	nch = mlx5e_get_max_num_channels(dev);
	profile = (rep->vport == MLX5_VPORT_UPLINK) ?
		  &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
	netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
	if (!netdev) {
		pr_warn("Failed to create representor netdev for vport %d\n",
			rep->vport);
		kfree(rpriv);
		return -EINVAL;
	}

	rpriv->netdev = netdev;
	rep->rep_data[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	if (rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_create_mdev_resources(dev);
		if (err)
			goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		pr_warn("Failed to attach representor netdev for vport %d\n",
			rep->vport);
		goto err_destroy_mdev_resources;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err) {
		pr_warn("Failed to initialize neighbours handling for vport %d\n",
			rep->vport);
		goto err_detach_netdev;
	}

	err = register_devlink_port(dev, rpriv);
	if (err) {
		esw_warn(dev, "Failed to register devlink port %d\n",
			 rep->vport);
		goto err_neigh_cleanup;
	}

	err = register_netdev(netdev);
	if (err) {
		pr_warn("Failed to register representor netdev for vport %d\n",
			rep->vport);
		goto err_devlink_cleanup;
	}

	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_type_eth_set(&rpriv->dl_port, netdev);
	return 0;

err_devlink_cleanup:
	unregister_devlink_port(dev, rpriv);

err_neigh_cleanup:
	mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_mdev_resources:
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(dev);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	kfree(rpriv);
	return err;
}

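/* Unload callback: undo mlx5e_vport_rep_load() in reverse order. */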
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	void *ppriv = priv->ppriv;

	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_type_clear(&rpriv->dl_port);
	unregister_netdev(netdev);
	unregister_devlink_port(dev, rpriv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_detach_netdev(priv);
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(priv->mdev);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}

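/* Return the netdev that backs this rep for the REP_ETH protocol. */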
static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}

static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev
};

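/* Register/unregister the ethernet rep ops with the e-switch core so it
 * can load and unload a rep netdev per vport.
 */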
void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
}

void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}