/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "fs_core.h"

#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
	max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
	max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       sw_rep_stats_desc[i].format);
		for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       vport_rep_stats_desc[j].format);
		break;
	}
}

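/* Read the represented vport's counters from the e-switch and mirror them
 * into the netdev vport stats. Note the tx/rx flip below: bytes the VF
 * transmits are bytes the representor receives, and vice versa.
 */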
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes = vf_stats.rx_bytes;
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		rq_stats = c->rq.stats;

		s->rx_packets += rq_stats->packets;
		s->rx_bytes += rq_stats->bytes;

		for (j = 0; j < priv->channels.params.num_tc; j++) {
			sq_stats = c->sq[j].stats;

			s->tx_packets += sq_stats->packets;
			s->tx_bytes += sq_stats->bytes;
		}
	}
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i, j;

	if (!data)
		return;

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_rep_update_sw_counters(priv);
	mlx5e_rep_update_hw_counters(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);

	for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
					       vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.get_drvinfo = mlx5e_rep_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx5e_rep_get_strings,
	.get_sset_count = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
};

int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

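/* Remove all "send to vport" re-inject rules installed for this
 * representor's SQs and free the bookkeeping list entries.
 */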
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}

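/* Install an e-switch "send to vport" rule for each of the representor's
 * SQs, so packets transmitted on the representor netdev are re-injected
 * towards the represented vport. On failure, roll back any rules added
 * so far.
 */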
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);
	return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc,
		      sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}

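/* Derive the initial neigh-stats sampling interval from the smaller of
 * the per-table IPv4 ARP and IPv6 ND DELAY_PROBE_TIME values.
 */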
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
	unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
						DELAY_PROBE_TIME);
#else
	unsigned long ipv6_interval = ~0UL;
#endif
	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
						DELAY_PROBE_TIME);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	mlx5_fc_queue_stats_work(priv->mdev,
				 &neigh_update->neigh_stats_work,
				 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
						    neigh_update.neigh_stats_work.work);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe;

	rtnl_lock();
	if (!list_empty(&rpriv->neigh_update.neigh_list))
		mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
		mlx5e_tc_update_neigh_used_value(nhe);

	rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
	refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
	if (refcount_dec_and_test(&nhe->refcnt))
		kfree(nhe);
}

static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   bool neigh_connected,
				   unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;

	ASSERT_RTNL();

	if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
	    !ether_addr_equal(e->h_dest, ha))
		mlx5e_tc_encap_flows_del(priv, e);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);

		mlx5e_tc_encap_flows_add(priv, e);
	}
}

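/* Work handler queued from the netevent notifier. Under RTNL, snapshot
 * the neighbour's state and HW address and re-validate every encap entry
 * that depends on this neighbour, then drop the references taken when
 * the work was queued.
 */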
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
	struct mlx5e_neigh_hash_entry *nhe =
		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
	struct neighbour *n = nhe->n;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	bool neigh_connected;
	bool encap_connected;
	u8 nud_state, dead;

	rtnl_lock();

	/* If these parameters are changed after we release the lock,
	 * we'll receive another event letting us know about it.
	 * We use this lock to avoid inconsistency between the neigh validity
	 * and its hw address.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	neigh_connected = (nud_state & NUD_VALID) && !dead;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
		priv = netdev_priv(e->out_dev);

		if (encap_connected != neigh_connected ||
		    !ether_addr_equal(e->h_dest, ha))
			mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
	}
	mlx5e_rep_neigh_entry_release(nhe);
	rtnl_unlock();
	neigh_release(n);
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh);

static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		/* We are in atomic context and can't take RTNL mutex, so use
		 * spin_lock_bh to lookup the neigh table. bh is used since
		 * netevent can be called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		if (!nhe) {
			spin_unlock_bh(&neigh_update->encap_lock);
			return NOTIFY_DONE;
		}

		/* This assignment is valid as long as the neigh reference
		 * is taken
		 */
		nhe->n = n;

		/* Take a reference to ensure the neighbour and mlx5 encap
		 * entry won't be destructed until we drop the reference in
		 * delayed work.
		 */
		neigh_hold(n);
		mlx5e_rep_neigh_entry_hold(nhe);

		if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
			mlx5e_rep_neigh_entry_release(nhe);
			neigh_release(n);
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check the device is present since we don't care about
		 * changes in the default table, we only care about changes
		 * done to the per-device delay probe time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use spin_lock_bh to walk the neigh list and look for
		 * the relevant device. bh is used since netevent can be
		 * called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		if (!found)
			return NOTIFY_DONE;

		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	spin_lock_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
					struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
				     &nhe->rhash_node,
				     mlx5e_neigh_ht_params);
	if (err)
		return err;

	list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

	return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
					 struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	spin_lock_bh(&rpriv->neigh_update.encap_lock);

	list_del(&nhe->neigh_list);

	rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
			       &nhe->rhash_node,
			       mlx5e_neigh_ht_params);
	spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
				      mlx5e_neigh_ht_params);
}

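/* Allocate a neigh hash entry for the encap entry's neighbour and insert
 * it into the representor's hash table and neigh list. The entry starts
 * with a single reference, owned by the hash table.
 */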
static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
					struct mlx5e_encap_entry *e,
					struct mlx5e_neigh_hash_entry **nhe)
{
	int err;

	*nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
	if (!*nhe)
		return -ENOMEM;

	memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
	INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
	INIT_LIST_HEAD(&(*nhe)->encap_list);
	refcount_set(&(*nhe)->refcnt, 1);

	err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
	if (err)
		goto out_free;
	return 0;

out_free:
	kfree(*nhe);
	return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
					  struct mlx5e_neigh_hash_entry *nhe)
{
	/* The neigh hash entry must be removed from the hash table regardless
	 * of the reference count value, so it won't be found by the next
	 * neigh notification call. The neigh hash entry reference count is
	 * incremented only during creation and neigh notification calls and
	 * protects from freeing the nhe struct.
	 */
	mlx5e_rep_neigh_entry_remove(priv, nhe);
	mlx5e_rep_neigh_entry_release(nhe);
}

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e)
{
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
		if (err)
			return err;
	}
	list_add(&e->encap_list, &nhe->encap_list);
	return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_neigh_hash_entry *nhe;

	list_del(&e->encap_list);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

	if (list_empty(&nhe->encap_list))
		mlx5e_rep_neigh_entry_destroy(priv, nhe);
}

static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

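/* Report the represented vport as the phys port name. VF vports are
 * numbered from 1 (vport 0 is the uplink), hence the "- 1" below.
 */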
static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
					char *buf, size_t len)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	ret = snprintf(buf, len, "%d", rep->vport - 1);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
			      struct tc_cls_flower_offload *cls_flower, int flags)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, cls_flower, flags);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower, flags);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower, flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
				       void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_block(struct net_device *dev,
				    struct tc_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
					     priv, priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	rep = rpriv->rep;
	if (esw->mode == SRIOV_OFFLOADS &&
	    rep && rep->vport == FDB_UPLINK_VPORT)
		return true;

	return false;
}

static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	rep = rpriv->rep;
	if (rep && rep->vport != FDB_UPLINK_VPORT)
		return true;

	return false;
}

bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
			return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;

	mlx5e_rep_update_sw_counters(priv);

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes = sstats->tx_bytes;

	stats->tx_dropped = sstats->tx_queue_dropped;

	return 0;
}

int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
			    void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
	.switchdev_port_attr_get = mlx5e_attr_get,
};

static int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open = mlx5e_rep_open,
	.ndo_stop = mlx5e_rep_close,
	.ndo_start_xmit = mlx5e_xmit,
	.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
	.ndo_setup_tc = mlx5e_rep_setup_tc,
	.ndo_get_stats64 = mlx5e_rep_get_stats,
	.ndo_has_offload_stats = mlx5e_has_offload_stats,
	.ndo_get_offload_stats = mlx5e_get_offload_stats,
	.ndo_change_mtu = mlx5e_change_rep_mtu,
};

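/* Build minimal datapath parameters for a representor netdev: fixed
 * SQ/RQ log sizes, a cyclic (non-striding) RQ and a single TC.
 */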
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params, u16 mtu)
{
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			    MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			    MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->sw_mtu = mtu;
	params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
	params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
	params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;

	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->num_tc = 1;
	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	netdev->netdev_ops = &mlx5e_netdev_ops_rep;

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

	netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;

	netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
	netdev->hw_features |= NETIF_F_HW_TC;

	eth_hw_addr_random(netdev);

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
}

static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->mdev = mdev;
	priv->netdev = netdev;
	priv->profile = profile;
	priv->ppriv = ppriv;

	mutex_init(&priv->state_lock);

	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->channels.params.num_channels = profile->max_nch(mdev);

	mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
	mlx5e_build_rep_netdev(netdev);

	mlx5e_timestamp_init(priv);
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      priv->direct_tir[0].tirn);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto err_destroy_direct_tirs;
	}
	rpriv->vport_rx_rule = flow_rule;

	return 0;

err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}
	return 0;
}

static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
{
#define MLX5E_PORT_REPRESENTOR_NCH 1
	return MLX5E_PORT_REPRESENTOR_NCH;
}

static const struct mlx5e_profile mlx5e_rep_profile = {
	.init = mlx5e_init_rep,
	.init_rx = mlx5e_init_rep_rx,
	.cleanup_rx = mlx5e_cleanup_rep_rx,
	.init_tx = mlx5e_init_rep_tx,
	.cleanup_tx = mlx5e_cleanup_nic_tx,
	.update_stats = mlx5e_rep_update_hw_counters,
	.max_nch = mlx5e_get_rep_max_num_channels,
	.update_carrier = NULL,
	.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
	.max_tc = 1,
};

/* e-Switch vport representors */

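/* Load/unload callbacks for the NIC (uplink PF, vport 0) representor,
 * which reuses the existing NIC netdev instead of creating a new one.
 */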
static int
mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	int err;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_add_sqs_fwd_rules(priv);
		if (err)
			return err;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err)
		goto err_remove_sqs;

	/* init shared tc flow table */
	err = mlx5e_tc_esw_init(&rpriv->tc_ht);
	if (err)
		goto err_neigh_cleanup;

	return 0;

err_neigh_cleanup:
	mlx5e_rep_neigh_cleanup(rpriv);
err_remove_sqs:
	mlx5e_remove_sqs_fwd_rules(priv);
	return err;
}

static void
mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* clean uplink offloaded TC rules, delete shared tc flow table */
	mlx5e_tc_esw_cleanup(&rpriv->tc_ht);

	mlx5e_rep_neigh_cleanup(rpriv);
}

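/* Create and register a netdev for a VF vport representor: allocate the
 * rep private data, attach the mlx5e rep profile, set up neighbour
 * tracking and the egress tc offload callback, then register the netdev.
 */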
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_rep_priv *rpriv;
	struct net_device *netdev;
	struct mlx5e_priv *upriv;
	int err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
	if (!netdev) {
		pr_warn("Failed to create representor netdev for vport %d\n",
			rep->vport);
		kfree(rpriv);
		return -EINVAL;
	}

	rpriv->netdev = netdev;
	rpriv->rep = rep;
	rep->rep_if[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		pr_warn("Failed to attach representor netdev for vport %d\n",
			rep->vport);
		goto err_destroy_netdev;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err) {
		pr_warn("Failed to initialize neighbours handling for vport %d\n",
			rep->vport);
		goto err_detach_netdev;
	}

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
	upriv = netdev_priv(uplink_rpriv->netdev);
	err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
					 upriv);
	if (err)
		goto err_neigh_cleanup;

	err = register_netdev(netdev);
	if (err) {
		pr_warn("Failed to register representor netdev for vport %d\n",
			rep->vport);
		goto err_egdev_cleanup;
	}

	return 0;

err_egdev_cleanup:
	tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
				     upriv);

err_neigh_cleanup:
	mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	kfree(rpriv);
	return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *uplink_rpriv;
	void *ppriv = priv->ppriv;
	struct mlx5e_priv *upriv;

	unregister_netdev(netdev);
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
						    REP_ETH);
	upriv = netdev_priv(uplink_rpriv->netdev);
	tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
				     upriv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_detach_netdev(priv);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}

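/* Register representors for all VF vports; vport 0 is the uplink PF and
 * is registered separately in mlx5e_register_vport_reps().
 */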
static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep_if rep_if = {};

		rep_if.load = mlx5e_vport_rep_load;
		rep_if.unload = mlx5e_vport_rep_unload;
		rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
	}
}

static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}

void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep_if rep_if;
	struct mlx5e_rep_priv *rpriv;

	rpriv = priv->ppriv;
	rpriv->netdev = priv->netdev;

	rep_if.load = mlx5e_nic_rep_load;
	rep_if.unload = mlx5e_nic_rep_unload;
	rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
	rep_if.priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
	mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */

	mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}

void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
	mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF */
}

void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return NULL;

	rpriv->rep = &esw->offloads.vport_reps[0];
	return rpriv;
}