1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/list.h>
34 #include <linux/ip.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/mlx5/fs.h>
38 #include "en.h"
39 #include "lib/mpfs.h"
40
41 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
42 struct mlx5e_l2_rule *ai, int type);
43 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
44 struct mlx5e_l2_rule *ai);
45
46 enum {
47 MLX5E_FULLMATCH = 0,
48 MLX5E_ALLMULTI = 1,
49 MLX5E_PROMISC = 2,
50 };
51
52 enum {
53 MLX5E_UC = 0,
54 MLX5E_MC_IPV4 = 1,
55 MLX5E_MC_IPV6 = 2,
56 MLX5E_MC_OTHER = 3,
57 };
58
59 enum {
60 MLX5E_ACTION_NONE = 0,
61 MLX5E_ACTION_ADD = 1,
62 MLX5E_ACTION_DEL = 2,
63 };
64
65 struct mlx5e_l2_hash_node {
66 struct hlist_node hlist;
67 u8 action;
68 struct mlx5e_l2_rule ai;
69 bool mpfs;
70 };
71
72 static inline int mlx5e_hash_l2(u8 *addr)
73 {
74 return addr[5];
75 }
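/* Illustrative sketch (not part of the driver): the hash simply uses the
 * last octet of the MAC, which assumes MLX5E_L2_ADDR_HASH_SIZE covers all
 * 256 possible values of addr[5]:
 *
 *	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	int bucket = mlx5e_hash_l2(mac);	// bucket == 0x55
 */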
76
77 static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
78 {
79 struct mlx5e_l2_hash_node *hn;
80 int ix = mlx5e_hash_l2(addr);
81 int found = 0;
82
83 hlist_for_each_entry(hn, &hash[ix], hlist)
84 if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
85 found = 1;
86 break;
87 }
88
89 if (found) {
90 hn->action = MLX5E_ACTION_NONE;
91 return;
92 }
93
94 hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
95 if (!hn)
96 return;
97
98 ether_addr_copy(hn->ai.addr, addr);
99 hn->action = MLX5E_ACTION_ADD;
100
101 hlist_add_head(&hn->hlist, &hash[ix]);
102 }
103
104 static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
105 {
106 hlist_del(&hn->hlist);
107 kfree(hn);
108 }
109
110 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
111 {
112 struct net_device *ndev = priv->netdev;
113 int max_list_size;
114 int list_size;
115 u16 *vlans;
116 int vlan;
117 int err;
118 int i;
119
120 list_size = 0;
121 for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
122 list_size++;
123
124 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
125
126 if (list_size > max_list_size) {
127 netdev_warn(ndev,
128 "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
129 list_size, max_list_size);
130 list_size = max_list_size;
131 }
132
133 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
134 if (!vlans)
135 return -ENOMEM;
136
137 i = 0;
138 for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
139 if (i >= list_size)
140 break;
141 vlans[i++] = vlan;
142 }
143
144 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
145 if (err)
146 netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
147 err);
148
149 kfree(vlans);
150 return err;
151 }
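/* Worked example (illustrative, with a hypothetical capability value): if
 * the device reports log_max_vlan_list == 7, max_list_size is 1 << 7 == 128,
 * so at most 128 active C-VLANs are pushed to the vport context and the
 * remainder are dropped with the warning above.
 */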
152
153 enum mlx5e_vlan_rule_type {
154 MLX5E_VLAN_RULE_TYPE_UNTAGGED,
155 MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
156 MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
157 MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
158 MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
159 };
160
161 static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
162 enum mlx5e_vlan_rule_type rule_type,
163 u16 vid, struct mlx5_flow_spec *spec)
164 {
165 struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
166 struct mlx5_flow_destination dest = {};
167 struct mlx5_flow_handle **rule_p;
168 MLX5_DECLARE_FLOW_ACT(flow_act);
169 int err = 0;
170
171 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
172 dest.ft = priv->fs.l2.ft.t;
173
174 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
175
176 switch (rule_type) {
177 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
178 		/* cvlan_tag enabled in match criteria and
179 		 * disabled in match value means both S & C tags
180 		 * are absent, i.e. the packet is untagged
181 		 */
182 rule_p = &priv->fs.vlan.untagged_rule;
183 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
184 outer_headers.cvlan_tag);
185 break;
186 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
187 rule_p = &priv->fs.vlan.any_cvlan_rule;
188 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
189 outer_headers.cvlan_tag);
190 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
191 break;
192 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
193 rule_p = &priv->fs.vlan.any_svlan_rule;
194 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
195 outer_headers.svlan_tag);
196 MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
197 break;
198 case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
199 rule_p = &priv->fs.vlan.active_svlans_rule[vid];
200 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
201 outer_headers.svlan_tag);
202 MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
203 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
204 outer_headers.first_vid);
205 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
206 vid);
207 break;
208 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
209 rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
210 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
211 outer_headers.cvlan_tag);
212 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
213 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
214 outer_headers.first_vid);
215 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
216 vid);
217 break;
218 }
219
220 if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
221 return 0;
222
223 *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
224
225 if (IS_ERR(*rule_p)) {
226 err = PTR_ERR(*rule_p);
227 *rule_p = NULL;
228 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
229 }
230
231 return err;
232 }
233
234 static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
235 enum mlx5e_vlan_rule_type rule_type, u16 vid)
236 {
237 struct mlx5_flow_spec *spec;
238 int err = 0;
239
240 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
241 if (!spec)
242 return -ENOMEM;
243
244 if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
245 mlx5e_vport_context_update_vlans(priv);
246
247 err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
248
249 kvfree(spec);
250
251 return err;
252 }
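/* Usage sketch (illustrative): installing a steering rule for C-VLAN 100.
 * The vport VLAN list is refreshed first because the rule type is
 * MATCH_CTAG_VID:
 *
 *	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, 100);
 */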
253
254 static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
255 enum mlx5e_vlan_rule_type rule_type, u16 vid)
256 {
257 switch (rule_type) {
258 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
259 if (priv->fs.vlan.untagged_rule) {
260 mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
261 priv->fs.vlan.untagged_rule = NULL;
262 }
263 break;
264 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
265 if (priv->fs.vlan.any_cvlan_rule) {
266 mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
267 priv->fs.vlan.any_cvlan_rule = NULL;
268 }
269 break;
270 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
271 if (priv->fs.vlan.any_svlan_rule) {
272 mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
273 priv->fs.vlan.any_svlan_rule = NULL;
274 }
275 break;
276 case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
277 if (priv->fs.vlan.active_svlans_rule[vid]) {
278 mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
279 priv->fs.vlan.active_svlans_rule[vid] = NULL;
280 }
281 break;
282 case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
283 if (priv->fs.vlan.active_cvlans_rule[vid]) {
284 mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
285 priv->fs.vlan.active_cvlans_rule[vid] = NULL;
286 }
287 mlx5e_vport_context_update_vlans(priv);
288 break;
289 }
290 }
291
292 static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
293 {
294 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
295 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
296 }
297
298 static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
299 {
300 int err;
301
302 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
303 if (err)
304 return err;
305
306 return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
307 }
308
309 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
310 {
311 if (!priv->fs.vlan.cvlan_filter_disabled)
312 return;
313
314 priv->fs.vlan.cvlan_filter_disabled = false;
315 if (priv->netdev->flags & IFF_PROMISC)
316 return;
317 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
318 }
319
320 void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
321 {
322 if (priv->fs.vlan.cvlan_filter_disabled)
323 return;
324
325 priv->fs.vlan.cvlan_filter_disabled = true;
326 if (priv->netdev->flags & IFF_PROMISC)
327 return;
328 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
329 }
330
331 static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
332 {
333 int err;
334
335 set_bit(vid, priv->fs.vlan.active_cvlans);
336
337 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
338 if (err)
339 clear_bit(vid, priv->fs.vlan.active_cvlans);
340
341 return err;
342 }
343
344 static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
345 {
346 struct net_device *netdev = priv->netdev;
347 int err;
348
349 set_bit(vid, priv->fs.vlan.active_svlans);
350
351 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
352 if (err) {
353 clear_bit(vid, priv->fs.vlan.active_svlans);
354 return err;
355 }
356
357 	/* Re-check netdev features: C-tag VLAN stripping may need toggling
	 * while S-tag VLANs are active (see the warning in
	 * mlx5e_set_rx_mode_work())
	 */
358 netdev_update_features(netdev);
359 return err;
360 }
361
362 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
363 {
364 struct mlx5e_priv *priv = netdev_priv(dev);
365
366 if (be16_to_cpu(proto) == ETH_P_8021Q)
367 return mlx5e_vlan_rx_add_cvid(priv, vid);
368 else if (be16_to_cpu(proto) == ETH_P_8021AD)
369 return mlx5e_vlan_rx_add_svid(priv, vid);
370
371 return -EOPNOTSUPP;
372 }
373
374 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
375 {
376 struct mlx5e_priv *priv = netdev_priv(dev);
377
378 if (be16_to_cpu(proto) == ETH_P_8021Q) {
379 clear_bit(vid, priv->fs.vlan.active_cvlans);
380 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
381 } else if (be16_to_cpu(proto) == ETH_P_8021AD) {
382 clear_bit(vid, priv->fs.vlan.active_svlans);
383 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
384 netdev_update_features(dev);
385 }
386
387 return 0;
388 }
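/* These two handlers serve as the driver's .ndo_vlan_rx_add_vid /
 * .ndo_vlan_rx_kill_vid callbacks; a minimal wiring sketch (assuming the
 * usual net_device_ops layout):
 *
 *	static const struct net_device_ops mlx5e_netdev_ops = {
 *		...
 *		.ndo_vlan_rx_add_vid  = mlx5e_vlan_rx_add_vid,
 *		.ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
 *	};
 */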
389
390 static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
391 {
392 int i;
393
394 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
395
396 for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
397 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
398 }
399
400 for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
401 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
402
403 if (priv->fs.vlan.cvlan_filter_disabled)
404 mlx5e_add_any_vid_rules(priv);
405 }
406
407 static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
408 {
409 int i;
410
411 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
412
413 for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
414 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
415 }
416
417 for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
418 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
419
420 WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
421
422 	/* must be called after the DESTROYING bit is set and
423 	 * the set_rx_mode work has been queued and flushed
424 	 */
425 if (priv->fs.vlan.cvlan_filter_disabled)
426 mlx5e_del_any_vid_rules(priv);
427 }
428
429 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
430 for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
431 hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
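/* The _safe iteration matters here: callers such as
 * mlx5e_execute_l2_action() below may free the current node via
 * mlx5e_del_l2_from_hash() while walking the buckets.
 */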
432
433 static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
434 struct mlx5e_l2_hash_node *hn)
435 {
436 u8 action = hn->action;
437 u8 mac_addr[ETH_ALEN];
438 int l2_err = 0;
439
440 ether_addr_copy(mac_addr, hn->ai.addr);
441
442 switch (action) {
443 case MLX5E_ACTION_ADD:
444 mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
445 if (!is_multicast_ether_addr(mac_addr)) {
446 l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
447 hn->mpfs = !l2_err;
448 }
449 hn->action = MLX5E_ACTION_NONE;
450 break;
451
452 case MLX5E_ACTION_DEL:
453 if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
454 l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
455 mlx5e_del_l2_flow_rule(priv, &hn->ai);
456 mlx5e_del_l2_from_hash(hn);
457 break;
458 }
459
460 if (l2_err)
461 netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
462 action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
463 }
464
465 static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
466 {
467 struct net_device *netdev = priv->netdev;
468 struct netdev_hw_addr *ha;
469
470 netif_addr_lock_bh(netdev);
471
472 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
473 priv->netdev->dev_addr);
474
475 netdev_for_each_uc_addr(ha, netdev)
476 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
477
478 netdev_for_each_mc_addr(ha, netdev)
479 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
480
481 netif_addr_unlock_bh(netdev);
482 }
483
484 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
485 u8 addr_array[][ETH_ALEN], int size)
486 {
487 bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
488 struct net_device *ndev = priv->netdev;
489 struct mlx5e_l2_hash_node *hn;
490 struct hlist_head *addr_list;
491 struct hlist_node *tmp;
492 int i = 0;
493 int hi;
494
495 addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
496
497 if (is_uc) /* Make sure our own address is pushed first */
498 ether_addr_copy(addr_array[i++], ndev->dev_addr);
499 else if (priv->fs.l2.broadcast_enabled)
500 ether_addr_copy(addr_array[i++], ndev->broadcast);
501
502 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
503 if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
504 continue;
505 if (i >= size)
506 break;
507 ether_addr_copy(addr_array[i++], hn->ai.addr);
508 }
509 }
510
511 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
512 int list_type)
513 {
514 bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
515 struct mlx5e_l2_hash_node *hn;
516 u8 (*addr_array)[ETH_ALEN] = NULL;
517 struct hlist_head *addr_list;
518 struct hlist_node *tmp;
519 int max_size;
520 int size;
521 int err;
522 int hi;
523
524 size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
525 max_size = is_uc ?
526 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
527 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
528
529 addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
530 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
531 size++;
532
533 if (size > max_size) {
534 netdev_warn(priv->netdev,
535 "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
536 is_uc ? "UC" : "MC", size, max_size);
537 size = max_size;
538 }
539
540 if (size) {
541 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
542 if (!addr_array) {
543 err = -ENOMEM;
544 goto out;
545 }
546 mlx5e_fill_addr_array(priv, list_type, addr_array, size);
547 }
548
549 err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
550 out:
551 if (err)
552 netdev_err(priv->netdev,
553 "Failed to modify vport %s list err(%d)\n",
554 is_uc ? "UC" : "MC", err);
555 kfree(addr_array);
556 }
557
558 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
559 {
560 struct mlx5e_l2_table *ea = &priv->fs.l2;
561
562 mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
563 mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
564 mlx5_modify_nic_vport_promisc(priv->mdev, 0,
565 ea->allmulti_enabled,
566 ea->promisc_enabled);
567 }
568
569 static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
570 {
571 struct mlx5e_l2_hash_node *hn;
572 struct hlist_node *tmp;
573 int i;
574
575 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
576 mlx5e_execute_l2_action(priv, hn);
577
578 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
579 mlx5e_execute_l2_action(priv, hn);
580 }
581
582 static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
583 {
584 struct mlx5e_l2_hash_node *hn;
585 struct hlist_node *tmp;
586 int i;
587
588 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
589 hn->action = MLX5E_ACTION_DEL;
590 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
591 hn->action = MLX5E_ACTION_DEL;
592
593 if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
594 mlx5e_sync_netdev_addr(priv);
595
596 mlx5e_apply_netdev_addr(priv);
597 }
598
599 void mlx5e_set_rx_mode_work(struct work_struct *work)
600 {
601 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
602 set_rx_mode_work);
603
604 struct mlx5e_l2_table *ea = &priv->fs.l2;
605 struct net_device *ndev = priv->netdev;
606
607 bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
608 bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
609 bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
610 bool broadcast_enabled = rx_mode_enable;
611
612 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
613 bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
614 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
615 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
616 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
617 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
618
619 if (enable_promisc) {
620 if (!priv->channels.params.vlan_strip_disable)
621 netdev_warn_once(ndev,
622 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
623 mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
624 if (!priv->fs.vlan.cvlan_filter_disabled)
625 mlx5e_add_any_vid_rules(priv);
626 }
627 if (enable_allmulti)
628 mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
629 if (enable_broadcast)
630 mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
631
632 mlx5e_handle_netdev_addr(priv);
633
634 if (disable_broadcast)
635 mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
636 if (disable_allmulti)
637 mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
638 if (disable_promisc) {
639 if (!priv->fs.vlan.cvlan_filter_disabled)
640 mlx5e_del_any_vid_rules(priv);
641 mlx5e_del_l2_flow_rule(priv, &ea->promisc);
642 }
643
644 ea->promisc_enabled = promisc_enabled;
645 ea->allmulti_enabled = allmulti_enabled;
646 ea->broadcast_enabled = broadcast_enabled;
647
648 mlx5e_vport_context_update(priv);
649 }
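/* Scheduling sketch (illustrative, assuming the driver's private workqueue
 * priv->wq): the .ndo_set_rx_mode handler only queues this work, and the
 * flow-steering rules are reconciled here in process context:
 *
 *	queue_work(priv->wq, &priv->set_rx_mode_work);
 */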
650
651 static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
652 {
653 int i;
654
655 for (i = ft->num_groups - 1; i >= 0; i--) {
656 if (!IS_ERR_OR_NULL(ft->g[i]))
657 mlx5_destroy_flow_group(ft->g[i]);
658 ft->g[i] = NULL;
659 }
660 ft->num_groups = 0;
661 }
662
663 void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
664 {
665 ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
666 }
667
668 void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
669 {
670 mlx5e_destroy_groups(ft);
671 kfree(ft->g);
672 mlx5_destroy_flow_table(ft->t);
673 ft->t = NULL;
674 }
675
676 static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
677 {
678 int i;
679
680 for (i = 0; i < MLX5E_NUM_TT; i++) {
681 if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
682 mlx5_del_flow_rules(ttc->rules[i].rule);
683 ttc->rules[i].rule = NULL;
684 }
685 }
686
687 for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
688 if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
689 mlx5_del_flow_rules(ttc->tunnel_rules[i]);
690 ttc->tunnel_rules[i] = NULL;
691 }
692 }
693 }
694
695 struct mlx5e_etype_proto {
696 u16 etype;
697 u8 proto;
698 };
699
700 static struct mlx5e_etype_proto ttc_rules[] = {
701 [MLX5E_TT_IPV4_TCP] = {
702 .etype = ETH_P_IP,
703 .proto = IPPROTO_TCP,
704 },
705 [MLX5E_TT_IPV6_TCP] = {
706 .etype = ETH_P_IPV6,
707 .proto = IPPROTO_TCP,
708 },
709 [MLX5E_TT_IPV4_UDP] = {
710 .etype = ETH_P_IP,
711 .proto = IPPROTO_UDP,
712 },
713 [MLX5E_TT_IPV6_UDP] = {
714 .etype = ETH_P_IPV6,
715 .proto = IPPROTO_UDP,
716 },
717 [MLX5E_TT_IPV4_IPSEC_AH] = {
718 .etype = ETH_P_IP,
719 .proto = IPPROTO_AH,
720 },
721 [MLX5E_TT_IPV6_IPSEC_AH] = {
722 .etype = ETH_P_IPV6,
723 .proto = IPPROTO_AH,
724 },
725 [MLX5E_TT_IPV4_IPSEC_ESP] = {
726 .etype = ETH_P_IP,
727 .proto = IPPROTO_ESP,
728 },
729 [MLX5E_TT_IPV6_IPSEC_ESP] = {
730 .etype = ETH_P_IPV6,
731 .proto = IPPROTO_ESP,
732 },
733 [MLX5E_TT_IPV4] = {
734 .etype = ETH_P_IP,
735 .proto = 0,
736 },
737 [MLX5E_TT_IPV6] = {
738 .etype = ETH_P_IPV6,
739 .proto = 0,
740 },
741 [MLX5E_TT_ANY] = {
742 .etype = 0,
743 .proto = 0,
744 },
745 };
746
747 static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
748 [MLX5E_TT_IPV4_GRE] = {
749 .etype = ETH_P_IP,
750 .proto = IPPROTO_GRE,
751 },
752 [MLX5E_TT_IPV6_GRE] = {
753 .etype = ETH_P_IPV6,
754 .proto = IPPROTO_GRE,
755 },
756 [MLX5E_TT_IPV4_IPIP] = {
757 .etype = ETH_P_IP,
758 .proto = IPPROTO_IPIP,
759 },
760 [MLX5E_TT_IPV6_IPIP] = {
761 .etype = ETH_P_IPV6,
762 .proto = IPPROTO_IPIP,
763 },
764 [MLX5E_TT_IPV4_IPV6] = {
765 .etype = ETH_P_IP,
766 .proto = IPPROTO_IPV6,
767 },
768 [MLX5E_TT_IPV6_IPV6] = {
769 .etype = ETH_P_IPV6,
770 .proto = IPPROTO_IPV6,
771 },
772
773 };
774
775 bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type)
776 {
777 switch (proto_type) {
778 case IPPROTO_GRE:
779 return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
780 case IPPROTO_IPIP:
781 case IPPROTO_IPV6:
782 return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip);
783 default:
784 return false;
785 }
786 }
787
788 bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev)
789 {
790 int tt;
791
792 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
793 if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto))
794 return true;
795 }
796 return false;
797 }
798
799 bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
800 {
801 return (mlx5e_any_tunnel_proto_supported(mdev) &&
802 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
803 }
804
805 static u8 mlx5e_etype_to_ipv(u16 ethertype)
806 {
807 if (ethertype == ETH_P_IP)
808 return 4;
809
810 if (ethertype == ETH_P_IPV6)
811 return 6;
812
813 return 0;
814 }
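/* Example (illustrative): mlx5e_etype_to_ipv(ETH_P_IP) == 4,
 * mlx5e_etype_to_ipv(ETH_P_IPV6) == 6, and any other ethertype
 * (e.g. ETH_P_ARP) yields 0, which makes the callers below fall back
 * to matching on the ethertype field itself.
 */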
815
816 static struct mlx5_flow_handle *
817 mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
818 struct mlx5_flow_table *ft,
819 struct mlx5_flow_destination *dest,
820 u16 etype,
821 u8 proto)
822 {
823 int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
824 MLX5_DECLARE_FLOW_ACT(flow_act);
825 struct mlx5_flow_handle *rule;
826 struct mlx5_flow_spec *spec;
827 int err = 0;
828 u8 ipv;
829
830 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
831 if (!spec)
832 return ERR_PTR(-ENOMEM);
833
834 if (proto) {
835 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
836 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
837 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
838 }
839
840 ipv = mlx5e_etype_to_ipv(etype);
841 if (match_ipv_outer && ipv) {
842 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
843 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
844 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
845 } else if (etype) {
846 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
847 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
848 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
849 }
850
851 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
852 if (IS_ERR(rule)) {
853 err = PTR_ERR(rule);
854 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
855 }
856
857 kvfree(spec);
858 return err ? ERR_PTR(err) : rule;
859 }
860
861 static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
862 struct ttc_params *params,
863 struct mlx5e_ttc_table *ttc)
864 {
865 struct mlx5_flow_destination dest = {};
866 struct mlx5_flow_handle **trules;
867 struct mlx5e_ttc_rule *rules;
868 struct mlx5_flow_table *ft;
869 int tt;
870 int err;
871
872 ft = ttc->ft.t;
873 rules = ttc->rules;
874
875 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
876 for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
877 struct mlx5e_ttc_rule *rule = &rules[tt];
878
879 if (tt == MLX5E_TT_ANY)
880 dest.tir_num = params->any_tt_tirn;
881 else
882 dest.tir_num = params->indir_tirn[tt];
883
884 rule->rule = mlx5e_generate_ttc_rule(priv, ft, &dest,
885 ttc_rules[tt].etype,
886 ttc_rules[tt].proto);
887 if (IS_ERR(rule->rule)) {
888 err = PTR_ERR(rule->rule);
889 rule->rule = NULL;
890 goto del_rules;
891 }
892 rule->default_dest = dest;
893 }
894
895 if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
896 return 0;
897
898 trules = ttc->tunnel_rules;
899 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
900 dest.ft = params->inner_ttc->ft.t;
901 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
902 if (!mlx5e_tunnel_proto_supported(priv->mdev,
903 ttc_tunnel_rules[tt].proto))
904 continue;
905 trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
906 ttc_tunnel_rules[tt].etype,
907 ttc_tunnel_rules[tt].proto);
908 if (IS_ERR(trules[tt])) {
909 err = PTR_ERR(trules[tt]);
910 trules[tt] = NULL;
911 goto del_rules;
912 }
913 }
914
915 return 0;
916
917 del_rules:
918 mlx5e_cleanup_ttc_rules(ttc);
919 return err;
920 }
921
922 static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
923 bool use_ipv)
924 {
925 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
926 struct mlx5e_flow_table *ft = &ttc->ft;
927 int ix = 0;
928 u32 *in;
929 int err;
930 u8 *mc;
931
932 ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
933 sizeof(*ft->g), GFP_KERNEL);
934 if (!ft->g)
935 return -ENOMEM;
936 in = kvzalloc(inlen, GFP_KERNEL);
937 if (!in) {
938 kfree(ft->g);
939 return -ENOMEM;
940 }
941
942 /* L4 Group */
943 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
944 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
945 if (use_ipv)
946 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
947 else
948 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
949 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
950 MLX5_SET_CFG(in, start_flow_index, ix);
951 ix += MLX5E_TTC_GROUP1_SIZE;
952 MLX5_SET_CFG(in, end_flow_index, ix - 1);
953 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
954 if (IS_ERR(ft->g[ft->num_groups]))
955 goto err;
956 ft->num_groups++;
957
958 /* L3 Group */
959 MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
960 MLX5_SET_CFG(in, start_flow_index, ix);
961 ix += MLX5E_TTC_GROUP2_SIZE;
962 MLX5_SET_CFG(in, end_flow_index, ix - 1);
963 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
964 if (IS_ERR(ft->g[ft->num_groups]))
965 goto err;
966 ft->num_groups++;
967
968 /* Any Group */
969 memset(in, 0, inlen);
970 MLX5_SET_CFG(in, start_flow_index, ix);
971 ix += MLX5E_TTC_GROUP3_SIZE;
972 MLX5_SET_CFG(in, end_flow_index, ix - 1);
973 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
974 if (IS_ERR(ft->g[ft->num_groups]))
975 goto err;
976 ft->num_groups++;
977
978 kvfree(in);
979 return 0;
980
981 err:
982 err = PTR_ERR(ft->g[ft->num_groups]);
983 ft->g[ft->num_groups] = NULL;
984 kvfree(in);
985
986 return err;
987 }
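/* Group layout recap: flow groups are matched in index order, so the most
 * specific criteria come first - L4 (ip_protocol plus ip_version or
 * ethertype), then L3 (ip_version/ethertype only), then a catch-all group
 * with empty match criteria that serves MLX5E_TT_ANY.
 */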
988
989 static struct mlx5_flow_handle *
990 mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
991 struct mlx5_flow_table *ft,
992 struct mlx5_flow_destination *dest,
993 u16 etype, u8 proto)
994 {
995 MLX5_DECLARE_FLOW_ACT(flow_act);
996 struct mlx5_flow_handle *rule;
997 struct mlx5_flow_spec *spec;
998 int err = 0;
999 u8 ipv;
1000
1001 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1002 if (!spec)
1003 return ERR_PTR(-ENOMEM);
1004
1005 ipv = mlx5e_etype_to_ipv(etype);
1006 if (etype && ipv) {
1007 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
1008 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
1009 MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
1010 }
1011
1012 if (proto) {
1013 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
1014 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
1015 MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
1016 }
1017
1018 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
1019 if (IS_ERR(rule)) {
1020 err = PTR_ERR(rule);
1021 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
1022 }
1023
1024 kvfree(spec);
1025 return err ? ERR_PTR(err) : rule;
1026 }
1027
1028 static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
1029 struct ttc_params *params,
1030 struct mlx5e_ttc_table *ttc)
1031 {
1032 struct mlx5_flow_destination dest = {};
1033 struct mlx5e_ttc_rule *rules;
1034 struct mlx5_flow_table *ft;
1035 int err;
1036 int tt;
1037
1038 ft = ttc->ft.t;
1039 rules = ttc->rules;
1040 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1041
1042 for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
1043 struct mlx5e_ttc_rule *rule = &rules[tt];
1044
1045 if (tt == MLX5E_TT_ANY)
1046 dest.tir_num = params->any_tt_tirn;
1047 else
1048 dest.tir_num = params->indir_tirn[tt];
1049
1050 rule->rule = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
1051 ttc_rules[tt].etype,
1052 ttc_rules[tt].proto);
1053 if (IS_ERR(rule->rule)) {
1054 err = PTR_ERR(rule->rule);
1055 rule->rule = NULL;
1056 goto del_rules;
1057 }
1058 rule->default_dest = dest;
1059 }
1060
1061 return 0;
1062
1063 del_rules:
1064
1065 mlx5e_cleanup_ttc_rules(ttc);
1066 return err;
1067 }
1068
1069 static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
1070 {
1071 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1072 struct mlx5e_flow_table *ft = &ttc->ft;
1073 int ix = 0;
1074 u32 *in;
1075 int err;
1076 u8 *mc;
1077
1078 ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1079 if (!ft->g)
1080 return -ENOMEM;
1081 in = kvzalloc(inlen, GFP_KERNEL);
1082 if (!in) {
1083 kfree(ft->g);
1084 return -ENOMEM;
1085 }
1086
1087 /* L4 Group */
1088 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1089 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
1090 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
1091 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1092 MLX5_SET_CFG(in, start_flow_index, ix);
1093 ix += MLX5E_INNER_TTC_GROUP1_SIZE;
1094 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1095 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1096 if (IS_ERR(ft->g[ft->num_groups]))
1097 goto err;
1098 ft->num_groups++;
1099
1100 /* L3 Group */
1101 MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
1102 MLX5_SET_CFG(in, start_flow_index, ix);
1103 ix += MLX5E_INNER_TTC_GROUP2_SIZE;
1104 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1105 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1106 if (IS_ERR(ft->g[ft->num_groups]))
1107 goto err;
1108 ft->num_groups++;
1109
1110 /* Any Group */
1111 memset(in, 0, inlen);
1112 MLX5_SET_CFG(in, start_flow_index, ix);
1113 ix += MLX5E_INNER_TTC_GROUP3_SIZE;
1114 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1115 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1116 if (IS_ERR(ft->g[ft->num_groups]))
1117 goto err;
1118 ft->num_groups++;
1119
1120 kvfree(in);
1121 return 0;
1122
1123 err:
1124 err = PTR_ERR(ft->g[ft->num_groups]);
1125 ft->g[ft->num_groups] = NULL;
1126 kvfree(in);
1127
1128 return err;
1129 }
1130
1131 void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
1132 struct ttc_params *ttc_params)
1133 {
1134 ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
1135 ttc_params->inner_ttc = &priv->fs.inner_ttc;
1136 }
1137
1138 void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
1139 {
1140 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
1141
1142 ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
1143 ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
1144 ft_attr->prio = MLX5E_NIC_PRIO;
1145 }
1146
1147 void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
1148
1149 {
1150 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
1151
1152 ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
1153 ft_attr->level = MLX5E_TTC_FT_LEVEL;
1154 ft_attr->prio = MLX5E_NIC_PRIO;
1155 }
1156
1157 int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1158 struct mlx5e_ttc_table *ttc)
1159 {
1160 struct mlx5e_flow_table *ft = &ttc->ft;
1161 int err;
1162
1163 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
1164 return 0;
1165
1166 	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
1167 if (IS_ERR(ft->t)) {
1168 err = PTR_ERR(ft->t);
1169 ft->t = NULL;
1170 return err;
1171 }
1172
1173 err = mlx5e_create_inner_ttc_table_groups(ttc);
1174 if (err)
1175 goto err;
1176
1177 err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
1178 if (err)
1179 goto err;
1180
1181 return 0;
1182
1183 err:
1184 mlx5e_destroy_flow_table(ft);
1185 return err;
1186 }
1187
1188 void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
1189 struct mlx5e_ttc_table *ttc)
1190 {
1191 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
1192 return;
1193
1194 mlx5e_cleanup_ttc_rules(ttc);
1195 mlx5e_destroy_flow_table(&ttc->ft);
1196 }
1197
1198 void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
1199 struct mlx5e_ttc_table *ttc)
1200 {
1201 mlx5e_cleanup_ttc_rules(ttc);
1202 mlx5e_destroy_flow_table(&ttc->ft);
1203 }
1204
1205 int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1206 struct mlx5e_ttc_table *ttc)
1207 {
1208 bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
1209 struct mlx5e_flow_table *ft = &ttc->ft;
1210 int err;
1211
1212 	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
1213 if (IS_ERR(ft->t)) {
1214 err = PTR_ERR(ft->t);
1215 ft->t = NULL;
1216 return err;
1217 }
1218
1219 err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
1220 if (err)
1221 goto err;
1222
1223 err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
1224 if (err)
1225 goto err;
1226
1227 return 0;
1228 err:
1229 mlx5e_destroy_flow_table(ft);
1230 return err;
1231 }
1232
1233 int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
1234 struct mlx5_flow_destination *new_dest)
1235 {
1236 return mlx5_modify_rule_destination(priv->fs.ttc.rules[type].rule, new_dest, NULL);
1237 }
1238
1239 struct mlx5_flow_destination
1240 mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
1241 {
1242 struct mlx5_flow_destination *dest = &priv->fs.ttc.rules[type].default_dest;
1243
1244 WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
1245 "TTC[%d] default dest is not setup yet", type);
1246
1247 return *dest;
1248 }
1249
1250 int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
1251 {
1252 struct mlx5_flow_destination dest = mlx5e_ttc_get_default_dest(priv, type);
1253
1254 return mlx5e_ttc_fwd_dest(priv, type, &dest);
1255 }
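/* Usage sketch (illustrative): temporarily steer all outer IPv4-TCP traffic
 * to another TIR and later restore the default destination; some_tirn is a
 * hypothetical TIR number:
 *
 *	struct mlx5_flow_destination new_dest = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = some_tirn,
 *	};
 *	mlx5e_ttc_fwd_dest(priv, MLX5E_TT_IPV4_TCP, &new_dest);
 *	...
 *	mlx5e_ttc_fwd_default_dest(priv, MLX5E_TT_IPV4_TCP);
 */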
1256
1257 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
1258 struct mlx5e_l2_rule *ai)
1259 {
1260 if (!IS_ERR_OR_NULL(ai->rule)) {
1261 mlx5_del_flow_rules(ai->rule);
1262 ai->rule = NULL;
1263 }
1264 }
1265
1266 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
1267 struct mlx5e_l2_rule *ai, int type)
1268 {
1269 struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
1270 struct mlx5_flow_destination dest = {};
1271 MLX5_DECLARE_FLOW_ACT(flow_act);
1272 struct mlx5_flow_spec *spec;
1273 int err = 0;
1274 u8 *mc_dmac;
1275 u8 *mv_dmac;
1276
1277 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1278 if (!spec)
1279 return -ENOMEM;
1280
1281 mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1282 outer_headers.dmac_47_16);
1283 mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1284 outer_headers.dmac_47_16);
1285
1286 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1287 dest.ft = priv->fs.ttc.ft.t;
1288
1289 switch (type) {
1290 case MLX5E_FULLMATCH:
1291 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1292 eth_broadcast_addr(mc_dmac);
1293 ether_addr_copy(mv_dmac, ai->addr);
1294 break;
1295
1296 case MLX5E_ALLMULTI:
1297 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1298 mc_dmac[0] = 0x01;
1299 mv_dmac[0] = 0x01;
1300 break;
1301
1302 case MLX5E_PROMISC:
1303 break;
1304 }
1305
1306 ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1307 if (IS_ERR(ai->rule)) {
1308 netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
1309 __func__, mv_dmac);
1310 err = PTR_ERR(ai->rule);
1311 ai->rule = NULL;
1312 }
1313
1314 kvfree(spec);
1315
1316 return err;
1317 }
1318
1319 #define MLX5E_NUM_L2_GROUPS 3
1320 #define MLX5E_L2_GROUP1_SIZE BIT(0)
1321 #define MLX5E_L2_GROUP2_SIZE BIT(15)
1322 #define MLX5E_L2_GROUP3_SIZE BIT(0)
1323 #define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
1324 MLX5E_L2_GROUP2_SIZE +\
1325 MLX5E_L2_GROUP3_SIZE)
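/* Size arithmetic: BIT(0) + BIT(15) + BIT(0) = 1 + 32768 + 1 = 32770
 * flow table entries - one promiscuous entry, 32K full-match DMAC entries
 * and one allmulti entry, mirroring the three groups created below.
 */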
1326 static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
1327 {
1328 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1329 struct mlx5e_flow_table *ft = &l2_table->ft;
1330 int ix = 0;
1331 u8 *mc_dmac;
1332 u32 *in;
1333 int err;
1334 u8 *mc;
1335
1336 ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1337 if (!ft->g)
1338 return -ENOMEM;
1339 in = kvzalloc(inlen, GFP_KERNEL);
1340 if (!in) {
1341 kfree(ft->g);
1342 return -ENOMEM;
1343 }
1344
1345 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1346 mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
1347 outer_headers.dmac_47_16);
1348 /* Flow Group for promiscuous */
1349 MLX5_SET_CFG(in, start_flow_index, ix);
1350 ix += MLX5E_L2_GROUP1_SIZE;
1351 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1352 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1353 if (IS_ERR(ft->g[ft->num_groups]))
1354 goto err_destroy_groups;
1355 ft->num_groups++;
1356
1357 /* Flow Group for full match */
1358 eth_broadcast_addr(mc_dmac);
1359 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1360 MLX5_SET_CFG(in, start_flow_index, ix);
1361 ix += MLX5E_L2_GROUP2_SIZE;
1362 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1363 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1364 if (IS_ERR(ft->g[ft->num_groups]))
1365 goto err_destroy_groups;
1366 ft->num_groups++;
1367
1368 /* Flow Group for allmulti */
1369 eth_zero_addr(mc_dmac);
1370 mc_dmac[0] = 0x01;
1371 MLX5_SET_CFG(in, start_flow_index, ix);
1372 ix += MLX5E_L2_GROUP3_SIZE;
1373 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1374 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1375 if (IS_ERR(ft->g[ft->num_groups]))
1376 goto err_destroy_groups;
1377 ft->num_groups++;
1378
1379 kvfree(in);
1380 return 0;
1381
1382 err_destroy_groups:
1383 err = PTR_ERR(ft->g[ft->num_groups]);
1384 ft->g[ft->num_groups] = NULL;
1385 mlx5e_destroy_groups(ft);
1386 kvfree(in);
1387
1388 return err;
1389 }
1390
1391 static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
1392 {
1393 mlx5e_destroy_flow_table(&priv->fs.l2.ft);
1394 }
1395
1396 static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
1397 {
1398 struct mlx5e_l2_table *l2_table = &priv->fs.l2;
1399 struct mlx5e_flow_table *ft = &l2_table->ft;
1400 struct mlx5_flow_table_attr ft_attr = {};
1401 int err;
1402
1403 ft->num_groups = 0;
1404
1405 ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
1406 ft_attr.level = MLX5E_L2_FT_LEVEL;
1407 ft_attr.prio = MLX5E_NIC_PRIO;
1408
1409 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
1410 if (IS_ERR(ft->t)) {
1411 err = PTR_ERR(ft->t);
1412 ft->t = NULL;
1413 return err;
1414 }
1415
1416 err = mlx5e_create_l2_table_groups(l2_table);
1417 if (err)
1418 goto err_destroy_flow_table;
1419
1420 return 0;
1421
1422 err_destroy_flow_table:
1423 mlx5_destroy_flow_table(ft->t);
1424 ft->t = NULL;
1425
1426 return err;
1427 }
1428
1429 #define MLX5E_NUM_VLAN_GROUPS 4
1430 #define MLX5E_VLAN_GROUP0_SIZE BIT(12)
1431 #define MLX5E_VLAN_GROUP1_SIZE BIT(12)
1432 #define MLX5E_VLAN_GROUP2_SIZE BIT(1)
1433 #define MLX5E_VLAN_GROUP3_SIZE BIT(0)
1434 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
1435 MLX5E_VLAN_GROUP1_SIZE +\
1436 MLX5E_VLAN_GROUP2_SIZE +\
1437 MLX5E_VLAN_GROUP3_SIZE)
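/* Size arithmetic: BIT(12) + BIT(12) + BIT(1) + BIT(0) = 4096 + 4096 + 2 + 1
 * = 8195 entries: 4K C-VLAN VID rules, 4K S-VLAN VID rules, two rules that
 * match on cvlan_tag only (untagged + any C-tag) and one any-S-tag rule,
 * matching the four groups created below.
 */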
1438
1439 static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
1440 int inlen)
1441 {
1442 int err;
1443 int ix = 0;
1444 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1445
1446 memset(in, 0, inlen);
1447 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1448 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1449 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1450 MLX5_SET_CFG(in, start_flow_index, ix);
1451 ix += MLX5E_VLAN_GROUP0_SIZE;
1452 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1453 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1454 if (IS_ERR(ft->g[ft->num_groups]))
1455 goto err_destroy_groups;
1456 ft->num_groups++;
1457
1458 memset(in, 0, inlen);
1459 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1460 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1461 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1462 MLX5_SET_CFG(in, start_flow_index, ix);
1463 ix += MLX5E_VLAN_GROUP1_SIZE;
1464 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1465 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1466 if (IS_ERR(ft->g[ft->num_groups]))
1467 goto err_destroy_groups;
1468 ft->num_groups++;
1469
1470 memset(in, 0, inlen);
1471 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1472 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1473 MLX5_SET_CFG(in, start_flow_index, ix);
1474 ix += MLX5E_VLAN_GROUP2_SIZE;
1475 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1476 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1477 if (IS_ERR(ft->g[ft->num_groups]))
1478 goto err_destroy_groups;
1479 ft->num_groups++;
1480
1481 memset(in, 0, inlen);
1482 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1483 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1484 MLX5_SET_CFG(in, start_flow_index, ix);
1485 ix += MLX5E_VLAN_GROUP3_SIZE;
1486 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1487 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1488 if (IS_ERR(ft->g[ft->num_groups]))
1489 goto err_destroy_groups;
1490 ft->num_groups++;
1491
1492 return 0;
1493
1494 err_destroy_groups:
1495 err = PTR_ERR(ft->g[ft->num_groups]);
1496 ft->g[ft->num_groups] = NULL;
1497 mlx5e_destroy_groups(ft);
1498
1499 return err;
1500 }
1501
1502 static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
1503 {
1504 u32 *in;
1505 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1506 int err;
1507
1508 in = kvzalloc(inlen, GFP_KERNEL);
1509 if (!in)
1510 return -ENOMEM;
1511
1512 err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
1513
1514 kvfree(in);
1515 return err;
1516 }
1517
1518 static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
1519 {
1520 struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
1521 struct mlx5_flow_table_attr ft_attr = {};
1522 int err;
1523
1524 ft->num_groups = 0;
1525
1526 ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
1527 ft_attr.level = MLX5E_VLAN_FT_LEVEL;
1528 ft_attr.prio = MLX5E_NIC_PRIO;
1529
1530 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
1531
1532 if (IS_ERR(ft->t)) {
1533 err = PTR_ERR(ft->t);
1534 ft->t = NULL;
1535 return err;
1536 }
1537 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1538 if (!ft->g) {
1539 err = -ENOMEM;
1540 goto err_destroy_vlan_table;
1541 }
1542
1543 err = mlx5e_create_vlan_table_groups(ft);
1544 if (err)
1545 goto err_free_g;
1546
1547 mlx5e_add_vlan_rules(priv);
1548
1549 return 0;
1550
1551 err_free_g:
1552 kfree(ft->g);
1553 err_destroy_vlan_table:
1554 mlx5_destroy_flow_table(ft->t);
1555 ft->t = NULL;
1556
1557 return err;
1558 }
1559
1560 static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
1561 {
1562 mlx5e_del_vlan_rules(priv);
1563 mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
1564 }
1565
1566 int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
1567 {
1568 struct ttc_params ttc_params = {};
1569 int tt, err;
1570
1571 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
1572 MLX5_FLOW_NAMESPACE_KERNEL);
1573
1574 if (!priv->fs.ns)
1575 return -EOPNOTSUPP;
1576
1577 err = mlx5e_arfs_create_tables(priv);
1578 if (err) {
1579 netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
1580 err);
1581 priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
1582 }
1583
1584 mlx5e_set_ttc_basic_params(priv, &ttc_params);
1585 mlx5e_set_inner_ttc_ft_params(&ttc_params);
1586 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
1587 ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
1588
1589 err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
1590 if (err) {
1591 netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
1592 err);
1593 goto err_destroy_arfs_tables;
1594 }
1595
1596 mlx5e_set_ttc_ft_params(&ttc_params);
1597 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
1598 ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
1599
1600 err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
1601 if (err) {
1602 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
1603 err);
1604 goto err_destroy_inner_ttc_table;
1605 }
1606
1607 err = mlx5e_create_l2_table(priv);
1608 if (err) {
1609 netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
1610 err);
1611 goto err_destroy_ttc_table;
1612 }
1613
1614 err = mlx5e_create_vlan_table(priv);
1615 if (err) {
1616 netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
1617 err);
1618 goto err_destroy_l2_table;
1619 }
1620
1621 mlx5e_ethtool_init_steering(priv);
1622
1623 return 0;
1624
1625 err_destroy_l2_table:
1626 mlx5e_destroy_l2_table(priv);
1627 err_destroy_ttc_table:
1628 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1629 err_destroy_inner_ttc_table:
1630 mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
1631 err_destroy_arfs_tables:
1632 mlx5e_arfs_destroy_tables(priv);
1633
1634 return err;
1635 }
1636
1637 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
1638 {
1639 mlx5e_destroy_vlan_table(priv);
1640 mlx5e_destroy_l2_table(priv);
1641 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1642 mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
1643 mlx5e_arfs_destroy_tables(priv);
1644 mlx5e_ethtool_cleanup_steering(priv);
1645 }
1646