1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/list.h>
34 #include <linux/ip.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/mlx5/fs.h>
38 #include "en.h"
39 #include "lib/mpfs.h"
40
41 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
42 struct mlx5e_l2_rule *ai, int type);
43 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
44 struct mlx5e_l2_rule *ai);
45
46 enum {
47 MLX5E_FULLMATCH = 0,
48 MLX5E_ALLMULTI = 1,
49 MLX5E_PROMISC = 2,
50 };
51
52 enum {
53 MLX5E_UC = 0,
54 MLX5E_MC_IPV4 = 1,
55 MLX5E_MC_IPV6 = 2,
56 MLX5E_MC_OTHER = 3,
57 };
58
59 enum {
60 MLX5E_ACTION_NONE = 0,
61 MLX5E_ACTION_ADD = 1,
62 MLX5E_ACTION_DEL = 2,
63 };
64
65 struct mlx5e_l2_hash_node {
66 struct hlist_node hlist;
67 u8 action;
68 struct mlx5e_l2_rule ai;
69 bool mpfs;
70 };
71
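/* The L2 address hash is indexed by the least significant byte of the MAC. */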
72 static inline int mlx5e_hash_l2(u8 *addr)
73 {
74 return addr[5];
75 }
76
77 static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
78 {
79 struct mlx5e_l2_hash_node *hn;
80 int ix = mlx5e_hash_l2(addr);
81 int found = 0;
82
83 hlist_for_each_entry(hn, &hash[ix], hlist)
84 if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
85 found = 1;
86 break;
87 }
88
89 if (found) {
90 hn->action = MLX5E_ACTION_NONE;
91 return;
92 }
93
94 hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
95 if (!hn)
96 return;
97
98 ether_addr_copy(hn->ai.addr, addr);
99 hn->action = MLX5E_ACTION_ADD;
100
101 hlist_add_head(&hn->hlist, &hash[ix]);
102 }
103
104 static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
105 {
106 hlist_del(&hn->hlist);
107 kfree(hn);
108 }
109
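/* Push the set of active C-VLAN IDs into the NIC vport context. If the list
 * exceeds the firmware limit (1 << log_max_vlan_list) it is truncated and a
 * warning is emitted.
 */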
110 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
111 {
112 struct net_device *ndev = priv->netdev;
113 int max_list_size;
114 int list_size;
115 u16 *vlans;
116 int vlan;
117 int err;
118 int i;
119
120 list_size = 0;
121 for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
122 list_size++;
123
124 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
125
126 if (list_size > max_list_size) {
127 netdev_warn(ndev,
128 "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
129 list_size, max_list_size);
130 list_size = max_list_size;
131 }
132
133 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
134 if (!vlans)
135 return -ENOMEM;
136
137 i = 0;
138 for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
139 if (i >= list_size)
140 break;
141 vlans[i++] = vlan;
142 }
143
144 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
145 if (err)
146 netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
147 err);
148
149 kfree(vlans);
150 return err;
151 }
152
153 enum mlx5e_vlan_rule_type {
154 MLX5E_VLAN_RULE_TYPE_UNTAGGED,
155 MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
156 MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
157 MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
158 MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
159 };
160
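/* Each VLAN rule matches on the outer VLAN headers and forwards the packet
 * from the VLAN flow table to the L2 flow table; rule_type selects whether
 * the match is untagged, any C-tag, any S-tag, or a specific C/S VID.
 */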
161 static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
162 enum mlx5e_vlan_rule_type rule_type,
163 u16 vid, struct mlx5_flow_spec *spec)
164 {
165 struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
166 struct mlx5_flow_destination dest = {};
167 struct mlx5_flow_handle **rule_p;
168 MLX5_DECLARE_FLOW_ACT(flow_act);
169 int err = 0;
170
171 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
172 dest.ft = priv->fs.l2.ft.t;
173
174 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
175
176 switch (rule_type) {
177 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
178 /* cvlan_tag enabled in match criteria and
179 * disabled in match value means both S & C tags
180 * don't exist (i.e. the packet carries neither tag)
181 */
182 rule_p = &priv->fs.vlan.untagged_rule;
183 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
184 outer_headers.cvlan_tag);
185 break;
186 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
187 rule_p = &priv->fs.vlan.any_cvlan_rule;
188 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
189 outer_headers.cvlan_tag);
190 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
191 break;
192 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
193 rule_p = &priv->fs.vlan.any_svlan_rule;
194 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
195 outer_headers.svlan_tag);
196 MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
197 break;
198 case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
199 rule_p = &priv->fs.vlan.active_svlans_rule[vid];
200 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
201 outer_headers.svlan_tag);
202 MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
203 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
204 outer_headers.first_vid);
205 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
206 vid);
207 break;
208 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
209 rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
210 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
211 outer_headers.cvlan_tag);
212 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
213 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
214 outer_headers.first_vid);
215 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
216 vid);
217 break;
218 }
219
220 *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
221
222 if (IS_ERR(*rule_p)) {
223 err = PTR_ERR(*rule_p);
224 *rule_p = NULL;
225 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
226 }
227
228 return err;
229 }
230
231 static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
232 enum mlx5e_vlan_rule_type rule_type, u16 vid)
233 {
234 struct mlx5_flow_spec *spec;
235 int err = 0;
236
237 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
238 if (!spec)
239 return -ENOMEM;
240
241 if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
242 mlx5e_vport_context_update_vlans(priv);
243
244 err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
245
246 kvfree(spec);
247
248 return err;
249 }
250
251 static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
252 enum mlx5e_vlan_rule_type rule_type, u16 vid)
253 {
254 switch (rule_type) {
255 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
256 if (priv->fs.vlan.untagged_rule) {
257 mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
258 priv->fs.vlan.untagged_rule = NULL;
259 }
260 break;
261 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
262 if (priv->fs.vlan.any_cvlan_rule) {
263 mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
264 priv->fs.vlan.any_cvlan_rule = NULL;
265 }
266 break;
267 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
268 if (priv->fs.vlan.any_svlan_rule) {
269 mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
270 priv->fs.vlan.any_svlan_rule = NULL;
271 }
272 break;
273 case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
274 if (priv->fs.vlan.active_svlans_rule[vid]) {
275 mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
276 priv->fs.vlan.active_svlans_rule[vid] = NULL;
277 }
278 break;
279 case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
280 if (priv->fs.vlan.active_cvlans_rule[vid]) {
281 mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
282 priv->fs.vlan.active_cvlans_rule[vid] = NULL;
283 }
284 mlx5e_vport_context_update_vlans(priv);
285 break;
286 }
287 }
288
289 static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
290 {
291 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
292 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
293 }
294
295 static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
296 {
297 int err;
298
299 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
300 if (err)
301 return err;
302
303 return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
304 }
305
306 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
307 {
308 if (!priv->fs.vlan.cvlan_filter_disabled)
309 return;
310
311 priv->fs.vlan.cvlan_filter_disabled = false;
312 if (priv->netdev->flags & IFF_PROMISC)
313 return;
314 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
315 }
316
317 void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
318 {
319 if (priv->fs.vlan.cvlan_filter_disabled)
320 return;
321
322 priv->fs.vlan.cvlan_filter_disabled = true;
323 if (priv->netdev->flags & IFF_PROMISC)
324 return;
325 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
326 }
327
328 static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
329 {
330 int err;
331
332 set_bit(vid, priv->fs.vlan.active_cvlans);
333
334 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
335 if (err)
336 clear_bit(vid, priv->fs.vlan.active_cvlans);
337
338 return err;
339 }
340
341 static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
342 {
343 struct net_device *netdev = priv->netdev;
344 int err;
345
346 set_bit(vid, priv->fs.vlan.active_svlans);
347
348 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
349 if (err) {
350 clear_bit(vid, priv->fs.vlan.active_svlans);
351 return err;
352 }
353
354 /* Some netdev features (e.g. C-tag VLAN stripping) depend on S-VLAN state; re-evaluate them */
355 netdev_update_features(netdev);
356 return err;
357 }
358
359 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
360 {
361 struct mlx5e_priv *priv = netdev_priv(dev);
362
363 if (be16_to_cpu(proto) == ETH_P_8021Q)
364 return mlx5e_vlan_rx_add_cvid(priv, vid);
365 else if (be16_to_cpu(proto) == ETH_P_8021AD)
366 return mlx5e_vlan_rx_add_svid(priv, vid);
367
368 return -EOPNOTSUPP;
369 }
370
371 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
372 {
373 struct mlx5e_priv *priv = netdev_priv(dev);
374
375 if (be16_to_cpu(proto) == ETH_P_8021Q) {
376 clear_bit(vid, priv->fs.vlan.active_cvlans);
377 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
378 } else if (be16_to_cpu(proto) == ETH_P_8021AD) {
379 clear_bit(vid, priv->fs.vlan.active_svlans);
380 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
381 netdev_update_features(dev);
382 }
383
384 return 0;
385 }
386
387 static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
388 {
389 int i;
390
391 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
392
393 for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
394 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
395 }
396
397 for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
398 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
399
400 if (priv->fs.vlan.cvlan_filter_disabled &&
401 !(priv->netdev->flags & IFF_PROMISC))
402 mlx5e_add_any_vid_rules(priv);
403 }
404
405 static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
406 {
407 int i;
408
409 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
410
411 for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
412 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
413 }
414
415 for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
416 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
417
418 if (priv->fs.vlan.cvlan_filter_disabled &&
419 !(priv->netdev->flags & IFF_PROMISC))
420 mlx5e_del_any_vid_rules(priv);
421 }
422
423 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
424 for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
425 hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
426
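/* Apply the pending action recorded on a hash node: on ADD install the L2
 * steering rule and, for unicast addresses, register the MAC with MPFS; on
 * DEL tear both down and free the node.
 */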
427 static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
428 struct mlx5e_l2_hash_node *hn)
429 {
430 u8 action = hn->action;
431 u8 mac_addr[ETH_ALEN];
432 int l2_err = 0;
433
434 ether_addr_copy(mac_addr, hn->ai.addr);
435
436 switch (action) {
437 case MLX5E_ACTION_ADD:
438 mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
439 if (!is_multicast_ether_addr(mac_addr)) {
440 l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
441 hn->mpfs = !l2_err;
442 }
443 hn->action = MLX5E_ACTION_NONE;
444 break;
445
446 case MLX5E_ACTION_DEL:
447 if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
448 l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
449 mlx5e_del_l2_flow_rule(priv, &hn->ai);
450 mlx5e_del_l2_from_hash(hn);
451 break;
452 }
453
454 if (l2_err)
455 netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
456 action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
457 }
458
459 static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
460 {
461 struct net_device *netdev = priv->netdev;
462 struct netdev_hw_addr *ha;
463
464 netif_addr_lock_bh(netdev);
465
466 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
467 priv->netdev->dev_addr);
468
469 netdev_for_each_uc_addr(ha, netdev)
470 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
471
472 netdev_for_each_mc_addr(ha, netdev)
473 mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
474
475 netif_addr_unlock_bh(netdev);
476 }
477
478 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
479 u8 addr_array[][ETH_ALEN], int size)
480 {
481 bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
482 struct net_device *ndev = priv->netdev;
483 struct mlx5e_l2_hash_node *hn;
484 struct hlist_head *addr_list;
485 struct hlist_node *tmp;
486 int i = 0;
487 int hi;
488
489 addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
490
491 if (is_uc) /* Make sure our own address is pushed first */
492 ether_addr_copy(addr_array[i++], ndev->dev_addr);
493 else if (priv->fs.l2.broadcast_enabled)
494 ether_addr_copy(addr_array[i++], ndev->broadcast);
495
496 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
497 if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
498 continue;
499 if (i >= size)
500 break;
501 ether_addr_copy(addr_array[i++], hn->ai.addr);
502 }
503 }
504
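/* Build the UC or MC address array (own MAC or broadcast address first) and
 * push it to the NIC vport context, truncating to the firmware list limit.
 */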
505 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
506 int list_type)
507 {
508 bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
509 struct mlx5e_l2_hash_node *hn;
510 u8 (*addr_array)[ETH_ALEN] = NULL;
511 struct hlist_head *addr_list;
512 struct hlist_node *tmp;
513 int max_size;
514 int size;
515 int err;
516 int hi;
517
518 size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
519 max_size = is_uc ?
520 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
521 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
522
523 addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
524 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
525 size++;
526
527 if (size > max_size) {
528 netdev_warn(priv->netdev,
529 "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
530 is_uc ? "UC" : "MC", size, max_size);
531 size = max_size;
532 }
533
534 if (size) {
535 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
536 if (!addr_array) {
537 err = -ENOMEM;
538 goto out;
539 }
540 mlx5e_fill_addr_array(priv, list_type, addr_array, size);
541 }
542
543 err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
544 out:
545 if (err)
546 netdev_err(priv->netdev,
547 "Failed to modify vport %s list err(%d)\n",
548 is_uc ? "UC" : "MC", err);
549 kfree(addr_array);
550 }
551
552 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
553 {
554 struct mlx5e_l2_table *ea = &priv->fs.l2;
555
556 mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
557 mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
558 mlx5_modify_nic_vport_promisc(priv->mdev, 0,
559 ea->allmulti_enabled,
560 ea->promisc_enabled);
561 }
562
563 static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
564 {
565 struct mlx5e_l2_hash_node *hn;
566 struct hlist_node *tmp;
567 int i;
568
569 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
570 mlx5e_execute_l2_action(priv, hn);
571
572 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
573 mlx5e_execute_l2_action(priv, hn);
574 }
575
576 static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
577 {
578 struct mlx5e_l2_hash_node *hn;
579 struct hlist_node *tmp;
580 int i;
581
582 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
583 hn->action = MLX5E_ACTION_DEL;
584 mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
585 hn->action = MLX5E_ACTION_DEL;
586
587 if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
588 mlx5e_sync_netdev_addr(priv);
589
590 mlx5e_apply_netdev_addr(priv);
591 }
592
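/* Deferred rx-mode handler: compute the promisc/allmulti/broadcast transitions
 * from the netdev flags, add or remove the corresponding L2 and VLAN rules,
 * sync the netdev address lists, and update the NIC vport context.
 */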
593 void mlx5e_set_rx_mode_work(struct work_struct *work)
594 {
595 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
596 set_rx_mode_work);
597
598 struct mlx5e_l2_table *ea = &priv->fs.l2;
599 struct net_device *ndev = priv->netdev;
600
601 bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
602 bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
603 bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
604 bool broadcast_enabled = rx_mode_enable;
605
606 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
607 bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
608 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
609 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
610 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
611 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
612
613 if (enable_promisc) {
614 if (!priv->channels.params.vlan_strip_disable)
615 netdev_warn_once(ndev,
616 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
617 mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
618 if (!priv->fs.vlan.cvlan_filter_disabled)
619 mlx5e_add_any_vid_rules(priv);
620 }
621 if (enable_allmulti)
622 mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
623 if (enable_broadcast)
624 mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
625
626 mlx5e_handle_netdev_addr(priv);
627
628 if (disable_broadcast)
629 mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
630 if (disable_allmulti)
631 mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
632 if (disable_promisc) {
633 if (!priv->fs.vlan.cvlan_filter_disabled)
634 mlx5e_del_any_vid_rules(priv);
635 mlx5e_del_l2_flow_rule(priv, &ea->promisc);
636 }
637
638 ea->promisc_enabled = promisc_enabled;
639 ea->allmulti_enabled = allmulti_enabled;
640 ea->broadcast_enabled = broadcast_enabled;
641
642 mlx5e_vport_context_update(priv);
643 }
644
645 static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
646 {
647 int i;
648
649 for (i = ft->num_groups - 1; i >= 0; i--) {
650 if (!IS_ERR_OR_NULL(ft->g[i]))
651 mlx5_destroy_flow_group(ft->g[i]);
652 ft->g[i] = NULL;
653 }
654 ft->num_groups = 0;
655 }
656
657 void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
658 {
659 ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
660 }
661
662 void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
663 {
664 mlx5e_destroy_groups(ft);
665 kfree(ft->g);
666 mlx5_destroy_flow_table(ft->t);
667 ft->t = NULL;
668 }
669
670 static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
671 {
672 int i;
673
674 for (i = 0; i < MLX5E_NUM_TT; i++) {
675 if (!IS_ERR_OR_NULL(ttc->rules[i])) {
676 mlx5_del_flow_rules(ttc->rules[i]);
677 ttc->rules[i] = NULL;
678 }
679 }
680
681 for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
682 if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
683 mlx5_del_flow_rules(ttc->tunnel_rules[i]);
684 ttc->tunnel_rules[i] = NULL;
685 }
686 }
687 }
688
689 struct mlx5e_etype_proto {
690 u16 etype;
691 u8 proto;
692 };
693
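/* Static mapping from each traffic type to the outer ethertype / IP protocol
 * used to classify it in the TTC table.
 */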
694 static struct mlx5e_etype_proto ttc_rules[] = {
695 [MLX5E_TT_IPV4_TCP] = {
696 .etype = ETH_P_IP,
697 .proto = IPPROTO_TCP,
698 },
699 [MLX5E_TT_IPV6_TCP] = {
700 .etype = ETH_P_IPV6,
701 .proto = IPPROTO_TCP,
702 },
703 [MLX5E_TT_IPV4_UDP] = {
704 .etype = ETH_P_IP,
705 .proto = IPPROTO_UDP,
706 },
707 [MLX5E_TT_IPV6_UDP] = {
708 .etype = ETH_P_IPV6,
709 .proto = IPPROTO_UDP,
710 },
711 [MLX5E_TT_IPV4_IPSEC_AH] = {
712 .etype = ETH_P_IP,
713 .proto = IPPROTO_AH,
714 },
715 [MLX5E_TT_IPV6_IPSEC_AH] = {
716 .etype = ETH_P_IPV6,
717 .proto = IPPROTO_AH,
718 },
719 [MLX5E_TT_IPV4_IPSEC_ESP] = {
720 .etype = ETH_P_IP,
721 .proto = IPPROTO_ESP,
722 },
723 [MLX5E_TT_IPV6_IPSEC_ESP] = {
724 .etype = ETH_P_IPV6,
725 .proto = IPPROTO_ESP,
726 },
727 [MLX5E_TT_IPV4] = {
728 .etype = ETH_P_IP,
729 .proto = 0,
730 },
731 [MLX5E_TT_IPV6] = {
732 .etype = ETH_P_IPV6,
733 .proto = 0,
734 },
735 [MLX5E_TT_ANY] = {
736 .etype = 0,
737 .proto = 0,
738 },
739 };
740
741 static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
742 [MLX5E_TT_IPV4_GRE] = {
743 .etype = ETH_P_IP,
744 .proto = IPPROTO_GRE,
745 },
746 [MLX5E_TT_IPV6_GRE] = {
747 .etype = ETH_P_IPV6,
748 .proto = IPPROTO_GRE,
749 },
750 [MLX5E_TT_IPV4_IPIP] = {
751 .etype = ETH_P_IP,
752 .proto = IPPROTO_IPIP,
753 },
754 [MLX5E_TT_IPV6_IPIP] = {
755 .etype = ETH_P_IPV6,
756 .proto = IPPROTO_IPIP,
757 },
758 [MLX5E_TT_IPV4_IPV6] = {
759 .etype = ETH_P_IP,
760 .proto = IPPROTO_IPV6,
761 },
762 [MLX5E_TT_IPV6_IPV6] = {
763 .etype = ETH_P_IPV6,
764 .proto = IPPROTO_IPV6,
765 },
766
767 };
768
769 bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type)
770 {
771 switch (proto_type) {
772 case IPPROTO_GRE:
773 return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
774 case IPPROTO_IPIP:
775 case IPPROTO_IPV6:
776 return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip);
777 default:
778 return false;
779 }
780 }
781
782 bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev)
783 {
784 int tt;
785
786 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
787 if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto))
788 return true;
789 }
790 return false;
791 }
792
793 bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
794 {
795 return (mlx5e_any_tunnel_proto_supported(mdev) &&
796 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
797 }
798
799 static u8 mlx5e_etype_to_ipv(u16 ethertype)
800 {
801 if (ethertype == ETH_P_IP)
802 return 4;
803
804 if (ethertype == ETH_P_IPV6)
805 return 6;
806
807 return 0;
808 }
809
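/* Build one TTC rule: match the outer IP version (or ethertype when the
 * device cannot match on ip_version) and the IP protocol, and steer matching
 * packets to the given destination (a TIR, or the inner TTC table for
 * tunneled traffic).
 */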
810 static struct mlx5_flow_handle *
811 mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
812 struct mlx5_flow_table *ft,
813 struct mlx5_flow_destination *dest,
814 u16 etype,
815 u8 proto)
816 {
817 int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
818 MLX5_DECLARE_FLOW_ACT(flow_act);
819 struct mlx5_flow_handle *rule;
820 struct mlx5_flow_spec *spec;
821 int err = 0;
822 u8 ipv;
823
824 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
825 if (!spec)
826 return ERR_PTR(-ENOMEM);
827
828 if (proto) {
829 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
830 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
831 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
832 }
833
834 ipv = mlx5e_etype_to_ipv(etype);
835 if (match_ipv_outer && ipv) {
836 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
837 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
838 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
839 } else if (etype) {
840 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
841 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
842 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
843 }
844
845 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
846 if (IS_ERR(rule)) {
847 err = PTR_ERR(rule);
848 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
849 }
850
851 kvfree(spec);
852 return err ? ERR_PTR(err) : rule;
853 }
854
855 static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
856 struct ttc_params *params,
857 struct mlx5e_ttc_table *ttc)
858 {
859 struct mlx5_flow_destination dest = {};
860 struct mlx5_flow_handle **rules;
861 struct mlx5_flow_table *ft;
862 int tt;
863 int err;
864
865 ft = ttc->ft.t;
866 rules = ttc->rules;
867
868 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
869 for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
870 if (tt == MLX5E_TT_ANY)
871 dest.tir_num = params->any_tt_tirn;
872 else
873 dest.tir_num = params->indir_tirn[tt];
874 rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
875 ttc_rules[tt].etype,
876 ttc_rules[tt].proto);
877 if (IS_ERR(rules[tt]))
878 goto del_rules;
879 }
880
881 if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
882 return 0;
883
884 rules = ttc->tunnel_rules;
885 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
886 dest.ft = params->inner_ttc->ft.t;
887 for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
888 if (!mlx5e_tunnel_proto_supported(priv->mdev,
889 ttc_tunnel_rules[tt].proto))
890 continue;
891 rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
892 ttc_tunnel_rules[tt].etype,
893 ttc_tunnel_rules[tt].proto);
894 if (IS_ERR(rules[tt]))
895 goto del_rules;
896 }
897
898 return 0;
899
900 del_rules:
901 err = PTR_ERR(rules[tt]);
902 rules[tt] = NULL;
903 mlx5e_cleanup_ttc_rules(ttc);
904 return err;
905 }
906
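/* TTC table layout: group 1 holds the L4 rules (IP version/ethertype plus IP
 * protocol) and the tunnel rules, group 2 the L3-only rules, group 3 the
 * catch-all rule.
 */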
907 #define MLX5E_TTC_NUM_GROUPS 3
908 #define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT)
909 #define MLX5E_TTC_GROUP2_SIZE BIT(1)
910 #define MLX5E_TTC_GROUP3_SIZE BIT(0)
911 #define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
912 MLX5E_TTC_GROUP2_SIZE +\
913 MLX5E_TTC_GROUP3_SIZE)
914
915 #define MLX5E_INNER_TTC_NUM_GROUPS 3
916 #define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3)
917 #define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1)
918 #define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0)
919 #define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\
920 MLX5E_INNER_TTC_GROUP2_SIZE +\
921 MLX5E_INNER_TTC_GROUP3_SIZE)
922
923 static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
924 bool use_ipv)
925 {
926 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
927 struct mlx5e_flow_table *ft = &ttc->ft;
928 int ix = 0;
929 u32 *in;
930 int err;
931 u8 *mc;
932
933 ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
934 sizeof(*ft->g), GFP_KERNEL);
935 if (!ft->g)
936 return -ENOMEM;
937 in = kvzalloc(inlen, GFP_KERNEL);
938 if (!in) {
939 kfree(ft->g);
940 return -ENOMEM;
941 }
942
943 /* L4 Group */
944 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
945 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
946 if (use_ipv)
947 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
948 else
949 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
950 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
951 MLX5_SET_CFG(in, start_flow_index, ix);
952 ix += MLX5E_TTC_GROUP1_SIZE;
953 MLX5_SET_CFG(in, end_flow_index, ix - 1);
954 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
955 if (IS_ERR(ft->g[ft->num_groups]))
956 goto err;
957 ft->num_groups++;
958
959 /* L3 Group */
960 MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
961 MLX5_SET_CFG(in, start_flow_index, ix);
962 ix += MLX5E_TTC_GROUP2_SIZE;
963 MLX5_SET_CFG(in, end_flow_index, ix - 1);
964 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
965 if (IS_ERR(ft->g[ft->num_groups]))
966 goto err;
967 ft->num_groups++;
968
969 /* Any Group */
970 memset(in, 0, inlen);
971 MLX5_SET_CFG(in, start_flow_index, ix);
972 ix += MLX5E_TTC_GROUP3_SIZE;
973 MLX5_SET_CFG(in, end_flow_index, ix - 1);
974 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
975 if (IS_ERR(ft->g[ft->num_groups]))
976 goto err;
977 ft->num_groups++;
978
979 kvfree(in);
980 return 0;
981
982 err:
983 err = PTR_ERR(ft->g[ft->num_groups]);
984 ft->g[ft->num_groups] = NULL;
985 kvfree(in);
986
987 return err;
988 }
989
990 static struct mlx5_flow_handle *
991 mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
992 struct mlx5_flow_table *ft,
993 struct mlx5_flow_destination *dest,
994 u16 etype, u8 proto)
995 {
996 MLX5_DECLARE_FLOW_ACT(flow_act);
997 struct mlx5_flow_handle *rule;
998 struct mlx5_flow_spec *spec;
999 int err = 0;
1000 u8 ipv;
1001
1002 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1003 if (!spec)
1004 return ERR_PTR(-ENOMEM);
1005
1006 ipv = mlx5e_etype_to_ipv(etype);
1007 if (etype && ipv) {
1008 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
1009 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
1010 MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
1011 }
1012
1013 if (proto) {
1014 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
1015 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
1016 MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
1017 }
1018
1019 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
1020 if (IS_ERR(rule)) {
1021 err = PTR_ERR(rule);
1022 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
1023 }
1024
1025 kvfree(spec);
1026 return err ? ERR_PTR(err) : rule;
1027 }
1028
1029 static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
1030 struct ttc_params *params,
1031 struct mlx5e_ttc_table *ttc)
1032 {
1033 struct mlx5_flow_destination dest = {};
1034 struct mlx5_flow_handle **rules;
1035 struct mlx5_flow_table *ft;
1036 int err;
1037 int tt;
1038
1039 ft = ttc->ft.t;
1040 rules = ttc->rules;
1041
1042 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1043 for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
1044 if (tt == MLX5E_TT_ANY)
1045 dest.tir_num = params->any_tt_tirn;
1046 else
1047 dest.tir_num = params->indir_tirn[tt];
1048
1049 rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
1050 ttc_rules[tt].etype,
1051 ttc_rules[tt].proto);
1052 if (IS_ERR(rules[tt]))
1053 goto del_rules;
1054 }
1055
1056 return 0;
1057
1058 del_rules:
1059 err = PTR_ERR(rules[tt]);
1060 rules[tt] = NULL;
1061 mlx5e_cleanup_ttc_rules(ttc);
1062 return err;
1063 }
1064
1065 static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
1066 {
1067 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1068 struct mlx5e_flow_table *ft = &ttc->ft;
1069 int ix = 0;
1070 u32 *in;
1071 int err;
1072 u8 *mc;
1073
1074 ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1075 if (!ft->g)
1076 return -ENOMEM;
1077 in = kvzalloc(inlen, GFP_KERNEL);
1078 if (!in) {
1079 kfree(ft->g);
1080 return -ENOMEM;
1081 }
1082
1083 /* L4 Group */
1084 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1085 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
1086 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
1087 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1088 MLX5_SET_CFG(in, start_flow_index, ix);
1089 ix += MLX5E_INNER_TTC_GROUP1_SIZE;
1090 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1091 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1092 if (IS_ERR(ft->g[ft->num_groups]))
1093 goto err;
1094 ft->num_groups++;
1095
1096 /* L3 Group */
1097 MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
1098 MLX5_SET_CFG(in, start_flow_index, ix);
1099 ix += MLX5E_INNER_TTC_GROUP2_SIZE;
1100 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1101 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1102 if (IS_ERR(ft->g[ft->num_groups]))
1103 goto err;
1104 ft->num_groups++;
1105
1106 /* Any Group */
1107 memset(in, 0, inlen);
1108 MLX5_SET_CFG(in, start_flow_index, ix);
1109 ix += MLX5E_INNER_TTC_GROUP3_SIZE;
1110 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1111 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1112 if (IS_ERR(ft->g[ft->num_groups]))
1113 goto err;
1114 ft->num_groups++;
1115
1116 kvfree(in);
1117 return 0;
1118
1119 err:
1120 err = PTR_ERR(ft->g[ft->num_groups]);
1121 ft->g[ft->num_groups] = NULL;
1122 kvfree(in);
1123
1124 return err;
1125 }
1126
1127 void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
1128 struct ttc_params *ttc_params)
1129 {
1130 ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
1131 ttc_params->inner_ttc = &priv->fs.inner_ttc;
1132 }
1133
1134 void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
1135 {
1136 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
1137
1138 ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
1139 ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
1140 ft_attr->prio = MLX5E_NIC_PRIO;
1141 }
1142
1143 void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
1144
1145 {
1146 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
1147
1148 ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
1149 ft_attr->level = MLX5E_TTC_FT_LEVEL;
1150 ft_attr->prio = MLX5E_NIC_PRIO;
1151 }
1152
1153 int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1154 struct mlx5e_ttc_table *ttc)
1155 {
1156 struct mlx5e_flow_table *ft = &ttc->ft;
1157 int err;
1158
1159 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
1160 return 0;
1161
1162 ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
1163 if (IS_ERR(ft->t)) {
1164 err = PTR_ERR(ft->t);
1165 ft->t = NULL;
1166 return err;
1167 }
1168
1169 err = mlx5e_create_inner_ttc_table_groups(ttc);
1170 if (err)
1171 goto err;
1172
1173 err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
1174 if (err)
1175 goto err;
1176
1177 return 0;
1178
1179 err:
1180 mlx5e_destroy_flow_table(ft);
1181 return err;
1182 }
1183
1184 void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
1185 struct mlx5e_ttc_table *ttc)
1186 {
1187 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
1188 return;
1189
1190 mlx5e_cleanup_ttc_rules(ttc);
1191 mlx5e_destroy_flow_table(&ttc->ft);
1192 }
1193
1194 void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
1195 struct mlx5e_ttc_table *ttc)
1196 {
1197 mlx5e_cleanup_ttc_rules(ttc);
1198 mlx5e_destroy_flow_table(&ttc->ft);
1199 }
1200
1201 int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
1202 struct mlx5e_ttc_table *ttc)
1203 {
1204 bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
1205 struct mlx5e_flow_table *ft = &ttc->ft;
1206 int err;
1207
1208 ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
1209 if (IS_ERR(ft->t)) {
1210 err = PTR_ERR(ft->t);
1211 ft->t = NULL;
1212 return err;
1213 }
1214
1215 err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
1216 if (err)
1217 goto err;
1218
1219 err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
1220 if (err)
1221 goto err;
1222
1223 return 0;
1224 err:
1225 mlx5e_destroy_flow_table(ft);
1226 return err;
1227 }
1228
1229 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
1230 struct mlx5e_l2_rule *ai)
1231 {
1232 if (!IS_ERR_OR_NULL(ai->rule)) {
1233 mlx5_del_flow_rules(ai->rule);
1234 ai->rule = NULL;
1235 }
1236 }
1237
1238 static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
1239 struct mlx5e_l2_rule *ai, int type)
1240 {
1241 struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
1242 struct mlx5_flow_destination dest = {};
1243 MLX5_DECLARE_FLOW_ACT(flow_act);
1244 struct mlx5_flow_spec *spec;
1245 int err = 0;
1246 u8 *mc_dmac;
1247 u8 *mv_dmac;
1248
1249 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1250 if (!spec)
1251 return -ENOMEM;
1252
1253 mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1254 outer_headers.dmac_47_16);
1255 mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1256 outer_headers.dmac_47_16);
1257
1258 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1259 dest.ft = priv->fs.ttc.ft.t;
1260
1261 switch (type) {
1262 case MLX5E_FULLMATCH:
1263 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1264 eth_broadcast_addr(mc_dmac);
1265 ether_addr_copy(mv_dmac, ai->addr);
1266 break;
1267
1268 case MLX5E_ALLMULTI:
1269 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1270 mc_dmac[0] = 0x01;
1271 mv_dmac[0] = 0x01;
1272 break;
1273
1274 case MLX5E_PROMISC:
1275 break;
1276 }
1277
1278 ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1279 if (IS_ERR(ai->rule)) {
1280 netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
1281 __func__, mv_dmac);
1282 err = PTR_ERR(ai->rule);
1283 ai->rule = NULL;
1284 }
1285
1286 kvfree(spec);
1287
1288 return err;
1289 }
1290
1291 #define MLX5E_NUM_L2_GROUPS 3
1292 #define MLX5E_L2_GROUP1_SIZE BIT(0)
1293 #define MLX5E_L2_GROUP2_SIZE BIT(15)
1294 #define MLX5E_L2_GROUP3_SIZE BIT(0)
1295 #define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
1296 MLX5E_L2_GROUP2_SIZE +\
1297 MLX5E_L2_GROUP3_SIZE)
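/* L2 table layout: a single catch-all entry for promiscuous mode, a full
 * DMAC-match group for unicast/broadcast addresses, and a single multicast
 * (01:xx:..) prefix entry for allmulti.
 */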
1298 static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
1299 {
1300 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1301 struct mlx5e_flow_table *ft = &l2_table->ft;
1302 int ix = 0;
1303 u8 *mc_dmac;
1304 u32 *in;
1305 int err;
1306 u8 *mc;
1307
1308 ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1309 if (!ft->g)
1310 return -ENOMEM;
1311 in = kvzalloc(inlen, GFP_KERNEL);
1312 if (!in) {
1313 kfree(ft->g);
1314 return -ENOMEM;
1315 }
1316
1317 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1318 mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
1319 outer_headers.dmac_47_16);
1320 /* Flow Group for promiscuous */
1321 MLX5_SET_CFG(in, start_flow_index, ix);
1322 ix += MLX5E_L2_GROUP1_SIZE;
1323 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1324 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1325 if (IS_ERR(ft->g[ft->num_groups]))
1326 goto err_destroy_groups;
1327 ft->num_groups++;
1328
1329 /* Flow Group for full match */
1330 eth_broadcast_addr(mc_dmac);
1331 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1332 MLX5_SET_CFG(in, start_flow_index, ix);
1333 ix += MLX5E_L2_GROUP2_SIZE;
1334 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1335 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1336 if (IS_ERR(ft->g[ft->num_groups]))
1337 goto err_destroy_groups;
1338 ft->num_groups++;
1339
1340 /* Flow Group for allmulti */
1341 eth_zero_addr(mc_dmac);
1342 mc_dmac[0] = 0x01;
1343 MLX5_SET_CFG(in, start_flow_index, ix);
1344 ix += MLX5E_L2_GROUP3_SIZE;
1345 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1346 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1347 if (IS_ERR(ft->g[ft->num_groups]))
1348 goto err_destroy_groups;
1349 ft->num_groups++;
1350
1351 kvfree(in);
1352 return 0;
1353
1354 err_destroy_groups:
1355 err = PTR_ERR(ft->g[ft->num_groups]);
1356 ft->g[ft->num_groups] = NULL;
1357 mlx5e_destroy_groups(ft);
1358 kvfree(in);
1359
1360 return err;
1361 }
1362
1363 static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
1364 {
1365 mlx5e_destroy_flow_table(&priv->fs.l2.ft);
1366 }
1367
1368 static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
1369 {
1370 struct mlx5e_l2_table *l2_table = &priv->fs.l2;
1371 struct mlx5e_flow_table *ft = &l2_table->ft;
1372 struct mlx5_flow_table_attr ft_attr = {};
1373 int err;
1374
1375 ft->num_groups = 0;
1376
1377 ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
1378 ft_attr.level = MLX5E_L2_FT_LEVEL;
1379 ft_attr.prio = MLX5E_NIC_PRIO;
1380
1381 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
1382 if (IS_ERR(ft->t)) {
1383 err = PTR_ERR(ft->t);
1384 ft->t = NULL;
1385 return err;
1386 }
1387
1388 err = mlx5e_create_l2_table_groups(l2_table);
1389 if (err)
1390 goto err_destroy_flow_table;
1391
1392 return 0;
1393
1394 err_destroy_flow_table:
1395 mlx5_destroy_flow_table(ft->t);
1396 ft->t = NULL;
1397
1398 return err;
1399 }
1400
1401 #define MLX5E_NUM_VLAN_GROUPS 4
1402 #define MLX5E_VLAN_GROUP0_SIZE BIT(12)
1403 #define MLX5E_VLAN_GROUP1_SIZE BIT(12)
1404 #define MLX5E_VLAN_GROUP2_SIZE BIT(1)
1405 #define MLX5E_VLAN_GROUP3_SIZE BIT(0)
1406 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
1407 MLX5E_VLAN_GROUP1_SIZE +\
1408 MLX5E_VLAN_GROUP2_SIZE +\
1409 MLX5E_VLAN_GROUP3_SIZE)
1410
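/* VLAN table layout: per-VID C-tag matches, per-VID S-tag matches, a two-entry
 * group matching only the cvlan_tag bit (any C-tag and untagged rules), and a
 * single-entry any S-tag group.
 */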
1411 static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
1412 int inlen)
1413 {
1414 int err;
1415 int ix = 0;
1416 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1417
1418 memset(in, 0, inlen);
1419 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1420 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1421 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1422 MLX5_SET_CFG(in, start_flow_index, ix);
1423 ix += MLX5E_VLAN_GROUP0_SIZE;
1424 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1425 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1426 if (IS_ERR(ft->g[ft->num_groups]))
1427 goto err_destroy_groups;
1428 ft->num_groups++;
1429
1430 memset(in, 0, inlen);
1431 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1432 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1433 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1434 MLX5_SET_CFG(in, start_flow_index, ix);
1435 ix += MLX5E_VLAN_GROUP1_SIZE;
1436 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1437 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1438 if (IS_ERR(ft->g[ft->num_groups]))
1439 goto err_destroy_groups;
1440 ft->num_groups++;
1441
1442 memset(in, 0, inlen);
1443 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1444 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1445 MLX5_SET_CFG(in, start_flow_index, ix);
1446 ix += MLX5E_VLAN_GROUP2_SIZE;
1447 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1448 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1449 if (IS_ERR(ft->g[ft->num_groups]))
1450 goto err_destroy_groups;
1451 ft->num_groups++;
1452
1453 memset(in, 0, inlen);
1454 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1455 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1456 MLX5_SET_CFG(in, start_flow_index, ix);
1457 ix += MLX5E_VLAN_GROUP3_SIZE;
1458 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1459 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1460 if (IS_ERR(ft->g[ft->num_groups]))
1461 goto err_destroy_groups;
1462 ft->num_groups++;
1463
1464 return 0;
1465
1466 err_destroy_groups:
1467 err = PTR_ERR(ft->g[ft->num_groups]);
1468 ft->g[ft->num_groups] = NULL;
1469 mlx5e_destroy_groups(ft);
1470
1471 return err;
1472 }
1473
1474 static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
1475 {
1476 u32 *in;
1477 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1478 int err;
1479
1480 in = kvzalloc(inlen, GFP_KERNEL);
1481 if (!in)
1482 return -ENOMEM;
1483
1484 err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
1485
1486 kvfree(in);
1487 return err;
1488 }
1489
1490 static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
1491 {
1492 struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
1493 struct mlx5_flow_table_attr ft_attr = {};
1494 int err;
1495
1496 ft->num_groups = 0;
1497
1498 ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
1499 ft_attr.level = MLX5E_VLAN_FT_LEVEL;
1500 ft_attr.prio = MLX5E_NIC_PRIO;
1501
1502 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
1503
1504 if (IS_ERR(ft->t)) {
1505 err = PTR_ERR(ft->t);
1506 ft->t = NULL;
1507 return err;
1508 }
1509 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1510 if (!ft->g) {
1511 err = -ENOMEM;
1512 goto err_destroy_vlan_table;
1513 }
1514
1515 err = mlx5e_create_vlan_table_groups(ft);
1516 if (err)
1517 goto err_free_g;
1518
1519 mlx5e_add_vlan_rules(priv);
1520
1521 return 0;
1522
1523 err_free_g:
1524 kfree(ft->g);
1525 err_destroy_vlan_table:
1526 mlx5_destroy_flow_table(ft->t);
1527 ft->t = NULL;
1528
1529 return err;
1530 }
1531
1532 static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
1533 {
1534 mlx5e_del_vlan_rules(priv);
1535 mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
1536 }
1537
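/* Create the NIC RX steering tables in reverse order of packet traversal
 * (aRFS, inner TTC, TTC, L2, VLAN) so that each table's destination already
 * exists; received packets then flow VLAN -> L2 -> TTC before reaching the
 * TIRs.
 */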
1538 int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
1539 {
1540 struct ttc_params ttc_params = {};
1541 int tt, err;
1542
1543 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
1544 MLX5_FLOW_NAMESPACE_KERNEL);
1545
1546 if (!priv->fs.ns)
1547 return -EOPNOTSUPP;
1548
1549 err = mlx5e_arfs_create_tables(priv);
1550 if (err) {
1551 netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
1552 err);
1553 priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
1554 }
1555
1556 mlx5e_set_ttc_basic_params(priv, &ttc_params);
1557 mlx5e_set_inner_ttc_ft_params(&ttc_params);
1558 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
1559 ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
1560
1561 err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
1562 if (err) {
1563 netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
1564 err);
1565 goto err_destroy_arfs_tables;
1566 }
1567
1568 mlx5e_set_ttc_ft_params(&ttc_params);
1569 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
1570 ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
1571
1572 err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
1573 if (err) {
1574 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
1575 err);
1576 goto err_destroy_inner_ttc_table;
1577 }
1578
1579 err = mlx5e_create_l2_table(priv);
1580 if (err) {
1581 netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
1582 err);
1583 goto err_destroy_ttc_table;
1584 }
1585
1586 err = mlx5e_create_vlan_table(priv);
1587 if (err) {
1588 netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
1589 err);
1590 goto err_destroy_l2_table;
1591 }
1592
1593 mlx5e_ethtool_init_steering(priv);
1594
1595 return 0;
1596
1597 err_destroy_l2_table:
1598 mlx5e_destroy_l2_table(priv);
1599 err_destroy_ttc_table:
1600 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1601 err_destroy_inner_ttc_table:
1602 mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
1603 err_destroy_arfs_tables:
1604 mlx5e_arfs_destroy_tables(priv);
1605
1606 return err;
1607 }
1608
1609 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
1610 {
1611 mlx5e_destroy_vlan_table(priv);
1612 mlx5e_destroy_l2_table(priv);
1613 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1614 mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
1615 mlx5e_arfs_destroy_tables(priv);
1616 mlx5e_ethtool_cleanup_steering(priv);
1617 }
1618