/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "en.h"

#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)

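/* Per traffic-type aRFS state: the hardware flow table, its low-priority
 * catch-all rule, and a hash table of the currently installed flow rules.
 */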
struct arfs_table {
	struct mlx5e_flow_table ft;
	struct mlx5_flow_handle *default_rule;
	struct hlist_head rules_hash[ARFS_HASH_SIZE];
};

enum arfs_type {
	ARFS_IPV4_TCP,
	ARFS_IPV6_TCP,
	ARFS_IPV4_UDP,
	ARFS_IPV6_UDP,
	ARFS_NUM_TYPES,
};

struct mlx5e_arfs_tables {
	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
	/* Protect aRFS rules list */
	spinlock_t arfs_lock;
	int last_filter_id;
	struct workqueue_struct *wq;
};

struct arfs_tuple {
	__be16 etype;
	u8 ip_proto;
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
};

struct arfs_rule {
	struct mlx5e_priv *priv;
	struct work_struct arfs_work;
	struct mlx5_flow_handle *rule;
	struct hlist_node hlist;
	int rxq;
	/* Flow ID passed to ndo_rx_flow_steer */
	int flow_id;
	/* Filter ID returned by ndo_rx_flow_steer */
	int filter_id;
	struct arfs_tuple tuple;
};

#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
	for (i = 0; i < ARFS_NUM_TYPES; i++) \
		mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)

#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
	for (j = 0; j < ARFS_HASH_SIZE; j++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)

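/* Map an aRFS table type to the TTC traffic type whose steering rule feeds it. */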
static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type)
{
	switch (type) {
	case ARFS_IPV4_TCP:
		return MLX5_TT_IPV4_TCP;
	case ARFS_IPV4_UDP:
		return MLX5_TT_IPV4_UDP;
	case ARFS_IPV6_TCP:
		return MLX5_TT_IPV6_TCP;
	case ARFS_IPV6_UDP:
		return MLX5_TT_IPV6_UDP;
	default:
		return -EINVAL;
	}
}

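/* Point the TTC rules of all aRFS traffic types back at their default
 * destinations, so received traffic no longer flows through the aRFS tables.
 */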
static int arfs_disable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	int err, i;

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		/* Modify the ttc rules' destination back to their default */
		err = mlx5_ttc_fwd_default_dest(ttc, arfs_get_tt(i));
		if (err) {
			fs_err(fs,
			       "%s: modify ttc[%d] default destination failed, err(%d)\n",
			       __func__, arfs_get_tt(i), err);
			return err;
		}
	}
	return 0;
}

static void arfs_del_rules(struct mlx5e_flow_steering *fs);

int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
{
	/* When moving to switchdev mode, fs->arfs is freed by the
	 * mlx5e_nic_profile cleanup_rx callback and is not recreated when
	 * mlx5e_uplink_rep_profile is loaded, as mlx5e_create_flow_steering()
	 * is not called by the uplink_rep profile init_rx callback. Thus, if
	 * ntuple is set, the switchdev transition enters this function with
	 * fs->arfs nullified.
	 */
	if (!mlx5e_fs_get_arfs(fs))
		return 0;

	arfs_del_rules(fs);

	return arfs_disable(fs);
}

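/* Redirect the TTC rules of all aRFS traffic types to the corresponding aRFS
 * flow tables. On failure, the already-redirected rules are rolled back to
 * their default destinations.
 */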
int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct mlx5_flow_destination dest = {};
	int err, i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		dest.ft = arfs->arfs_tables[i].ft.t;
		/* Modify ttc rules destination to point to the aRFS FTs */
		err = mlx5_ttc_fwd_dest(ttc, arfs_get_tt(i), &dest);
		if (err) {
			fs_err(fs, "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
			       __func__, arfs_get_tt(i), err);
			arfs_disable(fs);
			return err;
		}
	}
	return 0;
}

static void arfs_destroy_table(struct arfs_table *arfs_t)
{
	mlx5_del_flow_rules(arfs_t->default_rule);
	mlx5e_destroy_flow_table(&arfs_t->ft);
}

static void _mlx5e_cleanup_tables(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	int i;

	arfs_del_rules(fs);
	destroy_workqueue(arfs->wq);
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		if (!IS_ERR_OR_NULL(arfs->arfs_tables[i].ft.t))
			arfs_destroy_table(&arfs->arfs_tables[i]);
	}
}

void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);

	if (!ntuple)
		return;

	_mlx5e_cleanup_tables(fs);
	mlx5e_fs_set_arfs(fs, NULL);
	kvfree(arfs);
}

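/* Install the catch-all rule of an aRFS table: a NULL spec matches any packet
 * that missed the per-flow rules and forwards it to the RSS TIR of the
 * corresponding traffic type, preserving the default RSS behaviour.
 */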
static int arfs_add_default_rule(struct mlx5e_flow_steering *fs,
				 struct mlx5e_rx_res *rx_res,
				 enum arfs_type type)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct arfs_table *arfs_t = &arfs->arfs_tables[type];
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	enum mlx5_traffic_types tt;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	tt = arfs_get_tt(type);
	if (tt == -EINVAL) {
		fs_err(fs, "%s: bad arfs_type: %d\n", __func__, type);
		return -EINVAL;
	}

	/* FIXME: Must use mlx5_ttc_get_default_dest(),
	 * but can't since the TTC default is not set up yet!
	 */
	dest.tir_num = mlx5e_rx_res_get_tirn_rss(rx_res, tt);
	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
						   &flow_act,
						   &dest, 1);
	if (IS_ERR(arfs_t->default_rule)) {
		err = PTR_ERR(arfs_t->default_rule);
		arfs_t->default_rule = NULL;
		fs_err(fs, "%s: add rule failed, arfs type=%d\n", __func__, type);
	}

	return err;
}

#define MLX5E_ARFS_NUM_GROUPS	2
#define MLX5E_ARFS_GROUP1_SIZE	(BIT(16) - 1)
#define MLX5E_ARFS_GROUP2_SIZE	BIT(0)
#define MLX5E_ARFS_TABLE_SIZE	(MLX5E_ARFS_GROUP1_SIZE +\
				 MLX5E_ARFS_GROUP2_SIZE)
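/* Create the two flow groups of an aRFS table: group 1 (~64K entries) matches
 * ethertype, L4 ports and IP addresses for the per-flow rules, while group 2
 * (a single entry, no match criteria) holds the catch-all default rule.
 */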
static int arfs_create_groups(struct mlx5e_flow_table *ft,
			      enum arfs_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in || !ft->g) {
		kfree(ft->g);
		kvfree(in);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
				       outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV6_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
		break;
	case ARFS_IPV4_UDP:
	case ARFS_IPV6_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV4_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		break;
	case ARFS_IPV6_TCP:
	case ARFS_IPV6_UDP:
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
out:
	kvfree(in);

	return err;
}

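/* Create one aRFS flow table (at MLX5E_ARFS_FT_LEVEL under the NIC prio),
 * together with its flow groups and its catch-all default rule.
 */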
static int arfs_create_table(struct mlx5e_flow_steering *fs,
			     struct mlx5e_rx_res *rx_res,
			     enum arfs_type type)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_ARFS_TABLE_SIZE;
	ft_attr.level = MLX5E_ARFS_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = arfs_create_groups(ft, type);
	if (err)
		goto err;

	err = arfs_add_default_rule(fs, rx_res, type);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

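/* Entry point used when ntuple filtering is enabled: allocate the aRFS
 * context and its workqueue, and create one flow table per aRFS traffic type.
 */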
int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
			     struct mlx5e_rx_res *rx_res, bool ntuple)
{
	struct mlx5e_arfs_tables *arfs;
	int err = -ENOMEM;
	int i;

	if (!ntuple)
		return 0;

	arfs = kvzalloc(sizeof(*arfs), GFP_KERNEL);
	if (!arfs)
		return -ENOMEM;

	spin_lock_init(&arfs->arfs_lock);
	arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
	if (!arfs->wq)
		goto err;

	mlx5e_fs_set_arfs(fs, arfs);

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		err = arfs_create_table(fs, rx_res, i);
		if (err)
			goto err_des;
	}
	return 0;

err_des:
	_mlx5e_cleanup_tables(fs);
err:
	mlx5e_fs_set_arfs(fs, NULL);
	kvfree(arfs);
	return err;
}

#define MLX5E_ARFS_EXPIRY_QUOTA 60

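/* Expiry scan: collect up to MLX5E_ARFS_EXPIRY_QUOTA rules whose work is idle
 * and which rps_may_expire_flow() reports as no longer needed, then delete
 * their hardware rules and free them outside the spinlock.
 */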
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_rule *arfs_rule;
	struct hlist_node *htmp;
	HLIST_HEAD(del_list);
	int quota = 0;
	int i;
	int j;

	spin_lock_bh(&arfs->arfs_lock);
	mlx5e_for_each_arfs_rule(arfs_rule, htmp, arfs->arfs_tables, i, j) {
		if (!work_pending(&arfs_rule->arfs_work) &&
		    rps_may_expire_flow(priv->netdev,
					arfs_rule->rxq, arfs_rule->flow_id,
					arfs_rule->filter_id)) {
			hlist_del_init(&arfs_rule->hlist);
			hlist_add_head(&arfs_rule->hlist, &del_list);
			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
				break;
		}
	}
	spin_unlock_bh(&arfs->arfs_lock);
	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
		if (arfs_rule->rule) {
			mlx5_del_flow_rules(arfs_rule->rule);
			priv->channel_stats[arfs_rule->rxq]->rq.arfs_expired++;
		}
		hlist_del(&arfs_rule->hlist);
		kfree(arfs_rule);
	}
}

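/* Remove all aRFS rules: detach every rule from its hash bucket under the
 * lock, then cancel its work and delete its hardware rule outside the lock.
 */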
static void arfs_del_rules(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct hlist_node *htmp;
	struct arfs_rule *rule;
	HLIST_HEAD(del_list);
	int i;
	int j;

	spin_lock_bh(&arfs->arfs_lock);
	mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
		hlist_del_init(&rule->hlist);
		hlist_add_head(&rule->hlist, &del_list);
	}
	spin_unlock_bh(&arfs->arfs_lock);

	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
		cancel_work_sync(&rule->arfs_work);
		if (rule->rule)
			mlx5_del_flow_rules(rule->rule);
		hlist_del(&rule->hlist);
		kfree(rule);
	}
}

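/* Pick the hash bucket for a flow from its source/destination port pair. */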
static struct hlist_head *
arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
		 __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);

	bucket_idx = hash_long(l, ARFS_HASH_SHIFT);

	return &arfs_t->rules_hash[bucket_idx];
}

static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
					 u8 ip_proto, __be16 etype)
{
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV4_TCP];
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV4_UDP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV6_TCP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV6_UDP];

	return NULL;
}

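/* Build a flow spec from the rule's 5-tuple (ethertype, L4 ports and IP
 * addresses) and add a hardware rule that forwards the flow to the direct TIR
 * of the requested RX queue.
 */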
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
					      struct arfs_rule *arfs_rule)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_tuple *tuple = &arfs_rule->tuple;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct arfs_table *arfs_table;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
		err = -ENOMEM;
		goto out;
	}
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
		 ntohs(tuple->etype));
	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
	if (!arfs_table) {
		WARN_ONCE(1, "arfs table does not exist for etype %u and ip_proto %u\n",
			  tuple->etype, tuple->ip_proto);
		err = -EINVAL;
		goto out;
	}

	ft = arfs_table->ft.t;
	if (tuple->ip_proto == IPPROTO_TCP) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
			 ntohs(tuple->src_port));
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
			 ntohs(tuple->src_port));
	}
	if (tuple->etype == htons(ETH_P_IP)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &tuple->src_ipv4,
		       4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &tuple->dst_ipv4,
		       4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &tuple->src_ipv6,
		       16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &tuple->dst_ipv6,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
	}
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
		netdev_dbg(priv->netdev,
			   "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
			   __func__, arfs_rule->filter_id, arfs_rule->rxq,
			   tuple->ip_proto, err);
	}

out:
	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

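/* Re-point an existing aRFS rule at the direct TIR of a new RX queue. */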
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
				struct mlx5_flow_handle *rule, u16 rxq)
{
	struct mlx5_flow_destination dst = {};
	int err = 0;

	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq);
	err = mlx5_modify_rule_destination(rule, &dst, NULL);
	if (err) {
		priv->channel_stats[rxq]->rq.arfs_err++;
		netdev_warn(priv->netdev,
			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
	}
}

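/* Deferred work: drop the rule if the netdev is not opened, otherwise install
 * the hardware rule for a new entry or re-point an existing one at the rule's
 * current RX queue, then run the expiry scan.
 */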
static void arfs_handle_work(struct work_struct *work)
{
	struct arfs_rule *arfs_rule = container_of(work,
						   struct arfs_rule,
						   arfs_work);
	struct mlx5e_priv *priv = arfs_rule->priv;
	struct mlx5e_arfs_tables *arfs;
	struct mlx5_flow_handle *rule;

	arfs = mlx5e_fs_get_arfs(priv->fs);
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		spin_lock_bh(&arfs->arfs_lock);
		hlist_del(&arfs_rule->hlist);
		spin_unlock_bh(&arfs->arfs_lock);

		mutex_unlock(&priv->state_lock);
		kfree(arfs_rule);
		goto out;
	}
	mutex_unlock(&priv->state_lock);

	if (!arfs_rule->rule) {
		rule = arfs_add_rule(priv, arfs_rule);
		if (IS_ERR(rule))
			goto out;
		arfs_rule->rule = rule;
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_add++;
	} else {
		arfs_modify_rule_rq(priv, arfs_rule->rule,
				    arfs_rule->rxq);
	}
out:
	arfs_may_expire_flow(priv);
}

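/* Allocate and hash a new aRFS rule. Called from mlx5e_rx_flow_steer() with
 * arfs_lock held in softirq context, hence the GFP_ATOMIC allocation.
 */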
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
					 struct arfs_table *arfs_t,
					 const struct flow_keys *fk,
					 u16 rxq, u32 flow_id)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_rule *rule;
	struct arfs_tuple *tuple;

	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
	if (!rule) {
		priv->channel_stats[rxq]->rq.arfs_err++;
		return NULL;
	}

	rule->priv = priv;
	rule->rxq = rxq;
	INIT_WORK(&rule->arfs_work, arfs_handle_work);

	tuple = &rule->tuple;
	tuple->etype = fk->basic.n_proto;
	tuple->ip_proto = fk->basic.ip_proto;
	if (tuple->etype == htons(ETH_P_IP)) {
		tuple->src_ipv4 = fk->addrs.v4addrs.src;
		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
	} else {
		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}
	tuple->src_port = fk->ports.src;
	tuple->dst_port = fk->ports.dst;

	rule->flow_id = flow_id;
	rule->filter_id = arfs->last_filter_id++ % RPS_NO_FILTER;

	hlist_add_head(&rule->hlist,
		       arfs_hash_bucket(arfs_t, tuple->src_port,
					tuple->dst_port));
	return rule;
}

static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
{
	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
		return false;
	if (tuple->etype != fk->basic.n_proto)
		return false;
	if (tuple->etype == htons(ETH_P_IP))
		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
	if (tuple->etype == htons(ETH_P_IPV6))
		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
			       sizeof(struct in6_addr)) &&
		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
			       sizeof(struct in6_addr));
	return false;
}

static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
					const struct flow_keys *fk)
{
	struct arfs_rule *arfs_rule;
	struct hlist_head *head;

	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
	hlist_for_each_entry(arfs_rule, head, hlist) {
		if (arfs_cmp(&arfs_rule->tuple, fk))
			return arfs_rule;
	}

	return NULL;
}

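/* .ndo_rx_flow_steer callback: dissect the skb, look up (or allocate) the
 * matching aRFS rule and queue work to (re)program the hardware. Returns the
 * filter ID that the stack later passes back via rps_may_expire_flow(), or a
 * negative errno for unsupported (non-IP or encapsulated) traffic.
 */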
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_arfs_tables *arfs;
	struct arfs_rule *arfs_rule;
	struct arfs_table *arfs_t;
	struct flow_keys fk;

	arfs = mlx5e_fs_get_arfs(priv->fs);
	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
	if (!arfs_t)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&arfs->arfs_lock);
	arfs_rule = arfs_find_rule(arfs_t, &fk);
	if (arfs_rule) {
		if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
			spin_unlock_bh(&arfs->arfs_lock);
			return arfs_rule->filter_id;
		}

		priv->channel_stats[rxq_index]->rq.arfs_request_in++;
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_request_out++;
		arfs_rule->rxq = rxq_index;
	} else {
		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
		if (!arfs_rule) {
			spin_unlock_bh(&arfs->arfs_lock);
			return -ENOMEM;
		}
	}
	queue_work(arfs->wq, &arfs_rule->arfs_work);
	spin_unlock_bh(&arfs->arfs_lock);
	return arfs_rule->filter_id;
}