/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#include "eswitch.h"

#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)\

#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
	.def_miss_action = def_miss_act,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					  FS_CAP(flow_table_properties_nic_receive.modify_root), \
					  FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					  FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define FS_CHAINING_CAPS_EGRESS                                                \
	FS_REQUIRED_CAPS(                                                      \
		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en),     \
		FS_CAP(flow_table_properties_nic_transmit.modify_root),        \
		FS_CAP(flow_table_properties_nic_transmit                      \
			       .identified_miss_table_mode),                   \
		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 5
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS  1
#define KERNEL_NIC_TC_NUM_LEVELS 2

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 1
#define OFFLOADS_NUM_PRIOS 1
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)

struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
	enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]){
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
						  OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
						  KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
						  LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
						  ANCHOR_NUM_LEVELS))),
	}
};

static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 1,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};

#define RDMA_RX_BYPASS_PRIO 0
#define RDMA_RX_KERNEL_PRIO 1
static struct init_tree_node rdma_rx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 2,
	.children = (struct init_tree_node[]) {
		[RDMA_RX_BYPASS_PRIO] =
		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		[RDMA_RX_KERNEL_PRIO] =
		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
				ADD_MULTIPLE_PRIO(1, 1))),
	}
};

enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = FIELD_SIZEOF(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case: the FTE must
 * stay locked for the whole deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

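/* Every fs_node starts with a single reference. del_hw_func tears the
 * object down in firmware; del_sw_func frees the software state. Both
 * are invoked from tree_put_node() when the last reference is dropped.
 */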
static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* A node without a parent is itself the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (node) {
		if (!locked)
			down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
}

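/* Drop a reference; on the final put, delete the object in hardware
 * first, then unlink and free the software node under the parent's
 * write lock, and finally release the reference this node held on its
 * parent (which may cascade the teardown upwards).
 */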
static void tree_put_node(struct fs_node *node, bool locked)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			/* Only the root namespace has no parent, and
			 * for it we just need to free the node.
			 */
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
			if (node->del_sw_func)
				node->del_sw_func(node);
			up_write_ref_node(parent_node, locked);
		} else {
			kfree(node);
		}
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node, locked);
}

static int tree_remove_node(struct fs_node *node, bool locked)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node, locked);
	return 0;
}

static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	trace_mlx5_fs_del_ft(ft);

	if (node->active) {
		err = root->cmds->destroy_flow_table(root, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
	kfree(ft);
}

static void modify_fte(struct fs_fte *fte)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&fte->node);

	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
	fte->modify_mask = 0;
}

static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct fs_fte *fte;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		goto out;
	}

	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	}
out:
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(root, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
		node->active = 0;
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_simple_remove(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				const struct mlx5_flow_spec *spec,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;
	fte->flow_context = spec->flow_context;

	tree_init_node(&fte->node, NULL, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						const void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       const void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->max_fte = max_fte;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, we search for the first flow table in the
 * root sub-tree starting from start (closest from the right);
 * otherwise we search for the last flow table in the root sub-tree
 * up to start (closest from the left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false, return the first flow table in the next priority
 * after prio in the tree; otherwise return the last flow table in the
 * previous priority before prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

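/* Re-point every flow table in prio to ft as its next (miss) table.
 * If any but the first modify command fails, the chain is left half
 * updated and the driver is out of sync with firmware, hence the
 * WARN_ON below.
 */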
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int i = 0;
	int err;

	fs_for_each_ft(iter, prio) {
		i++;
		err = root->cmds->modify_flow_table(root, iter, ft);
		if (err) {
			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
				       iter->id);
			/* The driver is out of sync with the FW */
			if (i > 1)
				WARN_ON(true);
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

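/* If the new table sits at a lower level than the current root flow
 * table, make it the new root: update the root for every underlay QPN
 * registered on this namespace (or once with QPN 0 if none are).
 */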
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}

/* Modify/set FWD rules that point at old_next_ft to point at new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	if (list_empty(&prio->node.children)) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	struct mlx5_flow_table *next_ft = NULL;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (ft_attr->level >= fs_prio->num_levels) {
		err = -ENOSPC;
		goto unlock_root;
	}
	/* The input level is relative to the start of the
	 * priority's level range.
	 */
	ft_attr->level += fs_prio->start_level;
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	next_ft = find_next_chained_ft(fs_prio);
	ft->def_miss_action = ns->def_miss_action;
	err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
	if (err)
		goto free_ft;

	err = connect_flow_table(root->dev, ft, fs_prio);
	if (err)
		goto destroy_ft;
	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	tree_add_node(&ft->node, &fs_prio->node);
	list_add_flow_table(ft, fs_prio);
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}

struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     int prio, int max_fte,
						     u32 level, u16 vport)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = max_fte;
	ft_attr.level   = level;
	ft_attr.prio    = prio;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio  = prio;
	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    int prio,
				    int num_flow_table_entries,
				    int max_num_groups,
				    u32 level,
				    u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	if (max_num_groups > num_flow_table_entries)
		return ERR_PTR(-EINVAL);

	ft_attr.max_fte = num_flow_table_entries;
	ft_attr.prio    = prio;
	ft_attr.level   = level;
	ft_attr.flags   = flags;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ft;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;
	/* We save room for flow groups in addition to max_num_groups */
	ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);

	return ft;
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);

struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

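/* Build a handle covering one rule per destination. Destinations that
 * already exist on the FTE are reused (their refcount is bumped);
 * otherwise a new rule is allocated and *modify_mask accumulates the
 * FTE fields that firmware must be told about.
 */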
static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to the dests list: flow tables must be kept
		 * at the end of the list so that forward-to-next-prio
		 * rules keep working.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fte->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

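/* Scan the groups (kept sorted by start_index) for a gap large enough
 * for a new autogroup, and insert it there. Once the requested number
 * of groups exists, further groups are created with a single entry.
 */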
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     const struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	struct mlx5_flow_group *fg;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		group_size = ft->autogroup.group_size;

	/* ft->max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > ft->max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	if (group_size == ft->autogroup.group_size)
		ft->autogroup.num_groups++;

out:
	return fg;
}

static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		     d1->vport.num == d2->vport.num &&
		     d1->vport.flags == d2->vport.flags &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
		      (d1->vport.pkt_reformat->id ==
		       d2->vport.pkt_reformat->id) : true)) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte,
				  const struct mlx5_flow_context *flow_context,
				  const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
	    fte->flow_context.flow_tag != flow_context->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->flow_context.flow_tag,
			       flow_context->flow_tag);
		return -EEXIST;
	}

	return 0;
}

static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    const struct mlx5_flow_spec *spec,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

static bool counter_is_valid(u32 action)
{
	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  u32 action,
			  struct mlx5_flow_table *ft)
{
	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level)))
		return false;
	return true;
}

struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

struct match_list_head {
	struct list_head  list;
	struct match_list first;
};

static void free_match_list(struct match_list_head *head)
{
	if (!list_empty(&head->list)) {
		struct match_list *iter, *match_tmp;

		list_del(&head->first.list);
		tree_put_node(&head->first.g->node, false);
		list_for_each_entry_safe(iter, match_tmp, &head->list,
					 list) {
			tree_put_node(&iter->g->node, false);
			list_del(&iter->list);
			kfree(iter);
		}
	}
}

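/* Walk the flow group hash under RCU and take a reference on every
 * group whose match_criteria equals the spec's. The caller must later
 * release the list with free_match_list().
 */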
static int build_match_list(struct match_list_head *match_head,
			    struct mlx5_flow_table *ft,
			    const struct mlx5_flow_spec *spec)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which have a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* We are in an RCU read-side (atomic) section, so we can't
	 * execute FW commands here.
	 */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (likely(list_empty(&match_head->list))) {
			if (!tree_get_node(&g->node))
				continue;
			match_head->first.g = g;
			list_add_tail(&match_head->first.list,
				      &match_head->list);
			continue;
		}

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			free_match_list(match_head);
			err = -ENOMEM;
			goto out;
		}
		if (!tree_get_node(&g->node)) {
			kfree(curr_match);
			continue;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
out:
	rcu_read_unlock();
	return err;
}

static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}

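/* Look up an FTE by match value inside group g. On success the FTE is
 * returned referenced and write-locked; the group lock (read or write,
 * per take_write) is only held for the duration of the lookup.
 */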
static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  const u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}
	if (!fte_tmp->node.active) {
		tree_put_node(&fte_tmp->node, false);
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node, false);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}

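/* Try to attach the rule to one of the matched flow groups: first look
 * for an existing FTE with the same match value, then try to insert a
 * fresh FTE into any group with free space. Group/table version
 * counters detect concurrent modifications and trigger a retry
 * (-EAGAIN) or a rescan.
 */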
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       const struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64 version;
	int err;

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

search_again_locked:
	version = matched_fgs_get_version(match_head);
	if (flow_act->flags & FLOW_ACT_NO_APPEND)
		goto skip_search;
	/* Try to find a fg that already contains a matching fte */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
		if (!fte_tmp)
			continue;
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
		up_write_ref_node(&fte_tmp->node, false);
		tree_put_node(&fte_tmp->node, false);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

skip_search:
	/* No group with a matching fte was found, or we skipped the
	 * search. Try to add a new fte to any matching fg.
	 */

	/* Check the ft version, for the case that a new flow group
	 * was added while the fgs weren't locked.
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version, for the case that a new FTE with the
	 * same values was added while the fgs weren't locked.
	 */
	if (version != matched_fgs_get_version(match_head)) {
		take_write = true;
		goto search_again_locked;
	}

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		if (!g->node.active)
			continue;

		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);

		err = insert_fte(g, fte);
		if (err) {
			up_write_ref_node(&g->node, false);
			if (err == -ENOSPC)
				continue;
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		up_write_ref_node(&g->node, false);
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node, false);
		tree_put_node(&fte->node, false);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}

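/* Add a rule under the table's read lock first; only if a new group or
 * FTE is needed do we escalate to the table's write lock, re-checking
 * the table version after every lock upgrade.
 */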
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     const struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list_head match_head;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act->action, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

1780 /* Collect all fgs which has a matching match_criteria */
1781 err = build_match_list(&match_head, ft, spec);
1782 if (err) {
1783 if (take_write)
1784 up_write_ref_node(&ft->node, false);
1785 else
1786 up_read_ref_node(&ft->node);
1787 return ERR_PTR(err);
1788 }
1789
1790 if (!take_write)
1791 up_read_ref_node(&ft->node);
1792
1793 rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
1794 dest_num, version);
1795 free_match_list(&match_head);
1796 if (!IS_ERR(rule) ||
1797 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
1798 if (take_write)
1799 up_write_ref_node(&ft->node, false);
1800 return rule;
1801 }
1802
1803 if (!take_write) {
1804 nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1805 take_write = true;
1806 }
1807
1808 if (PTR_ERR(rule) == -EAGAIN ||
1809 version != atomic_read(&ft->node.version))
1810 goto search_again_locked;
1811
1812 g = alloc_auto_flow_group(ft, spec);
1813 if (IS_ERR(g)) {
1814 rule = ERR_CAST(g);
1815 up_write_ref_node(&ft->node, false);
1816 return rule;
1817 }
1818
1819 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1820 up_write_ref_node(&ft->node, false);
1821
1822 err = create_auto_flow_group(ft, g);
1823 if (err)
1824 goto err_release_fg;
1825
1826 fte = alloc_fte(ft, spec, flow_act);
1827 if (IS_ERR(fte)) {
1828 err = PTR_ERR(fte);
1829 goto err_release_fg;
1830 }
1831
1832 err = insert_fte(g, fte);
1833 if (err) {
1834 kmem_cache_free(steering->ftes_cache, fte);
1835 goto err_release_fg;
1836 }
1837
1838 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1839 up_write_ref_node(&g->node, false);
1840 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1841 up_write_ref_node(&fte->node, false);
1842 tree_put_node(&fte->node, false);
1843 tree_put_node(&g->node, false);
1844 return rule;
1845
1846 err_release_fg:
1847 up_write_ref_node(&g->node, false);
1848 tree_put_node(&g->node, false);
1849 return ERR_PTR(err);
1850 }
1851
fwd_next_prio_supported(struct mlx5_flow_table * ft)1852 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
1853 {
1854 return ((ft->type == FS_FT_NIC_RX) &&
1855 (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
1856 }
1857
1858 struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table * ft,const struct mlx5_flow_spec * spec,struct mlx5_flow_act * flow_act,struct mlx5_flow_destination * dest,int num_dest)1859 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1860 const struct mlx5_flow_spec *spec,
1861 struct mlx5_flow_act *flow_act,
1862 struct mlx5_flow_destination *dest,
1863 int num_dest)
1864 {
1865 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1866 struct mlx5_flow_destination gen_dest = {};
1867 struct mlx5_flow_table *next_ft = NULL;
1868 struct mlx5_flow_handle *handle = NULL;
1869 u32 sw_action = flow_act->action;
1870 struct fs_prio *prio;
1871
1872 fs_get_obj(prio, ft->node.parent);
1873 if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1874 if (!fwd_next_prio_supported(ft))
1875 return ERR_PTR(-EOPNOTSUPP);
1876 if (num_dest)
1877 return ERR_PTR(-EINVAL);
1878 mutex_lock(&root->chain_lock);
1879 next_ft = find_next_chained_ft(prio);
1880 if (next_ft) {
1881 gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1882 gen_dest.ft = next_ft;
1883 dest = &gen_dest;
1884 num_dest = 1;
1885 flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1886 } else {
1887 mutex_unlock(&root->chain_lock);
1888 return ERR_PTR(-EOPNOTSUPP);
1889 }
1890 }
1891
1892 handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
1893
1894 if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1895 if (!IS_ERR_OR_NULL(handle) &&
1896 (list_empty(&handle->rule[0]->next_ft))) {
1897 mutex_lock(&next_ft->lock);
1898 list_add(&handle->rule[0]->next_ft,
1899 &next_ft->fwd_rules);
1900 mutex_unlock(&next_ft->lock);
1901 handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1902 }
1903 mutex_unlock(&root->chain_lock);
1904 }
1905 return handle;
1906 }
1907 EXPORT_SYMBOL(mlx5_add_flow_rules);
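
/*
 * Illustrative usage sketch (not part of this file): a typical caller
 * allocates a spec, sets match criteria and value, picks an action and a
 * destination, then adds the rule. Field names follow the public mlx5
 * flow API; the TIR number is a made-up placeholder.
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *	};
 *	struct mlx5_flow_destination dst = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,	// hypothetical TIR number
 *	};
 *	struct mlx5_flow_spec *spec;
 *	struct mlx5_flow_handle *rule;
 *
 *	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *	if (!spec)
 *		return -ENOMEM;
 *	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 *	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 *			 outer_headers.ip_protocol);
 *	MLX5_SET(fte_match_param, spec->match_value,
 *		 outer_headers.ip_protocol, IPPROTO_UDP);
 *	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dst, 1);
 *	kvfree(spec);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 */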

void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	struct fs_fte *fte;
	int i;

	/* In order to consolidate the HW changes we lock the FTE against
	 * other changes and take an extra reference, so that the "del"
	 * callbacks of the FTE are not invoked while the handle's rules
	 * are removed; they are handled here instead.
	 * The rules are removed under the locked FTE.
	 * After removing all of the handle's rules: if rules remain on
	 * the FTE, we only need to modify the FTE in FW, then unlock it
	 * and drop the reference taken above.
	 * Otherwise the FTE should be deleted: first delete it in FW,
	 * then unlock the FTE and call tree_put_node() on it, which
	 * performs the final reference drop as well as the required
	 * handling of its parent.
	 */
	fs_get_obj(fte, handle->rule[0]->node.parent);
	down_write_ref_node(&fte->node, false);
	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node, true);
	if (fte->modify_mask && fte->dests_size) {
		modify_fte(fte);
		up_write_ref_node(&fte->node, false);
	} else {
		del_hw_fte(&fte->node);
		up_write(&fte->node.lock);
		tree_put_node(&fte->node, false);
	}
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);

/* Assuming prio->node.children (flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case the QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}

/* Connect the flow table from the previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	if (list_first_entry(&prio->node.children,
			     struct mlx5_flow_table,
			     node.list) != ft)
		return 0;

	next_ft = find_next_chained_ft(prio);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node, false))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node, false))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}

struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
						int n)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || !steering->fdb_sub_ns)
		return NULL;

	return steering->fdb_sub_ns[n];
}
EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);

struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio = 0;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		return NULL;
	default:
		break;
	}

	if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
		root_ns = steering->egress_root_ns;
	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_BYPASS_PRIO;
	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_KERNEL_PRIO;
	} else { /* Must be NIC RX */
		root_ns = steering->root_ns;
		prio = type;
	}

	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
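
/*
 * Illustrative usage sketch (not part of this file): looking up a
 * namespace and creating a table in it. The priority and max_fte values
 * below are made-up placeholders.
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	ft_attr.prio = 0;	// hypothetical priority
 *	ft_attr.max_fte = 64;	// hypothetical table size
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 */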

struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
							      enum mlx5_flow_namespace_type type,
							      int vport)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (steering->esw_egress_root_ns &&
		    steering->esw_egress_root_ns[vport])
			return &steering->esw_egress_root_ns[vport]->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (steering->esw_ingress_root_ns &&
		    steering->esw_ingress_root_ns[vport])
			return &steering->esw_ingress_root_ns[vport]->ns;
		else
			return NULL;
	default:
		return NULL;
	}
}

static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
				       unsigned int prio,
				       int num_levels,
				       enum fs_node_type type)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = type;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
					      unsigned int prio,
					      int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
}

static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
							int def_miss_act)
{
	struct mlx5_flow_namespace *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	ns->def_miss_action = def_miss_act;
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}

#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
			offset / 32)) >> \
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
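
/*
 * How GET_FLOW_TABLE_CAP extracts a capability bit: `offset` is a bit
 * offset into the big-endian flow table capability block (see FS_CAP /
 * __mlx5_bit_off). `offset / 32` selects the 32-bit word, `offset & 0x1f`
 * is the bit position counting from the MSB (mlx5 ifc fields are laid
 * out MSB first), and the shift moves that bit down to bit 0 before it
 * is masked. Worked example (illustrative numbers only): for offset = 35,
 * the word index is 35 / 32 = 1, the in-word position is 35 & 0x1f = 3,
 * so the word is shifted right by 32 - 1 - 3 = 28 and the capability
 * bit ends up in the least significant position.
 */
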
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}
	return true;
}

static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}
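
/*
 * Shape of the tree built by init_root_tree_recursive(), sketched for
 * orientation (the concrete prios and levels are driven by the static
 * init_tree_node tables such as root_fs; this layout is illustrative):
 *
 *	root namespace
 *	  prio 0 (e.g. a bypass prio, gated on FS_CHAINING_CAPS)
 *	    namespace
 *	      leaf prio(s) ...
 *	  prio 1 (e.g. a kernel prio)
 *	    namespace
 *	      leaf prio(s) ...
 *
 * Priorities are numbered per parent: each PRIO node consumes
 * num_leaf_prios slots and the following sibling continues from there.
 */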

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int i;
	struct mlx5_flow_namespace *fs_ns;
	int err;

	fs_get_obj(fs_ns, fs_parent_node);
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       &fs_ns->node,
					       init_node, i);
		if (err)
			return err;
	}
	return 0;
}

static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);

	/* Create the root namespace */
	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, NULL);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates the prio's start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio)
		/* This updates start_level and num_levels of the ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}
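
/*
 * Worked example for the level accumulation above (illustrative numbers
 * only): suppose the root namespace holds two prios, the first with
 * num_levels = 3 and the second unset but containing one sub-namespace
 * with two leaf prios of 2 levels each. Then prio 0 spans levels 0-2,
 * prio 1 starts at level 3 and inherits num_levels = 2 + 2 = 4 from its
 * children, so a third prio would start at level 7. Note that sibling
 * namespaces under one prio are alternative steering paths: each is laid
 * out from the same starting level, which is why set_prio_attrs_in_prio()
 * passes the same acc_level to every one of them.
 */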

#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level = ANCHOR_LEVEL;
	ft_attr.prio = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}

static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node, false);
		tree_remove_node(node, false);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_egress_root_ns)
		return;

	for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);

	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
}

static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
}

void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_egress_acls_root_ns(dev);
	cleanup_ingress_acls_root_ns(dev);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create a single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create a single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
	if (!steering->rdma_rx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_rx_root_fs,
			     &steering->rdma_rx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_rx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_rx_root_ns);
	steering->rdma_rx_root_ns = NULL;
	return err;
}

static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *maj_prio;
	struct fs_prio *min_prio;
	int levels;
	int chain;
	int prio;
	int err;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	steering->fdb_sub_ns = kcalloc(FDB_MAX_CHAIN + 1,
				       sizeof(*steering->fdb_sub_ns),
				       GFP_KERNEL);
	if (!steering->fdb_sub_ns)
		return -ENOMEM;

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
				  1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
					  FDB_FAST_PATH,
					  levels);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
		ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
		if (IS_ERR(ns)) {
			err = PTR_ERR(ns);
			goto out_err;
		}

		for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) {
			min_prio = fs_create_prio(ns, prio, 2);
			if (IS_ERR(min_prio)) {
				err = PTR_ERR(min_prio);
				goto out_err;
			}
		}

		steering->fdb_sub_ns[chain] = ns;
	}

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	steering->fdb_root_ns = NULL;
	return err;
}
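
/*
 * Sizing note for the FDB fast path above, assuming the values defined
 * in eswitch.h at the time of writing (FDB_MAX_CHAIN = 3 and
 * FDB_MAX_PRIO = 16; verify against your tree): the chained prio
 * reserves levels = 2 * 16 * (3 + 1) = 128 levels, enough for the
 * deepest chain namespace, which holds 16 * 4 = 64 prios of 2 levels
 * each. All chain namespaces start at the same level, so the
 * reservation must cover the largest of them rather than their sum.
 */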

static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns[vport])
		return -ENOMEM;

	/* Create a single prio */
	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns[vport])
		return -ENOMEM;

	/* Create a single prio */
	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int total_vports = mlx5_eswitch_get_total_vports(dev);
	int err;
	int i;

	steering->esw_egress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_egress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);
	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
	return err;
}

static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int total_vports = mlx5_eswitch_get_total_vports(dev);
	int err;
	int i;

	steering->esw_ingress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_ingress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
	return err;
}

static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
	if (!steering->egress_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &egress_root_fs,
			     &steering->egress_root_ns->ns.node);
	if (err)
		goto cleanup;
	set_prio_attrs(steering->egress_root_ns);
	return 0;
cleanup:
	cleanup_root_ns(steering->egress_root_ns);
	steering->egress_root_ns = NULL;
	return err;
}

int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering)
		return -ENOMEM;
	steering->dev = dev;
	dev->priv.steering = steering;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
			err = init_egress_acls_root_ns(dev);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
			err = init_ingress_acls_root_ns(dev);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
		err = init_rdma_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}

int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);

int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);

static struct mlx5_flow_root_namespace
*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_flow_namespace *ns;

	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
	else
		ns = mlx5_get_flow_namespace(dev, ns_type);
	if (!ns)
		return NULL;

	return find_root(&ns->node);
}

struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
						 u8 ns_type, u8 num_actions,
						 void *modify_actions)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_modify_hdr *modify_hdr;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
	if (!modify_hdr)
		return ERR_PTR(-ENOMEM);

	modify_hdr->ns_type = ns_type;
	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
					      modify_actions, modify_hdr);
	if (err) {
		kfree(modify_hdr);
		return ERR_PTR(err);
	}

	return modify_hdr;
}
EXPORT_SYMBOL(mlx5_modify_header_alloc);
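
/*
 * Illustrative usage sketch (not part of this file): building a single
 * "set TTL" action and allocating a modify header for it. The field
 * layout comes from set_action_in in mlx5_ifc.h; the namespace and TTL
 * value are made-up placeholders.
 *
 *	u8 action[MLX5_ST_SZ_BYTES(set_action_in)] = {};
 *	struct mlx5_modify_hdr *mh;
 *
 *	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
 *	MLX5_SET(set_action_in, action, field,
 *		 MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
 *	MLX5_SET(set_action_in, action, offset, 0);
 *	MLX5_SET(set_action_in, action, length, 8);
 *	MLX5_SET(set_action_in, action, data, 64);	// hypothetical TTL
 *	mh = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
 *				      1, action);
 *	if (IS_ERR(mh))
 *		return PTR_ERR(mh);
 *	// attach via flow_act.modify_hdr, then release with
 *	// mlx5_modify_header_dealloc(dev, mh);
 */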

void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
				struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, modify_hdr->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->modify_header_dealloc(root, modify_hdr);
	kfree(modify_hdr);
}
EXPORT_SYMBOL(mlx5_modify_header_dealloc);

struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
						     int reformat_type,
						     size_t size,
						     void *reformat_data,
						     enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_pkt_reformat *pkt_reformat;
	struct mlx5_flow_root_namespace *root;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
	if (!pkt_reformat)
		return ERR_PTR(-ENOMEM);

	pkt_reformat->ns_type = ns_type;
	pkt_reformat->reformat_type = reformat_type;
	err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
						reformat_data, ns_type,
						pkt_reformat);
	if (err) {
		kfree(pkt_reformat);
		return ERR_PTR(err);
	}

	return pkt_reformat;
}
EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
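
/*
 * Illustrative usage sketch (not part of this file): allocating an encap
 * context for a precomputed VXLAN header. encap_buf and encap_len stand
 * in for a caller-built L2-to-VXLAN header blob.
 *
 *	struct mlx5_pkt_reformat *pr;
 *
 *	pr = mlx5_packet_reformat_alloc(dev,
 *					MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
 *					encap_len, encap_buf,
 *					MLX5_FLOW_NAMESPACE_FDB);
 *	if (IS_ERR(pr))
 *		return PTR_ERR(pr);
 *	// attach via flow_act.pkt_reformat together with
 *	// MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT, then release with
 *	// mlx5_packet_reformat_dealloc(dev, pr);
 */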

void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
				  struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, pkt_reformat->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
	kfree(pkt_reformat);
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);

int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns)
{
	if (peer_ns && ns->mode != peer_ns->mode) {
		mlx5_core_err(ns->dev,
			      "Can't peer namespaces of different steering modes\n");
		return -EINVAL;
	}

	return ns->cmds->set_peer(ns, peer_ns);
}

/* This function should be called only at the init stage of the namespace.
 * It is not safe to call it while steering operations
 * are executed in the namespace.
 */
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode)
{
	struct mlx5_flow_root_namespace *root;
	const struct mlx5_flow_cmds *cmds;
	int err;

	root = find_root(&ns->node);
	if (&root->ns != ns)
		/* Can't set cmds on a non-root namespace */
		return -EINVAL;

	if (root->table_type != FS_FT_FDB)
		return -EOPNOTSUPP;

	if (root->mode == mode)
		return 0;

	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
		cmds = mlx5_fs_cmd_get_dr_cmds();
	else
		cmds = mlx5_fs_cmd_get_fw_cmds();
	if (!cmds)
		return -EOPNOTSUPP;

	err = cmds->create_ns(root);
	if (err) {
		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
			      err);
		return err;
	}

	root->cmds->destroy_ns(root);
	root->cmds = cmds;
	root->mode = mode;

	return 0;
}
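
/*
 * Illustrative usage sketch (not part of this file): switching the FDB
 * root namespace to software-managed steering (SMFS) before any table
 * exists in it, roughly what the eswitch code does when the devlink
 * flow_steering_mode parameter selects "smfs".
 *
 *	struct mlx5_flow_namespace *ns;
 *	int err;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	err = mlx5_flow_namespace_set_mode(ns, MLX5_FLOW_STEERING_MODE_SMFS);
 */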