/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"

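/* The macros below build a compile-time tree of struct init_tree_node
 * as nested designated initializers: each node's .children array is an
 * anonymous compound literal, and .ar_size is derived from the number
 * of variadic arguments via INIT_TREE_NODE_ARRAY_SIZE().
 */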
#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)\

#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
	.def_miss_action = def_miss_act,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define FS_CHAINING_CAPS_EGRESS                                                \
	FS_REQUIRED_CAPS(                                                      \
		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en),     \
		FS_CAP(flow_table_properties_nic_transmit.modify_root),        \
		FS_CAP(flow_table_properties_nic_transmit                      \
			       .identified_miss_table_mode),                   \
		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))

#define FS_CHAINING_CAPS_RDMA_TX                                                \
	FS_REQUIRED_CAPS(                                                       \
		FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
		FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root),    \
		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
			       .identified_miss_table_mode),                    \
		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
			       .flow_table_modify))

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 7
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS  1
#define KERNEL_NIC_TC_NUM_LEVELS 2

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 2
#define OFFLOADS_NUM_PRIOS 2
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)

#define KERNEL_TX_IPSEC_NUM_PRIOS  1
#define KERNEL_TX_IPSEC_NUM_LEVELS 1
#define KERNEL_TX_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)

struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

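/* Static description of the NIC RX steering tree: seven priorities
 * (bypass, LAG, offloads, ethtool, kernel, leftovers, anchor), chained
 * in this order. Priorities that need chaining between flow tables are
 * gated on FS_CHAINING_CAPS.
 */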
static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
	enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]){
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
						  OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
						  KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
						  LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
						  ANCHOR_NUM_LEVELS))),
	}
};

static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
#ifdef CONFIG_MLX5_IPSEC
	.ar_size = 2,
#else
	.ar_size = 1,
#endif
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
#ifdef CONFIG_MLX5_IPSEC
		ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
						  KERNEL_TX_IPSEC_NUM_LEVELS))),
#endif
	}
};

#define RDMA_RX_BYPASS_PRIO 0
#define RDMA_RX_KERNEL_PRIO 1
static struct init_tree_node rdma_rx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 2,
	.children = (struct init_tree_node[]) {
		[RDMA_RX_BYPASS_PRIO] =
		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		[RDMA_RX_KERNEL_PRIO] =
		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
				ADD_MULTIPLE_PRIO(1, 1))),
	}
};

static struct init_tree_node rdma_tx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 1,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_RDMA_TX,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};

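/* Lockdep annotation classes for the per-node rwsem: a child's lock may
 * be taken while its parent's (and grandparent's) lock is held, so each
 * nesting level gets its own class for down_{read,write}_nested().
 */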
enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

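/* FTEs are hashed by match value within their group; flow groups are
 * kept in an rhltable keyed by match mask, since multiple groups in a
 * table may share the same mask.
 */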
static const struct rhashtable_params rhash_fte = {
	.key_len = sizeof_field(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = sizeof_field(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (a destination) is a special case that
 * requires holding the FTE lock for the whole deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (node) {
		if (!locked)
			down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
}

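/* Drop a reference to the node. On the last put, tear it down in order:
 * destroy the HW object first, then unlink the node from its parent
 * (under the parent's lock) and free the SW state, and finally release
 * the reference the node held on its parent.
 */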
static void tree_put_node(struct fs_node *node, bool locked)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
		}
		node->del_sw_func(node);
		if (parent_node)
			up_write_ref_node(parent_node, locked);
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node, locked);
}

static int tree_remove_node(struct fs_node *node, bool locked)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node, locked);
	return 0;
}

static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool is_fwd_next_action(u32 action)
{
	return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or is garbage\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	trace_mlx5_fs_del_ft(ft);

	if (node->active) {
		err = root->cmds->destroy_flow_table(root, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	if (ft->node.parent) {
		fs_get_obj(prio, ft->node.parent);
		prio->num_ft--;
	}
	kfree(ft);
}

static void modify_fte(struct fs_fte *fte)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&fte->node);

	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
	fte->modify_mask = 0;
}

static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct fs_fte *fte;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (is_fwd_next_action(rule->sw_action)) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER  &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		goto out;
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
	    --fte->dests_size) {
		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
		goto out;
	}

	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	}
out:
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(root, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
		node->active = false;
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_free(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active &&
	    fg->max_ftes == ft->autogroup.group_size &&
	    fg->start_index < ft->autogroup.max_fte)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_free(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				const struct mlx5_flow_spec *spec,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
	fte->node.type =  FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;
	fte->flow_context = spec->flow_context;

	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						const void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type =  FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       const void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft  = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, search for the first flow table in the root
 * sub-tree starting from start (closest from the right); otherwise,
 * search for the last flow table in the root sub-tree up to start
 * (closest from the left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false then return the first flow table in next priority of
 * prio in the tree, else return the last flow table in the previous priority
 * of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
						struct mlx5_flow_act *flow_act)
{
	struct fs_prio *prio;
	bool next_ns;

	next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
	fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);

	return find_next_chained_ft(prio);
}

static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int err;

	fs_for_each_ft(iter, prio) {
		err = root->cmds->modify_flow_table(root, iter, ft);
		if (err) {
			mlx5_core_err(dev,
				      "Failed to modify flow table id %d, type %d, err %d\n",
				      iter->id, iter->type, err);
			/* The driver is out of sync with the FW */
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}

/* Modify/set FWD rules that point to old_next_ft to point to new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
		    iter->ft->ns == new_next_ft->ns)
			continue;

		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point to flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft, *first_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	first_ft = list_first_entry_or_null(&prio->node.children,
					    struct mlx5_flow_table, node.list);
	if (!first_ft || first_ft->level > ft->level) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

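/* Insert ft into the priority's list of tables, keeping the list sorted
 * by table level.
 */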
static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
	struct mlx5_flow_table *next_ft;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (!unmanaged) {
		/* The level is related to the
		 * priority level range.
		 */
		if (ft_attr->level >= fs_prio->num_levels) {
			err = -ENOSPC;
			goto unlock_root;
		}

		ft_attr->level += fs_prio->start_level;
	}

	/* The level is related to the
	 * priority level range.
	 */
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	next_ft = unmanaged ? ft_attr->next_ft :
			      find_next_chained_ft(fs_prio);
	ft->def_miss_action = ns->def_miss_action;
	ft->ns = ns;
	err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
	if (err)
		goto free_ft;

	if (!unmanaged) {
		err = connect_flow_table(root->dev, ft, fs_prio);
		if (err)
			goto destroy_ft;
	}

	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	if (!unmanaged) {
		tree_add_node(&ft->node, &fs_prio->node);
		list_add_flow_table(ft, fs_prio);
	} else {
		ft->node.root = fs_prio->node.root;
	}
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	rhltable_destroy(&ft->fgs_hash);
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

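/* Minimal usage sketch (illustrative only; error handling is trimmed,
 * and the namespace type, priority and table size below are assumptions,
 * not taken from a real caller):
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_BYPASS);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	ft_attr.prio = 0;
 *	ft_attr.max_fte = 1024;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 */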
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
EXPORT_SYMBOL(mlx5_create_flow_table);

struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
			     struct mlx5_flow_table_attr *ft_attr, u16 vport)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio  = prio;
	ft_attr.max_fte = 1;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

#define MAX_FLOW_GROUP_SIZE BIT(24)
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    struct mlx5_flow_table_attr *ft_attr)
{
	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
	int max_num_groups = ft_attr->autogroup.max_num_groups;
	struct mlx5_flow_table *ft;
	int autogroups_max_fte;

	ft = mlx5_create_flow_table(ns, ft_attr);
	if (IS_ERR(ft))
		return ft;

	autogroups_max_fte = ft->max_fte - num_reserved_entries;
	if (max_num_groups > autogroups_max_fte)
		goto err_validate;
	if (num_reserved_entries > ft->max_fte)
		goto err_validate;

	/* Align the number of groups according to the largest group size */
	if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
		max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;
	ft->autogroup.max_fte = autogroups_max_fte;
	/* We save place for flow groups in addition to max types */
	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);

	return ft;

err_validate:
	mlx5_destroy_flow_table(ft);
	return ERR_PTR(-ENOSPC);
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);

struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}
EXPORT_SYMBOL(mlx5_create_flow_group);

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

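/* Build a handle covering dest_num rules (at least one). Rules that
 * already point at an identical destination are reused by taking a
 * reference; for each genuinely new rule, *modify_mask accumulates the
 * FTE fields (destination list or flow counters) that must be pushed
 * to the device.
 */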
static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to the dests list - flow tables need to be at
		 * the end of the list so that forward-to-next-prio rules
		 * work.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fg->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

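/* Carve a new autogroup out of the table's FTE space: walk the groups,
 * which are kept sorted by start_index, looking for a gap large enough
 * for ft->autogroup.group_size entries, falling back to single-entry
 * groups once the planned number of groups has been allocated.
 */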
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table  *ft,
						     const struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	u32 max_fte = ft->autogroup.max_fte;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;
	struct mlx5_flow_group *fg;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		group_size = ft->autogroup.group_size;

	/* max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	if (group_size == ft->autogroup.group_size)
		ft->autogroup.num_groups++;

out:
	return fg;
}

static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index,   fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		     d1->vport.num == d2->vport.num &&
		     d1->vport.flags == d2->vport.flags &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
		      (d1->vport.pkt_reformat->id ==
		       d2->vport.pkt_reformat->id) : true)) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
		     d1->sampler_id == d2->sampler_id))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

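/* Two rules may share an FTE only if their packet-modifying and
 * terminating actions agree; a rule that only counts never conflicts.
 */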
static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR  |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte,
				  const struct mlx5_flow_context *flow_context,
				  const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
	    fte->flow_context.flow_tag != flow_context->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->flow_context.flow_tag,
			       flow_context->flow_tag);
		return -EEXIST;
	}

	return 0;
}

static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    const struct mlx5_flow_spec *spec,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

static bool counter_is_valid(u32 action)
{
	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  struct mlx5_flow_act *flow_act,
			  struct mlx5_flow_table *ft)
{
	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
	u32 action = flow_act->action;

	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (ignore_level) {
		if (ft->type != FS_FT_FDB &&
		    ft->type != FS_FT_NIC_RX)
			return false;

		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		    ft->type != dest->ft->type)
			return false;
	}

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level && !ignore_level)))
		return false;
	return true;
}

struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

static void free_match_list(struct match_list *head, bool ft_locked)
{
	struct match_list *iter, *match_tmp;

	list_for_each_entry_safe(iter, match_tmp, &head->list,
				 list) {
		tree_put_node(&iter->g->node, ft_locked);
		list_del(&iter->list);
		kfree(iter);
	}
}

static int build_match_list(struct match_list *match_head,
			    struct mlx5_flow_table *ft,
			    const struct mlx5_flow_spec *spec,
			    bool ft_locked)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
1674 	/* Collect all fgs which has a matching match_criteria */
1675 	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1676 	/* RCU is atomic, we can't execute FW commands here */
1677 	rhl_for_each_entry_rcu(g, tmp, list, hash) {
1678 		struct match_list *curr_match;
1679 
1680 		if (unlikely(!tree_get_node(&g->node)))
1681 			continue;
1682 
1683 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1684 		if (!curr_match) {
1685 			rcu_read_unlock();
1686 			free_match_list(match_head, ft_locked);
1687 			return -ENOMEM;
1688 		}
1689 		curr_match->g = g;
1690 		list_add_tail(&curr_match->list, &match_head->list);
1691 	}
1692 	rcu_read_unlock();
1693 	return err;
1694 }
1695 
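/* Sum the version counters of all matched flow groups. The caller samples
 * this sum before an unlocked FTE search and compares it afterwards; any
 * concurrent FTE insertion bumps a group version, so a changed sum means
 * the search must be retried under the write lock.
 */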
static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}

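/* Look up an FTE by match value inside @g. On success the FTE is returned
 * with its reference count elevated and its node write-locked; the caller
 * must up_write_ref_node() and tree_put_node() when done. The group lock is
 * taken for read or write according to @take_write and is always released
 * before returning.
 */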
static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  const u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}
	if (!fte_tmp->node.active) {
		tree_put_node(&fte_tmp->node, false);
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node, false);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}


static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       const struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64 version = 0;
	int err;

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

search_again_locked:
	if (flow_act->flags & FLOW_ACT_NO_APPEND)
		goto skip_search;
	version = matched_fgs_get_version(match_head);
	/* Try to find an fte with identical match value and attempt to update
	 * its action.
	 */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
		if (!fte_tmp)
			continue;
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
		/* No error check needed here, because insert_fte() is not called */
		up_write_ref_node(&fte_tmp->node, false);
		tree_put_node(&fte_tmp->node, false);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

skip_search:
	/* No group with matching fte found, or we skipped the search.
	 * Try to add a new fte to any matching fg.
	 */

	/* Check the ft version, in case a new flow group was added
	 * while the fgs weren't locked.
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version. If the version has changed, it could be that
	 * an FTE with the same match value was added while the fgs weren't
	 * locked.
	 */
	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
	    version != matched_fgs_get_version(match_head)) {
		take_write = true;
		goto search_again_locked;
	}

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);

		if (!g->node.active) {
			up_write_ref_node(&g->node, false);
			continue;
		}

		err = insert_fte(g, fte);
		if (err) {
			up_write_ref_node(&g->node, false);
			if (err == -ENOSPC)
				continue;
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		up_write_ref_node(&g->node, false);
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node, false);
		if (IS_ERR(rule))
			tree_put_node(&fte->node, false);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}

static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     const struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_handle *rule;
	struct match_list match_head;
	struct mlx5_flow_group *g;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which have a matching match_criteria */
	err = build_match_list(&match_head, ft, spec, take_write);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		else
			up_read_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head, take_write);
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		return rule;
	}

	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node, false);
		return rule;
	}

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte)) {
		up_write_ref_node(&ft->node, false);
		err = PTR_ERR(fte);
		goto err_alloc_fte;
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node, false);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	err = insert_fte(g, fte);
	if (err)
		goto err_release_fg;

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node, false);
	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
	up_write_ref_node(&fte->node, false);
	if (IS_ERR(rule))
		tree_put_node(&fte->node, false);
	tree_put_node(&g->node, false);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node, false);
	kmem_cache_free(steering->ftes_cache, fte);
err_alloc_fte:
	tree_put_node(&g->node, false);
	return ERR_PTR(err);
}

static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    const struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	static const struct mlx5_flow_spec zero_spec = {};
	struct mlx5_flow_destination *gen_dest = NULL;
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	int i;

	if (!spec)
		spec = &zero_spec;

	if (!is_fwd_next_action(sw_action))
		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (!fwd_next_prio_supported(ft))
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&root->chain_lock);
	next_ft = find_next_fwd_ft(ft, flow_act);
	if (!next_ft) {
		handle = ERR_PTR(-EOPNOTSUPP);
		goto unlock;
	}

	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
			   GFP_KERNEL);
	if (!gen_dest) {
		handle = ERR_PTR(-ENOMEM);
		goto unlock;
	}
	for (i = 0; i < num_dest; i++)
		gen_dest[i] = dest[i];
	gen_dest[i].type =
		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	gen_dest[i].ft = next_ft;
	dest = gen_dest;
	num_dest++;
	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
	if (IS_ERR(handle))
		goto unlock;

	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
		mutex_lock(&next_ft->lock);
		list_add(&handle->rule[num_dest - 1]->next_ft,
			 &next_ft->fwd_rules);
		mutex_unlock(&next_ft->lock);
		handle->rule[num_dest - 1]->sw_action = sw_action;
		handle->rule[num_dest - 1]->ft = ft;
	}
unlock:
	mutex_unlock(&root->chain_lock);
	kfree(gen_dest);
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
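
/* Usage sketch (illustrative only, not part of this file's logic): a typical
 * caller allocates a spec, picks an action and a destination, and pairs the
 * returned handle with mlx5_del_flow_rules() on teardown. The TIR number
 * below is a hypothetical example value.
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *	};
 *	struct mlx5_flow_destination dest = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *	struct mlx5_flow_spec *spec;
 *	struct mlx5_flow_handle *rule;
 *
 *	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *	if (!spec)
 *		return -ENOMEM;
 *	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	kvfree(spec);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_del_flow_rules(rule);
 */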

void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	struct fs_fte *fte;
	int i;

	/* In order to consolidate the HW changes we lock the FTE against
	 * other changes and increase its refcount, so that the FTE's "del"
	 * callbacks are not invoked while we remove the handle's rules under
	 * the locked FTE; we handle them here instead.
	 * After removing all the handle's rules, if there are remaining
	 * rules, it means we just need to modify the FTE in FW, and
	 * unlock/decrease the refcount we increased before.
	 * Otherwise, the FTE should be deleted. First delete the
	 * FTE in FW. Then unlock the FTE and proceed with tree_put_node of
	 * the FTE, which will handle the last decrease of the refcount, as
	 * well as the required handling of its parent.
	 */
	fs_get_obj(fte, handle->rule[0]->node.parent);
	down_write_ref_node(&fte->node, false);
	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node, true);
	if (fte->dests_size) {
		if (fte->modify_mask)
			modify_fte(fte);
		up_write_ref_node(&fte->node, false);
	} else if (list_empty(&fte->node.children)) {
		del_hw_fte(&fte->node);
		/* Avoid double call to del_hw_fte */
		fte->node.del_hw_func = NULL;
		up_write_ref_node(&fte->node, false);
		tree_put_node(&fte->node, false);
	}
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);

/* Assuming prio->node.children (flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}

/* When disconnecting a flow table, reconnect the flow tables of the
 * previous priority directly to the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	if (list_first_entry(&prio->node.children,
			     struct mlx5_flow_table,
			     node.list) != ft)
		return 0;

	next_ft = find_next_ft(ft);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
		err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node, false))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node, false))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}
EXPORT_SYMBOL(mlx5_destroy_flow_group);

struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
						int n)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || !steering->fdb_sub_ns)
		return NULL;

	return steering->fdb_sub_ns[n];
}
EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);

struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio = 0;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		return NULL;
	default:
		break;
	}

	if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
	    type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
		root_ns = steering->egress_root_ns;
		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_BYPASS_PRIO;
	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_KERNEL_PRIO;
	} else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
		root_ns = steering->rdma_tx_root_ns;
	} else { /* Must be NIC RX */
		root_ns = steering->root_ns;
		prio = type;
	}

	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);

struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
							      enum mlx5_flow_namespace_type type,
							      int vport)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (vport >= steering->esw_egress_acl_vports)
			return NULL;
		if (steering->esw_egress_root_ns &&
		    steering->esw_egress_root_ns[vport])
			return &steering->esw_egress_root_ns[vport]->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (vport >= steering->esw_ingress_acl_vports)
			return NULL;
		if (steering->esw_ingress_root_ns &&
		    steering->esw_ingress_root_ns[vport])
			return &steering->esw_ingress_root_ns[vport]->ns;
		else
			return NULL;
	default:
		return NULL;
	}
}

static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
				       unsigned int prio,
				       int num_levels,
				       enum fs_node_type type)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = type;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
					      unsigned int prio,
					      int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
}

static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
						       int def_miss_act)
{
	struct mlx5_flow_namespace	*ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	ns->def_miss_action = def_miss_act;
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}

#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) +	\
			offset / 32)) >>					\
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
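
/* Worked example of the bit arithmetic above (the offset is illustrative):
 * for a capability at bit offset 35, the macro reads the second big-endian
 * dword of the flow-table capability area (35 / 32 == 1), shifts it right
 * by 32 - 1 - (35 & 0x1f) == 28 so that bit 35 lands in bit 0, and masks
 * with FLOW_TABLE_BIT_SZ to extract the single capability bit.
 */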
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}
	return true;
}

static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int err;
	int i;

	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       fs_parent_node,
					       init_node, i);
		if (err)
			return err;
	}
	return 0;
}

static void del_sw_root_ns(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	fs_get_obj(ns, node);
	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
	mutex_destroy(&root_ns->chain_lock);
	kfree(node);
}

static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);

	/* Create the root namespace */
	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, del_sw_root_ns);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio) {
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);

		/* If this is a prio with chains, we can jump from one chain
		 * (namespace) to another, so we accumulate the levels.
		 */
		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
			acc_level = acc_level_ns;
	}

	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}
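
/* Example of the accumulation in set_prio_attrs_in_prio() (hypothetical
 * numbers): a chained prio holding two namespaces of three levels each lays
 * them out back to back and spans six levels, while a regular prio starts
 * every namespace at the same level and spans only three.
 */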

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}

#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level   = ANCHOR_LEVEL;
	ft_attr.prio    = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}

static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node, false);
		tree_remove_node(node, false);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->rdma_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	mlx5_ft_pool_destroy(dev);
	kfree(steering);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create a single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create a single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
	if (!steering->rdma_rx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_rx_root_fs,
			     &steering->rdma_rx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_rx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_rx_root_ns);
	steering->rdma_rx_root_ns = NULL;
	return err;
}

static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
	if (!steering->rdma_tx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_tx_root_fs,
			     &steering->rdma_tx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_tx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_tx_root_ns);
	steering->rdma_tx_root_ns = NULL;
	return err;
}

/* FT and tc chains are stored in the same array so we can re-use the
 * mlx5_get_fdb_sub_ns() and tc API for FT chains.
 * When creating a new ns for each chain, store it in the first available
 * slot. Assume tc chains are created and stored first and only then the FT
 * chain.
 */
static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct mlx5_flow_namespace *ns)
{
	int chain = 0;

	while (steering->fdb_sub_ns[chain])
		++chain;

	steering->fdb_sub_ns[chain] = ns;
}

static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct fs_prio *maj_prio)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *min_prio;
	int prio;

	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
	if (IS_ERR(ns))
		return PTR_ERR(ns);

	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
		if (IS_ERR(min_prio))
			return PTR_ERR(min_prio);
	}

	store_fdb_sub_ns_prio_chain(steering, ns);

	return 0;
}

static int create_fdb_chains(struct mlx5_flow_steering *steering,
			     int fs_prio,
			     int chains)
{
	struct fs_prio *maj_prio;
	int levels;
	int chain;
	int err;

	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
					  fs_prio,
					  levels);
	if (IS_ERR(maj_prio))
		return PTR_ERR(maj_prio);

	for (chain = 0; chain < chains; chain++) {
		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
		if (err)
			return err;
	}

	return 0;
}
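
/* Sizing example for the levels computation in create_fdb_chains() (the
 * constants here are hypothetical, not the driver's actual values): with
 * 2 levels per prio, 16 tc prios and 4 chains, the chained major prio must
 * reserve 2 * 16 * 4 = 128 table levels, since every chain gets its own
 * namespace with a full set of prios laid out back to back.
 */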

static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
{
	int err;

	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
				       sizeof(*steering->fdb_sub_ns),
				       GFP_KERNEL);
	if (!steering->fdb_sub_ns)
		return -ENOMEM;

	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
	if (err)
		return err;

	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
	if (err)
		return err;

	return 0;
}

static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *maj_prio;
	int err;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
				  1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}
	err = create_fdb_fast_path(steering);
	if (err)
		goto out_err;

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	/* We put this priority last, knowing that nothing will get here
	 * unless explicitly forwarded to. This is possible because the
	 * slow path tables have catch-all rules and nothing gets passed
	 * those tables.
	 */
	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	steering->fdb_root_ns = NULL;
	return err;
}

static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns[vport])
		return -ENOMEM;

	/* Create a single prio */
	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns[vport])
		return -ENOMEM;

	/* Create a single prio */
	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_egress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_egress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}
	steering->esw_egress_acl_vports = total_vports;
	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);
	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
	return err;
}

void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_egress_root_ns)
		return;

	for (i = 0; i < steering->esw_egress_acl_vports; i++)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);

	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
}

int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_ingress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_ingress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}
	steering->esw_ingress_acl_vports = total_vports;
	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
	return err;
}

void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < steering->esw_ingress_acl_vports; i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
}

static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
	if (!steering->egress_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &egress_root_fs,
			     &steering->egress_root_ns->ns.node);
	if (err)
		goto cleanup;
	set_prio_attrs(steering->egress_root_ns);
	return 0;
cleanup:
	cleanup_root_ns(steering->egress_root_ns);
	steering->egress_root_ns = NULL;
	return err;
}

int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	err = mlx5_ft_pool_init(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering) {
		err = -ENOMEM;
		goto err;
	}

	steering->dev = dev;
	dev->priv.steering = steering;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
		err = init_rdma_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
		err = init_rdma_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
	    MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}

int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
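
/* Callers are expected to pair this with mlx5_fs_remove_rx_underlay_qpn()
 * below: each underlay QPN added to the RX root flow table (typically an
 * IPoIB underlay QP) must be removed with the same QPN on teardown.
 */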

int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);

static struct mlx5_flow_root_namespace
*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_flow_namespace *ns;

	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
	else
		ns = mlx5_get_flow_namespace(dev, ns_type);
	if (!ns)
		return NULL;

	return find_root(&ns->node);
}

struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
						 u8 ns_type, u8 num_actions,
						 void *modify_actions)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_modify_hdr *modify_hdr;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
	if (!modify_hdr)
		return ERR_PTR(-ENOMEM);

	modify_hdr->ns_type = ns_type;
	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
					      modify_actions, modify_hdr);
	if (err) {
		kfree(modify_hdr);
		return ERR_PTR(err);
	}

	return modify_hdr;
}
EXPORT_SYMBOL(mlx5_modify_header_alloc);
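
/* Usage sketch (illustrative only): callers typically build the action array
 * with the set_action_in layout from mlx5_ifc and free the object with
 * mlx5_modify_header_dealloc(). The rewritten field and value below are
 * hypothetical example choices.
 *
 *	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
 *	struct mlx5_modify_hdr *mh;
 *
 *	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
 *	MLX5_SET(set_action_in, action, field,
 *		 MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
 *	MLX5_SET(set_action_in, action, data, 64);
 *	mh = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
 *				      1, action);
 *	if (IS_ERR(mh))
 *		return PTR_ERR(mh);
 *	...
 *	mlx5_modify_header_dealloc(dev, mh);
 */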

void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
				struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, modify_hdr->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->modify_header_dealloc(root, modify_hdr);
	kfree(modify_hdr);
}
EXPORT_SYMBOL(mlx5_modify_header_dealloc);

struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
						     struct mlx5_pkt_reformat_params *params,
						     enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_pkt_reformat *pkt_reformat;
	struct mlx5_flow_root_namespace *root;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
	if (!pkt_reformat)
		return ERR_PTR(-ENOMEM);

	pkt_reformat->ns_type = ns_type;
	pkt_reformat->reformat_type = params->type;
	err = root->cmds->packet_reformat_alloc(root, params, ns_type,
						pkt_reformat);
	if (err) {
		kfree(pkt_reformat);
		return ERR_PTR(err);
	}

	return pkt_reformat;
}
EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
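
/* Usage sketch (illustrative only): the params point at the raw reformat
 * data, e.g. a prebuilt encapsulation header. The buffer, its length and the
 * reformat type below are hypothetical example values.
 *
 *	struct mlx5_pkt_reformat_params params = {
 *		.type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
 *		.size = encap_hdr_len,
 *		.data = encap_hdr,
 *	};
 *	struct mlx5_pkt_reformat *pr;
 *
 *	pr = mlx5_packet_reformat_alloc(dev, &params,
 *					MLX5_FLOW_NAMESPACE_FDB);
 *	if (IS_ERR(pr))
 *		return PTR_ERR(pr);
 *	...
 *	mlx5_packet_reformat_dealloc(dev, pr);
 */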

void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
				  struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, pkt_reformat->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
	kfree(pkt_reformat);
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);

int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns)
{
	if (peer_ns && ns->mode != peer_ns->mode) {
		mlx5_core_err(ns->dev,
			      "Can't peer namespaces of different steering modes\n");
		return -EINVAL;
	}

	return ns->cmds->set_peer(ns, peer_ns);
}

/* This function should be called only at init stage of the namespace.
 * It is not safe to call this function while steering operations
 * are executed in the namespace.
 */
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode)
{
	struct mlx5_flow_root_namespace *root;
	const struct mlx5_flow_cmds *cmds;
	int err;

	root = find_root(&ns->node);
	if (&root->ns != ns)
		/* Can't set cmds on a non-root namespace */
		return -EINVAL;

	if (root->table_type != FS_FT_FDB)
		return -EOPNOTSUPP;

	if (root->mode == mode)
		return 0;

	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
		cmds = mlx5_fs_cmd_get_dr_cmds();
	else
		cmds = mlx5_fs_cmd_get_fw_cmds();
	if (!cmds)
		return -EOPNOTSUPP;

	err = cmds->create_ns(root);
	if (err) {
		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
			      err);
		return err;
	}

	root->cmds->destroy_ns(root);
	root->cmds = cmds;
	root->mode = mode;

	return 0;
}