1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2020 Mellanox Technologies.
3 
4 #include <linux/mlx5/driver.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/mlx5/fs.h>
7 
8 #include "lib/fs_chains.h"
9 #include "fs_ft_pool.h"
10 #include "en/mapping.h"
11 #include "fs_core.h"
12 #include "en_tc.h"
13 
/* Shorthand accessors for struct mlx5_fs_chains members. */
#define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht)
#define prios_ht(chains) ((chains)->prios_ht)
#define tc_default_ft(chains) ((chains)->tc_default_ft)
#define tc_end_ft(chains) ((chains)->tc_end_ft)
/* fs_core prio that unmanaged chains tables are created under, per namespace. */
#define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
				  FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
/* Fixed size of the table backing the special chain returned by
 * mlx5_chains_get_nf_ft_chain() (all other tables size from the FT pool).
 */
#define FT_TBL_SZ (64 * 1024)
22 
/* Per-device, per-namespace (FDB or NIC) container of dynamically
 * created chain/prio flow tables.
 */
struct mlx5_fs_chains {
	struct mlx5_core_dev *dev;

	struct rhashtable chains_ht;	/* chain number -> struct fs_chain */
	struct rhashtable prios_ht;	/* struct prio_key -> struct prio */
	/* Protects above chains_ht and prios_ht */
	struct mutex lock;

	struct mlx5_flow_table *tc_default_ft;	/* used as next_ft when creating tables */
	struct mlx5_flow_table *tc_end_ft;	/* default miss destination for prios */
	struct mapping_ctx *chains_mapping;	/* chain number <-> restore id mapping */

	enum mlx5_flow_namespace_type ns;
	u32 group_num;	/* autogroup.max_num_groups for created tables */
	u32 flags;	/* MLX5_CHAINS_* feature flags */
};
39 
/* Refcounted chain object, hashed by chain number in chains_ht. */
struct fs_chain {
	struct rhash_head node;

	u32 chain;	/* hash key: the chain number */

	int ref;	/* manipulated under chains_lock */
	int id;		/* mapping id written to packets to restore the chain */

	struct mlx5_fs_chains *chains;
	struct list_head prios_list;	/* prios of this chain, sorted by (prio, level) */
	struct mlx5_flow_handle *restore_rule;	/* FDB namespace only, else NULL */
	struct mlx5_modify_hdr *miss_modify_hdr;	/* sets id into the chain register */
};
53 
/* Hash key identifying one prio table: (chain, prio, level). */
struct prio_key {
	u32 chain;
	u32 prio;
	u32 level;
};

/* Refcounted flow table for one (chain, prio, level), hashed in prios_ht
 * and linked on its chain's sorted prios_list.
 */
struct prio {
	struct rhash_head node;
	struct list_head list;

	struct prio_key key;

	int ref;	/* manipulated under chains_lock */

	struct fs_chain *chain;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_table *next_ft;	/* current miss destination */
	struct mlx5_flow_group *miss_group;	/* last-two-entries group for the miss rule */
	struct mlx5_flow_handle *miss_rule;	/* forwards table misses to next_ft */
};
74 
/* rhashtable layouts: fs_chain keyed by chain number, prio by prio_key. */
static const struct rhashtable_params chain_params = {
	.head_offset = offsetof(struct fs_chain, node),
	.key_offset = offsetof(struct fs_chain, chain),
	.key_len = sizeof_field(struct fs_chain, chain),
	.automatic_shrinking = true,
};

static const struct rhashtable_params prio_params = {
	.head_offset = offsetof(struct prio, node),
	.key_offset = offsetof(struct prio, key),
	.key_len = sizeof_field(struct prio, key),
	.automatic_shrinking = true,
};
88 
mlx5_chains_prios_supported(struct mlx5_fs_chains * chains)89 bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
90 {
91 	return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
92 }
93 
mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains * chains)94 bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
95 {
96 	return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
97 }
98 
mlx5_chains_backwards_supported(struct mlx5_fs_chains * chains)99 bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
100 {
101 	return mlx5_chains_prios_supported(chains) &&
102 	       mlx5_chains_ignore_flow_level_supported(chains);
103 }
104 
mlx5_chains_get_chain_range(struct mlx5_fs_chains * chains)105 u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
106 {
107 	if (!mlx5_chains_prios_supported(chains))
108 		return 1;
109 
110 	if (mlx5_chains_ignore_flow_level_supported(chains))
111 		return UINT_MAX - 1;
112 
113 	/* We should get here only for eswitch case */
114 	return FDB_TC_MAX_CHAIN;
115 }
116 
mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains * chains)117 u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
118 {
119 	return mlx5_chains_get_chain_range(chains) + 1;
120 }
121 
mlx5_chains_get_prio_range(struct mlx5_fs_chains * chains)122 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
123 {
124 	if (mlx5_chains_ignore_flow_level_supported(chains))
125 		return UINT_MAX;
126 
127 	/* We should get here only for eswitch case */
128 	return FDB_TC_MAX_PRIO;
129 }
130 
mlx5_chains_get_level_range(struct mlx5_fs_chains * chains)131 static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
132 {
133 	if (mlx5_chains_ignore_flow_level_supported(chains))
134 		return UINT_MAX;
135 
136 	/* Same value for FDB and NIC RX tables */
137 	return FDB_TC_LEVELS_PER_PRIO;
138 }
139 
140 void
mlx5_chains_set_end_ft(struct mlx5_fs_chains * chains,struct mlx5_flow_table * ft)141 mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
142 		       struct mlx5_flow_table *ft)
143 {
144 	tc_end_ft(chains) = ft;
145 }
146 
147 static struct mlx5_flow_table *
mlx5_chains_create_table(struct mlx5_fs_chains * chains,u32 chain,u32 prio,u32 level)148 mlx5_chains_create_table(struct mlx5_fs_chains *chains,
149 			 u32 chain, u32 prio, u32 level)
150 {
151 	struct mlx5_flow_table_attr ft_attr = {};
152 	struct mlx5_flow_namespace *ns;
153 	struct mlx5_flow_table *ft;
154 	int sz;
155 
156 	if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
157 		ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
158 				  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
159 
160 	sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
161 	ft_attr.max_fte = sz;
162 
163 	/* We use tc_default_ft(chains) as the table's next_ft till
164 	 * ignore_flow_level is allowed on FT creation and not just for FTEs.
165 	 * Instead caller should add an explicit miss rule if needed.
166 	 */
167 	ft_attr.next_ft = tc_default_ft(chains);
168 
169 	/* The root table(chain 0, prio 1, level 0) is required to be
170 	 * connected to the previous fs_core managed prio.
171 	 * We always create it, as a managed table, in order to align with
172 	 * fs_core logic.
173 	 */
174 	if (!mlx5_chains_ignore_flow_level_supported(chains) ||
175 	    (chain == 0 && prio == 1 && level == 0)) {
176 		ft_attr.level = level;
177 		ft_attr.prio = prio - 1;
178 		ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
179 			mlx5_get_fdb_sub_ns(chains->dev, chain) :
180 			mlx5_get_flow_namespace(chains->dev, chains->ns);
181 	} else {
182 		ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
183 		ft_attr.prio = ns_to_chains_fs_prio(chains->ns);
184 		/* Firmware doesn't allow us to create another level 0 table,
185 		 * so we create all unmanaged tables as level 1.
186 		 *
187 		 * To connect them, we use explicit miss rules with
188 		 * ignore_flow_level. Caller is responsible to create
189 		 * these rules (if needed).
190 		 */
191 		ft_attr.level = 1;
192 		ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
193 	}
194 
195 	ft_attr.autogroup.num_reserved_entries = 2;
196 	ft_attr.autogroup.max_num_groups = chains->group_num;
197 	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
198 	if (IS_ERR(ft)) {
199 		mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
200 			       (int)PTR_ERR(ft), chain, prio, level, sz);
201 		return ft;
202 	}
203 
204 	return ft;
205 }
206 
/* Set up chain restore resources: allocate a unique mapping id for the
 * chain, and (when restore applies) a modify header that writes that id
 * into the per-namespace chain register; for FDB also a restore rule
 * (via esw_add_restore_rule()) matching the id.
 *
 * Returns 0 on success or a negative errno; on failure nothing is left
 * allocated.
 */
static int
create_chain_restore(struct fs_chain *chain)
{
	struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
	char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
	struct mlx5_fs_chains *chains = chain->chains;
	enum mlx5e_tc_attr_to_reg chain_to_reg;
	struct mlx5_modify_hdr *mod_hdr;
	u32 index;
	int err;

	/* No restore for the special nf-ft chain, or when chains/prios
	 * aren't supported at all.
	 */
	if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
	    !mlx5_chains_prios_supported(chains))
		return 0;

	err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
	if (err)
		return err;
	if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
		/* we got the special default flow tag id, so we won't know
		 * if we actually marked the packet with the restore rule
		 * we create.
		 *
		 * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
		 */
		/* Grab a second id before releasing the first, so the new
		 * one can't be MLX5_FS_DEFAULT_FLOW_TAG again.
		 */
		err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
		mapping_remove(chains->chains_mapping, MLX5_FS_DEFAULT_FLOW_TAG);
		if (err)
			return err;
	}

	chain->id = index;

	if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
		chain_to_reg = CHAIN_TO_REG;
		chain->restore_rule = esw_add_restore_rule(esw, chain->id);
		if (IS_ERR(chain->restore_rule)) {
			err = PTR_ERR(chain->restore_rule);
			goto err_rule;
		}
	} else if (chains->ns == MLX5_FLOW_NAMESPACE_KERNEL) {
		/* For NIC RX we don't need a restore rule
		 * since we write the metadata to reg_b
		 * that is passed to SW directly.
		 */
		chain_to_reg = NIC_CHAIN_TO_REG;
	} else {
		err = -EINVAL;
		goto err_rule;
	}

	/* Build a single SET action writing chain->id into the register
	 * selected by chain_to_reg.
	 */
	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field,
		 mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
	MLX5_SET(set_action_in, modact, offset,
		 mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset);
	/* A full 32-bit length is encoded as 0 in the SET action. */
	MLX5_SET(set_action_in, modact, length,
		 mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen == 32 ?
		 0 : mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen);
	MLX5_SET(set_action_in, modact, data, chain->id);
	mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
					   1, modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		goto err_mod_hdr;
	}
	chain->miss_modify_hdr = mod_hdr;

	return 0;

err_mod_hdr:
	/* restore_rule exists only on the FDB path; guard for the others. */
	if (!IS_ERR_OR_NULL(chain->restore_rule))
		mlx5_del_flow_rules(chain->restore_rule);
err_rule:
	/* Datapath can't find this mapping, so we can safely remove it */
	mapping_remove(chains->chains_mapping, chain->id);
	return err;
}
285 
/* Tear down what create_chain_restore() set up: the restore rule (FDB
 * only), the modify header, and the chain's mapping id.
 */
static void destroy_chain_restore(struct fs_chain *chain)
{
	struct mlx5_fs_chains *chains = chain->chains;

	/* miss_modify_hdr unset means create_chain_restore() was a no-op. */
	if (!chain->miss_modify_hdr)
		return;

	if (chain->restore_rule)
		mlx5_del_flow_rules(chain->restore_rule);

	mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
	mapping_remove(chains->chains_mapping, chain->id);
}
299 
300 static struct fs_chain *
mlx5_chains_create_chain(struct mlx5_fs_chains * chains,u32 chain)301 mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
302 {
303 	struct fs_chain *chain_s = NULL;
304 	int err;
305 
306 	chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
307 	if (!chain_s)
308 		return ERR_PTR(-ENOMEM);
309 
310 	chain_s->chains = chains;
311 	chain_s->chain = chain;
312 	INIT_LIST_HEAD(&chain_s->prios_list);
313 
314 	err = create_chain_restore(chain_s);
315 	if (err)
316 		goto err_restore;
317 
318 	err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
319 				     chain_params);
320 	if (err)
321 		goto err_insert;
322 
323 	return chain_s;
324 
325 err_insert:
326 	destroy_chain_restore(chain_s);
327 err_restore:
328 	kvfree(chain_s);
329 	return ERR_PTR(err);
330 }
331 
332 static void
mlx5_chains_destroy_chain(struct fs_chain * chain)333 mlx5_chains_destroy_chain(struct fs_chain *chain)
334 {
335 	struct mlx5_fs_chains *chains = chain->chains;
336 
337 	rhashtable_remove_fast(&chains_ht(chains), &chain->node,
338 			       chain_params);
339 
340 	destroy_chain_restore(chain);
341 	kvfree(chain);
342 }
343 
344 static struct fs_chain *
mlx5_chains_get_chain(struct mlx5_fs_chains * chains,u32 chain)345 mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
346 {
347 	struct fs_chain *chain_s;
348 
349 	chain_s = rhashtable_lookup_fast(&chains_ht(chains), &chain,
350 					 chain_params);
351 	if (!chain_s) {
352 		chain_s = mlx5_chains_create_chain(chains, chain);
353 		if (IS_ERR(chain_s))
354 			return chain_s;
355 	}
356 
357 	chain_s->ref++;
358 
359 	return chain_s;
360 }
361 
362 static struct mlx5_flow_handle *
mlx5_chains_add_miss_rule(struct fs_chain * chain,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)363 mlx5_chains_add_miss_rule(struct fs_chain *chain,
364 			  struct mlx5_flow_table *ft,
365 			  struct mlx5_flow_table *next_ft)
366 {
367 	struct mlx5_fs_chains *chains = chain->chains;
368 	struct mlx5_flow_destination dest = {};
369 	struct mlx5_flow_act act = {};
370 
371 	act.flags  = FLOW_ACT_NO_APPEND;
372 	if (mlx5_chains_ignore_flow_level_supported(chain->chains))
373 		act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
374 
375 	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
376 	dest.type  = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
377 	dest.ft = next_ft;
378 
379 	if (next_ft == tc_end_ft(chains) &&
380 	    chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
381 	    mlx5_chains_prios_supported(chains)) {
382 		act.modify_hdr = chain->miss_modify_hdr;
383 		act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
384 	}
385 
386 	return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
387 }
388 
/* Re-point the miss rules of the tables preceding @prio (back to and
 * including the previous prio's level 0) at @next_ft. New rules are all
 * added before any old rule is deleted, so a failure can be fully
 * reverted without touching the installed state.
 *
 * Only relevant when @prio is a level 0 table (the only miss target of
 * a previous prio). Returns 0 on success or a negative errno.
 */
static int
mlx5_chains_update_prio_prevs(struct prio *prio,
			      struct mlx5_flow_table *next_ft)
{
	struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
	struct fs_chain *chain = prio->chain;
	struct prio *pos;
	int n = 0, err;

	if (prio->key.level)
		return 0;

	/* Iterate in reverse order until reaching the level 0 rule of
	 * the previous priority, adding all the miss rules first, so we can
	 * revert them if any of them fails.
	 */
	pos = prio;
	list_for_each_entry_continue_reverse(pos,
					     &chain->prios_list,
					     list) {
		miss_rules[n] = mlx5_chains_add_miss_rule(chain,
							  pos->ft,
							  next_ft);
		if (IS_ERR(miss_rules[n])) {
			err = PTR_ERR(miss_rules[n]);
			goto err_prev_rule;
		}

		n++;
		/* Stop once the previous prio's level 0 is handled. */
		if (!pos->key.level)
			break;
	}

	/* Success, delete old miss rules, and update the pointers. */
	n = 0;
	pos = prio;
	list_for_each_entry_continue_reverse(pos,
					     &chain->prios_list,
					     list) {
		mlx5_del_flow_rules(pos->miss_rule);

		pos->miss_rule = miss_rules[n];
		pos->next_ft = next_ft;

		n++;
		if (!pos->key.level)
			break;
	}

	return 0;

err_prev_rule:
	/* Roll back the rules added so far; installed state is untouched. */
	while (--n >= 0)
		mlx5_del_flow_rules(miss_rules[n]);

	return err;
}
446 
447 static void
mlx5_chains_put_chain(struct fs_chain * chain)448 mlx5_chains_put_chain(struct fs_chain *chain)
449 {
450 	if (--chain->ref == 0)
451 		mlx5_chains_destroy_chain(chain);
452 }
453 
/* Create the flow table for (chain, prio, level): create the table, its
 * miss group and miss rule pointing at the correct next table, insert it
 * into prios_ht and the chain's sorted prios_list, and reconnect the
 * preceding tables to it.
 *
 * Called with chains_lock held. Returns the new prio or an ERR_PTR; on
 * failure everything is unwound.
 */
static struct prio *
mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
			u32 chain, u32 prio, u32 level)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_table *next_ft;
	struct mlx5_flow_table *ft;
	struct fs_chain *chain_s;
	struct list_head *pos;
	struct prio *prio_s;
	u32 *flow_group_in;
	int err;

	/* Takes a chain reference, released on any failure below. */
	chain_s = mlx5_chains_get_chain(chains, chain);
	if (IS_ERR(chain_s))
		return ERR_CAST(chain_s);

	prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!prio_s || !flow_group_in) {
		err = -ENOMEM;
		goto err_alloc;
	}

	/* Chain's prio list is sorted by prio and level.
	 * And all levels of some prio point to the next prio's level 0.
	 * Example list (prio, level):
	 * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
	 * In hardware, we will we have the following pointers:
	 * (3,0) -> (5,0) -> (7,0) -> Slow path
	 * (3,1) -> (5,0)
	 * (5,1) -> (7,0)
	 * (6,1) -> (7,0)
	 */

	/* Default miss for each chain: */
	next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
		  tc_default_ft(chains) :
		  tc_end_ft(chains);
	list_for_each(pos, &chain_s->prios_list) {
		struct prio *p = list_entry(pos, struct prio, list);

		/* exit on first pos that is larger */
		if (prio < p->key.prio || (prio == p->key.prio &&
					   level < p->key.level)) {
			/* Get next level 0 table */
			next_ft = p->key.level == 0 ? p->ft : p->next_ft;
			break;
		}
	}

	ft = mlx5_chains_create_table(chains, chain, prio, level);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_create;
	}

	/* Miss group occupies the two entries the table reserved at its end. */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
		 ft->max_fte - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		goto err_group;
	}

	/* Add miss rule to next_ft */
	miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
	if (IS_ERR(miss_rule)) {
		err = PTR_ERR(miss_rule);
		goto err_miss_rule;
	}

	prio_s->miss_group = miss_group;
	prio_s->miss_rule = miss_rule;
	prio_s->next_ft = next_ft;
	prio_s->chain = chain_s;
	prio_s->key.chain = chain;
	prio_s->key.prio = prio;
	prio_s->key.level = level;
	prio_s->ft = ft;

	err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
				     prio_params);
	if (err)
		goto err_insert;

	/* Insert before pos, keeping prios_list sorted by (prio, level). */
	list_add(&prio_s->list, pos->prev);

	/* Table is ready, connect it */
	err = mlx5_chains_update_prio_prevs(prio_s, ft);
	if (err)
		goto err_update;

	kvfree(flow_group_in);
	return prio_s;

err_update:
	list_del(&prio_s->list);
	rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
			       prio_params);
err_insert:
	mlx5_del_flow_rules(miss_rule);
err_miss_rule:
	mlx5_destroy_flow_group(miss_group);
err_group:
	mlx5_destroy_flow_table(ft);
err_create:
err_alloc:
	kvfree(prio_s);
	kvfree(flow_group_in);
	mlx5_chains_put_chain(chain_s);
	return ERR_PTR(err);
}
571 
/* Destroy a prio table: re-point preceding tables at its next_ft, then
 * tear everything down in reverse creation order and drop the chain ref.
 * Called with chains_lock held.
 */
static void
mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
			 struct prio *prio)
{
	struct fs_chain *chain = prio->chain;

	/* Reconnect previous tables to what this table pointed at. */
	WARN_ON(mlx5_chains_update_prio_prevs(prio,
					      prio->next_ft));

	list_del(&prio->list);
	rhashtable_remove_fast(&prios_ht(chains), &prio->node,
			       prio_params);
	mlx5_del_flow_rules(prio->miss_rule);
	mlx5_destroy_flow_group(prio->miss_group);
	mlx5_destroy_flow_table(prio->ft);
	mlx5_chains_put_chain(chain);
	kvfree(prio);
}
590 
/* Get (or lazily create) the flow table for (chain, prio, level), taking
 * a reference on it. A reference is also taken on each lower level of the
 * same (chain, prio) so fs_core table connections stay intact.
 *
 * Returns the table or an ERR_PTR; release with mlx5_chains_put_table().
 */
struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
		      u32 level)
{
	struct mlx5_flow_table *prev_fts;
	struct prio *prio_s;
	struct prio_key key;
	int l = 0;

	/* Validate against the ranges this setup supports. */
	if ((chain > mlx5_chains_get_chain_range(chains) &&
	     chain != mlx5_chains_get_nf_ft_chain(chains)) ||
	    prio > mlx5_chains_get_prio_range(chains) ||
	    level > mlx5_chains_get_level_range(chains))
		return ERR_PTR(-EOPNOTSUPP);

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables.
	 */
	for (l = 0; l < level; l++) {
		prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
		if (IS_ERR(prev_fts)) {
			prio_s = ERR_CAST(prev_fts);
			goto err_get_prevs;
		}
	}

	key.chain = chain;
	key.prio = prio;
	key.level = level;

	mutex_lock(&chains_lock(chains));
	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
					prio_params);
	if (!prio_s) {
		prio_s = mlx5_chains_create_prio(chains, chain,
						 prio, level);
		if (IS_ERR(prio_s))
			goto err_create_prio;
	}

	++prio_s->ref;
	mutex_unlock(&chains_lock(chains));

	return prio_s->ft;

err_create_prio:
	mutex_unlock(&chains_lock(chains));
err_get_prevs:
	/* Drop the references taken on the lower levels above. */
	while (--l >= 0)
		mlx5_chains_put_table(chains, chain, prio, l);
	return ERR_CAST(prio_s);
}
643 
/* Release a reference taken by mlx5_chains_get_table() on the table for
 * (chain, prio, level); the table is destroyed on the last put. Then
 * recursively release the references held on all lower levels of the
 * same (chain, prio).
 */
void
mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
		      u32 level)
{
	struct prio *prio_s;
	struct prio_key key;

	key.chain = chain;
	key.prio = prio;
	key.level = level;

	mutex_lock(&chains_lock(chains));
	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
					prio_params);
	if (!prio_s)
		goto err_get_prio;

	if (--prio_s->ref == 0)
		mlx5_chains_destroy_prio(chains, prio_s);
	mutex_unlock(&chains_lock(chains));

	/* Release the implicit references on the lower levels (taken in
	 * mlx5_chains_get_table()), outside the lock.
	 */
	while (level-- > 0)
		mlx5_chains_put_table(chains, chain, prio, level);

	return;

err_get_prio:
	mutex_unlock(&chains_lock(chains));
	/* Unbalanced put: the table was never obtained (or already gone). */
	WARN_ONCE(1,
		  "Couldn't find table: (chain: %d prio: %d level: %d)",
		  chain, prio, level);
}
676 
677 struct mlx5_flow_table *
mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains * chains)678 mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
679 {
680 	return tc_end_ft(chains);
681 }
682 
683 struct mlx5_flow_table *
mlx5_chains_create_global_table(struct mlx5_fs_chains * chains)684 mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
685 {
686 	u32 chain, prio, level;
687 	int err;
688 
689 	if (!mlx5_chains_ignore_flow_level_supported(chains)) {
690 		err = -EOPNOTSUPP;
691 
692 		mlx5_core_warn(chains->dev,
693 			       "Couldn't create global flow table, ignore_flow_level not supported.");
694 		goto err_ignore;
695 	}
696 
697 	chain = mlx5_chains_get_chain_range(chains),
698 	prio = mlx5_chains_get_prio_range(chains);
699 	level = mlx5_chains_get_level_range(chains);
700 
701 	return mlx5_chains_create_table(chains, chain, prio, level);
702 
703 err_ignore:
704 	return ERR_PTR(err);
705 }
706 
/* Destroy a table created by mlx5_chains_create_global_table().
 * (@chains is unused; kept for API symmetry.)
 */
void
mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
				 struct mlx5_flow_table *ft)
{
	mlx5_destroy_flow_table(ft);
}
713 
/* Allocate and initialize a chains context from @attr: copy attributes,
 * init both hashtables and the lock. Returns the context or an ERR_PTR.
 */
static struct mlx5_fs_chains *
mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
	struct mlx5_fs_chains *chains_priv;
	u32 max_flow_counter;
	int err;

	chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
	if (!chains_priv)
		return ERR_PTR(-ENOMEM);

	/* Only used for the debug print below. */
	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	mlx5_core_dbg(dev,
		      "Init flow table chains, max counters(%d), groups(%d), max flow table size(%d)\n",
		      max_flow_counter, attr->max_grp_num, attr->max_ft_sz);

	chains_priv->dev = dev;
	chains_priv->flags = attr->flags;
	chains_priv->ns = attr->ns;
	chains_priv->group_num = attr->max_grp_num;
	chains_priv->chains_mapping = attr->mapping;
	/* Both default next-ft and end-ft start as the caller's default table. */
	tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;

	mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
		       mlx5_chains_get_chain_range(chains_priv),
		       mlx5_chains_get_prio_range(chains_priv));

	err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
	if (err)
		goto init_chains_ht_err;

	err = rhashtable_init(&prios_ht(chains_priv), &prio_params);
	if (err)
		goto init_prios_ht_err;

	mutex_init(&chains_lock(chains_priv));

	return chains_priv;

init_prios_ht_err:
	rhashtable_destroy(&chains_ht(chains_priv));
init_chains_ht_err:
	kfree(chains_priv);
	return ERR_PTR(err);
}
761 
/* Release everything mlx5_chains_init() allocated, in reverse order. */
static void
mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
{
	mutex_destroy(&chains_lock(chains));
	rhashtable_destroy(&prios_ht(chains));
	rhashtable_destroy(&chains_ht(chains));

	kfree(chains);
}
771 
/* Public constructor; thin wrapper around mlx5_chains_init(). */
struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
	return mlx5_chains_init(dev, attr);
}
781 
/* Public destructor; thin wrapper around mlx5_chains_cleanup(). */
void
mlx5_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_cleanup(chains);
}
787 
788 int
mlx5_chains_get_chain_mapping(struct mlx5_fs_chains * chains,u32 chain,u32 * chain_mapping)789 mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
790 			      u32 *chain_mapping)
791 {
792 	struct mapping_ctx *ctx = chains->chains_mapping;
793 	struct mlx5_mapped_obj mapped_obj = {};
794 
795 	mapped_obj.type = MLX5_MAPPED_OBJ_CHAIN;
796 	mapped_obj.chain = chain;
797 	return mapping_add(ctx, &mapped_obj, chain_mapping);
798 }
799 
800 int
mlx5_chains_put_chain_mapping(struct mlx5_fs_chains * chains,u32 chain_mapping)801 mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
802 {
803 	struct mapping_ctx *ctx = chains->chains_mapping;
804 
805 	return mapping_remove(ctx, chain_mapping);
806 }
807