// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/rhashtable.h>
#include <linux/list.h>

#include "item.h"
#include "trap.h"
#include "core_acl_flex_actions.h"

enum mlxsw_afa_set_type {
	MLXSW_AFA_SET_TYPE_NEXT,
	MLXSW_AFA_SET_TYPE_GOTO,
};

/* afa_set_type
 * Type of the record at the end of the action set.
 */
MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4);

/* afa_set_next_action_set_ptr
 * A pointer to the next action set in the KVD Centralized database.
 */
MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24);

/* afa_set_goto_g
 * group - When set, the binding is of an ACL group. When cleared,
 * the binding is of an ACL.
 * Must be set to 1 for Spectrum.
 */
MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1);

enum mlxsw_afa_set_goto_binding_cmd {
	/* continue to the next binding point */
	MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE,
	/* jump to the next binding point, no return */
	MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP,
	/* terminate the ACL binding */
	MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4,
};

/* afa_set_goto_binding_cmd */
MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3);

/* afa_set_goto_next_binding
 * ACL/ACL group identifier. If the g bit is set, this field should hold
 * the acl_group_id, else it should hold the acl_id.
 */
MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16);

/* afa_all_action_type
 * Action Type.
 */
MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6);

struct mlxsw_afa {
	unsigned int max_acts_per_set;
	const struct mlxsw_afa_ops *ops;
	void *ops_priv;
	struct rhashtable set_ht;
	struct rhashtable fwd_entry_ht;
};

#define MLXSW_AFA_SET_LEN 0xA8

struct mlxsw_afa_set_ht_key {
	char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */
	bool is_first;
};

/* Set structure holds one action set record. It contains up to three
 * actions (depends on size of particular actions). The set is either
 * put directly to a rule, or it is stored in KVD linear area.
 * To prevent duplicate entries in KVD linear area, a hashtable is
 * used to track sets that were previously inserted and may be shared.
 */
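/* Sets are reference counted. A set becomes "shared" only once it has been
 * inserted into set_ht and backed by a KVD linear entry (see
 * mlxsw_afa_set_share()), which happens at block commit time through
 * mlxsw_afa_set_get(); until then, kvdl_index is not meaningful.
 * ref_count counts the users of the set, so an identical set committed by
 * several blocks is stored only once.
 */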

struct mlxsw_afa_set {
	struct rhash_head ht_node;
	struct mlxsw_afa_set_ht_key ht_key;
	u32 kvdl_index;
	bool shared; /* Inserted in hashtable (doesn't mean that
		      * kvdl_index is valid).
		      */
	unsigned int ref_count;
	struct mlxsw_afa_set *next; /* Pointer to the next set. */
	struct mlxsw_afa_set *prev; /* Pointer to the previous set,
				     * note that set may have multiple
				     * sets from multiple blocks
				     * pointing at it. This is only
				     * usable until commit.
				     */
};

static const struct rhashtable_params mlxsw_afa_set_ht_params = {
	.key_len = sizeof(struct mlxsw_afa_set_ht_key),
	.key_offset = offsetof(struct mlxsw_afa_set, ht_key),
	.head_offset = offsetof(struct mlxsw_afa_set, ht_node),
	.automatic_shrinking = true,
};

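/* A forwarding entry caches the KVD linear record used by the forward (PBS)
 * action for a given local port. Entries are keyed by local_port and
 * reference counted, so several forward actions egressing to the same port
 * share a single ops->kvdl_fwd_entry_add() allocation.
 */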
struct mlxsw_afa_fwd_entry_ht_key {
	u8 local_port;
};

struct mlxsw_afa_fwd_entry {
	struct rhash_head ht_node;
	struct mlxsw_afa_fwd_entry_ht_key ht_key;
	u32 kvdl_index;
	unsigned int ref_count;
};

static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
	.key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key),
	.key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key),
	.head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
				   const struct mlxsw_afa_ops *ops,
				   void *ops_priv)
{
	struct mlxsw_afa *mlxsw_afa;
	int err;

	mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL);
	if (!mlxsw_afa)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params);
	if (err)
		goto err_set_rhashtable_init;
	err = rhashtable_init(&mlxsw_afa->fwd_entry_ht,
			      &mlxsw_afa_fwd_entry_ht_params);
	if (err)
		goto err_fwd_entry_rhashtable_init;
	mlxsw_afa->max_acts_per_set = max_acts_per_set;
	mlxsw_afa->ops = ops;
	mlxsw_afa->ops_priv = ops_priv;
	return mlxsw_afa;

err_fwd_entry_rhashtable_init:
	rhashtable_destroy(&mlxsw_afa->set_ht);
err_set_rhashtable_init:
	kfree(mlxsw_afa);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(mlxsw_afa_create);

void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
{
	rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
	rhashtable_destroy(&mlxsw_afa->set_ht);
	kfree(mlxsw_afa);
}
EXPORT_SYMBOL(mlxsw_afa_destroy);

static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set,
				   enum mlxsw_afa_set_goto_binding_cmd cmd,
				   u16 group_id)
{
	char *actions = set->ht_key.enc_actions;

	mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO);
	mlxsw_afa_set_goto_g_set(actions, true);
	mlxsw_afa_set_goto_binding_cmd_set(actions, cmd);
	mlxsw_afa_set_goto_next_binding_set(actions, group_id);
}

static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set,
				   u32 next_set_kvdl_index)
{
	char *actions = set->ht_key.enc_actions;

	mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT);
	mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index);
}

static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
{
	struct mlxsw_afa_set *set;

	set = kzalloc(sizeof(*set), GFP_KERNEL);
	if (!set)
		return NULL;
	/* Need to initialize the set to pass by default */
	mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
	set->ht_key.is_first = is_first;
	set->ref_count = 1;
	return set;
}

static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set)
{
	kfree(set);
}

static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa,
			       struct mlxsw_afa_set *set)
{
	int err;

	err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node,
				     mlxsw_afa_set_ht_params);
	if (err)
		return err;
	err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv,
					   &set->kvdl_index,
					   set->ht_key.enc_actions,
					   set->ht_key.is_first);
	if (err)
		goto err_kvdl_set_add;
	set->shared = true;
	set->prev = NULL;
	return 0;

err_kvdl_set_add:
	rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
			       mlxsw_afa_set_ht_params);
	return err;
}

static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
				  struct mlxsw_afa_set *set)
{
	mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv,
				     set->kvdl_index,
				     set->ht_key.is_first);
	rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
			       mlxsw_afa_set_ht_params);
	set->shared = false;
}

static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
			      struct mlxsw_afa_set *set)
{
	if (--set->ref_count)
		return;
	if (set->shared)
		mlxsw_afa_set_unshare(mlxsw_afa, set);
	mlxsw_afa_set_destroy(set);
}

static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
					       struct mlxsw_afa_set *orig_set)
{
	struct mlxsw_afa_set *set;
	int err;

	/* There is a hashtable of sets maintained. If a set with the exact
	 * same encoding exists, we reuse it. Otherwise, the current set
	 * is shared by making it available to others using the hash table.
	 */
	set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
				     mlxsw_afa_set_ht_params);
	if (set) {
		set->ref_count++;
		mlxsw_afa_set_put(mlxsw_afa, orig_set);
	} else {
		set = orig_set;
		err = mlxsw_afa_set_share(mlxsw_afa, set);
		if (err)
			return ERR_PTR(err);
	}
	return set;
}

/* Block structure holds a list of action sets. One action block
 * represents one chain of actions executed upon match of a rule.
 */
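/* A typical caller flow, sketched here for orientation (error handling
 * omitted; the actual call sites live outside of this file):
 *
 *	block = mlxsw_afa_block_create(mlxsw_afa);
 *	mlxsw_afa_block_append_drop(block);	(or any other append helper)
 *	mlxsw_afa_block_commit(block);
 *	enc = mlxsw_afa_block_first_set(block);	(copied into the rule entry)
 *	...
 *	mlxsw_afa_block_destroy(block);
 */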

struct mlxsw_afa_block {
	struct mlxsw_afa *afa;
	bool finished;
	struct mlxsw_afa_set *first_set;
	struct mlxsw_afa_set *cur_set;
	unsigned int cur_act_index; /* In current set. */
	struct list_head resource_list; /* List of resources held by actions
					 * in this block.
					 */
};

struct mlxsw_afa_resource {
	struct list_head list;
	void (*destructor)(struct mlxsw_afa_block *block,
			   struct mlxsw_afa_resource *resource);
};

static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
				   struct mlxsw_afa_resource *resource)
{
	list_add(&resource->list, &block->resource_list);
}

static void mlxsw_afa_resource_del(struct mlxsw_afa_resource *resource)
{
	list_del(&resource->list);
}

static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
{
	struct mlxsw_afa_resource *resource, *tmp;

	list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
		resource->destructor(block, resource);
	}
}

struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
{
	struct mlxsw_afa_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;
	INIT_LIST_HEAD(&block->resource_list);
	block->afa = mlxsw_afa;

	/* At least one action set is always present, so just create it here */
	block->first_set = mlxsw_afa_set_create(true);
	if (!block->first_set)
		goto err_first_set_create;

	/* In case user instructs to have dummy first set, we leave it
	 * empty here and create another, real, set right away.
	 */
	if (mlxsw_afa->ops->dummy_first_set) {
		block->cur_set = mlxsw_afa_set_create(false);
		if (!block->cur_set)
			goto err_second_set_create;
		block->cur_set->prev = block->first_set;
		block->first_set->next = block->cur_set;
	} else {
		block->cur_set = block->first_set;
	}

	return block;

err_second_set_create:
	mlxsw_afa_set_destroy(block->first_set);
err_first_set_create:
	kfree(block);
	return NULL;
}
EXPORT_SYMBOL(mlxsw_afa_block_create);

void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
{
	struct mlxsw_afa_set *set = block->first_set;
	struct mlxsw_afa_set *next_set;

	do {
		next_set = set->next;
		mlxsw_afa_set_put(block->afa, set);
		set = next_set;
	} while (set);
	mlxsw_afa_resources_destroy(block);
	kfree(block);
}
EXPORT_SYMBOL(mlxsw_afa_block_destroy);

int mlxsw_afa_block_commit(struct mlxsw_afa_block *block)
{
	struct mlxsw_afa_set *set = block->cur_set;
	struct mlxsw_afa_set *prev_set;

	block->cur_set = NULL;
	block->finished = true;

	/* Go over all linked sets starting from last
	 * and try to find existing set in the hash table.
	 * In case it is not there, assign a KVD linear index
	 * and insert it.
	 */
	do {
		prev_set = set->prev;
		set = mlxsw_afa_set_get(block->afa, set);
		if (IS_ERR(set))
			/* No rollback is needed since the chain is
			 * in consistent state and mlxsw_afa_block_destroy
			 * will take care of putting it away.
			 */
			return PTR_ERR(set);
		if (prev_set) {
			prev_set->next = set;
			mlxsw_afa_set_next_set(prev_set, set->kvdl_index);
			set = prev_set;
		}
	} while (prev_set);

	block->first_set = set;
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_commit);

char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
{
	return block->first_set->ht_key.enc_actions;
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set);

char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block)
{
	return block->cur_set->ht_key.enc_actions;
}
EXPORT_SYMBOL(mlxsw_afa_block_cur_set);

u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block)
{
	/* First set is never in KVD linear. So the first set
	 * with valid KVD linear index is always the second one.
	 */
	if (WARN_ON(!block->first_set->next))
		return 0;
	return block->first_set->next->kvdl_index;
}
EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index);

int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity)
{
	u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block);

	return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv,
						      kvdl_index, activity);
}
EXPORT_SYMBOL(mlxsw_afa_block_activity_get);

int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
	if (block->finished)
		return -EINVAL;
	mlxsw_afa_set_goto_set(block->cur_set,
			       MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0);
	block->finished = true;
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_continue);

int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
{
	if (block->finished)
		return -EINVAL;
	mlxsw_afa_set_goto_set(block->cur_set,
			       MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id);
	block->finished = true;
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_jump);

int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
{
	if (block->finished)
		return -EINVAL;
	mlxsw_afa_set_goto_set(block->cur_set,
			       MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
	block->finished = true;
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_terminate);

static struct mlxsw_afa_fwd_entry *
mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
{
	struct mlxsw_afa_fwd_entry *fwd_entry;
	int err;

	fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL);
	if (!fwd_entry)
		return ERR_PTR(-ENOMEM);
	fwd_entry->ht_key.local_port = local_port;
	fwd_entry->ref_count = 1;

	err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
				     &fwd_entry->ht_node,
				     mlxsw_afa_fwd_entry_ht_params);
	if (err)
		goto err_rhashtable_insert;

	err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv,
						 &fwd_entry->kvdl_index,
						 local_port);
	if (err)
		goto err_kvdl_fwd_entry_add;
	return fwd_entry;

err_kvdl_fwd_entry_add:
	rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
			       mlxsw_afa_fwd_entry_ht_params);
err_rhashtable_insert:
	kfree(fwd_entry);
	return ERR_PTR(err);
}

static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
					struct mlxsw_afa_fwd_entry *fwd_entry)
{
	mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv,
					   fwd_entry->kvdl_index);
	rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
			       mlxsw_afa_fwd_entry_ht_params);
	kfree(fwd_entry);
}

static struct mlxsw_afa_fwd_entry *
mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port)
{
	struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
	struct mlxsw_afa_fwd_entry *fwd_entry;

	ht_key.local_port = local_port;
	fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
					   mlxsw_afa_fwd_entry_ht_params);
	if (fwd_entry) {
		fwd_entry->ref_count++;
		return fwd_entry;
	}
	return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
}

static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
				    struct mlxsw_afa_fwd_entry *fwd_entry)
{
	if (--fwd_entry->ref_count)
		return;
	mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
}

struct mlxsw_afa_fwd_entry_ref {
	struct mlxsw_afa_resource resource;
	struct mlxsw_afa_fwd_entry *fwd_entry;
};

static void
mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
				struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
{
	mlxsw_afa_resource_del(&fwd_entry_ref->resource);
	mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
	kfree(fwd_entry_ref);
}

static void
mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
				   struct mlxsw_afa_resource *resource)
{
	struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;

	fwd_entry_ref = container_of(resource, struct mlxsw_afa_fwd_entry_ref,
				     resource);
	mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
}

static struct mlxsw_afa_fwd_entry_ref *
mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
{
	struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
	struct mlxsw_afa_fwd_entry *fwd_entry;
	int err;

	fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL);
	if (!fwd_entry_ref)
		return ERR_PTR(-ENOMEM);
	fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port);
	if (IS_ERR(fwd_entry)) {
		err = PTR_ERR(fwd_entry);
		goto err_fwd_entry_get;
	}
	fwd_entry_ref->fwd_entry = fwd_entry;
	fwd_entry_ref->resource.destructor = mlxsw_afa_fwd_entry_ref_destructor;
	mlxsw_afa_resource_add(block, &fwd_entry_ref->resource);
	return fwd_entry_ref;

err_fwd_entry_get:
	kfree(fwd_entry_ref);
	return ERR_PTR(err);
}

struct mlxsw_afa_counter {
	struct mlxsw_afa_resource resource;
	u32 counter_index;
};

static void
mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
			  struct mlxsw_afa_counter *counter)
{
	mlxsw_afa_resource_del(&counter->resource);
	block->afa->ops->counter_index_put(block->afa->ops_priv,
					   counter->counter_index);
	kfree(counter);
}

static void
mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block,
			     struct mlxsw_afa_resource *resource)
{
	struct mlxsw_afa_counter *counter;

	counter = container_of(resource, struct mlxsw_afa_counter, resource);
	mlxsw_afa_counter_destroy(block, counter);
}

static struct mlxsw_afa_counter *
mlxsw_afa_counter_create(struct mlxsw_afa_block *block)
{
	struct mlxsw_afa_counter *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = block->afa->ops->counter_index_get(block->afa->ops_priv,
						 &counter->counter_index);
	if (err)
		goto err_counter_index_get;
	counter->resource.destructor = mlxsw_afa_counter_destructor;
	mlxsw_afa_resource_add(block, &counter->resource);
	return counter;

err_counter_index_get:
	kfree(counter);
	return ERR_PTR(err);
}

#define MLXSW_AFA_ONE_ACTION_LEN 32
#define MLXSW_AFA_PAYLOAD_OFFSET 4
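
/* Each action record occupies MLXSW_AFA_ONE_ACTION_LEN bytes of the encoded
 * set. The action type is written into the first word (afa_all_action_type)
 * and the per-action payload handed back to callers starts
 * MLXSW_AFA_PAYLOAD_OFFSET bytes into the record.
 */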

static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
					   u8 action_code, u8 action_size)
{
	char *oneact;
	char *actions;

	if (block->finished)
		return ERR_PTR(-EINVAL);
	if (block->cur_act_index + action_size >
	    block->afa->max_acts_per_set) {
		struct mlxsw_afa_set *set;

		/* The appended action won't fit into the current action set,
		 * so create a new set.
		 */
		set = mlxsw_afa_set_create(false);
		if (!set)
			return ERR_PTR(-ENOBUFS);
		set->prev = block->cur_set;
		block->cur_act_index = 0;
		block->cur_set->next = set;
		block->cur_set = set;
	}

	actions = block->cur_set->ht_key.enc_actions;
	oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN;
	block->cur_act_index += action_size;
	mlxsw_afa_all_action_type_set(oneact, action_code);
	return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
}

/* VLAN Action
 * -----------
 * VLAN action is used for manipulating VLANs. It can be used to implement
 * QinQ, VLAN translation, changing the PCP bits of the VLAN tag, pushing,
 * popping or swapping VLANs, and more.
 */
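/* Of the VLAN commands defined below, only MLXSW_AFA_VLAN_CMD_SET_OUTER
 * (together with a NOP tag command) is emitted by
 * mlxsw_afa_block_append_vlan_modify() in this file.
 */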

#define MLXSW_AFA_VLAN_CODE 0x02
#define MLXSW_AFA_VLAN_SIZE 1

enum mlxsw_afa_vlan_vlan_tag_cmd {
	MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
	MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG,
	MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG,
};

enum mlxsw_afa_vlan_cmd {
	MLXSW_AFA_VLAN_CMD_NOP,
	MLXSW_AFA_VLAN_CMD_SET_OUTER,
	MLXSW_AFA_VLAN_CMD_SET_INNER,
	MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER,
	MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER,
	MLXSW_AFA_VLAN_CMD_SWAP,
};

/* afa_vlan_vlan_tag_cmd
 * Tag command: push, pop, nop VLAN header.
 */
MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3);

/* afa_vlan_vid_cmd */
MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3);

/* afa_vlan_vid */
MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12);

/* afa_vlan_ethertype_cmd */
MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3);

/* afa_vlan_ethertype
 * Index to EtherTypes in Switch VLAN EtherType Register (SVER).
 */
MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3);

/* afa_vlan_pcp_cmd */
MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3);

/* afa_vlan_pcp */
MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3);

static inline void
mlxsw_afa_vlan_pack(char *payload,
		    enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd,
		    enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid,
		    enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp,
		    enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype)
{
	mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd);
	mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd);
	mlxsw_afa_vlan_vid_set(payload, vid);
	mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd);
	mlxsw_afa_vlan_pcp_set(payload, pcp);
	mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd);
	mlxsw_afa_vlan_ethertype_set(payload, ethertype);
}

int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
				       u16 vid, u8 pcp, u8 et,
				       struct netlink_ext_ack *extack)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_VLAN_CODE,
						  MLXSW_AFA_VLAN_SIZE);

	if (IS_ERR(act)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append vlan_modify action");
		return PTR_ERR(act);
	}
	mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
			    MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
			    MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
			    MLXSW_AFA_VLAN_CMD_SET_OUTER, et);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);

/* Trap / Discard Action
 * ---------------------
 * The Trap / Discard action enables trapping / mirroring packets to the CPU
 * as well as discarding packets.
 * The ACL Trap / Discard separates the forward/discard control from CPU
 * trap control. In addition, the Trap / Discard action enables activating
 * SPAN (port mirroring).
 */
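/* The helpers below pack this action in different combinations: drop is
 * NOP trap + DISCARD forward, trap is TRAP + DISCARD, trap-and-forward is
 * TRAP + FORWARD, and mirroring enables a mirror agent on top of a
 * NOP + FORWARD record.
 */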

#define MLXSW_AFA_TRAPDISC_CODE 0x03
#define MLXSW_AFA_TRAPDISC_SIZE 1

enum mlxsw_afa_trapdisc_trap_action {
	MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP = 0,
	MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP = 2,
};

/* afa_trapdisc_trap_action
 * Trap Action.
 */
MLXSW_ITEM32(afa, trapdisc, trap_action, 0x00, 24, 4);

enum mlxsw_afa_trapdisc_forward_action {
	MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD = 1,
	MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
};

/* afa_trapdisc_forward_action
 * Forward Action.
 */
MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);

/* afa_trapdisc_trap_id
 * Trap ID to configure.
 */
MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);

/* afa_trapdisc_mirror_agent
 * Mirror agent.
 */
MLXSW_ITEM32(afa, trapdisc, mirror_agent, 0x08, 29, 3);

/* afa_trapdisc_mirror_enable
 * Mirror enable.
 */
MLXSW_ITEM32(afa, trapdisc, mirror_enable, 0x08, 24, 1);

static inline void
mlxsw_afa_trapdisc_pack(char *payload,
			enum mlxsw_afa_trapdisc_trap_action trap_action,
			enum mlxsw_afa_trapdisc_forward_action forward_action,
			u16 trap_id)
{
	mlxsw_afa_trapdisc_trap_action_set(payload, trap_action);
	mlxsw_afa_trapdisc_forward_action_set(payload, forward_action);
	mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
}

static inline void
mlxsw_afa_trapdisc_mirror_pack(char *payload, bool mirror_enable,
			       u8 mirror_agent)
{
	mlxsw_afa_trapdisc_mirror_enable_set(payload, mirror_enable);
	mlxsw_afa_trapdisc_mirror_agent_set(payload, mirror_agent);
}

int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_TRAPDISC_CODE,
						  MLXSW_AFA_TRAPDISC_SIZE);

	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
				MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_drop);

int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_TRAPDISC_CODE,
						  MLXSW_AFA_TRAPDISC_SIZE);

	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
				MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
				trap_id);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap);

int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
					    u16 trap_id)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_TRAPDISC_CODE,
						  MLXSW_AFA_TRAPDISC_SIZE);

	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
				MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
				trap_id);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);

struct mlxsw_afa_mirror {
	struct mlxsw_afa_resource resource;
	int span_id;
	u8 local_in_port;
	bool ingress;
};
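
/* The mirror action owns a SPAN agent allocated through ops->mirror_add().
 * It is tracked as a block resource, so the agent is released by
 * mlxsw_afa_mirror_destructor() when the block is destroyed.
 */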

static void
mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
			 struct mlxsw_afa_mirror *mirror)
{
	mlxsw_afa_resource_del(&mirror->resource);
	block->afa->ops->mirror_del(block->afa->ops_priv,
				    mirror->local_in_port,
				    mirror->span_id,
				    mirror->ingress);
	kfree(mirror);
}

static void
mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
			    struct mlxsw_afa_resource *resource)
{
	struct mlxsw_afa_mirror *mirror;

	mirror = container_of(resource, struct mlxsw_afa_mirror, resource);
	mlxsw_afa_mirror_destroy(block, mirror);
}

static struct mlxsw_afa_mirror *
mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port,
			const struct net_device *out_dev, bool ingress)
{
	struct mlxsw_afa_mirror *mirror;
	int err;

	mirror = kzalloc(sizeof(*mirror), GFP_KERNEL);
	if (!mirror)
		return ERR_PTR(-ENOMEM);

	err = block->afa->ops->mirror_add(block->afa->ops_priv,
					  local_in_port, out_dev,
					  ingress, &mirror->span_id);
	if (err)
		goto err_mirror_add;

	mirror->ingress = ingress;
	mirror->local_in_port = local_in_port;
	mirror->resource.destructor = mlxsw_afa_mirror_destructor;
	mlxsw_afa_resource_add(block, &mirror->resource);
	return mirror;

err_mirror_add:
	kfree(mirror);
	return ERR_PTR(err);
}

static int
mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
					u8 mirror_agent)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_TRAPDISC_CODE,
						  MLXSW_AFA_TRAPDISC_SIZE);
	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
				MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
	mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
	return 0;
}

int
mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
			      const struct net_device *out_dev, bool ingress,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_afa_mirror *mirror;
	int err;

	mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev,
					 ingress);
	if (IS_ERR(mirror)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot create mirror action");
		return PTR_ERR(mirror);
	}
	err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append mirror action");
		goto err_append_allocated_mirror;
	}

	return 0;

err_append_allocated_mirror:
	mlxsw_afa_mirror_destroy(block, mirror);
	return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);

/* Forwarding Action
 * -----------------
 * Forwarding Action can be used to implement Policy Based Switching (PBS)
 * as well as OpenFlow related "Output" action.
 */
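/* The PBS pointer packed by mlxsw_afa_block_append_fwd() below is the KVD
 * linear index of the per-port forwarding entry obtained through
 * mlxsw_afa_fwd_entry_ref_create().
 */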

#define MLXSW_AFA_FORWARD_CODE 0x07
#define MLXSW_AFA_FORWARD_SIZE 1

enum mlxsw_afa_forward_type {
	/* PBS, Policy Based Switching */
	MLXSW_AFA_FORWARD_TYPE_PBS,
	/* Output, OpenFlow output type */
	MLXSW_AFA_FORWARD_TYPE_OUTPUT,
};

/* afa_forward_type */
MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2);

/* afa_forward_pbs_ptr
 * A pointer to the PBS entry configured by PPBS register.
 * Reserved when in_port is set.
 */
MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24);

/* afa_forward_in_port
 * Packet is forwarded back to the ingress port.
 */
MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1);

static inline void
mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
		       u32 pbs_ptr, bool in_port)
{
	mlxsw_afa_forward_type_set(payload, type);
	mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr);
	mlxsw_afa_forward_in_port_set(payload, in_port);
}

int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
			       u8 local_port, bool in_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
	u32 kvdl_index;
	char *act;
	int err;

	if (in_port) {
		NL_SET_ERR_MSG_MOD(extack, "Forwarding to ingress port is not supported");
		return -EOPNOTSUPP;
	}
	fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
	if (IS_ERR(fwd_entry_ref)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot create forward action");
		return PTR_ERR(fwd_entry_ref);
	}
	kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;

	act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
					    MLXSW_AFA_FORWARD_SIZE);
	if (IS_ERR(act)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append forward action");
		err = PTR_ERR(act);
		goto err_append_action;
	}
	mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
			       kvdl_index, in_port);
	return 0;

err_append_action:
	mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
	return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);

/* Policing and Counting Action
 * ----------------------------
 * Policing and Counting action is used for binding policer and counter
 * to ACL rules.
 */
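/* Note that the helpers in this file only pack the flow counter fields;
 * policer binding is not emitted here.
 */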

#define MLXSW_AFA_POLCNT_CODE 0x08
#define MLXSW_AFA_POLCNT_SIZE 1

enum mlxsw_afa_polcnt_counter_set_type {
	/* No count */
	MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
	/* Count packets and bytes */
	MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
	/* Count only packets */
	MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
};

/* afa_polcnt_counter_set_type
 * Counter set type for flow counters.
 */
MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);

/* afa_polcnt_counter_index
 * Counter index for flow counters.
 */
MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);

static inline void
mlxsw_afa_polcnt_pack(char *payload,
		      enum mlxsw_afa_polcnt_counter_set_type set_type,
		      u32 counter_index)
{
	mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
	mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
}

int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
					     u32 counter_index)
{
	char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
						  MLXSW_AFA_POLCNT_SIZE);
	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
			      counter_index);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);

int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
				   u32 *p_counter_index,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_afa_counter *counter;
	u32 counter_index;
	int err;

	counter = mlxsw_afa_counter_create(block);
	if (IS_ERR(counter)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot create count action");
		return PTR_ERR(counter);
	}
	counter_index = counter->counter_index;

	err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append count action");
		goto err_append_allocated_counter;
	}
	if (p_counter_index)
		*p_counter_index = counter_index;
	return 0;

err_append_allocated_counter:
	mlxsw_afa_counter_destroy(block, counter);
	return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_counter);

/* Virtual Router and Forwarding Domain Action
 * -------------------------------------------
 * Virtual Switch action is used to manipulate the Virtual Router (VR),
 * MPLS label space and the Forwarding Identifier (FID).
 */
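/* Only the FID set command is emitted by mlxsw_afa_block_append_fid_set()
 * below; VR and MPLS manipulation are not packed by this file.
 */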

#define MLXSW_AFA_VIRFWD_CODE 0x0E
#define MLXSW_AFA_VIRFWD_SIZE 1

enum mlxsw_afa_virfwd_fid_cmd {
	/* Do nothing */
	MLXSW_AFA_VIRFWD_FID_CMD_NOOP,
	/* Set the Forwarding Identifier (FID) to fid */
	MLXSW_AFA_VIRFWD_FID_CMD_SET,
};

/* afa_virfwd_fid_cmd */
MLXSW_ITEM32(afa, virfwd, fid_cmd, 0x08, 29, 3);

/* afa_virfwd_fid
 * The FID value.
 */
MLXSW_ITEM32(afa, virfwd, fid, 0x08, 0, 16);

static inline void mlxsw_afa_virfwd_pack(char *payload,
					 enum mlxsw_afa_virfwd_fid_cmd fid_cmd,
					 u16 fid)
{
	mlxsw_afa_virfwd_fid_cmd_set(payload, fid_cmd);
	mlxsw_afa_virfwd_fid_set(payload, fid);
}

int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
				   struct netlink_ext_ack *extack)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_VIRFWD_CODE,
						  MLXSW_AFA_VIRFWD_SIZE);
	if (IS_ERR(act)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append fid_set action");
		return PTR_ERR(act);
	}
	mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);

/* MC Routing Action
 * -----------------
 * The Multicast router action. Can be used by RMFT_V2 - Router Multicast
 * Forwarding Table Version 2 Register.
 */
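/* This is a two-record action (MLXSW_AFA_MCROUTER_SIZE). The helper below
 * always uses the TRAP RPF action and passes the caller's RMID validity and
 * rigr_rmid_index / kvdl_index through unchanged.
 */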

#define MLXSW_AFA_MCROUTER_CODE 0x10
#define MLXSW_AFA_MCROUTER_SIZE 2

enum mlxsw_afa_mcrouter_rpf_action {
	MLXSW_AFA_MCROUTER_RPF_ACTION_NOP,
	MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
	MLXSW_AFA_MCROUTER_RPF_ACTION_DISCARD_ERROR,
};

/* afa_mcrouter_rpf_action */
MLXSW_ITEM32(afa, mcrouter, rpf_action, 0x00, 28, 3);

/* afa_mcrouter_expected_irif */
MLXSW_ITEM32(afa, mcrouter, expected_irif, 0x00, 0, 16);

/* afa_mcrouter_min_mtu */
MLXSW_ITEM32(afa, mcrouter, min_mtu, 0x08, 0, 16);

enum mlxsw_afa_mrouter_vrmid {
	MLXSW_AFA_MCROUTER_VRMID_INVALID,
	MLXSW_AFA_MCROUTER_VRMID_VALID
};

/* afa_mcrouter_vrmid
 * Valid RMID: rigr_rmid_index is used as RMID
 */
MLXSW_ITEM32(afa, mcrouter, vrmid, 0x0C, 31, 1);

/* afa_mcrouter_rigr_rmid_index
 * When the vrmid field is set to invalid, the field is used as pointer to
 * Router Interface Group (RIGR) Table in the KVD linear.
 * When the vrmid is set to valid, the field is used as RMID index, ranged
 * from 0 to max_mid - 1. The index is to the Port Group Table.
 */
MLXSW_ITEM32(afa, mcrouter, rigr_rmid_index, 0x0C, 0, 24);

static inline void
mlxsw_afa_mcrouter_pack(char *payload,
			enum mlxsw_afa_mcrouter_rpf_action rpf_action,
			u16 expected_irif, u16 min_mtu,
			enum mlxsw_afa_mrouter_vrmid vrmid, u32 rigr_rmid_index)
{
	mlxsw_afa_mcrouter_rpf_action_set(payload, rpf_action);
	mlxsw_afa_mcrouter_expected_irif_set(payload, expected_irif);
	mlxsw_afa_mcrouter_min_mtu_set(payload, min_mtu);
	mlxsw_afa_mcrouter_vrmid_set(payload, vrmid);
	mlxsw_afa_mcrouter_rigr_rmid_index_set(payload, rigr_rmid_index);
}

int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
				    u16 expected_irif, u16 min_mtu,
				    bool rmid_valid, u32 kvdl_index)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_MCROUTER_CODE,
						  MLXSW_AFA_MCROUTER_SIZE);
	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
				expected_irif, min_mtu, rmid_valid, kvdl_index);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter);