// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_tcam.h"

struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct mutex rules_lock; /* Protects rules list */
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	struct mlxsw_sp_acl_tcam tcam;
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_ruleset_ht_key {
	struct mlxsw_sp_flow_block *block;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned int min_prio;
	unsigned int max_prio;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	u64 last_drops;
	unsigned long priv[];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
	/* We hold a reference on ruleset ourselves */
	return ruleset->ref_count == 2;
}

int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_flow_block *block,
			      struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
				 binding->mlxsw_sp_port, binding->ingress);
}

void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_flow_block_binding *binding)
{
	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
			    binding->mlxsw_sp_port, binding->ingress);
}

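/* Bind ruleset zero to all existing port bindings of the flow block.
 * On failure, the bindings done so far are rolled back.
 */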
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_ruleset *ruleset,
				struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;
	int err;

	block->ruleset_zero = ruleset;
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;

	return err;
}

static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_ruleset *ruleset,
				  struct mlxsw_sp_flow_block *block)
{
	struct mlxsw_sp_flow_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
	block->ruleset_zero = NULL;
}

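/* Allocate a ruleset together with the profile-specific private area,
 * initialize its rule hashtable, let the profile ops add it to the
 * TCAM and make it discoverable through the ACL ruleset hashtable.
 */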
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    const struct mlxsw_sp_acl_profile_ops *ops,
			    struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.block = block;
	ruleset->ht_key.chain_index = chain_index;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
			       tmplt_elusage, &ruleset->min_prio,
			       &ruleset->max_prio);
	if (err)
		goto err_ops_ruleset_add;

	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_ht_insert;

	return ruleset;

err_ht_insert:
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
			      struct mlxsw_sp_flow_block *block, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.block = block;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

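/* Look up an existing ruleset for the given block, chain and profile.
 * No reference is taken; returns ERR_PTR(-ENOENT) if none exists.
 */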
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

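/* Get a reference on a matching ruleset, creating the ruleset if it
 * does not exist yet. Release with mlxsw_sp_acl_ruleset_put().
 */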
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_flow_block *block, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile,
			 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops,
					   tmplt_elusage);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
				   unsigned int *p_min_prio,
				   unsigned int *p_max_prio)
{
	*p_min_prio = ruleset->min_prio;
	*p_max_prio = ruleset->max_prio;
}

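/* Create rule info. When the caller supplies a preallocated action
 * block, reuse it; otherwise create one and remember that it has to
 * be destroyed together with the rule info.
 */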
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
			  struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);

	if (afa_block) {
		rulei->act_block = afa_block;
		return rulei;
	}

	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	rulei->action_created = 1;
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	if (rulei->action_created)
		mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_terminate(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
				bool ingress,
				const struct flow_action_cookie *fa_cookie,
				struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
					   fa_cookie, extack);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid output device");
			return -EINVAL;
		}
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port, extack);
}

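/* Mirroring is only offloaded when the block has exactly one binding,
 * whose port is then used as the mirror source.
 */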
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct net_device *out_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *in_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	in_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_mirror(rulei->act_block,
					     in_port->local_port,
					     out_dev,
					     binding->ingress,
					     extack);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio,
				struct netlink_ext_ack *extack)
{
	u8 ethertype;

	if (action == FLOW_ACTION_VLAN_MANGLE) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype,
							  extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action");
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    u32 prio, struct netlink_ext_ack *extack)
{
	/* Even though both Linux and Spectrum switches support 16 priorities,
	 * spectrum_qdisc only processes the first eight priomap elements, and
	 * the DCB and PFC features are tied to 8 priorities as well. Therefore
	 * bounce attempts to prioritize packets to higher priorities.
	 */
	if (prio >= IEEE_8021QAZ_MAX_TCS) {
		NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
		return -EINVAL;
	}
	return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
						      extack);
}

enum mlxsw_sp_acl_mangle_field {
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT,
	MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT,
};

struct mlxsw_sp_acl_mangle_action {
	enum flow_action_mangle_base htype;
	/* Offset is u32-aligned. */
	u32 offset;
	/* Mask bits are unset for the modified field. */
	u32 mask;
	/* Shift required to extract the set value. */
	u32 shift;
	enum mlxsw_sp_acl_mangle_field field;
};

#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
	{								\
		.htype = _htype,					\
		.offset = _offset,					\
		.mask = _mask,						\
		.shift = _shift,					\
		.field = MLXSW_SP_ACL_MANGLE_FIELD_##_field,		\
	}

#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4,	\
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6,	\
				   _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_TCP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_TCP, _offset, _mask, _shift, _field)

#define MLXSW_SP_ACL_MANGLE_ACTION_UDP(_offset, _mask, _shift, _field) \
	MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_UDP, _offset, _mask, _shift, _field)

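/* Map pedit (mangle) htype/offset/mask tuples onto the fields the
 * device can rewrite. The shift extracts the new value from the
 * 32-bit word supplied by the flow_action offload.
 */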
static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
	MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),

	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0xffff0000, 0, IP_DPORT),

	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT),
	MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0, IP_DPORT),
};

static int
mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct mlxsw_sp_acl_mangle_action *mact,
				    u32 val, struct netlink_ext_ack *extack)
{
	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
		return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
							  val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
		return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
						       val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
		return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
						      val, extack);
	default:
		return -EOPNOTSUPP;
	}
}

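/* Spectrum-1 supports only the common mangle fields handled above. */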
static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}

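/* Spectrum-2 additionally supports L4 source and destination port
 * rewrite.
 */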
static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_rule_info *rulei,
						struct mlxsw_sp_acl_mangle_action *mact,
						u32 val, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
	if (err != -EOPNOTSUPP)
		return err;

	switch (mact->field) {
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack);
	case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT:
		return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack);
	default:
		break;
	}

	NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
	return err;
}

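/* Translate a single 32-bit pedit mangle into a device action by
 * matching the header type, offset and mask against the table above;
 * unknown combinations are rejected.
 */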
int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  enum flow_action_mangle_base htype,
				  u32 offset, u32 mask, u32 val,
				  struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops = mlxsw_sp->acl_rulei_ops;
	struct mlxsw_sp_acl_mangle_action *mact;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
		mact = &mlxsw_sp_acl_mangle_actions[i];
		if (mact->htype == htype &&
		    mact->offset == offset &&
		    mact->mask == mask) {
			val >>= mact->shift;
			return acl_rulei_ops->act_mangle_field(mlxsw_sp,
							       rulei, mact,
							       val, extack);
		}
	}

	NL_SET_ERR_MSG_MOD(extack, "Unknown mangle field");
	return -EINVAL;
}

int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  u32 index, u64 rate_bytes_ps,
				  u32 burst, struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_police(rulei->act_block, index,
					    rate_bytes_ps, burst,
					    &rulei->policer_index, extack);
	if (err)
		return err;

	rulei->policer_index_valid = true;

	return 0;
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_afa_block_append_counter(rulei->act_block,
					     &rulei->counter_index, extack);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid, struct netlink_ext_ack *extack)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}

int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_rule_info *rulei,
				  struct mlxsw_sp_flow_block *block,
				  struct psample_group *psample_group, u32 rate,
				  u32 trunc_size, bool truncate,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (!list_is_singular(&block->binding_list)) {
		NL_SET_ERR_MSG_MOD(extack, "Only a single sampling source is allowed");
		return -EOPNOTSUPP;
	}
	binding = list_first_entry(&block->binding_list,
				   struct mlxsw_sp_flow_block_binding, list);
	mlxsw_sp_port = binding->mlxsw_sp_port;

	return mlxsw_afa_block_append_sampler(rulei->act_block,
					      mlxsw_sp_port->local_port,
					      psample_group, rate, trunc_size,
					      truncate, binding->ingress,
					      extack);
}

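/* A typical offload sequence (flower being the main user) is roughly:
 *
 *	ruleset = mlxsw_sp_acl_ruleset_get(...);
 *	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, cookie, NULL,
 *					extack);
 *	rulei = mlxsw_sp_acl_rule_rulei(rule);
 *	mlxsw_sp_acl_rulei_priority(rulei, prio);
 *	... add match keys and actions via mlxsw_sp_acl_rulei_*() ...
 *	err = mlxsw_sp_acl_rulei_commit(rulei);
 *	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 *
 * Teardown goes through mlxsw_sp_acl_rule_del(), mlxsw_sp_acl_rule_destroy()
 * and mlxsw_sp_acl_ruleset_put().
 */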
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie,
			 struct mlxsw_afa_block *afa_block,
			 struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size,
		       GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	return rule;

err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

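/* Install the rule in hardware and index it by cookie. Adding a rule
 * to an otherwise empty chain 0 ruleset also binds that ruleset to the
 * block's port bindings; rulesets of other chains are reached via goto
 * only.
 */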
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
		/* We only need ruleset with chain index 0, the implicit
		 * one, to be directly bound to device. The rest of the
		 * rulesets are bound by "Goto action set".
		 */
		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
		if (err)
			goto err_ruleset_block_bind;
	}

	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	block->rule_count++;
	block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
	block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
	return 0;

err_ruleset_block_bind:
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;

	block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
	block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
	block->rule_count--;
	mutex_lock(&mlxsw_sp->acl->rules_lock);
	list_del(&rule->list);
	mutex_unlock(&mlxsw_sp->acl->rules_lock);
	if (!ruleset->ht_key.chain_index &&
	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule *rule,
				     struct mlxsw_afa_block *afa_block)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule_info *rulei;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	rulei->act_block = afa_block;

	return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

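/* Refresh last_used if the hardware reports the rule as recently hit. */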
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mutex_lock(&acl->rules_lock);
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	mutex_unlock(&acl->rules_lock);
	return 0;

err_rule_update:
	mutex_unlock(&acl->rules_lock);
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

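/* Report statistics as deltas since the previous call. The raw counter
 * and policer drop values are cached in the rule so that repeated
 * calls only return the traffic seen in between.
 */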
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *drops,
				u64 *last_use,
				enum flow_action_hw_stats *used_hw_stats)

{
	enum mlxsw_sp_policer_type type = MLXSW_SP_POLICER_TYPE_SINGLE_RATE;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets = 0;
	u64 current_bytes = 0;
	u64 current_drops = 0;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	if (rulei->counter_valid) {
		err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
						&current_packets,
						&current_bytes);
		if (err)
			return err;
		*used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
	}
	if (rulei->policer_index_valid) {
		err = mlxsw_sp_policer_drops_counter_get(mlxsw_sp, type,
							 rulei->policer_index,
							 &current_drops);
		if (err)
			return err;
	}
	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*drops = current_drops - rule->last_drops;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;
	rule->last_drops = current_drops;

	return 0;
}

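/* Set up the ACL infrastructure: flexible keys, the ruleset hashtable,
 * the dummy FID handed out by mlxsw_sp_acl_dummy_fid(), the TCAM and
 * the periodic rule activity update work.
 */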
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
	acl = kzalloc(alloc_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp->afk_ops);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	mutex_init(&acl->rules_lock);
	err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
	if (err)
		goto err_acl_ops_init;

	/* Create the delayed work for the rule activity_update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mutex_destroy(&acl->rules_lock);
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
	mutex_destroy(&acl->rules_lock);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}

u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
							   &acl->tcam);
}

int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
							   &acl->tcam, val);
}

struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field,
};

struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops = {
	.act_mangle_field = mlxsw_sp2_acl_rulei_act_mangle_field,
};