// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch flower support
 *
 * Copyright 2021 NXP
 *
 */

#include "dpaa2-switch.h"

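/* Translate the flower match into the DPSW ACL key/mask representation
 * consumed by the MC firmware. Dissector keys that the hardware cannot
 * match on are rejected with -EOPNOTSUPP.
 */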
static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
					 struct dpsw_acl_key *acl_key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpsw_acl_fields *acl_h, *acl_m;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	acl_h = &acl_key->match;
	acl_m = &acl_key->mask;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		acl_h->l3_protocol = match.key->ip_proto;
		acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
		acl_m->l3_protocol = match.mask->ip_proto;
		acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
		ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
		ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
		ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		acl_h->l2_vlan_id = match.key->vlan_id;
		acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
		acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
				    match.key->vlan_dei;

		acl_m->l2_vlan_id = match.mask->vlan_id;
		acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
		acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
				    match.mask->vlan_dei;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		acl_h->l3_source_ip = be32_to_cpu(match.key->src);
		acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
		acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
		acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		acl_h->l4_source_port = be16_to_cpu(match.key->src);
		acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
		acl_m->l4_source_port = be16_to_cpu(match.mask->src);
		acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL not supported");
			return -EOPNOTSUPP;
		}

		if ((match.mask->tos & 0x3) != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on ECN not supported, only DSCP");
			return -EOPNOTSUPP;
		}

		acl_h->l3_dscp = match.key->tos >> 2;
		acl_m->l3_dscp = match.mask->tos >> 2;
	}

	return 0;
}

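/* The MC firmware expects the ACL key to be handed over through a
 * DMA-mapped buffer: serialize the key with dpsw_acl_prepare_entry_cfg(),
 * map it, issue the command, then unmap and free the buffer.
 */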
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct ethsw_core *ethsw = filter_block->ethsw;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 filter_block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	kfree(cmd_buff);

	if (err) {
		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
		return err;
	}

	return 0;
}

static int
dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct ethsw_core *ethsw = block->ethsw;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	kfree(cmd_buff);

	if (err) {
		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
		return err;
	}

	return 0;
}

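/* Add the new ACL entry to the software list, which is kept sorted by
 * ascending prio, and return the index at which it was inserted.
 */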
static int
dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
				   struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	struct list_head *pos, *n;
	int index = 0;

	if (list_empty(&block->acl_entries)) {
		list_add(&entry->list, &block->acl_entries);
		return index;
	}

	list_for_each_safe(pos, n, &block->acl_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
		if (entry->prio < tmp->prio)
			break;
		index++;
	}
	list_add(&entry->list, pos->prev);
	return index;
}

static struct dpaa2_switch_acl_entry*
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
				    int index)
{
	struct dpaa2_switch_acl_entry *tmp;
	int i = 0;

	list_for_each_entry(tmp, &block->acl_entries, list) {
		if (i == index)
			return tmp;
		++i;
	}

	return NULL;
}

static int
dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
				      struct dpaa2_switch_acl_entry *entry,
				      int precedence)
{
	int err;

	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	entry->cfg.precedence = precedence;
	return dpaa2_switch_acl_entry_add(block, entry);
}

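/* Precedence slots are allocated from the end of the table
 * (DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - 1) downwards, so that entries
 * earlier in the prio-sorted list always end up with numerically lower
 * precedence values than the entries after them.
 */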
static int
dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	/* Add the new ACL entry to the linked list and get its index */
	index = dpaa2_switch_acl_entry_add_to_list(block, entry);

	/* Move the existing ACL entries up in priority to make room
	 * for the new filter.
	 */
	precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
	for (i = 0; i < index; i++) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);

		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence++;
	}

	/* Add the new entry to hardware */
	entry->cfg.precedence = precedence;
	err = dpaa2_switch_acl_entry_add(block, entry);
	block->num_acl_rules++;

	return err;
}

static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					  unsigned long cookie)
{
	struct dpaa2_switch_acl_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp, *n;
	int index = 0;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == entry->cookie)
			return index;
		index++;
	}
	return -ENOENT;
}

static struct dpaa2_switch_mirror_entry *
dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					 unsigned long cookie)
{
	struct dpaa2_switch_mirror_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
				  struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	index = dpaa2_switch_acl_entry_get_index(block, entry);

	/* Remove the ACL entry from hardware */
	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	block->num_acl_rules--;

	/* Also remove it from the software list */
	list_del(&entry->list);

	/* Move the entries above the deleted one down in priority */
	precedence = entry->cfg.precedence;
	for (i = index - 1; i >= 0; i--) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence--;
	}

	kfree(entry);

	return 0;
}

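/* Map a flower/matchall action to its DPSW ACL result: trap to the
 * control interface, redirect to another port of the same switch, or
 * drop the frame.
 */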
static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
					    struct flow_action_entry *cls_act,
					    struct dpsw_acl_result *dpsw_act,
					    struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (cls_act->id) {
	case FLOW_ACTION_TRAP:
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
		break;
	case FLOW_ACTION_REDIRECT:
		if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Destination not a DPAA2 switch port");
			return -EOPNOTSUPP;
		}

		dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
		break;
	case FLOW_ACTION_DROP:
		dpsw_act->action = DPSW_ACL_ACTION_DROP;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Action not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

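/* Add a mirroring rule on all switch ports sharing the filter block.
 * The single per-switch mirror port is set up on demand and any
 * partial configuration is unwound if a port fails midway.
 */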
static int
dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_mirror_entry *entry,
			      u16 to, struct netlink_ext_ack *extack)
{
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	struct ethsw_port_priv *port_priv;
	unsigned long ports_added = 0;
	u16 vlan = entry->cfg.vlan_id;
	bool mirror_port_enabled;
	int err, port;

	/* Set up the mirroring port */
	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	if (!mirror_port_enabled) {
		err = dpsw_set_reflection_if(ethsw->mc_io, 0,
					     ethsw->dpsw_handle, to);
		if (err)
			return err;
		ethsw->mirror_port = to;
	}

	/* Set up the same egress mirroring configuration on all the switch
	 * ports that share the same filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
		port_priv = ethsw->ports[port];

		/* We cannot add a per-VLAN mirroring rule if the VLAN in
		 * question is not installed on the switch port.
		 */
		if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
			NL_SET_ERR_MSG(extack,
				       "VLAN must be installed on the switch port");
			err = -EINVAL;
			goto err_remove_filters;
		}

		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port, &entry->cfg);
		if (err)
			goto err_remove_filters;

		ports_added |= BIT(port);
	}

	list_add(&entry->list, &block->mirror_entries);

	return 0;

err_remove_filters:
	for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, &entry->cfg);
	}

	if (!mirror_port_enabled)
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return err;
}

static int
dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_mirror_entry *entry)
{
	struct dpsw_reflection_cfg *cfg = &entry->cfg;
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	int port;

	/* Remove this mirroring configuration from all the ports belonging to
	 * the filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, cfg);

	/* Also remove it from the list of mirror filters */
	list_del(&entry->list);
	kfree(entry);

	/* If this was the last mirror filter, then unset the mirror port */
	if (list_empty(&block->mirror_entries))
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return 0;
}

static int
dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
	if (err)
		goto free_acl_entry;

	act = &rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

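/* Mirroring rules support only an exact, unmasked match on the VLAN ID;
 * anything else in the key cannot be offloaded.
 */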
static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
						u16 *vlan)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Mirroring is supported only per VLAN");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_priority != 0 ||
		    match.mask->vlan_dei != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only matching on VLAN ID supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id != 0xFFF) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching not supported");
			return -EOPNOTSUPP;
		}

		*vlan = (u16)match.key->vlan_id;
	}

	return 0;
}

static int
dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
				       struct flow_cls_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id, vlan;
	int err;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Parse the key */
	err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
	if (err)
		return err;

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    tmp->cfg.vlan_id == vlan) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
	mirror_entry->cfg.vlan_id = vlan;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

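/* Entry point for flower replace requests: trap/redirect/drop actions
 * are offloaded as ACL entries, mirred actions as mirroring rules.
 */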
int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_flower_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_flower_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}

static int
dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	act = &cls->rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

static int
dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
					 struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration. One matchall rule per block is the maximum.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matchall mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&cls->rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &cls->rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_matchall_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

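/* Replay the mirroring rules of a filter block on a port that joins it,
 * unwinding everything on the first error.
 */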
int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
				      struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_add;
	}

	return 0;

unwind_add:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_remove_reflection(ethsw->mc_io, 0,
					  ethsw->dpsw_handle,
					  port_priv->idx, &tmp->cfg);

	return err;
}

int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
					struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_remove;
	}

	return 0;

unwind_remove:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
				       port_priv->idx, &tmp->cfg);

	return err;
}

int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block,
							 acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}