// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch flower support
 *
 * Copyright 2021 NXP
 *
 */

#include "dpaa2-switch.h"

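/*
 * Translate a tc flower match into a DPSW ACL key: both the match values
 * and their masks are copied into the dpsw_acl_key consumed by the MC
 * firmware. Only the dissector keys listed below can be offloaded; any
 * other key is rejected through extack with -EOPNOTSUPP.
 *
 * Illustrative usage (not part of this file) that exercises this path:
 *   tc filter add dev swp0 ingress protocol ip flower skip_sw \
 *	dst_ip 192.168.1.1 ip_proto tcp dst_port 80 action drop
 */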
static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
					 struct dpsw_acl_key *acl_key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpsw_acl_fields *acl_h, *acl_m;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	acl_h = &acl_key->match;
	acl_m = &acl_key->mask;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		acl_h->l3_protocol = match.key->ip_proto;
		acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
		acl_m->l3_protocol = match.mask->ip_proto;
		acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
		ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
		ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
		ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		acl_h->l2_vlan_id = match.key->vlan_id;
		acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
		acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
				    match.key->vlan_dei;

		acl_m->l2_vlan_id = match.mask->vlan_id;
		acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
		acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
				    match.mask->vlan_dei;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		acl_h->l3_source_ip = be32_to_cpu(match.key->src);
		acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
		acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
		acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		acl_h->l4_source_port = be16_to_cpu(match.key->src);
		acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
		acl_m->l4_source_port = be16_to_cpu(match.mask->src);
		acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL not supported");
			return -EOPNOTSUPP;
		}

		if ((match.mask->tos & 0x3) != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on ECN not supported, only DSCP");
			return -EOPNOTSUPP;
		}

		acl_h->l3_dscp = match.key->tos >> 2;
		acl_m->l3_dscp = match.mask->tos >> 2;
	}

	return 0;
}

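/*
 * Program one ACL entry into hardware. The key/mask pair is serialized
 * into a command buffer which is DMA-mapped so the MC firmware can read
 * it while processing dpsw_acl_add_entry(). The buffer is needed only
 * for the duration of the command, so it is unmapped and freed before
 * returning.
 */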
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct ethsw_core *ethsw = filter_block->ethsw;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 filter_block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}

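/*
 * Counterpart of dpaa2_switch_acl_entry_add(): serialize the same
 * key/mask into a DMA-mapped command buffer and ask the MC firmware to
 * remove the matching entry through dpsw_acl_remove_entry().
 */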
static int
dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct ethsw_core *ethsw = block->ethsw;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}

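/*
 * Insert a new entry into the block's software list, which is kept
 * sorted by ascending tc priority. Return the list index at which the
 * entry landed; the caller uses it to renumber the hardware precedence
 * of the entries that must still match first.
 */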
static int
dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
				   struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	struct list_head *pos, *n;
	int index = 0;

	if (list_empty(&block->acl_entries)) {
		list_add(&entry->list, &block->acl_entries);
		return index;
	}

	list_for_each_safe(pos, n, &block->acl_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
		if (entry->prio < tmp->prio)
			break;
		index++;
	}
	list_add(&entry->list, pos->prev);
	return index;
}

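/* Return the index'th entry of the block's ACL list, or NULL if the list
 * holds fewer entries than that.
 */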
static struct dpaa2_switch_acl_entry*
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
				    int index)
{
	struct dpaa2_switch_acl_entry *tmp;
	int i = 0;

	list_for_each_entry(tmp, &block->acl_entries, list) {
		if (i == index)
			return tmp;
		++i;
	}

	return NULL;
}

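/*
 * There is no way to update the precedence of an installed entry in
 * place, so changing it means removing the rule from hardware and
 * re-adding it with the new precedence value.
 */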
static int
dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
				      struct dpaa2_switch_acl_entry *entry,
				      int precedence)
{
	int err;

	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	entry->cfg.precedence = precedence;
	return dpaa2_switch_acl_entry_add(block, entry);
}

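/*
 * Add a filter to both the software list and the hardware table. The
 * entries that must keep matching before the new one are re-programmed
 * at updated precedence values first, so that the new entry can take the
 * hardware slot corresponding to its position in the sorted list.
 */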
static int
dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	/* Add the new ACL entry to the linked list and get its index */
	index = dpaa2_switch_acl_entry_add_to_list(block, entry);

	/* Move up in priority the ACL entries to make space
	 * for the new filter.
	 */
	precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
	for (i = 0; i < index; i++) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);

		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence++;
	}

	/* Add the new entry to hardware */
	entry->cfg.precedence = precedence;
	err = dpaa2_switch_acl_entry_add(block, entry);
	block->num_acl_rules++;

	return err;
}

static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					  unsigned long cookie)
{
	struct dpaa2_switch_acl_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp, *n;
	int index = 0;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == entry->cookie)
			return index;
		index++;
	}
	return -ENOENT;
}

static struct dpaa2_switch_mirror_entry *
dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					 unsigned long cookie)
{
	struct dpaa2_switch_mirror_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

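/*
 * Remove a filter from hardware and from the software list, then shift
 * the entries that preceded it in the sorted list back by one precedence
 * slot so the hardware table stays contiguous.
 */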
static int
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
				  struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	index = dpaa2_switch_acl_entry_get_index(block, entry);

	/* Remove from hardware the ACL entry */
	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	block->num_acl_rules--;

	/* Remove it from the list also */
	list_del(&entry->list);

	/* Move down in priority the entries over the deleted one */
	precedence = entry->cfg.precedence;
	for (i = index - 1; i >= 0; i--) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence--;
	}

	kfree(entry);

	return 0;
}

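/*
 * Translate the supported tc actions (trap, redirect, drop) into the
 * corresponding DPSW ACL result. Redirect is accepted only when the
 * destination netdev is a DPAA2 switch port.
 */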
static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
					    struct flow_action_entry *cls_act,
					    struct dpsw_acl_result *dpsw_act,
					    struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (cls_act->id) {
	case FLOW_ACTION_TRAP:
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
		break;
	case FLOW_ACTION_REDIRECT:
		if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Destination not a DPAA2 switch port");
			return -EOPNOTSUPP;
		}

		dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
		break;
	case FLOW_ACTION_DROP:
		dpsw_act->action = DPSW_ACL_ACTION_DROP;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Action not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

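/*
 * Apply a mirroring rule on all switch ports bound to this filter block.
 * The switch has a single reflection (mirror) destination, so the mirror
 * port is configured first if none is in use yet; on a per-port failure
 * the ports already configured are unwound.
 */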
static int
dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_mirror_entry *entry,
			      u16 to, struct netlink_ext_ack *extack)
{
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	struct ethsw_port_priv *port_priv;
	unsigned long ports_added = 0;
	u16 vlan = entry->cfg.vlan_id;
	bool mirror_port_enabled;
	int err, port;

	/* Setup the mirroring port */
	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	if (!mirror_port_enabled) {
		err = dpsw_set_reflection_if(ethsw->mc_io, 0,
					     ethsw->dpsw_handle, to);
		if (err)
			return err;
		ethsw->mirror_port = to;
	}

	/* Setup the same egress mirroring configuration on all the switch
	 * ports that share the same filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
		port_priv = ethsw->ports[port];

		/* We cannot add a per VLAN mirroring rule if the VLAN in
		 * question is not installed on the switch port.
		 */
		if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
			NL_SET_ERR_MSG(extack,
				       "VLAN must be installed on the switch port");
			err = -EINVAL;
			goto err_remove_filters;
		}

		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port, &entry->cfg);
		if (err)
			goto err_remove_filters;

		ports_added |= BIT(port);
	}

	list_add(&entry->list, &block->mirror_entries);

	return 0;

err_remove_filters:
	for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, &entry->cfg);
	}

	if (!mirror_port_enabled)
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return err;
}

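/*
 * Tear down a mirroring rule on all ports of the filter block and drop
 * it from the software list. When the last mirror rule is gone, the
 * mirror port is marked as unused again (the num_ifs sentinel).
 */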
static int
dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_mirror_entry *entry)
{
	struct dpsw_reflection_cfg *cfg = &entry->cfg;
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	int port;

	/* Remove this mirroring configuration from all the ports belonging to
	 * the filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, cfg);

	/* Also remove it from the list of mirror filters */
	list_del(&entry->list);
	kfree(entry);

	/* If this was the last mirror filter, then unset the mirror port */
	if (list_empty(&block->mirror_entries))
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return 0;
}

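/*
 * Install a flower filter as an ACL entry: parse the match into a
 * key/mask pair, translate the single action, then insert the entry at
 * the position dictated by its tc priority.
 */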
static int
dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
	if (err)
		goto free_acl_entry;

	act = &rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

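/*
 * Mirroring can only be configured per VLAN, so the flower match must
 * consist of exactly one fully masked VLAN ID (besides the basic and
 * control keys that the flower classifier always populates).
 *
 * Illustrative usage (not part of this file) that exercises this path:
 *   tc filter add dev swp0 ingress protocol 802.1q flower skip_sw \
 *	vlan_id 100 action mirred egress mirror dev swp1
 */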
static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
						u16 *vlan)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	int ret = -EOPNOTSUPP;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Mirroring is supported only per VLAN");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_priority != 0 ||
		    match.mask->vlan_dei != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only matching on VLAN ID supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id != 0xFFF) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching not supported");
			return -EOPNOTSUPP;
		}

		*vlan = (u16)match.key->vlan_id;
		ret = 0;
	}

	return ret;
}

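/*
 * Install a per-VLAN mirror rule from a flower filter. A second mirror
 * destination is rejected (the hardware supports only one), as is a
 * duplicate rule for the same VLAN.
 */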
static int
dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
				       struct flow_cls_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id, vlan;
	int err;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Parse the key */
	err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
	if (err)
		return err;

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    tmp->cfg.vlan_id == vlan) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
	mirror_entry->cfg.vlan_id = vlan;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

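/*
 * Entry point for flower filter installation: dispatch on the single
 * action to either the ACL path (trap/redirect/drop) or the mirroring
 * path (mirred).
 */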
int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_flower_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_flower_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

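/*
 * The cookie uniquely identifies an installed filter but not its kind,
 * so look it up in the ACL list first and fall back to the mirror list.
 */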
int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}

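/*
 * A matchall filter becomes a catch-all ACL entry: no key is parsed, so
 * the zero-initialized key/mask matches every frame and only the action
 * matters.
 */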
static int
dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	act = &cls->rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

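/*
 * Install a port-wide (match-all) mirror rule. The same single mirror
 * destination restriction applies as in the flower variant, and at most
 * one matchall mirror rule is accepted per filter block.
 */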
static int
dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
					 struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration. One matchall rule per block is the maximum.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matchall mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

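/*
 * Entry point for matchall filter installation: dispatch on the single
 * action, exactly like the flower entry point above.
 */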
int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&cls->rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &cls->rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_matchall_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

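/*
 * Replay all mirror rules of a filter block on a port joining the block;
 * on failure, unwind so that the port is left with none of them.
 */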
int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
				      struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_add;
	}

	return 0;

unwind_add:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_remove_reflection(ethsw->mc_io, 0,
					  ethsw->dpsw_handle,
					  port_priv->idx, &tmp->cfg);

	return err;
}

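/*
 * Remove all of the block's mirror rules from a port leaving the block;
 * on failure, re-add the rules so the port keeps a consistent set.
 */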
int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
					struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_remove;
	}

	return 0;

unwind_remove:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
				       port_priv->idx, &tmp->cfg);

	return err;
}

int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block,
							 acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}