1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * DPAA2 Ethernet Switch driver
4 *
5 * Copyright 2014-2016 Freescale Semiconductor Inc.
6 * Copyright 2017-2021 NXP
7 *
8 */
9
10 #include <linux/module.h>
11
12 #include <linux/interrupt.h>
13 #include <linux/msi.h>
14 #include <linux/kthread.h>
15 #include <linux/workqueue.h>
16 #include <linux/iommu.h>
17 #include <net/pkt_cls.h>
18
19 #include <linux/fsl/mc.h>
20
21 #include "dpaa2-switch.h"
22
23 /* Minimal supported DPSW version */
24 #define DPSW_MIN_VER_MAJOR 8
25 #define DPSW_MIN_VER_MINOR 9
26
27 #define DEFAULT_VLAN_ID 1
28
29 static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
30 {
31 return port_priv->fdb->fdb_id;
32 }
33
34 static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
35 {
36 int i;
37
38 for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
39 if (!ethsw->fdbs[i].in_use)
40 return &ethsw->fdbs[i];
41 return NULL;
42 }
43
44 static struct dpaa2_switch_filter_block *
45 dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
46 {
47 int i;
48
49 for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
50 if (!ethsw->filter_blocks[i].in_use)
51 return &ethsw->filter_blocks[i];
52 return NULL;
53 }
54
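/* Select the FDB instance this port should use when it joins or leaves a
 * bridge. On bridge leave (bridge_dev == NULL) an unused private FDB is
 * claimed; on bridge join the port adopts the FDB of the first dpaa2 switch
 * interface already present in that bridge.
 */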
55 static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
56 struct net_device *bridge_dev)
57 {
58 struct ethsw_port_priv *other_port_priv = NULL;
59 struct dpaa2_switch_fdb *fdb;
60 struct net_device *other_dev;
61 struct list_head *iter;
62
63 /* If we leave a bridge (bridge_dev is NULL), find an unused
64 * FDB and use that.
65 */
66 if (!bridge_dev) {
67 fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);
68
69 /* If there is no unused FDB, we must be the last port that
70 * leaves the last bridge, all the others are standalone. We
71 * can just keep the FDB that we already have.
72 */
73
74 if (!fdb) {
75 port_priv->fdb->bridge_dev = NULL;
76 return 0;
77 }
78
79 port_priv->fdb = fdb;
80 port_priv->fdb->in_use = true;
81 port_priv->fdb->bridge_dev = NULL;
82 return 0;
83 }
84
85 /* The below call to netdev_for_each_lower_dev() demands the RTNL lock
86 * being held. Assert on it so that it's easier to catch new code
87 * paths that reach this point without the RTNL lock.
88 */
89 ASSERT_RTNL();
90
91 /* If part of a bridge, use the FDB of the first dpaa2 switch interface
92 * to be present in that bridge
93 */
94 netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
95 if (!dpaa2_switch_port_dev_check(other_dev))
96 continue;
97
98 if (other_dev == port_priv->netdev)
99 continue;
100
101 other_port_priv = netdev_priv(other_dev);
102 break;
103 }
104
105 /* The current port is about to change its FDB to the one used by the
106 * first port that joined the bridge.
107 */
108 if (other_port_priv) {
109 /* The previous FDB is about to become unused, since the
110 * interface is no longer standalone.
111 */
112 port_priv->fdb->in_use = false;
113 port_priv->fdb->bridge_dev = NULL;
114
115 /* Get a reference to the new FDB */
116 port_priv->fdb = other_port_priv->fdb;
117 }
118
119 /* Keep track of the new upper bridge device */
120 port_priv->fdb->bridge_dev = bridge_dev;
121
122 return 0;
123 }
124
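/* Build the egress flood configuration for the given FDB: include every
 * switch port that shares the same fdb_id and has the requested flood type
 * (DPSW_BROADCAST or DPSW_FLOODING) enabled, plus the control interface.
 */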
125 static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
126 enum dpsw_flood_type type,
127 struct dpsw_egress_flood_cfg *cfg)
128 {
129 int i = 0, j;
130
131 memset(cfg, 0, sizeof(*cfg));
132
133 /* Add all the DPAA2 switch ports found in the same bridging domain to
134 * the egress flooding domain
135 */
136 for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
137 if (!ethsw->ports[j])
138 continue;
139 if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
140 continue;
141
142 if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
143 cfg->if_id[i++] = ethsw->ports[j]->idx;
144 else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
145 cfg->if_id[i++] = ethsw->ports[j]->idx;
146 }
147
148 /* Add the CTRL interface to the egress flooding domain */
149 cfg->if_id[i++] = ethsw->sw_attr.num_ifs;
150
151 cfg->fdb_id = fdb_id;
152 cfg->flood_type = type;
153 cfg->num_ifs = i;
154 }
155
156 static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
157 {
158 struct dpsw_egress_flood_cfg flood_cfg;
159 int err;
160
161 /* Setup broadcast flooding domain */
162 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
163 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
164 &flood_cfg);
165 if (err) {
166 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
167 return err;
168 }
169
170 /* Setup unknown flooding domain */
171 dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
172 err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
173 &flood_cfg);
174 if (err) {
175 dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
176 return err;
177 }
178
179 return 0;
180 }
181
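/* Translate an IOVA handed back by the hardware into a kernel virtual
 * address: go through the IOMMU domain when one is attached, otherwise the
 * IOVA is already a physical address.
 */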
182 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
183 dma_addr_t iova_addr)
184 {
185 phys_addr_t phys_addr;
186
187 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
188
189 return phys_to_virt(phys_addr);
190 }
191
192 static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
193 {
194 struct ethsw_core *ethsw = port_priv->ethsw_data;
195 struct dpsw_vlan_cfg vcfg = {0};
196 int err;
197
198 vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
199 err = dpsw_vlan_add(ethsw->mc_io, 0,
200 ethsw->dpsw_handle, vid, &vcfg);
201 if (err) {
202 dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
203 return err;
204 }
205 ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
206
207 return 0;
208 }
209
210 static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
211 {
212 struct net_device *netdev = port_priv->netdev;
213 struct dpsw_link_state state;
214 int err;
215
216 err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
217 port_priv->ethsw_data->dpsw_handle,
218 port_priv->idx, &state);
219 if (err) {
220 netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
221 return true;
222 }
223
224 WARN_ONCE(state.up > 1, "Garbage read into link_state");
225
226 return state.up ? true : false;
227 }
228
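/* Program a new PVID on the port by rewriting its TCI configuration. The
 * change is only accepted while the interface is disabled, so the port is
 * brought down and back up around the dpsw_if_set_tci() call.
 */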
229 static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
230 {
231 struct ethsw_core *ethsw = port_priv->ethsw_data;
232 struct net_device *netdev = port_priv->netdev;
233 struct dpsw_tci_cfg tci_cfg = { 0 };
234 bool up;
235 int err, ret;
236
237 err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
238 port_priv->idx, &tci_cfg);
239 if (err) {
240 netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
241 return err;
242 }
243
244 tci_cfg.vlan_id = pvid;
245
246 /* Interface needs to be down to change PVID */
247 up = dpaa2_switch_port_is_up(port_priv);
248 if (up) {
249 err = dpsw_if_disable(ethsw->mc_io, 0,
250 ethsw->dpsw_handle,
251 port_priv->idx);
252 if (err) {
253 netdev_err(netdev, "dpsw_if_disable err %d\n", err);
254 return err;
255 }
256 }
257
258 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
259 port_priv->idx, &tci_cfg);
260 if (err) {
261 netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
262 goto set_tci_error;
263 }
264
265 /* Delete previous PVID info and mark the new one */
266 port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
267 port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
268 port_priv->pvid = pvid;
269
270 set_tci_error:
271 if (up) {
272 ret = dpsw_if_enable(ethsw->mc_io, 0,
273 ethsw->dpsw_handle,
274 port_priv->idx);
275 if (ret) {
276 netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
277 return ret;
278 }
279 }
280
281 return err;
282 }
283
284 static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
285 u16 vid, u16 flags)
286 {
287 struct ethsw_core *ethsw = port_priv->ethsw_data;
288 struct net_device *netdev = port_priv->netdev;
289 struct dpsw_vlan_if_cfg vcfg = {0};
290 int err;
291
292 if (port_priv->vlans[vid]) {
293 netdev_warn(netdev, "VLAN %d already configured\n", vid);
294 return -EEXIST;
295 }
296
297 /* If hit, this VLAN rule will lead the packet into the FDB table
298 * specified in the vlan configuration below
299 */
300 vcfg.num_ifs = 1;
301 vcfg.if_id[0] = port_priv->idx;
302 vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
303 vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
304 err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
305 if (err) {
306 netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
307 return err;
308 }
309
310 port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
311
312 if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
313 err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
314 ethsw->dpsw_handle,
315 vid, &vcfg);
316 if (err) {
317 netdev_err(netdev,
318 "dpsw_vlan_add_if_untagged err %d\n", err);
319 return err;
320 }
321 port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
322 }
323
324 if (flags & BRIDGE_VLAN_INFO_PVID) {
325 err = dpaa2_switch_port_set_pvid(port_priv, vid);
326 if (err)
327 return err;
328 }
329
330 return 0;
331 }
332
333 static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
334 {
335 switch (state) {
336 case BR_STATE_DISABLED:
337 return DPSW_STP_STATE_DISABLED;
338 case BR_STATE_LISTENING:
339 return DPSW_STP_STATE_LISTENING;
340 case BR_STATE_LEARNING:
341 return DPSW_STP_STATE_LEARNING;
342 case BR_STATE_FORWARDING:
343 return DPSW_STP_STATE_FORWARDING;
344 case BR_STATE_BLOCKING:
345 return DPSW_STP_STATE_BLOCKING;
346 default:
347 return DPSW_STP_STATE_DISABLED;
348 }
349 }
350
351 static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
352 {
353 struct dpsw_stp_cfg stp_cfg = {0};
354 int err;
355 u16 vid;
356
357 if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
358 return 0; /* Nothing to do */
359
360 stp_cfg.state = br_stp_state_to_dpsw(state);
361 for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
362 if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
363 stp_cfg.vlan_id = vid;
364 err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
365 port_priv->ethsw_data->dpsw_handle,
366 port_priv->idx, &stp_cfg);
367 if (err) {
368 netdev_err(port_priv->netdev,
369 "dpsw_if_set_stp err %d\n", err);
370 return err;
371 }
372 }
373 }
374
375 port_priv->stp_state = state;
376
377 return 0;
378 }
379
380 static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
381 {
382 struct ethsw_port_priv *ppriv_local = NULL;
383 int i, err;
384
385 if (!ethsw->vlans[vid])
386 return -ENOENT;
387
388 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
389 if (err) {
390 dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
391 return err;
392 }
393 ethsw->vlans[vid] = 0;
394
395 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
396 ppriv_local = ethsw->ports[i];
397 if (ppriv_local)
398 ppriv_local->vlans[vid] = 0;
399 }
400
401 return 0;
402 }
403
404 static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
405 const unsigned char *addr)
406 {
407 struct dpsw_fdb_unicast_cfg entry = {0};
408 u16 fdb_id;
409 int err;
410
411 entry.if_egress = port_priv->idx;
412 entry.type = DPSW_FDB_ENTRY_STATIC;
413 ether_addr_copy(entry.mac_addr, addr);
414
415 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
416 err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
417 port_priv->ethsw_data->dpsw_handle,
418 fdb_id, &entry);
419 if (err)
420 netdev_err(port_priv->netdev,
421 "dpsw_fdb_add_unicast err %d\n", err);
422 return err;
423 }
424
425 static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
426 const unsigned char *addr)
427 {
428 struct dpsw_fdb_unicast_cfg entry = {0};
429 u16 fdb_id;
430 int err;
431
432 entry.if_egress = port_priv->idx;
433 entry.type = DPSW_FDB_ENTRY_STATIC;
434 ether_addr_copy(entry.mac_addr, addr);
435
436 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
437 err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
438 port_priv->ethsw_data->dpsw_handle,
439 fdb_id, &entry);
440 /* Silently discard the error when the del command is called multiple times */
441 if (err && err != -ENXIO)
442 netdev_err(port_priv->netdev,
443 "dpsw_fdb_remove_unicast err %d\n", err);
444 return err;
445 }
446
447 static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
448 const unsigned char *addr)
449 {
450 struct dpsw_fdb_multicast_cfg entry = {0};
451 u16 fdb_id;
452 int err;
453
454 ether_addr_copy(entry.mac_addr, addr);
455 entry.type = DPSW_FDB_ENTRY_STATIC;
456 entry.num_ifs = 1;
457 entry.if_id[0] = port_priv->idx;
458
459 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
460 err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
461 port_priv->ethsw_data->dpsw_handle,
462 fdb_id, &entry);
463 /* Silently discard the error when the add command is called multiple times */
464 if (err && err != -ENXIO)
465 netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
466 err);
467 return err;
468 }
469
470 static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
471 const unsigned char *addr)
472 {
473 struct dpsw_fdb_multicast_cfg entry = {0};
474 u16 fdb_id;
475 int err;
476
477 ether_addr_copy(entry.mac_addr, addr);
478 entry.type = DPSW_FDB_ENTRY_STATIC;
479 entry.num_ifs = 1;
480 entry.if_id[0] = port_priv->idx;
481
482 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
483 err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
484 port_priv->ethsw_data->dpsw_handle,
485 fdb_id, &entry);
486 /* Silently discard the error when the del command is called multiple times */
487 if (err && err != -ENAVAIL)
488 netdev_err(port_priv->netdev,
489 "dpsw_fdb_remove_multicast err %d\n", err);
490 return err;
491 }
492
493 static void dpaa2_switch_port_get_stats(struct net_device *netdev,
494 struct rtnl_link_stats64 *stats)
495 {
496 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
497 u64 tmp;
498 int err;
499
500 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
501 port_priv->ethsw_data->dpsw_handle,
502 port_priv->idx,
503 DPSW_CNT_ING_FRAME, &stats->rx_packets);
504 if (err)
505 goto error;
506
507 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
508 port_priv->ethsw_data->dpsw_handle,
509 port_priv->idx,
510 DPSW_CNT_EGR_FRAME, &stats->tx_packets);
511 if (err)
512 goto error;
513
514 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
515 port_priv->ethsw_data->dpsw_handle,
516 port_priv->idx,
517 DPSW_CNT_ING_BYTE, &stats->rx_bytes);
518 if (err)
519 goto error;
520
521 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
522 port_priv->ethsw_data->dpsw_handle,
523 port_priv->idx,
524 DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
525 if (err)
526 goto error;
527
528 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
529 port_priv->ethsw_data->dpsw_handle,
530 port_priv->idx,
531 DPSW_CNT_ING_FRAME_DISCARD,
532 &stats->rx_dropped);
533 if (err)
534 goto error;
535
536 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
537 port_priv->ethsw_data->dpsw_handle,
538 port_priv->idx,
539 DPSW_CNT_ING_FLTR_FRAME,
540 &tmp);
541 if (err)
542 goto error;
543 stats->rx_dropped += tmp;
544
545 err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
546 port_priv->ethsw_data->dpsw_handle,
547 port_priv->idx,
548 DPSW_CNT_EGR_FRAME_DISCARD,
549 &stats->tx_dropped);
550 if (err)
551 goto error;
552
553 return;
554
555 error:
556 netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
557 }
558
559 static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
560 int attr_id)
561 {
562 return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
563 }
564
565 static int dpaa2_switch_port_get_offload_stats(int attr_id,
566 const struct net_device *netdev,
567 void *sp)
568 {
569 switch (attr_id) {
570 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
571 dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
572 return 0;
573 }
574
575 return -EINVAL;
576 }
577
578 static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
579 {
580 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
581 int err;
582
583 err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
584 0,
585 port_priv->ethsw_data->dpsw_handle,
586 port_priv->idx,
587 (u16)ETHSW_L2_MAX_FRM(mtu));
588 if (err) {
589 netdev_err(netdev,
590 "dpsw_if_set_max_frame_length() err %d\n", err);
591 return err;
592 }
593
594 netdev->mtu = mtu;
595 return 0;
596 }
597
598 static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
599 {
600 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
601 struct dpsw_link_state state;
602 int err;
603
604 /* When we manage the MAC/PHY using phylink there is no need
605 * to manually update the netif_carrier.
606 */
607 if (dpaa2_switch_port_is_type_phy(port_priv))
608 return 0;
609
610 /* Interrupts are received even though no one issued an 'ifconfig up'
611 * on the switch interface. Ignore these link state update interrupts
612 */
613 if (!netif_running(netdev))
614 return 0;
615
616 err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
617 port_priv->ethsw_data->dpsw_handle,
618 port_priv->idx, &state);
619 if (err) {
620 netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
621 return err;
622 }
623
624 WARN_ONCE(state.up > 1, "Garbage read into link_state");
625
626 if (state.up != port_priv->link_state) {
627 if (state.up) {
628 netif_carrier_on(netdev);
629 netif_tx_start_all_queues(netdev);
630 } else {
631 netif_carrier_off(netdev);
632 netif_tx_stop_all_queues(netdev);
633 }
634 port_priv->link_state = state.up;
635 }
636
637 return 0;
638 }
639
640 /* Manage all NAPI instances for the control interface.
641 *
642 * We only have one RX queue and one Tx Conf queue for all
643 * switch ports. Therefore, we only need to enable the NAPI instance once, the
644 * first time one of the switch ports runs .dev_open().
645 */
646
647 static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
648 {
649 int i;
650
651 /* Access to the ethsw->napi_users relies on the RTNL lock */
652 ASSERT_RTNL();
653
654 /* a new interface is using the NAPI instance */
655 ethsw->napi_users++;
656
657 /* if there is already a user of the instance, return */
658 if (ethsw->napi_users > 1)
659 return;
660
661 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
662 napi_enable(&ethsw->fq[i].napi);
663 }
664
665 static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
666 {
667 int i;
668
669 /* Access to the ethsw->napi_users relies on the RTNL lock */
670 ASSERT_RTNL();
671
672 /* If we are not the last interface using the NAPI, return */
673 ethsw->napi_users--;
674 if (ethsw->napi_users)
675 return;
676
677 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
678 napi_disable(&ethsw->fq[i].napi);
679 }
680
681 static int dpaa2_switch_port_open(struct net_device *netdev)
682 {
683 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
684 struct ethsw_core *ethsw = port_priv->ethsw_data;
685 int err;
686
687 if (!dpaa2_switch_port_is_type_phy(port_priv)) {
688 /* Explicitly set carrier off, otherwise
689 * netif_carrier_ok() will return true and cause 'ip link show'
690 * to report the LOWER_UP flag, even though the link
691 * notification wasn't even received.
692 */
693 netif_carrier_off(netdev);
694 }
695
696 err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
697 port_priv->ethsw_data->dpsw_handle,
698 port_priv->idx);
699 if (err) {
700 netdev_err(netdev, "dpsw_if_enable err %d\n", err);
701 return err;
702 }
703
704 dpaa2_switch_enable_ctrl_if_napi(ethsw);
705
706 if (dpaa2_switch_port_is_type_phy(port_priv)) {
707 dpaa2_mac_start(port_priv->mac);
708 phylink_start(port_priv->mac->phylink);
709 }
710
711 return 0;
712 }
713
714 static int dpaa2_switch_port_stop(struct net_device *netdev)
715 {
716 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
717 struct ethsw_core *ethsw = port_priv->ethsw_data;
718 int err;
719
720 if (dpaa2_switch_port_is_type_phy(port_priv)) {
721 phylink_stop(port_priv->mac->phylink);
722 dpaa2_mac_stop(port_priv->mac);
723 } else {
724 netif_tx_stop_all_queues(netdev);
725 netif_carrier_off(netdev);
726 }
727
728 err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
729 port_priv->ethsw_data->dpsw_handle,
730 port_priv->idx);
731 if (err) {
732 netdev_err(netdev, "dpsw_if_disable err %d\n", err);
733 return err;
734 }
735
736 dpaa2_switch_disable_ctrl_if_napi(ethsw);
737
738 return 0;
739 }
740
741 static int dpaa2_switch_port_parent_id(struct net_device *dev,
742 struct netdev_phys_item_id *ppid)
743 {
744 struct ethsw_port_priv *port_priv = netdev_priv(dev);
745
746 ppid->id_len = 1;
747 ppid->id[0] = port_priv->ethsw_data->dev_id;
748
749 return 0;
750 }
751
752 static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
753 size_t len)
754 {
755 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
756 int err;
757
758 err = snprintf(name, len, "p%d", port_priv->idx);
759 if (err >= len)
760 return -EINVAL;
761
762 return 0;
763 }
764
765 struct ethsw_dump_ctx {
766 struct net_device *dev;
767 struct sk_buff *skb;
768 struct netlink_callback *cb;
769 int idx;
770 };
771
772 static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
773 struct ethsw_dump_ctx *dump)
774 {
775 int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
776 u32 portid = NETLINK_CB(dump->cb->skb).portid;
777 u32 seq = dump->cb->nlh->nlmsg_seq;
778 struct nlmsghdr *nlh;
779 struct ndmsg *ndm;
780
781 if (dump->idx < dump->cb->args[2])
782 goto skip;
783
784 nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
785 sizeof(*ndm), NLM_F_MULTI);
786 if (!nlh)
787 return -EMSGSIZE;
788
789 ndm = nlmsg_data(nlh);
790 ndm->ndm_family = AF_BRIDGE;
791 ndm->ndm_pad1 = 0;
792 ndm->ndm_pad2 = 0;
793 ndm->ndm_flags = NTF_SELF;
794 ndm->ndm_type = 0;
795 ndm->ndm_ifindex = dump->dev->ifindex;
796 ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP;
797
798 if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
799 goto nla_put_failure;
800
801 nlmsg_end(dump->skb, nlh);
802
803 skip:
804 dump->idx++;
805 return 0;
806
807 nla_put_failure:
808 nlmsg_cancel(dump->skb, nlh);
809 return -EMSGSIZE;
810 }
811
812 static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
813 struct ethsw_port_priv *port_priv)
814 {
815 int idx = port_priv->idx;
816 int valid;
817
818 if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
819 valid = entry->if_info == port_priv->idx;
820 else
821 valid = entry->if_mask[idx / 8] & BIT(idx % 8);
822
823 return valid;
824 }
825
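/* Dump the FDB table associated with this port into a DMA-mapped buffer and
 * invoke @cb for every returned entry. The callback decides what to do with
 * each entry (netlink dump, fast ageing, etc.).
 */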
826 static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
827 dpaa2_switch_fdb_cb_t cb, void *data)
828 {
829 struct net_device *net_dev = port_priv->netdev;
830 struct ethsw_core *ethsw = port_priv->ethsw_data;
831 struct device *dev = net_dev->dev.parent;
832 struct fdb_dump_entry *fdb_entries;
833 struct fdb_dump_entry fdb_entry;
834 dma_addr_t fdb_dump_iova;
835 u16 num_fdb_entries;
836 u32 fdb_dump_size;
837 int err = 0, i;
838 u8 *dma_mem;
839 u16 fdb_id;
840
841 fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
842 dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
843 if (!dma_mem)
844 return -ENOMEM;
845
846 fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
847 DMA_FROM_DEVICE);
848 if (dma_mapping_error(dev, fdb_dump_iova)) {
849 netdev_err(net_dev, "dma_map_single() failed\n");
850 err = -ENOMEM;
851 goto err_map;
852 }
853
854 fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
855 err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
856 fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
857 if (err) {
858 netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
859 goto err_dump;
860 }
861
862 dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
863
864 fdb_entries = (struct fdb_dump_entry *)dma_mem;
865 for (i = 0; i < num_fdb_entries; i++) {
866 fdb_entry = fdb_entries[i];
867
868 err = cb(port_priv, &fdb_entry, data);
869 if (err)
870 goto end;
871 }
872
873 end:
874 kfree(dma_mem);
875
876 return 0;
877
878 err_dump:
879 dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
880 err_map:
881 kfree(dma_mem);
882 return err;
883 }
884
885 static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
886 struct fdb_dump_entry *fdb_entry,
887 void *data)
888 {
889 if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
890 return 0;
891
892 return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
893 }
894
895 static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
896 struct net_device *net_dev,
897 struct net_device *filter_dev, int *idx)
898 {
899 struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
900 struct ethsw_dump_ctx dump = {
901 .dev = net_dev,
902 .skb = skb,
903 .cb = cb,
904 .idx = *idx,
905 };
906 int err;
907
908 err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
909 *idx = dump.idx;
910
911 return err;
912 }
913
914 static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
915 struct fdb_dump_entry *fdb_entry,
916 void *data __always_unused)
917 {
918 if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
919 return 0;
920
921 if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
922 return 0;
923
924 if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
925 dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
926 else
927 dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);
928
929 return 0;
930 }
931
932 static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
933 {
934 dpaa2_switch_fdb_iterate(port_priv,
935 dpaa2_switch_fdb_entry_fast_age, NULL);
936 }
937
938 static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
939 u16 vid)
940 {
941 struct switchdev_obj_port_vlan vlan = {
942 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
943 .vid = vid,
944 .obj.orig_dev = netdev,
945 /* This API only allows programming tagged, non-PVID VIDs */
946 .flags = 0,
947 };
948
949 return dpaa2_switch_port_vlans_add(netdev, &vlan);
950 }
951
952 static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
953 u16 vid)
954 {
955 struct switchdev_obj_port_vlan vlan = {
956 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
957 .vid = vid,
958 .obj.orig_dev = netdev,
959 /* This API only allows programming tagged, non-PVID VIDs */
960 .flags = 0,
961 };
962
963 return dpaa2_switch_port_vlans_del(netdev, &vlan);
964 }
965
966 static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
967 {
968 struct ethsw_core *ethsw = port_priv->ethsw_data;
969 struct net_device *net_dev = port_priv->netdev;
970 struct device *dev = net_dev->dev.parent;
971 u8 mac_addr[ETH_ALEN];
972 int err;
973
974 if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
975 return 0;
976
977 /* Get firmware address, if any */
978 err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
979 port_priv->idx, mac_addr);
980 if (err) {
981 dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
982 return err;
983 }
984
985 /* First check if firmware has any address configured by bootloader */
986 if (!is_zero_ether_addr(mac_addr)) {
987 eth_hw_addr_set(net_dev, mac_addr);
988 } else {
989 /* No MAC address configured, fill in net_dev->dev_addr
990 * with a random one
991 */
992 eth_hw_addr_random(net_dev);
993 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
994
995 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
996 * practical purposes, this will be our "permanent" mac address,
997 * at least until the next reboot. This move will also permit
998 * register_netdevice() to properly fill up net_dev->perm_addr.
999 */
1000 net_dev->addr_assign_type = NET_ADDR_PERM;
1001 }
1002
1003 return 0;
1004 }
1005
1006 static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
1007 const struct dpaa2_fd *fd)
1008 {
1009 struct device *dev = ethsw->dev;
1010 unsigned char *buffer_start;
1011 struct sk_buff **skbh, *skb;
1012 dma_addr_t fd_addr;
1013
1014 fd_addr = dpaa2_fd_get_addr(fd);
1015 skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);
1016
1017 skb = *skbh;
1018 buffer_start = (unsigned char *)skbh;
1019
1020 dma_unmap_single(dev, fd_addr,
1021 skb_tail_pointer(skb) - buffer_start,
1022 DMA_TO_DEVICE);
1023
1024 /* Move on with skb release */
1025 dev_kfree_skb(skb);
1026 }
1027
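/* Build a single-buffer Tx frame descriptor around @skb. A backpointer to
 * the skb is stored in the software annotation area at the start of the
 * buffer so the skb can be released on Tx confirmation, and the frame
 * annotation status is cleared before the buffer is DMA-mapped.
 */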
1028 static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
1029 struct sk_buff *skb,
1030 struct dpaa2_fd *fd)
1031 {
1032 struct device *dev = ethsw->dev;
1033 struct sk_buff **skbh;
1034 dma_addr_t addr;
1035 u8 *buff_start;
1036 void *hwa;
1037
1038 buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
1039 DPAA2_SWITCH_TX_BUF_ALIGN,
1040 DPAA2_SWITCH_TX_BUF_ALIGN);
1041
1042 /* Clear FAS to have consistent values for TX confirmation. It is
1043 * located in the first 8 bytes of the buffer's hardware annotation
1044 * area
1045 */
1046 hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
1047 memset(hwa, 0, 8);
1048
1049 /* Store a backpointer to the skb at the beginning of the buffer
1050 * (in the private data area) such that we can release it
1051 * on Tx confirm
1052 */
1053 skbh = (struct sk_buff **)buff_start;
1054 *skbh = skb;
1055
1056 addr = dma_map_single(dev, buff_start,
1057 skb_tail_pointer(skb) - buff_start,
1058 DMA_TO_DEVICE);
1059 if (unlikely(dma_mapping_error(dev, addr)))
1060 return -ENOMEM;
1061
1062 /* Setup the FD fields */
1063 memset(fd, 0, sizeof(*fd));
1064
1065 dpaa2_fd_set_addr(fd, addr);
1066 dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
1067 dpaa2_fd_set_len(fd, skb->len);
1068 dpaa2_fd_set_format(fd, dpaa2_fd_single);
1069
1070 return 0;
1071 }
1072
1073 static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
1074 struct net_device *net_dev)
1075 {
1076 struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
1077 struct ethsw_core *ethsw = port_priv->ethsw_data;
1078 int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
1079 struct dpaa2_fd fd;
1080 int err;
1081
1082 if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
1083 struct sk_buff *ns;
1084
1085 ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
1086 if (unlikely(!ns)) {
1087 net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
1088 goto err_free_skb;
1089 }
1090 dev_consume_skb_any(skb);
1091 skb = ns;
1092 }
1093
1094 /* We'll be holding a back-reference to the skb until Tx confirmation */
1095 skb = skb_unshare(skb, GFP_ATOMIC);
1096 if (unlikely(!skb)) {
1097 /* skb_unshare() has already freed the skb */
1098 net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
1099 goto err_exit;
1100 }
1101
1102 /* At this stage, we do not support non-linear skbs so just try to
1103 * linearize the skb and if that's not working, just drop the packet.
1104 */
1105 err = skb_linearize(skb);
1106 if (err) {
1107 net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
1108 goto err_free_skb;
1109 }
1110
1111 err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
1112 if (unlikely(err)) {
1113 net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
1114 goto err_free_skb;
1115 }
1116
1117 do {
1118 err = dpaa2_io_service_enqueue_qd(NULL,
1119 port_priv->tx_qdid,
1120 8, 0, &fd);
1121 retries--;
1122 } while (err == -EBUSY && retries);
1123
1124 if (unlikely(err < 0)) {
1125 dpaa2_switch_free_fd(ethsw, &fd);
1126 goto err_exit;
1127 }
1128
1129 return NETDEV_TX_OK;
1130
1131 err_free_skb:
1132 dev_kfree_skb(skb);
1133 err_exit:
1134 return NETDEV_TX_OK;
1135 }
1136
1137 static int
1138 dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
1139 struct flow_cls_offload *f)
1140 {
1141 switch (f->command) {
1142 case FLOW_CLS_REPLACE:
1143 return dpaa2_switch_cls_flower_replace(filter_block, f);
1144 case FLOW_CLS_DESTROY:
1145 return dpaa2_switch_cls_flower_destroy(filter_block, f);
1146 default:
1147 return -EOPNOTSUPP;
1148 }
1149 }
1150
1151 static int
1152 dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
1153 struct tc_cls_matchall_offload *f)
1154 {
1155 switch (f->command) {
1156 case TC_CLSMATCHALL_REPLACE:
1157 return dpaa2_switch_cls_matchall_replace(block, f);
1158 case TC_CLSMATCHALL_DESTROY:
1159 return dpaa2_switch_cls_matchall_destroy(block, f);
1160 default:
1161 return -EOPNOTSUPP;
1162 }
1163 }
1164
1165 static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
1166 void *type_data,
1167 void *cb_priv)
1168 {
1169 switch (type) {
1170 case TC_SETUP_CLSFLOWER:
1171 return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
1172 case TC_SETUP_CLSMATCHALL:
1173 return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
1174 default:
1175 return -EOPNOTSUPP;
1176 }
1177 }
1178
1179 static LIST_HEAD(dpaa2_switch_block_cb_list);
1180
1181 static int
1182 dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
1183 struct dpaa2_switch_filter_block *block)
1184 {
1185 struct ethsw_core *ethsw = port_priv->ethsw_data;
1186 struct net_device *netdev = port_priv->netdev;
1187 struct dpsw_acl_if_cfg acl_if_cfg;
1188 int err;
1189
1190 if (port_priv->filter_block)
1191 return -EINVAL;
1192
1193 acl_if_cfg.if_id[0] = port_priv->idx;
1194 acl_if_cfg.num_ifs = 1;
1195 err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1196 block->acl_id, &acl_if_cfg);
1197 if (err) {
1198 netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
1199 return err;
1200 }
1201
1202 block->ports |= BIT(port_priv->idx);
1203 port_priv->filter_block = block;
1204
1205 return 0;
1206 }
1207
1208 static int
1209 dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
1210 struct dpaa2_switch_filter_block *block)
1211 {
1212 struct ethsw_core *ethsw = port_priv->ethsw_data;
1213 struct net_device *netdev = port_priv->netdev;
1214 struct dpsw_acl_if_cfg acl_if_cfg;
1215 int err;
1216
1217 if (port_priv->filter_block != block)
1218 return -EINVAL;
1219
1220 acl_if_cfg.if_id[0] = port_priv->idx;
1221 acl_if_cfg.num_ifs = 1;
1222 err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1223 block->acl_id, &acl_if_cfg);
1224 if (err) {
1225 netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
1226 return err;
1227 }
1228
1229 block->ports &= ~BIT(port_priv->idx);
1230 port_priv->filter_block = NULL;
1231 return 0;
1232 }
1233
1234 static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
1235 struct dpaa2_switch_filter_block *block)
1236 {
1237 struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
1238 int err;
1239
1240 /* Offload all the mirror entries found in the block on this new port
1241 * joining it.
1242 */
1243 err = dpaa2_switch_block_offload_mirror(block, port_priv);
1244 if (err)
1245 return err;
1246
1247 /* If the port is already bound to this ACL table then do nothing. This
1248 * can happen when this port is the first one to join a tc block
1249 */
1250 if (port_priv->filter_block == block)
1251 return 0;
1252
1253 err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
1254 if (err)
1255 return err;
1256
1257 /* Mark the previous ACL table as being unused if this was the last
1258 * port that was using it.
1259 */
1260 if (old_block->ports == 0)
1261 old_block->in_use = false;
1262
1263 return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
1264 }
1265
1266 static int
1267 dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
1268 struct dpaa2_switch_filter_block *block)
1269 {
1270 struct ethsw_core *ethsw = port_priv->ethsw_data;
1271 struct dpaa2_switch_filter_block *new_block;
1272 int err;
1273
1274 /* Unoffload all the mirror entries found in the block from the
1275 * port leaving it.
1276 */
1277 err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
1278 if (err)
1279 return err;
1280
1281 /* We are the last port that leaves a block (an ACL table).
1282 * We'll continue to use this table.
1283 */
1284 if (block->ports == BIT(port_priv->idx))
1285 return 0;
1286
1287 err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
1288 if (err)
1289 return err;
1290
1291 if (block->ports == 0)
1292 block->in_use = false;
1293
1294 new_block = dpaa2_switch_filter_block_get_unused(ethsw);
1295 new_block->in_use = true;
1296 return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
1297 }
1298
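/* Bind the port to the filter block associated with a tc block. If no
 * flow_block_cb exists yet for this block, the port keeps using its private
 * filter block and registers a new callback; otherwise it joins the filter
 * block already shared by the other ports.
 */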
1299 static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
1300 struct flow_block_offload *f)
1301 {
1302 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1303 struct ethsw_core *ethsw = port_priv->ethsw_data;
1304 struct dpaa2_switch_filter_block *filter_block;
1305 struct flow_block_cb *block_cb;
1306 bool register_block = false;
1307 int err;
1308
1309 block_cb = flow_block_cb_lookup(f->block,
1310 dpaa2_switch_port_setup_tc_block_cb_ig,
1311 ethsw);
1312
1313 if (!block_cb) {
1314 /* If the filter block is not already known, then this port
1315 * must be the first to join it. In this case, we can just
1316 * continue to use our private table
1317 */
1318 filter_block = port_priv->filter_block;
1319
1320 block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
1321 ethsw, filter_block, NULL);
1322 if (IS_ERR(block_cb))
1323 return PTR_ERR(block_cb);
1324
1325 register_block = true;
1326 } else {
1327 filter_block = flow_block_cb_priv(block_cb);
1328 }
1329
1330 flow_block_cb_incref(block_cb);
1331 err = dpaa2_switch_port_block_bind(port_priv, filter_block);
1332 if (err)
1333 goto err_block_bind;
1334
1335 if (register_block) {
1336 flow_block_cb_add(block_cb, f);
1337 list_add_tail(&block_cb->driver_list,
1338 &dpaa2_switch_block_cb_list);
1339 }
1340
1341 return 0;
1342
1343 err_block_bind:
1344 if (!flow_block_cb_decref(block_cb))
1345 flow_block_cb_free(block_cb);
1346 return err;
1347 }
1348
1349 static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
1350 struct flow_block_offload *f)
1351 {
1352 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1353 struct ethsw_core *ethsw = port_priv->ethsw_data;
1354 struct dpaa2_switch_filter_block *filter_block;
1355 struct flow_block_cb *block_cb;
1356 int err;
1357
1358 block_cb = flow_block_cb_lookup(f->block,
1359 dpaa2_switch_port_setup_tc_block_cb_ig,
1360 ethsw);
1361 if (!block_cb)
1362 return;
1363
1364 filter_block = flow_block_cb_priv(block_cb);
1365 err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
1366 if (!err && !flow_block_cb_decref(block_cb)) {
1367 flow_block_cb_remove(block_cb, f);
1368 list_del(&block_cb->driver_list);
1369 }
1370 }
1371
1372 static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
1373 struct flow_block_offload *f)
1374 {
1375 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1376 return -EOPNOTSUPP;
1377
1378 f->driver_block_list = &dpaa2_switch_block_cb_list;
1379
1380 switch (f->command) {
1381 case FLOW_BLOCK_BIND:
1382 return dpaa2_switch_setup_tc_block_bind(netdev, f);
1383 case FLOW_BLOCK_UNBIND:
1384 dpaa2_switch_setup_tc_block_unbind(netdev, f);
1385 return 0;
1386 default:
1387 return -EOPNOTSUPP;
1388 }
1389 }
1390
1391 static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
1392 enum tc_setup_type type,
1393 void *type_data)
1394 {
1395 switch (type) {
1396 case TC_SETUP_BLOCK: {
1397 return dpaa2_switch_setup_tc_block(netdev, type_data);
1398 }
1399 default:
1400 return -EOPNOTSUPP;
1401 }
1402
1403 return 0;
1404 }
1405
1406 static const struct net_device_ops dpaa2_switch_port_ops = {
1407 .ndo_open = dpaa2_switch_port_open,
1408 .ndo_stop = dpaa2_switch_port_stop,
1409
1410 .ndo_set_mac_address = eth_mac_addr,
1411 .ndo_get_stats64 = dpaa2_switch_port_get_stats,
1412 .ndo_change_mtu = dpaa2_switch_port_change_mtu,
1413 .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats,
1414 .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats,
1415 .ndo_fdb_dump = dpaa2_switch_port_fdb_dump,
1416 .ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add,
1417 .ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill,
1418
1419 .ndo_start_xmit = dpaa2_switch_port_tx,
1420 .ndo_get_port_parent_id = dpaa2_switch_port_parent_id,
1421 .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
1422 .ndo_setup_tc = dpaa2_switch_port_setup_tc,
1423 };
1424
1425 bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
1426 {
1427 return netdev->netdev_ops == &dpaa2_switch_port_ops;
1428 }
1429
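/* Look up the DPMAC object connected to this switch port and, if the port is
 * backed by a PHY, create and connect a dpaa2_mac instance for it. Returns 0
 * when no DPMAC endpoint is connected.
 */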
1430 static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
1431 {
1432 struct fsl_mc_device *dpsw_port_dev, *dpmac_dev;
1433 struct dpaa2_mac *mac;
1434 int err;
1435
1436 dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent);
1437 dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx);
1438
1439 if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
1440 return PTR_ERR(dpmac_dev);
1441
1442 if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
1443 return 0;
1444
1445 mac = kzalloc(sizeof(*mac), GFP_KERNEL);
1446 if (!mac)
1447 return -ENOMEM;
1448
1449 mac->mc_dev = dpmac_dev;
1450 mac->mc_io = port_priv->ethsw_data->mc_io;
1451 mac->net_dev = port_priv->netdev;
1452
1453 err = dpaa2_mac_open(mac);
1454 if (err)
1455 goto err_free_mac;
1456 port_priv->mac = mac;
1457
1458 if (dpaa2_switch_port_is_type_phy(port_priv)) {
1459 err = dpaa2_mac_connect(mac);
1460 if (err) {
1461 netdev_err(port_priv->netdev,
1462 "Error connecting to the MAC endpoint %pe\n",
1463 ERR_PTR(err));
1464 goto err_close_mac;
1465 }
1466 }
1467
1468 return 0;
1469
1470 err_close_mac:
1471 dpaa2_mac_close(mac);
1472 port_priv->mac = NULL;
1473 err_free_mac:
1474 kfree(mac);
1475 return err;
1476 }
1477
1478 static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
1479 {
1480 if (dpaa2_switch_port_is_type_phy(port_priv))
1481 dpaa2_mac_disconnect(port_priv->mac);
1482
1483 if (!dpaa2_switch_port_has_mac(port_priv))
1484 return;
1485
1486 dpaa2_mac_close(port_priv->mac);
1487 kfree(port_priv->mac);
1488 port_priv->mac = NULL;
1489 }
1490
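/* Threaded IRQ handler for DPSW interface events: refresh the link state and
 * MAC address on DPSW_IRQ_EVENT_LINK_CHANGED, and connect or disconnect the
 * MAC endpoint on DPSW_IRQ_EVENT_ENDPOINT_CHANGED.
 */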
1491 static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
1492 {
1493 struct device *dev = (struct device *)arg;
1494 struct ethsw_core *ethsw = dev_get_drvdata(dev);
1495 struct ethsw_port_priv *port_priv;
1496 u32 status = ~0;
1497 int err, if_id;
1498
1499 err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
1500 DPSW_IRQ_INDEX_IF, &status);
1501 if (err) {
1502 dev_err(dev, "Can't get irq status (err %d)\n", err);
1503 goto out;
1504 }
1505
1506 if_id = (status & 0xFFFF0000) >> 16;
1507 port_priv = ethsw->ports[if_id];
1508
1509 if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
1510 dpaa2_switch_port_link_state_update(port_priv->netdev);
1511 dpaa2_switch_port_set_mac_addr(port_priv);
1512 }
1513
1514 if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
1515 rtnl_lock();
1516 if (dpaa2_switch_port_has_mac(port_priv))
1517 dpaa2_switch_port_disconnect_mac(port_priv);
1518 else
1519 dpaa2_switch_port_connect_mac(port_priv);
1520 rtnl_unlock();
1521 }
1522
1523 out:
1524 err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
1525 DPSW_IRQ_INDEX_IF, status);
1526 if (err)
1527 dev_err(dev, "Can't clear irq status (err %d)\n", err);
1528
1529 return IRQ_HANDLED;
1530 }
1531
1532 static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
1533 {
1534 struct device *dev = &sw_dev->dev;
1535 struct ethsw_core *ethsw = dev_get_drvdata(dev);
1536 u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
1537 struct fsl_mc_device_irq *irq;
1538 int err;
1539
1540 err = fsl_mc_allocate_irqs(sw_dev);
1541 if (err) {
1542 dev_err(dev, "MC irqs allocation failed\n");
1543 return err;
1544 }
1545
1546 if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
1547 err = -EINVAL;
1548 goto free_irq;
1549 }
1550
1551 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
1552 DPSW_IRQ_INDEX_IF, 0);
1553 if (err) {
1554 dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
1555 goto free_irq;
1556 }
1557
1558 irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
1559
1560 err = devm_request_threaded_irq(dev, irq->virq, NULL,
1561 dpaa2_switch_irq0_handler_thread,
1562 IRQF_NO_SUSPEND | IRQF_ONESHOT,
1563 dev_name(dev), dev);
1564 if (err) {
1565 dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
1566 goto free_irq;
1567 }
1568
1569 err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
1570 DPSW_IRQ_INDEX_IF, mask);
1571 if (err) {
1572 dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
1573 goto free_devm_irq;
1574 }
1575
1576 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
1577 DPSW_IRQ_INDEX_IF, 1);
1578 if (err) {
1579 dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
1580 goto free_devm_irq;
1581 }
1582
1583 return 0;
1584
1585 free_devm_irq:
1586 devm_free_irq(dev, irq->virq, dev);
1587 free_irq:
1588 fsl_mc_free_irqs(sw_dev);
1589 return err;
1590 }
1591
1592 static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
1593 {
1594 struct device *dev = &sw_dev->dev;
1595 struct ethsw_core *ethsw = dev_get_drvdata(dev);
1596 int err;
1597
1598 err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
1599 DPSW_IRQ_INDEX_IF, 0);
1600 if (err)
1601 dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
1602
1603 fsl_mc_free_irqs(sw_dev);
1604 }
1605
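/* Enable or disable hardware FDB learning on the port. When learning is
 * turned off, the dynamic entries already learned on this port are flushed
 * through a fast age.
 */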
1606 static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
1607 {
1608 struct ethsw_core *ethsw = port_priv->ethsw_data;
1609 enum dpsw_learning_mode learn_mode;
1610 int err;
1611
1612 if (enable)
1613 learn_mode = DPSW_LEARNING_MODE_HW;
1614 else
1615 learn_mode = DPSW_LEARNING_MODE_DIS;
1616
1617 err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
1618 port_priv->idx, learn_mode);
1619 if (err)
1620 netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);
1621
1622 if (!enable)
1623 dpaa2_switch_port_fast_age(port_priv);
1624
1625 return err;
1626 }
1627
1628 static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
1629 u8 state)
1630 {
1631 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1632 int err;
1633
1634 err = dpaa2_switch_port_set_stp_state(port_priv, state);
1635 if (err)
1636 return err;
1637
1638 switch (state) {
1639 case BR_STATE_DISABLED:
1640 case BR_STATE_BLOCKING:
1641 case BR_STATE_LISTENING:
1642 err = dpaa2_switch_port_set_learning(port_priv, false);
1643 break;
1644 case BR_STATE_LEARNING:
1645 case BR_STATE_FORWARDING:
1646 err = dpaa2_switch_port_set_learning(port_priv,
1647 port_priv->learn_ena);
1648 break;
1649 }
1650
1651 return err;
1652 }
1653
1654 static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
1655 struct switchdev_brport_flags flags)
1656 {
1657 struct ethsw_core *ethsw = port_priv->ethsw_data;
1658
1659 if (flags.mask & BR_BCAST_FLOOD)
1660 port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);
1661
1662 if (flags.mask & BR_FLOOD)
1663 port_priv->ucast_flood = !!(flags.val & BR_FLOOD);
1664
1665 return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
1666 }
1667
1668 static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
1669 struct switchdev_brport_flags flags,
1670 struct netlink_ext_ack *extack)
1671 {
1672 if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
1673 BR_MCAST_FLOOD))
1674 return -EINVAL;
1675
1676 if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
1677 bool multicast = !!(flags.val & BR_MCAST_FLOOD);
1678 bool unicast = !!(flags.val & BR_FLOOD);
1679
1680 if (unicast != multicast) {
1681 NL_SET_ERR_MSG_MOD(extack,
1682 "Cannot configure multicast flooding independently of unicast");
1683 return -EINVAL;
1684 }
1685 }
1686
1687 return 0;
1688 }
1689
1690 static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
1691 struct switchdev_brport_flags flags,
1692 struct netlink_ext_ack *extack)
1693 {
1694 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1695 int err;
1696
1697 if (flags.mask & BR_LEARNING) {
1698 bool learn_ena = !!(flags.val & BR_LEARNING);
1699
1700 err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
1701 if (err)
1702 return err;
1703 port_priv->learn_ena = learn_ena;
1704 }
1705
1706 if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
1707 err = dpaa2_switch_port_flood(port_priv, flags);
1708 if (err)
1709 return err;
1710 }
1711
1712 return 0;
1713 }
1714
1715 static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
1716 const struct switchdev_attr *attr,
1717 struct netlink_ext_ack *extack)
1718 {
1719 int err = 0;
1720
1721 switch (attr->id) {
1722 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
1723 err = dpaa2_switch_port_attr_stp_state_set(netdev,
1724 attr->u.stp_state);
1725 break;
1726 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
1727 if (!attr->u.vlan_filtering) {
1728 NL_SET_ERR_MSG_MOD(extack,
1729 "The DPAA2 switch does not support VLAN-unaware operation");
1730 return -EOPNOTSUPP;
1731 }
1732 break;
1733 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
1734 err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
1735 break;
1736 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
1737 err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
1738 break;
1739 default:
1740 err = -EOPNOTSUPP;
1741 break;
1742 }
1743
1744 return err;
1745 }
1746
1747 int dpaa2_switch_port_vlans_add(struct net_device *netdev,
1748 const struct switchdev_obj_port_vlan *vlan)
1749 {
1750 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1751 struct ethsw_core *ethsw = port_priv->ethsw_data;
1752 struct dpsw_attr *attr = &ethsw->sw_attr;
1753 int err = 0;
1754
1755 /* Make sure that the VLAN is not already configured
1756 * on the switch port
1757 */
1758 if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
1759 return -EEXIST;
1760
1761 /* Check if there is space for a new VLAN */
1762 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
1763 &ethsw->sw_attr);
1764 if (err) {
1765 netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
1766 return err;
1767 }
1768 if (attr->max_vlans - attr->num_vlans < 1)
1769 return -ENOSPC;
1780
1781 if (!port_priv->ethsw_data->vlans[vlan->vid]) {
1782 /* this is a new VLAN */
1783 err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
1784 if (err)
1785 return err;
1786
1787 port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
1788 }
1789
1790 return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
1791 }
1792
1793 static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
1794 const unsigned char *addr)
1795 {
1796 struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
1797 struct netdev_hw_addr *ha;
1798
1799 netif_addr_lock_bh(netdev);
1800 list_for_each_entry(ha, &list->list, list) {
1801 if (ether_addr_equal(ha->addr, addr)) {
1802 netif_addr_unlock_bh(netdev);
1803 return 1;
1804 }
1805 }
1806 netif_addr_unlock_bh(netdev);
1807 return 0;
1808 }
1809
1810 static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
1811 const struct switchdev_obj_port_mdb *mdb)
1812 {
1813 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1814 int err;
1815
1816 /* Check if address is already set on this port */
1817 if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
1818 return -EEXIST;
1819
1820 err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
1821 if (err)
1822 return err;
1823
1824 err = dev_mc_add(netdev, mdb->addr);
1825 if (err) {
1826 netdev_err(netdev, "dev_mc_add err %d\n", err);
1827 dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
1828 }
1829
1830 return err;
1831 }
1832
1833 static int dpaa2_switch_port_obj_add(struct net_device *netdev,
1834 const struct switchdev_obj *obj)
1835 {
1836 int err;
1837
1838 switch (obj->id) {
1839 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1840 err = dpaa2_switch_port_vlans_add(netdev,
1841 SWITCHDEV_OBJ_PORT_VLAN(obj));
1842 break;
1843 case SWITCHDEV_OBJ_ID_PORT_MDB:
1844 err = dpaa2_switch_port_mdb_add(netdev,
1845 SWITCHDEV_OBJ_PORT_MDB(obj));
1846 break;
1847 default:
1848 err = -EOPNOTSUPP;
1849 break;
1850 }
1851
1852 return err;
1853 }
1854
1855 static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
1856 {
1857 struct ethsw_core *ethsw = port_priv->ethsw_data;
1858 struct net_device *netdev = port_priv->netdev;
1859 struct dpsw_vlan_if_cfg vcfg;
1860 int i, err;
1861
1862 if (!port_priv->vlans[vid])
1863 return -ENOENT;
1864
1865 if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
1866 /* If we are deleting the PVID of a port, use VLAN 4095 instead
1867 * as we are sure that neither the bridge nor the 8021q module
1868 * will use it
1869 */
1870 err = dpaa2_switch_port_set_pvid(port_priv, 4095);
1871 if (err)
1872 return err;
1873 }
1874
1875 vcfg.num_ifs = 1;
1876 vcfg.if_id[0] = port_priv->idx;
1877 if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
1878 err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
1879 ethsw->dpsw_handle,
1880 vid, &vcfg);
1881 if (err) {
1882 netdev_err(netdev,
1883 "dpsw_vlan_remove_if_untagged err %d\n",
1884 err);
1885 }
1886 port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
1887 }
1888
1889 if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
1890 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1891 vid, &vcfg);
1892 if (err) {
1893 netdev_err(netdev,
1894 "dpsw_vlan_remove_if err %d\n", err);
1895 return err;
1896 }
1897 port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
1898
1899 /* Delete VLAN from switch if it is no longer configured on
1900 * any port
1901 */
1902 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1903 if (ethsw->ports[i] &&
1904 ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
1905 return 0; /* Found a port member in VID */
1906 }
1907
1908 ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
1909
1910 err = dpaa2_switch_dellink(ethsw, vid);
1911 if (err)
1912 return err;
1913 }
1914
1915 return 0;
1916 }
1917
1918 int dpaa2_switch_port_vlans_del(struct net_device *netdev,
1919 const struct switchdev_obj_port_vlan *vlan)
1920 {
1921 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1922
1923 if (netif_is_bridge_master(vlan->obj.orig_dev))
1924 return -EOPNOTSUPP;
1925
1926 return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
1927 }
1928
1929 static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
1930 const struct switchdev_obj_port_mdb *mdb)
1931 {
1932 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1933 int err;
1934
1935 if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
1936 return -ENOENT;
1937
1938 err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
1939 if (err)
1940 return err;
1941
1942 err = dev_mc_del(netdev, mdb->addr);
1943 if (err) {
1944 netdev_err(netdev, "dev_mc_del err %d\n", err);
1945 return err;
1946 }
1947
1948 return err;
1949 }
1950
1951 static int dpaa2_switch_port_obj_del(struct net_device *netdev,
1952 const struct switchdev_obj *obj)
1953 {
1954 int err;
1955
1956 switch (obj->id) {
1957 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1958 err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
1959 break;
1960 case SWITCHDEV_OBJ_ID_PORT_MDB:
1961 err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
1962 break;
1963 default:
1964 err = -EOPNOTSUPP;
1965 break;
1966 }
1967 return err;
1968 }
1969
1970 static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
1971 struct switchdev_notifier_port_attr_info *ptr)
1972 {
1973 int err;
1974
1975 err = switchdev_handle_port_attr_set(netdev, ptr,
1976 dpaa2_switch_port_dev_check,
1977 dpaa2_switch_port_attr_set);
1978 return notifier_from_errno(err);
1979 }
1980
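/* Defined at the end of this file; forward-declared here because the bridge
 * join/leave paths below pass them to switchdev_bridge_port_offload() and
 * switchdev_bridge_port_unoffload().
 */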
1981 static struct notifier_block dpaa2_switch_port_switchdev_nb;
1982 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
1983
1984 static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
1985 struct net_device *upper_dev,
1986 struct netlink_ext_ack *extack)
1987 {
1988 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1989 struct ethsw_core *ethsw = port_priv->ethsw_data;
1990 struct ethsw_port_priv *other_port_priv;
1991 struct net_device *other_dev;
1992 struct list_head *iter;
1993 bool learn_ena;
1994 int err;
1995
1996 netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
1997 if (!dpaa2_switch_port_dev_check(other_dev))
1998 continue;
1999
2000 other_port_priv = netdev_priv(other_dev);
2001 if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
2002 NL_SET_ERR_MSG_MOD(extack,
2003 "Interface from a different DPSW is in the bridge already");
2004 return -EINVAL;
2005 }
2006 }
2007
2008 /* Delete the previously manually installed VLAN 1 */
2009 err = dpaa2_switch_port_del_vlan(port_priv, 1);
2010 if (err)
2011 return err;
2012
2013 dpaa2_switch_port_set_fdb(port_priv, upper_dev);
2014
2015 /* Inherit the initial bridge port learning state */
2016 learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
2017 err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
2018 port_priv->learn_ena = learn_ena;
2019
2020 /* Setup the egress flood policy (broadcast, unknown unicast) */
2021 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
2022 if (err)
2023 goto err_egress_flood;
2024
2025 err = switchdev_bridge_port_offload(netdev, netdev, NULL,
2026 &dpaa2_switch_port_switchdev_nb,
2027 &dpaa2_switch_port_switchdev_blocking_nb,
2028 false, extack);
2029 if (err)
2030 goto err_switchdev_offload;
2031
2032 return 0;
2033
2034 err_switchdev_offload:
2035 err_egress_flood:
2036 dpaa2_switch_port_set_fdb(port_priv, NULL);
2037 return err;
2038 }
2039
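/* vlan_for_each() callbacks used while a port changes FDB tables: remove the
 * RX VLANs installed through vlan_vid_add() from the old table, then restore
 * them into the new one.
 */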
2040 static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
2041 {
2042 __be16 vlan_proto = htons(ETH_P_8021Q);
2043
2044 if (vdev)
2045 vlan_proto = vlan_dev_vlan_proto(vdev);
2046
2047 return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
2048 }
2049
2050 static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
2051 {
2052 __be16 vlan_proto = htons(ETH_P_8021Q);
2053
2054 if (vdev)
2055 vlan_proto = vlan_dev_vlan_proto(vdev);
2056
2057 return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
2058 }
2059
2060 static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
2061 {
2062 switchdev_bridge_port_unoffload(netdev, NULL,
2063 &dpaa2_switch_port_switchdev_nb,
2064 &dpaa2_switch_port_switchdev_blocking_nb);
2065 }
2066
2067 static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
2068 {
2069 struct ethsw_port_priv *port_priv = netdev_priv(netdev);
2070 struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
2071 struct ethsw_core *ethsw = port_priv->ethsw_data;
2072 int err;
2073
2074 /* First of all, fast age any learn FDB addresses on this switch port */
2075 dpaa2_switch_port_fast_age(port_priv);
2076
2077 /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
2078 * upper devices or otherwise from the FDB table that we are about to
2079 * leave
2080 */
2081 err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
2082 if (err)
2083 netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);
2084
2085 dpaa2_switch_port_set_fdb(port_priv, NULL);
2086
2087 /* Restore all RX VLANs into the new FDB table that we just joined */
2088 err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
2089 if (err)
2090 netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);
2091
2092 /* Reset the flooding state to denote that this port can send any
2093 * packet in standalone mode. With this, we are also ensuring that any
2094 * later bridge join will have the flooding flag on.
2095 */
2096 port_priv->bcast_flood = true;
2097 port_priv->ucast_flood = true;
2098
2099 /* Setup the egress flood policy (broadcast, unknown unicast).
2100 * When the port is not under a bridge, only the CTRL interface is part
2101 * of the flooding domain besides the actual port
2102 */
2103 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
2104 if (err)
2105 return err;
2106
2107 /* Recreate the egress flood domain of the FDB that we just left */
2108 err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
2109 if (err)
2110 return err;
2111
2112 /* No HW learning when not under a bridge */
2113 err = dpaa2_switch_port_set_learning(port_priv, false);
2114 if (err)
2115 return err;
2116 port_priv->learn_ena = false;
2117
2118 	/* Add VLAN 1 as the PVID when not under a bridge. We need this since
2119 	 * the dpaa2 switch interfaces cannot operate in a VLAN-unaware fashion
2120 */
2121 return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
2122 BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
2123 }
2124
2125 static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
2126 {
2127 struct net_device *upper_dev;
2128 struct list_head *iter;
2129
2130 /* RCU read lock not necessary because we have write-side protection
2131 * (rtnl_mutex), however a non-rcu iterator does not exist.
2132 */
2133 netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
2134 if (is_vlan_dev(upper_dev))
2135 return -EOPNOTSUPP;
2136
2137 return 0;
2138 }
2139
2140 static int
2141 dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
2142 struct net_device *upper_dev,
2143 struct netlink_ext_ack *extack)
2144 {
2145 int err;
2146
2147 if (!br_vlan_enabled(upper_dev)) {
2148 NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
2149 return -EOPNOTSUPP;
2150 }
2151
2152 err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
2153 if (err) {
2154 NL_SET_ERR_MSG_MOD(extack,
2155 "Cannot join a bridge while VLAN uppers are present");
2156 		return err;
2157 }
2158
2159 return 0;
2160 }
2161
2162 static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
2163 unsigned long event, void *ptr)
2164 {
2165 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
2166 struct netdev_notifier_changeupper_info *info = ptr;
2167 struct netlink_ext_ack *extack;
2168 struct net_device *upper_dev;
2169 int err = 0;
2170
2171 if (!dpaa2_switch_port_dev_check(netdev))
2172 return NOTIFY_DONE;
2173
2174 extack = netdev_notifier_info_to_extack(&info->info);
2175
2176 switch (event) {
2177 case NETDEV_PRECHANGEUPPER:
2178 upper_dev = info->upper_dev;
2179 if (!netif_is_bridge_master(upper_dev))
2180 break;
2181
2182 err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
2183 upper_dev,
2184 extack);
2185 if (err)
2186 goto out;
2187
2188 if (!info->linking)
2189 dpaa2_switch_port_pre_bridge_leave(netdev);
2190
2191 break;
2192 case NETDEV_CHANGEUPPER:
2193 upper_dev = info->upper_dev;
2194 if (netif_is_bridge_master(upper_dev)) {
2195 if (info->linking)
2196 err = dpaa2_switch_port_bridge_join(netdev,
2197 upper_dev,
2198 extack);
2199 else
2200 err = dpaa2_switch_port_bridge_leave(netdev);
2201 }
2202 break;
2203 }
2204
2205 out:
2206 return notifier_from_errno(err);
2207 }
2208
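/* Deferred work context for FDB add/del notifications; the actual FDB
 * programming happens later in dpaa2_switch_event_work().
 */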
2209 struct ethsw_switchdev_event_work {
2210 struct work_struct work;
2211 struct switchdev_notifier_fdb_info fdb_info;
2212 struct net_device *dev;
2213 unsigned long event;
2214 };
2215
2216 static void dpaa2_switch_event_work(struct work_struct *work)
2217 {
2218 struct ethsw_switchdev_event_work *switchdev_work =
2219 container_of(work, struct ethsw_switchdev_event_work, work);
2220 struct net_device *dev = switchdev_work->dev;
2221 struct switchdev_notifier_fdb_info *fdb_info;
2222 int err;
2223
2224 rtnl_lock();
2225 fdb_info = &switchdev_work->fdb_info;
2226
2227 switch (switchdev_work->event) {
2228 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2229 if (!fdb_info->added_by_user || fdb_info->is_local)
2230 break;
2231 if (is_unicast_ether_addr(fdb_info->addr))
2232 err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
2233 fdb_info->addr);
2234 else
2235 err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
2236 fdb_info->addr);
2237 if (err)
2238 break;
2239 fdb_info->offloaded = true;
2240 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
2241 &fdb_info->info, NULL);
2242 break;
2243 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2244 if (!fdb_info->added_by_user || fdb_info->is_local)
2245 break;
2246 if (is_unicast_ether_addr(fdb_info->addr))
2247 dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
2248 else
2249 dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
2250 break;
2251 }
2252
2253 rtnl_unlock();
2254 kfree(switchdev_work->fdb_info.addr);
2255 kfree(switchdev_work);
2256 dev_put(dev);
2257 }
2258
2259 /* Called under rcu_read_lock() */
2260 static int dpaa2_switch_port_event(struct notifier_block *nb,
2261 unsigned long event, void *ptr)
2262 {
2263 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2264 struct ethsw_port_priv *port_priv = netdev_priv(dev);
2265 struct ethsw_switchdev_event_work *switchdev_work;
2266 struct switchdev_notifier_fdb_info *fdb_info = ptr;
2267 struct ethsw_core *ethsw = port_priv->ethsw_data;
2268
2269 if (event == SWITCHDEV_PORT_ATTR_SET)
2270 return dpaa2_switch_port_attr_set_event(dev, ptr);
2271
2272 if (!dpaa2_switch_port_dev_check(dev))
2273 return NOTIFY_DONE;
2274
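	/* FDB programming needs the rtnl lock and issues MC commands, which
	 * cannot be done in this atomic notifier context; defer it to a work
	 * item instead.
	 */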
2275 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2276 if (!switchdev_work)
2277 return NOTIFY_BAD;
2278
2279 INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
2280 switchdev_work->dev = dev;
2281 switchdev_work->event = event;
2282
2283 switch (event) {
2284 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2285 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2286 memcpy(&switchdev_work->fdb_info, ptr,
2287 sizeof(switchdev_work->fdb_info));
2288 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
2289 if (!switchdev_work->fdb_info.addr)
2290 goto err_addr_alloc;
2291
2292 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
2293 fdb_info->addr);
2294
2295 		/* Hold a reference so the device is not freed before the work runs */
2296 dev_hold(dev);
2297 break;
2298 default:
2299 kfree(switchdev_work);
2300 return NOTIFY_DONE;
2301 }
2302
2303 queue_work(ethsw->workqueue, &switchdev_work->work);
2304
2305 return NOTIFY_DONE;
2306
2307 err_addr_alloc:
2308 kfree(switchdev_work);
2309 return NOTIFY_BAD;
2310 }
2311
2312 static int dpaa2_switch_port_obj_event(unsigned long event,
2313 struct net_device *netdev,
2314 struct switchdev_notifier_port_obj_info *port_obj_info)
2315 {
2316 int err = -EOPNOTSUPP;
2317
2318 if (!dpaa2_switch_port_dev_check(netdev))
2319 return NOTIFY_DONE;
2320
2321 switch (event) {
2322 case SWITCHDEV_PORT_OBJ_ADD:
2323 err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
2324 break;
2325 case SWITCHDEV_PORT_OBJ_DEL:
2326 err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
2327 break;
2328 }
2329
2330 port_obj_info->handled = true;
2331 return notifier_from_errno(err);
2332 }
2333
2334 static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
2335 unsigned long event, void *ptr)
2336 {
2337 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2338
2339 switch (event) {
2340 case SWITCHDEV_PORT_OBJ_ADD:
2341 case SWITCHDEV_PORT_OBJ_DEL:
2342 return dpaa2_switch_port_obj_event(event, dev, ptr);
2343 case SWITCHDEV_PORT_ATTR_SET:
2344 return dpaa2_switch_port_attr_set_event(dev, ptr);
2345 }
2346
2347 return NOTIFY_DONE;
2348 }
2349
2350 /* Build a linear skb based on a single-buffer frame descriptor */
2351 static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
2352 const struct dpaa2_fd *fd)
2353 {
2354 u16 fd_offset = dpaa2_fd_get_offset(fd);
2355 dma_addr_t addr = dpaa2_fd_get_addr(fd);
2356 u32 fd_length = dpaa2_fd_get_len(fd);
2357 struct device *dev = ethsw->dev;
2358 struct sk_buff *skb = NULL;
2359 void *fd_vaddr;
2360
2361 fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
2362 dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
2363 DMA_FROM_DEVICE);
2364
2365 skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
2366 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2367 if (unlikely(!skb)) {
2368 dev_err(dev, "build_skb() failed\n");
2369 return NULL;
2370 }
2371
2372 skb_reserve(skb, fd_offset);
2373 skb_put(skb, fd_length);
2374
2375 ethsw->buf_count--;
2376
2377 return skb;
2378 }
2379
2380 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
2381 const struct dpaa2_fd *fd)
2382 {
2383 dpaa2_switch_free_fd(fq->ethsw, fd);
2384 }
2385
2386 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
2387 const struct dpaa2_fd *fd)
2388 {
2389 struct ethsw_core *ethsw = fq->ethsw;
2390 struct ethsw_port_priv *port_priv;
2391 struct net_device *netdev;
2392 struct vlan_ethhdr *hdr;
2393 struct sk_buff *skb;
2394 u16 vlan_tci, vid;
2395 int if_id, err;
2396
2397 /* get switch ingress interface ID */
2398 if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;
2399
2400 if (if_id >= ethsw->sw_attr.num_ifs) {
2401 dev_err(ethsw->dev, "Frame received from unknown interface!\n");
2402 goto err_free_fd;
2403 }
2404 port_priv = ethsw->ports[if_id];
2405 netdev = port_priv->netdev;
2406
2407 /* build the SKB based on the FD received */
2408 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
2409 		/* Free the invalid FD even when the message is rate limited */
2410 		if (net_ratelimit())
2411 			netdev_err(netdev, "Received invalid frame format\n");
2412 		goto err_free_fd;
2413 	}
2414
2415 skb = dpaa2_switch_build_linear_skb(ethsw, fd);
2416 if (unlikely(!skb))
2417 goto err_free_fd;
2418
2419 skb_reset_mac_header(skb);
2420
2421 /* Remove the VLAN header if the packet that we just received has a vid
2422 	 * equal to the port PVID. Since the dpaa2-switch can operate only in
2423 * VLAN-aware mode and no alterations are made on the packet when it's
2424 * redirected/mirrored to the control interface, we are sure that there
2425 * will always be a VLAN header present.
2426 */
2427 hdr = vlan_eth_hdr(skb);
2428 vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
2429 if (vid == port_priv->pvid) {
2430 err = __skb_vlan_pop(skb, &vlan_tci);
2431 if (err) {
2432 dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err);
2433 goto err_free_fd;
2434 }
2435 }
2436
2437 skb->dev = netdev;
2438 skb->protocol = eth_type_trans(skb, skb->dev);
2439
2440 /* Setup the offload_fwd_mark only if the port is under a bridge */
2441 skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);
2442
2443 netif_receive_skb(skb);
2444
2445 return;
2446
2447 err_free_fd:
2448 dpaa2_switch_free_fd(ethsw, fd);
2449 }
2450
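/* Detect optional features based on the DPSW object version reported by the
 * MC firmware (ETHSW_FEATURE_MAC_ADDR needs version 8.6 or newer).
 */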
2451 static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
2452 {
2453 ethsw->features = 0;
2454
2455 if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
2456 ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
2457 }
2458
2459 static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
2460 {
2461 struct dpsw_ctrl_if_attr ctrl_if_attr;
2462 struct device *dev = ethsw->dev;
2463 int i = 0;
2464 int err;
2465
2466 err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
2467 &ctrl_if_attr);
2468 if (err) {
2469 dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
2470 return err;
2471 }
2472
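	/* The control interface provides one Rx queue and one Tx
	 * error/confirmation queue; record their FQIDs for the DPIO setup.
	 */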
2473 ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
2474 ethsw->fq[i].ethsw = ethsw;
2475 ethsw->fq[i++].type = DPSW_QUEUE_RX;
2476
2477 ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
2478 ethsw->fq[i].ethsw = ethsw;
2479 ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;
2480
2481 return 0;
2482 }
2483
2484 /* Free buffers acquired from the buffer pool or which were meant to
2485 * be released in the pool
2486 */
2487 static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
2488 {
2489 struct device *dev = ethsw->dev;
2490 void *vaddr;
2491 int i;
2492
2493 for (i = 0; i < count; i++) {
2494 vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
2495 dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
2496 DMA_FROM_DEVICE);
2497 free_pages((unsigned long)vaddr, 0);
2498 }
2499 }
2500
2501 /* Perform a single release command to add buffers
2502  * to the specified buffer pool. Returns the number of buffers
2503  * released to the pool, or 0 on failure.
 */
2504 static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
2505 {
2506 struct device *dev = ethsw->dev;
2507 u64 buf_array[BUFS_PER_CMD];
2508 struct page *page;
2509 int retries = 0;
2510 dma_addr_t addr;
2511 int err;
2512 int i;
2513
2514 for (i = 0; i < BUFS_PER_CMD; i++) {
2515 /* Allocate one page for each Rx buffer. WRIOP sees
2516 * the entire page except for a tailroom reserved for
2517 * skb shared info
2518 */
2519 page = dev_alloc_pages(0);
2520 if (!page) {
2521 dev_err(dev, "buffer allocation failed\n");
2522 goto err_alloc;
2523 }
2524
2525 addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
2526 DMA_FROM_DEVICE);
2527 if (dma_mapping_error(dev, addr)) {
2528 			dev_err(dev, "dma_map_page() failed\n");
2529 goto err_map;
2530 }
2531 buf_array[i] = addr;
2532 }
2533
2534 release_bufs:
2535 /* In case the portal is busy, retry until successful or
2536 * max retries hit.
2537 */
2538 while ((err = dpaa2_io_service_release(NULL, bpid,
2539 buf_array, i)) == -EBUSY) {
2540 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
2541 break;
2542
2543 cpu_relax();
2544 }
2545
2546 /* If release command failed, clean up and bail out. */
2547 if (err) {
2548 dpaa2_switch_free_bufs(ethsw, buf_array, i);
2549 return 0;
2550 }
2551
2552 return i;
2553
2554 err_map:
2555 __free_pages(page, 0);
2556 err_alloc:
2557 /* If we managed to allocate at least some buffers,
2558 * release them to hardware
2559 */
2560 if (i)
2561 goto release_bufs;
2562
2563 return 0;
2564 }
2565
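/* Top up the Rx buffer pool to DPAA2_ETHSW_NUM_BUFS once the buffer count
 * drops below DPAA2_ETHSW_REFILL_THRESH.
 */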
2566 static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
2567 {
2568 int *count = ðsw->buf_count;
2569 int new_count;
2570 int err = 0;
2571
2572 if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
2573 do {
2574 new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
2575 if (unlikely(!new_count)) {
2576 /* Out of memory; abort for now, we'll
2577 * try later on
2578 */
2579 break;
2580 }
2581 *count += new_count;
2582 } while (*count < DPAA2_ETHSW_NUM_BUFS);
2583
2584 if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
2585 err = -ENOMEM;
2586 }
2587
2588 return err;
2589 }
2590
2591 static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
2592 {
2593 int *count, i;
2594
2595 for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
2596 count = ðsw->buf_count;
2597 *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
2598
2599 if (unlikely(*count < BUFS_PER_CMD))
2600 return -ENOMEM;
2601 }
2602
2603 return 0;
2604 }
2605
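/* Acquire and free all the buffers still left in the hardware pool */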
2606 static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
2607 {
2608 u64 buf_array[BUFS_PER_CMD];
2609 int ret;
2610
2611 do {
2612 ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
2613 buf_array, BUFS_PER_CMD);
2614 if (ret < 0) {
2615 dev_err(ethsw->dev,
2616 "dpaa2_io_service_acquire() = %d\n", ret);
2617 return;
2618 }
2619 dpaa2_switch_free_bufs(ethsw, buf_array, ret);
2620
2621 } while (ret);
2622 }
2623
2624 static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
2625 {
2626 struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
2627 struct device *dev = ethsw->dev;
2628 struct fsl_mc_device *dpbp_dev;
2629 struct dpbp_attr dpbp_attrs;
2630 int err;
2631
2632 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2633 &dpbp_dev);
2634 if (err) {
2635 if (err == -ENXIO)
2636 err = -EPROBE_DEFER;
2637 else
2638 dev_err(dev, "DPBP device allocation failed\n");
2639 return err;
2640 }
2641 ethsw->dpbp_dev = dpbp_dev;
2642
2643 err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
2644 &dpbp_dev->mc_handle);
2645 if (err) {
2646 dev_err(dev, "dpbp_open() failed\n");
2647 goto err_open;
2648 }
2649
2650 err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2651 if (err) {
2652 dev_err(dev, "dpbp_reset() failed\n");
2653 goto err_reset;
2654 }
2655
2656 err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2657 if (err) {
2658 dev_err(dev, "dpbp_enable() failed\n");
2659 goto err_enable;
2660 }
2661
2662 err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
2663 &dpbp_attrs);
2664 if (err) {
2665 dev_err(dev, "dpbp_get_attributes() failed\n");
2666 goto err_get_attr;
2667 }
2668
2669 dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
2670 dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
2671 dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
2672 dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;
2673
2674 err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
2675 &dpsw_ctrl_if_pools_cfg);
2676 if (err) {
2677 dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
2678 goto err_get_attr;
2679 }
2680 ethsw->bpid = dpbp_attrs.id;
2681
2682 return 0;
2683
2684 err_get_attr:
2685 dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2686 err_enable:
2687 err_reset:
2688 dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2689 err_open:
2690 fsl_mc_object_free(dpbp_dev);
2691 return err;
2692 }
2693
2694 static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
2695 {
2696 dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
2697 dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
2698 fsl_mc_object_free(ethsw->dpbp_dev);
2699 }
2700
2701 static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
2702 {
2703 int i;
2704
2705 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
2706 ethsw->fq[i].store =
2707 dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
2708 ethsw->dev);
2709 if (!ethsw->fq[i].store) {
2710 dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
2711 while (--i >= 0)
2712 dpaa2_io_store_destroy(ethsw->fq[i].store);
2713 return -ENOMEM;
2714 }
2715 }
2716
2717 return 0;
2718 }
2719
2720 static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
2721 {
2722 int i;
2723
2724 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
2725 dpaa2_io_store_destroy(ethsw->fq[i].store);
2726 }
2727
2728 static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
2729 {
2730 int err, retries = 0;
2731
2732 /* Try to pull from the FQ while the portal is busy and we didn't hit
2733 	 * the maximum number of retries
2734 */
2735 do {
2736 err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
2737 cpu_relax();
2738 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
2739
2740 if (unlikely(err))
2741 dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err);
2742
2743 return err;
2744 }
2745
2746 /* Consume all frames pull-dequeued into the store */
2747 static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
2748 {
2749 struct ethsw_core *ethsw = fq->ethsw;
2750 int cleaned = 0, is_last;
2751 struct dpaa2_dq *dq;
2752 int retries = 0;
2753
2754 do {
2755 /* Get the next available FD from the store */
2756 dq = dpaa2_io_store_next(fq->store, &is_last);
2757 if (unlikely(!dq)) {
2758 if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
2759 dev_err_once(ethsw->dev,
2760 "No valid dequeue response\n");
2761 return -ETIMEDOUT;
2762 }
2763 continue;
2764 }
2765
2766 if (fq->type == DPSW_QUEUE_RX)
2767 dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
2768 else
2769 dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
2770 cleaned++;
2771
2772 } while (!is_last);
2773
2774 return cleaned;
2775 }
2776
2777 /* NAPI poll routine */
2778 static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
2779 {
2780 int err, cleaned = 0, store_cleaned, work_done;
2781 struct dpaa2_switch_fq *fq;
2782 int retries = 0;
2783
2784 fq = container_of(napi, struct dpaa2_switch_fq, napi);
2785
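	/* Pull-dequeue frames into the store and consume them until the FQ
	 * runs empty or the NAPI budget is exhausted.
	 */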
2786 do {
2787 err = dpaa2_switch_pull_fq(fq);
2788 if (unlikely(err))
2789 break;
2790
2791 /* Refill pool if appropriate */
2792 dpaa2_switch_refill_bp(fq->ethsw);
2793
2794 store_cleaned = dpaa2_switch_store_consume(fq);
2795 cleaned += store_cleaned;
2796
2797 if (cleaned >= budget) {
2798 work_done = budget;
2799 goto out;
2800 }
2801
2802 } while (store_cleaned);
2803
2804 /* We didn't consume the entire budget, so finish napi and re-enable
2805 * data availability notifications
2806 */
2807 napi_complete_done(napi, cleaned);
2808 do {
2809 err = dpaa2_io_service_rearm(NULL, &fq->nctx);
2810 cpu_relax();
2811 } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
2812
2813 work_done = max(cleaned, 1);
2814 out:
2815
2816 return work_done;
2817 }
2818
2819 static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
2820 {
2821 struct dpaa2_switch_fq *fq;
2822
2823 fq = container_of(nctx, struct dpaa2_switch_fq, nctx);
2824
2825 napi_schedule(&fq->napi);
2826 }
2827
2828 static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
2829 {
2830 struct dpsw_ctrl_if_queue_cfg queue_cfg;
2831 struct dpaa2_io_notification_ctx *nctx;
2832 int err, i, j;
2833
2834 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
2835 nctx = ðsw->fq[i].nctx;
2836
2837 /* Register a new software context for the FQID.
2838 * By using NULL as the first parameter, we specify that we do
2839 		 * not care on which CPU the interrupts for this queue are received
2840 */
2841 nctx->is_cdan = 0;
2842 nctx->id = ethsw->fq[i].fqid;
2843 nctx->desired_cpu = DPAA2_IO_ANY_CPU;
2844 nctx->cb = dpaa2_switch_fqdan_cb;
2845 err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
2846 if (err) {
2847 err = -EPROBE_DEFER;
2848 goto err_register;
2849 }
2850
2851 queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
2852 DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
2853 queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
2854 queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
2855 queue_cfg.dest_cfg.priority = 0;
2856 queue_cfg.user_ctx = nctx->qman64;
2857
2858 err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
2859 ethsw->dpsw_handle,
2860 ethsw->fq[i].type,
2861 &queue_cfg);
2862 if (err)
2863 goto err_set_queue;
2864 }
2865
2866 return 0;
2867
2868 err_set_queue:
2869 dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
2870 err_register:
2871 for (j = 0; j < i; j++)
2872 dpaa2_io_service_deregister(NULL, ðsw->fq[j].nctx,
2873 ethsw->dev);
2874
2875 return err;
2876 }
2877
2878 static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
2879 {
2880 int i;
2881
2882 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
2883 dpaa2_io_service_deregister(NULL, ðsw->fq[i].nctx,
2884 ethsw->dev);
2885 }
2886
2887 static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
2888 {
2889 int err;
2890
2891 /* setup FQs for Rx and Tx Conf */
2892 err = dpaa2_switch_setup_fqs(ethsw);
2893 if (err)
2894 return err;
2895
2896 /* setup the buffer pool needed on the Rx path */
2897 err = dpaa2_switch_setup_dpbp(ethsw);
2898 if (err)
2899 return err;
2900
2901 err = dpaa2_switch_alloc_rings(ethsw);
2902 if (err)
2903 goto err_free_dpbp;
2904
2905 err = dpaa2_switch_setup_dpio(ethsw);
2906 if (err)
2907 goto err_destroy_rings;
2908
2909 err = dpaa2_switch_seed_bp(ethsw);
2910 if (err)
2911 goto err_deregister_dpio;
2912
2913 err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
2914 if (err) {
2915 dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
2916 goto err_drain_dpbp;
2917 }
2918
2919 return 0;
2920
2921 err_drain_dpbp:
2922 dpaa2_switch_drain_bp(ethsw);
2923 err_deregister_dpio:
2924 dpaa2_switch_free_dpio(ethsw);
2925 err_destroy_rings:
2926 dpaa2_switch_destroy_rings(ethsw);
2927 err_free_dpbp:
2928 dpaa2_switch_free_dpbp(ethsw);
2929
2930 return err;
2931 }
2932
2933 static void dpaa2_switch_remove_port(struct ethsw_core *ethsw,
2934 u16 port_idx)
2935 {
2936 struct ethsw_port_priv *port_priv = ethsw->ports[port_idx];
2937
2938 rtnl_lock();
2939 dpaa2_switch_port_disconnect_mac(port_priv);
2940 rtnl_unlock();
2941 free_netdev(port_priv->netdev);
2942 ethsw->ports[port_idx] = NULL;
2943 }
2944
2945 static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
2946 {
2947 struct device *dev = &sw_dev->dev;
2948 struct ethsw_core *ethsw = dev_get_drvdata(dev);
2949 struct dpsw_vlan_if_cfg vcfg = {0};
2950 struct dpsw_tci_cfg tci_cfg = {0};
2951 struct dpsw_stp_cfg stp_cfg;
2952 int err;
2953 u16 i;
2954
2955 ethsw->dev_id = sw_dev->obj_desc.id;
2956
2957 err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, ðsw->dpsw_handle);
2958 if (err) {
2959 dev_err(dev, "dpsw_open err %d\n", err);
2960 return err;
2961 }
2962
2963 err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
2964 ðsw->sw_attr);
2965 if (err) {
2966 dev_err(dev, "dpsw_get_attributes err %d\n", err);
2967 goto err_close;
2968 }
2969
2970 err = dpsw_get_api_version(ethsw->mc_io, 0,
2971 ðsw->major,
2972 ðsw->minor);
2973 if (err) {
2974 dev_err(dev, "dpsw_get_api_version err %d\n", err);
2975 goto err_close;
2976 }
2977
2978 /* Minimum supported DPSW version check */
2979 if (ethsw->major < DPSW_MIN_VER_MAJOR ||
2980 (ethsw->major == DPSW_MIN_VER_MAJOR &&
2981 ethsw->minor < DPSW_MIN_VER_MINOR)) {
2982 dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
2983 ethsw->major, ethsw->minor);
2984 err = -EOPNOTSUPP;
2985 goto err_close;
2986 }
2987
2988 if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
2989 err = -EOPNOTSUPP;
2990 goto err_close;
2991 }
2992
2993 dpaa2_switch_detect_features(ethsw);
2994
2995 err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
2996 if (err) {
2997 dev_err(dev, "dpsw_reset err %d\n", err);
2998 goto err_close;
2999 }
3000
3001 stp_cfg.vlan_id = DEFAULT_VLAN_ID;
3002 stp_cfg.state = DPSW_STP_STATE_FORWARDING;
3003
3004 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3005 err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
3006 if (err) {
3007 dev_err(dev, "dpsw_if_disable err %d\n", err);
3008 goto err_close;
3009 }
3010
3011 err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
3012 &stp_cfg);
3013 if (err) {
3014 dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
3015 err, i);
3016 goto err_close;
3017 }
3018
3019 /* Switch starts with all ports configured to VLAN 1. Need to
3020 * remove this setting to allow configuration at bridge join
3021 */
3022 vcfg.num_ifs = 1;
3023 vcfg.if_id[0] = i;
3024 err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
3025 DEFAULT_VLAN_ID, &vcfg);
3026 if (err) {
3027 dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
3028 err);
3029 goto err_close;
3030 }
3031
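		/* Set the port TCI to VID 4095, which neither the bridge nor
		 * the 8021q module will use, so that the default VLAN 1 can be
		 * removed from the interface below.
		 */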
3032 tci_cfg.vlan_id = 4095;
3033 err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
3034 if (err) {
3035 dev_err(dev, "dpsw_if_set_tci err %d\n", err);
3036 goto err_close;
3037 }
3038
3039 err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
3040 DEFAULT_VLAN_ID, &vcfg);
3041 if (err) {
3042 dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
3043 goto err_close;
3044 }
3045 }
3046
3047 err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
3048 if (err) {
3049 dev_err(dev, "dpsw_vlan_remove err %d\n", err);
3050 goto err_close;
3051 }
3052
3053 ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
3054 WQ_MEM_RECLAIM, "ethsw",
3055 ethsw->sw_attr.id);
3056 if (!ethsw->workqueue) {
3057 err = -ENOMEM;
3058 goto err_close;
3059 }
3060
3061 err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
3062 if (err)
3063 goto err_destroy_ordered_workqueue;
3064
3065 err = dpaa2_switch_ctrl_if_setup(ethsw);
3066 if (err)
3067 goto err_destroy_ordered_workqueue;
3068
3069 return 0;
3070
3071 err_destroy_ordered_workqueue:
3072 destroy_workqueue(ethsw->workqueue);
3073
3074 err_close:
3075 dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
3076 return err;
3077 }
3078
3079 /* Add an ACL to redirect frames with specific destination MAC address to
3080 * control interface
3081 */
3082 static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
3083 const char *mac)
3084 {
3085 struct dpaa2_switch_acl_entry acl_entry = {0};
3086
3087 /* Match on the destination MAC address */
3088 ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
3089 eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);
3090
3091 /* Trap to CPU */
3092 acl_entry.cfg.precedence = 0;
3093 acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
3094
3095 return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry);
3096 }
3097
3098 static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
3099 {
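	/* 01:80:C2:00:00:00 is the IEEE 802.1D Bridge Group (STP) address;
	 * it is trapped to the control interface below.
	 */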
3100 const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
3101 struct switchdev_obj_port_vlan vlan = {
3102 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
3103 .vid = DEFAULT_VLAN_ID,
3104 .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
3105 };
3106 struct net_device *netdev = port_priv->netdev;
3107 struct ethsw_core *ethsw = port_priv->ethsw_data;
3108 struct dpaa2_switch_filter_block *filter_block;
3109 struct dpsw_fdb_cfg fdb_cfg = {0};
3110 struct dpsw_if_attr dpsw_if_attr;
3111 struct dpaa2_switch_fdb *fdb;
3112 struct dpsw_acl_cfg acl_cfg;
3113 u16 fdb_id, acl_tbl_id;
3114 int err;
3115
3116 /* Get the Tx queue for this specific port */
3117 err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
3118 port_priv->idx, &dpsw_if_attr);
3119 if (err) {
3120 netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
3121 return err;
3122 }
3123 port_priv->tx_qdid = dpsw_if_attr.qdid;
3124
3125 /* Create a FDB table for this particular switch port */
3126 fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
3127 err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
3128 &fdb_id, &fdb_cfg);
3129 if (err) {
3130 netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
3131 return err;
3132 }
3133
3134 /* Find an unused dpaa2_switch_fdb structure and use it */
3135 fdb = dpaa2_switch_fdb_get_unused(ethsw);
3136 fdb->fdb_id = fdb_id;
3137 fdb->in_use = true;
3138 fdb->bridge_dev = NULL;
3139 port_priv->fdb = fdb;
3140
3141 /* We need to add VLAN 1 as the PVID on this port until it is under a
3142 * bridge since the DPAA2 switch is not able to handle the traffic in a
3143 * VLAN unaware fashion
3144 */
3145 err = dpaa2_switch_port_vlans_add(netdev, &vlan);
3146 if (err)
3147 return err;
3148
3149 	/* Setup the egress flooding domains (broadcast, unknown unicast) */
3150 err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
3151 if (err)
3152 return err;
3153
3154 /* Create an ACL table to be used by this switch port */
3155 acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
3156 err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
3157 &acl_tbl_id, &acl_cfg);
3158 if (err) {
3159 netdev_err(netdev, "dpsw_acl_add err %d\n", err);
3160 return err;
3161 }
3162
3163 filter_block = dpaa2_switch_filter_block_get_unused(ethsw);
3164 filter_block->ethsw = ethsw;
3165 filter_block->acl_id = acl_tbl_id;
3166 filter_block->in_use = true;
3167 filter_block->num_acl_rules = 0;
3168 INIT_LIST_HEAD(&filter_block->acl_entries);
3169 INIT_LIST_HEAD(&filter_block->mirror_entries);
3170
3171 err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block);
3172 if (err)
3173 return err;
3174
3175 err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
3176 if (err)
3177 return err;
3178
3179 return err;
3180 }
3181
3182 static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
3183 {
3184 dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
3185 dpaa2_switch_free_dpio(ethsw);
3186 dpaa2_switch_destroy_rings(ethsw);
3187 dpaa2_switch_drain_bp(ethsw);
3188 dpaa2_switch_free_dpbp(ethsw);
3189 }
3190
3191 static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
3192 {
3193 struct device *dev = &sw_dev->dev;
3194 struct ethsw_core *ethsw = dev_get_drvdata(dev);
3195 int err;
3196
3197 dpaa2_switch_ctrl_if_teardown(ethsw);
3198
3199 destroy_workqueue(ethsw->workqueue);
3200
3201 err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
3202 if (err)
3203 dev_warn(dev, "dpsw_close err %d\n", err);
3204 }
3205
3206 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
3207 {
3208 struct ethsw_port_priv *port_priv;
3209 struct ethsw_core *ethsw;
3210 struct device *dev;
3211 int i;
3212
3213 dev = &sw_dev->dev;
3214 ethsw = dev_get_drvdata(dev);
3215
3216 dpaa2_switch_teardown_irqs(sw_dev);
3217
3218 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
3219
3220 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3221 port_priv = ethsw->ports[i];
3222 unregister_netdev(port_priv->netdev);
3223 dpaa2_switch_remove_port(ethsw, i);
3224 }
3225
3226 kfree(ethsw->fdbs);
3227 kfree(ethsw->filter_blocks);
3228 kfree(ethsw->ports);
3229
3230 dpaa2_switch_teardown(sw_dev);
3231
3232 fsl_mc_portal_free(ethsw->mc_io);
3233
3234 kfree(ethsw);
3235
3236 dev_set_drvdata(dev, NULL);
3237
3238 return 0;
3239 }
3240
3241 static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
3242 u16 port_idx)
3243 {
3244 struct ethsw_port_priv *port_priv;
3245 struct device *dev = ethsw->dev;
3246 struct net_device *port_netdev;
3247 int err;
3248
3249 port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
3250 if (!port_netdev) {
3251 dev_err(dev, "alloc_etherdev error\n");
3252 return -ENOMEM;
3253 }
3254
3255 port_priv = netdev_priv(port_netdev);
3256 port_priv->netdev = port_netdev;
3257 port_priv->ethsw_data = ethsw;
3258
3259 port_priv->idx = port_idx;
3260 port_priv->stp_state = BR_STATE_FORWARDING;
3261
3262 SET_NETDEV_DEV(port_netdev, dev);
3263 port_netdev->netdev_ops = &dpaa2_switch_port_ops;
3264 port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
3265
3266 port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;
3267
3268 port_priv->bcast_flood = true;
3269 port_priv->ucast_flood = true;
3270
3271 /* Set MTU limits */
3272 port_netdev->min_mtu = ETH_MIN_MTU;
3273 port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
3274
3275 /* Populate the private port structure so that later calls to
3276 * dpaa2_switch_port_init() can use it.
3277 */
3278 ethsw->ports[port_idx] = port_priv;
3279
3280 /* The DPAA2 switch's ingress path depends on the VLAN table,
3281 * thus we are not able to disable VLAN filtering.
3282 */
3283 port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
3284 NETIF_F_HW_VLAN_STAG_FILTER |
3285 NETIF_F_HW_TC;
3286
3287 err = dpaa2_switch_port_init(port_priv, port_idx);
3288 if (err)
3289 goto err_port_probe;
3290
3291 err = dpaa2_switch_port_set_mac_addr(port_priv);
3292 if (err)
3293 goto err_port_probe;
3294
3295 err = dpaa2_switch_port_set_learning(port_priv, false);
3296 if (err)
3297 goto err_port_probe;
3298 port_priv->learn_ena = false;
3299
3300 err = dpaa2_switch_port_connect_mac(port_priv);
3301 if (err)
3302 goto err_port_probe;
3303
3304 return 0;
3305
3306 err_port_probe:
3307 free_netdev(port_netdev);
3308 ethsw->ports[port_idx] = NULL;
3309
3310 return err;
3311 }
3312
3313 static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
3314 {
3315 struct device *dev = &sw_dev->dev;
3316 struct ethsw_core *ethsw;
3317 int i, err;
3318
3319 	/* Allocate switch core */
3320 ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
3321
3322 if (!ethsw)
3323 return -ENOMEM;
3324
3325 ethsw->dev = dev;
3326 ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
3327 dev_set_drvdata(dev, ethsw);
3328
3329 err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
3330 ðsw->mc_io);
3331 if (err) {
3332 if (err == -ENXIO)
3333 err = -EPROBE_DEFER;
3334 else
3335 dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
3336 goto err_free_drvdata;
3337 }
3338
3339 err = dpaa2_switch_init(sw_dev);
3340 if (err)
3341 goto err_free_cmdport;
3342
3343 ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
3344 GFP_KERNEL);
3345 if (!(ethsw->ports)) {
3346 err = -ENOMEM;
3347 goto err_teardown;
3348 }
3349
3350 ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
3351 GFP_KERNEL);
3352 if (!ethsw->fdbs) {
3353 err = -ENOMEM;
3354 goto err_free_ports;
3355 }
3356
3357 ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs,
3358 sizeof(*ethsw->filter_blocks),
3359 GFP_KERNEL);
3360 if (!ethsw->filter_blocks) {
3361 err = -ENOMEM;
3362 goto err_free_fdbs;
3363 }
3364
3365 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3366 err = dpaa2_switch_probe_port(ethsw, i);
3367 if (err)
3368 goto err_free_netdev;
3369 }
3370
3371 	/* Add a NAPI instance for each of the Rx queues. All instances are
3372 	 * associated with the first port's net_device since we do not have
3373 	 * different queues for each switch port.
3374 	 */
3375 for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
3376 netif_napi_add(ethsw->ports[0]->netdev, ðsw->fq[i].napi,
3377 dpaa2_switch_poll);
3378
3379 /* Setup IRQs */
3380 err = dpaa2_switch_setup_irqs(sw_dev);
3381 if (err)
3382 goto err_stop;
3383
3384 /* By convention, if the mirror port is equal to the number of switch
3385 * interfaces, then mirroring of any kind is disabled.
3386 */
3387 ethsw->mirror_port = ethsw->sw_attr.num_ifs;
3388
3389 /* Register the netdev only when the entire setup is done and the
3390 * switch port interfaces are ready to receive traffic
3391 */
3392 for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3393 err = register_netdev(ethsw->ports[i]->netdev);
3394 if (err < 0) {
3395 dev_err(dev, "register_netdev error %d\n", err);
3396 goto err_unregister_ports;
3397 }
3398 }
3399
3400 return 0;
3401
3402 err_unregister_ports:
3403 for (i--; i >= 0; i--)
3404 unregister_netdev(ethsw->ports[i]->netdev);
3405 dpaa2_switch_teardown_irqs(sw_dev);
3406 err_stop:
3407 dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
3408 err_free_netdev:
3409 for (i--; i >= 0; i--)
3410 dpaa2_switch_remove_port(ethsw, i);
3411 kfree(ethsw->filter_blocks);
3412 err_free_fdbs:
3413 kfree(ethsw->fdbs);
3414 err_free_ports:
3415 kfree(ethsw->ports);
3416
3417 err_teardown:
3418 dpaa2_switch_teardown(sw_dev);
3419
3420 err_free_cmdport:
3421 fsl_mc_portal_free(ethsw->mc_io);
3422
3423 err_free_drvdata:
3424 kfree(ethsw);
3425 dev_set_drvdata(dev, NULL);
3426
3427 return err;
3428 }
3429
3430 static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
3431 {
3432 .vendor = FSL_MC_VENDOR_FREESCALE,
3433 .obj_type = "dpsw",
3434 },
3435 { .vendor = 0x0 }
3436 };
3437 MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
3438
3439 static struct fsl_mc_driver dpaa2_switch_drv = {
3440 .driver = {
3441 .name = KBUILD_MODNAME,
3442 .owner = THIS_MODULE,
3443 },
3444 .probe = dpaa2_switch_probe,
3445 .remove = dpaa2_switch_remove,
3446 .match_id_table = dpaa2_switch_match_id_table
3447 };
3448
3449 static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
3450 .notifier_call = dpaa2_switch_port_netdevice_event,
3451 };
3452
3453 static struct notifier_block dpaa2_switch_port_switchdev_nb = {
3454 .notifier_call = dpaa2_switch_port_event,
3455 };
3456
3457 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
3458 .notifier_call = dpaa2_switch_port_blocking_event,
3459 };
3460
3461 static int dpaa2_switch_register_notifiers(void)
3462 {
3463 int err;
3464
3465 err = register_netdevice_notifier(&dpaa2_switch_port_nb);
3466 if (err) {
3467 pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
3468 return err;
3469 }
3470
3471 err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3472 if (err) {
3473 pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
3474 goto err_switchdev_nb;
3475 }
3476
3477 err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
3478 if (err) {
3479 pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
3480 goto err_switchdev_blocking_nb;
3481 }
3482
3483 return 0;
3484
3485 err_switchdev_blocking_nb:
3486 unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3487 err_switchdev_nb:
3488 unregister_netdevice_notifier(&dpaa2_switch_port_nb);
3489
3490 return err;
3491 }
3492
3493 static void dpaa2_switch_unregister_notifiers(void)
3494 {
3495 int err;
3496
3497 err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
3498 if (err)
3499 pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
3500 err);
3501
3502 err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3503 if (err)
3504 pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);
3505
3506 err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
3507 if (err)
3508 pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
3509 }
3510
3511 static int __init dpaa2_switch_driver_init(void)
3512 {
3513 int err;
3514
3515 err = fsl_mc_driver_register(&dpaa2_switch_drv);
3516 if (err)
3517 return err;
3518
3519 err = dpaa2_switch_register_notifiers();
3520 if (err) {
3521 fsl_mc_driver_unregister(&dpaa2_switch_drv);
3522 return err;
3523 }
3524
3525 return 0;
3526 }
3527
3528 static void __exit dpaa2_switch_driver_exit(void)
3529 {
3530 dpaa2_switch_unregister_notifiers();
3531 fsl_mc_driver_unregister(&dpaa2_switch_drv);
3532 }
3533
3534 module_init(dpaa2_switch_driver_init);
3535 module_exit(dpaa2_switch_driver_exit);
3536
3537 MODULE_LICENSE("GPL v2");
3538 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
3539