1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DPAA2 Ethernet Switch driver
4  *
5  * Copyright 2014-2016 Freescale Semiconductor Inc.
6  * Copyright 2017-2018 NXP
7  *
8  */
9 
10 #include <linux/module.h>
11 
12 #include <linux/interrupt.h>
13 #include <linux/msi.h>
14 #include <linux/kthread.h>
15 #include <linux/workqueue.h>
16 
17 #include <linux/fsl/mc.h>
18 
19 #include "ethsw.h"
20 
21 /* Minimum supported DPSW version */
22 #define DPSW_MIN_VER_MAJOR		8
23 #define DPSW_MIN_VER_MINOR		1
24 
25 #define DEFAULT_VLAN_ID			1
26 
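/* Register @vid with the DPSW object (switch-wide VLAN table) and record the
 * new VLAN as a member in the driver's bookkeeping.
 */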
27 static int dpaa2_switch_add_vlan(struct ethsw_core *ethsw, u16 vid)
28 {
29 	int err;
30 
31 	struct dpsw_vlan_cfg	vcfg = {
32 		.fdb_id = 0,
33 	};
34 
35 	err = dpsw_vlan_add(ethsw->mc_io, 0,
36 			    ethsw->dpsw_handle, vid, &vcfg);
37 	if (err) {
38 		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
39 		return err;
40 	}
41 	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
42 
43 	return 0;
44 }
45 
46 static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
47 {
48 	struct net_device *netdev = port_priv->netdev;
49 	struct dpsw_link_state state;
50 	int err;
51 
52 	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
53 				     port_priv->ethsw_data->dpsw_handle,
54 				     port_priv->idx, &state);
55 	if (err) {
56 		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
57 		return true;
58 	}
59 
60 	WARN_ONCE(state.up > 1, "Garbage read into link_state");
61 
62 	return state.up ? true : false;
63 }
64 
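/* Program @pvid as the port's default VLAN (TCI). The firmware only accepts a
 * TCI change while the interface is down, so the port is temporarily disabled
 * and re-enabled around the dpsw_if_set_tci() call when needed.
 */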
65 static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
66 {
67 	struct ethsw_core *ethsw = port_priv->ethsw_data;
68 	struct net_device *netdev = port_priv->netdev;
69 	struct dpsw_tci_cfg tci_cfg = { 0 };
70 	bool up;
71 	int err, ret;
72 
73 	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
74 			      port_priv->idx, &tci_cfg);
75 	if (err) {
76 		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
77 		return err;
78 	}
79 
80 	tci_cfg.vlan_id = pvid;
81 
82 	/* Interface needs to be down to change PVID */
83 	up = dpaa2_switch_port_is_up(port_priv);
84 	if (up) {
85 		err = dpsw_if_disable(ethsw->mc_io, 0,
86 				      ethsw->dpsw_handle,
87 				      port_priv->idx);
88 		if (err) {
89 			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
90 			return err;
91 		}
92 	}
93 
94 	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
95 			      port_priv->idx, &tci_cfg);
96 	if (err) {
97 		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
98 		goto set_tci_error;
99 	}
100 
101 	/* Delete previous PVID info and mark the new one */
102 	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
103 	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
104 	port_priv->pvid = pvid;
105 
106 set_tci_error:
107 	if (up) {
108 		ret = dpsw_if_enable(ethsw->mc_io, 0,
109 				     ethsw->dpsw_handle,
110 				     port_priv->idx);
111 		if (ret) {
112 			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
113 			return ret;
114 		}
115 	}
116 
117 	return err;
118 }
119 
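/* Make the port a member of @vid and, depending on @flags, also configure it
 * for untagged egress and/or as the new PVID.
 */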
120 static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
121 				      u16 vid, u16 flags)
122 {
123 	struct ethsw_core *ethsw = port_priv->ethsw_data;
124 	struct net_device *netdev = port_priv->netdev;
125 	struct dpsw_vlan_if_cfg vcfg;
126 	int err;
127 
128 	if (port_priv->vlans[vid]) {
129 		netdev_warn(netdev, "VLAN %d already configured\n", vid);
130 		return -EEXIST;
131 	}
132 
133 	vcfg.num_ifs = 1;
134 	vcfg.if_id[0] = port_priv->idx;
135 	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
136 	if (err) {
137 		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
138 		return err;
139 	}
140 
141 	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
142 
143 	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
144 		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
145 						ethsw->dpsw_handle,
146 						vid, &vcfg);
147 		if (err) {
148 			netdev_err(netdev,
149 				   "dpsw_vlan_add_if_untagged err %d\n", err);
150 			return err;
151 		}
152 		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
153 	}
154 
155 	if (flags & BRIDGE_VLAN_INFO_PVID) {
156 		err = dpaa2_switch_port_set_pvid(port_priv, vid);
157 		if (err)
158 			return err;
159 	}
160 
161 	return 0;
162 }
163 
164 static int dpaa2_switch_set_learning(struct ethsw_core *ethsw, bool enable)
165 {
166 	enum dpsw_fdb_learning_mode learn_mode;
167 	int err;
168 
169 	if (enable)
170 		learn_mode = DPSW_FDB_LEARNING_MODE_HW;
171 	else
172 		learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
173 
174 	err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
175 					 learn_mode);
176 	if (err) {
177 		dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
178 		return err;
179 	}
180 	ethsw->learning = enable;
181 
182 	return 0;
183 }
184 
185 static int dpaa2_switch_port_set_flood(struct ethsw_port_priv *port_priv, bool enable)
186 {
187 	int err;
188 
189 	err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
190 				   port_priv->ethsw_data->dpsw_handle,
191 				   port_priv->idx, enable);
192 	if (err) {
193 		netdev_err(port_priv->netdev,
194 			   "dpsw_if_set_flooding err %d\n", err);
195 		return err;
196 	}
197 	port_priv->flood = enable;
198 
199 	return 0;
200 }
201 
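/* The DPSW programs STP state per (interface, VLAN) pair, so apply @state to
 * every VLAN the port is currently a member of.
 */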
202 static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
203 {
204 	struct dpsw_stp_cfg stp_cfg = {
205 		.state = state,
206 	};
207 	int err;
208 	u16 vid;
209 
210 	if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
211 		return 0;	/* Nothing to do */
212 
213 	for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
214 		if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
215 			stp_cfg.vlan_id = vid;
216 			err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
217 					      port_priv->ethsw_data->dpsw_handle,
218 					      port_priv->idx, &stp_cfg);
219 			if (err) {
220 				netdev_err(port_priv->netdev,
221 					   "dpsw_if_set_stp err %d\n", err);
222 				return err;
223 			}
224 		}
225 	}
226 
227 	port_priv->stp_state = state;
228 
229 	return 0;
230 }
231 
232 static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
233 {
234 	struct ethsw_port_priv *ppriv_local = NULL;
235 	int i, err;
236 
237 	if (!ethsw->vlans[vid])
238 		return -ENOENT;
239 
240 	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
241 	if (err) {
242 		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
243 		return err;
244 	}
245 	ethsw->vlans[vid] = 0;
246 
247 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
248 		ppriv_local = ethsw->ports[i];
249 		ppriv_local->vlans[vid] = 0;
250 	}
251 
252 	return 0;
253 }
254 
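/* Install a static unicast FDB entry for @addr with this port as egress. */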
255 static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
256 					const unsigned char *addr)
257 {
258 	struct dpsw_fdb_unicast_cfg entry = {0};
259 	int err;
260 
261 	entry.if_egress = port_priv->idx;
262 	entry.type = DPSW_FDB_ENTRY_STATIC;
263 	ether_addr_copy(entry.mac_addr, addr);
264 
265 	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
266 				   port_priv->ethsw_data->dpsw_handle,
267 				   0, &entry);
268 	if (err)
269 		netdev_err(port_priv->netdev,
270 			   "dpsw_fdb_add_unicast err %d\n", err);
271 	return err;
272 }
273 
274 static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
275 					const unsigned char *addr)
276 {
277 	struct dpsw_fdb_unicast_cfg entry = {0};
278 	int err;
279 
280 	entry.if_egress = port_priv->idx;
281 	entry.type = DPSW_FDB_ENTRY_STATIC;
282 	ether_addr_copy(entry.mac_addr, addr);
283 
284 	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
285 				      port_priv->ethsw_data->dpsw_handle,
286 				      0, &entry);
287 	/* Silently discard the error when the del command is called multiple times */
288 	if (err && err != -ENXIO)
289 		netdev_err(port_priv->netdev,
290 			   "dpsw_fdb_remove_unicast err %d\n", err);
291 	return err;
292 }
293 
294 static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
295 					const unsigned char *addr)
296 {
297 	struct dpsw_fdb_multicast_cfg entry = {0};
298 	int err;
299 
300 	ether_addr_copy(entry.mac_addr, addr);
301 	entry.type = DPSW_FDB_ENTRY_STATIC;
302 	entry.num_ifs = 1;
303 	entry.if_id[0] = port_priv->idx;
304 
305 	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
306 				     port_priv->ethsw_data->dpsw_handle,
307 				     0, &entry);
308 	/* Silently discard the error when the add command is called multiple times */
309 	if (err && err != -ENXIO)
310 		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
311 			   err);
312 	return err;
313 }
314 
315 static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
316 					const unsigned char *addr)
317 {
318 	struct dpsw_fdb_multicast_cfg entry = {0};
319 	int err;
320 
321 	ether_addr_copy(entry.mac_addr, addr);
322 	entry.type = DPSW_FDB_ENTRY_STATIC;
323 	entry.num_ifs = 1;
324 	entry.if_id[0] = port_priv->idx;
325 
326 	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
327 					port_priv->ethsw_data->dpsw_handle,
328 					0, &entry);
329 	/* Silently discard the error when the del command is called multiple times */
330 	if (err && err != -ENAVAIL)
331 		netdev_err(port_priv->netdev,
332 			   "dpsw_fdb_remove_multicast err %d\n", err);
333 	return err;
334 }
335 
336 static int dpaa2_switch_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
337 				     struct net_device *dev, const unsigned char *addr,
338 				     u16 vid, u16 flags,
339 				     struct netlink_ext_ack *extack)
340 {
341 	if (is_unicast_ether_addr(addr))
342 		return dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
343 						    addr);
344 	else
345 		return dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
346 						    addr);
347 }
348 
349 static int dpaa2_switch_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
350 				     struct net_device *dev,
351 				     const unsigned char *addr, u16 vid)
352 {
353 	if (is_unicast_ether_addr(addr))
354 		return dpaa2_switch_port_fdb_del_uc(netdev_priv(dev),
355 						    addr);
356 	else
357 		return dpaa2_switch_port_fdb_del_mc(netdev_priv(dev),
358 						    addr);
359 }
360 
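/* Fill in rtnl_link_stats64 from the per-interface DPSW counters; filtered
 * ingress frames are folded into rx_dropped.
 */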
361 static void dpaa2_switch_port_get_stats(struct net_device *netdev,
362 					struct rtnl_link_stats64 *stats)
363 {
364 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
365 	u64 tmp;
366 	int err;
367 
368 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
369 				  port_priv->ethsw_data->dpsw_handle,
370 				  port_priv->idx,
371 				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
372 	if (err)
373 		goto error;
374 
375 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
376 				  port_priv->ethsw_data->dpsw_handle,
377 				  port_priv->idx,
378 				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
379 	if (err)
380 		goto error;
381 
382 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
383 				  port_priv->ethsw_data->dpsw_handle,
384 				  port_priv->idx,
385 				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
386 	if (err)
387 		goto error;
388 
389 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
390 				  port_priv->ethsw_data->dpsw_handle,
391 				  port_priv->idx,
392 				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
393 	if (err)
394 		goto error;
395 
396 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
397 				  port_priv->ethsw_data->dpsw_handle,
398 				  port_priv->idx,
399 				  DPSW_CNT_ING_FRAME_DISCARD,
400 				  &stats->rx_dropped);
401 	if (err)
402 		goto error;
403 
404 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
405 				  port_priv->ethsw_data->dpsw_handle,
406 				  port_priv->idx,
407 				  DPSW_CNT_ING_FLTR_FRAME,
408 				  &tmp);
409 	if (err)
410 		goto error;
411 	stats->rx_dropped += tmp;
412 
413 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
414 				  port_priv->ethsw_data->dpsw_handle,
415 				  port_priv->idx,
416 				  DPSW_CNT_EGR_FRAME_DISCARD,
417 				  &stats->tx_dropped);
418 	if (err)
419 		goto error;
420 
421 	return;
422 
423 error:
424 	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
425 }
426 
427 static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
428 						int attr_id)
429 {
430 	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
431 }
432 
433 static int dpaa2_switch_port_get_offload_stats(int attr_id,
434 					       const struct net_device *netdev,
435 					       void *sp)
436 {
437 	switch (attr_id) {
438 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
439 		dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
440 		return 0;
441 	}
442 
443 	return -EINVAL;
444 }
445 
446 static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
447 {
448 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
449 	int err;
450 
451 	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
452 					   0,
453 					   port_priv->ethsw_data->dpsw_handle,
454 					   port_priv->idx,
455 					   (u16)ETHSW_L2_MAX_FRM(mtu));
456 	if (err) {
457 		netdev_err(netdev,
458 			   "dpsw_if_set_max_frame_length() err %d\n", err);
459 		return err;
460 	}
461 
462 	netdev->mtu = mtu;
463 	return 0;
464 }
465 
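/* Read the link state from the MC firmware and propagate it to the netdev
 * carrier state if it changed.
 */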
466 static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev)
467 {
468 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
469 	struct dpsw_link_state state;
470 	int err;
471 
472 	/* Link state interrupts are received even though the switch interface
473 	 * has not been brought up. Ignore these link state update interrupts.
474 	 */
475 	if (!netif_running(netdev))
476 		return 0;
477 
478 	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
479 				     port_priv->ethsw_data->dpsw_handle,
480 				     port_priv->idx, &state);
481 	if (err) {
482 		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
483 		return err;
484 	}
485 
486 	WARN_ONCE(state.up > 1, "Garbage read into link_state");
487 
488 	if (state.up != port_priv->link_state) {
489 		if (state.up)
490 			netif_carrier_on(netdev);
491 		else
492 			netif_carrier_off(netdev);
493 		port_priv->link_state = state.up;
494 	}
495 
496 	return 0;
497 }
498 
499 static int dpaa2_switch_port_open(struct net_device *netdev)
500 {
501 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
502 	int err;
503 
504 	/* No need to allow Tx as control interface is disabled */
505 	netif_tx_stop_all_queues(netdev);
506 
507 	/* Explicitly set carrier off, otherwise
508 	 * netif_carrier_ok() will return true and cause 'ip link show'
509 	 * to report the LOWER_UP flag, even though the link
510 	 * notification wasn't even received.
511 	 */
512 	netif_carrier_off(netdev);
513 
514 	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
515 			     port_priv->ethsw_data->dpsw_handle,
516 			     port_priv->idx);
517 	if (err) {
518 		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
519 		return err;
520 	}
521 
522 	/* sync carrier state */
523 	err = dpaa2_switch_port_carrier_state_sync(netdev);
524 	if (err) {
525 		netdev_err(netdev,
526 			   "dpaa2_switch_port_carrier_state_sync err %d\n", err);
527 		goto err_carrier_sync;
528 	}
529 
530 	return 0;
531 
532 err_carrier_sync:
533 	dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
534 			port_priv->ethsw_data->dpsw_handle,
535 			port_priv->idx);
536 	return err;
537 }
538 
539 static int dpaa2_switch_port_stop(struct net_device *netdev)
540 {
541 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
542 	int err;
543 
544 	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
545 			      port_priv->ethsw_data->dpsw_handle,
546 			      port_priv->idx);
547 	if (err) {
548 		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
549 		return err;
550 	}
551 
552 	return 0;
553 }
554 
555 static netdev_tx_t dpaa2_switch_port_dropframe(struct sk_buff *skb,
556 					       struct net_device *netdev)
557 {
558 	/* we don't support I/O for now, drop the frame */
559 	dev_kfree_skb_any(skb);
560 
561 	return NETDEV_TX_OK;
562 }
563 
564 static int dpaa2_switch_port_parent_id(struct net_device *dev,
565 				       struct netdev_phys_item_id *ppid)
566 {
567 	struct ethsw_port_priv *port_priv = netdev_priv(dev);
568 
569 	ppid->id_len = 1;
570 	ppid->id[0] = port_priv->ethsw_data->dev_id;
571 
572 	return 0;
573 }
574 
575 static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
576 					   size_t len)
577 {
578 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
579 	int err;
580 
581 	err = snprintf(name, len, "p%d", port_priv->idx);
582 	if (err >= len)
583 		return -EINVAL;
584 
585 	return 0;
586 }
587 
588 struct ethsw_dump_ctx {
589 	struct net_device *dev;
590 	struct sk_buff *skb;
591 	struct netlink_callback *cb;
592 	int idx;
593 };
594 
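/* Translate one FDB dump entry into an RTM_NEWNEIGH netlink message appended
 * to the dump skb.
 */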
595 static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
596 				    struct ethsw_dump_ctx *dump)
597 {
598 	int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
599 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
600 	u32 seq = dump->cb->nlh->nlmsg_seq;
601 	struct nlmsghdr *nlh;
602 	struct ndmsg *ndm;
603 
604 	if (dump->idx < dump->cb->args[2])
605 		goto skip;
606 
607 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
608 			sizeof(*ndm), NLM_F_MULTI);
609 	if (!nlh)
610 		return -EMSGSIZE;
611 
612 	ndm = nlmsg_data(nlh);
613 	ndm->ndm_family  = AF_BRIDGE;
614 	ndm->ndm_pad1    = 0;
615 	ndm->ndm_pad2    = 0;
616 	ndm->ndm_flags   = NTF_SELF;
617 	ndm->ndm_type    = 0;
618 	ndm->ndm_ifindex = dump->dev->ifindex;
619 	ndm->ndm_state   = is_dynamic ? NUD_REACHABLE : NUD_NOARP;
620 
621 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
622 		goto nla_put_failure;
623 
624 	nlmsg_end(dump->skb, nlh);
625 
626 skip:
627 	dump->idx++;
628 	return 0;
629 
630 nla_put_failure:
631 	nlmsg_cancel(dump->skb, nlh);
632 	return -EMSGSIZE;
633 }
634 
635 static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
636 					     struct ethsw_port_priv *port_priv)
637 {
638 	int idx = port_priv->idx;
639 	int valid;
640 
641 	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
642 		valid = entry->if_info == port_priv->idx;
643 	else
644 		valid = entry->if_mask[idx / 8] & BIT(idx % 8);
645 
646 	return valid;
647 }
648 
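/* Dump the switch FDB through a DMA-mapped buffer and emit netlink entries
 * for those that belong to this port.
 */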
649 static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
650 				      struct net_device *net_dev,
651 				      struct net_device *filter_dev, int *idx)
652 {
653 	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
654 	struct ethsw_core *ethsw = port_priv->ethsw_data;
655 	struct device *dev = net_dev->dev.parent;
656 	struct fdb_dump_entry *fdb_entries;
657 	struct fdb_dump_entry fdb_entry;
658 	struct ethsw_dump_ctx dump = {
659 		.dev = net_dev,
660 		.skb = skb,
661 		.cb = cb,
662 		.idx = *idx,
663 	};
664 	dma_addr_t fdb_dump_iova;
665 	u16 num_fdb_entries;
666 	u32 fdb_dump_size;
667 	int err = 0, i;
668 	u8 *dma_mem;
669 
670 	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
671 	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
672 	if (!dma_mem)
673 		return -ENOMEM;
674 
675 	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
676 				       DMA_FROM_DEVICE);
677 	if (dma_mapping_error(dev, fdb_dump_iova)) {
678 		netdev_err(net_dev, "dma_map_single() failed\n");
679 		err = -ENOMEM;
680 		goto err_map;
681 	}
682 
683 	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
684 			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
685 	if (err) {
686 		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
687 		goto err_dump;
688 	}
689 
690 	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
691 
692 	fdb_entries = (struct fdb_dump_entry *)dma_mem;
693 	for (i = 0; i < num_fdb_entries; i++) {
694 		fdb_entry = fdb_entries[i];
695 
696 		if (!dpaa2_switch_port_fdb_valid_entry(&fdb_entry, port_priv))
697 			continue;
698 
699 		err = dpaa2_switch_fdb_dump_nl(&fdb_entry, &dump);
700 		if (err)
701 			goto end;
702 	}
703 
704 end:
705 	*idx = dump.idx;
706 
707 	kfree(dma_mem);
708 
709 	return 0;
710 
711 err_dump:
712 	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE);
713 err_map:
714 	kfree(dma_mem);
715 	return err;
716 }
717 
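/* Use the MAC address provided by firmware for this port when one is set;
 * otherwise fall back to a randomly generated address.
 */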
718 static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
719 {
720 	struct ethsw_core *ethsw = port_priv->ethsw_data;
721 	struct net_device *net_dev = port_priv->netdev;
722 	struct device *dev = net_dev->dev.parent;
723 	u8 mac_addr[ETH_ALEN];
724 	int err;
725 
726 	if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
727 		return 0;
728 
729 	/* Get firmware address, if any */
730 	err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
731 					port_priv->idx, mac_addr);
732 	if (err) {
733 		dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
734 		return err;
735 	}
736 
737 	/* First check if firmware has any address configured by bootloader */
738 	if (!is_zero_ether_addr(mac_addr)) {
739 		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
740 	} else {
741 		/* No MAC address configured, fill in net_dev->dev_addr
742 		 * with a random one
743 		 */
744 		eth_hw_addr_random(net_dev);
745 		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
746 
747 		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
748 		 * practical purposes, this will be our "permanent" MAC address,
749 		 * at least until the next reboot. This move will also permit
750 		 * register_netdevice() to properly fill in net_dev->perm_addr.
751 		 */
752 		net_dev->addr_assign_type = NET_ADDR_PERM;
753 	}
754 
755 	return 0;
756 }
757 
758 static const struct net_device_ops dpaa2_switch_port_ops = {
759 	.ndo_open		= dpaa2_switch_port_open,
760 	.ndo_stop		= dpaa2_switch_port_stop,
761 
762 	.ndo_set_mac_address	= eth_mac_addr,
763 	.ndo_get_stats64	= dpaa2_switch_port_get_stats,
764 	.ndo_change_mtu		= dpaa2_switch_port_change_mtu,
765 	.ndo_has_offload_stats	= dpaa2_switch_port_has_offload_stats,
766 	.ndo_get_offload_stats	= dpaa2_switch_port_get_offload_stats,
767 	.ndo_fdb_add		= dpaa2_switch_port_fdb_add,
768 	.ndo_fdb_del		= dpaa2_switch_port_fdb_del,
769 	.ndo_fdb_dump		= dpaa2_switch_port_fdb_dump,
770 
771 	.ndo_start_xmit		= dpaa2_switch_port_dropframe,
772 	.ndo_get_port_parent_id	= dpaa2_switch_port_parent_id,
773 	.ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
774 };
775 
776 static bool dpaa2_switch_port_dev_check(const struct net_device *netdev,
777 					struct notifier_block *nb)
778 {
779 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
780 
781 	if (netdev->netdev_ops == &dpaa2_switch_port_ops &&
782 	    (!nb || &port_priv->ethsw_data->port_nb == nb ||
783 	     &port_priv->ethsw_data->port_switchdev_nb == nb ||
784 	     &port_priv->ethsw_data->port_switchdevb_nb == nb))
785 		return true;
786 
787 	return false;
788 }
789 
790 static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw)
791 {
792 	int i;
793 
794 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
795 		dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev);
796 		dpaa2_switch_port_set_mac_addr(ethsw->ports[i]);
797 	}
798 }
799 
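/* Threaded handler for the DPSW interrupt: read the interrupt status (events
 * are cleared on read) and resync the link state of all switch ports on a
 * link change event.
 */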
800 static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
801 {
802 	struct device *dev = (struct device *)arg;
803 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
804 
805 	/* Mask the events and the if_id reserved bits to be cleared on read */
806 	u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
807 	int err;
808 
809 	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
810 				  DPSW_IRQ_INDEX_IF, &status);
811 	if (err) {
812 		dev_err(dev, "Can't get irq status (err %d)\n", err);
813 
814 		err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
815 					    DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
816 		if (err)
817 			dev_err(dev, "Can't clear irq status (err %d)\n", err);
818 		goto out;
819 	}
820 
821 	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
822 		dpaa2_switch_links_state_update(ethsw);
823 
824 out:
825 	return IRQ_HANDLED;
826 }
827 
828 static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
829 {
830 	struct device *dev = &sw_dev->dev;
831 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
832 	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
833 	struct fsl_mc_device_irq *irq;
834 	int err;
835 
836 	err = fsl_mc_allocate_irqs(sw_dev);
837 	if (err) {
838 		dev_err(dev, "MC irqs allocation failed\n");
839 		return err;
840 	}
841 
842 	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
843 		err = -EINVAL;
844 		goto free_irq;
845 	}
846 
847 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
848 				  DPSW_IRQ_INDEX_IF, 0);
849 	if (err) {
850 		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
851 		goto free_irq;
852 	}
853 
854 	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
855 
856 	err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
857 					NULL,
858 					dpaa2_switch_irq0_handler_thread,
859 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
860 					dev_name(dev), dev);
861 	if (err) {
862 		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
863 		goto free_irq;
864 	}
865 
866 	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
867 				DPSW_IRQ_INDEX_IF, mask);
868 	if (err) {
869 		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
870 		goto free_devm_irq;
871 	}
872 
873 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
874 				  DPSW_IRQ_INDEX_IF, 1);
875 	if (err) {
876 		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
877 		goto free_devm_irq;
878 	}
879 
880 	return 0;
881 
882 free_devm_irq:
883 	devm_free_irq(dev, irq->msi_desc->irq, dev);
884 free_irq:
885 	fsl_mc_free_irqs(sw_dev);
886 	return err;
887 }
888 
889 static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
890 {
891 	struct device *dev = &sw_dev->dev;
892 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
893 	int err;
894 
895 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
896 				  DPSW_IRQ_INDEX_IF, 0);
897 	if (err)
898 		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
899 
900 	fsl_mc_free_irqs(sw_dev);
901 }
902 
903 static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
904 						struct switchdev_trans *trans,
905 						u8 state)
906 {
907 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
908 
909 	if (switchdev_trans_ph_prepare(trans))
910 		return 0;
911 
912 	return dpaa2_switch_port_set_stp_state(port_priv, state);
913 }
914 
915 static int dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev,
916 						   struct switchdev_trans *trans,
917 						   unsigned long flags)
918 {
919 	if (flags & ~(BR_LEARNING | BR_FLOOD))
920 		return -EINVAL;
921 
922 	return 0;
923 }
924 
925 static int dpaa2_switch_port_attr_br_flags_set(struct net_device *netdev,
926 					       struct switchdev_trans *trans,
927 					       unsigned long flags)
928 {
929 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
930 	int err = 0;
931 
932 	if (switchdev_trans_ph_prepare(trans))
933 		return 0;
934 
935 	/* Learning is enabled per switch */
936 	err = dpaa2_switch_set_learning(port_priv->ethsw_data,
937 					!!(flags & BR_LEARNING));
938 	if (err)
939 		goto exit;
940 
941 	err = dpaa2_switch_port_set_flood(port_priv, !!(flags & BR_FLOOD));
942 
943 exit:
944 	return err;
945 }
946 
947 static int dpaa2_switch_port_attr_set(struct net_device *netdev,
948 				      const struct switchdev_attr *attr,
949 				      struct switchdev_trans *trans)
950 {
951 	int err = 0;
952 
953 	switch (attr->id) {
954 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
955 		err = dpaa2_switch_port_attr_stp_state_set(netdev, trans,
956 							   attr->u.stp_state);
957 		break;
958 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
959 		err = dpaa2_switch_port_attr_br_flags_pre_set(netdev, trans,
960 							      attr->u.brport_flags);
961 		break;
962 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
963 		err = dpaa2_switch_port_attr_br_flags_set(netdev, trans,
964 							  attr->u.brport_flags);
965 		break;
966 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
967 		/* VLANs are supported by default */
968 		break;
969 	default:
970 		err = -EOPNOTSUPP;
971 		break;
972 	}
973 
974 	return err;
975 }
976 
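/* switchdev VLAN add: in the prepare phase only check that the switch has
 * room for the new VLANs; in the commit phase create any missing switch-wide
 * VLANs and add the port to each of them.
 */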
977 static int dpaa2_switch_port_vlans_add(struct net_device *netdev,
978 				       const struct switchdev_obj_port_vlan *vlan,
979 				       struct switchdev_trans *trans)
980 {
981 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
982 	struct ethsw_core *ethsw = port_priv->ethsw_data;
983 	struct dpsw_attr *attr = &ethsw->sw_attr;
984 	int vid, err = 0, new_vlans = 0;
985 
986 	if (switchdev_trans_ph_prepare(trans)) {
987 		for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
988 			if (!port_priv->ethsw_data->vlans[vid])
989 				new_vlans++;
990 
991 		/* Check if there is space for a new VLAN */
992 		err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
993 					  &ethsw->sw_attr);
994 		if (err) {
995 			netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
996 			return err;
997 		}
998 		if (attr->max_vlans - attr->num_vlans < new_vlans)
999 			return -ENOSPC;
1000 
1001 		return 0;
1002 	}
1003 
1004 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1005 		if (!port_priv->ethsw_data->vlans[vid]) {
1006 			/* this is a new VLAN */
1007 			err = dpaa2_switch_add_vlan(port_priv->ethsw_data, vid);
1008 			if (err)
1009 				return err;
1010 
1011 			port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
1012 		}
1013 		err = dpaa2_switch_port_add_vlan(port_priv, vid, vlan->flags);
1014 		if (err)
1015 			break;
1016 	}
1017 
1018 	return err;
1019 }
1020 
1021 static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
1022 					    const unsigned char *addr)
1023 {
1024 	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
1025 	struct netdev_hw_addr *ha;
1026 
1027 	netif_addr_lock_bh(netdev);
1028 	list_for_each_entry(ha, &list->list, list) {
1029 		if (ether_addr_equal(ha->addr, addr)) {
1030 			netif_addr_unlock_bh(netdev);
1031 			return 1;
1032 		}
1033 	}
1034 	netif_addr_unlock_bh(netdev);
1035 	return 0;
1036 }
1037 
1038 static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
1039 				     const struct switchdev_obj_port_mdb *mdb,
1040 				     struct switchdev_trans *trans)
1041 {
1042 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1043 	int err;
1044 
1045 	if (switchdev_trans_ph_prepare(trans))
1046 		return 0;
1047 
1048 	/* Check if address is already set on this port */
1049 	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
1050 		return -EEXIST;
1051 
1052 	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
1053 	if (err)
1054 		return err;
1055 
1056 	err = dev_mc_add(netdev, mdb->addr);
1057 	if (err) {
1058 		netdev_err(netdev, "dev_mc_add err %d\n", err);
1059 		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
1060 	}
1061 
1062 	return err;
1063 }
1064 
1065 static int dpaa2_switch_port_obj_add(struct net_device *netdev,
1066 				     const struct switchdev_obj *obj,
1067 				     struct switchdev_trans *trans)
1068 {
1069 	int err;
1070 
1071 	switch (obj->id) {
1072 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1073 		err = dpaa2_switch_port_vlans_add(netdev,
1074 						  SWITCHDEV_OBJ_PORT_VLAN(obj),
1075 						  trans);
1076 		break;
1077 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1078 		err = dpaa2_switch_port_mdb_add(netdev,
1079 						SWITCHDEV_OBJ_PORT_MDB(obj),
1080 						trans);
1081 		break;
1082 	default:
1083 		err = -EOPNOTSUPP;
1084 		break;
1085 	}
1086 
1087 	return err;
1088 }
1089 
1090 static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
1091 {
1092 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1093 	struct net_device *netdev = port_priv->netdev;
1094 	struct dpsw_vlan_if_cfg vcfg;
1095 	int i, err;
1096 
1097 	if (!port_priv->vlans[vid])
1098 		return -ENOENT;
1099 
1100 	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
1101 		err = dpaa2_switch_port_set_pvid(port_priv, 0);
1102 		if (err)
1103 			return err;
1104 	}
1105 
1106 	vcfg.num_ifs = 1;
1107 	vcfg.if_id[0] = port_priv->idx;
1108 	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
1109 		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
1110 						   ethsw->dpsw_handle,
1111 						   vid, &vcfg);
1112 		if (err) {
1113 			netdev_err(netdev,
1114 				   "dpsw_vlan_remove_if_untagged err %d\n",
1115 				   err);
1116 		}
1117 		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
1118 	}
1119 
1120 	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
1121 		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1122 					  vid, &vcfg);
1123 		if (err) {
1124 			netdev_err(netdev,
1125 				   "dpsw_vlan_remove_if err %d\n", err);
1126 			return err;
1127 		}
1128 		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
1129 
1130 		/* Delete VLAN from switch if it is no longer configured on
1131 		 * any port
1132 		 */
1133 		for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
1134 			if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
1135 				return 0; /* Found a port member in VID */
1136 
1137 		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
1138 
1139 		err = dpaa2_switch_dellink(ethsw, vid);
1140 		if (err)
1141 			return err;
1142 	}
1143 
1144 	return 0;
1145 }
1146 
1147 static int dpaa2_switch_port_vlans_del(struct net_device *netdev,
1148 				       const struct switchdev_obj_port_vlan *vlan)
1149 {
1150 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1151 	int vid, err = 0;
1152 
1153 	if (netif_is_bridge_master(vlan->obj.orig_dev))
1154 		return -EOPNOTSUPP;
1155 
1156 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1157 		err = dpaa2_switch_port_del_vlan(port_priv, vid);
1158 		if (err)
1159 			break;
1160 	}
1161 
1162 	return err;
1163 }
1164 
1165 static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
1166 				     const struct switchdev_obj_port_mdb *mdb)
1167 {
1168 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1169 	int err;
1170 
1171 	if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
1172 		return -ENOENT;
1173 
1174 	err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
1175 	if (err)
1176 		return err;
1177 
1178 	err = dev_mc_del(netdev, mdb->addr);
1179 	if (err) {
1180 		netdev_err(netdev, "dev_mc_del err %d\n", err);
1181 		return err;
1182 	}
1183 
1184 	return err;
1185 }
1186 
1187 static int dpaa2_switch_port_obj_del(struct net_device *netdev,
1188 				     const struct switchdev_obj *obj)
1189 {
1190 	int err;
1191 
1192 	switch (obj->id) {
1193 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1194 		err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
1195 		break;
1196 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1197 		err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
1198 		break;
1199 	default:
1200 		err = -EOPNOTSUPP;
1201 		break;
1202 	}
1203 	return err;
1204 }
1205 
1206 static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
1207 					    struct switchdev_notifier_port_attr_info
1208 					    *port_attr_info)
1209 {
1210 	int err;
1211 
1212 	err = dpaa2_switch_port_attr_set(netdev, port_attr_info->attr,
1213 					 port_attr_info->trans);
1214 
1215 	port_attr_info->handled = true;
1216 	return notifier_from_errno(err);
1217 }
1218 
1219 /* For the moment, only flood setting needs to be updated */
1220 static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
1221 					 struct net_device *upper_dev)
1222 {
1223 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1224 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1225 	struct ethsw_port_priv *other_port_priv;
1226 	struct net_device *other_dev;
1227 	struct list_head *iter;
1228 	int i, err;
1229 
1230 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
1231 		if (ethsw->ports[i]->bridge_dev &&
1232 		    (ethsw->ports[i]->bridge_dev != upper_dev)) {
1233 			netdev_err(netdev,
1234 				   "Only one bridge supported per DPSW object!\n");
1235 			return -EINVAL;
1236 		}
1237 
1238 	netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
1239 		if (!dpaa2_switch_port_dev_check(other_dev, NULL))
1240 			continue;
1241 
1242 		other_port_priv = netdev_priv(other_dev);
1243 		if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
1244 			netdev_err(netdev,
1245 				   "Interface from a different DPSW is in the bridge already!\n");
1246 			return -EINVAL;
1247 		}
1248 	}
1249 
1250 	/* Enable flooding */
1251 	err = dpaa2_switch_port_set_flood(port_priv, 1);
1252 	if (!err)
1253 		port_priv->bridge_dev = upper_dev;
1254 
1255 	return err;
1256 }
1257 
1258 static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
1259 {
1260 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1261 	int err;
1262 
1263 	/* Disable flooding */
1264 	err = dpaa2_switch_port_set_flood(port_priv, 0);
1265 	if (!err)
1266 		port_priv->bridge_dev = NULL;
1267 
1268 	return err;
1269 }
1270 
1271 static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
1272 					     unsigned long event, void *ptr)
1273 {
1274 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1275 	struct netdev_notifier_changeupper_info *info = ptr;
1276 	struct net_device *upper_dev;
1277 	int err = 0;
1278 
1279 	if (!dpaa2_switch_port_dev_check(netdev, nb))
1280 		return NOTIFY_DONE;
1281 
1282 	/* Handle just upper dev link/unlink for the moment */
1283 	if (event == NETDEV_CHANGEUPPER) {
1284 		upper_dev = info->upper_dev;
1285 		if (netif_is_bridge_master(upper_dev)) {
1286 			if (info->linking)
1287 				err = dpaa2_switch_port_bridge_join(netdev, upper_dev);
1288 			else
1289 				err = dpaa2_switch_port_bridge_leave(netdev);
1290 		}
1291 	}
1292 
1293 	return notifier_from_errno(err);
1294 }
1295 
1296 struct ethsw_switchdev_event_work {
1297 	struct work_struct work;
1298 	struct switchdev_notifier_fdb_info fdb_info;
1299 	struct net_device *dev;
1300 	unsigned long event;
1301 };
1302 
1303 static void dpaa2_switch_event_work(struct work_struct *work)
1304 {
1305 	struct ethsw_switchdev_event_work *switchdev_work =
1306 		container_of(work, struct ethsw_switchdev_event_work, work);
1307 	struct net_device *dev = switchdev_work->dev;
1308 	struct switchdev_notifier_fdb_info *fdb_info;
1309 	int err;
1310 
1311 	rtnl_lock();
1312 	fdb_info = &switchdev_work->fdb_info;
1313 
1314 	switch (switchdev_work->event) {
1315 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
1316 		if (!fdb_info->added_by_user)
1317 			break;
1318 		if (is_unicast_ether_addr(fdb_info->addr))
1319 			err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
1320 							   fdb_info->addr);
1321 		else
1322 			err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
1323 							   fdb_info->addr);
1324 		if (err)
1325 			break;
1326 		fdb_info->offloaded = true;
1327 		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
1328 					 &fdb_info->info, NULL);
1329 		break;
1330 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
1331 		if (!fdb_info->added_by_user)
1332 			break;
1333 		if (is_unicast_ether_addr(fdb_info->addr))
1334 			dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
1335 		else
1336 			dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
1337 		break;
1338 	}
1339 
1340 	rtnl_unlock();
1341 	kfree(switchdev_work->fdb_info.addr);
1342 	kfree(switchdev_work);
1343 	dev_put(dev);
1344 }
1345 
1346 /* Called under rcu_read_lock() */
1347 static int dpaa2_switch_port_event(struct notifier_block *nb,
1348 				   unsigned long event, void *ptr)
1349 {
1350 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1351 	struct ethsw_port_priv *port_priv = netdev_priv(dev);
1352 	struct ethsw_switchdev_event_work *switchdev_work;
1353 	struct switchdev_notifier_fdb_info *fdb_info = ptr;
1354 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1355 
1356 	if (!dpaa2_switch_port_dev_check(dev, nb))
1357 		return NOTIFY_DONE;
1358 
1359 	if (event == SWITCHDEV_PORT_ATTR_SET)
1360 		return dpaa2_switch_port_attr_set_event(dev, ptr);
1361 
1362 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
1363 	if (!switchdev_work)
1364 		return NOTIFY_BAD;
1365 
1366 	INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
1367 	switchdev_work->dev = dev;
1368 	switchdev_work->event = event;
1369 
1370 	switch (event) {
1371 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
1372 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
1373 		memcpy(&switchdev_work->fdb_info, ptr,
1374 		       sizeof(switchdev_work->fdb_info));
1375 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
1376 		if (!switchdev_work->fdb_info.addr)
1377 			goto err_addr_alloc;
1378 
1379 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
1380 				fdb_info->addr);
1381 
1382 		/* Take a reference on the device to avoid it being freed while the work runs. */
1383 		dev_hold(dev);
1384 		break;
1385 	default:
1386 		kfree(switchdev_work);
1387 		return NOTIFY_DONE;
1388 	}
1389 
1390 	queue_work(ethsw->workqueue, &switchdev_work->work);
1391 
1392 	return NOTIFY_DONE;
1393 
1394 err_addr_alloc:
1395 	kfree(switchdev_work);
1396 	return NOTIFY_BAD;
1397 }
1398 
1399 static int dpaa2_switch_port_obj_event(unsigned long event,
1400 				       struct net_device *netdev,
1401 				       struct switchdev_notifier_port_obj_info *port_obj_info)
1402 {
1403 	int err = -EOPNOTSUPP;
1404 
1405 	switch (event) {
1406 	case SWITCHDEV_PORT_OBJ_ADD:
1407 		err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj,
1408 						port_obj_info->trans);
1409 		break;
1410 	case SWITCHDEV_PORT_OBJ_DEL:
1411 		err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
1412 		break;
1413 	}
1414 
1415 	port_obj_info->handled = true;
1416 	return notifier_from_errno(err);
1417 }
1418 
1419 static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
1420 					    unsigned long event, void *ptr)
1421 {
1422 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1423 
1424 	if (!dpaa2_switch_port_dev_check(dev, nb))
1425 		return NOTIFY_DONE;
1426 
1427 	switch (event) {
1428 	case SWITCHDEV_PORT_OBJ_ADD:
1429 	case SWITCHDEV_PORT_OBJ_DEL:
1430 		return dpaa2_switch_port_obj_event(event, dev, ptr);
1431 	case SWITCHDEV_PORT_ATTR_SET:
1432 		return dpaa2_switch_port_attr_set_event(dev, ptr);
1433 	}
1434 
1435 	return NOTIFY_DONE;
1436 }
1437 
1438 static int dpaa2_switch_register_notifier(struct device *dev)
1439 {
1440 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1441 	int err;
1442 
1443 	ethsw->port_nb.notifier_call = dpaa2_switch_port_netdevice_event;
1444 	err = register_netdevice_notifier(&ethsw->port_nb);
1445 	if (err) {
1446 		dev_err(dev, "Failed to register netdev notifier\n");
1447 		return err;
1448 	}
1449 
1450 	ethsw->port_switchdev_nb.notifier_call = dpaa2_switch_port_event;
1451 	err = register_switchdev_notifier(&ethsw->port_switchdev_nb);
1452 	if (err) {
1453 		dev_err(dev, "Failed to register switchdev notifier\n");
1454 		goto err_switchdev_nb;
1455 	}
1456 
1457 	ethsw->port_switchdevb_nb.notifier_call = dpaa2_switch_port_blocking_event;
1458 	err = register_switchdev_blocking_notifier(&ethsw->port_switchdevb_nb);
1459 	if (err) {
1460 		dev_err(dev, "Failed to register switchdev blocking notifier\n");
1461 		goto err_switchdev_blocking_nb;
1462 	}
1463 
1464 	return 0;
1465 
1466 err_switchdev_blocking_nb:
1467 	unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
1468 err_switchdev_nb:
1469 	unregister_netdevice_notifier(&ethsw->port_nb);
1470 	return err;
1471 }
1472 
1473 static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
1474 {
1475 	ethsw->features = 0;
1476 
1477 	if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
1478 		ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
1479 }
1480 
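/* Open the DPSW object and bring it to a usable state: verify the API
 * version, reset it, enable HW learning, program per-port STP/broadcast
 * defaults, then set up the ordered workqueue and the netdev/switchdev
 * notifiers.
 */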
1481 static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
1482 {
1483 	struct device *dev = &sw_dev->dev;
1484 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1485 	struct dpsw_stp_cfg stp_cfg;
1486 	int err;
1487 	u16 i;
1488 
1489 	ethsw->dev_id = sw_dev->obj_desc.id;
1490 
1491 	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
1492 	if (err) {
1493 		dev_err(dev, "dpsw_open err %d\n", err);
1494 		return err;
1495 	}
1496 
1497 	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
1498 				  &ethsw->sw_attr);
1499 	if (err) {
1500 		dev_err(dev, "dpsw_get_attributes err %d\n", err);
1501 		goto err_close;
1502 	}
1503 
1504 	err = dpsw_get_api_version(ethsw->mc_io, 0,
1505 				   &ethsw->major,
1506 				   &ethsw->minor);
1507 	if (err) {
1508 		dev_err(dev, "dpsw_get_api_version err %d\n", err);
1509 		goto err_close;
1510 	}
1511 
1512 	/* Minimum supported DPSW version check */
1513 	if (ethsw->major < DPSW_MIN_VER_MAJOR ||
1514 	    (ethsw->major == DPSW_MIN_VER_MAJOR &&
1515 	     ethsw->minor < DPSW_MIN_VER_MINOR)) {
1516 		dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
1517 			ethsw->major,
1518 			ethsw->minor,
1519 			DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
1520 		err = -ENOTSUPP;
1521 		goto err_close;
1522 	}
1523 
1524 	dpaa2_switch_detect_features(ethsw);
1525 
1526 	err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
1527 	if (err) {
1528 		dev_err(dev, "dpsw_reset err %d\n", err);
1529 		goto err_close;
1530 	}
1531 
1532 	err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
1533 					 DPSW_FDB_LEARNING_MODE_HW);
1534 	if (err) {
1535 		dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
1536 		goto err_close;
1537 	}
1538 
1539 	stp_cfg.vlan_id = DEFAULT_VLAN_ID;
1540 	stp_cfg.state = DPSW_STP_STATE_FORWARDING;
1541 
1542 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1543 		err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
1544 				      &stp_cfg);
1545 		if (err) {
1546 			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
1547 				err, i);
1548 			goto err_close;
1549 		}
1550 
1551 		err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
1552 					    ethsw->dpsw_handle, i, 1);
1553 		if (err) {
1554 			dev_err(dev,
1555 				"dpsw_if_set_broadcast err %d for port %d\n",
1556 				err, i);
1557 			goto err_close;
1558 		}
1559 	}
1560 
1561 	ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
1562 						   WQ_MEM_RECLAIM, "ethsw",
1563 						   ethsw->sw_attr.id);
1564 	if (!ethsw->workqueue) {
1565 		err = -ENOMEM;
1566 		goto err_close;
1567 	}
1568 
1569 	err = dpaa2_switch_register_notifier(dev);
1570 	if (err)
1571 		goto err_destroy_ordered_workqueue;
1572 
1573 	return 0;
1574 
1575 err_destroy_ordered_workqueue:
1576 	destroy_workqueue(ethsw->workqueue);
1577 
1578 err_close:
1579 	dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
1580 	return err;
1581 }
1582 
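/* Undo the default VLAN 1 configuration the switch starts with so the port
 * can be reconfigured cleanly when it joins a bridge.
 */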
1583 static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
1584 {
1585 	struct net_device *netdev = port_priv->netdev;
1586 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1587 	struct dpsw_vlan_if_cfg vcfg;
1588 	int err;
1589 
1590 	/* Switch starts with all ports configured to VLAN 1. Need to
1591 	 * remove this setting to allow configuration at bridge join
1592 	 */
1593 	vcfg.num_ifs = 1;
1594 	vcfg.if_id[0] = port_priv->idx;
1595 
1596 	err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
1597 					   DEFAULT_VLAN_ID, &vcfg);
1598 	if (err) {
1599 		netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
1600 			   err);
1601 		return err;
1602 	}
1603 
1604 	err = dpaa2_switch_port_set_pvid(port_priv, 0);
1605 	if (err)
1606 		return err;
1607 
1608 	err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1609 				  DEFAULT_VLAN_ID, &vcfg);
1610 	if (err)
1611 		netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
1612 
1613 	return err;
1614 }
1615 
1616 static void dpaa2_switch_unregister_notifier(struct device *dev)
1617 {
1618 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1619 	struct notifier_block *nb;
1620 	int err;
1621 
1622 	nb = &ethsw->port_switchdevb_nb;
1623 	err = unregister_switchdev_blocking_notifier(nb);
1624 	if (err)
1625 		dev_err(dev,
1626 			"Failed to unregister switchdev blocking notifier (%d)\n",
1627 			err);
1628 
1629 	err = unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
1630 	if (err)
1631 		dev_err(dev,
1632 			"Failed to unregister switchdev notifier (%d)\n", err);
1633 
1634 	err = unregister_netdevice_notifier(&ethsw->port_nb);
1635 	if (err)
1636 		dev_err(dev,
1637 			"Failed to unregister netdev notifier (%d)\n", err);
1638 }
1639 
1640 static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
1641 {
1642 	struct device *dev = &sw_dev->dev;
1643 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1644 	int err;
1645 
1646 	dpaa2_switch_unregister_notifier(dev);
1647 
1648 	err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
1649 	if (err)
1650 		dev_warn(dev, "dpsw_close err %d\n", err);
1651 }
1652 
1653 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
1654 {
1655 	struct ethsw_port_priv *port_priv;
1656 	struct ethsw_core *ethsw;
1657 	struct device *dev;
1658 	int i;
1659 
1660 	dev = &sw_dev->dev;
1661 	ethsw = dev_get_drvdata(dev);
1662 
1663 	dpaa2_switch_teardown_irqs(sw_dev);
1664 
1665 	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
1666 
1667 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1668 		port_priv = ethsw->ports[i];
1669 		unregister_netdev(port_priv->netdev);
1670 		free_netdev(port_priv->netdev);
1671 	}
1672 	kfree(ethsw->ports);
1673 
1674 	dpaa2_switch_takedown(sw_dev);
1675 
1676 	destroy_workqueue(ethsw->workqueue);
1677 
1678 	fsl_mc_portal_free(ethsw->mc_io);
1679 
1680 	kfree(ethsw);
1681 
1682 	dev_set_drvdata(dev, NULL);
1683 
1684 	return 0;
1685 }
1686 
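/* Allocate, initialize and register the net_device backing one switch port. */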
1687 static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
1688 				   u16 port_idx)
1689 {
1690 	struct ethsw_port_priv *port_priv;
1691 	struct device *dev = ethsw->dev;
1692 	struct net_device *port_netdev;
1693 	int err;
1694 
1695 	port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
1696 	if (!port_netdev) {
1697 		dev_err(dev, "alloc_etherdev error\n");
1698 		return -ENOMEM;
1699 	}
1700 
1701 	port_priv = netdev_priv(port_netdev);
1702 	port_priv->netdev = port_netdev;
1703 	port_priv->ethsw_data = ethsw;
1704 
1705 	port_priv->idx = port_idx;
1706 	port_priv->stp_state = BR_STATE_FORWARDING;
1707 
1708 	/* Flooding is implicitly enabled */
1709 	port_priv->flood = true;
1710 
1711 	SET_NETDEV_DEV(port_netdev, dev);
1712 	port_netdev->netdev_ops = &dpaa2_switch_port_ops;
1713 	port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
1714 
1715 	/* Set MTU limits */
1716 	port_netdev->min_mtu = ETH_MIN_MTU;
1717 	port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
1718 
1719 	err = dpaa2_switch_port_init(port_priv, port_idx);
1720 	if (err)
1721 		goto err_port_probe;
1722 
1723 	err = dpaa2_switch_port_set_mac_addr(port_priv);
1724 	if (err)
1725 		goto err_port_probe;
1726 
1727 	err = register_netdev(port_netdev);
1728 	if (err < 0) {
1729 		dev_err(dev, "register_netdev error %d\n", err);
1730 		goto err_port_probe;
1731 	}
1732 
1733 	ethsw->ports[port_idx] = port_priv;
1734 
1735 	return 0;
1736 
1737 err_port_probe:
1738 	free_netdev(port_netdev);
1739 
1740 	return err;
1741 }
1742 
1743 static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
1744 {
1745 	struct device *dev = &sw_dev->dev;
1746 	struct ethsw_core *ethsw;
1747 	int i, err;
1748 
1749 	/* Allocate switch core */
1750 	ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
1751 
1752 	if (!ethsw)
1753 		return -ENOMEM;
1754 
1755 	ethsw->dev = dev;
1756 	dev_set_drvdata(dev, ethsw);
1757 
1758 	err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
1759 				     &ethsw->mc_io);
1760 	if (err) {
1761 		if (err == -ENXIO)
1762 			err = -EPROBE_DEFER;
1763 		else
1764 			dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
1765 		goto err_free_drvdata;
1766 	}
1767 
1768 	err = dpaa2_switch_init(sw_dev);
1769 	if (err)
1770 		goto err_free_cmdport;
1771 
1772 	/* DEFAULT_VLAN_ID is implicitly configured on the switch */
1773 	ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
1774 
1775 	/* Learning is implicitly enabled */
1776 	ethsw->learning = true;
1777 
1778 	ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
1779 			       GFP_KERNEL);
1780 	if (!(ethsw->ports)) {
1781 		err = -ENOMEM;
1782 		goto err_takedown;
1783 	}
1784 
1785 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1786 		err = dpaa2_switch_probe_port(ethsw, i);
1787 		if (err)
1788 			goto err_free_ports;
1789 	}
1790 
1791 	err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
1792 	if (err) {
1793 		dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
1794 		goto err_free_ports;
1795 	}
1796 
1797 	/* Make sure the switch ports are disabled at probe time */
1798 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
1799 		dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
1800 
1801 	/* Setup IRQs */
1802 	err = dpaa2_switch_setup_irqs(sw_dev);
1803 	if (err)
1804 		goto err_stop;
1805 
1806 	dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
1807 	return 0;
1808 
1809 err_stop:
1810 	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
1811 
1812 err_free_ports:
1813 	/* Cleanup registered ports only */
1814 	for (i--; i >= 0; i--) {
1815 		unregister_netdev(ethsw->ports[i]->netdev);
1816 		free_netdev(ethsw->ports[i]->netdev);
1817 	}
1818 	kfree(ethsw->ports);
1819 
1820 err_takedown:
1821 	dpaa2_switch_takedown(sw_dev);
1822 
1823 err_free_cmdport:
1824 	fsl_mc_portal_free(ethsw->mc_io);
1825 
1826 err_free_drvdata:
1827 	kfree(ethsw);
1828 	dev_set_drvdata(dev, NULL);
1829 
1830 	return err;
1831 }
1832 
1833 static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
1834 	{
1835 		.vendor = FSL_MC_VENDOR_FREESCALE,
1836 		.obj_type = "dpsw",
1837 	},
1838 	{ .vendor = 0x0 }
1839 };
1840 MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
1841 
1842 static struct fsl_mc_driver dpaa2_switch_drv = {
1843 	.driver = {
1844 		.name = KBUILD_MODNAME,
1845 		.owner = THIS_MODULE,
1846 	},
1847 	.probe = dpaa2_switch_probe,
1848 	.remove = dpaa2_switch_remove,
1849 	.match_id_table = dpaa2_switch_match_id_table
1850 };
1851 
1852 module_fsl_mc_driver(dpaa2_switch_drv);
1853 
1854 MODULE_LICENSE("GPL v2");
1855 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
1856