1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DPAA2 Ethernet Switch driver
4  *
5  * Copyright 2014-2016 Freescale Semiconductor Inc.
6  * Copyright 2017-2018 NXP
7  *
8  */
9 
10 #include <linux/module.h>
11 
12 #include <linux/interrupt.h>
13 #include <linux/msi.h>
14 #include <linux/kthread.h>
15 #include <linux/workqueue.h>
16 
17 #include <linux/fsl/mc.h>
18 
19 #include "ethsw.h"
20 
21 static struct workqueue_struct *ethsw_owq;
22 
23 /* Minimum supported DPSW version */
24 #define DPSW_MIN_VER_MAJOR		8
25 #define DPSW_MIN_VER_MINOR		0
26 
27 #define DEFAULT_VLAN_ID			1
28 
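/* Create VID in the switch-wide VLAN table; ports are added to the VLAN
 * separately, through ethsw_port_add_vlan().
 */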
29 static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
30 {
31 	int err;
32 
33 	struct dpsw_vlan_cfg	vcfg = {
34 		.fdb_id = 0,
35 	};
36 
37 	if (ethsw->vlans[vid]) {
38 		dev_err(ethsw->dev, "VLAN already configured\n");
39 		return -EEXIST;
40 	}
41 
42 	err = dpsw_vlan_add(ethsw->mc_io, 0,
43 			    ethsw->dpsw_handle, vid, &vcfg);
44 	if (err) {
45 		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
46 		return err;
47 	}
48 	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
49 
50 	return 0;
51 }
52 
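/* Program the port PVID through the TCI configuration. The interface must be
 * disabled while the TCI is updated, so it is briefly brought down and back
 * up if it was operational.
 */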
53 static int ethsw_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
54 {
55 	struct ethsw_core *ethsw = port_priv->ethsw_data;
56 	struct net_device *netdev = port_priv->netdev;
57 	struct dpsw_tci_cfg tci_cfg = { 0 };
58 	bool is_oper;
59 	int err, ret;
60 
61 	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
62 			      port_priv->idx, &tci_cfg);
63 	if (err) {
64 		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
65 		return err;
66 	}
67 
68 	tci_cfg.vlan_id = pvid;
69 
70 	/* Interface needs to be down to change PVID */
71 	is_oper = netif_oper_up(netdev);
72 	if (is_oper) {
73 		err = dpsw_if_disable(ethsw->mc_io, 0,
74 				      ethsw->dpsw_handle,
75 				      port_priv->idx);
76 		if (err) {
77 			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
78 			return err;
79 		}
80 	}
81 
82 	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
83 			      port_priv->idx, &tci_cfg);
84 	if (err) {
85 		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
86 		goto set_tci_error;
87 	}
88 
89 	/* Delete previous PVID info and mark the new one */
90 	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
91 	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
92 	port_priv->pvid = pvid;
93 
94 set_tci_error:
95 	if (is_oper) {
96 		ret = dpsw_if_enable(ethsw->mc_io, 0,
97 				     ethsw->dpsw_handle,
98 				     port_priv->idx);
99 		if (ret) {
100 			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
101 			return ret;
102 		}
103 	}
104 
105 	return err;
106 }
107 
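/* Make the port a member of VID, honouring the bridge VLAN flags: UNTAGGED
 * strips the tag on egress and PVID makes it the port's default VLAN.
 */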
108 static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
109 			       u16 vid, u16 flags)
110 {
111 	struct ethsw_core *ethsw = port_priv->ethsw_data;
112 	struct net_device *netdev = port_priv->netdev;
113 	struct dpsw_vlan_if_cfg vcfg;
114 	int err;
115 
116 	if (port_priv->vlans[vid]) {
117 		netdev_warn(netdev, "VLAN %d already configured\n", vid);
118 		return -EEXIST;
119 	}
120 
121 	vcfg.num_ifs = 1;
122 	vcfg.if_id[0] = port_priv->idx;
123 	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
124 	if (err) {
125 		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
126 		return err;
127 	}
128 
129 	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
130 
131 	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
132 		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
133 						ethsw->dpsw_handle,
134 						vid, &vcfg);
135 		if (err) {
136 			netdev_err(netdev,
137 				   "dpsw_vlan_add_if_untagged err %d\n", err);
138 			return err;
139 		}
140 		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
141 	}
142 
143 	if (flags & BRIDGE_VLAN_INFO_PVID) {
144 		err = ethsw_port_set_pvid(port_priv, vid);
145 		if (err)
146 			return err;
147 	}
148 
149 	return 0;
150 }
151 
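/* Learning is a switch-wide setting: it toggles the learning mode of FDB 0,
 * which is shared by all ports.
 */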
152 static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
153 {
154 	enum dpsw_fdb_learning_mode learn_mode;
155 	int err;
156 
157 	if (flag)
158 		learn_mode = DPSW_FDB_LEARNING_MODE_HW;
159 	else
160 		learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
161 
162 	err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
163 					 learn_mode);
164 	if (err) {
165 		dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
166 		return err;
167 	}
168 	ethsw->learning = !!flag;
169 
170 	return 0;
171 }
172 
173 static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
174 {
175 	int err;
176 
177 	err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
178 				   port_priv->ethsw_data->dpsw_handle,
179 				   port_priv->idx, flag);
180 	if (err) {
181 		netdev_err(port_priv->netdev,
182 			   "dpsw_if_set_flooding err %d\n", err);
183 		return err;
184 	}
185 	port_priv->flood = !!flag;
186 
187 	return 0;
188 }
189 
190 static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
191 {
192 	struct dpsw_stp_cfg stp_cfg = {
193 		.vlan_id = DEFAULT_VLAN_ID,
194 		.state = state,
195 	};
196 	int err;
197 
198 	if (!netif_oper_up(port_priv->netdev) || state == port_priv->stp_state)
199 		return 0;	/* Nothing to do */
200 
201 	err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
202 			      port_priv->ethsw_data->dpsw_handle,
203 			      port_priv->idx, &stp_cfg);
204 	if (err) {
205 		netdev_err(port_priv->netdev,
206 			   "dpsw_if_set_stp err %d\n", err);
207 		return err;
208 	}
209 
210 	port_priv->stp_state = state;
211 
212 	return 0;
213 }
214 
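/* Remove VID from the switch-wide VLAN table and clear the per-port
 * bookkeeping for it.
 */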
215 static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
216 {
217 	struct ethsw_port_priv *ppriv_local = NULL;
218 	int i, err;
219 
220 	if (!ethsw->vlans[vid])
221 		return -ENOENT;
222 
223 	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
224 	if (err) {
225 		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
226 		return err;
227 	}
228 	ethsw->vlans[vid] = 0;
229 
230 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
231 		ppriv_local = ethsw->ports[i];
232 		ppriv_local->vlans[vid] = 0;
233 	}
234 
235 	return 0;
236 }
237 
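/* The helpers below install and remove static unicast/multicast FDB entries
 * pointing at this port.
 */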
238 static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
239 				 const unsigned char *addr)
240 {
241 	struct dpsw_fdb_unicast_cfg entry = {0};
242 	int err;
243 
244 	entry.if_egress = port_priv->idx;
245 	entry.type = DPSW_FDB_ENTRY_STATIC;
246 	ether_addr_copy(entry.mac_addr, addr);
247 
248 	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
249 				   port_priv->ethsw_data->dpsw_handle,
250 				   0, &entry);
251 	if (err)
252 		netdev_err(port_priv->netdev,
253 			   "dpsw_fdb_add_unicast err %d\n", err);
254 	return err;
255 }
256 
257 static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
258 				 const unsigned char *addr)
259 {
260 	struct dpsw_fdb_unicast_cfg entry = {0};
261 	int err;
262 
263 	entry.if_egress = port_priv->idx;
264 	entry.type = DPSW_FDB_ENTRY_STATIC;
265 	ether_addr_copy(entry.mac_addr, addr);
266 
267 	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
268 				      port_priv->ethsw_data->dpsw_handle,
269 				      0, &entry);
270 	/* Silently discard the error if the del command is called multiple times */
271 	if (err && err != -ENXIO)
272 		netdev_err(port_priv->netdev,
273 			   "dpsw_fdb_remove_unicast err %d\n", err);
274 	return err;
275 }
276 
277 static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
278 				 const unsigned char *addr)
279 {
280 	struct dpsw_fdb_multicast_cfg entry = {0};
281 	int err;
282 
283 	ether_addr_copy(entry.mac_addr, addr);
284 	entry.type = DPSW_FDB_ENTRY_STATIC;
285 	entry.num_ifs = 1;
286 	entry.if_id[0] = port_priv->idx;
287 
288 	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
289 				     port_priv->ethsw_data->dpsw_handle,
290 				     0, &entry);
291 	/* Silently discard the error if the add command is called multiple times */
292 	if (err && err != -ENXIO)
293 		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
294 			   err);
295 	return err;
296 }
297 
298 static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
299 				 const unsigned char *addr)
300 {
301 	struct dpsw_fdb_multicast_cfg entry = {0};
302 	int err;
303 
304 	ether_addr_copy(entry.mac_addr, addr);
305 	entry.type = DPSW_FDB_ENTRY_STATIC;
306 	entry.num_ifs = 1;
307 	entry.if_id[0] = port_priv->idx;
308 
309 	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
310 					port_priv->ethsw_data->dpsw_handle,
311 					0, &entry);
312 	/* Silently discard the error if the del command is called multiple times */
313 	if (err && err != -ENAVAIL)
314 		netdev_err(port_priv->netdev,
315 			   "dpsw_fdb_remove_multicast err %d\n", err);
316 	return err;
317 }
318 
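/* Gather the DPSW hardware counters into rtnl link stats; filtered ingress
 * frames are accounted as rx_dropped.
 */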
319 static void port_get_stats(struct net_device *netdev,
320 			   struct rtnl_link_stats64 *stats)
321 {
322 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
323 	u64 tmp;
324 	int err;
325 
326 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
327 				  port_priv->ethsw_data->dpsw_handle,
328 				  port_priv->idx,
329 				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
330 	if (err)
331 		goto error;
332 
333 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
334 				  port_priv->ethsw_data->dpsw_handle,
335 				  port_priv->idx,
336 				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
337 	if (err)
338 		goto error;
339 
340 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
341 				  port_priv->ethsw_data->dpsw_handle,
342 				  port_priv->idx,
343 				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
344 	if (err)
345 		goto error;
346 
347 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
348 				  port_priv->ethsw_data->dpsw_handle,
349 				  port_priv->idx,
350 				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
351 	if (err)
352 		goto error;
353 
354 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
355 				  port_priv->ethsw_data->dpsw_handle,
356 				  port_priv->idx,
357 				  DPSW_CNT_ING_FRAME_DISCARD,
358 				  &stats->rx_dropped);
359 	if (err)
360 		goto error;
361 
362 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
363 				  port_priv->ethsw_data->dpsw_handle,
364 				  port_priv->idx,
365 				  DPSW_CNT_ING_FLTR_FRAME,
366 				  &tmp);
367 	if (err)
368 		goto error;
369 	stats->rx_dropped += tmp;
370 
371 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
372 				  port_priv->ethsw_data->dpsw_handle,
373 				  port_priv->idx,
374 				  DPSW_CNT_EGR_FRAME_DISCARD,
375 				  &stats->tx_dropped);
376 	if (err)
377 		goto error;
378 
379 	return;
380 
381 error:
382 	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
383 }
384 
385 static bool port_has_offload_stats(const struct net_device *netdev,
386 				   int attr_id)
387 {
388 	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
389 }
390 
391 static int port_get_offload_stats(int attr_id,
392 				  const struct net_device *netdev,
393 				  void *sp)
394 {
395 	switch (attr_id) {
396 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
397 		port_get_stats((struct net_device *)netdev, sp);
398 		return 0;
399 	}
400 
401 	return -EINVAL;
402 }
403 
404 static int port_change_mtu(struct net_device *netdev, int mtu)
405 {
406 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
407 	int err;
408 
409 	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
410 					   0,
411 					   port_priv->ethsw_data->dpsw_handle,
412 					   port_priv->idx,
413 					   (u16)ETHSW_L2_MAX_FRM(mtu));
414 	if (err) {
415 		netdev_err(netdev,
416 			   "dpsw_if_set_max_frame_length() err %d\n", err);
417 		return err;
418 	}
419 
420 	netdev->mtu = mtu;
421 	return 0;
422 }
423 
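/* Read the link state from the MC firmware and propagate it to the netdev
 * carrier state.
 */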
424 static int port_carrier_state_sync(struct net_device *netdev)
425 {
426 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
427 	struct dpsw_link_state state;
428 	int err;
429 
430 	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
431 				     port_priv->ethsw_data->dpsw_handle,
432 				     port_priv->idx, &state);
433 	if (err) {
434 		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
435 		return err;
436 	}
437 
438 	WARN_ONCE(state.up > 1, "Garbage read into link_state");
439 
440 	if (state.up != port_priv->link_state) {
441 		if (state.up)
442 			netif_carrier_on(netdev);
443 		else
444 			netif_carrier_off(netdev);
445 		port_priv->link_state = state.up;
446 	}
447 	return 0;
448 }
449 
450 static int port_open(struct net_device *netdev)
451 {
452 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
453 	int err;
454 
455 	/* No need to allow Tx as control interface is disabled */
456 	netif_tx_stop_all_queues(netdev);
457 
458 	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
459 			     port_priv->ethsw_data->dpsw_handle,
460 			     port_priv->idx);
461 	if (err) {
462 		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
463 		return err;
464 	}
465 
466 	/* sync carrier state */
467 	err = port_carrier_state_sync(netdev);
468 	if (err) {
469 		netdev_err(netdev,
470 			   "port_carrier_state_sync err %d\n", err);
471 		goto err_carrier_sync;
472 	}
473 
474 	return 0;
475 
476 err_carrier_sync:
477 	dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
478 			port_priv->ethsw_data->dpsw_handle,
479 			port_priv->idx);
480 	return err;
481 }
482 
483 static int port_stop(struct net_device *netdev)
484 {
485 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
486 	int err;
487 
488 	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
489 			      port_priv->ethsw_data->dpsw_handle,
490 			      port_priv->idx);
491 	if (err) {
492 		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
493 		return err;
494 	}
495 
496 	return 0;
497 }
498 
499 static netdev_tx_t port_dropframe(struct sk_buff *skb,
500 				  struct net_device *netdev)
501 {
502 	/* We don't support I/O for now; drop the frame */
503 	dev_kfree_skb_any(skb);
504 
505 	return NETDEV_TX_OK;
506 }
507 
508 static const struct net_device_ops ethsw_port_ops = {
509 	.ndo_open		= port_open,
510 	.ndo_stop		= port_stop,
511 
512 	.ndo_set_mac_address	= eth_mac_addr,
513 	.ndo_change_mtu		= port_change_mtu,
514 	.ndo_has_offload_stats	= port_has_offload_stats,
515 	.ndo_get_offload_stats	= port_get_offload_stats,
516 
517 	.ndo_start_xmit		= port_dropframe,
518 };
519 
520 static void ethsw_links_state_update(struct ethsw_core *ethsw)
521 {
522 	int i;
523 
524 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
525 		port_carrier_state_sync(ethsw->ports[i]->netdev);
526 }
527 
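/* Threaded handler for the DPSW interrupt: reads (and thereby clears) the
 * interrupt status and resynchronizes the carrier state of all ports on a
 * link change event.
 */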
528 static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
529 {
530 	struct device *dev = (struct device *)arg;
531 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
532 
533 	/* Mask the events and the if_id reserved bits to be cleared on read */
534 	u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
535 	int err;
536 
537 	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
538 				  DPSW_IRQ_INDEX_IF, &status);
539 	if (err) {
540 		dev_err(dev, "Can't get irq status (err %d)", err);
541 
542 		err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
543 					    DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
544 		if (err)
545 			dev_err(dev, "Can't clear irq status (err %d)", err);
546 		goto out;
547 	}
548 
549 	if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
550 		ethsw_links_state_update(ethsw);
551 
552 out:
553 	return IRQ_HANDLED;
554 }
555 
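/* Allocate the MC interrupts for the DPSW object and hook up the threaded
 * handler for link change events.
 */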
556 static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
557 {
558 	struct device *dev = &sw_dev->dev;
559 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
560 	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
561 	struct fsl_mc_device_irq *irq;
562 	int err;
563 
564 	err = fsl_mc_allocate_irqs(sw_dev);
565 	if (err) {
566 		dev_err(dev, "MC irqs allocation failed\n");
567 		return err;
568 	}
569 
570 	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
571 		err = -EINVAL;
572 		goto free_irq;
573 	}
574 
575 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
576 				  DPSW_IRQ_INDEX_IF, 0);
577 	if (err) {
578 		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
579 		goto free_irq;
580 	}
581 
582 	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
583 
584 	err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
585 					NULL,
586 					ethsw_irq0_handler_thread,
587 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
588 					dev_name(dev), dev);
589 	if (err) {
590 		dev_err(dev, "devm_request_threaded_irq(): %d", err);
591 		goto free_irq;
592 	}
593 
594 	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
595 				DPSW_IRQ_INDEX_IF, mask);
596 	if (err) {
597 		dev_err(dev, "dpsw_set_irq_mask(): %d", err);
598 		goto free_devm_irq;
599 	}
600 
601 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
602 				  DPSW_IRQ_INDEX_IF, 1);
603 	if (err) {
604 		dev_err(dev, "dpsw_set_irq_enable(): %d", err);
605 		goto free_devm_irq;
606 	}
607 
608 	return 0;
609 
610 free_devm_irq:
611 	devm_free_irq(dev, irq->msi_desc->irq, dev);
612 free_irq:
613 	fsl_mc_free_irqs(sw_dev);
614 	return err;
615 }
616 
617 static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
618 {
619 	struct device *dev = &sw_dev->dev;
620 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
621 	int err;
622 
623 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
624 				  DPSW_IRQ_INDEX_IF, 0);
625 	if (err)
626 		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
627 
628 	fsl_mc_free_irqs(sw_dev);
629 }
630 
631 static int swdev_port_attr_get(struct net_device *netdev,
632 			       struct switchdev_attr *attr)
633 {
634 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
635 
636 	switch (attr->id) {
637 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
638 		attr->u.ppid.id_len = 1;
639 		attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id;
640 		break;
641 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
642 		attr->u.brport_flags =
643 			(port_priv->ethsw_data->learning ? BR_LEARNING : 0) |
644 			(port_priv->flood ? BR_FLOOD : 0);
645 		break;
646 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
647 		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
648 		break;
649 	default:
650 		return -EOPNOTSUPP;
651 	}
652 
653 	return 0;
654 }
655 
656 static int port_attr_stp_state_set(struct net_device *netdev,
657 				   struct switchdev_trans *trans,
658 				   u8 state)
659 {
660 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
661 
662 	if (switchdev_trans_ph_prepare(trans))
663 		return 0;
664 
665 	return ethsw_port_set_stp_state(port_priv, state);
666 }
667 
668 static int port_attr_br_flags_set(struct net_device *netdev,
669 				  struct switchdev_trans *trans,
670 				  unsigned long flags)
671 {
672 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
673 	int err = 0;
674 
675 	if (switchdev_trans_ph_prepare(trans))
676 		return 0;
677 
678 	/* Learning is enabled per switch */
679 	err = ethsw_set_learning(port_priv->ethsw_data, flags & BR_LEARNING);
680 	if (err)
681 		goto exit;
682 
683 	err = ethsw_port_set_flood(port_priv, flags & BR_FLOOD);
684 
685 exit:
686 	return err;
687 }
688 
689 static int swdev_port_attr_set(struct net_device *netdev,
690 			       const struct switchdev_attr *attr,
691 			       struct switchdev_trans *trans)
692 {
693 	int err = 0;
694 
695 	switch (attr->id) {
696 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
697 		err = port_attr_stp_state_set(netdev, trans,
698 					      attr->u.stp_state);
699 		break;
700 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
701 		err = port_attr_br_flags_set(netdev, trans,
702 					     attr->u.brport_flags);
703 		break;
704 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
705 		/* VLANs are supported by default */
706 		break;
707 	default:
708 		err = -EOPNOTSUPP;
709 		break;
710 	}
711 
712 	return err;
713 }
714 
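/* switchdev VLAN object add, typically reached from userspace through e.g.
 * "bridge vlan add dev <port> vid <vid> [pvid] [untagged]". The switch-wide
 * VLAN is created on first use, then the port is added for each VID in the
 * range.
 */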
715 static int port_vlans_add(struct net_device *netdev,
716 			  const struct switchdev_obj_port_vlan *vlan,
717 			  struct switchdev_trans *trans)
718 {
719 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
720 	int vid, err;
721 
722 	if (netif_is_bridge_master(vlan->obj.orig_dev))
723 		return -EOPNOTSUPP;
724 
725 	if (switchdev_trans_ph_prepare(trans))
726 		return 0;
727 
728 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
729 		if (!port_priv->ethsw_data->vlans[vid]) {
730 			/* this is a new VLAN */
731 			err = ethsw_add_vlan(port_priv->ethsw_data, vid);
732 			if (err)
733 				return err;
734 
735 			port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
736 		}
737 		err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
738 		if (err)
739 			break;
740 	}
741 
742 	return err;
743 }
744 
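/* Return 1 if @addr is already present in the netdev's unicast (is_uc) or
 * multicast address list.
 */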
745 static int port_lookup_address(struct net_device *netdev, int is_uc,
746 			       const unsigned char *addr)
747 {
748 	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
749 	struct netdev_hw_addr *ha;
750 
751 	netif_addr_lock_bh(netdev);
752 	list_for_each_entry(ha, &list->list, list) {
753 		if (ether_addr_equal(ha->addr, addr)) {
754 			netif_addr_unlock_bh(netdev);
755 			return 1;
756 		}
757 	}
758 	netif_addr_unlock_bh(netdev);
759 	return 0;
760 }
761 
762 static int port_mdb_add(struct net_device *netdev,
763 			const struct switchdev_obj_port_mdb *mdb,
764 			struct switchdev_trans *trans)
765 {
766 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
767 	int err;
768 
769 	if (switchdev_trans_ph_prepare(trans))
770 		return 0;
771 
772 	/* Check if address is already set on this port */
773 	if (port_lookup_address(netdev, 0, mdb->addr))
774 		return -EEXIST;
775 
776 	err = ethsw_port_fdb_add_mc(port_priv, mdb->addr);
777 	if (err)
778 		return err;
779 
780 	err = dev_mc_add(netdev, mdb->addr);
781 	if (err) {
782 		netdev_err(netdev, "dev_mc_add err %d\n", err);
783 		ethsw_port_fdb_del_mc(port_priv, mdb->addr);
784 	}
785 
786 	return err;
787 }
788 
789 static int swdev_port_obj_add(struct net_device *netdev,
790 			      const struct switchdev_obj *obj,
791 			      struct switchdev_trans *trans)
792 {
793 	int err;
794 
795 	switch (obj->id) {
796 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
797 		err = port_vlans_add(netdev,
798 				     SWITCHDEV_OBJ_PORT_VLAN(obj),
799 				     trans);
800 		break;
801 	case SWITCHDEV_OBJ_ID_PORT_MDB:
802 		err = port_mdb_add(netdev,
803 				   SWITCHDEV_OBJ_PORT_MDB(obj),
804 				   trans);
805 		break;
806 	default:
807 		err = -EOPNOTSUPP;
808 		break;
809 	}
810 
811 	return err;
812 }
813 
814 static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
815 {
816 	struct ethsw_core *ethsw = port_priv->ethsw_data;
817 	struct net_device *netdev = port_priv->netdev;
818 	struct dpsw_vlan_if_cfg vcfg;
819 	int i, err;
820 
821 	if (!port_priv->vlans[vid])
822 		return -ENOENT;
823 
824 	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
825 		err = ethsw_port_set_pvid(port_priv, 0);
826 		if (err)
827 			return err;
828 	}
829 
830 	vcfg.num_ifs = 1;
831 	vcfg.if_id[0] = port_priv->idx;
832 	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
833 		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
834 						   ethsw->dpsw_handle,
835 						   vid, &vcfg);
836 		if (err) {
837 			netdev_err(netdev,
838 				   "dpsw_vlan_remove_if_untagged err %d\n",
839 				   err);
840 		}
841 		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
842 	}
843 
844 	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
845 		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
846 					  vid, &vcfg);
847 		if (err) {
848 			netdev_err(netdev,
849 				   "dpsw_vlan_remove_if err %d\n", err);
850 			return err;
851 		}
852 		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
853 
854 		/* Delete VLAN from switch if it is no longer configured on
855 		 * any port
856 		 */
857 		for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
858 			if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
859 				return 0; /* Found a port member in VID */
860 
861 		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
862 
863 		err = ethsw_dellink_switch(ethsw, vid);
864 		if (err)
865 			return err;
866 	}
867 
868 	return 0;
869 }
870 
871 static int port_vlans_del(struct net_device *netdev,
872 			  const struct switchdev_obj_port_vlan *vlan)
873 {
874 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
875 	int vid, err;
876 
877 	if (netif_is_bridge_master(vlan->obj.orig_dev))
878 		return -EOPNOTSUPP;
879 
880 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
881 		err = ethsw_port_del_vlan(port_priv, vid);
882 		if (err)
883 			break;
884 	}
885 
886 	return err;
887 }
888 
889 static int port_mdb_del(struct net_device *netdev,
890 			const struct switchdev_obj_port_mdb *mdb)
891 {
892 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
893 	int err;
894 
895 	if (!port_lookup_address(netdev, 0, mdb->addr))
896 		return -ENOENT;
897 
898 	err = ethsw_port_fdb_del_mc(port_priv, mdb->addr);
899 	if (err)
900 		return err;
901 
902 	err = dev_mc_del(netdev, mdb->addr);
903 	if (err) {
904 		netdev_err(netdev, "dev_mc_del err %d\n", err);
905 		return err;
906 	}
907 
908 	return err;
909 }
910 
911 static int swdev_port_obj_del(struct net_device *netdev,
912 			      const struct switchdev_obj *obj)
913 {
914 	int err;
915 
916 	switch (obj->id) {
917 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
918 		err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
919 		break;
920 	case SWITCHDEV_OBJ_ID_PORT_MDB:
921 		err = port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
922 		break;
923 	default:
924 		err = -EOPNOTSUPP;
925 		break;
926 	}
927 	return err;
928 }
929 
930 static const struct switchdev_ops ethsw_port_switchdev_ops = {
931 	.switchdev_port_attr_get	= swdev_port_attr_get,
932 	.switchdev_port_attr_set	= swdev_port_attr_set,
933 	.switchdev_port_obj_add		= swdev_port_obj_add,
934 	.switchdev_port_obj_del		= swdev_port_obj_del,
935 };
936 
937 /* For the moment, only flood setting needs to be updated */
938 static int port_bridge_join(struct net_device *netdev,
939 			    struct net_device *upper_dev)
940 {
941 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
942 	struct ethsw_core *ethsw = port_priv->ethsw_data;
943 	int i, err;
944 
945 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
946 		if (ethsw->ports[i]->bridge_dev &&
947 		    (ethsw->ports[i]->bridge_dev != upper_dev)) {
948 			netdev_err(netdev,
949 				   "Another switch port is connected to %s\n",
950 				   ethsw->ports[i]->bridge_dev->name);
951 			return -EINVAL;
952 		}
953 
954 	/* Enable flooding */
955 	err = ethsw_port_set_flood(port_priv, 1);
956 	if (!err)
957 		port_priv->bridge_dev = upper_dev;
958 
959 	return err;
960 }
961 
962 static int port_bridge_leave(struct net_device *netdev)
963 {
964 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
965 	int err;
966 
967 	/* Disable flooding */
968 	err = ethsw_port_set_flood(port_priv, 0);
969 	if (!err)
970 		port_priv->bridge_dev = NULL;
971 
972 	return err;
973 }
974 
975 static int port_netdevice_event(struct notifier_block *unused,
976 				unsigned long event, void *ptr)
977 {
978 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
979 	struct netdev_notifier_changeupper_info *info = ptr;
980 	struct net_device *upper_dev;
981 	int err = 0;
982 
983 	if (netdev->netdev_ops != &ethsw_port_ops)
984 		return NOTIFY_DONE;
985 
986 	/* Handle just upper dev link/unlink for the moment */
987 	if (event == NETDEV_CHANGEUPPER) {
988 		upper_dev = info->upper_dev;
989 		if (netif_is_bridge_master(upper_dev)) {
990 			if (info->linking)
991 				err = port_bridge_join(netdev, upper_dev);
992 			else
993 				err = port_bridge_leave(netdev);
994 		}
995 	}
996 
997 	return notifier_from_errno(err);
998 }
999 
1000 static struct notifier_block port_nb __read_mostly = {
1001 	.notifier_call = port_netdevice_event,
1002 };
1003 
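/* FDB notifications arrive in atomic context, so they are deferred to the
 * ordered workqueue and handled under rtnl_lock by the work item below.
 */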
1004 struct ethsw_switchdev_event_work {
1005 	struct work_struct work;
1006 	struct switchdev_notifier_fdb_info fdb_info;
1007 	struct net_device *dev;
1008 	unsigned long event;
1009 };
1010 
1011 static void ethsw_switchdev_event_work(struct work_struct *work)
1012 {
1013 	struct ethsw_switchdev_event_work *switchdev_work =
1014 		container_of(work, struct ethsw_switchdev_event_work, work);
1015 	struct net_device *dev = switchdev_work->dev;
1016 	struct switchdev_notifier_fdb_info *fdb_info;
1017 	struct ethsw_port_priv *port_priv;
1018 
1019 	rtnl_lock();
1020 	port_priv = netdev_priv(dev);
1021 	fdb_info = &switchdev_work->fdb_info;
1022 
1023 	switch (switchdev_work->event) {
1024 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
1025 		if (is_unicast_ether_addr(fdb_info->addr))
1026 			ethsw_port_fdb_add_uc(netdev_priv(dev), fdb_info->addr);
1027 		else
1028 			ethsw_port_fdb_add_mc(netdev_priv(dev), fdb_info->addr);
1029 		break;
1030 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
1031 		if (is_unicast_ether_addr(fdb_info->addr))
1032 			ethsw_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
1033 		else
1034 			ethsw_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
1035 		break;
1036 	}
1037 
1038 	rtnl_unlock();
1039 	kfree(switchdev_work->fdb_info.addr);
1040 	kfree(switchdev_work);
1041 	dev_put(dev);
1042 }
1043 
1044 /* Called under rcu_read_lock() */
1045 static int port_switchdev_event(struct notifier_block *unused,
1046 				unsigned long event, void *ptr)
1047 {
1048 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1049 	struct ethsw_switchdev_event_work *switchdev_work;
1050 	struct switchdev_notifier_fdb_info *fdb_info = ptr;
1051 
1052 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
1053 	if (!switchdev_work)
1054 		return NOTIFY_BAD;
1055 
1056 	INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
1057 	switchdev_work->dev = dev;
1058 	switchdev_work->event = event;
1059 
1060 	switch (event) {
1061 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
1062 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
1063 		memcpy(&switchdev_work->fdb_info, ptr,
1064 		       sizeof(switchdev_work->fdb_info));
1065 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
1066 		if (!switchdev_work->fdb_info.addr)
1067 			goto err_addr_alloc;
1068 
1069 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
1070 				fdb_info->addr);
1071 
1072 		/* Take a reference on the device so it is not freed while the work is pending. */
1073 		dev_hold(dev);
1074 		break;
1075 	default:
		/* Unhandled event: free the work item allocated above */
		kfree(switchdev_work);
1076 		return NOTIFY_DONE;
1077 	}
1078 
1079 	queue_work(ethsw_owq, &switchdev_work->work);
1080 
1081 	return NOTIFY_DONE;
1082 
1083 err_addr_alloc:
1084 	kfree(switchdev_work);
1085 	return NOTIFY_BAD;
1086 }
1087 
1088 static struct notifier_block port_switchdev_nb = {
1089 	.notifier_call = port_switchdev_event,
1090 };
1091 
1092 static int ethsw_register_notifier(struct device *dev)
1093 {
1094 	int err;
1095 
1096 	err = register_netdevice_notifier(&port_nb);
1097 	if (err) {
1098 		dev_err(dev, "Failed to register netdev notifier\n");
1099 		return err;
1100 	}
1101 
1102 	err = register_switchdev_notifier(&port_switchdev_nb);
1103 	if (err) {
1104 		dev_err(dev, "Failed to register switchdev notifier\n");
1105 		goto err_switchdev_nb;
1106 	}
1107 
1108 	return 0;
1109 
1110 err_switchdev_nb:
1111 	unregister_netdevice_notifier(&port_nb);
1112 	return err;
1113 }
1114 
1115 static int ethsw_open(struct ethsw_core *ethsw)
1116 {
1117 	struct ethsw_port_priv *port_priv = NULL;
1118 	int i, err;
1119 
1120 	err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
1121 	if (err) {
1122 		dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
1123 		return err;
1124 	}
1125 
1126 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1127 		port_priv = ethsw->ports[i];
1128 		err = dev_open(port_priv->netdev);
1129 		if (err) {
1130 			netdev_err(port_priv->netdev, "dev_open err %d\n", err);
1131 			return err;
1132 		}
1133 	}
1134 
1135 	return 0;
1136 }
1137 
1138 static int ethsw_stop(struct ethsw_core *ethsw)
1139 {
1140 	struct ethsw_port_priv *port_priv = NULL;
1141 	int i, err;
1142 
1143 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1144 		port_priv = ethsw->ports[i];
1145 		dev_close(port_priv->netdev);
1146 	}
1147 
1148 	err = dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
1149 	if (err) {
1150 		dev_err(ethsw->dev, "dpsw_disable err %d\n", err);
1151 		return err;
1152 	}
1153 
1154 	return 0;
1155 }
1156 
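/* Open the DPSW object, check the firmware API version and apply the default
 * switch configuration (reset, HW learning, STP forwarding, broadcast).
 */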
1157 static int ethsw_init(struct fsl_mc_device *sw_dev)
1158 {
1159 	struct device *dev = &sw_dev->dev;
1160 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1161 	u16 version_major, version_minor, i;
1162 	struct dpsw_stp_cfg stp_cfg;
1163 	int err;
1164 
1165 	ethsw->dev_id = sw_dev->obj_desc.id;
1166 
1167 	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
1168 	if (err) {
1169 		dev_err(dev, "dpsw_open err %d\n", err);
1170 		return err;
1171 	}
1172 
1173 	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
1174 				  &ethsw->sw_attr);
1175 	if (err) {
1176 		dev_err(dev, "dpsw_get_attributes err %d\n", err);
1177 		goto err_close;
1178 	}
1179 
1180 	err = dpsw_get_api_version(ethsw->mc_io, 0,
1181 				   &version_major,
1182 				   &version_minor);
1183 	if (err) {
1184 		dev_err(dev, "dpsw_get_api_version err %d\n", err);
1185 		goto err_close;
1186 	}
1187 
1188 	/* Minimum supported DPSW version check */
1189 	if (version_major < DPSW_MIN_VER_MAJOR ||
1190 	    (version_major == DPSW_MIN_VER_MAJOR &&
1191 	     version_minor < DPSW_MIN_VER_MINOR)) {
1192 		dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
1193 			version_major,
1194 			version_minor,
1195 			DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
1196 		err = -ENOTSUPP;
1197 		goto err_close;
1198 	}
1199 
1200 	err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
1201 	if (err) {
1202 		dev_err(dev, "dpsw_reset err %d\n", err);
1203 		goto err_close;
1204 	}
1205 
1206 	err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
1207 					 DPSW_FDB_LEARNING_MODE_HW);
1208 	if (err) {
1209 		dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
1210 		goto err_close;
1211 	}
1212 
1213 	stp_cfg.vlan_id = DEFAULT_VLAN_ID;
1214 	stp_cfg.state = DPSW_STP_STATE_FORWARDING;
1215 
1216 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1217 		err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
1218 				      &stp_cfg);
1219 		if (err) {
1220 			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
1221 				err, i);
1222 			goto err_close;
1223 		}
1224 
1225 		err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
1226 					    ethsw->dpsw_handle, i, 1);
1227 		if (err) {
1228 			dev_err(dev,
1229 				"dpsw_if_set_broadcast err %d for port %d\n",
1230 				err, i);
1231 			goto err_close;
1232 		}
1233 	}
1234 
1235 	ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
1236 					    "ethsw");
1237 	if (!ethsw_owq) {
1238 		err = -ENOMEM;
1239 		goto err_close;
1240 	}
1241 
1242 	err = ethsw_register_notifier(dev);
1243 	if (err)
1244 		goto err_destroy_ordered_workqueue;
1245 
1246 	return 0;
1247 
1248 err_destroy_ordered_workqueue:
1249 	destroy_workqueue(ethsw_owq);
1250 
1251 err_close:
1252 	dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
1253 	return err;
1254 }
1255 
1256 static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
1257 {
1258 	const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
1259 	struct net_device *netdev = port_priv->netdev;
1260 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1261 	struct dpsw_vlan_if_cfg vcfg;
1262 	int err;
1263 
1264 	/* Switch starts with all ports configured to VLAN 1. Need to
1265 	 * remove this setting to allow configuration at bridge join
1266 	 */
1267 	vcfg.num_ifs = 1;
1268 	vcfg.if_id[0] = port_priv->idx;
1269 
1270 	err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
1271 					   DEFAULT_VLAN_ID, &vcfg);
1272 	if (err) {
1273 		netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
1274 			   err);
1275 		return err;
1276 	}
1277 
1278 	err = ethsw_port_set_pvid(port_priv, 0);
1279 	if (err)
1280 		return err;
1281 
1282 	err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1283 				  DEFAULT_VLAN_ID, &vcfg);
1284 	if (err) {
1285 		netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
1286 		return err;
1287 	}
1288 
1289 	err = ethsw_port_fdb_add_mc(port_priv, def_mcast);
1290 
1291 	return err;
1292 }
1293 
1294 static void ethsw_unregister_notifier(struct device *dev)
1295 {
1296 	int err;
1297 
1298 	err = unregister_switchdev_notifier(&port_switchdev_nb);
1299 	if (err)
1300 		dev_err(dev,
1301 			"Failed to unregister switchdev notifier (%d)\n", err);
1302 
1303 	err = unregister_netdevice_notifier(&port_nb);
1304 	if (err)
1305 		dev_err(dev,
1306 			"Failed to unregister netdev notifier (%d)\n", err);
1307 }
1308 
1309 static void ethsw_takedown(struct fsl_mc_device *sw_dev)
1310 {
1311 	struct device *dev = &sw_dev->dev;
1312 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1313 	int err;
1314 
1315 	ethsw_unregister_notifier(dev);
1316 
1317 	err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
1318 	if (err)
1319 		dev_warn(dev, "dpsw_close err %d\n", err);
1320 }
1321 
1322 static int ethsw_remove(struct fsl_mc_device *sw_dev)
1323 {
1324 	struct ethsw_port_priv *port_priv;
1325 	struct ethsw_core *ethsw;
1326 	struct device *dev;
1327 	int i;
1328 
1329 	dev = &sw_dev->dev;
1330 	ethsw = dev_get_drvdata(dev);
1331 
1332 	ethsw_teardown_irqs(sw_dev);
1333 
1334 	destroy_workqueue(ethsw_owq);
1335 
1336 	rtnl_lock();
1337 	ethsw_stop(ethsw);
1338 	rtnl_unlock();
1339 
1340 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1341 		port_priv = ethsw->ports[i];
1342 		unregister_netdev(port_priv->netdev);
1343 		free_netdev(port_priv->netdev);
1344 	}
1345 	kfree(ethsw->ports);
1346 
1347 	ethsw_takedown(sw_dev);
1348 	fsl_mc_portal_free(ethsw->mc_io);
1349 
1350 	kfree(ethsw);
1351 
1352 	dev_set_drvdata(dev, NULL);
1353 
1354 	return 0;
1355 }
1356 
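/* Allocate and register the netdev for one switch port and apply its initial
 * configuration.
 */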
1357 static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
1358 {
1359 	struct ethsw_port_priv *port_priv;
1360 	struct device *dev = ethsw->dev;
1361 	struct net_device *port_netdev;
1362 	int err;
1363 
1364 	port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
1365 	if (!port_netdev) {
1366 		dev_err(dev, "alloc_etherdev error\n");
1367 		return -ENOMEM;
1368 	}
1369 
1370 	port_priv = netdev_priv(port_netdev);
1371 	port_priv->netdev = port_netdev;
1372 	port_priv->ethsw_data = ethsw;
1373 
1374 	port_priv->idx = port_idx;
1375 	port_priv->stp_state = BR_STATE_FORWARDING;
1376 
1377 	/* Flooding is implicitly enabled */
1378 	port_priv->flood = true;
1379 
1380 	SET_NETDEV_DEV(port_netdev, dev);
1381 	port_netdev->netdev_ops = &ethsw_port_ops;
1382 	port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
1383 	port_netdev->switchdev_ops = &ethsw_port_switchdev_ops;
1384 
1385 	/* Set MTU limits */
1386 	port_netdev->min_mtu = ETH_MIN_MTU;
1387 	port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
1388 
1389 	err = register_netdev(port_netdev);
1390 	if (err < 0) {
1391 		dev_err(dev, "register_netdev error %d\n", err);
1392 		free_netdev(port_netdev);
1393 		return err;
1394 	}
1395 
1396 	ethsw->ports[port_idx] = port_priv;
1397 
1398 	return ethsw_port_init(port_priv, port_idx);
1399 }
1400 
1401 static int ethsw_probe(struct fsl_mc_device *sw_dev)
1402 {
1403 	struct device *dev = &sw_dev->dev;
1404 	struct ethsw_core *ethsw;
1405 	int i, err;
1406 
1407 	/* Allocate switch core */
1408 	ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
1409 
1410 	if (!ethsw)
1411 		return -ENOMEM;
1412 
1413 	ethsw->dev = dev;
1414 	dev_set_drvdata(dev, ethsw);
1415 
1416 	err = fsl_mc_portal_allocate(sw_dev, 0, &ethsw->mc_io);
1417 	if (err) {
1418 		if (err == -ENXIO)
1419 			err = -EPROBE_DEFER;
1420 		else
1421 			dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
1422 		goto err_free_drvdata;
1423 	}
1424 
1425 	err = ethsw_init(sw_dev);
1426 	if (err)
1427 		goto err_free_cmdport;
1428 
1429 	/* DEFAULT_VLAN_ID is implicitly configured on the switch */
1430 	ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
1431 
1432 	/* Learning is implicitly enabled */
1433 	ethsw->learning = true;
1434 
1435 	ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
1436 			       GFP_KERNEL);
1437 	if (!(ethsw->ports)) {
1438 		err = -ENOMEM;
1439 		goto err_takedown;
1440 	}
1441 
1442 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1443 		err = ethsw_probe_port(ethsw, i);
1444 		if (err)
1445 			goto err_free_ports;
1446 	}
1447 
1448 	/* Switch starts up enabled */
1449 	rtnl_lock();
1450 	err = ethsw_open(ethsw);
1451 	rtnl_unlock();
1452 	if (err)
1453 		goto err_free_ports;
1454 
1455 	/* Setup IRQs */
1456 	err = ethsw_setup_irqs(sw_dev);
1457 	if (err)
1458 		goto err_stop;
1459 
1460 	dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
1461 	return 0;
1462 
1463 err_stop:
1464 	rtnl_lock();
1465 	ethsw_stop(ethsw);
1466 	rtnl_unlock();
1467 
1468 err_free_ports:
1469 	/* Cleanup registered ports only */
1470 	for (i--; i >= 0; i--) {
1471 		unregister_netdev(ethsw->ports[i]->netdev);
1472 		free_netdev(ethsw->ports[i]->netdev);
1473 	}
1474 	kfree(ethsw->ports);
1475 
1476 err_takedown:
1477 	ethsw_takedown(sw_dev);
1478 
1479 err_free_cmdport:
1480 	fsl_mc_portal_free(ethsw->mc_io);
1481 
1482 err_free_drvdata:
1483 	kfree(ethsw);
1484 	dev_set_drvdata(dev, NULL);
1485 
1486 	return err;
1487 }
1488 
1489 static const struct fsl_mc_device_id ethsw_match_id_table[] = {
1490 	{
1491 		.vendor = FSL_MC_VENDOR_FREESCALE,
1492 		.obj_type = "dpsw",
1493 	},
1494 	{ .vendor = 0x0 }
1495 };
1496 MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
1497 
1498 static struct fsl_mc_driver eth_sw_drv = {
1499 	.driver = {
1500 		.name = KBUILD_MODNAME,
1501 		.owner = THIS_MODULE,
1502 	},
1503 	.probe = ethsw_probe,
1504 	.remove = ethsw_remove,
1505 	.match_id_table = ethsw_match_id_table
1506 };
1507 
1508 module_fsl_mc_driver(eth_sw_drv);
1509 
1510 MODULE_LICENSE("GPL v2");
1511 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
1512