// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <net/dcbnl.h>
#include <linux/netpoll.h>
#include <linux/string.h>

#include "dsa.h"
#include "port.h"
#include "master.h"
#include "netlink.h"
#include "slave.h"
#include "switch.h"
#include "tag.h"

struct dsa_switchdev_event_work {
	struct net_device *dev;
	struct net_device *orig_dev;
	struct work_struct work;
	unsigned long event;
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];
	u16 vid;
	bool host_addr;
};

enum dsa_standalone_event {
	DSA_UC_ADD,
	DSA_UC_DEL,
	DSA_MC_ADD,
	DSA_MC_DEL,
};

struct dsa_standalone_event_work {
	struct work_struct work;
	struct net_device *dev;
	enum dsa_standalone_event event;
	unsigned char addr[ETH_ALEN];
	u16 vid;
};

struct dsa_host_vlan_rx_filtering_ctx {
	struct net_device *dev;
	const unsigned char *addr;
	enum dsa_standalone_event event;
};

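/* Standalone ports can only do host address filtering safely when the
 * switch offers FDB isolation (entries installed for standalone ports must
 * not be matched by bridged ports, and vice versa) and when VLAN filtering
 * can be toggled per port, i.e. it is neither global to the switch nor
 * emulated through standalone VLAN filtering workarounds.
 */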
static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}

static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
{
	return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
	       ds->fdb_isolation && !ds->vlan_filtering_is_global &&
	       !ds->needs_standalone_vlan_filtering;
}

static void dsa_slave_standalone_event_work(struct work_struct *work)
{
	struct dsa_standalone_event_work *standalone_work =
		container_of(work, struct dsa_standalone_event_work, work);
	const unsigned char *addr = standalone_work->addr;
	struct net_device *dev = standalone_work->dev;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_mdb mdb;
	struct dsa_switch *ds = dp->ds;
	u16 vid = standalone_work->vid;
	int err;

	switch (standalone_work->event) {
	case DSA_UC_ADD:
		err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
		if (err)
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
		break;
	case DSA_UC_DEL:
		err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
		if (err)
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		break;
	case DSA_MC_ADD:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_add(dp, &mdb);
		if (err)
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to mdb: %d\n",
				dp->index, addr, vid, err);
		break;
	case DSA_MC_DEL:
		ether_addr_copy(mdb.addr, addr);
		mdb.vid = vid;

		err = dsa_port_standalone_host_mdb_del(dp, &mdb);
		if (err)
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from mdb: %d\n",
				dp->index, addr, vid, err);
		break;
	}

	kfree(standalone_work);
}

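/* Address list updates run in atomic context (under the netdev address list
 * spinlock), while switchdev operations may sleep; hence the GFP_ATOMIC
 * allocation here and the deferral of the actual FDB/MDB update to the DSA
 * workqueue.
 */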
static int dsa_slave_schedule_standalone_work(struct net_device *dev,
					      enum dsa_standalone_event event,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_standalone_event_work *standalone_work;

	standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
	if (!standalone_work)
		return -ENOMEM;

	INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
	standalone_work->event = event;
	standalone_work->dev = dev;

	ether_addr_copy(standalone_work->addr, addr);
	standalone_work->vid = vid;

	dsa_schedule_work(&standalone_work->work);

	return 0;
}

static int dsa_slave_host_vlan_rx_filtering(void *arg, int vid)
{
	struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;

	return dsa_slave_schedule_standalone_work(ctx->dev, ctx->event,
						  ctx->addr, vid);
}

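/* Invoke @cb for VID 0 (untagged traffic) and then for every VLAN tracked in
 * dp->user_vlans, so that a host address change is replicated across all
 * VLANs on which the standalone port may receive packets.
 */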
static int dsa_slave_vlan_for_each(struct net_device *dev,
				   int (*cb)(void *arg, int vid), void *arg)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_vlan *v;
	int err;

	lockdep_assert_held(&dev->addr_list_lock);

	err = cb(arg, 0);
	if (err)
		return err;

	list_for_each_entry(v, &dp->user_vlans, list) {
		err = cb(arg, v->vid);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_slave_sync_uc(struct net_device *dev,
			     const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_UC_ADD,
	};

	dev_uc_add(master, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
				       &ctx);
}

static int dsa_slave_unsync_uc(struct net_device *dev,
			       const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_UC_DEL,
	};

	dev_uc_del(master, addr);

	if (!dsa_switch_supports_uc_filtering(dp->ds))
		return 0;

	return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
				       &ctx);
}

static int dsa_slave_sync_mc(struct net_device *dev,
			     const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_MC_ADD,
	};

	dev_mc_add(master, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
				       &ctx);
}

static int dsa_slave_unsync_mc(struct net_device *dev,
			       const unsigned char *addr)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_host_vlan_rx_filtering_ctx ctx = {
		.dev = dev,
		.addr = addr,
		.event = DSA_MC_DEL,
	};

	dev_mc_del(master, addr);

	if (!dsa_switch_supports_mc_filtering(dp->ds))
		return 0;

	return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
				       &ctx);
}

void dsa_slave_sync_ha(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_slave_sync_mc(dev, ha->addr);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_slave_sync_uc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}

void dsa_slave_unsync_ha(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);

	netdev_for_each_synced_uc_addr(ha, dev)
		dsa_slave_unsync_uc(dev, ha->addr);

	netdev_for_each_synced_mc_addr(ha, dev)
		dsa_slave_unsync_mc(dev, ha->addr);

	netif_addr_unlock_bh(dev);

	if (dsa_switch_supports_uc_filtering(ds) ||
	    dsa_switch_supports_mc_filtering(ds))
		dsa_flush_workqueue();
}

/* slave mii_bus handling ***************************************************/
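/* Only PHY addresses set in ds->phys_mii_mask are routed to the switch
 * driver's MDIO ops; reads outside the mask return 0xffff, which the PHY
 * core interprets as "no device at this address".
 */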
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int err;

	err = dev_open(master, NULL);
	if (err < 0) {
		netdev_err(dev, "failed to open master %s\n", master->name);
		goto out;
	}

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
		if (err)
			goto out;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto del_host_addr;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
del_host_addr:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	dsa_port_disable_rt(dp);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

	return 0;
}

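/* Translate the promiscuity flags into CPU-directed flooding settings:
 * IFF_PROMISC turns on flooding of both unknown unicast and multicast
 * towards the host, while IFF_ALLMULTI alone only enables multicast.
 */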
static void dsa_slave_manage_host_flood(struct net_device *dev)
{
	bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	bool uc = dev->flags & IFF_PROMISC;

	dsa_port_set_host_flood(dp, uc, mc);
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(master,
				 dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(master,
				    dev->flags & IFF_PROMISC ? 1 : -1);

	if (dsa_switch_supports_uc_filtering(ds) &&
	    dsa_switch_supports_mc_filtering(ds))
		dsa_slave_manage_host_flood(dev);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
	__dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* If the port is down, the address isn't synced yet to hardware or
	 * to the DSA master, so there is nothing to change.
	 */
	if (!(dev->flags & IFF_UP))
		goto out_change_dev_addr;

	if (dsa_switch_supports_uc_filtering(ds)) {
		err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
		if (err)
			return err;
	}

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			goto del_unicast;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

out_change_dev_addr:
	eth_hw_addr_set(dev, addr->sa_data);

	return 0;

del_unicast:
	if (dsa_switch_supports_uc_filtering(ds))
		dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

	return err;
}

struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
				   const struct switchdev_attr *attr,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	if (ctx && ctx != dp)
		return 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_state(dp, attr->u.stp_state, true);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      extack);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MST:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_VLAN_MSTI:
		if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
			return -EOPNOTSUPP;

		ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid == vlan->vid)
			return -EBUSY;
	}

	return 0;
}

static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int err;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
		rcu_read_unlock();
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port already has a VLAN upper with this VID");
			return err;
		}
	}

	return dsa_port_vlan_add(dp, vlan, extack);
}

/* Offload a VLAN installed on the bridge or on a foreign interface by
 * installing it as a VLAN towards the CPU port.
 */
static int dsa_slave_host_vlan_add(struct net_device *dev,
				   const struct switchdev_obj *obj,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp)) {
		NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
		return 0;
	}

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	return dsa_port_host_vlan_add(dp, &vlan, extack);
}

static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_add(dev, obj, extack);
		else
			err = dsa_slave_host_vlan_add(dev, obj, extack);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_add_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_vlan_del(dp, vlan);
}

static int dsa_slave_host_vlan_del(struct net_device *dev,
				   const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;

	/* Do nothing if this is a software bridge */
	if (!dp->bridge)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	return dsa_port_host_vlan_del(dp, vlan);
}

static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (ctx && ctx != dp)
		return 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
			err = dsa_slave_vlan_del(dev, obj);
		else
			err = dsa_slave_host_vlan_del(dev, obj);
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
			return -EOPNOTSUPP;

		err = dsa_port_mrp_del_ring_role(dp,
						 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

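/* dsa_enqueue_skb() only calls this when netpoll_tx_running() is true,
 * which in practice requires CONFIG_NET_POLL_CONTROLLER, so the fallback
 * branch below should be unreachable.
 */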
static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}

static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	if (!ds->ops->port_txtstamp)
		return;

	ds->ops->port_txtstamp(ds, p->dp->index, skb);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	memset(skb->cb, 0, sizeof(skb->cb));

	/* Handle tx timestamp if any */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strscpy_pad(data, "tx_packets", len);
		strscpy_pad(data + len, "tx_bytes", len);
		strscpy_pad(data + 2 * len, "rx_packets", len);
		strscpy_pad(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	} else if (stringset == ETH_SS_TEST) {
		net_selftest_get_strings(data);
	}
}

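/* The first four u64 slots aggregate the software pcpu counters named in
 * dsa_slave_get_strings(); hardware counters from the switch driver follow
 * at data + 4, matching the "+ 4" in dsa_slave_get_sset_count().
 */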
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin(&s->syncp);
			tx_packets = u64_stats_read(&s->tx_packets);
			tx_bytes = u64_stats_read(&s->tx_bytes);
			rx_packets = u64_stats_read(&s->rx_packets);
			rx_bytes = u64_stats_read(&s->rx_bytes);
		} while (u64_stats_fetch_retry(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 0;

		if (ds->ops->get_sset_count) {
			count = ds->ops->get_sset_count(ds, dp->index, sset);
			if (count < 0)
				return count;
		}

		return count + 4;
	} else if (sset == ETH_SS_TEST) {
		return net_selftest_get_count();
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
					struct ethtool_eth_phy_stats *phy_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_phy_stats)
		ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}

static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
					struct ethtool_eth_mac_stats *mac_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_mac_stats)
		ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}

static void
dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
			     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eth_ctrl_stats)
		ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}

static void
dsa_slave_get_rmon_stats(struct net_device *dev,
			 struct ethtool_rmon_stats *rmon_stats,
			 const struct ethtool_rmon_hist_range **ranges)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_rmon_stats)
		ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}

static void dsa_slave_net_selftest(struct net_device *ndev,
				   struct ethtool_test *etest, u64 *buf)
{
	struct dsa_port *dp = dsa_slave_to_port(ndev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->self_test) {
		ds->ops->self_test(ds, dp->index, etest, buf);
		return;
	}

	net_selftest(ndev, etest, buf);
}

static int dsa_slave_get_mm(struct net_device *dev,
			    struct ethtool_mm_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_mm)
		return -EOPNOTSUPP;

	return ds->ops->get_mm(ds, dp->index, state);
}

static int dsa_slave_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
			    struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_mm)
		return -EOPNOTSUPP;

	return ds->ops->set_mm(ds, dp->index, cfg, extack);
}

static void dsa_slave_get_mm_stats(struct net_device *dev,
				   struct ethtool_mm_stats *stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_mm_stats)
		ds->ops->get_mm_stats(ds, dp->index, stats);
}

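/* Wake-on-LAN may be provided by the port's PHY (via phylink), by the
 * switch itself, or both, so the two layers are consulted in turn and the
 * switch op may extend what the PHY reported.
 */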
static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pause_stats(struct net_device *dev,
				      struct ethtool_pause_stats *pause_stats)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_pause_stats)
		ds->ops->get_pause_stats(ds, dp->index, pause_stats);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

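/* Look up a previously offloaded tc-matchall entry (mirror or policer) on
 * this port by its classifier cookie.
 */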
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);

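/* Bind or unbind a clsact flow block. Ingress and egress share one callback,
 * dsa_slave_setup_tc_block_cb(), and differ only in the direction flag baked
 * into the _ig/_eg wrappers above.
 */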
dsa_slave_setup_tc_block(struct net_device * dev,struct flow_block_offload * f)1648  static int dsa_slave_setup_tc_block(struct net_device *dev,
1649  				    struct flow_block_offload *f)
1650  {
1651  	struct flow_block_cb *block_cb;
1652  	flow_setup_cb_t *cb;
1653  
1654  	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1655  		cb = dsa_slave_setup_tc_block_cb_ig;
1656  	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1657  		cb = dsa_slave_setup_tc_block_cb_eg;
1658  	else
1659  		return -EOPNOTSUPP;
1660  
1661  	f->driver_block_list = &dsa_slave_block_cb_list;
1662  
1663  	switch (f->command) {
1664  	case FLOW_BLOCK_BIND:
1665  		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
1666  			return -EBUSY;
1667  
1668  		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
1669  		if (IS_ERR(block_cb))
1670  			return PTR_ERR(block_cb);
1671  
1672  		flow_block_cb_add(block_cb, f);
1673  		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
1674  		return 0;
1675  	case FLOW_BLOCK_UNBIND:
1676  		block_cb = flow_block_cb_lookup(f->block, cb, dev);
1677  		if (!block_cb)
1678  			return -ENOENT;
1679  
1680  		flow_block_cb_remove(block_cb, f);
1681  		list_del(&block_cb->driver_list);
1682  		return 0;
1683  	default:
1684  		return -EOPNOTSUPP;
1685  	}
1686  }
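
/* Illustrative usage, not part of the kernel sources: the block binding
 * above is driven from user space through the clsact qdisc. Interface
 * names are examples only.
 *
 *	tc qdisc add dev swp0 clsact
 *	# matchall mirror rule, taken through TC_SETUP_CLSMATCHALL
 *	tc filter add dev swp0 ingress matchall skip_sw \
 *		action mirred egress mirror dev swp1
 *	# flower rule, taken through TC_SETUP_CLSFLOWER
 *	tc filter add dev swp0 ingress flower skip_sw \
 *		dst_mac 01:80:c2:00:00:0e action drop
 */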
1687  
1688  static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
1689  				    void *type_data)
1690  {
1691  	struct net_device *master = dsa_port_to_master(dsa_to_port(ds, port));
1692  
1693  	if (!master->netdev_ops->ndo_setup_tc)
1694  		return -EOPNOTSUPP;
1695  
1696  	return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
1697  }
1698  
1699  static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
1700  			      void *type_data)
1701  {
1702  	struct dsa_port *dp = dsa_slave_to_port(dev);
1703  	struct dsa_switch *ds = dp->ds;
1704  
1705  	switch (type) {
1706  	case TC_SETUP_BLOCK:
1707  		return dsa_slave_setup_tc_block(dev, type_data);
1708  	case TC_SETUP_FT:
1709  		return dsa_slave_setup_ft_block(ds, dp->index, type_data);
1710  	default:
1711  		break;
1712  	}
1713  
1714  	if (!ds->ops->port_setup_tc)
1715  		return -EOPNOTSUPP;
1716  
1717  	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1718  }
1719  
1720  static int dsa_slave_get_rxnfc(struct net_device *dev,
1721  			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
1722  {
1723  	struct dsa_port *dp = dsa_slave_to_port(dev);
1724  	struct dsa_switch *ds = dp->ds;
1725  
1726  	if (!ds->ops->get_rxnfc)
1727  		return -EOPNOTSUPP;
1728  
1729  	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1730  }
1731  
1732  static int dsa_slave_set_rxnfc(struct net_device *dev,
1733  			       struct ethtool_rxnfc *nfc)
1734  {
1735  	struct dsa_port *dp = dsa_slave_to_port(dev);
1736  	struct dsa_switch *ds = dp->ds;
1737  
1738  	if (!ds->ops->set_rxnfc)
1739  		return -EOPNOTSUPP;
1740  
1741  	return ds->ops->set_rxnfc(ds, dp->index, nfc);
1742  }
1743  
1744  static int dsa_slave_get_ts_info(struct net_device *dev,
1745  				 struct ethtool_ts_info *ts)
1746  {
1747  	struct dsa_slave_priv *p = netdev_priv(dev);
1748  	struct dsa_switch *ds = p->dp->ds;
1749  
1750  	if (!ds->ops->get_ts_info)
1751  		return -EOPNOTSUPP;
1752  
1753  	return ds->ops->get_ts_info(ds, p->dp->index, ts);
1754  }
1755  
1756  static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1757  				     u16 vid)
1758  {
1759  	struct dsa_port *dp = dsa_slave_to_port(dev);
1760  	struct switchdev_obj_port_vlan vlan = {
1761  		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1762  		.vid = vid,
1763  		/* This API only allows programming tagged, non-PVID VIDs */
1764  		.flags = 0,
1765  	};
1766  	struct netlink_ext_ack extack = {0};
1767  	struct dsa_switch *ds = dp->ds;
1768  	struct netdev_hw_addr *ha;
1769  	struct dsa_vlan *v;
1770  	int ret;
1771  
1772  	/* User port... */
1773  	ret = dsa_port_vlan_add(dp, &vlan, &extack);
1774  	if (ret) {
1775  		if (extack._msg)
1776  			netdev_err(dev, "%s\n", extack._msg);
1777  		return ret;
1778  	}
1779  
1780  	/* And CPU port... */
1781  	ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
1782  	if (ret) {
1783  		if (extack._msg)
1784  			netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
1785  				   extack._msg);
1786  		return ret;
1787  	}
1788  
1789  	if (!dsa_switch_supports_uc_filtering(ds) &&
1790  	    !dsa_switch_supports_mc_filtering(ds))
1791  		return 0;
1792  
1793  	v = kzalloc(sizeof(*v), GFP_KERNEL);
1794  	if (!v) {
1795  		ret = -ENOMEM;
1796  		goto rollback;
1797  	}
1798  
1799  	netif_addr_lock_bh(dev);
1800  
1801  	v->vid = vid;
1802  	list_add_tail(&v->list, &dp->user_vlans);
1803  
1804  	if (dsa_switch_supports_mc_filtering(ds)) {
1805  		netdev_for_each_synced_mc_addr(ha, dev) {
1806  			dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD,
1807  							   ha->addr, vid);
1808  		}
1809  	}
1810  
1811  	if (dsa_switch_supports_uc_filtering(ds)) {
1812  		netdev_for_each_synced_uc_addr(ha, dev) {
1813  			dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD,
1814  							   ha->addr, vid);
1815  		}
1816  	}
1817  
1818  	netif_addr_unlock_bh(dev);
1819  
1820  	dsa_flush_workqueue();
1821  
1822  	return 0;
1823  
1824  rollback:
1825  	dsa_port_host_vlan_del(dp, &vlan);
1826  	dsa_port_vlan_del(dp, &vlan);
1827  
1828  	return ret;
1829  }
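
/* Illustrative usage, not part of the kernel sources: .ndo_vlan_rx_add_vid
 * runs when an 8021q upper is created on top of a VLAN-aware user port,
 * e.g. (names are examples):
 *
 *	ip link add link swp0 name swp0.100 type vlan id 100
 *
 * VID 100 is then committed to both the user port and the CPU port, and
 * the synced unicast/multicast filter addresses are replayed on the new
 * VLAN through the standalone work items.
 */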
1830  
1831  static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1832  				      u16 vid)
1833  {
1834  	struct dsa_port *dp = dsa_slave_to_port(dev);
1835  	struct switchdev_obj_port_vlan vlan = {
1836  		.vid = vid,
1837  		/* This API only allows programming tagged, non-PVID VIDs */
1838  		.flags = 0,
1839  	};
1840  	struct dsa_switch *ds = dp->ds;
1841  	struct netdev_hw_addr *ha;
1842  	struct dsa_vlan *v;
1843  	int err;
1844  
1845  	err = dsa_port_vlan_del(dp, &vlan);
1846  	if (err)
1847  		return err;
1848  
1849  	err = dsa_port_host_vlan_del(dp, &vlan);
1850  	if (err)
1851  		return err;
1852  
1853  	if (!dsa_switch_supports_uc_filtering(ds) &&
1854  	    !dsa_switch_supports_mc_filtering(ds))
1855  		return 0;
1856  
1857  	netif_addr_lock_bh(dev);
1858  
1859  	v = dsa_vlan_find(&dp->user_vlans, &vlan);
1860  	if (!v) {
1861  		netif_addr_unlock_bh(dev);
1862  		return -ENOENT;
1863  	}
1864  
1865  	list_del(&v->list);
1866  	kfree(v);
1867  
1868  	if (dsa_switch_supports_mc_filtering(ds)) {
1869  		netdev_for_each_synced_mc_addr(ha, dev) {
1870  			dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL,
1871  							   ha->addr, vid);
1872  		}
1873  	}
1874  
1875  	if (dsa_switch_supports_uc_filtering(ds)) {
1876  		netdev_for_each_synced_uc_addr(ha, dev) {
1877  			dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL,
1878  							   ha->addr, vid);
1879  		}
1880  	}
1881  
1882  	netif_addr_unlock_bh(dev);
1883  
1884  	dsa_flush_workqueue();
1885  
1886  	return 0;
1887  }
1888  
1889  static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
1890  {
1891  	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1892  
1893  	return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
1894  }
1895  
1896  static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
1897  {
1898  	__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1899  
1900  	return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
1901  }
1902  
1903  /* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
1904   * filtering is enabled. The baseline is that only ports that offload a
1905   * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
1906   * but there are exceptions for quirky hardware.
1907   *
1908   * If ds->vlan_filtering_is_global = true, then standalone ports which share
1909   * the same switch with other ports that offload a VLAN-aware bridge are also
1910   * inevitably VLAN-aware.
1911   *
1912   * To summarize, a DSA switch port offloads:
1913   *
1914   * - If standalone (this includes software bridge, software LAG):
1915   *     - if ds->needs_standalone_vlan_filtering = true, OR if
1916   *       (ds->vlan_filtering_is_global = true AND there are bridges spanning
1917   *       this switch chip which have vlan_filtering=1)
1918   *         - the 8021q upper VLANs
1919   *     - else (standalone VLAN filtering is not needed, VLAN filtering is not
1920   *       global, or it is, but no port is under a VLAN-aware bridge):
1921   *         - no VLAN (any 8021q upper is a software VLAN)
1922   *
1923   * - If under a vlan_filtering=0 bridge which it offloads:
1924   *     - if ds->configure_vlan_while_not_filtering = true (default):
1925   *         - the bridge VLANs. These VLANs are committed to hardware but inactive.
1926   *     - else (deprecated):
1927   *         - no VLAN. The bridge VLANs are not restored when VLAN awareness is
1928   *           enabled, so this behavior is broken and discouraged.
1929   *
1930   * - If under a vlan_filtering=1 bridge which it offloads:
1931   *     - the bridge VLANs
1932   *     - the 8021q upper VLANs
1933   */
1934  int dsa_slave_manage_vlan_filtering(struct net_device *slave,
1935  				    bool vlan_filtering)
1936  {
1937  	int err;
1938  
1939  	if (vlan_filtering) {
1940  		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1941  
1942  		err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
1943  		if (err) {
1944  			vlan_for_each(slave, dsa_slave_clear_vlan, slave);
1945  			slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1946  			return err;
1947  		}
1948  	} else {
1949  		err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
1950  		if (err)
1951  			return err;
1952  
1953  		slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1954  	}
1955  
1956  	return 0;
1957  }
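
/* A minimal sketch, not compiled and not part of the kernel sources,
 * condensing the decision tree from the comment above into a predicate.
 * The function name is made up for illustration; the flags and helpers
 * it uses appear elsewhere in this file.
 */
#if 0
static bool example_offloads_8021q_uppers(struct dsa_port *dp)
{
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct dsa_switch *ds = dp->ds;

	/* Standalone port (includes software bridge / software LAG) */
	if (!br)
		return ds->needs_standalone_vlan_filtering ||
		       (ds->vlan_filtering_is_global &&
			dsa_port_is_vlan_filtering(dp));

	/* Offloaded bridge: 8021q uppers only on a VLAN-aware bridge */
	return br_vlan_enabled(br);
}
#endif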
1958  
1959  struct dsa_hw_port {
1960  	struct list_head list;
1961  	struct net_device *dev;
1962  	int old_mtu;
1963  };
1964  
1965  static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
1966  {
1967  	const struct dsa_hw_port *p;
1968  	int err;
1969  
1970  	list_for_each_entry(p, hw_port_list, list) {
1971  		if (p->dev->mtu == mtu)
1972  			continue;
1973  
1974  		err = dev_set_mtu(p->dev, mtu);
1975  		if (err)
1976  			goto rollback;
1977  	}
1978  
1979  	return 0;
1980  
1981  rollback:
1982  	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
1983  		if (p->dev->mtu == p->old_mtu)
1984  			continue;
1985  
1986  		if (dev_set_mtu(p->dev, p->old_mtu))
1987  			netdev_err(p->dev, "Failed to restore MTU\n");
1988  	}
1989  
1990  	return err;
1991  }
1992  
1993  static void dsa_hw_port_list_free(struct list_head *hw_port_list)
1994  {
1995  	struct dsa_hw_port *p, *n;
1996  
1997  	list_for_each_entry_safe(p, n, hw_port_list, list)
1998  		kfree(p);
1999  }
2000  
2001  /* Make the hardware datapath to/from @dev limited to a common MTU */
2002  static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
2003  {
2004  	struct list_head hw_port_list;
2005  	struct dsa_switch_tree *dst;
2006  	int min_mtu = ETH_MAX_MTU;
2007  	struct dsa_port *other_dp;
2008  	int err;
2009  
2010  	if (!dp->ds->mtu_enforcement_ingress)
2011  		return;
2012  
2013  	if (!dp->bridge)
2014  		return;
2015  
2016  	INIT_LIST_HEAD(&hw_port_list);
2017  
2018  	/* Populate the list of ports that are part of the same bridge
2019  	 * as the newly added/modified port
2020  	 */
2021  	list_for_each_entry(dst, &dsa_tree_list, list) {
2022  		list_for_each_entry(other_dp, &dst->ports, list) {
2023  			struct dsa_hw_port *hw_port;
2024  			struct net_device *slave;
2025  
2026  			if (other_dp->type != DSA_PORT_TYPE_USER)
2027  				continue;
2028  
2029  			if (!dsa_port_bridge_same(dp, other_dp))
2030  				continue;
2031  
2032  			if (!other_dp->ds->mtu_enforcement_ingress)
2033  				continue;
2034  
2035  			slave = other_dp->slave;
2036  
2037  			if (min_mtu > slave->mtu)
2038  				min_mtu = slave->mtu;
2039  
2040  			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
2041  			if (!hw_port)
2042  				goto out;
2043  
2044  			hw_port->dev = slave;
2045  			hw_port->old_mtu = slave->mtu;
2046  
2047  			list_add(&hw_port->list, &hw_port_list);
2048  		}
2049  	}
2050  
2051  	/* Attempt to configure the entire hardware bridge to the newly added
2052  	 * interface's MTU first, regardless of whether the intention of the
2053  	 * user was to raise or lower it.
2054  	 */
2055  	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
2056  	if (!err)
2057  		goto out;
2058  
2059  	/* Clearly that didn't work out so well, so just set the minimum MTU on
2060  	 * all hardware bridge ports now. If this fails too, then all ports will
2061  	 * still have their old MTU rolled back anyway.
2062  	 */
2063  	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
2064  
2065  out:
2066  	dsa_hw_port_list_free(&hw_port_list);
2067  }
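
/* Illustrative scenario, not part of the kernel sources: with
 * mtu_enforcement_ingress set and swp0..swp2 (example names) offloading
 * the same bridge, a request such as:
 *
 *	ip link set swp0 mtu 1492
 *
 * makes the normalization above first attempt 1492 on all bridged user
 * ports, falling back to the smallest current MTU if any port rejects it.
 */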
2068  
2069  int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
2070  {
2071  	struct net_device *master = dsa_slave_to_master(dev);
2072  	struct dsa_port *dp = dsa_slave_to_port(dev);
2073  	struct dsa_port *cpu_dp = dp->cpu_dp;
2074  	struct dsa_switch *ds = dp->ds;
2075  	struct dsa_port *other_dp;
2076  	int largest_mtu = 0;
2077  	int new_master_mtu;
2078  	int old_master_mtu;
2079  	int mtu_limit;
2080  	int overhead;
2081  	int cpu_mtu;
2082  	int err;
2083  
2084  	if (!ds->ops->port_change_mtu)
2085  		return -EOPNOTSUPP;
2086  
2087  	dsa_tree_for_each_user_port(other_dp, ds->dst) {
2088  		int slave_mtu;
2089  
2090  		/* During probe, this function will be called for each slave
2091  		 * device, while not all of them have been allocated. That's
2092  		 * ok, it doesn't change what the maximum is, so ignore it.
2093  		 */
2094  		if (!other_dp->slave)
2095  			continue;
2096  
2097  		/* Pretend that we already applied the setting, which we
2098  		 * actually haven't (still haven't done all integrity checks)
2099  		 */
2100  		if (dp == other_dp)
2101  			slave_mtu = new_mtu;
2102  		else
2103  			slave_mtu = other_dp->slave->mtu;
2104  
2105  		if (largest_mtu < slave_mtu)
2106  			largest_mtu = slave_mtu;
2107  	}
2108  
2109  	overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
2110  	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead);
2111  	old_master_mtu = master->mtu;
2112  	new_master_mtu = largest_mtu + overhead;
2113  	if (new_master_mtu > mtu_limit)
2114  		return -ERANGE;
2115  
2116  	/* If the master MTU isn't over limit, there's no need to check the CPU
2117  	 * MTU, since that surely isn't either.
2118  	 */
2119  	cpu_mtu = largest_mtu;
2120  
2121  	/* Start applying stuff */
2122  	if (new_master_mtu != old_master_mtu) {
2123  		err = dev_set_mtu(master, new_master_mtu);
2124  		if (err < 0)
2125  			goto out_master_failed;
2126  
2127  		/* We only need to propagate the MTU of the CPU port to
2128  		 * upstream switches, so emit a notifier which updates them.
2129  		 */
2130  		err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
2131  		if (err)
2132  			goto out_cpu_failed;
2133  	}
2134  
2135  	err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
2136  	if (err)
2137  		goto out_port_failed;
2138  
2139  	dev->mtu = new_mtu;
2140  
2141  	dsa_bridge_mtu_normalization(dp);
2142  
2143  	return 0;
2144  
2145  out_port_failed:
2146  	if (new_master_mtu != old_master_mtu)
2147  		dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead);
2148  out_cpu_failed:
2149  	if (new_master_mtu != old_master_mtu)
2150  		dev_set_mtu(master, old_master_mtu);
2151  out_master_failed:
2152  	return err;
2153  }
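
/* Worked example, not part of the kernel sources: with a tagging protocol
 * overhead of 8 bytes (e.g. tag_edsa) and a largest user port MTU of 9000,
 * the master is resized to new_master_mtu = 9000 + 8 = 9008, while the CPU
 * port is notified with cpu_mtu = 9000. The request fails with -ERANGE if
 * 9008 exceeds the master's max_mtu or the user port's max_mtu + overhead.
 */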
2154  
2155  static int __maybe_unused
2156  dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
2157  {
2158  	struct dsa_port *dp = dsa_slave_to_port(dev);
2159  	struct dsa_switch *ds = dp->ds;
2160  	unsigned long mask, new_prio;
2161  	int err, port = dp->index;
2162  
2163  	if (!ds->ops->port_set_default_prio)
2164  		return -EOPNOTSUPP;
2165  
2166  	err = dcb_ieee_setapp(dev, app);
2167  	if (err)
2168  		return err;
2169  
2170  	mask = dcb_ieee_getapp_mask(dev, app);
2171  	new_prio = __fls(mask);
2172  
2173  	err = ds->ops->port_set_default_prio(ds, port, new_prio);
2174  	if (err) {
2175  		dcb_ieee_delapp(dev, app);
2176  		return err;
2177  	}
2178  
2179  	return 0;
2180  }
2181  
2182  static int __maybe_unused
2183  dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
2184  {
2185  	struct dsa_port *dp = dsa_slave_to_port(dev);
2186  	struct dsa_switch *ds = dp->ds;
2187  	unsigned long mask, new_prio;
2188  	int err, port = dp->index;
2189  	u8 dscp = app->protocol;
2190  
2191  	if (!ds->ops->port_add_dscp_prio)
2192  		return -EOPNOTSUPP;
2193  
2194  	if (dscp >= 64) {
2195  		netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
2196  			   dscp);
2197  		return -EINVAL;
2198  	}
2199  
2200  	err = dcb_ieee_setapp(dev, app);
2201  	if (err)
2202  		return err;
2203  
2204  	mask = dcb_ieee_getapp_mask(dev, app);
2205  	new_prio = __fls(mask);
2206  
2207  	err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
2208  	if (err) {
2209  		dcb_ieee_delapp(dev, app);
2210  		return err;
2211  	}
2212  
2213  	return 0;
2214  }
2215  
2216  static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
2217  						      struct dcb_app *app)
2218  {
2219  	switch (app->selector) {
2220  	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2221  		switch (app->protocol) {
2222  		case 0:
2223  			return dsa_slave_dcbnl_set_default_prio(dev, app);
2224  		default:
2225  			return -EOPNOTSUPP;
2226  		}
2227  		break;
2228  	case IEEE_8021QAZ_APP_SEL_DSCP:
2229  		return dsa_slave_dcbnl_add_dscp_prio(dev, app);
2230  	default:
2231  		return -EOPNOTSUPP;
2232  	}
2233  }
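
/* Illustrative usage, not part of the kernel sources: the setapp path
 * above is reachable through the iproute2 "dcb" tool (names and values
 * are examples):
 *
 *	dcb app add dev swp0 default-prio 3
 *	dcb app add dev swp0 dscp-prio 46:6
 *
 * dcb_ieee_getapp_mask() returns the mask of all priorities mapped to the
 * APP key, and __fls() selects the highest set bit, so overlapping entries
 * resolve to the largest configured priority.
 */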
2234  
2235  static int __maybe_unused
2236  dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
2237  {
2238  	struct dsa_port *dp = dsa_slave_to_port(dev);
2239  	struct dsa_switch *ds = dp->ds;
2240  	unsigned long mask, new_prio;
2241  	int err, port = dp->index;
2242  
2243  	if (!ds->ops->port_set_default_prio)
2244  		return -EOPNOTSUPP;
2245  
2246  	err = dcb_ieee_delapp(dev, app);
2247  	if (err)
2248  		return err;
2249  
2250  	mask = dcb_ieee_getapp_mask(dev, app);
2251  	new_prio = mask ? __fls(mask) : 0;
2252  
2253  	err = ds->ops->port_set_default_prio(ds, port, new_prio);
2254  	if (err) {
2255  		dcb_ieee_setapp(dev, app);
2256  		return err;
2257  	}
2258  
2259  	return 0;
2260  }
2261  
2262  static int __maybe_unused
2263  dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
2264  {
2265  	struct dsa_port *dp = dsa_slave_to_port(dev);
2266  	struct dsa_switch *ds = dp->ds;
2267  	int err, port = dp->index;
2268  	u8 dscp = app->protocol;
2269  
2270  	if (!ds->ops->port_del_dscp_prio)
2271  		return -EOPNOTSUPP;
2272  
2273  	err = dcb_ieee_delapp(dev, app);
2274  	if (err)
2275  		return err;
2276  
2277  	err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
2278  	if (err) {
2279  		dcb_ieee_setapp(dev, app);
2280  		return err;
2281  	}
2282  
2283  	return 0;
2284  }
2285  
2286  static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
2287  						      struct dcb_app *app)
2288  {
2289  	switch (app->selector) {
2290  	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2291  		switch (app->protocol) {
2292  		case 0:
2293  			return dsa_slave_dcbnl_del_default_prio(dev, app);
2294  		default:
2295  			return -EOPNOTSUPP;
2296  		}
2297  		break;
2298  	case IEEE_8021QAZ_APP_SEL_DSCP:
2299  		return dsa_slave_dcbnl_del_dscp_prio(dev, app);
2300  	default:
2301  		return -EOPNOTSUPP;
2302  	}
2303  }
2304  
2305  /* Pre-populate the DCB application priority table with the priorities
2306   * configured during switch setup, which we read from hardware here.
2307   */
2308  static int dsa_slave_dcbnl_init(struct net_device *dev)
2309  {
2310  	struct dsa_port *dp = dsa_slave_to_port(dev);
2311  	struct dsa_switch *ds = dp->ds;
2312  	int port = dp->index;
2313  	int err;
2314  
2315  	if (ds->ops->port_get_default_prio) {
2316  		int prio = ds->ops->port_get_default_prio(ds, port);
2317  		struct dcb_app app = {
2318  			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
2319  			.protocol = 0,
2320  			.priority = prio,
2321  		};
2322  
2323  		if (prio < 0)
2324  			return prio;
2325  
2326  		err = dcb_ieee_setapp(dev, &app);
2327  		if (err)
2328  			return err;
2329  	}
2330  
2331  	if (ds->ops->port_get_dscp_prio) {
2332  		int protocol;
2333  
2334  		for (protocol = 0; protocol < 64; protocol++) {
2335  			struct dcb_app app = {
2336  				.selector = IEEE_8021QAZ_APP_SEL_DSCP,
2337  				.protocol = protocol,
2338  			};
2339  			int prio;
2340  
2341  			prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
2342  			if (prio == -EOPNOTSUPP)
2343  				continue;
2344  			if (prio < 0)
2345  				return prio;
2346  
2347  			app.priority = prio;
2348  
2349  			err = dcb_ieee_setapp(dev, &app);
2350  			if (err)
2351  				return err;
2352  		}
2353  	}
2354  
2355  	return 0;
2356  }
2357  
2358  static const struct ethtool_ops dsa_slave_ethtool_ops = {
2359  	.get_drvinfo		= dsa_slave_get_drvinfo,
2360  	.get_regs_len		= dsa_slave_get_regs_len,
2361  	.get_regs		= dsa_slave_get_regs,
2362  	.nway_reset		= dsa_slave_nway_reset,
2363  	.get_link		= ethtool_op_get_link,
2364  	.get_eeprom_len		= dsa_slave_get_eeprom_len,
2365  	.get_eeprom		= dsa_slave_get_eeprom,
2366  	.set_eeprom		= dsa_slave_set_eeprom,
2367  	.get_strings		= dsa_slave_get_strings,
2368  	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
2369  	.get_sset_count		= dsa_slave_get_sset_count,
2370  	.get_eth_phy_stats	= dsa_slave_get_eth_phy_stats,
2371  	.get_eth_mac_stats	= dsa_slave_get_eth_mac_stats,
2372  	.get_eth_ctrl_stats	= dsa_slave_get_eth_ctrl_stats,
2373  	.get_rmon_stats		= dsa_slave_get_rmon_stats,
2374  	.set_wol		= dsa_slave_set_wol,
2375  	.get_wol		= dsa_slave_get_wol,
2376  	.set_eee		= dsa_slave_set_eee,
2377  	.get_eee		= dsa_slave_get_eee,
2378  	.get_link_ksettings	= dsa_slave_get_link_ksettings,
2379  	.set_link_ksettings	= dsa_slave_set_link_ksettings,
2380  	.get_pause_stats	= dsa_slave_get_pause_stats,
2381  	.get_pauseparam		= dsa_slave_get_pauseparam,
2382  	.set_pauseparam		= dsa_slave_set_pauseparam,
2383  	.get_rxnfc		= dsa_slave_get_rxnfc,
2384  	.set_rxnfc		= dsa_slave_set_rxnfc,
2385  	.get_ts_info		= dsa_slave_get_ts_info,
2386  	.self_test		= dsa_slave_net_selftest,
2387  	.get_mm			= dsa_slave_get_mm,
2388  	.set_mm			= dsa_slave_set_mm,
2389  	.get_mm_stats		= dsa_slave_get_mm_stats,
2390  };
2391  
2392  static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
2393  	.ieee_setapp		= dsa_slave_dcbnl_ieee_setapp,
2394  	.ieee_delapp		= dsa_slave_dcbnl_ieee_delapp,
2395  };
2396  
2397  static void dsa_slave_get_stats64(struct net_device *dev,
2398  				  struct rtnl_link_stats64 *s)
2399  {
2400  	struct dsa_port *dp = dsa_slave_to_port(dev);
2401  	struct dsa_switch *ds = dp->ds;
2402  
2403  	if (ds->ops->get_stats64)
2404  		ds->ops->get_stats64(ds, dp->index, s);
2405  	else
2406  		dev_get_tstats64(dev, s);
2407  }
2408  
2409  static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
2410  				       struct net_device_path *path)
2411  {
2412  	struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
2413  	struct net_device *master = dsa_port_to_master(dp);
2414  	struct dsa_port *cpu_dp = dp->cpu_dp;
2415  
2416  	path->dev = ctx->dev;
2417  	path->type = DEV_PATH_DSA;
2418  	path->dsa.proto = cpu_dp->tag_ops->proto;
2419  	path->dsa.port = dp->index;
2420  	ctx->dev = master;
2421  
2422  	return 0;
2423  }
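
/* Illustrative example, not part of the kernel sources: for a hypothetical
 * user port swp0 attached to master eth0, the resolved forward path is
 *
 *	swp0 (DEV_PATH_DSA, tagging proto + port index) -> eth0
 *
 * which lets the netfilter flowtable continue path resolution on the
 * master and offload flows across the DSA layer.
 */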
2424  
2425  static const struct net_device_ops dsa_slave_netdev_ops = {
2426  	.ndo_open	 	= dsa_slave_open,
2427  	.ndo_stop		= dsa_slave_close,
2428  	.ndo_start_xmit		= dsa_slave_xmit,
2429  	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
2430  	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
2431  	.ndo_set_mac_address	= dsa_slave_set_mac_address,
2432  	.ndo_fdb_dump		= dsa_slave_fdb_dump,
2433  	.ndo_eth_ioctl		= dsa_slave_ioctl,
2434  	.ndo_get_iflink		= dsa_slave_get_iflink,
2435  #ifdef CONFIG_NET_POLL_CONTROLLER
2436  	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
2437  	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
2438  	.ndo_poll_controller	= dsa_slave_poll_controller,
2439  #endif
2440  	.ndo_setup_tc		= dsa_slave_setup_tc,
2441  	.ndo_get_stats64	= dsa_slave_get_stats64,
2442  	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
2443  	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
2444  	.ndo_change_mtu		= dsa_slave_change_mtu,
2445  	.ndo_fill_forward_path	= dsa_slave_fill_forward_path,
2446  };
2447  
2448  static struct device_type dsa_type = {
2449  	.name	= "dsa",
2450  };
2451  
2452  void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
2453  {
2454  	const struct dsa_port *dp = dsa_to_port(ds, port);
2455  
2456  	if (dp->pl)
2457  		phylink_mac_change(dp->pl, up);
2458  }
2459  EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
2460  
2461  static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
2462  					  struct phylink_link_state *state)
2463  {
2464  	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
2465  	struct dsa_switch *ds = dp->ds;
2466  
2467  	/* No need to check that this operation is valid, the callback would
2468  	 * not be called if it was not.
2469  	 */
2470  	ds->ops->phylink_fixed_state(ds, dp->index, state);
2471  }
2472  
2473  /* slave device setup *******************************************************/
2474  static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
2475  				 u32 flags)
2476  {
2477  	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2478  	struct dsa_switch *ds = dp->ds;
2479  
2480  	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
2481  	if (!slave_dev->phydev) {
2482  		netdev_err(slave_dev, "no phy at %d\n", addr);
2483  		return -ENODEV;
2484  	}
2485  
2486  	slave_dev->phydev->dev_flags |= flags;
2487  
2488  	return phylink_connect_phy(dp->pl, slave_dev->phydev);
2489  }
2490  
2491  static int dsa_slave_phy_setup(struct net_device *slave_dev)
2492  {
2493  	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2494  	struct device_node *port_dn = dp->dn;
2495  	struct dsa_switch *ds = dp->ds;
2496  	u32 phy_flags = 0;
2497  	int ret;
2498  
2499  	dp->pl_config.dev = &slave_dev->dev;
2500  	dp->pl_config.type = PHYLINK_NETDEV;
2501  
2502  	/* The get_fixed_state callback takes precedence over polling the
2503  	 * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
2504  	 * this if the switch provides such a callback.
2505  	 */
2506  	if (ds->ops->phylink_fixed_state) {
2507  		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
2508  		dp->pl_config.poll_fixed_state = true;
2509  	}
2510  
2511  	ret = dsa_port_phylink_create(dp);
2512  	if (ret)
2513  		return ret;
2514  
2515  	if (ds->ops->get_phy_flags)
2516  		phy_flags = ds->ops->get_phy_flags(ds, dp->index);
2517  
2518  	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
2519  	if (ret == -ENODEV && ds->slave_mii_bus) {
2520  		/* We could not connect to a designated PHY or SFP, so try to
2521  		 * use the switch internal MDIO bus instead
2522  		 */
2523  		ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
2524  	}
2525  	if (ret) {
2526  		netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
2527  			   ERR_PTR(ret));
2528  		dsa_port_phylink_destroy(dp);
2529  	}
2530  
2531  	return ret;
2532  }
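
/* Illustrative device tree fragment, not part of the kernel sources: the
 * PHY resolved by phylink_of_phy_connect() above comes from the port's OF
 * node, e.g. (labels are examples):
 *
 *	port@1 {
 *		reg = <1>;
 *		label = "lan1";
 *		phy-handle = <&phy1>;
 *		phy-mode = "rgmii-id";
 *	};
 *
 * Ports with neither phy-handle nor fixed-link fall back to the switch
 * internal MDIO bus via dsa_slave_phy_connect(), using the port index as
 * the PHY address.
 */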
2533  
2534  void dsa_slave_setup_tagger(struct net_device *slave)
2535  {
2536  	struct dsa_port *dp = dsa_slave_to_port(slave);
2537  	struct net_device *master = dsa_port_to_master(dp);
2538  	struct dsa_slave_priv *p = netdev_priv(slave);
2539  	const struct dsa_port *cpu_dp = dp->cpu_dp;
2540  	const struct dsa_switch *ds = dp->ds;
2541  
2542  	slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
2543  	slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
2544  	/* Try to save one extra realloc later in the TX path (in the master)
2545  	 * by also inheriting the master's needed headroom and tailroom.
2546  	 * The 8021q driver also does this.
2547  	 */
2548  	slave->needed_headroom += master->needed_headroom;
2549  	slave->needed_tailroom += master->needed_tailroom;
2550  
2551  	p->xmit = cpu_dp->tag_ops->xmit;
2552  
2553  	slave->features = master->vlan_features | NETIF_F_HW_TC;
2554  	slave->hw_features |= NETIF_F_HW_TC;
2555  	slave->features |= NETIF_F_LLTX;
2556  	if (slave->needed_tailroom)
2557  		slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
2558  	if (ds->needs_standalone_vlan_filtering)
2559  		slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2560  }
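
/* Worked example, not part of the kernel sources: a tagger needing 8 bytes
 * of headroom (e.g. tag_edsa) on a master that itself advertises 2 bytes
 * makes the slave request 8 + 2 = 10 bytes, so most frames are tagged
 * without a reallocation in the TX path. Taggers with nonzero tailroom
 * also lose NETIF_F_SG/NETIF_F_FRAGLIST, as the trailer must land in
 * linear skb data.
 */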
2561  
2562  int dsa_slave_suspend(struct net_device *slave_dev)
2563  {
2564  	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2565  
2566  	if (!netif_running(slave_dev))
2567  		return 0;
2568  
2569  	netif_device_detach(slave_dev);
2570  
2571  	rtnl_lock();
2572  	phylink_stop(dp->pl);
2573  	rtnl_unlock();
2574  
2575  	return 0;
2576  }
2577  
2578  int dsa_slave_resume(struct net_device *slave_dev)
2579  {
2580  	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2581  
2582  	if (!netif_running(slave_dev))
2583  		return 0;
2584  
2585  	netif_device_attach(slave_dev);
2586  
2587  	rtnl_lock();
2588  	phylink_start(dp->pl);
2589  	rtnl_unlock();
2590  
2591  	return 0;
2592  }
2593  
2594  int dsa_slave_create(struct dsa_port *port)
2595  {
2596  	struct net_device *master = dsa_port_to_master(port);
2597  	struct dsa_switch *ds = port->ds;
2598  	struct net_device *slave_dev;
2599  	struct dsa_slave_priv *p;
2600  	const char *name;
2601  	int assign_type;
2602  	int ret;
2603  
2604  	if (!ds->num_tx_queues)
2605  		ds->num_tx_queues = 1;
2606  
2607  	if (port->name) {
2608  		name = port->name;
2609  		assign_type = NET_NAME_PREDICTABLE;
2610  	} else {
2611  		name = "eth%d";
2612  		assign_type = NET_NAME_ENUM;
2613  	}
2614  
2615  	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
2616  				     assign_type, ether_setup,
2617  				     ds->num_tx_queues, 1);
2618  	if (slave_dev == NULL)
2619  		return -ENOMEM;
2620  
2621  	slave_dev->rtnl_link_ops = &dsa_link_ops;
2622  	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
2623  #if IS_ENABLED(CONFIG_DCB)
2624  	slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
2625  #endif
2626  	if (!is_zero_ether_addr(port->mac))
2627  		eth_hw_addr_set(slave_dev, port->mac);
2628  	else
2629  		eth_hw_addr_inherit(slave_dev, master);
2630  	slave_dev->priv_flags |= IFF_NO_QUEUE;
2631  	if (dsa_switch_supports_uc_filtering(ds))
2632  		slave_dev->priv_flags |= IFF_UNICAST_FLT;
2633  	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
2634  	if (ds->ops->port_max_mtu)
2635  		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
2636  	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
2637  
2638  	SET_NETDEV_DEV(slave_dev, port->ds->dev);
2639  	SET_NETDEV_DEVLINK_PORT(slave_dev, &port->devlink_port);
2640  	slave_dev->dev.of_node = port->dn;
2641  	slave_dev->vlan_features = master->vlan_features;
2642  
2643  	p = netdev_priv(slave_dev);
2644  	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2645  	if (!slave_dev->tstats) {
2646  		free_netdev(slave_dev);
2647  		return -ENOMEM;
2648  	}
2649  
2650  	ret = gro_cells_init(&p->gcells, slave_dev);
2651  	if (ret)
2652  		goto out_free;
2653  
2654  	p->dp = port;
2655  	INIT_LIST_HEAD(&p->mall_tc_list);
2656  	port->slave = slave_dev;
2657  	dsa_slave_setup_tagger(slave_dev);
2658  
2659  	netif_carrier_off(slave_dev);
2660  
2661  	ret = dsa_slave_phy_setup(slave_dev);
2662  	if (ret) {
2663  		netdev_err(slave_dev,
2664  			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
2665  			   ret, ds->dst->index, ds->index, port->index);
2666  		goto out_gcells;
2667  	}
2668  
2669  	rtnl_lock();
2670  
2671  	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
2672  	if (ret && ret != -EOPNOTSUPP)
2673  		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
2674  			 ret, ETH_DATA_LEN, port->index);
2675  
2676  	ret = register_netdevice(slave_dev);
2677  	if (ret) {
2678  		netdev_err(master, "error %d registering interface %s\n",
2679  			   ret, slave_dev->name);
2680  		rtnl_unlock();
2681  		goto out_phy;
2682  	}
2683  
2684  	if (IS_ENABLED(CONFIG_DCB)) {
2685  		ret = dsa_slave_dcbnl_init(slave_dev);
2686  		if (ret) {
2687  			netdev_err(slave_dev,
2688  				   "failed to initialize DCB: %pe\n",
2689  				   ERR_PTR(ret));
2690  			rtnl_unlock();
2691  			goto out_unregister;
2692  		}
2693  	}
2694  
2695  	ret = netdev_upper_dev_link(master, slave_dev, NULL);
2696  
2697  	rtnl_unlock();
2698  
2699  	if (ret)
2700  		goto out_unregister;
2701  
2702  	return 0;
2703  
2704  out_unregister:
2705  	unregister_netdev(slave_dev);
2706  out_phy:
2707  	rtnl_lock();
2708  	phylink_disconnect_phy(p->dp->pl);
2709  	rtnl_unlock();
2710  	dsa_port_phylink_destroy(p->dp);
2711  out_gcells:
2712  	gro_cells_destroy(&p->gcells);
2713  out_free:
2714  	free_percpu(slave_dev->tstats);
2715  	free_netdev(slave_dev);
2716  	port->slave = NULL;
2717  	return ret;
2718  }
2719  
2720  void dsa_slave_destroy(struct net_device *slave_dev)
2721  {
2722  	struct net_device *master = dsa_slave_to_master(slave_dev);
2723  	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2724  	struct dsa_slave_priv *p = netdev_priv(slave_dev);
2725  
2726  	netif_carrier_off(slave_dev);
2727  	rtnl_lock();
2728  	netdev_upper_dev_unlink(master, slave_dev);
2729  	unregister_netdevice(slave_dev);
2730  	phylink_disconnect_phy(dp->pl);
2731  	rtnl_unlock();
2732  
2733  	dsa_port_phylink_destroy(dp);
2734  	gro_cells_destroy(&p->gcells);
2735  	free_percpu(slave_dev->tstats);
2736  	free_netdev(slave_dev);
2737  }
2738  
2739  int dsa_slave_change_master(struct net_device *dev, struct net_device *master,
2740  			    struct netlink_ext_ack *extack)
2741  {
2742  	struct net_device *old_master = dsa_slave_to_master(dev);
2743  	struct dsa_port *dp = dsa_slave_to_port(dev);
2744  	struct dsa_switch *ds = dp->ds;
2745  	struct net_device *upper;
2746  	struct list_head *iter;
2747  	int err;
2748  
2749  	if (master == old_master)
2750  		return 0;
2751  
2752  	if (!ds->ops->port_change_master) {
2753  		NL_SET_ERR_MSG_MOD(extack,
2754  				   "Driver does not support changing DSA master");
2755  		return -EOPNOTSUPP;
2756  	}
2757  
2758  	if (!netdev_uses_dsa(master)) {
2759  		NL_SET_ERR_MSG_MOD(extack,
2760  				   "Interface not eligible as DSA master");
2761  		return -EOPNOTSUPP;
2762  	}
2763  
2764  	netdev_for_each_upper_dev_rcu(master, upper, iter) {
2765  		if (dsa_slave_dev_check(upper))
2766  			continue;
2767  		if (netif_is_bridge_master(upper))
2768  			continue;
2769  		NL_SET_ERR_MSG_MOD(extack, "Cannot join master with unknown uppers");
2770  		return -EOPNOTSUPP;
2771  	}
2772  
2773  	/* Since we allow live-changing the DSA master, plus we auto-open the
2774  	 * DSA master when the user port opens => we need to ensure that the
2775  	 * new DSA master is open too.
2776  	 */
2777  	if (dev->flags & IFF_UP) {
2778  		err = dev_open(master, extack);
2779  		if (err)
2780  			return err;
2781  	}
2782  
2783  	netdev_upper_dev_unlink(old_master, dev);
2784  
2785  	err = netdev_upper_dev_link(master, dev, extack);
2786  	if (err)
2787  		goto out_revert_old_master_unlink;
2788  
2789  	err = dsa_port_change_master(dp, master, extack);
2790  	if (err)
2791  		goto out_revert_master_link;
2792  
2793  	/* Update the MTU of the new CPU port through cross-chip notifiers */
2794  	err = dsa_slave_change_mtu(dev, dev->mtu);
2795  	if (err && err != -EOPNOTSUPP) {
2796  		netdev_warn(dev,
2797  			    "nonfatal error updating MTU with new master: %pe\n",
2798  			    ERR_PTR(err));
2799  	}
2800  
2801  	/* If the port doesn't have its own MAC address and relies on the DSA
2802  	 * master's one, inherit it again from the new DSA master.
2803  	 */
2804  	if (is_zero_ether_addr(dp->mac))
2805  		eth_hw_addr_inherit(dev, master);
2806  
2807  	return 0;
2808  
2809  out_revert_master_link:
2810  	netdev_upper_dev_unlink(master, dev);
2811  out_revert_old_master_unlink:
2812  	netdev_upper_dev_link(old_master, dev, NULL);
2813  	return err;
2814  }
2815  
2816  bool dsa_slave_dev_check(const struct net_device *dev)
2817  {
2818  	return dev->netdev_ops == &dsa_slave_netdev_ops;
2819  }
2820  EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
2821  
2822  static int dsa_slave_changeupper(struct net_device *dev,
2823  				 struct netdev_notifier_changeupper_info *info)
2824  {
2825  	struct dsa_port *dp = dsa_slave_to_port(dev);
2826  	struct netlink_ext_ack *extack;
2827  	int err = NOTIFY_DONE;
2828  
2829  	if (!dsa_slave_dev_check(dev))
2830  		return err;
2831  
2832  	extack = netdev_notifier_info_to_extack(&info->info);
2833  
2834  	if (netif_is_bridge_master(info->upper_dev)) {
2835  		if (info->linking) {
2836  			err = dsa_port_bridge_join(dp, info->upper_dev, extack);
2837  			if (!err)
2838  				dsa_bridge_mtu_normalization(dp);
2839  			if (err == -EOPNOTSUPP) {
2840  				NL_SET_ERR_MSG_WEAK_MOD(extack,
2841  							"Offloading not supported");
2842  				err = 0;
2843  			}
2844  			err = notifier_from_errno(err);
2845  		} else {
2846  			dsa_port_bridge_leave(dp, info->upper_dev);
2847  			err = NOTIFY_OK;
2848  		}
2849  	} else if (netif_is_lag_master(info->upper_dev)) {
2850  		if (info->linking) {
2851  			err = dsa_port_lag_join(dp, info->upper_dev,
2852  						info->upper_info, extack);
2853  			if (err == -EOPNOTSUPP) {
2854  				NL_SET_ERR_MSG_WEAK_MOD(extack,
2855  							"Offloading not supported");
2856  				err = 0;
2857  			}
2858  			err = notifier_from_errno(err);
2859  		} else {
2860  			dsa_port_lag_leave(dp, info->upper_dev);
2861  			err = NOTIFY_OK;
2862  		}
2863  	} else if (is_hsr_master(info->upper_dev)) {
2864  		if (info->linking) {
2865  			err = dsa_port_hsr_join(dp, info->upper_dev);
2866  			if (err == -EOPNOTSUPP) {
2867  				NL_SET_ERR_MSG_WEAK_MOD(extack,
2868  							"Offloading not supported");
2869  				err = 0;
2870  			}
2871  			err = notifier_from_errno(err);
2872  		} else {
2873  			dsa_port_hsr_leave(dp, info->upper_dev);
2874  			err = NOTIFY_OK;
2875  		}
2876  	}
2877  
2878  	return err;
2879  }
2880  
2881  static int dsa_slave_prechangeupper(struct net_device *dev,
2882  				    struct netdev_notifier_changeupper_info *info)
2883  {
2884  	struct dsa_port *dp = dsa_slave_to_port(dev);
2885  
2886  	if (!dsa_slave_dev_check(dev))
2887  		return NOTIFY_DONE;
2888  
2889  	if (netif_is_bridge_master(info->upper_dev) && !info->linking)
2890  		dsa_port_pre_bridge_leave(dp, info->upper_dev);
2891  	else if (netif_is_lag_master(info->upper_dev) && !info->linking)
2892  		dsa_port_pre_lag_leave(dp, info->upper_dev);
2893  	/* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
2894  	 * meaningfully enslaved to a bridge yet
2895  	 */
2896  
2897  	return NOTIFY_DONE;
2898  }
2899  
2900  static int
2901  dsa_slave_lag_changeupper(struct net_device *dev,
2902  			  struct netdev_notifier_changeupper_info *info)
2903  {
2904  	struct net_device *lower;
2905  	struct list_head *iter;
2906  	int err = NOTIFY_DONE;
2907  	struct dsa_port *dp;
2908  
2909  	if (!netif_is_lag_master(dev))
2910  		return err;
2911  
2912  	netdev_for_each_lower_dev(dev, lower, iter) {
2913  		if (!dsa_slave_dev_check(lower))
2914  			continue;
2915  
2916  		dp = dsa_slave_to_port(lower);
2917  		if (!dp->lag)
2918  			/* Software LAG */
2919  			continue;
2920  
2921  		err = dsa_slave_changeupper(lower, info);
2922  		if (notifier_to_errno(err))
2923  			break;
2924  	}
2925  
2926  	return err;
2927  }
2928  
2929  /* Same as dsa_slave_lag_changeupper() except that it calls
2930   * dsa_slave_prechangeupper()
2931   */
2932  static int
2933  dsa_slave_lag_prechangeupper(struct net_device *dev,
2934  			     struct netdev_notifier_changeupper_info *info)
2935  {
2936  	struct net_device *lower;
2937  	struct list_head *iter;
2938  	int err = NOTIFY_DONE;
2939  	struct dsa_port *dp;
2940  
2941  	if (!netif_is_lag_master(dev))
2942  		return err;
2943  
2944  	netdev_for_each_lower_dev(dev, lower, iter) {
2945  		if (!dsa_slave_dev_check(lower))
2946  			continue;
2947  
2948  		dp = dsa_slave_to_port(lower);
2949  		if (!dp->lag)
2950  			/* Software LAG */
2951  			continue;
2952  
2953  		err = dsa_slave_prechangeupper(lower, info);
2954  		if (notifier_to_errno(err))
2955  			break;
2956  	}
2957  
2958  	return err;
2959  }
2960  
2961  static int
2962  dsa_prevent_bridging_8021q_upper(struct net_device *dev,
2963  				 struct netdev_notifier_changeupper_info *info)
2964  {
2965  	struct netlink_ext_ack *ext_ack;
2966  	struct net_device *slave, *br;
2967  	struct dsa_port *dp;
2968  
2969  	ext_ack = netdev_notifier_info_to_extack(&info->info);
2970  
2971  	if (!is_vlan_dev(dev))
2972  		return NOTIFY_DONE;
2973  
2974  	slave = vlan_dev_real_dev(dev);
2975  	if (!dsa_slave_dev_check(slave))
2976  		return NOTIFY_DONE;
2977  
2978  	dp = dsa_slave_to_port(slave);
2979  	br = dsa_port_bridge_dev_get(dp);
2980  	if (!br)
2981  		return NOTIFY_DONE;
2982  
2983  	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
2984  	if (br_vlan_enabled(br) &&
2985  	    netif_is_bridge_master(info->upper_dev) && info->linking) {
2986  		NL_SET_ERR_MSG_MOD(ext_ack,
2987  				   "Cannot enslave VLAN device into VLAN aware bridge");
2988  		return notifier_from_errno(-EINVAL);
2989  	}
2990  
2991  	return NOTIFY_DONE;
2992  }
2993  
2994  static int
2995  dsa_slave_check_8021q_upper(struct net_device *dev,
2996  			    struct netdev_notifier_changeupper_info *info)
2997  {
2998  	struct dsa_port *dp = dsa_slave_to_port(dev);
2999  	struct net_device *br = dsa_port_bridge_dev_get(dp);
3000  	struct bridge_vlan_info br_info;
3001  	struct netlink_ext_ack *extack;
3002  	int err = NOTIFY_DONE;
3003  	u16 vid;
3004  
3005  	if (!br || !br_vlan_enabled(br))
3006  		return NOTIFY_DONE;
3007  
3008  	extack = netdev_notifier_info_to_extack(&info->info);
3009  	vid = vlan_dev_vlan_id(info->upper_dev);
3010  
3011  	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
3012  	 * device, respectively the VID, is not found. A return of
3013  	 * 0 means success, which is a failure for us here.
3014  	 */
3015  	err = br_vlan_get_info(br, vid, &br_info);
3016  	if (err == 0) {
3017  		NL_SET_ERR_MSG_MOD(extack,
3018  				   "This VLAN is already configured by the bridge");
3019  		return notifier_from_errno(-EBUSY);
3020  	}
3021  
3022  	return NOTIFY_DONE;
3023  }
3024  
3025  static int
3026  dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
3027  				      struct netdev_notifier_changeupper_info *info)
3028  {
3029  	struct dsa_switch *ds;
3030  	struct dsa_port *dp;
3031  	int err;
3032  
3033  	if (!dsa_slave_dev_check(dev))
3034  		return dsa_prevent_bridging_8021q_upper(dev, info);
3035  
3036  	dp = dsa_slave_to_port(dev);
3037  	ds = dp->ds;
3038  
3039  	if (ds->ops->port_prechangeupper) {
3040  		err = ds->ops->port_prechangeupper(ds, dp->index, info);
3041  		if (err)
3042  			return notifier_from_errno(err);
3043  	}
3044  
3045  	if (is_vlan_dev(info->upper_dev))
3046  		return dsa_slave_check_8021q_upper(dev, info);
3047  
3048  	return NOTIFY_DONE;
3049  }
3050  
3051  /* To be eligible as a DSA master, a LAG must have all lower interfaces be
3052   * eligible DSA masters. Additionally, all LAG slaves must be DSA masters of
3053   * switches in the same switch tree.
3054   */
3055  static int dsa_lag_master_validate(struct net_device *lag_dev,
3056  				   struct netlink_ext_ack *extack)
3057  {
3058  	struct net_device *lower1, *lower2;
3059  	struct list_head *iter1, *iter2;
3060  
3061  	netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
3062  		netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
3063  			if (!netdev_uses_dsa(lower1) ||
3064  			    !netdev_uses_dsa(lower2)) {
3065  				NL_SET_ERR_MSG_MOD(extack,
3066  						   "All LAG ports must be eligible as DSA masters");
3067  				return notifier_from_errno(-EINVAL);
3068  			}
3069  
3070  			if (lower1 == lower2)
3071  				continue;
3072  
3073  			if (!dsa_port_tree_same(lower1->dsa_ptr,
3074  						lower2->dsa_ptr)) {
3075  				NL_SET_ERR_MSG_MOD(extack,
3076  						   "LAG contains DSA masters of disjoint switch trees");
3077  				return notifier_from_errno(-EINVAL);
3078  			}
3079  		}
3080  	}
3081  
3082  	return NOTIFY_DONE;
3083  }
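
/* Illustrative usage, not part of the kernel sources: a LAG of two DSA
 * masters that passes the validation above (names are examples):
 *
 *	ip link add bond0 type bond mode balance-xor
 *	ip link set eth0 down && ip link set eth0 master bond0
 *	ip link set eth1 down && ip link set eth1 master bond0
 *
 * Both eth0 and eth1 must be DSA masters of the same switch tree; mixing
 * in a foreign NIC or a master from another tree is rejected with -EINVAL.
 */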
3084  
3085  static int
3086  dsa_master_prechangeupper_sanity_check(struct net_device *master,
3087  				       struct netdev_notifier_changeupper_info *info)
3088  {
3089  	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
3090  
3091  	if (!netdev_uses_dsa(master))
3092  		return NOTIFY_DONE;
3093  
3094  	if (!info->linking)
3095  		return NOTIFY_DONE;
3096  
3097  	/* Allow DSA switch uppers */
3098  	if (dsa_slave_dev_check(info->upper_dev))
3099  		return NOTIFY_DONE;
3100  
3101  	/* Allow bridge uppers of DSA masters, subject to further
3102  	 * restrictions in dsa_bridge_prechangelower_sanity_check()
3103  	 */
3104  	if (netif_is_bridge_master(info->upper_dev))
3105  		return NOTIFY_DONE;
3106  
3107  	/* Allow LAG uppers, subject to further restrictions in
3108  	 * dsa_lag_master_prechangelower_sanity_check()
3109  	 */
3110  	if (netif_is_lag_master(info->upper_dev))
3111  		return dsa_lag_master_validate(info->upper_dev, extack);
3112  
3113  	NL_SET_ERR_MSG_MOD(extack,
3114  			   "DSA master cannot join unknown upper interfaces");
3115  	return notifier_from_errno(-EBUSY);
3116  }
3117  
3118  static int
3119  dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
3120  					   struct netdev_notifier_changeupper_info *info)
3121  {
3122  	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
3123  	struct net_device *lag_dev = info->upper_dev;
3124  	struct net_device *lower;
3125  	struct list_head *iter;
3126  
3127  	if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
3128  		return NOTIFY_DONE;
3129  
3130  	if (!info->linking)
3131  		return NOTIFY_DONE;
3132  
3133  	if (!netdev_uses_dsa(dev)) {
3134  		NL_SET_ERR_MSG(extack,
3135  			       "Only DSA masters can join a LAG DSA master");
3136  		return notifier_from_errno(-EINVAL);
3137  	}
3138  
3139  	netdev_for_each_lower_dev(lag_dev, lower, iter) {
3140  		if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
3141  			NL_SET_ERR_MSG(extack,
3142  				       "Interface is DSA master for a different switch tree than this LAG");
3143  			return notifier_from_errno(-EINVAL);
3144  		}
3145  
3146  		break;
3147  	}
3148  
3149  	return NOTIFY_DONE;
3150  }
3151  
3152  /* Don't allow bridging of DSA masters, since the bridge layer rx_handler
3153   * prevents the DSA fake ethertype handler from being invoked, so we don't get the
3154   * chance to strip off and parse the DSA switch tag protocol header (the bridge
3155   * layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these
3156   * frames).
3157   * The only case where that would not be an issue is when bridging can already
3158   * be offloaded, such as when the DSA master is itself a DSA or plain switchdev
3159   * port, and is bridged only with other ports from the same hardware device.
3160   */
3161  static int
3162  dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
3163  				       struct netdev_notifier_changeupper_info *info)
3164  {
3165  	struct net_device *br = info->upper_dev;
3166  	struct netlink_ext_ack *extack;
3167  	struct net_device *lower;
3168  	struct list_head *iter;
3169  
3170  	if (!netif_is_bridge_master(br))
3171  		return NOTIFY_DONE;
3172  
3173  	if (!info->linking)
3174  		return NOTIFY_DONE;
3175  
3176  	extack = netdev_notifier_info_to_extack(&info->info);
3177  
3178  	netdev_for_each_lower_dev(br, lower, iter) {
3179  		if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower))
3180  			continue;
3181  
3182  		if (!netdev_port_same_parent_id(lower, new_lower)) {
3183  			NL_SET_ERR_MSG(extack,
3184  				       "Cannot do software bridging with a DSA master");
3185  			return notifier_from_errno(-EINVAL);
3186  		}
3187  	}
3188  
3189  	return NOTIFY_DONE;
3190  }
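
/* Illustrative consequence, not part of the kernel sources: with eth0 as a
 * DSA master and eno1 a foreign NIC (example names), the check above
 * rejects mixing them in one software bridge:
 *
 *	ip link add br0 type bridge
 *	ip link set eth0 master br0	# allowed
 *	ip link set eno1 master br0	# -EINVAL: software bridging with a
 *					# DSA master is refused
 */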
3191  
3192  static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst,
3193  						   struct net_device *lag_dev)
3194  {
3195  	struct net_device *new_master = dsa_tree_find_first_master(dst);
3196  	struct dsa_port *dp;
3197  	int err;
3198  
3199  	dsa_tree_for_each_user_port(dp, dst) {
3200  		if (dsa_port_to_master(dp) != lag_dev)
3201  			continue;
3202  
3203  		err = dsa_slave_change_master(dp->slave, new_master, NULL);
3204  		if (err) {
3205  			netdev_err(dp->slave,
3206  				   "failed to restore master to %s: %pe\n",
3207  				   new_master->name, ERR_PTR(err));
3208  		}
3209  	}
3210  }
3211  
3212  static int dsa_master_lag_join(struct net_device *master,
3213  			       struct net_device *lag_dev,
3214  			       struct netdev_lag_upper_info *uinfo,
3215  			       struct netlink_ext_ack *extack)
3216  {
3217  	struct dsa_port *cpu_dp = master->dsa_ptr;
3218  	struct dsa_switch_tree *dst = cpu_dp->dst;
3219  	struct dsa_port *dp;
3220  	int err;
3221  
3222  	err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack);
3223  	if (err)
3224  		return err;
3225  
3226  	dsa_tree_for_each_user_port(dp, dst) {
3227  		if (dsa_port_to_master(dp) != master)
3228  			continue;
3229  
3230  		err = dsa_slave_change_master(dp->slave, lag_dev, extack);
3231  		if (err)
3232  			goto restore;
3233  	}
3234  
3235  	return 0;
3236  
3237  restore:
3238  	dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
3239  		if (dsa_port_to_master(dp) != lag_dev)
3240  			continue;
3241  
3242  		err = dsa_slave_change_master(dp->slave, master, NULL);
3243  		if (err) {
3244  			netdev_err(dp->slave,
3245  				   "failed to restore master to %s: %pe\n",
3246  				   master->name, ERR_PTR(err));
3247  		}
3248  	}
3249  
3250  	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
3251  
3252  	return err;
3253  }
3254  
3255  static void dsa_master_lag_leave(struct net_device *master,
3256  				 struct net_device *lag_dev)
3257  {
3258  	struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
3259  	struct dsa_switch_tree *dst = cpu_dp->dst;
3260  	struct dsa_port *new_cpu_dp = NULL;
3261  	struct net_device *lower;
3262  	struct list_head *iter;
3263  
3264  	netdev_for_each_lower_dev(lag_dev, lower, iter) {
3265  		if (netdev_uses_dsa(lower)) {
3266  			new_cpu_dp = lower->dsa_ptr;
3267  			break;
3268  		}
3269  	}
3270  
3271  	if (new_cpu_dp) {
3272  		/* Update the CPU port of the user ports still under the LAG
3273  		 * so that dsa_port_to_master() continues to work properly
3274  		 */
3275  		dsa_tree_for_each_user_port(dp, dst)
3276  			if (dsa_port_to_master(dp) == lag_dev)
3277  				dp->cpu_dp = new_cpu_dp;
3278  
3279  		/* Update the index of the virtual CPU port to match the lowest
3280  		 * physical CPU port
3281  		 */
3282  		lag_dev->dsa_ptr = new_cpu_dp;
3283  		wmb();
3284  	} else {
3285  		/* If the LAG DSA master has no ports left, migrate back all
3286  		 * user ports to the first physical CPU port
3287  		 */
3288  		dsa_tree_migrate_ports_from_lag_master(dst, lag_dev);
3289  	}
3290  
3291  	/* This DSA master has left its LAG in any case, so let
3292  	 * the CPU port leave the hardware LAG as well
3293  	 */
3294  	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
3295  }
3296  
3297  static int dsa_master_changeupper(struct net_device *dev,
3298  				  struct netdev_notifier_changeupper_info *info)
3299  {
3300  	struct netlink_ext_ack *extack;
3301  	int err = NOTIFY_DONE;
3302  
3303  	if (!netdev_uses_dsa(dev))
3304  		return err;
3305  
3306  	extack = netdev_notifier_info_to_extack(&info->info);
3307  
3308  	if (netif_is_lag_master(info->upper_dev)) {
3309  		if (info->linking) {
3310  			err = dsa_master_lag_join(dev, info->upper_dev,
3311  						  info->upper_info, extack);
3312  			err = notifier_from_errno(err);
3313  		} else {
3314  			dsa_master_lag_leave(dev, info->upper_dev);
3315  			err = NOTIFY_OK;
3316  		}
3317  	}
3318  
3319  	return err;
3320  }
3321  
3322  static int dsa_slave_netdevice_event(struct notifier_block *nb,
3323  				     unsigned long event, void *ptr)
3324  {
3325  	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3326  
3327  	switch (event) {
3328  	case NETDEV_PRECHANGEUPPER: {
3329  		struct netdev_notifier_changeupper_info *info = ptr;
3330  		int err;
3331  
3332  		err = dsa_slave_prechangeupper_sanity_check(dev, info);
3333  		if (notifier_to_errno(err))
3334  			return err;
3335  
3336  		err = dsa_master_prechangeupper_sanity_check(dev, info);
3337  		if (notifier_to_errno(err))
3338  			return err;
3339  
3340  		err = dsa_lag_master_prechangelower_sanity_check(dev, info);
3341  		if (notifier_to_errno(err))
3342  			return err;
3343  
3344  		err = dsa_bridge_prechangelower_sanity_check(dev, info);
3345  		if (notifier_to_errno(err))
3346  			return err;
3347  
3348  		err = dsa_slave_prechangeupper(dev, ptr);
3349  		if (notifier_to_errno(err))
3350  			return err;
3351  
3352  		err = dsa_slave_lag_prechangeupper(dev, ptr);
3353  		if (notifier_to_errno(err))
3354  			return err;
3355  
3356  		break;
3357  	}
3358  	case NETDEV_CHANGEUPPER: {
3359  		int err;
3360  
3361  		err = dsa_slave_changeupper(dev, ptr);
3362  		if (notifier_to_errno(err))
3363  			return err;
3364  
3365  		err = dsa_slave_lag_changeupper(dev, ptr);
3366  		if (notifier_to_errno(err))
3367  			return err;
3368  
3369  		err = dsa_master_changeupper(dev, ptr);
3370  		if (notifier_to_errno(err))
3371  			return err;
3372  
3373  		break;
3374  	}
3375  	case NETDEV_CHANGELOWERSTATE: {
3376  		struct netdev_notifier_changelowerstate_info *info = ptr;
3377  		struct dsa_port *dp;
3378  		int err = 0;
3379  
3380  		if (dsa_slave_dev_check(dev)) {
3381  			dp = dsa_slave_to_port(dev);
3382  
3383  			err = dsa_port_lag_change(dp, info->lower_state_info);
3384  		}
3385  
3386  		/* Mirror LAG port events on DSA masters that are in
3387  		 * a LAG towards their respective switch CPU ports
3388  		 */
3389  		if (netdev_uses_dsa(dev)) {
3390  			dp = dev->dsa_ptr;
3391  
3392  			err = dsa_port_lag_change(dp, info->lower_state_info);
3393  		}
3394  
3395  		return notifier_from_errno(err);
3396  	}
	case NETDEV_CHANGE:
	case NETDEV_UP: {
		/* Track the state of the master port.
		 * A DSA driver may require the master port (and indirectly
		 * the tagger) to be available for some special operations.
		 */
		if (netdev_uses_dsa(dev)) {
			struct dsa_port *cpu_dp = dev->dsa_ptr;
			struct dsa_switch_tree *dst = cpu_dp->ds->dst;

			/* Track when the master port is UP */
			dsa_tree_master_oper_state_change(dst, dev,
							  netif_oper_up(dev));

			/* Track when the master port is ready and can accept
			 * packets.
			 * The NETDEV_UP event is not enough to flag a port as
			 * ready: we also have to wait for linkwatch_do_dev()
			 * to call dev_activate() and emit a NETDEV_CHANGE
			 * event. We consider a master port ready once the
			 * device has a qdisc assigned and it is not the noop
			 * qdisc.
			 */
			dsa_tree_master_admin_state_change(dst, dev,
							   !qdisc_tx_is_noop(dev));

			return NOTIFY_OK;
		}

		return NOTIFY_DONE;
	}
	case NETDEV_GOING_DOWN: {
		struct dsa_port *dp, *cpu_dp;
		struct dsa_switch_tree *dst;
		LIST_HEAD(close_list);

		if (!netdev_uses_dsa(dev))
			return NOTIFY_DONE;

		cpu_dp = dev->dsa_ptr;
		dst = cpu_dp->ds->dst;

		dsa_tree_master_admin_state_change(dst, dev, false);

		list_for_each_entry(dp, &dst->ports, list) {
			if (!dsa_port_is_user(dp))
				continue;

			if (dp->cpu_dp != cpu_dp)
				continue;

			list_add(&dp->slave->close_list, &close_list);
		}

		dev_close_many(&close_list, true);

		return NOTIFY_OK;
	}
	default:
		break;
	}

	return NOTIFY_DONE;
}

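/* Notify the bridge that the FDB entry from this deferred work item has
 * been installed in hardware, so the entry can be marked as offloaded.
 */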
static void
dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
{
	struct switchdev_notifier_fdb_info info = {};

	info.addr = switchdev_work->addr;
	info.vid = switchdev_work->vid;
	info.offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 switchdev_work->orig_dev, &info.info, NULL);
}

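/* Deferred worker for FDB add/del events. The switchdev notifier runs in
 * atomic context, while programming the hardware FDB may sleep (e.g. for
 * MDIO register access), hence the detour through a workqueue.
 */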
static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	const unsigned char *addr = switchdev_work->addr;
	struct net_device *dev = switchdev_work->dev;
	u16 vid = switchdev_work->vid;
	struct dsa_switch *ds;
	struct dsa_port *dp;
	int err;

	dp = dsa_slave_to_port(dev);
	ds = dp->ds;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_add(dp, addr, vid);
		else
			err = dsa_port_fdb_add(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to add %pM vid %d to fdb: %d\n",
				dp->index, addr, vid, err);
			break;
		}
		dsa_fdb_offload_notify(switchdev_work);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (switchdev_work->host_addr)
			err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
		else if (dp->lag)
			err = dsa_port_lag_fdb_del(dp, addr, vid);
		else
			err = dsa_port_fdb_del(dp, addr, vid);
		if (err) {
			dev_err(ds->dev,
				"port %d failed to delete %pM vid %d from fdb: %d\n",
				dp->index, addr, vid, err);
		}

		break;
	}

	kfree(switchdev_work);
}

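/* Bridges and bridge ports are foreign to @dev only when our switch tree
 * does not offload them; every other kind of net device is always
 * considered foreign.
 */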
static bool dsa_foreign_dev_check(const struct net_device *dev,
				  const struct net_device *foreign_dev)
{
	const struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch_tree *dst = dp->ds->dst;

	if (netif_is_bridge_master(foreign_dev))
		return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);

	if (netif_is_bridge_port(foreign_dev))
		return !dsa_tree_offloads_bridge_port(dst, foreign_dev);

	/* Everything else is foreign */
	return true;
}

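/* Filter an FDB add/del event down to what is relevant for this port,
 * then schedule deferred work to program the hardware. Local entries and
 * entries on foreign interfaces are installed as host addresses;
 * dynamically learned entries are only mirrored to the CPU port when the
 * driver requests assisted learning.
 */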
static int dsa_slave_fdb_event(struct net_device *dev,
			       struct net_device *orig_dev,
			       unsigned long event, const void *ctx,
			       const struct switchdev_notifier_fdb_info *fdb_info)
{
	struct dsa_switchdev_event_work *switchdev_work;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	bool host_addr = fdb_info->is_local;
	struct dsa_switch *ds = dp->ds;

	if (ctx && ctx != dp)
		return 0;

	if (!dp->bridge)
		return 0;

	if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
		if (dsa_port_offloads_bridge_port(dp, orig_dev))
			return 0;

		/* FDB entries learned by the software bridge or by foreign
		 * bridge ports should be installed as host addresses only if
		 * the driver requests assisted learning.
		 */
		if (!ds->assisted_learning_on_cpu_port)
			return 0;
	}

	/* Also treat FDB entries on foreign interfaces bridged with us as host
	 * addresses.
	 */
	if (dsa_foreign_dev_check(dev, orig_dev))
		host_addr = true;

	/* Check early that we're not doing work in vain.
	 * Host addresses on LAG ports still require regular FDB ops,
	 * since the CPU port isn't in a LAG.
	 */
	if (dp->lag && !host_addr) {
		if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
			return -EOPNOTSUPP;
	} else {
		if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
			return -EOPNOTSUPP;
	}

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return -ENOMEM;

	netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
		   event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
		   orig_dev->name, fdb_info->addr, fdb_info->vid,
		   host_addr ? " as host address" : "");

	INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
	switchdev_work->event = event;
	switchdev_work->dev = dev;
	switchdev_work->orig_dev = orig_dev;

	ether_addr_copy(switchdev_work->addr, fdb_info->addr);
	switchdev_work->vid = fdb_info->vid;
	switchdev_work->host_addr = host_addr;

	dsa_schedule_work(&switchdev_work->work);

	return 0;
}

/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   dsa_slave_dev_check,
							   dsa_foreign_dev_check,
							   dsa_slave_fdb_event);
		return notifier_from_errno(err);
	default:
		return NOTIFY_DONE;
	}
}

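/* Called in process context; handles switchdev operations which may
 * sleep, such as port object addition/deletion.
 */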
static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add_foreign(dev, ptr,
							    dsa_slave_dev_check,
							    dsa_foreign_dev_check,
							    dsa_slave_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del_foreign(dev, ptr,
							    dsa_slave_dev_check,
							    dsa_foreign_dev_check,
							    dsa_slave_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

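/* dsa_slave_switchdev_notifier and dsa_slave_switchdev_blocking_notifier
 * are deliberately non-static: the bridge join path in port.c hands them
 * to the bridge so that existing switchdev objects can be replayed
 * against a joining port.
 */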
static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call  = dsa_slave_netdevice_event,
};

struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};

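/* Register the notifiers through which DSA reacts to netdevice and
 * switchdev events.
 */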
int dsa_slave_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}

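/* Counterpart to dsa_slave_register_notifier(); unregisters in reverse
 * order of registration.
 */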
void dsa_slave_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}