1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Bridge netlink control interface
4 *
5 * Authors:
6 * Stephen Hemminger <shemminger@osdl.org>
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/etherdevice.h>
12 #include <net/rtnetlink.h>
13 #include <net/net_namespace.h>
14 #include <net/sock.h>
15 #include <uapi/linux/if_bridge.h>
16
17 #include "br_private.h"
18 #include "br_private_stp.h"
19 #include "br_private_tunnel.h"
20
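/* Count the bridge_vlan_info entries a compressed vlan dump will need:
 * runs of consecutive vids with identical flags collapse into a range
 * (two entries), every other vlan costs one entry.
 */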
21 static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
22 u32 filter_mask)
23 {
24 struct net_bridge_vlan *v;
25 u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
26 u16 flags, pvid;
27 int num_vlans = 0;
28
29 if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
30 return 0;
31
32 pvid = br_get_pvid(vg);
33 /* Count number of vlan infos */
34 list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
35 flags = 0;
36 /* only a context, bridge vlan not activated */
37 if (!br_vlan_should_use(v))
38 continue;
39 if (v->vid == pvid)
40 flags |= BRIDGE_VLAN_INFO_PVID;
41
42 if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
43 flags |= BRIDGE_VLAN_INFO_UNTAGGED;
44
45 if (vid_range_start == 0) {
46 goto initvars;
47 } else if ((v->vid - vid_range_end) == 1 &&
48 flags == vid_range_flags) {
49 vid_range_end = v->vid;
50 continue;
51 } else {
52 if ((vid_range_end - vid_range_start) > 0)
53 num_vlans += 2;
54 else
55 num_vlans += 1;
56 }
57 initvars:
58 vid_range_start = v->vid;
59 vid_range_end = v->vid;
60 vid_range_flags = flags;
61 }
62
63 if (vid_range_start != 0) {
64 if ((vid_range_end - vid_range_start) > 0)
65 num_vlans += 2;
66 else
67 num_vlans += 1;
68 }
69
70 return num_vlans;
71 }
72
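/* Number of bridge_vlan_info attributes needed to dump @vg: one per vlan
 * with RTEXT_FILTER_BRVLAN, the compressed range count with
 * RTEXT_FILTER_BRVLAN_COMPRESSED, zero otherwise.
 */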
73 static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
74 u32 filter_mask)
75 {
76 int num_vlans;
77
78 if (!vg)
79 return 0;
80
81 if (filter_mask & RTEXT_FILTER_BRVLAN)
82 return vg->num_vlans;
83
84 rcu_read_lock();
85 num_vlans = __get_num_vlan_infos(vg, filter_mask);
86 rcu_read_unlock();
87
88 return num_vlans;
89 }
90
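/* Size of the AF_BRIDGE IFLA_AF_SPEC payload for @dev: the vlan info
 * attributes plus any vlan tunnel info, honouring the dump filter.
 */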
91 static size_t br_get_link_af_size_filtered(const struct net_device *dev,
92 u32 filter_mask)
93 {
94 struct net_bridge_vlan_group *vg = NULL;
95 struct net_bridge_port *p = NULL;
96 struct net_bridge *br;
97 int num_vlan_infos;
98 size_t vinfo_sz = 0;
99
100 rcu_read_lock();
101 if (netif_is_bridge_port(dev)) {
102 p = br_port_get_rcu(dev);
103 vg = nbp_vlan_group_rcu(p);
104 } else if (dev->priv_flags & IFF_EBRIDGE) {
105 br = netdev_priv(dev);
106 vg = br_vlan_group_rcu(br);
107 }
108 num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
109 rcu_read_unlock();
110
111 if (p && (p->flags & BR_VLAN_TUNNEL))
112 vinfo_sz += br_get_vlan_tunnel_info_size(vg);
113
114 /* Each VLAN is returned in bridge_vlan_info along with flags */
115 vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
116
117 return vinfo_sz;
118 }
119
120 static inline size_t br_port_info_size(void)
121 {
122 return nla_total_size(1) /* IFLA_BRPORT_STATE */
123 + nla_total_size(2) /* IFLA_BRPORT_PRIORITY */
124 + nla_total_size(4) /* IFLA_BRPORT_COST */
125 + nla_total_size(1) /* IFLA_BRPORT_MODE */
126 + nla_total_size(1) /* IFLA_BRPORT_GUARD */
127 + nla_total_size(1) /* IFLA_BRPORT_PROTECT */
128 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
129 + nla_total_size(1) /* IFLA_BRPORT_MCAST_TO_UCAST */
130 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */
131 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
132 + nla_total_size(1) /* IFLA_BRPORT_MCAST_FLOOD */
133 + nla_total_size(1) /* IFLA_BRPORT_BCAST_FLOOD */
134 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
135 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
136 + nla_total_size(1) /* IFLA_BRPORT_VLAN_TUNNEL */
137 + nla_total_size(1) /* IFLA_BRPORT_NEIGH_SUPPRESS */
138 + nla_total_size(1) /* IFLA_BRPORT_ISOLATED */
139 + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */
140 + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */
141 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */
142 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_COST */
143 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_ID */
144 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_NO */
145 + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
146 + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_CONFIG_PENDING */
147 + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
148 + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
149 + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
150 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
151 + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MULTICAST_ROUTER */
152 #endif
153 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_GROUP_FWD_MASK */
154 + 0;
155 }
156
157 static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
158 {
159 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
160 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
161 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
162 + nla_total_size(4) /* IFLA_MASTER */
163 + nla_total_size(4) /* IFLA_MTU */
164 + nla_total_size(4) /* IFLA_LINK */
165 + nla_total_size(1) /* IFLA_OPERSTATE */
166 + nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
167 + nla_total_size(br_get_link_af_size_filtered(dev,
168 filter_mask)) /* IFLA_AF_SPEC */
169 + nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
170 }
171
172 static int br_port_fill_attrs(struct sk_buff *skb,
173 const struct net_bridge_port *p)
174 {
175 u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
176 struct net_bridge_port *backup_p;
177 u64 timerval;
178
179 if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
180 nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
181 nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
182 nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
183 nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
184 nla_put_u8(skb, IFLA_BRPORT_PROTECT,
185 !!(p->flags & BR_ROOT_BLOCK)) ||
186 nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
187 !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
188 nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
189 !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
190 nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
191 nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
192 !!(p->flags & BR_FLOOD)) ||
193 nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
194 !!(p->flags & BR_MCAST_FLOOD)) ||
195 nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
196 !!(p->flags & BR_BCAST_FLOOD)) ||
197 nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
198 nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
199 !!(p->flags & BR_PROXYARP_WIFI)) ||
200 nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
201 &p->designated_root) ||
202 nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
203 &p->designated_bridge) ||
204 nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
205 nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
206 nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
207 nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
208 nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
209 p->topology_change_ack) ||
210 nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
211 nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
212 BR_VLAN_TUNNEL)) ||
213 nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
214 nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
215 !!(p->flags & BR_NEIGH_SUPPRESS)) ||
216 nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)))
217 return -EMSGSIZE;
218
219 timerval = br_timer_value(&p->message_age_timer);
220 if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
221 IFLA_BRPORT_PAD))
222 return -EMSGSIZE;
223 timerval = br_timer_value(&p->forward_delay_timer);
224 if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
225 IFLA_BRPORT_PAD))
226 return -EMSGSIZE;
227 timerval = br_timer_value(&p->hold_timer);
228 if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
229 IFLA_BRPORT_PAD))
230 return -EMSGSIZE;
231
232 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
233 if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
234 p->multicast_router))
235 return -EMSGSIZE;
236 #endif
237
238 /* we might be called only with br->lock */
239 rcu_read_lock();
240 backup_p = rcu_dereference(p->backup_port);
241 if (backup_p)
242 nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
243 backup_p->dev->ifindex);
244 rcu_read_unlock();
245
246 return 0;
247 }
248
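/* Emit IFLA_BRIDGE_VLAN_INFO for a single vid, or a RANGE_BEGIN/RANGE_END
 * pair when vid_start..vid_end covers more than one vid.
 */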
249 static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
250 u16 vid_end, u16 flags)
251 {
252 struct bridge_vlan_info vinfo;
253
254 if ((vid_end - vid_start) > 0) {
255 /* add range to skb */
256 vinfo.vid = vid_start;
257 vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
258 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
259 sizeof(vinfo), &vinfo))
260 goto nla_put_failure;
261
262 vinfo.vid = vid_end;
263 vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
264 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
265 sizeof(vinfo), &vinfo))
266 goto nla_put_failure;
267 } else {
268 vinfo.vid = vid_start;
269 vinfo.flags = flags;
270 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
271 sizeof(vinfo), &vinfo))
272 goto nla_put_failure;
273 }
274
275 return 0;
276
277 nla_put_failure:
278 return -EMSGSIZE;
279 }
280
281 static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
282 struct net_bridge_vlan_group *vg)
283 {
284 struct net_bridge_vlan *v;
285 u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
286 u16 flags, pvid;
287 int err = 0;
288
289 /* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
290 * and mark vlan info with begin and end flags
291 * if vlaninfo represents a range
292 */
293 pvid = br_get_pvid(vg);
294 list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
295 flags = 0;
296 if (!br_vlan_should_use(v))
297 continue;
298 if (v->vid == pvid)
299 flags |= BRIDGE_VLAN_INFO_PVID;
300
301 if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
302 flags |= BRIDGE_VLAN_INFO_UNTAGGED;
303
304 if (vid_range_start == 0) {
305 goto initvars;
306 } else if ((v->vid - vid_range_end) == 1 &&
307 flags == vid_range_flags) {
308 vid_range_end = v->vid;
309 continue;
310 } else {
311 err = br_fill_ifvlaninfo_range(skb, vid_range_start,
312 vid_range_end,
313 vid_range_flags);
314 if (err)
315 return err;
316 }
317
318 initvars:
319 vid_range_start = v->vid;
320 vid_range_end = v->vid;
321 vid_range_flags = flags;
322 }
323
324 if (vid_range_start != 0) {
325 /* Call it once more to send any leftover vlans */
326 err = br_fill_ifvlaninfo_range(skb, vid_range_start,
327 vid_range_end,
328 vid_range_flags);
329 if (err)
330 return err;
331 }
332
333 return 0;
334 }
335
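/* Dump each vlan in @vg as its own IFLA_BRIDGE_VLAN_INFO attribute
 * (the uncompressed RTEXT_FILTER_BRVLAN format).
 */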
336 static int br_fill_ifvlaninfo(struct sk_buff *skb,
337 struct net_bridge_vlan_group *vg)
338 {
339 struct bridge_vlan_info vinfo;
340 struct net_bridge_vlan *v;
341 u16 pvid;
342
343 pvid = br_get_pvid(vg);
344 list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
345 if (!br_vlan_should_use(v))
346 continue;
347
348 vinfo.vid = v->vid;
349 vinfo.flags = 0;
350 if (v->vid == pvid)
351 vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
352
353 if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
354 vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
355
356 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
357 sizeof(vinfo), &vinfo))
358 goto nla_put_failure;
359 }
360
361 return 0;
362
363 nla_put_failure:
364 return -EMSGSIZE;
365 }
366
367 /*
368 * Create one netlink message for one interface
369 * Contains port and master info as well as carrier and bridge state.
370 */
371 static int br_fill_ifinfo(struct sk_buff *skb,
372 const struct net_bridge_port *port,
373 u32 pid, u32 seq, int event, unsigned int flags,
374 u32 filter_mask, const struct net_device *dev)
375 {
376 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
377 struct net_bridge *br;
378 struct ifinfomsg *hdr;
379 struct nlmsghdr *nlh;
380
381 if (port)
382 br = port->br;
383 else
384 br = netdev_priv(dev);
385
386 br_debug(br, "br_fill_info event %d port %s master %s\n",
387 event, dev->name, br->dev->name);
388
389 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
390 if (nlh == NULL)
391 return -EMSGSIZE;
392
393 hdr = nlmsg_data(nlh);
394 hdr->ifi_family = AF_BRIDGE;
395 hdr->__ifi_pad = 0;
396 hdr->ifi_type = dev->type;
397 hdr->ifi_index = dev->ifindex;
398 hdr->ifi_flags = dev_get_flags(dev);
399 hdr->ifi_change = 0;
400
401 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
402 nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
403 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
404 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
405 (dev->addr_len &&
406 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
407 (dev->ifindex != dev_get_iflink(dev) &&
408 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
409 goto nla_put_failure;
410
411 if (event == RTM_NEWLINK && port) {
412 struct nlattr *nest;
413
414 nest = nla_nest_start(skb, IFLA_PROTINFO);
415 if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
416 goto nla_put_failure;
417 nla_nest_end(skb, nest);
418 }
419
420 /* Check if the VID information is requested */
421 if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
422 (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
423 struct net_bridge_vlan_group *vg;
424 struct nlattr *af;
425 int err;
426
427 /* RCU needed because of the VLAN locking rules (rcu || rtnl) */
428 rcu_read_lock();
429 if (port)
430 vg = nbp_vlan_group_rcu(port);
431 else
432 vg = br_vlan_group_rcu(br);
433
434 if (!vg || !vg->num_vlans) {
435 rcu_read_unlock();
436 goto done;
437 }
438 af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
439 if (!af) {
440 rcu_read_unlock();
441 goto nla_put_failure;
442 }
443 if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
444 err = br_fill_ifvlaninfo_compressed(skb, vg);
445 else
446 err = br_fill_ifvlaninfo(skb, vg);
447
448 if (port && (port->flags & BR_VLAN_TUNNEL))
449 err = br_fill_vlan_tunnel_info(skb, vg);
450 rcu_read_unlock();
451 if (err)
452 goto nla_put_failure;
453 nla_nest_end(skb, af);
454 }
455
456 done:
457 nlmsg_end(skb, nlh);
458 return 0;
459
460 nla_put_failure:
461 nlmsg_cancel(skb, nlh);
462 return -EMSGSIZE;
463 }
464
465 /* Notify listeners of a change in bridge or port information */
466 void br_ifinfo_notify(int event, const struct net_bridge *br,
467 const struct net_bridge_port *port)
468 {
469 u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
470 struct net_device *dev;
471 struct sk_buff *skb;
472 int err = -ENOBUFS;
473 struct net *net;
474 u16 port_no = 0;
475
476 if (WARN_ON(!port && !br))
477 return;
478
479 if (port) {
480 dev = port->dev;
481 br = port->br;
482 port_no = port->port_no;
483 } else {
484 dev = br->dev;
485 }
486
487 net = dev_net(dev);
488 br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);
489
490 skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
491 if (skb == NULL)
492 goto errout;
493
494 err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev);
495 if (err < 0) {
496 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
497 WARN_ON(err == -EMSGSIZE);
498 kfree_skb(skb);
499 goto errout;
500 }
501 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
502 return;
503 errout:
504 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
505 }
506
507 /*
508 * Dump information about all ports, in response to GETLINK
509 */
510 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
511 struct net_device *dev, u32 filter_mask, int nlflags)
512 {
513 struct net_bridge_port *port = br_port_get_rtnl(dev);
514
515 if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
516 !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
517 return 0;
518
519 return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
520 filter_mask, dev);
521 }
522
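/* Add (RTM_SETLINK) or delete (RTM_DELLINK) a single vlan on the port, or
 * on the bridge itself when @p is NULL, and record whether anything changed.
 */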
523 static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
524 int cmd, struct bridge_vlan_info *vinfo, bool *changed,
525 struct netlink_ext_ack *extack)
526 {
527 bool curr_change;
528 int err = 0;
529
530 switch (cmd) {
531 case RTM_SETLINK:
532 if (p) {
533 /* if the MASTER flag is set this will act on the global
534 * per-VLAN entry as well
535 */
536 err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
537 &curr_change, extack);
538 } else {
539 vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
540 err = br_vlan_add(br, vinfo->vid, vinfo->flags,
541 &curr_change, extack);
542 }
543 if (curr_change)
544 *changed = true;
545 break;
546
547 case RTM_DELLINK:
548 if (p) {
549 if (!nbp_vlan_delete(p, vinfo->vid))
550 *changed = true;
551
552 if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
553 !br_vlan_delete(p->br, vinfo->vid))
554 *changed = true;
555 } else if (!br_vlan_delete(br, vinfo->vid)) {
556 *changed = true;
557 }
558 break;
559 }
560
561 return err;
562 }
563
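/* Validate one vlan info attribute and either remember the start of a
 * range or expand a completed RANGE_BEGIN..RANGE_END pair into per-vid
 * br_vlan_info() calls.
 */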
564 static int br_process_vlan_info(struct net_bridge *br,
565 struct net_bridge_port *p, int cmd,
566 struct bridge_vlan_info *vinfo_curr,
567 struct bridge_vlan_info **vinfo_last,
568 bool *changed,
569 struct netlink_ext_ack *extack)
570 {
571 if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
572 return -EINVAL;
573
574 if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
575 /* check if we are already processing a range */
576 if (*vinfo_last)
577 return -EINVAL;
578 *vinfo_last = vinfo_curr;
579 /* don't allow range of pvids */
580 if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
581 return -EINVAL;
582 return 0;
583 }
584
585 if (*vinfo_last) {
586 struct bridge_vlan_info tmp_vinfo;
587 int v, err;
588
589 if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
590 return -EINVAL;
591
592 if (vinfo_curr->vid <= (*vinfo_last)->vid)
593 return -EINVAL;
594
595 memcpy(&tmp_vinfo, *vinfo_last,
596 sizeof(struct bridge_vlan_info));
597 for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
598 tmp_vinfo.vid = v;
599 err = br_vlan_info(br, p, cmd, &tmp_vinfo, changed,
600 extack);
601 if (err)
602 break;
603 }
604 *vinfo_last = NULL;
605
606 return err;
607 }
608
609 return br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
610 }
611
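/* Walk the nested IFLA_AF_SPEC attributes of a SETLINK/DELLINK request and
 * apply the contained vlan and vlan tunnel info.
 */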
612 static int br_afspec(struct net_bridge *br,
613 struct net_bridge_port *p,
614 struct nlattr *af_spec,
615 int cmd, bool *changed,
616 struct netlink_ext_ack *extack)
617 {
618 struct bridge_vlan_info *vinfo_curr = NULL;
619 struct bridge_vlan_info *vinfo_last = NULL;
620 struct nlattr *attr;
621 struct vtunnel_info tinfo_last = {};
622 struct vtunnel_info tinfo_curr = {};
623 int err = 0, rem;
624
625 nla_for_each_nested(attr, af_spec, rem) {
626 err = 0;
627 switch (nla_type(attr)) {
628 case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
629 if (!p || !(p->flags & BR_VLAN_TUNNEL))
630 return -EINVAL;
631 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
632 if (err)
633 return err;
634 err = br_process_vlan_tunnel_info(br, p, cmd,
635 &tinfo_curr,
636 &tinfo_last,
637 changed);
638 if (err)
639 return err;
640 break;
641 case IFLA_BRIDGE_VLAN_INFO:
642 if (nla_len(attr) != sizeof(struct bridge_vlan_info))
643 return -EINVAL;
644 vinfo_curr = nla_data(attr);
645 err = br_process_vlan_info(br, p, cmd, vinfo_curr,
646 &vinfo_last, changed,
647 extack);
648 if (err)
649 return err;
650 break;
651 }
652 }
653
654 return err;
655 }
656
657 static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
658 [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
659 [IFLA_BRPORT_COST] = { .type = NLA_U32 },
660 [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
661 [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
662 [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
663 [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
664 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
665 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
666 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
667 [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
668 [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
669 [IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
670 [IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
671 [IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
672 [IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
673 [IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
674 [IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
675 [IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
676 [IFLA_BRPORT_ISOLATED] = { .type = NLA_U8 },
677 [IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
678 };
679
680 /* Change the state of the port and notify spanning tree */
681 static int br_set_port_state(struct net_bridge_port *p, u8 state)
682 {
683 if (state > BR_STATE_BLOCKING)
684 return -EINVAL;
685
686 /* if kernel STP is running, don't allow changes */
687 if (p->br->stp_enabled == BR_KERNEL_STP)
688 return -EBUSY;
689
690 /* if device is not up, change is not allowed
691 * if link is not present, only allowable state is disabled
692 */
693 if (!netif_running(p->dev) ||
694 (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
695 return -ENETDOWN;
696
697 br_set_state(p, state);
698 br_port_state_selection(p->br);
699 return 0;
700 }
701
702 /* Set/clear port flags based on attribute */
703 static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
704 int attrtype, unsigned long mask)
705 {
706 unsigned long flags;
707 int err;
708
709 if (!tb[attrtype])
710 return 0;
711
712 if (nla_get_u8(tb[attrtype]))
713 flags = p->flags | mask;
714 else
715 flags = p->flags & ~mask;
716
717 err = br_switchdev_set_port_flag(p, flags, mask);
718 if (err)
719 return err;
720
721 p->flags = flags;
722 return 0;
723 }
724
725 /* Process bridge protocol info on port */
726 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
727 {
728 unsigned long old_flags = p->flags;
729 bool br_vlan_tunnel_old = false;
730 int err;
731
732 err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
733 if (err)
734 return err;
735
736 err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
737 if (err)
738 return err;
739
740 err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
741 if (err)
742 return err;
743
744 err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
745 if (err)
746 return err;
747
748 err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
749 if (err)
750 return err;
751
752 err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
753 if (err)
754 return err;
755
756 err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
757 if (err)
758 return err;
759
760 err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
761 if (err)
762 return err;
763
764 err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
765 if (err)
766 return err;
767
768 err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
769 if (err)
770 return err;
771
772 err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
773 if (err)
774 return err;
775
776 br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
777 err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
778 if (err)
779 return err;
780
781 if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
782 nbp_vlan_tunnel_info_flush(p);
783
784 if (tb[IFLA_BRPORT_COST]) {
785 err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
786 if (err)
787 return err;
788 }
789
790 if (tb[IFLA_BRPORT_PRIORITY]) {
791 err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
792 if (err)
793 return err;
794 }
795
796 if (tb[IFLA_BRPORT_STATE]) {
797 err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
798 if (err)
799 return err;
800 }
801
802 if (tb[IFLA_BRPORT_FLUSH])
803 br_fdb_delete_by_port(p->br, p, 0, 0);
804
805 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
806 if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
807 u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);
808
809 err = br_multicast_set_port_router(p, mcast_router);
810 if (err)
811 return err;
812 }
813 #endif
814
815 if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
816 u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);
817
818 if (fwd_mask & BR_GROUPFWD_MACPAUSE)
819 return -EINVAL;
820 p->group_fwd_mask = fwd_mask;
821 }
822
823 err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
824 BR_NEIGH_SUPPRESS);
825 if (err)
826 return err;
827
828 err = br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
829 if (err)
830 return err;
831
832 if (tb[IFLA_BRPORT_BACKUP_PORT]) {
833 struct net_device *backup_dev = NULL;
834 u32 backup_ifindex;
835
836 backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
837 if (backup_ifindex) {
838 backup_dev = __dev_get_by_index(dev_net(p->dev),
839 backup_ifindex);
840 if (!backup_dev)
841 return -ENOENT;
842 }
843
844 err = nbp_backup_change(p, backup_dev);
845 if (err)
846 return err;
847 }
848
849 br_port_flags_change(p, old_flags ^ p->flags);
850 return 0;
851 }
852
853 /* Change state and parameters on port. */
854 int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
855 struct netlink_ext_ack *extack)
856 {
857 struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
858 struct nlattr *tb[IFLA_BRPORT_MAX + 1];
859 struct net_bridge_port *p;
860 struct nlattr *protinfo;
861 struct nlattr *afspec;
862 bool changed = false;
863 int err = 0;
864
865 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
866 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
867 if (!protinfo && !afspec)
868 return 0;
869
870 p = br_port_get_rtnl(dev);
871 /* We want to accept dev as bridge itself if the AF_SPEC
872 * is set to see if someone is setting vlan info on the bridge
873 */
874 if (!p && !afspec)
875 return -EINVAL;
876
877 if (p && protinfo) {
878 if (protinfo->nla_type & NLA_F_NESTED) {
879 err = nla_parse_nested_deprecated(tb, IFLA_BRPORT_MAX,
880 protinfo,
881 br_port_policy,
882 NULL);
883 if (err)
884 return err;
885
886 spin_lock_bh(&p->br->lock);
887 err = br_setport(p, tb);
888 spin_unlock_bh(&p->br->lock);
889 } else {
890 /* Binary compatibility with old RSTP */
891 if (nla_len(protinfo) < sizeof(u8))
892 return -EINVAL;
893
894 spin_lock_bh(&p->br->lock);
895 err = br_set_port_state(p, nla_get_u8(protinfo));
896 spin_unlock_bh(&p->br->lock);
897 }
898 if (err)
899 goto out;
900 changed = true;
901 }
902
903 if (afspec)
904 err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);
905
906 if (changed)
907 br_ifinfo_notify(RTM_NEWLINK, br, p);
908 out:
909 return err;
910 }
911
912 /* Delete port information */
913 int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
914 {
915 struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
916 struct net_bridge_port *p;
917 struct nlattr *afspec;
918 bool changed = false;
919 int err = 0;
920
921 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
922 if (!afspec)
923 return 0;
924
925 p = br_port_get_rtnl(dev);
926 /* We want to accept dev as bridge itself as well */
927 if (!p && !(dev->priv_flags & IFF_EBRIDGE))
928 return -EINVAL;
929
930 err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
931 if (changed)
932 /* Send RTM_NEWLINK because userspace
933 * expects RTM_NEWLINK for vlan dels
934 */
935 br_ifinfo_notify(RTM_NEWLINK, br, p);
936
937 return err;
938 }
939
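/* Validate attributes of a bridge newlink/changelink request */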
940 static int br_validate(struct nlattr *tb[], struct nlattr *data[],
941 struct netlink_ext_ack *extack)
942 {
943 if (tb[IFLA_ADDRESS]) {
944 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
945 return -EINVAL;
946 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
947 return -EADDRNOTAVAIL;
948 }
949
950 if (!data)
951 return 0;
952
953 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
954 if (data[IFLA_BR_VLAN_PROTOCOL]) {
955 switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
956 case htons(ETH_P_8021Q):
957 case htons(ETH_P_8021AD):
958 break;
959 default:
960 return -EPROTONOSUPPORT;
961 }
962 }
963
964 if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
965 __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
966
967 if (defpvid >= VLAN_VID_MASK)
968 return -EINVAL;
969 }
970 #endif
971
972 return 0;
973 }
974
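/* Apply IFLA_BRPORT_* attributes carried in the slave data of a changelink */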
975 static int br_port_slave_changelink(struct net_device *brdev,
976 struct net_device *dev,
977 struct nlattr *tb[],
978 struct nlattr *data[],
979 struct netlink_ext_ack *extack)
980 {
981 struct net_bridge *br = netdev_priv(brdev);
982 int ret;
983
984 if (!data)
985 return 0;
986
987 spin_lock_bh(&br->lock);
988 ret = br_setport(br_port_get_rtnl(dev), data);
989 spin_unlock_bh(&br->lock);
990
991 return ret;
992 }
993
994 static int br_port_fill_slave_info(struct sk_buff *skb,
995 const struct net_device *brdev,
996 const struct net_device *dev)
997 {
998 return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
999 }
1000
1001 static size_t br_port_get_slave_size(const struct net_device *brdev,
1002 const struct net_device *dev)
1003 {
1004 return br_port_info_size();
1005 }
1006
1007 static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
1008 [IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
1009 [IFLA_BR_HELLO_TIME] = { .type = NLA_U32 },
1010 [IFLA_BR_MAX_AGE] = { .type = NLA_U32 },
1011 [IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
1012 [IFLA_BR_STP_STATE] = { .type = NLA_U32 },
1013 [IFLA_BR_PRIORITY] = { .type = NLA_U16 },
1014 [IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
1015 [IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
1016 [IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
1017 [IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
1018 .len = ETH_ALEN },
1019 [IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
1020 [IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
1021 [IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
1022 [IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
1023 [IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
1024 [IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
1025 [IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
1026 [IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
1027 [IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
1028 [IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
1029 [IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
1030 [IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
1031 [IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
1032 [IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
1033 [IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
1034 [IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
1035 [IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
1036 [IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
1037 [IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
1038 [IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
1039 [IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
1040 [IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
1041 [IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
1042 [IFLA_BR_MULTI_BOOLOPT] = { .type = NLA_EXACT_LEN,
1043 .len = sizeof(struct br_boolopt_multi) },
1044 };
1045
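/* Apply IFLA_BR_* attributes to the bridge device */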
1046 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
1047 struct nlattr *data[],
1048 struct netlink_ext_ack *extack)
1049 {
1050 struct net_bridge *br = netdev_priv(brdev);
1051 int err;
1052
1053 if (!data)
1054 return 0;
1055
1056 if (data[IFLA_BR_FORWARD_DELAY]) {
1057 err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
1058 if (err)
1059 return err;
1060 }
1061
1062 if (data[IFLA_BR_HELLO_TIME]) {
1063 err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
1064 if (err)
1065 return err;
1066 }
1067
1068 if (data[IFLA_BR_MAX_AGE]) {
1069 err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
1070 if (err)
1071 return err;
1072 }
1073
1074 if (data[IFLA_BR_AGEING_TIME]) {
1075 err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
1076 if (err)
1077 return err;
1078 }
1079
1080 if (data[IFLA_BR_STP_STATE]) {
1081 u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);
1082
1083 br_stp_set_enabled(br, stp_enabled);
1084 }
1085
1086 if (data[IFLA_BR_PRIORITY]) {
1087 u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);
1088
1089 br_stp_set_bridge_priority(br, priority);
1090 }
1091
1092 if (data[IFLA_BR_VLAN_FILTERING]) {
1093 u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
1094
1095 err = __br_vlan_filter_toggle(br, vlan_filter);
1096 if (err)
1097 return err;
1098 }
1099
1100 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1101 if (data[IFLA_BR_VLAN_PROTOCOL]) {
1102 __be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);
1103
1104 err = __br_vlan_set_proto(br, vlan_proto);
1105 if (err)
1106 return err;
1107 }
1108
1109 if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
1110 __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
1111
1112 err = __br_vlan_set_default_pvid(br, defpvid, extack);
1113 if (err)
1114 return err;
1115 }
1116
1117 if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
1118 __u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);
1119
1120 err = br_vlan_set_stats(br, vlan_stats);
1121 if (err)
1122 return err;
1123 }
1124
1125 if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
1126 __u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);
1127
1128 err = br_vlan_set_stats_per_port(br, per_port);
1129 if (err)
1130 return err;
1131 }
1132 #endif
1133
1134 if (data[IFLA_BR_GROUP_FWD_MASK]) {
1135 u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);
1136
1137 if (fwd_mask & BR_GROUPFWD_RESTRICTED)
1138 return -EINVAL;
1139 br->group_fwd_mask = fwd_mask;
1140 }
1141
1142 if (data[IFLA_BR_GROUP_ADDR]) {
1143 u8 new_addr[ETH_ALEN];
1144
1145 if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
1146 return -EINVAL;
1147 memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
1148 if (!is_link_local_ether_addr(new_addr))
1149 return -EINVAL;
1150 if (new_addr[5] == 1 || /* 802.3x Pause address */
1151 new_addr[5] == 2 || /* 802.3ad Slow protocols */
1152 new_addr[5] == 3) /* 802.1X PAE address */
1153 return -EINVAL;
1154 spin_lock_bh(&br->lock);
1155 memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
1156 spin_unlock_bh(&br->lock);
1157 br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
1158 br_recalculate_fwd_mask(br);
1159 }
1160
1161 if (data[IFLA_BR_FDB_FLUSH])
1162 br_fdb_flush(br);
1163
1164 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1165 if (data[IFLA_BR_MCAST_ROUTER]) {
1166 u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);
1167
1168 err = br_multicast_set_router(br, multicast_router);
1169 if (err)
1170 return err;
1171 }
1172
1173 if (data[IFLA_BR_MCAST_SNOOPING]) {
1174 u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
1175
1176 br_multicast_toggle(br, mcast_snooping);
1177 }
1178
1179 if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
1180 u8 val;
1181
1182 val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
1183 br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
1184 }
1185
1186 if (data[IFLA_BR_MCAST_QUERIER]) {
1187 u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);
1188
1189 err = br_multicast_set_querier(br, mcast_querier);
1190 if (err)
1191 return err;
1192 }
1193
1194 if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
1195 br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
1196 RHT_ELASTICITY);
1197
1198 if (data[IFLA_BR_MCAST_HASH_MAX])
1199 br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
1200
1201 if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
1202 u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
1203
1204 br->multicast_last_member_count = val;
1205 }
1206
1207 if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
1208 u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);
1209
1210 br->multicast_startup_query_count = val;
1211 }
1212
1213 if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
1214 u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);
1215
1216 br->multicast_last_member_interval = clock_t_to_jiffies(val);
1217 }
1218
1219 if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
1220 u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);
1221
1222 br->multicast_membership_interval = clock_t_to_jiffies(val);
1223 }
1224
1225 if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
1226 u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);
1227
1228 br->multicast_querier_interval = clock_t_to_jiffies(val);
1229 }
1230
1231 if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
1232 u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
1233
1234 br->multicast_query_interval = clock_t_to_jiffies(val);
1235 }
1236
1237 if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
1238 u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);
1239
1240 br->multicast_query_response_interval = clock_t_to_jiffies(val);
1241 }
1242
1243 if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
1244 u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
1245
1246 br->multicast_startup_query_interval = clock_t_to_jiffies(val);
1247 }
1248
1249 if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
1250 __u8 mcast_stats;
1251
1252 mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
1253 br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
1254 }
1255
1256 if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
1257 __u8 igmp_version;
1258
1259 igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
1260 err = br_multicast_set_igmp_version(br, igmp_version);
1261 if (err)
1262 return err;
1263 }
1264
1265 #if IS_ENABLED(CONFIG_IPV6)
1266 if (data[IFLA_BR_MCAST_MLD_VERSION]) {
1267 __u8 mld_version;
1268
1269 mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
1270 err = br_multicast_set_mld_version(br, mld_version);
1271 if (err)
1272 return err;
1273 }
1274 #endif
1275 #endif
1276 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1277 if (data[IFLA_BR_NF_CALL_IPTABLES]) {
1278 u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);
1279
1280 br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
1281 }
1282
1283 if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
1284 u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);
1285
1286 br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
1287 }
1288
1289 if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
1290 u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);
1291
1292 br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
1293 }
1294 #endif
1295
1296 if (data[IFLA_BR_MULTI_BOOLOPT]) {
1297 struct br_boolopt_multi *bm;
1298
1299 bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
1300 err = br_boolopt_multi_toggle(br, bm, extack);
1301 if (err)
1302 return err;
1303 }
1304
1305 return 0;
1306 }
1307
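/* Create a bridge device from a newlink request: register the netdevice,
 * apply IFLA_ADDRESS and the remaining changelink attributes, and tear the
 * device down again if that fails.
 */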
1308 static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1309 struct nlattr *tb[], struct nlattr *data[],
1310 struct netlink_ext_ack *extack)
1311 {
1312 struct net_bridge *br = netdev_priv(dev);
1313 int err;
1314
1315 err = register_netdevice(dev);
1316 if (err)
1317 return err;
1318
1319 if (tb[IFLA_ADDRESS]) {
1320 spin_lock_bh(&br->lock);
1321 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
1322 spin_unlock_bh(&br->lock);
1323 }
1324
1325 err = br_changelink(dev, tb, data, extack);
1326 if (err)
1327 br_dev_delete(dev, NULL);
1328
1329 return err;
1330 }
1331
1332 static size_t br_get_size(const struct net_device *brdev)
1333 {
1334 return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
1335 nla_total_size(sizeof(u32)) + /* IFLA_BR_HELLO_TIME */
1336 nla_total_size(sizeof(u32)) + /* IFLA_BR_MAX_AGE */
1337 nla_total_size(sizeof(u32)) + /* IFLA_BR_AGEING_TIME */
1338 nla_total_size(sizeof(u32)) + /* IFLA_BR_STP_STATE */
1339 nla_total_size(sizeof(u16)) + /* IFLA_BR_PRIORITY */
1340 nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_FILTERING */
1341 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1342 nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */
1343 nla_total_size(sizeof(u16)) + /* IFLA_BR_VLAN_DEFAULT_PVID */
1344 nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_ENABLED */
1345 nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_PER_PORT */
1346 #endif
1347 nla_total_size(sizeof(u16)) + /* IFLA_BR_GROUP_FWD_MASK */
1348 nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_ROOT_ID */
1349 nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_BRIDGE_ID */
1350 nla_total_size(sizeof(u16)) + /* IFLA_BR_ROOT_PORT */
1351 nla_total_size(sizeof(u32)) + /* IFLA_BR_ROOT_PATH_COST */
1352 nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE */
1353 nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
1354 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
1355 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
1356 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
1357 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
1358 nla_total_size(ETH_ALEN) + /* IFLA_BR_GROUP_ADDR */
1359 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1360 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_ROUTER */
1361 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_SNOOPING */
1362 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
1363 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERIER */
1364 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_STATS_ENABLED */
1365 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_ELASTICITY */
1366 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */
1367 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
1368 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
1369 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
1370 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
1371 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
1372 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
1373 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
1374 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
1375 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_IGMP_VERSION */
1376 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_MLD_VERSION */
1377 #endif
1378 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1379 nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IPTABLES */
1380 nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IP6TABLES */
1381 nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_ARPTABLES */
1382 #endif
1383 nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
1384 0;
1385 }
1386
1387 static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
1388 {
1389 struct net_bridge *br = netdev_priv(brdev);
1390 u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
1391 u32 hello_time = jiffies_to_clock_t(br->hello_time);
1392 u32 age_time = jiffies_to_clock_t(br->max_age);
1393 u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
1394 u32 stp_enabled = br->stp_enabled;
1395 u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
1396 u8 vlan_enabled = br_vlan_enabled(br->dev);
1397 struct br_boolopt_multi bm;
1398 u64 clockval;
1399
1400 clockval = br_timer_value(&br->hello_timer);
1401 if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
1402 return -EMSGSIZE;
1403 clockval = br_timer_value(&br->tcn_timer);
1404 if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
1405 return -EMSGSIZE;
1406 clockval = br_timer_value(&br->topology_change_timer);
1407 if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
1408 IFLA_BR_PAD))
1409 return -EMSGSIZE;
1410 clockval = br_timer_value(&br->gc_work.timer);
1411 if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
1412 return -EMSGSIZE;
1413
1414 br_boolopt_multi_get(br, &bm);
1415 if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
1416 nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
1417 nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
1418 nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
1419 nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
1420 nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
1421 nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
1422 nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
1423 nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
1424 &br->bridge_id) ||
1425 nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
1426 &br->designated_root) ||
1427 nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
1428 nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
1429 nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
1430 nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
1431 br->topology_change_detected) ||
1432 nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
1433 nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
1434 return -EMSGSIZE;
1435
1436 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1437 if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
1438 nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
1439 nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
1440 br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
1441 nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
1442 br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
1443 return -EMSGSIZE;
1444 #endif
1445 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1446 if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
1447 nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
1448 br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
1449 nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
1450 br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
1451 nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
1452 br_opt_get(br, BROPT_MULTICAST_QUERIER)) ||
1453 nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
1454 br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
1455 nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
1456 nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
1457 nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
1458 br->multicast_last_member_count) ||
1459 nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
1460 br->multicast_startup_query_count) ||
1461 nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
1462 br->multicast_igmp_version))
1463 return -EMSGSIZE;
1464 #if IS_ENABLED(CONFIG_IPV6)
1465 if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
1466 br->multicast_mld_version))
1467 return -EMSGSIZE;
1468 #endif
1469 clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
1470 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
1471 IFLA_BR_PAD))
1472 return -EMSGSIZE;
1473 clockval = jiffies_to_clock_t(br->multicast_membership_interval);
1474 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
1475 IFLA_BR_PAD))
1476 return -EMSGSIZE;
1477 clockval = jiffies_to_clock_t(br->multicast_querier_interval);
1478 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
1479 IFLA_BR_PAD))
1480 return -EMSGSIZE;
1481 clockval = jiffies_to_clock_t(br->multicast_query_interval);
1482 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
1483 IFLA_BR_PAD))
1484 return -EMSGSIZE;
1485 clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
1486 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
1487 IFLA_BR_PAD))
1488 return -EMSGSIZE;
1489 clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
1490 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
1491 IFLA_BR_PAD))
1492 return -EMSGSIZE;
1493 #endif
1494 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1495 if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
1496 br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
1497 nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
1498 br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
1499 nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
1500 br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
1501 return -EMSGSIZE;
1502 #endif
1503
1504 return 0;
1505 }
1506
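/* Size of the LINK_XSTATS_TYPE_BRIDGE payload: one bridge_vlan_xstats per
 * vlan (including placeholder entries) plus the multicast stats blob.
 */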
1507 static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
1508 {
1509 struct net_bridge_port *p = NULL;
1510 struct net_bridge_vlan_group *vg;
1511 struct net_bridge_vlan *v;
1512 struct net_bridge *br;
1513 int numvls = 0;
1514
1515 switch (attr) {
1516 case IFLA_STATS_LINK_XSTATS:
1517 br = netdev_priv(dev);
1518 vg = br_vlan_group(br);
1519 break;
1520 case IFLA_STATS_LINK_XSTATS_SLAVE:
1521 p = br_port_get_rtnl(dev);
1522 if (!p)
1523 return 0;
1524 br = p->br;
1525 vg = nbp_vlan_group(p);
1526 break;
1527 default:
1528 return 0;
1529 }
1530
1531 if (vg) {
1532 /* we need to count all, even placeholder entries */
1533 list_for_each_entry(v, &vg->vlan_list, vlist)
1534 numvls++;
1535 }
1536
1537 return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
1538 nla_total_size(sizeof(struct br_mcast_stats)) +
1539 nla_total_size(0);
1540 }
1541
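/* Fill per-vlan and multicast statistics into a LINK_XSTATS_TYPE_BRIDGE
 * nest, resuming from *prividx when an earlier dump ran out of room.
 */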
1542 static int br_fill_linkxstats(struct sk_buff *skb,
1543 const struct net_device *dev,
1544 int *prividx, int attr)
1545 {
1546 struct nlattr *nla __maybe_unused;
1547 struct net_bridge_port *p = NULL;
1548 struct net_bridge_vlan_group *vg;
1549 struct net_bridge_vlan *v;
1550 struct net_bridge *br;
1551 struct nlattr *nest;
1552 int vl_idx = 0;
1553
1554 switch (attr) {
1555 case IFLA_STATS_LINK_XSTATS:
1556 br = netdev_priv(dev);
1557 vg = br_vlan_group(br);
1558 break;
1559 case IFLA_STATS_LINK_XSTATS_SLAVE:
1560 p = br_port_get_rtnl(dev);
1561 if (!p)
1562 return 0;
1563 br = p->br;
1564 vg = nbp_vlan_group(p);
1565 break;
1566 default:
1567 return -EINVAL;
1568 }
1569
1570 nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BRIDGE);
1571 if (!nest)
1572 return -EMSGSIZE;
1573
1574 if (vg) {
1575 u16 pvid;
1576
1577 pvid = br_get_pvid(vg);
1578 list_for_each_entry(v, &vg->vlan_list, vlist) {
1579 struct bridge_vlan_xstats vxi;
1580 struct br_vlan_stats stats;
1581
1582 if (++vl_idx < *prividx)
1583 continue;
1584 memset(&vxi, 0, sizeof(vxi));
1585 vxi.vid = v->vid;
1586 vxi.flags = v->flags;
1587 if (v->vid == pvid)
1588 vxi.flags |= BRIDGE_VLAN_INFO_PVID;
1589 br_vlan_get_stats(v, &stats);
1590 vxi.rx_bytes = stats.rx_bytes;
1591 vxi.rx_packets = stats.rx_packets;
1592 vxi.tx_bytes = stats.tx_bytes;
1593 vxi.tx_packets = stats.tx_packets;
1594
1595 if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
1596 goto nla_put_failure;
1597 }
1598 }
1599
1600 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1601 if (++vl_idx >= *prividx) {
1602 nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
1603 sizeof(struct br_mcast_stats),
1604 BRIDGE_XSTATS_PAD);
1605 if (!nla)
1606 goto nla_put_failure;
1607 br_multicast_get_stats(br, p, nla_data(nla));
1608 }
1609 #endif
1610 nla_nest_end(skb, nest);
1611 *prividx = 0;
1612
1613 return 0;
1614
1615 nla_put_failure:
1616 nla_nest_end(skb, nest);
1617 *prividx = vl_idx;
1618
1619 return -EMSGSIZE;
1620 }
1621
1622 static struct rtnl_af_ops br_af_ops __read_mostly = {
1623 .family = AF_BRIDGE,
1624 .get_link_af_size = br_get_link_af_size_filtered,
1625 };
1626
1627 struct rtnl_link_ops br_link_ops __read_mostly = {
1628 .kind = "bridge",
1629 .priv_size = sizeof(struct net_bridge),
1630 .setup = br_dev_setup,
1631 .maxtype = IFLA_BR_MAX,
1632 .policy = br_policy,
1633 .validate = br_validate,
1634 .newlink = br_dev_newlink,
1635 .changelink = br_changelink,
1636 .dellink = br_dev_delete,
1637 .get_size = br_get_size,
1638 .fill_info = br_fill_info,
1639 .fill_linkxstats = br_fill_linkxstats,
1640 .get_linkxstats_size = br_get_linkxstats_size,
1641
1642 .slave_maxtype = IFLA_BRPORT_MAX,
1643 .slave_policy = br_port_policy,
1644 .slave_changelink = br_port_slave_changelink,
1645 .get_slave_size = br_port_get_slave_size,
1646 .fill_slave_info = br_port_fill_slave_info,
1647 };
1648
1649 int __init br_netlink_init(void)
1650 {
1651 int err;
1652
1653 br_mdb_init();
1654 rtnl_af_register(&br_af_ops);
1655
1656 err = rtnl_link_register(&br_link_ops);
1657 if (err)
1658 goto out_af;
1659
1660 return 0;
1661
1662 out_af:
1663 rtnl_af_unregister(&br_af_ops);
1664 br_mdb_uninit();
1665 return err;
1666 }
1667
1668 void br_netlink_fini(void)
1669 {
1670 br_mdb_uninit();
1671 rtnl_af_unregister(&br_af_ops);
1672 rtnl_link_unregister(&br_link_ops);
1673 }
1674