// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	40

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);
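
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * configuration path takes the RTNL, mutates device state, and releases it.
 * The killable variant lets a fatal signal abort the wait. The device name
 * below is hypothetical.
 */
static int __maybe_unused rtnl_lock_usage_example(struct net *net)
{
	struct net_device *dev;
	int err;

	err = rtnl_lock_killable();
	if (err)
		return err;

	dev = __dev_get_by_name(net, "dummy0");	/* lookup is RTNL-protected */
	if (dev)
		err = dev_set_mtu(dev, 1500);	/* dev_set_mtu() needs RTNL */

	rtnl_unlock();
	return err;
}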

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
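
/*
 * Usage sketch (illustrative only): a caller purging a queue under RTNL can
 * hand each skb (or a whole chain) to rtnl_kfree_skbs() so the actual
 * kfree_skb() calls happen after the mutex is dropped in __rtnl_unlock().
 */
static void __maybe_unused rtnl_defer_free_example(struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	ASSERT_RTNL();
	while ((skb = __skb_dequeue(queue)) != NULL)
		rtnl_kfree_skbs(skb, skb);	/* head == tail: one-skb chain */
}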

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				rtnl_lock()
	 *				unregister_netdevice()
	 *				__rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 *     // list not empty now
	 *     // because of thread 2
	 *				rtnl_lock()
	 *     while (!list_empty(...))
	 *       rtnl_lock()
	 *				wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to the todo list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
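
/*
 * Usage sketch (illustrative only, hypothetical object type): drop a
 * reference and, only when it was the last one, tear the object down with
 * RTNL held; a pattern similar to qdisc teardown.
 */
struct rtnl_refcnt_example_obj {
	refcount_t refcnt;
};

static void __maybe_unused
rtnl_refcnt_example_put(struct rtnl_refcnt_example_obj *obj)
{
	if (refcount_dec_and_rtnl_lock(&obj->refcnt)) {
		/* last reference gone; destroy under RTNL, then unlock */
		kfree(obj);
		rtnl_unlock();
	}
}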

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
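
/*
 * Usage sketch (illustrative only): the registration/unregistration pattern
 * a removable module would follow. The PF_MPLS/RTM_GETROUTE pair mirrors a
 * real user; the handler below is a hypothetical stand-in that does nothing.
 */
static int example_getroute_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	return 0;	/* a real handler parses attributes and replies */
}

static int __maybe_unused example_handler_init(void)
{
	return rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETROUTE,
				    example_getroute_doit, NULL, 0);
}

static void __maybe_unused example_handler_exit(void)
{
	rtnl_unregister(PF_MPLS, RTM_GETROUTE);
}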

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rtnl_dereference(tab[msgindex]);
	RCU_INIT_POINTER(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rtnl_dereference(tab[msgindex]);
		if (!link)
			continue;

		RCU_INIT_POINTER(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating a device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
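
/*
 * Usage sketch (illustrative only): the minimal rtnl_link_ops a virtual
 * device driver might register. "example" is a made-up kind string and
 * example_link_setup() a hypothetical stand-in for a driver's netdev setup
 * routine; rtnl_link_register() is called from module init and
 * rtnl_link_unregister() from module exit.
 */
static void example_link_setup(struct net_device *dev)
{
	ether_setup(dev);	/* a real driver fills in more here */
}

static struct rtnl_link_ops example_link_ops __maybe_unused = {
	.kind	= "example",
	.setup	= example_link_setup,
};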

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* We hold pernet_ops_rwsem write-locked, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			if (atomic_read(&net->dev_unreg_count) > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
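
/*
 * Usage sketch (illustrative only): an address-family handler that would
 * contribute an IFLA_AF_SPEC nest to link dumps. Both the family value and
 * the fill routine are hypothetical stand-ins; real users live in the IPv4,
 * IPv6 and bridge code.
 */
static int example_fill_link_af(struct sk_buff *skb,
				const struct net_device *dev,
				u32 ext_filter_mask)
{
	return -ENODATA;	/* nothing to dump; the nest gets trimmed */
}

static struct rtnl_af_ops example_af_ops __maybe_unused = {
	.family	      = AF_UNSPEC,	/* placeholder family */
	.fill_link_af = example_fill_link_af,
};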

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);
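
/*
 * Usage sketch (illustrative only): after building a notification skb for a
 * link event, a caller multicasts it to RTNLGRP_LINK listeners; passing the
 * request's nlmsghdr lets nlmsg_report() honour NLM_F_ECHO.
 */
static void __maybe_unused rtnl_notify_example(struct sk_buff *skb,
					       struct net *net, u32 portid,
					       struct nlmsghdr *nlh)
{
	rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, GFP_KERNEL);
}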

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = atomic_read(&dst->__refcnt);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
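
/*
 * Usage sketch (illustrative only): how a route dumper might append
 * RTA_CACHEINFO for a cached entry. A zero "expires" means no expiry;
 * positive and negative values encode time remaining vs. time overdue.
 */
static int __maybe_unused put_cacheinfo_example(struct sk_buff *skb,
						struct dst_entry *dst)
{
	long expires = dst->expires ? (long)(dst->expires - jiffies) : 0;

	return rtnl_put_cacheinfo(skb, dst, 0, expires, 0);
}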

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
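
/*
 * Worked example (illustrative): if the device currently has
 * IFF_UP | IFF_PROMISC set, a request with ifi_flags = 0 and
 * ifi_change = IFF_UP clears only IFF_UP:
 *
 *   (0 & IFF_UP) | ((IFF_UP | IFF_PROMISC) & ~IFF_UP) == IFF_PROMISC
 *
 * whereas ifi_change == 0 would apply ifi_flags wholesale.
 */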

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
991 + nla_total_size(2); /* PORT_VDP_RESPONSE */
992 size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
993 size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
994 + port_size;
995 size_t port_self_size = nla_total_size(sizeof(struct nlattr))
996 + port_size;
997
998 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
999 !(ext_filter_mask & RTEXT_FILTER_VF))
1000 return 0;
1001 if (dev_num_vf(dev->dev.parent))
1002 return port_self_size + vf_ports_size +
1003 vf_port_size * dev_num_vf(dev->dev.parent);
1004 else
1005 return port_self_size;
1006 }
1007
rtnl_xdp_size(void)1008 static size_t rtnl_xdp_size(void)
1009 {
1010 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
1011 nla_total_size(1) + /* XDP_ATTACHED */
1012 nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */
1013 nla_total_size(4); /* XDP_<mode>_PROG_ID */
1014
1015 return xdp_size;
1016 }
1017
rtnl_prop_list_size(const struct net_device * dev)1018 static size_t rtnl_prop_list_size(const struct net_device *dev)
1019 {
1020 struct netdev_name_node *name_node;
1021 size_t size;
1022
1023 if (list_empty(&dev->name_node->list))
1024 return 0;
1025 size = nla_total_size(0);
1026 list_for_each_entry(name_node, &dev->name_node->list, list)
1027 size += nla_total_size(ALTIFNAMSIZ);
1028 return size;
1029 }
1030
rtnl_proto_down_size(const struct net_device * dev)1031 static size_t rtnl_proto_down_size(const struct net_device *dev)
1032 {
1033 size_t size = nla_total_size(1);
1034
1035 if (dev->proto_down_reason)
1036 size += nla_total_size(0) + nla_total_size(4);
1037
1038 return size;
1039 }
1040
if_nlmsg_size(const struct net_device * dev,u32 ext_filter_mask)1041 static noinline size_t if_nlmsg_size(const struct net_device *dev,
1042 u32 ext_filter_mask)
1043 {
1044 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
1045 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
1046 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
1047 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
1048 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
1049 + nla_total_size(sizeof(struct rtnl_link_stats))
1050 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
1051 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
1052 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
1053 + nla_total_size(4) /* IFLA_TXQLEN */
1054 + nla_total_size(4) /* IFLA_WEIGHT */
1055 + nla_total_size(4) /* IFLA_MTU */
1056 + nla_total_size(4) /* IFLA_LINK */
1057 + nla_total_size(4) /* IFLA_MASTER */
1058 + nla_total_size(1) /* IFLA_CARRIER */
1059 + nla_total_size(4) /* IFLA_PROMISCUITY */
1060 + nla_total_size(4) /* IFLA_ALLMULTI */
1061 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
1062 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
1063 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
1064 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
1065 + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
1066 + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
1067 + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
1068 + nla_total_size(1) /* IFLA_OPERSTATE */
1069 + nla_total_size(1) /* IFLA_LINKMODE */
1070 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
1071 + nla_total_size(4) /* IFLA_LINK_NETNSID */
1072 + nla_total_size(4) /* IFLA_GROUP */
1073 + nla_total_size(ext_filter_mask
1074 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
1075 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
1076 + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
1077 + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
1078 + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
1079 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
1080 + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
1081 + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
1082 + rtnl_xdp_size() /* IFLA_XDP */
1083 + nla_total_size(4) /* IFLA_EVENT */
1084 + nla_total_size(4) /* IFLA_NEW_NETNSID */
1085 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
1086 + rtnl_proto_down_size(dev) /* proto down */
1087 + nla_total_size(4) /* IFLA_TARGET_NETNSID */
1088 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
1089 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
1090 + nla_total_size(4) /* IFLA_MIN_MTU */
1091 + nla_total_size(4) /* IFLA_MAX_MTU */
1092 + rtnl_prop_list_size(dev)
1093 + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
1094 + 0;
1095 }
1096
rtnl_vf_ports_fill(struct sk_buff * skb,struct net_device * dev)1097 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
1098 {
1099 struct nlattr *vf_ports;
1100 struct nlattr *vf_port;
1101 int vf;
1102 int err;
1103
1104 vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
1105 if (!vf_ports)
1106 return -EMSGSIZE;
1107
1108 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
1109 vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
1110 if (!vf_port)
1111 goto nla_put_failure;
1112 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
1113 goto nla_put_failure;
1114 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
1115 if (err == -EMSGSIZE)
1116 goto nla_put_failure;
1117 if (err) {
1118 nla_nest_cancel(skb, vf_port);
1119 continue;
1120 }
1121 nla_nest_end(skb, vf_port);
1122 }
1123
1124 nla_nest_end(skb, vf_ports);
1125
1126 return 0;
1127
1128 nla_put_failure:
1129 nla_nest_cancel(skb, vf_ports);
1130 return -EMSGSIZE;
1131 }
1132
rtnl_port_self_fill(struct sk_buff * skb,struct net_device * dev)1133 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
1134 {
1135 struct nlattr *port_self;
1136 int err;
1137
1138 port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
1139 if (!port_self)
1140 return -EMSGSIZE;
1141
1142 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
1143 if (err) {
1144 nla_nest_cancel(skb, port_self);
1145 return (err == -EMSGSIZE) ? err : 0;
1146 }
1147
1148 nla_nest_end(skb, port_self);
1149
1150 return 0;
1151 }
1152
rtnl_port_fill(struct sk_buff * skb,struct net_device * dev,u32 ext_filter_mask)1153 static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
1154 u32 ext_filter_mask)
1155 {
1156 int err;
1157
1158 if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
1159 !(ext_filter_mask & RTEXT_FILTER_VF))
1160 return 0;
1161
1162 err = rtnl_port_self_fill(skb, dev);
1163 if (err)
1164 return err;
1165
1166 if (dev_num_vf(dev->dev.parent)) {
1167 err = rtnl_vf_ports_fill(skb, dev);
1168 if (err)
1169 return err;
1170 }
1171
1172 return 0;
1173 }
1174
rtnl_phys_port_id_fill(struct sk_buff * skb,struct net_device * dev)1175 static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
1176 {
1177 int err;
1178 struct netdev_phys_item_id ppid;
1179
1180 err = dev_get_phys_port_id(dev, &ppid);
1181 if (err) {
1182 if (err == -EOPNOTSUPP)
1183 return 0;
1184 return err;
1185 }
1186
1187 if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
1188 return -EMSGSIZE;
1189
1190 return 0;
1191 }
1192
rtnl_phys_port_name_fill(struct sk_buff * skb,struct net_device * dev)1193 static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
1194 {
1195 char name[IFNAMSIZ];
1196 int err;
1197
1198 err = dev_get_phys_port_name(dev, name, sizeof(name));
1199 if (err) {
1200 if (err == -EOPNOTSUPP)
1201 return 0;
1202 return err;
1203 }
1204
1205 if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
1206 return -EMSGSIZE;
1207
1208 return 0;
1209 }
1210
rtnl_phys_switch_id_fill(struct sk_buff * skb,struct net_device * dev)1211 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1212 {
1213 struct netdev_phys_item_id ppid = { };
1214 int err;
1215
1216 err = dev_get_port_parent_id(dev, &ppid, false);
1217 if (err) {
1218 if (err == -EOPNOTSUPP)
1219 return 0;
1220 return err;
1221 }
1222
1223 if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
1224 return -EMSGSIZE;
1225
1226 return 0;
1227 }
1228
rtnl_fill_stats(struct sk_buff * skb,struct net_device * dev)1229 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1230 struct net_device *dev)
1231 {
1232 struct rtnl_link_stats64 *sp;
1233 struct nlattr *attr;
1234
1235 attr = nla_reserve_64bit(skb, IFLA_STATS64,
1236 sizeof(struct rtnl_link_stats64), IFLA_PAD);
1237 if (!attr)
1238 return -EMSGSIZE;
1239
1240 sp = nla_data(attr);
1241 dev_get_stats(dev, sp);
1242
1243 attr = nla_reserve(skb, IFLA_STATS,
1244 sizeof(struct rtnl_link_stats));
1245 if (!attr)
1246 return -EMSGSIZE;
1247
1248 copy_rtnl_link_stats(nla_data(attr), sp);
1249
1250 return 0;
1251 }
1252
rtnl_fill_vfinfo(struct sk_buff * skb,struct net_device * dev,int vfs_num,struct nlattr * vfinfo)1253 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1254 struct net_device *dev,
1255 int vfs_num,
1256 struct nlattr *vfinfo)
1257 {
1258 struct ifla_vf_rss_query_en vf_rss_query_en;
1259 struct nlattr *vf, *vfstats, *vfvlanlist;
1260 struct ifla_vf_link_state vf_linkstate;
1261 struct ifla_vf_vlan_info vf_vlan_info;
1262 struct ifla_vf_spoofchk vf_spoofchk;
1263 struct ifla_vf_tx_rate vf_tx_rate;
1264 struct ifla_vf_stats vf_stats;
1265 struct ifla_vf_trust vf_trust;
1266 struct ifla_vf_vlan vf_vlan;
1267 struct ifla_vf_rate vf_rate;
1268 struct ifla_vf_mac vf_mac;
1269 struct ifla_vf_broadcast vf_broadcast;
1270 struct ifla_vf_info ivi;
1271 struct ifla_vf_guid node_guid;
1272 struct ifla_vf_guid port_guid;
1273
1274 memset(&ivi, 0, sizeof(ivi));
1275
1276 /* Not all SR-IOV capable drivers support the
1277 * spoofcheck and "RSS query enable" query. Preset to
1278 * -1 so the user space tool can detect that the driver
1279 * didn't report anything.
1280 */
1281 ivi.spoofchk = -1;
1282 ivi.rss_query_en = -1;
1283 ivi.trusted = -1;
1284 /* The default value for VF link state is "auto"
1285 * IFLA_VF_LINK_STATE_AUTO which equals zero
1286 */
1287 ivi.linkstate = 0;
1288 /* VLAN Protocol by default is 802.1Q */
1289 ivi.vlan_proto = htons(ETH_P_8021Q);
1290 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1291 return 0;
1292
1293 memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
1294 memset(&node_guid, 0, sizeof(node_guid));
1295 memset(&port_guid, 0, sizeof(port_guid));
1296
1297 vf_mac.vf =
1298 vf_vlan.vf =
1299 vf_vlan_info.vf =
1300 vf_rate.vf =
1301 vf_tx_rate.vf =
1302 vf_spoofchk.vf =
1303 vf_linkstate.vf =
1304 vf_rss_query_en.vf =
1305 vf_trust.vf =
1306 node_guid.vf =
1307 port_guid.vf = ivi.vf;
1308
1309 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1310 memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
1311 vf_vlan.vlan = ivi.vlan;
1312 vf_vlan.qos = ivi.qos;
1313 vf_vlan_info.vlan = ivi.vlan;
1314 vf_vlan_info.qos = ivi.qos;
1315 vf_vlan_info.vlan_proto = ivi.vlan_proto;
1316 vf_tx_rate.rate = ivi.max_tx_rate;
1317 vf_rate.min_tx_rate = ivi.min_tx_rate;
1318 vf_rate.max_tx_rate = ivi.max_tx_rate;
1319 vf_spoofchk.setting = ivi.spoofchk;
1320 vf_linkstate.link_state = ivi.linkstate;
1321 vf_rss_query_en.setting = ivi.rss_query_en;
1322 vf_trust.setting = ivi.trusted;
1323 vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
1324 if (!vf)
1325 goto nla_put_vfinfo_failure;
1326 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1327 nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
1328 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1329 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1330 &vf_rate) ||
1331 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1332 &vf_tx_rate) ||
1333 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1334 &vf_spoofchk) ||
1335 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1336 &vf_linkstate) ||
1337 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1338 sizeof(vf_rss_query_en),
1339 &vf_rss_query_en) ||
1340 nla_put(skb, IFLA_VF_TRUST,
1341 sizeof(vf_trust), &vf_trust))
1342 goto nla_put_vf_failure;
1343
1344 if (dev->netdev_ops->ndo_get_vf_guid &&
1345 !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
1346 &port_guid)) {
1347 if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
1348 &node_guid) ||
1349 nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
1350 &port_guid))
1351 goto nla_put_vf_failure;
1352 }
1353 vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
1354 if (!vfvlanlist)
1355 goto nla_put_vf_failure;
1356 if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
1357 &vf_vlan_info)) {
1358 nla_nest_cancel(skb, vfvlanlist);
1359 goto nla_put_vf_failure;
1360 }
1361 nla_nest_end(skb, vfvlanlist);
1362 memset(&vf_stats, 0, sizeof(vf_stats));
1363 if (dev->netdev_ops->ndo_get_vf_stats)
1364 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1365 &vf_stats);
1366 vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
1367 if (!vfstats)
1368 goto nla_put_vf_failure;
1369 if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
1370 vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
1371 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
1372 vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
1373 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
1374 vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
1375 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
1376 vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
1377 nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
1378 vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
1379 nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
1380 vf_stats.multicast, IFLA_VF_STATS_PAD) ||
1381 nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
1382 vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
1383 nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
1384 vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
1385 nla_nest_cancel(skb, vfstats);
1386 goto nla_put_vf_failure;
1387 }
1388 nla_nest_end(skb, vfstats);
1389 nla_nest_end(skb, vf);
1390 return 0;
1391
1392 nla_put_vf_failure:
1393 nla_nest_cancel(skb, vf);
1394 nla_put_vfinfo_failure:
1395 nla_nest_cancel(skb, vfinfo);
1396 return -EMSGSIZE;
1397 }
1398
rtnl_fill_vf(struct sk_buff * skb,struct net_device * dev,u32 ext_filter_mask)1399 static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
1400 struct net_device *dev,
1401 u32 ext_filter_mask)
1402 {
1403 struct nlattr *vfinfo;
1404 int i, num_vfs;
1405
1406 if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
1407 return 0;
1408
1409 num_vfs = dev_num_vf(dev->dev.parent);
1410 if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
1411 return -EMSGSIZE;
1412
1413 if (!dev->netdev_ops->ndo_get_vf_config)
1414 return 0;
1415
1416 vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
1417 if (!vfinfo)
1418 return -EMSGSIZE;
1419
1420 for (i = 0; i < num_vfs; i++) {
1421 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1422 return -EMSGSIZE;
1423 }
1424
1425 nla_nest_end(skb, vfinfo);
1426 return 0;
1427 }
1428
rtnl_fill_link_ifmap(struct sk_buff * skb,struct net_device * dev)1429 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1430 {
1431 struct rtnl_link_ifmap map;
1432
1433 memset(&map, 0, sizeof(map));
1434 map.mem_start = dev->mem_start;
1435 map.mem_end = dev->mem_end;
1436 map.base_addr = dev->base_addr;
1437 map.irq = dev->irq;
1438 map.dma = dev->dma;
1439 map.port = dev->if_port;
1440
1441 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
1442 return -EMSGSIZE;
1443
1444 return 0;
1445 }
1446
rtnl_xdp_prog_skb(struct net_device * dev)1447 static u32 rtnl_xdp_prog_skb(struct net_device *dev)
1448 {
1449 const struct bpf_prog *generic_xdp_prog;
1450
1451 ASSERT_RTNL();
1452
1453 generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
1454 if (!generic_xdp_prog)
1455 return 0;
1456 return generic_xdp_prog->aux->id;
1457 }
1458
rtnl_xdp_prog_drv(struct net_device * dev)1459 static u32 rtnl_xdp_prog_drv(struct net_device *dev)
1460 {
1461 return dev_xdp_prog_id(dev, XDP_MODE_DRV);
1462 }
1463
rtnl_xdp_prog_hw(struct net_device * dev)1464 static u32 rtnl_xdp_prog_hw(struct net_device *dev)
1465 {
1466 return dev_xdp_prog_id(dev, XDP_MODE_HW);
1467 }
1468
rtnl_xdp_report_one(struct sk_buff * skb,struct net_device * dev,u32 * prog_id,u8 * mode,u8 tgt_mode,u32 attr,u32 (* get_prog_id)(struct net_device * dev))1469 static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
1470 u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
1471 u32 (*get_prog_id)(struct net_device *dev))
1472 {
1473 u32 curr_id;
1474 int err;
1475
1476 curr_id = get_prog_id(dev);
1477 if (!curr_id)
1478 return 0;
1479
1480 *prog_id = curr_id;
1481 err = nla_put_u32(skb, attr, curr_id);
1482 if (err)
1483 return err;
1484
1485 if (*mode != XDP_ATTACHED_NONE)
1486 *mode = XDP_ATTACHED_MULTI;
1487 else
1488 *mode = tgt_mode;
1489
1490 return 0;
1491 }
1492
rtnl_xdp_fill(struct sk_buff * skb,struct net_device * dev)1493 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1494 {
1495 struct nlattr *xdp;
1496 u32 prog_id;
1497 int err;
1498 u8 mode;
1499
1500 xdp = nla_nest_start_noflag(skb, IFLA_XDP);
1501 if (!xdp)
1502 return -EMSGSIZE;
1503
1504 prog_id = 0;
1505 mode = XDP_ATTACHED_NONE;
1506 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
1507 IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
1508 if (err)
1509 goto err_cancel;
1510 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
1511 IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
1512 if (err)
1513 goto err_cancel;
1514 err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
1515 IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
1516 if (err)
1517 goto err_cancel;
1518
1519 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
1520 if (err)
1521 goto err_cancel;
1522
1523 if (prog_id && mode != XDP_ATTACHED_MULTI) {
1524 err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
1525 if (err)
1526 goto err_cancel;
1527 }
1528
1529 nla_nest_end(skb, xdp);
1530 return 0;
1531
1532 err_cancel:
1533 nla_nest_cancel(skb, xdp);
1534 return err;
1535 }
1536
rtnl_get_event(unsigned long event)1537 static u32 rtnl_get_event(unsigned long event)
1538 {
1539 u32 rtnl_event_type = IFLA_EVENT_NONE;
1540
1541 switch (event) {
1542 case NETDEV_REBOOT:
1543 rtnl_event_type = IFLA_EVENT_REBOOT;
1544 break;
1545 case NETDEV_FEAT_CHANGE:
1546 rtnl_event_type = IFLA_EVENT_FEATURES;
1547 break;
1548 case NETDEV_BONDING_FAILOVER:
1549 rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
1550 break;
1551 case NETDEV_NOTIFY_PEERS:
1552 rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
1553 break;
1554 case NETDEV_RESEND_IGMP:
1555 rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
1556 break;
1557 case NETDEV_CHANGEINFODATA:
1558 rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
1559 break;
1560 default:
1561 break;
1562 }
1563
1564 return rtnl_event_type;
1565 }
1566
put_master_ifindex(struct sk_buff * skb,struct net_device * dev)1567 static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
1568 {
1569 const struct net_device *upper_dev;
1570 int ret = 0;
1571
1572 rcu_read_lock();
1573
1574 upper_dev = netdev_master_upper_dev_get_rcu(dev);
1575 if (upper_dev)
1576 ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);
1577
1578 rcu_read_unlock();
1579 return ret;
1580 }
1581
nla_put_iflink(struct sk_buff * skb,const struct net_device * dev,bool force)1582 static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
1583 bool force)
1584 {
1585 int ifindex = dev_get_iflink(dev);
1586
1587 if (force || dev->ifindex != ifindex)
1588 return nla_put_u32(skb, IFLA_LINK, ifindex);
1589
1590 return 0;
1591 }
1592
nla_put_ifalias(struct sk_buff * skb,struct net_device * dev)1593 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
1594 struct net_device *dev)
1595 {
1596 char buf[IFALIASZ];
1597 int ret;
1598
1599 ret = dev_get_alias(dev, buf, sizeof(buf));
1600 return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
1601 }
1602
rtnl_fill_link_netnsid(struct sk_buff * skb,const struct net_device * dev,struct net * src_net,gfp_t gfp)1603 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
1604 const struct net_device *dev,
1605 struct net *src_net, gfp_t gfp)
1606 {
1607 bool put_iflink = false;
1608
1609 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
1610 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
1611
1612 if (!net_eq(dev_net(dev), link_net)) {
1613 int id = peernet2id_alloc(src_net, link_net, gfp);
1614
1615 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
1616 return -EMSGSIZE;
1617
1618 put_iflink = true;
1619 }
1620 }
1621
1622 return nla_put_iflink(skb, dev, put_iflink);
1623 }
1624
rtnl_fill_link_af(struct sk_buff * skb,const struct net_device * dev,u32 ext_filter_mask)1625 static int rtnl_fill_link_af(struct sk_buff *skb,
1626 const struct net_device *dev,
1627 u32 ext_filter_mask)
1628 {
1629 const struct rtnl_af_ops *af_ops;
1630 struct nlattr *af_spec;
1631
1632 af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
1633 if (!af_spec)
1634 return -EMSGSIZE;
1635
1636 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
1637 struct nlattr *af;
1638 int err;
1639
1640 if (!af_ops->fill_link_af)
1641 continue;
1642
1643 af = nla_nest_start_noflag(skb, af_ops->family);
1644 if (!af)
1645 return -EMSGSIZE;
1646
1647 err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1648 /*
1649 * Caller may return ENODATA to indicate that there
1650 * was no data to be dumped. This is not an error, it
1651 * means we should trim the attribute header and
1652 * continue.
1653 */
1654 if (err == -ENODATA)
1655 nla_nest_cancel(skb, af);
1656 else if (err < 0)
1657 return -EMSGSIZE;
1658
1659 nla_nest_end(skb, af);
1660 }
1661
1662 nla_nest_end(skb, af_spec);
1663 return 0;
1664 }
1665
rtnl_fill_alt_ifnames(struct sk_buff * skb,const struct net_device * dev)1666 static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
1667 const struct net_device *dev)
1668 {
1669 struct netdev_name_node *name_node;
1670 int count = 0;
1671
1672 list_for_each_entry(name_node, &dev->name_node->list, list) {
1673 if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
1674 return -EMSGSIZE;
1675 count++;
1676 }
1677 return count;
1678 }
1679
rtnl_fill_prop_list(struct sk_buff * skb,const struct net_device * dev)1680 static int rtnl_fill_prop_list(struct sk_buff *skb,
1681 const struct net_device *dev)
1682 {
1683 struct nlattr *prop_list;
1684 int ret;
1685
1686 prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
1687 if (!prop_list)
1688 return -EMSGSIZE;
1689
1690 ret = rtnl_fill_alt_ifnames(skb, dev);
1691 if (ret <= 0)
1692 goto nest_cancel;
1693
1694 nla_nest_end(skb, prop_list);
1695 return 0;
1696
1697 nest_cancel:
1698 nla_nest_cancel(skb, prop_list);
1699 return ret;
1700 }
1701
rtnl_fill_proto_down(struct sk_buff * skb,const struct net_device * dev)1702 static int rtnl_fill_proto_down(struct sk_buff *skb,
1703 const struct net_device *dev)
1704 {
1705 struct nlattr *pr;
1706 u32 preason;
1707
1708 if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1709 goto nla_put_failure;
1710
1711 preason = dev->proto_down_reason;
1712 if (!preason)
1713 return 0;
1714
1715 pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
1716 if (!pr)
1717 return -EMSGSIZE;
1718
1719 if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
1720 nla_nest_cancel(skb, pr);
1721 goto nla_put_failure;
1722 }
1723
1724 nla_nest_end(skb, pr);
1725 return 0;
1726
1727 nla_put_failure:
1728 return -EMSGSIZE;
1729 }
1730
rtnl_fill_ifinfo(struct sk_buff * skb,struct net_device * dev,struct net * src_net,int type,u32 pid,u32 seq,u32 change,unsigned int flags,u32 ext_filter_mask,u32 event,int * new_nsid,int new_ifindex,int tgt_netnsid,gfp_t gfp)1731 static int rtnl_fill_ifinfo(struct sk_buff *skb,
1732 struct net_device *dev, struct net *src_net,
1733 int type, u32 pid, u32 seq, u32 change,
1734 unsigned int flags, u32 ext_filter_mask,
1735 u32 event, int *new_nsid, int new_ifindex,
1736 int tgt_netnsid, gfp_t gfp)
1737 {
1738 struct ifinfomsg *ifm;
1739 struct nlmsghdr *nlh;
1740 struct Qdisc *qdisc;
1741
1742 ASSERT_RTNL();
1743 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
1744 if (nlh == NULL)
1745 return -EMSGSIZE;
1746
1747 ifm = nlmsg_data(nlh);
1748 ifm->ifi_family = AF_UNSPEC;
1749 ifm->__ifi_pad = 0;
1750 ifm->ifi_type = dev->type;
1751 ifm->ifi_index = dev->ifindex;
1752 ifm->ifi_flags = dev_get_flags(dev);
1753 ifm->ifi_change = change;
1754
1755 if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
1756 goto nla_put_failure;
1757
1758 qdisc = rtnl_dereference(dev->qdisc);
1759 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
1760 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
1761 nla_put_u8(skb, IFLA_OPERSTATE,
1762 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
1763 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
1764 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
1765 nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
1766 nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
1767 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
1768 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
1769 nla_put_u32(skb, IFLA_ALLMULTI, dev->allmulti) ||
1770 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
1771 nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
1772 nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
1773 nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
1774 nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
1775 nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
1776 #ifdef CONFIG_RPS
1777 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1778 #endif
1779 put_master_ifindex(skb, dev) ||
1780 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
1781 (qdisc &&
1782 nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
1783 nla_put_ifalias(skb, dev) ||
1784 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
1785 atomic_read(&dev->carrier_up_count) +
1786 atomic_read(&dev->carrier_down_count)) ||
1787 nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
1788 atomic_read(&dev->carrier_up_count)) ||
1789 nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
1790 atomic_read(&dev->carrier_down_count)))
1791 goto nla_put_failure;
1792
1793 if (rtnl_fill_proto_down(skb, dev))
1794 goto nla_put_failure;
1795
1796 if (event != IFLA_EVENT_NONE) {
1797 if (nla_put_u32(skb, IFLA_EVENT, event))
1798 goto nla_put_failure;
1799 }
1800
1801 if (rtnl_fill_link_ifmap(skb, dev))
1802 goto nla_put_failure;
1803
1804 if (dev->addr_len) {
1805 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
1806 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
1807 goto nla_put_failure;
1808 }
1809
1810 if (rtnl_phys_port_id_fill(skb, dev))
1811 goto nla_put_failure;
1812
1813 if (rtnl_phys_port_name_fill(skb, dev))
1814 goto nla_put_failure;
1815
1816 if (rtnl_phys_switch_id_fill(skb, dev))
1817 goto nla_put_failure;
1818
1819 if (rtnl_fill_stats(skb, dev))
1820 goto nla_put_failure;
1821
1822 if (rtnl_fill_vf(skb, dev, ext_filter_mask))
1823 goto nla_put_failure;
1824
1825 if (rtnl_port_fill(skb, dev, ext_filter_mask))
1826 goto nla_put_failure;
1827
1828 if (rtnl_xdp_fill(skb, dev))
1829 goto nla_put_failure;
1830
1831 if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
1832 if (rtnl_link_fill(skb, dev) < 0)
1833 goto nla_put_failure;
1834 }
1835
1836 if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
1837 goto nla_put_failure;
1838
1839 if (new_nsid &&
1840 nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
1841 goto nla_put_failure;
1842 if (new_ifindex &&
1843 nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
1844 goto nla_put_failure;
1845
1846 if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
1847 nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
1848 goto nla_put_failure;
1849
1850 rcu_read_lock();
1851 if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
1852 goto nla_put_failure_rcu;
1853 rcu_read_unlock();
1854
1855 if (rtnl_fill_prop_list(skb, dev))
1856 goto nla_put_failure;
1857
1858 if (dev->dev.parent &&
1859 nla_put_string(skb, IFLA_PARENT_DEV_NAME,
1860 dev_name(dev->dev.parent)))
1861 goto nla_put_failure;
1862
1863 if (dev->dev.parent && dev->dev.parent->bus &&
1864 nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
1865 dev->dev.parent->bus->name))
1866 goto nla_put_failure;
1867
1868 nlmsg_end(skb, nlh);
1869 return 0;
1870
1871 nla_put_failure_rcu:
1872 rcu_read_unlock();
1873 nla_put_failure:
1874 nlmsg_cancel(skb, nlh);
1875 return -EMSGSIZE;
1876 }
1877
1878 static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1879 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
1880 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1881 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1882 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
1883 [IFLA_MTU] = { .type = NLA_U32 },
1884 [IFLA_LINK] = { .type = NLA_U32 },
1885 [IFLA_MASTER] = { .type = NLA_U32 },
1886 [IFLA_CARRIER] = { .type = NLA_U8 },
1887 [IFLA_TXQLEN] = { .type = NLA_U32 },
1888 [IFLA_WEIGHT] = { .type = NLA_U32 },
1889 [IFLA_OPERSTATE] = { .type = NLA_U8 },
1890 [IFLA_LINKMODE] = { .type = NLA_U8 },
1891 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1892 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1893 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1894 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1895 * allow 0-length string (needed to remove an alias).
1896 */
1897 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1898 [IFLA_VFINFO_LIST] = { .type = NLA_NESTED },
1899 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1900 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1901 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1902 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1903 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1904 [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
1905 [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
1906 [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
1907 [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 },
1908 [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1909 [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
1910 [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
1911 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
1912 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
1913 [IFLA_XDP] = { .type = NLA_NESTED },
1914 [IFLA_EVENT] = { .type = NLA_U32 },
1915 [IFLA_GROUP] = { .type = NLA_U32 },
1916 [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
1917 [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
1918 [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
1919 [IFLA_MIN_MTU] = { .type = NLA_U32 },
1920 [IFLA_MAX_MTU] = { .type = NLA_U32 },
1921 [IFLA_PROP_LIST] = { .type = NLA_NESTED },
1922 [IFLA_ALT_IFNAME] = { .type = NLA_STRING,
1923 .len = ALTIFNAMSIZ - 1 },
1924 [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT },
1925 [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
1926 [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
1927 [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
1928 [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
1929 [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
1930 [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
1931 [IFLA_ALLMULTI] = { .type = NLA_REJECT },
1932 };
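/* Illustrative note (a sketch, not present in the original source): the
 * policy above is what nla_parse()/nlmsg_parse() enforce on incoming
 * attributes.  IFLA_MTU must carry a u32, while IFLA_IFALIAS is declared
 * NLA_BINARY so a zero-length payload stays valid and clears the alias:
 *
 *	nla_put_u32(skb, IFLA_MTU, 1500);	(accepted: 4-byte payload)
 *	nla_put(skb, IFLA_IFALIAS, 0, NULL);	(accepted: removes the alias)
 *
 * A malformed IFLA_MTU payload fails validation instead of being applied.
 */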
1933
1934 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1935 [IFLA_INFO_KIND] = { .type = NLA_STRING },
1936 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
1937 [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
1938 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1939 };
1940
1941 static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1942 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
1943 [IFLA_VF_BROADCAST] = { .type = NLA_REJECT },
1944 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
1945 [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
1946 [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
1947 [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
1948 [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
1949 [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
1950 [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
1951 [IFLA_VF_STATS] = { .type = NLA_NESTED },
1952 [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
1953 [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1954 [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
1955 };
1956
1957 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
1958 [IFLA_PORT_VF] = { .type = NLA_U32 },
1959 [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
1960 .len = PORT_PROFILE_MAX },
1961 [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
1962 .len = PORT_UUID_MAX },
1963 [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
1964 .len = PORT_UUID_MAX },
1965 [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
1966 [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
1967
1968 /* Unused, but we need to keep it here since user space could
1969 * fill it. It's also broken with regard to NLA_BINARY use in
1970 * combination with structs.
1971 */
1972 [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
1973 .len = sizeof(struct ifla_port_vsi) },
1974 };
1975
1976 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
1977 [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
1978 [IFLA_XDP_FD] = { .type = NLA_S32 },
1979 [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
1980 [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
1981 [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
1982 [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
1983 };
1984
1985 static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
1986 {
1987 const struct rtnl_link_ops *ops = NULL;
1988 struct nlattr *linfo[IFLA_INFO_MAX + 1];
1989
1990 if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
1991 return NULL;
1992
1993 if (linfo[IFLA_INFO_KIND]) {
1994 char kind[MODULE_NAME_LEN];
1995
1996 nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
1997 ops = rtnl_link_ops_get(kind);
1998 }
1999
2000 return ops;
2001 }
2002
2003 static bool link_master_filtered(struct net_device *dev, int master_idx)
2004 {
2005 struct net_device *master;
2006
2007 if (!master_idx)
2008 return false;
2009
2010 master = netdev_master_upper_dev_get(dev);
2011
2012 /* 0 is already used to denote that IFLA_MASTER wasn't passed, so we
2013 * need another invalid ifindex value (-1) to denote "no master".
2014 */
2015 if (master_idx == -1)
2016 return !!master;
2017
2018 if (!master || master->ifindex != master_idx)
2019 return true;
2020
2021 return false;
2022 }
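/* Illustrative example (a sketch, not from this file; br_ifindex is a
 * placeholder): a dump request can filter on the master device via
 * IFLA_MASTER when built by hand:
 *
 *	nla_put_u32(skb, IFLA_MASTER, br_ifindex);	(only that master's slaves)
 *	nla_put_u32(skb, IFLA_MASTER, -1);		(only masterless devices)
 *
 * link_master_filtered() returns true for every device such a request
 * should skip.
 */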
2023
2024 static bool link_kind_filtered(const struct net_device *dev,
2025 const struct rtnl_link_ops *kind_ops)
2026 {
2027 if (kind_ops && dev->rtnl_link_ops != kind_ops)
2028 return true;
2029
2030 return false;
2031 }
2032
2033 static bool link_dump_filtered(struct net_device *dev,
2034 int master_idx,
2035 const struct rtnl_link_ops *kind_ops)
2036 {
2037 if (link_master_filtered(dev, master_idx) ||
2038 link_kind_filtered(dev, kind_ops))
2039 return true;
2040
2041 return false;
2042 }
2043
2044 /**
2045 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
2046 * @sk: netlink socket
2047 * @netnsid: network namespace identifier
2048 *
2049 * Returns the network namespace identified by netnsid on success or an error
2050 * pointer on failure.
2051 */
2052 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
2053 {
2054 struct net *net;
2055
2056 net = get_net_ns_by_id(sock_net(sk), netnsid);
2057 if (!net)
2058 return ERR_PTR(-EINVAL);
2059
2060 /* For now, the caller is required to have CAP_NET_ADMIN in
2061 * the user namespace owning the target net ns.
2062 */
2063 if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
2064 put_net(net);
2065 return ERR_PTR(-EACCES);
2066 }
2067 return net;
2068 }
2069 EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
2070
2071 static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
2072 bool strict_check, struct nlattr **tb,
2073 struct netlink_ext_ack *extack)
2074 {
2075 int hdrlen;
2076
2077 if (strict_check) {
2078 struct ifinfomsg *ifm;
2079
2080 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2081 NL_SET_ERR_MSG(extack, "Invalid header for link dump");
2082 return -EINVAL;
2083 }
2084
2085 ifm = nlmsg_data(nlh);
2086 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2087 ifm->ifi_change) {
2088 NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
2089 return -EINVAL;
2090 }
2091 if (ifm->ifi_index) {
2092 NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
2093 return -EINVAL;
2094 }
2095
2096 return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
2097 IFLA_MAX, ifla_policy,
2098 extack);
2099 }
2100
2101 /* A hack to preserve the kernel<->userspace interface.
2102 * The correct header is ifinfomsg, consistent with rtnl_getlink.
2103 * However, before Linux v3.9 the code here assumed rtgenmsg, which is
2104 * what iproute2 < v3.9.0 used.
2105 * The old iproute2 can be detected: even with the IFLA_EXT_MASK
2106 * attribute included, its netlink message is shorter than struct ifinfomsg.
2107 */
2108 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2109 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2110
2111 return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
2112 extack);
2113 }
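/* Illustrative example (a sketch, not from this file): the legacy dump
 * request tolerated above carries only a struct rtgenmsg header:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct rtgenmsg g;
 *	} req = {
 *		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 *		.nlh.nlmsg_type	 = RTM_GETLINK,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.g.rtgen_family	 = AF_PACKET,
 *	};
 *
 * Such a message is shorter than one with a full struct ifinfomsg, which
 * is how pre-v3.9 iproute2 binaries are detected.
 */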
2114
2115 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2116 {
2117 struct netlink_ext_ack *extack = cb->extack;
2118 const struct nlmsghdr *nlh = cb->nlh;
2119 struct net *net = sock_net(skb->sk);
2120 struct net *tgt_net = net;
2121 int h, s_h;
2122 int idx = 0, s_idx;
2123 struct net_device *dev;
2124 struct hlist_head *head;
2125 struct nlattr *tb[IFLA_MAX+1];
2126 u32 ext_filter_mask = 0;
2127 const struct rtnl_link_ops *kind_ops = NULL;
2128 unsigned int flags = NLM_F_MULTI;
2129 int master_idx = 0;
2130 int netnsid = -1;
2131 int err, i;
2132
2133 s_h = cb->args[0];
2134 s_idx = cb->args[1];
2135
2136 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2137 if (err < 0) {
2138 if (cb->strict_check)
2139 return err;
2140
2141 goto walk_entries;
2142 }
2143
2144 for (i = 0; i <= IFLA_MAX; ++i) {
2145 if (!tb[i])
2146 continue;
2147
2148 /* new attributes should only be added with strict checking */
2149 switch (i) {
2150 case IFLA_TARGET_NETNSID:
2151 netnsid = nla_get_s32(tb[i]);
2152 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2153 if (IS_ERR(tgt_net)) {
2154 NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2155 return PTR_ERR(tgt_net);
2156 }
2157 break;
2158 case IFLA_EXT_MASK:
2159 ext_filter_mask = nla_get_u32(tb[i]);
2160 break;
2161 case IFLA_MASTER:
2162 master_idx = nla_get_u32(tb[i]);
2163 break;
2164 case IFLA_LINKINFO:
2165 kind_ops = linkinfo_to_kind_ops(tb[i]);
2166 break;
2167 default:
2168 if (cb->strict_check) {
2169 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2170 return -EINVAL;
2171 }
2172 }
2173 }
2174
2175 if (master_idx || kind_ops)
2176 flags |= NLM_F_DUMP_FILTERED;
2177
2178 walk_entries:
2179 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2180 idx = 0;
2181 head = &tgt_net->dev_index_head[h];
2182 hlist_for_each_entry(dev, head, index_hlist) {
2183 if (link_dump_filtered(dev, master_idx, kind_ops))
2184 goto cont;
2185 if (idx < s_idx)
2186 goto cont;
2187 err = rtnl_fill_ifinfo(skb, dev, net,
2188 RTM_NEWLINK,
2189 NETLINK_CB(cb->skb).portid,
2190 nlh->nlmsg_seq, 0, flags,
2191 ext_filter_mask, 0, NULL, 0,
2192 netnsid, GFP_KERNEL);
2193
2194 if (err < 0) {
2195 if (likely(skb->len))
2196 goto out;
2197
2198 goto out_err;
2199 }
2200 cont:
2201 idx++;
2202 }
2203 }
2204 out:
2205 err = skb->len;
2206 out_err:
2207 cb->args[1] = idx;
2208 cb->args[0] = h;
2209 cb->seq = tgt_net->dev_base_seq;
2210 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2211 if (netnsid >= 0)
2212 put_net(tgt_net);
2213
2214 return err;
2215 }
2216
2217 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
2218 struct netlink_ext_ack *exterr)
2219 {
2220 return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
2221 exterr);
2222 }
2223 EXPORT_SYMBOL(rtnl_nla_parse_ifla);
2224
2225 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2226 {
2227 struct net *net;
2228 /* Examine the link attributes and figure out which
2229 * network namespace we are talking about.
2230 */
2231 if (tb[IFLA_NET_NS_PID])
2232 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2233 else if (tb[IFLA_NET_NS_FD])
2234 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2235 else
2236 net = get_net(src_net);
2237 return net;
2238 }
2239 EXPORT_SYMBOL(rtnl_link_get_net);
2240
2241 /* Figure out which network namespace we are talking about by
2242 * examining the link attributes in the following order:
2243 *
2244 * 1. IFLA_NET_NS_PID
2245 * 2. IFLA_NET_NS_FD
2246 * 3. IFLA_TARGET_NETNSID
2247 */
2248 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2249 struct nlattr *tb[])
2250 {
2251 struct net *net;
2252
2253 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2254 return rtnl_link_get_net(src_net, tb);
2255
2256 if (!tb[IFLA_TARGET_NETNSID])
2257 return get_net(src_net);
2258
2259 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2260 if (!net)
2261 return ERR_PTR(-EINVAL);
2262
2263 return net;
2264 }
2265
2266 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2267 struct net *src_net,
2268 struct nlattr *tb[], int cap)
2269 {
2270 struct net *net;
2271
2272 net = rtnl_link_get_net_by_nlattr(src_net, tb);
2273 if (IS_ERR(net))
2274 return net;
2275
2276 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2277 put_net(net);
2278 return ERR_PTR(-EPERM);
2279 }
2280
2281 return net;
2282 }
2283
2284 /* Verify that rtnetlink requests do not pass additional properties
2285 * potentially referring to different network namespaces.
2286 */
2287 static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2288 struct netlink_ext_ack *extack,
2289 bool netns_id_only)
2290 {
2291
2292 if (netns_id_only) {
2293 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2294 return 0;
2295
2296 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2297 return -EOPNOTSUPP;
2298 }
2299
2300 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2301 goto invalid_attr;
2302
2303 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2304 goto invalid_attr;
2305
2306 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2307 goto invalid_attr;
2308
2309 return 0;
2310
2311 invalid_attr:
2312 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2313 return -EINVAL;
2314 }
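/* Illustrative example (a sketch, not from this file): a request that
 * carries more than one namespace-identifying attribute, e.g. both
 *
 *	nla_put_u32(skb, IFLA_NET_NS_PID, pid);
 *	nla_put_s32(skb, IFLA_TARGET_NETNSID, nsid);
 *
 * in one message, is rejected above with -EINVAL and the extack message
 * "multiple netns identifying attributes specified".
 */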
2315
2316 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2317 int max_tx_rate)
2318 {
2319 const struct net_device_ops *ops = dev->netdev_ops;
2320
2321 if (!ops->ndo_set_vf_rate)
2322 return -EOPNOTSUPP;
2323 if (max_tx_rate && max_tx_rate < min_tx_rate)
2324 return -EINVAL;
2325
2326 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2327 }
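/* Note (an observation, not from this file): a max_tx_rate of 0 skips the
 * ordering check above; drivers commonly treat 0 as "no rate limit".
 */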
2328
2329 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2330 struct netlink_ext_ack *extack)
2331 {
2332 if (dev) {
2333 if (tb[IFLA_ADDRESS] &&
2334 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2335 return -EINVAL;
2336
2337 if (tb[IFLA_BROADCAST] &&
2338 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2339 return -EINVAL;
2340 }
2341
2342 if (tb[IFLA_AF_SPEC]) {
2343 struct nlattr *af;
2344 int rem, err;
2345
2346 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2347 const struct rtnl_af_ops *af_ops;
2348
2349 af_ops = rtnl_af_lookup(nla_type(af));
2350 if (!af_ops)
2351 return -EAFNOSUPPORT;
2352
2353 if (!af_ops->set_link_af)
2354 return -EOPNOTSUPP;
2355
2356 if (af_ops->validate_link_af) {
2357 err = af_ops->validate_link_af(dev, af, extack);
2358 if (err < 0)
2359 return err;
2360 }
2361 }
2362 }
2363
2364 return 0;
2365 }
2366
2367 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2368 int guid_type)
2369 {
2370 const struct net_device_ops *ops = dev->netdev_ops;
2371
2372 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2373 }
2374
2375 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2376 {
2377 if (dev->type != ARPHRD_INFINIBAND)
2378 return -EOPNOTSUPP;
2379
2380 return handle_infiniband_guid(dev, ivt, guid_type);
2381 }
2382
2383 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2384 {
2385 const struct net_device_ops *ops = dev->netdev_ops;
2386 int err = -EINVAL;
2387
2388 if (tb[IFLA_VF_MAC]) {
2389 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2390
2391 if (ivm->vf >= INT_MAX)
2392 return -EINVAL;
2393 err = -EOPNOTSUPP;
2394 if (ops->ndo_set_vf_mac)
2395 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2396 ivm->mac);
2397 if (err < 0)
2398 return err;
2399 }
2400
2401 if (tb[IFLA_VF_VLAN]) {
2402 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2403
2404 if (ivv->vf >= INT_MAX)
2405 return -EINVAL;
2406 err = -EOPNOTSUPP;
2407 if (ops->ndo_set_vf_vlan)
2408 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2409 ivv->qos,
2410 htons(ETH_P_8021Q));
2411 if (err < 0)
2412 return err;
2413 }
2414
2415 if (tb[IFLA_VF_VLAN_LIST]) {
2416 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2417 struct nlattr *attr;
2418 int rem, len = 0;
2419
2420 err = -EOPNOTSUPP;
2421 if (!ops->ndo_set_vf_vlan)
2422 return err;
2423
2424 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2425 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2426 nla_len(attr) < NLA_HDRLEN) {
2427 return -EINVAL;
2428 }
2429 if (len >= MAX_VLAN_LIST_LEN)
2430 return -EOPNOTSUPP;
2431 ivvl[len] = nla_data(attr);
2432
2433 len++;
2434 }
2435 if (len == 0)
2436 return -EINVAL;
2437
2438 if (ivvl[0]->vf >= INT_MAX)
2439 return -EINVAL;
2440 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2441 ivvl[0]->qos, ivvl[0]->vlan_proto);
2442 if (err < 0)
2443 return err;
2444 }
2445
2446 if (tb[IFLA_VF_TX_RATE]) {
2447 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2448 struct ifla_vf_info ivf;
2449
2450 if (ivt->vf >= INT_MAX)
2451 return -EINVAL;
2452 err = -EOPNOTSUPP;
2453 if (ops->ndo_get_vf_config)
2454 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2455 if (err < 0)
2456 return err;
2457
2458 err = rtnl_set_vf_rate(dev, ivt->vf,
2459 ivf.min_tx_rate, ivt->rate);
2460 if (err < 0)
2461 return err;
2462 }
2463
2464 if (tb[IFLA_VF_RATE]) {
2465 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2466
2467 if (ivt->vf >= INT_MAX)
2468 return -EINVAL;
2469
2470 err = rtnl_set_vf_rate(dev, ivt->vf,
2471 ivt->min_tx_rate, ivt->max_tx_rate);
2472 if (err < 0)
2473 return err;
2474 }
2475
2476 if (tb[IFLA_VF_SPOOFCHK]) {
2477 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2478
2479 if (ivs->vf >= INT_MAX)
2480 return -EINVAL;
2481 err = -EOPNOTSUPP;
2482 if (ops->ndo_set_vf_spoofchk)
2483 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2484 ivs->setting);
2485 if (err < 0)
2486 return err;
2487 }
2488
2489 if (tb[IFLA_VF_LINK_STATE]) {
2490 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2491
2492 if (ivl->vf >= INT_MAX)
2493 return -EINVAL;
2494 err = -EOPNOTSUPP;
2495 if (ops->ndo_set_vf_link_state)
2496 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2497 ivl->link_state);
2498 if (err < 0)
2499 return err;
2500 }
2501
2502 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2503 struct ifla_vf_rss_query_en *ivrssq_en;
2504
2505 err = -EOPNOTSUPP;
2506 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2507 if (ivrssq_en->vf >= INT_MAX)
2508 return -EINVAL;
2509 if (ops->ndo_set_vf_rss_query_en)
2510 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2511 ivrssq_en->setting);
2512 if (err < 0)
2513 return err;
2514 }
2515
2516 if (tb[IFLA_VF_TRUST]) {
2517 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2518
2519 if (ivt->vf >= INT_MAX)
2520 return -EINVAL;
2521 err = -EOPNOTSUPP;
2522 if (ops->ndo_set_vf_trust)
2523 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2524 if (err < 0)
2525 return err;
2526 }
2527
2528 if (tb[IFLA_VF_IB_NODE_GUID]) {
2529 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2530
2531 if (ivt->vf >= INT_MAX)
2532 return -EINVAL;
2533 if (!ops->ndo_set_vf_guid)
2534 return -EOPNOTSUPP;
2535 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2536 }
2537
2538 if (tb[IFLA_VF_IB_PORT_GUID]) {
2539 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2540
2541 if (ivt->vf >= INT_MAX)
2542 return -EINVAL;
2543 if (!ops->ndo_set_vf_guid)
2544 return -EOPNOTSUPP;
2545
2546 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2547 }
2548
2549 return err;
2550 }
2551
2552 static int do_set_master(struct net_device *dev, int ifindex,
2553 struct netlink_ext_ack *extack)
2554 {
2555 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2556 const struct net_device_ops *ops;
2557 int err;
2558
2559 if (upper_dev) {
2560 if (upper_dev->ifindex == ifindex)
2561 return 0;
2562 ops = upper_dev->netdev_ops;
2563 if (ops->ndo_del_slave) {
2564 err = ops->ndo_del_slave(upper_dev, dev);
2565 if (err)
2566 return err;
2567 } else {
2568 return -EOPNOTSUPP;
2569 }
2570 }
2571
2572 if (ifindex) {
2573 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2574 if (!upper_dev)
2575 return -EINVAL;
2576 ops = upper_dev->netdev_ops;
2577 if (ops->ndo_add_slave) {
2578 err = ops->ndo_add_slave(upper_dev, dev, extack);
2579 if (err)
2580 return err;
2581 } else {
2582 return -EOPNOTSUPP;
2583 }
2584 }
2585 return 0;
2586 }
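/* Illustrative example (a sketch, not from this file; eth0/br0 are
 * placeholder names): IFLA_MASTER drives the usual enslave/release
 * operations, e.g. from iproute2:
 *
 *	ip link set eth0 master br0	(IFLA_MASTER = br0's ifindex)
 *	ip link set eth0 nomaster	(IFLA_MASTER = 0)
 *
 * do_set_master() first releases any current master, then enslaves to the
 * new one when a non-zero ifindex is given.
 */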
2587
2588 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2589 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
2590 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
2591 };
2592
2593 static int do_set_proto_down(struct net_device *dev,
2594 struct nlattr *nl_proto_down,
2595 struct nlattr *nl_proto_down_reason,
2596 struct netlink_ext_ack *extack)
2597 {
2598 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2599 unsigned long mask = 0;
2600 u32 value;
2601 bool proto_down;
2602 int err;
2603
2604 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
2605 NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2606 return -EOPNOTSUPP;
2607 }
2608
2609 if (nl_proto_down_reason) {
2610 err = nla_parse_nested_deprecated(pdreason,
2611 IFLA_PROTO_DOWN_REASON_MAX,
2612 nl_proto_down_reason,
2613 ifla_proto_down_reason_policy,
2614 NULL);
2615 if (err < 0)
2616 return err;
2617
2618 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2619 NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2620 return -EINVAL;
2621 }
2622
2623 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2624
2625 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2626 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2627
2628 dev_change_proto_down_reason(dev, mask, value);
2629 }
2630
2631 if (nl_proto_down) {
2632 proto_down = nla_get_u8(nl_proto_down);
2633
2634 /* Don't turn off protodown if there are active reasons */
2635 if (!proto_down && dev->proto_down_reason) {
2636 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2637 return -EBUSY;
2638 }
2639 err = dev_change_proto_down(dev,
2640 proto_down);
2641 if (err)
2642 return err;
2643 }
2644
2645 return 0;
2646 }
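/* Illustrative example (a sketch, assuming an iproute2 with protodown
 * support; eth0 is a placeholder):
 *
 *	ip link set dev eth0 protodown_reason 0 on
 *	ip link set dev eth0 protodown on
 *	ip link set dev eth0 protodown off		(fails with -EBUSY)
 *	ip link set dev eth0 protodown_reason 0 off
 *	ip link set dev eth0 protodown off		(now succeeds)
 *
 * The reason bits must be cleared before protodown itself may be cleared,
 * as enforced above.
 */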
2647
2648 #define DO_SETLINK_MODIFIED 0x01
2649 /* notify flag means notify + modified. */
2650 #define DO_SETLINK_NOTIFY 0x03
2651 static int do_setlink(const struct sk_buff *skb,
2652 struct net_device *dev, struct ifinfomsg *ifm,
2653 struct netlink_ext_ack *extack,
2654 struct nlattr **tb, int status)
2655 {
2656 const struct net_device_ops *ops = dev->netdev_ops;
2657 char ifname[IFNAMSIZ];
2658 int err;
2659
2660 err = validate_linkmsg(dev, tb, extack);
2661 if (err < 0)
2662 return err;
2663
2664 if (tb[IFLA_IFNAME])
2665 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2666 else
2667 ifname[0] = '\0';
2668
2669 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2670 const char *pat = ifname[0] ? ifname : NULL;
2671 struct net *net;
2672 int new_ifindex;
2673
2674 net = rtnl_link_get_net_capable(skb, dev_net(dev),
2675 tb, CAP_NET_ADMIN);
2676 if (IS_ERR(net)) {
2677 err = PTR_ERR(net);
2678 goto errout;
2679 }
2680
2681 if (tb[IFLA_NEW_IFINDEX])
2682 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
2683 else
2684 new_ifindex = 0;
2685
2686 err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
2687 put_net(net);
2688 if (err)
2689 goto errout;
2690 status |= DO_SETLINK_MODIFIED;
2691 }
2692
2693 if (tb[IFLA_MAP]) {
2694 struct rtnl_link_ifmap *u_map;
2695 struct ifmap k_map;
2696
2697 if (!ops->ndo_set_config) {
2698 err = -EOPNOTSUPP;
2699 goto errout;
2700 }
2701
2702 if (!netif_device_present(dev)) {
2703 err = -ENODEV;
2704 goto errout;
2705 }
2706
2707 u_map = nla_data(tb[IFLA_MAP]);
2708 k_map.mem_start = (unsigned long) u_map->mem_start;
2709 k_map.mem_end = (unsigned long) u_map->mem_end;
2710 k_map.base_addr = (unsigned short) u_map->base_addr;
2711 k_map.irq = (unsigned char) u_map->irq;
2712 k_map.dma = (unsigned char) u_map->dma;
2713 k_map.port = (unsigned char) u_map->port;
2714
2715 err = ops->ndo_set_config(dev, &k_map);
2716 if (err < 0)
2717 goto errout;
2718
2719 status |= DO_SETLINK_NOTIFY;
2720 }
2721
2722 if (tb[IFLA_ADDRESS]) {
2723 struct sockaddr *sa;
2724 int len;
2725
2726 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2727 sizeof(*sa));
2728 sa = kmalloc(len, GFP_KERNEL);
2729 if (!sa) {
2730 err = -ENOMEM;
2731 goto errout;
2732 }
2733 sa->sa_family = dev->type;
2734 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2735 dev->addr_len);
2736 err = dev_set_mac_address_user(dev, sa, extack);
2737 kfree(sa);
2738 if (err)
2739 goto errout;
2740 status |= DO_SETLINK_MODIFIED;
2741 }
2742
2743 if (tb[IFLA_MTU]) {
2744 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2745 if (err < 0)
2746 goto errout;
2747 status |= DO_SETLINK_MODIFIED;
2748 }
2749
2750 if (tb[IFLA_GROUP]) {
2751 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2752 status |= DO_SETLINK_NOTIFY;
2753 }
2754
2755 /*
2756 * An interface selected by ifindex with an interface
2757 * name also provided implies that a name change has
2758 * been requested.
2759 */
2760 if (ifm->ifi_index > 0 && ifname[0]) {
2761 err = dev_change_name(dev, ifname);
2762 if (err < 0)
2763 goto errout;
2764 status |= DO_SETLINK_MODIFIED;
2765 }
2766
2767 if (tb[IFLA_IFALIAS]) {
2768 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2769 nla_len(tb[IFLA_IFALIAS]));
2770 if (err < 0)
2771 goto errout;
2772 status |= DO_SETLINK_NOTIFY;
2773 }
2774
2775 if (tb[IFLA_BROADCAST]) {
2776 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2777 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2778 }
2779
2780 if (tb[IFLA_MASTER]) {
2781 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2782 if (err)
2783 goto errout;
2784 status |= DO_SETLINK_MODIFIED;
2785 }
2786
2787 if (ifm->ifi_flags || ifm->ifi_change) {
2788 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2789 extack);
2790 if (err < 0)
2791 goto errout;
2792 }
2793
2794 if (tb[IFLA_CARRIER]) {
2795 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2796 if (err)
2797 goto errout;
2798 status |= DO_SETLINK_MODIFIED;
2799 }
2800
2801 if (tb[IFLA_TXQLEN]) {
2802 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2803
2804 err = dev_change_tx_queue_len(dev, value);
2805 if (err)
2806 goto errout;
2807 status |= DO_SETLINK_MODIFIED;
2808 }
2809
2810 if (tb[IFLA_GSO_MAX_SIZE]) {
2811 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2812
2813 if (max_size > dev->tso_max_size) {
2814 err = -EINVAL;
2815 goto errout;
2816 }
2817
2818 if (dev->gso_max_size ^ max_size) {
2819 netif_set_gso_max_size(dev, max_size);
2820 status |= DO_SETLINK_MODIFIED;
2821 }
2822 }
2823
2824 if (tb[IFLA_GSO_MAX_SEGS]) {
2825 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2826
2827 if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) {
2828 err = -EINVAL;
2829 goto errout;
2830 }
2831
2832 if (dev->gso_max_segs ^ max_segs) {
2833 netif_set_gso_max_segs(dev, max_segs);
2834 status |= DO_SETLINK_MODIFIED;
2835 }
2836 }
2837
2838 if (tb[IFLA_GRO_MAX_SIZE]) {
2839 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
2840
2841 if (dev->gro_max_size ^ gro_max_size) {
2842 netif_set_gro_max_size(dev, gro_max_size);
2843 status |= DO_SETLINK_MODIFIED;
2844 }
2845 }
2846
2847 if (tb[IFLA_OPERSTATE])
2848 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2849
2850 if (tb[IFLA_LINKMODE]) {
2851 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2852
2853 write_lock(&dev_base_lock);
2854 if (dev->link_mode ^ value)
2855 status |= DO_SETLINK_NOTIFY;
2856 dev->link_mode = value;
2857 write_unlock(&dev_base_lock);
2858 }
2859
2860 if (tb[IFLA_VFINFO_LIST]) {
2861 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2862 struct nlattr *attr;
2863 int rem;
2864
2865 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2866 if (nla_type(attr) != IFLA_VF_INFO ||
2867 nla_len(attr) < NLA_HDRLEN) {
2868 err = -EINVAL;
2869 goto errout;
2870 }
2871 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2872 attr,
2873 ifla_vf_policy,
2874 NULL);
2875 if (err < 0)
2876 goto errout;
2877 err = do_setvfinfo(dev, vfinfo);
2878 if (err < 0)
2879 goto errout;
2880 status |= DO_SETLINK_NOTIFY;
2881 }
2882 }
2883 err = 0;
2884
2885 if (tb[IFLA_VF_PORTS]) {
2886 struct nlattr *port[IFLA_PORT_MAX+1];
2887 struct nlattr *attr;
2888 int vf;
2889 int rem;
2890
2891 err = -EOPNOTSUPP;
2892 if (!ops->ndo_set_vf_port)
2893 goto errout;
2894
2895 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2896 if (nla_type(attr) != IFLA_VF_PORT ||
2897 nla_len(attr) < NLA_HDRLEN) {
2898 err = -EINVAL;
2899 goto errout;
2900 }
2901 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2902 attr,
2903 ifla_port_policy,
2904 NULL);
2905 if (err < 0)
2906 goto errout;
2907 if (!port[IFLA_PORT_VF]) {
2908 err = -EOPNOTSUPP;
2909 goto errout;
2910 }
2911 vf = nla_get_u32(port[IFLA_PORT_VF]);
2912 err = ops->ndo_set_vf_port(dev, vf, port);
2913 if (err < 0)
2914 goto errout;
2915 status |= DO_SETLINK_NOTIFY;
2916 }
2917 }
2918 err = 0;
2919
2920 if (tb[IFLA_PORT_SELF]) {
2921 struct nlattr *port[IFLA_PORT_MAX+1];
2922
2923 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2924 tb[IFLA_PORT_SELF],
2925 ifla_port_policy, NULL);
2926 if (err < 0)
2927 goto errout;
2928
2929 err = -EOPNOTSUPP;
2930 if (ops->ndo_set_vf_port)
2931 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2932 if (err < 0)
2933 goto errout;
2934 status |= DO_SETLINK_NOTIFY;
2935 }
2936
2937 if (tb[IFLA_AF_SPEC]) {
2938 struct nlattr *af;
2939 int rem;
2940
2941 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2942 const struct rtnl_af_ops *af_ops;
2943
2944 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
2945
2946 err = af_ops->set_link_af(dev, af, extack);
2947 if (err < 0)
2948 goto errout;
2949
2950 status |= DO_SETLINK_NOTIFY;
2951 }
2952 }
2953 err = 0;
2954
2955 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
2956 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
2957 tb[IFLA_PROTO_DOWN_REASON], extack);
2958 if (err)
2959 goto errout;
2960 status |= DO_SETLINK_NOTIFY;
2961 }
2962
2963 if (tb[IFLA_XDP]) {
2964 struct nlattr *xdp[IFLA_XDP_MAX + 1];
2965 u32 xdp_flags = 0;
2966
2967 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
2968 tb[IFLA_XDP],
2969 ifla_xdp_policy, NULL);
2970 if (err < 0)
2971 goto errout;
2972
2973 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
2974 err = -EINVAL;
2975 goto errout;
2976 }
2977
2978 if (xdp[IFLA_XDP_FLAGS]) {
2979 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
2980 if (xdp_flags & ~XDP_FLAGS_MASK) {
2981 err = -EINVAL;
2982 goto errout;
2983 }
2984 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
2985 err = -EINVAL;
2986 goto errout;
2987 }
2988 }
2989
2990 if (xdp[IFLA_XDP_FD]) {
2991 int expected_fd = -1;
2992
2993 if (xdp_flags & XDP_FLAGS_REPLACE) {
2994 if (!xdp[IFLA_XDP_EXPECTED_FD]) {
2995 err = -EINVAL;
2996 goto errout;
2997 }
2998 expected_fd =
2999 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
3000 }
3001
3002 err = dev_change_xdp_fd(dev, extack,
3003 nla_get_s32(xdp[IFLA_XDP_FD]),
3004 expected_fd,
3005 xdp_flags);
3006 if (err)
3007 goto errout;
3008 status |= DO_SETLINK_NOTIFY;
3009 }
3010 }
3011
3012 errout:
3013 if (status & DO_SETLINK_MODIFIED) {
3014 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
3015 netdev_state_change(dev);
3016
3017 if (err < 0)
3018 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
3019 dev->name);
3020 }
3021
3022 return err;
3023 }
3024
3025 static struct net_device *rtnl_dev_get(struct net *net,
3026 struct nlattr *tb[])
3027 {
3028 char ifname[ALTIFNAMSIZ];
3029
3030 if (tb[IFLA_IFNAME])
3031 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3032 else if (tb[IFLA_ALT_IFNAME])
3033 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3034 else
3035 return NULL;
3036
3037 return __dev_get_by_name(net, ifname);
3038 }
3039
3040 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3041 struct netlink_ext_ack *extack)
3042 {
3043 struct net *net = sock_net(skb->sk);
3044 struct ifinfomsg *ifm;
3045 struct net_device *dev;
3046 int err;
3047 struct nlattr *tb[IFLA_MAX+1];
3048
3049 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3050 ifla_policy, extack);
3051 if (err < 0)
3052 goto errout;
3053
3054 err = rtnl_ensure_unique_netns(tb, extack, false);
3055 if (err < 0)
3056 goto errout;
3057
3058 err = -EINVAL;
3059 ifm = nlmsg_data(nlh);
3060 if (ifm->ifi_index > 0)
3061 dev = __dev_get_by_index(net, ifm->ifi_index);
3062 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3063 dev = rtnl_dev_get(net, tb);
3064 else
3065 goto errout;
3066
3067 if (dev == NULL) {
3068 err = -ENODEV;
3069 goto errout;
3070 }
3071
3072 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3073 errout:
3074 return err;
3075 }
3076
3077 static int rtnl_group_dellink(const struct net *net, int group)
3078 {
3079 struct net_device *dev, *aux;
3080 LIST_HEAD(list_kill);
3081 bool found = false;
3082
3083 if (!group)
3084 return -EPERM;
3085
3086 for_each_netdev(net, dev) {
3087 if (dev->group == group) {
3088 const struct rtnl_link_ops *ops;
3089
3090 found = true;
3091 ops = dev->rtnl_link_ops;
3092 if (!ops || !ops->dellink)
3093 return -EOPNOTSUPP;
3094 }
3095 }
3096
3097 if (!found)
3098 return -ENODEV;
3099
3100 for_each_netdev_safe(net, dev, aux) {
3101 if (dev->group == group) {
3102 const struct rtnl_link_ops *ops;
3103
3104 ops = dev->rtnl_link_ops;
3105 ops->dellink(dev, &list_kill);
3106 }
3107 }
3108 unregister_netdevice_many(&list_kill);
3109
3110 return 0;
3111 }
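/* Illustrative example (a sketch, assuming an iproute2 that supports
 * deletion by group and devices whose link ops implement ->dellink):
 *
 *	ip link set dev dummy0 group 5
 *	ip link set dev dummy1 group 5
 *	ip link delete group 5		(removes dummy0 and dummy1 together)
 */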
3112
3113 int rtnl_delete_link(struct net_device *dev)
3114 {
3115 const struct rtnl_link_ops *ops;
3116 LIST_HEAD(list_kill);
3117
3118 ops = dev->rtnl_link_ops;
3119 if (!ops || !ops->dellink)
3120 return -EOPNOTSUPP;
3121
3122 ops->dellink(dev, &list_kill);
3123 unregister_netdevice_many(&list_kill);
3124
3125 return 0;
3126 }
3127 EXPORT_SYMBOL_GPL(rtnl_delete_link);
3128
3129 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3130 struct netlink_ext_ack *extack)
3131 {
3132 struct net *net = sock_net(skb->sk);
3133 struct net *tgt_net = net;
3134 struct net_device *dev = NULL;
3135 struct ifinfomsg *ifm;
3136 struct nlattr *tb[IFLA_MAX+1];
3137 int err;
3138 int netnsid = -1;
3139
3140 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3141 ifla_policy, extack);
3142 if (err < 0)
3143 return err;
3144
3145 err = rtnl_ensure_unique_netns(tb, extack, true);
3146 if (err < 0)
3147 return err;
3148
3149 if (tb[IFLA_TARGET_NETNSID]) {
3150 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3151 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3152 if (IS_ERR(tgt_net))
3153 return PTR_ERR(tgt_net);
3154 }
3155
3156 err = -EINVAL;
3157 ifm = nlmsg_data(nlh);
3158 if (ifm->ifi_index > 0)
3159 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3160 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3161 dev = rtnl_dev_get(net, tb);
3162 else if (tb[IFLA_GROUP])
3163 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3164 else
3165 goto out;
3166
3167 if (!dev) {
3168 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
3169 err = -ENODEV;
3170
3171 goto out;
3172 }
3173
3174 err = rtnl_delete_link(dev);
3175
3176 out:
3177 if (netnsid >= 0)
3178 put_net(tgt_net);
3179
3180 return err;
3181 }
3182
3183 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
3184 {
3185 unsigned int old_flags;
3186 int err;
3187
3188 old_flags = dev->flags;
3189 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3190 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3191 NULL);
3192 if (err < 0)
3193 return err;
3194 }
3195
3196 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3197 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
3198 } else {
3199 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3200 __dev_notify_flags(dev, old_flags, ~0U);
3201 }
3202 return 0;
3203 }
3204 EXPORT_SYMBOL(rtnl_configure_link);
3205
3206 struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3207 unsigned char name_assign_type,
3208 const struct rtnl_link_ops *ops,
3209 struct nlattr *tb[],
3210 struct netlink_ext_ack *extack)
3211 {
3212 struct net_device *dev;
3213 unsigned int num_tx_queues = 1;
3214 unsigned int num_rx_queues = 1;
3215
3216 if (tb[IFLA_NUM_TX_QUEUES])
3217 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3218 else if (ops->get_num_tx_queues)
3219 num_tx_queues = ops->get_num_tx_queues();
3220
3221 if (tb[IFLA_NUM_RX_QUEUES])
3222 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3223 else if (ops->get_num_rx_queues)
3224 num_rx_queues = ops->get_num_rx_queues();
3225
3226 if (num_tx_queues < 1 || num_tx_queues > 4096) {
3227 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3228 return ERR_PTR(-EINVAL);
3229 }
3230
3231 if (num_rx_queues < 1 || num_rx_queues > 4096) {
3232 NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3233 return ERR_PTR(-EINVAL);
3234 }
3235
3236 if (ops->alloc) {
3237 dev = ops->alloc(tb, ifname, name_assign_type,
3238 num_tx_queues, num_rx_queues);
3239 if (IS_ERR(dev))
3240 return dev;
3241 } else {
3242 dev = alloc_netdev_mqs(ops->priv_size, ifname,
3243 name_assign_type, ops->setup,
3244 num_tx_queues, num_rx_queues);
3245 }
3246
3247 if (!dev)
3248 return ERR_PTR(-ENOMEM);
3249
3250 dev_net_set(dev, net);
3251 dev->rtnl_link_ops = ops;
3252 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3253
3254 if (tb[IFLA_MTU]) {
3255 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3256 int err;
3257
3258 err = dev_validate_mtu(dev, mtu, extack);
3259 if (err) {
3260 free_netdev(dev);
3261 return ERR_PTR(err);
3262 }
3263 dev->mtu = mtu;
3264 }
3265 if (tb[IFLA_ADDRESS]) {
3266 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3267 nla_len(tb[IFLA_ADDRESS]));
3268 dev->addr_assign_type = NET_ADDR_SET;
3269 }
3270 if (tb[IFLA_BROADCAST])
3271 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3272 nla_len(tb[IFLA_BROADCAST]));
3273 if (tb[IFLA_TXQLEN])
3274 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3275 if (tb[IFLA_OPERSTATE])
3276 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3277 if (tb[IFLA_LINKMODE])
3278 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3279 if (tb[IFLA_GROUP])
3280 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3281 if (tb[IFLA_GSO_MAX_SIZE])
3282 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3283 if (tb[IFLA_GSO_MAX_SEGS])
3284 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3285 if (tb[IFLA_GRO_MAX_SIZE])
3286 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3287
3288 return dev;
3289 }
3290 EXPORT_SYMBOL(rtnl_create_link);
3291
3292 static int rtnl_group_changelink(const struct sk_buff *skb,
3293 struct net *net, int group,
3294 struct ifinfomsg *ifm,
3295 struct netlink_ext_ack *extack,
3296 struct nlattr **tb)
3297 {
3298 struct net_device *dev, *aux;
3299 int err;
3300
3301 for_each_netdev_safe(net, dev, aux) {
3302 if (dev->group == group) {
3303 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3304 if (err < 0)
3305 return err;
3306 }
3307 }
3308
3309 return 0;
3310 }
3311
3312 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3313 const struct rtnl_link_ops *ops,
3314 struct nlattr **tb, struct nlattr **data,
3315 struct netlink_ext_ack *extack)
3316 {
3317 unsigned char name_assign_type = NET_NAME_USER;
3318 struct net *net = sock_net(skb->sk);
3319 struct net *dest_net, *link_net;
3320 struct net_device *dev;
3321 char ifname[IFNAMSIZ];
3322 int err;
3323
3324 if (!ops->alloc && !ops->setup)
3325 return -EOPNOTSUPP;
3326
3327 if (tb[IFLA_IFNAME]) {
3328 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3329 } else {
3330 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3331 name_assign_type = NET_NAME_ENUM;
3332 }
3333
3334 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3335 if (IS_ERR(dest_net))
3336 return PTR_ERR(dest_net);
3337
3338 if (tb[IFLA_LINK_NETNSID]) {
3339 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3340
3341 link_net = get_net_ns_by_id(dest_net, id);
3342 if (!link_net) {
3343 NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3344 err = -EINVAL;
3345 goto out;
3346 }
3347 err = -EPERM;
3348 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3349 goto out;
3350 } else {
3351 link_net = NULL;
3352 }
3353
3354 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3355 name_assign_type, ops, tb, extack);
3356 if (IS_ERR(dev)) {
3357 err = PTR_ERR(dev);
3358 goto out;
3359 }
3360
3361 dev->ifindex = ifm->ifi_index;
3362
3363 if (ops->newlink)
3364 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3365 else
3366 err = register_netdevice(dev);
3367 if (err < 0) {
3368 free_netdev(dev);
3369 goto out;
3370 }
3371
3372 err = rtnl_configure_link(dev, ifm);
3373 if (err < 0)
3374 goto out_unregister;
3375 if (link_net) {
3376 err = dev_change_net_namespace(dev, dest_net, ifname);
3377 if (err < 0)
3378 goto out_unregister;
3379 }
3380 if (tb[IFLA_MASTER]) {
3381 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3382 if (err)
3383 goto out_unregister;
3384 }
3385 out:
3386 if (link_net)
3387 put_net(link_net);
3388 put_net(dest_net);
3389 return err;
3390 out_unregister:
3391 if (ops->newlink) {
3392 LIST_HEAD(list_kill);
3393
3394 ops->dellink(dev, &list_kill);
3395 unregister_netdevice_many(&list_kill);
3396 } else {
3397 unregister_netdevice(dev);
3398 }
3399 goto out;
3400 }
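/* Illustrative example (a sketch, not from this file): a request such as
 *
 *	ip link add veth0 type veth peer name veth1
 *
 * reaches rtnl_newlink_create() as RTM_NEWLINK with NLM_F_CREATE|NLM_F_EXCL,
 * IFLA_IFNAME = "veth0", and a nested IFLA_LINKINFO carrying
 * IFLA_INFO_KIND = "veth" plus the kind-specific IFLA_INFO_DATA.
 */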
3401
3402 struct rtnl_newlink_tbs {
3403 struct nlattr *tb[IFLA_MAX + 1];
3404 struct nlattr *attr[RTNL_MAX_TYPE + 1];
3405 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3406 };
3407
3408 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3409 struct rtnl_newlink_tbs *tbs,
3410 struct netlink_ext_ack *extack)
3411 {
3412 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3413 struct nlattr ** const tb = tbs->tb;
3414 const struct rtnl_link_ops *m_ops;
3415 struct net_device *master_dev;
3416 struct net *net = sock_net(skb->sk);
3417 const struct rtnl_link_ops *ops;
3418 struct nlattr **slave_data;
3419 char kind[MODULE_NAME_LEN];
3420 struct net_device *dev;
3421 struct ifinfomsg *ifm;
3422 struct nlattr **data;
3423 bool link_specified;
3424 int err;
3425
3426 #ifdef CONFIG_MODULES
3427 replay:
3428 #endif
3429 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3430 ifla_policy, extack);
3431 if (err < 0)
3432 return err;
3433
3434 err = rtnl_ensure_unique_netns(tb, extack, false);
3435 if (err < 0)
3436 return err;
3437
3438 ifm = nlmsg_data(nlh);
3439 if (ifm->ifi_index > 0) {
3440 link_specified = true;
3441 dev = __dev_get_by_index(net, ifm->ifi_index);
3442 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3443 link_specified = true;
3444 dev = rtnl_dev_get(net, tb);
3445 } else {
3446 link_specified = false;
3447 dev = NULL;
3448 }
3449
3450 master_dev = NULL;
3451 m_ops = NULL;
3452 if (dev) {
3453 master_dev = netdev_master_upper_dev_get(dev);
3454 if (master_dev)
3455 m_ops = master_dev->rtnl_link_ops;
3456 }
3457
3458 err = validate_linkmsg(dev, tb, extack);
3459 if (err < 0)
3460 return err;
3461
3462 if (tb[IFLA_LINKINFO]) {
3463 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3464 tb[IFLA_LINKINFO],
3465 ifla_info_policy, NULL);
3466 if (err < 0)
3467 return err;
3468 } else
3469 memset(linkinfo, 0, sizeof(linkinfo));
3470
3471 if (linkinfo[IFLA_INFO_KIND]) {
3472 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3473 ops = rtnl_link_ops_get(kind);
3474 } else {
3475 kind[0] = '\0';
3476 ops = NULL;
3477 }
3478
3479 data = NULL;
3480 if (ops) {
3481 if (ops->maxtype > RTNL_MAX_TYPE)
3482 return -EINVAL;
3483
3484 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3485 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
3486 linkinfo[IFLA_INFO_DATA],
3487 ops->policy, extack);
3488 if (err < 0)
3489 return err;
3490 data = tbs->attr;
3491 }
3492 if (ops->validate) {
3493 err = ops->validate(tb, data, extack);
3494 if (err < 0)
3495 return err;
3496 }
3497 }
3498
3499 slave_data = NULL;
3500 if (m_ops) {
3501 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3502 return -EINVAL;
3503
3504 if (m_ops->slave_maxtype &&
3505 linkinfo[IFLA_INFO_SLAVE_DATA]) {
3506 err = nla_parse_nested_deprecated(tbs->slave_attr,
3507 m_ops->slave_maxtype,
3508 linkinfo[IFLA_INFO_SLAVE_DATA],
3509 m_ops->slave_policy,
3510 extack);
3511 if (err < 0)
3512 return err;
3513 slave_data = tbs->slave_attr;
3514 }
3515 }
3516
3517 if (dev) {
3518 int status = 0;
3519
3520 if (nlh->nlmsg_flags & NLM_F_EXCL)
3521 return -EEXIST;
3522 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3523 return -EOPNOTSUPP;
3524
3525 if (linkinfo[IFLA_INFO_DATA]) {
3526 if (!ops || ops != dev->rtnl_link_ops ||
3527 !ops->changelink)
3528 return -EOPNOTSUPP;
3529
3530 err = ops->changelink(dev, tb, data, extack);
3531 if (err < 0)
3532 return err;
3533 status |= DO_SETLINK_NOTIFY;
3534 }
3535
3536 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3537 if (!m_ops || !m_ops->slave_changelink)
3538 return -EOPNOTSUPP;
3539
3540 err = m_ops->slave_changelink(master_dev, dev, tb,
3541 slave_data, extack);
3542 if (err < 0)
3543 return err;
3544 status |= DO_SETLINK_NOTIFY;
3545 }
3546
3547 return do_setlink(skb, dev, ifm, extack, tb, status);
3548 }
3549
3550 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3551 /* No dev found and NLM_F_CREATE not set: either the requested dev
3552 * does not exist, or the request is for a device group.
3553 */
3554 if (link_specified)
3555 return -ENODEV;
3556 if (tb[IFLA_GROUP])
3557 return rtnl_group_changelink(skb, net,
3558 nla_get_u32(tb[IFLA_GROUP]),
3559 ifm, extack, tb);
3560 return -ENODEV;
3561 }
3562
3563 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3564 return -EOPNOTSUPP;
3565
3566 if (!ops) {
3567 #ifdef CONFIG_MODULES
3568 if (kind[0]) {
3569 __rtnl_unlock();
3570 request_module("rtnl-link-%s", kind);
3571 rtnl_lock();
3572 ops = rtnl_link_ops_get(kind);
3573 if (ops)
3574 goto replay;
3575 }
3576 #endif
3577 NL_SET_ERR_MSG(extack, "Unknown device type");
3578 return -EOPNOTSUPP;
3579 }
3580
3581 return rtnl_newlink_create(skb, ifm, ops, tb, data, extack);
3582 }
3583
3584 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3585 struct netlink_ext_ack *extack)
3586 {
3587 struct rtnl_newlink_tbs *tbs;
3588 int ret;
3589
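	/* The three attribute tables in struct rtnl_newlink_tbs are large,
	 * so they are kept off the kernel stack via this heap allocation.
	 */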
3590 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3591 if (!tbs)
3592 return -ENOMEM;
3593
3594 ret = __rtnl_newlink(skb, nlh, tbs, extack);
3595 kfree(tbs);
3596 return ret;
3597 }
3598
3599 static int rtnl_valid_getlink_req(struct sk_buff *skb,
3600 const struct nlmsghdr *nlh,
3601 struct nlattr **tb,
3602 struct netlink_ext_ack *extack)
3603 {
3604 struct ifinfomsg *ifm;
3605 int i, err;
3606
3607 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3608 NL_SET_ERR_MSG(extack, "Invalid header for get link");
3609 return -EINVAL;
3610 }
3611
3612 if (!netlink_strict_get_check(skb))
3613 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3614 ifla_policy, extack);
3615
3616 ifm = nlmsg_data(nlh);
3617 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3618 ifm->ifi_change) {
3619 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3620 return -EINVAL;
3621 }
3622
3623 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3624 ifla_policy, extack);
3625 if (err)
3626 return err;
3627
3628 for (i = 0; i <= IFLA_MAX; i++) {
3629 if (!tb[i])
3630 continue;
3631
3632 switch (i) {
3633 case IFLA_IFNAME:
3634 case IFLA_ALT_IFNAME:
3635 case IFLA_EXT_MASK:
3636 case IFLA_TARGET_NETNSID:
3637 break;
3638 default:
3639 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3640 return -EINVAL;
3641 }
3642 }
3643
3644 return 0;
3645 }
3646
3647 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3648 struct netlink_ext_ack *extack)
3649 {
3650 struct net *net = sock_net(skb->sk);
3651 struct net *tgt_net = net;
3652 struct ifinfomsg *ifm;
3653 struct nlattr *tb[IFLA_MAX+1];
3654 struct net_device *dev = NULL;
3655 struct sk_buff *nskb;
3656 int netnsid = -1;
3657 int err;
3658 u32 ext_filter_mask = 0;
3659
3660 err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3661 if (err < 0)
3662 return err;
3663
3664 err = rtnl_ensure_unique_netns(tb, extack, true);
3665 if (err < 0)
3666 return err;
3667
3668 if (tb[IFLA_TARGET_NETNSID]) {
3669 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3670 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3671 if (IS_ERR(tgt_net))
3672 return PTR_ERR(tgt_net);
3673 }
3674
3675 if (tb[IFLA_EXT_MASK])
3676 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3677
3678 err = -EINVAL;
3679 ifm = nlmsg_data(nlh);
3680 if (ifm->ifi_index > 0)
3681 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3682 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3683 dev = rtnl_dev_get(tgt_net, tb);
3684 else
3685 goto out;
3686
3687 err = -ENODEV;
3688 if (dev == NULL)
3689 goto out;
3690
3691 err = -ENOBUFS;
3692 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3693 if (nskb == NULL)
3694 goto out;
3695
3696 err = rtnl_fill_ifinfo(nskb, dev, net,
3697 RTM_NEWLINK, NETLINK_CB(skb).portid,
3698 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3699 0, NULL, 0, netnsid, GFP_KERNEL);
3700 if (err < 0) {
3701 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3702 WARN_ON(err == -EMSGSIZE);
3703 kfree_skb(nskb);
3704 } else
3705 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3706 out:
3707 if (netnsid >= 0)
3708 put_net(tgt_net);
3709
3710 return err;
3711 }
3712
3713 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3714 bool *changed, struct netlink_ext_ack *extack)
3715 {
3716 char *alt_ifname;
3717 size_t size;
3718 int err;
3719
3720 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3721 if (err)
3722 return err;
3723
3724 if (cmd == RTM_NEWLINKPROP) {
3725 size = rtnl_prop_list_size(dev);
3726 size += nla_total_size(ALTIFNAMSIZ);
3727 if (size >= U16_MAX) {
3728 NL_SET_ERR_MSG(extack,
3729 "effective property list too long");
3730 return -EINVAL;
3731 }
3732 }
3733
3734 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
3735 if (!alt_ifname)
3736 return -ENOMEM;
3737
3738 if (cmd == RTM_NEWLINKPROP) {
3739 err = netdev_name_node_alt_create(dev, alt_ifname);
3740 if (!err)
3741 alt_ifname = NULL;
3742 } else if (cmd == RTM_DELLINKPROP) {
3743 err = netdev_name_node_alt_destroy(dev, alt_ifname);
3744 } else {
3745 WARN_ON_ONCE(1);
3746 err = -EINVAL;
3747 }
3748
3749 kfree(alt_ifname);
3750 if (!err)
3751 *changed = true;
3752 return err;
3753 }
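/* Illustrative example (a sketch, not from this file; eth0/my-uplink are
 * placeholders): alternative names are managed via
 * RTM_NEWLINKPROP/RTM_DELLINKPROP, e.g. from iproute2:
 *
 *	ip link property add dev eth0 altname my-uplink
 *	ip link property del dev eth0 altname my-uplink
 */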
3754
3755 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3756 struct netlink_ext_ack *extack)
3757 {
3758 struct net *net = sock_net(skb->sk);
3759 struct nlattr *tb[IFLA_MAX + 1];
3760 struct net_device *dev;
3761 struct ifinfomsg *ifm;
3762 bool changed = false;
3763 struct nlattr *attr;
3764 int err, rem;
3765
3766 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3767 if (err)
3768 return err;
3769
3770 err = rtnl_ensure_unique_netns(tb, extack, true);
3771 if (err)
3772 return err;
3773
3774 ifm = nlmsg_data(nlh);
3775 if (ifm->ifi_index > 0)
3776 dev = __dev_get_by_index(net, ifm->ifi_index);
3777 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3778 dev = rtnl_dev_get(net, tb);
3779 else
3780 return -EINVAL;
3781
3782 if (!dev)
3783 return -ENODEV;
3784
3785 if (!tb[IFLA_PROP_LIST])
3786 return 0;
3787
3788 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3789 switch (nla_type(attr)) {
3790 case IFLA_ALT_IFNAME:
3791 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3792 if (err)
3793 return err;
3794 break;
3795 }
3796 }
3797
3798 if (changed)
3799 netdev_state_change(dev);
3800 return 0;
3801 }
3802
3803 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3804 struct netlink_ext_ack *extack)
3805 {
3806 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3807 }
3808
3809 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3810 struct netlink_ext_ack *extack)
3811 {
3812 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
3813 }
3814
3815 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3816 {
3817 struct net *net = sock_net(skb->sk);
3818 size_t min_ifinfo_dump_size = 0;
3819 struct nlattr *tb[IFLA_MAX+1];
3820 u32 ext_filter_mask = 0;
3821 struct net_device *dev;
3822 int hdrlen;
3823
3824 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3825 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3826 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3827
3828 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3829 if (tb[IFLA_EXT_MASK])
3830 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3831 }
3832
3833 if (!ext_filter_mask)
3834 return NLMSG_GOODSIZE;
3835 /*
3836 * traverse the list of net devices and compute the minimum
3837 * buffer size based upon the filter mask.
3838 */
3839 rcu_read_lock();
3840 for_each_netdev_rcu(net, dev) {
3841 min_ifinfo_dump_size = max(min_ifinfo_dump_size,
3842 if_nlmsg_size(dev, ext_filter_mask));
3843 }
3844 rcu_read_unlock();
3845
3846 return nlmsg_total_size(min_ifinfo_dump_size);
3847 }
3848
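/*
 * Illustrative userspace sketch (not part of this file) of a dump request
 * that exercises the IFLA_EXT_MASK path above; the uapi names are standard,
 * while the socket `fd` and the one-shot send are a hypothetical minimal
 * setup, assuming an already-bound NETLINK_ROUTE socket:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct ifinfomsg ifm;
 *		struct nlattr ext_req;
 *		__u32 ext_filter_mask;
 *	} req = {
 *		.nlh.nlmsg_len	  = sizeof(req),
 *		.nlh.nlmsg_type	  = RTM_GETLINK,
 *		.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_DUMP,
 *		.ifm.ifi_family	  = AF_UNSPEC,
 *		.ext_req.nla_type = IFLA_EXT_MASK,
 *		.ext_req.nla_len  = NLA_HDRLEN + sizeof(__u32),
 *		.ext_filter_mask  = RTEXT_FILTER_VF,
 *	};
 *	send(fd, &req, sizeof(req), 0);
 */
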
3849 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3850 {
3851 int idx;
3852 int s_idx = cb->family;
3853 int type = cb->nlh->nlmsg_type - RTM_BASE;
3854 int ret = 0;
3855
3856 if (s_idx == 0)
3857 s_idx = 1;
3858
3859 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3860 struct rtnl_link __rcu **tab;
3861 struct rtnl_link *link;
3862 rtnl_dumpit_func dumpit;
3863
3864 if (idx < s_idx || idx == PF_PACKET)
3865 continue;
3866
3867 if (type < 0 || type >= RTM_NR_MSGTYPES)
3868 continue;
3869
3870 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3871 if (!tab)
3872 continue;
3873
3874 link = rcu_dereference_rtnl(tab[type]);
3875 if (!link)
3876 continue;
3877
3878 dumpit = link->dumpit;
3879 if (!dumpit)
3880 continue;
3881
3882 if (idx > s_idx) {
3883 memset(&cb->args[0], 0, sizeof(cb->args));
3884 cb->prev_seq = 0;
3885 cb->seq = 0;
3886 }
3887 ret = dumpit(skb, cb);
3888 if (ret)
3889 break;
3890 }
3891 cb->family = idx;
3892
3893 return skb->len ? : ret;
3894 }
3895
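/*
 * Note on the loop above: rtnl_dump_all() serves PF_UNSPEC dump requests by
 * replaying one dump per registered address family, with cb->family doubling
 * as the resume cursor across successive recvmsg() calls. For example, an
 * RTM_GETROUTE dump with rtm_family AF_UNSPEC lands here and emits IPv4
 * routes, then IPv6 routes, and so on, skipping families without a
 * registered dumpit.
 */
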
3896 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3897 unsigned int change,
3898 u32 event, gfp_t flags, int *new_nsid,
3899 int new_ifindex)
3900 {
3901 struct net *net = dev_net(dev);
3902 struct sk_buff *skb;
3903 int err = -ENOBUFS;
3904
3905 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
3906 if (skb == NULL)
3907 goto errout;
3908
3909 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3910 type, 0, 0, change, 0, 0, event,
3911 new_nsid, new_ifindex, -1, flags);
3912 if (err < 0) {
3913 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3914 WARN_ON(err == -EMSGSIZE);
3915 kfree_skb(skb);
3916 goto errout;
3917 }
3918 return skb;
3919 errout:
3920 if (err < 0)
3921 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3922 return NULL;
3923 }
3924
3925 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
3926 {
3927 struct net *net = dev_net(dev);
3928
3929 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
3930 }
3931
3932 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3933 unsigned int change, u32 event,
3934 gfp_t flags, int *new_nsid, int new_ifindex)
3935 {
3936 struct sk_buff *skb;
3937
3938 if (dev->reg_state != NETREG_REGISTERED)
3939 return;
3940
3941 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3942 new_ifindex);
3943 if (skb)
3944 rtmsg_ifinfo_send(skb, dev, flags);
3945 }
3946
3947 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3948 gfp_t flags)
3949 {
3950 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3951 NULL, 0);
3952 }
3953
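/*
 * Typical in-kernel usage of the helper above (illustrative; compare
 * __dev_notify_flags()): broadcast an RTM_NEWLINK to RTNLGRP_LINK listeners
 * after a change to the device's flags, from process context:
 *
 *	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL);
 */
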
3954 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
3955 gfp_t flags, int *new_nsid, int new_ifindex)
3956 {
3957 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3958 new_nsid, new_ifindex);
3959 }
3960
3961 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
3962 struct net_device *dev,
3963 u8 *addr, u16 vid, u32 pid, u32 seq,
3964 int type, unsigned int flags,
3965 int nlflags, u16 ndm_state)
3966 {
3967 struct nlmsghdr *nlh;
3968 struct ndmsg *ndm;
3969
3970 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
3971 if (!nlh)
3972 return -EMSGSIZE;
3973
3974 ndm = nlmsg_data(nlh);
3975 ndm->ndm_family = AF_BRIDGE;
3976 ndm->ndm_pad1 = 0;
3977 ndm->ndm_pad2 = 0;
3978 ndm->ndm_flags = flags;
3979 ndm->ndm_type = 0;
3980 ndm->ndm_ifindex = dev->ifindex;
3981 ndm->ndm_state = ndm_state;
3982
3983 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
3984 goto nla_put_failure;
3985 if (vid)
3986 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
3987 goto nla_put_failure;
3988
3989 nlmsg_end(skb, nlh);
3990 return 0;
3991
3992 nla_put_failure:
3993 nlmsg_cancel(skb, nlh);
3994 return -EMSGSIZE;
3995 }
3996
3997 static inline size_t rtnl_fdb_nlmsg_size(void)
3998 {
3999 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
4000 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
4001 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
4002 0;
4003 }
4004
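/*
 * Worked example of the size computed above (illustrative, common ABIs):
 * NLMSG_ALIGN(sizeof(struct ndmsg)) = NLMSG_ALIGN(12) = 12, plus
 * nla_total_size(ETH_ALEN) = NLA_ALIGN(4 + 6) = 12 for NDA_LLADDR, plus
 * nla_total_size(sizeof(u16)) = NLA_ALIGN(4 + 2) = 8 for NDA_VLAN, giving a
 * 32-byte payload; nlmsg_new() then adds the 16-byte netlink header on top.
 */
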
4005 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4006 u16 ndm_state)
4007 {
4008 struct net *net = dev_net(dev);
4009 struct sk_buff *skb;
4010 int err = -ENOBUFS;
4011
4012 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
4013 if (!skb)
4014 goto errout;
4015
4016 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4017 0, 0, type, NTF_SELF, 0, ndm_state);
4018 if (err < 0) {
4019 kfree_skb(skb);
4020 goto errout;
4021 }
4022
4023 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4024 return;
4025 errout:
4026 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4027 }
4028
4029 /*
4030 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4031 */
4032 int ndo_dflt_fdb_add(struct ndmsg *ndm,
4033 struct nlattr *tb[],
4034 struct net_device *dev,
4035 const unsigned char *addr, u16 vid,
4036 u16 flags)
4037 {
4038 int err = -EINVAL;
4039
4040 	/* If aging addresses are supported, the device will need to
4041 * implement its own handler for this.
4042 */
4043 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4044 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4045 return err;
4046 }
4047
4048 if (vid) {
4049 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4050 return err;
4051 }
4052
4053 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4054 err = dev_uc_add_excl(dev, addr);
4055 else if (is_multicast_ether_addr(addr))
4056 err = dev_mc_add_excl(dev, addr);
4057
4058 /* Only return duplicate errors if NLM_F_EXCL is set */
4059 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4060 err = 0;
4061
4062 return err;
4063 }
4064 EXPORT_SYMBOL(ndo_dflt_fdb_add);
4065
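/*
 * A driver that only needs the software unicast/multicast filter lists can
 * point its ops at this default instead of rolling its own, e.g.
 * (hypothetical driver, illustrative only):
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		...
 *		.ndo_fdb_add = ndo_dflt_fdb_add,
 *	};
 *
 * Devices with no ndo_fdb_add at all get the same behaviour through the
 * fallback in rtnl_fdb_add() below.
 */
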
4066 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4067 struct netlink_ext_ack *extack)
4068 {
4069 u16 vid = 0;
4070
4071 if (vlan_attr) {
4072 if (nla_len(vlan_attr) != sizeof(u16)) {
4073 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4074 return -EINVAL;
4075 }
4076
4077 vid = nla_get_u16(vlan_attr);
4078
4079 if (!vid || vid >= VLAN_VID_MASK) {
4080 NL_SET_ERR_MSG(extack, "invalid vlan id");
4081 return -EINVAL;
4082 }
4083 }
4084 *p_vid = vid;
4085 return 0;
4086 }
4087
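/*
 * For reference, the accepted NDA_VLAN payloads above are a u16 in 1..4094;
 * 0 and 4095 (VLAN_VID_MASK) are rejected since neither is a usable VLAN ID,
 * and an absent attribute simply yields vid == 0, meaning "no VLAN".
 */
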
4088 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4089 struct netlink_ext_ack *extack)
4090 {
4091 struct net *net = sock_net(skb->sk);
4092 struct ndmsg *ndm;
4093 struct nlattr *tb[NDA_MAX+1];
4094 struct net_device *dev;
4095 u8 *addr;
4096 u16 vid;
4097 int err;
4098
4099 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4100 extack);
4101 if (err < 0)
4102 return err;
4103
4104 ndm = nlmsg_data(nlh);
4105 if (ndm->ndm_ifindex == 0) {
4106 NL_SET_ERR_MSG(extack, "invalid ifindex");
4107 return -EINVAL;
4108 }
4109
4110 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4111 if (dev == NULL) {
4112 NL_SET_ERR_MSG(extack, "unknown ifindex");
4113 return -ENODEV;
4114 }
4115
4116 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4117 NL_SET_ERR_MSG(extack, "invalid address");
4118 return -EINVAL;
4119 }
4120
4121 if (dev->type != ARPHRD_ETHER) {
4122 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4123 return -EINVAL;
4124 }
4125
4126 addr = nla_data(tb[NDA_LLADDR]);
4127
4128 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4129 if (err)
4130 return err;
4131
4132 err = -EOPNOTSUPP;
4133
4134 	/* Support fdb on the master device, the net/bridge default case */
4135 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4136 netif_is_bridge_port(dev)) {
4137 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4138 const struct net_device_ops *ops = br_dev->netdev_ops;
4139
4140 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4141 nlh->nlmsg_flags, extack);
4142 if (err)
4143 goto out;
4144 else
4145 ndm->ndm_flags &= ~NTF_MASTER;
4146 }
4147
4148 /* Embedded bridge, macvlan, and any other device support */
4149 if ((ndm->ndm_flags & NTF_SELF)) {
4150 if (dev->netdev_ops->ndo_fdb_add)
4151 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4152 vid,
4153 nlh->nlmsg_flags,
4154 extack);
4155 else
4156 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4157 nlh->nlmsg_flags);
4158
4159 if (!err) {
4160 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4161 ndm->ndm_state);
4162 ndm->ndm_flags &= ~NTF_SELF;
4163 }
4164 }
4165 out:
4166 return err;
4167 }
4168
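/*
 * Wire-level view of the handler above (illustrative): `bridge fdb add
 * 00:11:22:33:44:55 dev swp1 master` sends RTM_NEWNEIGH with ndm_family
 * AF_BRIDGE and an NDA_LLADDR attribute carrying the MAC; with ndm_flags
 * empty or containing NTF_MASTER only the bridge-master path runs, while
 * `self` sets NTF_SELF and drives the device's own ndo_fdb_add (or the
 * default helper).
 */
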
4169 /*
4170 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4171 */
4172 int ndo_dflt_fdb_del(struct ndmsg *ndm,
4173 struct nlattr *tb[],
4174 struct net_device *dev,
4175 const unsigned char *addr, u16 vid)
4176 {
4177 int err = -EINVAL;
4178
4179 	/* If aging addresses are supported, the device will need to
4180 * implement its own handler for this.
4181 */
4182 if (!(ndm->ndm_state & NUD_PERMANENT)) {
4183 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4184 return err;
4185 }
4186
4187 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4188 err = dev_uc_del(dev, addr);
4189 else if (is_multicast_ether_addr(addr))
4190 err = dev_mc_del(dev, addr);
4191
4192 return err;
4193 }
4194 EXPORT_SYMBOL(ndo_dflt_fdb_del);
4195
4196 static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = {
4197 [NDA_VLAN] = { .type = NLA_U16 },
4198 [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
4199 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 },
4200 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 },
4201 };
4202
4203 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4204 struct netlink_ext_ack *extack)
4205 {
4206 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4207 struct net *net = sock_net(skb->sk);
4208 const struct net_device_ops *ops;
4209 struct ndmsg *ndm;
4210 struct nlattr *tb[NDA_MAX+1];
4211 struct net_device *dev;
4212 __u8 *addr = NULL;
4213 int err;
4214 u16 vid;
4215
4216 if (!netlink_capable(skb, CAP_NET_ADMIN))
4217 return -EPERM;
4218
4219 if (!del_bulk) {
4220 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4221 NULL, extack);
4222 } else {
4223 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
4224 fdb_del_bulk_policy, extack);
4225 }
4226 if (err < 0)
4227 return err;
4228
4229 ndm = nlmsg_data(nlh);
4230 if (ndm->ndm_ifindex == 0) {
4231 NL_SET_ERR_MSG(extack, "invalid ifindex");
4232 return -EINVAL;
4233 }
4234
4235 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4236 if (dev == NULL) {
4237 NL_SET_ERR_MSG(extack, "unknown ifindex");
4238 return -ENODEV;
4239 }
4240
4241 if (!del_bulk) {
4242 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4243 NL_SET_ERR_MSG(extack, "invalid address");
4244 return -EINVAL;
4245 }
4246 addr = nla_data(tb[NDA_LLADDR]);
4247 }
4248
4249 if (dev->type != ARPHRD_ETHER) {
4250 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4251 return -EINVAL;
4252 }
4253
4254 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4255 if (err)
4256 return err;
4257
4258 err = -EOPNOTSUPP;
4259
4260 	/* Support fdb on the master device, the net/bridge default case */
4261 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4262 netif_is_bridge_port(dev)) {
4263 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4264
4265 ops = br_dev->netdev_ops;
4266 if (!del_bulk) {
4267 if (ops->ndo_fdb_del)
4268 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4269 } else {
4270 if (ops->ndo_fdb_del_bulk)
4271 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4272 extack);
4273 }
4274
4275 if (err)
4276 goto out;
4277 else
4278 ndm->ndm_flags &= ~NTF_MASTER;
4279 }
4280
4281 /* Embedded bridge, macvlan, and any other device support */
4282 if (ndm->ndm_flags & NTF_SELF) {
4283 ops = dev->netdev_ops;
4284 if (!del_bulk) {
4285 if (ops->ndo_fdb_del)
4286 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4287 else
4288 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4289 } else {
4290 /* in case err was cleared by NTF_MASTER call */
4291 err = -EOPNOTSUPP;
4292 if (ops->ndo_fdb_del_bulk)
4293 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4294 extack);
4295 }
4296
4297 if (!err) {
4298 if (!del_bulk)
4299 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4300 ndm->ndm_state);
4301 ndm->ndm_flags &= ~NTF_SELF;
4302 }
4303 }
4304 out:
4305 return err;
4306 }
4307
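/*
 * The NLM_F_BULK branch above backs bulk deletion (illustrative example:
 * iproute2's `bridge fdb flush`): no NDA_LLADDR is supplied, and the
 * attributes admitted by fdb_del_bulk_policy (VLAN, ifindex, state and flag
 * masks) describe which entries the driver's ndo_fdb_del_bulk should remove.
 */
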
4308 static int nlmsg_populate_fdb(struct sk_buff *skb,
4309 struct netlink_callback *cb,
4310 struct net_device *dev,
4311 int *idx,
4312 struct netdev_hw_addr_list *list)
4313 {
4314 struct netdev_hw_addr *ha;
4315 int err;
4316 u32 portid, seq;
4317
4318 portid = NETLINK_CB(cb->skb).portid;
4319 seq = cb->nlh->nlmsg_seq;
4320
4321 list_for_each_entry(ha, &list->list, list) {
4322 if (*idx < cb->args[2])
4323 goto skip;
4324
4325 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4326 portid, seq,
4327 RTM_NEWNEIGH, NTF_SELF,
4328 NLM_F_MULTI, NUD_PERMANENT);
4329 if (err < 0)
4330 return err;
4331 skip:
4332 *idx += 1;
4333 }
4334 return 0;
4335 }
4336
4337 /**
4338 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4339 * @skb: socket buffer to store message in
4340 * @cb: netlink callback
4341 * @dev: netdevice
4342 * @filter_dev: ignored
4343 * @idx: the number of FDB table entries dumped is added to *@idx
4344 *
4345 * Default netdevice operation to dump the existing unicast address list.
4346 * Returns the number of addresses from the list that were put in the skb.
4347 */
4348 int ndo_dflt_fdb_dump(struct sk_buff *skb,
4349 struct netlink_callback *cb,
4350 struct net_device *dev,
4351 struct net_device *filter_dev,
4352 int *idx)
4353 {
4354 int err;
4355
4356 if (dev->type != ARPHRD_ETHER)
4357 return -EINVAL;
4358
4359 netif_addr_lock_bh(dev);
4360 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4361 if (err)
4362 goto out;
4363 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4364 out:
4365 netif_addr_unlock_bh(dev);
4366 return err;
4367 }
4368 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4369
4370 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4371 int *br_idx, int *brport_idx,
4372 struct netlink_ext_ack *extack)
4373 {
4374 struct nlattr *tb[NDA_MAX + 1];
4375 struct ndmsg *ndm;
4376 int err, i;
4377
4378 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4379 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4380 return -EINVAL;
4381 }
4382
4383 ndm = nlmsg_data(nlh);
4384 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4385 ndm->ndm_flags || ndm->ndm_type) {
4386 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4387 return -EINVAL;
4388 }
4389
4390 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4391 NDA_MAX, NULL, extack);
4392 if (err < 0)
4393 return err;
4394
4395 *brport_idx = ndm->ndm_ifindex;
4396 for (i = 0; i <= NDA_MAX; ++i) {
4397 if (!tb[i])
4398 continue;
4399
4400 switch (i) {
4401 case NDA_IFINDEX:
4402 if (nla_len(tb[i]) != sizeof(u32)) {
4403 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4404 return -EINVAL;
4405 }
4406 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4407 break;
4408 case NDA_MASTER:
4409 if (nla_len(tb[i]) != sizeof(u32)) {
4410 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4411 return -EINVAL;
4412 }
4413 *br_idx = nla_get_u32(tb[NDA_MASTER]);
4414 break;
4415 default:
4416 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4417 return -EINVAL;
4418 }
4419 }
4420
4421 return 0;
4422 }
4423
4424 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4425 int *br_idx, int *brport_idx,
4426 struct netlink_ext_ack *extack)
4427 {
4428 struct nlattr *tb[IFLA_MAX+1];
4429 int err;
4430
4431 /* A hack to preserve kernel<->userspace interface.
4432 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4433 	 * However, ndmsg is shorter than ifinfomsg, so nlmsg_parse() bails.
4434 * So, check for ndmsg with an optional u32 attribute (not used here).
4435 * Fortunately these sizes don't conflict with the size of ifinfomsg
4436 * with an optional attribute.
4437 */
4438 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4439 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4440 nla_attr_size(sizeof(u32)))) {
4441 struct ifinfomsg *ifm;
4442
4443 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4444 tb, IFLA_MAX, ifla_policy,
4445 extack);
4446 if (err < 0) {
4447 return -EINVAL;
4448 } else if (err == 0) {
4449 if (tb[IFLA_MASTER])
4450 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
4451 }
4452
4453 ifm = nlmsg_data(nlh);
4454 *brport_idx = ifm->ifi_index;
4455 }
4456 return 0;
4457 }
4458
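/*
 * The size check above works because the legacy layouts cannot collide
 * (illustrative arithmetic, common ABIs): sizeof(struct ndmsg) is 12 and
 * ndmsg plus one u32 attribute is 12 + 8 = 20, while sizeof(struct
 * ifinfomsg) is 16, so a 12- or 20-byte payload must be the old ndmsg form
 * and anything else is parsed as ifinfomsg.
 */
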
4459 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4460 {
4461 struct net_device *dev;
4462 struct net_device *br_dev = NULL;
4463 const struct net_device_ops *ops = NULL;
4464 const struct net_device_ops *cops = NULL;
4465 struct net *net = sock_net(skb->sk);
4466 struct hlist_head *head;
4467 int brport_idx = 0;
4468 int br_idx = 0;
4469 int h, s_h;
4470 int idx = 0, s_idx;
4471 int err = 0;
4472 int fidx = 0;
4473
4474 if (cb->strict_check)
4475 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4476 cb->extack);
4477 else
4478 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4479 cb->extack);
4480 if (err < 0)
4481 return err;
4482
4483 if (br_idx) {
4484 br_dev = __dev_get_by_index(net, br_idx);
4485 if (!br_dev)
4486 return -ENODEV;
4487
4488 ops = br_dev->netdev_ops;
4489 }
4490
4491 s_h = cb->args[0];
4492 s_idx = cb->args[1];
4493
4494 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4495 idx = 0;
4496 head = &net->dev_index_head[h];
4497 hlist_for_each_entry(dev, head, index_hlist) {
4498
4499 if (brport_idx && (dev->ifindex != brport_idx))
4500 continue;
4501
4502 			if (!br_idx) { /* user did not specify a particular bridge */
4503 if (netif_is_bridge_port(dev)) {
4504 br_dev = netdev_master_upper_dev_get(dev);
4505 cops = br_dev->netdev_ops;
4506 }
4507 } else {
4508 if (dev != br_dev &&
4509 !netif_is_bridge_port(dev))
4510 continue;
4511
4512 if (br_dev != netdev_master_upper_dev_get(dev) &&
4513 !netif_is_bridge_master(dev))
4514 continue;
4515 cops = ops;
4516 }
4517
4518 if (idx < s_idx)
4519 goto cont;
4520
4521 if (netif_is_bridge_port(dev)) {
4522 if (cops && cops->ndo_fdb_dump) {
4523 err = cops->ndo_fdb_dump(skb, cb,
4524 br_dev, dev,
4525 &fidx);
4526 if (err == -EMSGSIZE)
4527 goto out;
4528 }
4529 }
4530
4531 if (dev->netdev_ops->ndo_fdb_dump)
4532 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4533 dev, NULL,
4534 &fidx);
4535 else
4536 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4537 &fidx);
4538 if (err == -EMSGSIZE)
4539 goto out;
4540
4541 cops = NULL;
4542
4543 			/* reset fdb offset to 0 for the rest of the interfaces */
4544 cb->args[2] = 0;
4545 fidx = 0;
4546 cont:
4547 idx++;
4548 }
4549 }
4550
4551 out:
4552 cb->args[0] = h;
4553 cb->args[1] = idx;
4554 cb->args[2] = fidx;
4555
4556 return skb->len;
4557 }
4558
4559 static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4560 struct nlattr **tb, u8 *ndm_flags,
4561 int *br_idx, int *brport_idx, u8 **addr,
4562 u16 *vid, struct netlink_ext_ack *extack)
4563 {
4564 struct ndmsg *ndm;
4565 int err, i;
4566
4567 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4568 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4569 return -EINVAL;
4570 }
4571
4572 ndm = nlmsg_data(nlh);
4573 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4574 ndm->ndm_type) {
4575 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4576 return -EINVAL;
4577 }
4578
4579 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4580 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4581 return -EINVAL;
4582 }
4583
4584 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4585 NDA_MAX, nda_policy, extack);
4586 if (err < 0)
4587 return err;
4588
4589 *ndm_flags = ndm->ndm_flags;
4590 *brport_idx = ndm->ndm_ifindex;
4591 for (i = 0; i <= NDA_MAX; ++i) {
4592 if (!tb[i])
4593 continue;
4594
4595 switch (i) {
4596 case NDA_MASTER:
4597 *br_idx = nla_get_u32(tb[i]);
4598 break;
4599 case NDA_LLADDR:
4600 if (nla_len(tb[i]) != ETH_ALEN) {
4601 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4602 return -EINVAL;
4603 }
4604 *addr = nla_data(tb[i]);
4605 break;
4606 case NDA_VLAN:
4607 err = fdb_vid_parse(tb[i], vid, extack);
4608 if (err)
4609 return err;
4610 break;
4611 case NDA_VNI:
4612 break;
4613 default:
4614 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4615 return -EINVAL;
4616 }
4617 }
4618
4619 return 0;
4620 }
4621
4622 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4623 struct netlink_ext_ack *extack)
4624 {
4625 struct net_device *dev = NULL, *br_dev = NULL;
4626 const struct net_device_ops *ops = NULL;
4627 struct net *net = sock_net(in_skb->sk);
4628 struct nlattr *tb[NDA_MAX + 1];
4629 struct sk_buff *skb;
4630 int brport_idx = 0;
4631 u8 ndm_flags = 0;
4632 int br_idx = 0;
4633 u8 *addr = NULL;
4634 u16 vid = 0;
4635 int err;
4636
4637 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4638 &brport_idx, &addr, &vid, extack);
4639 if (err < 0)
4640 return err;
4641
4642 if (!addr) {
4643 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4644 return -EINVAL;
4645 }
4646
4647 if (brport_idx) {
4648 dev = __dev_get_by_index(net, brport_idx);
4649 if (!dev) {
4650 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4651 return -ENODEV;
4652 }
4653 }
4654
4655 if (br_idx) {
4656 if (dev) {
4657 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4658 return -EINVAL;
4659 }
4660
4661 br_dev = __dev_get_by_index(net, br_idx);
4662 if (!br_dev) {
4663 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4664 return -EINVAL;
4665 }
4666 ops = br_dev->netdev_ops;
4667 }
4668
4669 if (dev) {
4670 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4671 if (!netif_is_bridge_port(dev)) {
4672 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4673 return -EINVAL;
4674 }
4675 br_dev = netdev_master_upper_dev_get(dev);
4676 if (!br_dev) {
4677 NL_SET_ERR_MSG(extack, "Master of device not found");
4678 return -EINVAL;
4679 }
4680 ops = br_dev->netdev_ops;
4681 } else {
4682 if (!(ndm_flags & NTF_SELF)) {
4683 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4684 return -EINVAL;
4685 }
4686 ops = dev->netdev_ops;
4687 }
4688 }
4689
4690 if (!br_dev && !dev) {
4691 NL_SET_ERR_MSG(extack, "No device specified");
4692 return -ENODEV;
4693 }
4694
4695 if (!ops || !ops->ndo_fdb_get) {
4696 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4697 return -EOPNOTSUPP;
4698 }
4699
4700 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4701 if (!skb)
4702 return -ENOBUFS;
4703
4704 if (br_dev)
4705 dev = br_dev;
4706 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4707 NETLINK_CB(in_skb).portid,
4708 nlh->nlmsg_seq, extack);
4709 if (err)
4710 goto out;
4711
4712 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4713 out:
4714 kfree_skb(skb);
4715 return err;
4716 }
4717
4718 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4719 unsigned int attrnum, unsigned int flag)
4720 {
4721 if (mask & flag)
4722 return nla_put_u8(skb, attrnum, !!(flags & flag));
4723 return 0;
4724 }
4725
4726 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4727 struct net_device *dev, u16 mode,
4728 u32 flags, u32 mask, int nlflags,
4729 u32 filter_mask,
4730 int (*vlan_fill)(struct sk_buff *skb,
4731 struct net_device *dev,
4732 u32 filter_mask))
4733 {
4734 struct nlmsghdr *nlh;
4735 struct ifinfomsg *ifm;
4736 struct nlattr *br_afspec;
4737 struct nlattr *protinfo;
4738 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4739 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4740 int err = 0;
4741
4742 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4743 if (nlh == NULL)
4744 return -EMSGSIZE;
4745
4746 ifm = nlmsg_data(nlh);
4747 ifm->ifi_family = AF_BRIDGE;
4748 ifm->__ifi_pad = 0;
4749 ifm->ifi_type = dev->type;
4750 ifm->ifi_index = dev->ifindex;
4751 ifm->ifi_flags = dev_get_flags(dev);
4752 ifm->ifi_change = 0;
4753
4754
4755 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4756 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4757 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4758 (br_dev &&
4759 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4760 (dev->addr_len &&
4761 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4762 (dev->ifindex != dev_get_iflink(dev) &&
4763 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4764 goto nla_put_failure;
4765
4766 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
4767 if (!br_afspec)
4768 goto nla_put_failure;
4769
4770 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4771 nla_nest_cancel(skb, br_afspec);
4772 goto nla_put_failure;
4773 }
4774
4775 if (mode != BRIDGE_MODE_UNDEF) {
4776 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4777 nla_nest_cancel(skb, br_afspec);
4778 goto nla_put_failure;
4779 }
4780 }
4781 if (vlan_fill) {
4782 err = vlan_fill(skb, dev, filter_mask);
4783 if (err) {
4784 nla_nest_cancel(skb, br_afspec);
4785 goto nla_put_failure;
4786 }
4787 }
4788 nla_nest_end(skb, br_afspec);
4789
4790 protinfo = nla_nest_start(skb, IFLA_PROTINFO);
4791 if (!protinfo)
4792 goto nla_put_failure;
4793
4794 if (brport_nla_put_flag(skb, flags, mask,
4795 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4796 brport_nla_put_flag(skb, flags, mask,
4797 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4798 brport_nla_put_flag(skb, flags, mask,
4799 IFLA_BRPORT_FAST_LEAVE,
4800 BR_MULTICAST_FAST_LEAVE) ||
4801 brport_nla_put_flag(skb, flags, mask,
4802 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4803 brport_nla_put_flag(skb, flags, mask,
4804 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4805 brport_nla_put_flag(skb, flags, mask,
4806 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4807 brport_nla_put_flag(skb, flags, mask,
4808 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4809 brport_nla_put_flag(skb, flags, mask,
4810 IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4811 brport_nla_put_flag(skb, flags, mask,
4812 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4813 brport_nla_put_flag(skb, flags, mask,
4814 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
4815 nla_nest_cancel(skb, protinfo);
4816 goto nla_put_failure;
4817 }
4818
4819 nla_nest_end(skb, protinfo);
4820
4821 nlmsg_end(skb, nlh);
4822 return 0;
4823 nla_put_failure:
4824 nlmsg_cancel(skb, nlh);
4825 return err ? err : -EMSGSIZE;
4826 }
4827 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
4828
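/*
 * A VEB-capable NIC would typically wrap the helper above from its own
 * ndo_bridge_getlink (hypothetical driver, condensed from the pattern used
 * by real drivers):
 *
 *	static int foo_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 *					  struct net_device *dev,
 *					  u32 filter_mask, int nlflags)
 *	{
 *		u16 mode = foo_is_vepa(dev) ? BRIDGE_MODE_VEPA
 *					    : BRIDGE_MODE_VEB;
 *
 *		return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
 *					       nlflags, filter_mask, NULL);
 *	}
 */
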
4829 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4830 bool strict_check, u32 *filter_mask,
4831 struct netlink_ext_ack *extack)
4832 {
4833 struct nlattr *tb[IFLA_MAX+1];
4834 int err, i;
4835
4836 if (strict_check) {
4837 struct ifinfomsg *ifm;
4838
4839 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4840 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4841 return -EINVAL;
4842 }
4843
4844 ifm = nlmsg_data(nlh);
4845 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4846 ifm->ifi_change || ifm->ifi_index) {
4847 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
4848 return -EINVAL;
4849 }
4850
4851 err = nlmsg_parse_deprecated_strict(nlh,
4852 sizeof(struct ifinfomsg),
4853 tb, IFLA_MAX, ifla_policy,
4854 extack);
4855 } else {
4856 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4857 tb, IFLA_MAX, ifla_policy,
4858 extack);
4859 }
4860 if (err < 0)
4861 return err;
4862
4863 /* new attributes should only be added with strict checking */
4864 for (i = 0; i <= IFLA_MAX; ++i) {
4865 if (!tb[i])
4866 continue;
4867
4868 switch (i) {
4869 case IFLA_EXT_MASK:
4870 *filter_mask = nla_get_u32(tb[i]);
4871 break;
4872 default:
4873 if (strict_check) {
4874 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
4875 return -EINVAL;
4876 }
4877 }
4878 }
4879
4880 return 0;
4881 }
4882
4883 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
4884 {
4885 const struct nlmsghdr *nlh = cb->nlh;
4886 struct net *net = sock_net(skb->sk);
4887 struct net_device *dev;
4888 int idx = 0;
4889 u32 portid = NETLINK_CB(cb->skb).portid;
4890 u32 seq = nlh->nlmsg_seq;
4891 u32 filter_mask = 0;
4892 int err;
4893
4894 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
4895 cb->extack);
4896 if (err < 0 && cb->strict_check)
4897 return err;
4898
4899 rcu_read_lock();
4900 for_each_netdev_rcu(net, dev) {
4901 const struct net_device_ops *ops = dev->netdev_ops;
4902 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4903
4904 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
4905 if (idx >= cb->args[0]) {
4906 err = br_dev->netdev_ops->ndo_bridge_getlink(
4907 skb, portid, seq, dev,
4908 filter_mask, NLM_F_MULTI);
4909 if (err < 0 && err != -EOPNOTSUPP) {
4910 if (likely(skb->len))
4911 break;
4912
4913 goto out_err;
4914 }
4915 }
4916 idx++;
4917 }
4918
4919 if (ops->ndo_bridge_getlink) {
4920 if (idx >= cb->args[0]) {
4921 err = ops->ndo_bridge_getlink(skb, portid,
4922 seq, dev,
4923 filter_mask,
4924 NLM_F_MULTI);
4925 if (err < 0 && err != -EOPNOTSUPP) {
4926 if (likely(skb->len))
4927 break;
4928
4929 goto out_err;
4930 }
4931 }
4932 idx++;
4933 }
4934 }
4935 err = skb->len;
4936 out_err:
4937 rcu_read_unlock();
4938 cb->args[0] = idx;
4939
4940 return err;
4941 }
4942
4943 static inline size_t bridge_nlmsg_size(void)
4944 {
4945 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
4946 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
4947 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
4948 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
4949 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
4950 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
4951 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
4952 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
4953 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
4954 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
4955 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
4956 }
4957
4958 static int rtnl_bridge_notify(struct net_device *dev)
4959 {
4960 struct net *net = dev_net(dev);
4961 struct sk_buff *skb;
4962 int err = -EOPNOTSUPP;
4963
4964 if (!dev->netdev_ops->ndo_bridge_getlink)
4965 return 0;
4966
4967 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
4968 if (!skb) {
4969 err = -ENOMEM;
4970 goto errout;
4971 }
4972
4973 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
4974 if (err < 0)
4975 goto errout;
4976
4977 /* Notification info is only filled for bridge ports, not the bridge
4978 * device itself. Therefore, a zero notification length is valid and
4979 * should not result in an error.
4980 */
4981 if (!skb->len)
4982 goto errout;
4983
4984 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
4985 return 0;
4986 errout:
4987 WARN_ON(err == -EMSGSIZE);
4988 kfree_skb(skb);
4989 if (err)
4990 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4991 return err;
4992 }
4993
4994 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
4995 struct netlink_ext_ack *extack)
4996 {
4997 struct net *net = sock_net(skb->sk);
4998 struct ifinfomsg *ifm;
4999 struct net_device *dev;
5000 struct nlattr *br_spec, *attr = NULL;
5001 int rem, err = -EOPNOTSUPP;
5002 u16 flags = 0;
5003 bool have_flags = false;
5004
5005 if (nlmsg_len(nlh) < sizeof(*ifm))
5006 return -EINVAL;
5007
5008 ifm = nlmsg_data(nlh);
5009 if (ifm->ifi_family != AF_BRIDGE)
5010 return -EPFNOSUPPORT;
5011
5012 dev = __dev_get_by_index(net, ifm->ifi_index);
5013 if (!dev) {
5014 NL_SET_ERR_MSG(extack, "unknown ifindex");
5015 return -ENODEV;
5016 }
5017
5018 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5019 if (br_spec) {
5020 nla_for_each_nested(attr, br_spec, rem) {
5021 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5022 if (nla_len(attr) < sizeof(flags))
5023 return -EINVAL;
5024
5025 have_flags = true;
5026 flags = nla_get_u16(attr);
5027 break;
5028 }
5029 }
5030 }
5031
5032 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5033 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5034
5035 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
5036 err = -EOPNOTSUPP;
5037 goto out;
5038 }
5039
5040 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5041 extack);
5042 if (err)
5043 goto out;
5044
5045 flags &= ~BRIDGE_FLAGS_MASTER;
5046 }
5047
5048 if ((flags & BRIDGE_FLAGS_SELF)) {
5049 if (!dev->netdev_ops->ndo_bridge_setlink)
5050 err = -EOPNOTSUPP;
5051 else
5052 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5053 flags,
5054 extack);
5055 if (!err) {
5056 flags &= ~BRIDGE_FLAGS_SELF;
5057
5058 /* Generate event to notify upper layer of bridge
5059 * change
5060 */
5061 err = rtnl_bridge_notify(dev);
5062 }
5063 }
5064
5065 if (have_flags)
5066 memcpy(nla_data(attr), &flags, sizeof(flags));
5067 out:
5068 return err;
5069 }
5070
5071 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5072 struct netlink_ext_ack *extack)
5073 {
5074 struct net *net = sock_net(skb->sk);
5075 struct ifinfomsg *ifm;
5076 struct net_device *dev;
5077 struct nlattr *br_spec, *attr = NULL;
5078 int rem, err = -EOPNOTSUPP;
5079 u16 flags = 0;
5080 bool have_flags = false;
5081
5082 if (nlmsg_len(nlh) < sizeof(*ifm))
5083 return -EINVAL;
5084
5085 ifm = nlmsg_data(nlh);
5086 if (ifm->ifi_family != AF_BRIDGE)
5087 return -EPFNOSUPPORT;
5088
5089 dev = __dev_get_by_index(net, ifm->ifi_index);
5090 if (!dev) {
5091 NL_SET_ERR_MSG(extack, "unknown ifindex");
5092 return -ENODEV;
5093 }
5094
5095 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5096 if (br_spec) {
5097 nla_for_each_nested(attr, br_spec, rem) {
5098 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5099 if (nla_len(attr) < sizeof(flags))
5100 return -EINVAL;
5101
5102 have_flags = true;
5103 flags = nla_get_u16(attr);
5104 break;
5105 }
5106 }
5107 }
5108
5109 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5110 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5111
5112 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5113 err = -EOPNOTSUPP;
5114 goto out;
5115 }
5116
5117 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5118 if (err)
5119 goto out;
5120
5121 flags &= ~BRIDGE_FLAGS_MASTER;
5122 }
5123
5124 if ((flags & BRIDGE_FLAGS_SELF)) {
5125 if (!dev->netdev_ops->ndo_bridge_dellink)
5126 err = -EOPNOTSUPP;
5127 else
5128 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5129 flags);
5130
5131 if (!err) {
5132 flags &= ~BRIDGE_FLAGS_SELF;
5133
5134 /* Generate event to notify upper layer of bridge
5135 * change
5136 */
5137 err = rtnl_bridge_notify(dev);
5138 }
5139 }
5140
5141 if (have_flags)
5142 memcpy(nla_data(attr), &flags, sizeof(flags));
5143 out:
5144 return err;
5145 }
5146
5147 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5148 {
5149 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5150 (!idxattr || idxattr == attrid);
5151 }
5152
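/*
 * IFLA_STATS_FILTER_BIT(attr) is (1 << (attr - 1)), so, for example, a
 * request whose filter_mask is IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64)
 * passes the check above only for the 64-bit software counters, and the
 * idxattr cursor lets an interrupted dump resume at the attribute where it
 * previously ran out of room.
 */
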
5153 static bool
5154 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5155 {
5156 return dev->netdev_ops &&
5157 dev->netdev_ops->ndo_has_offload_stats &&
5158 dev->netdev_ops->ndo_get_offload_stats &&
5159 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5160 }
5161
5162 static unsigned int
5163 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5164 {
5165 return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5166 sizeof(struct rtnl_link_stats64) : 0;
5167 }
5168
5169 static int
5170 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5171 struct sk_buff *skb)
5172 {
5173 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5174 struct nlattr *attr = NULL;
5175 void *attr_data;
5176 int err;
5177
5178 if (!size)
5179 return -ENODATA;
5180
5181 attr = nla_reserve_64bit(skb, attr_id, size,
5182 IFLA_OFFLOAD_XSTATS_UNSPEC);
5183 if (!attr)
5184 return -EMSGSIZE;
5185
5186 attr_data = nla_data(attr);
5187 memset(attr_data, 0, size);
5188
5189 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5190 if (err)
5191 return err;
5192
5193 return 0;
5194 }
5195
5196 static unsigned int
5197 rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5198 enum netdev_offload_xstats_type type)
5199 {
5200 bool enabled = netdev_offload_xstats_enabled(dev, type);
5201
5202 return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5203 }
5204
5205 struct rtnl_offload_xstats_request_used {
5206 bool request;
5207 bool used;
5208 };
5209
5210 static int
5211 rtnl_offload_xstats_get_stats(struct net_device *dev,
5212 enum netdev_offload_xstats_type type,
5213 struct rtnl_offload_xstats_request_used *ru,
5214 struct rtnl_hw_stats64 *stats,
5215 struct netlink_ext_ack *extack)
5216 {
5217 bool request;
5218 bool used;
5219 int err;
5220
5221 request = netdev_offload_xstats_enabled(dev, type);
5222 if (!request) {
5223 used = false;
5224 goto out;
5225 }
5226
5227 err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5228 if (err)
5229 return err;
5230
5231 out:
5232 if (ru) {
5233 ru->request = request;
5234 ru->used = used;
5235 }
5236 return 0;
5237 }
5238
5239 static int
5240 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5241 struct rtnl_offload_xstats_request_used *ru)
5242 {
5243 struct nlattr *nest;
5244
5245 nest = nla_nest_start(skb, attr_id);
5246 if (!nest)
5247 return -EMSGSIZE;
5248
5249 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5250 goto nla_put_failure;
5251
5252 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5253 goto nla_put_failure;
5254
5255 nla_nest_end(skb, nest);
5256 return 0;
5257
5258 nla_put_failure:
5259 nla_nest_cancel(skb, nest);
5260 return -EMSGSIZE;
5261 }
5262
5263 static int
5264 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5265 struct netlink_ext_ack *extack)
5266 {
5267 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5268 struct rtnl_offload_xstats_request_used ru_l3;
5269 struct nlattr *nest;
5270 int err;
5271
5272 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5273 if (err)
5274 return err;
5275
5276 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5277 if (!nest)
5278 return -EMSGSIZE;
5279
5280 if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5281 IFLA_OFFLOAD_XSTATS_L3_STATS,
5282 &ru_l3))
5283 goto nla_put_failure;
5284
5285 nla_nest_end(skb, nest);
5286 return 0;
5287
5288 nla_put_failure:
5289 nla_nest_cancel(skb, nest);
5290 return -EMSGSIZE;
5291 }
5292
5293 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
5294 int *prividx, u32 off_filter_mask,
5295 struct netlink_ext_ack *extack)
5296 {
5297 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5298 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5299 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
5300 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5301 bool have_data = false;
5302 int err;
5303
5304 if (*prividx <= attr_id_cpu_hit &&
5305 (off_filter_mask &
5306 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
5307 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5308 if (!err) {
5309 have_data = true;
5310 } else if (err != -ENODATA) {
5311 *prividx = attr_id_cpu_hit;
5312 return err;
5313 }
5314 }
5315
5316 if (*prividx <= attr_id_hw_s_info &&
5317 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5318 *prividx = attr_id_hw_s_info;
5319
5320 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5321 if (err)
5322 return err;
5323
5324 have_data = true;
5325 *prividx = 0;
5326 }
5327
5328 if (*prividx <= attr_id_l3_stats &&
5329 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5330 unsigned int size_l3;
5331 struct nlattr *attr;
5332
5333 *prividx = attr_id_l3_stats;
5334
5335 size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5336 if (!size_l3)
5337 goto skip_l3_stats;
5338 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5339 IFLA_OFFLOAD_XSTATS_UNSPEC);
5340 if (!attr)
5341 return -EMSGSIZE;
5342
5343 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5344 nla_data(attr), extack);
5345 if (err)
5346 return err;
5347
5348 have_data = true;
5349 skip_l3_stats:
5350 *prividx = 0;
5351 }
5352
5353 if (!have_data)
5354 return -ENODATA;
5355
5356 *prividx = 0;
5357 return 0;
5358 }
5359
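/*
 * Resume protocol used above, in short: before emitting each nested
 * attribute, *prividx is set to that attribute's id; on -EMSGSIZE the caller
 * preserves the value, and the next dump pass skips every attribute with a
 * smaller id, so a fill that overflowed at L3_STATS restarts there rather
 * than re-emitting CPU_HIT.
 */
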
5360 static unsigned int
5361 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5362 enum netdev_offload_xstats_type type)
5363 {
5364 bool enabled = netdev_offload_xstats_enabled(dev, type);
5365
5366 return nla_total_size(0) +
5367 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5368 nla_total_size(sizeof(u8)) +
5369 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
5370 (enabled ? nla_total_size(sizeof(u8)) : 0) +
5371 0;
5372 }
5373
5374 static unsigned int
5375 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5376 {
5377 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5378
5379 return nla_total_size(0) +
5380 /* IFLA_OFFLOAD_XSTATS_L3_STATS */
5381 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5382 0;
5383 }
5384
5385 static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5386 u32 off_filter_mask)
5387 {
5388 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5389 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5390 int nla_size = 0;
5391 int size;
5392
5393 if (off_filter_mask &
5394 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5395 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5396 nla_size += nla_total_size_64bit(size);
5397 }
5398
5399 if (off_filter_mask &
5400 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5401 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5402
5403 if (off_filter_mask &
5404 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5405 size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5406 nla_size += nla_total_size_64bit(size);
5407 }
5408
5409 if (nla_size != 0)
5410 nla_size += nla_total_size(0);
5411
5412 return nla_size;
5413 }
5414
5415 struct rtnl_stats_dump_filters {
5416 /* mask[0] filters outer attributes. Then individual nests have their
5417 * filtering mask at the index of the nested attribute.
5418 */
5419 u32 mask[IFLA_STATS_MAX + 1];
5420 };
5421
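/*
 * Example (illustrative): to dump only the hardware-offloaded L3 counters,
 * mask[0] carries IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS) and
 * mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] carries
 * IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS).
 */
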
5422 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5423 int type, u32 pid, u32 seq, u32 change,
5424 unsigned int flags,
5425 const struct rtnl_stats_dump_filters *filters,
5426 int *idxattr, int *prividx,
5427 struct netlink_ext_ack *extack)
5428 {
5429 unsigned int filter_mask = filters->mask[0];
5430 struct if_stats_msg *ifsm;
5431 struct nlmsghdr *nlh;
5432 struct nlattr *attr;
5433 int s_prividx = *prividx;
5434 int err;
5435
5436 ASSERT_RTNL();
5437
5438 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5439 if (!nlh)
5440 return -EMSGSIZE;
5441
5442 ifsm = nlmsg_data(nlh);
5443 ifsm->family = PF_UNSPEC;
5444 ifsm->pad1 = 0;
5445 ifsm->pad2 = 0;
5446 ifsm->ifindex = dev->ifindex;
5447 ifsm->filter_mask = filter_mask;
5448
5449 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5450 struct rtnl_link_stats64 *sp;
5451
5452 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5453 sizeof(struct rtnl_link_stats64),
5454 IFLA_STATS_UNSPEC);
5455 if (!attr) {
5456 err = -EMSGSIZE;
5457 goto nla_put_failure;
5458 }
5459
5460 sp = nla_data(attr);
5461 dev_get_stats(dev, sp);
5462 }
5463
5464 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5465 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5466
5467 if (ops && ops->fill_linkxstats) {
5468 *idxattr = IFLA_STATS_LINK_XSTATS;
5469 attr = nla_nest_start_noflag(skb,
5470 IFLA_STATS_LINK_XSTATS);
5471 if (!attr) {
5472 err = -EMSGSIZE;
5473 goto nla_put_failure;
5474 }
5475
5476 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5477 nla_nest_end(skb, attr);
5478 if (err)
5479 goto nla_put_failure;
5480 *idxattr = 0;
5481 }
5482 }
5483
5484 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5485 *idxattr)) {
5486 const struct rtnl_link_ops *ops = NULL;
5487 const struct net_device *master;
5488
5489 master = netdev_master_upper_dev_get(dev);
5490 if (master)
5491 ops = master->rtnl_link_ops;
5492 if (ops && ops->fill_linkxstats) {
5493 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
5494 attr = nla_nest_start_noflag(skb,
5495 IFLA_STATS_LINK_XSTATS_SLAVE);
5496 if (!attr) {
5497 err = -EMSGSIZE;
5498 goto nla_put_failure;
5499 }
5500
5501 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5502 nla_nest_end(skb, attr);
5503 if (err)
5504 goto nla_put_failure;
5505 *idxattr = 0;
5506 }
5507 }
5508
5509 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5510 *idxattr)) {
5511 u32 off_filter_mask;
5512
5513 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5514 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
5515 attr = nla_nest_start_noflag(skb,
5516 IFLA_STATS_LINK_OFFLOAD_XSTATS);
5517 if (!attr) {
5518 err = -EMSGSIZE;
5519 goto nla_put_failure;
5520 }
5521
5522 err = rtnl_offload_xstats_fill(skb, dev, prividx,
5523 off_filter_mask, extack);
5524 if (err == -ENODATA)
5525 nla_nest_cancel(skb, attr);
5526 else
5527 nla_nest_end(skb, attr);
5528
5529 if (err && err != -ENODATA)
5530 goto nla_put_failure;
5531 *idxattr = 0;
5532 }
5533
5534 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5535 struct rtnl_af_ops *af_ops;
5536
5537 *idxattr = IFLA_STATS_AF_SPEC;
5538 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
5539 if (!attr) {
5540 err = -EMSGSIZE;
5541 goto nla_put_failure;
5542 }
5543
5544 rcu_read_lock();
5545 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5546 if (af_ops->fill_stats_af) {
5547 struct nlattr *af;
5548
5549 af = nla_nest_start_noflag(skb,
5550 af_ops->family);
5551 if (!af) {
5552 rcu_read_unlock();
5553 err = -EMSGSIZE;
5554 goto nla_put_failure;
5555 }
5556 err = af_ops->fill_stats_af(skb, dev);
5557
5558 if (err == -ENODATA) {
5559 nla_nest_cancel(skb, af);
5560 } else if (err < 0) {
5561 rcu_read_unlock();
5562 goto nla_put_failure;
5563 }
5564
5565 nla_nest_end(skb, af);
5566 }
5567 }
5568 rcu_read_unlock();
5569
5570 nla_nest_end(skb, attr);
5571
5572 *idxattr = 0;
5573 }
5574
5575 nlmsg_end(skb, nlh);
5576
5577 return 0;
5578
5579 nla_put_failure:
5580 	/* not a multi-part message or no progress means a real error */
5581 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5582 nlmsg_cancel(skb, nlh);
5583 else
5584 nlmsg_end(skb, nlh);
5585
5586 return err;
5587 }
5588
5589 static size_t if_nlmsg_stats_size(const struct net_device *dev,
5590 const struct rtnl_stats_dump_filters *filters)
5591 {
5592 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
5593 unsigned int filter_mask = filters->mask[0];
5594
5595 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
5596 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5597
5598 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5599 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5600 int attr = IFLA_STATS_LINK_XSTATS;
5601
5602 if (ops && ops->get_linkxstats_size) {
5603 size += nla_total_size(ops->get_linkxstats_size(dev,
5604 attr));
5605 /* for IFLA_STATS_LINK_XSTATS */
5606 size += nla_total_size(0);
5607 }
5608 }
5609
5610 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5611 struct net_device *_dev = (struct net_device *)dev;
5612 const struct rtnl_link_ops *ops = NULL;
5613 const struct net_device *master;
5614
5615 /* netdev_master_upper_dev_get can't take const */
5616 master = netdev_master_upper_dev_get(_dev);
5617 if (master)
5618 ops = master->rtnl_link_ops;
5619 if (ops && ops->get_linkxstats_size) {
5620 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5621
5622 size += nla_total_size(ops->get_linkxstats_size(dev,
5623 attr));
5624 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
5625 size += nla_total_size(0);
5626 }
5627 }
5628
5629 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
5630 u32 off_filter_mask;
5631
5632 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5633 size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5634 }
5635
5636 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5637 struct rtnl_af_ops *af_ops;
5638
5639 /* for IFLA_STATS_AF_SPEC */
5640 size += nla_total_size(0);
5641
5642 rcu_read_lock();
5643 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5644 if (af_ops->get_stats_af_size) {
5645 size += nla_total_size(
5646 af_ops->get_stats_af_size(dev));
5647
5648 /* for AF_* */
5649 size += nla_total_size(0);
5650 }
5651 }
5652 rcu_read_unlock();
5653 }
5654
5655 return size;
5656 }
5657
5658 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
5659
5660 static const struct nla_policy
5661 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
5662 [IFLA_STATS_LINK_OFFLOAD_XSTATS] =
5663 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
5664 };
5665
5666 static const struct nla_policy
5667 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
5668 [IFLA_STATS_GET_FILTERS] =
5669 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
5670 };
5671
5672 static const struct nla_policy
5673 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
5674 [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
5675 };
5676
5677 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
5678 struct rtnl_stats_dump_filters *filters,
5679 struct netlink_ext_ack *extack)
5680 {
5681 struct nlattr *tb[IFLA_STATS_MAX + 1];
5682 int err;
5683 int at;
5684
5685 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
5686 rtnl_stats_get_policy_filters, extack);
5687 if (err < 0)
5688 return err;
5689
5690 for (at = 1; at <= IFLA_STATS_MAX; at++) {
5691 if (tb[at]) {
5692 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
5693 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
5694 return -EINVAL;
5695 }
5696 filters->mask[at] = nla_get_u32(tb[at]);
5697 }
5698 }
5699
5700 return 0;
5701 }
5702
5703 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
5704 u32 filter_mask,
5705 struct rtnl_stats_dump_filters *filters,
5706 struct netlink_ext_ack *extack)
5707 {
5708 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5709 int err;
5710 int i;
5711
5712 filters->mask[0] = filter_mask;
5713 for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
5714 filters->mask[i] = -1U;
5715
5716 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
5717 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
5718 if (err < 0)
5719 return err;
5720
5721 if (tb[IFLA_STATS_GET_FILTERS]) {
5722 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
5723 filters, extack);
5724 if (err)
5725 return err;
5726 }
5727
5728 return 0;
5729 }
5730
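/* Validate an RTM_GETSTATS request header. Under strict checking the
 * padding must be zero and a dump must not pin an ifindex; filter_mask
 * is the one legacy field that may still influence the dump.
 */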
static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
				bool is_dump, struct netlink_ext_ack *extack)
{
	struct if_stats_msg *ifsm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
		return -EINVAL;
	}

	if (!strict_check)
		return 0;

	ifsm = nlmsg_data(nlh);

	/* only requests using strict checks can pass data to influence
	 * the dump. The legacy exception is filter_mask.
	 */
	if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
		return -EINVAL;
	}
	if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
		NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
		return -EINVAL;
	}

	return 0;
}

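/* RTM_GETSTATS doit handler: look up the target device by ifindex, size
 * a reply skb from the parsed filters and unicast a single RTM_NEWSTATS
 * message back to the requester.
 *
 * For reference, a minimal userspace request accepted here could be laid
 * out as below (illustrative sketch, not kernel code):
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct if_stats_msg ifsm;
 *	} req = {
 *		.nlh.nlmsg_len	  = NLMSG_LENGTH(sizeof(struct if_stats_msg)),
 *		.nlh.nlmsg_type	  = RTM_GETSTATS,
 *		.nlh.nlmsg_flags  = NLM_F_REQUEST,
 *		.ifsm.ifindex	  = ifindex,	(must be > 0)
 *		.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
 *	};
 */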
static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct rtnl_stats_dump_filters filters;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	int idxattr = 0, prividx = 0;
	struct if_stats_msg *ifsm;
	struct sk_buff *nskb;
	int err;

	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
				   false, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(nlh);
	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	if (!ifsm->filter_mask) {
		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
		return -EINVAL;
	}

	err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
	if (err)
		return err;

	nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
	if (!nskb)
		return -ENOBUFS;

	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
				  0, &filters, &idxattr, &prividx, extack);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else {
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
	}

	return err;
}

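/* RTM_GETSTATS dump handler: walk the per-net device hash table and emit
 * one RTM_NEWSTATS message per device. cb->args[] carries the hash bucket,
 * the device index and the attribute/private indices, so a dump that runs
 * out of skb space resumes exactly where it stopped.
 */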
static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	int h, s_h, err, s_idx, s_idxattr, s_prividx;
	struct rtnl_stats_dump_filters filters;
	struct net *net = sock_net(skb->sk);
	unsigned int flags = NLM_F_MULTI;
	struct if_stats_msg *ifsm;
	struct hlist_head *head;
	struct net_device *dev;
	int idx = 0;

	s_h = cb->args[0];
	s_idx = cb->args[1];
	s_idxattr = cb->args[2];
	s_prividx = cb->args[3];

	cb->seq = net->dev_base_seq;

	err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(cb->nlh);
	if (!ifsm->filter_mask) {
		NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
		return -EINVAL;
	}

	err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
				   extack);
	if (err)
		return err;

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, 0,
						  flags, &filters,
						  &s_idxattr, &s_prividx,
						  extack);
			/* If we ran out of room on the first message,
			 * we're in trouble.
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;
			s_prividx = 0;
			s_idxattr = 0;
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	cb->args[3] = s_prividx;
	cb->args[2] = s_idxattr;
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}

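/* Notify RTNLGRP_STATS listeners that the HW offload-stats state of a
 * device has changed. The reply is narrowed to the
 * IFLA_OFFLOAD_XSTATS_HW_S_INFO part of IFLA_STATS_LINK_OFFLOAD_XSTATS.
 */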
void rtnl_offload_xstats_notify(struct net_device *dev)
{
	struct rtnl_stats_dump_filters response_filters = {};
	struct net *net = dev_net(dev);
	int idxattr = 0, prividx = 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	ASSERT_RTNL();

	response_filters.mask[0] |=
		IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
	response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
		IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);

	skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
			GFP_KERNEL);
	if (!skb)
		goto errout;

	err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
				  &response_filters, &idxattr, &prividx, NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_STATS, err);
}
EXPORT_SYMBOL(rtnl_offload_xstats_notify);

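/* RTM_SETSTATS doit handler. The only settable knob at present is
 * IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, which switches HW L3 stats
 * collection on or off; a successful change is announced through
 * rtnl_offload_xstats_notify().
 */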
static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
	struct rtnl_stats_dump_filters response_filters = {};
	struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	struct if_stats_msg *ifsm;
	bool notify = false;
	int err;

	err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
				   false, extack);
	if (err)
		return err;

	ifsm = nlmsg_data(nlh);
	if (ifsm->family != AF_UNSPEC) {
		NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
		return -EINVAL;
	}

	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	if (ifsm->filter_mask) {
		NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
		return -EINVAL;
	}

	err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
			  ifla_stats_set_policy, extack);
	if (err < 0)
		return err;

	if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
		u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);

		if (req)
			err = netdev_offload_xstats_enable(dev, t_l3, extack);
		else
			err = netdev_offload_xstats_disable(dev, t_l3);

		if (!err)
			notify = true;
		else if (err != -EALREADY)
			return err;

		response_filters.mask[0] |=
			IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
		response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
			IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
	}

	if (notify)
		rtnl_offload_xstats_notify(dev);

	return 0;
}

/* Process one rtnetlink message. */

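/* Dispatch on (address family, message type): resolve the rtnl_link entry
 * under RCU, falling back to PF_UNSPEC. Dumps are handed off to
 * netlink_dump_start(); plain doit handlers run either unlocked
 * (RTNL_FLAG_DOIT_UNLOCKED) or under the RTNL mutex. A module reference
 * is held across the handler so its owner cannot be unloaded mid-call.
 */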
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *link;
	enum rtnl_kinds kind;
	struct module *owner;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* Every message must carry at least 1 byte of payload: the
	 * struct rtgenmsg holding the address family.
	 */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = rtnl_msgtype_kind(type);

	if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	rcu_read_lock();
	if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u32 min_dump_alloc = 0;

		link = rtnl_get_link(family, type);
		if (!link || !link->dumpit) {
			family = PF_UNSPEC;
			link = rtnl_get_link(family, type);
			if (!link || !link->dumpit)
				goto err_unlock;
		}
		owner = link->owner;
		dumpit = link->dumpit;

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		err = 0;
		/* need to do this before rcu_read_unlock() */
		if (!try_module_get(owner))
			err = -EPROTONOSUPPORT;

		rcu_read_unlock();

		rtnl = net->rtnl;
		if (err == 0) {
			struct netlink_dump_control c = {
				.dump = dumpit,
				.min_dump_alloc = min_dump_alloc,
				.module = owner,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
			/* netlink_dump_start() keeps a reference on the
			 * module if the dump is still in progress.
			 */
			module_put(owner);
		}
		return err;
	}

	link = rtnl_get_link(family, type);
	if (!link || !link->doit) {
		family = PF_UNSPEC;
		link = rtnl_get_link(PF_UNSPEC, type);
		if (!link || !link->doit)
			goto out_unlock;
	}

	owner = link->owner;
	if (!try_module_get(owner)) {
		err = -EPROTONOSUPPORT;
		goto out_unlock;
	}

	flags = link->flags;
	if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
	    !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
		NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
		module_put(owner);
		goto err_unlock;
	}

	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		doit = link->doit;
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		module_put(owner);
		return err;
	}
	rcu_read_unlock();

	rtnl_lock();
	link = rtnl_get_link(family, type);
	if (link && link->doit)
		err = link->doit(skb, nlh, extack);
	rtnl_unlock();

	module_put(owner);

	return err;

out_unlock:
	rcu_read_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}

static void rtnetlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
}

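/* The multicast-routing report groups expose forwarding-cache activity,
 * so binding to them requires CAP_NET_ADMIN in the user namespace that
 * owns this netns.
 */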
static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}

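/* Netdevice notifier: translate the device events below into RTM_NEWLINK
 * notifications so userspace can track link changes without polling.
 */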
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call = rtnetlink_event,
};

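/* Per-netns setup: create the NETLINK_ROUTE kernel socket that receives
 * rtnetlink requests and carries notifications for this namespace.
 */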
static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.cb_mutex	= &rtnl_mutex,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};

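/* Wire up rtnetlink at boot: register the pernet operations and the
 * netdevice notifier, then install the protocol-independent doit/dumpit
 * handlers for link, address, route, FDB, bridge and stats messages.
 */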
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
		      RTNL_FLAG_BULK_DEL_SUPPORTED);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
}