1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *      NET3    Protocol independent device support routines.
4  *
5  *	Derived from the non IP parts of dev.c 1.0.19
6  *              Authors:	Ross Biro
7  *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
8  *				Mark Evans, <evansmp@uhura.aston.ac.uk>
9  *
10  *	Additional Authors:
11  *		Florian la Roche <rzsfl@rz.uni-sb.de>
12  *		Alan Cox <gw4pts@gw4pts.ampr.org>
13  *		David Hinds <dahinds@users.sourceforge.net>
14  *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
15  *		Adam Sulmicki <adam@cfar.umd.edu>
16  *              Pekka Riikonen <priikone@poseidon.pspt.fi>
17  *
18  *	Changes:
19  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
20  *                                      to 2 if register_netdev gets called
21  *                                      before net_dev_init & also removed a
22  *                                      few lines of code in the process.
23  *		Alan Cox	:	device private ioctl copies fields back.
24  *		Alan Cox	:	Transmit queue code does relevant
25  *					stunts to keep the queue safe.
26  *		Alan Cox	:	Fixed double lock.
27  *		Alan Cox	:	Fixed promisc NULL pointer trap
28  *		????????	:	Support the full private ioctl range
29  *		Alan Cox	:	Moved ioctl permission check into
30  *					drivers
31  *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
32  *		Alan Cox	:	100 backlog just doesn't cut it when
33  *					you start doing multicast video 8)
34  *		Alan Cox	:	Rewrote net_bh and list manager.
35  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
36  *		Alan Cox	:	Took out transmit every packet pass
37  *					Saved a few bytes in the ioctl handler
38  *		Alan Cox	:	Network driver sets packet type before
39  *					calling netif_rx. Saves a function
40  *					call a packet.
41  *		Alan Cox	:	Hashed net_bh()
42  *		Richard Kooijman:	Timestamp fixes.
43  *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
44  *		Alan Cox	:	Device lock protection.
45  *              Alan Cox        :       Fixed nasty side effect of device close
46  *					changes.
47  *		Rudi Cilibrasi	:	Pass the right thing to
48  *					set_mac_address()
49  *		Dave Miller	:	32bit quantity for the device lock to
50  *					make it work out on a Sparc.
51  *		Bjorn Ekwall	:	Added KERNELD hack.
52  *		Alan Cox	:	Cleaned up the backlog initialise.
53  *		Craig Metz	:	SIOCGIFCONF fix if space for under
54  *					1 device.
55  *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
56  *					is no device open function.
57  *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
58  *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
59  *		Cyrus Durgin	:	Cleaned for KMOD
60  *		Adam Sulmicki   :	Bug Fix : Network Device Unload
61  *					A network device unload needs to purge
62  *					the backlog queue.
63  *	Paul Rusty Russell	:	SIOCSIFNAME
64  *              Pekka Riikonen  :	Netdev boot-time settings code
65  *              Andrew Morton   :       Make unregister_netdevice wait
66  *                                      indefinitely on dev->refcnt
67  *              J Hadi Salim    :       - Backlog queue sampling
68  *				        - netif_rx() feedback
69  */
70 
71 #include <linux/uaccess.h>
72 #include <linux/bitmap.h>
73 #include <linux/capability.h>
74 #include <linux/cpu.h>
75 #include <linux/types.h>
76 #include <linux/kernel.h>
77 #include <linux/hash.h>
78 #include <linux/slab.h>
79 #include <linux/sched.h>
80 #include <linux/sched/mm.h>
81 #include <linux/mutex.h>
82 #include <linux/rwsem.h>
83 #include <linux/string.h>
84 #include <linux/mm.h>
85 #include <linux/socket.h>
86 #include <linux/sockios.h>
87 #include <linux/errno.h>
88 #include <linux/interrupt.h>
89 #include <linux/if_ether.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/ethtool.h>
93 #include <linux/skbuff.h>
94 #include <linux/kthread.h>
95 #include <linux/bpf.h>
96 #include <linux/bpf_trace.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <net/busy_poll.h>
100 #include <linux/rtnetlink.h>
101 #include <linux/stat.h>
102 #include <net/dsa.h>
103 #include <net/dst.h>
104 #include <net/dst_metadata.h>
105 #include <net/gro.h>
106 #include <net/pkt_sched.h>
107 #include <net/pkt_cls.h>
108 #include <net/checksum.h>
109 #include <net/xfrm.h>
110 #include <net/tcx.h>
111 #include <linux/highmem.h>
112 #include <linux/init.h>
113 #include <linux/module.h>
114 #include <linux/netpoll.h>
115 #include <linux/rcupdate.h>
116 #include <linux/delay.h>
117 #include <net/iw_handler.h>
118 #include <asm/current.h>
119 #include <linux/audit.h>
120 #include <linux/dmaengine.h>
121 #include <linux/err.h>
122 #include <linux/ctype.h>
123 #include <linux/if_arp.h>
124 #include <linux/if_vlan.h>
125 #include <linux/ip.h>
126 #include <net/ip.h>
127 #include <net/mpls.h>
128 #include <linux/ipv6.h>
129 #include <linux/in.h>
130 #include <linux/jhash.h>
131 #include <linux/random.h>
132 #include <trace/events/napi.h>
133 #include <trace/events/net.h>
134 #include <trace/events/skb.h>
135 #include <trace/events/qdisc.h>
136 #include <trace/events/xdp.h>
137 #include <linux/inetdevice.h>
138 #include <linux/cpu_rmap.h>
139 #include <linux/static_key.h>
140 #include <linux/hashtable.h>
141 #include <linux/vmalloc.h>
142 #include <linux/if_macvlan.h>
143 #include <linux/errqueue.h>
144 #include <linux/hrtimer.h>
145 #include <linux/netfilter_netdev.h>
146 #include <linux/crash_dump.h>
147 #include <linux/sctp.h>
148 #include <net/udp_tunnel.h>
149 #include <linux/net_namespace.h>
150 #include <linux/indirect_call_wrapper.h>
151 #include <net/devlink.h>
152 #include <linux/pm_runtime.h>
153 #include <linux/prandom.h>
154 #include <linux/once_lite.h>
155 #include <net/netdev_rx_queue.h>
156 
157 #include "dev.h"
158 #include "net-sysfs.h"
159 
160 static DEFINE_SPINLOCK(ptype_lock);
161 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
162 struct list_head ptype_all __read_mostly;	/* Taps */
163 
164 static int netif_rx_internal(struct sk_buff *skb);
165 static int call_netdevice_notifiers_extack(unsigned long val,
166 					   struct net_device *dev,
167 					   struct netlink_ext_ack *extack);
168 static struct napi_struct *napi_by_id(unsigned int napi_id);
169 
170 /*
171  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
172  * semaphore.
173  *
174  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
175  *
176  * Writers must hold the rtnl semaphore while they loop through the
177  * dev_base_head list, and hold dev_base_lock for writing when they do the
178  * actual updates.  This allows pure readers to access the list even
179  * while a writer is preparing to update it.
180  *
181  * To put it another way, dev_base_lock is held for writing only to
182  * protect against pure readers; the rtnl semaphore provides the
183  * protection against other writers.
184  *
185  * See, for example usages, register_netdevice() and
186  * unregister_netdevice(), which must be called with the rtnl
187  * semaphore held.
188  */
189 DEFINE_RWLOCK(dev_base_lock);
190 EXPORT_SYMBOL(dev_base_lock);
191 
192 static DEFINE_MUTEX(ifalias_mutex);
193 
194 /* protects napi_hash addition/deletion and napi_gen_id */
195 static DEFINE_SPINLOCK(napi_hash_lock);
196 
197 static unsigned int napi_gen_id = NR_CPUS;
198 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
199 
200 static DECLARE_RWSEM(devnet_rename_sem);
201 
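/* Bump the per-namespace device list generation counter. Adding or removing
 * a device changes this sequence, and rtnetlink dumps compare it to detect
 * that the list changed while a dump was in progress. Zero is skipped so it
 * never looks like an unset sequence number.
 */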
202 static inline void dev_base_seq_inc(struct net *net)
203 {
204 	while (++net->dev_base_seq == 0)
205 		;
206 }
207 
208 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
209 {
210 	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
211 
212 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
213 }
214 
215 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
216 {
217 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
218 }
219 
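/* Guards for softnet_data::input_pkt_queue. With RPS the queue can be
 * touched from other CPUs, so its spinlock is taken with IRQs disabled.
 * Without RPS, disabling local interrupts is enough; on PREEMPT_RT
 * (without RPS) these helpers are intentionally a no-op.
 */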
220 static inline void rps_lock_irqsave(struct softnet_data *sd,
221 				    unsigned long *flags)
222 {
223 	if (IS_ENABLED(CONFIG_RPS))
224 		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
225 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
226 		local_irq_save(*flags);
227 }
228 
229 static inline void rps_lock_irq_disable(struct softnet_data *sd)
230 {
231 	if (IS_ENABLED(CONFIG_RPS))
232 		spin_lock_irq(&sd->input_pkt_queue.lock);
233 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
234 		local_irq_disable();
235 }
236 
237 static inline void rps_unlock_irq_restore(struct softnet_data *sd,
238 					  unsigned long *flags)
239 {
240 	if (IS_ENABLED(CONFIG_RPS))
241 		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
242 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
243 		local_irq_restore(*flags);
244 }
245 
246 static inline void rps_unlock_irq_enable(struct softnet_data *sd)
247 {
248 	if (IS_ENABLED(CONFIG_RPS))
249 		spin_unlock_irq(&sd->input_pkt_queue.lock);
250 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
251 		local_irq_enable();
252 }
253 
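/* Each net_device owns one name node for its primary name plus one per
 * alternative name (altname). All of them are hashed by name into the
 * per-namespace dev_name_head table, so __dev_get_by_name() and its RCU
 * variant can resolve either kind of name to the device.
 */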
254 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
255 						       const char *name)
256 {
257 	struct netdev_name_node *name_node;
258 
259 	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
260 	if (!name_node)
261 		return NULL;
262 	INIT_HLIST_NODE(&name_node->hlist);
263 	name_node->dev = dev;
264 	name_node->name = name;
265 	return name_node;
266 }
267 
268 static struct netdev_name_node *
269 netdev_name_node_head_alloc(struct net_device *dev)
270 {
271 	struct netdev_name_node *name_node;
272 
273 	name_node = netdev_name_node_alloc(dev, dev->name);
274 	if (!name_node)
275 		return NULL;
276 	INIT_LIST_HEAD(&name_node->list);
277 	return name_node;
278 }
279 
280 static void netdev_name_node_free(struct netdev_name_node *name_node)
281 {
282 	kfree(name_node);
283 }
284 
285 static void netdev_name_node_add(struct net *net,
286 				 struct netdev_name_node *name_node)
287 {
288 	hlist_add_head_rcu(&name_node->hlist,
289 			   dev_name_hash(net, name_node->name));
290 }
291 
292 static void netdev_name_node_del(struct netdev_name_node *name_node)
293 {
294 	hlist_del_rcu(&name_node->hlist);
295 }
296 
297 static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
298 							const char *name)
299 {
300 	struct hlist_head *head = dev_name_hash(net, name);
301 	struct netdev_name_node *name_node;
302 
303 	hlist_for_each_entry(name_node, head, hlist)
304 		if (!strcmp(name_node->name, name))
305 			return name_node;
306 	return NULL;
307 }
308 
309 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
310 							    const char *name)
311 {
312 	struct hlist_head *head = dev_name_hash(net, name);
313 	struct netdev_name_node *name_node;
314 
315 	hlist_for_each_entry_rcu(name_node, head, hlist)
316 		if (!strcmp(name_node->name, name))
317 			return name_node;
318 	return NULL;
319 }
320 
321 bool netdev_name_in_use(struct net *net, const char *name)
322 {
323 	return netdev_name_node_lookup(net, name);
324 }
325 EXPORT_SYMBOL(netdev_name_in_use);
326 
327 int netdev_name_node_alt_create(struct net_device *dev, const char *name)
328 {
329 	struct netdev_name_node *name_node;
330 	struct net *net = dev_net(dev);
331 
332 	name_node = netdev_name_node_lookup(net, name);
333 	if (name_node)
334 		return -EEXIST;
335 	name_node = netdev_name_node_alloc(dev, name);
336 	if (!name_node)
337 		return -ENOMEM;
338 	netdev_name_node_add(net, name_node);
339 	/* The node that holds dev->name acts as a head of per-device list. */
340 	list_add_tail(&name_node->list, &dev->name_node->list);
341 
342 	return 0;
343 }
344 
345 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
346 {
347 	list_del(&name_node->list);
348 	kfree(name_node->name);
349 	netdev_name_node_free(name_node);
350 }
351 
352 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
353 {
354 	struct netdev_name_node *name_node;
355 	struct net *net = dev_net(dev);
356 
357 	name_node = netdev_name_node_lookup(net, name);
358 	if (!name_node)
359 		return -ENOENT;
360 	/* lookup might have found our primary name or a name belonging
361 	 * to another device.
362 	 */
363 	if (name_node == dev->name_node || name_node->dev != dev)
364 		return -EINVAL;
365 
366 	netdev_name_node_del(name_node);
367 	synchronize_rcu();
368 	__netdev_name_node_alt_destroy(name_node);
369 
370 	return 0;
371 }
372 
373 static void netdev_name_node_alt_flush(struct net_device *dev)
374 {
375 	struct netdev_name_node *name_node, *tmp;
376 
377 	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
378 		__netdev_name_node_alt_destroy(name_node);
379 }
380 
381 /* Device list insertion */
382 static void list_netdevice(struct net_device *dev)
383 {
384 	struct netdev_name_node *name_node;
385 	struct net *net = dev_net(dev);
386 
387 	ASSERT_RTNL();
388 
389 	write_lock(&dev_base_lock);
390 	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
391 	netdev_name_node_add(net, dev->name_node);
392 	hlist_add_head_rcu(&dev->index_hlist,
393 			   dev_index_hash(net, dev->ifindex));
394 	write_unlock(&dev_base_lock);
395 
396 	netdev_for_each_altname(dev, name_node)
397 		netdev_name_node_add(net, name_node);
398 
399 	/* We reserved the ifindex, this can't fail */
400 	WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
401 
402 	dev_base_seq_inc(net);
403 }
404 
405 /* Device list removal
406  * caller must respect an RCU grace period before freeing/reusing dev
407  */
408 static void unlist_netdevice(struct net_device *dev, bool lock)
409 {
410 	struct netdev_name_node *name_node;
411 	struct net *net = dev_net(dev);
412 
413 	ASSERT_RTNL();
414 
415 	xa_erase(&net->dev_by_index, dev->ifindex);
416 
417 	netdev_for_each_altname(dev, name_node)
418 		netdev_name_node_del(name_node);
419 
420 	/* Unlink dev from the device chain */
421 	if (lock)
422 		write_lock(&dev_base_lock);
423 	list_del_rcu(&dev->dev_list);
424 	netdev_name_node_del(dev->name_node);
425 	hlist_del_rcu(&dev->index_hlist);
426 	if (lock)
427 		write_unlock(&dev_base_lock);
428 
429 	dev_base_seq_inc(dev_net(dev));
430 }
431 
432 /*
433  *	Our notifier list
434  */
435 
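/* Raw notifier chain: it provides no locking of its own, so additions,
 * removals and the notifier calls are expected to be serialized by the
 * RTNL lock.
 */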
436 static RAW_NOTIFIER_HEAD(netdev_chain);
437 
438 /*
439  *	Device drivers call our routines to queue packets here. We empty the
440  *	queue in the local softnet handler.
441  */
442 
443 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
444 EXPORT_PER_CPU_SYMBOL(softnet_data);
445 
446 #ifdef CONFIG_LOCKDEP
447 /*
448  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
449  * according to dev->type
450  */
451 static const unsigned short netdev_lock_type[] = {
452 	 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
453 	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
454 	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
455 	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
456 	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
457 	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
458 	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
459 	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
460 	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
461 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
462 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
463 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
464 	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
465 	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
466 	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
467 
468 static const char *const netdev_lock_name[] = {
469 	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
470 	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
471 	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
472 	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
473 	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
474 	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
475 	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
476 	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
477 	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
478 	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
479 	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
480 	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
481 	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
482 	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
483 	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
484 
485 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
486 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
487 
488 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
489 {
490 	int i;
491 
492 	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
493 		if (netdev_lock_type[i] == dev_type)
494 			return i;
495 	/* the last key is used by default */
496 	return ARRAY_SIZE(netdev_lock_type) - 1;
497 }
498 
499 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
500 						 unsigned short dev_type)
501 {
502 	int i;
503 
504 	i = netdev_lock_pos(dev_type);
505 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
506 				   netdev_lock_name[i]);
507 }
508 
509 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
510 {
511 	int i;
512 
513 	i = netdev_lock_pos(dev->type);
514 	lockdep_set_class_and_name(&dev->addr_list_lock,
515 				   &netdev_addr_lock_key[i],
516 				   netdev_lock_name[i]);
517 }
518 #else
519 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
520 						 unsigned short dev_type)
521 {
522 }
523 
524 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
525 {
526 }
527 #endif
528 
529 /*******************************************************************************
530  *
531  *		Protocol management and registration routines
532  *
533  *******************************************************************************/
534 
535 
536 /*
537  *	Add a protocol ID to the list. Now that the input handler is
538  *	smarter we can dispense with all the messy stuff that used to be
539  *	here.
540  *
541  *	BEWARE!!! Protocol handlers, mangling input packets,
542  *	MUST BE last in hash buckets and checking protocol handlers
543  *	MUST start from promiscuous ptype_all chain in net_bh.
544  *	It is true now, do not change it.
545  *	Explanation follows: if protocol handler, mangling packet, will
546  *	be the first on list, it is not able to sense, that packet
547  *	is cloned and should be copied-on-write, so that it will
548  *	change it and subsequent readers will get broken packet.
549  *							--ANK (980803)
550  */
551 
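/* Pick the list a packet_type handler lives on: ETH_P_ALL taps go on the
 * global or per-device ptype_all list; everything else is hashed by
 * protocol number into ptype_base, or put on the device's ptype_specific
 * list when the handler is bound to a single device.
 */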
552 static inline struct list_head *ptype_head(const struct packet_type *pt)
553 {
554 	if (pt->type == htons(ETH_P_ALL))
555 		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
556 	else
557 		return pt->dev ? &pt->dev->ptype_specific :
558 				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
559 }
560 
561 /**
562  *	dev_add_pack - add packet handler
563  *	@pt: packet type declaration
564  *
565  *	Add a protocol handler to the networking stack. The passed &packet_type
566  *	is linked into kernel lists and may not be freed until it has been
567  *	removed from the kernel lists.
568  *
569  *	This call does not sleep, therefore it cannot guarantee that all
570  *	CPUs that are in the middle of receiving packets will see the new
571  *	packet type (until the next received packet).
572  */
573 
574 void dev_add_pack(struct packet_type *pt)
575 {
576 	struct list_head *head = ptype_head(pt);
577 
578 	spin_lock(&ptype_lock);
579 	list_add_rcu(&pt->list, head);
580 	spin_unlock(&ptype_lock);
581 }
582 EXPORT_SYMBOL(dev_add_pack);
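/* A minimal usage sketch with hypothetical names (not part of this file):
 * a module that wants to see every received frame could register a tap
 * roughly like this, and remove it with dev_remove_pack() on unload.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// inspect skb, then drop the reference we were given
 *		consume_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);
 */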
583 
584 /**
585  *	__dev_remove_pack	 - remove packet handler
586  *	@pt: packet type declaration
587  *
588  *	Remove a protocol handler that was previously added to the kernel
589  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
590  *	from the kernel lists and can be freed or reused once this function
591  *	returns.
592  *
593  *      The packet type might still be in use by receivers
594  *	and must not be freed until after all the CPUs have gone
595  *	through a quiescent state.
596  */
597 void __dev_remove_pack(struct packet_type *pt)
598 {
599 	struct list_head *head = ptype_head(pt);
600 	struct packet_type *pt1;
601 
602 	spin_lock(&ptype_lock);
603 
604 	list_for_each_entry(pt1, head, list) {
605 		if (pt == pt1) {
606 			list_del_rcu(&pt->list);
607 			goto out;
608 		}
609 	}
610 
611 	pr_warn("dev_remove_pack: %p not found\n", pt);
612 out:
613 	spin_unlock(&ptype_lock);
614 }
615 EXPORT_SYMBOL(__dev_remove_pack);
616 
617 /**
618  *	dev_remove_pack	 - remove packet handler
619  *	@pt: packet type declaration
620  *
621  *	Remove a protocol handler that was previously added to the kernel
622  *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
623  *	from the kernel lists and can be freed or reused once this function
624  *	returns.
625  *
626  *	This call sleeps to guarantee that no CPU is looking at the packet
627  *	type after return.
628  */
629 void dev_remove_pack(struct packet_type *pt)
630 {
631 	__dev_remove_pack(pt);
632 
633 	synchronize_net();
634 }
635 EXPORT_SYMBOL(dev_remove_pack);
636 
637 
638 /*******************************************************************************
639  *
640  *			    Device Interface Subroutines
641  *
642  *******************************************************************************/
643 
644 /**
645  *	dev_get_iflink	- get 'iflink' value of an interface
646  *	@dev: targeted interface
647  *
648  *	Indicates the ifindex the interface is linked to.
649  *	Physical interfaces have the same 'ifindex' and 'iflink' values.
650  */
651 
652 int dev_get_iflink(const struct net_device *dev)
653 {
654 	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
655 		return dev->netdev_ops->ndo_get_iflink(dev);
656 
657 	return dev->ifindex;
658 }
659 EXPORT_SYMBOL(dev_get_iflink);
660 
661 /**
662  *	dev_fill_metadata_dst - Retrieve tunnel egress information.
663  *	@dev: targeted interface
664  *	@skb: The packet.
665  *
666  *	For better visibility of tunnel traffic, OVS needs to retrieve
667  *	egress tunnel information for a packet. The following API allows
668  *	the user to get this info.
669  */
670 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
671 {
672 	struct ip_tunnel_info *info;
673 
674 	if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
675 		return -EINVAL;
676 
677 	info = skb_tunnel_info_unclone(skb);
678 	if (!info)
679 		return -ENOMEM;
680 	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
681 		return -EINVAL;
682 
683 	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
684 }
685 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
686 
687 static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
688 {
689 	int k = stack->num_paths++;
690 
691 	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
692 		return NULL;
693 
694 	return &stack->path[k];
695 }
696 
697 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
698 			  struct net_device_path_stack *stack)
699 {
700 	const struct net_device *last_dev;
701 	struct net_device_path_ctx ctx = {
702 		.dev	= dev,
703 	};
704 	struct net_device_path *path;
705 	int ret = 0;
706 
707 	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
708 	stack->num_paths = 0;
709 	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
710 		last_dev = ctx.dev;
711 		path = dev_fwd_path(stack);
712 		if (!path)
713 			return -1;
714 
715 		memset(path, 0, sizeof(struct net_device_path));
716 		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
717 		if (ret < 0)
718 			return -1;
719 
720 		if (WARN_ON_ONCE(last_dev == ctx.dev))
721 			return -1;
722 	}
723 
724 	if (!ctx.dev)
725 		return ret;
726 
727 	path = dev_fwd_path(stack);
728 	if (!path)
729 		return -1;
730 	path->type = DEV_PATH_ETHERNET;
731 	path->dev = ctx.dev;
732 
733 	return ret;
734 }
735 EXPORT_SYMBOL_GPL(dev_fill_forward_path);
736 
737 /**
738  *	__dev_get_by_name	- find a device by its name
739  *	@net: the applicable net namespace
740  *	@name: name to find
741  *
742  *	Find an interface by name. Must be called under RTNL semaphore
743  *	or @dev_base_lock. If the name is found a pointer to the device
744  *	is returned. If the name is not found then %NULL is returned. The
745  *	reference counters are not incremented so the caller must be
746  *	careful with locks.
747  */
748 
749 struct net_device *__dev_get_by_name(struct net *net, const char *name)
750 {
751 	struct netdev_name_node *node_name;
752 
753 	node_name = netdev_name_node_lookup(net, name);
754 	return node_name ? node_name->dev : NULL;
755 }
756 EXPORT_SYMBOL(__dev_get_by_name);
757 
758 /**
759  * dev_get_by_name_rcu	- find a device by its name
760  * @net: the applicable net namespace
761  * @name: name to find
762  *
763  * Find an interface by name.
764  * If the name is found a pointer to the device is returned.
765  * If the name is not found then %NULL is returned.
766  * The reference counters are not incremented so the caller must be
767  * careful with locks. The caller must hold RCU lock.
768  */
769 
770 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
771 {
772 	struct netdev_name_node *node_name;
773 
774 	node_name = netdev_name_node_lookup_rcu(net, name);
775 	return node_name ? node_name->dev : NULL;
776 }
777 EXPORT_SYMBOL(dev_get_by_name_rcu);
778 
779 /* Deprecated for new users, call netdev_get_by_name() instead */
780 struct net_device *dev_get_by_name(struct net *net, const char *name)
781 {
782 	struct net_device *dev;
783 
784 	rcu_read_lock();
785 	dev = dev_get_by_name_rcu(net, name);
786 	dev_hold(dev);
787 	rcu_read_unlock();
788 	return dev;
789 }
790 EXPORT_SYMBOL(dev_get_by_name);
791 
792 /**
793  *	netdev_get_by_name() - find a device by its name
794  *	@net: the applicable net namespace
795  *	@name: name to find
796  *	@tracker: tracking object for the acquired reference
797  *	@gfp: allocation flags for the tracker
798  *
799  *	Find an interface by name. This can be called from any
800  *	context and does its own locking. The returned handle has
801  *	the usage count incremented and the caller must use netdev_put() to
802  *	release it when it is no longer needed. %NULL is returned if no
803  *	matching device is found.
804  */
805 struct net_device *netdev_get_by_name(struct net *net, const char *name,
806 				      netdevice_tracker *tracker, gfp_t gfp)
807 {
808 	struct net_device *dev;
809 
810 	dev = dev_get_by_name(net, name);
811 	if (dev)
812 		netdev_tracker_alloc(dev, tracker, gfp);
813 	return dev;
814 }
815 EXPORT_SYMBOL(netdev_get_by_name);
816 
817 /**
818  *	__dev_get_by_index - find a device by its ifindex
819  *	@net: the applicable net namespace
820  *	@ifindex: index of device
821  *
822  *	Search for an interface by index. Returns a pointer to the device,
823  *	or %NULL if the device is not found. The device has not
824  *	had its reference counter increased so the caller must be careful
825  *	about locking. The caller must hold either the RTNL semaphore
826  *	or @dev_base_lock.
827  */
828 
829 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
830 {
831 	struct net_device *dev;
832 	struct hlist_head *head = dev_index_hash(net, ifindex);
833 
834 	hlist_for_each_entry(dev, head, index_hlist)
835 		if (dev->ifindex == ifindex)
836 			return dev;
837 
838 	return NULL;
839 }
840 EXPORT_SYMBOL(__dev_get_by_index);
841 
842 /**
843  *	dev_get_by_index_rcu - find a device by its ifindex
844  *	@net: the applicable net namespace
845  *	@ifindex: index of device
846  *
847  *	Search for an interface by index. Returns a pointer to the device,
848  *	or %NULL if the device is not found. The device has not
849  *	had its reference counter increased so the caller must be careful
850  *	about locking. The caller must hold RCU lock.
851  */
852 
853 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
854 {
855 	struct net_device *dev;
856 	struct hlist_head *head = dev_index_hash(net, ifindex);
857 
858 	hlist_for_each_entry_rcu(dev, head, index_hlist)
859 		if (dev->ifindex == ifindex)
860 			return dev;
861 
862 	return NULL;
863 }
864 EXPORT_SYMBOL(dev_get_by_index_rcu);
865 
866 /* Deprecated for new users, call netdev_get_by_index() instead */
867 struct net_device *dev_get_by_index(struct net *net, int ifindex)
868 {
869 	struct net_device *dev;
870 
871 	rcu_read_lock();
872 	dev = dev_get_by_index_rcu(net, ifindex);
873 	dev_hold(dev);
874 	rcu_read_unlock();
875 	return dev;
876 }
877 EXPORT_SYMBOL(dev_get_by_index);
878 
879 /**
880  *	netdev_get_by_index() - find a device by its ifindex
881  *	@net: the applicable net namespace
882  *	@ifindex: index of device
883  *	@tracker: tracking object for the acquired reference
884  *	@gfp: allocation flags for the tracker
885  *
886  *	Search for an interface by index. Returns a pointer to the device,
887  *	or NULL if the device is not found. The device returned has
888  *	had a reference added and the pointer is safe until the user calls
889  *	netdev_put() to indicate they have finished with it.
890  */
891 struct net_device *netdev_get_by_index(struct net *net, int ifindex,
892 				       netdevice_tracker *tracker, gfp_t gfp)
893 {
894 	struct net_device *dev;
895 
896 	dev = dev_get_by_index(net, ifindex);
897 	if (dev)
898 		netdev_tracker_alloc(dev, tracker, gfp);
899 	return dev;
900 }
901 EXPORT_SYMBOL(netdev_get_by_index);
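/* A minimal usage sketch (hypothetical caller): the tracker ties the
 * reference to its owner for refcount debugging and must be passed back
 * to netdev_put() together with the device.
 *
 *	netdevice_tracker tracker;
 *	struct net_device *dev;
 *
 *	dev = netdev_get_by_index(net, ifindex, &tracker, GFP_KERNEL);
 *	if (dev) {
 *		// ... use dev ...
 *		netdev_put(dev, &tracker);
 *	}
 */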
902 
903 /**
904  *	dev_get_by_napi_id - find a device by napi_id
905  *	@napi_id: ID of the NAPI struct
906  *
907  *	Search for an interface by NAPI ID. Returns a pointer to the device,
908  *	or %NULL if the device is not found. The device has not had
909  *	its reference counter increased so the caller must be careful
910  *	about locking. The caller must hold RCU lock.
911  */
912 
913 struct net_device *dev_get_by_napi_id(unsigned int napi_id)
914 {
915 	struct napi_struct *napi;
916 
917 	WARN_ON_ONCE(!rcu_read_lock_held());
918 
919 	if (napi_id < MIN_NAPI_ID)
920 		return NULL;
921 
922 	napi = napi_by_id(napi_id);
923 
924 	return napi ? napi->dev : NULL;
925 }
926 EXPORT_SYMBOL(dev_get_by_napi_id);
927 
928 /**
929  *	netdev_get_name - get a netdevice name, knowing its ifindex.
930  *	@net: network namespace
931  *	@name: a pointer to the buffer where the name will be stored.
932  *	@ifindex: the ifindex of the interface to get the name from.
933  */
934 int netdev_get_name(struct net *net, char *name, int ifindex)
935 {
936 	struct net_device *dev;
937 	int ret;
938 
939 	down_read(&devnet_rename_sem);
940 	rcu_read_lock();
941 
942 	dev = dev_get_by_index_rcu(net, ifindex);
943 	if (!dev) {
944 		ret = -ENODEV;
945 		goto out;
946 	}
947 
948 	strcpy(name, dev->name);
949 
950 	ret = 0;
951 out:
952 	rcu_read_unlock();
953 	up_read(&devnet_rename_sem);
954 	return ret;
955 }
956 
957 /**
958  *	dev_getbyhwaddr_rcu - find a device by its hardware address
959  *	@net: the applicable net namespace
960  *	@type: media type of device
961  *	@ha: hardware address
962  *
963  *	Search for an interface by MAC address. Returns a pointer to the
964  *	device, or NULL if the device is not found.
965  *	The caller must hold RCU or RTNL.
966  *	The returned device has not had its ref count increased
967  *	and the caller must therefore be careful about locking.
968  *
969  */
970 
971 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
972 				       const char *ha)
973 {
974 	struct net_device *dev;
975 
976 	for_each_netdev_rcu(net, dev)
977 		if (dev->type == type &&
978 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
979 			return dev;
980 
981 	return NULL;
982 }
983 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
984 
985 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
986 {
987 	struct net_device *dev, *ret = NULL;
988 
989 	rcu_read_lock();
990 	for_each_netdev_rcu(net, dev)
991 		if (dev->type == type) {
992 			dev_hold(dev);
993 			ret = dev;
994 			break;
995 		}
996 	rcu_read_unlock();
997 	return ret;
998 }
999 EXPORT_SYMBOL(dev_getfirstbyhwtype);
1000 
1001 /**
1002  *	__dev_get_by_flags - find any device with given flags
1003  *	@net: the applicable net namespace
1004  *	@if_flags: IFF_* values
1005  *	@mask: bitmask of bits in if_flags to check
1006  *
1007  *	Search for any interface with the given flags. Returns a pointer to
1008  *	the device, or NULL if no such device is found. Must be called inside
1009  *	rtnl_lock(), and result refcount is unchanged.
1010  */
1011 
1012 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1013 				      unsigned short mask)
1014 {
1015 	struct net_device *dev, *ret;
1016 
1017 	ASSERT_RTNL();
1018 
1019 	ret = NULL;
1020 	for_each_netdev(net, dev) {
1021 		if (((dev->flags ^ if_flags) & mask) == 0) {
1022 			ret = dev;
1023 			break;
1024 		}
1025 	}
1026 	return ret;
1027 }
1028 EXPORT_SYMBOL(__dev_get_by_flags);
1029 
1030 /**
1031  *	dev_valid_name - check if name is okay for network device
1032  *	@name: name string
1033  *
1034  *	Network device names need to be valid file names to
1035  *	allow sysfs to work.  We also disallow any kind of
1036  *	whitespace.
1037  */
1038 bool dev_valid_name(const char *name)
1039 {
1040 	if (*name == '\0')
1041 		return false;
1042 	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1043 		return false;
1044 	if (!strcmp(name, ".") || !strcmp(name, ".."))
1045 		return false;
1046 
1047 	while (*name) {
1048 		if (*name == '/' || *name == ':' || isspace(*name))
1049 			return false;
1050 		name++;
1051 	}
1052 	return true;
1053 }
1054 EXPORT_SYMBOL(dev_valid_name);
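/* For example, dev_valid_name() accepts "eth0" and "wan0", while "", ".",
 * "..", names containing '/', ':' or whitespace, and names of IFNAMSIZ or
 * more characters are all rejected.
 */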
1055 
1056 /**
1057  *	__dev_alloc_name - allocate a name for a device
1058  *	@net: network namespace to allocate the device name in
1059  *	@name: name format string
1060  *	@buf:  scratch buffer and result name string
1061  *
1062  *	Passed a format string - e.g. "lt%d" - it will try and find a suitable
1063  *	id. It scans the list of devices to build up a free map, then chooses
1064  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1065  *	while allocating the name and adding the device in order to avoid
1066  *	duplicates.
1067  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1068  *	Returns the number of the unit assigned or a negative errno code.
1069  */
1070 
1071 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1072 {
1073 	int i = 0;
1074 	const char *p;
1075 	const int max_netdevices = 8*PAGE_SIZE;
1076 	unsigned long *inuse;
1077 	struct net_device *d;
1078 
1079 	if (!dev_valid_name(name))
1080 		return -EINVAL;
1081 
1082 	p = strchr(name, '%');
1083 	if (p) {
1084 		/*
1085 		 * Verify the string as this thing may have come from
1086 		 * the user.  There must be exactly one "%d" and no other "%"
1087 		 * characters.
1088 		 */
1089 		if (p[1] != 'd' || strchr(p + 2, '%'))
1090 			return -EINVAL;
1091 
1092 		/* Use one page as a bit array of possible slots */
1093 		inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
1094 		if (!inuse)
1095 			return -ENOMEM;
1096 
1097 		for_each_netdev(net, d) {
1098 			struct netdev_name_node *name_node;
1099 
1100 			netdev_for_each_altname(d, name_node) {
1101 				if (!sscanf(name_node->name, name, &i))
1102 					continue;
1103 				if (i < 0 || i >= max_netdevices)
1104 					continue;
1105 
1106 				/*  avoid cases where sscanf is not exact inverse of printf */
1107 				snprintf(buf, IFNAMSIZ, name, i);
1108 				if (!strncmp(buf, name_node->name, IFNAMSIZ))
1109 					__set_bit(i, inuse);
1110 			}
1111 			if (!sscanf(d->name, name, &i))
1112 				continue;
1113 			if (i < 0 || i >= max_netdevices)
1114 				continue;
1115 
1116 			/*  avoid cases where sscanf is not exact inverse of printf */
1117 			snprintf(buf, IFNAMSIZ, name, i);
1118 			if (!strncmp(buf, d->name, IFNAMSIZ))
1119 				__set_bit(i, inuse);
1120 		}
1121 
1122 		i = find_first_zero_bit(inuse, max_netdevices);
1123 		bitmap_free(inuse);
1124 	}
1125 
1126 	snprintf(buf, IFNAMSIZ, name, i);
1127 	if (!netdev_name_in_use(net, buf))
1128 		return i;
1129 
1130 	/* It is possible to run out of possible slots
1131 	 * when the name is long and there isn't enough space left
1132 	 * for the digits, or if all bits are used.
1133 	 */
1134 	return -ENFILE;
1135 }
1136 
1137 static int dev_prep_valid_name(struct net *net, struct net_device *dev,
1138 			       const char *want_name, char *out_name)
1139 {
1140 	int ret;
1141 
1142 	if (!dev_valid_name(want_name))
1143 		return -EINVAL;
1144 
1145 	if (strchr(want_name, '%')) {
1146 		ret = __dev_alloc_name(net, want_name, out_name);
1147 		return ret < 0 ? ret : 0;
1148 	} else if (netdev_name_in_use(net, want_name)) {
1149 		return -EEXIST;
1150 	} else if (out_name != want_name) {
1151 		strscpy(out_name, want_name, IFNAMSIZ);
1152 	}
1153 
1154 	return 0;
1155 }
1156 
1157 static int dev_alloc_name_ns(struct net *net,
1158 			     struct net_device *dev,
1159 			     const char *name)
1160 {
1161 	char buf[IFNAMSIZ];
1162 	int ret;
1163 
1164 	BUG_ON(!net);
1165 	ret = __dev_alloc_name(net, name, buf);
1166 	if (ret >= 0)
1167 		strscpy(dev->name, buf, IFNAMSIZ);
1168 	return ret;
1169 }
1170 
1171 /**
1172  *	dev_alloc_name - allocate a name for a device
1173  *	@dev: device
1174  *	@name: name format string
1175  *
1176  *	Passed a format string - e.g. "lt%d" - it will try and find a suitable
1177  *	id. It scans the list of devices to build up a free map, then chooses
1178  *	the first empty slot. The caller must hold the dev_base or rtnl lock
1179  *	while allocating the name and adding the device in order to avoid
1180  *	duplicates.
1181  *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1182  *	Returns the number of the unit assigned or a negative errno code.
1183  */
1184 
1185 int dev_alloc_name(struct net_device *dev, const char *name)
1186 {
1187 	return dev_alloc_name_ns(dev_net(dev), dev, name);
1188 }
1189 EXPORT_SYMBOL(dev_alloc_name);
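/* Typical (hypothetical) use from a driver before registration:
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto fail;
 *
 * On success the chosen unit number is returned and dev->name holds the
 * expanded name, e.g. "dummy0".
 */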
1190 
1191 static int dev_get_valid_name(struct net *net, struct net_device *dev,
1192 			      const char *name)
1193 {
1194 	char buf[IFNAMSIZ];
1195 	int ret;
1196 
1197 	ret = dev_prep_valid_name(net, dev, name, buf);
1198 	if (ret >= 0)
1199 		strscpy(dev->name, buf, IFNAMSIZ);
1200 	return ret;
1201 }
1202 
1203 /**
1204  *	dev_change_name - change name of a device
1205  *	@dev: device
1206  *	@newname: name (or format string) must be at least IFNAMSIZ
1207  *
1208  *	Change name of a device, can pass format strings "eth%d"
1209  *	for wildcarding.
1210  */
1211 int dev_change_name(struct net_device *dev, const char *newname)
1212 {
1213 	unsigned char old_assign_type;
1214 	char oldname[IFNAMSIZ];
1215 	int err = 0;
1216 	int ret;
1217 	struct net *net;
1218 
1219 	ASSERT_RTNL();
1220 	BUG_ON(!dev_net(dev));
1221 
1222 	net = dev_net(dev);
1223 
1224 	down_write(&devnet_rename_sem);
1225 
1226 	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1227 		up_write(&devnet_rename_sem);
1228 		return 0;
1229 	}
1230 
1231 	memcpy(oldname, dev->name, IFNAMSIZ);
1232 
1233 	err = dev_get_valid_name(net, dev, newname);
1234 	if (err < 0) {
1235 		up_write(&devnet_rename_sem);
1236 		return err;
1237 	}
1238 
1239 	if (oldname[0] && !strchr(oldname, '%'))
1240 		netdev_info(dev, "renamed from %s%s\n", oldname,
1241 			    dev->flags & IFF_UP ? " (while UP)" : "");
1242 
1243 	old_assign_type = dev->name_assign_type;
1244 	dev->name_assign_type = NET_NAME_RENAMED;
1245 
1246 rollback:
1247 	ret = device_rename(&dev->dev, dev->name);
1248 	if (ret) {
1249 		memcpy(dev->name, oldname, IFNAMSIZ);
1250 		dev->name_assign_type = old_assign_type;
1251 		up_write(&devnet_rename_sem);
1252 		return ret;
1253 	}
1254 
1255 	up_write(&devnet_rename_sem);
1256 
1257 	netdev_adjacent_rename_links(dev, oldname);
1258 
1259 	write_lock(&dev_base_lock);
1260 	netdev_name_node_del(dev->name_node);
1261 	write_unlock(&dev_base_lock);
1262 
1263 	synchronize_rcu();
1264 
1265 	write_lock(&dev_base_lock);
1266 	netdev_name_node_add(net, dev->name_node);
1267 	write_unlock(&dev_base_lock);
1268 
1269 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1270 	ret = notifier_to_errno(ret);
1271 
1272 	if (ret) {
1273 		/* err >= 0 after dev_alloc_name() or stores the first errno */
1274 		if (err >= 0) {
1275 			err = ret;
1276 			down_write(&devnet_rename_sem);
1277 			memcpy(dev->name, oldname, IFNAMSIZ);
1278 			memcpy(oldname, newname, IFNAMSIZ);
1279 			dev->name_assign_type = old_assign_type;
1280 			old_assign_type = NET_NAME_RENAMED;
1281 			goto rollback;
1282 		} else {
1283 			netdev_err(dev, "name change rollback failed: %d\n",
1284 				   ret);
1285 		}
1286 	}
1287 
1288 	return err;
1289 }
1290 
1291 /**
1292  *	dev_set_alias - change ifalias of a device
1293  *	@dev: device
1294  *	@alias: name up to IFALIASZ
1295  *	@len: limit of bytes to copy from info
1296  *
1297  *	Set ifalias for a device.
1298  */
1299 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1300 {
1301 	struct dev_ifalias *new_alias = NULL;
1302 
1303 	if (len >= IFALIASZ)
1304 		return -EINVAL;
1305 
1306 	if (len) {
1307 		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1308 		if (!new_alias)
1309 			return -ENOMEM;
1310 
1311 		memcpy(new_alias->ifalias, alias, len);
1312 		new_alias->ifalias[len] = 0;
1313 	}
1314 
1315 	mutex_lock(&ifalias_mutex);
1316 	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1317 					mutex_is_locked(&ifalias_mutex));
1318 	mutex_unlock(&ifalias_mutex);
1319 
1320 	if (new_alias)
1321 		kfree_rcu(new_alias, rcuhead);
1322 
1323 	return len;
1324 }
1325 EXPORT_SYMBOL(dev_set_alias);
1326 
1327 /**
1328  *	dev_get_alias - get ifalias of a device
1329  *	@dev: device
1330  *	@name: buffer to store name of ifalias
1331  *	@len: size of buffer
1332  *
1333  *	get ifalias for a device.  Caller must make sure dev cannot go
1334  *	away,  e.g. rcu read lock or own a reference count to device.
1335  */
1336 int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1337 {
1338 	const struct dev_ifalias *alias;
1339 	int ret = 0;
1340 
1341 	rcu_read_lock();
1342 	alias = rcu_dereference(dev->ifalias);
1343 	if (alias)
1344 		ret = snprintf(name, len, "%s", alias->ifalias);
1345 	rcu_read_unlock();
1346 
1347 	return ret;
1348 }
1349 
1350 /**
1351  *	netdev_features_change - device changes features
1352  *	@dev: device to cause notification
1353  *
1354  *	Called to indicate a device has changed features.
1355  */
1356 void netdev_features_change(struct net_device *dev)
1357 {
1358 	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1359 }
1360 EXPORT_SYMBOL(netdev_features_change);
1361 
1362 /**
1363  *	netdev_state_change - device changes state
1364  *	@dev: device to cause notification
1365  *
1366  *	Called to indicate a device has changed state. This function calls
1367  *	the notifier chains for netdev_chain and sends a NEWLINK message
1368  *	to the routing socket.
1369  */
1370 void netdev_state_change(struct net_device *dev)
1371 {
1372 	if (dev->flags & IFF_UP) {
1373 		struct netdev_notifier_change_info change_info = {
1374 			.info.dev = dev,
1375 		};
1376 
1377 		call_netdevice_notifiers_info(NETDEV_CHANGE,
1378 					      &change_info.info);
1379 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
1380 	}
1381 }
1382 EXPORT_SYMBOL(netdev_state_change);
1383 
1384 /**
1385  * __netdev_notify_peers - notify network peers about existence of @dev,
1386  * to be called when rtnl lock is already held.
1387  * @dev: network device
1388  *
1389  * Generate traffic such that interested network peers are aware of
1390  * @dev, such as by generating a gratuitous ARP. This may be used when
1391  * a device wants to inform the rest of the network about some sort of
1392  * reconfiguration such as a failover event or virtual machine
1393  * migration.
1394  */
1395 void __netdev_notify_peers(struct net_device *dev)
1396 {
1397 	ASSERT_RTNL();
1398 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1399 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1400 }
1401 EXPORT_SYMBOL(__netdev_notify_peers);
1402 
1403 /**
1404  * netdev_notify_peers - notify network peers about existence of @dev
1405  * @dev: network device
1406  *
1407  * Generate traffic such that interested network peers are aware of
1408  * @dev, such as by generating a gratuitous ARP. This may be used when
1409  * a device wants to inform the rest of the network about some sort of
1410  * reconfiguration such as a failover event or virtual machine
1411  * migration.
1412  */
1413 void netdev_notify_peers(struct net_device *dev)
1414 {
1415 	rtnl_lock();
1416 	__netdev_notify_peers(dev);
1417 	rtnl_unlock();
1418 }
1419 EXPORT_SYMBOL(netdev_notify_peers);
1420 
1421 static int napi_threaded_poll(void *data);
1422 
1423 static int napi_kthread_create(struct napi_struct *n)
1424 {
1425 	int err = 0;
1426 
1427 	/* Create and wake up the kthread once to put it in
1428 	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
1429 	 * warning and work with loadavg.
1430 	 */
1431 	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
1432 				n->dev->name, n->napi_id);
1433 	if (IS_ERR(n->thread)) {
1434 		err = PTR_ERR(n->thread);
1435 		pr_err("kthread_run failed with err %d\n", err);
1436 		n->thread = NULL;
1437 	}
1438 
1439 	return err;
1440 }
1441 
1442 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1443 {
1444 	const struct net_device_ops *ops = dev->netdev_ops;
1445 	int ret;
1446 
1447 	ASSERT_RTNL();
1448 	dev_addr_check(dev);
1449 
1450 	if (!netif_device_present(dev)) {
1451 		/* may be detached because parent is runtime-suspended */
1452 		if (dev->dev.parent)
1453 			pm_runtime_resume(dev->dev.parent);
1454 		if (!netif_device_present(dev))
1455 			return -ENODEV;
1456 	}
1457 
1458 	/* Block netpoll from trying to do any rx path servicing.
1459 	 * If we don't do this there is a chance ndo_poll_controller
1460 	 * or ndo_poll may be running while we open the device
1461 	 */
1462 	netpoll_poll_disable(dev);
1463 
1464 	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1465 	ret = notifier_to_errno(ret);
1466 	if (ret)
1467 		return ret;
1468 
1469 	set_bit(__LINK_STATE_START, &dev->state);
1470 
1471 	if (ops->ndo_validate_addr)
1472 		ret = ops->ndo_validate_addr(dev);
1473 
1474 	if (!ret && ops->ndo_open)
1475 		ret = ops->ndo_open(dev);
1476 
1477 	netpoll_poll_enable(dev);
1478 
1479 	if (ret)
1480 		clear_bit(__LINK_STATE_START, &dev->state);
1481 	else {
1482 		dev->flags |= IFF_UP;
1483 		dev_set_rx_mode(dev);
1484 		dev_activate(dev);
1485 		add_device_randomness(dev->dev_addr, dev->addr_len);
1486 	}
1487 
1488 	return ret;
1489 }
1490 
1491 /**
1492  *	dev_open	- prepare an interface for use.
1493  *	@dev: device to open
1494  *	@extack: netlink extended ack
1495  *
1496  *	Takes a device from down to up state. The device's private open
1497  *	function is invoked and then the multicast lists are loaded. Finally
1498  *	the device is moved into the up state and a %NETDEV_UP message is
1499  *	sent to the netdev notifier chain.
1500  *
1501  *	Calling this function on an active interface is a nop. On a failure
1502  *	a negative errno code is returned.
1503  */
1504 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1505 {
1506 	int ret;
1507 
1508 	if (dev->flags & IFF_UP)
1509 		return 0;
1510 
1511 	ret = __dev_open(dev, extack);
1512 	if (ret < 0)
1513 		return ret;
1514 
1515 	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1516 	call_netdevice_notifiers(NETDEV_UP, dev);
1517 
1518 	return ret;
1519 }
1520 EXPORT_SYMBOL(dev_open);
1521 
1522 static void __dev_close_many(struct list_head *head)
1523 {
1524 	struct net_device *dev;
1525 
1526 	ASSERT_RTNL();
1527 	might_sleep();
1528 
1529 	list_for_each_entry(dev, head, close_list) {
1530 		/* Temporarily disable netpoll until the interface is down */
1531 		netpoll_poll_disable(dev);
1532 
1533 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1534 
1535 		clear_bit(__LINK_STATE_START, &dev->state);
1536 
1537 		/* Synchronize to scheduled poll. We cannot touch the poll list, it
1538 		 * can even be on a different CPU. So just clear netif_running().
1539 		 *
1540 		 * dev->stop() will invoke napi_disable() on all of its
1541 		 * napi_struct instances on this device.
1542 		 */
1543 		smp_mb__after_atomic(); /* Commit netif_running(). */
1544 	}
1545 
1546 	dev_deactivate_many(head);
1547 
1548 	list_for_each_entry(dev, head, close_list) {
1549 		const struct net_device_ops *ops = dev->netdev_ops;
1550 
1551 		/*
1552 		 *	Call the device-specific close. This cannot fail and
1553 		 *	is only done if the device is UP.
1554 		 *
1555 		 *	We allow it to be called even after a DETACH hot-plug
1556 		 *	event.
1557 		 */
1558 		if (ops->ndo_stop)
1559 			ops->ndo_stop(dev);
1560 
1561 		dev->flags &= ~IFF_UP;
1562 		netpoll_poll_enable(dev);
1563 	}
1564 }
1565 
1566 static void __dev_close(struct net_device *dev)
1567 {
1568 	LIST_HEAD(single);
1569 
1570 	list_add(&dev->close_list, &single);
1571 	__dev_close_many(&single);
1572 	list_del(&single);
1573 }
1574 
1575 void dev_close_many(struct list_head *head, bool unlink)
1576 {
1577 	struct net_device *dev, *tmp;
1578 
1579 	/* Remove the devices that don't need to be closed */
1580 	list_for_each_entry_safe(dev, tmp, head, close_list)
1581 		if (!(dev->flags & IFF_UP))
1582 			list_del_init(&dev->close_list);
1583 
1584 	__dev_close_many(head);
1585 
1586 	list_for_each_entry_safe(dev, tmp, head, close_list) {
1587 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1588 		call_netdevice_notifiers(NETDEV_DOWN, dev);
1589 		if (unlink)
1590 			list_del_init(&dev->close_list);
1591 	}
1592 }
1593 EXPORT_SYMBOL(dev_close_many);
1594 
1595 /**
1596  *	dev_close - shutdown an interface.
1597  *	@dev: device to shutdown
1598  *
1599  *	This function moves an active device into down state. A
1600  *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1601  *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1602  *	chain.
1603  */
1604 void dev_close(struct net_device *dev)
1605 {
1606 	if (dev->flags & IFF_UP) {
1607 		LIST_HEAD(single);
1608 
1609 		list_add(&dev->close_list, &single);
1610 		dev_close_many(&single, true);
1611 		list_del(&single);
1612 	}
1613 }
1614 EXPORT_SYMBOL(dev_close);
1615 
1616 
1617 /**
1618  *	dev_disable_lro - disable Large Receive Offload on a device
1619  *	@dev: device
1620  *
1621  *	Disable Large Receive Offload (LRO) on a net device.  Must be
1622  *	called under RTNL.  This is needed if received packets may be
1623  *	forwarded to another interface.
1624  */
1625 void dev_disable_lro(struct net_device *dev)
1626 {
1627 	struct net_device *lower_dev;
1628 	struct list_head *iter;
1629 
1630 	dev->wanted_features &= ~NETIF_F_LRO;
1631 	netdev_update_features(dev);
1632 
1633 	if (unlikely(dev->features & NETIF_F_LRO))
1634 		netdev_WARN(dev, "failed to disable LRO!\n");
1635 
1636 	netdev_for_each_lower_dev(dev, lower_dev, iter)
1637 		dev_disable_lro(lower_dev);
1638 }
1639 EXPORT_SYMBOL(dev_disable_lro);
1640 
1641 /**
1642  *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1643  *	@dev: device
1644  *
1645  *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
1646  *	called under RTNL.  This is needed if Generic XDP is installed on
1647  *	the device.
1648  */
1649 static void dev_disable_gro_hw(struct net_device *dev)
1650 {
1651 	dev->wanted_features &= ~NETIF_F_GRO_HW;
1652 	netdev_update_features(dev);
1653 
1654 	if (unlikely(dev->features & NETIF_F_GRO_HW))
1655 		netdev_WARN(dev, "failed to disable GRO_HW!\n");
1656 }
1657 
1658 const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1659 {
1660 #define N(val) 						\
1661 	case NETDEV_##val:				\
1662 		return "NETDEV_" __stringify(val);
1663 	switch (cmd) {
1664 	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1665 	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1666 	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1667 	N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
1668 	N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
1669 	N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
1670 	N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1671 	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1672 	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1673 	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
1674 	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
1675 	N(XDP_FEAT_CHANGE)
1676 	}
1677 #undef N
1678 	return "UNKNOWN_NETDEV_EVENT";
1679 }
1680 EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1681 
1682 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1683 				   struct net_device *dev)
1684 {
1685 	struct netdev_notifier_info info = {
1686 		.dev = dev,
1687 	};
1688 
1689 	return nb->notifier_call(nb, val, &info);
1690 }
1691 
1692 static int call_netdevice_register_notifiers(struct notifier_block *nb,
1693 					     struct net_device *dev)
1694 {
1695 	int err;
1696 
1697 	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1698 	err = notifier_to_errno(err);
1699 	if (err)
1700 		return err;
1701 
1702 	if (!(dev->flags & IFF_UP))
1703 		return 0;
1704 
1705 	call_netdevice_notifier(nb, NETDEV_UP, dev);
1706 	return 0;
1707 }
1708 
1709 static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1710 						struct net_device *dev)
1711 {
1712 	if (dev->flags & IFF_UP) {
1713 		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1714 					dev);
1715 		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1716 	}
1717 	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1718 }
1719 
1720 static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1721 						 struct net *net)
1722 {
1723 	struct net_device *dev;
1724 	int err;
1725 
1726 	for_each_netdev(net, dev) {
1727 		err = call_netdevice_register_notifiers(nb, dev);
1728 		if (err)
1729 			goto rollback;
1730 	}
1731 	return 0;
1732 
1733 rollback:
1734 	for_each_netdev_continue_reverse(net, dev)
1735 		call_netdevice_unregister_notifiers(nb, dev);
1736 	return err;
1737 }
1738 
1739 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1740 						    struct net *net)
1741 {
1742 	struct net_device *dev;
1743 
1744 	for_each_netdev(net, dev)
1745 		call_netdevice_unregister_notifiers(nb, dev);
1746 }
1747 
1748 static int dev_boot_phase = 1;
1749 
1750 /**
1751  * register_netdevice_notifier - register a network notifier block
1752  * @nb: notifier
1753  *
1754  * Register a notifier to be called when network device events occur.
1755  * The notifier passed is linked into the kernel structures and must
1756  * not be reused until it has been unregistered. A negative errno code
1757  * is returned on a failure.
1758  *
1759  * When registered all registration and up events are replayed
1760  * to the new notifier to allow it to have a race free
1761  * view of the network device list.
1762  */
1763 
1764 int register_netdevice_notifier(struct notifier_block *nb)
1765 {
1766 	struct net *net;
1767 	int err;
1768 
1769 	/* Close race with setup_net() and cleanup_net() */
1770 	down_write(&pernet_ops_rwsem);
1771 	rtnl_lock();
1772 	err = raw_notifier_chain_register(&netdev_chain, nb);
1773 	if (err)
1774 		goto unlock;
1775 	if (dev_boot_phase)
1776 		goto unlock;
1777 	for_each_net(net) {
1778 		err = call_netdevice_register_net_notifiers(nb, net);
1779 		if (err)
1780 			goto rollback;
1781 	}
1782 
1783 unlock:
1784 	rtnl_unlock();
1785 	up_write(&pernet_ops_rwsem);
1786 	return err;
1787 
1788 rollback:
1789 	for_each_net_continue_reverse(net)
1790 		call_netdevice_unregister_net_notifiers(nb, net);
1791 
1792 	raw_notifier_chain_unregister(&netdev_chain, nb);
1793 	goto unlock;
1794 }
1795 EXPORT_SYMBOL(register_netdevice_notifier);
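/* Illustrative sketch, not part of this file: a minimal notifier block a
 * driver or subsystem might hook up with register_netdevice_notifier().
 * All example_* names are hypothetical.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		netdev_info(dev, "interface is up\n");
		break;
	case NETDEV_GOING_DOWN:
		netdev_info(dev, "interface is going down\n");
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb) at module init,
 * unregister_netdevice_notifier(&example_netdev_nb) at module exit.
 */
#endif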
1796 
1797 /**
1798  * unregister_netdevice_notifier - unregister a network notifier block
1799  * @nb: notifier
1800  *
1801  * Unregister a notifier previously registered by
1802  * register_netdevice_notifier(). The notifier is unlinked from the
1803  * kernel structures and may then be reused. A negative errno code
1804  * is returned on a failure.
1805  *
1806  * After unregistering, unregister and down device events are synthesized
1807  * for all devices on the device list to the removed notifier to remove
1808  * the need for special case cleanup code.
1809  */
1810 
1811 int unregister_netdevice_notifier(struct notifier_block *nb)
1812 {
1813 	struct net *net;
1814 	int err;
1815 
1816 	/* Close race with setup_net() and cleanup_net() */
1817 	down_write(&pernet_ops_rwsem);
1818 	rtnl_lock();
1819 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1820 	if (err)
1821 		goto unlock;
1822 
1823 	for_each_net(net)
1824 		call_netdevice_unregister_net_notifiers(nb, net);
1825 
1826 unlock:
1827 	rtnl_unlock();
1828 	up_write(&pernet_ops_rwsem);
1829 	return err;
1830 }
1831 EXPORT_SYMBOL(unregister_netdevice_notifier);
1832 
1833 static int __register_netdevice_notifier_net(struct net *net,
1834 					     struct notifier_block *nb,
1835 					     bool ignore_call_fail)
1836 {
1837 	int err;
1838 
1839 	err = raw_notifier_chain_register(&net->netdev_chain, nb);
1840 	if (err)
1841 		return err;
1842 	if (dev_boot_phase)
1843 		return 0;
1844 
1845 	err = call_netdevice_register_net_notifiers(nb, net);
1846 	if (err && !ignore_call_fail)
1847 		goto chain_unregister;
1848 
1849 	return 0;
1850 
1851 chain_unregister:
1852 	raw_notifier_chain_unregister(&net->netdev_chain, nb);
1853 	return err;
1854 }
1855 
1856 static int __unregister_netdevice_notifier_net(struct net *net,
1857 					       struct notifier_block *nb)
1858 {
1859 	int err;
1860 
1861 	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1862 	if (err)
1863 		return err;
1864 
1865 	call_netdevice_unregister_net_notifiers(nb, net);
1866 	return 0;
1867 }
1868 
1869 /**
1870  * register_netdevice_notifier_net - register a per-netns network notifier block
1871  * @net: network namespace
1872  * @nb: notifier
1873  *
1874  * Register a notifier to be called when network device events occur.
1875  * The notifier passed is linked into the kernel structures and must
1876  * not be reused until it has been unregistered. A negative errno code
1877  * is returned on a failure.
1878  *
1879  * When registered all registration and up events are replayed
1880  * to the new notifier to allow it to have a race free
1881  * view of the network device list.
1882  */
1883 
1884 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1885 {
1886 	int err;
1887 
1888 	rtnl_lock();
1889 	err = __register_netdevice_notifier_net(net, nb, false);
1890 	rtnl_unlock();
1891 	return err;
1892 }
1893 EXPORT_SYMBOL(register_netdevice_notifier_net);
1894 
1895 /**
1896  * unregister_netdevice_notifier_net - unregister a per-netns
1897  *                                     network notifier block
1898  * @net: network namespace
1899  * @nb: notifier
1900  *
1901  * Unregister a notifier previously registered by
1902  * register_netdevice_notifier_net(). The notifier is unlinked from the
1903  * kernel structures and may then be reused. A negative errno code
1904  * is returned on a failure.
1905  *
1906  * After unregistering, unregister and down device events are synthesized
1907  * for all devices on the device list to the removed notifier to remove
1908  * the need for special case cleanup code.
1909  */
1910 
1911 int unregister_netdevice_notifier_net(struct net *net,
1912 				      struct notifier_block *nb)
1913 {
1914 	int err;
1915 
1916 	rtnl_lock();
1917 	err = __unregister_netdevice_notifier_net(net, nb);
1918 	rtnl_unlock();
1919 	return err;
1920 }
1921 EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1922 
1923 static void __move_netdevice_notifier_net(struct net *src_net,
1924 					  struct net *dst_net,
1925 					  struct notifier_block *nb)
1926 {
1927 	__unregister_netdevice_notifier_net(src_net, nb);
1928 	__register_netdevice_notifier_net(dst_net, nb, true);
1929 }
1930 
1931 int register_netdevice_notifier_dev_net(struct net_device *dev,
1932 					struct notifier_block *nb,
1933 					struct netdev_net_notifier *nn)
1934 {
1935 	int err;
1936 
1937 	rtnl_lock();
1938 	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1939 	if (!err) {
1940 		nn->nb = nb;
1941 		list_add(&nn->list, &dev->net_notifier_list);
1942 	}
1943 	rtnl_unlock();
1944 	return err;
1945 }
1946 EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1947 
1948 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1949 					  struct notifier_block *nb,
1950 					  struct netdev_net_notifier *nn)
1951 {
1952 	int err;
1953 
1954 	rtnl_lock();
1955 	list_del(&nn->list);
1956 	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1957 	rtnl_unlock();
1958 	return err;
1959 }
1960 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
1961 
1962 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1963 					     struct net *net)
1964 {
1965 	struct netdev_net_notifier *nn;
1966 
1967 	list_for_each_entry(nn, &dev->net_notifier_list, list)
1968 		__move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
1969 }
1970 
1971 /**
1972  *	call_netdevice_notifiers_info - call all network notifier blocks
1973  *	@val: value passed unmodified to notifier function
1974  *	@info: notifier information data
1975  *
1976  *	Call all network notifier blocks.  Parameters and return value
1977  *	are as for raw_notifier_call_chain().
1978  */
1979 
1980 int call_netdevice_notifiers_info(unsigned long val,
1981 				  struct netdev_notifier_info *info)
1982 {
1983 	struct net *net = dev_net(info->dev);
1984 	int ret;
1985 
1986 	ASSERT_RTNL();
1987 
1988 	/* Run per-netns notifier block chain first, then run the global one.
1989 	 * Hopefully, one day, the global one is going to be removed after
1990 	 * all notifier block registrators get converted to be per-netns.
1991 	 */
1992 	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
1993 	if (ret & NOTIFY_STOP_MASK)
1994 		return ret;
1995 	return raw_notifier_call_chain(&netdev_chain, val, info);
1996 }
1997 
1998 /**
1999  *	call_netdevice_notifiers_info_robust - call per-netns notifier blocks
2000  *	                                       and roll back on error
2001  *	@val_up: value passed unmodified to notifier function
2002  *	@val_down: value passed unmodified to the notifier function when
2003  *	           recovering from an error on @val_up
2004  *	@info: notifier information data
2005  *
2006  *	Call all per-netns network notifier blocks, but not notifier blocks on
2007  *	the global notifier chain. Parameters and return value are as for
2008  *	raw_notifier_call_chain_robust().
2009  */
2010 
2011 static int
2012 call_netdevice_notifiers_info_robust(unsigned long val_up,
2013 				     unsigned long val_down,
2014 				     struct netdev_notifier_info *info)
2015 {
2016 	struct net *net = dev_net(info->dev);
2017 
2018 	ASSERT_RTNL();
2019 
2020 	return raw_notifier_call_chain_robust(&net->netdev_chain,
2021 					      val_up, val_down, info);
2022 }
2023 
2024 static int call_netdevice_notifiers_extack(unsigned long val,
2025 					   struct net_device *dev,
2026 					   struct netlink_ext_ack *extack)
2027 {
2028 	struct netdev_notifier_info info = {
2029 		.dev = dev,
2030 		.extack = extack,
2031 	};
2032 
2033 	return call_netdevice_notifiers_info(val, &info);
2034 }
2035 
2036 /**
2037  *	call_netdevice_notifiers - call all network notifier blocks
2038  *      @val: value passed unmodified to notifier function
2039  *      @dev: net_device pointer passed unmodified to notifier function
2040  *
2041  *	Call all network notifier blocks.  Parameters and return value
2042  *	are as for raw_notifier_call_chain().
2043  */
2044 
2045 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2046 {
2047 	return call_netdevice_notifiers_extack(val, dev, NULL);
2048 }
2049 EXPORT_SYMBOL(call_netdevice_notifiers);
2050 
2051 /**
2052  *	call_netdevice_notifiers_mtu - call all network notifier blocks
2053  *	@val: value passed unmodified to notifier function
2054  *	@dev: net_device pointer passed unmodified to notifier function
2055  *	@arg: additional u32 argument passed to the notifier function
2056  *
2057  *	Call all network notifier blocks.  Parameters and return value
2058  *	are as for raw_notifier_call_chain().
2059  */
2060 static int call_netdevice_notifiers_mtu(unsigned long val,
2061 					struct net_device *dev, u32 arg)
2062 {
2063 	struct netdev_notifier_info_ext info = {
2064 		.info.dev = dev,
2065 		.ext.mtu = arg,
2066 	};
2067 
2068 	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2069 
2070 	return call_netdevice_notifiers_info(val, &info.info);
2071 }
2072 
2073 #ifdef CONFIG_NET_INGRESS
2074 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2075 
2076 void net_inc_ingress_queue(void)
2077 {
2078 	static_branch_inc(&ingress_needed_key);
2079 }
2080 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2081 
2082 void net_dec_ingress_queue(void)
2083 {
2084 	static_branch_dec(&ingress_needed_key);
2085 }
2086 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2087 #endif
2088 
2089 #ifdef CONFIG_NET_EGRESS
2090 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2091 
2092 void net_inc_egress_queue(void)
2093 {
2094 	static_branch_inc(&egress_needed_key);
2095 }
2096 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2097 
2098 void net_dec_egress_queue(void)
2099 {
2100 	static_branch_dec(&egress_needed_key);
2101 }
2102 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2103 #endif
2104 
2105 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2106 EXPORT_SYMBOL(netstamp_needed_key);
2107 #ifdef CONFIG_JUMP_LABEL
2108 static atomic_t netstamp_needed_deferred;
2109 static atomic_t netstamp_wanted;
2110 static void netstamp_clear(struct work_struct *work)
2111 {
2112 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2113 	int wanted;
2114 
2115 	wanted = atomic_add_return(deferred, &netstamp_wanted);
2116 	if (wanted > 0)
2117 		static_branch_enable(&netstamp_needed_key);
2118 	else
2119 		static_branch_disable(&netstamp_needed_key);
2120 }
2121 static DECLARE_WORK(netstamp_work, netstamp_clear);
2122 #endif
2123 
2124 void net_enable_timestamp(void)
2125 {
2126 #ifdef CONFIG_JUMP_LABEL
2127 	int wanted = atomic_read(&netstamp_wanted);
2128 
2129 	while (wanted > 0) {
2130 		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
2131 			return;
2132 	}
2133 	atomic_inc(&netstamp_needed_deferred);
2134 	schedule_work(&netstamp_work);
2135 #else
2136 	static_branch_inc(&netstamp_needed_key);
2137 #endif
2138 }
2139 EXPORT_SYMBOL(net_enable_timestamp);
2140 
2141 void net_disable_timestamp(void)
2142 {
2143 #ifdef CONFIG_JUMP_LABEL
2144 	int wanted = atomic_read(&netstamp_wanted);
2145 
2146 	while (wanted > 1) {
2147 		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
2148 			return;
2149 	}
2150 	atomic_dec(&netstamp_needed_deferred);
2151 	schedule_work(&netstamp_work);
2152 #else
2153 	static_branch_dec(&netstamp_needed_key);
2154 #endif
2155 }
2156 EXPORT_SYMBOL(net_disable_timestamp);
2157 
2158 static inline void net_timestamp_set(struct sk_buff *skb)
2159 {
2160 	skb->tstamp = 0;
2161 	skb->mono_delivery_time = 0;
2162 	if (static_branch_unlikely(&netstamp_needed_key))
2163 		skb->tstamp = ktime_get_real();
2164 }
2165 
2166 #define net_timestamp_check(COND, SKB)				\
2167 	if (static_branch_unlikely(&netstamp_needed_key)) {	\
2168 		if ((COND) && !(SKB)->tstamp)			\
2169 			(SKB)->tstamp = ktime_get_real();	\
2170 	}							\
2171 
2172 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2173 {
2174 	return __is_skb_forwardable(dev, skb, true);
2175 }
2176 EXPORT_SYMBOL_GPL(is_skb_forwardable);
2177 
2178 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2179 			      bool check_mtu)
2180 {
2181 	int ret = ____dev_forward_skb(dev, skb, check_mtu);
2182 
2183 	if (likely(!ret)) {
2184 		skb->protocol = eth_type_trans(skb, dev);
2185 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2186 	}
2187 
2188 	return ret;
2189 }
2190 
2191 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2192 {
2193 	return __dev_forward_skb2(dev, skb, true);
2194 }
2195 EXPORT_SYMBOL_GPL(__dev_forward_skb);
2196 
2197 /**
2198  * dev_forward_skb - loopback an skb to another netif
2199  *
2200  * @dev: destination network device
2201  * @skb: buffer to forward
2202  *
2203  * return values:
2204  *	NET_RX_SUCCESS	(no congestion)
2205  *	NET_RX_DROP     (packet was dropped, but freed)
2206  *
2207  * dev_forward_skb can be used for injecting an skb from the
2208  * start_xmit function of one device into the receive queue
2209  * of another device.
2210  *
2211  * The receiving device may be in another namespace, so
2212  * we have to clear all information in the skb that could
2213  * impact namespace isolation.
2214  */
2215 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2216 {
2217 	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2218 }
2219 EXPORT_SYMBOL_GPL(dev_forward_skb);
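/* Illustrative sketch, not part of this file: a virtual driver's
 * ndo_start_xmit() injecting frames into a paired device, as described
 * above.  struct example_priv and the example_* names are hypothetical.
 */
#if 0
struct example_priv {
	struct net_device *peer;
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* dev_forward_skb() consumes the skb in both outcomes */
	if (dev_forward_skb(priv->peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
#endif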
2220 
2221 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2222 {
2223 	return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2224 }
2225 
2226 static inline int deliver_skb(struct sk_buff *skb,
2227 			      struct packet_type *pt_prev,
2228 			      struct net_device *orig_dev)
2229 {
2230 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2231 		return -ENOMEM;
2232 	refcount_inc(&skb->users);
2233 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2234 }
2235 
2236 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2237 					  struct packet_type **pt,
2238 					  struct net_device *orig_dev,
2239 					  __be16 type,
2240 					  struct list_head *ptype_list)
2241 {
2242 	struct packet_type *ptype, *pt_prev = *pt;
2243 
2244 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2245 		if (ptype->type != type)
2246 			continue;
2247 		if (pt_prev)
2248 			deliver_skb(skb, pt_prev, orig_dev);
2249 		pt_prev = ptype;
2250 	}
2251 	*pt = pt_prev;
2252 }
2253 
2254 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2255 {
2256 	if (!ptype->af_packet_priv || !skb->sk)
2257 		return false;
2258 
2259 	if (ptype->id_match)
2260 		return ptype->id_match(ptype, skb->sk);
2261 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2262 		return true;
2263 
2264 	return false;
2265 }
2266 
2267 /**
2268  * dev_nit_active - return true if any network interface taps are in use
2269  *
2270  * @dev: network device to check for the presence of taps
2271  */
2272 bool dev_nit_active(struct net_device *dev)
2273 {
2274 	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2275 }
2276 EXPORT_SYMBOL_GPL(dev_nit_active);
2277 
2278 /*
2279  *	Support routine. Sends outgoing frames to any network
2280  *	taps currently in use.
2281  */
2282 
2283 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2284 {
2285 	struct packet_type *ptype;
2286 	struct sk_buff *skb2 = NULL;
2287 	struct packet_type *pt_prev = NULL;
2288 	struct list_head *ptype_list = &ptype_all;
2289 
2290 	rcu_read_lock();
2291 again:
2292 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2293 		if (ptype->ignore_outgoing)
2294 			continue;
2295 
2296 		/* Never send packets back to the socket
2297 		 * they originated from - MvS (miquels@drinkel.ow.org)
2298 		 */
2299 		if (skb_loop_sk(ptype, skb))
2300 			continue;
2301 
2302 		if (pt_prev) {
2303 			deliver_skb(skb2, pt_prev, skb->dev);
2304 			pt_prev = ptype;
2305 			continue;
2306 		}
2307 
2308 		/* need to clone skb, done only once */
2309 		skb2 = skb_clone(skb, GFP_ATOMIC);
2310 		if (!skb2)
2311 			goto out_unlock;
2312 
2313 		net_timestamp_set(skb2);
2314 
2315 		/* skb->nh should be correctly
2316 		 * set by sender, so that the second statement is
2317 		 * just protection against buggy protocols.
2318 		 */
2319 		skb_reset_mac_header(skb2);
2320 
2321 		if (skb_network_header(skb2) < skb2->data ||
2322 		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2323 			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2324 					     ntohs(skb2->protocol),
2325 					     dev->name);
2326 			skb_reset_network_header(skb2);
2327 		}
2328 
2329 		skb2->transport_header = skb2->network_header;
2330 		skb2->pkt_type = PACKET_OUTGOING;
2331 		pt_prev = ptype;
2332 	}
2333 
2334 	if (ptype_list == &ptype_all) {
2335 		ptype_list = &dev->ptype_all;
2336 		goto again;
2337 	}
2338 out_unlock:
2339 	if (pt_prev) {
2340 		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2341 			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2342 		else
2343 			kfree_skb(skb2);
2344 	}
2345 	rcu_read_unlock();
2346 }
2347 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2348 
2349 /**
2350  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2351  * @dev: Network device
2352  * @txq: number of queues available
2353  *
2354  * If real_num_tx_queues is changed the tc mappings may no longer be
2355  * valid. To resolve this verify the tc mapping remains valid and if
2356  * not, NULL the mapping. With no priorities mapping to this
2357  * offset/count pair it will no longer be used. In the worst case, if TC0
2358  * is invalid, nothing can be done, so disable priority mappings. It is
2359  * expected that drivers will fix this mapping if they can before
2360  * calling netif_set_real_num_tx_queues.
2361  */
2362 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2363 {
2364 	int i;
2365 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2366 
2367 	/* If TC0 is invalidated disable TC mapping */
2368 	if (tc->offset + tc->count > txq) {
2369 		netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2370 		dev->num_tc = 0;
2371 		return;
2372 	}
2373 
2374 	/* Invalidated prio to tc mappings set to TC0 */
2375 	for (i = 1; i < TC_BITMASK + 1; i++) {
2376 		int q = netdev_get_prio_tc_map(dev, i);
2377 
2378 		tc = &dev->tc_to_txq[q];
2379 		if (tc->offset + tc->count > txq) {
2380 			netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2381 				    i, q);
2382 			netdev_set_prio_tc_map(dev, i, 0);
2383 		}
2384 	}
2385 }
2386 
2387 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2388 {
2389 	if (dev->num_tc) {
2390 		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2391 		int i;
2392 
2393 		/* walk through the TCs and see if it falls into any of them */
2394 		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2395 			if ((txq - tc->offset) < tc->count)
2396 				return i;
2397 		}
2398 
2399 		/* didn't find it, just return -1 to indicate no match */
2400 		return -1;
2401 	}
2402 
2403 	return 0;
2404 }
2405 EXPORT_SYMBOL(netdev_txq_to_tc);
2406 
2407 #ifdef CONFIG_XPS
2408 static struct static_key xps_needed __read_mostly;
2409 static struct static_key xps_rxqs_needed __read_mostly;
2410 static DEFINE_MUTEX(xps_map_mutex);
2411 #define xmap_dereference(P)		\
2412 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2413 
2414 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2415 			     struct xps_dev_maps *old_maps, int tci, u16 index)
2416 {
2417 	struct xps_map *map = NULL;
2418 	int pos;
2419 
2420 	map = xmap_dereference(dev_maps->attr_map[tci]);
2421 	if (!map)
2422 		return false;
2423 
2424 	for (pos = map->len; pos--;) {
2425 		if (map->queues[pos] != index)
2426 			continue;
2427 
2428 		if (map->len > 1) {
2429 			map->queues[pos] = map->queues[--map->len];
2430 			break;
2431 		}
2432 
2433 		if (old_maps)
2434 			RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2435 		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2436 		kfree_rcu(map, rcu);
2437 		return false;
2438 	}
2439 
2440 	return true;
2441 }
2442 
2443 static bool remove_xps_queue_cpu(struct net_device *dev,
2444 				 struct xps_dev_maps *dev_maps,
2445 				 int cpu, u16 offset, u16 count)
2446 {
2447 	int num_tc = dev_maps->num_tc;
2448 	bool active = false;
2449 	int tci;
2450 
2451 	for (tci = cpu * num_tc; num_tc--; tci++) {
2452 		int i, j;
2453 
2454 		for (i = count, j = offset; i--; j++) {
2455 			if (!remove_xps_queue(dev_maps, NULL, tci, j))
2456 				break;
2457 		}
2458 
2459 		active |= i < 0;
2460 	}
2461 
2462 	return active;
2463 }
2464 
2465 static void reset_xps_maps(struct net_device *dev,
2466 			   struct xps_dev_maps *dev_maps,
2467 			   enum xps_map_type type)
2468 {
2469 	static_key_slow_dec_cpuslocked(&xps_needed);
2470 	if (type == XPS_RXQS)
2471 		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2472 
2473 	RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2474 
2475 	kfree_rcu(dev_maps, rcu);
2476 }
2477 
2478 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2479 			   u16 offset, u16 count)
2480 {
2481 	struct xps_dev_maps *dev_maps;
2482 	bool active = false;
2483 	int i, j;
2484 
2485 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2486 	if (!dev_maps)
2487 		return;
2488 
2489 	for (j = 0; j < dev_maps->nr_ids; j++)
2490 		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2491 	if (!active)
2492 		reset_xps_maps(dev, dev_maps, type);
2493 
2494 	if (type == XPS_CPUS) {
2495 		for (i = offset + (count - 1); count--; i--)
2496 			netdev_queue_numa_node_write(
2497 				netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2498 	}
2499 }
2500 
2501 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2502 				   u16 count)
2503 {
2504 	if (!static_key_false(&xps_needed))
2505 		return;
2506 
2507 	cpus_read_lock();
2508 	mutex_lock(&xps_map_mutex);
2509 
2510 	if (static_key_false(&xps_rxqs_needed))
2511 		clean_xps_maps(dev, XPS_RXQS, offset, count);
2512 
2513 	clean_xps_maps(dev, XPS_CPUS, offset, count);
2514 
2515 	mutex_unlock(&xps_map_mutex);
2516 	cpus_read_unlock();
2517 }
2518 
2519 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2520 {
2521 	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2522 }
2523 
2524 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2525 				      u16 index, bool is_rxqs_map)
2526 {
2527 	struct xps_map *new_map;
2528 	int alloc_len = XPS_MIN_MAP_ALLOC;
2529 	int i, pos;
2530 
2531 	for (pos = 0; map && pos < map->len; pos++) {
2532 		if (map->queues[pos] != index)
2533 			continue;
2534 		return map;
2535 	}
2536 
2537 	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
2538 	if (map) {
2539 		if (pos < map->alloc_len)
2540 			return map;
2541 
2542 		alloc_len = map->alloc_len * 2;
2543 	}
2544 
2545 	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2546 	 *  map
2547 	 */
2548 	if (is_rxqs_map)
2549 		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2550 	else
2551 		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2552 				       cpu_to_node(attr_index));
2553 	if (!new_map)
2554 		return NULL;
2555 
2556 	for (i = 0; i < pos; i++)
2557 		new_map->queues[i] = map->queues[i];
2558 	new_map->alloc_len = alloc_len;
2559 	new_map->len = pos;
2560 
2561 	return new_map;
2562 }
2563 
2564 /* Copy xps maps at a given index */
2565 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2566 			      struct xps_dev_maps *new_dev_maps, int index,
2567 			      int tc, bool skip_tc)
2568 {
2569 	int i, tci = index * dev_maps->num_tc;
2570 	struct xps_map *map;
2571 
2572 	/* copy maps belonging to foreign traffic classes */
2573 	for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2574 		if (i == tc && skip_tc)
2575 			continue;
2576 
2577 		/* fill in the new device map from the old device map */
2578 		map = xmap_dereference(dev_maps->attr_map[tci]);
2579 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2580 	}
2581 }
2582 
2583 /* Must be called under cpus_read_lock */
2584 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2585 			  u16 index, enum xps_map_type type)
2586 {
2587 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2588 	const unsigned long *online_mask = NULL;
2589 	bool active = false, copy = false;
2590 	int i, j, tci, numa_node_id = -2;
2591 	int maps_sz, num_tc = 1, tc = 0;
2592 	struct xps_map *map, *new_map;
2593 	unsigned int nr_ids;
2594 
2595 	WARN_ON_ONCE(index >= dev->num_tx_queues);
2596 
2597 	if (dev->num_tc) {
2598 		/* Do not allow XPS on subordinate device directly */
2599 		num_tc = dev->num_tc;
2600 		if (num_tc < 0)
2601 			return -EINVAL;
2602 
2603 		/* If queue belongs to subordinate dev use its map */
2604 		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2605 
2606 		tc = netdev_txq_to_tc(dev, index);
2607 		if (tc < 0)
2608 			return -EINVAL;
2609 	}
2610 
2611 	mutex_lock(&xps_map_mutex);
2612 
2613 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2614 	if (type == XPS_RXQS) {
2615 		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2616 		nr_ids = dev->num_rx_queues;
2617 	} else {
2618 		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2619 		if (num_possible_cpus() > 1)
2620 			online_mask = cpumask_bits(cpu_online_mask);
2621 		nr_ids = nr_cpu_ids;
2622 	}
2623 
2624 	if (maps_sz < L1_CACHE_BYTES)
2625 		maps_sz = L1_CACHE_BYTES;
2626 
2627 	/* The old dev_maps could be larger or smaller than the one we're
2628 	 * setting up now, as dev->num_tc or nr_ids could have been updated in
2629 	 * between. We could try to be smart, but let's be safe instead and only
2630 	 * copy foreign traffic classes if the two map sizes match.
2631 	 */
2632 	if (dev_maps &&
2633 	    dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2634 		copy = true;
2635 
2636 	/* allocate memory for queue storage */
2637 	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2638 	     j < nr_ids;) {
2639 		if (!new_dev_maps) {
2640 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2641 			if (!new_dev_maps) {
2642 				mutex_unlock(&xps_map_mutex);
2643 				return -ENOMEM;
2644 			}
2645 
2646 			new_dev_maps->nr_ids = nr_ids;
2647 			new_dev_maps->num_tc = num_tc;
2648 		}
2649 
2650 		tci = j * num_tc + tc;
2651 		map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2652 
2653 		map = expand_xps_map(map, j, index, type == XPS_RXQS);
2654 		if (!map)
2655 			goto error;
2656 
2657 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2658 	}
2659 
2660 	if (!new_dev_maps)
2661 		goto out_no_new_maps;
2662 
2663 	if (!dev_maps) {
2664 		/* Increment static keys at most once per type */
2665 		static_key_slow_inc_cpuslocked(&xps_needed);
2666 		if (type == XPS_RXQS)
2667 			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2668 	}
2669 
2670 	for (j = 0; j < nr_ids; j++) {
2671 		bool skip_tc = false;
2672 
2673 		tci = j * num_tc + tc;
2674 		if (netif_attr_test_mask(j, mask, nr_ids) &&
2675 		    netif_attr_test_online(j, online_mask, nr_ids)) {
2676 			/* add tx-queue to CPU/rx-queue maps */
2677 			int pos = 0;
2678 
2679 			skip_tc = true;
2680 
2681 			map = xmap_dereference(new_dev_maps->attr_map[tci]);
2682 			while ((pos < map->len) && (map->queues[pos] != index))
2683 				pos++;
2684 
2685 			if (pos == map->len)
2686 				map->queues[map->len++] = index;
2687 #ifdef CONFIG_NUMA
2688 			if (type == XPS_CPUS) {
2689 				if (numa_node_id == -2)
2690 					numa_node_id = cpu_to_node(j);
2691 				else if (numa_node_id != cpu_to_node(j))
2692 					numa_node_id = -1;
2693 			}
2694 #endif
2695 		}
2696 
2697 		if (copy)
2698 			xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2699 					  skip_tc);
2700 	}
2701 
2702 	rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2703 
2704 	/* Cleanup old maps */
2705 	if (!dev_maps)
2706 		goto out_no_old_maps;
2707 
2708 	for (j = 0; j < dev_maps->nr_ids; j++) {
2709 		for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2710 			map = xmap_dereference(dev_maps->attr_map[tci]);
2711 			if (!map)
2712 				continue;
2713 
2714 			if (copy) {
2715 				new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2716 				if (map == new_map)
2717 					continue;
2718 			}
2719 
2720 			RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2721 			kfree_rcu(map, rcu);
2722 		}
2723 	}
2724 
2725 	old_dev_maps = dev_maps;
2726 
2727 out_no_old_maps:
2728 	dev_maps = new_dev_maps;
2729 	active = true;
2730 
2731 out_no_new_maps:
2732 	if (type == XPS_CPUS)
2733 		/* update Tx queue numa node */
2734 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2735 					     (numa_node_id >= 0) ?
2736 					     numa_node_id : NUMA_NO_NODE);
2737 
2738 	if (!dev_maps)
2739 		goto out_no_maps;
2740 
2741 	/* removes tx-queue from unused CPUs/rx-queues */
2742 	for (j = 0; j < dev_maps->nr_ids; j++) {
2743 		tci = j * dev_maps->num_tc;
2744 
2745 		for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2746 			if (i == tc &&
2747 			    netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2748 			    netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2749 				continue;
2750 
2751 			active |= remove_xps_queue(dev_maps,
2752 						   copy ? old_dev_maps : NULL,
2753 						   tci, index);
2754 		}
2755 	}
2756 
2757 	if (old_dev_maps)
2758 		kfree_rcu(old_dev_maps, rcu);
2759 
2760 	/* free map if not active */
2761 	if (!active)
2762 		reset_xps_maps(dev, dev_maps, type);
2763 
2764 out_no_maps:
2765 	mutex_unlock(&xps_map_mutex);
2766 
2767 	return 0;
2768 error:
2769 	/* remove any maps that we added */
2770 	for (j = 0; j < nr_ids; j++) {
2771 		for (i = num_tc, tci = j * num_tc; i--; tci++) {
2772 			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2773 			map = copy ?
2774 			      xmap_dereference(dev_maps->attr_map[tci]) :
2775 			      NULL;
2776 			if (new_map && new_map != map)
2777 				kfree(new_map);
2778 		}
2779 	}
2780 
2781 	mutex_unlock(&xps_map_mutex);
2782 
2783 	kfree(new_dev_maps);
2784 	return -ENOMEM;
2785 }
2786 EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2787 
2788 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2789 			u16 index)
2790 {
2791 	int ret;
2792 
2793 	cpus_read_lock();
2794 	ret =  __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2795 	cpus_read_unlock();
2796 
2797 	return ret;
2798 }
2799 EXPORT_SYMBOL(netif_set_xps_queue);
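/* Illustrative sketch, not part of this file: steer transmissions that
 * originate on CPUs 0 and 1 to tx queue 0 of a device.  The function
 * name is hypothetical.
 */
#if 0
static int example_pin_txq0(struct net_device *dev)
{
	cpumask_var_t cpus;
	int err;

	if (!zalloc_cpumask_var(&cpus, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(0, cpus);
	cpumask_set_cpu(1, cpus);
	err = netif_set_xps_queue(dev, cpus, 0);

	free_cpumask_var(cpus);
	return err;
}
#endif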
2800 
2801 #endif
2802 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2803 {
2804 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2805 
2806 	/* Unbind any subordinate channels */
2807 	while (txq-- != &dev->_tx[0]) {
2808 		if (txq->sb_dev)
2809 			netdev_unbind_sb_channel(dev, txq->sb_dev);
2810 	}
2811 }
2812 
2813 void netdev_reset_tc(struct net_device *dev)
2814 {
2815 #ifdef CONFIG_XPS
2816 	netif_reset_xps_queues_gt(dev, 0);
2817 #endif
2818 	netdev_unbind_all_sb_channels(dev);
2819 
2820 	/* Reset TC configuration of device */
2821 	dev->num_tc = 0;
2822 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2823 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2824 }
2825 EXPORT_SYMBOL(netdev_reset_tc);
2826 
2827 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2828 {
2829 	if (tc >= dev->num_tc)
2830 		return -EINVAL;
2831 
2832 #ifdef CONFIG_XPS
2833 	netif_reset_xps_queues(dev, offset, count);
2834 #endif
2835 	dev->tc_to_txq[tc].count = count;
2836 	dev->tc_to_txq[tc].offset = offset;
2837 	return 0;
2838 }
2839 EXPORT_SYMBOL(netdev_set_tc_queue);
2840 
2841 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2842 {
2843 	if (num_tc > TC_MAX_QUEUE)
2844 		return -EINVAL;
2845 
2846 #ifdef CONFIG_XPS
2847 	netif_reset_xps_queues_gt(dev, 0);
2848 #endif
2849 	netdev_unbind_all_sb_channels(dev);
2850 
2851 	dev->num_tc = num_tc;
2852 	return 0;
2853 }
2854 EXPORT_SYMBOL(netdev_set_num_tc);
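/* Illustrative sketch, not part of this file: a driver carving eight tx
 * queues into two traffic classes with netdev_set_num_tc() and
 * netdev_set_tc_queue().  The function name is hypothetical and the
 * device is assumed to have at least eight real tx queues.
 */
#if 0
static int example_setup_two_tcs(struct net_device *dev)
{
	int err;

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	err = netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: queues 0-3 */
	if (!err)
		err = netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: queues 4-7 */
	if (err)
		netdev_reset_tc(dev);
	return err;
}
#endif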
2855 
2856 void netdev_unbind_sb_channel(struct net_device *dev,
2857 			      struct net_device *sb_dev)
2858 {
2859 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2860 
2861 #ifdef CONFIG_XPS
2862 	netif_reset_xps_queues_gt(sb_dev, 0);
2863 #endif
2864 	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2865 	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2866 
2867 	while (txq-- != &dev->_tx[0]) {
2868 		if (txq->sb_dev == sb_dev)
2869 			txq->sb_dev = NULL;
2870 	}
2871 }
2872 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2873 
2874 int netdev_bind_sb_channel_queue(struct net_device *dev,
2875 				 struct net_device *sb_dev,
2876 				 u8 tc, u16 count, u16 offset)
2877 {
2878 	/* Make certain the sb_dev and dev are already configured */
2879 	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2880 		return -EINVAL;
2881 
2882 	/* We cannot hand out queues we don't have */
2883 	if ((offset + count) > dev->real_num_tx_queues)
2884 		return -EINVAL;
2885 
2886 	/* Record the mapping */
2887 	sb_dev->tc_to_txq[tc].count = count;
2888 	sb_dev->tc_to_txq[tc].offset = offset;
2889 
2890 	/* Provide a way for Tx queue to find the tc_to_txq map or
2891 	 * XPS map for itself.
2892 	 */
2893 	while (count--)
2894 		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2895 
2896 	return 0;
2897 }
2898 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2899 
2900 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2901 {
2902 	/* Do not use a multiqueue device to represent a subordinate channel */
2903 	if (netif_is_multiqueue(dev))
2904 		return -ENODEV;
2905 
2906 	/* We allow channels 1 - 32767 to be used for subordinate channels.
2907 	 * Channel 0 is meant to be "native" mode and used only to represent
2908 	 * the main root device. We allow writing 0 to reset the device back
2909 	 * to normal mode after being used as a subordinate channel.
2910 	 */
2911 	if (channel > S16_MAX)
2912 		return -EINVAL;
2913 
2914 	dev->num_tc = -channel;
2915 
2916 	return 0;
2917 }
2918 EXPORT_SYMBOL(netdev_set_sb_channel);
2919 
2920 /*
2921  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2922  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2923  */
2924 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2925 {
2926 	bool disabling;
2927 	int rc;
2928 
2929 	disabling = txq < dev->real_num_tx_queues;
2930 
2931 	if (txq < 1 || txq > dev->num_tx_queues)
2932 		return -EINVAL;
2933 
2934 	if (dev->reg_state == NETREG_REGISTERED ||
2935 	    dev->reg_state == NETREG_UNREGISTERING) {
2936 		ASSERT_RTNL();
2937 
2938 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2939 						  txq);
2940 		if (rc)
2941 			return rc;
2942 
2943 		if (dev->num_tc)
2944 			netif_setup_tc(dev, txq);
2945 
2946 		dev_qdisc_change_real_num_tx(dev, txq);
2947 
2948 		dev->real_num_tx_queues = txq;
2949 
2950 		if (disabling) {
2951 			synchronize_net();
2952 			qdisc_reset_all_tx_gt(dev, txq);
2953 #ifdef CONFIG_XPS
2954 			netif_reset_xps_queues_gt(dev, txq);
2955 #endif
2956 		}
2957 	} else {
2958 		dev->real_num_tx_queues = txq;
2959 	}
2960 
2961 	return 0;
2962 }
2963 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2964 
2965 #ifdef CONFIG_SYSFS
2966 /**
2967  *	netif_set_real_num_rx_queues - set actual number of RX queues used
2968  *	@dev: Network device
2969  *	@rxq: Actual number of RX queues
2970  *
2971  *	This must be called either with the rtnl_lock held or before
2972  *	registration of the net device.  Returns 0 on success, or a
2973  *	negative error code.  If called before registration, it always
2974  *	succeeds.
2975  */
2976 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2977 {
2978 	int rc;
2979 
2980 	if (rxq < 1 || rxq > dev->num_rx_queues)
2981 		return -EINVAL;
2982 
2983 	if (dev->reg_state == NETREG_REGISTERED) {
2984 		ASSERT_RTNL();
2985 
2986 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2987 						  rxq);
2988 		if (rc)
2989 			return rc;
2990 	}
2991 
2992 	dev->real_num_rx_queues = rxq;
2993 	return 0;
2994 }
2995 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2996 #endif
2997 
2998 /**
2999  *	netif_set_real_num_queues - set actual number of RX and TX queues used
3000  *	@dev: Network device
3001  *	@txq: Actual number of TX queues
3002  *	@rxq: Actual number of RX queues
3003  *
3004  *	Set the real number of both TX and RX queues.
3005  *	Does nothing if the number of queues is already correct.
3006  */
3007 int netif_set_real_num_queues(struct net_device *dev,
3008 			      unsigned int txq, unsigned int rxq)
3009 {
3010 	unsigned int old_rxq = dev->real_num_rx_queues;
3011 	int err;
3012 
3013 	if (txq < 1 || txq > dev->num_tx_queues ||
3014 	    rxq < 1 || rxq > dev->num_rx_queues)
3015 		return -EINVAL;
3016 
3017 	/* Start from increases, so the error path only does decreases -
3018 	 * decreases can't fail.
3019 	 */
3020 	if (rxq > dev->real_num_rx_queues) {
3021 		err = netif_set_real_num_rx_queues(dev, rxq);
3022 		if (err)
3023 			return err;
3024 	}
3025 	if (txq > dev->real_num_tx_queues) {
3026 		err = netif_set_real_num_tx_queues(dev, txq);
3027 		if (err)
3028 			goto undo_rx;
3029 	}
3030 	if (rxq < dev->real_num_rx_queues)
3031 		WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
3032 	if (txq < dev->real_num_tx_queues)
3033 		WARN_ON(netif_set_real_num_tx_queues(dev, txq));
3034 
3035 	return 0;
3036 undo_rx:
3037 	WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
3038 	return err;
3039 }
3040 EXPORT_SYMBOL(netif_set_real_num_queues);
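/* Illustrative sketch, not part of this file: a hypothetical driver
 * resizing both queue sets in one call when the user changes the channel
 * count (RTNL is assumed to be held by the caller once registered).
 */
#if 0
static int example_set_channels(struct net_device *dev, unsigned int n)
{
	return netif_set_real_num_queues(dev, n, n);
}
#endif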
3041 
3042 /**
3043  * netif_set_tso_max_size() - set the max size of TSO frames supported
3044  * @dev:	netdev to update
3045  * @size:	max skb->len of a TSO frame
3046  *
3047  * Set the limit on the size of TSO super-frames the device can handle.
3048  * Unless explicitly set the stack will assume the value of
3049  * %GSO_LEGACY_MAX_SIZE.
3050  */
3051 void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
3052 {
3053 	dev->tso_max_size = min(GSO_MAX_SIZE, size);
3054 	if (size < READ_ONCE(dev->gso_max_size))
3055 		netif_set_gso_max_size(dev, size);
3056 	if (size < READ_ONCE(dev->gso_ipv4_max_size))
3057 		netif_set_gso_ipv4_max_size(dev, size);
3058 }
3059 EXPORT_SYMBOL(netif_set_tso_max_size);
3060 
3061 /**
3062  * netif_set_tso_max_segs() - set the max number of segs supported for TSO
3063  * @dev:	netdev to update
3064  * @segs:	max number of TCP segments
3065  *
3066  * Set the limit on the number of TCP segments the device can generate from
3067  * a single TSO super-frame.
3068  * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
3069  */
3070 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
3071 {
3072 	dev->tso_max_segs = segs;
3073 	if (segs < READ_ONCE(dev->gso_max_segs))
3074 		netif_set_gso_max_segs(dev, segs);
3075 }
3076 EXPORT_SYMBOL(netif_set_tso_max_segs);
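/* Illustrative sketch, not part of this file: a hypothetical driver whose
 * DMA engine handles at most 256 KB and 32 descriptors per TSO frame
 * would advertise those caps at probe time.
 */
#if 0
static void example_set_tso_caps(struct net_device *dev)
{
	netif_set_tso_max_size(dev, 256 * 1024);
	netif_set_tso_max_segs(dev, 32);
}
#endif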
3077 
3078 /**
3079  * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
3080  * @to:		netdev to update
3081  * @from:	netdev from which to copy the limits
3082  */
3083 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
3084 {
3085 	netif_set_tso_max_size(to, from->tso_max_size);
3086 	netif_set_tso_max_segs(to, from->tso_max_segs);
3087 }
3088 EXPORT_SYMBOL(netif_inherit_tso_max);
3089 
3090 /**
3091  * netif_get_num_default_rss_queues - default number of RSS queues
3092  *
3093  * Default value is the number of physical cores if there are only 1 or 2, or
3094  * divided by 2 if there are more.
3095  */
3096 int netif_get_num_default_rss_queues(void)
3097 {
3098 	cpumask_var_t cpus;
3099 	int cpu, count = 0;
3100 
3101 	if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
3102 		return 1;
3103 
3104 	cpumask_copy(cpus, cpu_online_mask);
3105 	for_each_cpu(cpu, cpus) {
3106 		++count;
3107 		cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
3108 	}
3109 	free_cpumask_var(cpus);
3110 
3111 	return count > 2 ? DIV_ROUND_UP(count, 2) : count;
3112 }
3113 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
3114 
3115 static void __netif_reschedule(struct Qdisc *q)
3116 {
3117 	struct softnet_data *sd;
3118 	unsigned long flags;
3119 
3120 	local_irq_save(flags);
3121 	sd = this_cpu_ptr(&softnet_data);
3122 	q->next_sched = NULL;
3123 	*sd->output_queue_tailp = q;
3124 	sd->output_queue_tailp = &q->next_sched;
3125 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3126 	local_irq_restore(flags);
3127 }
3128 
3129 void __netif_schedule(struct Qdisc *q)
3130 {
3131 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3132 		__netif_reschedule(q);
3133 }
3134 EXPORT_SYMBOL(__netif_schedule);
3135 
3136 struct dev_kfree_skb_cb {
3137 	enum skb_drop_reason reason;
3138 };
3139 
3140 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3141 {
3142 	return (struct dev_kfree_skb_cb *)skb->cb;
3143 }
3144 
3145 void netif_schedule_queue(struct netdev_queue *txq)
3146 {
3147 	rcu_read_lock();
3148 	if (!netif_xmit_stopped(txq)) {
3149 		struct Qdisc *q = rcu_dereference(txq->qdisc);
3150 
3151 		__netif_schedule(q);
3152 	}
3153 	rcu_read_unlock();
3154 }
3155 EXPORT_SYMBOL(netif_schedule_queue);
3156 
3157 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3158 {
3159 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3160 		struct Qdisc *q;
3161 
3162 		rcu_read_lock();
3163 		q = rcu_dereference(dev_queue->qdisc);
3164 		__netif_schedule(q);
3165 		rcu_read_unlock();
3166 	}
3167 }
3168 EXPORT_SYMBOL(netif_tx_wake_queue);
3169 
3170 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3171 {
3172 	unsigned long flags;
3173 
3174 	if (unlikely(!skb))
3175 		return;
3176 
3177 	if (likely(refcount_read(&skb->users) == 1)) {
3178 		smp_rmb();
3179 		refcount_set(&skb->users, 0);
3180 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
3181 		return;
3182 	}
3183 	get_kfree_skb_cb(skb)->reason = reason;
3184 	local_irq_save(flags);
3185 	skb->next = __this_cpu_read(softnet_data.completion_queue);
3186 	__this_cpu_write(softnet_data.completion_queue, skb);
3187 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3188 	local_irq_restore(flags);
3189 }
3190 EXPORT_SYMBOL(dev_kfree_skb_irq_reason);
3191 
3192 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3193 {
3194 	if (in_hardirq() || irqs_disabled())
3195 		dev_kfree_skb_irq_reason(skb, reason);
3196 	else
3197 		kfree_skb_reason(skb, reason);
3198 }
3199 EXPORT_SYMBOL(dev_kfree_skb_any_reason);
3200 
3201 
3202 /**
3203  * netif_device_detach - mark device as removed
3204  * @dev: network device
3205  *
3206  * Mark device as removed from system and therefore no longer available.
3207  */
3208 void netif_device_detach(struct net_device *dev)
3209 {
3210 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3211 	    netif_running(dev)) {
3212 		netif_tx_stop_all_queues(dev);
3213 	}
3214 }
3215 EXPORT_SYMBOL(netif_device_detach);
3216 
3217 /**
3218  * netif_device_attach - mark device as attached
3219  * @dev: network device
3220  *
3221  * Mark device as attached to the system and restart if needed.
3222  */
3223 void netif_device_attach(struct net_device *dev)
3224 {
3225 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3226 	    netif_running(dev)) {
3227 		netif_tx_wake_all_queues(dev);
3228 		__netdev_watchdog_up(dev);
3229 	}
3230 }
3231 EXPORT_SYMBOL(netif_device_attach);
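/* Illustrative sketch, not part of this file: the usual suspend/resume
 * pairing of netif_device_detach() and netif_device_attach() in a
 * hypothetical driver's PM callbacks.
 */
#if 0
static int example_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_detach(dev);
	return 0;
}

static int example_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_attach(dev);
	return 0;
}
#endif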
3232 
3233 /*
3234  * Returns a Tx hash based on the given packet descriptor and a Tx queues' number
3235  * to be used as a distribution range.
3236  */
3237 static u16 skb_tx_hash(const struct net_device *dev,
3238 		       const struct net_device *sb_dev,
3239 		       struct sk_buff *skb)
3240 {
3241 	u32 hash;
3242 	u16 qoffset = 0;
3243 	u16 qcount = dev->real_num_tx_queues;
3244 
3245 	if (dev->num_tc) {
3246 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3247 
3248 		qoffset = sb_dev->tc_to_txq[tc].offset;
3249 		qcount = sb_dev->tc_to_txq[tc].count;
3250 		if (unlikely(!qcount)) {
3251 			net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3252 					     sb_dev->name, qoffset, tc);
3253 			qoffset = 0;
3254 			qcount = dev->real_num_tx_queues;
3255 		}
3256 	}
3257 
3258 	if (skb_rx_queue_recorded(skb)) {
3259 		DEBUG_NET_WARN_ON_ONCE(qcount == 0);
3260 		hash = skb_get_rx_queue(skb);
3261 		if (hash >= qoffset)
3262 			hash -= qoffset;
3263 		while (unlikely(hash >= qcount))
3264 			hash -= qcount;
3265 		return hash + qoffset;
3266 	}
3267 
3268 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3269 }
3270 
3271 void skb_warn_bad_offload(const struct sk_buff *skb)
3272 {
3273 	static const netdev_features_t null_features;
3274 	struct net_device *dev = skb->dev;
3275 	const char *name = "";
3276 
3277 	if (!net_ratelimit())
3278 		return;
3279 
3280 	if (dev) {
3281 		if (dev->dev.parent)
3282 			name = dev_driver_string(dev->dev.parent);
3283 		else
3284 			name = netdev_name(dev);
3285 	}
3286 	skb_dump(KERN_WARNING, skb, false);
3287 	WARN(1, "%s: caps=(%pNF, %pNF)\n",
3288 	     name, dev ? &dev->features : &null_features,
3289 	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
3290 }
3291 
3292 /*
3293  * Invalidate hardware checksum when packet is to be mangled, and
3294  * complete checksum manually on outgoing path.
3295  */
3296 int skb_checksum_help(struct sk_buff *skb)
3297 {
3298 	__wsum csum;
3299 	int ret = 0, offset;
3300 
3301 	if (skb->ip_summed == CHECKSUM_COMPLETE)
3302 		goto out_set_summed;
3303 
3304 	if (unlikely(skb_is_gso(skb))) {
3305 		skb_warn_bad_offload(skb);
3306 		return -EINVAL;
3307 	}
3308 
3309 	/* Before computing a checksum, we should make sure no frag could
3310 	 * be modified by an external entity : checksum could be wrong.
3311 	 */
3312 	if (skb_has_shared_frag(skb)) {
3313 		ret = __skb_linearize(skb);
3314 		if (ret)
3315 			goto out;
3316 	}
3317 
3318 	offset = skb_checksum_start_offset(skb);
3319 	ret = -EINVAL;
3320 	if (unlikely(offset >= skb_headlen(skb))) {
3321 		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3322 		WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
3323 			  offset, skb_headlen(skb));
3324 		goto out;
3325 	}
3326 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
3327 
3328 	offset += skb->csum_offset;
3329 	if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
3330 		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3331 		WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
3332 			  offset + sizeof(__sum16), skb_headlen(skb));
3333 		goto out;
3334 	}
3335 	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3336 	if (ret)
3337 		goto out;
3338 
3339 	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3340 out_set_summed:
3341 	skb->ip_summed = CHECKSUM_NONE;
3342 out:
3343 	return ret;
3344 }
3345 EXPORT_SYMBOL(skb_checksum_help);
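/* Illustrative sketch, not part of this file: a driver's transmit path
 * falling back to skb_checksum_help() for packets whose checksum the
 * hardware cannot offload.  The function name is hypothetical.
 */
#if 0
static int example_tx_fixup_csum(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
		return -EINVAL;		/* caller should drop the skb */
	return 0;
}
#endif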
3346 
3347 int skb_crc32c_csum_help(struct sk_buff *skb)
3348 {
3349 	__le32 crc32c_csum;
3350 	int ret = 0, offset, start;
3351 
3352 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3353 		goto out;
3354 
3355 	if (unlikely(skb_is_gso(skb)))
3356 		goto out;
3357 
3358 	/* Before computing a checksum, we should make sure no frag could
3359 	 * be modified by an external entity : checksum could be wrong.
3360 	 */
3361 	if (unlikely(skb_has_shared_frag(skb))) {
3362 		ret = __skb_linearize(skb);
3363 		if (ret)
3364 			goto out;
3365 	}
3366 	start = skb_checksum_start_offset(skb);
3367 	offset = start + offsetof(struct sctphdr, checksum);
3368 	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3369 		ret = -EINVAL;
3370 		goto out;
3371 	}
3372 
3373 	ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3374 	if (ret)
3375 		goto out;
3376 
3377 	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3378 						  skb->len - start, ~(__u32)0,
3379 						  crc32c_csum_stub));
3380 	*(__le32 *)(skb->data + offset) = crc32c_csum;
3381 	skb_reset_csum_not_inet(skb);
3382 out:
3383 	return ret;
3384 }
3385 
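/* Return the network-layer protocol of an skb for offload decisions,
 * looking through an ETH_P_TEB (tunneled Ethernet) header and any VLAN
 * tags; the header depth is reported via @depth.
 */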
3386 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3387 {
3388 	__be16 type = skb->protocol;
3389 
3390 	/* Tunnel gso handlers can set protocol to ethernet. */
3391 	if (type == htons(ETH_P_TEB)) {
3392 		struct ethhdr *eth;
3393 
3394 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3395 			return 0;
3396 
3397 		eth = (struct ethhdr *)skb->data;
3398 		type = eth->h_proto;
3399 	}
3400 
3401 	return vlan_get_protocol_and_depth(skb, type, depth);
3402 }
3403 
3404 
3405 /* Take action when hardware reception checksum errors are detected. */
3406 #ifdef CONFIG_BUG
3407 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3408 {
3409 	netdev_err(dev, "hw csum failure\n");
3410 	skb_dump(KERN_ERR, skb, true);
3411 	dump_stack();
3412 }
3413 
3414 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3415 {
3416 	DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3417 }
3418 EXPORT_SYMBOL(netdev_rx_csum_fault);
3419 #endif
3420 
3421 /* XXX: check that highmem exists at all on the given machine. */
3422 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3423 {
3424 #ifdef CONFIG_HIGHMEM
3425 	int i;
3426 
3427 	if (!(dev->features & NETIF_F_HIGHDMA)) {
3428 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3429 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3430 
3431 			if (PageHighMem(skb_frag_page(frag)))
3432 				return 1;
3433 		}
3434 	}
3435 #endif
3436 	return 0;
3437 }
3438 
3439 /* For an MPLS offload request, verify we are testing hardware MPLS features
3440  * instead of the standard features for the netdev.
3441  */
3442 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3443 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3444 					   netdev_features_t features,
3445 					   __be16 type)
3446 {
3447 	if (eth_p_mpls(type))
3448 		features &= skb->dev->mpls_features;
3449 
3450 	return features;
3451 }
3452 #else
3453 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3454 					   netdev_features_t features,
3455 					   __be16 type)
3456 {
3457 	return features;
3458 }
3459 #endif
3460 
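/* Drop offload features the device cannot apply to this particular skb:
 * checksum and GSO when the protocol cannot be checksummed in hardware,
 * and scatter-gather when fragments live in highmem the device cannot
 * reach.
 */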
3461 static netdev_features_t harmonize_features(struct sk_buff *skb,
3462 	netdev_features_t features)
3463 {
3464 	__be16 type;
3465 
3466 	type = skb_network_protocol(skb, NULL);
3467 	features = net_mpls_features(skb, features, type);
3468 
3469 	if (skb->ip_summed != CHECKSUM_NONE &&
3470 	    !can_checksum_protocol(features, type)) {
3471 		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3472 	}
3473 	if (illegal_highdma(skb->dev, skb))
3474 		features &= ~NETIF_F_SG;
3475 
3476 	return features;
3477 }
3478 
3479 netdev_features_t passthru_features_check(struct sk_buff *skb,
3480 					  struct net_device *dev,
3481 					  netdev_features_t features)
3482 {
3483 	return features;
3484 }
3485 EXPORT_SYMBOL(passthru_features_check);
3486 
3487 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3488 					     struct net_device *dev,
3489 					     netdev_features_t features)
3490 {
3491 	return vlan_features_check(skb, features);
3492 }
3493 
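/* Validate GSO parameters against the device limits and strip the GSO
 * features that cannot be used for this skb: too many segments, unknown
 * gso_type, unsupported partial-GSO features, or IPv4 ID mangling on a
 * fragmentable header.
 */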
3494 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3495 					    struct net_device *dev,
3496 					    netdev_features_t features)
3497 {
3498 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
3499 
3500 	if (gso_segs > READ_ONCE(dev->gso_max_segs))
3501 		return features & ~NETIF_F_GSO_MASK;
3502 
3503 	if (!skb_shinfo(skb)->gso_type) {
3504 		skb_warn_bad_offload(skb);
3505 		return features & ~NETIF_F_GSO_MASK;
3506 	}
3507 
3508 	/* Support for GSO partial features requires software
3509 	 * intervention before we can actually process the packets,
3510 	 * so we need to strip support for any partial features now;
3511 	 * we can pull them back in after we have partially
3512 	 * segmented the frame.
3513 	 */
3514 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3515 		features &= ~dev->gso_partial_features;
3516 
3517 	/* Make sure to clear the IPv4 ID mangling feature if the
3518 	 * IPv4 header has the potential to be fragmented.
3519 	 */
3520 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3521 		struct iphdr *iph = skb->encapsulation ?
3522 				    inner_ip_hdr(skb) : ip_hdr(skb);
3523 
3524 		if (!(iph->frag_off & htons(IP_DF)))
3525 			features &= ~NETIF_F_TSO_MANGLEID;
3526 	}
3527 
3528 	return features;
3529 }
3530 
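/* Compute the set of offload features usable for this skb, starting from
 * dev->features and narrowing it based on GSO limits, encapsulation,
 * VLAN tags, the driver's ndo_features_check() and protocol checks.
 */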
3531 netdev_features_t netif_skb_features(struct sk_buff *skb)
3532 {
3533 	struct net_device *dev = skb->dev;
3534 	netdev_features_t features = dev->features;
3535 
3536 	if (skb_is_gso(skb))
3537 		features = gso_features_check(skb, dev, features);
3538 
3539 	/* For an encapsulation offload request, verify we are testing
3540 	 * hardware encapsulation features instead of the standard
3541 	 * features for the netdev.
3542 	 */
3543 	if (skb->encapsulation)
3544 		features &= dev->hw_enc_features;
3545 
3546 	if (skb_vlan_tagged(skb))
3547 		features = netdev_intersect_features(features,
3548 						     dev->vlan_features |
3549 						     NETIF_F_HW_VLAN_CTAG_TX |
3550 						     NETIF_F_HW_VLAN_STAG_TX);
3551 
3552 	if (dev->netdev_ops->ndo_features_check)
3553 		features &= dev->netdev_ops->ndo_features_check(skb, dev,
3554 								features);
3555 	else
3556 		features &= dflt_features_check(skb, dev, features);
3557 
3558 	return harmonize_features(skb, features);
3559 }
3560 EXPORT_SYMBOL(netif_skb_features);
3561 
3562 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3563 		    struct netdev_queue *txq, bool more)
3564 {
3565 	unsigned int len;
3566 	int rc;
3567 
3568 	if (dev_nit_active(dev))
3569 		dev_queue_xmit_nit(skb, dev);
3570 
3571 	len = skb->len;
3572 	trace_net_dev_start_xmit(skb, dev);
3573 	rc = netdev_start_xmit(skb, dev, txq, more);
3574 	trace_net_dev_xmit(skb, rc, dev, len);
3575 
3576 	return rc;
3577 }
3578 
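/* Transmit a list of skbs to the driver one at a time.  Stops early when
 * the driver rejects a packet or the queue becomes stopped, and returns
 * the first untransmitted skb (NULL if all were sent); *ret carries the
 * last transmit return code.
 */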
3579 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3580 				    struct netdev_queue *txq, int *ret)
3581 {
3582 	struct sk_buff *skb = first;
3583 	int rc = NETDEV_TX_OK;
3584 
3585 	while (skb) {
3586 		struct sk_buff *next = skb->next;
3587 
3588 		skb_mark_not_on_list(skb);
3589 		rc = xmit_one(skb, dev, txq, next != NULL);
3590 		if (unlikely(!dev_xmit_complete(rc))) {
3591 			skb->next = next;
3592 			goto out;
3593 		}
3594 
3595 		skb = next;
3596 		if (netif_tx_queue_stopped(txq) && skb) {
3597 			rc = NETDEV_TX_BUSY;
3598 			break;
3599 		}
3600 	}
3601 
3602 out:
3603 	*ret = rc;
3604 	return skb;
3605 }
3606 
3607 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3608 					  netdev_features_t features)
3609 {
3610 	if (skb_vlan_tag_present(skb) &&
3611 	    !vlan_hw_offload_capable(features, skb->vlan_proto))
3612 		skb = __vlan_hwaccel_push_inside(skb);
3613 	return skb;
3614 }
3615 
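/* Return 0 if the device can complete the checksum of this skb in
 * hardware; otherwise resolve it in software (CRC32c for SCTP, the
 * Internet checksum for everything else).
 */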
3616 int skb_csum_hwoffload_help(struct sk_buff *skb,
3617 			    const netdev_features_t features)
3618 {
3619 	if (unlikely(skb_csum_is_sctp(skb)))
3620 		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3621 			skb_crc32c_csum_help(skb);
3622 
3623 	if (features & NETIF_F_HW_CSUM)
3624 		return 0;
3625 
3626 	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3627 		switch (skb->csum_offset) {
3628 		case offsetof(struct tcphdr, check):
3629 		case offsetof(struct udphdr, check):
3630 			return 0;
3631 		}
3632 	}
3633 
3634 	return skb_checksum_help(skb);
3635 }
3636 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3637 
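/* Final preparation of an skb before handing it to the driver: insert a
 * VLAN tag the device cannot offload, segment or linearize GSO packets
 * the device cannot handle, and resolve checksums it cannot compute.
 * Returns the (possibly segmented) skb, or NULL if it had to be dropped.
 */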
3638 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3639 {
3640 	netdev_features_t features;
3641 
3642 	features = netif_skb_features(skb);
3643 	skb = validate_xmit_vlan(skb, features);
3644 	if (unlikely(!skb))
3645 		goto out_null;
3646 
3647 	skb = sk_validate_xmit_skb(skb, dev);
3648 	if (unlikely(!skb))
3649 		goto out_null;
3650 
3651 	if (netif_needs_gso(skb, features)) {
3652 		struct sk_buff *segs;
3653 
3654 		segs = skb_gso_segment(skb, features);
3655 		if (IS_ERR(segs)) {
3656 			goto out_kfree_skb;
3657 		} else if (segs) {
3658 			consume_skb(skb);
3659 			skb = segs;
3660 		}
3661 	} else {
3662 		if (skb_needs_linearize(skb, features) &&
3663 		    __skb_linearize(skb))
3664 			goto out_kfree_skb;
3665 
3666 		/* If packet is not checksummed and device does not
3667 		 * support checksumming for this protocol, complete
3668 		 * checksumming here.
3669 		 */
3670 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
3671 			if (skb->encapsulation)
3672 				skb_set_inner_transport_header(skb,
3673 							       skb_checksum_start_offset(skb));
3674 			else
3675 				skb_set_transport_header(skb,
3676 							 skb_checksum_start_offset(skb));
3677 			if (skb_csum_hwoffload_help(skb, features))
3678 				goto out_kfree_skb;
3679 		}
3680 	}
3681 
3682 	skb = validate_xmit_xfrm(skb, features, again);
3683 
3684 	return skb;
3685 
3686 out_kfree_skb:
3687 	kfree_skb(skb);
3688 out_null:
3689 	dev_core_stats_tx_dropped_inc(dev);
3690 	return NULL;
3691 }
3692 
3693 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3694 {
3695 	struct sk_buff *next, *head = NULL, *tail;
3696 
3697 	for (; skb != NULL; skb = next) {
3698 		next = skb->next;
3699 		skb_mark_not_on_list(skb);
3700 
3701 		/* in case the skb won't be segmented, point it to itself */
3702 		skb->prev = skb;
3703 
3704 		skb = validate_xmit_skb(skb, dev, again);
3705 		if (!skb)
3706 			continue;
3707 
3708 		if (!head)
3709 			head = skb;
3710 		else
3711 			tail->next = skb;
3712 		/* If skb was segmented, skb->prev points to
3713 		 * the last segment. If not, it still contains skb.
3714 		 */
3715 		tail = skb->prev;
3716 	}
3717 	return head;
3718 }
3719 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3720 
3721 static void qdisc_pkt_len_init(struct sk_buff *skb)
3722 {
3723 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
3724 
3725 	qdisc_skb_cb(skb)->pkt_len = skb->len;
3726 
3727 	/* To get a more precise estimate of bytes sent on the wire,
3728 	 * we add the header size of all segments to pkt_len.
3729 	 */
3730 	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3731 		u16 gso_segs = shinfo->gso_segs;
3732 		unsigned int hdr_len;
3733 
3734 		/* mac layer + network layer */
3735 		hdr_len = skb_transport_offset(skb);
3736 
3737 		/* + transport layer */
3738 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3739 			const struct tcphdr *th;
3740 			struct tcphdr _tcphdr;
3741 
3742 			th = skb_header_pointer(skb, hdr_len,
3743 						sizeof(_tcphdr), &_tcphdr);
3744 			if (likely(th))
3745 				hdr_len += __tcp_hdrlen(th);
3746 		} else {
3747 			struct udphdr _udphdr;
3748 
3749 			if (skb_header_pointer(skb, hdr_len,
3750 					       sizeof(_udphdr), &_udphdr))
3751 				hdr_len += sizeof(struct udphdr);
3752 		}
3753 
3754 		if (shinfo->gso_type & SKB_GSO_DODGY)
3755 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3756 						shinfo->gso_size);
3757 
3758 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3759 	}
3760 }
3761 
3762 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3763 			     struct sk_buff **to_free,
3764 			     struct netdev_queue *txq)
3765 {
3766 	int rc;
3767 
3768 	rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3769 	if (rc == NET_XMIT_SUCCESS)
3770 		trace_qdisc_enqueue(q, txq, skb);
3771 	return rc;
3772 }
3773 
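/* Enqueue an skb to a qdisc and run the qdisc.  Lockless (TCQ_F_NOLOCK)
 * qdiscs may transmit directly when empty and bypassable; other qdiscs
 * take the root lock, using a separate busylock to reduce contention
 * between enqueueing CPUs and the dequeueing owner.
 */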
3774 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3775 				 struct net_device *dev,
3776 				 struct netdev_queue *txq)
3777 {
3778 	spinlock_t *root_lock = qdisc_lock(q);
3779 	struct sk_buff *to_free = NULL;
3780 	bool contended;
3781 	int rc;
3782 
3783 	qdisc_calculate_pkt_len(skb, q);
3784 
3785 	if (q->flags & TCQ_F_NOLOCK) {
3786 		if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3787 		    qdisc_run_begin(q)) {
3788 			/* Retest nolock_qdisc_is_empty() within the protection
3789 			 * of q->seqlock to protect from racing with requeuing.
3790 			 */
3791 			if (unlikely(!nolock_qdisc_is_empty(q))) {
3792 				rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3793 				__qdisc_run(q);
3794 				qdisc_run_end(q);
3795 
3796 				goto no_lock_out;
3797 			}
3798 
3799 			qdisc_bstats_cpu_update(q, skb);
3800 			if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3801 			    !nolock_qdisc_is_empty(q))
3802 				__qdisc_run(q);
3803 
3804 			qdisc_run_end(q);
3805 			return NET_XMIT_SUCCESS;
3806 		}
3807 
3808 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3809 		qdisc_run(q);
3810 
3811 no_lock_out:
3812 		if (unlikely(to_free))
3813 			kfree_skb_list_reason(to_free,
3814 					      SKB_DROP_REASON_QDISC_DROP);
3815 		return rc;
3816 	}
3817 
3818 	/*
3819 	 * Heuristic to force contended enqueues to serialize on a
3820 	 * separate lock before trying to get the qdisc main lock.
3821 	 * This permits the qdisc->running owner to get the lock more
3822 	 * often and dequeue packets faster.
3823 	 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
3824 	 * and then other tasks will only enqueue packets. The packets will be
3825 	 * sent after the qdisc owner is scheduled again. To prevent this
3826 	 * scenario, tasks always serialize on the lock.
3827 	 */
3828 	contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
3829 	if (unlikely(contended))
3830 		spin_lock(&q->busylock);
3831 
3832 	spin_lock(root_lock);
3833 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3834 		__qdisc_drop(skb, &to_free);
3835 		rc = NET_XMIT_DROP;
3836 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3837 		   qdisc_run_begin(q)) {
3838 		/*
3839 		 * This is a work-conserving queue; there are no old skbs
3840 		 * waiting to be sent out; and the qdisc is not running -
3841 		 * xmit the skb directly.
3842 		 */
3843 
3844 		qdisc_bstats_update(q, skb);
3845 
3846 		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3847 			if (unlikely(contended)) {
3848 				spin_unlock(&q->busylock);
3849 				contended = false;
3850 			}
3851 			__qdisc_run(q);
3852 		}
3853 
3854 		qdisc_run_end(q);
3855 		rc = NET_XMIT_SUCCESS;
3856 	} else {
3857 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3858 		if (qdisc_run_begin(q)) {
3859 			if (unlikely(contended)) {
3860 				spin_unlock(&q->busylock);
3861 				contended = false;
3862 			}
3863 			__qdisc_run(q);
3864 			qdisc_run_end(q);
3865 		}
3866 	}
3867 	spin_unlock(root_lock);
3868 	if (unlikely(to_free))
3869 		kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP);
3870 	if (unlikely(contended))
3871 		spin_unlock(&q->busylock);
3872 	return rc;
3873 }
3874 
3875 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3876 static void skb_update_prio(struct sk_buff *skb)
3877 {
3878 	const struct netprio_map *map;
3879 	const struct sock *sk;
3880 	unsigned int prioidx;
3881 
3882 	if (skb->priority)
3883 		return;
3884 	map = rcu_dereference_bh(skb->dev->priomap);
3885 	if (!map)
3886 		return;
3887 	sk = skb_to_full_sk(skb);
3888 	if (!sk)
3889 		return;
3890 
3891 	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3892 
3893 	if (prioidx < map->priomap_len)
3894 		skb->priority = map->priomap[prioidx];
3895 }
3896 #else
3897 #define skb_update_prio(skb)
3898 #endif
3899 
3900 /**
3901  *	dev_loopback_xmit - loop back @skb
3902  *	@net: network namespace this loopback is happening in
3903  *	@sk:  sk needed to be a netfilter okfn
3904  *	@skb: buffer to transmit
3905  */
3906 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3907 {
3908 	skb_reset_mac_header(skb);
3909 	__skb_pull(skb, skb_network_offset(skb));
3910 	skb->pkt_type = PACKET_LOOPBACK;
3911 	if (skb->ip_summed == CHECKSUM_NONE)
3912 		skb->ip_summed = CHECKSUM_UNNECESSARY;
3913 	DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
3914 	skb_dst_force(skb);
3915 	netif_rx(skb);
3916 	return 0;
3917 }
3918 EXPORT_SYMBOL(dev_loopback_xmit);
3919 
3920 #ifdef CONFIG_NET_EGRESS
3921 static struct netdev_queue *
3922 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
3923 {
3924 	int qm = skb_get_queue_mapping(skb);
3925 
3926 	return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
3927 }
3928 
3929 static bool netdev_xmit_txqueue_skipped(void)
3930 {
3931 	return __this_cpu_read(softnet_data.xmit.skip_txqueue);
3932 }
3933 
3934 void netdev_xmit_skip_txqueue(bool skip)
3935 {
3936 	__this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
3937 }
3938 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
3939 #endif /* CONFIG_NET_EGRESS */
3940 
3941 #ifdef CONFIG_NET_XGRESS
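/* Run the classic tc classifier/action chain attached to a tcx entry and
 * return its TC_ACT_* verdict; TC_ACT_UNSPEC when nothing is attached.
 */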
3942 static int tc_run(struct tcx_entry *entry, struct sk_buff *skb)
3943 {
3944 	int ret = TC_ACT_UNSPEC;
3945 #ifdef CONFIG_NET_CLS_ACT
3946 	struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq);
3947 	struct tcf_result res;
3948 
3949 	if (!miniq)
3950 		return ret;
3951 
3952 	tc_skb_cb(skb)->mru = 0;
3953 	tc_skb_cb(skb)->post_ct = false;
3954 
3955 	mini_qdisc_bstats_cpu_update(miniq, skb);
3956 	ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
3957 	/* Only tcf related quirks below. */
3958 	switch (ret) {
3959 	case TC_ACT_SHOT:
3960 		mini_qdisc_qstats_cpu_drop(miniq);
3961 		break;
3962 	case TC_ACT_OK:
3963 	case TC_ACT_RECLASSIFY:
3964 		skb->tc_index = TC_H_MIN(res.classid);
3965 		break;
3966 	}
3967 #endif /* CONFIG_NET_CLS_ACT */
3968 	return ret;
3969 }
3970 
3971 static DEFINE_STATIC_KEY_FALSE(tcx_needed_key);
3972 
3973 void tcx_inc(void)
3974 {
3975 	static_branch_inc(&tcx_needed_key);
3976 }
3977 
3978 void tcx_dec(void)
3979 {
3980 	static_branch_dec(&tcx_needed_key);
3981 }
3982 
3983 static __always_inline enum tcx_action_base
3984 tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
3985 	const bool needs_mac)
3986 {
3987 	const struct bpf_mprog_fp *fp;
3988 	const struct bpf_prog *prog;
3989 	int ret = TCX_NEXT;
3990 
3991 	if (needs_mac)
3992 		__skb_push(skb, skb->mac_len);
3993 	bpf_mprog_foreach_prog(entry, fp, prog) {
3994 		bpf_compute_data_pointers(skb);
3995 		ret = bpf_prog_run(prog, skb);
3996 		if (ret != TCX_NEXT)
3997 			break;
3998 	}
3999 	if (needs_mac)
4000 		__skb_pull(skb, skb->mac_len);
4001 	return tcx_action_code(skb, ret);
4002 }
4003 
4004 static __always_inline struct sk_buff *
4005 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4006 		   struct net_device *orig_dev, bool *another)
4007 {
4008 	struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
4009 	int sch_ret;
4010 
4011 	if (!entry)
4012 		return skb;
4013 	if (*pt_prev) {
4014 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
4015 		*pt_prev = NULL;
4016 	}
4017 
4018 	qdisc_skb_cb(skb)->pkt_len = skb->len;
4019 	tcx_set_ingress(skb, true);
4020 
4021 	if (static_branch_unlikely(&tcx_needed_key)) {
4022 		sch_ret = tcx_run(entry, skb, true);
4023 		if (sch_ret != TC_ACT_UNSPEC)
4024 			goto ingress_verdict;
4025 	}
4026 	sch_ret = tc_run(tcx_entry(entry), skb);
4027 ingress_verdict:
4028 	switch (sch_ret) {
4029 	case TC_ACT_REDIRECT:
4030 		/* skb_mac_header check was done by BPF, so we can safely
4031 		 * push the L2 header back before redirecting to another
4032 		 * netdev.
4033 		 */
4034 		__skb_push(skb, skb->mac_len);
4035 		if (skb_do_redirect(skb) == -EAGAIN) {
4036 			__skb_pull(skb, skb->mac_len);
4037 			*another = true;
4038 			break;
4039 		}
4040 		*ret = NET_RX_SUCCESS;
4041 		return NULL;
4042 	case TC_ACT_SHOT:
4043 		kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS);
4044 		*ret = NET_RX_DROP;
4045 		return NULL;
4046 	/* used by tc_run */
4047 	case TC_ACT_STOLEN:
4048 	case TC_ACT_QUEUED:
4049 	case TC_ACT_TRAP:
4050 		consume_skb(skb);
4051 		fallthrough;
4052 	case TC_ACT_CONSUMED:
4053 		*ret = NET_RX_SUCCESS;
4054 		return NULL;
4055 	}
4056 
4057 	return skb;
4058 }
4059 
4060 static __always_inline struct sk_buff *
4061 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4062 {
4063 	struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
4064 	int sch_ret;
4065 
4066 	if (!entry)
4067 		return skb;
4068 
4069 	/* qdisc_skb_cb(skb)->pkt_len was already set and tcx_set_ingress()
4070 	 * already called by the caller.
4071 	 */
4072 	if (static_branch_unlikely(&tcx_needed_key)) {
4073 		sch_ret = tcx_run(entry, skb, false);
4074 		if (sch_ret != TC_ACT_UNSPEC)
4075 			goto egress_verdict;
4076 	}
4077 	sch_ret = tc_run(tcx_entry(entry), skb);
4078 egress_verdict:
4079 	switch (sch_ret) {
4080 	case TC_ACT_REDIRECT:
4081 		/* No need to push/pop skb's mac_header here on egress! */
4082 		skb_do_redirect(skb);
4083 		*ret = NET_XMIT_SUCCESS;
4084 		return NULL;
4085 	case TC_ACT_SHOT:
4086 		kfree_skb_reason(skb, SKB_DROP_REASON_TC_EGRESS);
4087 		*ret = NET_XMIT_DROP;
4088 		return NULL;
4089 	/* used by tc_run */
4090 	case TC_ACT_STOLEN:
4091 	case TC_ACT_QUEUED:
4092 	case TC_ACT_TRAP:
4093 		consume_skb(skb);
4094 		fallthrough;
4095 	case TC_ACT_CONSUMED:
4096 		*ret = NET_XMIT_SUCCESS;
4097 		return NULL;
4098 	}
4099 
4100 	return skb;
4101 }
4102 #else
4103 static __always_inline struct sk_buff *
4104 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4105 		   struct net_device *orig_dev, bool *another)
4106 {
4107 	return skb;
4108 }
4109 
4110 static __always_inline struct sk_buff *
4111 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4112 {
4113 	return skb;
4114 }
4115 #endif /* CONFIG_NET_XGRESS */
4116 
4117 #ifdef CONFIG_XPS
4118 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
4119 			       struct xps_dev_maps *dev_maps, unsigned int tci)
4120 {
4121 	int tc = netdev_get_prio_tc_map(dev, skb->priority);
4122 	struct xps_map *map;
4123 	int queue_index = -1;
4124 
4125 	if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
4126 		return queue_index;
4127 
4128 	tci *= dev_maps->num_tc;
4129 	tci += tc;
4130 
4131 	map = rcu_dereference(dev_maps->attr_map[tci]);
4132 	if (map) {
4133 		if (map->len == 1)
4134 			queue_index = map->queues[0];
4135 		else
4136 			queue_index = map->queues[reciprocal_scale(
4137 						skb_get_hash(skb), map->len)];
4138 		if (unlikely(queue_index >= dev->real_num_tx_queues))
4139 			queue_index = -1;
4140 	}
4141 	return queue_index;
4142 }
4143 #endif
4144 
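/* Select a TX queue from the XPS (transmit packet steering) maps, first
 * by the socket's recorded RX queue and then by the sending CPU.
 * Returns -1 when no XPS mapping applies.
 */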
4145 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4146 			 struct sk_buff *skb)
4147 {
4148 #ifdef CONFIG_XPS
4149 	struct xps_dev_maps *dev_maps;
4150 	struct sock *sk = skb->sk;
4151 	int queue_index = -1;
4152 
4153 	if (!static_key_false(&xps_needed))
4154 		return -1;
4155 
4156 	rcu_read_lock();
4157 	if (!static_key_false(&xps_rxqs_needed))
4158 		goto get_cpus_map;
4159 
4160 	dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4161 	if (dev_maps) {
4162 		int tci = sk_rx_queue_get(sk);
4163 
4164 		if (tci >= 0)
4165 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4166 							  tci);
4167 	}
4168 
4169 get_cpus_map:
4170 	if (queue_index < 0) {
4171 		dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4172 		if (dev_maps) {
4173 			unsigned int tci = skb->sender_cpu - 1;
4174 
4175 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4176 							  tci);
4177 		}
4178 	}
4179 	rcu_read_unlock();
4180 
4181 	return queue_index;
4182 #else
4183 	return -1;
4184 #endif
4185 }
4186 
4187 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4188 		     struct net_device *sb_dev)
4189 {
4190 	return 0;
4191 }
4192 EXPORT_SYMBOL(dev_pick_tx_zero);
4193 
4194 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4195 		       struct net_device *sb_dev)
4196 {
4197 	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4198 }
4199 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
4200 
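/* Default TX queue selection: reuse the queue cached on the socket while
 * it is still valid, otherwise consult XPS and finally fall back to a
 * flow hash via skb_tx_hash().
 */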
4201 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4202 		     struct net_device *sb_dev)
4203 {
4204 	struct sock *sk = skb->sk;
4205 	int queue_index = sk_tx_queue_get(sk);
4206 
4207 	sb_dev = sb_dev ? : dev;
4208 
4209 	if (queue_index < 0 || skb->ooo_okay ||
4210 	    queue_index >= dev->real_num_tx_queues) {
4211 		int new_index = get_xps_queue(dev, sb_dev, skb);
4212 
4213 		if (new_index < 0)
4214 			new_index = skb_tx_hash(dev, sb_dev, skb);
4215 
4216 		if (queue_index != new_index && sk &&
4217 		    sk_fullsock(sk) &&
4218 		    rcu_access_pointer(sk->sk_dst_cache))
4219 			sk_tx_queue_set(sk, new_index);
4220 
4221 		queue_index = new_index;
4222 	}
4223 
4224 	return queue_index;
4225 }
4226 EXPORT_SYMBOL(netdev_pick_tx);
4227 
4228 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4229 					 struct sk_buff *skb,
4230 					 struct net_device *sb_dev)
4231 {
4232 	int queue_index = 0;
4233 
4234 #ifdef CONFIG_XPS
4235 	u32 sender_cpu = skb->sender_cpu - 1;
4236 
4237 	if (sender_cpu >= (u32)NR_CPUS)
4238 		skb->sender_cpu = raw_smp_processor_id() + 1;
4239 #endif
4240 
4241 	if (dev->real_num_tx_queues != 1) {
4242 		const struct net_device_ops *ops = dev->netdev_ops;
4243 
4244 		if (ops->ndo_select_queue)
4245 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4246 		else
4247 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
4248 
4249 		queue_index = netdev_cap_txqueue(dev, queue_index);
4250 	}
4251 
4252 	skb_set_queue_mapping(skb, queue_index);
4253 	return netdev_get_tx_queue(dev, queue_index);
4254 }
4255 
4256 /**
4257  * __dev_queue_xmit() - transmit a buffer
4258  * @skb:	buffer to transmit
4259  * @sb_dev:	subordinate device used for L2 forwarding offload
4260  *
4261  * Queue a buffer for transmission to a network device. The caller must
4262  * have set the device and priority and built the buffer before calling
4263  * this function. The function can be called from an interrupt.
4264  *
4265  * When calling this method, interrupts MUST be enabled. This is because
4266  * the BH enable code must have IRQs enabled so that it will not deadlock.
4267  *
4268  * Regardless of the return value, the skb is consumed, so it is currently
4269  * difficult to retry a send to this method. (You can bump the ref count
4270  * before sending to hold a reference for retry if you are careful.)
4271  *
4272  * Return:
4273  * * 0				- buffer successfully transmitted
4274  * * positive qdisc return code	- NET_XMIT_DROP etc.
4275  * * negative errno		- other errors
4276  */
4277 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4278 {
4279 	struct net_device *dev = skb->dev;
4280 	struct netdev_queue *txq = NULL;
4281 	struct Qdisc *q;
4282 	int rc = -ENOMEM;
4283 	bool again = false;
4284 
4285 	skb_reset_mac_header(skb);
4286 	skb_assert_len(skb);
4287 
4288 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4289 		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4290 
4291 	/* Disable soft irqs for various locks below. Also
4292 	 * stops preemption for RCU.
4293 	 */
4294 	rcu_read_lock_bh();
4295 
4296 	skb_update_prio(skb);
4297 
4298 	qdisc_pkt_len_init(skb);
4299 	tcx_set_ingress(skb, false);
4300 #ifdef CONFIG_NET_EGRESS
4301 	if (static_branch_unlikely(&egress_needed_key)) {
4302 		if (nf_hook_egress_active()) {
4303 			skb = nf_hook_egress(skb, &rc, dev);
4304 			if (!skb)
4305 				goto out;
4306 		}
4307 
4308 		netdev_xmit_skip_txqueue(false);
4309 
4310 		nf_skip_egress(skb, true);
4311 		skb = sch_handle_egress(skb, &rc, dev);
4312 		if (!skb)
4313 			goto out;
4314 		nf_skip_egress(skb, false);
4315 
4316 		if (netdev_xmit_txqueue_skipped())
4317 			txq = netdev_tx_queue_mapping(dev, skb);
4318 	}
4319 #endif
4320 	/* If device/qdisc don't need skb->dst, release it right now while
4321 	 * it's hot in this CPU's cache.
4322 	 */
4323 	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4324 		skb_dst_drop(skb);
4325 	else
4326 		skb_dst_force(skb);
4327 
4328 	if (!txq)
4329 		txq = netdev_core_pick_tx(dev, skb, sb_dev);
4330 
4331 	q = rcu_dereference_bh(txq->qdisc);
4332 
4333 	trace_net_dev_queue(skb);
4334 	if (q->enqueue) {
4335 		rc = __dev_xmit_skb(skb, q, dev, txq);
4336 		goto out;
4337 	}
4338 
4339 	/* The device has no queue. Common case for software devices:
4340 	 * loopback, all the sorts of tunnels...
4341 	 *
4342 	 * Really, it is unlikely that netif_tx_lock protection is necessary
4343 	 * here.  (e.g. loopback and IP tunnels are clean ignoring statistics
4344 	 * counters.)
4345 	 * However, it is possible that they rely on the protection
4346 	 * made by us here.
4347 	 *
4348 	 * Check this and take the lock. It is not prone to deadlocks.
4349 	 * Either way, the noqueue qdisc is even simpler 8)
4350 	 */
4351 	if (dev->flags & IFF_UP) {
4352 		int cpu = smp_processor_id(); /* ok because BHs are off */
4353 
4354 		/* Other cpus might concurrently change txq->xmit_lock_owner
4355 		 * to -1 or to their cpu id, but not to our id.
4356 		 */
4357 		if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4358 			if (dev_xmit_recursion())
4359 				goto recursion_alert;
4360 
4361 			skb = validate_xmit_skb(skb, dev, &again);
4362 			if (!skb)
4363 				goto out;
4364 
4365 			HARD_TX_LOCK(dev, txq, cpu);
4366 
4367 			if (!netif_xmit_stopped(txq)) {
4368 				dev_xmit_recursion_inc();
4369 				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4370 				dev_xmit_recursion_dec();
4371 				if (dev_xmit_complete(rc)) {
4372 					HARD_TX_UNLOCK(dev, txq);
4373 					goto out;
4374 				}
4375 			}
4376 			HARD_TX_UNLOCK(dev, txq);
4377 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4378 					     dev->name);
4379 		} else {
4380 			/* Recursion is detected! It is possible,
4381 			 * unfortunately
4382 			 */
4383 recursion_alert:
4384 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4385 					     dev->name);
4386 		}
4387 	}
4388 
4389 	rc = -ENETDOWN;
4390 	rcu_read_unlock_bh();
4391 
4392 	dev_core_stats_tx_dropped_inc(dev);
4393 	kfree_skb_list(skb);
4394 	return rc;
4395 out:
4396 	rcu_read_unlock_bh();
4397 	return rc;
4398 }
4399 EXPORT_SYMBOL(__dev_queue_xmit);
4400 
4401 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4402 {
4403 	struct net_device *dev = skb->dev;
4404 	struct sk_buff *orig_skb = skb;
4405 	struct netdev_queue *txq;
4406 	int ret = NETDEV_TX_BUSY;
4407 	bool again = false;
4408 
4409 	if (unlikely(!netif_running(dev) ||
4410 		     !netif_carrier_ok(dev)))
4411 		goto drop;
4412 
4413 	skb = validate_xmit_skb_list(skb, dev, &again);
4414 	if (skb != orig_skb)
4415 		goto drop;
4416 
4417 	skb_set_queue_mapping(skb, queue_id);
4418 	txq = skb_get_tx_queue(dev, skb);
4419 
4420 	local_bh_disable();
4421 
4422 	dev_xmit_recursion_inc();
4423 	HARD_TX_LOCK(dev, txq, smp_processor_id());
4424 	if (!netif_xmit_frozen_or_drv_stopped(txq))
4425 		ret = netdev_start_xmit(skb, dev, txq, false);
4426 	HARD_TX_UNLOCK(dev, txq);
4427 	dev_xmit_recursion_dec();
4428 
4429 	local_bh_enable();
4430 	return ret;
4431 drop:
4432 	dev_core_stats_tx_dropped_inc(dev);
4433 	kfree_skb_list(skb);
4434 	return NET_XMIT_DROP;
4435 }
4436 EXPORT_SYMBOL(__dev_direct_xmit);
4437 
4438 /*************************************************************************
4439  *			Receiver routines
4440  *************************************************************************/
4441 
4442 int netdev_max_backlog __read_mostly = 1000;
4443 EXPORT_SYMBOL(netdev_max_backlog);
4444 
4445 int netdev_tstamp_prequeue __read_mostly = 1;
4446 unsigned int sysctl_skb_defer_max __read_mostly = 64;
4447 int netdev_budget __read_mostly = 300;
4448 /* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
4449 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4450 int weight_p __read_mostly = 64;           /* old backlog weight */
4451 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
4452 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
4453 int dev_rx_weight __read_mostly = 64;
4454 int dev_tx_weight __read_mostly = 64;
4455 
4456 /* Called with irq disabled */
4457 static inline void ____napi_schedule(struct softnet_data *sd,
4458 				     struct napi_struct *napi)
4459 {
4460 	struct task_struct *thread;
4461 
4462 	lockdep_assert_irqs_disabled();
4463 
4464 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4465 		/* Paired with smp_mb__before_atomic() in
4466 		 * napi_enable()/dev_set_threaded().
4467 		 * Use READ_ONCE() to guarantee a complete
4468 		 * read on napi->thread. Only call
4469 		 * wake_up_process() when it's not NULL.
4470 		 */
4471 		thread = READ_ONCE(napi->thread);
4472 		if (thread) {
4473 			/* Avoid doing set_bit() if the thread is in
4474 			 * INTERRUPTIBLE state, because napi_thread_wait()
4475 			 * makes sure to proceed with napi polling
4476 			 * if the thread is explicitly woken from here.
4477 			 */
4478 			if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4479 				set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4480 			wake_up_process(thread);
4481 			return;
4482 		}
4483 	}
4484 
4485 	list_add_tail(&napi->poll_list, &sd->poll_list);
4486 	WRITE_ONCE(napi->list_owner, smp_processor_id());
4487 	/* If not called from net_rx_action()
4488 	 * we have to raise NET_RX_SOFTIRQ.
4489 	 */
4490 	if (!sd->in_net_rx_action)
4491 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4492 }
4493 
4494 #ifdef CONFIG_RPS
4495 
4496 /* One global table that all flow-based protocols share. */
4497 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4498 EXPORT_SYMBOL(rps_sock_flow_table);
4499 u32 rps_cpu_mask __read_mostly;
4500 EXPORT_SYMBOL(rps_cpu_mask);
4501 
4502 struct static_key_false rps_needed __read_mostly;
4503 EXPORT_SYMBOL(rps_needed);
4504 struct static_key_false rfs_needed __read_mostly;
4505 EXPORT_SYMBOL(rfs_needed);
4506 
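/* Point an RFS flow-table entry at @next_cpu and, when accelerated RFS
 * (ndo_rx_flow_steer) is available, try to steer the hardware flow to
 * the RX queue serving that CPU.
 */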
4507 static struct rps_dev_flow *
4508 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4509 	    struct rps_dev_flow *rflow, u16 next_cpu)
4510 {
4511 	if (next_cpu < nr_cpu_ids) {
4512 #ifdef CONFIG_RFS_ACCEL
4513 		struct netdev_rx_queue *rxqueue;
4514 		struct rps_dev_flow_table *flow_table;
4515 		struct rps_dev_flow *old_rflow;
4516 		u32 flow_id;
4517 		u16 rxq_index;
4518 		int rc;
4519 
4520 		/* Should we steer this flow to a different hardware queue? */
4521 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4522 		    !(dev->features & NETIF_F_NTUPLE))
4523 			goto out;
4524 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4525 		if (rxq_index == skb_get_rx_queue(skb))
4526 			goto out;
4527 
4528 		rxqueue = dev->_rx + rxq_index;
4529 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
4530 		if (!flow_table)
4531 			goto out;
4532 		flow_id = skb_get_hash(skb) & flow_table->mask;
4533 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4534 							rxq_index, flow_id);
4535 		if (rc < 0)
4536 			goto out;
4537 		old_rflow = rflow;
4538 		rflow = &flow_table->flows[flow_id];
4539 		rflow->filter = rc;
4540 		if (old_rflow->filter == rflow->filter)
4541 			old_rflow->filter = RPS_NO_FILTER;
4542 	out:
4543 #endif
4544 		rflow->last_qtail =
4545 			per_cpu(softnet_data, next_cpu).input_queue_head;
4546 	}
4547 
4548 	rflow->cpu = next_cpu;
4549 	return rflow;
4550 }
4551 
4552 /*
4553  * get_rps_cpu is called from netif_receive_skb and returns the target
4554  * CPU from the RPS map of the receiving queue for a given skb.
4555  * rcu_read_lock must be held on entry.
4556  */
4557 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4558 		       struct rps_dev_flow **rflowp)
4559 {
4560 	const struct rps_sock_flow_table *sock_flow_table;
4561 	struct netdev_rx_queue *rxqueue = dev->_rx;
4562 	struct rps_dev_flow_table *flow_table;
4563 	struct rps_map *map;
4564 	int cpu = -1;
4565 	u32 tcpu;
4566 	u32 hash;
4567 
4568 	if (skb_rx_queue_recorded(skb)) {
4569 		u16 index = skb_get_rx_queue(skb);
4570 
4571 		if (unlikely(index >= dev->real_num_rx_queues)) {
4572 			WARN_ONCE(dev->real_num_rx_queues > 1,
4573 				  "%s received packet on queue %u, but number "
4574 				  "of RX queues is %u\n",
4575 				  dev->name, index, dev->real_num_rx_queues);
4576 			goto done;
4577 		}
4578 		rxqueue += index;
4579 	}
4580 
4581 	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4582 
4583 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4584 	map = rcu_dereference(rxqueue->rps_map);
4585 	if (!flow_table && !map)
4586 		goto done;
4587 
4588 	skb_reset_network_header(skb);
4589 	hash = skb_get_hash(skb);
4590 	if (!hash)
4591 		goto done;
4592 
4593 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
4594 	if (flow_table && sock_flow_table) {
4595 		struct rps_dev_flow *rflow;
4596 		u32 next_cpu;
4597 		u32 ident;
4598 
4599 		/* First check into global flow table if there is a match.
4600 		 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
4601 		 */
4602 		ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
4603 		if ((ident ^ hash) & ~rps_cpu_mask)
4604 			goto try_rps;
4605 
4606 		next_cpu = ident & rps_cpu_mask;
4607 
4608 		/* OK, now we know there is a match,
4609 		 * we can look at the local (per receive queue) flow table
4610 		 */
4611 		rflow = &flow_table->flows[hash & flow_table->mask];
4612 		tcpu = rflow->cpu;
4613 
4614 		/*
4615 		 * If the desired CPU (where last recvmsg was done) is
4616 		 * different from current CPU (one in the rx-queue flow
4617 		 * table entry), switch if one of the following holds:
4618 		 *   - Current CPU is unset (>= nr_cpu_ids).
4619 		 *   - Current CPU is offline.
4620 		 *   - The current CPU's queue tail has advanced beyond the
4621 		 *     last packet that was enqueued using this table entry.
4622 		 *     This guarantees that all previous packets for the flow
4623 		 *     have been dequeued, thus preserving in-order delivery.
4624 		 */
4625 		if (unlikely(tcpu != next_cpu) &&
4626 		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4627 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4628 		      rflow->last_qtail)) >= 0)) {
4629 			tcpu = next_cpu;
4630 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4631 		}
4632 
4633 		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4634 			*rflowp = rflow;
4635 			cpu = tcpu;
4636 			goto done;
4637 		}
4638 	}
4639 
4640 try_rps:
4641 
4642 	if (map) {
4643 		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4644 		if (cpu_online(tcpu)) {
4645 			cpu = tcpu;
4646 			goto done;
4647 		}
4648 	}
4649 
4650 done:
4651 	return cpu;
4652 }
4653 
4654 #ifdef CONFIG_RFS_ACCEL
4655 
4656 /**
4657  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4658  * @dev: Device on which the filter was set
4659  * @rxq_index: RX queue index
4660  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4661  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4662  *
4663  * Drivers that implement ndo_rx_flow_steer() should periodically call
4664  * this function for each installed filter and remove the filters for
4665  * which it returns %true.
4666  */
4667 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4668 			 u32 flow_id, u16 filter_id)
4669 {
4670 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4671 	struct rps_dev_flow_table *flow_table;
4672 	struct rps_dev_flow *rflow;
4673 	bool expire = true;
4674 	unsigned int cpu;
4675 
4676 	rcu_read_lock();
4677 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4678 	if (flow_table && flow_id <= flow_table->mask) {
4679 		rflow = &flow_table->flows[flow_id];
4680 		cpu = READ_ONCE(rflow->cpu);
4681 		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4682 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4683 			   rflow->last_qtail) <
4684 		     (int)(10 * flow_table->mask)))
4685 			expire = false;
4686 	}
4687 	rcu_read_unlock();
4688 	return expire;
4689 }
4690 EXPORT_SYMBOL(rps_may_expire_flow);
4691 
4692 #endif /* CONFIG_RFS_ACCEL */
4693 
4694 /* Called from hardirq (IPI) context */
4695 static void rps_trigger_softirq(void *data)
4696 {
4697 	struct softnet_data *sd = data;
4698 
4699 	____napi_schedule(sd, &sd->backlog);
4700 	sd->received_rps++;
4701 }
4702 
4703 #endif /* CONFIG_RPS */
4704 
4705 /* Called from hardirq (IPI) context */
4706 static void trigger_rx_softirq(void *data)
4707 {
4708 	struct softnet_data *sd = data;
4709 
4710 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4711 	smp_store_release(&sd->defer_ipi_scheduled, 0);
4712 }
4713 
4714 /*
4715  * After we queued a packet into sd->input_pkt_queue,
4716  * we need to make sure this queue is serviced soon.
4717  *
4718  * - If this is another cpu queue, link it to our rps_ipi_list,
4719  *   and make sure we will process rps_ipi_list from net_rx_action().
4720  *
4721  * - If this is our own queue, NAPI schedule our backlog.
4722  *   Note that this also raises NET_RX_SOFTIRQ.
4723  */
4724 static void napi_schedule_rps(struct softnet_data *sd)
4725 {
4726 	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4727 
4728 #ifdef CONFIG_RPS
4729 	if (sd != mysd) {
4730 		sd->rps_ipi_next = mysd->rps_ipi_list;
4731 		mysd->rps_ipi_list = sd;
4732 
4733 		/* If not called from net_rx_action() or napi_threaded_poll()
4734 		 * we have to raise NET_RX_SOFTIRQ.
4735 		 */
4736 		if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
4737 			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4738 		return;
4739 	}
4740 #endif /* CONFIG_RPS */
4741 	__napi_schedule_irqoff(&mysd->backlog);
4742 }
4743 
4744 #ifdef CONFIG_NET_FLOW_LIMIT
4745 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4746 #endif
4747 
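/* Per-flow limit for the backlog: once the input queue is more than half
 * full, track recent flows and return true (drop) for a flow occupying
 * more than half of the recent history, so a few heavy flows cannot
 * monopolize the backlog.
 */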
4748 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4749 {
4750 #ifdef CONFIG_NET_FLOW_LIMIT
4751 	struct sd_flow_limit *fl;
4752 	struct softnet_data *sd;
4753 	unsigned int old_flow, new_flow;
4754 
4755 	if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
4756 		return false;
4757 
4758 	sd = this_cpu_ptr(&softnet_data);
4759 
4760 	rcu_read_lock();
4761 	fl = rcu_dereference(sd->flow_limit);
4762 	if (fl) {
4763 		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4764 		old_flow = fl->history[fl->history_head];
4765 		fl->history[fl->history_head] = new_flow;
4766 
4767 		fl->history_head++;
4768 		fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4769 
4770 		if (likely(fl->buckets[old_flow]))
4771 			fl->buckets[old_flow]--;
4772 
4773 		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4774 			fl->count++;
4775 			rcu_read_unlock();
4776 			return true;
4777 		}
4778 	}
4779 	rcu_read_unlock();
4780 #endif
4781 	return false;
4782 }
4783 
4784 /*
4785  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4786  * queue (may be a remote CPU queue).
4787  */
4788 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4789 			      unsigned int *qtail)
4790 {
4791 	enum skb_drop_reason reason;
4792 	struct softnet_data *sd;
4793 	unsigned long flags;
4794 	unsigned int qlen;
4795 
4796 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
4797 	sd = &per_cpu(softnet_data, cpu);
4798 
4799 	rps_lock_irqsave(sd, &flags);
4800 	if (!netif_running(skb->dev))
4801 		goto drop;
4802 	qlen = skb_queue_len(&sd->input_pkt_queue);
4803 	if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
4804 		if (qlen) {
4805 enqueue:
4806 			__skb_queue_tail(&sd->input_pkt_queue, skb);
4807 			input_queue_tail_incr_save(sd, qtail);
4808 			rps_unlock_irq_restore(sd, &flags);
4809 			return NET_RX_SUCCESS;
4810 		}
4811 
4812 		/* Schedule NAPI for backlog device
4813 		 * We can use a non-atomic operation since we own the queue lock
4814 		 */
4815 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
4816 			napi_schedule_rps(sd);
4817 		goto enqueue;
4818 	}
4819 	reason = SKB_DROP_REASON_CPU_BACKLOG;
4820 
4821 drop:
4822 	sd->dropped++;
4823 	rps_unlock_irq_restore(sd, &flags);
4824 
4825 	dev_core_stats_rx_dropped_inc(skb->dev);
4826 	kfree_skb_reason(skb, reason);
4827 	return NET_RX_DROP;
4828 }
4829 
4830 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4831 {
4832 	struct net_device *dev = skb->dev;
4833 	struct netdev_rx_queue *rxqueue;
4834 
4835 	rxqueue = dev->_rx;
4836 
4837 	if (skb_rx_queue_recorded(skb)) {
4838 		u16 index = skb_get_rx_queue(skb);
4839 
4840 		if (unlikely(index >= dev->real_num_rx_queues)) {
4841 			WARN_ONCE(dev->real_num_rx_queues > 1,
4842 				  "%s received packet on queue %u, but number "
4843 				  "of RX queues is %u\n",
4844 				  dev->name, index, dev->real_num_rx_queues);
4845 
4846 			return rxqueue; /* Return first rxqueue */
4847 		}
4848 		rxqueue += index;
4849 	}
4850 	return rxqueue;
4851 }
4852 
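/* Run an XDP program on an skb for generic XDP: build an xdp_buff around
 * the linear skb data, run the program, then sync any head/tail or
 * Ethernet header changes back into the skb before returning the
 * verdict.
 */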
4853 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4854 			     struct bpf_prog *xdp_prog)
4855 {
4856 	void *orig_data, *orig_data_end, *hard_start;
4857 	struct netdev_rx_queue *rxqueue;
4858 	bool orig_bcast, orig_host;
4859 	u32 mac_len, frame_sz;
4860 	__be16 orig_eth_type;
4861 	struct ethhdr *eth;
4862 	u32 metalen, act;
4863 	int off;
4864 
4865 	/* The XDP program wants to see the packet starting at the MAC
4866 	 * header.
4867 	 */
4868 	mac_len = skb->data - skb_mac_header(skb);
4869 	hard_start = skb->data - skb_headroom(skb);
4870 
4871 	/* SKB "head" area always have tailroom for skb_shared_info */
4872 	frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4873 	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4874 
4875 	rxqueue = netif_get_rxqueue(skb);
4876 	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4877 	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4878 			 skb_headlen(skb) + mac_len, true);
4879 
4880 	orig_data_end = xdp->data_end;
4881 	orig_data = xdp->data;
4882 	eth = (struct ethhdr *)xdp->data;
4883 	orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4884 	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4885 	orig_eth_type = eth->h_proto;
4886 
4887 	act = bpf_prog_run_xdp(xdp_prog, xdp);
4888 
4889 	/* check if bpf_xdp_adjust_head was used */
4890 	off = xdp->data - orig_data;
4891 	if (off) {
4892 		if (off > 0)
4893 			__skb_pull(skb, off);
4894 		else if (off < 0)
4895 			__skb_push(skb, -off);
4896 
4897 		skb->mac_header += off;
4898 		skb_reset_network_header(skb);
4899 	}
4900 
4901 	/* check if bpf_xdp_adjust_tail was used */
4902 	off = xdp->data_end - orig_data_end;
4903 	if (off != 0) {
4904 		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4905 		skb->len += off; /* positive on grow, negative on shrink */
4906 	}
4907 
4908 	/* check if XDP changed the eth hdr such that the SKB needs an update */
4909 	eth = (struct ethhdr *)xdp->data;
4910 	if ((orig_eth_type != eth->h_proto) ||
4911 	    (orig_host != ether_addr_equal_64bits(eth->h_dest,
4912 						  skb->dev->dev_addr)) ||
4913 	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4914 		__skb_push(skb, ETH_HLEN);
4915 		skb->pkt_type = PACKET_HOST;
4916 		skb->protocol = eth_type_trans(skb, skb->dev);
4917 	}
4918 
4919 	/* Redirect/Tx gives an L2 packet; code that will reuse the skb must __skb_pull
4920 	 * it before calling us again on the redirect path. We do not call do_redirect
4921 	 * as we leave that up to the caller.
4922 	 *
4923 	 * Caller is responsible for managing lifetime of skb (i.e. calling
4924 	 * kfree_skb in response to actions it cannot handle/XDP_DROP).
4925 	 */
4926 	switch (act) {
4927 	case XDP_REDIRECT:
4928 	case XDP_TX:
4929 		__skb_push(skb, mac_len);
4930 		break;
4931 	case XDP_PASS:
4932 		metalen = xdp->data - xdp->data_meta;
4933 		if (metalen)
4934 			skb_metadata_set(skb, metalen);
4935 		break;
4936 	}
4937 
4938 	return act;
4939 }
4940 
4941 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4942 				     struct xdp_buff *xdp,
4943 				     struct bpf_prog *xdp_prog)
4944 {
4945 	u32 act = XDP_DROP;
4946 
4947 	/* Reinjected packets coming from act_mirred or similar should
4948 	 * not get XDP generic processing.
4949 	 */
4950 	if (skb_is_redirected(skb))
4951 		return XDP_PASS;
4952 
4953 	/* XDP packets must be linear and must have sufficient headroom
4954 	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that native
4955 	 * XDP also provides, so we need to enforce it here as well.
4956 	 */
4957 	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4958 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4959 		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4960 		int troom = skb->tail + skb->data_len - skb->end;
4961 
4962 		/* In case we have to go down this path and also linearize,
4963 		 * let's do the pskb_expand_head() work just once here.
4964 		 */
4965 		if (pskb_expand_head(skb,
4966 				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4967 				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4968 			goto do_drop;
4969 		if (skb_linearize(skb))
4970 			goto do_drop;
4971 	}
4972 
4973 	act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
4974 	switch (act) {
4975 	case XDP_REDIRECT:
4976 	case XDP_TX:
4977 	case XDP_PASS:
4978 		break;
4979 	default:
4980 		bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
4981 		fallthrough;
4982 	case XDP_ABORTED:
4983 		trace_xdp_exception(skb->dev, xdp_prog, act);
4984 		fallthrough;
4985 	case XDP_DROP:
4986 	do_drop:
4987 		kfree_skb(skb);
4988 		break;
4989 	}
4990 
4991 	return act;
4992 }
4993 
4994 /* When doing generic XDP we have to bypass the qdisc layer and the
4995  * network taps in order to match in-driver-XDP behavior. This also means
4996  * that XDP packets are able to starve other packets going through a qdisc,
4997  * and DDoS attacks will be more effective. In-driver XDP uses dedicated TX
4998  * queues, so they do not have this starvation issue.
4999  */
5000 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
5001 {
5002 	struct net_device *dev = skb->dev;
5003 	struct netdev_queue *txq;
5004 	bool free_skb = true;
5005 	int cpu, rc;
5006 
5007 	txq = netdev_core_pick_tx(dev, skb, NULL);
5008 	cpu = smp_processor_id();
5009 	HARD_TX_LOCK(dev, txq, cpu);
5010 	if (!netif_xmit_frozen_or_drv_stopped(txq)) {
5011 		rc = netdev_start_xmit(skb, dev, txq, 0);
5012 		if (dev_xmit_complete(rc))
5013 			free_skb = false;
5014 	}
5015 	HARD_TX_UNLOCK(dev, txq);
5016 	if (free_skb) {
5017 		trace_xdp_exception(dev, xdp_prog, XDP_TX);
5018 		dev_core_stats_tx_dropped_inc(dev);
5019 		kfree_skb(skb);
5020 	}
5021 }
5022 
5023 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
5024 
5025 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
5026 {
5027 	if (xdp_prog) {
5028 		struct xdp_buff xdp;
5029 		u32 act;
5030 		int err;
5031 
5032 		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
5033 		if (act != XDP_PASS) {
5034 			switch (act) {
5035 			case XDP_REDIRECT:
5036 				err = xdp_do_generic_redirect(skb->dev, skb,
5037 							      &xdp, xdp_prog);
5038 				if (err)
5039 					goto out_redir;
5040 				break;
5041 			case XDP_TX:
5042 				generic_xdp_tx(skb, xdp_prog);
5043 				break;
5044 			}
5045 			return XDP_DROP;
5046 		}
5047 	}
5048 	return XDP_PASS;
5049 out_redir:
5050 	kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
5051 	return XDP_DROP;
5052 }
5053 EXPORT_SYMBOL_GPL(do_xdp_generic);
5054 
5055 static int netif_rx_internal(struct sk_buff *skb)
5056 {
5057 	int ret;
5058 
5059 	net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5060 
5061 	trace_netif_rx(skb);
5062 
5063 #ifdef CONFIG_RPS
5064 	if (static_branch_unlikely(&rps_needed)) {
5065 		struct rps_dev_flow voidflow, *rflow = &voidflow;
5066 		int cpu;
5067 
5068 		rcu_read_lock();
5069 
5070 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
5071 		if (cpu < 0)
5072 			cpu = smp_processor_id();
5073 
5074 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5075 
5076 		rcu_read_unlock();
5077 	} else
5078 #endif
5079 	{
5080 		unsigned int qtail;
5081 
5082 		ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
5083 	}
5084 	return ret;
5085 }
5086 
5087 /**
5088  *	__netif_rx	-	Slightly optimized version of netif_rx
5089  *	@skb: buffer to post
5090  *
5091  *	This behaves as netif_rx except that it does not disable bottom halves.
5092  *	As a result this function may only be invoked from the interrupt context
5093  *	(either hard or soft interrupt).
5094  */
5095 int __netif_rx(struct sk_buff *skb)
5096 {
5097 	int ret;
5098 
5099 	lockdep_assert_once(hardirq_count() | softirq_count());
5100 
5101 	trace_netif_rx_entry(skb);
5102 	ret = netif_rx_internal(skb);
5103 	trace_netif_rx_exit(ret);
5104 	return ret;
5105 }
5106 EXPORT_SYMBOL(__netif_rx);
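
/* Minimal usage sketch, assuming a purely virtual device whose ndo_start_xmit
 * already runs with bottom halves disabled (as loopback-style drivers do), so
 * the cheaper __netif_rx() can be used directly.  The loop_xmit() name is
 * hypothetical.
 *
 *	static netdev_tx_t loop_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		skb_orphan(skb);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		__netif_rx(skb);
 *		return NETDEV_TX_OK;
 *	}
 */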
5107 
5108 /**
5109  *	netif_rx	-	post buffer to the network code
5110  *	@skb: buffer to post
5111  *
5112  *	This function receives a packet from a device driver and queues it for
5113  *	the upper (protocol) levels to process via the backlog NAPI device. It
5114  *	always succeeds. The buffer may be dropped during processing for
5115  *	congestion control or by the protocol layers.
5116  *	The network buffer is passed via the backlog NAPI device. Modern NIC
5117  *	drivers should use NAPI and GRO.
5118  *	This function can be used from interrupt and from process context. The
5119  *	caller from process context must not disable interrupts before invoking
5120  *	this function.
5121  *
5122  *	return values:
5123  *	NET_RX_SUCCESS	(no congestion)
5124  *	NET_RX_DROP     (packet was dropped)
5125  *
5126  */
5127 int netif_rx(struct sk_buff *skb)
5128 {
5129 	bool need_bh_off = !(hardirq_count() | softirq_count());
5130 	int ret;
5131 
5132 	if (need_bh_off)
5133 		local_bh_disable();
5134 	trace_netif_rx_entry(skb);
5135 	ret = netif_rx_internal(skb);
5136 	trace_netif_rx_exit(ret);
5137 	if (need_bh_off)
5138 		local_bh_enable();
5139 	return ret;
5140 }
5141 EXPORT_SYMBOL(netif_rx);
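
/* Minimal usage sketch for a legacy, non-NAPI driver delivering one frame
 * from its hard interrupt handler.  olddev_interrupt(), olddev_rx_frame()
 * (assumed to copy the payload into the skb and skb_put() it) and
 * PKT_BUF_LEN are hypothetical.
 *
 *	static irqreturn_t olddev_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb;
 *
 *		skb = netdev_alloc_skb(dev, PKT_BUF_LEN);
 *		if (!skb)
 *			return IRQ_HANDLED;
 *		olddev_rx_frame(dev, skb);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *		return IRQ_HANDLED;
 *	}
 */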
5142 
5143 static __latent_entropy void net_tx_action(struct softirq_action *h)
5144 {
5145 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5146 
5147 	if (sd->completion_queue) {
5148 		struct sk_buff *clist;
5149 
5150 		local_irq_disable();
5151 		clist = sd->completion_queue;
5152 		sd->completion_queue = NULL;
5153 		local_irq_enable();
5154 
5155 		while (clist) {
5156 			struct sk_buff *skb = clist;
5157 
5158 			clist = clist->next;
5159 
5160 			WARN_ON(refcount_read(&skb->users));
5161 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
5162 				trace_consume_skb(skb, net_tx_action);
5163 			else
5164 				trace_kfree_skb(skb, net_tx_action,
5165 						get_kfree_skb_cb(skb)->reason);
5166 
5167 			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5168 				__kfree_skb(skb);
5169 			else
5170 				__napi_kfree_skb(skb,
5171 						 get_kfree_skb_cb(skb)->reason);
5172 		}
5173 	}
5174 
5175 	if (sd->output_queue) {
5176 		struct Qdisc *head;
5177 
5178 		local_irq_disable();
5179 		head = sd->output_queue;
5180 		sd->output_queue = NULL;
5181 		sd->output_queue_tailp = &sd->output_queue;
5182 		local_irq_enable();
5183 
5184 		rcu_read_lock();
5185 
5186 		while (head) {
5187 			struct Qdisc *q = head;
5188 			spinlock_t *root_lock = NULL;
5189 
5190 			head = head->next_sched;
5191 
5192 			/* We need to make sure head->next_sched is read
5193 			 * before clearing __QDISC_STATE_SCHED
5194 			 */
5195 			smp_mb__before_atomic();
5196 
5197 			if (!(q->flags & TCQ_F_NOLOCK)) {
5198 				root_lock = qdisc_lock(q);
5199 				spin_lock(root_lock);
5200 			} else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5201 						     &q->state))) {
5202 				/* There is a synchronize_net() between
5203 				 * STATE_DEACTIVATED flag being set and
5204 				 * qdisc_reset()/some_qdisc_is_busy() in
5205 				 * dev_deactivate(), so we can safely bail out
5206 				 * early here to avoid data race between
5207 				 * qdisc_deactivate() and some_qdisc_is_busy()
5208 				 * for lockless qdisc.
5209 				 */
5210 				clear_bit(__QDISC_STATE_SCHED, &q->state);
5211 				continue;
5212 			}
5213 
5214 			clear_bit(__QDISC_STATE_SCHED, &q->state);
5215 			qdisc_run(q);
5216 			if (root_lock)
5217 				spin_unlock(root_lock);
5218 		}
5219 
5220 		rcu_read_unlock();
5221 	}
5222 
5223 	xfrm_dev_backlog(sd);
5224 }
5225 
5226 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5227 /* This hook is defined here for ATM LANE */
5228 int (*br_fdb_test_addr_hook)(struct net_device *dev,
5229 			     unsigned char *addr) __read_mostly;
5230 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5231 #endif
5232 
5233 /**
5234  *	netdev_is_rx_handler_busy - check if receive handler is registered
5235  *	@dev: device to check
5236  *
5237  *	Check if a receive handler is already registered for a given device.
5238  *	Return true if there is one.
5239  *
5240  *	The caller must hold the rtnl_mutex.
5241  */
5242 bool netdev_is_rx_handler_busy(struct net_device *dev)
5243 {
5244 	ASSERT_RTNL();
5245 	return dev && rtnl_dereference(dev->rx_handler);
5246 }
5247 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5248 
5249 /**
5250  *	netdev_rx_handler_register - register receive handler
5251  *	@dev: device to register a handler for
5252  *	@rx_handler: receive handler to register
5253  *	@rx_handler_data: data pointer that is used by rx handler
5254  *
5255  *	Register a receive handler for a device. This handler will then be
5256  *	called from __netif_receive_skb. A negative errno code is returned
5257  *	on a failure.
5258  *
5259  *	The caller must hold the rtnl_mutex.
5260  *
5261  *	For a general description of rx_handler, see enum rx_handler_result.
5262  */
5263 int netdev_rx_handler_register(struct net_device *dev,
5264 			       rx_handler_func_t *rx_handler,
5265 			       void *rx_handler_data)
5266 {
5267 	if (netdev_is_rx_handler_busy(dev))
5268 		return -EBUSY;
5269 
5270 	if (dev->priv_flags & IFF_NO_RX_HANDLER)
5271 		return -EINVAL;
5272 
5273 	/* Note: rx_handler_data must be set before rx_handler */
5274 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5275 	rcu_assign_pointer(dev->rx_handler, rx_handler);
5276 
5277 	return 0;
5278 }
5279 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
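
/* Usage sketch, assuming a macvlan/team-like virtual device claiming a lower
 * port.  myvif_handle_frame(), myvif_attach() and struct myvif are
 * hypothetical; the real constraint is only that registration happens under
 * rtnl.  Returning RX_HANDLER_ANOTHER after changing skb->dev makes
 * __netif_receive_skb_core() run another round on the new device.
 *
 *	static rx_handler_result_t myvif_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct myvif *vif = rcu_dereference(skb->dev->rx_handler_data);
 *
 *		skb->dev = vif->dev;
 *		return RX_HANDLER_ANOTHER;
 *	}
 *
 *	static int myvif_attach(struct myvif *vif, struct net_device *port)
 *	{
 *		ASSERT_RTNL();
 *		return netdev_rx_handler_register(port, myvif_handle_frame, vif);
 *	}
 */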
5280 
5281 /**
5282  *	netdev_rx_handler_unregister - unregister receive handler
5283  *	@dev: device to unregister a handler from
5284  *
5285  *	Unregister a receive handler from a device.
5286  *
5287  *	The caller must hold the rtnl_mutex.
5288  */
5289 void netdev_rx_handler_unregister(struct net_device *dev)
5290 {
5291 
5292 	ASSERT_RTNL();
5293 	RCU_INIT_POINTER(dev->rx_handler, NULL);
5294 	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
5295 	 * section has a guarantee to see a non NULL rx_handler_data
5296 	 * as well.
5297 	 */
5298 	synchronize_net();
5299 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5300 }
5301 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5302 
5303 /*
5304  * Limit the use of PFMEMALLOC reserves to those protocols that implement
5305  * the special handling of PFMEMALLOC skbs.
5306  */
5307 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5308 {
5309 	switch (skb->protocol) {
5310 	case htons(ETH_P_ARP):
5311 	case htons(ETH_P_IP):
5312 	case htons(ETH_P_IPV6):
5313 	case htons(ETH_P_8021Q):
5314 	case htons(ETH_P_8021AD):
5315 		return true;
5316 	default:
5317 		return false;
5318 	}
5319 }
5320 
5321 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5322 			     int *ret, struct net_device *orig_dev)
5323 {
5324 	if (nf_hook_ingress_active(skb)) {
5325 		int ingress_retval;
5326 
5327 		if (*pt_prev) {
5328 			*ret = deliver_skb(skb, *pt_prev, orig_dev);
5329 			*pt_prev = NULL;
5330 		}
5331 
5332 		rcu_read_lock();
5333 		ingress_retval = nf_hook_ingress(skb);
5334 		rcu_read_unlock();
5335 		return ingress_retval;
5336 	}
5337 	return 0;
5338 }
5339 
5340 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5341 				    struct packet_type **ppt_prev)
5342 {
5343 	struct packet_type *ptype, *pt_prev;
5344 	rx_handler_func_t *rx_handler;
5345 	struct sk_buff *skb = *pskb;
5346 	struct net_device *orig_dev;
5347 	bool deliver_exact = false;
5348 	int ret = NET_RX_DROP;
5349 	__be16 type;
5350 
5351 	net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
5352 
5353 	trace_netif_receive_skb(skb);
5354 
5355 	orig_dev = skb->dev;
5356 
5357 	skb_reset_network_header(skb);
5358 	if (!skb_transport_header_was_set(skb))
5359 		skb_reset_transport_header(skb);
5360 	skb_reset_mac_len(skb);
5361 
5362 	pt_prev = NULL;
5363 
5364 another_round:
5365 	skb->skb_iif = skb->dev->ifindex;
5366 
5367 	__this_cpu_inc(softnet_data.processed);
5368 
5369 	if (static_branch_unlikely(&generic_xdp_needed_key)) {
5370 		int ret2;
5371 
5372 		migrate_disable();
5373 		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5374 		migrate_enable();
5375 
5376 		if (ret2 != XDP_PASS) {
5377 			ret = NET_RX_DROP;
5378 			goto out;
5379 		}
5380 	}
5381 
5382 	if (eth_type_vlan(skb->protocol)) {
5383 		skb = skb_vlan_untag(skb);
5384 		if (unlikely(!skb))
5385 			goto out;
5386 	}
5387 
5388 	if (skb_skip_tc_classify(skb))
5389 		goto skip_classify;
5390 
5391 	if (pfmemalloc)
5392 		goto skip_taps;
5393 
5394 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
5395 		if (pt_prev)
5396 			ret = deliver_skb(skb, pt_prev, orig_dev);
5397 		pt_prev = ptype;
5398 	}
5399 
5400 	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5401 		if (pt_prev)
5402 			ret = deliver_skb(skb, pt_prev, orig_dev);
5403 		pt_prev = ptype;
5404 	}
5405 
5406 skip_taps:
5407 #ifdef CONFIG_NET_INGRESS
5408 	if (static_branch_unlikely(&ingress_needed_key)) {
5409 		bool another = false;
5410 
5411 		nf_skip_egress(skb, true);
5412 		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5413 					 &another);
5414 		if (another)
5415 			goto another_round;
5416 		if (!skb)
5417 			goto out;
5418 
5419 		nf_skip_egress(skb, false);
5420 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5421 			goto out;
5422 	}
5423 #endif
5424 	skb_reset_redirect(skb);
5425 skip_classify:
5426 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5427 		goto drop;
5428 
5429 	if (skb_vlan_tag_present(skb)) {
5430 		if (pt_prev) {
5431 			ret = deliver_skb(skb, pt_prev, orig_dev);
5432 			pt_prev = NULL;
5433 		}
5434 		if (vlan_do_receive(&skb))
5435 			goto another_round;
5436 		else if (unlikely(!skb))
5437 			goto out;
5438 	}
5439 
5440 	rx_handler = rcu_dereference(skb->dev->rx_handler);
5441 	if (rx_handler) {
5442 		if (pt_prev) {
5443 			ret = deliver_skb(skb, pt_prev, orig_dev);
5444 			pt_prev = NULL;
5445 		}
5446 		switch (rx_handler(&skb)) {
5447 		case RX_HANDLER_CONSUMED:
5448 			ret = NET_RX_SUCCESS;
5449 			goto out;
5450 		case RX_HANDLER_ANOTHER:
5451 			goto another_round;
5452 		case RX_HANDLER_EXACT:
5453 			deliver_exact = true;
5454 			break;
5455 		case RX_HANDLER_PASS:
5456 			break;
5457 		default:
5458 			BUG();
5459 		}
5460 	}
5461 
5462 	if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5463 check_vlan_id:
5464 		if (skb_vlan_tag_get_id(skb)) {
5465 			/* Vlan id is non 0 and vlan_do_receive() above couldn't
5466 			 * find vlan device.
5467 			 */
5468 			skb->pkt_type = PACKET_OTHERHOST;
5469 		} else if (eth_type_vlan(skb->protocol)) {
5470 			/* Outer header is 802.1P with vlan 0, inner header is
5471 			 * 802.1Q or 802.1AD and vlan_do_receive() above could
5472 			 * not find vlan dev for vlan id 0.
5473 			 */
5474 			__vlan_hwaccel_clear_tag(skb);
5475 			skb = skb_vlan_untag(skb);
5476 			if (unlikely(!skb))
5477 				goto out;
5478 			if (vlan_do_receive(&skb))
5479 				/* After stripping off 802.1P header with vlan 0
5480 				 * vlan dev is found for inner header.
5481 				 */
5482 				goto another_round;
5483 			else if (unlikely(!skb))
5484 				goto out;
5485 			else
5486 				/* We have stripped outer 802.1P vlan 0 header.
5487 				 * But could not find vlan dev.
5488 				 * check again for vlan id to set OTHERHOST.
5489 				 */
5490 				goto check_vlan_id;
5491 		}
5492 		/* Note: we might in the future use prio bits
5493 		 * and set skb->priority like in vlan_do_receive().
5494 		 * For the time being, just ignore the Priority Code Point.
5495 		 */
5496 		__vlan_hwaccel_clear_tag(skb);
5497 	}
5498 
5499 	type = skb->protocol;
5500 
5501 	/* deliver only exact match when indicated */
5502 	if (likely(!deliver_exact)) {
5503 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5504 				       &ptype_base[ntohs(type) &
5505 						   PTYPE_HASH_MASK]);
5506 	}
5507 
5508 	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5509 			       &orig_dev->ptype_specific);
5510 
5511 	if (unlikely(skb->dev != orig_dev)) {
5512 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5513 				       &skb->dev->ptype_specific);
5514 	}
5515 
5516 	if (pt_prev) {
5517 		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5518 			goto drop;
5519 		*ppt_prev = pt_prev;
5520 	} else {
5521 drop:
5522 		if (!deliver_exact)
5523 			dev_core_stats_rx_dropped_inc(skb->dev);
5524 		else
5525 			dev_core_stats_rx_nohandler_inc(skb->dev);
5526 		kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
5527 		/* Jamal, now you will not be able to escape explaining
5528 		 * to me how you were going to use this. :-)
5529 		 */
5530 		ret = NET_RX_DROP;
5531 	}
5532 
5533 out:
5534 	/* The invariant here is that if *ppt_prev is not NULL
5535 	 * then skb should also be non-NULL.
5536 	 *
5537 	 * Apparently *ppt_prev assignment above holds this invariant due to
5538 	 * skb dereferencing near it.
5539 	 */
5540 	*pskb = skb;
5541 	return ret;
5542 }
5543 
5544 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5545 {
5546 	struct net_device *orig_dev = skb->dev;
5547 	struct packet_type *pt_prev = NULL;
5548 	int ret;
5549 
5550 	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5551 	if (pt_prev)
5552 		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5553 					 skb->dev, pt_prev, orig_dev);
5554 	return ret;
5555 }
5556 
5557 /**
5558  *	netif_receive_skb_core - special purpose version of netif_receive_skb
5559  *	@skb: buffer to process
5560  *
5561  *	More direct receive version of netif_receive_skb().  It should
5562  *	only be used by callers that have a need to skip RPS and Generic XDP.
5563  *	Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5564  *
5565  *	This function may only be called from softirq context and interrupts
5566  *	should be enabled.
5567  *
5568  *	Return values (usually ignored):
5569  *	NET_RX_SUCCESS: no congestion
5570  *	NET_RX_DROP: packet was dropped
5571  */
5572 int netif_receive_skb_core(struct sk_buff *skb)
5573 {
5574 	int ret;
5575 
5576 	rcu_read_lock();
5577 	ret = __netif_receive_skb_one_core(skb, false);
5578 	rcu_read_unlock();
5579 
5580 	return ret;
5581 }
5582 EXPORT_SYMBOL(netif_receive_skb_core);
5583 
5584 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5585 						  struct packet_type *pt_prev,
5586 						  struct net_device *orig_dev)
5587 {
5588 	struct sk_buff *skb, *next;
5589 
5590 	if (!pt_prev)
5591 		return;
5592 	if (list_empty(head))
5593 		return;
5594 	if (pt_prev->list_func != NULL)
5595 		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5596 				   ip_list_rcv, head, pt_prev, orig_dev);
5597 	else
5598 		list_for_each_entry_safe(skb, next, head, list) {
5599 			skb_list_del_init(skb);
5600 			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5601 		}
5602 }
5603 
5604 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5605 {
5606 	/* Fast-path assumptions:
5607 	 * - There is no RX handler.
5608 	 * - Only one packet_type matches.
5609 	 * If either of these fails, we will end up doing some per-packet
5610 	 * processing in-line, then handling the 'last ptype' for the whole
5611 	 * sublist.  This can't cause out-of-order delivery to any single ptype,
5612 	 * because the 'last ptype' must be constant across the sublist, and all
5613 	 * other ptypes are handled per-packet.
5614 	 */
5615 	/* Current (common) ptype of sublist */
5616 	struct packet_type *pt_curr = NULL;
5617 	/* Current (common) orig_dev of sublist */
5618 	struct net_device *od_curr = NULL;
5619 	struct list_head sublist;
5620 	struct sk_buff *skb, *next;
5621 
5622 	INIT_LIST_HEAD(&sublist);
5623 	list_for_each_entry_safe(skb, next, head, list) {
5624 		struct net_device *orig_dev = skb->dev;
5625 		struct packet_type *pt_prev = NULL;
5626 
5627 		skb_list_del_init(skb);
5628 		__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5629 		if (!pt_prev)
5630 			continue;
5631 		if (pt_curr != pt_prev || od_curr != orig_dev) {
5632 			/* dispatch old sublist */
5633 			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5634 			/* start new sublist */
5635 			INIT_LIST_HEAD(&sublist);
5636 			pt_curr = pt_prev;
5637 			od_curr = orig_dev;
5638 		}
5639 		list_add_tail(&skb->list, &sublist);
5640 	}
5641 
5642 	/* dispatch final sublist */
5643 	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5644 }
5645 
5646 static int __netif_receive_skb(struct sk_buff *skb)
5647 {
5648 	int ret;
5649 
5650 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5651 		unsigned int noreclaim_flag;
5652 
5653 		/*
5654 		 * PFMEMALLOC skbs are special, they should
5655 		 * - be delivered to SOCK_MEMALLOC sockets only
5656 		 * - stay away from userspace
5657 		 * - have bounded memory usage
5658 		 *
5659 		 * Use PF_MEMALLOC as this saves us from propagating the allocation
5660 		 * context down to all allocation sites.
5661 		 */
5662 		noreclaim_flag = memalloc_noreclaim_save();
5663 		ret = __netif_receive_skb_one_core(skb, true);
5664 		memalloc_noreclaim_restore(noreclaim_flag);
5665 	} else
5666 		ret = __netif_receive_skb_one_core(skb, false);
5667 
5668 	return ret;
5669 }
5670 
5671 static void __netif_receive_skb_list(struct list_head *head)
5672 {
5673 	unsigned long noreclaim_flag = 0;
5674 	struct sk_buff *skb, *next;
5675 	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5676 
5677 	list_for_each_entry_safe(skb, next, head, list) {
5678 		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5679 			struct list_head sublist;
5680 
5681 			/* Handle the previous sublist */
5682 			list_cut_before(&sublist, head, &skb->list);
5683 			if (!list_empty(&sublist))
5684 				__netif_receive_skb_list_core(&sublist, pfmemalloc);
5685 			pfmemalloc = !pfmemalloc;
5686 			/* See comments in __netif_receive_skb */
5687 			if (pfmemalloc)
5688 				noreclaim_flag = memalloc_noreclaim_save();
5689 			else
5690 				memalloc_noreclaim_restore(noreclaim_flag);
5691 		}
5692 	}
5693 	/* Handle the remaining sublist */
5694 	if (!list_empty(head))
5695 		__netif_receive_skb_list_core(head, pfmemalloc);
5696 	/* Restore pflags */
5697 	if (pfmemalloc)
5698 		memalloc_noreclaim_restore(noreclaim_flag);
5699 }
5700 
5701 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5702 {
5703 	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5704 	struct bpf_prog *new = xdp->prog;
5705 	int ret = 0;
5706 
5707 	switch (xdp->command) {
5708 	case XDP_SETUP_PROG:
5709 		rcu_assign_pointer(dev->xdp_prog, new);
5710 		if (old)
5711 			bpf_prog_put(old);
5712 
5713 		if (old && !new) {
5714 			static_branch_dec(&generic_xdp_needed_key);
5715 		} else if (new && !old) {
5716 			static_branch_inc(&generic_xdp_needed_key);
5717 			dev_disable_lro(dev);
5718 			dev_disable_gro_hw(dev);
5719 		}
5720 		break;
5721 
5722 	default:
5723 		ret = -EINVAL;
5724 		break;
5725 	}
5726 
5727 	return ret;
5728 }
5729 
5730 static int netif_receive_skb_internal(struct sk_buff *skb)
5731 {
5732 	int ret;
5733 
5734 	net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5735 
5736 	if (skb_defer_rx_timestamp(skb))
5737 		return NET_RX_SUCCESS;
5738 
5739 	rcu_read_lock();
5740 #ifdef CONFIG_RPS
5741 	if (static_branch_unlikely(&rps_needed)) {
5742 		struct rps_dev_flow voidflow, *rflow = &voidflow;
5743 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5744 
5745 		if (cpu >= 0) {
5746 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5747 			rcu_read_unlock();
5748 			return ret;
5749 		}
5750 	}
5751 #endif
5752 	ret = __netif_receive_skb(skb);
5753 	rcu_read_unlock();
5754 	return ret;
5755 }
5756 
5757 void netif_receive_skb_list_internal(struct list_head *head)
5758 {
5759 	struct sk_buff *skb, *next;
5760 	struct list_head sublist;
5761 
5762 	INIT_LIST_HEAD(&sublist);
5763 	list_for_each_entry_safe(skb, next, head, list) {
5764 		net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5765 		skb_list_del_init(skb);
5766 		if (!skb_defer_rx_timestamp(skb))
5767 			list_add_tail(&skb->list, &sublist);
5768 	}
5769 	list_splice_init(&sublist, head);
5770 
5771 	rcu_read_lock();
5772 #ifdef CONFIG_RPS
5773 	if (static_branch_unlikely(&rps_needed)) {
5774 		list_for_each_entry_safe(skb, next, head, list) {
5775 			struct rps_dev_flow voidflow, *rflow = &voidflow;
5776 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5777 
5778 			if (cpu >= 0) {
5779 				/* Will be handled, remove from list */
5780 				skb_list_del_init(skb);
5781 				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5782 			}
5783 		}
5784 	}
5785 #endif
5786 	__netif_receive_skb_list(head);
5787 	rcu_read_unlock();
5788 }
5789 
5790 /**
5791  *	netif_receive_skb - process receive buffer from network
5792  *	@skb: buffer to process
5793  *
5794  *	netif_receive_skb() is the main receive data processing function.
5795  *	It always succeeds. The buffer may be dropped during processing
5796  *	for congestion control or by the protocol layers.
5797  *
5798  *	This function may only be called from softirq context and interrupts
5799  *	should be enabled.
5800  *
5801  *	Return values (usually ignored):
5802  *	NET_RX_SUCCESS: no congestion
5803  *	NET_RX_DROP: packet was dropped
5804  */
5805 int netif_receive_skb(struct sk_buff *skb)
5806 {
5807 	int ret;
5808 
5809 	trace_netif_receive_skb_entry(skb);
5810 
5811 	ret = netif_receive_skb_internal(skb);
5812 	trace_netif_receive_skb_exit(ret);
5813 
5814 	return ret;
5815 }
5816 EXPORT_SYMBOL(netif_receive_skb);
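
/* Minimal delivery sketch from a NAPI poll loop: we are in softirq context
 * with interrupts enabled, so netif_receive_skb() can be called directly.
 * The mynic_ring structure and its field names are hypothetical.
 *
 *	static void mynic_deliver(struct mynic_ring *ring, struct sk_buff *skb)
 *	{
 *		skb_record_rx_queue(skb, ring->index);
 *		skb->protocol = eth_type_trans(skb, ring->netdev);
 *		netif_receive_skb(skb);
 *	}
 */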
5817 
5818 /**
5819  *	netif_receive_skb_list - process many receive buffers from network
5820  *	@head: list of skbs to process.
5821  *
5822  *	Since the return value of netif_receive_skb() is normally ignored, and
5823  *	wouldn't be meaningful for a list, this function returns void.
5824  *
5825  *	This function may only be called from softirq context and interrupts
5826  *	should be enabled.
5827  */
5828 void netif_receive_skb_list(struct list_head *head)
5829 {
5830 	struct sk_buff *skb;
5831 
5832 	if (list_empty(head))
5833 		return;
5834 	if (trace_netif_receive_skb_list_entry_enabled()) {
5835 		list_for_each_entry(skb, head, list)
5836 			trace_netif_receive_skb_list_entry(skb);
5837 	}
5838 	netif_receive_skb_list_internal(head);
5839 	trace_netif_receive_skb_list_exit(0);
5840 }
5841 EXPORT_SYMBOL(netif_receive_skb_list);
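
/* Batched delivery sketch: a poll loop accumulating skbs on a local list and
 * handing the whole batch over in one call.  netif_receive_skb_list() copes
 * with an empty list, so no emptiness check is needed.  mynic_next_rx_skb()
 * and the surrounding driver structure are hypothetical.
 *
 *	static int mynic_poll_rx(struct napi_struct *napi, int budget)
 *	{
 *		LIST_HEAD(rx_list);
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = mynic_next_rx_skb(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			list_add_tail(&skb->list, &rx_list);
 *			done++;
 *		}
 *		netif_receive_skb_list(&rx_list);
 *		return done;
 *	}
 */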
5842 
5843 static DEFINE_PER_CPU(struct work_struct, flush_works);
5844 
5845 /* Network device is going away, flush any packets still pending */
5846 static void flush_backlog(struct work_struct *work)
5847 {
5848 	struct sk_buff *skb, *tmp;
5849 	struct softnet_data *sd;
5850 
5851 	local_bh_disable();
5852 	sd = this_cpu_ptr(&softnet_data);
5853 
5854 	rps_lock_irq_disable(sd);
5855 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5856 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5857 			__skb_unlink(skb, &sd->input_pkt_queue);
5858 			dev_kfree_skb_irq(skb);
5859 			input_queue_head_incr(sd);
5860 		}
5861 	}
5862 	rps_unlock_irq_enable(sd);
5863 
5864 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5865 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5866 			__skb_unlink(skb, &sd->process_queue);
5867 			kfree_skb(skb);
5868 			input_queue_head_incr(sd);
5869 		}
5870 	}
5871 	local_bh_enable();
5872 }
5873 
5874 static bool flush_required(int cpu)
5875 {
5876 #if IS_ENABLED(CONFIG_RPS)
5877 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5878 	bool do_flush;
5879 
5880 	rps_lock_irq_disable(sd);
5881 
5882 	/* as insertion into process_queue happens with the rps lock held,
5883 	 * process_queue access may race only with dequeue
5884 	 */
5885 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5886 		   !skb_queue_empty_lockless(&sd->process_queue);
5887 	rps_unlock_irq_enable(sd);
5888 
5889 	return do_flush;
5890 #endif
5891 	/* without RPS we can't safely check input_pkt_queue: during a
5892 	 * concurrent remote skb_queue_splice() we can detect as empty both
5893 	 * input_pkt_queue and process_queue even if the latter could end up
5894 	 * containing a lot of packets.
5895 	 */
5896 	return true;
5897 }
5898 
5899 static void flush_all_backlogs(void)
5900 {
5901 	static cpumask_t flush_cpus;
5902 	unsigned int cpu;
5903 
5904 	/* since we are under rtnl lock protection we can use static data
5905 	 * for the cpumask and avoid allocating on stack the possibly
5906 	 * large mask
5907 	 */
5908 	ASSERT_RTNL();
5909 
5910 	cpus_read_lock();
5911 
5912 	cpumask_clear(&flush_cpus);
5913 	for_each_online_cpu(cpu) {
5914 		if (flush_required(cpu)) {
5915 			queue_work_on(cpu, system_highpri_wq,
5916 				      per_cpu_ptr(&flush_works, cpu));
5917 			cpumask_set_cpu(cpu, &flush_cpus);
5918 		}
5919 	}
5920 
5921 	/* we can have in flight packet[s] on the cpus we are not flushing,
5922 	 * synchronize_net() in unregister_netdevice_many() will take care of
5923 	 * them
5924 	 */
5925 	for_each_cpu(cpu, &flush_cpus)
5926 		flush_work(per_cpu_ptr(&flush_works, cpu));
5927 
5928 	cpus_read_unlock();
5929 }
5930 
5931 static void net_rps_send_ipi(struct softnet_data *remsd)
5932 {
5933 #ifdef CONFIG_RPS
5934 	while (remsd) {
5935 		struct softnet_data *next = remsd->rps_ipi_next;
5936 
5937 		if (cpu_online(remsd->cpu))
5938 			smp_call_function_single_async(remsd->cpu, &remsd->csd);
5939 		remsd = next;
5940 	}
5941 #endif
5942 }
5943 
5944 /*
5945  * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
5946  * Note: called with local irq disabled, but exits with local irq enabled.
5947  */
5948 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5949 {
5950 #ifdef CONFIG_RPS
5951 	struct softnet_data *remsd = sd->rps_ipi_list;
5952 
5953 	if (remsd) {
5954 		sd->rps_ipi_list = NULL;
5955 
5956 		local_irq_enable();
5957 
5958 		/* Send pending IPI's to kick RPS processing on remote cpus. */
5959 		net_rps_send_ipi(remsd);
5960 	} else
5961 #endif
5962 		local_irq_enable();
5963 }
5964 
5965 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5966 {
5967 #ifdef CONFIG_RPS
5968 	return sd->rps_ipi_list != NULL;
5969 #else
5970 	return false;
5971 #endif
5972 }
5973 
5974 static int process_backlog(struct napi_struct *napi, int quota)
5975 {
5976 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5977 	bool again = true;
5978 	int work = 0;
5979 
5980 	/* Check if we have pending IPIs; it's better to send them now
5981 	 * than to wait for net_rx_action() to end.
5982 	 */
5983 	if (sd_has_rps_ipi_waiting(sd)) {
5984 		local_irq_disable();
5985 		net_rps_action_and_irq_enable(sd);
5986 	}
5987 
5988 	napi->weight = READ_ONCE(dev_rx_weight);
5989 	while (again) {
5990 		struct sk_buff *skb;
5991 
5992 		while ((skb = __skb_dequeue(&sd->process_queue))) {
5993 			rcu_read_lock();
5994 			__netif_receive_skb(skb);
5995 			rcu_read_unlock();
5996 			input_queue_head_incr(sd);
5997 			if (++work >= quota)
5998 				return work;
5999 
6000 		}
6001 
6002 		rps_lock_irq_disable(sd);
6003 		if (skb_queue_empty(&sd->input_pkt_queue)) {
6004 			/*
6005 			 * Inline a custom version of __napi_complete().
6006 			 * Only the current cpu owns and manipulates this napi,
6007 			 * and NAPI_STATE_SCHED is the only possible flag set
6008 			 * on backlog.
6009 			 * We can use a plain write instead of clear_bit(),
6010 			 * and we don't need an smp_mb() memory barrier.
6011 			 */
6012 			napi->state = 0;
6013 			again = false;
6014 		} else {
6015 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
6016 						   &sd->process_queue);
6017 		}
6018 		rps_unlock_irq_enable(sd);
6019 	}
6020 
6021 	return work;
6022 }
6023 
6024 /**
6025  * __napi_schedule - schedule for receive
6026  * @n: entry to schedule
6027  *
6028  * The entry's receive function will be scheduled to run.
6029  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
6030  */
6031 void __napi_schedule(struct napi_struct *n)
6032 {
6033 	unsigned long flags;
6034 
6035 	local_irq_save(flags);
6036 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
6037 	local_irq_restore(flags);
6038 }
6039 EXPORT_SYMBOL(__napi_schedule);
6040 
6041 /**
6042  *	napi_schedule_prep - check if napi can be scheduled
6043  *	@n: napi context
6044  *
6045  * Test if NAPI routine is already running, and if not mark
6046  * it as running.  This is used as a condition variable to
6047  * ensure only one NAPI poll instance runs.  We also make
6048  * sure there is no pending NAPI disable.
6049  */
6050 bool napi_schedule_prep(struct napi_struct *n)
6051 {
6052 	unsigned long new, val = READ_ONCE(n->state);
6053 
6054 	do {
6055 		if (unlikely(val & NAPIF_STATE_DISABLE))
6056 			return false;
6057 		new = val | NAPIF_STATE_SCHED;
6058 
6059 		/* Sets STATE_MISSED bit if STATE_SCHED was already set
6060 		 * This was suggested by Alexander Duyck, as compiler
6061 		 * emits better code than :
6062 		 * if (val & NAPIF_STATE_SCHED)
6063 		 *     new |= NAPIF_STATE_MISSED;
6064 		 */
6065 		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6066 						   NAPIF_STATE_MISSED;
6067 	} while (!try_cmpxchg(&n->state, &val, new));
6068 
6069 	return !(val & NAPIF_STATE_SCHED);
6070 }
6071 EXPORT_SYMBOL(napi_schedule_prep);
6072 
6073 /**
6074  * __napi_schedule_irqoff - schedule for receive
6075  * @n: entry to schedule
6076  *
6077  * Variant of __napi_schedule() assuming hard irqs are masked.
6078  *
6079  * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
6080  * because the interrupt disabled assumption might not be true
6081  * due to force-threaded interrupts and spinlock substitution.
6082  */
6083 void __napi_schedule_irqoff(struct napi_struct *n)
6084 {
6085 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6086 		____napi_schedule(this_cpu_ptr(&softnet_data), n);
6087 	else
6088 		__napi_schedule(n);
6089 }
6090 EXPORT_SYMBOL(__napi_schedule_irqoff);
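
/* Typical interrupt-handler sketch: mask the device interrupt, then schedule
 * NAPI with the irq-off variant, since hard irqs are known to be disabled
 * here.  Most drivers simply use the napi_schedule() wrapper, which combines
 * napi_schedule_prep() and __napi_schedule().  mynic_irq_disable() and the
 * ring layout are hypothetical.
 *
 *	static irqreturn_t mynic_msix_handler(int irq, void *data)
 *	{
 *		struct mynic_ring *ring = data;
 *
 *		mynic_irq_disable(ring);
 *		if (napi_schedule_prep(&ring->napi))
 *			__napi_schedule_irqoff(&ring->napi);
 *		return IRQ_HANDLED;
 *	}
 */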
6091 
6092 bool napi_complete_done(struct napi_struct *n, int work_done)
6093 {
6094 	unsigned long flags, val, new, timeout = 0;
6095 	bool ret = true;
6096 
6097 	/*
6098 	 * 1) Don't let napi dequeue from the cpu poll list
6099 	 *    just in case it's running on a different cpu.
6100 	 * 2) If we are busy polling, do nothing here, we have
6101 	 *    the guarantee we will be called later.
6102 	 */
6103 	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6104 				 NAPIF_STATE_IN_BUSY_POLL)))
6105 		return false;
6106 
6107 	if (work_done) {
6108 		if (n->gro_bitmask)
6109 			timeout = READ_ONCE(n->dev->gro_flush_timeout);
6110 		n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6111 	}
6112 	if (n->defer_hard_irqs_count > 0) {
6113 		n->defer_hard_irqs_count--;
6114 		timeout = READ_ONCE(n->dev->gro_flush_timeout);
6115 		if (timeout)
6116 			ret = false;
6117 	}
6118 	if (n->gro_bitmask) {
6119 		/* When the NAPI instance uses a timeout and keeps postponing
6120 		 * it, we need to bound somehow the time packets are kept in
6121 		 * the GRO layer
6122 		 */
6123 		napi_gro_flush(n, !!timeout);
6124 	}
6125 
6126 	gro_normal_list(n);
6127 
6128 	if (unlikely(!list_empty(&n->poll_list))) {
6129 		/* If n->poll_list is not empty, we need to mask irqs */
6130 		local_irq_save(flags);
6131 		list_del_init(&n->poll_list);
6132 		local_irq_restore(flags);
6133 	}
6134 	WRITE_ONCE(n->list_owner, -1);
6135 
6136 	val = READ_ONCE(n->state);
6137 	do {
6138 		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6139 
6140 		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6141 			      NAPIF_STATE_SCHED_THREADED |
6142 			      NAPIF_STATE_PREFER_BUSY_POLL);
6143 
6144 		/* If STATE_MISSED was set, leave STATE_SCHED set,
6145 		 * because we will call napi->poll() one more time.
6146 		 * This C code was suggested by Alexander Duyck to help gcc.
6147 		 */
6148 		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6149 						    NAPIF_STATE_SCHED;
6150 	} while (!try_cmpxchg(&n->state, &val, new));
6151 
6152 	if (unlikely(val & NAPIF_STATE_MISSED)) {
6153 		__napi_schedule(n);
6154 		return false;
6155 	}
6156 
6157 	if (timeout)
6158 		hrtimer_start(&n->timer, ns_to_ktime(timeout),
6159 			      HRTIMER_MODE_REL_PINNED);
6160 	return ret;
6161 }
6162 EXPORT_SYMBOL(napi_complete_done);
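
/* Canonical poll-function sketch around napi_complete_done(): only re-enable
 * the device interrupt when the budget was not exhausted and the core agreed
 * to complete (it may keep the instance scheduled for gro_flush_timeout or
 * busy polling).  mynic_clean_rx() and mynic_irq_enable() are hypothetical.
 *
 *	static int mynic_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mynic_ring *ring = container_of(napi, struct mynic_ring, napi);
 *		int done = mynic_clean_rx(ring, budget);
 *
 *		if (done < budget && napi_complete_done(napi, done))
 *			mynic_irq_enable(ring);
 *		return done;
 *	}
 */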
6163 
6164 /* must be called under rcu_read_lock(), as we dont take a reference */
6165 static struct napi_struct *napi_by_id(unsigned int napi_id)
6166 {
6167 	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6168 	struct napi_struct *napi;
6169 
6170 	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6171 		if (napi->napi_id == napi_id)
6172 			return napi;
6173 
6174 	return NULL;
6175 }
6176 
6177 #if defined(CONFIG_NET_RX_BUSY_POLL)
6178 
6179 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6180 {
6181 	if (!skip_schedule) {
6182 		gro_normal_list(napi);
6183 		__napi_schedule(napi);
6184 		return;
6185 	}
6186 
6187 	if (napi->gro_bitmask) {
6188 		/* flush too old packets
6189 		 * If HZ < 1000, flush all packets.
6190 		 */
6191 		napi_gro_flush(napi, HZ >= 1000);
6192 	}
6193 
6194 	gro_normal_list(napi);
6195 	clear_bit(NAPI_STATE_SCHED, &napi->state);
6196 }
6197 
6198 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
6199 			   u16 budget)
6200 {
6201 	bool skip_schedule = false;
6202 	unsigned long timeout;
6203 	int rc;
6204 
6205 	/* Busy polling means there is a high chance device driver hard irq
6206 	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6207 	 * set in napi_schedule_prep().
6208 	 * Since we are about to call napi->poll() once more, we can safely
6209 	 * clear NAPI_STATE_MISSED.
6210 	 *
6211 	 * Note: x86 could use a single "lock and ..." instruction
6212 	 * to perform these two clear_bit()
6213 	 */
6214 	clear_bit(NAPI_STATE_MISSED, &napi->state);
6215 	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6216 
6217 	local_bh_disable();
6218 
6219 	if (prefer_busy_poll) {
6220 		napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6221 		timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6222 		if (napi->defer_hard_irqs_count && timeout) {
6223 			hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6224 			skip_schedule = true;
6225 		}
6226 	}
6227 
6228 	/* All we really want here is to re-enable device interrupts.
6229 	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6230 	 */
6231 	rc = napi->poll(napi, budget);
6232 	/* We can't gro_normal_list() here, because napi->poll() might have
6233 	 * rearmed the napi (napi_complete_done()) in which case it could
6234 	 * already be running on another CPU.
6235 	 */
6236 	trace_napi_poll(napi, rc, budget);
6237 	netpoll_poll_unlock(have_poll_lock);
6238 	if (rc == budget)
6239 		__busy_poll_stop(napi, skip_schedule);
6240 	local_bh_enable();
6241 }
6242 
6243 void napi_busy_loop(unsigned int napi_id,
6244 		    bool (*loop_end)(void *, unsigned long),
6245 		    void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6246 {
6247 	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6248 	int (*napi_poll)(struct napi_struct *napi, int budget);
6249 	void *have_poll_lock = NULL;
6250 	struct napi_struct *napi;
6251 
6252 restart:
6253 	napi_poll = NULL;
6254 
6255 	rcu_read_lock();
6256 
6257 	napi = napi_by_id(napi_id);
6258 	if (!napi)
6259 		goto out;
6260 
6261 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6262 		preempt_disable();
6263 	for (;;) {
6264 		int work = 0;
6265 
6266 		local_bh_disable();
6267 		if (!napi_poll) {
6268 			unsigned long val = READ_ONCE(napi->state);
6269 
6270 			/* If multiple threads are competing for this napi,
6271 			 * we avoid dirtying napi->state as much as we can.
6272 			 */
6273 			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6274 				   NAPIF_STATE_IN_BUSY_POLL)) {
6275 				if (prefer_busy_poll)
6276 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6277 				goto count;
6278 			}
6279 			if (cmpxchg(&napi->state, val,
6280 				    val | NAPIF_STATE_IN_BUSY_POLL |
6281 					  NAPIF_STATE_SCHED) != val) {
6282 				if (prefer_busy_poll)
6283 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6284 				goto count;
6285 			}
6286 			have_poll_lock = netpoll_poll_lock(napi);
6287 			napi_poll = napi->poll;
6288 		}
6289 		work = napi_poll(napi, budget);
6290 		trace_napi_poll(napi, work, budget);
6291 		gro_normal_list(napi);
6292 count:
6293 		if (work > 0)
6294 			__NET_ADD_STATS(dev_net(napi->dev),
6295 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
6296 		local_bh_enable();
6297 
6298 		if (!loop_end || loop_end(loop_end_arg, start_time))
6299 			break;
6300 
6301 		if (unlikely(need_resched())) {
6302 			if (napi_poll)
6303 				busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6304 			if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6305 				preempt_enable();
6306 			rcu_read_unlock();
6307 			cond_resched();
6308 			if (loop_end(loop_end_arg, start_time))
6309 				return;
6310 			goto restart;
6311 		}
6312 		cpu_relax();
6313 	}
6314 	if (napi_poll)
6315 		busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6316 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6317 		preempt_enable();
6318 out:
6319 	rcu_read_unlock();
6320 }
6321 EXPORT_SYMBOL(napi_busy_loop);
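
/* Sketch of a loop_end callback in the style of the socket busy-poll user:
 * stop either when the condition being polled for became true or when the
 * time budget expires.  my_cond_ready(), the arg cookie and the 64-packet
 * budget are illustrative values only.
 *
 *	static bool my_loop_end(void *arg, unsigned long start_time)
 *	{
 *		return my_cond_ready(arg) || busy_loop_timeout(start_time);
 *	}
 *
 *	napi_busy_loop(napi_id, my_loop_end, arg, false, 64);
 */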
6322 
6323 #endif /* CONFIG_NET_RX_BUSY_POLL */
6324 
6325 static void napi_hash_add(struct napi_struct *napi)
6326 {
6327 	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6328 		return;
6329 
6330 	spin_lock(&napi_hash_lock);
6331 
6332 	/* 0..NR_CPUS range is reserved for sender_cpu use */
6333 	do {
6334 		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6335 			napi_gen_id = MIN_NAPI_ID;
6336 	} while (napi_by_id(napi_gen_id));
6337 	napi->napi_id = napi_gen_id;
6338 
6339 	hlist_add_head_rcu(&napi->napi_hash_node,
6340 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6341 
6342 	spin_unlock(&napi_hash_lock);
6343 }
6344 
6345 /* Warning: the caller is responsible for making sure an rcu grace period
6346  * has elapsed before freeing the memory containing @napi
6347  */
6348 static void napi_hash_del(struct napi_struct *napi)
6349 {
6350 	spin_lock(&napi_hash_lock);
6351 
6352 	hlist_del_init_rcu(&napi->napi_hash_node);
6353 
6354 	spin_unlock(&napi_hash_lock);
6355 }
6356 
6357 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6358 {
6359 	struct napi_struct *napi;
6360 
6361 	napi = container_of(timer, struct napi_struct, timer);
6362 
6363 	/* Note: we use a relaxed variant of napi_schedule_prep() not setting
6364 	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6365 	 */
6366 	if (!napi_disable_pending(napi) &&
6367 	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6368 		clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6369 		__napi_schedule_irqoff(napi);
6370 	}
6371 
6372 	return HRTIMER_NORESTART;
6373 }
6374 
6375 static void init_gro_hash(struct napi_struct *napi)
6376 {
6377 	int i;
6378 
6379 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6380 		INIT_LIST_HEAD(&napi->gro_hash[i].list);
6381 		napi->gro_hash[i].count = 0;
6382 	}
6383 	napi->gro_bitmask = 0;
6384 }
6385 
6386 int dev_set_threaded(struct net_device *dev, bool threaded)
6387 {
6388 	struct napi_struct *napi;
6389 	int err = 0;
6390 
6391 	if (dev->threaded == threaded)
6392 		return 0;
6393 
6394 	if (threaded) {
6395 		list_for_each_entry(napi, &dev->napi_list, dev_list) {
6396 			if (!napi->thread) {
6397 				err = napi_kthread_create(napi);
6398 				if (err) {
6399 					threaded = false;
6400 					break;
6401 				}
6402 			}
6403 		}
6404 	}
6405 
6406 	dev->threaded = threaded;
6407 
6408 	/* Make sure kthread is created before THREADED bit
6409 	 * is set.
6410 	 */
6411 	smp_mb__before_atomic();
6412 
6413 	/* Setting/unsetting threaded mode on a napi might not immediately
6414 	 * take effect, if the current napi instance is actively being
6415 	 * polled. In this case, the switch between threaded mode and
6416 	 * softirq mode will happen in the next round of napi_schedule().
6417 	 * This should not cause hiccups/stalls to the live traffic.
6418 	 */
6419 	list_for_each_entry(napi, &dev->napi_list, dev_list)
6420 		assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
6421 
6422 	return err;
6423 }
6424 EXPORT_SYMBOL(dev_set_threaded);
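
/* Usage sketch: switching a whole device to threaded NAPI, e.g. from a
 * driver-private knob; the sysfs "threaded" attribute follows the same path
 * under rtnl.  The wrapper below and its name are hypothetical.
 *
 *	static int mynic_set_threaded(struct net_device *dev, bool on)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_set_threaded(dev, on);
 *		rtnl_unlock();
 *		return err;
 *	}
 */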
6425 
6426 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
6427 			   int (*poll)(struct napi_struct *, int), int weight)
6428 {
6429 	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6430 		return;
6431 
6432 	INIT_LIST_HEAD(&napi->poll_list);
6433 	INIT_HLIST_NODE(&napi->napi_hash_node);
6434 	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6435 	napi->timer.function = napi_watchdog;
6436 	init_gro_hash(napi);
6437 	napi->skb = NULL;
6438 	INIT_LIST_HEAD(&napi->rx_list);
6439 	napi->rx_count = 0;
6440 	napi->poll = poll;
6441 	if (weight > NAPI_POLL_WEIGHT)
6442 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6443 				weight);
6444 	napi->weight = weight;
6445 	napi->dev = dev;
6446 #ifdef CONFIG_NETPOLL
6447 	napi->poll_owner = -1;
6448 #endif
6449 	napi->list_owner = -1;
6450 	set_bit(NAPI_STATE_SCHED, &napi->state);
6451 	set_bit(NAPI_STATE_NPSVC, &napi->state);
6452 	list_add_rcu(&napi->dev_list, &dev->napi_list);
6453 	napi_hash_add(napi);
6454 	napi_get_frags_check(napi);
6455 	/* Create kthread for this napi if dev->threaded is set.
6456 	 * Clear dev->threaded if kthread creation failed so that
6457 	 * threaded mode will not be enabled in napi_enable().
6458 	 */
6459 	if (dev->threaded && napi_kthread_create(napi))
6460 		dev->threaded = 0;
6461 }
6462 EXPORT_SYMBOL(netif_napi_add_weight);
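
/* Registration sketch: drivers normally call this (or the netif_napi_add()
 * wrapper, which uses the default weight) at probe/setup time and enable the
 * instance in ndo_open.  The mynic_* names and structures are hypothetical.
 *
 *	netif_napi_add_weight(netdev, &ring->napi, mynic_poll, NAPI_POLL_WEIGHT);
 *
 *	static int mynic_open(struct net_device *netdev)
 *	{
 *		struct mynic_priv *priv = netdev_priv(netdev);
 *
 *		napi_enable(&priv->ring.napi);
 *		mynic_irq_enable(&priv->ring);
 *		return 0;
 *	}
 */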
6463 
6464 void napi_disable(struct napi_struct *n)
6465 {
6466 	unsigned long val, new;
6467 
6468 	might_sleep();
6469 	set_bit(NAPI_STATE_DISABLE, &n->state);
6470 
6471 	val = READ_ONCE(n->state);
6472 	do {
6473 		while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
6474 			usleep_range(20, 200);
6475 			val = READ_ONCE(n->state);
6476 		}
6477 
6478 		new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
6479 		new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
6480 	} while (!try_cmpxchg(&n->state, &val, new));
6481 
6482 	hrtimer_cancel(&n->timer);
6483 
6484 	clear_bit(NAPI_STATE_DISABLE, &n->state);
6485 }
6486 EXPORT_SYMBOL(napi_disable);
6487 
6488 /**
6489  *	napi_enable - enable NAPI scheduling
6490  *	@n: NAPI context
6491  *
6492  * Resume NAPI from being scheduled on this context.
6493  * Must be paired with napi_disable.
6494  */
6495 void napi_enable(struct napi_struct *n)
6496 {
6497 	unsigned long new, val = READ_ONCE(n->state);
6498 
6499 	do {
6500 		BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
6501 
6502 		new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
6503 		if (n->dev->threaded && n->thread)
6504 			new |= NAPIF_STATE_THREADED;
6505 	} while (!try_cmpxchg(&n->state, &val, new));
6506 }
6507 EXPORT_SYMBOL(napi_enable);
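
/* Pairing sketch: napi_disable()/napi_enable() bracket a reconfiguration so
 * no poll can run while rings are torn down and rebuilt.  The mynic_*
 * helpers are hypothetical.
 *
 *	napi_disable(&ring->napi);
 *	mynic_free_ring(ring);
 *	mynic_alloc_ring(ring);
 *	napi_enable(&ring->napi);
 */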
6508 
6509 static void flush_gro_hash(struct napi_struct *napi)
6510 {
6511 	int i;
6512 
6513 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6514 		struct sk_buff *skb, *n;
6515 
6516 		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6517 			kfree_skb(skb);
6518 		napi->gro_hash[i].count = 0;
6519 	}
6520 }
6521 
6522 /* Must be called in process context */
6523 void __netif_napi_del(struct napi_struct *napi)
6524 {
6525 	if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6526 		return;
6527 
6528 	napi_hash_del(napi);
6529 	list_del_rcu(&napi->dev_list);
6530 	napi_free_frags(napi);
6531 
6532 	flush_gro_hash(napi);
6533 	napi->gro_bitmask = 0;
6534 
6535 	if (napi->thread) {
6536 		kthread_stop(napi->thread);
6537 		napi->thread = NULL;
6538 	}
6539 }
6540 EXPORT_SYMBOL(__netif_napi_del);
6541 
6542 static int __napi_poll(struct napi_struct *n, bool *repoll)
6543 {
6544 	int work, weight;
6545 
6546 	weight = n->weight;
6547 
6548 	/* This NAPI_STATE_SCHED test is for avoiding a race
6549 	 * with netpoll's poll_napi().  Only the entity which
6550 	 * obtains the lock and sees NAPI_STATE_SCHED set will
6551 	 * actually make the ->poll() call.  Therefore we avoid
6552 	 * accidentally calling ->poll() when NAPI is not scheduled.
6553 	 */
6554 	work = 0;
6555 	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6556 		work = n->poll(n, weight);
6557 		trace_napi_poll(n, work, weight);
6558 	}
6559 
6560 	if (unlikely(work > weight))
6561 		netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6562 				n->poll, work, weight);
6563 
6564 	if (likely(work < weight))
6565 		return work;
6566 
6567 	/* Drivers must not modify the NAPI state if they
6568 	 * consume the entire weight.  In such cases this code
6569 	 * still "owns" the NAPI instance and therefore can
6570 	 * move the instance around on the list at-will.
6571 	 */
6572 	if (unlikely(napi_disable_pending(n))) {
6573 		napi_complete(n);
6574 		return work;
6575 	}
6576 
6577 	/* The NAPI context has more processing work, but busy-polling
6578 	 * is preferred. Exit early.
6579 	 */
6580 	if (napi_prefer_busy_poll(n)) {
6581 		if (napi_complete_done(n, work)) {
6582 			/* If timeout is not set, we need to make sure
6583 			 * that the NAPI is re-scheduled.
6584 			 */
6585 			napi_schedule(n);
6586 		}
6587 		return work;
6588 	}
6589 
6590 	if (n->gro_bitmask) {
6591 		/* flush too old packets
6592 		 * If HZ < 1000, flush all packets.
6593 		 */
6594 		napi_gro_flush(n, HZ >= 1000);
6595 	}
6596 
6597 	gro_normal_list(n);
6598 
6599 	/* Some drivers may have called napi_schedule
6600 	 * prior to exhausting their budget.
6601 	 */
6602 	if (unlikely(!list_empty(&n->poll_list))) {
6603 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6604 			     n->dev ? n->dev->name : "backlog");
6605 		return work;
6606 	}
6607 
6608 	*repoll = true;
6609 
6610 	return work;
6611 }
6612 
6613 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6614 {
6615 	bool do_repoll = false;
6616 	void *have;
6617 	int work;
6618 
6619 	list_del_init(&n->poll_list);
6620 
6621 	have = netpoll_poll_lock(n);
6622 
6623 	work = __napi_poll(n, &do_repoll);
6624 
6625 	if (do_repoll)
6626 		list_add_tail(&n->poll_list, repoll);
6627 
6628 	netpoll_poll_unlock(have);
6629 
6630 	return work;
6631 }
6632 
6633 static int napi_thread_wait(struct napi_struct *napi)
6634 {
6635 	bool woken = false;
6636 
6637 	set_current_state(TASK_INTERRUPTIBLE);
6638 
6639 	while (!kthread_should_stop()) {
6640 		/* Testing SCHED_THREADED bit here to make sure the current
6641 		 * kthread owns this napi and could poll on this napi.
6642 		 * Testing SCHED bit is not enough because SCHED bit might be
6643 		 * set by some other busy poll thread or by napi_disable().
6644 		 */
6645 		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
6646 			WARN_ON(!list_empty(&napi->poll_list));
6647 			__set_current_state(TASK_RUNNING);
6648 			return 0;
6649 		}
6650 
6651 		schedule();
6652 		/* woken being true indicates this thread owns this napi. */
6653 		woken = true;
6654 		set_current_state(TASK_INTERRUPTIBLE);
6655 	}
6656 	__set_current_state(TASK_RUNNING);
6657 
6658 	return -1;
6659 }
6660 
6661 static void skb_defer_free_flush(struct softnet_data *sd)
6662 {
6663 	struct sk_buff *skb, *next;
6664 
6665 	/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
6666 	if (!READ_ONCE(sd->defer_list))
6667 		return;
6668 
6669 	spin_lock(&sd->defer_lock);
6670 	skb = sd->defer_list;
6671 	sd->defer_list = NULL;
6672 	sd->defer_count = 0;
6673 	spin_unlock(&sd->defer_lock);
6674 
6675 	while (skb != NULL) {
6676 		next = skb->next;
6677 		napi_consume_skb(skb, 1);
6678 		skb = next;
6679 	}
6680 }
6681 
6682 static int napi_threaded_poll(void *data)
6683 {
6684 	struct napi_struct *napi = data;
6685 	struct softnet_data *sd;
6686 	void *have;
6687 
6688 	while (!napi_thread_wait(napi)) {
6689 		for (;;) {
6690 			bool repoll = false;
6691 
6692 			local_bh_disable();
6693 			sd = this_cpu_ptr(&softnet_data);
6694 			sd->in_napi_threaded_poll = true;
6695 
6696 			have = netpoll_poll_lock(napi);
6697 			__napi_poll(napi, &repoll);
6698 			netpoll_poll_unlock(have);
6699 
6700 			sd->in_napi_threaded_poll = false;
6701 			barrier();
6702 
6703 			if (sd_has_rps_ipi_waiting(sd)) {
6704 				local_irq_disable();
6705 				net_rps_action_and_irq_enable(sd);
6706 			}
6707 			skb_defer_free_flush(sd);
6708 			local_bh_enable();
6709 
6710 			if (!repoll)
6711 				break;
6712 
6713 			cond_resched();
6714 		}
6715 	}
6716 	return 0;
6717 }
6718 
6719 static __latent_entropy void net_rx_action(struct softirq_action *h)
6720 {
6721 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6722 	unsigned long time_limit = jiffies +
6723 		usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
6724 	int budget = READ_ONCE(netdev_budget);
6725 	LIST_HEAD(list);
6726 	LIST_HEAD(repoll);
6727 
6728 start:
6729 	sd->in_net_rx_action = true;
6730 	local_irq_disable();
6731 	list_splice_init(&sd->poll_list, &list);
6732 	local_irq_enable();
6733 
6734 	for (;;) {
6735 		struct napi_struct *n;
6736 
6737 		skb_defer_free_flush(sd);
6738 
6739 		if (list_empty(&list)) {
6740 			if (list_empty(&repoll)) {
6741 				sd->in_net_rx_action = false;
6742 				barrier();
6743 				/* We need to check if ____napi_schedule()
6744 				 * had refilled poll_list while
6745 				 * sd->in_net_rx_action was true.
6746 				 */
6747 				if (!list_empty(&sd->poll_list))
6748 					goto start;
6749 				if (!sd_has_rps_ipi_waiting(sd))
6750 					goto end;
6751 			}
6752 			break;
6753 		}
6754 
6755 		n = list_first_entry(&list, struct napi_struct, poll_list);
6756 		budget -= napi_poll(n, &repoll);
6757 
6758 		/* If the softirq window is exhausted then punt.
6759 		 * Allow this to run for 2 jiffies, which allows
6760 		 * an average latency of 1.5/HZ.
6761 		 */
6762 		if (unlikely(budget <= 0 ||
6763 			     time_after_eq(jiffies, time_limit))) {
6764 			sd->time_squeeze++;
6765 			break;
6766 		}
6767 	}
6768 
6769 	local_irq_disable();
6770 
6771 	list_splice_tail_init(&sd->poll_list, &list);
6772 	list_splice_tail(&repoll, &list);
6773 	list_splice(&list, &sd->poll_list);
6774 	if (!list_empty(&sd->poll_list))
6775 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
6776 	else
6777 		sd->in_net_rx_action = false;
6778 
6779 	net_rps_action_and_irq_enable(sd);
6780 end:;
6781 }
6782 
6783 struct netdev_adjacent {
6784 	struct net_device *dev;
6785 	netdevice_tracker dev_tracker;
6786 
6787 	/* upper master flag, there can only be one master device per list */
6788 	bool master;
6789 
6790 	/* lookup ignore flag */
6791 	bool ignore;
6792 
6793 	/* counter for the number of times this device was added to us */
6794 	u16 ref_nr;
6795 
6796 	/* private field for the users */
6797 	void *private;
6798 
6799 	struct list_head list;
6800 	struct rcu_head rcu;
6801 };
6802 
6803 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6804 						 struct list_head *adj_list)
6805 {
6806 	struct netdev_adjacent *adj;
6807 
6808 	list_for_each_entry(adj, adj_list, list) {
6809 		if (adj->dev == adj_dev)
6810 			return adj;
6811 	}
6812 	return NULL;
6813 }
6814 
6815 static int ____netdev_has_upper_dev(struct net_device *upper_dev,
6816 				    struct netdev_nested_priv *priv)
6817 {
6818 	struct net_device *dev = (struct net_device *)priv->data;
6819 
6820 	return upper_dev == dev;
6821 }
6822 
6823 /**
6824  * netdev_has_upper_dev - Check if device is linked to an upper device
6825  * @dev: device
6826  * @upper_dev: upper device to check
6827  *
6828  * Find out if a device is linked to specified upper device and return true
6829  * in case it is. Note that this checks only immediate upper device,
6830  * not through a complete stack of devices. The caller must hold the RTNL lock.
6831  */
6832 bool netdev_has_upper_dev(struct net_device *dev,
6833 			  struct net_device *upper_dev)
6834 {
6835 	struct netdev_nested_priv priv = {
6836 		.data = (void *)upper_dev,
6837 	};
6838 
6839 	ASSERT_RTNL();
6840 
6841 	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6842 					     &priv);
6843 }
6844 EXPORT_SYMBOL(netdev_has_upper_dev);
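
/* Usage sketch, e.g. refusing to enslave a port that is already directly
 * below the prospective master.  my_master_dev and port_dev are placeholders.
 *
 *	ASSERT_RTNL();
 *	if (netdev_has_upper_dev(port_dev, my_master_dev))
 *		return -EBUSY;
 */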
6845 
6846 /**
6847  * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6848  * @dev: device
6849  * @upper_dev: upper device to check
6850  *
6851  * Find out if a device is linked to specified upper device and return true
6852  * in case it is. Note that this checks the entire upper device chain.
6853  * The caller must hold rcu lock.
6854  */
6855 
6856 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6857 				  struct net_device *upper_dev)
6858 {
6859 	struct netdev_nested_priv priv = {
6860 		.data = (void *)upper_dev,
6861 	};
6862 
6863 	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6864 					       &priv);
6865 }
6866 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6867 
6868 /**
6869  * netdev_has_any_upper_dev - Check if device is linked to some device
6870  * @dev: device
6871  *
6872  * Find out if a device is linked to an upper device and return true in case
6873  * it is. The caller must hold the RTNL lock.
6874  */
6875 bool netdev_has_any_upper_dev(struct net_device *dev)
6876 {
6877 	ASSERT_RTNL();
6878 
6879 	return !list_empty(&dev->adj_list.upper);
6880 }
6881 EXPORT_SYMBOL(netdev_has_any_upper_dev);
6882 
6883 /**
6884  * netdev_master_upper_dev_get - Get master upper device
6885  * @dev: device
6886  *
6887  * Find a master upper device and return pointer to it or NULL in case
6888  * it's not there. The caller must hold the RTNL lock.
6889  */
6890 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6891 {
6892 	struct netdev_adjacent *upper;
6893 
6894 	ASSERT_RTNL();
6895 
6896 	if (list_empty(&dev->adj_list.upper))
6897 		return NULL;
6898 
6899 	upper = list_first_entry(&dev->adj_list.upper,
6900 				 struct netdev_adjacent, list);
6901 	if (likely(upper->master))
6902 		return upper->dev;
6903 	return NULL;
6904 }
6905 EXPORT_SYMBOL(netdev_master_upper_dev_get);
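
/*
 * Example (illustrative sketch; "slave" is a hypothetical net_device): under
 * RTNL, a driver can look up the bond or bridge a port is enslaved to:
 *
 *	struct net_device *master;
 *
 *	ASSERT_RTNL();
 *	master = netdev_master_upper_dev_get(slave);
 *	if (master)
 *		netdev_info(slave, "enslaved to %s\n", master->name);
 */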
6906 
6907 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6908 {
6909 	struct netdev_adjacent *upper;
6910 
6911 	ASSERT_RTNL();
6912 
6913 	if (list_empty(&dev->adj_list.upper))
6914 		return NULL;
6915 
6916 	upper = list_first_entry(&dev->adj_list.upper,
6917 				 struct netdev_adjacent, list);
6918 	if (likely(upper->master) && !upper->ignore)
6919 		return upper->dev;
6920 	return NULL;
6921 }
6922 
6923 /**
6924  * netdev_has_any_lower_dev - Check if device is linked to some device
6925  * @dev: device
6926  *
6927  * Find out if a device is linked to a lower device and return true in case
6928  * it is. The caller must hold the RTNL lock.
6929  */
6930 static bool netdev_has_any_lower_dev(struct net_device *dev)
6931 {
6932 	ASSERT_RTNL();
6933 
6934 	return !list_empty(&dev->adj_list.lower);
6935 }
6936 
6937 void *netdev_adjacent_get_private(struct list_head *adj_list)
6938 {
6939 	struct netdev_adjacent *adj;
6940 
6941 	adj = list_entry(adj_list, struct netdev_adjacent, list);
6942 
6943 	return adj->private;
6944 }
6945 EXPORT_SYMBOL(netdev_adjacent_get_private);
6946 
6947 /**
6948  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6949  * @dev: device
6950  * @iter: list_head ** of the current position
6951  *
6952  * Gets the next device from the dev's upper list, starting from iter
6953  * position. The caller must hold RCU read lock.
6954  */
6955 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6956 						 struct list_head **iter)
6957 {
6958 	struct netdev_adjacent *upper;
6959 
6960 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6961 
6962 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6963 
6964 	if (&upper->list == &dev->adj_list.upper)
6965 		return NULL;
6966 
6967 	*iter = &upper->list;
6968 
6969 	return upper->dev;
6970 }
6971 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
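
/*
 * Example (illustrative sketch): walking the immediate upper devices under
 * rcu_read_lock(), much as the netdev_for_each_upper_dev_rcu() helper does:
 *
 *	struct list_head *iter = &dev->adj_list.upper;
 *	struct net_device *upper;
 *
 *	rcu_read_lock();
 *	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
 *		pr_debug("upper: %s\n", upper->name);
 *	rcu_read_unlock();
 */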
6972 
6973 static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
6974 						  struct list_head **iter,
6975 						  bool *ignore)
6976 {
6977 	struct netdev_adjacent *upper;
6978 
6979 	upper = list_entry((*iter)->next, struct netdev_adjacent, list);
6980 
6981 	if (&upper->list == &dev->adj_list.upper)
6982 		return NULL;
6983 
6984 	*iter = &upper->list;
6985 	*ignore = upper->ignore;
6986 
6987 	return upper->dev;
6988 }
6989 
6990 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6991 						    struct list_head **iter)
6992 {
6993 	struct netdev_adjacent *upper;
6994 
6995 	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6996 
6997 	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6998 
6999 	if (&upper->list == &dev->adj_list.upper)
7000 		return NULL;
7001 
7002 	*iter = &upper->list;
7003 
7004 	return upper->dev;
7005 }
7006 
7007 static int __netdev_walk_all_upper_dev(struct net_device *dev,
7008 				       int (*fn)(struct net_device *dev,
7009 					 struct netdev_nested_priv *priv),
7010 				       struct netdev_nested_priv *priv)
7011 {
7012 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7013 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7014 	int ret, cur = 0;
7015 	bool ignore;
7016 
7017 	now = dev;
7018 	iter = &dev->adj_list.upper;
7019 
7020 	while (1) {
7021 		if (now != dev) {
7022 			ret = fn(now, priv);
7023 			if (ret)
7024 				return ret;
7025 		}
7026 
7027 		next = NULL;
7028 		while (1) {
7029 			udev = __netdev_next_upper_dev(now, &iter, &ignore);
7030 			if (!udev)
7031 				break;
7032 			if (ignore)
7033 				continue;
7034 
7035 			next = udev;
7036 			niter = &udev->adj_list.upper;
7037 			dev_stack[cur] = now;
7038 			iter_stack[cur++] = iter;
7039 			break;
7040 		}
7041 
7042 		if (!next) {
7043 			if (!cur)
7044 				return 0;
7045 			next = dev_stack[--cur];
7046 			niter = iter_stack[cur];
7047 		}
7048 
7049 		now = next;
7050 		iter = niter;
7051 	}
7052 
7053 	return 0;
7054 }
7055 
7056 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7057 				  int (*fn)(struct net_device *dev,
7058 					    struct netdev_nested_priv *priv),
7059 				  struct netdev_nested_priv *priv)
7060 {
7061 	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7062 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7063 	int ret, cur = 0;
7064 
7065 	now = dev;
7066 	iter = &dev->adj_list.upper;
7067 
7068 	while (1) {
7069 		if (now != dev) {
7070 			ret = fn(now, priv);
7071 			if (ret)
7072 				return ret;
7073 		}
7074 
7075 		next = NULL;
7076 		while (1) {
7077 			udev = netdev_next_upper_dev_rcu(now, &iter);
7078 			if (!udev)
7079 				break;
7080 
7081 			next = udev;
7082 			niter = &udev->adj_list.upper;
7083 			dev_stack[cur] = now;
7084 			iter_stack[cur++] = iter;
7085 			break;
7086 		}
7087 
7088 		if (!next) {
7089 			if (!cur)
7090 				return 0;
7091 			next = dev_stack[--cur];
7092 			niter = iter_stack[cur];
7093 		}
7094 
7095 		now = next;
7096 		iter = niter;
7097 	}
7098 
7099 	return 0;
7100 }
7101 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
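
/*
 * Example (illustrative sketch; the callback and its counter are
 * hypothetical): counting every device stacked above a device, at any depth:
 *
 *	static int count_upper(struct net_device *upper,
 *			       struct netdev_nested_priv *priv)
 *	{
 *		(*(int *)priv->data)++;
 *		return 0;	(a non-zero return stops the walk)
 *	}
 *
 *	int n = 0;
 *	struct netdev_nested_priv priv = { .data = &n };
 *
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, count_upper, &priv);
 *	rcu_read_unlock();
 */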
7102 
7103 static bool __netdev_has_upper_dev(struct net_device *dev,
7104 				   struct net_device *upper_dev)
7105 {
7106 	struct netdev_nested_priv priv = {
7107 		.flags = 0,
7108 		.data = (void *)upper_dev,
7109 	};
7110 
7111 	ASSERT_RTNL();
7112 
7113 	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7114 					   &priv);
7115 }
7116 
7117 /**
7118  * netdev_lower_get_next_private - Get the next ->private from the
7119  *				   lower neighbour list
7120  * @dev: device
7121  * @iter: list_head ** of the current position
7122  *
7123  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7124  * list, starting from iter position. The caller must either hold the
7125  * RTNL lock or use its own locking that guarantees that the neighbour lower
7126  * list will remain unchanged.
7127  */
7128 void *netdev_lower_get_next_private(struct net_device *dev,
7129 				    struct list_head **iter)
7130 {
7131 	struct netdev_adjacent *lower;
7132 
7133 	lower = list_entry(*iter, struct netdev_adjacent, list);
7134 
7135 	if (&lower->list == &dev->adj_list.lower)
7136 		return NULL;
7137 
7138 	*iter = lower->list.next;
7139 
7140 	return lower->private;
7141 }
7142 EXPORT_SYMBOL(netdev_lower_get_next_private);
7143 
7144 /**
7145  * netdev_lower_get_next_private_rcu - Get the next ->private from the
7146  *				       lower neighbour list, RCU
7147  *				       variant
7148  * @dev: device
7149  * @iter: list_head ** of the current position
7150  *
7151  * Gets the next netdev_adjacent->private from the dev's lower neighbour
7152  * list, starting from iter position. The caller must hold RCU read lock.
7153  */
7154 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7155 					struct list_head **iter)
7156 {
7157 	struct netdev_adjacent *lower;
7158 
7159 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
7160 
7161 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7162 
7163 	if (&lower->list == &dev->adj_list.lower)
7164 		return NULL;
7165 
7166 	*iter = &lower->list;
7167 
7168 	return lower->private;
7169 }
7170 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7171 
7172 /**
7173  * netdev_lower_get_next - Get the next device from the lower neighbour
7174  *                         list
7175  * @dev: device
7176  * @iter: list_head ** of the current position
7177  *
7178  * Gets the next netdev_adjacent from the dev's lower neighbour
7179  * list, starting from iter position. The caller must hold the RTNL lock or
7180  * use its own locking that guarantees that the neighbour lower
7181  * list will remain unchanged.
7182  */
7183 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7184 {
7185 	struct netdev_adjacent *lower;
7186 
7187 	lower = list_entry(*iter, struct netdev_adjacent, list);
7188 
7189 	if (&lower->list == &dev->adj_list.lower)
7190 		return NULL;
7191 
7192 	*iter = lower->list.next;
7193 
7194 	return lower->dev;
7195 }
7196 EXPORT_SYMBOL(netdev_lower_get_next);
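
/*
 * Example (illustrative sketch): iterating the immediate lower devices while
 * holding the RTNL lock, much as the netdev_for_each_lower_dev() helper does;
 * note that the iterator starts at the first list entry, not the list head:
 *
 *	struct list_head *iter = dev->adj_list.lower.next;
 *	struct net_device *lower;
 *
 *	ASSERT_RTNL();
 *	while ((lower = netdev_lower_get_next(dev, &iter)))
 *		pr_debug("lower: %s\n", lower->name);
 */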
7197 
7198 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7199 						struct list_head **iter)
7200 {
7201 	struct netdev_adjacent *lower;
7202 
7203 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7204 
7205 	if (&lower->list == &dev->adj_list.lower)
7206 		return NULL;
7207 
7208 	*iter = &lower->list;
7209 
7210 	return lower->dev;
7211 }
7212 
7213 static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7214 						  struct list_head **iter,
7215 						  bool *ignore)
7216 {
7217 	struct netdev_adjacent *lower;
7218 
7219 	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7220 
7221 	if (&lower->list == &dev->adj_list.lower)
7222 		return NULL;
7223 
7224 	*iter = &lower->list;
7225 	*ignore = lower->ignore;
7226 
7227 	return lower->dev;
7228 }
7229 
7230 int netdev_walk_all_lower_dev(struct net_device *dev,
7231 			      int (*fn)(struct net_device *dev,
7232 					struct netdev_nested_priv *priv),
7233 			      struct netdev_nested_priv *priv)
7234 {
7235 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7236 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7237 	int ret, cur = 0;
7238 
7239 	now = dev;
7240 	iter = &dev->adj_list.lower;
7241 
7242 	while (1) {
7243 		if (now != dev) {
7244 			ret = fn(now, priv);
7245 			if (ret)
7246 				return ret;
7247 		}
7248 
7249 		next = NULL;
7250 		while (1) {
7251 			ldev = netdev_next_lower_dev(now, &iter);
7252 			if (!ldev)
7253 				break;
7254 
7255 			next = ldev;
7256 			niter = &ldev->adj_list.lower;
7257 			dev_stack[cur] = now;
7258 			iter_stack[cur++] = iter;
7259 			break;
7260 		}
7261 
7262 		if (!next) {
7263 			if (!cur)
7264 				return 0;
7265 			next = dev_stack[--cur];
7266 			niter = iter_stack[cur];
7267 		}
7268 
7269 		now = next;
7270 		iter = niter;
7271 	}
7272 
7273 	return 0;
7274 }
7275 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7276 
7277 static int __netdev_walk_all_lower_dev(struct net_device *dev,
7278 				       int (*fn)(struct net_device *dev,
7279 					 struct netdev_nested_priv *priv),
7280 				       struct netdev_nested_priv *priv)
7281 {
7282 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7283 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7284 	int ret, cur = 0;
7285 	bool ignore;
7286 
7287 	now = dev;
7288 	iter = &dev->adj_list.lower;
7289 
7290 	while (1) {
7291 		if (now != dev) {
7292 			ret = fn(now, priv);
7293 			if (ret)
7294 				return ret;
7295 		}
7296 
7297 		next = NULL;
7298 		while (1) {
7299 			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7300 			if (!ldev)
7301 				break;
7302 			if (ignore)
7303 				continue;
7304 
7305 			next = ldev;
7306 			niter = &ldev->adj_list.lower;
7307 			dev_stack[cur] = now;
7308 			iter_stack[cur++] = iter;
7309 			break;
7310 		}
7311 
7312 		if (!next) {
7313 			if (!cur)
7314 				return 0;
7315 			next = dev_stack[--cur];
7316 			niter = iter_stack[cur];
7317 		}
7318 
7319 		now = next;
7320 		iter = niter;
7321 	}
7322 
7323 	return 0;
7324 }
7325 
7326 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7327 					     struct list_head **iter)
7328 {
7329 	struct netdev_adjacent *lower;
7330 
7331 	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7332 	if (&lower->list == &dev->adj_list.lower)
7333 		return NULL;
7334 
7335 	*iter = &lower->list;
7336 
7337 	return lower->dev;
7338 }
7339 EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7340 
7341 static u8 __netdev_upper_depth(struct net_device *dev)
7342 {
7343 	struct net_device *udev;
7344 	struct list_head *iter;
7345 	u8 max_depth = 0;
7346 	bool ignore;
7347 
7348 	for (iter = &dev->adj_list.upper,
7349 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7350 	     udev;
7351 	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7352 		if (ignore)
7353 			continue;
7354 		if (max_depth < udev->upper_level)
7355 			max_depth = udev->upper_level;
7356 	}
7357 
7358 	return max_depth;
7359 }
7360 
7361 static u8 __netdev_lower_depth(struct net_device *dev)
7362 {
7363 	struct net_device *ldev;
7364 	struct list_head *iter;
7365 	u8 max_depth = 0;
7366 	bool ignore;
7367 
7368 	for (iter = &dev->adj_list.lower,
7369 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7370 	     ldev;
7371 	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7372 		if (ignore)
7373 			continue;
7374 		if (max_depth < ldev->lower_level)
7375 			max_depth = ldev->lower_level;
7376 	}
7377 
7378 	return max_depth;
7379 }
7380 
7381 static int __netdev_update_upper_level(struct net_device *dev,
7382 				       struct netdev_nested_priv *__unused)
7383 {
7384 	dev->upper_level = __netdev_upper_depth(dev) + 1;
7385 	return 0;
7386 }
7387 
7388 #ifdef CONFIG_LOCKDEP
7389 static LIST_HEAD(net_unlink_list);
7390 
7391 static void net_unlink_todo(struct net_device *dev)
7392 {
7393 	if (list_empty(&dev->unlink_list))
7394 		list_add_tail(&dev->unlink_list, &net_unlink_list);
7395 }
7396 #endif
7397 
7398 static int __netdev_update_lower_level(struct net_device *dev,
7399 				       struct netdev_nested_priv *priv)
7400 {
7401 	dev->lower_level = __netdev_lower_depth(dev) + 1;
7402 
7403 #ifdef CONFIG_LOCKDEP
7404 	if (!priv)
7405 		return 0;
7406 
7407 	if (priv->flags & NESTED_SYNC_IMM)
7408 		dev->nested_level = dev->lower_level - 1;
7409 	if (priv->flags & NESTED_SYNC_TODO)
7410 		net_unlink_todo(dev);
7411 #endif
7412 	return 0;
7413 }
7414 
7415 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7416 				  int (*fn)(struct net_device *dev,
7417 					    struct netdev_nested_priv *priv),
7418 				  struct netdev_nested_priv *priv)
7419 {
7420 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7421 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7422 	int ret, cur = 0;
7423 
7424 	now = dev;
7425 	iter = &dev->adj_list.lower;
7426 
7427 	while (1) {
7428 		if (now != dev) {
7429 			ret = fn(now, priv);
7430 			if (ret)
7431 				return ret;
7432 		}
7433 
7434 		next = NULL;
7435 		while (1) {
7436 			ldev = netdev_next_lower_dev_rcu(now, &iter);
7437 			if (!ldev)
7438 				break;
7439 
7440 			next = ldev;
7441 			niter = &ldev->adj_list.lower;
7442 			dev_stack[cur] = now;
7443 			iter_stack[cur++] = iter;
7444 			break;
7445 		}
7446 
7447 		if (!next) {
7448 			if (!cur)
7449 				return 0;
7450 			next = dev_stack[--cur];
7451 			niter = iter_stack[cur];
7452 		}
7453 
7454 		now = next;
7455 		iter = niter;
7456 	}
7457 
7458 	return 0;
7459 }
7460 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7461 
7462 /**
7463  * netdev_lower_get_first_private_rcu - Get the first ->private from the
7464  *				       lower neighbour list, RCU
7465  *				       variant
7466  * @dev: device
7467  *
7468  * Gets the first netdev_adjacent->private from the dev's lower neighbour
7469  * list. The caller must hold RCU read lock.
7470  */
7471 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7472 {
7473 	struct netdev_adjacent *lower;
7474 
7475 	lower = list_first_or_null_rcu(&dev->adj_list.lower,
7476 			struct netdev_adjacent, list);
7477 	if (lower)
7478 		return lower->private;
7479 	return NULL;
7480 }
7481 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7482 
7483 /**
7484  * netdev_master_upper_dev_get_rcu - Get master upper device
7485  * @dev: device
7486  *
7487  * Find a master upper device and return pointer to it or NULL in case
7488  * it's not there. The caller must hold the RCU read lock.
7489  */
7490 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7491 {
7492 	struct netdev_adjacent *upper;
7493 
7494 	upper = list_first_or_null_rcu(&dev->adj_list.upper,
7495 				       struct netdev_adjacent, list);
7496 	if (upper && likely(upper->master))
7497 		return upper->dev;
7498 	return NULL;
7499 }
7500 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
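
/*
 * Example (illustrative sketch; "skb" is a hypothetical sk_buff): looking up
 * the master from a context that only holds the RCU read lock, e.g. on the
 * receive path; the returned pointer is only valid inside the RCU section:
 *
 *	struct net_device *master;
 *
 *	rcu_read_lock();
 *	master = netdev_master_upper_dev_get_rcu(skb->dev);
 *	if (master)
 *		pr_debug("master: %s\n", master->name);
 *	rcu_read_unlock();
 */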
7501 
7502 static int netdev_adjacent_sysfs_add(struct net_device *dev,
7503 			      struct net_device *adj_dev,
7504 			      struct list_head *dev_list)
7505 {
7506 	char linkname[IFNAMSIZ+7];
7507 
7508 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7509 		"upper_%s" : "lower_%s", adj_dev->name);
7510 	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7511 				 linkname);
7512 }
7513 static void netdev_adjacent_sysfs_del(struct net_device *dev,
7514 			       char *name,
7515 			       struct list_head *dev_list)
7516 {
7517 	char linkname[IFNAMSIZ+7];
7518 
7519 	sprintf(linkname, dev_list == &dev->adj_list.upper ?
7520 		"upper_%s" : "lower_%s", name);
7521 	sysfs_remove_link(&(dev->dev.kobj), linkname);
7522 }
7523 
7524 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7525 						 struct net_device *adj_dev,
7526 						 struct list_head *dev_list)
7527 {
7528 	return (dev_list == &dev->adj_list.upper ||
7529 		dev_list == &dev->adj_list.lower) &&
7530 		net_eq(dev_net(dev), dev_net(adj_dev));
7531 }
7532 
7533 static int __netdev_adjacent_dev_insert(struct net_device *dev,
7534 					struct net_device *adj_dev,
7535 					struct list_head *dev_list,
7536 					void *private, bool master)
7537 {
7538 	struct netdev_adjacent *adj;
7539 	int ret;
7540 
7541 	adj = __netdev_find_adj(adj_dev, dev_list);
7542 
7543 	if (adj) {
7544 		adj->ref_nr += 1;
7545 		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7546 			 dev->name, adj_dev->name, adj->ref_nr);
7547 
7548 		return 0;
7549 	}
7550 
7551 	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7552 	if (!adj)
7553 		return -ENOMEM;
7554 
7555 	adj->dev = adj_dev;
7556 	adj->master = master;
7557 	adj->ref_nr = 1;
7558 	adj->private = private;
7559 	adj->ignore = false;
7560 	netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);
7561 
7562 	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7563 		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7564 
7565 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7566 		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7567 		if (ret)
7568 			goto free_adj;
7569 	}
7570 
7571 	/* Ensure that master link is always the first item in list. */
7572 	if (master) {
7573 		ret = sysfs_create_link(&(dev->dev.kobj),
7574 					&(adj_dev->dev.kobj), "master");
7575 		if (ret)
7576 			goto remove_symlinks;
7577 
7578 		list_add_rcu(&adj->list, dev_list);
7579 	} else {
7580 		list_add_tail_rcu(&adj->list, dev_list);
7581 	}
7582 
7583 	return 0;
7584 
7585 remove_symlinks:
7586 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7587 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7588 free_adj:
7589 	netdev_put(adj_dev, &adj->dev_tracker);
7590 	kfree(adj);
7591 
7592 	return ret;
7593 }
7594 
7595 static void __netdev_adjacent_dev_remove(struct net_device *dev,
7596 					 struct net_device *adj_dev,
7597 					 u16 ref_nr,
7598 					 struct list_head *dev_list)
7599 {
7600 	struct netdev_adjacent *adj;
7601 
7602 	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7603 		 dev->name, adj_dev->name, ref_nr);
7604 
7605 	adj = __netdev_find_adj(adj_dev, dev_list);
7606 
7607 	if (!adj) {
7608 		pr_err("Adjacency does not exist for device %s from %s\n",
7609 		       dev->name, adj_dev->name);
7610 		WARN_ON(1);
7611 		return;
7612 	}
7613 
7614 	if (adj->ref_nr > ref_nr) {
7615 		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7616 			 dev->name, adj_dev->name, ref_nr,
7617 			 adj->ref_nr - ref_nr);
7618 		adj->ref_nr -= ref_nr;
7619 		return;
7620 	}
7621 
7622 	if (adj->master)
7623 		sysfs_remove_link(&(dev->dev.kobj), "master");
7624 
7625 	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7626 		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7627 
7628 	list_del_rcu(&adj->list);
7629 	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7630 		 adj_dev->name, dev->name, adj_dev->name);
7631 	netdev_put(adj_dev, &adj->dev_tracker);
7632 	kfree_rcu(adj, rcu);
7633 }
7634 
7635 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7636 					    struct net_device *upper_dev,
7637 					    struct list_head *up_list,
7638 					    struct list_head *down_list,
7639 					    void *private, bool master)
7640 {
7641 	int ret;
7642 
7643 	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7644 					   private, master);
7645 	if (ret)
7646 		return ret;
7647 
7648 	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7649 					   private, false);
7650 	if (ret) {
7651 		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7652 		return ret;
7653 	}
7654 
7655 	return 0;
7656 }
7657 
7658 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7659 					       struct net_device *upper_dev,
7660 					       u16 ref_nr,
7661 					       struct list_head *up_list,
7662 					       struct list_head *down_list)
7663 {
7664 	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7665 	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7666 }
7667 
7668 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7669 						struct net_device *upper_dev,
7670 						void *private, bool master)
7671 {
7672 	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7673 						&dev->adj_list.upper,
7674 						&upper_dev->adj_list.lower,
7675 						private, master);
7676 }
7677 
7678 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7679 						   struct net_device *upper_dev)
7680 {
7681 	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7682 					   &dev->adj_list.upper,
7683 					   &upper_dev->adj_list.lower);
7684 }
7685 
7686 static int __netdev_upper_dev_link(struct net_device *dev,
7687 				   struct net_device *upper_dev, bool master,
7688 				   void *upper_priv, void *upper_info,
7689 				   struct netdev_nested_priv *priv,
7690 				   struct netlink_ext_ack *extack)
7691 {
7692 	struct netdev_notifier_changeupper_info changeupper_info = {
7693 		.info = {
7694 			.dev = dev,
7695 			.extack = extack,
7696 		},
7697 		.upper_dev = upper_dev,
7698 		.master = master,
7699 		.linking = true,
7700 		.upper_info = upper_info,
7701 	};
7702 	struct net_device *master_dev;
7703 	int ret = 0;
7704 
7705 	ASSERT_RTNL();
7706 
7707 	if (dev == upper_dev)
7708 		return -EBUSY;
7709 
7710 	/* To prevent loops, check that dev is not an upper device of upper_dev. */
7711 	if (__netdev_has_upper_dev(upper_dev, dev))
7712 		return -EBUSY;
7713 
7714 	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7715 		return -EMLINK;
7716 
7717 	if (!master) {
7718 		if (__netdev_has_upper_dev(dev, upper_dev))
7719 			return -EEXIST;
7720 	} else {
7721 		master_dev = __netdev_master_upper_dev_get(dev);
7722 		if (master_dev)
7723 			return master_dev == upper_dev ? -EEXIST : -EBUSY;
7724 	}
7725 
7726 	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7727 					    &changeupper_info.info);
7728 	ret = notifier_to_errno(ret);
7729 	if (ret)
7730 		return ret;
7731 
7732 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7733 						   master);
7734 	if (ret)
7735 		return ret;
7736 
7737 	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7738 					    &changeupper_info.info);
7739 	ret = notifier_to_errno(ret);
7740 	if (ret)
7741 		goto rollback;
7742 
7743 	__netdev_update_upper_level(dev, NULL);
7744 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7745 
7746 	__netdev_update_lower_level(upper_dev, priv);
7747 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7748 				    priv);
7749 
7750 	return 0;
7751 
7752 rollback:
7753 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7754 
7755 	return ret;
7756 }
7757 
7758 /**
7759  * netdev_upper_dev_link - Add a link to the upper device
7760  * @dev: device
7761  * @upper_dev: new upper device
7762  * @extack: netlink extended ack
7763  *
7764  * Adds a link to device which is upper to this one. The caller must hold
7765  * the RTNL lock. On a failure a negative errno code is returned.
7766  * On success the reference counts are adjusted and the function
7767  * returns zero.
7768  */
7769 int netdev_upper_dev_link(struct net_device *dev,
7770 			  struct net_device *upper_dev,
7771 			  struct netlink_ext_ack *extack)
7772 {
7773 	struct netdev_nested_priv priv = {
7774 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7775 		.data = NULL,
7776 	};
7777 
7778 	return __netdev_upper_dev_link(dev, upper_dev, false,
7779 				       NULL, NULL, &priv, extack);
7780 }
7781 EXPORT_SYMBOL(netdev_upper_dev_link);
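
/*
 * Example (illustrative sketch; "vlan_dev" and "real_dev" are hypothetical):
 * a stacked device typically links itself above its lower device at setup
 * time and removes the link again on teardown, all under RTNL:
 *
 *	err = netdev_upper_dev_link(real_dev, vlan_dev, extack);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(real_dev, vlan_dev);
 */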
7782 
7783 /**
7784  * netdev_master_upper_dev_link - Add a master link to the upper device
7785  * @dev: device
7786  * @upper_dev: new upper device
7787  * @upper_priv: upper device private
7788  * @upper_info: upper info to be passed down via notifier
7789  * @extack: netlink extended ack
7790  *
7791  * Adds a link to device which is upper to this one. In this case, only
7792  * one master upper device can be linked, although other non-master devices
7793  * might be linked as well. The caller must hold the RTNL lock.
7794  * On a failure a negative errno code is returned. On success the reference
7795  * counts are adjusted and the function returns zero.
7796  */
7797 int netdev_master_upper_dev_link(struct net_device *dev,
7798 				 struct net_device *upper_dev,
7799 				 void *upper_priv, void *upper_info,
7800 				 struct netlink_ext_ack *extack)
7801 {
7802 	struct netdev_nested_priv priv = {
7803 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7804 		.data = NULL,
7805 	};
7806 
7807 	return __netdev_upper_dev_link(dev, upper_dev, true,
7808 				       upper_priv, upper_info, &priv, extack);
7809 }
7810 EXPORT_SYMBOL(netdev_master_upper_dev_link);
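
/*
 * Example (illustrative sketch; "bond" and "slave" are hypothetical devices,
 * and both private and info pointers are left NULL here): enslaving a port
 * to a master such as a bond or bridge, and undoing it on release:
 *
 *	err = netdev_master_upper_dev_link(slave, bond, NULL, NULL, extack);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(slave, bond);
 */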
7811 
7812 static void __netdev_upper_dev_unlink(struct net_device *dev,
7813 				      struct net_device *upper_dev,
7814 				      struct netdev_nested_priv *priv)
7815 {
7816 	struct netdev_notifier_changeupper_info changeupper_info = {
7817 		.info = {
7818 			.dev = dev,
7819 		},
7820 		.upper_dev = upper_dev,
7821 		.linking = false,
7822 	};
7823 
7824 	ASSERT_RTNL();
7825 
7826 	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7827 
7828 	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7829 				      &changeupper_info.info);
7830 
7831 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7832 
7833 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7834 				      &changeupper_info.info);
7835 
7836 	__netdev_update_upper_level(dev, NULL);
7837 	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7838 
7839 	__netdev_update_lower_level(upper_dev, priv);
7840 	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7841 				    priv);
7842 }
7843 
7844 /**
7845  * netdev_upper_dev_unlink - Removes a link to upper device
7846  * @dev: device
7847  * @upper_dev: upper device to remove the link to
7848  *
7849  * Removes a link to device which is upper to this one. The caller must hold
7850  * the RTNL lock.
7851  */
7852 void netdev_upper_dev_unlink(struct net_device *dev,
7853 			     struct net_device *upper_dev)
7854 {
7855 	struct netdev_nested_priv priv = {
7856 		.flags = NESTED_SYNC_TODO,
7857 		.data = NULL,
7858 	};
7859 
7860 	__netdev_upper_dev_unlink(dev, upper_dev, &priv);
7861 }
7862 EXPORT_SYMBOL(netdev_upper_dev_unlink);
7863 
7864 static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
7865 				      struct net_device *lower_dev,
7866 				      bool val)
7867 {
7868 	struct netdev_adjacent *adj;
7869 
7870 	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7871 	if (adj)
7872 		adj->ignore = val;
7873 
7874 	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7875 	if (adj)
7876 		adj->ignore = val;
7877 }
7878 
7879 static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
7880 					struct net_device *lower_dev)
7881 {
7882 	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
7883 }
7884 
7885 static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
7886 				       struct net_device *lower_dev)
7887 {
7888 	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
7889 }
7890 
7891 int netdev_adjacent_change_prepare(struct net_device *old_dev,
7892 				   struct net_device *new_dev,
7893 				   struct net_device *dev,
7894 				   struct netlink_ext_ack *extack)
7895 {
7896 	struct netdev_nested_priv priv = {
7897 		.flags = 0,
7898 		.data = NULL,
7899 	};
7900 	int err;
7901 
7902 	if (!new_dev)
7903 		return 0;
7904 
7905 	if (old_dev && new_dev != old_dev)
7906 		netdev_adjacent_dev_disable(dev, old_dev);
7907 	err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
7908 				      extack);
7909 	if (err) {
7910 		if (old_dev && new_dev != old_dev)
7911 			netdev_adjacent_dev_enable(dev, old_dev);
7912 		return err;
7913 	}
7914 
7915 	return 0;
7916 }
7917 EXPORT_SYMBOL(netdev_adjacent_change_prepare);
7918 
7919 void netdev_adjacent_change_commit(struct net_device *old_dev,
7920 				   struct net_device *new_dev,
7921 				   struct net_device *dev)
7922 {
7923 	struct netdev_nested_priv priv = {
7924 		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7925 		.data = NULL,
7926 	};
7927 
7928 	if (!new_dev || !old_dev)
7929 		return;
7930 
7931 	if (new_dev == old_dev)
7932 		return;
7933 
7934 	netdev_adjacent_dev_enable(dev, old_dev);
7935 	__netdev_upper_dev_unlink(old_dev, dev, &priv);
7936 }
7937 EXPORT_SYMBOL(netdev_adjacent_change_commit);
7938 
7939 void netdev_adjacent_change_abort(struct net_device *old_dev,
7940 				  struct net_device *new_dev,
7941 				  struct net_device *dev)
7942 {
7943 	struct netdev_nested_priv priv = {
7944 		.flags = 0,
7945 		.data = NULL,
7946 	};
7947 
7948 	if (!new_dev)
7949 		return;
7950 
7951 	if (old_dev && new_dev != old_dev)
7952 		netdev_adjacent_dev_enable(dev, old_dev);
7953 
7954 	__netdev_upper_dev_unlink(new_dev, dev, &priv);
7955 }
7956 EXPORT_SYMBOL(netdev_adjacent_change_abort);
7957 
7958 /**
7959  * netdev_bonding_info_change - Dispatch event about slave change
7960  * @dev: device
7961  * @bonding_info: info to dispatch
7962  *
7963  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
7964  * The caller must hold the RTNL lock.
7965  */
7966 void netdev_bonding_info_change(struct net_device *dev,
7967 				struct netdev_bonding_info *bonding_info)
7968 {
7969 	struct netdev_notifier_bonding_info info = {
7970 		.info.dev = dev,
7971 	};
7972 
7973 	memcpy(&info.bonding_info, bonding_info,
7974 	       sizeof(struct netdev_bonding_info));
7975 	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7976 				      &info.info);
7977 }
7978 EXPORT_SYMBOL(netdev_bonding_info_change);
7979 
7980 static int netdev_offload_xstats_enable_l3(struct net_device *dev,
7981 					   struct netlink_ext_ack *extack)
7982 {
7983 	struct netdev_notifier_offload_xstats_info info = {
7984 		.info.dev = dev,
7985 		.info.extack = extack,
7986 		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
7987 	};
7988 	int err;
7989 	int rc;
7990 
7991 	dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
7992 					 GFP_KERNEL);
7993 	if (!dev->offload_xstats_l3)
7994 		return -ENOMEM;
7995 
7996 	rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
7997 						  NETDEV_OFFLOAD_XSTATS_DISABLE,
7998 						  &info.info);
7999 	err = notifier_to_errno(rc);
8000 	if (err)
8001 		goto free_stats;
8002 
8003 	return 0;
8004 
8005 free_stats:
8006 	kfree(dev->offload_xstats_l3);
8007 	dev->offload_xstats_l3 = NULL;
8008 	return err;
8009 }
8010 
8011 int netdev_offload_xstats_enable(struct net_device *dev,
8012 				 enum netdev_offload_xstats_type type,
8013 				 struct netlink_ext_ack *extack)
8014 {
8015 	ASSERT_RTNL();
8016 
8017 	if (netdev_offload_xstats_enabled(dev, type))
8018 		return -EALREADY;
8019 
8020 	switch (type) {
8021 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8022 		return netdev_offload_xstats_enable_l3(dev, extack);
8023 	}
8024 
8025 	WARN_ON(1);
8026 	return -EINVAL;
8027 }
8028 EXPORT_SYMBOL(netdev_offload_xstats_enable);
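
/*
 * Example (illustrative sketch): enabling collection of hardware-offloaded
 * L3 statistics for a device, typically in response to a user space request,
 * and disabling it again later; both calls require the RTNL lock:
 *
 *	err = netdev_offload_xstats_enable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
 *					   extack);
 *	if (err && err != -EALREADY)
 *		return err;
 *	...
 *	netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
 */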
8029 
8030 static void netdev_offload_xstats_disable_l3(struct net_device *dev)
8031 {
8032 	struct netdev_notifier_offload_xstats_info info = {
8033 		.info.dev = dev,
8034 		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8035 	};
8036 
8037 	call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
8038 				      &info.info);
8039 	kfree(dev->offload_xstats_l3);
8040 	dev->offload_xstats_l3 = NULL;
8041 }
8042 
8043 int netdev_offload_xstats_disable(struct net_device *dev,
8044 				  enum netdev_offload_xstats_type type)
8045 {
8046 	ASSERT_RTNL();
8047 
8048 	if (!netdev_offload_xstats_enabled(dev, type))
8049 		return -EALREADY;
8050 
8051 	switch (type) {
8052 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8053 		netdev_offload_xstats_disable_l3(dev);
8054 		return 0;
8055 	}
8056 
8057 	WARN_ON(1);
8058 	return -EINVAL;
8059 }
8060 EXPORT_SYMBOL(netdev_offload_xstats_disable);
8061 
8062 static void netdev_offload_xstats_disable_all(struct net_device *dev)
8063 {
8064 	netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
8065 }
8066 
8067 static struct rtnl_hw_stats64 *
8068 netdev_offload_xstats_get_ptr(const struct net_device *dev,
8069 			      enum netdev_offload_xstats_type type)
8070 {
8071 	switch (type) {
8072 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8073 		return dev->offload_xstats_l3;
8074 	}
8075 
8076 	WARN_ON(1);
8077 	return NULL;
8078 }
8079 
8080 bool netdev_offload_xstats_enabled(const struct net_device *dev,
8081 				   enum netdev_offload_xstats_type type)
8082 {
8083 	ASSERT_RTNL();
8084 
8085 	return netdev_offload_xstats_get_ptr(dev, type);
8086 }
8087 EXPORT_SYMBOL(netdev_offload_xstats_enabled);
8088 
8089 struct netdev_notifier_offload_xstats_ru {
8090 	bool used;
8091 };
8092 
8093 struct netdev_notifier_offload_xstats_rd {
8094 	struct rtnl_hw_stats64 stats;
8095 	bool used;
8096 };
8097 
8098 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
8099 				  const struct rtnl_hw_stats64 *src)
8100 {
8101 	dest->rx_packets	  += src->rx_packets;
8102 	dest->tx_packets	  += src->tx_packets;
8103 	dest->rx_bytes		  += src->rx_bytes;
8104 	dest->tx_bytes		  += src->tx_bytes;
8105 	dest->rx_errors		  += src->rx_errors;
8106 	dest->tx_errors		  += src->tx_errors;
8107 	dest->rx_dropped	  += src->rx_dropped;
8108 	dest->tx_dropped	  += src->tx_dropped;
8109 	dest->multicast		  += src->multicast;
8110 }
8111 
8112 static int netdev_offload_xstats_get_used(struct net_device *dev,
8113 					  enum netdev_offload_xstats_type type,
8114 					  bool *p_used,
8115 					  struct netlink_ext_ack *extack)
8116 {
8117 	struct netdev_notifier_offload_xstats_ru report_used = {};
8118 	struct netdev_notifier_offload_xstats_info info = {
8119 		.info.dev = dev,
8120 		.info.extack = extack,
8121 		.type = type,
8122 		.report_used = &report_used,
8123 	};
8124 	int rc;
8125 
8126 	WARN_ON(!netdev_offload_xstats_enabled(dev, type));
8127 	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
8128 					   &info.info);
8129 	*p_used = report_used.used;
8130 	return notifier_to_errno(rc);
8131 }
8132 
8133 static int netdev_offload_xstats_get_stats(struct net_device *dev,
8134 					   enum netdev_offload_xstats_type type,
8135 					   struct rtnl_hw_stats64 *p_stats,
8136 					   bool *p_used,
8137 					   struct netlink_ext_ack *extack)
8138 {
8139 	struct netdev_notifier_offload_xstats_rd report_delta = {};
8140 	struct netdev_notifier_offload_xstats_info info = {
8141 		.info.dev = dev,
8142 		.info.extack = extack,
8143 		.type = type,
8144 		.report_delta = &report_delta,
8145 	};
8146 	struct rtnl_hw_stats64 *stats;
8147 	int rc;
8148 
8149 	stats = netdev_offload_xstats_get_ptr(dev, type);
8150 	if (WARN_ON(!stats))
8151 		return -EINVAL;
8152 
8153 	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
8154 					   &info.info);
8155 
8156 	/* Cache whatever we got, even if there was an error, otherwise the
8157 	 * successful stats retrievals would get lost.
8158 	 */
8159 	netdev_hw_stats64_add(stats, &report_delta.stats);
8160 
8161 	if (p_stats)
8162 		*p_stats = *stats;
8163 	*p_used = report_delta.used;
8164 
8165 	return notifier_to_errno(rc);
8166 }
8167 
8168 int netdev_offload_xstats_get(struct net_device *dev,
8169 			      enum netdev_offload_xstats_type type,
8170 			      struct rtnl_hw_stats64 *p_stats, bool *p_used,
8171 			      struct netlink_ext_ack *extack)
8172 {
8173 	ASSERT_RTNL();
8174 
8175 	if (p_stats)
8176 		return netdev_offload_xstats_get_stats(dev, type, p_stats,
8177 						       p_used, extack);
8178 	else
8179 		return netdev_offload_xstats_get_used(dev, type, p_used,
8180 						      extack);
8181 }
8182 EXPORT_SYMBOL(netdev_offload_xstats_get);
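
/*
 * Example (illustrative sketch): retrieving the accumulated hardware stats
 * together with the "used" flag under RTNL; passing a NULL p_stats only asks
 * the drivers whether the counters are in use:
 *
 *	struct rtnl_hw_stats64 stats;
 *	bool used;
 *
 *	err = netdev_offload_xstats_get(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
 *					&stats, &used, extack);
 *	if (!err && used)
 *		pr_debug("rx_packets: %llu\n", stats.rx_packets);
 */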
8183 
8184 void
8185 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
8186 				   const struct rtnl_hw_stats64 *stats)
8187 {
8188 	report_delta->used = true;
8189 	netdev_hw_stats64_add(&report_delta->stats, stats);
8190 }
8191 EXPORT_SYMBOL(netdev_offload_xstats_report_delta);
8192 
8193 void
8194 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
8195 {
8196 	report_used->used = true;
8197 }
8198 EXPORT_SYMBOL(netdev_offload_xstats_report_used);
8199 
8200 void netdev_offload_xstats_push_delta(struct net_device *dev,
8201 				      enum netdev_offload_xstats_type type,
8202 				      const struct rtnl_hw_stats64 *p_stats)
8203 {
8204 	struct rtnl_hw_stats64 *stats;
8205 
8206 	ASSERT_RTNL();
8207 
8208 	stats = netdev_offload_xstats_get_ptr(dev, type);
8209 	if (WARN_ON(!stats))
8210 		return;
8211 
8212 	netdev_hw_stats64_add(stats, p_stats);
8213 }
8214 EXPORT_SYMBOL(netdev_offload_xstats_push_delta);
8215 
8216 /**
8217  * netdev_get_xmit_slave - Get the xmit slave of master device
8218  * @dev: device
8219  * @skb: The packet
8220  * @all_slaves: assume all the slaves are active
8221  *
8222  * The reference counters are not incremented so the caller must be
8223  * careful with locks. The caller must hold the RCU read lock.
8224  * %NULL is returned if no slave is found.
8225  */
8226 
8227 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8228 					 struct sk_buff *skb,
8229 					 bool all_slaves)
8230 {
8231 	const struct net_device_ops *ops = dev->netdev_ops;
8232 
8233 	if (!ops->ndo_get_xmit_slave)
8234 		return NULL;
8235 	return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8236 }
8237 EXPORT_SYMBOL(netdev_get_xmit_slave);
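
/*
 * Example (illustrative sketch; "bond_dev" and "skb" are hypothetical):
 * resolving the slave a master such as a bond would transmit a packet on;
 * the result must only be dereferenced under the RCU read lock:
 *
 *	struct net_device *slave;
 *
 *	rcu_read_lock();
 *	slave = netdev_get_xmit_slave(bond_dev, skb, false);
 *	if (slave)
 *		pr_debug("would transmit via %s\n", slave->name);
 *	rcu_read_unlock();
 */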
8238 
8239 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8240 						  struct sock *sk)
8241 {
8242 	const struct net_device_ops *ops = dev->netdev_ops;
8243 
8244 	if (!ops->ndo_sk_get_lower_dev)
8245 		return NULL;
8246 	return ops->ndo_sk_get_lower_dev(dev, sk);
8247 }
8248 
8249 /**
8250  * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8251  * @dev: device
8252  * @sk: the socket
8253  *
8254  * %NULL is returned if no lower device is found.
8255  */
8256 
8257 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
8258 					    struct sock *sk)
8259 {
8260 	struct net_device *lower;
8261 
8262 	lower = netdev_sk_get_lower_dev(dev, sk);
8263 	while (lower) {
8264 		dev = lower;
8265 		lower = netdev_sk_get_lower_dev(dev, sk);
8266 	}
8267 
8268 	return dev;
8269 }
8270 EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
8271 
8272 static void netdev_adjacent_add_links(struct net_device *dev)
8273 {
8274 	struct netdev_adjacent *iter;
8275 
8276 	struct net *net = dev_net(dev);
8277 
8278 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8279 		if (!net_eq(net, dev_net(iter->dev)))
8280 			continue;
8281 		netdev_adjacent_sysfs_add(iter->dev, dev,
8282 					  &iter->dev->adj_list.lower);
8283 		netdev_adjacent_sysfs_add(dev, iter->dev,
8284 					  &dev->adj_list.upper);
8285 	}
8286 
8287 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8288 		if (!net_eq(net, dev_net(iter->dev)))
8289 			continue;
8290 		netdev_adjacent_sysfs_add(iter->dev, dev,
8291 					  &iter->dev->adj_list.upper);
8292 		netdev_adjacent_sysfs_add(dev, iter->dev,
8293 					  &dev->adj_list.lower);
8294 	}
8295 }
8296 
8297 static void netdev_adjacent_del_links(struct net_device *dev)
8298 {
8299 	struct netdev_adjacent *iter;
8300 
8301 	struct net *net = dev_net(dev);
8302 
8303 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8304 		if (!net_eq(net, dev_net(iter->dev)))
8305 			continue;
8306 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
8307 					  &iter->dev->adj_list.lower);
8308 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
8309 					  &dev->adj_list.upper);
8310 	}
8311 
8312 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8313 		if (!net_eq(net, dev_net(iter->dev)))
8314 			continue;
8315 		netdev_adjacent_sysfs_del(iter->dev, dev->name,
8316 					  &iter->dev->adj_list.upper);
8317 		netdev_adjacent_sysfs_del(dev, iter->dev->name,
8318 					  &dev->adj_list.lower);
8319 	}
8320 }
8321 
8322 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
8323 {
8324 	struct netdev_adjacent *iter;
8325 
8326 	struct net *net = dev_net(dev);
8327 
8328 	list_for_each_entry(iter, &dev->adj_list.upper, list) {
8329 		if (!net_eq(net, dev_net(iter->dev)))
8330 			continue;
8331 		netdev_adjacent_sysfs_del(iter->dev, oldname,
8332 					  &iter->dev->adj_list.lower);
8333 		netdev_adjacent_sysfs_add(iter->dev, dev,
8334 					  &iter->dev->adj_list.lower);
8335 	}
8336 
8337 	list_for_each_entry(iter, &dev->adj_list.lower, list) {
8338 		if (!net_eq(net, dev_net(iter->dev)))
8339 			continue;
8340 		netdev_adjacent_sysfs_del(iter->dev, oldname,
8341 					  &iter->dev->adj_list.upper);
8342 		netdev_adjacent_sysfs_add(iter->dev, dev,
8343 					  &iter->dev->adj_list.upper);
8344 	}
8345 }
8346 
8347 void *netdev_lower_dev_get_private(struct net_device *dev,
8348 				   struct net_device *lower_dev)
8349 {
8350 	struct netdev_adjacent *lower;
8351 
8352 	if (!lower_dev)
8353 		return NULL;
8354 	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8355 	if (!lower)
8356 		return NULL;
8357 
8358 	return lower->private;
8359 }
8360 EXPORT_SYMBOL(netdev_lower_dev_get_private);
8361 
8362 
8363 /**
8364  * netdev_lower_state_changed - Dispatch event about lower device state change
8365  * @lower_dev: device
8366  * @lower_state_info: state to dispatch
8367  *
8368  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8369  * The caller must hold the RTNL lock.
8370  */
8371 void netdev_lower_state_changed(struct net_device *lower_dev,
8372 				void *lower_state_info)
8373 {
8374 	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8375 		.info.dev = lower_dev,
8376 	};
8377 
8378 	ASSERT_RTNL();
8379 	changelowerstate_info.lower_state_info = lower_state_info;
8380 	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
8381 				      &changelowerstate_info.info);
8382 }
8383 EXPORT_SYMBOL(netdev_lower_state_changed);
8384 
8385 static void dev_change_rx_flags(struct net_device *dev, int flags)
8386 {
8387 	const struct net_device_ops *ops = dev->netdev_ops;
8388 
8389 	if (ops->ndo_change_rx_flags)
8390 		ops->ndo_change_rx_flags(dev, flags);
8391 }
8392 
8393 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8394 {
8395 	unsigned int old_flags = dev->flags;
8396 	kuid_t uid;
8397 	kgid_t gid;
8398 
8399 	ASSERT_RTNL();
8400 
8401 	dev->flags |= IFF_PROMISC;
8402 	dev->promiscuity += inc;
8403 	if (dev->promiscuity == 0) {
8404 		/*
8405 		 * Avoid overflow.
8406 		 * If inc causes overflow, leave promiscuity untouched and return an error.
8407 		 */
8408 		if (inc < 0)
8409 			dev->flags &= ~IFF_PROMISC;
8410 		else {
8411 			dev->promiscuity -= inc;
8412 			netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
8413 			return -EOVERFLOW;
8414 		}
8415 	}
8416 	if (dev->flags != old_flags) {
8417 		netdev_info(dev, "%s promiscuous mode\n",
8418 			    dev->flags & IFF_PROMISC ? "entered" : "left");
8419 		if (audit_enabled) {
8420 			current_uid_gid(&uid, &gid);
8421 			audit_log(audit_context(), GFP_ATOMIC,
8422 				  AUDIT_ANOM_PROMISCUOUS,
8423 				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8424 				  dev->name, (dev->flags & IFF_PROMISC),
8425 				  (old_flags & IFF_PROMISC),
8426 				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
8427 				  from_kuid(&init_user_ns, uid),
8428 				  from_kgid(&init_user_ns, gid),
8429 				  audit_get_sessionid(current));
8430 		}
8431 
8432 		dev_change_rx_flags(dev, IFF_PROMISC);
8433 	}
8434 	if (notify)
8435 		__dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
8436 	return 0;
8437 }
8438 
8439 /**
8440  *	dev_set_promiscuity	- update promiscuity count on a device
8441  *	@dev: device
8442  *	@inc: modifier
8443  *
8444  *	Add or remove promiscuity from a device. While the count in the device
8445  *	remains above zero the interface remains promiscuous. Once it hits zero
8446  *	the device reverts to normal filtering operation. A negative @inc
8447  *	value is used to drop promiscuity on the device.
8448  *	Return 0 if successful or a negative errno code on error.
8449  */
8450 int dev_set_promiscuity(struct net_device *dev, int inc)
8451 {
8452 	unsigned int old_flags = dev->flags;
8453 	int err;
8454 
8455 	err = __dev_set_promiscuity(dev, inc, true);
8456 	if (err < 0)
8457 		return err;
8458 	if (dev->flags != old_flags)
8459 		dev_set_rx_mode(dev);
8460 	return err;
8461 }
8462 EXPORT_SYMBOL(dev_set_promiscuity);
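
/*
 * Example (illustrative sketch): a feature that needs to see all traffic
 * takes a promiscuity reference while active and drops it when done, so
 * that it composes with other users of the counter; RTNL must be held:
 *
 *	ASSERT_RTNL();
 *	err = dev_set_promiscuity(dev, 1);
 *	if (err < 0)
 *		return err;
 *	...
 *	dev_set_promiscuity(dev, -1);
 */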
8463 
8464 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8465 {
8466 	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8467 
8468 	ASSERT_RTNL();
8469 
8470 	dev->flags |= IFF_ALLMULTI;
8471 	dev->allmulti += inc;
8472 	if (dev->allmulti == 0) {
8473 		/*
8474 		 * Avoid overflow.
8475 		 * If inc causes overflow, leave allmulti untouched and return an error.
8476 		 */
8477 		if (inc < 0)
8478 			dev->flags &= ~IFF_ALLMULTI;
8479 		else {
8480 			dev->allmulti -= inc;
8481 			netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
8482 			return -EOVERFLOW;
8483 		}
8484 	}
8485 	if (dev->flags ^ old_flags) {
8486 		netdev_info(dev, "%s allmulticast mode\n",
8487 			    dev->flags & IFF_ALLMULTI ? "entered" : "left");
8488 		dev_change_rx_flags(dev, IFF_ALLMULTI);
8489 		dev_set_rx_mode(dev);
8490 		if (notify)
8491 			__dev_notify_flags(dev, old_flags,
8492 					   dev->gflags ^ old_gflags, 0, NULL);
8493 	}
8494 	return 0;
8495 }
8496 
8497 /**
8498  *	dev_set_allmulti	- update allmulti count on a device
8499  *	@dev: device
8500  *	@inc: modifier
8501  *
8502  *	Add or remove reception of all multicast frames to a device. While the
8503  *	count in the device remains above zero the interface remains listening
8504  *	to all multicast frames. Once it hits zero the device reverts to normal
8505  *	filtering operation. A negative @inc value is used to drop the counter
8506  *	when releasing a resource needing all multicasts.
8507  *	Return 0 if successful or a negative errno code on error.
8508  */
8509 
8510 int dev_set_allmulti(struct net_device *dev, int inc)
8511 {
8512 	return __dev_set_allmulti(dev, inc, true);
8513 }
8514 EXPORT_SYMBOL(dev_set_allmulti);
8515 
8516 /*
8517  *	Upload unicast and multicast address lists to device and
8518  *	configure RX filtering. When the device doesn't support unicast
8519  *	filtering it is put in promiscuous mode while unicast addresses
8520  *	are present.
8521  */
8522 void __dev_set_rx_mode(struct net_device *dev)
8523 {
8524 	const struct net_device_ops *ops = dev->netdev_ops;
8525 
8526 	/* dev_open will call this function so the list will stay sane. */
8527 	if (!(dev->flags&IFF_UP))
8528 		return;
8529 
8530 	if (!netif_device_present(dev))
8531 		return;
8532 
8533 	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8534 		/* Unicast address changes may only happen under the rtnl,
8535 		 * therefore calling __dev_set_promiscuity here is safe.
8536 		 */
8537 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8538 			__dev_set_promiscuity(dev, 1, false);
8539 			dev->uc_promisc = true;
8540 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8541 			__dev_set_promiscuity(dev, -1, false);
8542 			dev->uc_promisc = false;
8543 		}
8544 	}
8545 
8546 	if (ops->ndo_set_rx_mode)
8547 		ops->ndo_set_rx_mode(dev);
8548 }
8549 
8550 void dev_set_rx_mode(struct net_device *dev)
8551 {
8552 	netif_addr_lock_bh(dev);
8553 	__dev_set_rx_mode(dev);
8554 	netif_addr_unlock_bh(dev);
8555 }
8556 
8557 /**
8558  *	dev_get_flags - get flags reported to userspace
8559  *	@dev: device
8560  *
8561  *	Get the combination of flag bits exported through APIs to userspace.
8562  */
8563 unsigned int dev_get_flags(const struct net_device *dev)
8564 {
8565 	unsigned int flags;
8566 
8567 	flags = (dev->flags & ~(IFF_PROMISC |
8568 				IFF_ALLMULTI |
8569 				IFF_RUNNING |
8570 				IFF_LOWER_UP |
8571 				IFF_DORMANT)) |
8572 		(dev->gflags & (IFF_PROMISC |
8573 				IFF_ALLMULTI));
8574 
8575 	if (netif_running(dev)) {
8576 		if (netif_oper_up(dev))
8577 			flags |= IFF_RUNNING;
8578 		if (netif_carrier_ok(dev))
8579 			flags |= IFF_LOWER_UP;
8580 		if (netif_dormant(dev))
8581 			flags |= IFF_DORMANT;
8582 	}
8583 
8584 	return flags;
8585 }
8586 EXPORT_SYMBOL(dev_get_flags);
8587 
8588 int __dev_change_flags(struct net_device *dev, unsigned int flags,
8589 		       struct netlink_ext_ack *extack)
8590 {
8591 	unsigned int old_flags = dev->flags;
8592 	int ret;
8593 
8594 	ASSERT_RTNL();
8595 
8596 	/*
8597 	 *	Set the flags on our device.
8598 	 */
8599 
8600 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8601 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8602 			       IFF_AUTOMEDIA)) |
8603 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8604 				    IFF_ALLMULTI));
8605 
8606 	/*
8607 	 *	Load in the correct multicast list now the flags have changed.
8608 	 */
8609 
8610 	if ((old_flags ^ flags) & IFF_MULTICAST)
8611 		dev_change_rx_flags(dev, IFF_MULTICAST);
8612 
8613 	dev_set_rx_mode(dev);
8614 
8615 	/*
8616 	 *	Have we downed the interface? We handle IFF_UP ourselves
8617 	 *	according to user attempts to set it, rather than blindly
8618 	 *	setting it.
8619 	 */
8620 
8621 	ret = 0;
8622 	if ((old_flags ^ flags) & IFF_UP) {
8623 		if (old_flags & IFF_UP)
8624 			__dev_close(dev);
8625 		else
8626 			ret = __dev_open(dev, extack);
8627 	}
8628 
8629 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
8630 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
8631 		unsigned int old_flags = dev->flags;
8632 
8633 		dev->gflags ^= IFF_PROMISC;
8634 
8635 		if (__dev_set_promiscuity(dev, inc, false) >= 0)
8636 			if (dev->flags != old_flags)
8637 				dev_set_rx_mode(dev);
8638 	}
8639 
8640 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8641 	 * is important. Some (broken) drivers set IFF_PROMISC when
8642 	 * IFF_ALLMULTI is requested, without asking us and without reporting.
8643 	 */
8644 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8645 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8646 
8647 		dev->gflags ^= IFF_ALLMULTI;
8648 		__dev_set_allmulti(dev, inc, false);
8649 	}
8650 
8651 	return ret;
8652 }
8653 
8654 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8655 			unsigned int gchanges, u32 portid,
8656 			const struct nlmsghdr *nlh)
8657 {
8658 	unsigned int changes = dev->flags ^ old_flags;
8659 
8660 	if (gchanges)
8661 		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh);
8662 
8663 	if (changes & IFF_UP) {
8664 		if (dev->flags & IFF_UP)
8665 			call_netdevice_notifiers(NETDEV_UP, dev);
8666 		else
8667 			call_netdevice_notifiers(NETDEV_DOWN, dev);
8668 	}
8669 
8670 	if (dev->flags & IFF_UP &&
8671 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8672 		struct netdev_notifier_change_info change_info = {
8673 			.info = {
8674 				.dev = dev,
8675 			},
8676 			.flags_changed = changes,
8677 		};
8678 
8679 		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8680 	}
8681 }
8682 
8683 /**
8684  *	dev_change_flags - change device settings
8685  *	@dev: device
8686  *	@flags: device state flags
8687  *	@extack: netlink extended ack
8688  *
8689  *	Change settings on device based state flags. The flags are
8690  *	in the userspace exported format.
8691  */
8692 int dev_change_flags(struct net_device *dev, unsigned int flags,
8693 		     struct netlink_ext_ack *extack)
8694 {
8695 	int ret;
8696 	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8697 
8698 	ret = __dev_change_flags(dev, flags, extack);
8699 	if (ret < 0)
8700 		return ret;
8701 
8702 	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8703 	__dev_notify_flags(dev, old_flags, changes, 0, NULL);
8704 	return ret;
8705 }
8706 EXPORT_SYMBOL(dev_change_flags);
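
/*
 * Illustrative sketch (not part of the original source): administratively
 * bringing an interface up from kernel code goes through dev_change_flags()
 * with the userspace-format flags.  RTNL must be held; a NULL extack is
 * acceptable when no netlink request context exists.  The function name is
 * hypothetical.
 *
 *	static int example_bring_up(struct net_device *dev)
 *	{
 *		ASSERT_RTNL();
 *		return dev_change_flags(dev, dev->flags | IFF_UP, NULL);
 *	}
 */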
8707 
8708 int __dev_set_mtu(struct net_device *dev, int new_mtu)
8709 {
8710 	const struct net_device_ops *ops = dev->netdev_ops;
8711 
8712 	if (ops->ndo_change_mtu)
8713 		return ops->ndo_change_mtu(dev, new_mtu);
8714 
8715 	/* Pairs with all the lockless reads of dev->mtu in the stack */
8716 	WRITE_ONCE(dev->mtu, new_mtu);
8717 	return 0;
8718 }
8719 EXPORT_SYMBOL(__dev_set_mtu);
8720 
8721 int dev_validate_mtu(struct net_device *dev, int new_mtu,
8722 		     struct netlink_ext_ack *extack)
8723 {
8724 	/* MTU must be positive, and in range */
8725 	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8726 		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8727 		return -EINVAL;
8728 	}
8729 
8730 	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8731 		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8732 		return -EINVAL;
8733 	}
8734 	return 0;
8735 }
8736 
8737 /**
8738  *	dev_set_mtu_ext - Change maximum transfer unit
8739  *	@dev: device
8740  *	@new_mtu: new transfer unit
8741  *	@extack: netlink extended ack
8742  *
8743  *	Change the maximum transfer size of the network device.
8744  */
8745 int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8746 		    struct netlink_ext_ack *extack)
8747 {
8748 	int err, orig_mtu;
8749 
8750 	if (new_mtu == dev->mtu)
8751 		return 0;
8752 
8753 	err = dev_validate_mtu(dev, new_mtu, extack);
8754 	if (err)
8755 		return err;
8756 
8757 	if (!netif_device_present(dev))
8758 		return -ENODEV;
8759 
8760 	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8761 	err = notifier_to_errno(err);
8762 	if (err)
8763 		return err;
8764 
8765 	orig_mtu = dev->mtu;
8766 	err = __dev_set_mtu(dev, new_mtu);
8767 
8768 	if (!err) {
8769 		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8770 						   orig_mtu);
8771 		err = notifier_to_errno(err);
8772 		if (err) {
8773 			/* setting mtu back and notifying everyone again,
8774 			 * so that they have a chance to revert changes.
8775 			 */
8776 			__dev_set_mtu(dev, orig_mtu);
8777 			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8778 						     new_mtu);
8779 		}
8780 	}
8781 	return err;
8782 }
8783 
8784 int dev_set_mtu(struct net_device *dev, int new_mtu)
8785 {
8786 	struct netlink_ext_ack extack;
8787 	int err;
8788 
8789 	memset(&extack, 0, sizeof(extack));
8790 	err = dev_set_mtu_ext(dev, new_mtu, &extack);
8791 	if (err && extack._msg)
8792 		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8793 	return err;
8794 }
8795 EXPORT_SYMBOL(dev_set_mtu);
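
/*
 * Illustrative sketch (not part of the original source): a caller requesting
 * jumbo frames just passes the desired size and lets the core validate it
 * against dev->min_mtu/dev->max_mtu and run the notifier chain.  RTNL must
 * be held; the function name and the 9000 byte value are arbitrary.
 *
 *	static int example_enable_jumbo(struct net_device *dev)
 *	{
 *		int err;
 *
 *		ASSERT_RTNL();
 *		err = dev_set_mtu(dev, 9000);
 *		if (err)
 *			netdev_err(dev, "failed to set jumbo MTU: %d\n", err);
 *		return err;
 *	}
 */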
8796 
8797 /**
8798  *	dev_change_tx_queue_len - Change TX queue length of a netdevice
8799  *	@dev: device
8800  *	@new_len: new tx queue length
8801  */
8802 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8803 {
8804 	unsigned int orig_len = dev->tx_queue_len;
8805 	int res;
8806 
8807 	if (new_len != (unsigned int)new_len)
8808 		return -ERANGE;
8809 
8810 	if (new_len != orig_len) {
8811 		dev->tx_queue_len = new_len;
8812 		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8813 		res = notifier_to_errno(res);
8814 		if (res)
8815 			goto err_rollback;
8816 		res = dev_qdisc_change_tx_queue_len(dev);
8817 		if (res)
8818 			goto err_rollback;
8819 	}
8820 
8821 	return 0;
8822 
8823 err_rollback:
8824 	netdev_err(dev, "refused to change device tx_queue_len\n");
8825 	dev->tx_queue_len = orig_len;
8826 	return res;
8827 }
8828 
8829 /**
8830  *	dev_set_group - Change group this device belongs to
8831  *	@dev: device
8832  *	@new_group: group this device should belong to
8833  */
8834 void dev_set_group(struct net_device *dev, int new_group)
8835 {
8836 	dev->group = new_group;
8837 }
8838 
8839 /**
8840  *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8841  *	@dev: device
8842  *	@addr: new address
8843  *	@extack: netlink extended ack
8844  */
8845 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8846 			      struct netlink_ext_ack *extack)
8847 {
8848 	struct netdev_notifier_pre_changeaddr_info info = {
8849 		.info.dev = dev,
8850 		.info.extack = extack,
8851 		.dev_addr = addr,
8852 	};
8853 	int rc;
8854 
8855 	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8856 	return notifier_to_errno(rc);
8857 }
8858 EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8859 
8860 /**
8861  *	dev_set_mac_address - Change Media Access Control Address
8862  *	@dev: device
8863  *	@sa: new address
8864  *	@extack: netlink extended ack
8865  *
8866  *	Change the hardware (MAC) address of the device
8867  */
8868 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8869 			struct netlink_ext_ack *extack)
8870 {
8871 	const struct net_device_ops *ops = dev->netdev_ops;
8872 	int err;
8873 
8874 	if (!ops->ndo_set_mac_address)
8875 		return -EOPNOTSUPP;
8876 	if (sa->sa_family != dev->type)
8877 		return -EINVAL;
8878 	if (!netif_device_present(dev))
8879 		return -ENODEV;
8880 	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8881 	if (err)
8882 		return err;
8883 	if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) {
8884 		err = ops->ndo_set_mac_address(dev, sa);
8885 		if (err)
8886 			return err;
8887 	}
8888 	dev->addr_assign_type = NET_ADDR_SET;
8889 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8890 	add_device_randomness(dev->dev_addr, dev->addr_len);
8891 	return 0;
8892 }
8893 EXPORT_SYMBOL(dev_set_mac_address);
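
/*
 * Illustrative sketch (not part of the original source): setting a new
 * hardware address from kernel code.  The sockaddr family must match
 * dev->type and RTNL must be held; "new_mac" is assumed to point at
 * dev->addr_len valid bytes and the function name is hypothetical.
 *
 *	static int example_set_mac(struct net_device *dev, const u8 *new_mac)
 *	{
 *		struct sockaddr sa;
 *
 *		ASSERT_RTNL();
 *		sa.sa_family = dev->type;
 *		memcpy(sa.sa_data, new_mac, dev->addr_len);
 *		return dev_set_mac_address(dev, &sa, NULL);
 *	}
 */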
8894 
8895 static DECLARE_RWSEM(dev_addr_sem);
8896 
8897 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
8898 			     struct netlink_ext_ack *extack)
8899 {
8900 	int ret;
8901 
8902 	down_write(&dev_addr_sem);
8903 	ret = dev_set_mac_address(dev, sa, extack);
8904 	up_write(&dev_addr_sem);
8905 	return ret;
8906 }
8907 EXPORT_SYMBOL(dev_set_mac_address_user);
8908 
8909 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
8910 {
8911 	size_t size = sizeof(sa->sa_data_min);
8912 	struct net_device *dev;
8913 	int ret = 0;
8914 
8915 	down_read(&dev_addr_sem);
8916 	rcu_read_lock();
8917 
8918 	dev = dev_get_by_name_rcu(net, dev_name);
8919 	if (!dev) {
8920 		ret = -ENODEV;
8921 		goto unlock;
8922 	}
8923 	if (!dev->addr_len)
8924 		memset(sa->sa_data, 0, size);
8925 	else
8926 		memcpy(sa->sa_data, dev->dev_addr,
8927 		       min_t(size_t, size, dev->addr_len));
8928 	sa->sa_family = dev->type;
8929 
8930 unlock:
8931 	rcu_read_unlock();
8932 	up_read(&dev_addr_sem);
8933 	return ret;
8934 }
8935 EXPORT_SYMBOL(dev_get_mac_address);
8936 
8937 /**
8938  *	dev_change_carrier - Change device carrier
8939  *	@dev: device
8940  *	@new_carrier: new value
8941  *
8942  *	Change device carrier
8943  */
8944 int dev_change_carrier(struct net_device *dev, bool new_carrier)
8945 {
8946 	const struct net_device_ops *ops = dev->netdev_ops;
8947 
8948 	if (!ops->ndo_change_carrier)
8949 		return -EOPNOTSUPP;
8950 	if (!netif_device_present(dev))
8951 		return -ENODEV;
8952 	return ops->ndo_change_carrier(dev, new_carrier);
8953 }
8954 
8955 /**
8956  *	dev_get_phys_port_id - Get device physical port ID
8957  *	@dev: device
8958  *	@ppid: port ID
8959  *
8960  *	Get device physical port ID
8961  */
8962 int dev_get_phys_port_id(struct net_device *dev,
8963 			 struct netdev_phys_item_id *ppid)
8964 {
8965 	const struct net_device_ops *ops = dev->netdev_ops;
8966 
8967 	if (!ops->ndo_get_phys_port_id)
8968 		return -EOPNOTSUPP;
8969 	return ops->ndo_get_phys_port_id(dev, ppid);
8970 }
8971 
8972 /**
8973  *	dev_get_phys_port_name - Get device physical port name
8974  *	@dev: device
8975  *	@name: port name
8976  *	@len: limit of bytes to copy to name
8977  *
8978  *	Get device physical port name
8979  */
8980 int dev_get_phys_port_name(struct net_device *dev,
8981 			   char *name, size_t len)
8982 {
8983 	const struct net_device_ops *ops = dev->netdev_ops;
8984 	int err;
8985 
8986 	if (ops->ndo_get_phys_port_name) {
8987 		err = ops->ndo_get_phys_port_name(dev, name, len);
8988 		if (err != -EOPNOTSUPP)
8989 			return err;
8990 	}
8991 	return devlink_compat_phys_port_name_get(dev, name, len);
8992 }
8993 
8994 /**
8995  *	dev_get_port_parent_id - Get the device's port parent identifier
8996  *	@dev: network device
8997  *	@ppid: pointer to a storage for the port's parent identifier
8998  *	@recurse: allow/disallow recursion to lower devices
8999  *
9000  *	Get the device's port parent identifier
9001  */
9002 int dev_get_port_parent_id(struct net_device *dev,
9003 			   struct netdev_phys_item_id *ppid,
9004 			   bool recurse)
9005 {
9006 	const struct net_device_ops *ops = dev->netdev_ops;
9007 	struct netdev_phys_item_id first = { };
9008 	struct net_device *lower_dev;
9009 	struct list_head *iter;
9010 	int err;
9011 
9012 	if (ops->ndo_get_port_parent_id) {
9013 		err = ops->ndo_get_port_parent_id(dev, ppid);
9014 		if (err != -EOPNOTSUPP)
9015 			return err;
9016 	}
9017 
9018 	err = devlink_compat_switch_id_get(dev, ppid);
9019 	if (!recurse || err != -EOPNOTSUPP)
9020 		return err;
9021 
9022 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
9023 		err = dev_get_port_parent_id(lower_dev, ppid, true);
9024 		if (err)
9025 			break;
9026 		if (!first.id_len)
9027 			first = *ppid;
9028 		else if (memcmp(&first, ppid, sizeof(*ppid)))
9029 			return -EOPNOTSUPP;
9030 	}
9031 
9032 	return err;
9033 }
9034 EXPORT_SYMBOL(dev_get_port_parent_id);
9035 
9036 /**
9037  *	netdev_port_same_parent_id - Indicate if two network devices have
9038  *	the same port parent identifier
9039  *	@a: first network device
9040  *	@b: second network device
9041  */
9042 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
9043 {
9044 	struct netdev_phys_item_id a_id = { };
9045 	struct netdev_phys_item_id b_id = { };
9046 
9047 	if (dev_get_port_parent_id(a, &a_id, true) ||
9048 	    dev_get_port_parent_id(b, &b_id, true))
9049 		return false;
9050 
9051 	return netdev_phys_item_id_same(&a_id, &b_id);
9052 }
9053 EXPORT_SYMBOL(netdev_port_same_parent_id);
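
/*
 * Illustrative sketch (not part of the original source): a switchdev-aware
 * caller can use this helper to decide whether two ports sit behind the same
 * switch ASIC and may therefore be bridged in hardware.  The helpers in the
 * branches are hypothetical.
 *
 *	if (netdev_port_same_parent_id(port_a, port_b))
 *		example_offload_to_hardware(port_a, port_b);
 *	else
 *		example_forward_in_software(port_a, port_b);
 */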
9054 
9055 /**
9056  *	dev_change_proto_down - set carrier according to proto_down.
9057  *
9058  *	@dev: device
9059  *	@proto_down: new value
9060  */
9061 int dev_change_proto_down(struct net_device *dev, bool proto_down)
9062 {
9063 	if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
9064 		return -EOPNOTSUPP;
9065 	if (!netif_device_present(dev))
9066 		return -ENODEV;
9067 	if (proto_down)
9068 		netif_carrier_off(dev);
9069 	else
9070 		netif_carrier_on(dev);
9071 	dev->proto_down = proto_down;
9072 	return 0;
9073 }
9074 
9075 /**
9076  *	dev_change_proto_down_reason - proto down reason
9077  *
9078  *	@dev: device
9079  *	@mask: proto down mask
9080  *	@value: proto down value
9081  */
9082 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9083 				  u32 value)
9084 {
9085 	int b;
9086 
9087 	if (!mask) {
9088 		dev->proto_down_reason = value;
9089 	} else {
9090 		for_each_set_bit(b, &mask, 32) {
9091 			if (value & (1 << b))
9092 				dev->proto_down_reason |= BIT(b);
9093 			else
9094 				dev->proto_down_reason &= ~BIT(b);
9095 		}
9096 	}
9097 }
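
/*
 * Illustrative sketch (not part of the original source): @mask selects which
 * reason bits are touched and @value supplies their new state, so different
 * subsystems can update their own bits independently.  For example, setting
 * bit 1 while clearing bit 2 and leaving all other bits alone:
 *
 *	dev_change_proto_down_reason(dev, BIT(1) | BIT(2), BIT(1));
 *
 * With a zero mask the whole reason word is overwritten by @value.
 */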
9098 
9099 struct bpf_xdp_link {
9100 	struct bpf_link link;
9101 	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
9102 	int flags;
9103 };
9104 
9105 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9106 {
9107 	if (flags & XDP_FLAGS_HW_MODE)
9108 		return XDP_MODE_HW;
9109 	if (flags & XDP_FLAGS_DRV_MODE)
9110 		return XDP_MODE_DRV;
9111 	if (flags & XDP_FLAGS_SKB_MODE)
9112 		return XDP_MODE_SKB;
9113 	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9114 }
9115 
9116 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9117 {
9118 	switch (mode) {
9119 	case XDP_MODE_SKB:
9120 		return generic_xdp_install;
9121 	case XDP_MODE_DRV:
9122 	case XDP_MODE_HW:
9123 		return dev->netdev_ops->ndo_bpf;
9124 	default:
9125 		return NULL;
9126 	}
9127 }
9128 
9129 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9130 					 enum bpf_xdp_mode mode)
9131 {
9132 	return dev->xdp_state[mode].link;
9133 }
9134 
9135 static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9136 				     enum bpf_xdp_mode mode)
9137 {
9138 	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9139 
9140 	if (link)
9141 		return link->link.prog;
9142 	return dev->xdp_state[mode].prog;
9143 }
9144 
9145 u8 dev_xdp_prog_count(struct net_device *dev)
9146 {
9147 	u8 count = 0;
9148 	int i;
9149 
9150 	for (i = 0; i < __MAX_XDP_MODE; i++)
9151 		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9152 			count++;
9153 	return count;
9154 }
9155 EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
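
/*
 * Illustrative sketch (not part of the original source): drivers sometimes
 * use dev_xdp_prog_count() to refuse a reconfiguration while any XDP
 * program (generic, native or offloaded) is attached.  The function name is
 * hypothetical.
 *
 *	static int example_set_channels(struct net_device *dev)
 *	{
 *		if (dev_xdp_prog_count(dev)) {
 *			netdev_err(dev, "cannot reconfigure while XDP is attached\n");
 *			return -EBUSY;
 *		}
 *		// ...apply the new channel configuration...
 *		return 0;
 *	}
 */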
9156 
9157 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9158 {
9159 	struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9160 
9161 	return prog ? prog->aux->id : 0;
9162 }
9163 
9164 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9165 			     struct bpf_xdp_link *link)
9166 {
9167 	dev->xdp_state[mode].link = link;
9168 	dev->xdp_state[mode].prog = NULL;
9169 }
9170 
9171 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9172 			     struct bpf_prog *prog)
9173 {
9174 	dev->xdp_state[mode].link = NULL;
9175 	dev->xdp_state[mode].prog = prog;
9176 }
9177 
9178 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9179 			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9180 			   u32 flags, struct bpf_prog *prog)
9181 {
9182 	struct netdev_bpf xdp;
9183 	int err;
9184 
9185 	memset(&xdp, 0, sizeof(xdp));
9186 	xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9187 	xdp.extack = extack;
9188 	xdp.flags = flags;
9189 	xdp.prog = prog;
9190 
9191 	/* Drivers assume refcnt is already incremented (i.e., prog pointer is
9192 	 * "moved" into driver), so they don't increment it on their own, but
9193 	 * they do decrement refcnt when program is detached or replaced.
9194 	 * Given net_device also owns link/prog, we need to bump refcnt here
9195 	 * to prevent drivers from underflowing it.
9196 	 */
9197 	if (prog)
9198 		bpf_prog_inc(prog);
9199 	err = bpf_op(dev, &xdp);
9200 	if (err) {
9201 		if (prog)
9202 			bpf_prog_put(prog);
9203 		return err;
9204 	}
9205 
9206 	if (mode != XDP_MODE_HW)
9207 		bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9208 
9209 	return 0;
9210 }
9211 
9212 static void dev_xdp_uninstall(struct net_device *dev)
9213 {
9214 	struct bpf_xdp_link *link;
9215 	struct bpf_prog *prog;
9216 	enum bpf_xdp_mode mode;
9217 	bpf_op_t bpf_op;
9218 
9219 	ASSERT_RTNL();
9220 
9221 	for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9222 		prog = dev_xdp_prog(dev, mode);
9223 		if (!prog)
9224 			continue;
9225 
9226 		bpf_op = dev_xdp_bpf_op(dev, mode);
9227 		if (!bpf_op)
9228 			continue;
9229 
9230 		WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9231 
9232 		/* auto-detach link from net device */
9233 		link = dev_xdp_link(dev, mode);
9234 		if (link)
9235 			link->dev = NULL;
9236 		else
9237 			bpf_prog_put(prog);
9238 
9239 		dev_xdp_set_link(dev, mode, NULL);
9240 	}
9241 }
9242 
9243 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9244 			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9245 			  struct bpf_prog *old_prog, u32 flags)
9246 {
9247 	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9248 	struct bpf_prog *cur_prog;
9249 	struct net_device *upper;
9250 	struct list_head *iter;
9251 	enum bpf_xdp_mode mode;
9252 	bpf_op_t bpf_op;
9253 	int err;
9254 
9255 	ASSERT_RTNL();
9256 
9257 	/* either link or prog attachment, never both */
9258 	if (link && (new_prog || old_prog))
9259 		return -EINVAL;
9260 	/* link supports only XDP mode flags */
9261 	if (link && (flags & ~XDP_FLAGS_MODES)) {
9262 		NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9263 		return -EINVAL;
9264 	}
9265 	/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9266 	if (num_modes > 1) {
9267 		NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
9268 		return -EINVAL;
9269 	}
9270 	/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9271 	if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9272 		NL_SET_ERR_MSG(extack,
9273 			       "More than one program loaded, unset mode is ambiguous");
9274 		return -EINVAL;
9275 	}
9276 	/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9277 	if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
9278 		NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
9279 		return -EINVAL;
9280 	}
9281 
9282 	mode = dev_xdp_mode(dev, flags);
9283 	/* can't replace attached link */
9284 	if (dev_xdp_link(dev, mode)) {
9285 		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
9286 		return -EBUSY;
9287 	}
9288 
9289 	/* don't allow if an upper device already has a program */
9290 	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
9291 		if (dev_xdp_prog_count(upper) > 0) {
9292 			NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
9293 			return -EEXIST;
9294 		}
9295 	}
9296 
9297 	cur_prog = dev_xdp_prog(dev, mode);
9298 	/* can't replace attached prog with link */
9299 	if (link && cur_prog) {
9300 		NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
9301 		return -EBUSY;
9302 	}
9303 	if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
9304 		NL_SET_ERR_MSG(extack, "Active program does not match expected");
9305 		return -EEXIST;
9306 	}
9307 
9308 	/* put effective new program into new_prog */
9309 	if (link)
9310 		new_prog = link->link.prog;
9311 
9312 	if (new_prog) {
9313 		bool offload = mode == XDP_MODE_HW;
9314 		enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
9315 					       ? XDP_MODE_DRV : XDP_MODE_SKB;
9316 
9317 		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
9318 			NL_SET_ERR_MSG(extack, "XDP program already attached");
9319 			return -EBUSY;
9320 		}
9321 		if (!offload && dev_xdp_prog(dev, other_mode)) {
9322 			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
9323 			return -EEXIST;
9324 		}
9325 		if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
9326 			NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
9327 			return -EINVAL;
9328 		}
9329 		if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
9330 			NL_SET_ERR_MSG(extack, "Program bound to different device");
9331 			return -EINVAL;
9332 		}
9333 		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9334 			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
9335 			return -EINVAL;
9336 		}
9337 		if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9338 			NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
9339 			return -EINVAL;
9340 		}
9341 	}
9342 
9343 	/* don't call drivers if the effective program didn't change */
9344 	if (new_prog != cur_prog) {
9345 		bpf_op = dev_xdp_bpf_op(dev, mode);
9346 		if (!bpf_op) {
9347 			NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
9348 			return -EOPNOTSUPP;
9349 		}
9350 
9351 		err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9352 		if (err)
9353 			return err;
9354 	}
9355 
9356 	if (link)
9357 		dev_xdp_set_link(dev, mode, link);
9358 	else
9359 		dev_xdp_set_prog(dev, mode, new_prog);
9360 	if (cur_prog)
9361 		bpf_prog_put(cur_prog);
9362 
9363 	return 0;
9364 }
9365 
9366 static int dev_xdp_attach_link(struct net_device *dev,
9367 			       struct netlink_ext_ack *extack,
9368 			       struct bpf_xdp_link *link)
9369 {
9370 	return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9371 }
9372 
9373 static int dev_xdp_detach_link(struct net_device *dev,
9374 			       struct netlink_ext_ack *extack,
9375 			       struct bpf_xdp_link *link)
9376 {
9377 	enum bpf_xdp_mode mode;
9378 	bpf_op_t bpf_op;
9379 
9380 	ASSERT_RTNL();
9381 
9382 	mode = dev_xdp_mode(dev, link->flags);
9383 	if (dev_xdp_link(dev, mode) != link)
9384 		return -EINVAL;
9385 
9386 	bpf_op = dev_xdp_bpf_op(dev, mode);
9387 	WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9388 	dev_xdp_set_link(dev, mode, NULL);
9389 	return 0;
9390 }
9391 
9392 static void bpf_xdp_link_release(struct bpf_link *link)
9393 {
9394 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9395 
9396 	rtnl_lock();
9397 
9398 	/* if racing with net_device's tear down, xdp_link->dev might be
9399 	 * already NULL, in which case link was already auto-detached
9400 	 */
9401 	if (xdp_link->dev) {
9402 		WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9403 		xdp_link->dev = NULL;
9404 	}
9405 
9406 	rtnl_unlock();
9407 }
9408 
9409 static int bpf_xdp_link_detach(struct bpf_link *link)
9410 {
9411 	bpf_xdp_link_release(link);
9412 	return 0;
9413 }
9414 
9415 static void bpf_xdp_link_dealloc(struct bpf_link *link)
9416 {
9417 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9418 
9419 	kfree(xdp_link);
9420 }
9421 
9422 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
9423 				     struct seq_file *seq)
9424 {
9425 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9426 	u32 ifindex = 0;
9427 
9428 	rtnl_lock();
9429 	if (xdp_link->dev)
9430 		ifindex = xdp_link->dev->ifindex;
9431 	rtnl_unlock();
9432 
9433 	seq_printf(seq, "ifindex:\t%u\n", ifindex);
9434 }
9435 
9436 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
9437 				       struct bpf_link_info *info)
9438 {
9439 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9440 	u32 ifindex = 0;
9441 
9442 	rtnl_lock();
9443 	if (xdp_link->dev)
9444 		ifindex = xdp_link->dev->ifindex;
9445 	rtnl_unlock();
9446 
9447 	info->xdp.ifindex = ifindex;
9448 	return 0;
9449 }
9450 
9451 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
9452 			       struct bpf_prog *old_prog)
9453 {
9454 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9455 	enum bpf_xdp_mode mode;
9456 	bpf_op_t bpf_op;
9457 	int err = 0;
9458 
9459 	rtnl_lock();
9460 
9461 	/* link might have been auto-released already, so fail */
9462 	if (!xdp_link->dev) {
9463 		err = -ENOLINK;
9464 		goto out_unlock;
9465 	}
9466 
9467 	if (old_prog && link->prog != old_prog) {
9468 		err = -EPERM;
9469 		goto out_unlock;
9470 	}
9471 	old_prog = link->prog;
9472 	if (old_prog->type != new_prog->type ||
9473 	    old_prog->expected_attach_type != new_prog->expected_attach_type) {
9474 		err = -EINVAL;
9475 		goto out_unlock;
9476 	}
9477 
9478 	if (old_prog == new_prog) {
9479 		/* no-op, don't disturb drivers */
9480 		bpf_prog_put(new_prog);
9481 		goto out_unlock;
9482 	}
9483 
9484 	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9485 	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9486 	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9487 			      xdp_link->flags, new_prog);
9488 	if (err)
9489 		goto out_unlock;
9490 
9491 	old_prog = xchg(&link->prog, new_prog);
9492 	bpf_prog_put(old_prog);
9493 
9494 out_unlock:
9495 	rtnl_unlock();
9496 	return err;
9497 }
9498 
9499 static const struct bpf_link_ops bpf_xdp_link_lops = {
9500 	.release = bpf_xdp_link_release,
9501 	.dealloc = bpf_xdp_link_dealloc,
9502 	.detach = bpf_xdp_link_detach,
9503 	.show_fdinfo = bpf_xdp_link_show_fdinfo,
9504 	.fill_link_info = bpf_xdp_link_fill_link_info,
9505 	.update_prog = bpf_xdp_link_update,
9506 };
9507 
9508 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9509 {
9510 	struct net *net = current->nsproxy->net_ns;
9511 	struct bpf_link_primer link_primer;
9512 	struct netlink_ext_ack extack = {};
9513 	struct bpf_xdp_link *link;
9514 	struct net_device *dev;
9515 	int err, fd;
9516 
9517 	rtnl_lock();
9518 	dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9519 	if (!dev) {
9520 		rtnl_unlock();
9521 		return -EINVAL;
9522 	}
9523 
9524 	link = kzalloc(sizeof(*link), GFP_USER);
9525 	if (!link) {
9526 		err = -ENOMEM;
9527 		goto unlock;
9528 	}
9529 
9530 	bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9531 	link->dev = dev;
9532 	link->flags = attr->link_create.flags;
9533 
9534 	err = bpf_link_prime(&link->link, &link_primer);
9535 	if (err) {
9536 		kfree(link);
9537 		goto unlock;
9538 	}
9539 
9540 	err = dev_xdp_attach_link(dev, &extack, link);
9541 	rtnl_unlock();
9542 
9543 	if (err) {
9544 		link->dev = NULL;
9545 		bpf_link_cleanup(&link_primer);
9546 		trace_bpf_xdp_link_attach_failed(extack._msg);
9547 		goto out_put_dev;
9548 	}
9549 
9550 	fd = bpf_link_settle(&link_primer);
9551 	/* link itself doesn't hold dev's refcnt to not complicate shutdown */
9552 	dev_put(dev);
9553 	return fd;
9554 
9555 unlock:
9556 	rtnl_unlock();
9557 
9558 out_put_dev:
9559 	dev_put(dev);
9560 	return err;
9561 }
9562 
9563 /**
9564  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
9565  *	@dev: device
9566  *	@extack: netlink extended ack
9567  *	@fd: new program fd or negative value to clear
9568  *	@expected_fd: old program fd that userspace expects to replace or clear
9569  *	@flags: xdp-related flags
9570  *
9571  *	Set or clear a bpf program for a device
9572  */
9573 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9574 		      int fd, int expected_fd, u32 flags)
9575 {
9576 	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9577 	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9578 	int err;
9579 
9580 	ASSERT_RTNL();
9581 
9582 	if (fd >= 0) {
9583 		new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9584 						 mode != XDP_MODE_SKB);
9585 		if (IS_ERR(new_prog))
9586 			return PTR_ERR(new_prog);
9587 	}
9588 
9589 	if (expected_fd >= 0) {
9590 		old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9591 						 mode != XDP_MODE_SKB);
9592 		if (IS_ERR(old_prog)) {
9593 			err = PTR_ERR(old_prog);
9594 			old_prog = NULL;
9595 			goto err_out;
9596 		}
9597 	}
9598 
9599 	err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9600 
9601 err_out:
9602 	if (err && new_prog)
9603 		bpf_prog_put(new_prog);
9604 	if (old_prog)
9605 		bpf_prog_put(old_prog);
9606 	return err;
9607 }
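
/*
 * Illustrative sketch (not part of the original source): this is the entry
 * point used by the rtnetlink IFLA_XDP handling.  Passing negative fds
 * clears whatever program is installed; a mode flag selects generic vs.
 * native attachment.  RTNL must be held; "prog_fd" and "extack" are assumed
 * to come from the caller.
 *
 *	// detach any currently installed program
 *	err = dev_change_xdp_fd(dev, extack, -1, -1, 0);
 *
 *	// attach the program behind prog_fd in generic (skb) mode
 *	err = dev_change_xdp_fd(dev, extack, prog_fd, -1, XDP_FLAGS_SKB_MODE);
 */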
9608 
9609 /**
9610  * dev_index_reserve() - allocate an ifindex in a namespace
9611  * @net: the applicable net namespace
9612  * @ifindex: requested ifindex, pass %0 to get one allocated
9613  *
9614  * Allocate an ifindex for a new device. Caller must either use the ifindex
9615  * to store the device (via list_netdevice()) or call dev_index_release()
9616  * to give the index up.
9617  *
9618  * Return: a suitable unique value for a new device interface number or -errno.
9619  */
9620 static int dev_index_reserve(struct net *net, u32 ifindex)
9621 {
9622 	int err;
9623 
9624 	if (ifindex > INT_MAX) {
9625 		DEBUG_NET_WARN_ON_ONCE(1);
9626 		return -EINVAL;
9627 	}
9628 
9629 	if (!ifindex)
9630 		err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
9631 				      xa_limit_31b, &net->ifindex, GFP_KERNEL);
9632 	else
9633 		err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
9634 	if (err < 0)
9635 		return err;
9636 
9637 	return ifindex;
9638 }
9639 
9640 static void dev_index_release(struct net *net, int ifindex)
9641 {
9642 	/* Expect only unused indexes, unlist_netdevice() removes the used */
9643 	WARN_ON(xa_erase(&net->dev_by_index, ifindex));
9644 }
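
/*
 * Illustrative sketch (not part of the original source): the reserve/release
 * pair brackets registration, roughly as register_netdevice() does further
 * down; a requested index of 0 asks for automatic allocation.
 *
 *	ifindex = dev_index_reserve(net, 0);
 *	if (ifindex < 0)
 *		return ifindex;
 *	dev->ifindex = ifindex;
 *	// ...on a later error path, give the unused slot back:
 *	dev_index_release(net, dev->ifindex);
 */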
9645 
9646 /* Delayed registration/unregistration */
9647 LIST_HEAD(net_todo_list);
9648 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9649 
9650 static void net_set_todo(struct net_device *dev)
9651 {
9652 	list_add_tail(&dev->todo_list, &net_todo_list);
9653 	atomic_inc(&dev_net(dev)->dev_unreg_count);
9654 }
9655 
9656 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9657 	struct net_device *upper, netdev_features_t features)
9658 {
9659 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9660 	netdev_features_t feature;
9661 	int feature_bit;
9662 
9663 	for_each_netdev_feature(upper_disables, feature_bit) {
9664 		feature = __NETIF_F_BIT(feature_bit);
9665 		if (!(upper->wanted_features & feature)
9666 		    && (features & feature)) {
9667 			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9668 				   &feature, upper->name);
9669 			features &= ~feature;
9670 		}
9671 	}
9672 
9673 	return features;
9674 }
9675 
9676 static void netdev_sync_lower_features(struct net_device *upper,
9677 	struct net_device *lower, netdev_features_t features)
9678 {
9679 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9680 	netdev_features_t feature;
9681 	int feature_bit;
9682 
9683 	for_each_netdev_feature(upper_disables, feature_bit) {
9684 		feature = __NETIF_F_BIT(feature_bit);
9685 		if (!(features & feature) && (lower->features & feature)) {
9686 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9687 				   &feature, lower->name);
9688 			lower->wanted_features &= ~feature;
9689 			__netdev_update_features(lower);
9690 
9691 			if (unlikely(lower->features & feature))
9692 				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9693 					    &feature, lower->name);
9694 			else
9695 				netdev_features_change(lower);
9696 		}
9697 	}
9698 }
9699 
9700 static netdev_features_t netdev_fix_features(struct net_device *dev,
9701 	netdev_features_t features)
9702 {
9703 	/* Fix illegal checksum combinations */
9704 	if ((features & NETIF_F_HW_CSUM) &&
9705 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9706 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9707 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9708 	}
9709 
9710 	/* TSO requires that SG is present as well. */
9711 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9712 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9713 		features &= ~NETIF_F_ALL_TSO;
9714 	}
9715 
9716 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9717 					!(features & NETIF_F_IP_CSUM)) {
9718 		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9719 		features &= ~NETIF_F_TSO;
9720 		features &= ~NETIF_F_TSO_ECN;
9721 	}
9722 
9723 	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9724 					 !(features & NETIF_F_IPV6_CSUM)) {
9725 		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9726 		features &= ~NETIF_F_TSO6;
9727 	}
9728 
9729 	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9730 	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9731 		features &= ~NETIF_F_TSO_MANGLEID;
9732 
9733 	/* TSO ECN requires that TSO is present as well. */
9734 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9735 		features &= ~NETIF_F_TSO_ECN;
9736 
9737 	/* Software GSO depends on SG. */
9738 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9739 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9740 		features &= ~NETIF_F_GSO;
9741 	}
9742 
9743 	/* GSO partial features require GSO partial be set */
9744 	if ((features & dev->gso_partial_features) &&
9745 	    !(features & NETIF_F_GSO_PARTIAL)) {
9746 		netdev_dbg(dev,
9747 			   "Dropping partially supported GSO features since no GSO partial.\n");
9748 		features &= ~dev->gso_partial_features;
9749 	}
9750 
9751 	if (!(features & NETIF_F_RXCSUM)) {
9752 		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9753 		 * successfully merged by hardware must also have the
9754 		 * checksum verified by hardware.  If the user does not
9755 		 * want to enable RXCSUM, logically, we should disable GRO_HW.
9756 		 */
9757 		if (features & NETIF_F_GRO_HW) {
9758 			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9759 			features &= ~NETIF_F_GRO_HW;
9760 		}
9761 	}
9762 
9763 	/* LRO/HW-GRO features cannot be combined with RX-FCS */
9764 	if (features & NETIF_F_RXFCS) {
9765 		if (features & NETIF_F_LRO) {
9766 			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9767 			features &= ~NETIF_F_LRO;
9768 		}
9769 
9770 		if (features & NETIF_F_GRO_HW) {
9771 			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9772 			features &= ~NETIF_F_GRO_HW;
9773 		}
9774 	}
9775 
9776 	if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
9777 		netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
9778 		features &= ~NETIF_F_LRO;
9779 	}
9780 
9781 	if (features & NETIF_F_HW_TLS_TX) {
9782 		bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
9783 			(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9784 		bool hw_csum = features & NETIF_F_HW_CSUM;
9785 
9786 		if (!ip_csum && !hw_csum) {
9787 			netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9788 			features &= ~NETIF_F_HW_TLS_TX;
9789 		}
9790 	}
9791 
9792 	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
9793 		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9794 		features &= ~NETIF_F_HW_TLS_RX;
9795 	}
9796 
9797 	return features;
9798 }
9799 
9800 int __netdev_update_features(struct net_device *dev)
9801 {
9802 	struct net_device *upper, *lower;
9803 	netdev_features_t features;
9804 	struct list_head *iter;
9805 	int err = -1;
9806 
9807 	ASSERT_RTNL();
9808 
9809 	features = netdev_get_wanted_features(dev);
9810 
9811 	if (dev->netdev_ops->ndo_fix_features)
9812 		features = dev->netdev_ops->ndo_fix_features(dev, features);
9813 
9814 	/* driver might be less strict about feature dependencies */
9815 	features = netdev_fix_features(dev, features);
9816 
9817 	/* some features can't be enabled if they're off on an upper device */
9818 	netdev_for_each_upper_dev_rcu(dev, upper, iter)
9819 		features = netdev_sync_upper_features(dev, upper, features);
9820 
9821 	if (dev->features == features)
9822 		goto sync_lower;
9823 
9824 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9825 		&dev->features, &features);
9826 
9827 	if (dev->netdev_ops->ndo_set_features)
9828 		err = dev->netdev_ops->ndo_set_features(dev, features);
9829 	else
9830 		err = 0;
9831 
9832 	if (unlikely(err < 0)) {
9833 		netdev_err(dev,
9834 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
9835 			err, &features, &dev->features);
9836 		/* return non-0 since some features might have changed and
9837 		 * it's better to fire a spurious notification than miss it
9838 		 */
9839 		return -1;
9840 	}
9841 
9842 sync_lower:
9843 	/* some features must be disabled on lower devices when disabled
9844 	 * on an upper device (think: bonding master or bridge)
9845 	 */
9846 	netdev_for_each_lower_dev(dev, lower, iter)
9847 		netdev_sync_lower_features(dev, lower, features);
9848 
9849 	if (!err) {
9850 		netdev_features_t diff = features ^ dev->features;
9851 
9852 		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9853 			/* udp_tunnel_{get,drop}_rx_info both need
9854 			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9855 			 * device, or they won't do anything.
9856 			 * Thus we need to update dev->features
9857 			 * *before* calling udp_tunnel_get_rx_info,
9858 			 * but *after* calling udp_tunnel_drop_rx_info.
9859 			 */
9860 			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9861 				dev->features = features;
9862 				udp_tunnel_get_rx_info(dev);
9863 			} else {
9864 				udp_tunnel_drop_rx_info(dev);
9865 			}
9866 		}
9867 
9868 		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9869 			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9870 				dev->features = features;
9871 				err |= vlan_get_rx_ctag_filter_info(dev);
9872 			} else {
9873 				vlan_drop_rx_ctag_filter_info(dev);
9874 			}
9875 		}
9876 
9877 		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9878 			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9879 				dev->features = features;
9880 				err |= vlan_get_rx_stag_filter_info(dev);
9881 			} else {
9882 				vlan_drop_rx_stag_filter_info(dev);
9883 			}
9884 		}
9885 
9886 		dev->features = features;
9887 	}
9888 
9889 	return err < 0 ? 0 : 1;
9890 }
9891 
9892 /**
9893  *	netdev_update_features - recalculate device features
9894  *	@dev: the device to check
9895  *
9896  *	Recalculate dev->features set and send notifications if it
9897  *	has changed. Should be called after driver or hardware dependent
9898  *	conditions might have changed that influence the features.
9899  */
9900 void netdev_update_features(struct net_device *dev)
9901 {
9902 	if (__netdev_update_features(dev))
9903 		netdev_features_change(dev);
9904 }
9905 EXPORT_SYMBOL(netdev_update_features);
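
/*
 * Illustrative sketch (not part of the original source): a driver that has
 * just learned about a capability change adjusts its feature masks and asks
 * the core to recompute and notify, all under RTNL.  The function name is
 * hypothetical.
 *
 *	static void example_fw_caps_changed(struct net_device *dev, bool lro_ok)
 *	{
 *		ASSERT_RTNL();
 *		if (lro_ok)
 *			dev->hw_features |= NETIF_F_LRO;
 *		else
 *			dev->hw_features &= ~NETIF_F_LRO;
 *		netdev_update_features(dev);
 *	}
 */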
9906 
9907 /**
9908  *	netdev_change_features - recalculate device features
9909  *	@dev: the device to check
9910  *
9911  *	Recalculate dev->features set and send notifications even
9912  *	if they have not changed. Should be called instead of
9913  *	netdev_update_features() if also dev->vlan_features might
9914  *	have changed to allow the changes to be propagated to stacked
9915  *	VLAN devices.
9916  */
9917 void netdev_change_features(struct net_device *dev)
9918 {
9919 	__netdev_update_features(dev);
9920 	netdev_features_change(dev);
9921 }
9922 EXPORT_SYMBOL(netdev_change_features);
9923 
9924 /**
9925  *	netif_stacked_transfer_operstate -	transfer operstate
9926  *	@rootdev: the root or lower level device to transfer state from
9927  *	@dev: the device to transfer operstate to
9928  *
9929  *	Transfer operational state from root to device. This is normally
9930  *	called when a stacking relationship exists between the root
9931  *	device and the device (a leaf device).
9932  */
9933 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9934 					struct net_device *dev)
9935 {
9936 	if (rootdev->operstate == IF_OPER_DORMANT)
9937 		netif_dormant_on(dev);
9938 	else
9939 		netif_dormant_off(dev);
9940 
9941 	if (rootdev->operstate == IF_OPER_TESTING)
9942 		netif_testing_on(dev);
9943 	else
9944 		netif_testing_off(dev);
9945 
9946 	if (netif_carrier_ok(rootdev))
9947 		netif_carrier_on(dev);
9948 	else
9949 		netif_carrier_off(dev);
9950 }
9951 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
9952 
9953 static int netif_alloc_rx_queues(struct net_device *dev)
9954 {
9955 	unsigned int i, count = dev->num_rx_queues;
9956 	struct netdev_rx_queue *rx;
9957 	size_t sz = count * sizeof(*rx);
9958 	int err = 0;
9959 
9960 	BUG_ON(count < 1);
9961 
9962 	rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
9963 	if (!rx)
9964 		return -ENOMEM;
9965 
9966 	dev->_rx = rx;
9967 
9968 	for (i = 0; i < count; i++) {
9969 		rx[i].dev = dev;
9970 
9971 		/* XDP RX-queue setup */
9972 		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
9973 		if (err < 0)
9974 			goto err_rxq_info;
9975 	}
9976 	return 0;
9977 
9978 err_rxq_info:
9979 	/* Roll back the successful registrations and free other resources */
9980 	while (i--)
9981 		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
9982 	kvfree(dev->_rx);
9983 	dev->_rx = NULL;
9984 	return err;
9985 }
9986 
9987 static void netif_free_rx_queues(struct net_device *dev)
9988 {
9989 	unsigned int i, count = dev->num_rx_queues;
9990 
9991 	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
9992 	if (!dev->_rx)
9993 		return;
9994 
9995 	for (i = 0; i < count; i++)
9996 		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
9997 
9998 	kvfree(dev->_rx);
9999 }
10000 
10001 static void netdev_init_one_queue(struct net_device *dev,
10002 				  struct netdev_queue *queue, void *_unused)
10003 {
10004 	/* Initialize queue lock */
10005 	spin_lock_init(&queue->_xmit_lock);
10006 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10007 	queue->xmit_lock_owner = -1;
10008 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
10009 	queue->dev = dev;
10010 #ifdef CONFIG_BQL
10011 	dql_init(&queue->dql, HZ);
10012 #endif
10013 }
10014 
10015 static void netif_free_tx_queues(struct net_device *dev)
10016 {
10017 	kvfree(dev->_tx);
10018 }
10019 
10020 static int netif_alloc_netdev_queues(struct net_device *dev)
10021 {
10022 	unsigned int count = dev->num_tx_queues;
10023 	struct netdev_queue *tx;
10024 	size_t sz = count * sizeof(*tx);
10025 
10026 	if (count < 1 || count > 0xffff)
10027 		return -EINVAL;
10028 
10029 	tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10030 	if (!tx)
10031 		return -ENOMEM;
10032 
10033 	dev->_tx = tx;
10034 
10035 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
10036 	spin_lock_init(&dev->tx_global_lock);
10037 
10038 	return 0;
10039 }
10040 
10041 void netif_tx_stop_all_queues(struct net_device *dev)
10042 {
10043 	unsigned int i;
10044 
10045 	for (i = 0; i < dev->num_tx_queues; i++) {
10046 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
10047 
10048 		netif_tx_stop_queue(txq);
10049 	}
10050 }
10051 EXPORT_SYMBOL(netif_tx_stop_all_queues);
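
/*
 * Illustrative sketch (not part of the original source): drivers typically
 * stop all TX queues at the start of their ndo_stop()/reset path so that the
 * stack holds packets instead of handing them to hardware that is going
 * away.  The function name is hypothetical.
 *
 *	static int example_ndo_stop(struct net_device *dev)
 *	{
 *		netif_tx_stop_all_queues(dev);
 *		netif_carrier_off(dev);
 *		// ...quiesce the hardware and free its resources...
 *		return 0;
 *	}
 */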
10052 
10053 /**
10054  * register_netdevice() - register a network device
10055  * @dev: device to register
10056  *
10057  * Take a prepared network device structure and make it externally accessible.
10058  * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
10059  * Callers must hold the rtnl lock - you may want register_netdev()
10060  * instead of this.
10061  */
10062 int register_netdevice(struct net_device *dev)
10063 {
10064 	int ret;
10065 	struct net *net = dev_net(dev);
10066 
10067 	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
10068 		     NETDEV_FEATURE_COUNT);
10069 	BUG_ON(dev_boot_phase);
10070 	ASSERT_RTNL();
10071 
10072 	might_sleep();
10073 
10074 	/* When net_devices are persistent, this will be fatal. */
10075 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
10076 	BUG_ON(!net);
10077 
10078 	ret = ethtool_check_ops(dev->ethtool_ops);
10079 	if (ret)
10080 		return ret;
10081 
10082 	spin_lock_init(&dev->addr_list_lock);
10083 	netdev_set_addr_lockdep_class(dev);
10084 
10085 	ret = dev_get_valid_name(net, dev, dev->name);
10086 	if (ret < 0)
10087 		goto out;
10088 
10089 	ret = -ENOMEM;
10090 	dev->name_node = netdev_name_node_head_alloc(dev);
10091 	if (!dev->name_node)
10092 		goto out;
10093 
10094 	/* Init, if this function is available */
10095 	if (dev->netdev_ops->ndo_init) {
10096 		ret = dev->netdev_ops->ndo_init(dev);
10097 		if (ret) {
10098 			if (ret > 0)
10099 				ret = -EIO;
10100 			goto err_free_name;
10101 		}
10102 	}
10103 
10104 	if (((dev->hw_features | dev->features) &
10105 	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
10106 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10107 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10108 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10109 		ret = -EINVAL;
10110 		goto err_uninit;
10111 	}
10112 
10113 	ret = dev_index_reserve(net, dev->ifindex);
10114 	if (ret < 0)
10115 		goto err_uninit;
10116 	dev->ifindex = ret;
10117 
10118 	/* Transfer changeable features to wanted_features and enable
10119 	 * software offloads (GSO and GRO).
10120 	 */
10121 	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10122 	dev->features |= NETIF_F_SOFT_FEATURES;
10123 
10124 	if (dev->udp_tunnel_nic_info) {
10125 		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10126 		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10127 	}
10128 
10129 	dev->wanted_features = dev->features & dev->hw_features;
10130 
10131 	if (!(dev->flags & IFF_LOOPBACK))
10132 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
10133 
10134 	/* If IPv4 TCP segmentation offload is supported we should also
10135 	 * allow the device to enable segmenting the frame with the option
10136 	 * of ignoring a static IP ID value.  This doesn't enable the
10137 	 * feature itself but allows the user to enable it later.
10138 	 */
10139 	if (dev->hw_features & NETIF_F_TSO)
10140 		dev->hw_features |= NETIF_F_TSO_MANGLEID;
10141 	if (dev->vlan_features & NETIF_F_TSO)
10142 		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10143 	if (dev->mpls_features & NETIF_F_TSO)
10144 		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10145 	if (dev->hw_enc_features & NETIF_F_TSO)
10146 		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10147 
10148 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
10149 	 */
10150 	dev->vlan_features |= NETIF_F_HIGHDMA;
10151 
10152 	/* Make NETIF_F_SG inheritable to tunnel devices.
10153 	 */
10154 	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10155 
10156 	/* Make NETIF_F_SG inheritable to MPLS.
10157 	 */
10158 	dev->mpls_features |= NETIF_F_SG;
10159 
10160 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
10161 	ret = notifier_to_errno(ret);
10162 	if (ret)
10163 		goto err_ifindex_release;
10164 
10165 	ret = netdev_register_kobject(dev);
10166 	write_lock(&dev_base_lock);
10167 	dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
10168 	write_unlock(&dev_base_lock);
10169 	if (ret)
10170 		goto err_uninit_notify;
10171 
10172 	__netdev_update_features(dev);
10173 
10174 	/*
10175 	 *	Default initial state at registry is that the
10176 	 *	device is present.
10177 	 */
10178 
10179 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10180 
10181 	linkwatch_init_dev(dev);
10182 
10183 	dev_init_scheduler(dev);
10184 
10185 	netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10186 	list_netdevice(dev);
10187 
10188 	add_device_randomness(dev->dev_addr, dev->addr_len);
10189 
10190 	/* If the device has a permanent device address, the driver should
10191 	 * set dev_addr and also addr_assign_type should be set to
10192 	 * NET_ADDR_PERM (default value).
10193 	 */
10194 	if (dev->addr_assign_type == NET_ADDR_PERM)
10195 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10196 
10197 	/* Notify protocols, that a new device appeared. */
10198 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10199 	ret = notifier_to_errno(ret);
10200 	if (ret) {
10201 		/* Expect explicit free_netdev() on failure */
10202 		dev->needs_free_netdev = false;
10203 		unregister_netdevice_queue(dev, NULL);
10204 		goto out;
10205 	}
10206 	/*
10207 	 *	Prevent userspace races by waiting until the network
10208 	 *	device is fully setup before sending notifications.
10209 	 */
10210 	if (!dev->rtnl_link_ops ||
10211 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10212 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
10213 
10214 out:
10215 	return ret;
10216 
10217 err_uninit_notify:
10218 	call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10219 err_ifindex_release:
10220 	dev_index_release(net, dev->ifindex);
10221 err_uninit:
10222 	if (dev->netdev_ops->ndo_uninit)
10223 		dev->netdev_ops->ndo_uninit(dev);
10224 	if (dev->priv_destructor)
10225 		dev->priv_destructor(dev);
10226 err_free_name:
10227 	netdev_name_node_free(dev->name_node);
10228 	goto out;
10229 }
10230 EXPORT_SYMBOL(register_netdevice);
10231 
10232 /**
10233  *	init_dummy_netdev	- init a dummy network device for NAPI
10234  *	@dev: device to init
10235  *
10236  *	This takes a network device structure and initializes the minimum
10237  *	set of fields so it can be used to schedule NAPI polls without
10238  *	registering a full blown interface. This is to be used by drivers
10239  *	that need to tie several hardware interfaces to a single NAPI
10240  *	poll scheduler due to HW limitations.
10241  */
10242 int init_dummy_netdev(struct net_device *dev)
10243 {
10244 	/* Clear everything. Note we don't initialize spinlocks
10245 	 * as they aren't supposed to be taken by any of the
10246 	 * NAPI code and this dummy netdev is supposed to be
10247 	 * only ever used for NAPI polls.
10248 	 */
10249 	memset(dev, 0, sizeof(struct net_device));
10250 
10251 	/* make sure we BUG if trying to hit standard
10252 	 * register/unregister code path
10253 	 */
10254 	dev->reg_state = NETREG_DUMMY;
10255 
10256 	/* NAPI wants this */
10257 	INIT_LIST_HEAD(&dev->napi_list);
10258 
10259 	/* a dummy interface is started by default */
10260 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10261 	set_bit(__LINK_STATE_START, &dev->state);
10262 
10263 	/* napi_busy_loop stats accounting wants this */
10264 	dev_net_set(dev, &init_net);
10265 
10266 	/* Note: We don't allocate pcpu_refcnt for dummy devices,
10267 	 * because users of this 'device' don't need to change
10268 	 * its refcount.
10269 	 */
10270 
10271 	return 0;
10272 }
10273 EXPORT_SYMBOL_GPL(init_dummy_netdev);
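
/*
 * Illustrative only: a minimal sketch of how a driver might use
 * init_dummy_netdev() to back a shared NAPI instance, assuming the
 * three-argument netif_napi_add() of current kernels. The names
 * (myhw_priv, myhw_napi_init) are hypothetical, not part of this file.
 *
 *	struct myhw_priv {
 *		struct net_device dummy_dev;	// never registered
 *		struct napi_struct napi;
 *	};
 *
 *	static void myhw_napi_init(struct myhw_priv *priv,
 *				   int (*poll)(struct napi_struct *, int))
 *	{
 *		init_dummy_netdev(&priv->dummy_dev);
 *		netif_napi_add(&priv->dummy_dev, &priv->napi, poll);
 *		napi_enable(&priv->napi);
 *	}
 */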
10274 
10275 
10276 /**
10277  *	register_netdev	- register a network device
10278  *	@dev: device to register
10279  *
10280  *	Take a completed network device structure and add it to the kernel
10281  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10282  *	chain. 0 is returned on success. A negative errno code is returned
10283  *	on a failure to set up the device, or if the name is a duplicate.
10284  *
10285  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
10286  *	and expands the device name if you passed a format string to
10287  *	alloc_netdev.
10288  */
10289 int register_netdev(struct net_device *dev)
10290 {
10291 	int err;
10292 
10293 	if (rtnl_lock_killable())
10294 		return -EINTR;
10295 	err = register_netdevice(dev);
10296 	rtnl_unlock();
10297 	return err;
10298 }
10299 EXPORT_SYMBOL(register_netdev);
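
/*
 * Illustrative only: a typical probe-time flow built on register_netdev().
 * The names (myeth_probe, myeth_priv, myeth_netdev_ops) are hypothetical
 * and error handling is reduced to the essentials.
 *
 *	static int myeth_probe(struct device *parent)
 *	{
 *		struct net_device *ndev;
 *		int err;
 *
 *		ndev = alloc_etherdev(sizeof(struct myeth_priv));
 *		if (!ndev)
 *			return -ENOMEM;
 *
 *		ndev->netdev_ops = &myeth_netdev_ops;
 *		SET_NETDEV_DEV(ndev, parent);
 *
 *		err = register_netdev(ndev);
 *		if (err) {
 *			free_netdev(ndev);
 *			return err;
 *		}
 *		return 0;
 *	}
 */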
10300 
10301 int netdev_refcnt_read(const struct net_device *dev)
10302 {
10303 #ifdef CONFIG_PCPU_DEV_REFCNT
10304 	int i, refcnt = 0;
10305 
10306 	for_each_possible_cpu(i)
10307 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10308 	return refcnt;
10309 #else
10310 	return refcount_read(&dev->dev_refcnt);
10311 #endif
10312 }
10313 EXPORT_SYMBOL(netdev_refcnt_read);
10314 
10315 int netdev_unregister_timeout_secs __read_mostly = 10;
10316 
10317 #define WAIT_REFS_MIN_MSECS 1
10318 #define WAIT_REFS_MAX_MSECS 250
10319 /**
10320  * netdev_wait_allrefs_any - wait until all references are gone.
10321  * @list: list of net_devices to wait on
10322  *
10323  * This is called when unregistering network devices.
10324  *
10325  * Any protocol or device that holds a reference should register
10326  * for netdevice notification, and clean up and put back the
10327  * reference if they receive an UNREGISTER event.
10328  * We can get stuck here if buggy protocols don't correctly
10329  * call dev_put.
10330  */
10331 static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
10332 {
10333 	unsigned long rebroadcast_time, warning_time;
10334 	struct net_device *dev;
10335 	int wait = 0;
10336 
10337 	rebroadcast_time = warning_time = jiffies;
10338 
10339 	list_for_each_entry(dev, list, todo_list)
10340 		if (netdev_refcnt_read(dev) == 1)
10341 			return dev;
10342 
10343 	while (true) {
10344 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
10345 			rtnl_lock();
10346 
10347 			/* Rebroadcast unregister notification */
10348 			list_for_each_entry(dev, list, todo_list)
10349 				call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10350 
10351 			__rtnl_unlock();
10352 			rcu_barrier();
10353 			rtnl_lock();
10354 
10355 			list_for_each_entry(dev, list, todo_list)
10356 				if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
10357 					     &dev->state)) {
10358 					/* We must not have linkwatch events
10359 					 * pending on unregister. If this
10360 					 * happens, we simply run the queue
10361 					 * unscheduled, resulting in a noop
10362 					 * for this device.
10363 					 */
10364 					linkwatch_run_queue();
10365 					break;
10366 				}
10367 
10368 			__rtnl_unlock();
10369 
10370 			rebroadcast_time = jiffies;
10371 		}
10372 
10373 		if (!wait) {
10374 			rcu_barrier();
10375 			wait = WAIT_REFS_MIN_MSECS;
10376 		} else {
10377 			msleep(wait);
10378 			wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
10379 		}
10380 
10381 		list_for_each_entry(dev, list, todo_list)
10382 			if (netdev_refcnt_read(dev) == 1)
10383 				return dev;
10384 
10385 		if (time_after(jiffies, warning_time +
10386 			       READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
10387 			list_for_each_entry(dev, list, todo_list) {
10388 				pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10389 					 dev->name, netdev_refcnt_read(dev));
10390 				ref_tracker_dir_print(&dev->refcnt_tracker, 10);
10391 			}
10392 
10393 			warning_time = jiffies;
10394 		}
10395 	}
10396 }
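
/*
 * Illustrative only: the kind of cooperation netdev_wait_allrefs_any()
 * relies on. A hypothetical subsystem that caches a tracked reference
 * releases it from its netdevice notifier when NETDEV_UNREGISTER arrives,
 * so the refcount can reach its final value here. mysub_state and
 * mysub_find_state() are invented for the sketch.
 *
 *	static int mysub_netdev_event(struct notifier_block *nb,
 *				      unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *		struct mysub_state *st = mysub_find_state(dev); // hypothetical lookup
 *
 *		if (event == NETDEV_UNREGISTER && st) {
 *			netdev_put(dev, &st->dev_tracker);	// drop cached ref
 *			st->dev = NULL;
 *		}
 *		return NOTIFY_DONE;
 *	}
 */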
10397 
10398 /* The sequence is:
10399  *
10400  *	rtnl_lock();
10401  *	...
10402  *	register_netdevice(x1);
10403  *	register_netdevice(x2);
10404  *	...
10405  *	unregister_netdevice(y1);
10406  *	unregister_netdevice(y2);
10407  *      ...
10408  *	rtnl_unlock();
10409  *	free_netdev(y1);
10410  *	free_netdev(y2);
10411  *
10412  * We are invoked by rtnl_unlock().
10413  * This allows us to deal with problems:
10414  * 1) We can delete sysfs objects which invoke hotplug
10415  *    without deadlocking with linkwatch via keventd.
10416  * 2) Since we run with the RTNL semaphore not held, we can sleep
10417  *    safely in order to wait for the netdev refcnt to drop to zero.
10418  *
10419  * We must not return until all unregister events added during
10420  * the interval the lock was held have been completed.
10421  */
10422 void netdev_run_todo(void)
10423 {
10424 	struct net_device *dev, *tmp;
10425 	struct list_head list;
10426 #ifdef CONFIG_LOCKDEP
10427 	struct list_head unlink_list;
10428 
10429 	list_replace_init(&net_unlink_list, &unlink_list);
10430 
10431 	while (!list_empty(&unlink_list)) {
10432 		struct net_device *dev = list_first_entry(&unlink_list,
10433 							  struct net_device,
10434 							  unlink_list);
10435 		list_del_init(&dev->unlink_list);
10436 		dev->nested_level = dev->lower_level - 1;
10437 	}
10438 #endif
10439 
10440 	/* Snapshot list, allow later requests */
10441 	list_replace_init(&net_todo_list, &list);
10442 
10443 	__rtnl_unlock();
10444 
10445 	/* Wait for rcu callbacks to finish before next phase */
10446 	if (!list_empty(&list))
10447 		rcu_barrier();
10448 
10449 	list_for_each_entry_safe(dev, tmp, &list, todo_list) {
10450 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10451 			netdev_WARN(dev, "run_todo but not unregistering\n");
10452 			list_del(&dev->todo_list);
10453 			continue;
10454 		}
10455 
10456 		write_lock(&dev_base_lock);
10457 		dev->reg_state = NETREG_UNREGISTERED;
10458 		write_unlock(&dev_base_lock);
10459 		linkwatch_forget_dev(dev);
10460 	}
10461 
10462 	while (!list_empty(&list)) {
10463 		dev = netdev_wait_allrefs_any(&list);
10464 		list_del(&dev->todo_list);
10465 
10466 		/* paranoia */
10467 		BUG_ON(netdev_refcnt_read(dev) != 1);
10468 		BUG_ON(!list_empty(&dev->ptype_all));
10469 		BUG_ON(!list_empty(&dev->ptype_specific));
10470 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
10471 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10472 
10473 		if (dev->priv_destructor)
10474 			dev->priv_destructor(dev);
10475 		if (dev->needs_free_netdev)
10476 			free_netdev(dev);
10477 
10478 		if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
10479 			wake_up(&netdev_unregistering_wq);
10480 
10481 		/* Free network device */
10482 		kobject_put(&dev->dev.kobj);
10483 	}
10484 }
10485 
10486 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
10487  * all the same fields in the same order as net_device_stats, with only
10488  * the type differing, but rtnl_link_stats64 may have additional fields
10489  * at the end for newer counters.
10490  */
10491 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
10492 			     const struct net_device_stats *netdev_stats)
10493 {
10494 	size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
10495 	const atomic_long_t *src = (atomic_long_t *)netdev_stats;
10496 	u64 *dst = (u64 *)stats64;
10497 
10498 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
10499 	for (i = 0; i < n; i++)
10500 		dst[i] = (unsigned long)atomic_long_read(&src[i]);
10501 	/* zero out counters that only exist in rtnl_link_stats64 */
10502 	memset((char *)stats64 + n * sizeof(u64), 0,
10503 	       sizeof(*stats64) - n * sizeof(u64));
10504 }
10505 EXPORT_SYMBOL(netdev_stats_to_stats64);
10506 
10507 struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
10508 {
10509 	struct net_device_core_stats __percpu *p;
10510 
10511 	p = alloc_percpu_gfp(struct net_device_core_stats,
10512 			     GFP_ATOMIC | __GFP_NOWARN);
10513 
10514 	if (p && cmpxchg(&dev->core_stats, NULL, p))
10515 		free_percpu(p);
10516 
10517 	/* This READ_ONCE() pairs with the cmpxchg() above */
10518 	return READ_ONCE(dev->core_stats);
10519 }
10520 EXPORT_SYMBOL(netdev_core_stats_alloc);
10521 
10522 /**
10523  *	dev_get_stats	- get network device statistics
10524  *	@dev: device to get statistics from
10525  *	@storage: place to store stats
10526  *
10527  *	Get network statistics from device. Return @storage.
10528  *	The device driver may provide its own method by setting
10529  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
10530  *	otherwise the internal statistics structure is used.
10531  */
10532 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10533 					struct rtnl_link_stats64 *storage)
10534 {
10535 	const struct net_device_ops *ops = dev->netdev_ops;
10536 	const struct net_device_core_stats __percpu *p;
10537 
10538 	if (ops->ndo_get_stats64) {
10539 		memset(storage, 0, sizeof(*storage));
10540 		ops->ndo_get_stats64(dev, storage);
10541 	} else if (ops->ndo_get_stats) {
10542 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10543 	} else {
10544 		netdev_stats_to_stats64(storage, &dev->stats);
10545 	}
10546 
10547 	/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
10548 	p = READ_ONCE(dev->core_stats);
10549 	if (p) {
10550 		const struct net_device_core_stats *core_stats;
10551 		int i;
10552 
10553 		for_each_possible_cpu(i) {
10554 			core_stats = per_cpu_ptr(p, i);
10555 			storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
10556 			storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
10557 			storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
10558 			storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
10559 		}
10560 	}
10561 	return storage;
10562 }
10563 EXPORT_SYMBOL(dev_get_stats);
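
/*
 * Illustrative only: a driver-side ndo_get_stats64() of the kind
 * dev_get_stats() calls. It seeds the output from the legacy dev->stats
 * block and then folds in a hypothetical driver counter
 * (priv->rx_crc_errors); myeth_priv is not a real structure.
 *
 *	static void myeth_get_stats64(struct net_device *dev,
 *				      struct rtnl_link_stats64 *storage)
 *	{
 *		struct myeth_priv *priv = netdev_priv(dev);
 *
 *		netdev_stats_to_stats64(storage, &dev->stats);
 *		storage->rx_crc_errors += priv->rx_crc_errors;
 *	}
 */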
10564 
10565 /**
10566  *	dev_fetch_sw_netstats - get per-cpu network device statistics
10567  *	@s: place to store stats
10568  *	@netstats: per-cpu network stats to read from
10569  *
10570  *	Read per-cpu network statistics and populate the related fields in @s.
10571  */
10572 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10573 			   const struct pcpu_sw_netstats __percpu *netstats)
10574 {
10575 	int cpu;
10576 
10577 	for_each_possible_cpu(cpu) {
10578 		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
10579 		const struct pcpu_sw_netstats *stats;
10580 		unsigned int start;
10581 
10582 		stats = per_cpu_ptr(netstats, cpu);
10583 		do {
10584 			start = u64_stats_fetch_begin(&stats->syncp);
10585 			rx_packets = u64_stats_read(&stats->rx_packets);
10586 			rx_bytes   = u64_stats_read(&stats->rx_bytes);
10587 			tx_packets = u64_stats_read(&stats->tx_packets);
10588 			tx_bytes   = u64_stats_read(&stats->tx_bytes);
10589 		} while (u64_stats_fetch_retry(&stats->syncp, start));
10590 
10591 		s->rx_packets += rx_packets;
10592 		s->rx_bytes   += rx_bytes;
10593 		s->tx_packets += tx_packets;
10594 		s->tx_bytes   += tx_bytes;
10595 	}
10596 }
10597 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
10598 
10599 /**
10600  *	dev_get_tstats64 - ndo_get_stats64 implementation
10601  *	@dev: device to get statistics from
10602  *	@s: place to store stats
10603  *
10604  *	Populate @s from dev->stats and dev->tstats. Can be used as
10605  *	ndo_get_stats64() callback.
10606  */
10607 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10608 {
10609 	netdev_stats_to_stats64(s, &dev->stats);
10610 	dev_fetch_sw_netstats(s, dev->tstats);
10611 }
10612 EXPORT_SYMBOL_GPL(dev_get_tstats64);
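
/*
 * Illustrative only: the common "tstats" pattern dev_get_tstats64()
 * supports. A hypothetical virtual driver (mytun_*) allocates per-cpu
 * counters at init time and points its stats callback straight at
 * dev_get_tstats64().
 *
 *	static int mytun_dev_init(struct net_device *dev)
 *	{
 *		dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *		return dev->tstats ? 0 : -ENOMEM;
 *	}
 *
 *	static const struct net_device_ops mytun_netdev_ops = {
 *		.ndo_init	 = mytun_dev_init,
 *		.ndo_get_stats64 = dev_get_tstats64,
 *	};
 *
 * On the hot path the driver then updates the counters with helpers such
 * as dev_sw_netstats_rx_add(dev, skb->len).
 */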
10613 
10614 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10615 {
10616 	struct netdev_queue *queue = dev_ingress_queue(dev);
10617 
10618 #ifdef CONFIG_NET_CLS_ACT
10619 	if (queue)
10620 		return queue;
10621 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10622 	if (!queue)
10623 		return NULL;
10624 	netdev_init_one_queue(dev, queue, NULL);
10625 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10626 	RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
10627 	rcu_assign_pointer(dev->ingress_queue, queue);
10628 #endif
10629 	return queue;
10630 }
10631 
10632 static const struct ethtool_ops default_ethtool_ops;
10633 
10634 void netdev_set_default_ethtool_ops(struct net_device *dev,
10635 				    const struct ethtool_ops *ops)
10636 {
10637 	if (dev->ethtool_ops == &default_ethtool_ops)
10638 		dev->ethtool_ops = ops;
10639 }
10640 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
10641 
10642 /**
10643  * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
10644  * @dev: netdev to enable the IRQ coalescing on
10645  *
10646  * Sets a conservative default for SW IRQ coalescing. Users can use
10647  * sysfs attributes to override the default values.
10648  */
10649 void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
10650 {
10651 	WARN_ON(dev->reg_state == NETREG_REGISTERED);
10652 
10653 	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
10654 		dev->gro_flush_timeout = 20000;
10655 		dev->napi_defer_hard_irqs = 1;
10656 	}
10657 }
10658 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
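
/*
 * Illustrative only: drivers that want deferred-IRQ GRO behaviour by
 * default call this after allocation but before registration. Hypothetical
 * probe excerpt:
 *
 *	ndev = alloc_etherdev(sizeof(*priv));
 *	if (!ndev)
 *		return -ENOMEM;
 *	netdev_sw_irq_coalesce_default_on(ndev);
 *	err = register_netdev(ndev);
 */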
10659 
10660 void netdev_freemem(struct net_device *dev)
10661 {
10662 	char *addr = (char *)dev - dev->padded;
10663 
10664 	kvfree(addr);
10665 }
10666 
10667 /**
10668  * alloc_netdev_mqs - allocate network device
10669  * @sizeof_priv: size of private data to allocate space for
10670  * @name: device name format string
10671  * @name_assign_type: origin of device name
10672  * @setup: callback to initialize device
10673  * @txqs: the number of TX subqueues to allocate
10674  * @rxqs: the number of RX subqueues to allocate
10675  *
10676  * Allocates a struct net_device with private data area for driver use
10677  * and performs basic initialization.  Also allocates subqueue structs
10678  * for each queue on the device.
10679  */
10680 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
10681 		unsigned char name_assign_type,
10682 		void (*setup)(struct net_device *),
10683 		unsigned int txqs, unsigned int rxqs)
10684 {
10685 	struct net_device *dev;
10686 	unsigned int alloc_size;
10687 	struct net_device *p;
10688 
10689 	BUG_ON(strlen(name) >= sizeof(dev->name));
10690 
10691 	if (txqs < 1) {
10692 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
10693 		return NULL;
10694 	}
10695 
10696 	if (rxqs < 1) {
10697 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
10698 		return NULL;
10699 	}
10700 
10701 	alloc_size = sizeof(struct net_device);
10702 	if (sizeof_priv) {
10703 		/* ensure 32-byte alignment of private area */
10704 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
10705 		alloc_size += sizeof_priv;
10706 	}
10707 	/* ensure 32-byte alignment of whole construct */
10708 	alloc_size += NETDEV_ALIGN - 1;
10709 
10710 	p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10711 	if (!p)
10712 		return NULL;
10713 
10714 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
10715 	dev->padded = (char *)dev - (char *)p;
10716 
10717 	ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
10718 #ifdef CONFIG_PCPU_DEV_REFCNT
10719 	dev->pcpu_refcnt = alloc_percpu(int);
10720 	if (!dev->pcpu_refcnt)
10721 		goto free_dev;
10722 	__dev_hold(dev);
10723 #else
10724 	refcount_set(&dev->dev_refcnt, 1);
10725 #endif
10726 
10727 	if (dev_addr_init(dev))
10728 		goto free_pcpu;
10729 
10730 	dev_mc_init(dev);
10731 	dev_uc_init(dev);
10732 
10733 	dev_net_set(dev, &init_net);
10734 
10735 	dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
10736 	dev->xdp_zc_max_segs = 1;
10737 	dev->gso_max_segs = GSO_MAX_SEGS;
10738 	dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
10739 	dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
10740 	dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
10741 	dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
10742 	dev->tso_max_segs = TSO_MAX_SEGS;
10743 	dev->upper_level = 1;
10744 	dev->lower_level = 1;
10745 #ifdef CONFIG_LOCKDEP
10746 	dev->nested_level = 0;
10747 	INIT_LIST_HEAD(&dev->unlink_list);
10748 #endif
10749 
10750 	INIT_LIST_HEAD(&dev->napi_list);
10751 	INIT_LIST_HEAD(&dev->unreg_list);
10752 	INIT_LIST_HEAD(&dev->close_list);
10753 	INIT_LIST_HEAD(&dev->link_watch_list);
10754 	INIT_LIST_HEAD(&dev->adj_list.upper);
10755 	INIT_LIST_HEAD(&dev->adj_list.lower);
10756 	INIT_LIST_HEAD(&dev->ptype_all);
10757 	INIT_LIST_HEAD(&dev->ptype_specific);
10758 	INIT_LIST_HEAD(&dev->net_notifier_list);
10759 #ifdef CONFIG_NET_SCHED
10760 	hash_init(dev->qdisc_hash);
10761 #endif
10762 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10763 	setup(dev);
10764 
10765 	if (!dev->tx_queue_len) {
10766 		dev->priv_flags |= IFF_NO_QUEUE;
10767 		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10768 	}
10769 
10770 	dev->num_tx_queues = txqs;
10771 	dev->real_num_tx_queues = txqs;
10772 	if (netif_alloc_netdev_queues(dev))
10773 		goto free_all;
10774 
10775 	dev->num_rx_queues = rxqs;
10776 	dev->real_num_rx_queues = rxqs;
10777 	if (netif_alloc_rx_queues(dev))
10778 		goto free_all;
10779 
10780 	strcpy(dev->name, name);
10781 	dev->name_assign_type = name_assign_type;
10782 	dev->group = INIT_NETDEV_GROUP;
10783 	if (!dev->ethtool_ops)
10784 		dev->ethtool_ops = &default_ethtool_ops;
10785 
10786 	nf_hook_netdev_init(dev);
10787 
10788 	return dev;
10789 
10790 free_all:
10791 	free_netdev(dev);
10792 	return NULL;
10793 
10794 free_pcpu:
10795 #ifdef CONFIG_PCPU_DEV_REFCNT
10796 	free_percpu(dev->pcpu_refcnt);
10797 free_dev:
10798 #endif
10799 	netdev_freemem(dev);
10800 	return NULL;
10801 }
10802 EXPORT_SYMBOL(alloc_netdev_mqs);
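
/*
 * Illustrative only: allocating a multiqueue Ethernet device directly with
 * alloc_netdev_mqs(). Most drivers reach this through wrappers such as
 * alloc_etherdev_mqs(); the queue counts, name template and private struct
 * below are hypothetical.
 *
 *	struct net_device *ndev;
 *
 *	ndev = alloc_netdev_mqs(sizeof(struct myeth_priv), "myeth%d",
 *				NET_NAME_UNKNOWN, ether_setup, 8, 8);
 *	if (!ndev)
 *		return -ENOMEM;
 */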
10803 
10804 /**
10805  * free_netdev - free network device
10806  * @dev: device
10807  *
10808  * This function does the last stage of destroying an allocated device
10809  * interface. The reference to the device object is released. If this
10810  * is the last reference then it will be freed. Must be called in process
10811  * context.
10812  */
10813 void free_netdev(struct net_device *dev)
10814 {
10815 	struct napi_struct *p, *n;
10816 
10817 	might_sleep();
10818 
10819 	/* When called immediately after register_netdevice() failed, the unwind
10820 	 * handling may still be dismantling the device. Handle that case by
10821 	 * deferring the free.
10822 	 */
10823 	if (dev->reg_state == NETREG_UNREGISTERING) {
10824 		ASSERT_RTNL();
10825 		dev->needs_free_netdev = true;
10826 		return;
10827 	}
10828 
10829 	netif_free_tx_queues(dev);
10830 	netif_free_rx_queues(dev);
10831 
10832 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10833 
10834 	/* Flush device addresses */
10835 	dev_addr_flush(dev);
10836 
10837 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10838 		netif_napi_del(p);
10839 
10840 	ref_tracker_dir_exit(&dev->refcnt_tracker);
10841 #ifdef CONFIG_PCPU_DEV_REFCNT
10842 	free_percpu(dev->pcpu_refcnt);
10843 	dev->pcpu_refcnt = NULL;
10844 #endif
10845 	free_percpu(dev->core_stats);
10846 	dev->core_stats = NULL;
10847 	free_percpu(dev->xdp_bulkq);
10848 	dev->xdp_bulkq = NULL;
10849 
10850 	/*  Compatibility with error handling in drivers */
10851 	if (dev->reg_state == NETREG_UNINITIALIZED) {
10852 		netdev_freemem(dev);
10853 		return;
10854 	}
10855 
10856 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10857 	dev->reg_state = NETREG_RELEASED;
10858 
10859 	/* will free via device release */
10860 	put_device(&dev->dev);
10861 }
10862 EXPORT_SYMBOL(free_netdev);
10863 
10864 /**
10865  *	synchronize_net -  Synchronize with packet receive processing
10866  *
10867  *	Wait for packets currently being received to be done.
10868  *	Does not block later packets from starting.
10869  */
10870 void synchronize_net(void)
10871 {
10872 	might_sleep();
10873 	if (rtnl_is_locked())
10874 		synchronize_rcu_expedited();
10875 	else
10876 		synchronize_rcu();
10877 }
10878 EXPORT_SYMBOL(synchronize_net);
10879 
10880 /**
10881  *	unregister_netdevice_queue - remove device from the kernel
10882  *	@dev: device
10883  *	@head: list
10884  *
10885  *	This function shuts down a device interface and removes it
10886  *	from the kernel tables.
10887  *	If @head is not NULL, the device is queued to be unregistered later.
10888  *
10889  *	Callers must hold the rtnl semaphore.  You may want
10890  *	unregister_netdev() instead of this.
10891  */
10892 
10893 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10894 {
10895 	ASSERT_RTNL();
10896 
10897 	if (head) {
10898 		list_move_tail(&dev->unreg_list, head);
10899 	} else {
10900 		LIST_HEAD(single);
10901 
10902 		list_add(&dev->unreg_list, &single);
10903 		unregister_netdevice_many(&single);
10904 	}
10905 }
10906 EXPORT_SYMBOL(unregister_netdevice_queue);
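
/*
 * Illustrative only: queueing several devices for one batched unregister
 * under a single rtnl_lock() section, the pattern this helper exists for.
 * dev_a and dev_b stand in for whatever devices the caller owns.
 *
 *	LIST_HEAD(unreg_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev_a, &unreg_list);
 *	unregister_netdevice_queue(dev_b, &unreg_list);
 *	unregister_netdevice_many(&unreg_list);
 *	rtnl_unlock();
 */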
10907 
10908 void unregister_netdevice_many_notify(struct list_head *head,
10909 				      u32 portid, const struct nlmsghdr *nlh)
10910 {
10911 	struct net_device *dev, *tmp;
10912 	LIST_HEAD(close_head);
10913 
10914 	BUG_ON(dev_boot_phase);
10915 	ASSERT_RTNL();
10916 
10917 	if (list_empty(head))
10918 		return;
10919 
10920 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
10921 		/* Some devices call without registering
10922 		 * for initialization unwind. Remove those
10923 		 * devices and proceed with the remaining.
10924 		 */
10925 		if (dev->reg_state == NETREG_UNINITIALIZED) {
10926 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
10927 				 dev->name, dev);
10928 
10929 			WARN_ON(1);
10930 			list_del(&dev->unreg_list);
10931 			continue;
10932 		}
10933 		dev->dismantle = true;
10934 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
10935 	}
10936 
10937 	/* If device is running, close it first. */
10938 	list_for_each_entry(dev, head, unreg_list)
10939 		list_add_tail(&dev->close_list, &close_head);
10940 	dev_close_many(&close_head, true);
10941 
10942 	list_for_each_entry(dev, head, unreg_list) {
10943 		/* And unlink it from device chain. */
10944 		write_lock(&dev_base_lock);
10945 		unlist_netdevice(dev, false);
10946 		dev->reg_state = NETREG_UNREGISTERING;
10947 		write_unlock(&dev_base_lock);
10948 	}
10949 	flush_all_backlogs();
10950 
10951 	synchronize_net();
10952 
10953 	list_for_each_entry(dev, head, unreg_list) {
10954 		struct sk_buff *skb = NULL;
10955 
10956 		/* Shutdown queueing discipline. */
10957 		dev_shutdown(dev);
10958 		dev_tcx_uninstall(dev);
10959 		dev_xdp_uninstall(dev);
10960 		bpf_dev_bound_netdev_unregister(dev);
10961 
10962 		netdev_offload_xstats_disable_all(dev);
10963 
10964 		/* Notify protocols that we are about to destroy
10965 		 * this device. They should clean all the things.
10966 		 */
10967 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10968 
10969 		if (!dev->rtnl_link_ops ||
10970 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10971 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
10972 						     GFP_KERNEL, NULL, 0,
10973 						     portid, nlh);
10974 
10975 		/*
10976 		 *	Flush the unicast and multicast chains
10977 		 */
10978 		dev_uc_flush(dev);
10979 		dev_mc_flush(dev);
10980 
10981 		netdev_name_node_alt_flush(dev);
10982 		netdev_name_node_free(dev->name_node);
10983 
10984 		call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10985 
10986 		if (dev->netdev_ops->ndo_uninit)
10987 			dev->netdev_ops->ndo_uninit(dev);
10988 
10989 		if (skb)
10990 			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
10991 
10992 		/* Notifier chain MUST detach all upper devices from us. */
10993 		WARN_ON(netdev_has_any_upper_dev(dev));
10994 		WARN_ON(netdev_has_any_lower_dev(dev));
10995 
10996 		/* Remove entries from kobject tree */
10997 		netdev_unregister_kobject(dev);
10998 #ifdef CONFIG_XPS
10999 		/* Remove XPS queueing entries */
11000 		netif_reset_xps_queues_gt(dev, 0);
11001 #endif
11002 	}
11003 
11004 	synchronize_net();
11005 
11006 	list_for_each_entry(dev, head, unreg_list) {
11007 		netdev_put(dev, &dev->dev_registered_tracker);
11008 		net_set_todo(dev);
11009 	}
11010 
11011 	list_del(head);
11012 }
11013 
11014 /**
11015  *	unregister_netdevice_many - unregister many devices
11016  *	@head: list of devices
11017  *
11018  *  Note: As most callers use a stack-allocated list_head,
11019  *  we force a list_del() to make sure the stack won't be corrupted later.
11020  */
11021 void unregister_netdevice_many(struct list_head *head)
11022 {
11023 	unregister_netdevice_many_notify(head, 0, NULL);
11024 }
11025 EXPORT_SYMBOL(unregister_netdevice_many);
11026 
11027 /**
11028  *	unregister_netdev - remove device from the kernel
11029  *	@dev: device
11030  *
11031  *	This function shuts down a device interface and removes it
11032  *	from the kernel tables.
11033  *
11034  *	This is just a wrapper for unregister_netdevice that takes
11035  *	the rtnl semaphore.  In general you want to use this and not
11036  *	unregister_netdevice.
11037  */
11038 void unregister_netdev(struct net_device *dev)
11039 {
11040 	rtnl_lock();
11041 	unregister_netdevice(dev);
11042 	rtnl_unlock();
11043 }
11044 EXPORT_SYMBOL(unregister_netdev);
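
/*
 * Illustrative only: the usual driver remove path. unregister_netdev()
 * takes the rtnl lock itself; the device memory is then released via
 * free_netdev(). myeth_remove and the drvdata layout are hypothetical.
 *
 *	static void myeth_remove(struct device *parent)
 *	{
 *		struct net_device *ndev = dev_get_drvdata(parent);
 *
 *		unregister_netdev(ndev);
 *		free_netdev(ndev);
 *	}
 */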
11045 
11046 /**
11047  *	__dev_change_net_namespace - move device to a different network namespace
11048  *	@dev: device
11049  *	@net: network namespace
11050  *	@pat: If not NULL name pattern to try if the current device name
11051  *	      is already taken in the destination network namespace.
11052  *	@new_ifindex: If not zero, specifies device index in the target
11053  *	              namespace.
11054  *
11055  *	This function shuts down a device interface and moves it
11056  *	to a new network namespace. On success 0 is returned, on
11057  *	a failure a negative errno code is returned.
11058  *
11059  *	Callers must hold the rtnl semaphore.
11060  */
11061 
11062 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
11063 			       const char *pat, int new_ifindex)
11064 {
11065 	struct netdev_name_node *name_node;
11066 	struct net *net_old = dev_net(dev);
11067 	char new_name[IFNAMSIZ] = {};
11068 	int err, new_nsid;
11069 
11070 	ASSERT_RTNL();
11071 
11072 	/* Don't allow namespace local devices to be moved. */
11073 	err = -EINVAL;
11074 	if (dev->features & NETIF_F_NETNS_LOCAL)
11075 		goto out;
11076 
11077 	/* Ensure the device has been registered */
11078 	if (dev->reg_state != NETREG_REGISTERED)
11079 		goto out;
11080 
11081 	/* Get out if there is nothing to do */
11082 	err = 0;
11083 	if (net_eq(net_old, net))
11084 		goto out;
11085 
11086 	/* Pick the destination device name, and ensure
11087 	 * we can use it in the destination network namespace.
11088 	 */
11089 	err = -EEXIST;
11090 	if (netdev_name_in_use(net, dev->name)) {
11091 		/* We get here if we can't use the current device name */
11092 		if (!pat)
11093 			goto out;
11094 		err = dev_prep_valid_name(net, dev, pat, new_name);
11095 		if (err < 0)
11096 			goto out;
11097 	}
11098 	/* Check that none of the altnames conflicts. */
11099 	err = -EEXIST;
11100 	netdev_for_each_altname(dev, name_node)
11101 		if (netdev_name_in_use(net, name_node->name))
11102 			goto out;
11103 
11104 	/* Check that new_ifindex isn't used yet. */
11105 	if (new_ifindex) {
11106 		err = dev_index_reserve(net, new_ifindex);
11107 		if (err < 0)
11108 			goto out;
11109 	} else {
11110 		/* If there is an ifindex conflict assign a new one */
11111 		err = dev_index_reserve(net, dev->ifindex);
11112 		if (err == -EBUSY)
11113 			err = dev_index_reserve(net, 0);
11114 		if (err < 0)
11115 			goto out;
11116 		new_ifindex = err;
11117 	}
11118 
11119 	/*
11120 	 * And now a mini version of register_netdevice() and unregister_netdevice().
11121 	 */
11122 
11123 	/* If device is running close it first. */
11124 	dev_close(dev);
11125 
11126 	/* And unlink it from device chain */
11127 	unlist_netdevice(dev, true);
11128 
11129 	synchronize_net();
11130 
11131 	/* Shutdown queueing discipline. */
11132 	dev_shutdown(dev);
11133 
11134 	/* Notify protocols that we are about to destroy
11135 	 * this device. They should clean all the things.
11136 	 *
11137 	 * Note that dev->reg_state stays at NETREG_REGISTERED.
11138 	 * This is wanted because this way 8021q and macvlan know
11139 	 * the device is just moving and can keep their slaves up.
11140 	 */
11141 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11142 	rcu_barrier();
11143 
11144 	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
11145 
11146 	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
11147 			    new_ifindex);
11148 
11149 	/*
11150 	 *	Flush the unicast and multicast chains
11151 	 */
11152 	dev_uc_flush(dev);
11153 	dev_mc_flush(dev);
11154 
11155 	/* Send a netdev-removed uevent to the old namespace */
11156 	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11157 	netdev_adjacent_del_links(dev);
11158 
11159 	/* Move per-net netdevice notifiers that are following the netdevice */
11160 	move_netdevice_notifiers_dev_net(dev, net);
11161 
11162 	/* Actually switch the network namespace */
11163 	dev_net_set(dev, net);
11164 	dev->ifindex = new_ifindex;
11165 
11166 	/* Send a netdev-add uevent to the new namespace */
11167 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11168 	netdev_adjacent_add_links(dev);
11169 
11170 	if (new_name[0]) /* Rename the netdev to prepared name */
11171 		strscpy(dev->name, new_name, IFNAMSIZ);
11172 
11173 	/* Fixup kobjects */
11174 	err = device_rename(&dev->dev, dev->name);
11175 	WARN_ON(err);
11176 
11177 	/* Adapt owner in case owning user namespace of target network
11178 	 * namespace is different from the original one.
11179 	 */
11180 	err = netdev_change_owner(dev, net_old, net);
11181 	WARN_ON(err);
11182 
11183 	/* Add the device back in the hashes */
11184 	list_netdevice(dev);
11185 
11186 	/* Notify protocols, that a new device appeared. */
11187 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
11188 
11189 	/*
11190 	 *	Prevent userspace races by waiting until the network
11191 	 *	device is fully set up before sending notifications.
11192 	 */
11193 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
11194 
11195 	synchronize_net();
11196 	err = 0;
11197 out:
11198 	return err;
11199 }
11200 EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
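
/*
 * Illustrative only: callers normally go through dev_change_net_namespace(),
 * which passes new_ifindex == 0 so a conflicting index is reallocated
 * automatically. Typical rtnl-held usage (error handling elided):
 *
 *	err = dev_change_net_namespace(dev, dest_net, "eth%d");
 */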
11201 
11202 static int dev_cpu_dead(unsigned int oldcpu)
11203 {
11204 	struct sk_buff **list_skb;
11205 	struct sk_buff *skb;
11206 	unsigned int cpu;
11207 	struct softnet_data *sd, *oldsd, *remsd = NULL;
11208 
11209 	local_irq_disable();
11210 	cpu = smp_processor_id();
11211 	sd = &per_cpu(softnet_data, cpu);
11212 	oldsd = &per_cpu(softnet_data, oldcpu);
11213 
11214 	/* Find end of our completion_queue. */
11215 	list_skb = &sd->completion_queue;
11216 	while (*list_skb)
11217 		list_skb = &(*list_skb)->next;
11218 	/* Append completion queue from offline CPU. */
11219 	*list_skb = oldsd->completion_queue;
11220 	oldsd->completion_queue = NULL;
11221 
11222 	/* Append output queue from offline CPU. */
11223 	if (oldsd->output_queue) {
11224 		*sd->output_queue_tailp = oldsd->output_queue;
11225 		sd->output_queue_tailp = oldsd->output_queue_tailp;
11226 		oldsd->output_queue = NULL;
11227 		oldsd->output_queue_tailp = &oldsd->output_queue;
11228 	}
11229 	/* Append NAPI poll list from offline CPU, with one exception :
11230 	 * process_backlog() must be called by cpu owning percpu backlog.
11231 	 * We properly handle process_queue & input_pkt_queue later.
11232 	 */
11233 	while (!list_empty(&oldsd->poll_list)) {
11234 		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11235 							    struct napi_struct,
11236 							    poll_list);
11237 
11238 		list_del_init(&napi->poll_list);
11239 		if (napi->poll == process_backlog)
11240 			napi->state = 0;
11241 		else
11242 			____napi_schedule(sd, napi);
11243 	}
11244 
11245 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
11246 	local_irq_enable();
11247 
11248 #ifdef CONFIG_RPS
11249 	remsd = oldsd->rps_ipi_list;
11250 	oldsd->rps_ipi_list = NULL;
11251 #endif
11252 	/* send out pending IPI's on offline CPU */
11253 	net_rps_send_ipi(remsd);
11254 
11255 	/* Process offline CPU's input_pkt_queue */
11256 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11257 		netif_rx(skb);
11258 		input_queue_head_incr(oldsd);
11259 	}
11260 	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11261 		netif_rx(skb);
11262 		input_queue_head_incr(oldsd);
11263 	}
11264 
11265 	return 0;
11266 }
11267 
11268 /**
11269  *	netdev_increment_features - increment feature set by one
11270  *	@all: current feature set
11271  *	@one: new feature set
11272  *	@mask: mask feature set
11273  *
11274  *	Computes a new feature set after adding a device with feature set
11275  *	@one to the master device with current feature set @all.  Will not
11276  *	enable anything that is off in @mask. Returns the new feature set.
11277  */
11278 netdev_features_t netdev_increment_features(netdev_features_t all,
11279 	netdev_features_t one, netdev_features_t mask)
11280 {
11281 	if (mask & NETIF_F_HW_CSUM)
11282 		mask |= NETIF_F_CSUM_MASK;
11283 	mask |= NETIF_F_VLAN_CHALLENGED;
11284 
11285 	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
11286 	all &= one | ~NETIF_F_ALL_FOR_ALL;
11287 
11288 	/* If one device supports hw checksumming, set for all. */
11289 	if (all & NETIF_F_HW_CSUM)
11290 		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
11291 
11292 	return all;
11293 }
11294 EXPORT_SYMBOL(netdev_increment_features);
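
/*
 * Illustrative only: how an aggregating (bond/team-style) driver might fold
 * lower-device feature sets together with netdev_increment_features(). The
 * starting set and the mask shown here are simplified placeholders, not the
 * exact values real bonding code uses.
 *
 *	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *	struct net_device *lower;
 *	struct list_head *iter;
 *
 *	netdev_for_each_lower_dev(master, lower, iter)
 *		features = netdev_increment_features(features,
 *						     lower->features,
 *						     master->features);
 */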
11295 
11296 static struct hlist_head * __net_init netdev_create_hash(void)
11297 {
11298 	int i;
11299 	struct hlist_head *hash;
11300 
11301 	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
11302 	if (hash != NULL)
11303 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
11304 			INIT_HLIST_HEAD(&hash[i]);
11305 
11306 	return hash;
11307 }
11308 
11309 /* Initialize per network namespace state */
11310 static int __net_init netdev_init(struct net *net)
11311 {
11312 	BUILD_BUG_ON(GRO_HASH_BUCKETS >
11313 		     8 * sizeof_field(struct napi_struct, gro_bitmask));
11314 
11315 	INIT_LIST_HEAD(&net->dev_base_head);
11316 
11317 	net->dev_name_head = netdev_create_hash();
11318 	if (net->dev_name_head == NULL)
11319 		goto err_name;
11320 
11321 	net->dev_index_head = netdev_create_hash();
11322 	if (net->dev_index_head == NULL)
11323 		goto err_idx;
11324 
11325 	xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1);
11326 
11327 	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
11328 
11329 	return 0;
11330 
11331 err_idx:
11332 	kfree(net->dev_name_head);
11333 err_name:
11334 	return -ENOMEM;
11335 }
11336 
11337 /**
11338  *	netdev_drivername - network driver for the device
11339  *	@dev: network device
11340  *
11341  *	Determine network driver for device.
11342  */
11343 const char *netdev_drivername(const struct net_device *dev)
11344 {
11345 	const struct device_driver *driver;
11346 	const struct device *parent;
11347 	const char *empty = "";
11348 
11349 	parent = dev->dev.parent;
11350 	if (!parent)
11351 		return empty;
11352 
11353 	driver = parent->driver;
11354 	if (driver && driver->name)
11355 		return driver->name;
11356 	return empty;
11357 }
11358 
11359 static void __netdev_printk(const char *level, const struct net_device *dev,
11360 			    struct va_format *vaf)
11361 {
11362 	if (dev && dev->dev.parent) {
11363 		dev_printk_emit(level[1] - '0',
11364 				dev->dev.parent,
11365 				"%s %s %s%s: %pV",
11366 				dev_driver_string(dev->dev.parent),
11367 				dev_name(dev->dev.parent),
11368 				netdev_name(dev), netdev_reg_state(dev),
11369 				vaf);
11370 	} else if (dev) {
11371 		printk("%s%s%s: %pV",
11372 		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
11373 	} else {
11374 		printk("%s(NULL net_device): %pV", level, vaf);
11375 	}
11376 }
11377 
11378 void netdev_printk(const char *level, const struct net_device *dev,
11379 		   const char *format, ...)
11380 {
11381 	struct va_format vaf;
11382 	va_list args;
11383 
11384 	va_start(args, format);
11385 
11386 	vaf.fmt = format;
11387 	vaf.va = &args;
11388 
11389 	__netdev_printk(level, dev, &vaf);
11390 
11391 	va_end(args);
11392 }
11393 EXPORT_SYMBOL(netdev_printk);
11394 
11395 #define define_netdev_printk_level(func, level)			\
11396 void func(const struct net_device *dev, const char *fmt, ...)	\
11397 {								\
11398 	struct va_format vaf;					\
11399 	va_list args;						\
11400 								\
11401 	va_start(args, fmt);					\
11402 								\
11403 	vaf.fmt = fmt;						\
11404 	vaf.va = &args;						\
11405 								\
11406 	__netdev_printk(level, dev, &vaf);			\
11407 								\
11408 	va_end(args);						\
11409 }								\
11410 EXPORT_SYMBOL(func);
11411 
11412 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
11413 define_netdev_printk_level(netdev_alert, KERN_ALERT);
11414 define_netdev_printk_level(netdev_crit, KERN_CRIT);
11415 define_netdev_printk_level(netdev_err, KERN_ERR);
11416 define_netdev_printk_level(netdev_warn, KERN_WARNING);
11417 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
11418 define_netdev_printk_level(netdev_info, KERN_INFO);
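
/*
 * Illustrative only: these wrappers are used like dev_err()/dev_info() but
 * automatically prefix the driver, bus device and netdev name, e.g.
 *
 *	netdev_err(ndev, "TX timeout on queue %u\n", txqueue);
 *	netdev_info(ndev, "link up, %u Mbps, %s duplex\n", speed,
 *		    duplex ? "full" : "half");
 */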
11419 
11420 static void __net_exit netdev_exit(struct net *net)
11421 {
11422 	kfree(net->dev_name_head);
11423 	kfree(net->dev_index_head);
11424 	xa_destroy(&net->dev_by_index);
11425 	if (net != &init_net)
11426 		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
11427 }
11428 
11429 static struct pernet_operations __net_initdata netdev_net_ops = {
11430 	.init = netdev_init,
11431 	.exit = netdev_exit,
11432 };
11433 
11434 static void __net_exit default_device_exit_net(struct net *net)
11435 {
11436 	struct net_device *dev, *aux;
11437 	/*
11438 	 * Push all migratable network devices back to the
11439 	 * initial network namespace
11440 	 */
11441 	ASSERT_RTNL();
11442 	for_each_netdev_safe(net, dev, aux) {
11443 		int err;
11444 		char fb_name[IFNAMSIZ];
11445 
11446 		/* Ignore unmoveable devices (i.e. loopback) */
11447 		if (dev->features & NETIF_F_NETNS_LOCAL)
11448 			continue;
11449 
11450 		/* Leave virtual devices for the generic cleanup */
11451 		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
11452 			continue;
11453 
11454 		/* Push remaining network devices to init_net */
11455 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
11456 		if (netdev_name_in_use(&init_net, fb_name))
11457 			snprintf(fb_name, IFNAMSIZ, "dev%%d");
11458 		err = dev_change_net_namespace(dev, &init_net, fb_name);
11459 		if (err) {
11460 			pr_emerg("%s: failed to move %s to init_net: %d\n",
11461 				 __func__, dev->name, err);
11462 			BUG();
11463 		}
11464 	}
11465 }
11466 
11467 static void __net_exit default_device_exit_batch(struct list_head *net_list)
11468 {
11469 	/* At exit, all network devices must be removed from a network
11470 	 * namespace.  Do this in the reverse order of registration.
11471 	 * Do this across as many network namespaces as possible to
11472 	 * improve batching efficiency.
11473 	 */
11474 	struct net_device *dev;
11475 	struct net *net;
11476 	LIST_HEAD(dev_kill_list);
11477 
11478 	rtnl_lock();
11479 	list_for_each_entry(net, net_list, exit_list) {
11480 		default_device_exit_net(net);
11481 		cond_resched();
11482 	}
11483 
11484 	list_for_each_entry(net, net_list, exit_list) {
11485 		for_each_netdev_reverse(net, dev) {
11486 			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11487 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11488 			else
11489 				unregister_netdevice_queue(dev, &dev_kill_list);
11490 		}
11491 	}
11492 	unregister_netdevice_many(&dev_kill_list);
11493 	rtnl_unlock();
11494 }
11495 
11496 static struct pernet_operations __net_initdata default_device_ops = {
11497 	.exit_batch = default_device_exit_batch,
11498 };
11499 
11500 /*
11501  *	Initialize the DEV module. At boot time this walks the device list and
11502  *	unhooks any devices that fail to initialise (normally hardware not
11503  *	present) and leaves us with a valid list of present and active devices.
11504  *
11505  */
11506 
11507 /*
11508  *       This is called single threaded during boot, so no need
11509  *       to take the rtnl semaphore.
11510  */
11511 static int __init net_dev_init(void)
11512 {
11513 	int i, rc = -ENOMEM;
11514 
11515 	BUG_ON(!dev_boot_phase);
11516 
11517 	if (dev_proc_init())
11518 		goto out;
11519 
11520 	if (netdev_kobject_init())
11521 		goto out;
11522 
11523 	INIT_LIST_HEAD(&ptype_all);
11524 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
11525 		INIT_LIST_HEAD(&ptype_base[i]);
11526 
11527 	if (register_pernet_subsys(&netdev_net_ops))
11528 		goto out;
11529 
11530 	/*
11531 	 *	Initialise the packet receive queues.
11532 	 */
11533 
11534 	for_each_possible_cpu(i) {
11535 		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11536 		struct softnet_data *sd = &per_cpu(softnet_data, i);
11537 
11538 		INIT_WORK(flush, flush_backlog);
11539 
11540 		skb_queue_head_init(&sd->input_pkt_queue);
11541 		skb_queue_head_init(&sd->process_queue);
11542 #ifdef CONFIG_XFRM_OFFLOAD
11543 		skb_queue_head_init(&sd->xfrm_backlog);
11544 #endif
11545 		INIT_LIST_HEAD(&sd->poll_list);
11546 		sd->output_queue_tailp = &sd->output_queue;
11547 #ifdef CONFIG_RPS
11548 		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11549 		sd->cpu = i;
11550 #endif
11551 		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
11552 		spin_lock_init(&sd->defer_lock);
11553 
11554 		init_gro_hash(&sd->backlog);
11555 		sd->backlog.poll = process_backlog;
11556 		sd->backlog.weight = weight_p;
11557 	}
11558 
11559 	dev_boot_phase = 0;
11560 
11561 	/* The loopback device is special: if any other network device
11562 	 * is present in a network namespace, the loopback device must
11563 	 * be present too. Since we now dynamically allocate and free the
11564 	 * loopback device, ensure this invariant is maintained by
11565 	 * keeping the loopback device as the first device on the
11566 	 * list of network devices, so that it is the first device
11567 	 * that appears and the last network device
11568 	 * that disappears.
11569 	 */
11570 	if (register_pernet_device(&loopback_net_ops))
11571 		goto out;
11572 
11573 	if (register_pernet_device(&default_device_ops))
11574 		goto out;
11575 
11576 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11577 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11578 
11579 	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11580 				       NULL, dev_cpu_dead);
11581 	WARN_ON(rc < 0);
11582 	rc = 0;
11583 out:
11584 	return rc;
11585 }
11586 
11587 subsys_initcall(net_dev_init);
11588