// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      NET3    Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *              Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki   :	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *              Pekka Riikonen  :	Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *				        - netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>

#include "net-sysfs.h"

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See register_netdevice() and unregister_netdevice() for example
 * usages; both must be called with the rtnl semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

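/* Illustrative sketch (not part of the original source): a pure reader
 * can walk the device list under RCU alone, without taking dev_base_lock:
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		use(dev);		// use() is a hypothetical helper
 *	rcu_read_unlock();
 *
 * A writer instead takes the rtnl semaphore and additionally write-locks
 * dev_base_lock around the actual list update, as list_netdevice()
 * below does.
 */
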
static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
						       const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							    const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_create);

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	netdev_name_node_del(name_node);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_destroy);

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and checking of protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation: if a packet-mangling protocol handler were first on
 *	the list, it could not detect that the packet is cloned and should
 *	be copied-on-write, so it would modify the shared data and
 *	subsequent readers would see a corrupted packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next packet is received).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

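/* Usage sketch (illustrative only): register a tap that sees every
 * protocol, in the style of af_packet. The handler and all names below
 * are hypothetical.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// inspect skb; we own this clone, so consume it when done
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = htons(ETH_P_ALL),	// all protocols -> ptype_all
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);
 *	...
 *	dev_remove_pack(&my_ptype);	// sleeps; safe to free afterwards
 */
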
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next packet is received).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed
 *	&packet_offload is removed from the kernel lists and can be freed or
 *	reused once this function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
		return NULL;

	return &stack->path[k];
}

int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
		.dev	= dev,
		.daddr	= daddr,
	};
	struct net_device_path *path;
	int ret = 0;

	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		last_dev = ctx.dev;
		path = dev_fwd_path(stack);
		if (!path)
			return -1;

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
		if (ret < 0)
			return -1;

		if (WARN_ON_ONCE(last_dev == ctx.dev))
			return -1;
	}
	path = dev_fwd_path(stack);
	if (!path)
		return -1;
	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx.dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu	- find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

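/* Usage sketch (illustrative): dev_get_by_name() returns a held
 * reference, which the caller must drop with dev_put(). The device name
 * below is hypothetical.
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *
 *	if (dev) {
 *		// ... use dev; it cannot be freed while we hold the ref ...
 *		dev_put(dev);
 *	}
 */
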
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if the device is not found. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if the device is not found. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if the device is not found. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns a pointer to the device,
 *	or %NULL if the device is not found. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	down_read(&devnet_rename_sem);
	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	strcpy(name, dev->name);

	ret = 0;
out:
	rcu_read_unlock();
	up_read(&devnet_rename_sem);
	return ret;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device, or NULL if the device is not found.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer to
 *	the first matching device, or NULL if no device is found. Must be
 *	called inside rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

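/* Illustrative examples of what dev_valid_name() accepts and rejects,
 * following the checks above (the names are hypothetical):
 *
 *	dev_valid_name("eth0")   -> true
 *	dev_valid_name("")       -> false  (empty name)
 *	dev_valid_name(".")      -> false  (reserved path component)
 *	dev_valid_name("a/b")    -> false  ('/' would break sysfs paths)
 *	dev_valid_name("my if")  -> false  (whitespace)
 *
 * A name of IFNAMSIZ or more characters is also rejected.
 */
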
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - e.g. "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			struct netdev_name_node *name_node;
			list_for_each_entry(name_node, &d->name_node->list, list) {
				if (!sscanf(name_node->name, name, &i))
					continue;
				if (i < 0 || i >= max_netdevices)
					continue;

				/*  avoid cases where sscanf is not exact inverse of printf */
				snprintf(buf, IFNAMSIZ, name, i);
				if (!strncmp(buf, name_node->name, IFNAMSIZ))
					set_bit(i, inuse);
			}
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - e.g. "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);

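/* Usage sketch (illustrative): ask for the first free "eth%d" slot,
 * holding the rtnl lock as the comment above requires.
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		return err;
 *	// dev->name is now e.g. "eth3" and err holds the unit number (3)
 */
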
static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d"
 *	can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	/* Some auto-enslaved devices e.g. failover slaves are
	 * special, as userspace might rename the device after
	 * the interface had been brought up and running since
	 * the point kernel initiated auto-enslavement. Allow
	 * live name change even when these slave devices are
	 * up and running.
	 *
	 * Typically, users of these auto-enslaving devices
	 * don't actually care about slave name change, as
	 * they are supposed to operate on master interface
	 * directly.
	 */
	if (dev->flags & IFF_UP &&
	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
		return -EBUSY;

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	netdev_name_node_del(dev->name_node);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	netdev_name_node_add(net, dev->name_node);
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	Get the ifalias for a device. The caller must make sure dev cannot
 *	go away, e.g. by holding the RCU read lock or owning a reference
 *	count on the device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

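/* Usage sketch (illustrative): bringing an interface up from kernel code
 * requires the RTNL lock, since __dev_open() asserts it:
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);	// NULL: no extended ack needed
 *	rtnl_unlock();
 */
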
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device-specific close. This cannot fail.
		 *	It is only called if the device is UP.
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}

static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}

void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);

/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
 *	called under RTNL.  This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}

const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val)						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
	N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
	N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
	N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);

1689 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1690 				   struct net_device *dev)
1691 {
1692 	struct netdev_notifier_info info = {
1693 		.dev = dev,
1694 	};
1695 
1696 	return nb->notifier_call(nb, val, &info);
1697 }
1698 
1699 static int call_netdevice_register_notifiers(struct notifier_block *nb,
1700 					     struct net_device *dev)
1701 {
1702 	int err;
1703 
1704 	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1705 	err = notifier_to_errno(err);
1706 	if (err)
1707 		return err;
1708 
1709 	if (!(dev->flags & IFF_UP))
1710 		return 0;
1711 
1712 	call_netdevice_notifier(nb, NETDEV_UP, dev);
1713 	return 0;
1714 }
1715 
1716 static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1717 						struct net_device *dev)
1718 {
1719 	if (dev->flags & IFF_UP) {
1720 		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1721 					dev);
1722 		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1723 	}
1724 	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1725 }
1726 
1727 static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1728 						 struct net *net)
1729 {
1730 	struct net_device *dev;
1731 	int err;
1732 
1733 	for_each_netdev(net, dev) {
1734 		err = call_netdevice_register_notifiers(nb, dev);
1735 		if (err)
1736 			goto rollback;
1737 	}
1738 	return 0;
1739 
1740 rollback:
1741 	for_each_netdev_continue_reverse(net, dev)
1742 		call_netdevice_unregister_notifiers(nb, dev);
1743 	return err;
1744 }
1745 
1746 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1747 						    struct net *net)
1748 {
1749 	struct net_device *dev;
1750 
1751 	for_each_netdev(net, dev)
1752 		call_netdevice_unregister_notifiers(nb, dev);
1753 }
1754 
1755 static int dev_boot_phase = 1;
1756 
1757 /**
1758  * register_netdevice_notifier - register a network notifier block
1759  * @nb: notifier
1760  *
1761  * Register a notifier to be called when network device events occur.
1762  * The notifier passed is linked into the kernel structures and must
1763  * not be reused until it has been unregistered. A negative errno code
1764  * is returned on a failure.
1765  *
 * When registered, all registration and up events are replayed
 * to the new notifier so that it gets a race-free view of the
 * network device list.
1769  */
1770 
1771 int register_netdevice_notifier(struct notifier_block *nb)
1772 {
1773 	struct net *net;
1774 	int err;
1775 
1776 	/* Close race with setup_net() and cleanup_net() */
1777 	down_write(&pernet_ops_rwsem);
1778 	rtnl_lock();
1779 	err = raw_notifier_chain_register(&netdev_chain, nb);
1780 	if (err)
1781 		goto unlock;
1782 	if (dev_boot_phase)
1783 		goto unlock;
1784 	for_each_net(net) {
1785 		err = call_netdevice_register_net_notifiers(nb, net);
1786 		if (err)
1787 			goto rollback;
1788 	}
1789 
1790 unlock:
1791 	rtnl_unlock();
1792 	up_write(&pernet_ops_rwsem);
1793 	return err;
1794 
1795 rollback:
1796 	for_each_net_continue_reverse(net)
1797 		call_netdevice_unregister_net_notifiers(nb, net);
1798 
1799 	raw_notifier_chain_unregister(&netdev_chain, nb);
1800 	goto unlock;
1801 }
1802 EXPORT_SYMBOL(register_netdevice_notifier);
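
/* Illustrative sketch (hypothetical my_netdev_event/my_nb names): a module
 * tracking interface state registers once and then sees both replayed and
 * future events through a single callback:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			netdev_info(dev, "interface is up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&my_nb);
 */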
1803 
1804 /**
1805  * unregister_netdevice_notifier - unregister a network notifier block
1806  * @nb: notifier
1807  *
1808  * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, NETDEV_UNREGISTER and NETDEV_DOWN events are
 * synthesized for all devices on the device list and delivered to the
 * removed notifier, removing the need for special-case cleanup code.
1816  */
1817 
1818 int unregister_netdevice_notifier(struct notifier_block *nb)
1819 {
1820 	struct net *net;
1821 	int err;
1822 
1823 	/* Close race with setup_net() and cleanup_net() */
1824 	down_write(&pernet_ops_rwsem);
1825 	rtnl_lock();
1826 	err = raw_notifier_chain_unregister(&netdev_chain, nb);
1827 	if (err)
1828 		goto unlock;
1829 
1830 	for_each_net(net)
1831 		call_netdevice_unregister_net_notifiers(nb, net);
1832 
1833 unlock:
1834 	rtnl_unlock();
1835 	up_write(&pernet_ops_rwsem);
1836 	return err;
1837 }
1838 EXPORT_SYMBOL(unregister_netdevice_notifier);
1839 
1840 static int __register_netdevice_notifier_net(struct net *net,
1841 					     struct notifier_block *nb,
1842 					     bool ignore_call_fail)
1843 {
1844 	int err;
1845 
1846 	err = raw_notifier_chain_register(&net->netdev_chain, nb);
1847 	if (err)
1848 		return err;
1849 	if (dev_boot_phase)
1850 		return 0;
1851 
1852 	err = call_netdevice_register_net_notifiers(nb, net);
1853 	if (err && !ignore_call_fail)
1854 		goto chain_unregister;
1855 
1856 	return 0;
1857 
1858 chain_unregister:
1859 	raw_notifier_chain_unregister(&net->netdev_chain, nb);
1860 	return err;
1861 }
1862 
1863 static int __unregister_netdevice_notifier_net(struct net *net,
1864 					       struct notifier_block *nb)
1865 {
1866 	int err;
1867 
1868 	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1869 	if (err)
1870 		return err;
1871 
1872 	call_netdevice_unregister_net_notifiers(nb, net);
1873 	return 0;
1874 }
1875 
1876 /**
1877  * register_netdevice_notifier_net - register a per-netns network notifier block
1878  * @net: network namespace
1879  * @nb: notifier
1880  *
1881  * Register a notifier to be called when network device events occur.
1882  * The notifier passed is linked into the kernel structures and must
1883  * not be reused until it has been unregistered. A negative errno code
1884  * is returned on a failure.
1885  *
 * When registered, all registration and up events are replayed
 * to the new notifier so that it gets a race-free view of the
 * network device list.
1889  */
1890 
1891 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1892 {
1893 	int err;
1894 
1895 	rtnl_lock();
1896 	err = __register_netdevice_notifier_net(net, nb, false);
1897 	rtnl_unlock();
1898 	return err;
1899 }
1900 EXPORT_SYMBOL(register_netdevice_notifier_net);
1901 
1902 /**
1903  * unregister_netdevice_notifier_net - unregister a per-netns
1904  *                                     network notifier block
1905  * @net: network namespace
1906  * @nb: notifier
1907  *
1908  * Unregister a notifier previously registered by
 * register_netdevice_notifier_net(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, NETDEV_UNREGISTER and NETDEV_DOWN events are
 * synthesized for all devices on the device list and delivered to the
 * removed notifier, removing the need for special-case cleanup code.
1916  */
1917 
1918 int unregister_netdevice_notifier_net(struct net *net,
1919 				      struct notifier_block *nb)
1920 {
1921 	int err;
1922 
1923 	rtnl_lock();
1924 	err = __unregister_netdevice_notifier_net(net, nb);
1925 	rtnl_unlock();
1926 	return err;
1927 }
1928 EXPORT_SYMBOL(unregister_netdevice_notifier_net);
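
/* Illustrative sketch (hypothetical my_nb): a per-netns subsystem confines
 * its notifier to one namespace, so devices in other namespaces never
 * generate callbacks, and unregisters it on teardown:
 *
 *	err = register_netdevice_notifier_net(net, &my_nb);
 *	...
 *	unregister_netdevice_notifier_net(net, &my_nb);
 */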
1929 
1930 int register_netdevice_notifier_dev_net(struct net_device *dev,
1931 					struct notifier_block *nb,
1932 					struct netdev_net_notifier *nn)
1933 {
1934 	int err;
1935 
1936 	rtnl_lock();
1937 	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1938 	if (!err) {
1939 		nn->nb = nb;
1940 		list_add(&nn->list, &dev->net_notifier_list);
1941 	}
1942 	rtnl_unlock();
1943 	return err;
1944 }
1945 EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1946 
1947 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1948 					  struct notifier_block *nb,
1949 					  struct netdev_net_notifier *nn)
1950 {
1951 	int err;
1952 
1953 	rtnl_lock();
1954 	list_del(&nn->list);
1955 	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1956 	rtnl_unlock();
1957 	return err;
1958 }
1959 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
1960 
1961 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1962 					     struct net *net)
1963 {
1964 	struct netdev_net_notifier *nn;
1965 
1966 	list_for_each_entry(nn, &dev->net_notifier_list, list) {
1967 		__unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
1968 		__register_netdevice_notifier_net(net, nn->nb, true);
1969 	}
1970 }
1971 
1972 /**
1973  *	call_netdevice_notifiers_info - call all network notifier blocks
1974  *	@val: value passed unmodified to notifier function
1975  *	@info: notifier information data
1976  *
1977  *	Call all network notifier blocks.  Parameters and return value
1978  *	are as for raw_notifier_call_chain().
1979  */
1980 
1981 static int call_netdevice_notifiers_info(unsigned long val,
1982 					 struct netdev_notifier_info *info)
1983 {
1984 	struct net *net = dev_net(info->dev);
1985 	int ret;
1986 
1987 	ASSERT_RTNL();
1988 
1989 	/* Run per-netns notifier block chain first, then run the global one.
1990 	 * Hopefully, one day, the global one is going to be removed after
1991 	 * all notifier block registrators get converted to be per-netns.
1992 	 */
1993 	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
1994 	if (ret & NOTIFY_STOP_MASK)
1995 		return ret;
1996 	return raw_notifier_call_chain(&netdev_chain, val, info);
1997 }
1998 
1999 static int call_netdevice_notifiers_extack(unsigned long val,
2000 					   struct net_device *dev,
2001 					   struct netlink_ext_ack *extack)
2002 {
2003 	struct netdev_notifier_info info = {
2004 		.dev = dev,
2005 		.extack = extack,
2006 	};
2007 
2008 	return call_netdevice_notifiers_info(val, &info);
2009 }
2010 
2011 /**
2012  *	call_netdevice_notifiers - call all network notifier blocks
2013  *      @val: value passed unmodified to notifier function
2014  *      @dev: net_device pointer passed unmodified to notifier function
2015  *
2016  *	Call all network notifier blocks.  Parameters and return value
2017  *	are as for raw_notifier_call_chain().
2018  */
2019 
2020 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2021 {
2022 	return call_netdevice_notifiers_extack(val, dev, NULL);
2023 }
2024 EXPORT_SYMBOL(call_netdevice_notifiers);
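
/* Illustrative sketch (not from the original file): core code announces a
 * state change, e.g. after modifying device flags, with:
 *
 *	call_netdevice_notifiers(NETDEV_CHANGE, dev);
 *
 * The caller must hold RTNL; call_netdevice_notifiers_info() asserts it.
 */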
2025 
2026 /**
2027  *	call_netdevice_notifiers_mtu - call all network notifier blocks
2028  *	@val: value passed unmodified to notifier function
2029  *	@dev: net_device pointer passed unmodified to notifier function
2030  *	@arg: additional u32 argument passed to the notifier function
2031  *
2032  *	Call all network notifier blocks.  Parameters and return value
2033  *	are as for raw_notifier_call_chain().
2034  */
2035 static int call_netdevice_notifiers_mtu(unsigned long val,
2036 					struct net_device *dev, u32 arg)
2037 {
2038 	struct netdev_notifier_info_ext info = {
2039 		.info.dev = dev,
2040 		.ext.mtu = arg,
2041 	};
2042 
2043 	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2044 
2045 	return call_netdevice_notifiers_info(val, &info.info);
2046 }
2047 
2048 #ifdef CONFIG_NET_INGRESS
2049 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2050 
2051 void net_inc_ingress_queue(void)
2052 {
2053 	static_branch_inc(&ingress_needed_key);
2054 }
2055 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2056 
2057 void net_dec_ingress_queue(void)
2058 {
2059 	static_branch_dec(&ingress_needed_key);
2060 }
2061 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2062 #endif
2063 
2064 #ifdef CONFIG_NET_EGRESS
2065 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2066 
2067 void net_inc_egress_queue(void)
2068 {
2069 	static_branch_inc(&egress_needed_key);
2070 }
2071 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2072 
2073 void net_dec_egress_queue(void)
2074 {
2075 	static_branch_dec(&egress_needed_key);
2076 }
2077 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2078 #endif
2079 
2080 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2081 #ifdef CONFIG_JUMP_LABEL
2082 static atomic_t netstamp_needed_deferred;
2083 static atomic_t netstamp_wanted;
2084 static void netstamp_clear(struct work_struct *work)
2085 {
2086 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2087 	int wanted;
2088 
2089 	wanted = atomic_add_return(deferred, &netstamp_wanted);
2090 	if (wanted > 0)
2091 		static_branch_enable(&netstamp_needed_key);
2092 	else
2093 		static_branch_disable(&netstamp_needed_key);
2094 }
2095 static DECLARE_WORK(netstamp_work, netstamp_clear);
2096 #endif
2097 
2098 void net_enable_timestamp(void)
2099 {
2100 #ifdef CONFIG_JUMP_LABEL
2101 	int wanted;
2102 
2103 	while (1) {
2104 		wanted = atomic_read(&netstamp_wanted);
2105 		if (wanted <= 0)
2106 			break;
2107 		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
2108 			return;
2109 	}
2110 	atomic_inc(&netstamp_needed_deferred);
2111 	schedule_work(&netstamp_work);
2112 #else
2113 	static_branch_inc(&netstamp_needed_key);
2114 #endif
2115 }
2116 EXPORT_SYMBOL(net_enable_timestamp);
2117 
2118 void net_disable_timestamp(void)
2119 {
2120 #ifdef CONFIG_JUMP_LABEL
2121 	int wanted;
2122 
2123 	while (1) {
2124 		wanted = atomic_read(&netstamp_wanted);
2125 		if (wanted <= 1)
2126 			break;
2127 		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
2128 			return;
2129 	}
2130 	atomic_dec(&netstamp_needed_deferred);
2131 	schedule_work(&netstamp_work);
2132 #else
2133 	static_branch_dec(&netstamp_needed_key);
2134 #endif
2135 }
2136 EXPORT_SYMBOL(net_disable_timestamp);
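
/* Illustrative sketch (not from the original file): a consumer of RX
 * timestamps, such as a packet socket, brackets its lifetime with the
 * enable/disable pair; the calls must balance exactly:
 *
 *	net_enable_timestamp();
 *	...
 *	net_disable_timestamp();
 */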
2137 
2138 static inline void net_timestamp_set(struct sk_buff *skb)
2139 {
2140 	skb->tstamp = 0;
2141 	if (static_branch_unlikely(&netstamp_needed_key))
2142 		__net_timestamp(skb);
2143 }
2144 
2145 #define net_timestamp_check(COND, SKB)				\
2146 	if (static_branch_unlikely(&netstamp_needed_key)) {	\
2147 		if ((COND) && !(SKB)->tstamp)			\
2148 			__net_timestamp(SKB);			\
2149 	}							\
2150 
2151 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2152 {
2153 	return __is_skb_forwardable(dev, skb, true);
2154 }
2155 EXPORT_SYMBOL_GPL(is_skb_forwardable);
2156 
2157 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2158 			      bool check_mtu)
2159 {
2160 	int ret = ____dev_forward_skb(dev, skb, check_mtu);
2161 
2162 	if (likely(!ret)) {
2163 		skb->protocol = eth_type_trans(skb, dev);
2164 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2165 	}
2166 
2167 	return ret;
2168 }
2169 
2170 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2171 {
2172 	return __dev_forward_skb2(dev, skb, true);
2173 }
2174 EXPORT_SYMBOL_GPL(__dev_forward_skb);
2175 
2176 /**
2177  * dev_forward_skb - loopback an skb to another netif
2178  *
2179  * @dev: destination network device
2180  * @skb: buffer to forward
2181  *
2182  * return values:
2183  *	NET_RX_SUCCESS	(no congestion)
2184  *	NET_RX_DROP     (packet was dropped, but freed)
2185  *
2186  * dev_forward_skb can be used for injecting an skb from the
2187  * start_xmit function of one device into the receive queue
2188  * of another device.
2189  *
2190  * The receiving device may be in another namespace, so
2191  * we have to clear all information in the skb that could
2192  * impact namespace isolation.
2193  */
2194 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2195 {
2196 	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2197 }
2198 EXPORT_SYMBOL_GPL(dev_forward_skb);
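
/* Illustrative sketch (peer_dev is a hypothetical name): a veth-style
 * virtual driver can hand a frame from its ndo_start_xmit() straight to
 * the peer's receive path; the skb is consumed on both success and drop:
 *
 *	if (dev_forward_skb(peer_dev, skb) == NET_RX_SUCCESS)
 *		... account the packet as received by peer_dev ...
 */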
2199 
2200 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2201 {
2202 	return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2203 }
2204 
2205 static inline int deliver_skb(struct sk_buff *skb,
2206 			      struct packet_type *pt_prev,
2207 			      struct net_device *orig_dev)
2208 {
2209 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2210 		return -ENOMEM;
2211 	refcount_inc(&skb->users);
2212 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2213 }
2214 
2215 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2216 					  struct packet_type **pt,
2217 					  struct net_device *orig_dev,
2218 					  __be16 type,
2219 					  struct list_head *ptype_list)
2220 {
2221 	struct packet_type *ptype, *pt_prev = *pt;
2222 
2223 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2224 		if (ptype->type != type)
2225 			continue;
2226 		if (pt_prev)
2227 			deliver_skb(skb, pt_prev, orig_dev);
2228 		pt_prev = ptype;
2229 	}
2230 	*pt = pt_prev;
2231 }
2232 
2233 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2234 {
2235 	if (!ptype->af_packet_priv || !skb->sk)
2236 		return false;
2237 
2238 	if (ptype->id_match)
2239 		return ptype->id_match(ptype, skb->sk);
2240 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2241 		return true;
2242 
2243 	return false;
2244 }
2245 
2246 /**
2247  * dev_nit_active - return true if any network interface taps are in use
2248  *
2249  * @dev: network device to check for the presence of taps
2250  */
2251 bool dev_nit_active(struct net_device *dev)
2252 {
2253 	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2254 }
2255 EXPORT_SYMBOL_GPL(dev_nit_active);
2256 
2257 /*
2258  *	Support routine. Sends outgoing frames to any network
2259  *	taps currently in use.
2260  */
2261 
2262 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2263 {
2264 	struct packet_type *ptype;
2265 	struct sk_buff *skb2 = NULL;
2266 	struct packet_type *pt_prev = NULL;
2267 	struct list_head *ptype_list = &ptype_all;
2268 
2269 	rcu_read_lock();
2270 again:
2271 	list_for_each_entry_rcu(ptype, ptype_list, list) {
2272 		if (ptype->ignore_outgoing)
2273 			continue;
2274 
2275 		/* Never send packets back to the socket
2276 		 * they originated from - MvS (miquels@drinkel.ow.org)
2277 		 */
2278 		if (skb_loop_sk(ptype, skb))
2279 			continue;
2280 
2281 		if (pt_prev) {
2282 			deliver_skb(skb2, pt_prev, skb->dev);
2283 			pt_prev = ptype;
2284 			continue;
2285 		}
2286 
2287 		/* need to clone skb, done only once */
2288 		skb2 = skb_clone(skb, GFP_ATOMIC);
2289 		if (!skb2)
2290 			goto out_unlock;
2291 
2292 		net_timestamp_set(skb2);
2293 
2294 		/* skb->nh should be correctly
2295 		 * set by sender, so that the second statement is
2296 		 * just protection against buggy protocols.
2297 		 */
2298 		skb_reset_mac_header(skb2);
2299 
2300 		if (skb_network_header(skb2) < skb2->data ||
2301 		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2302 			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2303 					     ntohs(skb2->protocol),
2304 					     dev->name);
2305 			skb_reset_network_header(skb2);
2306 		}
2307 
2308 		skb2->transport_header = skb2->network_header;
2309 		skb2->pkt_type = PACKET_OUTGOING;
2310 		pt_prev = ptype;
2311 	}
2312 
2313 	if (ptype_list == &ptype_all) {
2314 		ptype_list = &dev->ptype_all;
2315 		goto again;
2316 	}
2317 out_unlock:
2318 	if (pt_prev) {
2319 		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2320 			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2321 		else
2322 			kfree_skb(skb2);
2323 	}
2324 	rcu_read_unlock();
2325 }
2326 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
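
/* Illustrative sketch (not from the original file): the transmit path only
 * pays for tap delivery when a tap is actually listening, along the lines
 * of what the xmit path does before handing the skb to the driver:
 *
 *	if (dev_nit_active(dev))
 *		dev_queue_xmit_nit(skb, dev);
 */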
2327 
2328 /**
2329  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2330  * @dev: Network device
2331  * @txq: number of queues available
2332  *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this, verify that each tc mapping remains valid
 * and, if not, zero the mapping. With no priorities mapping to an
 * offset/count pair, that pair will no longer be used. In the worst
 * case, when TC0 is invalid, nothing can be done, so priority mappings
 * are disabled entirely. It is expected that drivers will fix the
 * mapping if they can before calling netif_set_real_num_tx_queues.
2340  */
2341 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2342 {
2343 	int i;
2344 	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2345 
2346 	/* If TC0 is invalidated disable TC mapping */
2347 	if (tc->offset + tc->count > txq) {
2348 		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2349 		dev->num_tc = 0;
2350 		return;
2351 	}
2352 
2353 	/* Invalidated prio to tc mappings set to TC0 */
2354 	for (i = 1; i < TC_BITMASK + 1; i++) {
2355 		int q = netdev_get_prio_tc_map(dev, i);
2356 
2357 		tc = &dev->tc_to_txq[q];
2358 		if (tc->offset + tc->count > txq) {
2359 			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2360 				i, q);
2361 			netdev_set_prio_tc_map(dev, i, 0);
2362 		}
2363 	}
2364 }
2365 
2366 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2367 {
2368 	if (dev->num_tc) {
2369 		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2370 		int i;
2371 
2372 		/* walk through the TCs and see if it falls into any of them */
2373 		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2374 			if ((txq - tc->offset) < tc->count)
2375 				return i;
2376 		}
2377 
2378 		/* didn't find it, just return -1 to indicate no match */
2379 		return -1;
2380 	}
2381 
2382 	return 0;
2383 }
2384 EXPORT_SYMBOL(netdev_txq_to_tc);
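
/* Illustrative sketch (queue_index is a hypothetical variable): a driver
 * can map one of its queues back to the traffic class it serves:
 *
 *	int tc = netdev_txq_to_tc(dev, queue_index);
 *
 *	if (tc < 0)
 *		... queue_index falls outside every configured TC ...
 */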
2385 
2386 #ifdef CONFIG_XPS
2387 static struct static_key xps_needed __read_mostly;
2388 static struct static_key xps_rxqs_needed __read_mostly;
2389 static DEFINE_MUTEX(xps_map_mutex);
2390 #define xmap_dereference(P)		\
2391 	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2392 
2393 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2394 			     struct xps_dev_maps *old_maps, int tci, u16 index)
2395 {
2396 	struct xps_map *map = NULL;
2397 	int pos;
2398 
2399 	if (dev_maps)
2400 		map = xmap_dereference(dev_maps->attr_map[tci]);
2401 	if (!map)
2402 		return false;
2403 
2404 	for (pos = map->len; pos--;) {
2405 		if (map->queues[pos] != index)
2406 			continue;
2407 
2408 		if (map->len > 1) {
2409 			map->queues[pos] = map->queues[--map->len];
2410 			break;
2411 		}
2412 
2413 		if (old_maps)
2414 			RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2415 		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2416 		kfree_rcu(map, rcu);
2417 		return false;
2418 	}
2419 
2420 	return true;
2421 }
2422 
2423 static bool remove_xps_queue_cpu(struct net_device *dev,
2424 				 struct xps_dev_maps *dev_maps,
2425 				 int cpu, u16 offset, u16 count)
2426 {
2427 	int num_tc = dev_maps->num_tc;
2428 	bool active = false;
2429 	int tci;
2430 
2431 	for (tci = cpu * num_tc; num_tc--; tci++) {
2432 		int i, j;
2433 
2434 		for (i = count, j = offset; i--; j++) {
2435 			if (!remove_xps_queue(dev_maps, NULL, tci, j))
2436 				break;
2437 		}
2438 
2439 		active |= i < 0;
2440 	}
2441 
2442 	return active;
2443 }
2444 
2445 static void reset_xps_maps(struct net_device *dev,
2446 			   struct xps_dev_maps *dev_maps,
2447 			   enum xps_map_type type)
2448 {
2449 	static_key_slow_dec_cpuslocked(&xps_needed);
2450 	if (type == XPS_RXQS)
2451 		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2452 
2453 	RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2454 
2455 	kfree_rcu(dev_maps, rcu);
2456 }
2457 
2458 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2459 			   u16 offset, u16 count)
2460 {
2461 	struct xps_dev_maps *dev_maps;
2462 	bool active = false;
2463 	int i, j;
2464 
2465 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2466 	if (!dev_maps)
2467 		return;
2468 
2469 	for (j = 0; j < dev_maps->nr_ids; j++)
2470 		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2471 	if (!active)
2472 		reset_xps_maps(dev, dev_maps, type);
2473 
2474 	if (type == XPS_CPUS) {
2475 		for (i = offset + (count - 1); count--; i--)
2476 			netdev_queue_numa_node_write(
2477 				netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2478 	}
2479 }
2480 
2481 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2482 				   u16 count)
2483 {
2484 	if (!static_key_false(&xps_needed))
2485 		return;
2486 
2487 	cpus_read_lock();
2488 	mutex_lock(&xps_map_mutex);
2489 
2490 	if (static_key_false(&xps_rxqs_needed))
2491 		clean_xps_maps(dev, XPS_RXQS, offset, count);
2492 
2493 	clean_xps_maps(dev, XPS_CPUS, offset, count);
2494 
2495 	mutex_unlock(&xps_map_mutex);
2496 	cpus_read_unlock();
2497 }
2498 
2499 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2500 {
2501 	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2502 }
2503 
2504 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2505 				      u16 index, bool is_rxqs_map)
2506 {
2507 	struct xps_map *new_map;
2508 	int alloc_len = XPS_MIN_MAP_ALLOC;
2509 	int i, pos;
2510 
2511 	for (pos = 0; map && pos < map->len; pos++) {
2512 		if (map->queues[pos] != index)
2513 			continue;
2514 		return map;
2515 	}
2516 
2517 	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
2518 	if (map) {
2519 		if (pos < map->alloc_len)
2520 			return map;
2521 
2522 		alloc_len = map->alloc_len * 2;
2523 	}
2524 
2525 	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2526 	 *  map
2527 	 */
2528 	if (is_rxqs_map)
2529 		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2530 	else
2531 		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2532 				       cpu_to_node(attr_index));
2533 	if (!new_map)
2534 		return NULL;
2535 
2536 	for (i = 0; i < pos; i++)
2537 		new_map->queues[i] = map->queues[i];
2538 	new_map->alloc_len = alloc_len;
2539 	new_map->len = pos;
2540 
2541 	return new_map;
2542 }
2543 
2544 /* Copy xps maps at a given index */
2545 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2546 			      struct xps_dev_maps *new_dev_maps, int index,
2547 			      int tc, bool skip_tc)
2548 {
2549 	int i, tci = index * dev_maps->num_tc;
2550 	struct xps_map *map;
2551 
2552 	/* copy maps belonging to foreign traffic classes */
2553 	for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2554 		if (i == tc && skip_tc)
2555 			continue;
2556 
2557 		/* fill in the new device map from the old device map */
2558 		map = xmap_dereference(dev_maps->attr_map[tci]);
2559 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2560 	}
2561 }
2562 
2563 /* Must be called under cpus_read_lock */
2564 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2565 			  u16 index, enum xps_map_type type)
2566 {
2567 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2568 	const unsigned long *online_mask = NULL;
2569 	bool active = false, copy = false;
2570 	int i, j, tci, numa_node_id = -2;
2571 	int maps_sz, num_tc = 1, tc = 0;
2572 	struct xps_map *map, *new_map;
2573 	unsigned int nr_ids;
2574 
2575 	if (dev->num_tc) {
2576 		/* Do not allow XPS on subordinate device directly */
2577 		num_tc = dev->num_tc;
2578 		if (num_tc < 0)
2579 			return -EINVAL;
2580 
2581 		/* If queue belongs to subordinate dev use its map */
2582 		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2583 
2584 		tc = netdev_txq_to_tc(dev, index);
2585 		if (tc < 0)
2586 			return -EINVAL;
2587 	}
2588 
2589 	mutex_lock(&xps_map_mutex);
2590 
2591 	dev_maps = xmap_dereference(dev->xps_maps[type]);
2592 	if (type == XPS_RXQS) {
2593 		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2594 		nr_ids = dev->num_rx_queues;
2595 	} else {
2596 		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2597 		if (num_possible_cpus() > 1)
2598 			online_mask = cpumask_bits(cpu_online_mask);
2599 		nr_ids = nr_cpu_ids;
2600 	}
2601 
2602 	if (maps_sz < L1_CACHE_BYTES)
2603 		maps_sz = L1_CACHE_BYTES;
2604 
2605 	/* The old dev_maps could be larger or smaller than the one we're
2606 	 * setting up now, as dev->num_tc or nr_ids could have been updated in
2607 	 * between. We could try to be smart, but let's be safe instead and only
2608 	 * copy foreign traffic classes if the two map sizes match.
2609 	 */
2610 	if (dev_maps &&
2611 	    dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2612 		copy = true;
2613 
2614 	/* allocate memory for queue storage */
2615 	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2616 	     j < nr_ids;) {
2617 		if (!new_dev_maps) {
2618 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2619 			if (!new_dev_maps) {
2620 				mutex_unlock(&xps_map_mutex);
2621 				return -ENOMEM;
2622 			}
2623 
2624 			new_dev_maps->nr_ids = nr_ids;
2625 			new_dev_maps->num_tc = num_tc;
2626 		}
2627 
2628 		tci = j * num_tc + tc;
2629 		map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2630 
2631 		map = expand_xps_map(map, j, index, type == XPS_RXQS);
2632 		if (!map)
2633 			goto error;
2634 
2635 		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2636 	}
2637 
2638 	if (!new_dev_maps)
2639 		goto out_no_new_maps;
2640 
2641 	if (!dev_maps) {
2642 		/* Increment static keys at most once per type */
2643 		static_key_slow_inc_cpuslocked(&xps_needed);
2644 		if (type == XPS_RXQS)
2645 			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2646 	}
2647 
2648 	for (j = 0; j < nr_ids; j++) {
2649 		bool skip_tc = false;
2650 
2651 		tci = j * num_tc + tc;
2652 		if (netif_attr_test_mask(j, mask, nr_ids) &&
2653 		    netif_attr_test_online(j, online_mask, nr_ids)) {
2654 			/* add tx-queue to CPU/rx-queue maps */
2655 			int pos = 0;
2656 
2657 			skip_tc = true;
2658 
2659 			map = xmap_dereference(new_dev_maps->attr_map[tci]);
2660 			while ((pos < map->len) && (map->queues[pos] != index))
2661 				pos++;
2662 
2663 			if (pos == map->len)
2664 				map->queues[map->len++] = index;
2665 #ifdef CONFIG_NUMA
2666 			if (type == XPS_CPUS) {
2667 				if (numa_node_id == -2)
2668 					numa_node_id = cpu_to_node(j);
2669 				else if (numa_node_id != cpu_to_node(j))
2670 					numa_node_id = -1;
2671 			}
2672 #endif
2673 		}
2674 
2675 		if (copy)
2676 			xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2677 					  skip_tc);
2678 	}
2679 
2680 	rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2681 
2682 	/* Cleanup old maps */
2683 	if (!dev_maps)
2684 		goto out_no_old_maps;
2685 
2686 	for (j = 0; j < dev_maps->nr_ids; j++) {
2687 		for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2688 			map = xmap_dereference(dev_maps->attr_map[tci]);
2689 			if (!map)
2690 				continue;
2691 
2692 			if (copy) {
2693 				new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2694 				if (map == new_map)
2695 					continue;
2696 			}
2697 
2698 			RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2699 			kfree_rcu(map, rcu);
2700 		}
2701 	}
2702 
2703 	old_dev_maps = dev_maps;
2704 
2705 out_no_old_maps:
2706 	dev_maps = new_dev_maps;
2707 	active = true;
2708 
2709 out_no_new_maps:
2710 	if (type == XPS_CPUS)
2711 		/* update Tx queue numa node */
2712 		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2713 					     (numa_node_id >= 0) ?
2714 					     numa_node_id : NUMA_NO_NODE);
2715 
2716 	if (!dev_maps)
2717 		goto out_no_maps;
2718 
2719 	/* removes tx-queue from unused CPUs/rx-queues */
2720 	for (j = 0; j < dev_maps->nr_ids; j++) {
2721 		tci = j * dev_maps->num_tc;
2722 
2723 		for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2724 			if (i == tc &&
2725 			    netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2726 			    netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2727 				continue;
2728 
2729 			active |= remove_xps_queue(dev_maps,
2730 						   copy ? old_dev_maps : NULL,
2731 						   tci, index);
2732 		}
2733 	}
2734 
2735 	if (old_dev_maps)
2736 		kfree_rcu(old_dev_maps, rcu);
2737 
2738 	/* free map if not active */
2739 	if (!active)
2740 		reset_xps_maps(dev, dev_maps, type);
2741 
2742 out_no_maps:
2743 	mutex_unlock(&xps_map_mutex);
2744 
2745 	return 0;
2746 error:
2747 	/* remove any maps that we added */
2748 	for (j = 0; j < nr_ids; j++) {
2749 		for (i = num_tc, tci = j * num_tc; i--; tci++) {
2750 			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2751 			map = copy ?
2752 			      xmap_dereference(dev_maps->attr_map[tci]) :
2753 			      NULL;
2754 			if (new_map && new_map != map)
2755 				kfree(new_map);
2756 		}
2757 	}
2758 
2759 	mutex_unlock(&xps_map_mutex);
2760 
2761 	kfree(new_dev_maps);
2762 	return -ENOMEM;
2763 }
2764 EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2765 
2766 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2767 			u16 index)
2768 {
2769 	int ret;
2770 
2771 	cpus_read_lock();
2772 	ret =  __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2773 	cpus_read_unlock();
2774 
2775 	return ret;
2776 }
2777 EXPORT_SYMBOL(netif_set_xps_queue);
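
/* Illustrative sketch (not from the original file): a driver pinning each
 * transmit queue to the CPU of the same index during setup might do, for
 * queue/CPU i:
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_set_cpu(i, mask);
 *	netif_set_xps_queue(dev, mask, i);
 *	free_cpumask_var(mask);
 */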
2778 
2779 #endif
2780 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2781 {
2782 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2783 
2784 	/* Unbind any subordinate channels */
2785 	while (txq-- != &dev->_tx[0]) {
2786 		if (txq->sb_dev)
2787 			netdev_unbind_sb_channel(dev, txq->sb_dev);
2788 	}
2789 }
2790 
2791 void netdev_reset_tc(struct net_device *dev)
2792 {
2793 #ifdef CONFIG_XPS
2794 	netif_reset_xps_queues_gt(dev, 0);
2795 #endif
2796 	netdev_unbind_all_sb_channels(dev);
2797 
2798 	/* Reset TC configuration of device */
2799 	dev->num_tc = 0;
2800 	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2801 	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2802 }
2803 EXPORT_SYMBOL(netdev_reset_tc);
2804 
2805 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2806 {
2807 	if (tc >= dev->num_tc)
2808 		return -EINVAL;
2809 
2810 #ifdef CONFIG_XPS
2811 	netif_reset_xps_queues(dev, offset, count);
2812 #endif
2813 	dev->tc_to_txq[tc].count = count;
2814 	dev->tc_to_txq[tc].offset = offset;
2815 	return 0;
2816 }
2817 EXPORT_SYMBOL(netdev_set_tc_queue);
2818 
2819 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2820 {
2821 	if (num_tc > TC_MAX_QUEUE)
2822 		return -EINVAL;
2823 
2824 #ifdef CONFIG_XPS
2825 	netif_reset_xps_queues_gt(dev, 0);
2826 #endif
2827 	netdev_unbind_all_sb_channels(dev);
2828 
2829 	dev->num_tc = num_tc;
2830 	return 0;
2831 }
2832 EXPORT_SYMBOL(netdev_set_num_tc);
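
/* Illustrative sketch (not from the original file): an mqprio-style setup
 * with two traffic classes, each backed by a contiguous block of four
 * hardware queues:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	(TC0 -> queues 0-3)
 *	netdev_set_tc_queue(dev, 1, 4, 4);	(TC1 -> queues 4-7)
 */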
2833 
2834 void netdev_unbind_sb_channel(struct net_device *dev,
2835 			      struct net_device *sb_dev)
2836 {
2837 	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2838 
2839 #ifdef CONFIG_XPS
2840 	netif_reset_xps_queues_gt(sb_dev, 0);
2841 #endif
2842 	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2843 	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2844 
2845 	while (txq-- != &dev->_tx[0]) {
2846 		if (txq->sb_dev == sb_dev)
2847 			txq->sb_dev = NULL;
2848 	}
2849 }
2850 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2851 
2852 int netdev_bind_sb_channel_queue(struct net_device *dev,
2853 				 struct net_device *sb_dev,
2854 				 u8 tc, u16 count, u16 offset)
2855 {
2856 	/* Make certain the sb_dev and dev are already configured */
2857 	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2858 		return -EINVAL;
2859 
2860 	/* We cannot hand out queues we don't have */
2861 	if ((offset + count) > dev->real_num_tx_queues)
2862 		return -EINVAL;
2863 
2864 	/* Record the mapping */
2865 	sb_dev->tc_to_txq[tc].count = count;
2866 	sb_dev->tc_to_txq[tc].offset = offset;
2867 
2868 	/* Provide a way for Tx queue to find the tc_to_txq map or
2869 	 * XPS map for itself.
2870 	 */
2871 	while (count--)
2872 		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2873 
2874 	return 0;
2875 }
2876 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2877 
2878 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2879 {
2880 	/* Do not use a multiqueue device to represent a subordinate channel */
2881 	if (netif_is_multiqueue(dev))
2882 		return -ENODEV;
2883 
2884 	/* We allow channels 1 - 32767 to be used for subordinate channels.
2885 	 * Channel 0 is meant to be "native" mode and used only to represent
2886 	 * the main root device. We allow writing 0 to reset the device back
2887 	 * to normal mode after being used as a subordinate channel.
2888 	 */
2889 	if (channel > S16_MAX)
2890 		return -EINVAL;
2891 
2892 	dev->num_tc = -channel;
2893 
2894 	return 0;
2895 }
2896 EXPORT_SYMBOL(netdev_set_sb_channel);
2897 
2898 /*
 * Routine to help set real_num_tx_queues. Stale skbs on the qdisc must be
 * flushed so none remain mapped to queues at or above real_num_tx_queues.
2901  */
2902 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2903 {
2904 	bool disabling;
2905 	int rc;
2906 
2907 	disabling = txq < dev->real_num_tx_queues;
2908 
2909 	if (txq < 1 || txq > dev->num_tx_queues)
2910 		return -EINVAL;
2911 
2912 	if (dev->reg_state == NETREG_REGISTERED ||
2913 	    dev->reg_state == NETREG_UNREGISTERING) {
2914 		ASSERT_RTNL();
2915 
2916 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2917 						  txq);
2918 		if (rc)
2919 			return rc;
2920 
2921 		if (dev->num_tc)
2922 			netif_setup_tc(dev, txq);
2923 
2924 		dev->real_num_tx_queues = txq;
2925 
2926 		if (disabling) {
2927 			synchronize_net();
2928 			qdisc_reset_all_tx_gt(dev, txq);
2929 #ifdef CONFIG_XPS
2930 			netif_reset_xps_queues_gt(dev, txq);
2931 #endif
2932 		}
2933 	} else {
2934 		dev->real_num_tx_queues = txq;
2935 	}
2936 
2937 	return 0;
2938 }
2939 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
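
/* Illustrative sketch (new_txq is a hypothetical count): a multiqueue
 * driver shrinking or growing its active queue set after an ethtool
 * channel change, under RTNL:
 *
 *	err = netif_set_real_num_tx_queues(dev, new_txq);
 *	if (err)
 *		... roll back the channel reconfiguration ...
 */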
2940 
2941 #ifdef CONFIG_SYSFS
2942 /**
2943  *	netif_set_real_num_rx_queues - set actual number of RX queues used
2944  *	@dev: Network device
2945  *	@rxq: Actual number of RX queues
2946  *
2947  *	This must be called either with the rtnl_lock held or before
2948  *	registration of the net device.  Returns 0 on success, or a
2949  *	negative error code.  If called before registration, it always
2950  *	succeeds.
2951  */
2952 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2953 {
2954 	int rc;
2955 
2956 	if (rxq < 1 || rxq > dev->num_rx_queues)
2957 		return -EINVAL;
2958 
2959 	if (dev->reg_state == NETREG_REGISTERED) {
2960 		ASSERT_RTNL();
2961 
2962 		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2963 						  rxq);
2964 		if (rc)
2965 			return rc;
2966 	}
2967 
2968 	dev->real_num_rx_queues = rxq;
2969 	return 0;
2970 }
2971 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2972 #endif
2973 
2974 /**
2975  *	netif_set_real_num_queues - set actual number of RX and TX queues used
2976  *	@dev: Network device
2977  *	@txq: Actual number of TX queues
2978  *	@rxq: Actual number of RX queues
2979  *
2980  *	Set the real number of both TX and RX queues.
2981  *	Does nothing if the number of queues is already correct.
2982  */
2983 int netif_set_real_num_queues(struct net_device *dev,
2984 			      unsigned int txq, unsigned int rxq)
2985 {
2986 	unsigned int old_rxq = dev->real_num_rx_queues;
2987 	int err;
2988 
2989 	if (txq < 1 || txq > dev->num_tx_queues ||
2990 	    rxq < 1 || rxq > dev->num_rx_queues)
2991 		return -EINVAL;
2992 
2993 	/* Start from increases, so the error path only does decreases -
2994 	 * decreases can't fail.
2995 	 */
2996 	if (rxq > dev->real_num_rx_queues) {
2997 		err = netif_set_real_num_rx_queues(dev, rxq);
2998 		if (err)
2999 			return err;
3000 	}
3001 	if (txq > dev->real_num_tx_queues) {
3002 		err = netif_set_real_num_tx_queues(dev, txq);
3003 		if (err)
3004 			goto undo_rx;
3005 	}
3006 	if (rxq < dev->real_num_rx_queues)
3007 		WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
3008 	if (txq < dev->real_num_tx_queues)
3009 		WARN_ON(netif_set_real_num_tx_queues(dev, txq));
3010 
3011 	return 0;
3012 undo_rx:
3013 	WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
3014 	return err;
3015 }
3016 EXPORT_SYMBOL(netif_set_real_num_queues);
3017 
3018 /**
3019  * netif_get_num_default_rss_queues - default number of RSS queues
3020  *
3021  * This routine should set an upper limit on the number of RSS queues
3022  * used by default by multiqueue devices.
3023  */
3024 int netif_get_num_default_rss_queues(void)
3025 {
3026 	return is_kdump_kernel() ?
3027 		1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
3028 }
3029 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
3030 
3031 static void __netif_reschedule(struct Qdisc *q)
3032 {
3033 	struct softnet_data *sd;
3034 	unsigned long flags;
3035 
3036 	local_irq_save(flags);
3037 	sd = this_cpu_ptr(&softnet_data);
3038 	q->next_sched = NULL;
3039 	*sd->output_queue_tailp = q;
3040 	sd->output_queue_tailp = &q->next_sched;
3041 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3042 	local_irq_restore(flags);
3043 }
3044 
3045 void __netif_schedule(struct Qdisc *q)
3046 {
3047 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3048 		__netif_reschedule(q);
3049 }
3050 EXPORT_SYMBOL(__netif_schedule);
3051 
3052 struct dev_kfree_skb_cb {
3053 	enum skb_free_reason reason;
3054 };
3055 
3056 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3057 {
3058 	return (struct dev_kfree_skb_cb *)skb->cb;
3059 }
3060 
3061 void netif_schedule_queue(struct netdev_queue *txq)
3062 {
3063 	rcu_read_lock();
3064 	if (!netif_xmit_stopped(txq)) {
3065 		struct Qdisc *q = rcu_dereference(txq->qdisc);
3066 
3067 		__netif_schedule(q);
3068 	}
3069 	rcu_read_unlock();
3070 }
3071 EXPORT_SYMBOL(netif_schedule_queue);
3072 
3073 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3074 {
3075 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3076 		struct Qdisc *q;
3077 
3078 		rcu_read_lock();
3079 		q = rcu_dereference(dev_queue->qdisc);
3080 		__netif_schedule(q);
3081 		rcu_read_unlock();
3082 	}
3083 }
3084 EXPORT_SYMBOL(netif_tx_wake_queue);
3085 
3086 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
3087 {
3088 	unsigned long flags;
3089 
3090 	if (unlikely(!skb))
3091 		return;
3092 
3093 	if (likely(refcount_read(&skb->users) == 1)) {
3094 		smp_rmb();
3095 		refcount_set(&skb->users, 0);
3096 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
3097 		return;
3098 	}
3099 	get_kfree_skb_cb(skb)->reason = reason;
3100 	local_irq_save(flags);
3101 	skb->next = __this_cpu_read(softnet_data.completion_queue);
3102 	__this_cpu_write(softnet_data.completion_queue, skb);
3103 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
3104 	local_irq_restore(flags);
3105 }
3106 EXPORT_SYMBOL(__dev_kfree_skb_irq);
3107 
3108 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
3109 {
3110 	if (in_hardirq() || irqs_disabled())
3111 		__dev_kfree_skb_irq(skb, reason);
3112 	else
3113 		dev_kfree_skb(skb);
3114 }
3115 EXPORT_SYMBOL(__dev_kfree_skb_any);
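
/* Illustrative sketch (not from the original file): TX completion handlers
 * that may run in hardirq or process context free skbs through the _any
 * wrappers, which pick the right reason for this helper:
 *
 *	dev_consume_skb_any(skb);	(successful transmit)
 *	dev_kfree_skb_any(skb);		(dropped frame)
 */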
3116 
3118 /**
3119  * netif_device_detach - mark device as removed
3120  * @dev: network device
3121  *
3122  * Mark device as removed from system and therefore no longer available.
3123  */
3124 void netif_device_detach(struct net_device *dev)
3125 {
3126 	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3127 	    netif_running(dev)) {
3128 		netif_tx_stop_all_queues(dev);
3129 	}
3130 }
3131 EXPORT_SYMBOL(netif_device_detach);
3132 
3133 /**
3134  * netif_device_attach - mark device as attached
3135  * @dev: network device
3136  *
 * Mark device as attached to the system and restart it if needed.
3138  */
3139 void netif_device_attach(struct net_device *dev)
3140 {
3141 	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3142 	    netif_running(dev)) {
3143 		netif_tx_wake_all_queues(dev);
3144 		__netdev_watchdog_up(dev);
3145 	}
3146 }
3147 EXPORT_SYMBOL(netif_device_attach);
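
/* Illustrative sketch (not from the original file): PCI suspend/resume
 * hooks commonly bracket the power transition with the detach/attach
 * pair so the stack stops using the queues while the hardware is away:
 *
 *	netif_device_detach(dev);	(in ->suspend())
 *	...
 *	netif_device_attach(dev);	(in ->resume())
 */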
3148 
3149 /*
 * Returns a Tx hash based on the given packet descriptor, using the
 * number of Tx queues as a distribution range.
3152  */
3153 static u16 skb_tx_hash(const struct net_device *dev,
3154 		       const struct net_device *sb_dev,
3155 		       struct sk_buff *skb)
3156 {
3157 	u32 hash;
3158 	u16 qoffset = 0;
3159 	u16 qcount = dev->real_num_tx_queues;
3160 
3161 	if (dev->num_tc) {
3162 		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3163 
3164 		qoffset = sb_dev->tc_to_txq[tc].offset;
3165 		qcount = sb_dev->tc_to_txq[tc].count;
3166 		if (unlikely(!qcount)) {
3167 			net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3168 					     sb_dev->name, qoffset, tc);
3169 			qoffset = 0;
3170 			qcount = dev->real_num_tx_queues;
3171 		}
3172 	}
3173 
3174 	if (skb_rx_queue_recorded(skb)) {
3175 		hash = skb_get_rx_queue(skb);
3176 		if (hash >= qoffset)
3177 			hash -= qoffset;
3178 		while (unlikely(hash >= qcount))
3179 			hash -= qcount;
3180 		return hash + qoffset;
3181 	}
3182 
3183 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3184 }
3185 
3186 static void skb_warn_bad_offload(const struct sk_buff *skb)
3187 {
3188 	static const netdev_features_t null_features;
3189 	struct net_device *dev = skb->dev;
3190 	const char *name = "";
3191 
3192 	if (!net_ratelimit())
3193 		return;
3194 
3195 	if (dev) {
3196 		if (dev->dev.parent)
3197 			name = dev_driver_string(dev->dev.parent);
3198 		else
3199 			name = netdev_name(dev);
3200 	}
3201 	skb_dump(KERN_WARNING, skb, false);
3202 	WARN(1, "%s: caps=(%pNF, %pNF)\n",
3203 	     name, dev ? &dev->features : &null_features,
3204 	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
3205 }
3206 
3207 /*
3208  * Invalidate hardware checksum when packet is to be mangled, and
3209  * complete checksum manually on outgoing path.
3210  */
3211 int skb_checksum_help(struct sk_buff *skb)
3212 {
3213 	__wsum csum;
3214 	int ret = 0, offset;
3215 
3216 	if (skb->ip_summed == CHECKSUM_COMPLETE)
3217 		goto out_set_summed;
3218 
3219 	if (unlikely(skb_is_gso(skb))) {
3220 		skb_warn_bad_offload(skb);
3221 		return -EINVAL;
3222 	}
3223 
3224 	/* Before computing a checksum, we should make sure no frag could
3225 	 * be modified by an external entity : checksum could be wrong.
3226 	 */
3227 	if (skb_has_shared_frag(skb)) {
3228 		ret = __skb_linearize(skb);
3229 		if (ret)
3230 			goto out;
3231 	}
3232 
3233 	offset = skb_checksum_start_offset(skb);
3234 	BUG_ON(offset >= skb_headlen(skb));
3235 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
3236 
3237 	offset += skb->csum_offset;
3238 	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
3239 
3240 	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3241 	if (ret)
3242 		goto out;
3243 
3244 	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3245 out_set_summed:
3246 	skb->ip_summed = CHECKSUM_NONE;
3247 out:
3248 	return ret;
3249 }
3250 EXPORT_SYMBOL(skb_checksum_help);
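
/* Illustrative sketch (not from the original file): a driver whose
 * hardware cannot checksum a particular frame falls back to software
 * before handing the packet to DMA:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */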
3251 
3252 int skb_crc32c_csum_help(struct sk_buff *skb)
3253 {
3254 	__le32 crc32c_csum;
3255 	int ret = 0, offset, start;
3256 
3257 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3258 		goto out;
3259 
3260 	if (unlikely(skb_is_gso(skb)))
3261 		goto out;
3262 
3263 	/* Before computing a checksum, we should make sure no frag could
3264 	 * be modified by an external entity : checksum could be wrong.
3265 	 */
3266 	if (unlikely(skb_has_shared_frag(skb))) {
3267 		ret = __skb_linearize(skb);
3268 		if (ret)
3269 			goto out;
3270 	}
3271 	start = skb_checksum_start_offset(skb);
3272 	offset = start + offsetof(struct sctphdr, checksum);
3273 	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3274 		ret = -EINVAL;
3275 		goto out;
3276 	}
3277 
3278 	ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3279 	if (ret)
3280 		goto out;
3281 
3282 	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3283 						  skb->len - start, ~(__u32)0,
3284 						  crc32c_csum_stub));
3285 	*(__le32 *)(skb->data + offset) = crc32c_csum;
3286 	skb->ip_summed = CHECKSUM_NONE;
3287 	skb->csum_not_inet = 0;
3288 out:
3289 	return ret;
3290 }
3291 
3292 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3293 {
3294 	__be16 type = skb->protocol;
3295 
3296 	/* Tunnel gso handlers can set protocol to ethernet. */
3297 	if (type == htons(ETH_P_TEB)) {
3298 		struct ethhdr *eth;
3299 
3300 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3301 			return 0;
3302 
3303 		eth = (struct ethhdr *)skb->data;
3304 		type = eth->h_proto;
3305 	}
3306 
3307 	return __vlan_get_protocol(skb, type, depth);
3308 }
3309 
3310 /**
3311  *	skb_mac_gso_segment - mac layer segmentation handler.
3312  *	@skb: buffer to segment
3313  *	@features: features for the output path (see dev->features)
3314  */
3315 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3316 				    netdev_features_t features)
3317 {
3318 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
3319 	struct packet_offload *ptype;
3320 	int vlan_depth = skb->mac_len;
3321 	__be16 type = skb_network_protocol(skb, &vlan_depth);
3322 
3323 	if (unlikely(!type))
3324 		return ERR_PTR(-EINVAL);
3325 
3326 	__skb_pull(skb, vlan_depth);
3327 
3328 	rcu_read_lock();
3329 	list_for_each_entry_rcu(ptype, &offload_base, list) {
3330 		if (ptype->type == type && ptype->callbacks.gso_segment) {
3331 			segs = ptype->callbacks.gso_segment(skb, features);
3332 			break;
3333 		}
3334 	}
3335 	rcu_read_unlock();
3336 
3337 	__skb_push(skb, skb->data - skb_mac_header(skb));
3338 
3339 	return segs;
3340 }
3341 EXPORT_SYMBOL(skb_mac_gso_segment);
3342 
3343 
3344 /* openvswitch calls this on the rx path, so we need a different check.
3345  */
3346 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3347 {
3348 	if (tx_path)
3349 		return skb->ip_summed != CHECKSUM_PARTIAL &&
3350 		       skb->ip_summed != CHECKSUM_UNNECESSARY;
3351 
3352 	return skb->ip_summed == CHECKSUM_NONE;
3353 }
3354 
3355 /**
3356  *	__skb_gso_segment - Perform segmentation on skb.
3357  *	@skb: buffer to segment
3358  *	@features: features for the output path (see dev->features)
3359  *	@tx_path: whether it is called in TX path
3360  *
3361  *	This function segments the given skb and returns a list of segments.
3362  *
3363  *	It may return NULL if the skb requires no segmentation.  This is
3364  *	only possible when GSO is used for verifying header integrity.
3365  *
3366  *	Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
3367  */
3368 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3369 				  netdev_features_t features, bool tx_path)
3370 {
3371 	struct sk_buff *segs;
3372 
3373 	if (unlikely(skb_needs_check(skb, tx_path))) {
3374 		int err;
3375 
3376 		/* We're going to init ->check field in TCP or UDP header */
3377 		err = skb_cow_head(skb, 0);
3378 		if (err < 0)
3379 			return ERR_PTR(err);
3380 	}
3381 
3382 	/* Only report GSO partial support if it will enable us to
3383 	 * support segmentation on this frame without needing additional
3384 	 * work.
3385 	 */
3386 	if (features & NETIF_F_GSO_PARTIAL) {
3387 		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3388 		struct net_device *dev = skb->dev;
3389 
3390 		partial_features |= dev->features & dev->gso_partial_features;
3391 		if (!skb_gso_ok(skb, features | partial_features))
3392 			features &= ~NETIF_F_GSO_PARTIAL;
3393 	}
3394 
3395 	BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
3396 		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3397 
3398 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3399 	SKB_GSO_CB(skb)->encap_level = 0;
3400 
3401 	skb_reset_mac_header(skb);
3402 	skb_reset_mac_len(skb);
3403 
3404 	segs = skb_mac_gso_segment(skb, features);
3405 
3406 	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3407 		skb_warn_bad_offload(skb);
3408 
3409 	return segs;
3410 }
3411 EXPORT_SYMBOL(__skb_gso_segment);
3412 
3413 /* Take action when hardware reception checksum errors are detected. */
3414 #ifdef CONFIG_BUG
3415 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3416 {
3417 	pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
3418 	skb_dump(KERN_ERR, skb, true);
3419 	dump_stack();
3420 }
3421 
3422 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3423 {
3424 	DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3425 }
3426 EXPORT_SYMBOL(netdev_rx_csum_fault);
3427 #endif
3428 
3429 /* XXX: check that highmem exists at all on the given machine. */
3430 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3431 {
3432 #ifdef CONFIG_HIGHMEM
3433 	int i;
3434 
3435 	if (!(dev->features & NETIF_F_HIGHDMA)) {
3436 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3437 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3438 
3439 			if (PageHighMem(skb_frag_page(frag)))
3440 				return 1;
3441 		}
3442 	}
3443 #endif
3444 	return 0;
3445 }
3446 
3447 /* If MPLS offload request, verify we are testing hardware MPLS features
3448  * instead of standard features for the netdev.
3449  */
3450 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3451 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3452 					   netdev_features_t features,
3453 					   __be16 type)
3454 {
3455 	if (eth_p_mpls(type))
3456 		features &= skb->dev->mpls_features;
3457 
3458 	return features;
3459 }
3460 #else
3461 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3462 					   netdev_features_t features,
3463 					   __be16 type)
3464 {
3465 	return features;
3466 }
3467 #endif
3468 
3469 static netdev_features_t harmonize_features(struct sk_buff *skb,
3470 	netdev_features_t features)
3471 {
3472 	__be16 type;
3473 
3474 	type = skb_network_protocol(skb, NULL);
3475 	features = net_mpls_features(skb, features, type);
3476 
3477 	if (skb->ip_summed != CHECKSUM_NONE &&
3478 	    !can_checksum_protocol(features, type)) {
3479 		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3480 	}
3481 	if (illegal_highdma(skb->dev, skb))
3482 		features &= ~NETIF_F_SG;
3483 
3484 	return features;
3485 }
3486 
3487 netdev_features_t passthru_features_check(struct sk_buff *skb,
3488 					  struct net_device *dev,
3489 					  netdev_features_t features)
3490 {
3491 	return features;
3492 }
3493 EXPORT_SYMBOL(passthru_features_check);
3494 
3495 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3496 					     struct net_device *dev,
3497 					     netdev_features_t features)
3498 {
3499 	return vlan_features_check(skb, features);
3500 }
3501 
3502 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3503 					    struct net_device *dev,
3504 					    netdev_features_t features)
3505 {
3506 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
3507 
3508 	if (gso_segs > dev->gso_max_segs)
3509 		return features & ~NETIF_F_GSO_MASK;
3510 
3511 	if (!skb_shinfo(skb)->gso_type) {
3512 		skb_warn_bad_offload(skb);
3513 		return features & ~NETIF_F_GSO_MASK;
3514 	}
3515 
3516 	/* Support for GSO partial features requires software
3517 	 * intervention before we can actually process the packets,
3518 	 * so we need to strip support for any partial features now;
3519 	 * we can pull them back in after we have partially
3520 	 * segmented the frame.
3521 	 */
3522 	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3523 		features &= ~dev->gso_partial_features;
3524 
3525 	/* Make sure to clear the IPv4 ID mangling feature if the
3526 	 * IPv4 header has the potential to be fragmented.
3527 	 */
3528 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3529 		struct iphdr *iph = skb->encapsulation ?
3530 				    inner_ip_hdr(skb) : ip_hdr(skb);
3531 
3532 		if (!(iph->frag_off & htons(IP_DF)))
3533 			features &= ~NETIF_F_TSO_MANGLEID;
3534 	}
3535 
3536 	return features;
3537 }
3538 
3539 netdev_features_t netif_skb_features(struct sk_buff *skb)
3540 {
3541 	struct net_device *dev = skb->dev;
3542 	netdev_features_t features = dev->features;
3543 
3544 	if (skb_is_gso(skb))
3545 		features = gso_features_check(skb, dev, features);
3546 
3547 	/* If encapsulation offload request, verify we are testing
3548 	 * hardware encapsulation features instead of standard
3549 	 * features for the netdev
3550 	 */
3551 	if (skb->encapsulation)
3552 		features &= dev->hw_enc_features;
3553 
3554 	if (skb_vlan_tagged(skb))
3555 		features = netdev_intersect_features(features,
3556 						     dev->vlan_features |
3557 						     NETIF_F_HW_VLAN_CTAG_TX |
3558 						     NETIF_F_HW_VLAN_STAG_TX);
3559 
3560 	if (dev->netdev_ops->ndo_features_check)
3561 		features &= dev->netdev_ops->ndo_features_check(skb, dev,
3562 								features);
3563 	else
3564 		features &= dflt_features_check(skb, dev, features);
3565 
3566 	return harmonize_features(skb, features);
3567 }
3568 EXPORT_SYMBOL(netif_skb_features);
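
/* Example: the ->ndo_features_check() hook consulted above, sketched for
 * a hypothetical driver that cannot offload encapsulated frames and so
 * masks those features per packet; mydrv_features_check() is an
 * assumption, while vlan_features_check() mirrors dflt_features_check():
 *
 *	static netdev_features_t mydrv_features_check(struct sk_buff *skb,
 *						      struct net_device *dev,
 *						      netdev_features_t features)
 *	{
 *		features = vlan_features_check(skb, features);
 *		if (skb->encapsulation)
 *			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 *		return features;
 *	}
 */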
3569 
3570 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3571 		    struct netdev_queue *txq, bool more)
3572 {
3573 	unsigned int len;
3574 	int rc;
3575 
3576 	if (dev_nit_active(dev))
3577 		dev_queue_xmit_nit(skb, dev);
3578 
3579 	len = skb->len;
3580 	PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies);
3581 	trace_net_dev_start_xmit(skb, dev);
3582 	rc = netdev_start_xmit(skb, dev, txq, more);
3583 	trace_net_dev_xmit(skb, rc, dev, len);
3584 
3585 	return rc;
3586 }
3587 
3588 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3589 				    struct netdev_queue *txq, int *ret)
3590 {
3591 	struct sk_buff *skb = first;
3592 	int rc = NETDEV_TX_OK;
3593 
3594 	while (skb) {
3595 		struct sk_buff *next = skb->next;
3596 
3597 		skb_mark_not_on_list(skb);
3598 		rc = xmit_one(skb, dev, txq, next != NULL);
3599 		if (unlikely(!dev_xmit_complete(rc))) {
3600 			skb->next = next;
3601 			goto out;
3602 		}
3603 
3604 		skb = next;
3605 		if (netif_tx_queue_stopped(txq) && skb) {
3606 			rc = NETDEV_TX_BUSY;
3607 			break;
3608 		}
3609 	}
3610 
3611 out:
3612 	*ret = rc;
3613 	return skb;
3614 }
3615 
3616 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3617 					  netdev_features_t features)
3618 {
3619 	if (skb_vlan_tag_present(skb) &&
3620 	    !vlan_hw_offload_capable(features, skb->vlan_proto))
3621 		skb = __vlan_hwaccel_push_inside(skb);
3622 	return skb;
3623 }
3624 
3625 int skb_csum_hwoffload_help(struct sk_buff *skb,
3626 			    const netdev_features_t features)
3627 {
3628 	if (unlikely(skb_csum_is_sctp(skb)))
3629 		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3630 			skb_crc32c_csum_help(skb);
3631 
3632 	if (features & NETIF_F_HW_CSUM)
3633 		return 0;
3634 
3635 	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3636 		switch (skb->csum_offset) {
3637 		case offsetof(struct tcphdr, check):
3638 		case offsetof(struct udphdr, check):
3639 			return 0;
3640 		}
3641 	}
3642 
3643 	return skb_checksum_help(skb);
3644 }
3645 EXPORT_SYMBOL(skb_csum_hwoffload_help);
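
/* Example: the feature bits tested above are what a driver advertises at
 * probe time (a hedged sketch; the exact flag set is device-specific).
 * A protocol-agnostic checksum engine advertises NETIF_F_HW_CSUM and
 * takes the early return above; one limited to TCP/UDP over IPv4/IPv6
 * advertises NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM and relies on the
 * csum_offset check; everything else falls back to skb_checksum_help().
 *
 *	dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 *	dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 */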
3646 
3647 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3648 {
3649 	netdev_features_t features;
3650 
3651 	features = netif_skb_features(skb);
3652 	skb = validate_xmit_vlan(skb, features);
3653 	if (unlikely(!skb))
3654 		goto out_null;
3655 
3656 	skb = sk_validate_xmit_skb(skb, dev);
3657 	if (unlikely(!skb))
3658 		goto out_null;
3659 
3660 	if (netif_needs_gso(skb, features)) {
3661 		struct sk_buff *segs;
3662 
3663 		segs = skb_gso_segment(skb, features);
3664 		if (IS_ERR(segs)) {
3665 			goto out_kfree_skb;
3666 		} else if (segs) {
3667 			consume_skb(skb);
3668 			skb = segs;
3669 		}
3670 	} else {
3671 		if (skb_needs_linearize(skb, features) &&
3672 		    __skb_linearize(skb))
3673 			goto out_kfree_skb;
3674 
3675 		/* If packet is not checksummed and device does not
3676 		 * support checksumming for this protocol, complete
3677 		 * checksumming here.
3678 		 */
3679 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
3680 			if (skb->encapsulation)
3681 				skb_set_inner_transport_header(skb,
3682 							       skb_checksum_start_offset(skb));
3683 			else
3684 				skb_set_transport_header(skb,
3685 							 skb_checksum_start_offset(skb));
3686 			if (skb_csum_hwoffload_help(skb, features))
3687 				goto out_kfree_skb;
3688 		}
3689 	}
3690 
3691 	skb = validate_xmit_xfrm(skb, features, again);
3692 
3693 	return skb;
3694 
3695 out_kfree_skb:
3696 	kfree_skb(skb);
3697 out_null:
3698 	atomic_long_inc(&dev->tx_dropped);
3699 	return NULL;
3700 }
3701 
3702 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3703 {
3704 	struct sk_buff *next, *head = NULL, *tail;
3705 
3706 	for (; skb != NULL; skb = next) {
3707 		next = skb->next;
3708 		skb_mark_not_on_list(skb);
3709 
3710 		/* in case the skb won't be segmented, point it at itself */
3711 		skb->prev = skb;
3712 
3713 		skb = validate_xmit_skb(skb, dev, again);
3714 		if (!skb)
3715 			continue;
3716 
3717 		if (!head)
3718 			head = skb;
3719 		else
3720 			tail->next = skb;
3721 		/* If skb was segmented, skb->prev points to
3722 		 * the last segment. If not, it still contains skb.
3723 		 */
3724 		tail = skb->prev;
3725 	}
3726 	return head;
3727 }
3728 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3729 
3730 static void qdisc_pkt_len_init(struct sk_buff *skb)
3731 {
3732 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
3733 
3734 	qdisc_skb_cb(skb)->pkt_len = skb->len;
3735 
3736 	/* To get a more precise estimate of bytes sent on the wire,
3737 	 * we add to pkt_len the header size of all segments
3738 	 */
3739 	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3740 		unsigned int hdr_len;
3741 		u16 gso_segs = shinfo->gso_segs;
3742 
3743 		/* mac layer + network layer */
3744 		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3745 
3746 		/* + transport layer */
3747 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3748 			const struct tcphdr *th;
3749 			struct tcphdr _tcphdr;
3750 
3751 			th = skb_header_pointer(skb, skb_transport_offset(skb),
3752 						sizeof(_tcphdr), &_tcphdr);
3753 			if (likely(th))
3754 				hdr_len += __tcp_hdrlen(th);
3755 		} else {
3756 			struct udphdr _udphdr;
3757 
3758 			if (skb_header_pointer(skb, skb_transport_offset(skb),
3759 					       sizeof(_udphdr), &_udphdr))
3760 				hdr_len += sizeof(struct udphdr);
3761 		}
3762 
3763 		if (shinfo->gso_type & SKB_GSO_DODGY)
3764 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3765 						shinfo->gso_size);
3766 
3767 		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3768 	}
3769 }
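
/* Worked example for the estimate above (numbers are illustrative): a
 * TCP GSO skb with skb->len = 2962, gso_size = 1448, gso_segs = 2 and
 * hdr_len = 14 (mac) + 20 (IPv4) + 32 (TCP w/ timestamps) = 66 gives
 * pkt_len = 2962 + (2 - 1) * 66 = 3028, the byte count the two wire
 * frames will actually consume.
 */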
3770 
3771 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3772 			     struct sk_buff **to_free,
3773 			     struct netdev_queue *txq)
3774 {
3775 	int rc;
3776 
3777 	rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3778 	if (rc == NET_XMIT_SUCCESS)
3779 		trace_qdisc_enqueue(q, txq, skb);
3780 	return rc;
3781 }
3782 
3783 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3784 				 struct net_device *dev,
3785 				 struct netdev_queue *txq)
3786 {
3787 	spinlock_t *root_lock = qdisc_lock(q);
3788 	struct sk_buff *to_free = NULL;
3789 	bool contended;
3790 	int rc;
3791 
3792 	qdisc_calculate_pkt_len(skb, q);
3793 
3794 	if (q->flags & TCQ_F_NOLOCK) {
3795 		if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3796 		    qdisc_run_begin(q)) {
3797 			/* Retest nolock_qdisc_is_empty() within the protection
3798 			 * of q->seqlock to protect from racing with requeuing.
3799 			 */
3800 			if (unlikely(!nolock_qdisc_is_empty(q))) {
3801 				rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3802 				__qdisc_run(q);
3803 				qdisc_run_end(q);
3804 
3805 				goto no_lock_out;
3806 			}
3807 
3808 			qdisc_bstats_cpu_update(q, skb);
3809 			if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3810 			    !nolock_qdisc_is_empty(q))
3811 				__qdisc_run(q);
3812 
3813 			qdisc_run_end(q);
3814 			return NET_XMIT_SUCCESS;
3815 		}
3816 
3817 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3818 		qdisc_run(q);
3819 
3820 no_lock_out:
3821 		if (unlikely(to_free))
3822 			kfree_skb_list(to_free);
3823 		return rc;
3824 	}
3825 
3826 	/*
3827 	 * Heuristic to force contended enqueues to serialize on a
3828 	 * separate lock before trying to get the qdisc main lock.
3829 	 * This permits the qdisc->running owner to get the lock more
3830 	 * often and dequeue packets faster.
3831 	 */
3832 	contended = qdisc_is_running(q);
3833 	if (unlikely(contended))
3834 		spin_lock(&q->busylock);
3835 
3836 	spin_lock(root_lock);
3837 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3838 		__qdisc_drop(skb, &to_free);
3839 		rc = NET_XMIT_DROP;
3840 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3841 		   qdisc_run_begin(q)) {
3842 		/*
3843 		 * This is a work-conserving queue; there are no old skbs
3844 		 * waiting to be sent out; and the qdisc is not running -
3845 		 * xmit the skb directly.
3846 		 */
3847 
3848 		qdisc_bstats_update(q, skb);
3849 
3850 		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3851 			if (unlikely(contended)) {
3852 				spin_unlock(&q->busylock);
3853 				contended = false;
3854 			}
3855 			__qdisc_run(q);
3856 		}
3857 
3858 		qdisc_run_end(q);
3859 		rc = NET_XMIT_SUCCESS;
3860 	} else {
3861 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3862 		if (qdisc_run_begin(q)) {
3863 			if (unlikely(contended)) {
3864 				spin_unlock(&q->busylock);
3865 				contended = false;
3866 			}
3867 			__qdisc_run(q);
3868 			qdisc_run_end(q);
3869 		}
3870 	}
3871 	spin_unlock(root_lock);
3872 	if (unlikely(to_free))
3873 		kfree_skb_list(to_free);
3874 	if (unlikely(contended))
3875 		spin_unlock(&q->busylock);
3876 	return rc;
3877 }
3878 
3879 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3880 static void skb_update_prio(struct sk_buff *skb)
3881 {
3882 	const struct netprio_map *map;
3883 	const struct sock *sk;
3884 	unsigned int prioidx;
3885 
3886 	if (skb->priority)
3887 		return;
3888 	map = rcu_dereference_bh(skb->dev->priomap);
3889 	if (!map)
3890 		return;
3891 	sk = skb_to_full_sk(skb);
3892 	if (!sk)
3893 		return;
3894 
3895 	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3896 
3897 	if (prioidx < map->priomap_len)
3898 		skb->priority = map->priomap[prioidx];
3899 }
3900 #else
3901 #define skb_update_prio(skb)
3902 #endif
3903 
3904 /**
3905  *	dev_loopback_xmit - loop back @skb
3906  *	@net: network namespace this loopback is happening in
3907  *	@sk:  sk needed to be a netfilter okfn
3908  *	@skb: buffer to transmit
3909  */
3910 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3911 {
3912 	skb_reset_mac_header(skb);
3913 	__skb_pull(skb, skb_network_offset(skb));
3914 	skb->pkt_type = PACKET_LOOPBACK;
3915 	if (skb->ip_summed == CHECKSUM_NONE)
3916 		skb->ip_summed = CHECKSUM_UNNECESSARY;
3917 	WARN_ON(!skb_dst(skb));
3918 	skb_dst_force(skb);
3919 	netif_rx_ni(skb);
3920 	return 0;
3921 }
3922 EXPORT_SYMBOL(dev_loopback_xmit);
3923 
3924 #ifdef CONFIG_NET_EGRESS
3925 static struct sk_buff *
3926 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3927 {
3928 	struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3929 	struct tcf_result cl_res;
3930 
3931 	if (!miniq)
3932 		return skb;
3933 
3934 	/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3935 	qdisc_skb_cb(skb)->mru = 0;
3936 	qdisc_skb_cb(skb)->post_ct = false;
3937 	mini_qdisc_bstats_cpu_update(miniq, skb);
3938 
3939 	switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
3940 	case TC_ACT_OK:
3941 	case TC_ACT_RECLASSIFY:
3942 		skb->tc_index = TC_H_MIN(cl_res.classid);
3943 		break;
3944 	case TC_ACT_SHOT:
3945 		mini_qdisc_qstats_cpu_drop(miniq);
3946 		*ret = NET_XMIT_DROP;
3947 		kfree_skb(skb);
3948 		return NULL;
3949 	case TC_ACT_STOLEN:
3950 	case TC_ACT_QUEUED:
3951 	case TC_ACT_TRAP:
3952 		*ret = NET_XMIT_SUCCESS;
3953 		consume_skb(skb);
3954 		return NULL;
3955 	case TC_ACT_REDIRECT:
3956 		/* No need to push/pop skb's mac_header here on egress! */
3957 		skb_do_redirect(skb);
3958 		*ret = NET_XMIT_SUCCESS;
3959 		return NULL;
3960 	default:
3961 		break;
3962 	}
3963 
3964 	return skb;
3965 }
3966 #endif /* CONFIG_NET_EGRESS */
3967 
3968 #ifdef CONFIG_XPS
3969 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3970 			       struct xps_dev_maps *dev_maps, unsigned int tci)
3971 {
3972 	int tc = netdev_get_prio_tc_map(dev, skb->priority);
3973 	struct xps_map *map;
3974 	int queue_index = -1;
3975 
3976 	if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
3977 		return queue_index;
3978 
3979 	tci *= dev_maps->num_tc;
3980 	tci += tc;
3981 
3982 	map = rcu_dereference(dev_maps->attr_map[tci]);
3983 	if (map) {
3984 		if (map->len == 1)
3985 			queue_index = map->queues[0];
3986 		else
3987 			queue_index = map->queues[reciprocal_scale(
3988 						skb_get_hash(skb), map->len)];
3989 		if (unlikely(queue_index >= dev->real_num_tx_queues))
3990 			queue_index = -1;
3991 	}
3992 	return queue_index;
3993 }
3994 #endif
3995 
3996 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3997 			 struct sk_buff *skb)
3998 {
3999 #ifdef CONFIG_XPS
4000 	struct xps_dev_maps *dev_maps;
4001 	struct sock *sk = skb->sk;
4002 	int queue_index = -1;
4003 
4004 	if (!static_key_false(&xps_needed))
4005 		return -1;
4006 
4007 	rcu_read_lock();
4008 	if (!static_key_false(&xps_rxqs_needed))
4009 		goto get_cpus_map;
4010 
4011 	dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4012 	if (dev_maps) {
4013 		int tci = sk_rx_queue_get(sk);
4014 
4015 		if (tci >= 0)
4016 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4017 							  tci);
4018 	}
4019 
4020 get_cpus_map:
4021 	if (queue_index < 0) {
4022 		dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4023 		if (dev_maps) {
4024 			unsigned int tci = skb->sender_cpu - 1;
4025 
4026 			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4027 							  tci);
4028 		}
4029 	}
4030 	rcu_read_unlock();
4031 
4032 	return queue_index;
4033 #else
4034 	return -1;
4035 #endif
4036 }
4037 
4038 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4039 		     struct net_device *sb_dev)
4040 {
4041 	return 0;
4042 }
4043 EXPORT_SYMBOL(dev_pick_tx_zero);
4044 
4045 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4046 		       struct net_device *sb_dev)
4047 {
4048 	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4049 }
4050 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
4051 
4052 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4053 		     struct net_device *sb_dev)
4054 {
4055 	struct sock *sk = skb->sk;
4056 	int queue_index = sk_tx_queue_get(sk);
4057 
4058 	sb_dev = sb_dev ? : dev;
4059 
4060 	if (queue_index < 0 || skb->ooo_okay ||
4061 	    queue_index >= dev->real_num_tx_queues) {
4062 		int new_index = get_xps_queue(dev, sb_dev, skb);
4063 
4064 		if (new_index < 0)
4065 			new_index = skb_tx_hash(dev, sb_dev, skb);
4066 
4067 		if (queue_index != new_index && sk &&
4068 		    sk_fullsock(sk) &&
4069 		    rcu_access_pointer(sk->sk_dst_cache))
4070 			sk_tx_queue_set(sk, new_index);
4071 
4072 		queue_index = new_index;
4073 	}
4074 
4075 	return queue_index;
4076 }
4077 EXPORT_SYMBOL(netdev_pick_tx);
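
/* Example: a driver that needs its own queue selection can still fall
 * back to the XPS/hash logic above from its ->ndo_select_queue(). A
 * hedged sketch; mydrv_select_queue() and the choice of pinning PTP
 * frames to queue 0 are assumptions for illustration:
 *
 *	static u16 mydrv_select_queue(struct net_device *dev,
 *				      struct sk_buff *skb,
 *				      struct net_device *sb_dev)
 *	{
 *		if (unlikely(skb->protocol == htons(ETH_P_1588)))
 *			return 0;
 *		return netdev_pick_tx(dev, skb, sb_dev);
 *	}
 */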
4078 
4079 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4080 					 struct sk_buff *skb,
4081 					 struct net_device *sb_dev)
4082 {
4083 	int queue_index = 0;
4084 
4085 #ifdef CONFIG_XPS
4086 	u32 sender_cpu = skb->sender_cpu - 1;
4087 
4088 	if (sender_cpu >= (u32)NR_CPUS)
4089 		skb->sender_cpu = raw_smp_processor_id() + 1;
4090 #endif
4091 
4092 	if (dev->real_num_tx_queues != 1) {
4093 		const struct net_device_ops *ops = dev->netdev_ops;
4094 
4095 		if (ops->ndo_select_queue)
4096 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4097 		else
4098 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
4099 
4100 		queue_index = netdev_cap_txqueue(dev, queue_index);
4101 	}
4102 
4103 	skb_set_queue_mapping(skb, queue_index);
4104 	return netdev_get_tx_queue(dev, queue_index);
4105 }
4106 
4107 /**
4108  *	__dev_queue_xmit - transmit a buffer
4109  *	@skb: buffer to transmit
4110  *	@sb_dev: subordinate device used for L2 forwarding offload
4111  *
4112  *	Queue a buffer for transmission to a network device. The caller must
4113  *	have set the device and priority and built the buffer before calling
4114  *	this function. The function can be called from an interrupt.
4115  *
4116  *	A negative errno code is returned on a failure. A success does not
4117  *	guarantee the frame will be transmitted as it may be dropped due
4118  *	to congestion or traffic shaping.
4119  *
4120  * -----------------------------------------------------------------------------------
4121  *      I notice this method can also return errors from the queue disciplines,
4122  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
4123  *      be positive.
4124  *
4125  *      Regardless of the return value, the skb is consumed, so it is currently
4126  *      difficult to retry a send to this method.  (You can bump the ref count
4127  *      before sending to hold a reference for retry if you are careful.)
4128  *
4129  *      When calling this method, interrupts MUST be enabled.  This is because
4130  *      the BH enable code must have IRQs enabled so that it will not deadlock.
4131  *          --BLG
4132  */
4133 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4134 {
4135 	struct net_device *dev = skb->dev;
4136 	struct netdev_queue *txq;
4137 	struct Qdisc *q;
4138 	int rc = -ENOMEM;
4139 	bool again = false;
4140 
4141 	skb_reset_mac_header(skb);
4142 
4143 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4144 		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4145 
4146 	/* Disable soft irqs for various locks below. Also
4147 	 * stops preemption for RCU.
4148 	 */
4149 	rcu_read_lock_bh();
4150 
4151 	skb_update_prio(skb);
4152 
4153 	qdisc_pkt_len_init(skb);
4154 #ifdef CONFIG_NET_CLS_ACT
4155 	skb->tc_at_ingress = 0;
4156 # ifdef CONFIG_NET_EGRESS
4157 	if (static_branch_unlikely(&egress_needed_key)) {
4158 		skb = sch_handle_egress(skb, &rc, dev);
4159 		if (!skb)
4160 			goto out;
4161 	}
4162 # endif
4163 #endif
4164 	/* If the device/qdisc doesn't need skb->dst, release it right now
4165 	 * while it's hot in this CPU's cache.
4166 	 */
4167 	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4168 		skb_dst_drop(skb);
4169 	else
4170 		skb_dst_force(skb);
4171 
4172 	txq = netdev_core_pick_tx(dev, skb, sb_dev);
4173 	q = rcu_dereference_bh(txq->qdisc);
4174 
4175 	trace_net_dev_queue(skb);
4176 	if (q->enqueue) {
4177 		rc = __dev_xmit_skb(skb, q, dev, txq);
4178 		goto out;
4179 	}
4180 
4181 	/* The device has no queue. Common case for software devices:
4182 	 * loopback, all the sorts of tunnels...
4183 	 *
4184 	 * Really, it is unlikely that netif_tx_lock protection is necessary
4185 	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
4186 	 * counters.)
4187 	 * However, it is possible that they rely on the protection
4188 	 * made by us here.
4189 	 *
4190 	 * Check this and take the lock. It is not prone to deadlocks.
4191 	 * Or take the noqueue qdisc path, which is even simpler 8)
4192 	 */
4193 	if (dev->flags & IFF_UP) {
4194 		int cpu = smp_processor_id(); /* ok because BHs are off */
4195 
4196 		if (txq->xmit_lock_owner != cpu) {
4197 			if (dev_xmit_recursion())
4198 				goto recursion_alert;
4199 
4200 			skb = validate_xmit_skb(skb, dev, &again);
4201 			if (!skb)
4202 				goto out;
4203 
4204 			PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4205 			HARD_TX_LOCK(dev, txq, cpu);
4206 
4207 			if (!netif_xmit_stopped(txq)) {
4208 				dev_xmit_recursion_inc();
4209 				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4210 				dev_xmit_recursion_dec();
4211 				if (dev_xmit_complete(rc)) {
4212 					HARD_TX_UNLOCK(dev, txq);
4213 					goto out;
4214 				}
4215 			}
4216 			HARD_TX_UNLOCK(dev, txq);
4217 			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4218 					     dev->name);
4219 		} else {
4220 			/* Recursion is detected! It is possible,
4221 			 * unfortunately
4222 			 */
4223 recursion_alert:
4224 			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4225 					     dev->name);
4226 		}
4227 	}
4228 
4229 	rc = -ENETDOWN;
4230 	rcu_read_unlock_bh();
4231 
4232 	atomic_long_inc(&dev->tx_dropped);
4233 	kfree_skb_list(skb);
4234 	return rc;
4235 out:
4236 	rcu_read_unlock_bh();
4237 	return rc;
4238 }
4239 
4240 int dev_queue_xmit(struct sk_buff *skb)
4241 {
4242 	return __dev_queue_xmit(skb, NULL);
4243 }
4244 EXPORT_SYMBOL(dev_queue_xmit);
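
/* Example: the calling convention spelled out in the kerneldoc above,
 * sketched for a hypothetical module emitting a pre-built frame. The
 * skb is consumed whatever the outcome, and the return value may be a
 * positive NET_XMIT_* code as well as a negative errno:
 *
 *	skb->dev = dev;				// caller sets the device
 *	skb->priority = TC_PRIO_CONTROL;	// ... and the priority
 *	rc = dev_queue_xmit(skb);
 *	if (rc)
 *		pr_debug("xmit returned %d\n", rc);
 */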
4245 
4246 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
4247 {
4248 	return __dev_queue_xmit(skb, sb_dev);
4249 }
4250 EXPORT_SYMBOL(dev_queue_xmit_accel);
4251 
4252 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4253 {
4254 	struct net_device *dev = skb->dev;
4255 	struct sk_buff *orig_skb = skb;
4256 	struct netdev_queue *txq;
4257 	int ret = NETDEV_TX_BUSY;
4258 	bool again = false;
4259 
4260 	if (unlikely(!netif_running(dev) ||
4261 		     !netif_carrier_ok(dev)))
4262 		goto drop;
4263 
4264 	skb = validate_xmit_skb_list(skb, dev, &again);
4265 	if (skb != orig_skb)
4266 		goto drop;
4267 
4268 	skb_set_queue_mapping(skb, queue_id);
4269 	txq = skb_get_tx_queue(dev, skb);
4270 	PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4271 
4272 	local_bh_disable();
4273 
4274 	dev_xmit_recursion_inc();
4275 	HARD_TX_LOCK(dev, txq, smp_processor_id());
4276 	if (!netif_xmit_frozen_or_drv_stopped(txq))
4277 		ret = netdev_start_xmit(skb, dev, txq, false);
4278 	HARD_TX_UNLOCK(dev, txq);
4279 	dev_xmit_recursion_dec();
4280 
4281 	local_bh_enable();
4282 	return ret;
4283 drop:
4284 	atomic_long_inc(&dev->tx_dropped);
4285 	kfree_skb_list(skb);
4286 	return NET_XMIT_DROP;
4287 }
4288 EXPORT_SYMBOL(__dev_direct_xmit);
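
/* Example: __dev_direct_xmit() bypasses the qdisc layer, so the caller
 * owns queue selection and skb cleanup on a busy ring. A hedged sketch
 * of a caller in the style of its AF_XDP user; skb->dev and queue_id
 * validity are the caller's responsibility:
 *
 *	ret = __dev_direct_xmit(skb, queue_id);
 *	if (!dev_xmit_complete(ret))
 *		kfree_skb(skb);	// e.g. NETDEV_TX_BUSY: skb is still ours
 *	// NET_XMIT_DROP means the skb was already freed for us
 */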
4289 
4290 /*************************************************************************
4291  *			Receiver routines
4292  *************************************************************************/
4293 
4294 int netdev_max_backlog __read_mostly = 1000;
4295 EXPORT_SYMBOL(netdev_max_backlog);
4296 
4297 int netdev_tstamp_prequeue __read_mostly = 1;
4298 int netdev_budget __read_mostly = 300;
4299 /* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
4300 unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4301 int weight_p __read_mostly = 64;           /* old backlog weight */
4302 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
4303 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
4304 int dev_rx_weight __read_mostly = 64;
4305 int dev_tx_weight __read_mostly = 64;
4306 /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
4307 int gro_normal_batch __read_mostly = 8;
4308 
4309 /* Called with irq disabled */
4310 static inline void ____napi_schedule(struct softnet_data *sd,
4311 				     struct napi_struct *napi)
4312 {
4313 	struct task_struct *thread;
4314 
4315 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4316 		/* Paired with smp_mb__before_atomic() in
4317 		 * napi_enable()/dev_set_threaded().
4318 		 * Use READ_ONCE() to guarantee a complete
4319 		 * read on napi->thread. Only call
4320 		 * wake_up_process() when it's not NULL.
4321 		 */
4322 		thread = READ_ONCE(napi->thread);
4323 		if (thread) {
4324 			/* Avoid doing set_bit() if the thread is in
4325 			 * INTERRUPTIBLE state, because napi_thread_wait()
4326 			 * makes sure to proceed with napi polling
4327 			 * if the thread is explicitly woken from here.
4328 			 */
4329 			if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4330 				set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4331 			wake_up_process(thread);
4332 			return;
4333 		}
4334 	}
4335 
4336 	list_add_tail(&napi->poll_list, &sd->poll_list);
4337 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4338 }
4339 
4340 #ifdef CONFIG_RPS
4341 
4342 /* One global table that all flow-based protocols share. */
4343 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4344 EXPORT_SYMBOL(rps_sock_flow_table);
4345 u32 rps_cpu_mask __read_mostly;
4346 EXPORT_SYMBOL(rps_cpu_mask);
4347 
4348 struct static_key_false rps_needed __read_mostly;
4349 EXPORT_SYMBOL(rps_needed);
4350 struct static_key_false rfs_needed __read_mostly;
4351 EXPORT_SYMBOL(rfs_needed);
4352 
4353 static struct rps_dev_flow *
4354 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4355 	    struct rps_dev_flow *rflow, u16 next_cpu)
4356 {
4357 	if (next_cpu < nr_cpu_ids) {
4358 #ifdef CONFIG_RFS_ACCEL
4359 		struct netdev_rx_queue *rxqueue;
4360 		struct rps_dev_flow_table *flow_table;
4361 		struct rps_dev_flow *old_rflow;
4362 		u32 flow_id;
4363 		u16 rxq_index;
4364 		int rc;
4365 
4366 		/* Should we steer this flow to a different hardware queue? */
4367 		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4368 		    !(dev->features & NETIF_F_NTUPLE))
4369 			goto out;
4370 		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4371 		if (rxq_index == skb_get_rx_queue(skb))
4372 			goto out;
4373 
4374 		rxqueue = dev->_rx + rxq_index;
4375 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
4376 		if (!flow_table)
4377 			goto out;
4378 		flow_id = skb_get_hash(skb) & flow_table->mask;
4379 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4380 							rxq_index, flow_id);
4381 		if (rc < 0)
4382 			goto out;
4383 		old_rflow = rflow;
4384 		rflow = &flow_table->flows[flow_id];
4385 		rflow->filter = rc;
4386 		if (old_rflow->filter == rflow->filter)
4387 			old_rflow->filter = RPS_NO_FILTER;
4388 	out:
4389 #endif
4390 		rflow->last_qtail =
4391 			per_cpu(softnet_data, next_cpu).input_queue_head;
4392 	}
4393 
4394 	rflow->cpu = next_cpu;
4395 	return rflow;
4396 }
4397 
4398 /*
4399  * get_rps_cpu is called from netif_receive_skb and returns the target
4400  * CPU from the RPS map of the receiving queue for a given skb.
4401  * rcu_read_lock must be held on entry.
4402  */
4403 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4404 		       struct rps_dev_flow **rflowp)
4405 {
4406 	const struct rps_sock_flow_table *sock_flow_table;
4407 	struct netdev_rx_queue *rxqueue = dev->_rx;
4408 	struct rps_dev_flow_table *flow_table;
4409 	struct rps_map *map;
4410 	int cpu = -1;
4411 	u32 tcpu;
4412 	u32 hash;
4413 
4414 	if (skb_rx_queue_recorded(skb)) {
4415 		u16 index = skb_get_rx_queue(skb);
4416 
4417 		if (unlikely(index >= dev->real_num_rx_queues)) {
4418 			WARN_ONCE(dev->real_num_rx_queues > 1,
4419 				  "%s received packet on queue %u, but number "
4420 				  "of RX queues is %u\n",
4421 				  dev->name, index, dev->real_num_rx_queues);
4422 			goto done;
4423 		}
4424 		rxqueue += index;
4425 	}
4426 
4427 	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4428 
4429 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4430 	map = rcu_dereference(rxqueue->rps_map);
4431 	if (!flow_table && !map)
4432 		goto done;
4433 
4434 	skb_reset_network_header(skb);
4435 	hash = skb_get_hash(skb);
4436 	if (!hash)
4437 		goto done;
4438 
4439 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
4440 	if (flow_table && sock_flow_table) {
4441 		struct rps_dev_flow *rflow;
4442 		u32 next_cpu;
4443 		u32 ident;
4444 
4445 		/* First check the global flow table for a match */
4446 		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4447 		if ((ident ^ hash) & ~rps_cpu_mask)
4448 			goto try_rps;
4449 
4450 		next_cpu = ident & rps_cpu_mask;
4451 
4452 		/* OK, now we know there is a match,
4453 		 * we can look at the local (per receive queue) flow table
4454 		 */
4455 		rflow = &flow_table->flows[hash & flow_table->mask];
4456 		tcpu = rflow->cpu;
4457 
4458 		/*
4459 		 * If the desired CPU (where last recvmsg was done) is
4460 		 * different from current CPU (one in the rx-queue flow
4461 		 * table entry), switch if one of the following holds:
4462 		 *   - Current CPU is unset (>= nr_cpu_ids).
4463 		 *   - Current CPU is offline.
4464 		 *   - The current CPU's queue tail has advanced beyond the
4465 		 *     last packet that was enqueued using this table entry.
4466 		 *     This guarantees that all previous packets for the flow
4467 		 *     have been dequeued, thus preserving in order delivery.
4468 		 */
4469 		if (unlikely(tcpu != next_cpu) &&
4470 		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4471 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4472 		      rflow->last_qtail)) >= 0)) {
4473 			tcpu = next_cpu;
4474 			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4475 		}
4476 
4477 		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4478 			*rflowp = rflow;
4479 			cpu = tcpu;
4480 			goto done;
4481 		}
4482 	}
4483 
4484 try_rps:
4485 
4486 	if (map) {
4487 		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4488 		if (cpu_online(tcpu)) {
4489 			cpu = tcpu;
4490 			goto done;
4491 		}
4492 	}
4493 
4494 done:
4495 	return cpu;
4496 }
4497 
4498 #ifdef CONFIG_RFS_ACCEL
4499 
4500 /**
4501  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4502  * @dev: Device on which the filter was set
4503  * @rxq_index: RX queue index
4504  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4505  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4506  *
4507  * Drivers that implement ndo_rx_flow_steer() should periodically call
4508  * this function for each installed filter and remove the filters for
4509  * which it returns %true.
4510  */
4511 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4512 			 u32 flow_id, u16 filter_id)
4513 {
4514 	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4515 	struct rps_dev_flow_table *flow_table;
4516 	struct rps_dev_flow *rflow;
4517 	bool expire = true;
4518 	unsigned int cpu;
4519 
4520 	rcu_read_lock();
4521 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
4522 	if (flow_table && flow_id <= flow_table->mask) {
4523 		rflow = &flow_table->flows[flow_id];
4524 		cpu = READ_ONCE(rflow->cpu);
4525 		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4526 		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4527 			   rflow->last_qtail) <
4528 		     (int)(10 * flow_table->mask)))
4529 			expire = false;
4530 	}
4531 	rcu_read_unlock();
4532 	return expire;
4533 }
4534 EXPORT_SYMBOL(rps_may_expire_flow);
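
/* Example: the periodic scan the kerneldoc above asks aRFS drivers to
 * run. A hedged sketch; the filter list, its locking and
 * mydrv_remove_filter() (which must also unlink the entry) are all
 * hypothetical driver state:
 *
 *	struct mydrv_filter *f, *tmp;
 *
 *	list_for_each_entry_safe(f, tmp, &priv->arfs_filters, list) {
 *		if (rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			mydrv_remove_filter(priv, f);
 *	}
 */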
4535 
4536 #endif /* CONFIG_RFS_ACCEL */
4537 
4538 /* Called from hardirq (IPI) context */
4539 static void rps_trigger_softirq(void *data)
4540 {
4541 	struct softnet_data *sd = data;
4542 
4543 	____napi_schedule(sd, &sd->backlog);
4544 	sd->received_rps++;
4545 }
4546 
4547 #endif /* CONFIG_RPS */
4548 
4549 /*
4550  * Check if this softnet_data structure belongs to another CPU.
4551  * If yes, queue it on our IPI list and return 1;
4552  * if no, return 0.
4553  */
4554 static int rps_ipi_queued(struct softnet_data *sd)
4555 {
4556 #ifdef CONFIG_RPS
4557 	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4558 
4559 	if (sd != mysd) {
4560 		sd->rps_ipi_next = mysd->rps_ipi_list;
4561 		mysd->rps_ipi_list = sd;
4562 
4563 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
4564 		return 1;
4565 	}
4566 #endif /* CONFIG_RPS */
4567 	return 0;
4568 }
4569 
4570 #ifdef CONFIG_NET_FLOW_LIMIT
4571 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4572 #endif
4573 
4574 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4575 {
4576 #ifdef CONFIG_NET_FLOW_LIMIT
4577 	struct sd_flow_limit *fl;
4578 	struct softnet_data *sd;
4579 	unsigned int old_flow, new_flow;
4580 
4581 	if (qlen < (netdev_max_backlog >> 1))
4582 		return false;
4583 
4584 	sd = this_cpu_ptr(&softnet_data);
4585 
4586 	rcu_read_lock();
4587 	fl = rcu_dereference(sd->flow_limit);
4588 	if (fl) {
4589 		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4590 		old_flow = fl->history[fl->history_head];
4591 		fl->history[fl->history_head] = new_flow;
4592 
4593 		fl->history_head++;
4594 		fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4595 
4596 		if (likely(fl->buckets[old_flow]))
4597 			fl->buckets[old_flow]--;
4598 
4599 		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4600 			fl->count++;
4601 			rcu_read_unlock();
4602 			return true;
4603 		}
4604 	}
4605 	rcu_read_unlock();
4606 #endif
4607 	return false;
4608 }
4609 
4610 /*
4611  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4612  * queue (may be a remote CPU queue).
4613  */
4614 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4615 			      unsigned int *qtail)
4616 {
4617 	struct softnet_data *sd;
4618 	unsigned long flags;
4619 	unsigned int qlen;
4620 
4621 	sd = &per_cpu(softnet_data, cpu);
4622 
4623 	local_irq_save(flags);
4624 
4625 	rps_lock(sd);
4626 	if (!netif_running(skb->dev))
4627 		goto drop;
4628 	qlen = skb_queue_len(&sd->input_pkt_queue);
4629 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4630 		if (qlen) {
4631 enqueue:
4632 			__skb_queue_tail(&sd->input_pkt_queue, skb);
4633 			input_queue_tail_incr_save(sd, qtail);
4634 			rps_unlock(sd);
4635 			local_irq_restore(flags);
4636 			return NET_RX_SUCCESS;
4637 		}
4638 
4639 		/* Schedule NAPI for backlog device
4640 		 * We can use a non-atomic operation since we own the queue lock
4641 		 */
4642 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
4643 			if (!rps_ipi_queued(sd))
4644 				____napi_schedule(sd, &sd->backlog);
4645 		}
4646 		goto enqueue;
4647 	}
4648 
4649 drop:
4650 	sd->dropped++;
4651 	rps_unlock(sd);
4652 
4653 	local_irq_restore(flags);
4654 
4655 	atomic_long_inc(&skb->dev->rx_dropped);
4656 	kfree_skb(skb);
4657 	return NET_RX_DROP;
4658 }
4659 
4660 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4661 {
4662 	struct net_device *dev = skb->dev;
4663 	struct netdev_rx_queue *rxqueue;
4664 
4665 	rxqueue = dev->_rx;
4666 
4667 	if (skb_rx_queue_recorded(skb)) {
4668 		u16 index = skb_get_rx_queue(skb);
4669 
4670 		if (unlikely(index >= dev->real_num_rx_queues)) {
4671 			WARN_ONCE(dev->real_num_rx_queues > 1,
4672 				  "%s received packet on queue %u, but number "
4673 				  "of RX queues is %u\n",
4674 				  dev->name, index, dev->real_num_rx_queues);
4675 
4676 			return rxqueue; /* Return first rxqueue */
4677 		}
4678 		rxqueue += index;
4679 	}
4680 	return rxqueue;
4681 }
4682 
4683 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4684 			     struct bpf_prog *xdp_prog)
4685 {
4686 	void *orig_data, *orig_data_end, *hard_start;
4687 	struct netdev_rx_queue *rxqueue;
4688 	bool orig_bcast, orig_host;
4689 	u32 mac_len, frame_sz;
4690 	__be16 orig_eth_type;
4691 	struct ethhdr *eth;
4692 	u32 metalen, act;
4693 	int off;
4694 
4695 	/* The XDP program wants to see the packet starting at the MAC
4696 	 * header.
4697 	 */
4698 	mac_len = skb->data - skb_mac_header(skb);
4699 	hard_start = skb->data - skb_headroom(skb);
4700 
4701 	/* SKB "head" area always has tailroom for skb_shared_info */
4702 	frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4703 	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4704 
4705 	rxqueue = netif_get_rxqueue(skb);
4706 	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4707 	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4708 			 skb_headlen(skb) + mac_len, true);
4709 
4710 	orig_data_end = xdp->data_end;
4711 	orig_data = xdp->data;
4712 	eth = (struct ethhdr *)xdp->data;
4713 	orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4714 	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4715 	orig_eth_type = eth->h_proto;
4716 
4717 	act = bpf_prog_run_xdp(xdp_prog, xdp);
4718 
4719 	/* check if bpf_xdp_adjust_head was used */
4720 	off = xdp->data - orig_data;
4721 	if (off) {
4722 		if (off > 0)
4723 			__skb_pull(skb, off);
4724 		else if (off < 0)
4725 			__skb_push(skb, -off);
4726 
4727 		skb->mac_header += off;
4728 		skb_reset_network_header(skb);
4729 	}
4730 
4731 	/* check if bpf_xdp_adjust_tail was used */
4732 	off = xdp->data_end - orig_data_end;
4733 	if (off != 0) {
4734 		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4735 		skb->len += off; /* positive on grow, negative on shrink */
4736 	}
4737 
4738 	/* check if XDP changed the eth hdr such that the SKB needs an update */
4739 	eth = (struct ethhdr *)xdp->data;
4740 	if ((orig_eth_type != eth->h_proto) ||
4741 	    (orig_host != ether_addr_equal_64bits(eth->h_dest,
4742 						  skb->dev->dev_addr)) ||
4743 	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4744 		__skb_push(skb, ETH_HLEN);
4745 		skb->pkt_type = PACKET_HOST;
4746 		skb->protocol = eth_type_trans(skb, skb->dev);
4747 	}
4748 
4749 	/* Redirect/Tx gives an L2 packet; code that will reuse the skb must
4750 	 * __skb_pull it before calling us again on the redirect path. We do
4751 	 * not call do_redirect as we leave that up to the caller.
4752 	 *
4753 	 * Caller is responsible for managing lifetime of skb (i.e. calling
4754 	 * kfree_skb in response to actions it cannot handle/XDP_DROP).
4755 	 */
4756 	switch (act) {
4757 	case XDP_REDIRECT:
4758 	case XDP_TX:
4759 		__skb_push(skb, mac_len);
4760 		break;
4761 	case XDP_PASS:
4762 		metalen = xdp->data - xdp->data_meta;
4763 		if (metalen)
4764 			skb_metadata_set(skb, metalen);
4765 		break;
4766 	}
4767 
4768 	return act;
4769 }
4770 
4771 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4772 				     struct xdp_buff *xdp,
4773 				     struct bpf_prog *xdp_prog)
4774 {
4775 	u32 act = XDP_DROP;
4776 
4777 	/* Reinjected packets coming from act_mirred or similar should
4778 	 * not get XDP generic processing.
4779 	 */
4780 	if (skb_is_redirected(skb))
4781 		return XDP_PASS;
4782 
4783 	/* XDP packets must be linear and must have sufficient headroom
4784 	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4785 	 * native XDP provides, thus we need to do it here as well.
4786 	 */
4787 	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4788 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4789 		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4790 		int troom = skb->tail + skb->data_len - skb->end;
4791 
4792 		/* In case we have to go down the path and also linearize,
4793 		 * then let's do the pskb_expand_head() work just once here.
4794 		 */
4795 		if (pskb_expand_head(skb,
4796 				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4797 				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4798 			goto do_drop;
4799 		if (skb_linearize(skb))
4800 			goto do_drop;
4801 	}
4802 
4803 	act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
4804 	switch (act) {
4805 	case XDP_REDIRECT:
4806 	case XDP_TX:
4807 	case XDP_PASS:
4808 		break;
4809 	default:
4810 		bpf_warn_invalid_xdp_action(act);
4811 		fallthrough;
4812 	case XDP_ABORTED:
4813 		trace_xdp_exception(skb->dev, xdp_prog, act);
4814 		fallthrough;
4815 	case XDP_DROP:
4816 	do_drop:
4817 		kfree_skb(skb);
4818 		break;
4819 	}
4820 
4821 	return act;
4822 }
4823 
4824 /* When doing generic XDP we have to bypass the qdisc layer and the
4825  * network taps in order to match in-driver-XDP behavior.
4826  */
4827 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4828 {
4829 	struct net_device *dev = skb->dev;
4830 	struct netdev_queue *txq;
4831 	bool free_skb = true;
4832 	int cpu, rc;
4833 
4834 	txq = netdev_core_pick_tx(dev, skb, NULL);
4835 	cpu = smp_processor_id();
4836 	HARD_TX_LOCK(dev, txq, cpu);
4837 	if (!netif_xmit_stopped(txq)) {
4838 		rc = netdev_start_xmit(skb, dev, txq, 0);
4839 		if (dev_xmit_complete(rc))
4840 			free_skb = false;
4841 	}
4842 	HARD_TX_UNLOCK(dev, txq);
4843 	if (free_skb) {
4844 		trace_xdp_exception(dev, xdp_prog, XDP_TX);
4845 		kfree_skb(skb);
4846 	}
4847 }
4848 
4849 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4850 
4851 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4852 {
4853 	if (xdp_prog) {
4854 		struct xdp_buff xdp;
4855 		u32 act;
4856 		int err;
4857 
4858 		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4859 		if (act != XDP_PASS) {
4860 			switch (act) {
4861 			case XDP_REDIRECT:
4862 				err = xdp_do_generic_redirect(skb->dev, skb,
4863 							      &xdp, xdp_prog);
4864 				if (err)
4865 					goto out_redir;
4866 				break;
4867 			case XDP_TX:
4868 				generic_xdp_tx(skb, xdp_prog);
4869 				break;
4870 			}
4871 			return XDP_DROP;
4872 		}
4873 	}
4874 	return XDP_PASS;
4875 out_redir:
4876 	kfree_skb(skb);
4877 	return XDP_DROP;
4878 }
4879 EXPORT_SYMBOL_GPL(do_xdp_generic);
4880 
4881 static int netif_rx_internal(struct sk_buff *skb)
4882 {
4883 	int ret;
4884 
4885 	net_timestamp_check(netdev_tstamp_prequeue, skb);
4886 
4887 	trace_netif_rx(skb);
4888 
4889 #ifdef CONFIG_RPS
4890 	if (static_branch_unlikely(&rps_needed)) {
4891 		struct rps_dev_flow voidflow, *rflow = &voidflow;
4892 		int cpu;
4893 
4894 		preempt_disable();
4895 		rcu_read_lock();
4896 
4897 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
4898 		if (cpu < 0)
4899 			cpu = smp_processor_id();
4900 
4901 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4902 
4903 		rcu_read_unlock();
4904 		preempt_enable();
4905 	} else
4906 #endif
4907 	{
4908 		unsigned int qtail;
4909 
4910 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4911 		put_cpu();
4912 	}
4913 	return ret;
4914 }
4915 
4916 /**
4917  *	netif_rx	-	post buffer to the network code
4918  *	@skb: buffer to post
4919  *
4920  *	This function receives a packet from a device driver and queues it for
4921  *	the upper (protocol) levels to process.  It always succeeds. The buffer
4922  *	may be dropped during processing for congestion control or by the
4923  *	protocol layers.
4924  *
4925  *	return values:
4926  *	NET_RX_SUCCESS	(no congestion)
4927  *	NET_RX_DROP     (packet was dropped)
4928  *
4929  */
4930 
4931 int netif_rx(struct sk_buff *skb)
4932 {
4933 	int ret;
4934 
4935 	trace_netif_rx_entry(skb);
4936 
4937 	ret = netif_rx_internal(skb);
4938 	trace_netif_rx_exit(ret);
4939 
4940 	return ret;
4941 }
4942 EXPORT_SYMBOL(netif_rx);
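
/* Example: the classic driver-side caller of netif_rx(). A hedged
 * sketch of an rx interrupt path handing a freshly filled skb to the
 * stack; only eth_type_trans() and netif_rx() are fixed parts of the
 * recipe, rx_buf and pkt_len are assumed driver state:
 *
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);	// always "succeeds"; drops happen internally
 */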
4943 
4944 int netif_rx_ni(struct sk_buff *skb)
4945 {
4946 	int err;
4947 
4948 	trace_netif_rx_ni_entry(skb);
4949 
4950 	preempt_disable();
4951 	err = netif_rx_internal(skb);
4952 	if (local_softirq_pending())
4953 		do_softirq();
4954 	preempt_enable();
4955 	trace_netif_rx_ni_exit(err);
4956 
4957 	return err;
4958 }
4959 EXPORT_SYMBOL(netif_rx_ni);
4960 
4961 int netif_rx_any_context(struct sk_buff *skb)
4962 {
4963 	/*
4964 	 * If invoked from contexts which do not invoke bottom half
4965 	 * processing either at return from interrupt or when softirqs are
4966 	 * re-enabled, use netif_rx_ni(), which invokes bottom-half processing
4967 	 * directly.
4968 	 */
4969 	if (in_interrupt())
4970 		return netif_rx(skb);
4971 	else
4972 		return netif_rx_ni(skb);
4973 }
4974 EXPORT_SYMBOL(netif_rx_any_context);
4975 
4976 static __latent_entropy void net_tx_action(struct softirq_action *h)
4977 {
4978 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4979 
4980 	if (sd->completion_queue) {
4981 		struct sk_buff *clist;
4982 
4983 		local_irq_disable();
4984 		clist = sd->completion_queue;
4985 		sd->completion_queue = NULL;
4986 		local_irq_enable();
4987 
4988 		while (clist) {
4989 			struct sk_buff *skb = clist;
4990 
4991 			clist = clist->next;
4992 
4993 			WARN_ON(refcount_read(&skb->users));
4994 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4995 				trace_consume_skb(skb);
4996 			else
4997 				trace_kfree_skb(skb, net_tx_action);
4998 
4999 			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5000 				__kfree_skb(skb);
5001 			else
5002 				__kfree_skb_defer(skb);
5003 		}
5004 	}
5005 
5006 	if (sd->output_queue) {
5007 		struct Qdisc *head;
5008 
5009 		local_irq_disable();
5010 		head = sd->output_queue;
5011 		sd->output_queue = NULL;
5012 		sd->output_queue_tailp = &sd->output_queue;
5013 		local_irq_enable();
5014 
5015 		rcu_read_lock();
5016 
5017 		while (head) {
5018 			struct Qdisc *q = head;
5019 			spinlock_t *root_lock = NULL;
5020 
5021 			head = head->next_sched;
5022 
5023 			/* We need to make sure head->next_sched is read
5024 			 * before clearing __QDISC_STATE_SCHED
5025 			 */
5026 			smp_mb__before_atomic();
5027 
5028 			if (!(q->flags & TCQ_F_NOLOCK)) {
5029 				root_lock = qdisc_lock(q);
5030 				spin_lock(root_lock);
5031 			} else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5032 						     &q->state))) {
5033 				/* There is a synchronize_net() between
5034 				 * STATE_DEACTIVATED flag being set and
5035 				 * qdisc_reset()/some_qdisc_is_busy() in
5036 				 * dev_deactivate(), so we can safely bail out
5037 				 * early here to avoid data race between
5038 				 * qdisc_deactivate() and some_qdisc_is_busy()
5039 				 * for lockless qdisc.
5040 				 */
5041 				clear_bit(__QDISC_STATE_SCHED, &q->state);
5042 				continue;
5043 			}
5044 
5045 			clear_bit(__QDISC_STATE_SCHED, &q->state);
5046 			qdisc_run(q);
5047 			if (root_lock)
5048 				spin_unlock(root_lock);
5049 		}
5050 
5051 		rcu_read_unlock();
5052 	}
5053 
5054 	xfrm_dev_backlog(sd);
5055 }
5056 
5057 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5058 /* This hook is defined here for ATM LANE */
5059 int (*br_fdb_test_addr_hook)(struct net_device *dev,
5060 			     unsigned char *addr) __read_mostly;
5061 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5062 #endif
5063 
5064 static inline struct sk_buff *
5065 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
5066 		   struct net_device *orig_dev, bool *another)
5067 {
5068 #ifdef CONFIG_NET_CLS_ACT
5069 	struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
5070 	struct tcf_result cl_res;
5071 
5072 	/* If there's at least one ingress present somewhere (so
5073 	 * we get here via enabled static key), remaining devices
5074 	 * that are not configured with an ingress qdisc will bail
5075 	 * out here.
5076 	 */
5077 	if (!miniq)
5078 		return skb;
5079 
5080 	if (*pt_prev) {
5081 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
5082 		*pt_prev = NULL;
5083 	}
5084 
5085 	qdisc_skb_cb(skb)->pkt_len = skb->len;
5086 	qdisc_skb_cb(skb)->mru = 0;
5087 	qdisc_skb_cb(skb)->post_ct = false;
5088 	skb->tc_at_ingress = 1;
5089 	mini_qdisc_bstats_cpu_update(miniq, skb);
5090 
5091 	switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
5092 	case TC_ACT_OK:
5093 	case TC_ACT_RECLASSIFY:
5094 		skb->tc_index = TC_H_MIN(cl_res.classid);
5095 		break;
5096 	case TC_ACT_SHOT:
5097 		mini_qdisc_qstats_cpu_drop(miniq);
5098 		kfree_skb(skb);
5099 		return NULL;
5100 	case TC_ACT_STOLEN:
5101 	case TC_ACT_QUEUED:
5102 	case TC_ACT_TRAP:
5103 		consume_skb(skb);
5104 		return NULL;
5105 	case TC_ACT_REDIRECT:
5106 		/* skb_mac_header check was done by cls/act_bpf, so
5107 		 * we can safely push the L2 header back before
5108 		 * redirecting to another netdev
5109 		 */
5110 		__skb_push(skb, skb->mac_len);
5111 		if (skb_do_redirect(skb) == -EAGAIN) {
5112 			__skb_pull(skb, skb->mac_len);
5113 			*another = true;
5114 			break;
5115 		}
5116 		return NULL;
5117 	case TC_ACT_CONSUMED:
5118 		return NULL;
5119 	default:
5120 		break;
5121 	}
5122 #endif /* CONFIG_NET_CLS_ACT */
5123 	return skb;
5124 }
5125 
5126 /**
5127  *	netdev_is_rx_handler_busy - check if receive handler is registered
5128  *	@dev: device to check
5129  *
5130  *	Check if a receive handler is already registered for a given device.
5131  *	Return true if there is one.
5132  *
5133  *	The caller must hold the rtnl_mutex.
5134  */
5135 bool netdev_is_rx_handler_busy(struct net_device *dev)
5136 {
5137 	ASSERT_RTNL();
5138 	return dev && rtnl_dereference(dev->rx_handler);
5139 }
5140 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5141 
5142 /**
5143  *	netdev_rx_handler_register - register receive handler
5144  *	@dev: device to register a handler for
5145  *	@rx_handler: receive handler to register
5146  *	@rx_handler_data: data pointer that is used by rx handler
5147  *
5148  *	Register a receive handler for a device. This handler will then be
5149  *	called from __netif_receive_skb. A negative errno code is returned
5150  *	on a failure.
5151  *
5152  *	The caller must hold the rtnl_mutex.
5153  *
5154  *	For a general description of rx_handler, see enum rx_handler_result.
5155  */
5156 int netdev_rx_handler_register(struct net_device *dev,
5157 			       rx_handler_func_t *rx_handler,
5158 			       void *rx_handler_data)
5159 {
5160 	if (netdev_is_rx_handler_busy(dev))
5161 		return -EBUSY;
5162 
5163 	if (dev->priv_flags & IFF_NO_RX_HANDLER)
5164 		return -EINVAL;
5165 
5166 	/* Note: rx_handler_data must be set before rx_handler */
5167 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5168 	rcu_assign_pointer(dev->rx_handler, rx_handler);
5169 
5170 	return 0;
5171 }
5172 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
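/*
 * Editor's sketch (not part of the original file): minimal use of the
 * rx_handler API documented above, as a bridge/bond-like module might
 * do it.  "my_rx_handler"/"my_attach" are hypothetical names; the
 * register/unregister calls and the RX_HANDLER_* return codes are the
 * real kernel API.
 */
static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *priv = rcu_dereference(skb->dev->rx_handler_data);

	/* Inspect, redirect or steal the skb here; returning
	 * RX_HANDLER_PASS lets normal protocol delivery continue.
	 */
	(void)priv;
	return RX_HANDLER_PASS;
}

static int my_attach(struct net_device *dev, void *priv)
{
	int err;

	rtnl_lock();	/* register/unregister require the rtnl_mutex */
	err = netdev_rx_handler_register(dev, my_rx_handler, priv);
	rtnl_unlock();
	return err;	/* -EBUSY if a handler is already installed */
}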
5173 
5174 /**
5175  *	netdev_rx_handler_unregister - unregister receive handler
5176  *	@dev: device to unregister a handler from
5177  *
5178  *	Unregister a receive handler from a device.
5179  *
5180  *	The caller must hold the rtnl_mutex.
5181  */
5182 void netdev_rx_handler_unregister(struct net_device *dev)
5183 {
5184 
5185 	ASSERT_RTNL();
5186 	RCU_INIT_POINTER(dev->rx_handler, NULL);
5187 	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
5188 	 * section has a guarantee to see a non NULL rx_handler_data
5189 	 * as well.
5190 	 */
5191 	synchronize_net();
5192 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5193 }
5194 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5195 
5196 /*
5197  * Limit the use of PFMEMALLOC reserves to those protocols that implement
5198  * the special handling of PFMEMALLOC skbs.
5199  */
5200 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5201 {
5202 	switch (skb->protocol) {
5203 	case htons(ETH_P_ARP):
5204 	case htons(ETH_P_IP):
5205 	case htons(ETH_P_IPV6):
5206 	case htons(ETH_P_8021Q):
5207 	case htons(ETH_P_8021AD):
5208 		return true;
5209 	default:
5210 		return false;
5211 	}
5212 }
5213 
5214 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5215 			     int *ret, struct net_device *orig_dev)
5216 {
5217 	if (nf_hook_ingress_active(skb)) {
5218 		int ingress_retval;
5219 
5220 		if (*pt_prev) {
5221 			*ret = deliver_skb(skb, *pt_prev, orig_dev);
5222 			*pt_prev = NULL;
5223 		}
5224 
5225 		rcu_read_lock();
5226 		ingress_retval = nf_hook_ingress(skb);
5227 		rcu_read_unlock();
5228 		return ingress_retval;
5229 	}
5230 	return 0;
5231 }
5232 
5233 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5234 				    struct packet_type **ppt_prev)
5235 {
5236 	struct packet_type *ptype, *pt_prev;
5237 	rx_handler_func_t *rx_handler;
5238 	struct sk_buff *skb = *pskb;
5239 	struct net_device *orig_dev;
5240 	bool deliver_exact = false;
5241 	int ret = NET_RX_DROP;
5242 	__be16 type;
5243 
5244 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
5245 
5246 	trace_netif_receive_skb(skb);
5247 
5248 	orig_dev = skb->dev;
5249 
5250 	skb_reset_network_header(skb);
5251 	if (!skb_transport_header_was_set(skb))
5252 		skb_reset_transport_header(skb);
5253 	skb_reset_mac_len(skb);
5254 
5255 	pt_prev = NULL;
5256 
5257 another_round:
5258 	skb->skb_iif = skb->dev->ifindex;
5259 
5260 	__this_cpu_inc(softnet_data.processed);
5261 
5262 	if (static_branch_unlikely(&generic_xdp_needed_key)) {
5263 		int ret2;
5264 
5265 		migrate_disable();
5266 		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5267 		migrate_enable();
5268 
5269 		if (ret2 != XDP_PASS) {
5270 			ret = NET_RX_DROP;
5271 			goto out;
5272 		}
5273 	}
5274 
5275 	if (eth_type_vlan(skb->protocol)) {
5276 		skb = skb_vlan_untag(skb);
5277 		if (unlikely(!skb))
5278 			goto out;
5279 	}
5280 
5281 	if (skb_skip_tc_classify(skb))
5282 		goto skip_classify;
5283 
5284 	if (pfmemalloc)
5285 		goto skip_taps;
5286 
5287 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
5288 		if (pt_prev)
5289 			ret = deliver_skb(skb, pt_prev, orig_dev);
5290 		pt_prev = ptype;
5291 	}
5292 
5293 	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5294 		if (pt_prev)
5295 			ret = deliver_skb(skb, pt_prev, orig_dev);
5296 		pt_prev = ptype;
5297 	}
5298 
5299 skip_taps:
5300 #ifdef CONFIG_NET_INGRESS
5301 	if (static_branch_unlikely(&ingress_needed_key)) {
5302 		bool another = false;
5303 
5304 		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5305 					 &another);
5306 		if (another)
5307 			goto another_round;
5308 		if (!skb)
5309 			goto out;
5310 
5311 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5312 			goto out;
5313 	}
5314 #endif
5315 	skb_reset_redirect(skb);
5316 skip_classify:
5317 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5318 		goto drop;
5319 
5320 	if (skb_vlan_tag_present(skb)) {
5321 		if (pt_prev) {
5322 			ret = deliver_skb(skb, pt_prev, orig_dev);
5323 			pt_prev = NULL;
5324 		}
5325 		if (vlan_do_receive(&skb))
5326 			goto another_round;
5327 		else if (unlikely(!skb))
5328 			goto out;
5329 	}
5330 
5331 	rx_handler = rcu_dereference(skb->dev->rx_handler);
5332 	if (rx_handler) {
5333 		if (pt_prev) {
5334 			ret = deliver_skb(skb, pt_prev, orig_dev);
5335 			pt_prev = NULL;
5336 		}
5337 		switch (rx_handler(&skb)) {
5338 		case RX_HANDLER_CONSUMED:
5339 			ret = NET_RX_SUCCESS;
5340 			goto out;
5341 		case RX_HANDLER_ANOTHER:
5342 			goto another_round;
5343 		case RX_HANDLER_EXACT:
5344 			deliver_exact = true;
5345 			break;
5346 		case RX_HANDLER_PASS:
5347 			break;
5348 		default:
5349 			BUG();
5350 		}
5351 	}
5352 
5353 	if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5354 check_vlan_id:
5355 		if (skb_vlan_tag_get_id(skb)) {
5356 			/* VLAN id is non-zero and vlan_do_receive() above
5357 			 * couldn't find the vlan device.
5358 			 */
5359 			skb->pkt_type = PACKET_OTHERHOST;
5360 		} else if (eth_type_vlan(skb->protocol)) {
5361 			/* Outer header is 802.1P with vlan 0, inner header is
5362 			 * 802.1Q or 802.1AD and vlan_do_receive() above could
5363 			 * not find vlan dev for vlan id 0.
5364 			 */
5365 			__vlan_hwaccel_clear_tag(skb);
5366 			skb = skb_vlan_untag(skb);
5367 			if (unlikely(!skb))
5368 				goto out;
5369 			if (vlan_do_receive(&skb))
5370 				/* After stripping off 802.1P header with vlan 0
5371 				 * vlan dev is found for inner header.
5372 				 */
5373 				goto another_round;
5374 			else if (unlikely(!skb))
5375 				goto out;
5376 			else
5377 				/* We have stripped the outer 802.1P vlan 0
5378 				 * header but could not find a vlan dev;
5379 				 * check the vlan id again to set OTHERHOST.
5380 				 */
5381 				goto check_vlan_id;
5382 		}
5383 		/* Note: we might in the future use prio bits
5384 		 * and set skb->priority like in vlan_do_receive()
5385 		 * For the time being, just ignore Priority Code Point
5386 		 */
5387 		__vlan_hwaccel_clear_tag(skb);
5388 	}
5389 
5390 	type = skb->protocol;
5391 
5392 	/* deliver only exact match when indicated */
5393 	if (likely(!deliver_exact)) {
5394 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5395 				       &ptype_base[ntohs(type) &
5396 						   PTYPE_HASH_MASK]);
5397 	}
5398 
5399 	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5400 			       &orig_dev->ptype_specific);
5401 
5402 	if (unlikely(skb->dev != orig_dev)) {
5403 		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5404 				       &skb->dev->ptype_specific);
5405 	}
5406 
5407 	if (pt_prev) {
5408 		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5409 			goto drop;
5410 		*ppt_prev = pt_prev;
5411 	} else {
5412 drop:
5413 		if (!deliver_exact)
5414 			atomic_long_inc(&skb->dev->rx_dropped);
5415 		else
5416 			atomic_long_inc(&skb->dev->rx_nohandler);
5417 		kfree_skb(skb);
5418 		/* Jamal, now you will not be able to escape explaining
5419 		 * to me how you were going to use this. :-)
5420 		 */
5421 		ret = NET_RX_DROP;
5422 	}
5423 
5424 out:
5425 	/* The invariant here is that if *ppt_prev is not NULL
5426 	 * then skb should also be non-NULL.
5427 	 *
5428 	 * Apparently *ppt_prev assignment above holds this invariant due to
5429 	 * skb dereferencing near it.
5430 	 */
5431 	*pskb = skb;
5432 	return ret;
5433 }
5434 
5435 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5436 {
5437 	struct net_device *orig_dev = skb->dev;
5438 	struct packet_type *pt_prev = NULL;
5439 	int ret;
5440 
5441 	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5442 	if (pt_prev)
5443 		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5444 					 skb->dev, pt_prev, orig_dev);
5445 	return ret;
5446 }
5447 
5448 /**
5449  *	netif_receive_skb_core - special purpose version of netif_receive_skb
5450  *	@skb: buffer to process
5451  *
5452  *	More direct receive version of netif_receive_skb().  It should
5453  *	only be used by callers that have a need to skip RPS and Generic XDP.
5454  *	Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5455  *
5456  *	This function may only be called from softirq context and interrupts
5457  *	should be enabled.
5458  *
5459  *	Return values (usually ignored):
5460  *	NET_RX_SUCCESS: no congestion
5461  *	NET_RX_DROP: packet was dropped
5462  */
5463 int netif_receive_skb_core(struct sk_buff *skb)
5464 {
5465 	int ret;
5466 
5467 	rcu_read_lock();
5468 	ret = __netif_receive_skb_one_core(skb, false);
5469 	rcu_read_unlock();
5470 
5471 	return ret;
5472 }
5473 EXPORT_SYMBOL(netif_receive_skb_core);
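/*
 * Editor's sketch (not part of the original file): a driver that wants
 * to bypass RPS and generic XDP can hand a fully set up skb straight to
 * the core from its NAPI poll (softirq) context, per the kernel-doc
 * above.  "my_deliver_direct" is a hypothetical name.
 */
static void my_deliver_direct(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb_core(skb);	/* NET_RX_* result usually ignored */
}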
5474 
5475 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5476 						  struct packet_type *pt_prev,
5477 						  struct net_device *orig_dev)
5478 {
5479 	struct sk_buff *skb, *next;
5480 
5481 	if (!pt_prev)
5482 		return;
5483 	if (list_empty(head))
5484 		return;
5485 	if (pt_prev->list_func != NULL)
5486 		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5487 				   ip_list_rcv, head, pt_prev, orig_dev);
5488 	else
5489 		list_for_each_entry_safe(skb, next, head, list) {
5490 			skb_list_del_init(skb);
5491 			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5492 		}
5493 }
5494 
5495 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5496 {
5497 	/* Fast-path assumptions:
5498 	 * - There is no RX handler.
5499 	 * - Only one packet_type matches.
5500 	 * If either of these fails, we will end up doing some per-packet
5501 	 * processing in-line, then handling the 'last ptype' for the whole
5502 	 * sublist.  This can't cause out-of-order delivery to any single ptype,
5503 	 * because the 'last ptype' must be constant across the sublist, and all
5504 	 * other ptypes are handled per-packet.
5505 	 */
5506 	/* Current (common) ptype of sublist */
5507 	struct packet_type *pt_curr = NULL;
5508 	/* Current (common) orig_dev of sublist */
5509 	struct net_device *od_curr = NULL;
5510 	struct list_head sublist;
5511 	struct sk_buff *skb, *next;
5512 
5513 	INIT_LIST_HEAD(&sublist);
5514 	list_for_each_entry_safe(skb, next, head, list) {
5515 		struct net_device *orig_dev = skb->dev;
5516 		struct packet_type *pt_prev = NULL;
5517 
5518 		skb_list_del_init(skb);
5519 		__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5520 		if (!pt_prev)
5521 			continue;
5522 		if (pt_curr != pt_prev || od_curr != orig_dev) {
5523 			/* dispatch old sublist */
5524 			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5525 			/* start new sublist */
5526 			INIT_LIST_HEAD(&sublist);
5527 			pt_curr = pt_prev;
5528 			od_curr = orig_dev;
5529 		}
5530 		list_add_tail(&skb->list, &sublist);
5531 	}
5532 
5533 	/* dispatch final sublist */
5534 	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5535 }
5536 
5537 static int __netif_receive_skb(struct sk_buff *skb)
5538 {
5539 	int ret;
5540 
5541 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5542 		unsigned int noreclaim_flag;
5543 
5544 		/*
5545 		 * PFMEMALLOC skbs are special, they should
5546 		 * - be delivered to SOCK_MEMALLOC sockets only
5547 		 * - stay away from userspace
5548 		 * - have bounded memory usage
5549 		 *
5550 		 * Use PF_MEMALLOC as this saves us from propagating the allocation
5551 		 * context down to all allocation sites.
5552 		 */
5553 		noreclaim_flag = memalloc_noreclaim_save();
5554 		ret = __netif_receive_skb_one_core(skb, true);
5555 		memalloc_noreclaim_restore(noreclaim_flag);
5556 	} else
5557 		ret = __netif_receive_skb_one_core(skb, false);
5558 
5559 	return ret;
5560 }
5561 
5562 static void __netif_receive_skb_list(struct list_head *head)
5563 {
5564 	unsigned long noreclaim_flag = 0;
5565 	struct sk_buff *skb, *next;
5566 	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5567 
5568 	list_for_each_entry_safe(skb, next, head, list) {
5569 		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5570 			struct list_head sublist;
5571 
5572 			/* Handle the previous sublist */
5573 			list_cut_before(&sublist, head, &skb->list);
5574 			if (!list_empty(&sublist))
5575 				__netif_receive_skb_list_core(&sublist, pfmemalloc);
5576 			pfmemalloc = !pfmemalloc;
5577 			/* See comments in __netif_receive_skb */
5578 			if (pfmemalloc)
5579 				noreclaim_flag = memalloc_noreclaim_save();
5580 			else
5581 				memalloc_noreclaim_restore(noreclaim_flag);
5582 		}
5583 	}
5584 	/* Handle the remaining sublist */
5585 	if (!list_empty(head))
5586 		__netif_receive_skb_list_core(head, pfmemalloc);
5587 	/* Restore pflags */
5588 	if (pfmemalloc)
5589 		memalloc_noreclaim_restore(noreclaim_flag);
5590 }
5591 
5592 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5593 {
5594 	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5595 	struct bpf_prog *new = xdp->prog;
5596 	int ret = 0;
5597 
5598 	switch (xdp->command) {
5599 	case XDP_SETUP_PROG:
5600 		rcu_assign_pointer(dev->xdp_prog, new);
5601 		if (old)
5602 			bpf_prog_put(old);
5603 
5604 		if (old && !new) {
5605 			static_branch_dec(&generic_xdp_needed_key);
5606 		} else if (new && !old) {
5607 			static_branch_inc(&generic_xdp_needed_key);
5608 			dev_disable_lro(dev);
5609 			dev_disable_gro_hw(dev);
5610 		}
5611 		break;
5612 
5613 	default:
5614 		ret = -EINVAL;
5615 		break;
5616 	}
5617 
5618 	return ret;
5619 }
5620 
5621 static int netif_receive_skb_internal(struct sk_buff *skb)
5622 {
5623 	int ret;
5624 
5625 	net_timestamp_check(netdev_tstamp_prequeue, skb);
5626 
5627 	if (skb_defer_rx_timestamp(skb))
5628 		return NET_RX_SUCCESS;
5629 
5630 	rcu_read_lock();
5631 #ifdef CONFIG_RPS
5632 	if (static_branch_unlikely(&rps_needed)) {
5633 		struct rps_dev_flow voidflow, *rflow = &voidflow;
5634 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5635 
5636 		if (cpu >= 0) {
5637 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5638 			rcu_read_unlock();
5639 			return ret;
5640 		}
5641 	}
5642 #endif
5643 	ret = __netif_receive_skb(skb);
5644 	rcu_read_unlock();
5645 	return ret;
5646 }
5647 
5648 static void netif_receive_skb_list_internal(struct list_head *head)
5649 {
5650 	struct sk_buff *skb, *next;
5651 	struct list_head sublist;
5652 
5653 	INIT_LIST_HEAD(&sublist);
5654 	list_for_each_entry_safe(skb, next, head, list) {
5655 		net_timestamp_check(netdev_tstamp_prequeue, skb);
5656 		skb_list_del_init(skb);
5657 		if (!skb_defer_rx_timestamp(skb))
5658 			list_add_tail(&skb->list, &sublist);
5659 	}
5660 	list_splice_init(&sublist, head);
5661 
5662 	rcu_read_lock();
5663 #ifdef CONFIG_RPS
5664 	if (static_branch_unlikely(&rps_needed)) {
5665 		list_for_each_entry_safe(skb, next, head, list) {
5666 			struct rps_dev_flow voidflow, *rflow = &voidflow;
5667 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5668 
5669 			if (cpu >= 0) {
5670 				/* Will be handled, remove from list */
5671 				skb_list_del_init(skb);
5672 				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5673 			}
5674 		}
5675 	}
5676 #endif
5677 	__netif_receive_skb_list(head);
5678 	rcu_read_unlock();
5679 }
5680 
5681 /**
5682  *	netif_receive_skb - process receive buffer from network
5683  *	@skb: buffer to process
5684  *
5685  *	netif_receive_skb() is the main receive data processing function.
5686  *	It always succeeds. The buffer may be dropped during processing
5687  *	for congestion control or by the protocol layers.
5688  *
5689  *	This function may only be called from softirq context and interrupts
5690  *	should be enabled.
5691  *
5692  *	Return values (usually ignored):
5693  *	NET_RX_SUCCESS: no congestion
5694  *	NET_RX_DROP: packet was dropped
5695  */
5696 int netif_receive_skb(struct sk_buff *skb)
5697 {
5698 	int ret;
5699 
5700 	trace_netif_receive_skb_entry(skb);
5701 
5702 	ret = netif_receive_skb_internal(skb);
5703 	trace_netif_receive_skb_exit(ret);
5704 
5705 	return ret;
5706 }
5707 EXPORT_SYMBOL(netif_receive_skb);
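/*
 * Editor's sketch (not part of the original file): the classic RX path
 * of a simple (non-GRO) driver feeding one packet into the stack from
 * softirq context.  "my_rx_one" and its buf/len parameters are
 * hypothetical; the kernel helpers are real.
 */
static void my_rx_one(struct net_device *dev, const void *buf,
		      unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);	/* NET_RX_* result usually ignored */
}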
5708 
5709 /**
5710  *	netif_receive_skb_list - process many receive buffers from network
5711  *	@head: list of skbs to process.
5712  *
5713  *	Since return value of netif_receive_skb() is normally ignored, and
5714  *	wouldn't be meaningful for a list, this function returns void.
5715  *
5716  *	This function may only be called from softirq context and interrupts
5717  *	should be enabled.
5718  */
5719 void netif_receive_skb_list(struct list_head *head)
5720 {
5721 	struct sk_buff *skb;
5722 
5723 	if (list_empty(head))
5724 		return;
5725 	if (trace_netif_receive_skb_list_entry_enabled()) {
5726 		list_for_each_entry(skb, head, list)
5727 			trace_netif_receive_skb_list_entry(skb);
5728 	}
5729 	netif_receive_skb_list_internal(head);
5730 	trace_netif_receive_skb_list_exit(0);
5731 }
5732 EXPORT_SYMBOL(netif_receive_skb_list);
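/*
 * Editor's sketch (not part of the original file): batching received
 * packets on a list and submitting them in one call, which lets the
 * core use the list-based fast path above.  "my_hw_next_rx" is a
 * hypothetical driver helper returning the next completed RX skb, or
 * NULL when the ring is drained.
 */
static struct sk_buff *my_hw_next_rx(struct net_device *dev); /* hypothetical */

static void my_rx_batch(struct net_device *dev)
{
	LIST_HEAD(rx_list);
	struct sk_buff *skb;

	while ((skb = my_hw_next_rx(dev)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		list_add_tail(&skb->list, &rx_list);
	}
	netif_receive_skb_list(&rx_list);	/* no-op on an empty list */
}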
5733 
5734 static DEFINE_PER_CPU(struct work_struct, flush_works);
5735 
5736 /* Network device is going away, flush any packets still pending */
5737 static void flush_backlog(struct work_struct *work)
5738 {
5739 	struct sk_buff *skb, *tmp;
5740 	struct softnet_data *sd;
5741 
5742 	local_bh_disable();
5743 	sd = this_cpu_ptr(&softnet_data);
5744 
5745 	local_irq_disable();
5746 	rps_lock(sd);
5747 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5748 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5749 			__skb_unlink(skb, &sd->input_pkt_queue);
5750 			dev_kfree_skb_irq(skb);
5751 			input_queue_head_incr(sd);
5752 		}
5753 	}
5754 	rps_unlock(sd);
5755 	local_irq_enable();
5756 
5757 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5758 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5759 			__skb_unlink(skb, &sd->process_queue);
5760 			kfree_skb(skb);
5761 			input_queue_head_incr(sd);
5762 		}
5763 	}
5764 	local_bh_enable();
5765 }
5766 
5767 static bool flush_required(int cpu)
5768 {
5769 #if IS_ENABLED(CONFIG_RPS)
5770 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5771 	bool do_flush;
5772 
5773 	local_irq_disable();
5774 	rps_lock(sd);
5775 
5776 	/* as insertion into process_queue happens with the rps lock held,
5777 	 * process_queue access may race only with dequeue
5778 	 */
5779 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5780 		   !skb_queue_empty_lockless(&sd->process_queue);
5781 	rps_unlock(sd);
5782 	local_irq_enable();
5783 
5784 	return do_flush;
5785 #endif
5786 	/* without RPS we can't safely check input_pkt_queue: during a
5787 	 * concurrent remote skb_queue_splice() we can detect as empty both
5788 	 * input_pkt_queue and process_queue even if the latter could end-up
5789 	 * containing a lot of packets.
5790 	 */
5791 	return true;
5792 }
5793 
5794 static void flush_all_backlogs(void)
5795 {
5796 	static cpumask_t flush_cpus;
5797 	unsigned int cpu;
5798 
5799 	/* since we are under rtnl lock protection we can use static data
5800 	 * for the cpumask and avoid allocating on stack the possibly
5801 	 * large mask
5802 	 */
5803 	ASSERT_RTNL();
5804 
5805 	cpus_read_lock();
5806 
5807 	cpumask_clear(&flush_cpus);
5808 	for_each_online_cpu(cpu) {
5809 		if (flush_required(cpu)) {
5810 			queue_work_on(cpu, system_highpri_wq,
5811 				      per_cpu_ptr(&flush_works, cpu));
5812 			cpumask_set_cpu(cpu, &flush_cpus);
5813 		}
5814 	}
5815 
5816 	/* we can have in flight packet[s] on the cpus we are not flushing,
5817 	 * synchronize_net() in unregister_netdevice_many() will take care of
5818 	 * them
5819 	 */
5820 	for_each_cpu(cpu, &flush_cpus)
5821 		flush_work(per_cpu_ptr(&flush_works, cpu));
5822 
5823 	cpus_read_unlock();
5824 }
5825 
5826 /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
5827 static void gro_normal_list(struct napi_struct *napi)
5828 {
5829 	if (!napi->rx_count)
5830 		return;
5831 	netif_receive_skb_list_internal(&napi->rx_list);
5832 	INIT_LIST_HEAD(&napi->rx_list);
5833 	napi->rx_count = 0;
5834 }
5835 
5836 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
5837  * pass the whole batch up to the stack.
5838  */
5839 static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
5840 {
5841 	list_add_tail(&skb->list, &napi->rx_list);
5842 	napi->rx_count += segs;
5843 	if (napi->rx_count >= gro_normal_batch)
5844 		gro_normal_list(napi);
5845 }
5846 
5847 static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
5848 {
5849 	struct packet_offload *ptype;
5850 	__be16 type = skb->protocol;
5851 	struct list_head *head = &offload_base;
5852 	int err = -ENOENT;
5853 
5854 	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5855 
5856 	if (NAPI_GRO_CB(skb)->count == 1) {
5857 		skb_shinfo(skb)->gso_size = 0;
5858 		goto out;
5859 	}
5860 
5861 	rcu_read_lock();
5862 	list_for_each_entry_rcu(ptype, head, list) {
5863 		if (ptype->type != type || !ptype->callbacks.gro_complete)
5864 			continue;
5865 
5866 		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
5867 					 ipv6_gro_complete, inet_gro_complete,
5868 					 skb, 0);
5869 		break;
5870 	}
5871 	rcu_read_unlock();
5872 
5873 	if (err) {
5874 		WARN_ON(&ptype->list == head);
5875 		kfree_skb(skb);
5876 		return NET_RX_SUCCESS;
5877 	}
5878 
5879 out:
5880 	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
5881 	return NET_RX_SUCCESS;
5882 }
5883 
5884 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
5885 				   bool flush_old)
5886 {
5887 	struct list_head *head = &napi->gro_hash[index].list;
5888 	struct sk_buff *skb, *p;
5889 
5890 	list_for_each_entry_safe_reverse(skb, p, head, list) {
5891 		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5892 			return;
5893 		skb_list_del_init(skb);
5894 		napi_gro_complete(napi, skb);
5895 		napi->gro_hash[index].count--;
5896 	}
5897 
5898 	if (!napi->gro_hash[index].count)
5899 		__clear_bit(index, &napi->gro_bitmask);
5900 }
5901 
5902 /* napi->gro_hash[].list contains packets ordered by age,
5903  * with the youngest packets at the head.
5904  * Complete skbs in reverse order to reduce latencies.
5905  */
5906 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5907 {
5908 	unsigned long bitmask = napi->gro_bitmask;
5909 	unsigned int i, base = ~0U;
5910 
5911 	while ((i = ffs(bitmask)) != 0) {
5912 		bitmask >>= i;
5913 		base += i;
5914 		__napi_gro_flush_chain(napi, base, flush_old);
5915 	}
5916 }
5917 EXPORT_SYMBOL(napi_gro_flush);
5918 
5919 static void gro_list_prepare(const struct list_head *head,
5920 			     const struct sk_buff *skb)
5921 {
5922 	unsigned int maclen = skb->dev->hard_header_len;
5923 	u32 hash = skb_get_hash_raw(skb);
5924 	struct sk_buff *p;
5925 
5926 	list_for_each_entry(p, head, list) {
5927 		unsigned long diffs;
5928 
5929 		NAPI_GRO_CB(p)->flush = 0;
5930 
5931 		if (hash != skb_get_hash_raw(p)) {
5932 			NAPI_GRO_CB(p)->same_flow = 0;
5933 			continue;
5934 		}
5935 
5936 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
5937 		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
5938 		if (skb_vlan_tag_present(p))
5939 			diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
5940 		diffs |= skb_metadata_differs(p, skb);
5941 		if (maclen == ETH_HLEN)
5942 			diffs |= compare_ether_header(skb_mac_header(p),
5943 						      skb_mac_header(skb));
5944 		else if (!diffs)
5945 			diffs = memcmp(skb_mac_header(p),
5946 				       skb_mac_header(skb),
5947 				       maclen);
5948 
5949 		/* In most common scenarios 'slow_gro' is 0,
5950 		 * otherwise we are already on some slower paths,
5951 		 * so either skip all the infrequent tests altogether or
5952 		 * avoid trying too hard to skip each of them individually.
5953 		 */
5954 		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
5955 #if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
5956 			struct tc_skb_ext *skb_ext;
5957 			struct tc_skb_ext *p_ext;
5958 #endif
5959 
5960 			diffs |= p->sk != skb->sk;
5961 			diffs |= skb_metadata_dst_cmp(p, skb);
5962 			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
5963 
5964 #if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
5965 			skb_ext = skb_ext_find(skb, TC_SKB_EXT);
5966 			p_ext = skb_ext_find(p, TC_SKB_EXT);
5967 
5968 			diffs |= (!!p_ext) ^ (!!skb_ext);
5969 			if (!diffs && unlikely(skb_ext))
5970 				diffs |= p_ext->chain ^ skb_ext->chain;
5971 #endif
5972 		}
5973 
5974 		NAPI_GRO_CB(p)->same_flow = !diffs;
5975 	}
5976 }
5977 
5978 static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
5979 {
5980 	const struct skb_shared_info *pinfo = skb_shinfo(skb);
5981 	const skb_frag_t *frag0 = &pinfo->frags[0];
5982 
5983 	NAPI_GRO_CB(skb)->data_offset = 0;
5984 	NAPI_GRO_CB(skb)->frag0 = NULL;
5985 	NAPI_GRO_CB(skb)->frag0_len = 0;
5986 
5987 	if (!skb_headlen(skb) && pinfo->nr_frags &&
5988 	    !PageHighMem(skb_frag_page(frag0)) &&
5989 	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
5990 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
5991 		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
5992 						    skb_frag_size(frag0),
5993 						    skb->end - skb->tail);
5994 	}
5995 }
5996 
5997 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
5998 {
5999 	struct skb_shared_info *pinfo = skb_shinfo(skb);
6000 
6001 	BUG_ON(skb->end - skb->tail < grow);
6002 
6003 	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
6004 
6005 	skb->data_len -= grow;
6006 	skb->tail += grow;
6007 
6008 	skb_frag_off_add(&pinfo->frags[0], grow);
6009 	skb_frag_size_sub(&pinfo->frags[0], grow);
6010 
6011 	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
6012 		skb_frag_unref(skb, 0);
6013 		memmove(pinfo->frags, pinfo->frags + 1,
6014 			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
6015 	}
6016 }
6017 
6018 static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
6019 {
6020 	struct sk_buff *oldest;
6021 
6022 	oldest = list_last_entry(head, struct sk_buff, list);
6023 
6024 	/* We are called with head length >= MAX_GRO_SKBS, so an empty
6025 	 * list is impossible.
6026 	 */
6027 	if (WARN_ON_ONCE(!oldest))
6028 		return;
6029 
6030 	/* Do not adjust napi->gro_hash[].count, caller is adding a new
6031 	 * SKB to the chain.
6032 	 */
6033 	skb_list_del_init(oldest);
6034 	napi_gro_complete(napi, oldest);
6035 }
6036 
6037 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
6038 {
6039 	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
6040 	struct gro_list *gro_list = &napi->gro_hash[bucket];
6041 	struct list_head *head = &offload_base;
6042 	struct packet_offload *ptype;
6043 	__be16 type = skb->protocol;
6044 	struct sk_buff *pp = NULL;
6045 	enum gro_result ret;
6046 	int same_flow;
6047 	int grow;
6048 
6049 	if (netif_elide_gro(skb->dev))
6050 		goto normal;
6051 
6052 	gro_list_prepare(&gro_list->list, skb);
6053 
6054 	rcu_read_lock();
6055 	list_for_each_entry_rcu(ptype, head, list) {
6056 		if (ptype->type != type || !ptype->callbacks.gro_receive)
6057 			continue;
6058 
6059 		skb_set_network_header(skb, skb_gro_offset(skb));
6060 		skb_reset_mac_len(skb);
6061 		NAPI_GRO_CB(skb)->same_flow = 0;
6062 		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
6063 		NAPI_GRO_CB(skb)->free = 0;
6064 		NAPI_GRO_CB(skb)->encap_mark = 0;
6065 		NAPI_GRO_CB(skb)->recursion_counter = 0;
6066 		NAPI_GRO_CB(skb)->is_fou = 0;
6067 		NAPI_GRO_CB(skb)->is_atomic = 1;
6068 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
6069 
6070 		/* Setup for GRO checksum validation */
6071 		switch (skb->ip_summed) {
6072 		case CHECKSUM_COMPLETE:
6073 			NAPI_GRO_CB(skb)->csum = skb->csum;
6074 			NAPI_GRO_CB(skb)->csum_valid = 1;
6075 			NAPI_GRO_CB(skb)->csum_cnt = 0;
6076 			break;
6077 		case CHECKSUM_UNNECESSARY:
6078 			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
6079 			NAPI_GRO_CB(skb)->csum_valid = 0;
6080 			break;
6081 		default:
6082 			NAPI_GRO_CB(skb)->csum_cnt = 0;
6083 			NAPI_GRO_CB(skb)->csum_valid = 0;
6084 		}
6085 
6086 		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
6087 					ipv6_gro_receive, inet_gro_receive,
6088 					&gro_list->list, skb);
6089 		break;
6090 	}
6091 	rcu_read_unlock();
6092 
6093 	if (&ptype->list == head)
6094 		goto normal;
6095 
6096 	if (PTR_ERR(pp) == -EINPROGRESS) {
6097 		ret = GRO_CONSUMED;
6098 		goto ok;
6099 	}
6100 
6101 	same_flow = NAPI_GRO_CB(skb)->same_flow;
6102 	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
6103 
6104 	if (pp) {
6105 		skb_list_del_init(pp);
6106 		napi_gro_complete(napi, pp);
6107 		gro_list->count--;
6108 	}
6109 
6110 	if (same_flow)
6111 		goto ok;
6112 
6113 	if (NAPI_GRO_CB(skb)->flush)
6114 		goto normal;
6115 
6116 	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
6117 		gro_flush_oldest(napi, &gro_list->list);
6118 	else
6119 		gro_list->count++;
6120 
6121 	NAPI_GRO_CB(skb)->count = 1;
6122 	NAPI_GRO_CB(skb)->age = jiffies;
6123 	NAPI_GRO_CB(skb)->last = skb;
6124 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
6125 	list_add(&skb->list, &gro_list->list);
6126 	ret = GRO_HELD;
6127 
6128 pull:
6129 	grow = skb_gro_offset(skb) - skb_headlen(skb);
6130 	if (grow > 0)
6131 		gro_pull_from_frag0(skb, grow);
6132 ok:
6133 	if (gro_list->count) {
6134 		if (!test_bit(bucket, &napi->gro_bitmask))
6135 			__set_bit(bucket, &napi->gro_bitmask);
6136 	} else if (test_bit(bucket, &napi->gro_bitmask)) {
6137 		__clear_bit(bucket, &napi->gro_bitmask);
6138 	}
6139 
6140 	return ret;
6141 
6142 normal:
6143 	ret = GRO_NORMAL;
6144 	goto pull;
6145 }
6146 
6147 struct packet_offload *gro_find_receive_by_type(__be16 type)
6148 {
6149 	struct list_head *offload_head = &offload_base;
6150 	struct packet_offload *ptype;
6151 
6152 	list_for_each_entry_rcu(ptype, offload_head, list) {
6153 		if (ptype->type != type || !ptype->callbacks.gro_receive)
6154 			continue;
6155 		return ptype;
6156 	}
6157 	return NULL;
6158 }
6159 EXPORT_SYMBOL(gro_find_receive_by_type);
6160 
6161 struct packet_offload *gro_find_complete_by_type(__be16 type)
6162 {
6163 	struct list_head *offload_head = &offload_base;
6164 	struct packet_offload *ptype;
6165 
6166 	list_for_each_entry_rcu(ptype, offload_head, list) {
6167 		if (ptype->type != type || !ptype->callbacks.gro_complete)
6168 			continue;
6169 		return ptype;
6170 	}
6171 	return NULL;
6172 }
6173 EXPORT_SYMBOL(gro_find_complete_by_type);
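/*
 * Editor's sketch (not part of the original file): how an encapsulation
 * offload might use the lookup helpers above to hand an inner packet to
 * another layer's GRO callbacks.  This must run under rcu_read_lock()
 * (as dev_gro_receive() does above); real tunnel code should invoke the
 * callback via call_gro_receive() to honour the recursion limit.
 * "my_tnl_gro_receive" is hypothetical and skips all tunnel parsing.
 */
static struct sk_buff *my_tnl_gro_receive(struct list_head *head,
					  struct sk_buff *skb)
{
	struct packet_offload *ptype;
	struct sk_buff *pp = NULL;

	ptype = gro_find_receive_by_type(htons(ETH_P_IP)); /* inner proto */
	if (ptype)
		pp = ptype->callbacks.gro_receive(head, skb);
	return pp;
}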
6174 
6175 static gro_result_t napi_skb_finish(struct napi_struct *napi,
6176 				    struct sk_buff *skb,
6177 				    gro_result_t ret)
6178 {
6179 	switch (ret) {
6180 	case GRO_NORMAL:
6181 		gro_normal_one(napi, skb, 1);
6182 		break;
6183 
6184 	case GRO_MERGED_FREE:
6185 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
6186 			napi_skb_free_stolen_head(skb);
6187 		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
6188 			__kfree_skb(skb);
6189 		else
6190 			__kfree_skb_defer(skb);
6191 		break;
6192 
6193 	case GRO_HELD:
6194 	case GRO_MERGED:
6195 	case GRO_CONSUMED:
6196 		break;
6197 	}
6198 
6199 	return ret;
6200 }
6201 
6202 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
6203 {
6204 	gro_result_t ret;
6205 
6206 	skb_mark_napi_id(skb, napi);
6207 	trace_napi_gro_receive_entry(skb);
6208 
6209 	skb_gro_reset_offset(skb, 0);
6210 
6211 	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
6212 	trace_napi_gro_receive_exit(ret);
6213 
6214 	return ret;
6215 }
6216 EXPORT_SYMBOL(napi_gro_receive);
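/*
 * Editor's sketch (not part of the original file): per-packet hand-off
 * from a NAPI driver's poll loop into GRO.  "my_hw_next_rx" is the same
 * hypothetical helper assumed in the list-RX sketch earlier.
 */
static int my_gro_rx(struct napi_struct *napi, int budget)
{
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = my_hw_next_rx(napi->dev);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);	/* may merge, hold or free */
		work++;
	}
	return work;
}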
6217 
6218 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
6219 {
6220 	if (unlikely(skb->pfmemalloc)) {
6221 		consume_skb(skb);
6222 		return;
6223 	}
6224 	__skb_pull(skb, skb_headlen(skb));
6225 	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
6226 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
6227 	__vlan_hwaccel_clear_tag(skb);
6228 	skb->dev = napi->dev;
6229 	skb->skb_iif = 0;
6230 
6231 	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
6232 	skb->pkt_type = PACKET_HOST;
6233 
6234 	skb->encapsulation = 0;
6235 	skb_shinfo(skb)->gso_type = 0;
6236 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6237 	if (unlikely(skb->slow_gro)) {
6238 		skb_orphan(skb);
6239 		skb_ext_reset(skb);
6240 		nf_reset_ct(skb);
6241 		skb->slow_gro = 0;
6242 	}
6243 
6244 	napi->skb = skb;
6245 }
6246 
6247 struct sk_buff *napi_get_frags(struct napi_struct *napi)
6248 {
6249 	struct sk_buff *skb = napi->skb;
6250 
6251 	if (!skb) {
6252 		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
6253 		if (skb) {
6254 			napi->skb = skb;
6255 			skb_mark_napi_id(skb, napi);
6256 		}
6257 	}
6258 	return skb;
6259 }
6260 EXPORT_SYMBOL(napi_get_frags);
6261 
6262 static gro_result_t napi_frags_finish(struct napi_struct *napi,
6263 				      struct sk_buff *skb,
6264 				      gro_result_t ret)
6265 {
6266 	switch (ret) {
6267 	case GRO_NORMAL:
6268 	case GRO_HELD:
6269 		__skb_push(skb, ETH_HLEN);
6270 		skb->protocol = eth_type_trans(skb, skb->dev);
6271 		if (ret == GRO_NORMAL)
6272 			gro_normal_one(napi, skb, 1);
6273 		break;
6274 
6275 	case GRO_MERGED_FREE:
6276 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
6277 			napi_skb_free_stolen_head(skb);
6278 		else
6279 			napi_reuse_skb(napi, skb);
6280 		break;
6281 
6282 	case GRO_MERGED:
6283 	case GRO_CONSUMED:
6284 		break;
6285 	}
6286 
6287 	return ret;
6288 }
6289 
6290 /* Upper GRO stack assumes network header starts at gro_offset=0.
6291  * Drivers could call both napi_gro_frags() and napi_gro_receive().
6292  * We copy ethernet header into skb->data to have a common layout.
6293  */
6294 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
6295 {
6296 	struct sk_buff *skb = napi->skb;
6297 	const struct ethhdr *eth;
6298 	unsigned int hlen = sizeof(*eth);
6299 
6300 	napi->skb = NULL;
6301 
6302 	skb_reset_mac_header(skb);
6303 	skb_gro_reset_offset(skb, hlen);
6304 
6305 	if (unlikely(skb_gro_header_hard(skb, hlen))) {
6306 		eth = skb_gro_header_slow(skb, hlen, 0);
6307 		if (unlikely(!eth)) {
6308 			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
6309 					     __func__, napi->dev->name);
6310 			napi_reuse_skb(napi, skb);
6311 			return NULL;
6312 		}
6313 	} else {
6314 		eth = (const struct ethhdr *)skb->data;
6315 		gro_pull_from_frag0(skb, hlen);
6316 		NAPI_GRO_CB(skb)->frag0 += hlen;
6317 		NAPI_GRO_CB(skb)->frag0_len -= hlen;
6318 	}
6319 	__skb_pull(skb, hlen);
6320 
6321 	/*
6322 	 * This works because the only protocols we care about don't require
6323 	 * special handling.
6324 	 * We'll fix it up properly in napi_frags_finish()
6325 	 */
6326 	skb->protocol = eth->h_proto;
6327 
6328 	return skb;
6329 }
6330 
6331 gro_result_t napi_gro_frags(struct napi_struct *napi)
6332 {
6333 	gro_result_t ret;
6334 	struct sk_buff *skb = napi_frags_skb(napi);
6335 
6336 	trace_napi_gro_frags_entry(skb);
6337 
6338 	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
6339 	trace_napi_gro_frags_exit(ret);
6340 
6341 	return ret;
6342 }
6343 EXPORT_SYMBOL(napi_gro_frags);
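/*
 * Editor's sketch (not part of the original file): the page-fragment RX
 * path.  The driver attaches its RX page to the skb cached by
 * napi_get_frags() and lets napi_gro_frags() derive the protocol via
 * napi_frags_skb() above.  page/off/len/truesize are hypothetical
 * values taken from a driver's RX descriptor.
 */
static void my_rx_frag(struct napi_struct *napi, struct page *page,
		       unsigned int off, unsigned int len,
		       unsigned int truesize)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;		/* allocation failed; caller recycles page */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len,
			truesize);
	napi_gro_frags(napi);	/* consumes napi->skb */
}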
6344 
6345 /* Compute the checksum from gro_offset and return the folded value
6346  * after adding in any pseudo checksum.
6347  */
6348 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
6349 {
6350 	__wsum wsum;
6351 	__sum16 sum;
6352 
6353 	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
6354 
6355 	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
6356 	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
6357 	/* See comments in __skb_checksum_complete(). */
6358 	if (likely(!sum)) {
6359 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
6360 		    !skb->csum_complete_sw)
6361 			netdev_rx_csum_fault(skb->dev, skb);
6362 	}
6363 
6364 	NAPI_GRO_CB(skb)->csum = wsum;
6365 	NAPI_GRO_CB(skb)->csum_valid = 1;
6366 
6367 	return sum;
6368 }
6369 EXPORT_SYMBOL(__skb_gro_checksum_complete);
6370 
6371 static void net_rps_send_ipi(struct softnet_data *remsd)
6372 {
6373 #ifdef CONFIG_RPS
6374 	while (remsd) {
6375 		struct softnet_data *next = remsd->rps_ipi_next;
6376 
6377 		if (cpu_online(remsd->cpu))
6378 			smp_call_function_single_async(remsd->cpu, &remsd->csd);
6379 		remsd = next;
6380 	}
6381 #endif
6382 }
6383 
6384 /*
6385  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
6386  * Note: called with local irq disabled, but exits with local irq enabled.
6387  */
6388 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
6389 {
6390 #ifdef CONFIG_RPS
6391 	struct softnet_data *remsd = sd->rps_ipi_list;
6392 
6393 	if (remsd) {
6394 		sd->rps_ipi_list = NULL;
6395 
6396 		local_irq_enable();
6397 
6398 		/* Send pending IPI's to kick RPS processing on remote cpus. */
6399 		net_rps_send_ipi(remsd);
6400 	} else
6401 #endif
6402 		local_irq_enable();
6403 }
6404 
6405 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
6406 {
6407 #ifdef CONFIG_RPS
6408 	return sd->rps_ipi_list != NULL;
6409 #else
6410 	return false;
6411 #endif
6412 }
6413 
6414 static int process_backlog(struct napi_struct *napi, int quota)
6415 {
6416 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
6417 	bool again = true;
6418 	int work = 0;
6419 
6420 	/* Check if we have pending IPIs; it's better to send them now
6421 	 * rather than waiting for net_rx_action() to end.
6422 	 */
6423 	if (sd_has_rps_ipi_waiting(sd)) {
6424 		local_irq_disable();
6425 		net_rps_action_and_irq_enable(sd);
6426 	}
6427 
6428 	napi->weight = dev_rx_weight;
6429 	while (again) {
6430 		struct sk_buff *skb;
6431 
6432 		while ((skb = __skb_dequeue(&sd->process_queue))) {
6433 			rcu_read_lock();
6434 			__netif_receive_skb(skb);
6435 			rcu_read_unlock();
6436 			input_queue_head_incr(sd);
6437 			if (++work >= quota)
6438 				return work;
6439 
6440 		}
6441 
6442 		local_irq_disable();
6443 		rps_lock(sd);
6444 		if (skb_queue_empty(&sd->input_pkt_queue)) {
6445 			/*
6446 			 * Inline a custom version of __napi_complete().
6447 			 * Only the current cpu owns and manipulates this napi,
6448 			 * and NAPI_STATE_SCHED is the only possible flag set
6449 			 * on backlog.
6450 			 * We can use a plain write instead of clear_bit(),
6451 			 * and we don't need an smp_mb() memory barrier.
6452 			 */
6453 			napi->state = 0;
6454 			again = false;
6455 		} else {
6456 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
6457 						   &sd->process_queue);
6458 		}
6459 		rps_unlock(sd);
6460 		local_irq_enable();
6461 	}
6462 
6463 	return work;
6464 }
6465 
6466 /**
6467  * __napi_schedule - schedule for receive
6468  * @n: entry to schedule
6469  *
6470  * The entry's receive function will be scheduled to run.
6471  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
6472  */
6473 void __napi_schedule(struct napi_struct *n)
6474 {
6475 	unsigned long flags;
6476 
6477 	local_irq_save(flags);
6478 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
6479 	local_irq_restore(flags);
6480 }
6481 EXPORT_SYMBOL(__napi_schedule);
6482 
6483 /**
6484  *	napi_schedule_prep - check if napi can be scheduled
6485  *	@n: napi context
6486  *
6487  * Test if NAPI routine is already running, and if not mark
6488  * it as running.  This is used as a condition variable to
6489  * ensure only one NAPI poll instance runs.  We also make
6490  * sure there is no pending NAPI disable.
6491  */
6492 bool napi_schedule_prep(struct napi_struct *n)
6493 {
6494 	unsigned long val, new;
6495 
6496 	do {
6497 		val = READ_ONCE(n->state);
6498 		if (unlikely(val & NAPIF_STATE_DISABLE))
6499 			return false;
6500 		new = val | NAPIF_STATE_SCHED;
6501 
6502 		/* Sets STATE_MISSED bit if STATE_SCHED was already set
6503 		 * This was suggested by Alexander Duyck, as compiler
6504 		 * emits better code than :
6505 		 * if (val & NAPIF_STATE_SCHED)
6506 		 *     new |= NAPIF_STATE_MISSED;
6507 		 */
6508 		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6509 						   NAPIF_STATE_MISSED;
6510 	} while (cmpxchg(&n->state, val, new) != val);
6511 
6512 	return !(val & NAPIF_STATE_SCHED);
6513 }
6514 EXPORT_SYMBOL(napi_schedule_prep);
6515 
6516 /**
6517  * __napi_schedule_irqoff - schedule for receive
6518  * @n: entry to schedule
6519  *
6520  * Variant of __napi_schedule() assuming hard irqs are masked.
6521  *
6522  * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
6523  * because the interrupt disabled assumption might not be true
6524  * due to force-threaded interrupts and spinlock substitution.
6525  */
6526 void __napi_schedule_irqoff(struct napi_struct *n)
6527 {
6528 	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6529 		____napi_schedule(this_cpu_ptr(&softnet_data), n);
6530 	else
6531 		__napi_schedule(n);
6532 }
6533 EXPORT_SYMBOL(__napi_schedule_irqoff);
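/*
 * Editor's sketch (not part of the original file): the canonical hard
 * interrupt handler pairing napi_schedule_prep() with
 * __napi_schedule_irqoff().  "struct my_priv" and "my_hw_mask_irq" are
 * hypothetical driver details.
 */
struct my_priv { struct napi_struct napi; };		/* hypothetical */
static void my_hw_mask_irq(struct my_priv *priv);	/* hypothetical */

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		my_hw_mask_irq(priv);	/* stop further device interrupts */
		__napi_schedule_irqoff(&priv->napi);
	}
	return IRQ_HANDLED;
}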
6534 
6535 bool napi_complete_done(struct napi_struct *n, int work_done)
6536 {
6537 	unsigned long flags, val, new, timeout = 0;
6538 	bool ret = true;
6539 
6540 	/*
6541 	 * 1) Don't let napi dequeue from the cpu poll list
6542 	 *    just in case it's running on a different cpu.
6543 	 * 2) If we are busy polling, do nothing here, we have
6544 	 *    the guarantee we will be called later.
6545 	 */
6546 	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6547 				 NAPIF_STATE_IN_BUSY_POLL)))
6548 		return false;
6549 
6550 	if (work_done) {
6551 		if (n->gro_bitmask)
6552 			timeout = READ_ONCE(n->dev->gro_flush_timeout);
6553 		n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6554 	}
6555 	if (n->defer_hard_irqs_count > 0) {
6556 		n->defer_hard_irqs_count--;
6557 		timeout = READ_ONCE(n->dev->gro_flush_timeout);
6558 		if (timeout)
6559 			ret = false;
6560 	}
6561 	if (n->gro_bitmask) {
6562 		/* When the NAPI instance uses a timeout and keeps postponing
6563 		 * it, we need to bound somehow the time packets are kept in
6564 		 * the GRO layer
6565 		 */
6566 		napi_gro_flush(n, !!timeout);
6567 	}
6568 
6569 	gro_normal_list(n);
6570 
6571 	if (unlikely(!list_empty(&n->poll_list))) {
6572 		/* If n->poll_list is not empty, we need to mask irqs */
6573 		local_irq_save(flags);
6574 		list_del_init(&n->poll_list);
6575 		local_irq_restore(flags);
6576 	}
6577 
6578 	do {
6579 		val = READ_ONCE(n->state);
6580 
6581 		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6582 
6583 		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6584 			      NAPIF_STATE_SCHED_THREADED |
6585 			      NAPIF_STATE_PREFER_BUSY_POLL);
6586 
6587 		/* If STATE_MISSED was set, leave STATE_SCHED set,
6588 		 * because we will call napi->poll() one more time.
6589 		 * This C code was suggested by Alexander Duyck to help gcc.
6590 		 */
6591 		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6592 						    NAPIF_STATE_SCHED;
6593 	} while (cmpxchg(&n->state, val, new) != val);
6594 
6595 	if (unlikely(val & NAPIF_STATE_MISSED)) {
6596 		__napi_schedule(n);
6597 		return false;
6598 	}
6599 
6600 	if (timeout)
6601 		hrtimer_start(&n->timer, ns_to_ktime(timeout),
6602 			      HRTIMER_MODE_REL_PINNED);
6603 	return ret;
6604 }
6605 EXPORT_SYMBOL(napi_complete_done);
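/*
 * Editor's sketch (not part of the original file): the completion side
 * of a NAPI poll routine.  Interrupts are re-enabled only when
 * napi_complete_done() returns true; a false return means the core
 * (busy polling or the gro_flush_timeout timer) will repoll instead.
 * "my_clean_rx"/"my_hw_unmask_irq" are hypothetical, and "struct
 * my_priv" is the one assumed in the interrupt-handler sketch above.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work = my_clean_rx(priv, budget);	/* hypothetical helper */

	if (work < budget && napi_complete_done(napi, work))
		my_hw_unmask_irq(priv);		/* hypothetical helper */
	return work;
}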
6606 
6607 /* must be called under rcu_read_lock(), as we don't take a reference */
6608 static struct napi_struct *napi_by_id(unsigned int napi_id)
6609 {
6610 	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6611 	struct napi_struct *napi;
6612 
6613 	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6614 		if (napi->napi_id == napi_id)
6615 			return napi;
6616 
6617 	return NULL;
6618 }
6619 
6620 #if defined(CONFIG_NET_RX_BUSY_POLL)
6621 
6622 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6623 {
6624 	if (!skip_schedule) {
6625 		gro_normal_list(napi);
6626 		__napi_schedule(napi);
6627 		return;
6628 	}
6629 
6630 	if (napi->gro_bitmask) {
6631 		/* flush too old packets
6632 		 * If HZ < 1000, flush all packets.
6633 		 */
6634 		napi_gro_flush(napi, HZ >= 1000);
6635 	}
6636 
6637 	gro_normal_list(napi);
6638 	clear_bit(NAPI_STATE_SCHED, &napi->state);
6639 }
6640 
6641 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
6642 			   u16 budget)
6643 {
6644 	bool skip_schedule = false;
6645 	unsigned long timeout;
6646 	int rc;
6647 
6648 	/* Busy polling means there is a high chance device driver hard irq
6649 	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6650 	 * set in napi_schedule_prep().
6651 	 * Since we are about to call napi->poll() once more, we can safely
6652 	 * clear NAPI_STATE_MISSED.
6653 	 *
6654 	 * Note: x86 could use a single "lock and ..." instruction
6655 	 * to perform these two clear_bit()
6656 	 */
6657 	clear_bit(NAPI_STATE_MISSED, &napi->state);
6658 	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6659 
6660 	local_bh_disable();
6661 
6662 	if (prefer_busy_poll) {
6663 		napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6664 		timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6665 		if (napi->defer_hard_irqs_count && timeout) {
6666 			hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6667 			skip_schedule = true;
6668 		}
6669 	}
6670 
6671 	/* All we really want here is to re-enable device interrupts.
6672 	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6673 	 */
6674 	rc = napi->poll(napi, budget);
6675 	/* We can't gro_normal_list() here, because napi->poll() might have
6676 	 * rearmed the napi (napi_complete_done()) in which case it could
6677 	 * already be running on another CPU.
6678 	 */
6679 	trace_napi_poll(napi, rc, budget);
6680 	netpoll_poll_unlock(have_poll_lock);
6681 	if (rc == budget)
6682 		__busy_poll_stop(napi, skip_schedule);
6683 	local_bh_enable();
6684 }
6685 
6686 void napi_busy_loop(unsigned int napi_id,
6687 		    bool (*loop_end)(void *, unsigned long),
6688 		    void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6689 {
6690 	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6691 	int (*napi_poll)(struct napi_struct *napi, int budget);
6692 	void *have_poll_lock = NULL;
6693 	struct napi_struct *napi;
6694 
6695 restart:
6696 	napi_poll = NULL;
6697 
6698 	rcu_read_lock();
6699 
6700 	napi = napi_by_id(napi_id);
6701 	if (!napi)
6702 		goto out;
6703 
6704 	preempt_disable();
6705 	for (;;) {
6706 		int work = 0;
6707 
6708 		local_bh_disable();
6709 		if (!napi_poll) {
6710 			unsigned long val = READ_ONCE(napi->state);
6711 
6712 			/* If multiple threads are competing for this napi,
6713 			 * we avoid dirtying napi->state as much as we can.
6714 			 */
6715 			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6716 				   NAPIF_STATE_IN_BUSY_POLL)) {
6717 				if (prefer_busy_poll)
6718 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6719 				goto count;
6720 			}
6721 			if (cmpxchg(&napi->state, val,
6722 				    val | NAPIF_STATE_IN_BUSY_POLL |
6723 					  NAPIF_STATE_SCHED) != val) {
6724 				if (prefer_busy_poll)
6725 					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6726 				goto count;
6727 			}
6728 			have_poll_lock = netpoll_poll_lock(napi);
6729 			napi_poll = napi->poll;
6730 		}
6731 		work = napi_poll(napi, budget);
6732 		trace_napi_poll(napi, work, budget);
6733 		gro_normal_list(napi);
6734 count:
6735 		if (work > 0)
6736 			__NET_ADD_STATS(dev_net(napi->dev),
6737 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
6738 		local_bh_enable();
6739 
6740 		if (!loop_end || loop_end(loop_end_arg, start_time))
6741 			break;
6742 
6743 		if (unlikely(need_resched())) {
6744 			if (napi_poll)
6745 				busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6746 			preempt_enable();
6747 			rcu_read_unlock();
6748 			cond_resched();
6749 			if (loop_end(loop_end_arg, start_time))
6750 				return;
6751 			goto restart;
6752 		}
6753 		cpu_relax();
6754 	}
6755 	if (napi_poll)
6756 		busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
6757 	preempt_enable();
6758 out:
6759 	rcu_read_unlock();
6760 }
6761 EXPORT_SYMBOL(napi_busy_loop);
6762 
6763 #endif /* CONFIG_NET_RX_BUSY_POLL */
6764 
6765 static void napi_hash_add(struct napi_struct *napi)
6766 {
6767 	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6768 		return;
6769 
6770 	spin_lock(&napi_hash_lock);
6771 
6772 	/* 0..NR_CPUS range is reserved for sender_cpu use */
6773 	do {
6774 		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6775 			napi_gen_id = MIN_NAPI_ID;
6776 	} while (napi_by_id(napi_gen_id));
6777 	napi->napi_id = napi_gen_id;
6778 
6779 	hlist_add_head_rcu(&napi->napi_hash_node,
6780 			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6781 
6782 	spin_unlock(&napi_hash_lock);
6783 }
6784 
6785 /* Warning: the caller is responsible for making sure an rcu grace
6786  * period is respected before freeing the memory containing @napi.
6787  */
6788 static void napi_hash_del(struct napi_struct *napi)
6789 {
6790 	spin_lock(&napi_hash_lock);
6791 
6792 	hlist_del_init_rcu(&napi->napi_hash_node);
6793 
6794 	spin_unlock(&napi_hash_lock);
6795 }
6796 
6797 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6798 {
6799 	struct napi_struct *napi;
6800 
6801 	napi = container_of(timer, struct napi_struct, timer);
6802 
6803 	/* Note: we use a relaxed variant of napi_schedule_prep() not setting
6804 	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6805 	 */
6806 	if (!napi_disable_pending(napi) &&
6807 	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6808 		clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6809 		__napi_schedule_irqoff(napi);
6810 	}
6811 
6812 	return HRTIMER_NORESTART;
6813 }
6814 
6815 static void init_gro_hash(struct napi_struct *napi)
6816 {
6817 	int i;
6818 
6819 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6820 		INIT_LIST_HEAD(&napi->gro_hash[i].list);
6821 		napi->gro_hash[i].count = 0;
6822 	}
6823 	napi->gro_bitmask = 0;
6824 }
6825 
6826 int dev_set_threaded(struct net_device *dev, bool threaded)
6827 {
6828 	struct napi_struct *napi;
6829 	int err = 0;
6830 
6831 	if (dev->threaded == threaded)
6832 		return 0;
6833 
6834 	if (threaded) {
6835 		list_for_each_entry(napi, &dev->napi_list, dev_list) {
6836 			if (!napi->thread) {
6837 				err = napi_kthread_create(napi);
6838 				if (err) {
6839 					threaded = false;
6840 					break;
6841 				}
6842 			}
6843 		}
6844 	}
6845 
6846 	dev->threaded = threaded;
6847 
6848 	/* Make sure kthread is created before THREADED bit
6849 	 * is set.
6850 	 */
6851 	smp_mb__before_atomic();
6852 
6853 	/* Setting/unsetting threaded mode on a napi might not immediately
6854 	 * take effect, if the current napi instance is actively being
6855 	 * polled. In this case, the switch between threaded mode and
6856 	 * softirq mode will happen in the next round of napi_schedule().
6857 	 * This should not cause hiccups/stalls to the live traffic.
6858 	 */
6859 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
6860 		if (threaded)
6861 			set_bit(NAPI_STATE_THREADED, &napi->state);
6862 		else
6863 			clear_bit(NAPI_STATE_THREADED, &napi->state);
6864 	}
6865 
6866 	return err;
6867 }
6868 EXPORT_SYMBOL(dev_set_threaded);
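/*
 * Editor's sketch (not part of the original file): opting a device into
 * threaded NAPI from driver code, e.g. after its NAPI instances have
 * been added.  The same knob is exposed to userspace via
 * /sys/class/net/<dev>/threaded.
 */
static void my_enable_threaded(struct net_device *dev)
{
	if (dev_set_threaded(dev, true))
		netdev_warn(dev, "falling back to softirq NAPI\n");
}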
6869 
6870 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6871 		    int (*poll)(struct napi_struct *, int), int weight)
6872 {
6873 	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6874 		return;
6875 
6876 	INIT_LIST_HEAD(&napi->poll_list);
6877 	INIT_HLIST_NODE(&napi->napi_hash_node);
6878 	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6879 	napi->timer.function = napi_watchdog;
6880 	init_gro_hash(napi);
6881 	napi->skb = NULL;
6882 	INIT_LIST_HEAD(&napi->rx_list);
6883 	napi->rx_count = 0;
6884 	napi->poll = poll;
6885 	if (weight > NAPI_POLL_WEIGHT)
6886 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6887 				weight);
6888 	napi->weight = weight;
6889 	napi->dev = dev;
6890 #ifdef CONFIG_NETPOLL
6891 	napi->poll_owner = -1;
6892 #endif
6893 	set_bit(NAPI_STATE_SCHED, &napi->state);
6894 	set_bit(NAPI_STATE_NPSVC, &napi->state);
6895 	list_add_rcu(&napi->dev_list, &dev->napi_list);
6896 	napi_hash_add(napi);
6897 	/* Create kthread for this napi if dev->threaded is set.
6898 	 * Clear dev->threaded if kthread creation failed so that
6899 	 * threaded mode will not be enabled in napi_enable().
6900 	 */
6901 	if (dev->threaded && napi_kthread_create(napi))
6902 		dev->threaded = 0;
6903 }
6904 EXPORT_SYMBOL(netif_napi_add);
6905 
6906 void napi_disable(struct napi_struct *n)
6907 {
6908 	might_sleep();
6909 	set_bit(NAPI_STATE_DISABLE, &n->state);
6910 
6911 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6912 		msleep(1);
6913 	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6914 		msleep(1);
6915 
6916 	hrtimer_cancel(&n->timer);
6917 
6918 	clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
6919 	clear_bit(NAPI_STATE_DISABLE, &n->state);
6920 	clear_bit(NAPI_STATE_THREADED, &n->state);
6921 }
6922 EXPORT_SYMBOL(napi_disable);
6923 
6924 /**
6925  *	napi_enable - enable NAPI scheduling
6926  *	@n: NAPI context
6927  *
6928  * Resume NAPI from being scheduled on this context.
6929  * Must be paired with napi_disable.
6930  */
6931 void napi_enable(struct napi_struct *n)
6932 {
6933 	unsigned long val, new;
6934 
6935 	do {
6936 		val = READ_ONCE(n->state);
6937 		BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
6938 
6939 		new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
6940 		if (n->dev->threaded && n->thread)
6941 			new |= NAPIF_STATE_THREADED;
6942 	} while (cmpxchg(&n->state, val, new) != val);
6943 }
6944 EXPORT_SYMBOL(napi_enable);
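
/* Example (sketch): the usual pairing around a queue reconfiguration,
 * with a hypothetical driver helper. napi_disable() may sleep, so this
 * must run in process context:
 *
 *	napi_disable(&rxq->napi);
 *	mydrv_reconfigure_ring(rxq);
 *	napi_enable(&rxq->napi);
 */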

static void flush_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		struct sk_buff *skb, *n;

		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
			kfree_skb(skb);
		napi->gro_hash[i].count = 0;
	}
}

/* Must be called in process context */
void __netif_napi_del(struct napi_struct *napi)
{
	if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
		return;

	napi_hash_del(napi);
	list_del_rcu(&napi->dev_list);
	napi_free_frags(napi);

	flush_gro_hash(napi);
	napi->gro_bitmask = 0;

	if (napi->thread) {
		kthread_stop(napi->thread);
		napi->thread = NULL;
	}
}
EXPORT_SYMBOL(__netif_napi_del);

static int __napi_poll(struct napi_struct *n, bool *repoll)
{
	int work, weight;

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	if (unlikely(work > weight))
		pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
			    n->poll, work, weight);

	if (likely(work < weight))
		return work;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		return work;
	}

	/* The NAPI context has more processing work, but busy-polling
	 * is preferred. Exit early.
	 */
	if (napi_prefer_busy_poll(n)) {
		if (napi_complete_done(n, work)) {
			/* If timeout is not set, we need to make sure
			 * that the NAPI is re-scheduled.
			 */
			napi_schedule(n);
		}
		return work;
	}

	if (n->gro_bitmask) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	gro_normal_list(n);

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		return work;
	}

	*repoll = true;

	return work;
}
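
/* Example (sketch): the contract that __napi_poll() enforces, seen from
 * a driver's ->poll() callback. "mydrv_clean_rx" is hypothetical:
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = mydrv_clean_rx(napi, budget);
 *
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		// Never return more than budget, and leave the NAPI
 *		// state alone when the whole budget was consumed.
 *		return work;
 *	}
 */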

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	bool do_repoll = false;
	void *have;
	int work;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	work = __napi_poll(n, &do_repoll);

	if (do_repoll)
		list_add_tail(&n->poll_list, repoll);

	netpoll_poll_unlock(have);

	return work;
}

static int napi_thread_wait(struct napi_struct *napi)
{
	bool woken = false;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		/* Testing SCHED_THREADED bit here to make sure the current
		 * kthread owns this napi and could poll on this napi.
		 * Testing SCHED bit is not enough because SCHED bit might be
		 * set by some other busy poll thread or by napi_disable().
		 */
		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
			WARN_ON(!list_empty(&napi->poll_list));
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		schedule();
		/* woken being true indicates this thread owns this napi. */
		woken = true;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return -1;
}

static int napi_threaded_poll(void *data)
{
	struct napi_struct *napi = data;
	void *have;

	while (!napi_thread_wait(napi)) {
		for (;;) {
			bool repoll = false;

			local_bh_disable();

			have = netpoll_poll_lock(napi);
			__napi_poll(napi, &repoll);
			netpoll_poll_unlock(have);

			local_bh_enable();

			if (!repoll)
				break;

			cond_resched();
		}
	}
	return 0;
}

static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(netdev_budget_usecs);
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				return;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows an
		 * average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
}

struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* lookup ignore flag */
	bool ignore;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static int ____netdev_has_upper_dev(struct net_device *upper_dev,
				    struct netdev_nested_priv *priv)
{
	struct net_device *dev = (struct net_device *)priv->data;

	return upper_dev == dev;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this searches the entire chain of upper
 * devices, not just the immediate ones. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)upper_dev,
	};

	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					     &priv);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
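
/* Example (sketch): a stacking driver refusing to enslave a candidate
 * device that is already stacked above it, under RTNL. "lower" and
 * "candidate" are hypothetical:
 *
 *	if (netdev_has_upper_dev(lower, candidate))
 *		return -EBUSY;
 */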

/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */

bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)upper_dev,
	};

	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					       &priv);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master) && !upper->ignore)
		return upper->dev;
	return NULL;
}

/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
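
/* Example (sketch): walking the immediate upper list under RCU. The
 * iterator starts at the list head, as the function advances it before
 * dereferencing:
 *
 *	struct list_head *iter = &dev->adj_list.upper;
 *	struct net_device *upper;
 *
 *	rcu_read_lock();
 *	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
 *		pr_debug("upper: %s\n", upper->name);
 *	rcu_read_unlock();
 */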

static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
						  struct list_head **iter,
						  bool *ignore)
{
	struct netdev_adjacent *upper;

	upper = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;
	*ignore = upper->ignore;

	return upper->dev;
}

static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
						    struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}

static int __netdev_walk_all_upper_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
					 struct netdev_nested_priv *priv),
				       struct netdev_nested_priv *priv)
{
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;
	bool ignore;

	now = dev;
	iter = &dev->adj_list.upper;

	while (1) {
		if (now != dev) {
			ret = fn(now, priv);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			udev = __netdev_next_upper_dev(now, &iter, &ignore);
			if (!udev)
				break;
			if (ignore)
				continue;

			next = udev;
			niter = &udev->adj_list.upper;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    struct netdev_nested_priv *priv),
				  struct netdev_nested_priv *priv)
{
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.upper;

	while (1) {
		if (now != dev) {
			ret = fn(now, priv);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			udev = netdev_next_upper_dev_rcu(now, &iter);
			if (!udev)
				break;

			next = udev;
			niter = &udev->adj_list.upper;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
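
/* Example (sketch): counting every device stacked above "dev" with the
 * recursive walker. The callback "count_cb" is hypothetical; returning
 * nonzero from it would abort the walk:
 *
 *	static int count_cb(struct net_device *upper,
 *			    struct netdev_nested_priv *priv)
 *	{
 *		(*(int *)priv->data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	struct netdev_nested_priv priv = { .data = (void *)&n };
 *
 *	netdev_walk_all_upper_dev_rcu(dev, count_cb, &priv);
 */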

static bool __netdev_has_upper_dev(struct net_device *dev,
				   struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.flags = 0,
		.data = (void *)upper_dev,
	};

	ASSERT_RTNL();

	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
					   &priv);
}

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU
 *				       variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *                         list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);
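
/* Example (sketch): visiting each immediate lower device while holding
 * RTNL (cf. the netdev_for_each_lower_dev() helper in netdevice.h).
 * Unlike the RCU iterators above, the iterator here starts at the first
 * entry, not at the list head:
 *
 *	struct list_head *iter = dev->adj_list.lower.next;
 *	struct net_device *ldev;
 *
 *	while ((ldev = netdev_lower_get_next(dev, &iter)))
 *		pr_debug("lower: %s\n", ldev->name);
 */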

static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
						  struct list_head **iter,
						  bool *ignore)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;
	*ignore = lower->ignore;

	return lower->dev;
}

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					struct netdev_nested_priv *priv),
			      struct netdev_nested_priv *priv)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, priv);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = netdev_next_lower_dev(now, &iter);
			if (!ldev)
				break;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static int __netdev_walk_all_lower_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
					 struct netdev_nested_priv *priv),
				       struct netdev_nested_priv *priv)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;
	bool ignore;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, priv);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
			if (!ldev)
				break;
			if (ignore)
				continue;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}

struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
					     struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_next_lower_dev_rcu);

static u8 __netdev_upper_depth(struct net_device *dev)
{
	struct net_device *udev;
	struct list_head *iter;
	u8 max_depth = 0;
	bool ignore;

	for (iter = &dev->adj_list.upper,
	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
	     udev;
	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
		if (ignore)
			continue;
		if (max_depth < udev->upper_level)
			max_depth = udev->upper_level;
	}

	return max_depth;
}

static u8 __netdev_lower_depth(struct net_device *dev)
{
	struct net_device *ldev;
	struct list_head *iter;
	u8 max_depth = 0;
	bool ignore;

	for (iter = &dev->adj_list.lower,
	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
	     ldev;
	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
		if (ignore)
			continue;
		if (max_depth < ldev->lower_level)
			max_depth = ldev->lower_level;
	}

	return max_depth;
}

static int __netdev_update_upper_level(struct net_device *dev,
				       struct netdev_nested_priv *__unused)
{
	dev->upper_level = __netdev_upper_depth(dev) + 1;
	return 0;
}

static int __netdev_update_lower_level(struct net_device *dev,
				       struct netdev_nested_priv *priv)
{
	dev->lower_level = __netdev_lower_depth(dev) + 1;

#ifdef CONFIG_LOCKDEP
	if (!priv)
		return 0;

	if (priv->flags & NESTED_SYNC_IMM)
		dev->nested_level = dev->lower_level - 1;
	if (priv->flags & NESTED_SYNC_TODO)
		net_unlink_todo(dev);
#endif
	return 0;
}

int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    struct netdev_nested_priv *priv),
				  struct netdev_nested_priv *priv)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, priv);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = netdev_next_lower_dev_rcu(now, &iter);
			if (!ldev)
				break;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *				       lower neighbour list, RCU
 *				       variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
			struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int netdev_adjacent_sysfs_add(struct net_device *dev,
			      struct net_device *adj_dev,
			      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ + 7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
			       char *name,
			       struct list_head *dev_list)
{
	char linkname[IFNAMSIZ + 7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
		net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	adj->ignore = false;
	dev_hold(adj_dev);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info,
				   struct netdev_nested_priv *priv,
				   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_changeupper_info changeupper_info = {
		.info = {
			.dev = dev,
			.extack = extack,
		},
		.upper_dev = upper_dev,
		.master = master,
		.linking = true,
		.upper_info = upper_info,
	};
	struct net_device *master_dev;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check that dev is not itself stacked above
	 * upper_dev.
	 */
	if (__netdev_has_upper_dev(upper_dev, dev))
		return -EBUSY;

	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
		return -EMLINK;

	if (!master) {
		if (__netdev_has_upper_dev(dev, upper_dev))
			return -EEXIST;
	} else {
		master_dev = __netdev_master_upper_dev_get(dev);
		if (master_dev)
			return master_dev == upper_dev ? -EEXIST : -EBUSY;
	}

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback;

	__netdev_update_upper_level(dev, NULL);
	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);

	__netdev_update_lower_level(upper_dev, priv);
	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
				    priv);

	return 0;

rollback:
	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
{
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
		.data = NULL,
	};

	return __netdev_upper_dev_link(dev, upper_dev, false,
				       NULL, NULL, &priv, extack);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
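
/* Example (sketch): how a VLAN-like virtual device would attach itself
 * above a real device during registration, under RTNL; "real_dev" and
 * "vlan_dev" are hypothetical, and errors must be propagated:
 *
 *	err = netdev_upper_dev_link(real_dev, vlan_dev, extack);
 *	if (err)
 *		goto unregister;
 */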

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack)
{
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
		.data = NULL,
	};

	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info, &priv, extack);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);

static void __netdev_upper_dev_unlink(struct net_device *dev,
				      struct net_device *upper_dev,
				      struct netdev_nested_priv *priv)
{
	struct netdev_notifier_changeupper_info changeupper_info = {
		.info = {
			.dev = dev,
		},
		.upper_dev = upper_dev,
		.linking = false,
	};

	ASSERT_RTNL();

	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
				      &changeupper_info.info);

	__netdev_update_upper_level(dev, NULL);
	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);

	__netdev_update_lower_level(upper_dev, priv);
	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
				    priv);
}

/**
 * netdev_upper_dev_unlink - Remove a link to an upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to a device which is upper to this one. The caller must
 * hold the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_TODO,
		.data = NULL,
	};

	__netdev_upper_dev_unlink(dev, upper_dev, &priv);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
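
/* Example (sketch): the teardown counterpart of the link example above,
 * also under RTNL:
 *
 *	netdev_upper_dev_unlink(real_dev, vlan_dev);
 */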

static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
				      struct net_device *lower_dev,
				      bool val)
{
	struct netdev_adjacent *adj;

	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
	if (adj)
		adj->ignore = val;

	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
	if (adj)
		adj->ignore = val;
}

static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
					struct net_device *lower_dev)
{
	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
}

static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
				       struct net_device *lower_dev)
{
	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
}

int netdev_adjacent_change_prepare(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev,
				   struct netlink_ext_ack *extack)
{
	struct netdev_nested_priv priv = {
		.flags = 0,
		.data = NULL,
	};
	int err;

	if (!new_dev)
		return 0;

	if (old_dev && new_dev != old_dev)
		netdev_adjacent_dev_disable(dev, old_dev);
	err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
				      extack);
	if (err) {
		if (old_dev && new_dev != old_dev)
			netdev_adjacent_dev_enable(dev, old_dev);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_adjacent_change_prepare);

void netdev_adjacent_change_commit(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
		.data = NULL,
	};

	if (!new_dev || !old_dev)
		return;

	if (new_dev == old_dev)
		return;

	netdev_adjacent_dev_enable(dev, old_dev);
	__netdev_upper_dev_unlink(old_dev, dev, &priv);
}
EXPORT_SYMBOL(netdev_adjacent_change_commit);

void netdev_adjacent_change_abort(struct net_device *old_dev,
				  struct net_device *new_dev,
				  struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.flags = 0,
		.data = NULL,
	};

	if (!new_dev)
		return;

	if (old_dev && new_dev != old_dev)
		netdev_adjacent_dev_enable(dev, old_dev);

	__netdev_upper_dev_unlink(new_dev, dev, &priv);
}
EXPORT_SYMBOL(netdev_adjacent_change_abort);

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info = {
		.info.dev = dev,
	};

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);

/**
 * netdev_get_xmit_slave - Get the xmit slave of master device
 * @dev: device
 * @skb: The packet
 * @all_slaves: assume all the slaves are active
 *
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 * %NULL is returned if no slave is found.
 */

struct net_device *netdev_get_xmit_slave(struct net_device *dev,
					 struct sk_buff *skb,
					 bool all_slaves)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_xmit_slave)
		return NULL;
	return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
}
EXPORT_SYMBOL(netdev_get_xmit_slave);

static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
						  struct sock *sk)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_sk_get_lower_dev)
		return NULL;
	return ops->ndo_sk_get_lower_dev(dev, sk);
}

/**
 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
 * @dev: device
 * @sk: the socket
 *
 * Walk down the lower device chain for @sk and return the lowest device
 * found. If @dev has no lower device for @sk, @dev itself is returned,
 * so the result is never %NULL.
 */

struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
					    struct sock *sk)
{
	struct net_device *lower;

	lower = netdev_sk_get_lower_dev(dev, sk);
	while (lower) {
		dev = lower;
		lower = netdev_sk_get_lower_dev(dev, sk);
	}

	return dev;
}
EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
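
/* Example (sketch): resolving the physical transmit device under a
 * bond/team stack for a given socket, e.g. for an offload setup.
 * No reference is taken, so this is typically done under RCU:
 *
 *	rcu_read_lock();
 *	lowest = netdev_sk_get_lowest_dev(bond_dev, sk);
 *	rcu_read_unlock();
 */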

static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
		.info.dev = lower_dev,
	};

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes the counter to wrap, leave promiscuity
		 * untouched and return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(audit_context(), GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
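
/* Example (sketch): a packet-capture style user taking and later
 * dropping a promiscuity reference. RTNL must be held, and the
 * increment must be balanced by a matching decrement:
 *
 *	err = dev_set_promiscuity(dev, 1);
 *	if (err)
 *		return err;
 *	// ... capture traffic ...
 *	dev_set_promiscuity(dev, -1);
 */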

static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes the counter to wrap, leave allmulti
		 * untouched and return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all multicast frames. Once it hits zero the device reverts back to
 *	normal filtering operation. A negative @inc value is used to drop the
 *	counter when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
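
/* Example (sketch): a tunnel-like user that must see all multicast
 * frames on its lower device while active, balancing the counter on
 * teardown; "lower_dev" is hypothetical:
 *
 *	dev_set_allmulti(lower_dev, 1);		// on activation
 *	dev_set_allmulti(lower_dev, -1);	// on deactivation
 */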

/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags & IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8676 		/* Unicast address changes may only happen under the rtnl,
8677 		 * therefore calling __dev_set_promiscuity here is safe.
8678 		 */
8679 		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8680 			__dev_set_promiscuity(dev, 1, false);
8681 			dev->uc_promisc = true;
8682 		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8683 			__dev_set_promiscuity(dev, -1, false);
8684 			dev->uc_promisc = false;
8685 		}
8686 	}
8687 
8688 	if (ops->ndo_set_rx_mode)
8689 		ops->ndo_set_rx_mode(dev);
8690 }
8691 
8692 void dev_set_rx_mode(struct net_device *dev)
8693 {
8694 	netif_addr_lock_bh(dev);
8695 	__dev_set_rx_mode(dev);
8696 	netif_addr_unlock_bh(dev);
8697 }
8698 
8699 /**
8700  *	dev_get_flags - get flags reported to userspace
8701  *	@dev: device
8702  *
8703  *	Get the combination of flag bits exported through APIs to userspace.
8704  */
8705 unsigned int dev_get_flags(const struct net_device *dev)
8706 {
8707 	unsigned int flags;
8708 
8709 	flags = (dev->flags & ~(IFF_PROMISC |
8710 				IFF_ALLMULTI |
8711 				IFF_RUNNING |
8712 				IFF_LOWER_UP |
8713 				IFF_DORMANT)) |
8714 		(dev->gflags & (IFF_PROMISC |
8715 				IFF_ALLMULTI));
8716 
8717 	if (netif_running(dev)) {
8718 		if (netif_oper_up(dev))
8719 			flags |= IFF_RUNNING;
8720 		if (netif_carrier_ok(dev))
8721 			flags |= IFF_LOWER_UP;
8722 		if (netif_dormant(dev))
8723 			flags |= IFF_DORMANT;
8724 	}
8725 
8726 	return flags;
8727 }
8728 EXPORT_SYMBOL(dev_get_flags);
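
/*
 * Illustrative sketch (editorial addition): reading the userspace-visible
 * flag word. Note that IFF_RUNNING, IFF_LOWER_UP and IFF_DORMANT are
 * synthesized from operstate/carrier above rather than read from
 * dev->flags directly.
 */
static bool __maybe_unused example_link_is_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}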
8729 
8730 int __dev_change_flags(struct net_device *dev, unsigned int flags,
8731 		       struct netlink_ext_ack *extack)
8732 {
8733 	unsigned int old_flags = dev->flags;
8734 	int ret;
8735 
8736 	ASSERT_RTNL();
8737 
8738 	/*
8739 	 *	Set the flags on our device.
8740 	 */
8741 
8742 	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8743 			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8744 			       IFF_AUTOMEDIA)) |
8745 		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8746 				    IFF_ALLMULTI));
8747 
8748 	/*
8749 	 *	Load in the correct multicast list now the flags have changed.
8750 	 */
8751 
8752 	if ((old_flags ^ flags) & IFF_MULTICAST)
8753 		dev_change_rx_flags(dev, IFF_MULTICAST);
8754 
8755 	dev_set_rx_mode(dev);
8756 
8757 	/*
8758 	 *	Have we downed the interface? We handle IFF_UP ourselves
8759 	 *	according to user attempts to set it, rather than blindly
8760 	 *	setting it.
8761 	 */
8762 
8763 	ret = 0;
8764 	if ((old_flags ^ flags) & IFF_UP) {
8765 		if (old_flags & IFF_UP)
8766 			__dev_close(dev);
8767 		else
8768 			ret = __dev_open(dev, extack);
8769 	}
8770 
8771 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
8772 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
8773 		unsigned int old_flags = dev->flags;
8774 
8775 		dev->gflags ^= IFF_PROMISC;
8776 
8777 		if (__dev_set_promiscuity(dev, inc, false) >= 0)
8778 			if (dev->flags != old_flags)
8779 				dev_set_rx_mode(dev);
8780 	}
8781 
8782 	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8783 	 * is important. Some (broken) drivers set IFF_PROMISC when
8784 	 * IFF_ALLMULTI is requested, without asking us and without reporting it.
8785 	 */
8786 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8787 		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8788 
8789 		dev->gflags ^= IFF_ALLMULTI;
8790 		__dev_set_allmulti(dev, inc, false);
8791 	}
8792 
8793 	return ret;
8794 }
8795 
8796 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8797 			unsigned int gchanges)
8798 {
8799 	unsigned int changes = dev->flags ^ old_flags;
8800 
8801 	if (gchanges)
8802 		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
8803 
8804 	if (changes & IFF_UP) {
8805 		if (dev->flags & IFF_UP)
8806 			call_netdevice_notifiers(NETDEV_UP, dev);
8807 		else
8808 			call_netdevice_notifiers(NETDEV_DOWN, dev);
8809 	}
8810 
8811 	if (dev->flags & IFF_UP &&
8812 	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8813 		struct netdev_notifier_change_info change_info = {
8814 			.info = {
8815 				.dev = dev,
8816 			},
8817 			.flags_changed = changes,
8818 		};
8819 
8820 		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8821 	}
8822 }
8823 
8824 /**
8825  *	dev_change_flags - change device settings
8826  *	@dev: device
8827  *	@flags: device state flags
8828  *	@extack: netlink extended ack
8829  *
8830  *	Change settings on a device based on state flags. The flags are
8831  *	in the userspace exported format.
8832  */
8833 int dev_change_flags(struct net_device *dev, unsigned int flags,
8834 		     struct netlink_ext_ack *extack)
8835 {
8836 	int ret;
8837 	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8838 
8839 	ret = __dev_change_flags(dev, flags, extack);
8840 	if (ret < 0)
8841 		return ret;
8842 
8843 	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8844 	__dev_notify_flags(dev, old_flags, changes);
8845 	return ret;
8846 }
8847 EXPORT_SYMBOL(dev_change_flags);
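
/*
 * Illustrative sketch (editorial addition): administratively bringing an
 * interface up the same way the ioctl/netlink paths do, by setting IFF_UP
 * in the userspace-format flag word. Caller must hold RTNL; a NULL extack
 * is fine when no netlink request is associated with the change.
 */
static int __maybe_unused example_bring_up(struct net_device *dev)
{
	ASSERT_RTNL();

	return dev_change_flags(dev, dev->flags | IFF_UP, NULL);
}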
8848 
8849 int __dev_set_mtu(struct net_device *dev, int new_mtu)
8850 {
8851 	const struct net_device_ops *ops = dev->netdev_ops;
8852 
8853 	if (ops->ndo_change_mtu)
8854 		return ops->ndo_change_mtu(dev, new_mtu);
8855 
8856 	/* Pairs with all the lockless reads of dev->mtu in the stack */
8857 	WRITE_ONCE(dev->mtu, new_mtu);
8858 	return 0;
8859 }
8860 EXPORT_SYMBOL(__dev_set_mtu);
8861 
8862 int dev_validate_mtu(struct net_device *dev, int new_mtu,
8863 		     struct netlink_ext_ack *extack)
8864 {
8865 	/* MTU must be positive, and in range */
8866 	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8867 		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8868 		return -EINVAL;
8869 	}
8870 
8871 	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8872 		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8873 		return -EINVAL;
8874 	}
8875 	return 0;
8876 }
8877 
8878 /**
8879  *	dev_set_mtu_ext - Change maximum transfer unit
8880  *	@dev: device
8881  *	@new_mtu: new transfer unit
8882  *	@extack: netlink extended ack
8883  *
8884  *	Change the maximum transfer size of the network device.
8885  */
8886 int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8887 		    struct netlink_ext_ack *extack)
8888 {
8889 	int err, orig_mtu;
8890 
8891 	if (new_mtu == dev->mtu)
8892 		return 0;
8893 
8894 	err = dev_validate_mtu(dev, new_mtu, extack);
8895 	if (err)
8896 		return err;
8897 
8898 	if (!netif_device_present(dev))
8899 		return -ENODEV;
8900 
8901 	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8902 	err = notifier_to_errno(err);
8903 	if (err)
8904 		return err;
8905 
8906 	orig_mtu = dev->mtu;
8907 	err = __dev_set_mtu(dev, new_mtu);
8908 
8909 	if (!err) {
8910 		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8911 						   orig_mtu);
8912 		err = notifier_to_errno(err);
8913 		if (err) {
8914 			/* setting mtu back and notifying everyone again,
8915 			 * so that they have a chance to revert changes.
8916 			 */
8917 			__dev_set_mtu(dev, orig_mtu);
8918 			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8919 						     new_mtu);
8920 		}
8921 	}
8922 	return err;
8923 }
8924 
8925 int dev_set_mtu(struct net_device *dev, int new_mtu)
8926 {
8927 	struct netlink_ext_ack extack;
8928 	int err;
8929 
8930 	memset(&extack, 0, sizeof(extack));
8931 	err = dev_set_mtu_ext(dev, new_mtu, &extack);
8932 	if (err && extack._msg)
8933 		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8934 	return err;
8935 }
8936 EXPORT_SYMBOL(dev_set_mtu);
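
/*
 * Illustrative sketch (editorial addition): requesting a jumbo MTU. The
 * value is validated against dev->min_mtu/dev->max_mtu, and the change is
 * rolled back if a NETDEV_CHANGEMTU notifier objects. Caller holds RTNL.
 */
static int __maybe_unused example_enable_jumbo_frames(struct net_device *dev)
{
	return dev_set_mtu(dev, 9000);	/* 9000 is a conventional jumbo MTU */
}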
8937 
8938 /**
8939  *	dev_change_tx_queue_len - Change TX queue length of a netdevice
8940  *	@dev: device
8941  *	@new_len: new tx queue length
8942  */
8943 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8944 {
8945 	unsigned int orig_len = dev->tx_queue_len;
8946 	int res;
8947 
8948 	if (new_len != (unsigned int)new_len)
8949 		return -ERANGE;
8950 
8951 	if (new_len != orig_len) {
8952 		dev->tx_queue_len = new_len;
8953 		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8954 		res = notifier_to_errno(res);
8955 		if (res)
8956 			goto err_rollback;
8957 		res = dev_qdisc_change_tx_queue_len(dev);
8958 		if (res)
8959 			goto err_rollback;
8960 	}
8961 
8962 	return 0;
8963 
8964 err_rollback:
8965 	netdev_err(dev, "refused to change device tx_queue_len\n");
8966 	dev->tx_queue_len = orig_len;
8967 	return res;
8968 }
8969 
8970 /**
8971  *	dev_set_group - Change group this device belongs to
8972  *	@dev: device
8973  *	@new_group: group this device should belong to
8974  */
8975 void dev_set_group(struct net_device *dev, int new_group)
8976 {
8977 	dev->group = new_group;
8978 }
8979 EXPORT_SYMBOL(dev_set_group);
8980 
8981 /**
8982  *	dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8983  *	@dev: device
8984  *	@addr: new address
8985  *	@extack: netlink extended ack
8986  */
8987 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8988 			      struct netlink_ext_ack *extack)
8989 {
8990 	struct netdev_notifier_pre_changeaddr_info info = {
8991 		.info.dev = dev,
8992 		.info.extack = extack,
8993 		.dev_addr = addr,
8994 	};
8995 	int rc;
8996 
8997 	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8998 	return notifier_to_errno(rc);
8999 }
9000 EXPORT_SYMBOL(dev_pre_changeaddr_notify);
9001 
9002 /**
9003  *	dev_set_mac_address - Change Media Access Control Address
9004  *	@dev: device
9005  *	@sa: new address
9006  *	@extack: netlink extended ack
9007  *
9008  *	Change the hardware (MAC) address of the device
9009  */
9010 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
9011 			struct netlink_ext_ack *extack)
9012 {
9013 	const struct net_device_ops *ops = dev->netdev_ops;
9014 	int err;
9015 
9016 	if (!ops->ndo_set_mac_address)
9017 		return -EOPNOTSUPP;
9018 	if (sa->sa_family != dev->type)
9019 		return -EINVAL;
9020 	if (!netif_device_present(dev))
9021 		return -ENODEV;
9022 	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
9023 	if (err)
9024 		return err;
9025 	err = ops->ndo_set_mac_address(dev, sa);
9026 	if (err)
9027 		return err;
9028 	dev->addr_assign_type = NET_ADDR_SET;
9029 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
9030 	add_device_randomness(dev->dev_addr, dev->addr_len);
9031 	return 0;
9032 }
9033 EXPORT_SYMBOL(dev_set_mac_address);
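
/*
 * Illustrative sketch (editorial addition): packing a MAC address into a
 * struct sockaddr as the ioctl path does. sa_family must match dev->type
 * (e.g. ARPHRD_ETHER) and the raw bytes go in sa_data. Caller holds RTNL.
 */
static int __maybe_unused example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, dev->addr_len);
	return dev_set_mac_address(dev, &sa, NULL);
}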
9034 
9035 static DECLARE_RWSEM(dev_addr_sem);
9036 
9037 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
9038 			     struct netlink_ext_ack *extack)
9039 {
9040 	int ret;
9041 
9042 	down_write(&dev_addr_sem);
9043 	ret = dev_set_mac_address(dev, sa, extack);
9044 	up_write(&dev_addr_sem);
9045 	return ret;
9046 }
9047 EXPORT_SYMBOL(dev_set_mac_address_user);
9048 
9049 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
9050 {
9051 	size_t size = sizeof(sa->sa_data);
9052 	struct net_device *dev;
9053 	int ret = 0;
9054 
9055 	down_read(&dev_addr_sem);
9056 	rcu_read_lock();
9057 
9058 	dev = dev_get_by_name_rcu(net, dev_name);
9059 	if (!dev) {
9060 		ret = -ENODEV;
9061 		goto unlock;
9062 	}
9063 	if (!dev->addr_len)
9064 		memset(sa->sa_data, 0, size);
9065 	else
9066 		memcpy(sa->sa_data, dev->dev_addr,
9067 		       min_t(size_t, size, dev->addr_len));
9068 	sa->sa_family = dev->type;
9069 
9070 unlock:
9071 	rcu_read_unlock();
9072 	up_read(&dev_addr_sem);
9073 	return ret;
9074 }
9075 EXPORT_SYMBOL(dev_get_mac_address);
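
/*
 * Illustrative sketch (editorial addition): looking up an address by
 * interface name, as SIOCGIFHWADDR does. dev_addr_sem and RCU are taken
 * internally, so the caller needs no locks. "eth0" is a placeholder name.
 */
static int __maybe_unused example_get_mac(struct net *net, struct sockaddr *sa)
{
	char name[IFNAMSIZ] = "eth0";

	return dev_get_mac_address(sa, net, name);
}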
9076 
9077 /**
9078  *	dev_change_carrier - Change device carrier
9079  *	@dev: device
9080  *	@new_carrier: new value
9081  *
9082  *	Change device carrier
9083  */
9084 int dev_change_carrier(struct net_device *dev, bool new_carrier)
9085 {
9086 	const struct net_device_ops *ops = dev->netdev_ops;
9087 
9088 	if (!ops->ndo_change_carrier)
9089 		return -EOPNOTSUPP;
9090 	if (!netif_device_present(dev))
9091 		return -ENODEV;
9092 	return ops->ndo_change_carrier(dev, new_carrier);
9093 }
9094 EXPORT_SYMBOL(dev_change_carrier);
9095 
9096 /**
9097  *	dev_get_phys_port_id - Get device physical port ID
9098  *	@dev: device
9099  *	@ppid: port ID
9100  *
9101  *	Get device physical port ID
9102  */
9103 int dev_get_phys_port_id(struct net_device *dev,
9104 			 struct netdev_phys_item_id *ppid)
9105 {
9106 	const struct net_device_ops *ops = dev->netdev_ops;
9107 
9108 	if (!ops->ndo_get_phys_port_id)
9109 		return -EOPNOTSUPP;
9110 	return ops->ndo_get_phys_port_id(dev, ppid);
9111 }
9112 EXPORT_SYMBOL(dev_get_phys_port_id);
9113 
9114 /**
9115  *	dev_get_phys_port_name - Get device physical port name
9116  *	@dev: device
9117  *	@name: port name
9118  *	@len: limit of bytes to copy to name
9119  *
9120  *	Get device physical port name
9121  */
9122 int dev_get_phys_port_name(struct net_device *dev,
9123 			   char *name, size_t len)
9124 {
9125 	const struct net_device_ops *ops = dev->netdev_ops;
9126 	int err;
9127 
9128 	if (ops->ndo_get_phys_port_name) {
9129 		err = ops->ndo_get_phys_port_name(dev, name, len);
9130 		if (err != -EOPNOTSUPP)
9131 			return err;
9132 	}
9133 	return devlink_compat_phys_port_name_get(dev, name, len);
9134 }
9135 EXPORT_SYMBOL(dev_get_phys_port_name);
9136 
9137 /**
9138  *	dev_get_port_parent_id - Get the device's port parent identifier
9139  *	@dev: network device
9140  *	@ppid: pointer to a storage for the port's parent identifier
9141  *	@recurse: allow/disallow recursion to lower devices
9142  *
9143  *	Get the device's port parent identifier
9144  */
9145 int dev_get_port_parent_id(struct net_device *dev,
9146 			   struct netdev_phys_item_id *ppid,
9147 			   bool recurse)
9148 {
9149 	const struct net_device_ops *ops = dev->netdev_ops;
9150 	struct netdev_phys_item_id first = { };
9151 	struct net_device *lower_dev;
9152 	struct list_head *iter;
9153 	int err;
9154 
9155 	if (ops->ndo_get_port_parent_id) {
9156 		err = ops->ndo_get_port_parent_id(dev, ppid);
9157 		if (err != -EOPNOTSUPP)
9158 			return err;
9159 	}
9160 
9161 	err = devlink_compat_switch_id_get(dev, ppid);
9162 	if (!err || err != -EOPNOTSUPP)
9163 		return err;
9164 
9165 	if (!recurse)
9166 		return -EOPNOTSUPP;
9167 
9168 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
9169 		err = dev_get_port_parent_id(lower_dev, ppid, recurse);
9170 		if (err)
9171 			break;
9172 		if (!first.id_len)
9173 			first = *ppid;
9174 		else if (memcmp(&first, ppid, sizeof(*ppid)))
9175 			return -EOPNOTSUPP;
9176 	}
9177 
9178 	return err;
9179 }
9180 EXPORT_SYMBOL(dev_get_port_parent_id);
9181 
9182 /**
9183  *	netdev_port_same_parent_id - Indicate if two network devices have
9184  *	the same port parent identifier
9185  *	@a: first network device
9186  *	@b: second network device
9187  */
9188 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
9189 {
9190 	struct netdev_phys_item_id a_id = { };
9191 	struct netdev_phys_item_id b_id = { };
9192 
9193 	if (dev_get_port_parent_id(a, &a_id, true) ||
9194 	    dev_get_port_parent_id(b, &b_id, true))
9195 		return false;
9196 
9197 	return netdev_phys_item_id_same(&a_id, &b_id);
9198 }
9199 EXPORT_SYMBOL(netdev_port_same_parent_id);
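
/*
 * Illustrative sketch (editorial addition): a switchdev-style check that
 * two netdevs are ports of the same physical switch, e.g. before trying
 * to offload forwarding between them instead of bridging in software.
 */
static int __maybe_unused example_can_offload(struct net_device *port_a,
					      struct net_device *port_b)
{
	if (!netdev_port_same_parent_id(port_a, port_b))
		return -EOPNOTSUPP;	/* fall back to software forwarding */
	return 0;
}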
9200 
9201 /**
9202  *	dev_change_proto_down - update protocol port state information
9203  *	@dev: device
9204  *	@proto_down: new value
9205  *
9206  *	This info can be used by switch drivers to set the phys state of the
9207  *	port.
9208  */
9209 int dev_change_proto_down(struct net_device *dev, bool proto_down)
9210 {
9211 	const struct net_device_ops *ops = dev->netdev_ops;
9212 
9213 	if (!ops->ndo_change_proto_down)
9214 		return -EOPNOTSUPP;
9215 	if (!netif_device_present(dev))
9216 		return -ENODEV;
9217 	return ops->ndo_change_proto_down(dev, proto_down);
9218 }
9219 EXPORT_SYMBOL(dev_change_proto_down);
9220 
9221 /**
9222  *	dev_change_proto_down_generic - generic implementation for
9223  * 	ndo_change_proto_down that sets carrier according to
9224  * 	proto_down.
9225  *
9226  *	@dev: device
9227  *	@proto_down: new value
9228  */
9229 int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
9230 {
9231 	if (proto_down)
9232 		netif_carrier_off(dev);
9233 	else
9234 		netif_carrier_on(dev);
9235 	dev->proto_down = proto_down;
9236 	return 0;
9237 }
9238 EXPORT_SYMBOL(dev_change_proto_down_generic);
9239 
9240 /**
9241  *	dev_change_proto_down_reason - proto down reason
9242  *
9243  *	@dev: device
9244  *	@mask: proto down mask
9245  *	@value: proto down value
9246  */
9247 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9248 				  u32 value)
9249 {
9250 	int b;
9251 
9252 	if (!mask) {
9253 		dev->proto_down_reason = value;
9254 	} else {
9255 		for_each_set_bit(b, &mask, 32) {
9256 			if (value & (1 << b))
9257 				dev->proto_down_reason |= BIT(b);
9258 			else
9259 				dev->proto_down_reason &= ~BIT(b);
9260 		}
9261 	}
9262 }
9263 EXPORT_SYMBOL(dev_change_proto_down_reason);
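
/*
 * Illustrative sketch (editorial addition): the mask/value semantics
 * above. Only bits set in @mask are updated; a zero mask replaces the
 * whole reason word.
 */
static void __maybe_unused example_proto_down_reasons(struct net_device *dev)
{
	dev_change_proto_down_reason(dev, BIT(2), BIT(2));	/* set bit 2 only */
	dev_change_proto_down_reason(dev, BIT(2), 0);		/* clear bit 2 only */
	dev_change_proto_down_reason(dev, 0, 0);		/* replace whole word */
}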
9264 
9265 struct bpf_xdp_link {
9266 	struct bpf_link link;
9267 	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
9268 	int flags;
9269 };
9270 
9271 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9272 {
9273 	if (flags & XDP_FLAGS_HW_MODE)
9274 		return XDP_MODE_HW;
9275 	if (flags & XDP_FLAGS_DRV_MODE)
9276 		return XDP_MODE_DRV;
9277 	if (flags & XDP_FLAGS_SKB_MODE)
9278 		return XDP_MODE_SKB;
9279 	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9280 }
9281 
9282 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9283 {
9284 	switch (mode) {
9285 	case XDP_MODE_SKB:
9286 		return generic_xdp_install;
9287 	case XDP_MODE_DRV:
9288 	case XDP_MODE_HW:
9289 		return dev->netdev_ops->ndo_bpf;
9290 	default:
9291 		return NULL;
9292 	}
9293 }
9294 
9295 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9296 					 enum bpf_xdp_mode mode)
9297 {
9298 	return dev->xdp_state[mode].link;
9299 }
9300 
9301 static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9302 				     enum bpf_xdp_mode mode)
9303 {
9304 	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9305 
9306 	if (link)
9307 		return link->link.prog;
9308 	return dev->xdp_state[mode].prog;
9309 }
9310 
9311 u8 dev_xdp_prog_count(struct net_device *dev)
9312 {
9313 	u8 count = 0;
9314 	int i;
9315 
9316 	for (i = 0; i < __MAX_XDP_MODE; i++)
9317 		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9318 			count++;
9319 	return count;
9320 }
9321 EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
9322 
9323 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9324 {
9325 	struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9326 
9327 	return prog ? prog->aux->id : 0;
9328 }
9329 
9330 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9331 			     struct bpf_xdp_link *link)
9332 {
9333 	dev->xdp_state[mode].link = link;
9334 	dev->xdp_state[mode].prog = NULL;
9335 }
9336 
9337 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9338 			     struct bpf_prog *prog)
9339 {
9340 	dev->xdp_state[mode].link = NULL;
9341 	dev->xdp_state[mode].prog = prog;
9342 }
9343 
9344 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9345 			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9346 			   u32 flags, struct bpf_prog *prog)
9347 {
9348 	struct netdev_bpf xdp;
9349 	int err;
9350 
9351 	memset(&xdp, 0, sizeof(xdp));
9352 	xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9353 	xdp.extack = extack;
9354 	xdp.flags = flags;
9355 	xdp.prog = prog;
9356 
9357 	/* Drivers assume refcnt is already incremented (i.e., prog pointer is
9358 	 * "moved" into driver), so they don't increment it on their own, but
9359 	 * they do decrement refcnt when program is detached or replaced.
9360 	 * Given net_device also owns link/prog, we need to bump refcnt here
9361 	 * to prevent drivers from underflowing it.
9362 	 */
9363 	if (prog)
9364 		bpf_prog_inc(prog);
9365 	err = bpf_op(dev, &xdp);
9366 	if (err) {
9367 		if (prog)
9368 			bpf_prog_put(prog);
9369 		return err;
9370 	}
9371 
9372 	if (mode != XDP_MODE_HW)
9373 		bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9374 
9375 	return 0;
9376 }
9377 
9378 static void dev_xdp_uninstall(struct net_device *dev)
9379 {
9380 	struct bpf_xdp_link *link;
9381 	struct bpf_prog *prog;
9382 	enum bpf_xdp_mode mode;
9383 	bpf_op_t bpf_op;
9384 
9385 	ASSERT_RTNL();
9386 
9387 	for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9388 		prog = dev_xdp_prog(dev, mode);
9389 		if (!prog)
9390 			continue;
9391 
9392 		bpf_op = dev_xdp_bpf_op(dev, mode);
9393 		if (!bpf_op)
9394 			continue;
9395 
9396 		WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9397 
9398 		/* auto-detach link from net device */
9399 		link = dev_xdp_link(dev, mode);
9400 		if (link)
9401 			link->dev = NULL;
9402 		else
9403 			bpf_prog_put(prog);
9404 
9405 		dev_xdp_set_link(dev, mode, NULL);
9406 	}
9407 }
9408 
9409 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9410 			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9411 			  struct bpf_prog *old_prog, u32 flags)
9412 {
9413 	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9414 	struct bpf_prog *cur_prog;
9415 	struct net_device *upper;
9416 	struct list_head *iter;
9417 	enum bpf_xdp_mode mode;
9418 	bpf_op_t bpf_op;
9419 	int err;
9420 
9421 	ASSERT_RTNL();
9422 
9423 	/* either link or prog attachment, never both */
9424 	if (link && (new_prog || old_prog))
9425 		return -EINVAL;
9426 	/* link supports only XDP mode flags */
9427 	if (link && (flags & ~XDP_FLAGS_MODES)) {
9428 		NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9429 		return -EINVAL;
9430 	}
9431 	/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9432 	if (num_modes > 1) {
9433 		NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
9434 		return -EINVAL;
9435 	}
9436 	/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9437 	if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9438 		NL_SET_ERR_MSG(extack,
9439 			       "More than one program loaded, unset mode is ambiguous");
9440 		return -EINVAL;
9441 	}
9442 	/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9443 	if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
9444 		NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
9445 		return -EINVAL;
9446 	}
9447 
9448 	mode = dev_xdp_mode(dev, flags);
9449 	/* can't replace attached link */
9450 	if (dev_xdp_link(dev, mode)) {
9451 		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
9452 		return -EBUSY;
9453 	}
9454 
9455 	/* don't allow if an upper device already has a program */
9456 	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
9457 		if (dev_xdp_prog_count(upper) > 0) {
9458 			NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
9459 			return -EEXIST;
9460 		}
9461 	}
9462 
9463 	cur_prog = dev_xdp_prog(dev, mode);
9464 	/* can't replace attached prog with link */
9465 	if (link && cur_prog) {
9466 		NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
9467 		return -EBUSY;
9468 	}
9469 	if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
9470 		NL_SET_ERR_MSG(extack, "Active program does not match expected");
9471 		return -EEXIST;
9472 	}
9473 
9474 	/* put effective new program into new_prog */
9475 	if (link)
9476 		new_prog = link->link.prog;
9477 
9478 	if (new_prog) {
9479 		bool offload = mode == XDP_MODE_HW;
9480 		enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
9481 					       ? XDP_MODE_DRV : XDP_MODE_SKB;
9482 
9483 		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
9484 			NL_SET_ERR_MSG(extack, "XDP program already attached");
9485 			return -EBUSY;
9486 		}
9487 		if (!offload && dev_xdp_prog(dev, other_mode)) {
9488 			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
9489 			return -EEXIST;
9490 		}
9491 		if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) {
9492 			NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported");
9493 			return -EINVAL;
9494 		}
9495 		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9496 			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
9497 			return -EINVAL;
9498 		}
9499 		if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9500 			NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
9501 			return -EINVAL;
9502 		}
9503 	}
9504 
9505 	/* don't call drivers if the effective program didn't change */
9506 	if (new_prog != cur_prog) {
9507 		bpf_op = dev_xdp_bpf_op(dev, mode);
9508 		if (!bpf_op) {
9509 			NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
9510 			return -EOPNOTSUPP;
9511 		}
9512 
9513 		err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9514 		if (err)
9515 			return err;
9516 	}
9517 
9518 	if (link)
9519 		dev_xdp_set_link(dev, mode, link);
9520 	else
9521 		dev_xdp_set_prog(dev, mode, new_prog);
9522 	if (cur_prog)
9523 		bpf_prog_put(cur_prog);
9524 
9525 	return 0;
9526 }
9527 
9528 static int dev_xdp_attach_link(struct net_device *dev,
9529 			       struct netlink_ext_ack *extack,
9530 			       struct bpf_xdp_link *link)
9531 {
9532 	return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9533 }
9534 
9535 static int dev_xdp_detach_link(struct net_device *dev,
9536 			       struct netlink_ext_ack *extack,
9537 			       struct bpf_xdp_link *link)
9538 {
9539 	enum bpf_xdp_mode mode;
9540 	bpf_op_t bpf_op;
9541 
9542 	ASSERT_RTNL();
9543 
9544 	mode = dev_xdp_mode(dev, link->flags);
9545 	if (dev_xdp_link(dev, mode) != link)
9546 		return -EINVAL;
9547 
9548 	bpf_op = dev_xdp_bpf_op(dev, mode);
9549 	WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9550 	dev_xdp_set_link(dev, mode, NULL);
9551 	return 0;
9552 }
9553 
9554 static void bpf_xdp_link_release(struct bpf_link *link)
9555 {
9556 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9557 
9558 	rtnl_lock();
9559 
9560 	/* if racing with net_device's teardown, xdp_link->dev might already
9561 	 * be NULL, in which case the link was already auto-detached
9562 	 */
9563 	if (xdp_link->dev) {
9564 		WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9565 		xdp_link->dev = NULL;
9566 	}
9567 
9568 	rtnl_unlock();
9569 }
9570 
9571 static int bpf_xdp_link_detach(struct bpf_link *link)
9572 {
9573 	bpf_xdp_link_release(link);
9574 	return 0;
9575 }
9576 
9577 static void bpf_xdp_link_dealloc(struct bpf_link *link)
9578 {
9579 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9580 
9581 	kfree(xdp_link);
9582 }
9583 
9584 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
9585 				     struct seq_file *seq)
9586 {
9587 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9588 	u32 ifindex = 0;
9589 
9590 	rtnl_lock();
9591 	if (xdp_link->dev)
9592 		ifindex = xdp_link->dev->ifindex;
9593 	rtnl_unlock();
9594 
9595 	seq_printf(seq, "ifindex:\t%u\n", ifindex);
9596 }
9597 
9598 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
9599 				       struct bpf_link_info *info)
9600 {
9601 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9602 	u32 ifindex = 0;
9603 
9604 	rtnl_lock();
9605 	if (xdp_link->dev)
9606 		ifindex = xdp_link->dev->ifindex;
9607 	rtnl_unlock();
9608 
9609 	info->xdp.ifindex = ifindex;
9610 	return 0;
9611 }
9612 
9613 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
9614 			       struct bpf_prog *old_prog)
9615 {
9616 	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9617 	enum bpf_xdp_mode mode;
9618 	bpf_op_t bpf_op;
9619 	int err = 0;
9620 
9621 	rtnl_lock();
9622 
9623 	/* link might have been auto-released already, so fail */
9624 	if (!xdp_link->dev) {
9625 		err = -ENOLINK;
9626 		goto out_unlock;
9627 	}
9628 
9629 	if (old_prog && link->prog != old_prog) {
9630 		err = -EPERM;
9631 		goto out_unlock;
9632 	}
9633 	old_prog = link->prog;
9634 	if (old_prog == new_prog) {
9635 		/* no-op, don't disturb drivers */
9636 		bpf_prog_put(new_prog);
9637 		goto out_unlock;
9638 	}
9639 
9640 	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9641 	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9642 	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9643 			      xdp_link->flags, new_prog);
9644 	if (err)
9645 		goto out_unlock;
9646 
9647 	old_prog = xchg(&link->prog, new_prog);
9648 	bpf_prog_put(old_prog);
9649 
9650 out_unlock:
9651 	rtnl_unlock();
9652 	return err;
9653 }
9654 
9655 static const struct bpf_link_ops bpf_xdp_link_lops = {
9656 	.release = bpf_xdp_link_release,
9657 	.dealloc = bpf_xdp_link_dealloc,
9658 	.detach = bpf_xdp_link_detach,
9659 	.show_fdinfo = bpf_xdp_link_show_fdinfo,
9660 	.fill_link_info = bpf_xdp_link_fill_link_info,
9661 	.update_prog = bpf_xdp_link_update,
9662 };
9663 
9664 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9665 {
9666 	struct net *net = current->nsproxy->net_ns;
9667 	struct bpf_link_primer link_primer;
9668 	struct bpf_xdp_link *link;
9669 	struct net_device *dev;
9670 	int err, fd;
9671 
9672 	rtnl_lock();
9673 	dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9674 	if (!dev) {
9675 		rtnl_unlock();
9676 		return -EINVAL;
9677 	}
9678 
9679 	link = kzalloc(sizeof(*link), GFP_USER);
9680 	if (!link) {
9681 		err = -ENOMEM;
9682 		goto unlock;
9683 	}
9684 
9685 	bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9686 	link->dev = dev;
9687 	link->flags = attr->link_create.flags;
9688 
9689 	err = bpf_link_prime(&link->link, &link_primer);
9690 	if (err) {
9691 		kfree(link);
9692 		goto unlock;
9693 	}
9694 
9695 	err = dev_xdp_attach_link(dev, NULL, link);
9696 	rtnl_unlock();
9697 
9698 	if (err) {
9699 		link->dev = NULL;
9700 		bpf_link_cleanup(&link_primer);
9701 		goto out_put_dev;
9702 	}
9703 
9704 	fd = bpf_link_settle(&link_primer);
9705 	/* the link itself doesn't hold dev's refcnt, to avoid complicating shutdown */
9706 	dev_put(dev);
9707 	return fd;
9708 
9709 unlock:
9710 	rtnl_unlock();
9711 
9712 out_put_dev:
9713 	dev_put(dev);
9714 	return err;
9715 }
9716 
9717 /**
9718  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
9719  *	@dev: device
9720  *	@extack: netlink extended ack
9721  *	@fd: new program fd or negative value to clear
9722  *	@expected_fd: old program fd that userspace expects to replace or clear
9723  *	@flags: xdp-related flags
9724  *
9725  *	Set or clear a bpf program for a device
9726  */
9727 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9728 		      int fd, int expected_fd, u32 flags)
9729 {
9730 	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9731 	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9732 	int err;
9733 
9734 	ASSERT_RTNL();
9735 
9736 	if (fd >= 0) {
9737 		new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9738 						 mode != XDP_MODE_SKB);
9739 		if (IS_ERR(new_prog))
9740 			return PTR_ERR(new_prog);
9741 	}
9742 
9743 	if (expected_fd >= 0) {
9744 		old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9745 						 mode != XDP_MODE_SKB);
9746 		if (IS_ERR(old_prog)) {
9747 			err = PTR_ERR(old_prog);
9748 			old_prog = NULL;
9749 			goto err_out;
9750 		}
9751 	}
9752 
9753 	err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9754 
9755 err_out:
9756 	if (err && new_prog)
9757 		bpf_prog_put(new_prog);
9758 	if (old_prog)
9759 		bpf_prog_put(old_prog);
9760 	return err;
9761 }
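
/*
 * Illustrative sketch (editorial addition): how a caller such as the
 * rtnetlink IFLA_XDP handler drives this helper. A negative fd clears the
 * program; pairing expected_fd with XDP_FLAGS_REPLACE makes the swap fail
 * unless the currently attached program matches. Caller holds RTNL.
 */
static int __maybe_unused example_install_generic_xdp(struct net_device *dev,
						      int prog_fd)
{
	return dev_change_xdp_fd(dev, NULL, prog_fd, -1, XDP_FLAGS_SKB_MODE);
}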
9762 
9763 /**
9764  *	dev_new_index	-	allocate an ifindex
9765  *	@net: the applicable net namespace
9766  *
9767  *	Returns a suitable unique value for a new device interface
9768  *	number.  The caller must hold the rtnl semaphore or the
9769  *	dev_base_lock to be sure it remains unique.
9770  */
9771 static int dev_new_index(struct net *net)
9772 {
9773 	int ifindex = net->ifindex;
9774 
9775 	for (;;) {
9776 		if (++ifindex <= 0)
9777 			ifindex = 1;
9778 		if (!__dev_get_by_index(net, ifindex))
9779 			return net->ifindex = ifindex;
9780 	}
9781 }
9782 
9783 /* Delayed registration/unregistration */
9784 static LIST_HEAD(net_todo_list);
9785 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9786 
9787 static void net_set_todo(struct net_device *dev)
9788 {
9789 	list_add_tail(&dev->todo_list, &net_todo_list);
9790 	dev_net(dev)->dev_unreg_count++;
9791 }
9792 
9793 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9794 	struct net_device *upper, netdev_features_t features)
9795 {
9796 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9797 	netdev_features_t feature;
9798 	int feature_bit;
9799 
9800 	for_each_netdev_feature(upper_disables, feature_bit) {
9801 		feature = __NETIF_F_BIT(feature_bit);
9802 		if (!(upper->wanted_features & feature)
9803 		    && (features & feature)) {
9804 			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9805 				   &feature, upper->name);
9806 			features &= ~feature;
9807 		}
9808 	}
9809 
9810 	return features;
9811 }
9812 
9813 static void netdev_sync_lower_features(struct net_device *upper,
9814 	struct net_device *lower, netdev_features_t features)
9815 {
9816 	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9817 	netdev_features_t feature;
9818 	int feature_bit;
9819 
9820 	for_each_netdev_feature(upper_disables, feature_bit) {
9821 		feature = __NETIF_F_BIT(feature_bit);
9822 		if (!(features & feature) && (lower->features & feature)) {
9823 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9824 				   &feature, lower->name);
9825 			lower->wanted_features &= ~feature;
9826 			__netdev_update_features(lower);
9827 
9828 			if (unlikely(lower->features & feature))
9829 				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9830 					    &feature, lower->name);
9831 			else
9832 				netdev_features_change(lower);
9833 		}
9834 	}
9835 }
9836 
9837 static netdev_features_t netdev_fix_features(struct net_device *dev,
9838 	netdev_features_t features)
9839 {
9840 	/* Fix illegal checksum combinations */
9841 	if ((features & NETIF_F_HW_CSUM) &&
9842 	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9843 		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9844 		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9845 	}
9846 
9847 	/* TSO requires that SG is present as well. */
9848 	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9849 		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9850 		features &= ~NETIF_F_ALL_TSO;
9851 	}
9852 
9853 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9854 					!(features & NETIF_F_IP_CSUM)) {
9855 		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9856 		features &= ~NETIF_F_TSO;
9857 		features &= ~NETIF_F_TSO_ECN;
9858 	}
9859 
9860 	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9861 					 !(features & NETIF_F_IPV6_CSUM)) {
9862 		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9863 		features &= ~NETIF_F_TSO6;
9864 	}
9865 
9866 	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9867 	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9868 		features &= ~NETIF_F_TSO_MANGLEID;
9869 
9870 	/* TSO ECN requires that TSO is present as well. */
9871 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9872 		features &= ~NETIF_F_TSO_ECN;
9873 
9874 	/* Software GSO depends on SG. */
9875 	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9876 		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9877 		features &= ~NETIF_F_GSO;
9878 	}
9879 
9880 	/* GSO partial features require GSO partial be set */
9881 	if ((features & dev->gso_partial_features) &&
9882 	    !(features & NETIF_F_GSO_PARTIAL)) {
9883 		netdev_dbg(dev,
9884 			   "Dropping partially supported GSO features since no GSO partial.\n");
9885 		features &= ~dev->gso_partial_features;
9886 	}
9887 
9888 	if (!(features & NETIF_F_RXCSUM)) {
9889 		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9890 		 * successfully merged by hardware must also have the
9891 		 * checksum verified by hardware.  If the user does not
9892 		 * want to enable RXCSUM, logically, we should disable GRO_HW.
9893 		 */
9894 		if (features & NETIF_F_GRO_HW) {
9895 			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9896 			features &= ~NETIF_F_GRO_HW;
9897 		}
9898 	}
9899 
9900 	/* LRO/HW-GRO features cannot be combined with RX-FCS */
9901 	if (features & NETIF_F_RXFCS) {
9902 		if (features & NETIF_F_LRO) {
9903 			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9904 			features &= ~NETIF_F_LRO;
9905 		}
9906 
9907 		if (features & NETIF_F_GRO_HW) {
9908 			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9909 			features &= ~NETIF_F_GRO_HW;
9910 		}
9911 	}
9912 
9913 	if (features & NETIF_F_HW_TLS_TX) {
9914 		bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
9915 			(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9916 		bool hw_csum = features & NETIF_F_HW_CSUM;
9917 
9918 		if (!ip_csum && !hw_csum) {
9919 			netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9920 			features &= ~NETIF_F_HW_TLS_TX;
9921 		}
9922 	}
9923 
9924 	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
9925 		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9926 		features &= ~NETIF_F_HW_TLS_RX;
9927 	}
9928 
9929 	return features;
9930 }
9931 
9932 int __netdev_update_features(struct net_device *dev)
9933 {
9934 	struct net_device *upper, *lower;
9935 	netdev_features_t features;
9936 	struct list_head *iter;
9937 	int err = -1;
9938 
9939 	ASSERT_RTNL();
9940 
9941 	features = netdev_get_wanted_features(dev);
9942 
9943 	if (dev->netdev_ops->ndo_fix_features)
9944 		features = dev->netdev_ops->ndo_fix_features(dev, features);
9945 
9946 	/* driver might be less strict about feature dependencies */
9947 	features = netdev_fix_features(dev, features);
9948 
9949 	/* some features can't be enabled if they're off on an upper device */
9950 	netdev_for_each_upper_dev_rcu(dev, upper, iter)
9951 		features = netdev_sync_upper_features(dev, upper, features);
9952 
9953 	if (dev->features == features)
9954 		goto sync_lower;
9955 
9956 	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9957 		&dev->features, &features);
9958 
9959 	if (dev->netdev_ops->ndo_set_features)
9960 		err = dev->netdev_ops->ndo_set_features(dev, features);
9961 	else
9962 		err = 0;
9963 
9964 	if (unlikely(err < 0)) {
9965 		netdev_err(dev,
9966 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
9967 			err, &features, &dev->features);
9968 		/* return non-0 since some features might have changed and
9969 		 * it's better to fire a spurious notification than miss it
9970 		 */
9971 		return -1;
9972 	}
9973 
9974 sync_lower:
9975 	/* some features must be disabled on lower devices when disabled
9976 	 * on an upper device (think: bonding master or bridge)
9977 	 */
9978 	netdev_for_each_lower_dev(dev, lower, iter)
9979 		netdev_sync_lower_features(dev, lower, features);
9980 
9981 	if (!err) {
9982 		netdev_features_t diff = features ^ dev->features;
9983 
9984 		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9985 			/* udp_tunnel_{get,drop}_rx_info both need
9986 			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9987 			 * device, or they won't do anything.
9988 			 * Thus we need to update dev->features
9989 			 * *before* calling udp_tunnel_get_rx_info,
9990 			 * but *after* calling udp_tunnel_drop_rx_info.
9991 			 */
9992 			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9993 				dev->features = features;
9994 				udp_tunnel_get_rx_info(dev);
9995 			} else {
9996 				udp_tunnel_drop_rx_info(dev);
9997 			}
9998 		}
9999 
10000 		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
10001 			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
10002 				dev->features = features;
10003 				err |= vlan_get_rx_ctag_filter_info(dev);
10004 			} else {
10005 				vlan_drop_rx_ctag_filter_info(dev);
10006 			}
10007 		}
10008 
10009 		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
10010 			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
10011 				dev->features = features;
10012 				err |= vlan_get_rx_stag_filter_info(dev);
10013 			} else {
10014 				vlan_drop_rx_stag_filter_info(dev);
10015 			}
10016 		}
10017 
10018 		dev->features = features;
10019 	}
10020 
10021 	return err < 0 ? 0 : 1;
10022 }
10023 
10024 /**
10025  *	netdev_update_features - recalculate device features
10026  *	@dev: the device to check
10027  *
10028  *	Recalculate dev->features set and send notifications if it
10029  *	has changed. Should be called after driver or hardware dependent
10030  *	conditions might have changed that influence the features.
10031  */
10032 void netdev_update_features(struct net_device *dev)
10033 {
10034 	if (__netdev_update_features(dev))
10035 		netdev_features_change(dev);
10036 }
10037 EXPORT_SYMBOL(netdev_update_features);
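
/*
 * Illustrative sketch (editorial addition): a hypothetical driver that
 * discovers at runtime its checksum engine is unusable. Dropping the bit
 * from hw_features and calling netdev_update_features() funnels the
 * change through ndo_fix_features/netdev_fix_features and notifies
 * userspace only if the effective feature set actually changed.
 */
static void __maybe_unused example_disable_hw_csum(struct net_device *dev)
{
	ASSERT_RTNL();

	dev->hw_features &= ~NETIF_F_HW_CSUM;
	netdev_update_features(dev);
}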
10038 
10039 /**
10040  *	netdev_change_features - recalculate device features
10041  *	@dev: the device to check
10042  *
10043  *	Recalculate dev->features set and send notifications even
10044  *	if they have not changed. Should be called instead of
10045  *	netdev_update_features() if also dev->vlan_features might
10046  *	have changed to allow the changes to be propagated to stacked
10047  *	VLAN devices.
10048  */
10049 void netdev_change_features(struct net_device *dev)
10050 {
10051 	__netdev_update_features(dev);
10052 	netdev_features_change(dev);
10053 }
10054 EXPORT_SYMBOL(netdev_change_features);
10055 
10056 /**
10057  *	netif_stacked_transfer_operstate -	transfer operstate
10058  *	@rootdev: the root or lower level device to transfer state from
10059  *	@dev: the device to transfer operstate to
10060  *
10061  *	Transfer operational state from root to device. This is normally
10062  *	called when a stacking relationship exists between the root
10063  *	device and the device (a leaf device).
10064  */
10065 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
10066 					struct net_device *dev)
10067 {
10068 	if (rootdev->operstate == IF_OPER_DORMANT)
10069 		netif_dormant_on(dev);
10070 	else
10071 		netif_dormant_off(dev);
10072 
10073 	if (rootdev->operstate == IF_OPER_TESTING)
10074 		netif_testing_on(dev);
10075 	else
10076 		netif_testing_off(dev);
10077 
10078 	if (netif_carrier_ok(rootdev))
10079 		netif_carrier_on(dev);
10080 	else
10081 		netif_carrier_off(dev);
10082 }
10083 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
10084 
10085 static int netif_alloc_rx_queues(struct net_device *dev)
10086 {
10087 	unsigned int i, count = dev->num_rx_queues;
10088 	struct netdev_rx_queue *rx;
10089 	size_t sz = count * sizeof(*rx);
10090 	int err = 0;
10091 
10092 	BUG_ON(count < 1);
10093 
10094 	rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10095 	if (!rx)
10096 		return -ENOMEM;
10097 
10098 	dev->_rx = rx;
10099 
10100 	for (i = 0; i < count; i++) {
10101 		rx[i].dev = dev;
10102 
10103 		/* XDP RX-queue setup */
10104 		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
10105 		if (err < 0)
10106 			goto err_rxq_info;
10107 	}
10108 	return 0;
10109 
10110 err_rxq_info:
10111 	/* Rollback successful reg's and free other resources */
10112 	while (i--)
10113 		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
10114 	kvfree(dev->_rx);
10115 	dev->_rx = NULL;
10116 	return err;
10117 }
10118 
10119 static void netif_free_rx_queues(struct net_device *dev)
10120 {
10121 	unsigned int i, count = dev->num_rx_queues;
10122 
10123 	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
10124 	if (!dev->_rx)
10125 		return;
10126 
10127 	for (i = 0; i < count; i++)
10128 		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
10129 
10130 	kvfree(dev->_rx);
10131 }
10132 
10133 static void netdev_init_one_queue(struct net_device *dev,
10134 				  struct netdev_queue *queue, void *_unused)
10135 {
10136 	/* Initialize queue lock */
10137 	spin_lock_init(&queue->_xmit_lock);
10138 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10139 	queue->xmit_lock_owner = -1;
10140 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
10141 	queue->dev = dev;
10142 #ifdef CONFIG_BQL
10143 	dql_init(&queue->dql, HZ);
10144 #endif
10145 }
10146 
10147 static void netif_free_tx_queues(struct net_device *dev)
10148 {
10149 	kvfree(dev->_tx);
10150 }
10151 
10152 static int netif_alloc_netdev_queues(struct net_device *dev)
10153 {
10154 	unsigned int count = dev->num_tx_queues;
10155 	struct netdev_queue *tx;
10156 	size_t sz = count * sizeof(*tx);
10157 
10158 	if (count < 1 || count > 0xffff)
10159 		return -EINVAL;
10160 
10161 	tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10162 	if (!tx)
10163 		return -ENOMEM;
10164 
10165 	dev->_tx = tx;
10166 
10167 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
10168 	spin_lock_init(&dev->tx_global_lock);
10169 
10170 	return 0;
10171 }
10172 
10173 void netif_tx_stop_all_queues(struct net_device *dev)
10174 {
10175 	unsigned int i;
10176 
10177 	for (i = 0; i < dev->num_tx_queues; i++) {
10178 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
10179 
10180 		netif_tx_stop_queue(txq);
10181 	}
10182 }
10183 EXPORT_SYMBOL(netif_tx_stop_all_queues);
10184 
10185 /**
10186  *	register_netdevice	- register a network device
10187  *	@dev: device to register
10188  *
10189  *	Take a completed network device structure and add it to the kernel
10190  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10191  *	chain. 0 is returned on success. A negative errno code is returned
10192  *	on a failure to set up the device, or if the name is a duplicate.
10193  *
10194  *	Callers must hold the rtnl semaphore. You may want
10195  *	register_netdev() instead of this.
10196  *
10197  *	BUGS:
10198  *	The locking appears insufficient to guarantee two parallel registers
10199  *	will not get the same name.
10200  */
10201 
10202 int register_netdevice(struct net_device *dev)
10203 {
10204 	int ret;
10205 	struct net *net = dev_net(dev);
10206 
10207 	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
10208 		     NETDEV_FEATURE_COUNT);
10209 	BUG_ON(dev_boot_phase);
10210 	ASSERT_RTNL();
10211 
10212 	might_sleep();
10213 
10214 	/* When net_devices are persistent, this will be fatal. */
10215 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
10216 	BUG_ON(!net);
10217 
10218 	ret = ethtool_check_ops(dev->ethtool_ops);
10219 	if (ret)
10220 		return ret;
10221 
10222 	spin_lock_init(&dev->addr_list_lock);
10223 	netdev_set_addr_lockdep_class(dev);
10224 
10225 	ret = dev_get_valid_name(net, dev, dev->name);
10226 	if (ret < 0)
10227 		goto out;
10228 
10229 	ret = -ENOMEM;
10230 	dev->name_node = netdev_name_node_head_alloc(dev);
10231 	if (!dev->name_node)
10232 		goto out;
10233 
10234 	/* Init, if this function is available */
10235 	if (dev->netdev_ops->ndo_init) {
10236 		ret = dev->netdev_ops->ndo_init(dev);
10237 		if (ret) {
10238 			if (ret > 0)
10239 				ret = -EIO;
10240 			goto err_free_name;
10241 		}
10242 	}
10243 
10244 	if (((dev->hw_features | dev->features) &
10245 	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
10246 	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10247 	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10248 		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10249 		ret = -EINVAL;
10250 		goto err_uninit;
10251 	}
10252 
10253 	ret = -EBUSY;
10254 	if (!dev->ifindex)
10255 		dev->ifindex = dev_new_index(net);
10256 	else if (__dev_get_by_index(net, dev->ifindex))
10257 		goto err_uninit;
10258 
10259 	/* Transfer changeable features to wanted_features and enable
10260 	 * software offloads (GSO and GRO).
10261 	 */
10262 	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10263 	dev->features |= NETIF_F_SOFT_FEATURES;
10264 
10265 	if (dev->udp_tunnel_nic_info) {
10266 		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10267 		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10268 	}
10269 
10270 	dev->wanted_features = dev->features & dev->hw_features;
10271 
10272 	if (!(dev->flags & IFF_LOOPBACK))
10273 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
10274 
10275 	/* If IPv4 TCP segmentation offload is supported we should also
10276 	 * allow the device to enable segmenting the frame with the option
10277 	 * of ignoring a static IP ID value.  This doesn't enable the
10278 	 * feature itself but allows the user to enable it later.
10279 	 */
10280 	if (dev->hw_features & NETIF_F_TSO)
10281 		dev->hw_features |= NETIF_F_TSO_MANGLEID;
10282 	if (dev->vlan_features & NETIF_F_TSO)
10283 		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10284 	if (dev->mpls_features & NETIF_F_TSO)
10285 		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10286 	if (dev->hw_enc_features & NETIF_F_TSO)
10287 		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10288 
10289 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
10290 	 */
10291 	dev->vlan_features |= NETIF_F_HIGHDMA;
10292 
10293 	/* Make NETIF_F_SG inheritable to tunnel devices.
10294 	 */
10295 	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10296 
10297 	/* Make NETIF_F_SG inheritable to MPLS.
10298 	 */
10299 	dev->mpls_features |= NETIF_F_SG;
10300 
10301 	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
10302 	ret = notifier_to_errno(ret);
10303 	if (ret)
10304 		goto err_uninit;
10305 
10306 	ret = netdev_register_kobject(dev);
10307 	if (ret) {
10308 		dev->reg_state = NETREG_UNREGISTERED;
10309 		goto err_uninit;
10310 	}
10311 	dev->reg_state = NETREG_REGISTERED;
10312 
10313 	__netdev_update_features(dev);
10314 
10315 	/*
10316 	 *	Default initial state at registration is that the
10317 	 *	device is present.
10318 	 */
10319 
10320 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10321 
10322 	linkwatch_init_dev(dev);
10323 
10324 	dev_init_scheduler(dev);
10325 	dev_hold(dev);
10326 	list_netdevice(dev);
10327 	add_device_randomness(dev->dev_addr, dev->addr_len);
10328 
10329 	/* If the device has permanent device address, driver should
10330 	 * set dev_addr and also addr_assign_type should be set to
10331 	 * NET_ADDR_PERM (default value).
10332 	 */
10333 	if (dev->addr_assign_type == NET_ADDR_PERM)
10334 		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10335 
10336 	/* Notify protocols, that a new device appeared. */
10337 	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10338 	ret = notifier_to_errno(ret);
10339 	if (ret) {
10340 		/* Expect explicit free_netdev() on failure */
10341 		dev->needs_free_netdev = false;
10342 		unregister_netdevice_queue(dev, NULL);
10343 		goto out;
10344 	}
10345 	/*
10346 	 *	Prevent userspace races by waiting until the network
10347 	 *	device is fully setup before sending notifications.
10348 	 */
10349 	if (!dev->rtnl_link_ops ||
10350 	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10351 		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
10352 
10353 out:
10354 	return ret;
10355 
10356 err_uninit:
10357 	if (dev->netdev_ops->ndo_uninit)
10358 		dev->netdev_ops->ndo_uninit(dev);
10359 	if (dev->priv_destructor)
10360 		dev->priv_destructor(dev);
10361 err_free_name:
10362 	netdev_name_node_free(dev->name_node);
10363 	goto out;
10364 }
10365 EXPORT_SYMBOL(register_netdevice);
10366 
10367 /**
10368  *	init_dummy_netdev	- init a dummy network device for NAPI
10369  *	@dev: device to init
10370  *
10371  *	This takes a network device structure and initializes the minimum
10372  *	set of fields so it can be used to schedule NAPI polls without
10373  *	registering a full-blown interface. This is to be used by drivers
10374  *	that need to tie several hardware interfaces to a single NAPI
10375  *	poll scheduler due to HW limitations.
10376  */
10377 int init_dummy_netdev(struct net_device *dev)
10378 {
10379 	/* Clear everything. Note we don't initialize spinlocks
10380 	 * as they aren't supposed to be taken by any of the
10381 	 * NAPI code and this dummy netdev is supposed to be
10382 	 * only ever used for NAPI polls.
10383 	 */
10384 	memset(dev, 0, sizeof(struct net_device));
10385 
10386 	/* make sure we BUG if trying to hit standard
10387 	 * register/unregister code path
10388 	 */
10389 	dev->reg_state = NETREG_DUMMY;
10390 
10391 	/* NAPI wants this */
10392 	INIT_LIST_HEAD(&dev->napi_list);
10393 
10394 	/* a dummy interface is started by default */
10395 	set_bit(__LINK_STATE_PRESENT, &dev->state);
10396 	set_bit(__LINK_STATE_START, &dev->state);
10397 
10398 	/* napi_busy_loop stats accounting wants this */
10399 	dev_net_set(dev, &init_net);
10400 
10401 	/* Note: We don't allocate pcpu_refcnt for dummy devices,
10402 	 * because users of this 'device' don't need to change
10403 	 * its refcount.
10404 	 */
10405 
10406 	return 0;
10407 }
10408 EXPORT_SYMBOL_GPL(init_dummy_netdev);
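
/* Editor's example, a minimal sketch (not part of the original file):
 * a driver that must tie several hardware channels to one NAPI poll
 * context can back its napi_struct with a dummy netdev. The names
 * my_adapter_init_napi and my_poll are hypothetical.
 */
static inline void my_adapter_init_napi(struct net_device *napi_dev,
					struct napi_struct *napi,
					int (*my_poll)(struct napi_struct *, int))
{
	init_dummy_netdev(napi_dev);	/* minimal init, never registered */
	netif_napi_add(napi_dev, napi, my_poll, NAPI_POLL_WEIGHT);
	napi_enable(napi);
}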
10409 
10410 
10411 /**
10412  *	register_netdev	- register a network device
10413  *	@dev: device to register
10414  *
10415  *	Take a completed network device structure and add it to the kernel
10416  *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10417  *	chain. 0 is returned on success. A negative errno code is returned
10418  *	on a failure to set up the device, or if the name is a duplicate.
10419  *
10420  *	This is a wrapper around register_netdevice that takes the rtnl semaphore
10421  *	and expands the device name if you passed a format string to
10422  *	alloc_netdev.
10423  */
10424 int register_netdev(struct net_device *dev)
10425 {
10426 	int err;
10427 
10428 	if (rtnl_lock_killable())
10429 		return -EINTR;
10430 	err = register_netdevice(dev);
10431 	rtnl_unlock();
10432 	return err;
10433 }
10434 EXPORT_SYMBOL(register_netdev);
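
/* Editor's example, a sketch with hypothetical names: the classic probe
 * sequence built on register_netdev(). alloc_etherdev() picks an
 * "eth%d" format name, which register_netdev() expands under the RTNL.
 */
static struct net_device *my_probe_netdev(void)
{
	struct net_device *dev;

	dev = alloc_etherdev(0);	/* no private area in this sketch */
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {	/* takes and releases the RTNL */
		free_netdev(dev);	/* unwind: never registered */
		return NULL;
	}
	return dev;
}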
10435 
10436 int netdev_refcnt_read(const struct net_device *dev)
10437 {
10438 #ifdef CONFIG_PCPU_DEV_REFCNT
10439 	int i, refcnt = 0;
10440 
10441 	for_each_possible_cpu(i)
10442 		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10443 	return refcnt;
10444 #else
10445 	return refcount_read(&dev->dev_refcnt);
10446 #endif
10447 }
10448 EXPORT_SYMBOL(netdev_refcnt_read);
10449 
10450 int netdev_unregister_timeout_secs __read_mostly = 10;
10451 
10452 #define WAIT_REFS_MIN_MSECS 1
10453 #define WAIT_REFS_MAX_MSECS 250
10454 /**
10455  * netdev_wait_allrefs - wait until all references are gone.
10456  * @dev: target net_device
10457  *
10458  * This is called when unregistering network devices.
10459  *
10460  * Any protocol or device that holds a reference should register
10461  * for netdevice notification, and clean up and put back the
10462  * reference when it receives an UNREGISTER event.
10463  * We can get stuck here if buggy protocols don't correctly
10464  * call dev_put.
10465  */
10466 static void netdev_wait_allrefs(struct net_device *dev)
10467 {
10468 	unsigned long rebroadcast_time, warning_time;
10469 	int wait = 0, refcnt;
10470 
10471 	linkwatch_forget_dev(dev);
10472 
10473 	rebroadcast_time = warning_time = jiffies;
10474 	refcnt = netdev_refcnt_read(dev);
10475 
10476 	while (refcnt != 1) {
10477 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
10478 			rtnl_lock();
10479 
10480 			/* Rebroadcast unregister notification */
10481 			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10482 
10483 			__rtnl_unlock();
10484 			rcu_barrier();
10485 			rtnl_lock();
10486 
10487 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
10488 				     &dev->state)) {
10489 				/* We must not have linkwatch events
10490 				 * pending on unregister. If this
10491 				 * happens, we simply run the queue
10492 				 * unscheduled, resulting in a noop
10493 				 * for this device.
10494 				 */
10495 				linkwatch_run_queue();
10496 			}
10497 
10498 			__rtnl_unlock();
10499 
10500 			rebroadcast_time = jiffies;
10501 		}
10502 
10503 		if (!wait) {
10504 			rcu_barrier();
10505 			wait = WAIT_REFS_MIN_MSECS;
10506 		} else {
10507 			msleep(wait);
10508 			wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
10509 		}
10510 
10511 		refcnt = netdev_refcnt_read(dev);
10512 
10513 		if (refcnt != 1 &&
10514 		    time_after(jiffies, warning_time +
10515 			       netdev_unregister_timeout_secs * HZ)) {
10516 			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10517 				 dev->name, refcnt);
10518 			warning_time = jiffies;
10519 		}
10520 	}
10521 }
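
/* Editor's example, a sketch with hypothetical names: the cooperative
 * side of the contract described above. A subsystem holding a long-lived
 * reference registers a notifier (register_netdevice_notifier()) and
 * drops the reference on NETDEV_UNREGISTER so netdev_wait_allrefs()
 * can make progress.
 */
static struct net_device *my_tracked_dev;	/* holds one dev_hold() */

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER && dev == my_tracked_dev) {
		my_tracked_dev = NULL;
		dev_put(dev);		/* give the reference back */
	}
	return NOTIFY_DONE;
}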
10522 
10523 /* The sequence is:
10524  *
10525  *	rtnl_lock();
10526  *	...
10527  *	register_netdevice(x1);
10528  *	register_netdevice(x2);
10529  *	...
10530  *	unregister_netdevice(y1);
10531  *	unregister_netdevice(y2);
10532  *      ...
10533  *	rtnl_unlock();
10534  *	free_netdev(y1);
10535  *	free_netdev(y2);
10536  *
10537  * We are invoked by rtnl_unlock().
10538  * This allows us to deal with problems:
10539  * 1) We can delete sysfs objects which invoke hotplug
10540  *    without deadlocking with linkwatch via keventd.
10541  * 2) Since we run with the RTNL semaphore not held, we can sleep
10542  *    safely in order to wait for the netdev refcnt to drop to zero.
10543  *
10544  * We must not return until all unregister events added during
10545  * the interval the lock was held have been completed.
10546  */
10547 void netdev_run_todo(void)
10548 {
10549 	struct list_head list;
10550 #ifdef CONFIG_LOCKDEP
10551 	struct list_head unlink_list;
10552 
10553 	list_replace_init(&net_unlink_list, &unlink_list);
10554 
10555 	while (!list_empty(&unlink_list)) {
10556 		struct net_device *dev = list_first_entry(&unlink_list,
10557 							  struct net_device,
10558 							  unlink_list);
10559 		list_del_init(&dev->unlink_list);
10560 		dev->nested_level = dev->lower_level - 1;
10561 	}
10562 #endif
10563 
10564 	/* Snapshot list, allow later requests */
10565 	list_replace_init(&net_todo_list, &list);
10566 
10567 	__rtnl_unlock();
10568 
10570 	/* Wait for rcu callbacks to finish before next phase */
10571 	if (!list_empty(&list))
10572 		rcu_barrier();
10573 
10574 	while (!list_empty(&list)) {
10575 		struct net_device *dev
10576 			= list_first_entry(&list, struct net_device, todo_list);
10577 		list_del(&dev->todo_list);
10578 
10579 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10580 			pr_err("network todo '%s' but state %d\n",
10581 			       dev->name, dev->reg_state);
10582 			dump_stack();
10583 			continue;
10584 		}
10585 
10586 		dev->reg_state = NETREG_UNREGISTERED;
10587 
10588 		netdev_wait_allrefs(dev);
10589 
10590 		/* paranoia */
10591 		BUG_ON(netdev_refcnt_read(dev) != 1);
10592 		BUG_ON(!list_empty(&dev->ptype_all));
10593 		BUG_ON(!list_empty(&dev->ptype_specific));
10594 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
10595 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10596 #if IS_ENABLED(CONFIG_DECNET)
10597 		WARN_ON(dev->dn_ptr);
10598 #endif
10599 		if (dev->priv_destructor)
10600 			dev->priv_destructor(dev);
10601 		if (dev->needs_free_netdev)
10602 			free_netdev(dev);
10603 
10604 		/* Report a network device has been unregistered */
10605 		rtnl_lock();
10606 		dev_net(dev)->dev_unreg_count--;
10607 		__rtnl_unlock();
10608 		wake_up(&netdev_unregistering_wq);
10609 
10610 		/* Free network device */
10611 		kobject_put(&dev->dev.kobj);
10612 	}
10613 }
10614 
10615 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
10616  * all the same fields in the same order as net_device_stats, with only
10617  * the type differing, but rtnl_link_stats64 may have additional fields
10618  * at the end for newer counters.
10619  */
10620 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
10621 			     const struct net_device_stats *netdev_stats)
10622 {
10623 #if BITS_PER_LONG == 64
10624 	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
10625 	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
10626 	/* zero out counters that only exist in rtnl_link_stats64 */
10627 	memset((char *)stats64 + sizeof(*netdev_stats), 0,
10628 	       sizeof(*stats64) - sizeof(*netdev_stats));
10629 #else
10630 	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
10631 	const unsigned long *src = (const unsigned long *)netdev_stats;
10632 	u64 *dst = (u64 *)stats64;
10633 
10634 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
10635 	for (i = 0; i < n; i++)
10636 		dst[i] = src[i];
10637 	/* zero out counters that only exist in rtnl_link_stats64 */
10638 	memset((char *)stats64 + n * sizeof(u64), 0,
10639 	       sizeof(*stats64) - n * sizeof(u64));
10640 #endif
10641 }
10642 EXPORT_SYMBOL(netdev_stats_to_stats64);
10643 
10644 /**
10645  *	dev_get_stats	- get network device statistics
10646  *	@dev: device to get statistics from
10647  *	@storage: place to store stats
10648  *
10649  *	Get network statistics from device. Return @storage.
10650  *	The device driver may provide its own method by setting
10651  *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
10652  *	otherwise the internal statistics structure is used.
10653  */
10654 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10655 					struct rtnl_link_stats64 *storage)
10656 {
10657 	const struct net_device_ops *ops = dev->netdev_ops;
10658 
10659 	if (ops->ndo_get_stats64) {
10660 		memset(storage, 0, sizeof(*storage));
10661 		ops->ndo_get_stats64(dev, storage);
10662 	} else if (ops->ndo_get_stats) {
10663 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10664 	} else {
10665 		netdev_stats_to_stats64(storage, &dev->stats);
10666 	}
10667 	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
10668 	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
10669 	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
10670 	return storage;
10671 }
10672 EXPORT_SYMBOL(dev_get_stats);
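
/* Editor's example, a minimal sketch (my_dev_total_packets is
 * hypothetical): reading totals through dev_get_stats(). The snapshot
 * is filled from ndo_get_stats64()/ndo_get_stats when the driver
 * provides one, or from dev->stats otherwise, so callers never need to
 * know which source a given driver uses.
 */
static u64 my_dev_total_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	return stats.rx_packets + stats.tx_packets;
}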
10673 
10674 /**
10675  *	dev_fetch_sw_netstats - get per-cpu network device statistics
10676  *	@s: place to store stats
10677  *	@netstats: per-cpu network stats to read from
10678  *
10679  *	Read per-cpu network statistics and populate the related fields in @s.
10680  */
10681 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10682 			   const struct pcpu_sw_netstats __percpu *netstats)
10683 {
10684 	int cpu;
10685 
10686 	for_each_possible_cpu(cpu) {
10687 		const struct pcpu_sw_netstats *stats;
10688 		struct pcpu_sw_netstats tmp;
10689 		unsigned int start;
10690 
10691 		stats = per_cpu_ptr(netstats, cpu);
10692 		do {
10693 			start = u64_stats_fetch_begin_irq(&stats->syncp);
10694 			tmp.rx_packets = stats->rx_packets;
10695 			tmp.rx_bytes   = stats->rx_bytes;
10696 			tmp.tx_packets = stats->tx_packets;
10697 			tmp.tx_bytes   = stats->tx_bytes;
10698 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
10699 
10700 		s->rx_packets += tmp.rx_packets;
10701 		s->rx_bytes   += tmp.rx_bytes;
10702 		s->tx_packets += tmp.tx_packets;
10703 		s->tx_bytes   += tmp.tx_bytes;
10704 	}
10705 }
10706 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
10707 
10708 /**
10709  *	dev_get_tstats64 - ndo_get_stats64 implementation
10710  *	@dev: device to get statistics from
10711  *	@s: place to store stats
10712  *
10713  *	Populate @s from dev->stats and dev->tstats. Can be used as
10714  *	ndo_get_stats64() callback.
10715  */
10716 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10717 {
10718 	netdev_stats_to_stats64(s, &dev->stats);
10719 	dev_fetch_sw_netstats(s, dev->tstats);
10720 }
10721 EXPORT_SYMBOL_GPL(dev_get_tstats64);
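
/* Editor's example, a sketch with hypothetical names: wiring dev->tstats
 * so dev_get_tstats64() can be plugged in directly as ndo_get_stats64().
 * The driver only allocates the per-cpu counters (and frees them with
 * free_percpu() on teardown); updates go through helpers such as
 * dev_sw_netstats_rx_add().
 */
static int my_tunnel_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static const struct net_device_ops my_tunnel_netdev_ops = {
	.ndo_init	 = my_tunnel_init,
	.ndo_get_stats64 = dev_get_tstats64,
};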
10722 
10723 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10724 {
10725 	struct netdev_queue *queue = dev_ingress_queue(dev);
10726 
10727 #ifdef CONFIG_NET_CLS_ACT
10728 	if (queue)
10729 		return queue;
10730 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10731 	if (!queue)
10732 		return NULL;
10733 	netdev_init_one_queue(dev, queue, NULL);
10734 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10735 	queue->qdisc_sleeping = &noop_qdisc;
10736 	rcu_assign_pointer(dev->ingress_queue, queue);
10737 #endif
10738 	return queue;
10739 }
10740 
10741 static const struct ethtool_ops default_ethtool_ops;
10742 
10743 void netdev_set_default_ethtool_ops(struct net_device *dev,
10744 				    const struct ethtool_ops *ops)
10745 {
10746 	if (dev->ethtool_ops == &default_ethtool_ops)
10747 		dev->ethtool_ops = ops;
10748 }
10749 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
10750 
10751 void netdev_freemem(struct net_device *dev)
10752 {
10753 	char *addr = (char *)dev - dev->padded;
10754 
10755 	kvfree(addr);
10756 }
10757 
10758 /**
10759  * alloc_netdev_mqs - allocate network device
10760  * @sizeof_priv: size of private data to allocate space for
10761  * @name: device name format string
10762  * @name_assign_type: origin of device name
10763  * @setup: callback to initialize device
10764  * @txqs: the number of TX subqueues to allocate
10765  * @rxqs: the number of RX subqueues to allocate
10766  *
10767  * Allocates a struct net_device with private data area for driver use
10768  * and performs basic initialization.  Also allocates subqueue structs
10769  * for each queue on the device.
10770  */
10771 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
10772 		unsigned char name_assign_type,
10773 		void (*setup)(struct net_device *),
10774 		unsigned int txqs, unsigned int rxqs)
10775 {
10776 	struct net_device *dev;
10777 	unsigned int alloc_size;
10778 	struct net_device *p;
10779 
10780 	BUG_ON(strlen(name) >= sizeof(dev->name));
10781 
10782 	if (txqs < 1) {
10783 		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
10784 		return NULL;
10785 	}
10786 
10787 	if (rxqs < 1) {
10788 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
10789 		return NULL;
10790 	}
10791 
10792 	alloc_size = sizeof(struct net_device);
10793 	if (sizeof_priv) {
10794 		/* ensure 32-byte alignment of private area */
10795 		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
10796 		alloc_size += sizeof_priv;
10797 	}
10798 	/* ensure 32-byte alignment of whole construct */
10799 	alloc_size += NETDEV_ALIGN - 1;
10800 
10801 	p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10802 	if (!p)
10803 		return NULL;
10804 
10805 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
10806 	dev->padded = (char *)dev - (char *)p;
10807 
10808 #ifdef CONFIG_PCPU_DEV_REFCNT
10809 	dev->pcpu_refcnt = alloc_percpu(int);
10810 	if (!dev->pcpu_refcnt)
10811 		goto free_dev;
10812 	dev_hold(dev);
10813 #else
10814 	refcount_set(&dev->dev_refcnt, 1);
10815 #endif
10816 
10817 	if (dev_addr_init(dev))
10818 		goto free_pcpu;
10819 
10820 	dev_mc_init(dev);
10821 	dev_uc_init(dev);
10822 
10823 	dev_net_set(dev, &init_net);
10824 
10825 	dev->gso_max_size = GSO_MAX_SIZE;
10826 	dev->gso_max_segs = GSO_MAX_SEGS;
10827 	dev->upper_level = 1;
10828 	dev->lower_level = 1;
10829 #ifdef CONFIG_LOCKDEP
10830 	dev->nested_level = 0;
10831 	INIT_LIST_HEAD(&dev->unlink_list);
10832 #endif
10833 
10834 	INIT_LIST_HEAD(&dev->napi_list);
10835 	INIT_LIST_HEAD(&dev->unreg_list);
10836 	INIT_LIST_HEAD(&dev->close_list);
10837 	INIT_LIST_HEAD(&dev->link_watch_list);
10838 	INIT_LIST_HEAD(&dev->adj_list.upper);
10839 	INIT_LIST_HEAD(&dev->adj_list.lower);
10840 	INIT_LIST_HEAD(&dev->ptype_all);
10841 	INIT_LIST_HEAD(&dev->ptype_specific);
10842 	INIT_LIST_HEAD(&dev->net_notifier_list);
10843 #ifdef CONFIG_NET_SCHED
10844 	hash_init(dev->qdisc_hash);
10845 #endif
10846 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10847 	setup(dev);
10848 
10849 	if (!dev->tx_queue_len) {
10850 		dev->priv_flags |= IFF_NO_QUEUE;
10851 		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10852 	}
10853 
10854 	dev->num_tx_queues = txqs;
10855 	dev->real_num_tx_queues = txqs;
10856 	if (netif_alloc_netdev_queues(dev))
10857 		goto free_all;
10858 
10859 	dev->num_rx_queues = rxqs;
10860 	dev->real_num_rx_queues = rxqs;
10861 	if (netif_alloc_rx_queues(dev))
10862 		goto free_all;
10863 
10864 	strcpy(dev->name, name);
10865 	dev->name_assign_type = name_assign_type;
10866 	dev->group = INIT_NETDEV_GROUP;
10867 	if (!dev->ethtool_ops)
10868 		dev->ethtool_ops = &default_ethtool_ops;
10869 
10870 	nf_hook_ingress_init(dev);
10871 
10872 	return dev;
10873 
10874 free_all:
10875 	free_netdev(dev);
10876 	return NULL;
10877 
10878 free_pcpu:
10879 #ifdef CONFIG_PCPU_DEV_REFCNT
10880 	free_percpu(dev->pcpu_refcnt);
10881 free_dev:
10882 #endif
10883 	netdev_freemem(dev);
10884 	return NULL;
10885 }
10886 EXPORT_SYMBOL(alloc_netdev_mqs);
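
/* Editor's example, a sketch with hypothetical names: allocating a 4x4
 * multiqueue Ethernet-style device. ether_setup() serves as the
 * type-specific @setup callback and netdev_priv() reaches the
 * 32-byte-aligned private area requested via @sizeof_priv.
 */
struct my_priv {
	int id;
};

static struct net_device *my_alloc(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mqs(sizeof(struct my_priv), "myeth%d",
			       NET_NAME_ENUM, ether_setup, 4, 4);
	if (dev) {
		struct my_priv *priv = netdev_priv(dev);

		priv->id = 0;
	}
	return dev;
}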
10887 
10888 /**
10889  * free_netdev - free network device
10890  * @dev: device
10891  *
10892  * This function does the last stage of destroying an allocated device
10893  * interface. The reference to the device object is released. If this
10894  * is the last reference then it will be freed. Must be called in process
10895  * context.
10896  */
10897 void free_netdev(struct net_device *dev)
10898 {
10899 	struct napi_struct *p, *n;
10900 
10901 	might_sleep();
10902 
10903 	/* When called immediately after register_netdevice() failed the unwind
10904 	 * handling may still be dismantling the device. Handle that case by
10905 	 * deferring the free.
10906 	 */
10907 	if (dev->reg_state == NETREG_UNREGISTERING) {
10908 		ASSERT_RTNL();
10909 		dev->needs_free_netdev = true;
10910 		return;
10911 	}
10912 
10913 	netif_free_tx_queues(dev);
10914 	netif_free_rx_queues(dev);
10915 
10916 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10917 
10918 	/* Flush device addresses */
10919 	dev_addr_flush(dev);
10920 
10921 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10922 		netif_napi_del(p);
10923 
10924 #ifdef CONFIG_PCPU_DEV_REFCNT
10925 	free_percpu(dev->pcpu_refcnt);
10926 	dev->pcpu_refcnt = NULL;
10927 #endif
10928 	free_percpu(dev->xdp_bulkq);
10929 	dev->xdp_bulkq = NULL;
10930 
10931 	/*  Compatibility with error handling in drivers */
10932 	if (dev->reg_state == NETREG_UNINITIALIZED) {
10933 		netdev_freemem(dev);
10934 		return;
10935 	}
10936 
10937 	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10938 	dev->reg_state = NETREG_RELEASED;
10939 
10940 	/* will free via device release */
10941 	put_device(&dev->dev);
10942 }
10943 EXPORT_SYMBOL(free_netdev);
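
/* Editor's note, a sketch of the pairing rules above (my_remove is
 * hypothetical): a device that never made it through registration is
 * simply freed, while a registered device must be unregistered first;
 * free_netdev() then runs as the last stage from driver context.
 */
static void my_remove(struct net_device *dev)
{
	unregister_netdev(dev);	/* detach from the kernel tables */
	free_netdev(dev);	/* release the final reference */
}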
10944 
10945 /**
10946  *	synchronize_net -  Synchronize with packet receive processing
10947  *
10948  *	Wait for packets currently being received to be done.
10949  *	Does not block later packets from starting.
10950  */
10951 void synchronize_net(void)
10952 {
10953 	might_sleep();
10954 	if (rtnl_is_locked())
10955 		synchronize_rcu_expedited();
10956 	else
10957 		synchronize_rcu();
10958 }
10959 EXPORT_SYMBOL(synchronize_net);
10960 
10961 /**
10962  *	unregister_netdevice_queue - remove device from the kernel
10963  *	@dev: device
10964  *	@head: list
10965  *
10966  *	This function shuts down a device interface and removes it
10967  *	from the kernel tables.
10968  *	If @head is not NULL, the device is queued to be unregistered later.
10969  *
10970  *	Callers must hold the rtnl semaphore.  You may want
10971  *	unregister_netdev() instead of this.
10972  */
10973 
10974 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10975 {
10976 	ASSERT_RTNL();
10977 
10978 	if (head) {
10979 		list_move_tail(&dev->unreg_list, head);
10980 	} else {
10981 		LIST_HEAD(single);
10982 
10983 		list_add(&dev->unreg_list, &single);
10984 		unregister_netdevice_many(&single);
10985 	}
10986 }
10987 EXPORT_SYMBOL(unregister_netdevice_queue);
10988 
10989 /**
10990  *	unregister_netdevice_many - unregister many devices
10991  *	@head: list of devices
10992  *
10993  *  Note: As most callers use a stack-allocated list_head,
10994  *  we force a list_del() to make sure the stack won't be corrupted later.
10995  */
10996 void unregister_netdevice_many(struct list_head *head)
10997 {
10998 	struct net_device *dev, *tmp;
10999 	LIST_HEAD(close_head);
11000 
11001 	BUG_ON(dev_boot_phase);
11002 	ASSERT_RTNL();
11003 
11004 	if (list_empty(head))
11005 		return;
11006 
11007 	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
11008 		/* Some devices get here without ever having been
11009 		 * registered, as part of initialization unwind. Remove
11010 		 * those devices and proceed with the remaining ones.
11011 		 */
11012 		if (dev->reg_state == NETREG_UNINITIALIZED) {
11013 			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
11014 				 dev->name, dev);
11015 
11016 			WARN_ON(1);
11017 			list_del(&dev->unreg_list);
11018 			continue;
11019 		}
11020 		dev->dismantle = true;
11021 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
11022 	}
11023 
11024 	/* If device is running, close it first. */
11025 	list_for_each_entry(dev, head, unreg_list)
11026 		list_add_tail(&dev->close_list, &close_head);
11027 	dev_close_many(&close_head, true);
11028 
11029 	list_for_each_entry(dev, head, unreg_list) {
11030 		/* And unlink it from device chain. */
11031 		unlist_netdevice(dev);
11032 
11033 		dev->reg_state = NETREG_UNREGISTERING;
11034 	}
11035 	flush_all_backlogs();
11036 
11037 	synchronize_net();
11038 
11039 	list_for_each_entry(dev, head, unreg_list) {
11040 		struct sk_buff *skb = NULL;
11041 
11042 		/* Shutdown queueing discipline. */
11043 		dev_shutdown(dev);
11044 
11045 		dev_xdp_uninstall(dev);
11046 
11047 		/* Notify protocols, that we are about to destroy
11048 		 * this device. They should clean all the things.
11049 		 */
11050 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11051 
11052 		if (!dev->rtnl_link_ops ||
11053 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
11054 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
11055 						     GFP_KERNEL, NULL, 0);
11056 
11057 		/*
11058 		 *	Flush the unicast and multicast chains
11059 		 */
11060 		dev_uc_flush(dev);
11061 		dev_mc_flush(dev);
11062 
11063 		netdev_name_node_alt_flush(dev);
11064 		netdev_name_node_free(dev->name_node);
11065 
11066 		if (dev->netdev_ops->ndo_uninit)
11067 			dev->netdev_ops->ndo_uninit(dev);
11068 
11069 		if (skb)
11070 			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
11071 
11072 		/* Notifier chain MUST detach us all upper devices. */
11073 		WARN_ON(netdev_has_any_upper_dev(dev));
11074 		WARN_ON(netdev_has_any_lower_dev(dev));
11075 
11076 		/* Remove entries from kobject tree */
11077 		netdev_unregister_kobject(dev);
11078 #ifdef CONFIG_XPS
11079 		/* Remove XPS queueing entries */
11080 		netif_reset_xps_queues_gt(dev, 0);
11081 #endif
11082 	}
11083 
11084 	synchronize_net();
11085 
11086 	list_for_each_entry(dev, head, unreg_list) {
11087 		dev_put(dev);
11088 		net_set_todo(dev);
11089 	}
11090 
11091 	list_del(head);
11092 }
11093 EXPORT_SYMBOL(unregister_netdevice_many);
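
/* Editor's example, a sketch (my_destroy_pair is hypothetical): batching
 * teardown of several devices under one RTNL hold, which amortizes the
 * synchronize_net() and notifier round trips above across the list.
 */
static void my_destroy_pair(struct net_device *a, struct net_device *b)
{
	LIST_HEAD(kill_list);

	rtnl_lock();
	unregister_netdevice_queue(a, &kill_list);
	unregister_netdevice_queue(b, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}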
11094 
11095 /**
11096  *	unregister_netdev - remove device from the kernel
11097  *	@dev: device
11098  *
11099  *	This function shuts down a device interface and removes it
11100  *	from the kernel tables.
11101  *
11102  *	This is just a wrapper for unregister_netdevice that takes
11103  *	the rtnl semaphore.  In general you want to use this and not
11104  *	unregister_netdevice.
11105  */
11106 void unregister_netdev(struct net_device *dev)
11107 {
11108 	rtnl_lock();
11109 	unregister_netdevice(dev);
11110 	rtnl_unlock();
11111 }
11112 EXPORT_SYMBOL(unregister_netdev);
11113 
11114 /**
11115  *	__dev_change_net_namespace - move device to a different network namespace
11116  *	@dev: device
11117  *	@net: network namespace
11118  *	@pat: If not NULL name pattern to try if the current device name
11119  *	      is already taken in the destination network namespace.
11120  *	@new_ifindex: If not zero, specifies device index in the target
11121  *	              namespace.
11122  *
11123  *	This function shuts down a device interface and moves it
11124  *	to a new network namespace. On success 0 is returned, on
11125  *	a failure a negative errno code is returned.
11126  *
11127  *	Callers must hold the rtnl semaphore.
11128  */
11129 
11130 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
11131 			       const char *pat, int new_ifindex)
11132 {
11133 	struct net *net_old = dev_net(dev);
11134 	int err, new_nsid;
11135 
11136 	ASSERT_RTNL();
11137 
11138 	/* Don't allow namespace local devices to be moved. */
11139 	err = -EINVAL;
11140 	if (dev->features & NETIF_F_NETNS_LOCAL)
11141 		goto out;
11142 
11143 	/* Ensure the device has been registered */
11144 	if (dev->reg_state != NETREG_REGISTERED)
11145 		goto out;
11146 
11147 	/* Get out if there is nothing to do */
11148 	err = 0;
11149 	if (net_eq(net_old, net))
11150 		goto out;
11151 
11152 	/* Pick the destination device name, and ensure
11153 	 * we can use it in the destination network namespace.
11154 	 */
11155 	err = -EEXIST;
11156 	if (__dev_get_by_name(net, dev->name)) {
11157 		/* We get here if we can't use the current device name */
11158 		if (!pat)
11159 			goto out;
11160 		err = dev_get_valid_name(net, dev, pat);
11161 		if (err < 0)
11162 			goto out;
11163 	}
11164 
11165 	/* Check that new_ifindex isn't used yet. */
11166 	err = -EBUSY;
11167 	if (new_ifindex && __dev_get_by_index(net, new_ifindex))
11168 		goto out;
11169 
11170 	/*
11171 	 * And now a mini version of register_netdevice() and unregister_netdevice().
11172 	 */
11173 
11174 	/* If device is running close it first. */
11175 	dev_close(dev);
11176 
11177 	/* And unlink it from device chain */
11178 	unlist_netdevice(dev);
11179 
11180 	synchronize_net();
11181 
11182 	/* Shutdown queueing discipline. */
11183 	dev_shutdown(dev);
11184 
11185 	/* Notify protocols, that we are about to destroy
11186 	 * this device. They should clean all the things.
11187 	 *
11188 	 * Note that dev->reg_state stays at NETREG_REGISTERED.
11189 	 * This is wanted because this way 8021q and macvlan know
11190 	 * the device is just moving and can keep their slaves up.
11191 	 */
11192 	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11193 	rcu_barrier();
11194 
11195 	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
11196 	/* If there is an ifindex conflict assign a new one */
11197 	if (!new_ifindex) {
11198 		if (__dev_get_by_index(net, dev->ifindex))
11199 			new_ifindex = dev_new_index(net);
11200 		else
11201 			new_ifindex = dev->ifindex;
11202 	}
11203 
11204 	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
11205 			    new_ifindex);
11206 
11207 	/*
11208 	 *	Flush the unicast and multicast chains
11209 	 */
11210 	dev_uc_flush(dev);
11211 	dev_mc_flush(dev);
11212 
11213 	/* Send a netdev-removed uevent to the old namespace */
11214 	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11215 	netdev_adjacent_del_links(dev);
11216 
11217 	/* Move per-net netdevice notifiers that are following the netdevice */
11218 	move_netdevice_notifiers_dev_net(dev, net);
11219 
11220 	/* Actually switch the network namespace */
11221 	dev_net_set(dev, net);
11222 	dev->ifindex = new_ifindex;
11223 
11224 	/* Send a netdev-add uevent to the new namespace */
11225 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11226 	netdev_adjacent_add_links(dev);
11227 
11228 	/* Fixup kobjects */
11229 	err = device_rename(&dev->dev, dev->name);
11230 	WARN_ON(err);
11231 
11232 	/* Adapt owner in case owning user namespace of target network
11233 	 * namespace is different from the original one.
11234 	 */
11235 	err = netdev_change_owner(dev, net_old, net);
11236 	WARN_ON(err);
11237 
11238 	/* Add the device back in the hashes */
11239 	list_netdevice(dev);
11240 
11241 	/* Notify protocols, that a new device appeared. */
11242 	call_netdevice_notifiers(NETDEV_REGISTER, dev);
11243 
11244 	/*
11245 	 *	Prevent userspace races by waiting until the network
11246 	 *	device is fully setup before sending notifications.
11247 	 */
11248 	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
11249 
11250 	synchronize_net();
11251 	err = 0;
11252 out:
11253 	return err;
11254 }
11255 EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
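
/* Editor's example, a sketch (my_move_to_init_net is hypothetical):
 * callers normally use the dev_change_net_namespace() wrapper, which
 * passes new_ifindex == 0 so a fresh index is only picked on conflict.
 * Moving a device back to init_net with an "eth%d" fallback name:
 */
static int my_move_to_init_net(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, &init_net, "eth%d");
	rtnl_unlock();
	return err;
}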
11256 
11257 static int dev_cpu_dead(unsigned int oldcpu)
11258 {
11259 	struct sk_buff **list_skb;
11260 	struct sk_buff *skb;
11261 	unsigned int cpu;
11262 	struct softnet_data *sd, *oldsd, *remsd = NULL;
11263 
11264 	local_irq_disable();
11265 	cpu = smp_processor_id();
11266 	sd = &per_cpu(softnet_data, cpu);
11267 	oldsd = &per_cpu(softnet_data, oldcpu);
11268 
11269 	/* Find end of our completion_queue. */
11270 	list_skb = &sd->completion_queue;
11271 	while (*list_skb)
11272 		list_skb = &(*list_skb)->next;
11273 	/* Append completion queue from offline CPU. */
11274 	*list_skb = oldsd->completion_queue;
11275 	oldsd->completion_queue = NULL;
11276 
11277 	/* Append output queue from offline CPU. */
11278 	if (oldsd->output_queue) {
11279 		*sd->output_queue_tailp = oldsd->output_queue;
11280 		sd->output_queue_tailp = oldsd->output_queue_tailp;
11281 		oldsd->output_queue = NULL;
11282 		oldsd->output_queue_tailp = &oldsd->output_queue;
11283 	}
11284 	/* Append NAPI poll list from offline CPU, with one exception:
11285 	 * process_backlog() must be called by cpu owning percpu backlog.
11286 	 * We properly handle process_queue & input_pkt_queue later.
11287 	 */
11288 	while (!list_empty(&oldsd->poll_list)) {
11289 		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11290 							    struct napi_struct,
11291 							    poll_list);
11292 
11293 		list_del_init(&napi->poll_list);
11294 		if (napi->poll == process_backlog)
11295 			napi->state = 0;
11296 		else
11297 			____napi_schedule(sd, napi);
11298 	}
11299 
11300 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
11301 	local_irq_enable();
11302 
11303 #ifdef CONFIG_RPS
11304 	remsd = oldsd->rps_ipi_list;
11305 	oldsd->rps_ipi_list = NULL;
11306 #endif
11307 	/* send out pending IPI's on offline CPU */
11308 	net_rps_send_ipi(remsd);
11309 
11310 	/* Process offline CPU's input_pkt_queue */
11311 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11312 		netif_rx_ni(skb);
11313 		input_queue_head_incr(oldsd);
11314 	}
11315 	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11316 		netif_rx_ni(skb);
11317 		input_queue_head_incr(oldsd);
11318 	}
11319 
11320 	return 0;
11321 }
11322 
11323 /**
11324  *	netdev_increment_features - increment feature set by one
11325  *	@all: current feature set
11326  *	@one: new feature set
11327  *	@mask: mask feature set
11328  *
11329  *	Computes a new feature set after adding a device with feature set
11330  *	@one to the master device with current feature set @all.  Will not
11331  *	enable anything that is off in @mask. Returns the new feature set.
11332  */
11333 netdev_features_t netdev_increment_features(netdev_features_t all,
11334 	netdev_features_t one, netdev_features_t mask)
11335 {
11336 	if (mask & NETIF_F_HW_CSUM)
11337 		mask |= NETIF_F_CSUM_MASK;
11338 	mask |= NETIF_F_VLAN_CHALLENGED;
11339 
11340 	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
11341 	all &= one | ~NETIF_F_ALL_FOR_ALL;
11342 
11343 	/* If one device supports hw checksumming, set for all. */
11344 	if (all & NETIF_F_HW_CSUM)
11345 		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
11346 
11347 	return all;
11348 }
11349 EXPORT_SYMBOL(netdev_increment_features);
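
/* Editor's example, a sketch modeled on how bonding-style masters use
 * the helper above (my_master_features is hypothetical): start from the
 * candidate mask and fold in each slave, so ONE_FOR_ALL bits accumulate
 * by OR while ALL_FOR_ALL bits survive only if every slave has them.
 */
static netdev_features_t my_master_features(const struct net_device *s1,
					    const struct net_device *s2,
					    netdev_features_t mask)
{
	netdev_features_t all = mask;	/* identity for the AND/OR folds */

	all = netdev_increment_features(all, s1->features, mask);
	all = netdev_increment_features(all, s2->features, mask);
	return all;
}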
11350 
11351 static struct hlist_head * __net_init netdev_create_hash(void)
11352 {
11353 	int i;
11354 	struct hlist_head *hash;
11355 
11356 	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
11357 	if (hash != NULL)
11358 		for (i = 0; i < NETDEV_HASHENTRIES; i++)
11359 			INIT_HLIST_HEAD(&hash[i]);
11360 
11361 	return hash;
11362 }
11363 
11364 /* Initialize per network namespace state */
11365 static int __net_init netdev_init(struct net *net)
11366 {
11367 	BUILD_BUG_ON(GRO_HASH_BUCKETS >
11368 		     8 * sizeof_field(struct napi_struct, gro_bitmask));
11369 
11370 	if (net != &init_net)
11371 		INIT_LIST_HEAD(&net->dev_base_head);
11372 
11373 	net->dev_name_head = netdev_create_hash();
11374 	if (net->dev_name_head == NULL)
11375 		goto err_name;
11376 
11377 	net->dev_index_head = netdev_create_hash();
11378 	if (net->dev_index_head == NULL)
11379 		goto err_idx;
11380 
11381 	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
11382 
11383 	return 0;
11384 
11385 err_idx:
11386 	kfree(net->dev_name_head);
11387 err_name:
11388 	return -ENOMEM;
11389 }
11390 
11391 /**
11392  *	netdev_drivername - network driver for the device
11393  *	@dev: network device
11394  *
11395  *	Determine network driver for device.
11396  */
11397 const char *netdev_drivername(const struct net_device *dev)
11398 {
11399 	const struct device_driver *driver;
11400 	const struct device *parent;
11401 	const char *empty = "";
11402 
11403 	parent = dev->dev.parent;
11404 	if (!parent)
11405 		return empty;
11406 
11407 	driver = parent->driver;
11408 	if (driver && driver->name)
11409 		return driver->name;
11410 	return empty;
11411 }
11412 
11413 static void __netdev_printk(const char *level, const struct net_device *dev,
11414 			    struct va_format *vaf)
11415 {
11416 	if (dev && dev->dev.parent) {
11417 		dev_printk_emit(level[1] - '0',
11418 				dev->dev.parent,
11419 				"%s %s %s%s: %pV",
11420 				dev_driver_string(dev->dev.parent),
11421 				dev_name(dev->dev.parent),
11422 				netdev_name(dev), netdev_reg_state(dev),
11423 				vaf);
11424 	} else if (dev) {
11425 		printk("%s%s%s: %pV",
11426 		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
11427 	} else {
11428 		printk("%s(NULL net_device): %pV", level, vaf);
11429 	}
11430 }
11431 
11432 void netdev_printk(const char *level, const struct net_device *dev,
11433 		   const char *format, ...)
11434 {
11435 	struct va_format vaf;
11436 	va_list args;
11437 
11438 	va_start(args, format);
11439 
11440 	vaf.fmt = format;
11441 	vaf.va = &args;
11442 
11443 	__netdev_printk(level, dev, &vaf);
11444 
11445 	va_end(args);
11446 }
11447 EXPORT_SYMBOL(netdev_printk);
11448 
11449 #define define_netdev_printk_level(func, level)			\
11450 void func(const struct net_device *dev, const char *fmt, ...)	\
11451 {								\
11452 	struct va_format vaf;					\
11453 	va_list args;						\
11454 								\
11455 	va_start(args, fmt);					\
11456 								\
11457 	vaf.fmt = fmt;						\
11458 	vaf.va = &args;						\
11459 								\
11460 	__netdev_printk(level, dev, &vaf);			\
11461 								\
11462 	va_end(args);						\
11463 }								\
11464 EXPORT_SYMBOL(func);
11465 
11466 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
11467 define_netdev_printk_level(netdev_alert, KERN_ALERT);
11468 define_netdev_printk_level(netdev_crit, KERN_CRIT);
11469 define_netdev_printk_level(netdev_err, KERN_ERR);
11470 define_netdev_printk_level(netdev_warn, KERN_WARNING);
11471 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
11472 define_netdev_printk_level(netdev_info, KERN_INFO);
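
/* Editor's note with a sketch: the helpers generated above prefix each
 * message with the driver, bus id and netdev name, so a driver logs a
 * link change as simply as:
 *
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 *
 * with netdev_warn(), netdev_err() etc. covering the other severities.
 */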
11473 
11474 static void __net_exit netdev_exit(struct net *net)
11475 {
11476 	kfree(net->dev_name_head);
11477 	kfree(net->dev_index_head);
11478 	if (net != &init_net)
11479 		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
11480 }
11481 
11482 static struct pernet_operations __net_initdata netdev_net_ops = {
11483 	.init = netdev_init,
11484 	.exit = netdev_exit,
11485 };
11486 
11487 static void __net_exit default_device_exit(struct net *net)
11488 {
11489 	struct net_device *dev, *aux;
11490 	/*
11491 	 * Push all migratable network devices back to the
11492 	 * initial network namespace
11493 	 */
11494 	rtnl_lock();
11495 	for_each_netdev_safe(net, dev, aux) {
11496 		int err;
11497 		char fb_name[IFNAMSIZ];
11498 
11499 		/* Ignore unmovable devices (e.g. loopback) */
11500 		if (dev->features & NETIF_F_NETNS_LOCAL)
11501 			continue;
11502 
11503 		/* Leave virtual devices for the generic cleanup */
11504 		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
11505 			continue;
11506 
11507 		/* Push remaining network devices to init_net */
11508 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
11509 		if (__dev_get_by_name(&init_net, fb_name))
11510 			snprintf(fb_name, IFNAMSIZ, "dev%%d");
11511 		err = dev_change_net_namespace(dev, &init_net, fb_name);
11512 		if (err) {
11513 			pr_emerg("%s: failed to move %s to init_net: %d\n",
11514 				 __func__, dev->name, err);
11515 			BUG();
11516 		}
11517 	}
11518 	rtnl_unlock();
11519 }
11520 
11521 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
11522 {
11523 	/* Return with the rtnl_lock held when there are no network
11524 	 * devices unregistering in any network namespace in net_list.
11525 	 */
11526 	struct net *net;
11527 	bool unregistering;
11528 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
11529 
11530 	add_wait_queue(&netdev_unregistering_wq, &wait);
11531 	for (;;) {
11532 		unregistering = false;
11533 		rtnl_lock();
11534 		list_for_each_entry(net, net_list, exit_list) {
11535 			if (net->dev_unreg_count > 0) {
11536 				unregistering = true;
11537 				break;
11538 			}
11539 		}
11540 		if (!unregistering)
11541 			break;
11542 		__rtnl_unlock();
11543 
11544 		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
11545 	}
11546 	remove_wait_queue(&netdev_unregistering_wq, &wait);
11547 }
11548 
11549 static void __net_exit default_device_exit_batch(struct list_head *net_list)
11550 {
11551 	/* At exit all network devices must be removed from a network
11552 	 * namespace.  Do this in the reverse order of registration.
11553 	 * Do this across as many network namespaces as possible to
11554 	 * improve batching efficiency.
11555 	 */
11556 	struct net_device *dev;
11557 	struct net *net;
11558 	LIST_HEAD(dev_kill_list);
11559 
11560 	/* To prevent network device cleanup code from dereferencing
11561 	 * loopback devices or network devices that have been freed
11562 	 * wait here for all pending unregistrations to complete
11563 	 * before unregistering the loopback device and allowing the
11564 	 * network namespace to be freed.
11565 	 *
11566 	 * The netdev todo list containing all network devices
11567 	 * unregistrations that happen in default_device_exit_batch
11568 	 * will run in the rtnl_unlock() at the end of
11569 	 * default_device_exit_batch.
11570 	 */
11571 	rtnl_lock_unregistering(net_list);
11572 	list_for_each_entry(net, net_list, exit_list) {
11573 		for_each_netdev_reverse(net, dev) {
11574 			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11575 				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11576 			else
11577 				unregister_netdevice_queue(dev, &dev_kill_list);
11578 		}
11579 	}
11580 	unregister_netdevice_many(&dev_kill_list);
11581 	rtnl_unlock();
11582 }
11583 
11584 static struct pernet_operations __net_initdata default_device_ops = {
11585 	.exit = default_device_exit,
11586 	.exit_batch = default_device_exit_batch,
11587 };
11588 
11589 /*
11590  *	Initialize the DEV module. At boot time this walks the device list and
11591  *	unhooks any devices that fail to initialise (normally hardware not
11592  *	present) and leaves us with a valid list of present and active devices.
11593  *
11594  */
11595 
11596 /*
11597  *       This is called single threaded during boot, so no need
11598  *       to take the rtnl semaphore.
11599  */
11600 static int __init net_dev_init(void)
11601 {
11602 	int i, rc = -ENOMEM;
11603 
11604 	BUG_ON(!dev_boot_phase);
11605 
11606 	if (dev_proc_init())
11607 		goto out;
11608 
11609 	if (netdev_kobject_init())
11610 		goto out;
11611 
11612 	INIT_LIST_HEAD(&ptype_all);
11613 	for (i = 0; i < PTYPE_HASH_SIZE; i++)
11614 		INIT_LIST_HEAD(&ptype_base[i]);
11615 
11616 	INIT_LIST_HEAD(&offload_base);
11617 
11618 	if (register_pernet_subsys(&netdev_net_ops))
11619 		goto out;
11620 
11621 	/*
11622 	 *	Initialise the packet receive queues.
11623 	 */
11624 
11625 	for_each_possible_cpu(i) {
11626 		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11627 		struct softnet_data *sd = &per_cpu(softnet_data, i);
11628 
11629 		INIT_WORK(flush, flush_backlog);
11630 
11631 		skb_queue_head_init(&sd->input_pkt_queue);
11632 		skb_queue_head_init(&sd->process_queue);
11633 #ifdef CONFIG_XFRM_OFFLOAD
11634 		skb_queue_head_init(&sd->xfrm_backlog);
11635 #endif
11636 		INIT_LIST_HEAD(&sd->poll_list);
11637 		sd->output_queue_tailp = &sd->output_queue;
11638 #ifdef CONFIG_RPS
11639 		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11640 		sd->cpu = i;
11641 #endif
11642 
11643 		init_gro_hash(&sd->backlog);
11644 		sd->backlog.poll = process_backlog;
11645 		sd->backlog.weight = weight_p;
11646 	}
11647 
11648 	dev_boot_phase = 0;
11649 
11650 	/* The loopback device is special: if any other network device
11651 	 * is present in a network namespace, the loopback device must
11652 	 * be present too. Since we now dynamically allocate and free the
11653 	 * loopback device, ensure this invariant is maintained by
11654 	 * keeping the loopback device as the first device on the
11655 	 * list of network devices, so that it is the first device
11656 	 * that appears and the last network device that
11657 	 * disappears.
11658 	 */
11659 	if (register_pernet_device(&loopback_net_ops))
11660 		goto out;
11661 
11662 	if (register_pernet_device(&default_device_ops))
11663 		goto out;
11664 
11665 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11666 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11667 
11668 	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11669 				       NULL, dev_cpu_dead);
11670 	WARN_ON(rc < 0);
11671 	rc = 0;
11672 out:
11673 	return rc;
11674 }
11675 
11676 subsys_initcall(net_dev_init);
11677