/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * VLAN		An implementation of 802.1Q VLAN tagging.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 */
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bug.h>
#include <uapi/linux/if_vlan.h>

#define VLAN_HLEN	4		/* The additional bytes required by VLAN
					 * (in addition to the Ethernet header)
					 */
#define VLAN_ETH_HLEN	18		/* Total octets in header.	 */
#define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */

/*
 * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
 */
#define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload	 */
#define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS */

#define VLAN_MAX_DEPTH	8		/* Max. number of nested VLAN tags parsed */

/**
 *	struct vlan_hdr - vlan header
 *	@h_vlan_TCI: priority and VLAN ID
 *	@h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};

/**
 *	struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
 *	@h_dest: destination ethernet address
 *	@h_source: source ethernet address
 *	@h_vlan_proto: ethernet protocol
 *	@h_vlan_TCI: priority and VLAN ID
 *	@h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};

#include <linux/skbuff.h>

static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	return (struct vlan_ethhdr *)skb_mac_header(skb);
}

#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
#define VLAN_N_VID		4096
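
/*
 * Example: decomposing a host-order TCI with the masks above. A minimal
 * illustrative sketch; "tci" is a hypothetical value, e.g. obtained from
 * skb_vlan_tag_get():
 *
 *	u16 vid = tci & VLAN_VID_MASK;
 *	u8 pcp = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 *	bool dei = !!(tci & VLAN_CFI_MASK);
 */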

/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));

static inline bool is_vlan_dev(const struct net_device *dev)
{
	return dev->priv_flags & IFF_802_1Q_VLAN;
}

#define skb_vlan_tag_present(__skb)	((__skb)->vlan_present)
#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_cfi(__skb)	(!!((__skb)->vlan_tci & VLAN_CFI_MASK))
#define skb_vlan_tag_get_prio(__skb)	(((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
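
/*
 * Example: reading the hardware-accelerated tag off an skb with the
 * accessors above. A minimal sketch; the surrounding function and the
 * helper "account_tagged_frame" are hypothetical:
 *
 *	if (skb_vlan_tag_present(skb)) {
 *		u16 vid = skb_vlan_tag_get_id(skb);
 *		u8 prio = skb_vlan_tag_get_prio(skb);
 *
 *		account_tagged_frame(vid, prio);
 *	}
 */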

static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
}

static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
}

static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
}

static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
}

/**
 *	struct vlan_pcpu_stats - VLAN percpu rx/tx stats
 *	@rx_packets: number of received packets
 *	@rx_bytes: number of received bytes
 *	@rx_multicast: number of received multicast packets
 *	@tx_packets: number of transmitted packets
 *	@tx_bytes: number of transmitted bytes
 *	@syncp: synchronization point for 64bit counters
 *	@rx_errors: number of rx errors
 *	@tx_dropped: number of tx drops
 */
struct vlan_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;
	u32			tx_dropped;
};

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)

extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
					       __be16 vlan_proto, u16 vlan_id);
extern int vlan_for_each(struct net_device *dev,
			 int (*action)(struct net_device *dev, int vid,
				       void *arg), void *arg);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);

/**
 *	struct vlan_priority_tci_mapping - vlan egress priority mappings
 *	@priority: skb priority
 *	@vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
 *	@next: pointer to next struct
 */
struct vlan_priority_tci_mapping {
	u32					priority;
	u16					vlan_qos;
	struct vlan_priority_tci_mapping	*next;
};

struct proc_dir_entry;
struct netpoll;

/**
 *	struct vlan_dev_priv - VLAN private device data
 *	@nr_ingress_mappings: number of ingress priority mappings
 *	@ingress_priority_map: ingress priority mappings
 *	@nr_egress_mappings: number of egress priority mappings
 *	@egress_priority_map: hash of egress priority mappings
 *	@vlan_proto: VLAN encapsulation protocol
 *	@vlan_id: VLAN identifier
 *	@flags: device flags
 *	@real_dev: underlying netdevice
 *	@real_dev_addr: address of underlying netdevice
 *	@dent: proc dir entry
 *	@vlan_pcpu_stats: ptr to percpu rx stats
 */
struct vlan_dev_priv {
	unsigned int				nr_ingress_mappings;
	u32					ingress_priority_map[8];
	unsigned int				nr_egress_mappings;
	struct vlan_priority_tci_mapping	*egress_priority_map[16];

	__be16					vlan_proto;
	u16					vlan_id;
	u16					flags;

	struct net_device			*real_dev;
	unsigned char				real_dev_addr[ETH_ALEN];

	struct proc_dir_entry			*dent;
	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll				*netpoll;
#endif
};

static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u16
vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
{
	struct vlan_priority_tci_mapping *mp;

	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */

	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
	while (mp) {
		if (mp->priority == skprio) {
			return mp->vlan_qos; /* This should already be shifted
					      * to mask correctly with the
					      * VLAN's TCI */
		}
		mp = mp->next;
	}
	return 0;
}

extern bool vlan_do_receive(struct sk_buff **skb);

extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);

extern int vlan_vids_add_by_dev(struct net_device *dev,
				const struct net_device *by_dev);
extern void vlan_vids_del_by_dev(struct net_device *dev,
				 const struct net_device *by_dev);

extern bool vlan_uses_dev(const struct net_device *dev);

#else
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
		     __be16 vlan_proto, u16 vlan_id)
{
	return NULL;
}

static inline int
vlan_for_each(struct net_device *dev,
	      int (*action)(struct net_device *dev, int vid, void *arg),
	      void *arg)
{
	return 0;
}

static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	BUG();
	return NULL;
}

static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
					       u32 skprio)
{
	return 0;
}

static inline bool vlan_do_receive(struct sk_buff **skb)
{
	return false;
}

static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	return 0;
}

static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
}

static inline int vlan_vids_add_by_dev(struct net_device *dev,
				       const struct net_device *by_dev)
{
	return 0;
}

static inline void vlan_vids_del_by_dev(struct net_device *dev,
					const struct net_device *by_dev)
{
}

static inline bool vlan_uses_dev(const struct net_device *dev)
{
	return false;
}
#endif

/**
 * eth_type_vlan - check for valid vlan ether type.
 * @ethertype: ether type to check
 *
 * Returns true if the ether type is a vlan ether type.
 */
static inline bool eth_type_vlan(__be16 ethertype)
{
	switch (ethertype) {
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}
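
/*
 * Example: testing the outer ethertype before touching the TCI. A minimal
 * sketch; it assumes the mac header of the skb is set and linear, and
 * "vid" is a hypothetical local:
 *
 *	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
 *
 *	if (eth_type_vlan(veth->h_vlan_proto))
 *		vid = ntohs(veth->h_vlan_TCI) & VLAN_VID_MASK;
 */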

static inline bool vlan_hw_offload_capable(netdev_features_t features,
					   __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
		return true;
	if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
		return true;
	return false;
}

/**
 * __vlan_insert_inner_tag - inner VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset mac_len.
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci,
					  unsigned int mac_len)
{
	struct vlan_ethhdr *veth;

	if (skb_cow_head(skb, VLAN_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, VLAN_HLEN);

	/* Move the mac header sans proto to the beginning of the new header. */
	if (likely(mac_len > ETH_TLEN))
		memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
	skb->mac_header -= VLAN_HLEN;

	veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);

	/* first, the ethernet type */
	if (likely(mac_len >= ETH_TLEN)) {
		/* h_vlan_encapsulated_proto should already be populated, and
		 * skb->data has space for h_vlan_proto
		 */
		veth->h_vlan_proto = vlan_proto;
	} else {
		/* h_vlan_encapsulated_proto should not be populated, and
		 * skb->data has no space for h_vlan_proto
		 */
		veth->h_vlan_encapsulated_proto = skb->protocol;
	}

	/* now, the TCI */
	veth->h_vlan_TCI = htons(vlan_tci);

	return 0;
}

/**
 * __vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload.
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_tag(struct sk_buff *skb,
				    __be16 vlan_proto, u16 vlan_tci)
{
	return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}

/**
 * vlan_insert_inner_tag - inner VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset mac_len.
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
						    __be16 vlan_proto,
						    u16 vlan_tci,
						    unsigned int mac_len)
{
	int err;

	err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	return skb;
}

/**
 * vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload.
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
					      __be16 vlan_proto, u16 vlan_tci)
{
	return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}
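
/*
 * Example: tagging an skb on the transmit side. A minimal sketch; note
 * that on failure the original skb has already been freed, so the caller
 * only checks for NULL:
 *
 *	skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), tci);
 *	if (!skb)
 *		return NETDEV_TX_OK;
 */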

/**
 * vlan_insert_tag_set_proto - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload.
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
							__be16 vlan_proto,
							u16 vlan_tci)
{
	skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
	if (skb)
		skb->protocol = vlan_proto;
	return skb;
}

/**
 * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
 * @skb: skbuff to clear
 *
 * Clears the VLAN information from @skb
 */
static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
{
	skb->vlan_present = 0;
}

/**
 * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
 * @dst: skbuff to copy to
 * @src: skbuff to copy from
 *
 * Copies VLAN information from @src to @dst (for branchless code)
 */
static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
{
	dst->vlan_present = src->vlan_present;
	dst->vlan_proto = src->vlan_proto;
	dst->vlan_tci = src->vlan_tci;
}

/**
 * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
 * @skb: skbuff to tag
 *
 * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
	if (likely(skb))
		__vlan_hwaccel_clear_tag(skb);
	return skb;
}
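
/*
 * Example: un-accelerating the tag when the lower device cannot offload
 * it. A minimal transmit-path sketch; the surrounding ndo_start_xmit
 * context is assumed:
 *
 *	if (skb_vlan_tag_present(skb) &&
 *	    !vlan_hw_offload_capable(dev->features, skb->vlan_proto)) {
 *		skb = __vlan_hwaccel_push_inside(skb);
 *		if (!skb)
 *			return NETDEV_TX_OK;
 *	}
 */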

/**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci)
{
	skb->vlan_proto = vlan_proto;
	skb->vlan_tci = vlan_tci;
	skb->vlan_present = 1;
}
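
/*
 * Example: a receive path handing a hardware-stripped tag to the stack.
 * A minimal sketch of a hypothetical driver fragment; "desc_vlan_valid"
 * and "desc_vlan_tci" stand in for descriptor fields:
 *
 *	if (desc_vlan_valid)
 *		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 *				       desc_vlan_tci);
 *	napi_gro_receive(napi, skb);
 */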

/**
 * __vlan_get_tag - get the VLAN ID that is part of the payload
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not of VLAN type
 */
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;

	if (!eth_type_vlan(veth->h_vlan_proto))
		return -EINVAL;

	*vlan_tci = ntohs(veth->h_vlan_TCI);
	return 0;
}

/**
 * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->vlan_tci
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if @skb->vlan_tci is not set correctly
 */
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
					 u16 *vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		*vlan_tci = skb_vlan_tag_get(skb);
		return 0;
	} else {
		*vlan_tci = 0;
		return -EINVAL;
	}
}

/**
 * vlan_get_tag - get the VLAN ID from the skb
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not VLAN tagged
 */
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
		return __vlan_hwaccel_get_tag(skb, vlan_tci);
	} else {
		return __vlan_get_tag(skb, vlan_tci);
	}
}
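
/*
 * Example: fetching the TCI without caring where it currently lives.
 * A minimal sketch:
 *
 *	u16 tci;
 *
 *	if (!vlan_get_tag(skb, &tci))
 *		pr_debug("vid %u\n", tci & VLAN_VID_MASK);
 */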

/**
 * __vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 * @type: first vlan protocol
 * @depth: buffer to store length of eth and vlan tags in bytes
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
					 int *depth)
{
	unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;

	/* if type is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (eth_type_vlan(type)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		do {
			struct vlan_hdr vhdr, *vh;

			vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
			if (unlikely(!vh || !--parse_depth))
				return 0;

			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (eth_type_vlan(type));
	}

	if (depth)
		*depth = vlan_depth;

	return type;
}

/**
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
	return __vlan_get_protocol(skb, skb->protocol, NULL);
}

/* A getter for the SKB protocol field which will handle VLAN tags consistently
 * whether VLAN acceleration is enabled or not.
 */
static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
{
	if (!skip_vlan)
		/* VLAN acceleration strips the VLAN header from the skb and
		 * moves it to skb->vlan_proto
		 */
		return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;

	return vlan_get_protocol(skb);
}
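
/*
 * Example: classifying on the L3 EtherType while honouring a possibly
 * accelerated tag. A minimal sketch; "handle_ipv4" is a hypothetical
 * helper:
 *
 *	__be16 proto = skb_protocol(skb, true);
 *
 *	if (proto == htons(ETH_P_IP))
 *		handle_ipv4(skb);
 */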

static inline void vlan_set_encap_proto(struct sk_buff *skb,
					struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned short *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
	if (eth_proto_is_802_3(proto)) {
		skb->protocol = proto;
		return;
	}

	rawp = (unsigned short *)(vhdr + 1);
	if (*rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}

/**
 * skb_vlan_tagged - check if skb is vlan tagged.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged, regardless of whether it is hardware
 * accelerated or not.
 */
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
	if (!skb_vlan_tag_present(skb) &&
	    likely(!eth_type_vlan(skb->protocol)))
		return false;

	return true;
}

/**
 * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged with multiple vlan headers, regardless
 * of whether it is hardware accelerated or not.
 */
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	if (!skb_vlan_tag_present(skb)) {
		struct vlan_ethhdr *veh;

		if (likely(!eth_type_vlan(protocol)))
			return false;

		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return false;

		veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (!eth_type_vlan(protocol))
		return false;

	return true;
}

/**
 * vlan_features_check - drop unsafe features for skb with multiple tags.
 * @skb: skbuff to query
 * @features: features to be checked
 *
 * Returns features without unsafe ones if the skb has multiple tags.
 */
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
						    netdev_features_t features)
{
	if (skb_vlan_tagged_multi(skb)) {
		/* In the case of multi-tagged packets, use a direct mask
		 * instead of using netdev_intersect_features(), to make
		 * sure that only devices supporting NETIF_F_HW_CSUM will
		 * have checksum offloading support.
		 */
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_TX;
	}

	return features;
}
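
/*
 * Example: wiring vlan_features_check() into a driver's
 * ndo_features_check callback. A minimal sketch; "my_features_check"
 * is a hypothetical callback name:
 *
 *	static netdev_features_t
 *	my_features_check(struct sk_buff *skb, struct net_device *dev,
 *			  netdev_features_t features)
 *	{
 *		return vlan_features_check(skb, features);
 *	}
 */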

/**
 * compare_vlan_header - Compare two vlan headers
 * @h1: Pointer to vlan header
 * @h2: Pointer to vlan header
 *
 * Compare two vlan headers, returns 0 if equal.
 *
 * Please note that alignment of h1 & h2 is only guaranteed to be 16 bits.
 */
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
						const struct vlan_hdr *h2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return *(u32 *)h1 ^ *(u32 *)h2;
#else
	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
	       ((__force u32)h1->h_vlan_encapsulated_proto ^
		(__force u32)h2->h_vlan_encapsulated_proto);
#endif
}
#endif /* !(_LINUX_IF_VLAN_H_) */