1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Definitions for the 'struct sk_buff' memory handlers.
4 *
5 * Authors:
6 * Alan Cox, <gw4pts@gw4pts.ampr.org>
7 * Florian La Roche, <rzsfl@rz.uni-sb.de>
8 */
9
10 #ifndef _LINUX_SKBUFF_H
11 #define _LINUX_SKBUFF_H
12
13 #include <linux/kernel.h>
14 #include <linux/compiler.h>
15 #include <linux/time.h>
16 #include <linux/bug.h>
17 #include <linux/bvec.h>
18 #include <linux/cache.h>
19 #include <linux/rbtree.h>
20 #include <linux/socket.h>
21 #include <linux/refcount.h>
22
23 #include <linux/atomic.h>
24 #include <asm/types.h>
25 #include <linux/spinlock.h>
26 #include <linux/net.h>
27 #include <linux/textsearch.h>
28 #include <net/checksum.h>
29 #include <linux/rcupdate.h>
30 #include <linux/hrtimer.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/netdev_features.h>
33 #include <linux/sched.h>
34 #include <linux/sched/clock.h>
35 #include <net/flow_dissector.h>
36 #include <linux/splice.h>
37 #include <linux/in6.h>
38 #include <linux/if_packet.h>
39 #include <net/flow.h>
40 #include <net/page_pool.h>
41 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
42 #include <linux/netfilter/nf_conntrack_common.h>
43 #endif
44
45 /* The interface for checksum offload between the stack and networking drivers
46 * is as follows...
47 *
48 * A. IP checksum related features
49 *
50 * Drivers advertise checksum offload capabilities in the features of a device.
51 * From the stack's point of view these are capabilities offered by the driver.
52 * A driver typically only advertises features that it is capable of offloading
53 * to its device.
54 *
55 * The checksum related features are:
56 *
57 * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
58 * IP (one's complement) checksum for any combination
59 * of protocols or protocol layering. The checksum is
60 * computed and set in a packet per the CHECKSUM_PARTIAL
61 * interface (see below).
62 *
63 * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
64 * TCP or UDP packets over IPv4. These are specifically
65 * unencapsulated packets of the form IPv4|TCP or
66 * IPv4|UDP where the Protocol field in the IPv4 header
67 * is TCP or UDP. The IPv4 header may contain IP options.
68 * This feature cannot be set in features for a device
69 * with NETIF_F_HW_CSUM also set. This feature is being
70 * DEPRECATED (see below).
71 *
72 * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
73 * TCP or UDP packets over IPv6. These are specifically
74 * unencapsulated packets of the form IPv6|TCP or
75 * IPv6|UDP where the Next Header field in the IPv6
76 * header is either TCP or UDP. IPv6 extension headers
77 * are not supported with this feature. This feature
78 * cannot be set in features for a device with
79 * NETIF_F_HW_CSUM also set. This feature is being
80 * DEPRECATED (see below).
81 *
82 * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
83 * This flag is only used to disable the RX checksum
84 * feature for a device. The stack will accept receive
85 * checksum indication in packets received on a device
86 * regardless of whether NETIF_F_RXCSUM is set.
87 *
88 * B. Checksumming of received packets by device. Indication of checksum
89 * verification is set in skb->ip_summed. Possible values are:
90 *
91 * CHECKSUM_NONE:
92 *
93 * Device did not checksum this packet, e.g. due to lack of capabilities.
94 * The packet carries a full (though not verified) checksum in its data, but
95 * skb->csum is not filled in and is therefore undefined in this case.
96 *
97 * CHECKSUM_UNNECESSARY:
98 *
99 * The hardware you're dealing with doesn't calculate the full checksum
100 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
101 * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
102 * if their checksums are okay. skb->csum is still undefined in this case
103 * though. A driver or device must never modify the checksum field in the
104 * packet even if checksum is verified.
105 *
106 * CHECKSUM_UNNECESSARY is applicable to following protocols:
107 * TCP: IPv6 and IPv4.
108 * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
109 * zero UDP checksum for either IPv4 or IPv6, the networking stack
110 * may perform further validation in this case.
111 * GRE: only if the checksum is present in the header.
112 * SCTP: indicates the CRC in SCTP header has been validated.
113 * FCOE: indicates the CRC in FC frame has been validated.
114 *
115 * skb->csum_level indicates the number of consecutive checksums in the
116 * packet that have been verified as CHECKSUM_UNNECESSARY, minus one.
117 * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
118 * and a device is able to verify the checksums for UDP (possibly zero),
119 * GRE (checksum flag is set) and TCP, skb->csum_level would be set to
120 * two. If the device were only able to verify the UDP checksum and not
121 * GRE, either because it doesn't support GRE checksum or because GRE
122 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
123 * not considered in this case).
124 *
125 * CHECKSUM_COMPLETE:
126 *
127 * This is the most generic way. The device supplies the checksum of the _whole_
128 * packet as seen by netif_rx() and fills in skb->csum. This means the
129 * hardware doesn't need to parse L3/L4 headers to implement this.
130 *
131 * Notes:
132 * - Even if a device supports only some protocols but is able to produce
133 * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
134 * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
135 *
136 * CHECKSUM_PARTIAL:
137 *
138 * A checksum is set up to be offloaded to a device as described in the
139 * output description for CHECKSUM_PARTIAL. This may occur on a packet
140 * received directly from another Linux OS, e.g., a virtualized Linux kernel
141 * on the same host, or it may be set in the input path in GRO or remote
142 * checksum offload. For the purposes of checksum verification, the checksum
143 * referred to by skb->csum_start + skb->csum_offset and any preceding
144 * checksums in the packet are considered verified. Any checksums in the
145 * packet that are after the checksum being offloaded are not considered to
146 * be verified.
147 *
148 * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
149 * in the skb->ip_summed for a packet. Values are:
150 *
151 * CHECKSUM_PARTIAL:
152 *
153 * The driver is required to checksum the packet as seen by hard_start_xmit()
154 * from skb->csum_start up to the end, and to record/write the checksum at
155 * offset skb->csum_start + skb->csum_offset. A driver may verify that the
156 * csum_start and csum_offset values are valid values given the length and
157 * offset of the packet, but it should not attempt to validate that the
158 * checksum refers to a legitimate transport layer checksum -- it is the
159 * purview of the stack to validate that csum_start and csum_offset are set
160 * correctly.
161 *
162 * When the stack requests checksum offload for a packet, the driver MUST
163 * ensure that the checksum is set correctly. A driver can either offload the
164 * checksum calculation to the device, or call skb_checksum_help (in the case
165 * that the device does not support offload for a particular checksum).
166 *
167 * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
168 * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
169 * checksum offload capability.
170 * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based
171 * on network device checksumming capabilities: if a packet does not match
172 * them, skb_checksum_help or skb_crc32c_help (depending on the value of
173 * csum_not_inet, see item D.) is called to resolve the checksum.
174 *
175 * CHECKSUM_NONE:
176 *
177 * The skb was already checksummed by the protocol, or a checksum is not
178 * required.
179 *
180 * CHECKSUM_UNNECESSARY:
181 *
182 * This has the same meaning as CHECKSUM_NONE for checksum offload on
183 * output.
184 *
185 * CHECKSUM_COMPLETE:
186 * Not used in checksum output. If a driver observes a packet with this value
187 * set in skbuff, it should treat the packet as if CHECKSUM_NONE were set.
188 *
189 * D. Non-IP checksum (CRC) offloads
190 *
191 * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
192 * offloading the SCTP CRC in a packet. To perform this offload the stack
193 * will set csum_start and csum_offset accordingly, set ip_summed to
194 * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
195 * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
196 * A driver that supports both IP checksum offload and SCTP CRC32c offload
197 * must verify which offload is configured for a packet by testing the
198 * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
199 * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
200 *
201 * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
202 * offloading the FCOE CRC in a packet. To perform this offload the stack
203 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
204 * accordingly. Note that there is no indication in the skbuff that the
205 * CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
206 * both IP checksum offload and FCOE CRC offload must verify which offload
207 * is configured for a packet, presumably by inspecting packet headers.
208 *
209 * E. Checksumming on output with GSO.
210 *
211 * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
212 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
213 * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
214 * part of the GSO operation is implied. If a checksum is being offloaded
215 * with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and
216 * csum_offset are set to refer to the outermost checksum being offloaded
217 * (two offloaded checksums are possible with UDP encapsulation).
218 */
219
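/* Illustrative sketch (not part of this header): how a driver typically
 * consumes these values. drv_hw_csum_ok(), drv_can_offload_csum() and the
 * tx_desc fields below are hypothetical driver-side names; the skb fields
 * and helpers (ip_summed, csum_offset, skb_checksum_start_offset(),
 * skb_checksum_help()) are the interface documented above.
 *
 * Receive: a device that verified the checksums reports it like this:
 *
 *	if (drv_hw_csum_ok(rx_desc))
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else
 *		skb->ip_summed = CHECKSUM_NONE;
 *
 * Transmit: a driver asked to offload a checksum (CHECKSUM_PARTIAL) either
 * programs the device with the start/offset pair or falls back to software:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 *		if (drv_can_offload_csum(skb)) {
 *			tx_desc->csum_start  = skb_checksum_start_offset(skb);
 *			tx_desc->csum_offset = skb->csum_offset;
 *		} else if (skb_checksum_help(skb)) {
 *			goto drop;
 *		}
 *	}
 */
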
220 /* Don't change this without changing skb_csum_unnecessary! */
221 #define CHECKSUM_NONE 0
222 #define CHECKSUM_UNNECESSARY 1
223 #define CHECKSUM_COMPLETE 2
224 #define CHECKSUM_PARTIAL 3
225
226 /* Maximum value in skb->csum_level */
227 #define SKB_MAX_CSUM_LEVEL 3
228
229 #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
230 #define SKB_WITH_OVERHEAD(X) \
231 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
232 #define SKB_MAX_ORDER(X, ORDER) \
233 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
234 #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
235 #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
236
237 /* return minimum truesize of one skb containing X bytes of data */
238 #define SKB_TRUESIZE(X) ((X) + \
239 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
240 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
241
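/* For example, with 64-byte cache lines, if sizeof(struct sk_buff) rounds up
 * to 256 bytes and sizeof(struct skb_shared_info) rounds up to 320 bytes
 * (both values depend on the architecture and configuration), then
 * SKB_TRUESIZE(1500) evaluates to 1500 + 256 + 320 = 2076 bytes of
 * accounted memory.
 */
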
242 struct ahash_request;
243 struct net_device;
244 struct scatterlist;
245 struct pipe_inode_info;
246 struct iov_iter;
247 struct napi_struct;
248 struct bpf_prog;
249 union bpf_attr;
250 struct skb_ext;
251
252 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
253 struct nf_bridge_info {
254 enum {
255 BRNF_PROTO_UNCHANGED,
256 BRNF_PROTO_8021Q,
257 BRNF_PROTO_PPPOE
258 } orig_proto:8;
259 u8 pkt_otherhost:1;
260 u8 in_prerouting:1;
261 u8 bridged_dnat:1;
262 __u16 frag_max_size;
263 struct net_device *physindev;
264
265 /* always valid & non-NULL from FORWARD on, for physdev match */
266 struct net_device *physoutdev;
267 union {
268 /* prerouting: detect dnat in orig/reply direction */
269 __be32 ipv4_daddr;
270 struct in6_addr ipv6_daddr;
271
272 /* after prerouting + nat detected: store original source
273 * mac since neigh resolution overwrites it, only used while
274 * skb is out in neigh layer.
275 */
276 char neigh_header[8];
277 };
278 };
279 #endif
280
281 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
282 /* The chain in tc_skb_ext is used to share the tc chain with
283 * ovs recirc_id. It is set to the current chain by tc
284 * and read by ovs to set the recirc_id.
285 */
286 struct tc_skb_ext {
287 __u32 chain;
288 __u16 mru;
289 bool post_ct;
290 };
291 #endif
292
293 struct sk_buff_head {
294 /* These two members must be first. */
295 struct sk_buff *next;
296 struct sk_buff *prev;
297
298 __u32 qlen;
299 spinlock_t lock;
300 };
301
302 struct sk_buff;
303
304 /* To allow a 64K frame to be packed as a single skb without frag_list we
305 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
306 * buffers which do not start on a page boundary.
307 *
308 * Since GRO uses frags we allocate at least 16 regardless of page
309 * size.
310 */
311 #if (65536/PAGE_SIZE + 1) < 16
312 #define MAX_SKB_FRAGS 16UL
313 #else
314 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
315 #endif
316 extern int sysctl_max_skb_frags;
317
318 /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
319 * segment using its current segmentation instead.
320 */
321 #define GSO_BY_FRAGS 0xFFFF
322
323 typedef struct bio_vec skb_frag_t;
324
325 /**
326 * skb_frag_size() - Returns the size of a skb fragment
327 * @frag: skb fragment
328 */
329 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
330 {
331 return frag->bv_len;
332 }
333
334 /**
335 * skb_frag_size_set() - Sets the size of a skb fragment
336 * @frag: skb fragment
337 * @size: size of fragment
338 */
339 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
340 {
341 frag->bv_len = size;
342 }
343
344 /**
345 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
346 * @frag: skb fragment
347 * @delta: value to add
348 */
349 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
350 {
351 frag->bv_len += delta;
352 }
353
354 /**
355 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
356 * @frag: skb fragment
357 * @delta: value to subtract
358 */
359 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
360 {
361 frag->bv_len -= delta;
362 }
363
364 /**
365 * skb_frag_must_loop - Test if %p is a high memory page
366 * @p: fragment's page
367 */
368 static inline bool skb_frag_must_loop(struct page *p)
369 {
370 #if defined(CONFIG_HIGHMEM)
371 if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
372 return true;
373 #endif
374 return false;
375 }
376
377 /**
378 * skb_frag_foreach_page - loop over pages in a fragment
379 *
380 * @f: skb frag to operate on
381 * @f_off: offset from start of f->bv_page
382 * @f_len: length from f_off to loop over
383 * @p: (temp var) current page
384 * @p_off: (temp var) offset from start of current page,
385 * non-zero only on first page.
386 * @p_len: (temp var) length in current page,
387 * < PAGE_SIZE only on first and last page.
388 * @copied: (temp var) length so far, excluding current p_len.
389 *
390 * A fragment can hold a compound page, in which case per-page
391 * operations, notably kmap_atomic, must be called for each
392 * regular page.
393 */
394 #define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \
395 for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \
396 p_off = (f_off) & (PAGE_SIZE - 1), \
397 p_len = skb_frag_must_loop(p) ? \
398 min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
399 copied = 0; \
400 copied < f_len; \
401 copied += p_len, p++, p_off = 0, \
402 p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
403
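/* Illustrative use (a sketch, not a helper defined here; 'dst' and 'i' are
 * assumed to come from the caller): copy a fragment that may sit in a
 * compound highmem page by mapping one regular page at a time.
 *
 *	const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 *	struct page *p;
 *	unsigned int p_off, p_len, copied;
 *
 *	skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
 *			      p, p_off, p_len, copied) {
 *		void *vaddr = kmap_atomic(p);
 *
 *		memcpy(dst + copied, vaddr + p_off, p_len);
 *		kunmap_atomic(vaddr);
 *	}
 */
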
404 #define HAVE_HW_TIME_STAMP
405
406 /**
407 * struct skb_shared_hwtstamps - hardware time stamps
408 * @hwtstamp: hardware time stamp transformed into duration
409 * since arbitrary point in time
410 *
411 * Software time stamps generated by ktime_get_real() are stored in
412 * skb->tstamp.
413 *
414 * hwtstamps can only be compared against other hwtstamps from
415 * the same device.
416 *
417 * This structure is attached to packets as part of the
418 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
419 */
420 struct skb_shared_hwtstamps {
421 ktime_t hwtstamp;
422 };
423
424 /* Definitions for tx_flags in struct skb_shared_info */
425 enum {
426 /* generate hardware time stamp */
427 SKBTX_HW_TSTAMP = 1 << 0,
428
429 /* generate software time stamp when queueing packet to NIC */
430 SKBTX_SW_TSTAMP = 1 << 1,
431
432 /* device driver is going to provide hardware time stamp */
433 SKBTX_IN_PROGRESS = 1 << 2,
434
435 /* generate wifi status information (where possible) */
436 SKBTX_WIFI_STATUS = 1 << 4,
437
438 /* generate software time stamp when entering packet scheduling */
439 SKBTX_SCHED_TSTAMP = 1 << 6,
440 };
441
442 #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
443 SKBTX_SCHED_TSTAMP)
444 #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
445
446 /* Definitions for flags in struct skb_shared_info */
447 enum {
448 /* use zcopy routines */
449 SKBFL_ZEROCOPY_ENABLE = BIT(0),
450
451 /* This indicates at least one fragment might be overwritten
452 * (as in vmsplice(), sendfile() ...)
453 * If we need to compute a TX checksum, we'll need to copy
454 * all frags to avoid possible bad checksum
455 */
456 SKBFL_SHARED_FRAG = BIT(1),
457 };
458
459 #define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
460
461 /*
462 * The callback notifies userspace to release buffers when skb DMA is done in
463 * the lower device; the last reference to the skb should be gone when this is called.
464 * The zerocopy_success argument is true if zero copy transmit occurred,
465 * false on data copy or out of memory error caused by data copy attempt.
466 * The ctx field is used to track device context.
467 * The desc field is used to track userspace buffer index.
468 */
469 struct ubuf_info {
470 void (*callback)(struct sk_buff *, struct ubuf_info *,
471 bool zerocopy_success);
472 union {
473 struct {
474 unsigned long desc;
475 void *ctx;
476 };
477 struct {
478 u32 id;
479 u16 len;
480 u16 zerocopy:1;
481 u32 bytelen;
482 };
483 };
484 refcount_t refcnt;
485 u8 flags;
486
487 struct mmpin {
488 struct user_struct *user;
489 unsigned int num_pg;
490 } mmp;
491 };
492
493 #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
494
495 int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
496 void mm_unaccount_pinned_pages(struct mmpin *mmp);
497
498 struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size);
499 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
500 struct ubuf_info *uarg);
501
502 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
503
504 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
505 bool success);
506
507 int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
508 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
509 struct msghdr *msg, int len,
510 struct ubuf_info *uarg);
511
512 /* This data is invariant across clones and lives at
513 * the end of the header data, i.e. at skb->end.
514 */
515 struct skb_shared_info {
516 __u8 flags;
517 __u8 meta_len;
518 __u8 nr_frags;
519 __u8 tx_flags;
520 unsigned short gso_size;
521 /* Warning: this field is not always filled in (UFO)! */
522 unsigned short gso_segs;
523 struct sk_buff *frag_list;
524 struct skb_shared_hwtstamps hwtstamps;
525 unsigned int gso_type;
526 u32 tskey;
527
528 /*
529 * Warning : all fields before dataref are cleared in __alloc_skb()
530 */
531 atomic_t dataref;
532
533 /* Intermediate layers must ensure that destructor_arg
534 * remains valid until skb destructor */
535 void * destructor_arg;
536
537 /* must be last field, see pskb_expand_head() */
538 skb_frag_t frags[MAX_SKB_FRAGS];
539 };
540
541 /* We divide dataref into two halves. The higher 16 bits hold references
542 * to the payload part of skb->data. The lower 16 bits hold references to
543 * the entire skb->data. A clone of a headerless skb holds the length of
544 * the header in skb->hdr_len.
545 *
546 * All users must obey the rule that the skb->data reference count must be
547 * greater than or equal to the payload reference count.
548 *
549 * Holding a reference to the payload part means that the user does not
550 * care about modifications to the header part of skb->data.
551 */
552 #define SKB_DATAREF_SHIFT 16
553 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
554
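/* For example, the two counts can be recovered from a dataref snapshot like
 * this (skb_header_cloned() below uses exactly this split):
 *
 *	int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
 *	int full_refs    = dataref & SKB_DATAREF_MASK;
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 */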
555
556 enum {
557 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
558 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
559 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
560 };
561
562 enum {
563 SKB_GSO_TCPV4 = 1 << 0,
564
565 /* This indicates the skb is from an untrusted source. */
566 SKB_GSO_DODGY = 1 << 1,
567
568 /* This indicates the tcp segment has CWR set. */
569 SKB_GSO_TCP_ECN = 1 << 2,
570
571 SKB_GSO_TCP_FIXEDID = 1 << 3,
572
573 SKB_GSO_TCPV6 = 1 << 4,
574
575 SKB_GSO_FCOE = 1 << 5,
576
577 SKB_GSO_GRE = 1 << 6,
578
579 SKB_GSO_GRE_CSUM = 1 << 7,
580
581 SKB_GSO_IPXIP4 = 1 << 8,
582
583 SKB_GSO_IPXIP6 = 1 << 9,
584
585 SKB_GSO_UDP_TUNNEL = 1 << 10,
586
587 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
588
589 SKB_GSO_PARTIAL = 1 << 12,
590
591 SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
592
593 SKB_GSO_SCTP = 1 << 14,
594
595 SKB_GSO_ESP = 1 << 15,
596
597 SKB_GSO_UDP = 1 << 16,
598
599 SKB_GSO_UDP_L4 = 1 << 17,
600
601 SKB_GSO_FRAGLIST = 1 << 18,
602 };
603
604 #if BITS_PER_LONG > 32
605 #define NET_SKBUFF_DATA_USES_OFFSET 1
606 #endif
607
608 #ifdef NET_SKBUFF_DATA_USES_OFFSET
609 typedef unsigned int sk_buff_data_t;
610 #else
611 typedef unsigned char *sk_buff_data_t;
612 #endif
613
614 /**
615 * struct sk_buff - socket buffer
616 * @next: Next buffer in list
617 * @prev: Previous buffer in list
618 * @tstamp: Time we arrived/left
619 * @skb_mstamp_ns: (aka @tstamp) earliest departure time; start point
620 * for retransmit timer
621 * @rbnode: RB tree node, alternative to next/prev for netem/tcp
622 * @list: queue head
623 * @sk: Socket we are owned by
624 * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
625 * fragmentation management
626 * @dev: Device we arrived on/are leaving by
627 * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
628 * @cb: Control buffer. Free for use by every layer. Put private vars here
629 * @_skb_refdst: destination entry (with norefcount bit)
630 * @sp: the security path, used for xfrm
631 * @len: Length of actual data
632 * @data_len: Data length
633 * @mac_len: Length of link layer header
634 * @hdr_len: writable header length of cloned skb
635 * @csum: Checksum (must include start/offset pair)
636 * @csum_start: Offset from skb->head where checksumming should start
637 * @csum_offset: Offset from csum_start where checksum should be stored
638 * @priority: Packet queueing priority
639 * @ignore_df: allow local fragmentation
640 * @cloned: Head may be cloned (check refcnt to be sure)
641 * @ip_summed: Driver fed us an IP checksum
642 * @nohdr: Payload reference only, must not modify header
643 * @pkt_type: Packet class
644 * @fclone: skbuff clone status
645 * @ipvs_property: skbuff is owned by ipvs
646 * @inner_protocol_type: whether the inner protocol is
647 * ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO
648 * @remcsum_offload: remote checksum offload is enabled
649 * @offload_fwd_mark: Packet was L2-forwarded in hardware
650 * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
651 * @tc_skip_classify: do not classify packet. set by IFB device
652 * @tc_at_ingress: used within tc_classify to distinguish in/egress
653 * @redirected: packet was redirected by packet classifier
654 * @from_ingress: packet was redirected from the ingress path
655 * @peeked: this packet has been seen already, so stats have been
656 * done for it, don't do them again
657 * @nf_trace: netfilter packet trace flag
658 * @protocol: Packet protocol from driver
659 * @destructor: Destruct function
660 * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
661 * @_sk_redir: socket redirection information for skmsg
662 * @_nfct: Associated connection, if any (with nfctinfo bits)
663 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
664 * @skb_iif: ifindex of device we arrived on
665 * @tc_index: Traffic control index
666 * @hash: the packet hash
667 * @queue_mapping: Queue mapping for multiqueue devices
668 * @head_frag: skb was allocated from page fragments,
669 * not allocated by kmalloc() or vmalloc().
670 * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
671 * @pp_recycle: mark the packet for recycling instead of freeing (implies
672 * page_pool support on driver)
673 * @active_extensions: active extensions (skb_ext_id types)
674 * @ndisc_nodetype: router type (from link layer)
675 * @ooo_okay: allow the mapping of a socket to a queue to be changed
676 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
677 * ports.
678 * @sw_hash: indicates hash was computed in software stack
679 * @wifi_acked_valid: wifi_acked was set
680 * @wifi_acked: whether frame was acked on wifi or not
681 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
682 * @encapsulation: indicates the inner headers in the skbuff are valid
683 * @encap_hdr_csum: software checksum is needed
684 * @csum_valid: checksum is already valid
685 * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
686 * @csum_complete_sw: checksum was completed by software
687 * @csum_level: indicates the number of consecutive checksums in the packet
688 * that have been verified as CHECKSUM_UNNECESSARY,
689 * minus one (max 3)
690 * @dst_pending_confirm: need to confirm neighbour
691 * @decrypted: Decrypted SKB
692 * @slow_gro: state present at GRO time, slower prepare step required
693 * @napi_id: id of the NAPI struct this skb came from
694 * @sender_cpu: (aka @napi_id) source CPU in XPS
695 * @secmark: security marking
696 * @mark: Generic packet mark
697 * @reserved_tailroom: (aka @mark) number of bytes of free space available
698 * at the tail of an sk_buff
699 * @vlan_present: VLAN tag is present
700 * @vlan_proto: vlan encapsulation protocol
701 * @vlan_tci: vlan tag control information
702 * @inner_protocol: Protocol (encapsulation)
703 * @inner_ipproto: (aka @inner_protocol) stores ipproto when
704 * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
705 * @inner_transport_header: Inner transport layer header (encapsulation)
706 * @inner_network_header: Network layer header (encapsulation)
707 * @inner_mac_header: Link layer header (encapsulation)
708 * @transport_header: Transport layer header
709 * @network_header: Network layer header
710 * @mac_header: Link layer header
711 * @kcov_handle: KCOV remote handle for remote coverage collection
712 * @tail: Tail pointer
713 * @end: End pointer
714 * @head: Head of buffer
715 * @data: Data head pointer
716 * @truesize: Buffer size
717 * @users: User count - see {datagram,tcp}.c
718 * @extensions: allocated extensions, valid if active_extensions is nonzero
719 */
720
721 struct sk_buff {
722 union {
723 struct {
724 /* These two members must be first. */
725 struct sk_buff *next;
726 struct sk_buff *prev;
727
728 union {
729 struct net_device *dev;
730 /* Some protocols might use this space to store information,
731 * while device pointer would be NULL.
732 * UDP receive path is one user.
733 */
734 unsigned long dev_scratch;
735 };
736 };
737 struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
738 struct list_head list;
739 };
740
741 union {
742 struct sock *sk;
743 int ip_defrag_offset;
744 };
745
746 union {
747 ktime_t tstamp;
748 u64 skb_mstamp_ns; /* earliest departure time */
749 };
750 /*
751 * This is the control buffer. It is free to use for every
752 * layer. Please put your private variables there. If you
753 * want to keep them across layers you have to do a skb_clone()
754 * first. This is owned by whoever has the skb queued ATM.
755 */
756 char cb[48] __aligned(8);
757
758 union {
759 struct {
760 unsigned long _skb_refdst;
761 void (*destructor)(struct sk_buff *skb);
762 };
763 struct list_head tcp_tsorted_anchor;
764 #ifdef CONFIG_NET_SOCK_MSG
765 unsigned long _sk_redir;
766 #endif
767 };
768
769 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
770 unsigned long _nfct;
771 #endif
772 unsigned int len,
773 data_len;
774 __u16 mac_len,
775 hdr_len;
776
777 /* Following fields are _not_ copied in __copy_skb_header()
778 * Note that queue_mapping is here mostly to fill a hole.
779 */
780 __u16 queue_mapping;
781
782 /* if you move cloned around you also must adapt those constants */
783 #ifdef __BIG_ENDIAN_BITFIELD
784 #define CLONED_MASK (1 << 7)
785 #else
786 #define CLONED_MASK 1
787 #endif
788 #define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
789
790 /* private: */
791 __u8 __cloned_offset[0];
792 /* public: */
793 __u8 cloned:1,
794 nohdr:1,
795 fclone:2,
796 peeked:1,
797 head_frag:1,
798 pfmemalloc:1,
799 pp_recycle:1; /* page_pool recycle indicator */
800 #ifdef CONFIG_SKB_EXTENSIONS
801 __u8 active_extensions;
802 #endif
803
804 /* fields enclosed in headers_start/headers_end are copied
805 * using a single memcpy() in __copy_skb_header()
806 */
807 /* private: */
808 __u32 headers_start[0];
809 /* public: */
810
811 /* if you move pkt_type around you also must adapt those constants */
812 #ifdef __BIG_ENDIAN_BITFIELD
813 #define PKT_TYPE_MAX (7 << 5)
814 #else
815 #define PKT_TYPE_MAX 7
816 #endif
817 #define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
818
819 /* private: */
820 __u8 __pkt_type_offset[0];
821 /* public: */
822 __u8 pkt_type:3;
823 __u8 ignore_df:1;
824 __u8 nf_trace:1;
825 __u8 ip_summed:2;
826 __u8 ooo_okay:1;
827
828 __u8 l4_hash:1;
829 __u8 sw_hash:1;
830 __u8 wifi_acked_valid:1;
831 __u8 wifi_acked:1;
832 __u8 no_fcs:1;
833 /* Indicates the inner headers are valid in the skbuff. */
834 __u8 encapsulation:1;
835 __u8 encap_hdr_csum:1;
836 __u8 csum_valid:1;
837
838 #ifdef __BIG_ENDIAN_BITFIELD
839 #define PKT_VLAN_PRESENT_BIT 7
840 #else
841 #define PKT_VLAN_PRESENT_BIT 0
842 #endif
843 #define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
844 /* private: */
845 __u8 __pkt_vlan_present_offset[0];
846 /* public: */
847 __u8 vlan_present:1;
848 __u8 csum_complete_sw:1;
849 __u8 csum_level:2;
850 __u8 csum_not_inet:1;
851 __u8 dst_pending_confirm:1;
852 #ifdef CONFIG_IPV6_NDISC_NODETYPE
853 __u8 ndisc_nodetype:2;
854 #endif
855
856 __u8 ipvs_property:1;
857 __u8 inner_protocol_type:1;
858 __u8 remcsum_offload:1;
859 #ifdef CONFIG_NET_SWITCHDEV
860 __u8 offload_fwd_mark:1;
861 __u8 offload_l3_fwd_mark:1;
862 #endif
863 #ifdef CONFIG_NET_CLS_ACT
864 __u8 tc_skip_classify:1;
865 __u8 tc_at_ingress:1;
866 #endif
867 __u8 redirected:1;
868 #ifdef CONFIG_NET_REDIRECT
869 __u8 from_ingress:1;
870 #endif
871 #ifdef CONFIG_TLS_DEVICE
872 __u8 decrypted:1;
873 #endif
874 __u8 slow_gro:1;
875
876 #ifdef CONFIG_NET_SCHED
877 __u16 tc_index; /* traffic control index */
878 #endif
879
880 union {
881 __wsum csum;
882 struct {
883 __u16 csum_start;
884 __u16 csum_offset;
885 };
886 };
887 __u32 priority;
888 int skb_iif;
889 __u32 hash;
890 __be16 vlan_proto;
891 __u16 vlan_tci;
892 #if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
893 union {
894 unsigned int napi_id;
895 unsigned int sender_cpu;
896 };
897 #endif
898 #ifdef CONFIG_NETWORK_SECMARK
899 __u32 secmark;
900 #endif
901
902 union {
903 __u32 mark;
904 __u32 reserved_tailroom;
905 };
906
907 union {
908 __be16 inner_protocol;
909 __u8 inner_ipproto;
910 };
911
912 __u16 inner_transport_header;
913 __u16 inner_network_header;
914 __u16 inner_mac_header;
915
916 __be16 protocol;
917 __u16 transport_header;
918 __u16 network_header;
919 __u16 mac_header;
920
921 #ifdef CONFIG_KCOV
922 u64 kcov_handle;
923 #endif
924
925 /* private: */
926 __u32 headers_end[0];
927 /* public: */
928
929 /* These elements must be at the end, see alloc_skb() for details. */
930 sk_buff_data_t tail;
931 sk_buff_data_t end;
932 unsigned char *head,
933 *data;
934 unsigned int truesize;
935 refcount_t users;
936
937 #ifdef CONFIG_SKB_EXTENSIONS
938 /* only usable after checking ->active_extensions != 0 */
939 struct skb_ext *extensions;
940 #endif
941 };
942
943 #ifdef __KERNEL__
944 /*
945 * Handling routines are only of interest to the kernel
946 */
947
948 #define SKB_ALLOC_FCLONE 0x01
949 #define SKB_ALLOC_RX 0x02
950 #define SKB_ALLOC_NAPI 0x04
951
952 /**
953 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
954 * @skb: buffer
955 */
956 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
957 {
958 return unlikely(skb->pfmemalloc);
959 }
960
961 /*
962 * skb might have a dst pointer attached, refcounted or not.
963 * _skb_refdst low order bit is set if refcount was _not_ taken
964 */
965 #define SKB_DST_NOREF 1UL
966 #define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
967
968 /**
969 * skb_dst - returns skb dst_entry
970 * @skb: buffer
971 *
972 * Returns skb dst_entry, regardless of reference taken or not.
973 */
974 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
975 {
976 /* If refdst was not refcounted, check we still are in a
977 * rcu_read_lock section
978 */
979 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
980 !rcu_read_lock_held() &&
981 !rcu_read_lock_bh_held());
982 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
983 }
984
985 /**
986 * skb_dst_set - sets skb dst
987 * @skb: buffer
988 * @dst: dst entry
989 *
990 * Sets skb dst, assuming a reference was taken on dst and should
991 * be released by skb_dst_drop()
992 */
993 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
994 {
995 skb->slow_gro |= !!dst;
996 skb->_skb_refdst = (unsigned long)dst;
997 }
998
999 /**
1000 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
1001 * @skb: buffer
1002 * @dst: dst entry
1003 *
1004 * Sets skb dst, assuming a reference was not taken on dst.
1005 * If dst entry is cached, we do not take reference and dst_release
1006 * will be avoided by refdst_drop. If dst entry is not cached, we take
1007 * reference, so that last dst_release can destroy the dst immediately.
1008 */
1009 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
1010 {
1011 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
1012 skb->slow_gro |= !!dst;
1013 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
1014 }
1015
1016 /**
1017 * skb_dst_is_noref - Test if skb dst isn't refcounted
1018 * @skb: buffer
1019 */
1020 static inline bool skb_dst_is_noref(const struct sk_buff *skb)
1021 {
1022 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
1023 }
1024
1025 /**
1026 * skb_rtable - Returns the skb &rtable
1027 * @skb: buffer
1028 */
1029 static inline struct rtable *skb_rtable(const struct sk_buff *skb)
1030 {
1031 return (struct rtable *)skb_dst(skb);
1032 }
1033
1034 /* For mangling skb->pkt_type from the user space side, by applications
1035 * such as nft, tc, etc., we only allow a conservative subset of
1036 * possible pkt_types to be set.
1037 */
1038 static inline bool skb_pkt_type_ok(u32 ptype)
1039 {
1040 return ptype <= PACKET_OTHERHOST;
1041 }
1042
1043 /**
1044 * skb_napi_id - Returns the skb's NAPI id
1045 * @skb: buffer
1046 */
1047 static inline unsigned int skb_napi_id(const struct sk_buff *skb)
1048 {
1049 #ifdef CONFIG_NET_RX_BUSY_POLL
1050 return skb->napi_id;
1051 #else
1052 return 0;
1053 #endif
1054 }
1055
1056 /**
1057 * skb_unref - decrement the skb's reference count
1058 * @skb: buffer
1059 *
1060 * Returns true if we can free the skb.
1061 */
1062 static inline bool skb_unref(struct sk_buff *skb)
1063 {
1064 if (unlikely(!skb))
1065 return false;
1066 if (likely(refcount_read(&skb->users) == 1))
1067 smp_rmb();
1068 else if (likely(!refcount_dec_and_test(&skb->users)))
1069 return false;
1070
1071 return true;
1072 }
1073
1074 void skb_release_head_state(struct sk_buff *skb);
1075 void kfree_skb(struct sk_buff *skb);
1076 void kfree_skb_list(struct sk_buff *segs);
1077 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
1078 void skb_tx_error(struct sk_buff *skb);
1079
1080 #ifdef CONFIG_TRACEPOINTS
1081 void consume_skb(struct sk_buff *skb);
1082 #else
1083 static inline void consume_skb(struct sk_buff *skb)
1084 {
1085 return kfree_skb(skb);
1086 }
1087 #endif
1088
1089 void __consume_stateless_skb(struct sk_buff *skb);
1090 void __kfree_skb(struct sk_buff *skb);
1091 extern struct kmem_cache *skbuff_head_cache;
1092
1093 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
1094 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
1095 bool *fragstolen, int *delta_truesize);
1096
1097 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
1098 int node);
1099 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
1100 struct sk_buff *build_skb(void *data, unsigned int frag_size);
1101 struct sk_buff *build_skb_around(struct sk_buff *skb,
1102 void *data, unsigned int frag_size);
1103
1104 struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
1105
1106 /**
1107 * alloc_skb - allocate a network buffer
1108 * @size: size to allocate
1109 * @priority: allocation mask
1110 *
1111 * This function is a convenient wrapper around __alloc_skb().
1112 */
1113 static inline struct sk_buff *alloc_skb(unsigned int size,
1114 gfp_t priority)
1115 {
1116 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
1117 }
1118
1119 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
1120 unsigned long data_len,
1121 int max_page_order,
1122 int *errcode,
1123 gfp_t gfp_mask);
1124 struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
1125
1126 /* Layout of fast clones : [skb1][skb2][fclone_ref] */
1127 struct sk_buff_fclones {
1128 struct sk_buff skb1;
1129
1130 struct sk_buff skb2;
1131
1132 refcount_t fclone_ref;
1133 };
1134
1135 /**
1136 * skb_fclone_busy - check if fclone is busy
1137 * @sk: socket
1138 * @skb: buffer
1139 *
1140 * Returns true if skb is a fast clone, and its clone is not freed.
1141 * Some drivers call skb_orphan() in their ndo_start_xmit(),
1142 * so we also check that this didnt happen.
1143 */
1144 static inline bool skb_fclone_busy(const struct sock *sk,
1145 const struct sk_buff *skb)
1146 {
1147 const struct sk_buff_fclones *fclones;
1148
1149 fclones = container_of(skb, struct sk_buff_fclones, skb1);
1150
1151 return skb->fclone == SKB_FCLONE_ORIG &&
1152 refcount_read(&fclones->fclone_ref) > 1 &&
1153 READ_ONCE(fclones->skb2.sk) == sk;
1154 }
1155
1156 /**
1157 * alloc_skb_fclone - allocate a network buffer from fclone cache
1158 * @size: size to allocate
1159 * @priority: allocation mask
1160 *
1161 * This function is a convenient wrapper around __alloc_skb().
1162 */
1163 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
1164 gfp_t priority)
1165 {
1166 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
1167 }
1168
1169 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
1170 void skb_headers_offset_update(struct sk_buff *skb, int off);
1171 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1172 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1173 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
1174 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1175 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1176 gfp_t gfp_mask, bool fclone);
1177 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
1178 gfp_t gfp_mask)
1179 {
1180 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
1181 }
1182
1183 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
1184 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
1185 unsigned int headroom);
1186 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
1187 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
1188 int newtailroom, gfp_t priority);
1189 int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
1190 int offset, int len);
1191 int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1192 int offset, int len);
1193 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1194 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1195
1196 /**
1197 * skb_pad - zero pad the tail of an skb
1198 * @skb: buffer to pad
1199 * @pad: space to pad
1200 *
1201 * Ensure that a buffer is followed by a padding area that is zero
1202 * filled. Used by network drivers which may DMA or transfer data
1203 * beyond the buffer end onto the wire.
1204 *
1205 * May return error in out of memory cases. The skb is freed on error.
1206 */
1207 static inline int skb_pad(struct sk_buff *skb, int pad)
1208 {
1209 return __skb_pad(skb, pad, true);
1210 }
1211 #define dev_kfree_skb(a) consume_skb(a)
1212
1213 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1214 int offset, size_t size);
1215
1216 struct skb_seq_state {
1217 __u32 lower_offset;
1218 __u32 upper_offset;
1219 __u32 frag_idx;
1220 __u32 stepped_offset;
1221 struct sk_buff *root_skb;
1222 struct sk_buff *cur_skb;
1223 __u8 *frag_data;
1224 __u32 frag_off;
1225 };
1226
1227 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1228 unsigned int to, struct skb_seq_state *st);
1229 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1230 struct skb_seq_state *st);
1231 void skb_abort_seq_read(struct skb_seq_state *st);
1232
1233 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1234 unsigned int to, struct ts_config *config);
1235
1236 /*
1237 * Packet hash types specify the type of hash in skb_set_hash.
1238 *
1239 * Hash types refer to the protocol layer addresses which are used to
1240 * construct a packet's hash. The hashes are used to differentiate or identify
1241 * flows of the protocol layer for the hash type. Hash types are either
1242 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
1243 *
1244 * Properties of hashes:
1245 *
1246 * 1) Two packets in different flows have different hash values
1247 * 2) Two packets in the same flow should have the same hash value
1248 *
1249 * A hash at a higher layer is considered to be more specific. A driver should
1250 * set the most specific hash possible.
1251 *
1252 * A driver cannot indicate a more specific hash than the layer at which a hash
1253 * was computed. For instance an L3 hash cannot be set as an L4 hash.
1254 *
1255 * A driver may indicate a hash level which is less specific than the
1256 * actual layer the hash was computed on. For instance, a hash computed
1257 * at L4 may be considered an L3 hash. This should only be done if the
1258 * driver can't unambiguously determine that the HW computed the hash at
1259 * the higher layer. Note that the "should" in the second property above
1260 * permits this.
1261 */
1262 enum pkt_hash_types {
1263 PKT_HASH_TYPE_NONE, /* Undefined type */
1264 PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
1265 PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
1266 PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
1267 };
1268
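/* Illustrative driver use (rx_desc and its fields are hypothetical): report
 * a hardware-computed 4-tuple hash to the stack.
 *
 *	if (rx_desc->rss_valid)
 *		skb_set_hash(skb, le32_to_cpu(rx_desc->rss_hash),
 *			     PKT_HASH_TYPE_L4);
 */
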
1269 static inline void skb_clear_hash(struct sk_buff *skb)
1270 {
1271 skb->hash = 0;
1272 skb->sw_hash = 0;
1273 skb->l4_hash = 0;
1274 }
1275
1276 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1277 {
1278 if (!skb->l4_hash)
1279 skb_clear_hash(skb);
1280 }
1281
1282 static inline void
1283 __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1284 {
1285 skb->l4_hash = is_l4;
1286 skb->sw_hash = is_sw;
1287 skb->hash = hash;
1288 }
1289
1290 static inline void
1291 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1292 {
1293 /* Used by drivers to set hash from HW */
1294 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1295 }
1296
1297 static inline void
1298 __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1299 {
1300 __skb_set_hash(skb, hash, true, is_l4);
1301 }
1302
1303 void __skb_get_hash(struct sk_buff *skb);
1304 u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
1305 u32 skb_get_poff(const struct sk_buff *skb);
1306 u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
1307 const struct flow_keys_basic *keys, int hlen);
1308 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1309 const void *data, int hlen_proto);
1310
1311 static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1312 int thoff, u8 ip_proto)
1313 {
1314 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1315 }
1316
1317 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1318 const struct flow_dissector_key *key,
1319 unsigned int key_count);
1320
1321 struct bpf_flow_dissector;
1322 bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
1323 __be16 proto, int nhoff, int hlen, unsigned int flags);
1324
1325 bool __skb_flow_dissect(const struct net *net,
1326 const struct sk_buff *skb,
1327 struct flow_dissector *flow_dissector,
1328 void *target_container, const void *data,
1329 __be16 proto, int nhoff, int hlen, unsigned int flags);
1330
1331 static inline bool skb_flow_dissect(const struct sk_buff *skb,
1332 struct flow_dissector *flow_dissector,
1333 void *target_container, unsigned int flags)
1334 {
1335 return __skb_flow_dissect(NULL, skb, flow_dissector,
1336 target_container, NULL, 0, 0, 0, flags);
1337 }
1338
1339 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1340 struct flow_keys *flow,
1341 unsigned int flags)
1342 {
1343 memset(flow, 0, sizeof(*flow));
1344 return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
1345 flow, NULL, 0, 0, 0, flags);
1346 }
1347
1348 static inline bool
1349 skb_flow_dissect_flow_keys_basic(const struct net *net,
1350 const struct sk_buff *skb,
1351 struct flow_keys_basic *flow,
1352 const void *data, __be16 proto,
1353 int nhoff, int hlen, unsigned int flags)
1354 {
1355 memset(flow, 0, sizeof(*flow));
1356 return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
1357 data, proto, nhoff, hlen, flags);
1358 }
1359
1360 void skb_flow_dissect_meta(const struct sk_buff *skb,
1361 struct flow_dissector *flow_dissector,
1362 void *target_container);
1363
1364 /* Gets the skb's connection tracking info; the ctinfo map should be a
1365 * map of mapsize entries that translates enum ip_conntrack_info states
1366 * to user states.
1367 */
1368 void
1369 skb_flow_dissect_ct(const struct sk_buff *skb,
1370 struct flow_dissector *flow_dissector,
1371 void *target_container,
1372 u16 *ctinfo_map, size_t mapsize,
1373 bool post_ct);
1374 void
1375 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
1376 struct flow_dissector *flow_dissector,
1377 void *target_container);
1378
1379 void skb_flow_dissect_hash(const struct sk_buff *skb,
1380 struct flow_dissector *flow_dissector,
1381 void *target_container);
1382
1383 static inline __u32 skb_get_hash(struct sk_buff *skb)
1384 {
1385 if (!skb->l4_hash && !skb->sw_hash)
1386 __skb_get_hash(skb);
1387
1388 return skb->hash;
1389 }
1390
1391 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1392 {
1393 if (!skb->l4_hash && !skb->sw_hash) {
1394 struct flow_keys keys;
1395 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1396
1397 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1398 }
1399
1400 return skb->hash;
1401 }
1402
1403 __u32 skb_get_hash_perturb(const struct sk_buff *skb,
1404 const siphash_key_t *perturb);
1405
1406 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1407 {
1408 return skb->hash;
1409 }
1410
1411 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1412 {
1413 to->hash = from->hash;
1414 to->sw_hash = from->sw_hash;
1415 to->l4_hash = from->l4_hash;
1416 };
1417
1418 static inline void skb_copy_decrypted(struct sk_buff *to,
1419 const struct sk_buff *from)
1420 {
1421 #ifdef CONFIG_TLS_DEVICE
1422 to->decrypted = from->decrypted;
1423 #endif
1424 }
1425
1426 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1427 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1428 {
1429 return skb->head + skb->end;
1430 }
1431
1432 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1433 {
1434 return skb->end;
1435 }
1436 #else
1437 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1438 {
1439 return skb->end;
1440 }
1441
1442 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1443 {
1444 return skb->end - skb->head;
1445 }
1446 #endif
1447
1448 /* Internal */
1449 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1450
1451 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1452 {
1453 return &skb_shinfo(skb)->hwtstamps;
1454 }
1455
1456 static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
1457 {
1458 bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;
1459
1460 return is_zcopy ? skb_uarg(skb) : NULL;
1461 }
1462
1463 static inline void net_zcopy_get(struct ubuf_info *uarg)
1464 {
1465 refcount_inc(&uarg->refcnt);
1466 }
1467
1468 static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
1469 {
1470 skb_shinfo(skb)->destructor_arg = uarg;
1471 skb_shinfo(skb)->flags |= uarg->flags;
1472 }
1473
1474 static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
1475 bool *have_ref)
1476 {
1477 if (skb && uarg && !skb_zcopy(skb)) {
1478 if (unlikely(have_ref && *have_ref))
1479 *have_ref = false;
1480 else
1481 net_zcopy_get(uarg);
1482 skb_zcopy_init(skb, uarg);
1483 }
1484 }
1485
1486 static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
1487 {
1488 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
1489 skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
1490 }
1491
1492 static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
1493 {
1494 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
1495 }
1496
1497 static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
1498 {
1499 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
1500 }
1501
1502 static inline void net_zcopy_put(struct ubuf_info *uarg)
1503 {
1504 if (uarg)
1505 uarg->callback(NULL, uarg, true);
1506 }
1507
1508 static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
1509 {
1510 if (uarg) {
1511 if (uarg->callback == msg_zerocopy_callback)
1512 msg_zerocopy_put_abort(uarg, have_uref);
1513 else if (have_uref)
1514 net_zcopy_put(uarg);
1515 }
1516 }
1517
1518 /* Release a reference on a zerocopy structure */
1519 static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
1520 {
1521 struct ubuf_info *uarg = skb_zcopy(skb);
1522
1523 if (uarg) {
1524 if (!skb_zcopy_is_nouarg(skb))
1525 uarg->callback(skb, uarg, zerocopy_success);
1526
1527 skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG;
1528 }
1529 }
1530
1531 static inline void skb_mark_not_on_list(struct sk_buff *skb)
1532 {
1533 skb->next = NULL;
1534 }
1535
1536 /* Iterate through singly-linked GSO fragments of an skb. */
1537 #define skb_list_walk_safe(first, skb, next_skb) \
1538 for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
1539 (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
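/* Illustrative use (a sketch; 'segs' is assumed to be the head of a segment
 * list, e.g. as returned by skb_segment(), and drv_queue_one()/'priv' are
 * hypothetical driver-side names): walk the list safely while detaching and
 * handing off each segment.
 *
 *	struct sk_buff *seg, *next;
 *
 *	skb_list_walk_safe(segs, seg, next) {
 *		skb_mark_not_on_list(seg);
 *		drv_queue_one(priv, seg);
 *	}
 */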
1540
1541 static inline void skb_list_del_init(struct sk_buff *skb)
1542 {
1543 __list_del_entry(&skb->list);
1544 skb_mark_not_on_list(skb);
1545 }
1546
1547 /**
1548 * skb_queue_empty - check if a queue is empty
1549 * @list: queue head
1550 *
1551 * Returns true if the queue is empty, false otherwise.
1552 */
1553 static inline int skb_queue_empty(const struct sk_buff_head *list)
1554 {
1555 return list->next == (const struct sk_buff *) list;
1556 }
1557
1558 /**
1559 * skb_queue_empty_lockless - check if a queue is empty
1560 * @list: queue head
1561 *
1562 * Returns true if the queue is empty, false otherwise.
1563 * This variant can be used in lockless contexts.
1564 */
1565 static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
1566 {
1567 return READ_ONCE(list->next) == (const struct sk_buff *) list;
1568 }
1569
1570
1571 /**
1572 * skb_queue_is_last - check if skb is the last entry in the queue
1573 * @list: queue head
1574 * @skb: buffer
1575 *
1576 * Returns true if @skb is the last buffer on the list.
1577 */
1578 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1579 const struct sk_buff *skb)
1580 {
1581 return skb->next == (const struct sk_buff *) list;
1582 }
1583
1584 /**
1585 * skb_queue_is_first - check if skb is the first entry in the queue
1586 * @list: queue head
1587 * @skb: buffer
1588 *
1589 * Returns true if @skb is the first buffer on the list.
1590 */
1591 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1592 const struct sk_buff *skb)
1593 {
1594 return skb->prev == (const struct sk_buff *) list;
1595 }
1596
1597 /**
1598 * skb_queue_next - return the next packet in the queue
1599 * @list: queue head
1600 * @skb: current buffer
1601 *
1602 * Return the next packet in @list after @skb. It is only valid to
1603 * call this if skb_queue_is_last() evaluates to false.
1604 */
1605 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1606 const struct sk_buff *skb)
1607 {
1608 /* This BUG_ON may seem severe, but if we just return then we
1609 * are going to dereference garbage.
1610 */
1611 BUG_ON(skb_queue_is_last(list, skb));
1612 return skb->next;
1613 }
1614
1615 /**
1616 * skb_queue_prev - return the prev packet in the queue
1617 * @list: queue head
1618 * @skb: current buffer
1619 *
1620 * Return the prev packet in @list before @skb. It is only valid to
1621 * call this if skb_queue_is_first() evaluates to false.
1622 */
1623 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1624 const struct sk_buff *skb)
1625 {
1626 /* This BUG_ON may seem severe, but if we just return then we
1627 * are going to dereference garbage.
1628 */
1629 BUG_ON(skb_queue_is_first(list, skb));
1630 return skb->prev;
1631 }
1632
1633 /**
1634 * skb_get - reference buffer
1635 * @skb: buffer to reference
1636 *
1637 * Makes another reference to a socket buffer and returns a pointer
1638 * to the buffer.
1639 */
1640 static inline struct sk_buff *skb_get(struct sk_buff *skb)
1641 {
1642 refcount_inc(&skb->users);
1643 return skb;
1644 }
1645
1646 /*
1647 * If users == 1, we are the only owner and can avoid redundant atomic changes.
1648 */
1649
1650 /**
1651 * skb_cloned - is the buffer a clone
1652 * @skb: buffer to check
1653 *
1654 * Returns true if the buffer was generated with skb_clone() and is
1655 * one of multiple shared copies of the buffer. Cloned buffers are
1656 * shared data so must not be written to under normal circumstances.
1657 */
1658 static inline int skb_cloned(const struct sk_buff *skb)
1659 {
1660 return skb->cloned &&
1661 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1662 }
1663
1664 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1665 {
1666 might_sleep_if(gfpflags_allow_blocking(pri));
1667
1668 if (skb_cloned(skb))
1669 return pskb_expand_head(skb, 0, 0, pri);
1670
1671 return 0;
1672 }
1673
1674 /**
1675 * skb_header_cloned - is the header a clone
1676 * @skb: buffer to check
1677 *
1678 * Returns true if modifying the header part of the buffer requires
1679 * the data to be copied.
1680 */
1681 static inline int skb_header_cloned(const struct sk_buff *skb)
1682 {
1683 int dataref;
1684
1685 if (!skb->cloned)
1686 return 0;
1687
1688 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1689 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1690 return dataref != 1;
1691 }
1692
1693 static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1694 {
1695 might_sleep_if(gfpflags_allow_blocking(pri));
1696
1697 if (skb_header_cloned(skb))
1698 return pskb_expand_head(skb, 0, 0, pri);
1699
1700 return 0;
1701 }
1702
1703 /**
1704 * __skb_header_release - release reference to header
1705 * @skb: buffer to operate on
1706 */
1707 static inline void __skb_header_release(struct sk_buff *skb)
1708 {
1709 skb->nohdr = 1;
1710 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1711 }
1712
1713
1714 /**
1715 * skb_shared - is the buffer shared
1716 * @skb: buffer to check
1717 *
1718 * Returns true if more than one person has a reference to this
1719 * buffer.
1720 */
1721 static inline int skb_shared(const struct sk_buff *skb)
1722 {
1723 return refcount_read(&skb->users) != 1;
1724 }
1725
1726 /**
1727 * skb_share_check - check if buffer is shared and if so clone it
1728 * @skb: buffer to check
1729 * @pri: priority for memory allocation
1730 *
1731 * If the buffer is shared the buffer is cloned and the old copy
1732 * drops a reference. A new clone with a single reference is returned.
1733 * If the buffer is not shared the original buffer is returned. When
1734 * called from interrupt context or with spinlocks held, @pri must
1735 * be %GFP_ATOMIC.
1736 *
1737 * NULL is returned on a memory allocation failure.
1738 */
1739 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1740 {
1741 might_sleep_if(gfpflags_allow_blocking(pri));
1742 if (skb_shared(skb)) {
1743 struct sk_buff *nskb = skb_clone(skb, pri);
1744
1745 if (likely(nskb))
1746 consume_skb(skb);
1747 else
1748 kfree_skb(skb);
1749 skb = nskb;
1750 }
1751 return skb;
1752 }
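
/* Typical use, sketched only: an rx handler that may modify the buffer first
 * makes sure it holds the only reference. On clone failure the original has
 * already been freed, so the caller simply stops processing.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return RX_HANDLER_CONSUMED;
 *
 * RX_HANDLER_CONSUMED is just one possible caller convention; any "packet is
 * gone" return path works the same way.
 */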
1753
1754 /*
1755 * Copy shared buffers into a new sk_buff. We effectively do COW on
1756 * packets to handle cases where we have both a local reader and a
1757 * forwarding path, plus a couple of other messy ones. The normal one is
1758 * tcpdumping a packet that's being forwarded.
1759 */
1760
1761 /**
1762 * skb_unshare - make a copy of a shared buffer
1763 * @skb: buffer to check
1764 * @pri: priority for memory allocation
1765 *
1766 * If the socket buffer is a clone then this function creates a new
1767 * copy of the data, drops a reference count on the old copy and returns
1768 * the new copy with the reference count at 1. If the buffer is not a clone
1769 * the original buffer is returned. When called with a spinlock held or
1770 * from interrupt context, @pri must be %GFP_ATOMIC.
1771 *
1772 * %NULL is returned on a memory allocation failure.
1773 */
1774 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1775 gfp_t pri)
1776 {
1777 might_sleep_if(gfpflags_allow_blocking(pri));
1778 if (skb_cloned(skb)) {
1779 struct sk_buff *nskb = skb_copy(skb, pri);
1780
1781 /* Free our shared copy */
1782 if (likely(nskb))
1783 consume_skb(skb);
1784 else
1785 kfree_skb(skb);
1786 skb = nskb;
1787 }
1788 return skb;
1789 }
1790
1791 /**
1792 * skb_peek - peek at the head of an &sk_buff_head
1793 * @list_: list to peek at
1794 *
1795 * Peek an &sk_buff. Unlike most other operations you _MUST_
1796 * be careful with this one. A peek leaves the buffer on the
1797 * list and someone else may run off with it. You must hold
1798 * the appropriate locks or have a private queue to do this.
1799 *
1800 * Returns %NULL for an empty list or a pointer to the head element.
1801 * The reference count is not incremented and the reference is therefore
1802 * volatile. Use with caution.
1803 */
1804 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1805 {
1806 struct sk_buff *skb = list_->next;
1807
1808 if (skb == (struct sk_buff *)list_)
1809 skb = NULL;
1810 return skb;
1811 }
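
/* Sketch of safe usage (illustrative only): peek under the queue lock so the
 * buffer cannot be unlinked while it is being examined. "queue" is a
 * hypothetical &sk_buff_head owned by the caller.
 *
 *	unsigned int len = 0;
 *	unsigned long flags;
 *	struct sk_buff *skb;
 *
 *	spin_lock_irqsave(&queue->lock, flags);
 *	skb = skb_peek(queue);
 *	if (skb)
 *		len = skb->len;
 *	spin_unlock_irqrestore(&queue->lock, flags);
 */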
1812
1813 /**
1814 * __skb_peek - peek at the head of a non-empty &sk_buff_head
1815 * @list_: list to peek at
1816 *
1817 * Like skb_peek(), but the caller knows that the list is not empty.
1818 */
1819 static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
1820 {
1821 return list_->next;
1822 }
1823
1824 /**
1825 * skb_peek_next - peek skb following the given one from a queue
1826 * @skb: skb to start from
1827 * @list_: list to peek at
1828 *
1829 * Returns %NULL when the end of the list is met or a pointer to the
1830 * next element. The reference count is not incremented and the
1831 * reference is therefore volatile. Use with caution.
1832 */
1833 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1834 const struct sk_buff_head *list_)
1835 {
1836 struct sk_buff *next = skb->next;
1837
1838 if (next == (struct sk_buff *)list_)
1839 next = NULL;
1840 return next;
1841 }
1842
1843 /**
1844 * skb_peek_tail - peek at the tail of an &sk_buff_head
1845 * @list_: list to peek at
1846 *
1847 * Peek an &sk_buff. Unlike most other operations you _MUST_
1848 * be careful with this one. A peek leaves the buffer on the
1849 * list and someone else may run off with it. You must hold
1850 * the appropriate locks or have a private queue to do this.
1851 *
1852 * Returns %NULL for an empty list or a pointer to the tail element.
1853 * The reference count is not incremented and the reference is therefore
1854 * volatile. Use with caution.
1855 */
1856 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1857 {
1858 struct sk_buff *skb = READ_ONCE(list_->prev);
1859
1860 if (skb == (struct sk_buff *)list_)
1861 skb = NULL;
1862 return skb;
1863
1864 }
1865
1866 /**
1867 * skb_queue_len - get queue length
1868 * @list_: list to measure
1869 *
1870 * Return the length of an &sk_buff queue.
1871 */
1872 static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1873 {
1874 return list_->qlen;
1875 }
1876
1877 /**
1878 * skb_queue_len_lockless - get queue length
1879 * @list_: list to measure
1880 *
1881 * Return the length of an &sk_buff queue.
1882 * This variant can be used in lockless contexts.
1883 */
1884 static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
1885 {
1886 return READ_ONCE(list_->qlen);
1887 }
1888
1889 /**
1890 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
1891 * @list: queue to initialize
1892 *
1893 * This initializes only the list and queue length aspects of
1894 * an sk_buff_head object. This allows initializing the list
1895 * aspects of an sk_buff_head without reinitializing things like
1896 * the spinlock. It can also be used for on-stack sk_buff_head
1897 * objects where the spinlock is known to not be used.
1898 */
1899 static inline void __skb_queue_head_init(struct sk_buff_head *list)
1900 {
1901 list->prev = list->next = (struct sk_buff *)list;
1902 list->qlen = 0;
1903 }
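
/* Sketch (assumed driver-private names): an on-stack queue initialised with
 * __skb_queue_head_init() collects buffers under the owner's lock and is
 * then drained without holding the spinlock.
 *
 *	struct sk_buff_head tmp;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&tmp);
 *	spin_lock(&priv->lock);
 *	skb_queue_splice_init(&priv->backlog, &tmp);
 *	spin_unlock(&priv->lock);
 *	while ((skb = __skb_dequeue(&tmp)) != NULL)
 *		consume_skb(skb);
 */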
1904
1905 /*
1906 * This function creates a split out lock class for each invocation;
1907 * this is needed for now since a whole lot of users of the skb-queue
1908 * infrastructure in drivers have different locking usage (in hardirq)
1909 * than the networking core (in softirq only). In the long run either the
1910 * network layer or drivers will need annotation to consolidate the
1911 * main types of usage into 3 classes.
1912 */
1913 static inline void skb_queue_head_init(struct sk_buff_head *list)
1914 {
1915 spin_lock_init(&list->lock);
1916 __skb_queue_head_init(list);
1917 }
1918
1919 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1920 struct lock_class_key *class)
1921 {
1922 skb_queue_head_init(list);
1923 lockdep_set_class(&list->lock, class);
1924 }
1925
1926 /*
1927 * Insert an sk_buff on a list.
1928 *
1929 * The "__skb_xxxx()" functions are the non-atomic ones that
1930 * can only be called with interrupts disabled.
1931 */
1932 static inline void __skb_insert(struct sk_buff *newsk,
1933 struct sk_buff *prev, struct sk_buff *next,
1934 struct sk_buff_head *list)
1935 {
1936 /* See skb_queue_empty_lockless() and skb_peek_tail()
1937 * for the opposite READ_ONCE()
1938 */
1939 WRITE_ONCE(newsk->next, next);
1940 WRITE_ONCE(newsk->prev, prev);
1941 WRITE_ONCE(next->prev, newsk);
1942 WRITE_ONCE(prev->next, newsk);
1943 WRITE_ONCE(list->qlen, list->qlen + 1);
1944 }
1945
1946 static inline void __skb_queue_splice(const struct sk_buff_head *list,
1947 struct sk_buff *prev,
1948 struct sk_buff *next)
1949 {
1950 struct sk_buff *first = list->next;
1951 struct sk_buff *last = list->prev;
1952
1953 WRITE_ONCE(first->prev, prev);
1954 WRITE_ONCE(prev->next, first);
1955
1956 WRITE_ONCE(last->next, next);
1957 WRITE_ONCE(next->prev, last);
1958 }
1959
1960 /**
1961 * skb_queue_splice - join two skb lists; this is designed for stacks
1962 * @list: the new list to add
1963 * @head: the place to add it in the first list
1964 */
1965 static inline void skb_queue_splice(const struct sk_buff_head *list,
1966 struct sk_buff_head *head)
1967 {
1968 if (!skb_queue_empty(list)) {
1969 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1970 head->qlen += list->qlen;
1971 }
1972 }
1973
1974 /**
1975 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
1976 * @list: the new list to add
1977 * @head: the place to add it in the first list
1978 *
1979 * The list at @list is reinitialised
1980 */
1981 static inline void skb_queue_splice_init(struct sk_buff_head *list,
1982 struct sk_buff_head *head)
1983 {
1984 if (!skb_queue_empty(list)) {
1985 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1986 head->qlen += list->qlen;
1987 __skb_queue_head_init(list);
1988 }
1989 }
1990
1991 /**
1992 * skb_queue_splice_tail - join two skb lists, each list being a queue
1993 * @list: the new list to add
1994 * @head: the place to add it in the first list
1995 */
1996 static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1997 struct sk_buff_head *head)
1998 {
1999 if (!skb_queue_empty(list)) {
2000 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
2001 head->qlen += list->qlen;
2002 }
2003 }
2004
2005 /**
2006 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
2007 * @list: the new list to add
2008 * @head: the place to add it in the first list
2009 *
2010 * Each of the lists is a queue.
2011 * The list at @list is reinitialised
2012 */
2013 static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
2014 struct sk_buff_head *head)
2015 {
2016 if (!skb_queue_empty(list)) {
2017 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
2018 head->qlen += list->qlen;
2019 __skb_queue_head_init(list);
2020 }
2021 }
2022
2023 /**
2024 * __skb_queue_after - queue a buffer after the given buffer
2025 * @list: list to use
2026 * @prev: place after this buffer
2027 * @newsk: buffer to queue
2028 *
2029 * Queue a buffer in the middle of a list. This function takes no locks
2030 * and you must therefore hold required locks before calling it.
2031 *
2032 * A buffer cannot be placed on two lists at the same time.
2033 */
2034 static inline void __skb_queue_after(struct sk_buff_head *list,
2035 struct sk_buff *prev,
2036 struct sk_buff *newsk)
2037 {
2038 __skb_insert(newsk, prev, prev->next, list);
2039 }
2040
2041 void skb_append(struct sk_buff *old, struct sk_buff *newsk,
2042 struct sk_buff_head *list);
2043
2044 static inline void __skb_queue_before(struct sk_buff_head *list,
2045 struct sk_buff *next,
2046 struct sk_buff *newsk)
2047 {
2048 __skb_insert(newsk, next->prev, next, list);
2049 }
2050
2051 /**
2052 * __skb_queue_head - queue a buffer at the list head
2053 * @list: list to use
2054 * @newsk: buffer to queue
2055 *
2056 * Queue a buffer at the start of a list. This function takes no locks
2057 * and you must therefore hold required locks before calling it.
2058 *
2059 * A buffer cannot be placed on two lists at the same time.
2060 */
2061 static inline void __skb_queue_head(struct sk_buff_head *list,
2062 struct sk_buff *newsk)
2063 {
2064 __skb_queue_after(list, (struct sk_buff *)list, newsk);
2065 }
2066 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
2067
2068 /**
2069 * __skb_queue_tail - queue a buffer at the list tail
2070 * @list: list to use
2071 * @newsk: buffer to queue
2072 *
2073 * Queue a buffer at the end of a list. This function takes no locks
2074 * and you must therefore hold required locks before calling it.
2075 *
2076 * A buffer cannot be placed on two lists at the same time.
2077 */
2078 static inline void __skb_queue_tail(struct sk_buff_head *list,
2079 struct sk_buff *newsk)
2080 {
2081 __skb_queue_before(list, (struct sk_buff *)list, newsk);
2082 }
2083 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
2084
2085 /*
2086 * remove sk_buff from list. _Must_ be called atomically, and with
2087 * the list known.
2088 */
2089 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
2090 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2091 {
2092 struct sk_buff *next, *prev;
2093
2094 WRITE_ONCE(list->qlen, list->qlen - 1);
2095 next = skb->next;
2096 prev = skb->prev;
2097 skb->next = skb->prev = NULL;
2098 WRITE_ONCE(next->prev, prev);
2099 WRITE_ONCE(prev->next, next);
2100 }
2101
2102 /**
2103 * __skb_dequeue - remove from the head of the queue
2104 * @list: list to dequeue from
2105 *
2106 * Remove the head of the list. This function does not take any locks
2107 * so must be used with appropriate locks held only. The head item is
2108 * returned or %NULL if the list is empty.
2109 */
2110 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
2111 {
2112 struct sk_buff *skb = skb_peek(list);
2113 if (skb)
2114 __skb_unlink(skb, list);
2115 return skb;
2116 }
2117 struct sk_buff *skb_dequeue(struct sk_buff_head *list);
2118
2119 /**
2120 * __skb_dequeue_tail - remove from the tail of the queue
2121 * @list: list to dequeue from
2122 *
2123 * Remove the tail of the list. This function does not take any locks
2124 * so must be used with appropriate locks held only. The tail item is
2125 * returned or %NULL if the list is empty.
2126 */
2127 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
2128 {
2129 struct sk_buff *skb = skb_peek_tail(list);
2130 if (skb)
2131 __skb_unlink(skb, list);
2132 return skb;
2133 }
2134 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
2135
2136
2137 static inline bool skb_is_nonlinear(const struct sk_buff *skb)
2138 {
2139 return skb->data_len;
2140 }
2141
2142 static inline unsigned int skb_headlen(const struct sk_buff *skb)
2143 {
2144 return skb->len - skb->data_len;
2145 }
2146
2147 static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
2148 {
2149 unsigned int i, len = 0;
2150
2151 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
2152 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2153 return len;
2154 }
2155
2156 static inline unsigned int skb_pagelen(const struct sk_buff *skb)
2157 {
2158 return skb_headlen(skb) + __skb_pagelen(skb);
2159 }
2160
2161 /**
2162 * __skb_fill_page_desc - initialise a paged fragment in an skb
2163 * @skb: buffer containing fragment to be initialised
2164 * @i: paged fragment index to initialise
2165 * @page: the page to use for this fragment
2166 * @off: the offset of the data within @page
2167 * @size: the length of the data
2168 *
2169 * Initialises the @i'th fragment of @skb to point to @size bytes at
2170 * offset @off within @page.
2171 *
2172 * Does not take any additional reference on the fragment.
2173 */
2174 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
2175 struct page *page, int off, int size)
2176 {
2177 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2178
2179 /*
2180 * Propagate page pfmemalloc to the skb if we can. The problem is
2181 * that not all callers have unique ownership of the page but rely
2182 * on page_is_pfmemalloc doing the right thing(tm).
2183 */
2184 frag->bv_page = page;
2185 frag->bv_offset = off;
2186 skb_frag_size_set(frag, size);
2187
2188 page = compound_head(page);
2189 if (page_is_pfmemalloc(page))
2190 skb->pfmemalloc = true;
2191 }
2192
2193 /**
2194 * skb_fill_page_desc - initialise a paged fragment in an skb
2195 * @skb: buffer containing fragment to be initialised
2196 * @i: paged fragment index to initialise
2197 * @page: the page to use for this fragment
2198 * @off: the offset of the data within @page
2199 * @size: the length of the data
2200 *
2201 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
2202 * @skb to point to @size bytes at offset @off within @page. In
2203 * addition updates @skb such that @i is the last fragment.
2204 *
2205 * Does not take any additional reference on the fragment.
2206 */
2207 static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
2208 struct page *page, int off, int size)
2209 {
2210 __skb_fill_page_desc(skb, i, page, off, size);
2211 skb_shinfo(skb)->nr_frags = i + 1;
2212 }
2213
2214 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
2215 int size, unsigned int truesize);
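
/* Illustrative sketch only: attaching a received page fragment in a driver
 * rx path. Unlike the lower-level skb_fill_page_desc(), skb_add_rx_frag()
 * also updates skb->len, skb->data_len and skb->truesize. "page", "offset"
 * and "frag_len" are hypothetical values describing where the hardware
 * wrote the data.
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *			offset, frag_len, PAGE_SIZE);
 */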
2216
2217 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
2218 unsigned int truesize);
2219
2220 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
2221
2222 #ifdef NET_SKBUFF_DATA_USES_OFFSET
2223 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2224 {
2225 return skb->head + skb->tail;
2226 }
2227
2228 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2229 {
2230 skb->tail = skb->data - skb->head;
2231 }
2232
2233 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2234 {
2235 skb_reset_tail_pointer(skb);
2236 skb->tail += offset;
2237 }
2238
2239 #else /* NET_SKBUFF_DATA_USES_OFFSET */
2240 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2241 {
2242 return skb->tail;
2243 }
2244
2245 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2246 {
2247 skb->tail = skb->data;
2248 }
2249
2250 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2251 {
2252 skb->tail = skb->data + offset;
2253 }
2254
2255 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
2256
2257 /*
2258 * Add data to an sk_buff
2259 */
2260 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
2261 void *skb_put(struct sk_buff *skb, unsigned int len);
2262 static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
2263 {
2264 void *tmp = skb_tail_pointer(skb);
2265 SKB_LINEAR_ASSERT(skb);
2266 skb->tail += len;
2267 skb->len += len;
2268 return tmp;
2269 }
2270
2271 static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
2272 {
2273 void *tmp = __skb_put(skb, len);
2274
2275 memset(tmp, 0, len);
2276 return tmp;
2277 }
2278
2279 static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
2280 unsigned int len)
2281 {
2282 void *tmp = __skb_put(skb, len);
2283
2284 memcpy(tmp, data, len);
2285 return tmp;
2286 }
2287
2288 static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
2289 {
2290 *(u8 *)__skb_put(skb, 1) = val;
2291 }
2292
2293 static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
2294 {
2295 void *tmp = skb_put(skb, len);
2296
2297 memset(tmp, 0, len);
2298
2299 return tmp;
2300 }
2301
2302 static inline void *skb_put_data(struct sk_buff *skb, const void *data,
2303 unsigned int len)
2304 {
2305 void *tmp = skb_put(skb, len);
2306
2307 memcpy(tmp, data, len);
2308
2309 return tmp;
2310 }
2311
2312 static inline void skb_put_u8(struct sk_buff *skb, u8 val)
2313 {
2314 *(u8 *)skb_put(skb, 1) = val;
2315 }
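
/* A minimal construction sketch (error handling reduced to one check):
 * reserve headroom for the lower-layer headers, then append the payload.
 * "hlen", "dlen" and "payload" are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 *	skb_put_data(skb, payload, dlen);
 *
 * Protocol headers are later added in front of the data with skb_push().
 */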
2316
2317 void *skb_push(struct sk_buff *skb, unsigned int len);
2318 static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
2319 {
2320 skb->data -= len;
2321 skb->len += len;
2322 return skb->data;
2323 }
2324
2325 void *skb_pull(struct sk_buff *skb, unsigned int len);
2326 static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
2327 {
2328 skb->len -= len;
2329 BUG_ON(skb->len < skb->data_len);
2330 return skb->data += len;
2331 }
2332
2333 static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
2334 {
2335 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
2336 }
2337
2338 void *__pskb_pull_tail(struct sk_buff *skb, int delta);
2339
2340 static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
2341 {
2342 if (len > skb_headlen(skb) &&
2343 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
2344 return NULL;
2345 skb->len -= len;
2346 return skb->data += len;
2347 }
2348
2349 static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
2350 {
2351 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
2352 }
2353
2354 static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
2355 {
2356 if (likely(len <= skb_headlen(skb)))
2357 return true;
2358 if (unlikely(len > skb->len))
2359 return false;
2360 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
2361 }
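
/* Sketch of the usual pattern (not a complete parser), assuming an rx path
 * where skb->data and the network header both point at the IP header:
 *
 *	struct iphdr *iph;
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);
 *
 * pskb_may_pull() may reallocate the head, so any previously cached pointers
 * into skb->data must be reloaded once it returns true.
 */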
2362
2363 void skb_condense(struct sk_buff *skb);
2364
2365 /**
2366 * skb_headroom - bytes at buffer head
2367 * @skb: buffer to check
2368 *
2369 * Return the number of bytes of free space at the head of an &sk_buff.
2370 */
2371 static inline unsigned int skb_headroom(const struct sk_buff *skb)
2372 {
2373 return skb->data - skb->head;
2374 }
2375
2376 /**
2377 * skb_tailroom - bytes at buffer end
2378 * @skb: buffer to check
2379 *
2380 * Return the number of bytes of free space at the tail of an sk_buff
2381 */
2382 static inline int skb_tailroom(const struct sk_buff *skb)
2383 {
2384 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
2385 }
2386
2387 /**
2388 * skb_availroom - bytes at buffer end
2389 * @skb: buffer to check
2390 *
2391 * Return the number of bytes of free space at the tail of an sk_buff
2392 * allocated by sk_stream_alloc()
2393 */
2394 static inline int skb_availroom(const struct sk_buff *skb)
2395 {
2396 if (skb_is_nonlinear(skb))
2397 return 0;
2398
2399 return skb->end - skb->tail - skb->reserved_tailroom;
2400 }
2401
2402 /**
2403 * skb_reserve - adjust headroom
2404 * @skb: buffer to alter
2405 * @len: bytes to move
2406 *
2407 * Increase the headroom of an empty &sk_buff by reducing the tail
2408 * room. This is only allowed for an empty buffer.
2409 */
2410 static inline void skb_reserve(struct sk_buff *skb, int len)
2411 {
2412 skb->data += len;
2413 skb->tail += len;
2414 }
2415
2416 /**
2417 * skb_tailroom_reserve - adjust reserved_tailroom
2418 * @skb: buffer to alter
2419 * @mtu: maximum amount of headlen permitted
2420 * @needed_tailroom: minimum amount of reserved_tailroom
2421 *
2422 * Set reserved_tailroom so that headlen can be as large as possible but
2423 * not larger than mtu and tailroom cannot be smaller than
2424 * needed_tailroom.
2425 * The required headroom should already have been reserved before using
2426 * this function.
2427 */
2428 static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2429 unsigned int needed_tailroom)
2430 {
2431 SKB_LINEAR_ASSERT(skb);
2432 if (mtu < skb_tailroom(skb) - needed_tailroom)
2433 /* use at most mtu */
2434 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2435 else
2436 /* use up to all available space */
2437 skb->reserved_tailroom = needed_tailroom;
2438 }
2439
2440 #define ENCAP_TYPE_ETHER 0
2441 #define ENCAP_TYPE_IPPROTO 1
2442
2443 static inline void skb_set_inner_protocol(struct sk_buff *skb,
2444 __be16 protocol)
2445 {
2446 skb->inner_protocol = protocol;
2447 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2448 }
2449
2450 static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2451 __u8 ipproto)
2452 {
2453 skb->inner_ipproto = ipproto;
2454 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2455 }
2456
2457 static inline void skb_reset_inner_headers(struct sk_buff *skb)
2458 {
2459 skb->inner_mac_header = skb->mac_header;
2460 skb->inner_network_header = skb->network_header;
2461 skb->inner_transport_header = skb->transport_header;
2462 }
2463
2464 static inline void skb_reset_mac_len(struct sk_buff *skb)
2465 {
2466 skb->mac_len = skb->network_header - skb->mac_header;
2467 }
2468
2469 static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2470 *skb)
2471 {
2472 return skb->head + skb->inner_transport_header;
2473 }
2474
2475 static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2476 {
2477 return skb_inner_transport_header(skb) - skb->data;
2478 }
2479
2480 static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2481 {
2482 skb->inner_transport_header = skb->data - skb->head;
2483 }
2484
2485 static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2486 const int offset)
2487 {
2488 skb_reset_inner_transport_header(skb);
2489 skb->inner_transport_header += offset;
2490 }
2491
2492 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2493 {
2494 return skb->head + skb->inner_network_header;
2495 }
2496
2497 static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2498 {
2499 skb->inner_network_header = skb->data - skb->head;
2500 }
2501
2502 static inline void skb_set_inner_network_header(struct sk_buff *skb,
2503 const int offset)
2504 {
2505 skb_reset_inner_network_header(skb);
2506 skb->inner_network_header += offset;
2507 }
2508
2509 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2510 {
2511 return skb->head + skb->inner_mac_header;
2512 }
2513
2514 static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2515 {
2516 skb->inner_mac_header = skb->data - skb->head;
2517 }
2518
2519 static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2520 const int offset)
2521 {
2522 skb_reset_inner_mac_header(skb);
2523 skb->inner_mac_header += offset;
2524 }
2525 static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2526 {
2527 return skb->transport_header != (typeof(skb->transport_header))~0U;
2528 }
2529
2530 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2531 {
2532 return skb->head + skb->transport_header;
2533 }
2534
2535 static inline void skb_reset_transport_header(struct sk_buff *skb)
2536 {
2537 skb->transport_header = skb->data - skb->head;
2538 }
2539
2540 static inline void skb_set_transport_header(struct sk_buff *skb,
2541 const int offset)
2542 {
2543 skb_reset_transport_header(skb);
2544 skb->transport_header += offset;
2545 }
2546
2547 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2548 {
2549 return skb->head + skb->network_header;
2550 }
2551
2552 static inline void skb_reset_network_header(struct sk_buff *skb)
2553 {
2554 skb->network_header = skb->data - skb->head;
2555 }
2556
2557 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2558 {
2559 skb_reset_network_header(skb);
2560 skb->network_header += offset;
2561 }
2562
2563 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2564 {
2565 return skb->head + skb->mac_header;
2566 }
2567
2568 static inline int skb_mac_offset(const struct sk_buff *skb)
2569 {
2570 return skb_mac_header(skb) - skb->data;
2571 }
2572
2573 static inline u32 skb_mac_header_len(const struct sk_buff *skb)
2574 {
2575 return skb->network_header - skb->mac_header;
2576 }
2577
2578 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2579 {
2580 return skb->mac_header != (typeof(skb->mac_header))~0U;
2581 }
2582
2583 static inline void skb_unset_mac_header(struct sk_buff *skb)
2584 {
2585 skb->mac_header = (typeof(skb->mac_header))~0U;
2586 }
2587
2588 static inline void skb_reset_mac_header(struct sk_buff *skb)
2589 {
2590 skb->mac_header = skb->data - skb->head;
2591 }
2592
2593 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2594 {
2595 skb_reset_mac_header(skb);
2596 skb->mac_header += offset;
2597 }
2598
2599 static inline void skb_pop_mac_header(struct sk_buff *skb)
2600 {
2601 skb->mac_header = skb->network_header;
2602 }
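
/* Sketch (illustrative only, no length checks) of how the offsets above are
 * typically recorded while parsing an Ethernet/IPv4 frame from the front:
 *
 *	skb_reset_mac_header(skb);
 *	skb_pull(skb, ETH_HLEN);
 *	skb_reset_network_header(skb);
 *	skb_pull(skb, ip_hdr(skb)->ihl * 4);
 *	skb_reset_transport_header(skb);
 *
 * Each reset records the current skb->data position as the corresponding
 * header offset, so the accessors keep working after further pulls.
 */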
2603
2604 static inline void skb_probe_transport_header(struct sk_buff *skb)
2605 {
2606 struct flow_keys_basic keys;
2607
2608 if (skb_transport_header_was_set(skb))
2609 return;
2610
2611 if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
2612 NULL, 0, 0, 0, 0))
2613 skb_set_transport_header(skb, keys.control.thoff);
2614 }
2615
2616 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2617 {
2618 if (skb_mac_header_was_set(skb)) {
2619 const unsigned char *old_mac = skb_mac_header(skb);
2620
2621 skb_set_mac_header(skb, -skb->mac_len);
2622 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2623 }
2624 }
2625
2626 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2627 {
2628 return skb->csum_start - skb_headroom(skb);
2629 }
2630
2631 static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
2632 {
2633 return skb->head + skb->csum_start;
2634 }
2635
2636 static inline int skb_transport_offset(const struct sk_buff *skb)
2637 {
2638 return skb_transport_header(skb) - skb->data;
2639 }
2640
2641 static inline u32 skb_network_header_len(const struct sk_buff *skb)
2642 {
2643 return skb->transport_header - skb->network_header;
2644 }
2645
2646 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
2647 {
2648 return skb->inner_transport_header - skb->inner_network_header;
2649 }
2650
2651 static inline int skb_network_offset(const struct sk_buff *skb)
2652 {
2653 return skb_network_header(skb) - skb->data;
2654 }
2655
2656 static inline int skb_inner_network_offset(const struct sk_buff *skb)
2657 {
2658 return skb_inner_network_header(skb) - skb->data;
2659 }
2660
2661 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2662 {
2663 return pskb_may_pull(skb, skb_network_offset(skb) + len);
2664 }
2665
2666 /*
2667 * CPUs often take a performance hit when accessing unaligned memory
2668 * locations. The actual performance hit varies, it can be small if the
2669 * hardware handles it or large if we have to take an exception and fix it
2670 * in software.
2671 *
2672 * Since an ethernet header is 14 bytes network drivers often end up with
2673 * the IP header at an unaligned offset. The IP header can be aligned by
2674 * shifting the start of the packet by 2 bytes. Drivers should do this
2675 * with:
2676 *
2677 * skb_reserve(skb, NET_IP_ALIGN);
2678 *
2679 * The downside to this alignment of the IP header is that the DMA is now
2680 * unaligned. On some architectures the cost of an unaligned DMA is high
2681 * and this cost outweighs the gains made by aligning the IP header.
2682 *
2683 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
2684 * to be overridden.
2685 */
2686 #ifndef NET_IP_ALIGN
2687 #define NET_IP_ALIGN 2
2688 #endif
2689
2690 /*
2691 * The networking layer reserves some headroom in skb data (via
2692 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
2693 * the header has to grow. In the default case, if the header has to grow
2694 * 32 bytes or less we avoid the reallocation.
2695 *
2696 * Unfortunately this headroom changes the DMA alignment of the resulting
2697 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
2698 * on some architectures. An architecture can override this value,
2699 * perhaps setting it to a cacheline in size (since that will maintain
2700 * cacheline alignment of the DMA). It must be a power of 2.
2701 *
2702 * Various parts of the networking layer expect at least 32 bytes of
2703 * headroom; you should not reduce this.
2704 *
2705 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
2706 * to reduce the average number of cache lines per packet.
2707 * get_rps_cpu(), for example, accesses only one 64-byte aligned block:
2708 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
2709 */
2710 #ifndef NET_SKB_PAD
2711 #define NET_SKB_PAD max(32, L1_CACHE_BYTES)
2712 #endif
2713
2714 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2715
2716 static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
2717 {
2718 if (WARN_ON(skb_is_nonlinear(skb)))
2719 return;
2720 skb->len = len;
2721 skb_set_tail_pointer(skb, len);
2722 }
2723
2724 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2725 {
2726 __skb_set_length(skb, len);
2727 }
2728
2729 void skb_trim(struct sk_buff *skb, unsigned int len);
2730
2731 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
2732 {
2733 if (skb->data_len)
2734 return ___pskb_trim(skb, len);
2735 __skb_trim(skb, len);
2736 return 0;
2737 }
2738
2739 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
2740 {
2741 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
2742 }
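
/* Illustrative sketch: a driver dropping a trailing hardware FCS from a
 * received frame, assuming the frame is at least ETH_FCS_LEN bytes long.
 *
 *	if (unlikely(pskb_trim(skb, skb->len - ETH_FCS_LEN)))
 *		goto drop;
 *
 * For buffers known not to be cloned, pskb_trim_unique() below avoids the
 * error path entirely.
 */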
2743
2744 /**
2745 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
2746 * @skb: buffer to alter
2747 * @len: new length
2748 *
2749 * This is identical to pskb_trim except that the caller knows that
2750 * the skb is not cloned so we should never get an error due to out-
2751 * of-memory.
2752 */
2753 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2754 {
2755 int err = pskb_trim(skb, len);
2756 BUG_ON(err);
2757 }
2758
2759 static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
2760 {
2761 unsigned int diff = len - skb->len;
2762
2763 if (skb_tailroom(skb) < diff) {
2764 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
2765 GFP_ATOMIC);
2766 if (ret)
2767 return ret;
2768 }
2769 __skb_set_length(skb, len);
2770 return 0;
2771 }
2772
2773 /**
2774 * skb_orphan - orphan a buffer
2775 * @skb: buffer to orphan
2776 *
2777 * If a buffer currently has an owner then we call the owner's
2778 * destructor function and make the @skb unowned. The buffer continues
2779 * to exist but is no longer charged to its former owner.
2780 */
2781 static inline void skb_orphan(struct sk_buff *skb)
2782 {
2783 if (skb->destructor) {
2784 skb->destructor(skb);
2785 skb->destructor = NULL;
2786 skb->sk = NULL;
2787 } else {
2788 BUG_ON(skb->sk);
2789 }
2790 }
2791
2792 /**
2793 * skb_orphan_frags - orphan the frags contained in a buffer
2794 * @skb: buffer to orphan frags from
2795 * @gfp_mask: allocation mask for replacement pages
2796 *
2797 * For each frag in the SKB which needs a destructor (i.e. has an
2798 * owner) create a copy of that frag and release the original
2799 * page by calling the destructor.
2800 */
2801 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
2802 {
2803 if (likely(!skb_zcopy(skb)))
2804 return 0;
2805 if (!skb_zcopy_is_nouarg(skb) &&
2806 skb_uarg(skb)->callback == msg_zerocopy_callback)
2807 return 0;
2808 return skb_copy_ubufs(skb, gfp_mask);
2809 }
2810
2811 /* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
2812 static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
2813 {
2814 if (likely(!skb_zcopy(skb)))
2815 return 0;
2816 return skb_copy_ubufs(skb, gfp_mask);
2817 }
2818
2819 /**
2820 * __skb_queue_purge - empty a list
2821 * @list: list to empty
2822 *
2823 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2824 * the list and one reference dropped. This function does not take the
2825 * list lock and the caller must hold the relevant locks to use it.
2826 */
2827 static inline void __skb_queue_purge(struct sk_buff_head *list)
2828 {
2829 struct sk_buff *skb;
2830 while ((skb = __skb_dequeue(list)) != NULL)
2831 kfree_skb(skb);
2832 }
2833 void skb_queue_purge(struct sk_buff_head *list);
2834
2835 unsigned int skb_rbtree_purge(struct rb_root *root);
2836
2837 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
2838
2839 /**
2840 * netdev_alloc_frag - allocate a page fragment
2841 * @fragsz: fragment size
2842 *
2843 * Allocates a frag from a page for a receive buffer.
2844 * Uses GFP_ATOMIC allocations.
2845 */
2846 static inline void *netdev_alloc_frag(unsigned int fragsz)
2847 {
2848 return __netdev_alloc_frag_align(fragsz, ~0u);
2849 }
2850
2851 static inline void *netdev_alloc_frag_align(unsigned int fragsz,
2852 unsigned int align)
2853 {
2854 WARN_ON_ONCE(!is_power_of_2(align));
2855 return __netdev_alloc_frag_align(fragsz, -align);
2856 }
2857
2858 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
2859 gfp_t gfp_mask);
2860
2861 /**
2862 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
2863 * @dev: network device to receive on
2864 * @length: length to allocate
2865 *
2866 * Allocate a new &sk_buff and assign it a usage count of one. The
2867 * buffer has unspecified headroom built in. Users should allocate
2868 * the headroom they think they need without accounting for the
2869 * built in space. The built in space is used for optimisations.
2870 *
2871 * %NULL is returned if there is no free memory. Although this function
2872 * allocates memory it can be called from an interrupt.
2873 */
2874 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
2875 unsigned int length)
2876 {
2877 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
2878 }
2879
2880 /* legacy helper around __netdev_alloc_skb() */
2881 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
2882 gfp_t gfp_mask)
2883 {
2884 return __netdev_alloc_skb(NULL, length, gfp_mask);
2885 }
2886
2887 /* legacy helper around netdev_alloc_skb() */
2888 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
2889 {
2890 return netdev_alloc_skb(NULL, length);
2891 }
2892
2893
2894 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
2895 unsigned int length, gfp_t gfp)
2896 {
2897 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
2898
2899 if (NET_IP_ALIGN && skb)
2900 skb_reserve(skb, NET_IP_ALIGN);
2901 return skb;
2902 }
2903
2904 static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2905 unsigned int length)
2906 {
2907 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2908 }
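
/* Rx allocation sketch ("priv" and "len" are hypothetical): allocate a
 * buffer for an incoming frame with the IP header aligned.
 *
 *	skb = netdev_alloc_skb_ip_align(priv->netdev, len);
 *	if (unlikely(!skb)) {
 *		priv->netdev->stats.rx_dropped++;
 *		return;
 *	}
 *
 * The helper already accounts for NET_IP_ALIGN, so the caller passes only
 * the frame length it actually needs.
 */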
2909
2910 static inline void skb_free_frag(void *addr)
2911 {
2912 page_frag_free(addr);
2913 }
2914
2915 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
2916
2917 static inline void *napi_alloc_frag(unsigned int fragsz)
2918 {
2919 return __napi_alloc_frag_align(fragsz, ~0u);
2920 }
2921
2922 static inline void *napi_alloc_frag_align(unsigned int fragsz,
2923 unsigned int align)
2924 {
2925 WARN_ON_ONCE(!is_power_of_2(align));
2926 return __napi_alloc_frag_align(fragsz, -align);
2927 }
2928
2929 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2930 unsigned int length, gfp_t gfp_mask);
2931 static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2932 unsigned int length)
2933 {
2934 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2935 }
2936 void napi_consume_skb(struct sk_buff *skb, int budget);
2937
2938 void napi_skb_free_stolen_head(struct sk_buff *skb);
2939 void __kfree_skb_defer(struct sk_buff *skb);
2940
2941 /**
2942 * __dev_alloc_pages - allocate page for network Rx
2943 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2944 * @order: size of the allocation
2945 *
2946 * Allocate a new page.
2947 *
2948 * %NULL is returned if there is no free memory.
2949 */
2950 static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2951 unsigned int order)
2952 {
2953 /* This piece of code contains several assumptions.
2954 * 1. This is for device Rx, therefore a cold page is preferred.
2955 * 2. The expectation is the user wants a compound page.
2956 * 3. If requesting an order-0 page it will not be compound
2957 * due to the check to see if order has a value in prep_new_page
2958 * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
2959 * code in gfp_to_alloc_flags that should be enforcing this.
2960 */
2961 gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
2962
2963 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2964 }
2965
2966 static inline struct page *dev_alloc_pages(unsigned int order)
2967 {
2968 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
2969 }
2970
2971 /**
2972 * __dev_alloc_page - allocate a page for network Rx
2973 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2974 *
2975 * Allocate a new page.
2976 *
2977 * %NULL is returned if there is no free memory.
2978 */
2979 static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2980 {
2981 return __dev_alloc_pages(gfp_mask, 0);
2982 }
2983
2984 static inline struct page *dev_alloc_page(void)
2985 {
2986 return dev_alloc_pages(0);
2987 }
2988
2989 /**
2990 * dev_page_is_reusable - check whether a page can be reused for network Rx
2991 * @page: the page to test
2992 *
2993 * A page shouldn't be considered for reusing/recycling if it was allocated
2994 * under memory pressure or at a distant memory node.
2995 *
2996 * Returns false if this page should be returned to page allocator, true
2997 * otherwise.
2998 */
2999 static inline bool dev_page_is_reusable(const struct page *page)
3000 {
3001 return likely(page_to_nid(page) == numa_mem_id() &&
3002 !page_is_pfmemalloc(page));
3003 }
3004
3005 /**
3006 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
3007 * @page: The page that was allocated from skb_alloc_page
3008 * @skb: The skb that may need pfmemalloc set
3009 */
3010 static inline void skb_propagate_pfmemalloc(const struct page *page,
3011 struct sk_buff *skb)
3012 {
3013 if (page_is_pfmemalloc(page))
3014 skb->pfmemalloc = true;
3015 }
3016
3017 /**
3018 * skb_frag_off() - Returns the offset of a skb fragment
3019 * @frag: the paged fragment
3020 */
3021 static inline unsigned int skb_frag_off(const skb_frag_t *frag)
3022 {
3023 return frag->bv_offset;
3024 }
3025
3026 /**
3027 * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
3028 * @frag: skb fragment
3029 * @delta: value to add
3030 */
3031 static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
3032 {
3033 frag->bv_offset += delta;
3034 }
3035
3036 /**
3037 * skb_frag_off_set() - Sets the offset of a skb fragment
3038 * @frag: skb fragment
3039 * @offset: offset of fragment
3040 */
3041 static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
3042 {
3043 frag->bv_offset = offset;
3044 }
3045
3046 /**
3047 * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
3048 * @fragto: skb fragment where offset is set
3049 * @fragfrom: skb fragment offset is copied from
3050 */
3051 static inline void skb_frag_off_copy(skb_frag_t *fragto,
3052 const skb_frag_t *fragfrom)
3053 {
3054 fragto->bv_offset = fragfrom->bv_offset;
3055 }
3056
3057 /**
3058 * skb_frag_page - retrieve the page referred to by a paged fragment
3059 * @frag: the paged fragment
3060 *
3061 * Returns the &struct page associated with @frag.
3062 */
3063 static inline struct page *skb_frag_page(const skb_frag_t *frag)
3064 {
3065 return frag->bv_page;
3066 }
3067
3068 /**
3069 * __skb_frag_ref - take an additional reference on a paged fragment.
3070 * @frag: the paged fragment
3071 *
3072 * Takes an additional reference on the paged fragment @frag.
3073 */
3074 static inline void __skb_frag_ref(skb_frag_t *frag)
3075 {
3076 get_page(skb_frag_page(frag));
3077 }
3078
3079 /**
3080 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
3081 * @skb: the buffer
3082 * @f: the fragment offset.
3083 *
3084 * Takes an additional reference on the @f'th paged fragment of @skb.
3085 */
3086 static inline void skb_frag_ref(struct sk_buff *skb, int f)
3087 {
3088 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
3089 }
3090
3091 /**
3092 * __skb_frag_unref - release a reference on a paged fragment.
3093 * @frag: the paged fragment
3094 * @recycle: recycle the page if allocated via page_pool
3095 *
3096 * Releases a reference on the paged fragment @frag
3097 * or recycles the page via the page_pool API.
3098 */
3099 static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
3100 {
3101 struct page *page = skb_frag_page(frag);
3102
3103 #ifdef CONFIG_PAGE_POOL
3104 if (recycle && page_pool_return_skb_page(page))
3105 return;
3106 #endif
3107 put_page(page);
3108 }
3109
3110 /**
3111 * skb_frag_unref - release a reference on a paged fragment of an skb.
3112 * @skb: the buffer
3113 * @f: the fragment offset
3114 *
3115 * Releases a reference on the @f'th paged fragment of @skb.
3116 */
3117 static inline void skb_frag_unref(struct sk_buff *skb, int f)
3118 {
3119 __skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle);
3120 }
3121
3122 /**
3123 * skb_frag_address - gets the address of the data contained in a paged fragment
3124 * @frag: the paged fragment buffer
3125 *
3126 * Returns the address of the data within @frag. The page must already
3127 * be mapped.
3128 */
3129 static inline void *skb_frag_address(const skb_frag_t *frag)
3130 {
3131 return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
3132 }
3133
3134 /**
3135 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
3136 * @frag: the paged fragment buffer
3137 *
3138 * Returns the address of the data within @frag. Checks that the page
3139 * is mapped and returns %NULL otherwise.
3140 */
3141 static inline void *skb_frag_address_safe(const skb_frag_t *frag)
3142 {
3143 void *ptr = page_address(skb_frag_page(frag));
3144 if (unlikely(!ptr))
3145 return NULL;
3146
3147 return ptr + skb_frag_off(frag);
3148 }
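
/* Sketch only: visiting every paged fragment of an skb whose pages are
 * already mapped, e.g. for a software copy. consume_bytes() is a
 * placeholder for whatever the caller does with the data.
 *
 *	struct skb_shared_info *shinfo = skb_shinfo(skb);
 *	int i;
 *
 *	for (i = 0; i < shinfo->nr_frags; i++) {
 *		const skb_frag_t *frag = &shinfo->frags[i];
 *
 *		consume_bytes(skb_frag_address(frag), skb_frag_size(frag));
 *	}
 */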
3149
3150 /**
3151 * skb_frag_page_copy() - sets the page in a fragment from another fragment
3152 * @fragto: skb fragment where page is set
3153 * @fragfrom: skb fragment page is copied from
3154 */
3155 static inline void skb_frag_page_copy(skb_frag_t *fragto,
3156 const skb_frag_t *fragfrom)
3157 {
3158 fragto->bv_page = fragfrom->bv_page;
3159 }
3160
3161 /**
3162 * __skb_frag_set_page - sets the page contained in a paged fragment
3163 * @frag: the paged fragment
3164 * @page: the page to set
3165 *
3166 * Sets the fragment @frag to contain @page.
3167 */
3168 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
3169 {
3170 frag->bv_page = page;
3171 }
3172
3173 /**
3174 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
3175 * @skb: the buffer
3176 * @f: the fragment offset
3177 * @page: the page to set
3178 *
3179 * Sets the @f'th fragment of @skb to contain @page.
3180 */
3181 static inline void skb_frag_set_page(struct sk_buff *skb, int f,
3182 struct page *page)
3183 {
3184 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
3185 }
3186
3187 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
3188
3189 /**
3190 * skb_frag_dma_map - maps a paged fragment via the DMA API
3191 * @dev: the device to map the fragment to
3192 * @frag: the paged fragment to map
3193 * @offset: the offset within the fragment (starting at the
3194 * fragment's own offset)
3195 * @size: the number of bytes to map
3196 * @dir: the direction of the mapping (``PCI_DMA_*``)
3197 *
3198 * Maps the page associated with @frag to @dev.
3199 */
3200 static inline dma_addr_t skb_frag_dma_map(struct device *dev,
3201 const skb_frag_t *frag,
3202 size_t offset, size_t size,
3203 enum dma_data_direction dir)
3204 {
3205 return dma_map_page(dev, skb_frag_page(frag),
3206 skb_frag_off(frag) + offset, size, dir);
3207 }
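/* Illustrative sketch only (not part of this API): a transmit path might map
 * each fragment before posting it to hardware. The device pointer and the
 * drop label are hypothetical, and real drivers also unmap on completion.
 *
 *	const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *	dma_addr_t addr = skb_frag_dma_map(dev, frag, 0,
 *					   skb_frag_size(frag), DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		goto drop;
 */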
3208
3209 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
3210 gfp_t gfp_mask)
3211 {
3212 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
3213 }
3214
3215
3216 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
3217 gfp_t gfp_mask)
3218 {
3219 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
3220 }
3221
3222
3223 /**
3224 * skb_clone_writable - is the header of a clone writable
3225 * @skb: buffer to check
3226 * @len: length up to which to write
3227 *
3228 * Returns true if modifying the header part of the cloned buffer
3229 * does not require the data to be copied.
3230 */
3231 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
3232 {
3233 return !skb_header_cloned(skb) &&
3234 skb_headroom(skb) + len <= skb->hdr_len;
3235 }
3236
3237 static inline int skb_try_make_writable(struct sk_buff *skb,
3238 unsigned int write_len)
3239 {
3240 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
3241 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3242 }
3243
3244 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
3245 int cloned)
3246 {
3247 int delta = 0;
3248
3249 if (headroom > skb_headroom(skb))
3250 delta = headroom - skb_headroom(skb);
3251
3252 if (delta || cloned)
3253 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
3254 GFP_ATOMIC);
3255 return 0;
3256 }
3257
3258 /**
3259 * skb_cow - copy header of skb when it is required
3260 * @skb: buffer to cow
3261 * @headroom: needed headroom
3262 *
3263 * If the skb passed lacks sufficient headroom or its data part
3264 * is shared, data is reallocated. If reallocation fails, an error
3265 * is returned and original skb is not changed.
3266 *
3267 * The result is skb with writable area skb->head...skb->tail
3268 * and at least @headroom of space at head.
3269 */
3270 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
3271 {
3272 return __skb_cow(skb, headroom, skb_cloned(skb));
3273 }
3274
3275 /**
3276 * skb_cow_head - skb_cow but only making the head writable
3277 * @skb: buffer to cow
3278 * @headroom: needed headroom
3279 *
3280 * This function is identical to skb_cow except that we replace the
3281 * skb_cloned check by skb_header_cloned. It should be used when
3282 * you only need to push on some header and do not need to modify
3283 * the data.
3284 */
3285 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
3286 {
3287 return __skb_cow(skb, headroom, skb_header_cloned(skb));
3288 }
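/* Illustrative sketch only: before writing a new header in front of the
 * packet, make sure the headroom exists and the header area is private, then
 * push and fill it in. VLAN_HLEN and the drop label are placeholders here.
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		goto drop;
 *	skb_push(skb, VLAN_HLEN);
 */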
3289
3290 /**
3291 * skb_padto - pad an skbuff up to a minimal size
3292 * @skb: buffer to pad
3293 * @len: minimal length
3294 *
3295 * Pads up a buffer to ensure the trailing bytes exist and are
3296 * blanked. If the buffer already contains sufficient data it
3297 * is untouched. Otherwise it is extended. Returns zero on
3298 * success. The skb is freed on error.
3299 */
3300 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
3301 {
3302 unsigned int size = skb->len;
3303 if (likely(size >= len))
3304 return 0;
3305 return skb_pad(skb, len - size);
3306 }
3307
3308 /**
3309 * __skb_put_padto - increase size and pad an skbuff up to a minimal size
3310 * @skb: buffer to pad
3311 * @len: minimal length
3312 * @free_on_error: free buffer on error
3313 *
3314 * Pads up a buffer to ensure the trailing bytes exist and are
3315 * blanked. If the buffer already contains sufficient data it
3316 * is untouched. Otherwise it is extended. Returns zero on
3317 * success. The skb is freed on error if @free_on_error is true.
3318 */
3319 static inline int __must_check __skb_put_padto(struct sk_buff *skb,
3320 unsigned int len,
3321 bool free_on_error)
3322 {
3323 unsigned int size = skb->len;
3324
3325 if (unlikely(size < len)) {
3326 len -= size;
3327 if (__skb_pad(skb, len, free_on_error))
3328 return -ENOMEM;
3329 __skb_put(skb, len);
3330 }
3331 return 0;
3332 }
3333
3334 /**
3335 * skb_put_padto - increase size and pad an skbuff up to a minimal size
3336 * @skb: buffer to pad
3337 * @len: minimal length
3338 *
3339 * Pads up a buffer to ensure the trailing bytes exist and are
3340 * blanked. If the buffer already contains sufficient data it
3341 * is untouched. Otherwise it is extended. Returns zero on
3342 * success. The skb is freed on error.
3343 */
3344 static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
3345 {
3346 return __skb_put_padto(skb, len, true);
3347 }
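/* Illustrative sketch only: a typical driver transmit routine pads short
 * Ethernet frames before handing them to hardware. On failure the skb has
 * already been freed, so the driver simply reports the packet as consumed.
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */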
3348
3349 static inline int skb_add_data(struct sk_buff *skb,
3350 struct iov_iter *from, int copy)
3351 {
3352 const int off = skb->len;
3353
3354 if (skb->ip_summed == CHECKSUM_NONE) {
3355 __wsum csum = 0;
3356 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
3357 &csum, from)) {
3358 skb->csum = csum_block_add(skb->csum, csum, off);
3359 return 0;
3360 }
3361 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
3362 return 0;
3363
3364 __skb_trim(skb, off);
3365 return -EFAULT;
3366 }
3367
3368 static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
3369 const struct page *page, int off)
3370 {
3371 if (skb_zcopy(skb))
3372 return false;
3373 if (i) {
3374 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
3375
3376 return page == skb_frag_page(frag) &&
3377 off == skb_frag_off(frag) + skb_frag_size(frag);
3378 }
3379 return false;
3380 }
3381
3382 static inline int __skb_linearize(struct sk_buff *skb)
3383 {
3384 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3385 }
3386
3387 /**
3388 * skb_linearize - convert paged skb to linear one
3389 * @skb: buffer to linearize
3390 *
3391 * If there is no free memory -ENOMEM is returned, otherwise zero
3392 * is returned and the old skb data released.
3393 */
3394 static inline int skb_linearize(struct sk_buff *skb)
3395 {
3396 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3397 }
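/* Illustrative sketch only: code that wants to parse the whole packet through
 * skb->data can linearize first; afterwards skb->data covers skb->len bytes.
 * The drop label is a placeholder for this example.
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 */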
3398
3399 /**
3400 * skb_has_shared_frag - can any frag be overwritten
3401 * @skb: buffer to test
3402 *
3403 * Return true if the skb has at least one frag that might be modified
3404 * by an external entity (as in vmsplice()/sendfile())
3405 */
3406 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3407 {
3408 return skb_is_nonlinear(skb) &&
3409 skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
3410 }
3411
3412 /**
3413 * skb_linearize_cow - make sure skb is linear and writable
3414 * @skb: buffer to process
3415 *
3416 * If there is no free memory -ENOMEM is returned, otherwise zero
3417 * is returned and the old skb data released.
3418 */
3419 static inline int skb_linearize_cow(struct sk_buff *skb)
3420 {
3421 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3422 __skb_linearize(skb) : 0;
3423 }
3424
3425 static __always_inline void
3426 __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3427 unsigned int off)
3428 {
3429 if (skb->ip_summed == CHECKSUM_COMPLETE)
3430 skb->csum = csum_block_sub(skb->csum,
3431 csum_partial(start, len, 0), off);
3432 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3433 skb_checksum_start_offset(skb) < 0)
3434 skb->ip_summed = CHECKSUM_NONE;
3435 }
3436
3437 /**
3438 * skb_postpull_rcsum - update checksum for received skb after pull
3439 * @skb: buffer to update
3440 * @start: start of data before pull
3441 * @len: length of data pulled
3442 *
3443 * After doing a pull on a received packet, you need to call this to
3444 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
3445 * CHECKSUM_NONE so that it can be recomputed from scratch.
3446 */
3447 static inline void skb_postpull_rcsum(struct sk_buff *skb,
3448 const void *start, unsigned int len)
3449 {
3450 __skb_postpull_rcsum(skb, start, len, 0);
3451 }
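/* Illustrative sketch only: when stripping an encapsulation header from a
 * received packet, remember the old data pointer so the CHECKSUM_COMPLETE
 * value can be adjusted. hdr_len is a placeholder for the pulled length.
 *
 *	void *start = skb->data;
 *
 *	__skb_pull(skb, hdr_len);
 *	skb_postpull_rcsum(skb, start, hdr_len);
 */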
3452
3453 static __always_inline void
3454 __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3455 unsigned int off)
3456 {
3457 if (skb->ip_summed == CHECKSUM_COMPLETE)
3458 skb->csum = csum_block_add(skb->csum,
3459 csum_partial(start, len, 0), off);
3460 }
3461
3462 /**
3463 * skb_postpush_rcsum - update checksum for received skb after push
3464 * @skb: buffer to update
3465 * @start: start of data after push
3466 * @len: length of data pushed
3467 *
3468 * After doing a push on a received packet, you need to call this to
3469 * update the CHECKSUM_COMPLETE checksum.
3470 */
3471 static inline void skb_postpush_rcsum(struct sk_buff *skb,
3472 const void *start, unsigned int len)
3473 {
3474 __skb_postpush_rcsum(skb, start, len, 0);
3475 }
3476
3477 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3478
3479 /**
3480 * skb_push_rcsum - push skb and update receive checksum
3481 * @skb: buffer to update
3482 * @len: length of data pushed
3483 *
3484 * This function performs an skb_push on the packet and updates
3485 * the CHECKSUM_COMPLETE checksum. It should be used on
3486 * the receive path instead of skb_push unless you know
3487 * that the checksum difference is zero (e.g., a valid IP header)
3488 * or you are setting ip_summed to CHECKSUM_NONE.
3489 */
3490 static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
3491 {
3492 skb_push(skb, len);
3493 skb_postpush_rcsum(skb, skb->data, len);
3494 return skb->data;
3495 }
3496
3497 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
3498 /**
3499 * pskb_trim_rcsum - trim received skb and update checksum
3500 * @skb: buffer to trim
3501 * @len: new length
3502 *
3503 * This is exactly the same as pskb_trim except that it ensures the
3504 * checksum of received packets is still valid after the operation.
3505 * It can change skb pointers.
3506 */
3507
3508 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3509 {
3510 if (likely(len >= skb->len))
3511 return 0;
3512 return pskb_trim_rcsum_slow(skb, len);
3513 }
3514
3515 static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3516 {
3517 if (skb->ip_summed == CHECKSUM_COMPLETE)
3518 skb->ip_summed = CHECKSUM_NONE;
3519 __skb_trim(skb, len);
3520 return 0;
3521 }
3522
3523 static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3524 {
3525 if (skb->ip_summed == CHECKSUM_COMPLETE)
3526 skb->ip_summed = CHECKSUM_NONE;
3527 return __skb_grow(skb, len);
3528 }
3529
3530 #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3531 #define skb_rb_first(root) rb_to_skb(rb_first(root))
3532 #define skb_rb_last(root) rb_to_skb(rb_last(root))
3533 #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3534 #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3535
3536 #define skb_queue_walk(queue, skb) \
3537 for (skb = (queue)->next; \
3538 skb != (struct sk_buff *)(queue); \
3539 skb = skb->next)
3540
3541 #define skb_queue_walk_safe(queue, skb, tmp) \
3542 for (skb = (queue)->next, tmp = skb->next; \
3543 skb != (struct sk_buff *)(queue); \
3544 skb = tmp, tmp = skb->next)
3545
3546 #define skb_queue_walk_from(queue, skb) \
3547 for (; skb != (struct sk_buff *)(queue); \
3548 skb = skb->next)
3549
3550 #define skb_rbtree_walk(skb, root) \
3551 for (skb = skb_rb_first(root); skb != NULL; \
3552 skb = skb_rb_next(skb))
3553
3554 #define skb_rbtree_walk_from(skb) \
3555 for (; skb != NULL; \
3556 skb = skb_rb_next(skb))
3557
3558 #define skb_rbtree_walk_from_safe(skb, tmp) \
3559 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3560 skb = tmp)
3561
3562 #define skb_queue_walk_from_safe(queue, skb, tmp) \
3563 for (tmp = skb->next; \
3564 skb != (struct sk_buff *)(queue); \
3565 skb = tmp, tmp = skb->next)
3566
3567 #define skb_queue_reverse_walk(queue, skb) \
3568 for (skb = (queue)->prev; \
3569 skb != (struct sk_buff *)(queue); \
3570 skb = skb->prev)
3571
3572 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3573 for (skb = (queue)->prev, tmp = skb->prev; \
3574 skb != (struct sk_buff *)(queue); \
3575 skb = tmp, tmp = skb->prev)
3576
3577 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3578 for (tmp = skb->prev; \
3579 skb != (struct sk_buff *)(queue); \
3580 skb = tmp, tmp = skb->prev)
3581
3582 static inline bool skb_has_frag_list(const struct sk_buff *skb)
3583 {
3584 return skb_shinfo(skb)->frag_list != NULL;
3585 }
3586
3587 static inline void skb_frag_list_init(struct sk_buff *skb)
3588 {
3589 skb_shinfo(skb)->frag_list = NULL;
3590 }
3591
3592 #define skb_walk_frags(skb, iter) \
3593 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3594
3595
3596 int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
3597 int *err, long *timeo_p,
3598 const struct sk_buff *skb);
3599 struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3600 struct sk_buff_head *queue,
3601 unsigned int flags,
3602 int *off, int *err,
3603 struct sk_buff **last);
3604 struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
3605 struct sk_buff_head *queue,
3606 unsigned int flags, int *off, int *err,
3607 struct sk_buff **last);
3608 struct sk_buff *__skb_recv_datagram(struct sock *sk,
3609 struct sk_buff_head *sk_queue,
3610 unsigned int flags, int *off, int *err);
3611 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3612 int *err);
3613 __poll_t datagram_poll(struct file *file, struct socket *sock,
3614 struct poll_table_struct *wait);
3615 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3616 struct iov_iter *to, int size);
3617 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3618 struct msghdr *msg, int size)
3619 {
3620 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3621 }
3622 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3623 struct msghdr *msg);
3624 int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
3625 struct iov_iter *to, int len,
3626 struct ahash_request *hash);
3627 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3628 struct iov_iter *from, int len);
3629 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3630 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3631 void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3632 static inline void skb_free_datagram_locked(struct sock *sk,
3633 struct sk_buff *skb)
3634 {
3635 __skb_free_datagram_locked(sk, skb, 0);
3636 }
3637 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3638 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3639 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3640 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3641 int len);
3642 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3643 struct pipe_inode_info *pipe, unsigned int len,
3644 unsigned int flags);
3645 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3646 int len);
3647 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
3648 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3649 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3650 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3651 int len, int hlen);
3652 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3653 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3654 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3655 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
3656 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
3657 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3658 struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
3659 unsigned int offset);
3660 struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3661 int skb_ensure_writable(struct sk_buff *skb, int write_len);
3662 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3663 int skb_vlan_pop(struct sk_buff *skb);
3664 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3665 int skb_eth_pop(struct sk_buff *skb);
3666 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
3667 const unsigned char *src);
3668 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
3669 int mac_len, bool ethernet);
3670 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
3671 bool ethernet);
3672 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
3673 int skb_mpls_dec_ttl(struct sk_buff *skb);
3674 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3675 gfp_t gfp);
3676
3677 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3678 {
3679 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
3680 }
3681
3682 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3683 {
3684 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3685 }
3686
3687 struct skb_checksum_ops {
3688 __wsum (*update)(const void *mem, int len, __wsum wsum);
3689 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3690 };
3691
3692 extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
3693
3694 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3695 __wsum csum, const struct skb_checksum_ops *ops);
3696 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3697 __wsum csum);
3698
3699 static inline void * __must_check
3700 __skb_header_pointer(const struct sk_buff *skb, int offset, int len,
3701 const void *data, int hlen, void *buffer)
3702 {
3703 if (likely(hlen - offset >= len))
3704 return (void *)data + offset;
3705
3706 if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
3707 return NULL;
3708
3709 return buffer;
3710 }
3711
3712 static inline void * __must_check
3713 skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3714 {
3715 return __skb_header_pointer(skb, offset, len, skb->data,
3716 skb_headlen(skb), buffer);
3717 }
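/* Illustrative sketch only: parse a header that may be split across fragments
 * by letting the helper copy it into a local buffer when necessary. The use
 * of struct udphdr and the -EINVAL return are just for this example.
 *
 *	struct udphdr _uh;
 *	const struct udphdr *uh;
 *
 *	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
 *	if (!uh)
 *		return -EINVAL;
 */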
3718
3719 /**
3720 * skb_needs_linearize - check if we need to linearize a given skb
3721 * depending on the given device features.
3722 * @skb: socket buffer to check
3723 * @features: net device features
3724 *
3725 * Returns true if either:
3726 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
3727 * 2. skb is fragmented and the device does not support SG.
3728 */
3729 static inline bool skb_needs_linearize(struct sk_buff *skb,
3730 netdev_features_t features)
3731 {
3732 return skb_is_nonlinear(skb) &&
3733 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3734 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3735 }
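/* Illustrative sketch only: a transmit path can combine this check with
 * skb_linearize() to satisfy devices lacking SG or FRAGLIST support. The
 * drop label is a placeholder.
 *
 *	if (skb_needs_linearize(skb, dev->features) && skb_linearize(skb))
 *		goto drop;
 */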
3736
3737 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3738 void *to,
3739 const unsigned int len)
3740 {
3741 memcpy(to, skb->data, len);
3742 }
3743
3744 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3745 const int offset, void *to,
3746 const unsigned int len)
3747 {
3748 memcpy(to, skb->data + offset, len);
3749 }
3750
3751 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3752 const void *from,
3753 const unsigned int len)
3754 {
3755 memcpy(skb->data, from, len);
3756 }
3757
3758 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3759 const int offset,
3760 const void *from,
3761 const unsigned int len)
3762 {
3763 memcpy(skb->data + offset, from, len);
3764 }
3765
3766 void skb_init(void);
3767
3768 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3769 {
3770 return skb->tstamp;
3771 }
3772
3773 /**
3774 * skb_get_timestamp - get timestamp from a skb
3775 * @skb: skb to get stamp from
3776 * @stamp: pointer to struct __kernel_old_timeval to store stamp in
3777 *
3778 * Timestamps are stored in the skb as offsets to a base timestamp.
3779 * This function converts the offset back to a struct __kernel_old_timeval and stores
3780 * it in stamp.
3781 */
3782 static inline void skb_get_timestamp(const struct sk_buff *skb,
3783 struct __kernel_old_timeval *stamp)
3784 {
3785 *stamp = ns_to_kernel_old_timeval(skb->tstamp);
3786 }
3787
3788 static inline void skb_get_new_timestamp(const struct sk_buff *skb,
3789 struct __kernel_sock_timeval *stamp)
3790 {
3791 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3792
3793 stamp->tv_sec = ts.tv_sec;
3794 stamp->tv_usec = ts.tv_nsec / 1000;
3795 }
3796
3797 static inline void skb_get_timestampns(const struct sk_buff *skb,
3798 struct __kernel_old_timespec *stamp)
3799 {
3800 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3801
3802 stamp->tv_sec = ts.tv_sec;
3803 stamp->tv_nsec = ts.tv_nsec;
3804 }
3805
3806 static inline void skb_get_new_timestampns(const struct sk_buff *skb,
3807 struct __kernel_timespec *stamp)
3808 {
3809 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3810
3811 stamp->tv_sec = ts.tv_sec;
3812 stamp->tv_nsec = ts.tv_nsec;
3813 }
3814
3815 static inline void __net_timestamp(struct sk_buff *skb)
3816 {
3817 skb->tstamp = ktime_get_real();
3818 }
3819
3820 static inline ktime_t net_timedelta(ktime_t t)
3821 {
3822 return ktime_sub(ktime_get_real(), t);
3823 }
3824
3825 static inline ktime_t net_invalid_timestamp(void)
3826 {
3827 return 0;
3828 }
3829
3830 static inline u8 skb_metadata_len(const struct sk_buff *skb)
3831 {
3832 return skb_shinfo(skb)->meta_len;
3833 }
3834
3835 static inline void *skb_metadata_end(const struct sk_buff *skb)
3836 {
3837 return skb_mac_header(skb);
3838 }
3839
3840 static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
3841 const struct sk_buff *skb_b,
3842 u8 meta_len)
3843 {
3844 const void *a = skb_metadata_end(skb_a);
3845 const void *b = skb_metadata_end(skb_b);
3846 /* Using a more efficient variant than a plain call to memcmp(). */
3847 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3848 u64 diffs = 0;
3849
3850 switch (meta_len) {
3851 #define __it(x, op) (x -= sizeof(u##op))
3852 #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
3853 case 32: diffs |= __it_diff(a, b, 64);
3854 fallthrough;
3855 case 24: diffs |= __it_diff(a, b, 64);
3856 fallthrough;
3857 case 16: diffs |= __it_diff(a, b, 64);
3858 fallthrough;
3859 case 8: diffs |= __it_diff(a, b, 64);
3860 break;
3861 case 28: diffs |= __it_diff(a, b, 64);
3862 fallthrough;
3863 case 20: diffs |= __it_diff(a, b, 64);
3864 fallthrough;
3865 case 12: diffs |= __it_diff(a, b, 64);
3866 fallthrough;
3867 case 4: diffs |= __it_diff(a, b, 32);
3868 break;
3869 }
3870 return diffs;
3871 #else
3872 return memcmp(a - meta_len, b - meta_len, meta_len);
3873 #endif
3874 }
3875
3876 static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
3877 const struct sk_buff *skb_b)
3878 {
3879 u8 len_a = skb_metadata_len(skb_a);
3880 u8 len_b = skb_metadata_len(skb_b);
3881
3882 if (!(len_a | len_b))
3883 return false;
3884
3885 return len_a != len_b ?
3886 true : __skb_metadata_differs(skb_a, skb_b, len_a);
3887 }
3888
3889 static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
3890 {
3891 skb_shinfo(skb)->meta_len = meta_len;
3892 }
3893
3894 static inline void skb_metadata_clear(struct sk_buff *skb)
3895 {
3896 skb_metadata_set(skb, 0);
3897 }
3898
3899 struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3900
3901 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3902
3903 void skb_clone_tx_timestamp(struct sk_buff *skb);
3904 bool skb_defer_rx_timestamp(struct sk_buff *skb);
3905
3906 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
3907
3908 static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3909 {
3910 }
3911
3912 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3913 {
3914 return false;
3915 }
3916
3917 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
3918
3919 /**
3920 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
3921 *
3922 * PHY drivers may accept clones of transmitted packets for
3923 * timestamping via their phy_driver.txtstamp method. These drivers
3924 * must call this function to return the skb back to the stack with a
3925 * timestamp.
3926 *
3927 * @skb: clone of the original outgoing packet
3928 * @hwtstamps: hardware time stamps
3929 *
3930 */
3931 void skb_complete_tx_timestamp(struct sk_buff *skb,
3932 struct skb_shared_hwtstamps *hwtstamps);
3933
3934 void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb,
3935 struct skb_shared_hwtstamps *hwtstamps,
3936 struct sock *sk, int tstype);
3937
3938 /**
3939 * skb_tstamp_tx - queue clone of skb with send time stamps
3940 * @orig_skb: the original outgoing packet
3941 * @hwtstamps: hardware time stamps, may be NULL if not available
3942 *
3943 * If the skb has a socket associated, then this function clones the
3944 * skb (thus sharing the actual data and optional structures), stores
3945 * the optional hardware time stamping information (if non NULL) or
3946 * generates a software time stamp (otherwise), then queues the clone
3947 * to the error queue of the socket. Errors are silently ignored.
3948 */
3949 void skb_tstamp_tx(struct sk_buff *orig_skb,
3950 struct skb_shared_hwtstamps *hwtstamps);
3951
3952 /**
3953 * skb_tx_timestamp() - Driver hook for transmit timestamping
3954 *
3955 * Ethernet MAC Drivers should call this function in their hard_xmit()
3956 * function immediately before giving the sk_buff to the MAC hardware.
3957 *
3958 * Specifically, one should make absolutely sure that this function is
3959 * called before TX completion of this packet can trigger. Otherwise
3960 * the packet could potentially already be freed.
3961 *
3962 * @skb: A socket buffer.
3963 */
3964 static inline void skb_tx_timestamp(struct sk_buff *skb)
3965 {
3966 skb_clone_tx_timestamp(skb);
3967 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
3968 skb_tstamp_tx(skb, NULL);
3969 }
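/* Illustrative sketch only: in a driver's ndo_start_xmit() the call sits
 * immediately before the descriptor is handed to the hardware; the doorbell
 * write below is purely a placeholder for "give the skb to the device".
 *
 *	skb_tx_timestamp(skb);
 *	writel(next_idx, ring->doorbell);
 */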
3970
3971 /**
3972 * skb_complete_wifi_ack - deliver skb with wifi status
3973 *
3974 * @skb: the original outgoing packet
3975 * @acked: ack status
3976 *
3977 */
3978 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3979
3980 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3981 __sum16 __skb_checksum_complete(struct sk_buff *skb);
3982
3983 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3984 {
3985 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3986 skb->csum_valid ||
3987 (skb->ip_summed == CHECKSUM_PARTIAL &&
3988 skb_checksum_start_offset(skb) >= 0));
3989 }
3990
3991 /**
3992 * skb_checksum_complete - Calculate checksum of an entire packet
3993 * @skb: packet to process
3994 *
3995 * This function calculates the checksum over the entire packet plus
3996 * the value of skb->csum. The latter can be used to supply the
3997 * checksum of a pseudo header as used by TCP/UDP. It returns the
3998 * checksum.
3999 *
4000 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
4001 * this function can be used to verify the checksum on received
4002 * packets. In that case the function should return zero if the
4003 * checksum is correct. In particular, this function will return zero
4004 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
4005 * hardware has already verified the correctness of the checksum.
4006 */
4007 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
4008 {
4009 return skb_csum_unnecessary(skb) ?
4010 0 : __skb_checksum_complete(skb);
4011 }
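/* Illustrative sketch only: a receive handler for a protocol that carries a
 * complete checksum can verify it like this; csum_error is a placeholder.
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */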
4012
4013 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
4014 {
4015 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
4016 if (skb->csum_level == 0)
4017 skb->ip_summed = CHECKSUM_NONE;
4018 else
4019 skb->csum_level--;
4020 }
4021 }
4022
4023 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
4024 {
4025 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
4026 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
4027 skb->csum_level++;
4028 } else if (skb->ip_summed == CHECKSUM_NONE) {
4029 skb->ip_summed = CHECKSUM_UNNECESSARY;
4030 skb->csum_level = 0;
4031 }
4032 }
4033
4034 static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
4035 {
4036 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
4037 skb->ip_summed = CHECKSUM_NONE;
4038 skb->csum_level = 0;
4039 }
4040 }
4041
4042 /* Check if we need to perform checksum complete validation.
4043 *
4044 * Returns true if checksum complete is needed, false otherwise
4045 * (either checksum is unnecessary or zero checksum is allowed).
4046 */
4047 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
4048 bool zero_okay,
4049 __sum16 check)
4050 {
4051 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
4052 skb->csum_valid = 1;
4053 __skb_decr_checksum_unnecessary(skb);
4054 return false;
4055 }
4056
4057 return true;
4058 }
4059
4060 /* For small packets <= CHECKSUM_BREAK perform checksum complete directly
4061 * in checksum_init.
4062 */
4063 #define CHECKSUM_BREAK 76
4064
4065 /* Unset checksum-complete
4066 *
4067 * Unsetting checksum complete can be done when a packet is being modified
4068 * (uncompressed, for instance) and the checksum-complete value is thereby
4069 * invalidated.
4070 */
4071 static inline void skb_checksum_complete_unset(struct sk_buff *skb)
4072 {
4073 if (skb->ip_summed == CHECKSUM_COMPLETE)
4074 skb->ip_summed = CHECKSUM_NONE;
4075 }
4076
4077 /* Validate (init) checksum based on checksum complete.
4078 *
4079 * Return values:
4080 * 0: checksum is validated or try to in skb_checksum_complete. In the latter
4081 * case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
4082 * checksum is stored in skb->csum for use in __skb_checksum_complete
4083 * non-zero: value of invalid checksum
4084 *
4085 */
4086 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
4087 bool complete,
4088 __wsum psum)
4089 {
4090 if (skb->ip_summed == CHECKSUM_COMPLETE) {
4091 if (!csum_fold(csum_add(psum, skb->csum))) {
4092 skb->csum_valid = 1;
4093 return 0;
4094 }
4095 }
4096
4097 skb->csum = psum;
4098
4099 if (complete || skb->len <= CHECKSUM_BREAK) {
4100 __sum16 csum;
4101
4102 csum = __skb_checksum_complete(skb);
4103 skb->csum_valid = !csum;
4104 return csum;
4105 }
4106
4107 return 0;
4108 }
4109
4110 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
4111 {
4112 return 0;
4113 }
4114
4115 /* Perform checksum validate (init). Note that this is a macro since we only
4116 * want to evaluate the pseudo header compute function (an input argument) if necessary.
4117 * First we try to validate without any computation (checksum unnecessary) and
4118 * then calculate based on checksum complete calling the function to compute
4119 * pseudo header.
4120 *
4121 * Return values:
4122 * 0: checksum is validated or try to in skb_checksum_complete
4123 * non-zero: value of invalid checksum
4124 */
4125 #define __skb_checksum_validate(skb, proto, complete, \
4126 zero_okay, check, compute_pseudo) \
4127 ({ \
4128 __sum16 __ret = 0; \
4129 skb->csum_valid = 0; \
4130 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
4131 __ret = __skb_checksum_validate_complete(skb, \
4132 complete, compute_pseudo(skb, proto)); \
4133 __ret; \
4134 })
4135
4136 #define skb_checksum_init(skb, proto, compute_pseudo) \
4137 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
4138
4139 #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
4140 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
4141
4142 #define skb_checksum_validate(skb, proto, compute_pseudo) \
4143 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
4144
4145 #define skb_checksum_validate_zero_check(skb, proto, check, \
4146 compute_pseudo) \
4147 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
4148
4149 #define skb_checksum_simple_validate(skb) \
4150 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
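/* Illustrative sketch only: protocols whose checksum has no pseudo header
 * (ICMP, for example) can validate a packet with the simple variant; the
 * csum_error label is a placeholder.
 *
 *	if (skb_checksum_simple_validate(skb))
 *		goto csum_error;
 */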
4151
4152 static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
4153 {
4154 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
4155 }
4156
4157 static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
4158 {
4159 skb->csum = ~pseudo;
4160 skb->ip_summed = CHECKSUM_COMPLETE;
4161 }
4162
4163 #define skb_checksum_try_convert(skb, proto, compute_pseudo) \
4164 do { \
4165 if (__skb_checksum_convert_check(skb)) \
4166 __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
4167 } while (0)
4168
4169 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
4170 u16 start, u16 offset)
4171 {
4172 skb->ip_summed = CHECKSUM_PARTIAL;
4173 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
4174 skb->csum_offset = offset - start;
4175 }
4176
4177 /* Update skbuff and packet to reflect the remote checksum offload operation.
4178 * When called, ptr indicates the starting point for skb->csum when
4179 * ip_summed is CHECKSUM_COMPLETE. If we need to create a checksum complete
4180 * value here, skb_postpull_rcsum is done so that skb->csum starts at ptr.
4181 */
4182 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
4183 int start, int offset, bool nopartial)
4184 {
4185 __wsum delta;
4186
4187 if (!nopartial) {
4188 skb_remcsum_adjust_partial(skb, ptr, start, offset);
4189 return;
4190 }
4191
4192 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
4193 __skb_checksum_complete(skb);
4194 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
4195 }
4196
4197 delta = remcsum_adjust(ptr, skb->csum, start, offset);
4198
4199 /* Adjust skb->csum since we changed the packet */
4200 skb->csum = csum_add(skb->csum, delta);
4201 }
4202
4203 static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
4204 {
4205 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4206 return (void *)(skb->_nfct & NFCT_PTRMASK);
4207 #else
4208 return NULL;
4209 #endif
4210 }
4211
4212 static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
4213 {
4214 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4215 return skb->_nfct;
4216 #else
4217 return 0UL;
4218 #endif
4219 }
4220
4221 static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
4222 {
4223 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
4224 skb->slow_gro |= !!nfct;
4225 skb->_nfct = nfct;
4226 #endif
4227 }
4228
4229 #ifdef CONFIG_SKB_EXTENSIONS
4230 enum skb_ext_id {
4231 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4232 SKB_EXT_BRIDGE_NF,
4233 #endif
4234 #ifdef CONFIG_XFRM
4235 SKB_EXT_SEC_PATH,
4236 #endif
4237 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4238 TC_SKB_EXT,
4239 #endif
4240 #if IS_ENABLED(CONFIG_MPTCP)
4241 SKB_EXT_MPTCP,
4242 #endif
4243 SKB_EXT_NUM, /* must be last */
4244 };
4245
4246 /**
4247 * struct skb_ext - sk_buff extensions
4248 * @refcnt: 1 on allocation, deallocated on 0
4249 * @offset: offset to add to @data to obtain extension address
4250 * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
4251 * @data: start of extension data, variable sized
4252 *
4253 * Note: offsets/lengths are stored in chunks of 8 bytes, which allows
4254 * the use of 'u8' types while still permitting up to 2kb worth of extension data.
4255 */
4256 struct skb_ext {
4257 refcount_t refcnt;
4258 u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
4259 u8 chunks; /* same */
4260 char data[] __aligned(8);
4261 };
4262
4263 struct skb_ext *__skb_ext_alloc(gfp_t flags);
4264 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
4265 struct skb_ext *ext);
4266 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
4267 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
4268 void __skb_ext_put(struct skb_ext *ext);
4269
4270 static inline void skb_ext_put(struct sk_buff *skb)
4271 {
4272 if (skb->active_extensions)
4273 __skb_ext_put(skb->extensions);
4274 }
4275
4276 static inline void __skb_ext_copy(struct sk_buff *dst,
4277 const struct sk_buff *src)
4278 {
4279 dst->active_extensions = src->active_extensions;
4280
4281 if (src->active_extensions) {
4282 struct skb_ext *ext = src->extensions;
4283
4284 refcount_inc(&ext->refcnt);
4285 dst->extensions = ext;
4286 }
4287 }
4288
4289 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
4290 {
4291 skb_ext_put(dst);
4292 __skb_ext_copy(dst, src);
4293 }
4294
4295 static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
4296 {
4297 return !!ext->offset[i];
4298 }
4299
4300 static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
4301 {
4302 return skb->active_extensions & (1 << id);
4303 }
4304
4305 static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
4306 {
4307 if (skb_ext_exist(skb, id))
4308 __skb_ext_del(skb, id);
4309 }
4310
4311 static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
4312 {
4313 if (skb_ext_exist(skb, id)) {
4314 struct skb_ext *ext = skb->extensions;
4315
4316 return (void *)ext + (ext->offset[id] << 3);
4317 }
4318
4319 return NULL;
4320 }
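/* Illustrative sketch only, assuming CONFIG_XFRM is enabled: look up an
 * extension and fall back gracefully when it is absent.
 *
 *	struct sec_path *sp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
 *
 *	if (!sp)
 *		return;
 */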
4321
4322 static inline void skb_ext_reset(struct sk_buff *skb)
4323 {
4324 if (unlikely(skb->active_extensions)) {
4325 __skb_ext_put(skb->extensions);
4326 skb->active_extensions = 0;
4327 }
4328 }
4329
4330 static inline bool skb_has_extensions(struct sk_buff *skb)
4331 {
4332 return unlikely(skb->active_extensions);
4333 }
4334 #else
4335 static inline void skb_ext_put(struct sk_buff *skb) {}
4336 static inline void skb_ext_reset(struct sk_buff *skb) {}
4337 static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
4338 static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
4339 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
4340 static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
4341 #endif /* CONFIG_SKB_EXTENSIONS */
4342
4343 static inline void nf_reset_ct(struct sk_buff *skb)
4344 {
4345 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4346 nf_conntrack_put(skb_nfct(skb));
4347 skb->_nfct = 0;
4348 #endif
4349 }
4350
4351 static inline void nf_reset_trace(struct sk_buff *skb)
4352 {
4353 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4354 skb->nf_trace = 0;
4355 #endif
4356 }
4357
4358 static inline void ipvs_reset(struct sk_buff *skb)
4359 {
4360 #if IS_ENABLED(CONFIG_IP_VS)
4361 skb->ipvs_property = 0;
4362 #endif
4363 }
4364
4365 /* Note: This doesn't put any conntrack info in dst. */
4366 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
4367 bool copy)
4368 {
4369 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4370 dst->_nfct = src->_nfct;
4371 nf_conntrack_get(skb_nfct(src));
4372 #endif
4373 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4374 if (copy)
4375 dst->nf_trace = src->nf_trace;
4376 #endif
4377 }
4378
4379 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
4380 {
4381 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4382 nf_conntrack_put(skb_nfct(dst));
4383 #endif
4384 dst->slow_gro = src->slow_gro;
4385 __nf_copy(dst, src, true);
4386 }
4387
4388 #ifdef CONFIG_NETWORK_SECMARK
4389 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4390 {
4391 to->secmark = from->secmark;
4392 }
4393
4394 static inline void skb_init_secmark(struct sk_buff *skb)
4395 {
4396 skb->secmark = 0;
4397 }
4398 #else
4399 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4400 { }
4401
4402 static inline void skb_init_secmark(struct sk_buff *skb)
4403 { }
4404 #endif
4405
4406 static inline int secpath_exists(const struct sk_buff *skb)
4407 {
4408 #ifdef CONFIG_XFRM
4409 return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
4410 #else
4411 return 0;
4412 #endif
4413 }
4414
4415 static inline bool skb_irq_freeable(const struct sk_buff *skb)
4416 {
4417 return !skb->destructor &&
4418 !secpath_exists(skb) &&
4419 !skb_nfct(skb) &&
4420 !skb->_skb_refdst &&
4421 !skb_has_frag_list(skb);
4422 }
4423
4424 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
4425 {
4426 skb->queue_mapping = queue_mapping;
4427 }
4428
4429 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
4430 {
4431 return skb->queue_mapping;
4432 }
4433
4434 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
4435 {
4436 to->queue_mapping = from->queue_mapping;
4437 }
4438
4439 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
4440 {
4441 skb->queue_mapping = rx_queue + 1;
4442 }
4443
4444 static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
4445 {
4446 return skb->queue_mapping - 1;
4447 }
4448
4449 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
4450 {
4451 return skb->queue_mapping != 0;
4452 }
4453
4454 static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
4455 {
4456 skb->dst_pending_confirm = val;
4457 }
4458
4459 static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
4460 {
4461 return skb->dst_pending_confirm != 0;
4462 }
4463
4464 static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
4465 {
4466 #ifdef CONFIG_XFRM
4467 return skb_ext_find(skb, SKB_EXT_SEC_PATH);
4468 #else
4469 return NULL;
4470 #endif
4471 }
4472
4473 /* Keeps track of mac header offset relative to skb->head.
4474 * It is useful for TSO of Tunneling protocol. e.g. GRE.
4475 * For non-tunnel skb it points to skb_mac_header() and for
4476 * tunnel skb it points to outer mac header.
4477 * Keeps track of level of encapsulation of network headers.
4478 */
4479 struct skb_gso_cb {
4480 union {
4481 int mac_offset;
4482 int data_offset;
4483 };
4484 int encap_level;
4485 __wsum csum;
4486 __u16 csum_start;
4487 };
4488 #define SKB_GSO_CB_OFFSET 32
4489 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
4490
4491 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
4492 {
4493 return (skb_mac_header(inner_skb) - inner_skb->head) -
4494 SKB_GSO_CB(inner_skb)->mac_offset;
4495 }
4496
4497 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
4498 {
4499 int new_headroom, headroom;
4500 int ret;
4501
4502 headroom = skb_headroom(skb);
4503 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
4504 if (ret)
4505 return ret;
4506
4507 new_headroom = skb_headroom(skb);
4508 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
4509 return 0;
4510 }
4511
4512 static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
4513 {
4514 /* Do not update partial checksums if remote checksum is enabled. */
4515 if (skb->remcsum_offload)
4516 return;
4517
4518 SKB_GSO_CB(skb)->csum = res;
4519 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
4520 }
4521
4522 /* Compute the checksum for a gso segment. First compute the checksum value
4523 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
4524 * then add in skb->csum (checksum from csum_start to end of packet).
4525 * skb->csum and csum_start are then updated to reflect the checksum of the
4526 * resultant packet starting from the transport header; the resultant checksum
4527 * is in the res argument (i.e. normally zero or the complement of the checksum
4528 * of a pseudo header).
4529 */
4530 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
4531 {
4532 unsigned char *csum_start = skb_transport_header(skb);
4533 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
4534 __wsum partial = SKB_GSO_CB(skb)->csum;
4535
4536 SKB_GSO_CB(skb)->csum = res;
4537 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
4538
4539 return csum_fold(csum_partial(csum_start, plen, partial));
4540 }
4541
4542 static inline bool skb_is_gso(const struct sk_buff *skb)
4543 {
4544 return skb_shinfo(skb)->gso_size;
4545 }
4546
4547 /* Note: Should be called only if skb_is_gso(skb) is true */
4548 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
4549 {
4550 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
4551 }
4552
4553 /* Note: Should be called only if skb_is_gso(skb) is true */
4554 static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4555 {
4556 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4557 }
4558
4559 /* Note: Should be called only if skb_is_gso(skb) is true */
4560 static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
4561 {
4562 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
4563 }
4564
4565 static inline void skb_gso_reset(struct sk_buff *skb)
4566 {
4567 skb_shinfo(skb)->gso_size = 0;
4568 skb_shinfo(skb)->gso_segs = 0;
4569 skb_shinfo(skb)->gso_type = 0;
4570 }
4571
4572 static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
4573 u16 increment)
4574 {
4575 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4576 return;
4577 shinfo->gso_size += increment;
4578 }
4579
4580 static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
4581 u16 decrement)
4582 {
4583 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4584 return;
4585 shinfo->gso_size -= decrement;
4586 }
4587
4588 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
4589
4590 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
4591 {
4592 /* LRO sets gso_size but not gso_type, whereas if GSO is really
4593 * wanted then gso_type will be set. */
4594 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4595
4596 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
4597 unlikely(shinfo->gso_type == 0)) {
4598 __skb_warn_lro_forwarding(skb);
4599 return true;
4600 }
4601 return false;
4602 }
4603
4604 static inline void skb_forward_csum(struct sk_buff *skb)
4605 {
4606 /* Unfortunately we don't support this one. Any brave souls? */
4607 if (skb->ip_summed == CHECKSUM_COMPLETE)
4608 skb->ip_summed = CHECKSUM_NONE;
4609 }
4610
4611 /**
4612 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
4613 * @skb: skb to check
4614 *
4615 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
4616 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
4617 * use this helper, to document places where we make this assertion.
4618 */
4619 static inline void skb_checksum_none_assert(const struct sk_buff *skb)
4620 {
4621 #ifdef DEBUG
4622 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
4623 #endif
4624 }
4625
4626 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
4627
4628 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
4629 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4630 unsigned int transport_len,
4631 __sum16(*skb_chkf)(struct sk_buff *skb));
4632
4633 /**
4634 * skb_head_is_locked - Determine if the skb->head is locked down
4635 * @skb: skb to check
4636 *
4637 * The head on skbs built around a head frag can be removed if they are
4638 * not cloned. This function returns true if the skb head is locked down
4639 * due to either being allocated via kmalloc, or by being a clone with
4640 * multiple references to the head.
4641 */
4642 static inline bool skb_head_is_locked(const struct sk_buff *skb)
4643 {
4644 return !skb->head_frag || skb_cloned(skb);
4645 }
4646
4647 /* Local Checksum Offload.
4648 * Compute outer checksum based on the assumption that the
4649 * inner checksum will be offloaded later.
4650 * See Documentation/networking/checksum-offloads.rst for
4651 * explanation of how this works.
4652 * Fill in outer checksum adjustment (e.g. with sum of outer
4653 * pseudo-header) before calling.
4654 * Also ensure that inner checksum is in linear data area.
4655 */
4656 static inline __wsum lco_csum(struct sk_buff *skb)
4657 {
4658 unsigned char *csum_start = skb_checksum_start(skb);
4659 unsigned char *l4_hdr = skb_transport_header(skb);
4660 __wsum partial;
4661
4662 /* Start with complement of inner checksum adjustment */
4663 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
4664 skb->csum_offset));
4665
4666 /* Add in checksum of our headers (incl. outer checksum
4667 * adjustment filled in by caller) and return result.
4668 */
4669 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
4670 }
4671
4672 static inline bool skb_is_redirected(const struct sk_buff *skb)
4673 {
4674 return skb->redirected;
4675 }
4676
4677 static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
4678 {
4679 skb->redirected = 1;
4680 #ifdef CONFIG_NET_REDIRECT
4681 skb->from_ingress = from_ingress;
4682 if (skb->from_ingress)
4683 skb->tstamp = 0;
4684 #endif
4685 }
4686
4687 static inline void skb_reset_redirect(struct sk_buff *skb)
4688 {
4689 skb->redirected = 0;
4690 }
4691
4692 static inline bool skb_csum_is_sctp(struct sk_buff *skb)
4693 {
4694 return skb->csum_not_inet;
4695 }
4696
4697 static inline void skb_set_kcov_handle(struct sk_buff *skb,
4698 const u64 kcov_handle)
4699 {
4700 #ifdef CONFIG_KCOV
4701 skb->kcov_handle = kcov_handle;
4702 #endif
4703 }
4704
4705 static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
4706 {
4707 #ifdef CONFIG_KCOV
4708 return skb->kcov_handle;
4709 #else
4710 return 0;
4711 #endif
4712 }
4713
4714 #ifdef CONFIG_PAGE_POOL
4715 static inline void skb_mark_for_recycle(struct sk_buff *skb)
4716 {
4717 skb->pp_recycle = 1;
4718 }
4719 #endif
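/* Illustrative sketch only, assuming CONFIG_PAGE_POOL: a page_pool based
 * receive path marks the skb so its pages are returned to the pool when the
 * skb is freed. The page and truesize variables are placeholders.
 *
 *	skb = build_skb(page_address(page), truesize);
 *	if (skb)
 *		skb_mark_for_recycle(skb);
 */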
4720
4721 static inline bool skb_pp_recycle(struct sk_buff *skb, void *data)
4722 {
4723 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
4724 return false;
4725 return page_pool_return_skb_page(virt_to_page(data));
4726 }
4727
4728 #endif /* __KERNEL__ */
4729 #endif /* _LINUX_SKBUFF_H */
4730