Lines Matching refs:skb (net/core/skbuff.c)
113 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, in skb_panic() argument
117 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
118 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
119 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
123 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
125 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
128 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
130 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
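
The three helpers above back the BUG() paths for buffer overruns: skb_put() calls skb_over_panic() when skb->tail would pass skb->end, and skb_push() calls skb_under_panic() when skb->data would drop below skb->head (both appear later in this listing). A minimal sketch of how a caller avoids them when the length is not known to fit; my_append() is a hypothetical helper, not kernel code:

    #include <linux/skbuff.h>

    /* Hypothetical helper: check the room first instead of letting
     * skb_put() hit skb_over_panic() on an oversized append.
     */
    static int my_append(struct sk_buff *skb, const void *buf, unsigned int len)
    {
            if (skb_tailroom(skb) < len)
                    return -ENOMEM;
            skb_put_data(skb, buf, len);    /* advances tail and len */
            return 0;
    }
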
212 struct sk_buff *skb; in napi_get_frags_check() local
215 skb = napi_get_frags(napi); in napi_get_frags_check()
216 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag); in napi_get_frags_check()
255 struct sk_buff *skb; in napi_skb_cache_get() local
266 skb = nc->skb_cache[--nc->skb_count]; in napi_skb_cache_get()
267 kasan_unpoison_object_data(skbuff_head_cache, skb); in napi_skb_cache_get()
269 return skb; in napi_skb_cache_get()
273 static void __build_skb_around(struct sk_buff *skb, void *data, in __build_skb_around() argument
282 skb->truesize = SKB_TRUESIZE(size); in __build_skb_around()
283 refcount_set(&skb->users, 1); in __build_skb_around()
284 skb->head = data; in __build_skb_around()
285 skb->data = data; in __build_skb_around()
286 skb_reset_tail_pointer(skb); in __build_skb_around()
287 skb_set_end_offset(skb, size); in __build_skb_around()
288 skb->mac_header = (typeof(skb->mac_header))~0U; in __build_skb_around()
289 skb->transport_header = (typeof(skb->transport_header))~0U; in __build_skb_around()
290 skb->alloc_cpu = raw_smp_processor_id(); in __build_skb_around()
292 shinfo = skb_shinfo(skb); in __build_skb_around()
296 skb_set_kcov_handle(skb, kcov_common_handle()); in __build_skb_around()
320 struct sk_buff *skb; in __build_skb() local
322 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); in __build_skb()
323 if (unlikely(!skb)) in __build_skb()
326 memset(skb, 0, offsetof(struct sk_buff, tail)); in __build_skb()
327 __build_skb_around(skb, data, frag_size); in __build_skb()
329 return skb; in __build_skb()
339 struct sk_buff *skb = __build_skb(data, frag_size); in build_skb() local
341 if (skb && frag_size) { in build_skb()
342 skb->head_frag = 1; in build_skb()
344 skb->pfmemalloc = 1; in build_skb()
346 return skb; in build_skb()
356 struct sk_buff *build_skb_around(struct sk_buff *skb, in build_skb_around() argument
359 if (unlikely(!skb)) in build_skb_around()
362 __build_skb_around(skb, data, frag_size); in build_skb_around()
365 skb->head_frag = 1; in build_skb_around()
367 skb->pfmemalloc = 1; in build_skb_around()
369 return skb; in build_skb_around()
385 struct sk_buff *skb; in __napi_build_skb() local
387 skb = napi_skb_cache_get(); in __napi_build_skb()
388 if (unlikely(!skb)) in __napi_build_skb()
391 memset(skb, 0, offsetof(struct sk_buff, tail)); in __napi_build_skb()
392 __build_skb_around(skb, data, frag_size); in __napi_build_skb()
394 return skb; in __napi_build_skb()
409 struct sk_buff *skb = __napi_build_skb(data, frag_size); in napi_build_skb() local
411 if (likely(skb) && frag_size) { in napi_build_skb()
412 skb->head_frag = 1; in napi_build_skb()
413 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in napi_build_skb()
416 return skb; in napi_build_skb()
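
build_skb(), build_skb_around() and the napi_build_skb() variants all wrap an sk_buff head around memory the caller already owns: frag_size must cover the data plus the trailing skb_shared_info, head_frag is set so a page-fragment head is released correctly, and pfmemalloc propagates from the backing page. A hedged sketch of the usual RX usage; rx_wrap(), buf and the DMA layout are assumptions:

    #include <linux/skbuff.h>

    /* Sketch: turn a driver-owned page fragment into an skb. Assumes the
     * frame was DMA'd at buf + NET_SKB_PAD and that frag_size includes
     * SKB_DATA_ALIGN(...) + sizeof(struct skb_shared_info) tailroom.
     */
    static struct sk_buff *rx_wrap(void *buf, unsigned int frag_size,
                                   unsigned int pkt_len)
    {
            struct sk_buff *skb = napi_build_skb(buf, frag_size);

            if (unlikely(!skb))
                    return NULL;            /* caller still owns buf */

            skb_reserve(skb, NET_SKB_PAD);  /* skip the headroom */
            skb_put(skb, pkt_len);          /* expose the received bytes */
            return skb;
    }
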
481 struct sk_buff *skb; in __alloc_skb() local
495 skb = napi_skb_cache_get(); in __alloc_skb()
497 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); in __alloc_skb()
498 if (unlikely(!skb)) in __alloc_skb()
500 prefetchw(skb); in __alloc_skb()
525 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
526 __build_skb_around(skb, data, osize); in __alloc_skb()
527 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
532 fclones = container_of(skb, struct sk_buff_fclones, skb1); in __alloc_skb()
534 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
538 return skb; in __alloc_skb()
541 kmem_cache_free(cache, skb); in __alloc_skb()
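
__alloc_skb() is the slow-path allocator behind alloc_skb() and friends: the head comes from kmalloc (rounded up so shinfo lands sensibly), the sk_buff itself from skbuff_head_cache or, with SKB_ALLOC_NAPI, the per-CPU cache above. The canonical TX construction on top of it, as a sketch (tx_build() is illustrative):

    #include <linux/skbuff.h>
    #include <linux/if_ether.h>

    /* Sketch: reserve header room first, append payload, push headers. */
    static struct sk_buff *tx_build(const void *payload, unsigned int len)
    {
            struct sk_buff *skb = alloc_skb(ETH_HLEN + len, GFP_KERNEL);

            if (!skb)
                    return NULL;

            skb_reserve(skb, ETH_HLEN);             /* room for the MAC header */
            skb_put_data(skb, payload, len);        /* payload at skb->data */
            skb_push(skb, ETH_HLEN);                /* step back over the header */
            skb_reset_mac_header(skb);
            return skb;
    }
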
563 struct sk_buff *skb; in __netdev_alloc_skb() local
575 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); in __netdev_alloc_skb()
576 if (!skb) in __netdev_alloc_skb()
602 skb = __build_skb(data, len); in __netdev_alloc_skb()
603 if (unlikely(!skb)) { in __netdev_alloc_skb()
609 skb->pfmemalloc = 1; in __netdev_alloc_skb()
610 skb->head_frag = 1; in __netdev_alloc_skb()
613 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
614 skb->dev = dev; in __netdev_alloc_skb()
617 return skb; in __netdev_alloc_skb()
638 struct sk_buff *skb; in __napi_alloc_skb() local
653 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, in __napi_alloc_skb()
655 if (!skb) in __napi_alloc_skb()
691 skb = __napi_build_skb(data, len); in __napi_alloc_skb()
692 if (unlikely(!skb)) { in __napi_alloc_skb()
698 skb->pfmemalloc = 1; in __napi_alloc_skb()
699 skb->head_frag = 1; in __napi_alloc_skb()
702 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); in __napi_alloc_skb()
703 skb->dev = napi->dev; in __napi_alloc_skb()
706 return skb; in __napi_alloc_skb()
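
__netdev_alloc_skb() and __napi_alloc_skb() add NET_SKB_PAD (plus NET_IP_ALIGN for the NAPI variant) of headroom and take the head from a page-fragment cache when the size allows. A sketch of the common "copybreak" use in a poll routine; rx_small() and the drop handling are assumptions:

    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>

    /* Sketch: small frames are copied into a fresh NAPI skb so the big
     * DMA buffer can stay on the RX ring.
     */
    static void rx_small(struct napi_struct *napi, const void *frame,
                         unsigned int len)
    {
            struct sk_buff *skb = napi_alloc_skb(napi, len);

            if (unlikely(!skb))
                    return;                         /* drop; stats elided */

            skb_put_data(skb, frame, len);
            skb->protocol = eth_type_trans(skb, napi->dev);
            napi_gro_receive(napi, skb);
    }
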
710 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, in skb_add_rx_frag() argument
713 skb_fill_page_desc(skb, i, page, off, size); in skb_add_rx_frag()
714 skb->len += size; in skb_add_rx_frag()
715 skb->data_len += size; in skb_add_rx_frag()
716 skb->truesize += truesize; in skb_add_rx_frag()
720 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, in skb_coalesce_rx_frag() argument
723 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
726 skb->len += size; in skb_coalesce_rx_frag()
727 skb->data_len += size; in skb_coalesce_rx_frag()
728 skb->truesize += truesize; in skb_coalesce_rx_frag()
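
skb_add_rx_frag() appends a page to the frags[] array and grows len, data_len and truesize in one step; skb_coalesce_rx_frag() instead extends the last frag when the new bytes are contiguous. A sketch of the former; the ring-buffer parameters and full-page truesize are assumptions:

    #include <linux/skbuff.h>

    /* Sketch: attach RX payload as nonlinear data; truesize must account
     * for the whole buffer consumed, not just the bytes used.
     */
    static void rx_attach_frag(struct sk_buff *skb, struct page *page,
                               unsigned int off, unsigned int size)
    {
            skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, size,
                            PAGE_SIZE);
    }
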
738 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
740 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
743 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
747 skb_walk_frags(skb, list) in skb_clone_fraglist()
751 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
753 unsigned char *head = skb->head; in skb_free_head()
755 if (skb->head_frag) { in skb_free_head()
756 if (skb_pp_recycle(skb, head)) in skb_free_head()
764 static void skb_release_data(struct sk_buff *skb) in skb_release_data() argument
766 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
769 if (skb->cloned && in skb_release_data()
770 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, in skb_release_data()
774 if (skb_zcopy(skb)) { in skb_release_data()
777 skb_zcopy_clear(skb, true); in skb_release_data()
783 __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); in skb_release_data()
789 skb_free_head(skb); in skb_release_data()
800 skb->pp_recycle = 0; in skb_release_data()
806 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
810 switch (skb->fclone) { in kfree_skbmem()
812 kmem_cache_free(skbuff_head_cache, skb); in kfree_skbmem()
816 fclones = container_of(skb, struct sk_buff_fclones, skb1); in kfree_skbmem()
827 fclones = container_of(skb, struct sk_buff_fclones, skb2); in kfree_skbmem()
836 void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
838 skb_dst_drop(skb); in skb_release_head_state()
839 if (skb->destructor) { in skb_release_head_state()
841 skb->destructor(skb); in skb_release_head_state()
844 nf_conntrack_put(skb_nfct(skb)); in skb_release_head_state()
846 skb_ext_put(skb); in skb_release_head_state()
850 static void skb_release_all(struct sk_buff *skb) in skb_release_all() argument
852 skb_release_head_state(skb); in skb_release_all()
853 if (likely(skb->head)) in skb_release_all()
854 skb_release_data(skb); in skb_release_all()
866 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
868 skb_release_all(skb); in __kfree_skb()
869 kfree_skbmem(skb); in __kfree_skb()
883 kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) in kfree_skb_reason() argument
885 if (unlikely(!skb_unref(skb))) in kfree_skb_reason()
890 trace_kfree_skb(skb, __builtin_return_address(0), reason); in kfree_skb_reason()
891 __kfree_skb(skb); in kfree_skb_reason()
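
The free family splits by intent: kfree_skb_reason() (and kfree_skb(), which passes SKB_DROP_REASON_NOT_SPECIFIED) fires the kfree_skb tracepoint for drops, while consume_skb() below stays silent for normal end-of-life. A sketch of choosing between them; finish_skb() is illustrative:

    #include <linux/skbuff.h>

    /* Sketch: keep drop-monitor output meaningful by freeing with intent. */
    static void finish_skb(struct sk_buff *skb, bool delivered)
    {
            if (delivered)
                    consume_skb(skb);       /* not a drop, no trace event */
            else
                    kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
    }
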
913 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) in skb_dump() argument
915 struct skb_shared_info *sh = skb_shinfo(skb); in skb_dump()
916 struct net_device *dev = skb->dev; in skb_dump()
917 struct sock *sk = skb->sk; in skb_dump()
924 len = skb->len; in skb_dump()
926 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
928 headroom = skb_headroom(skb); in skb_dump()
929 tailroom = skb_tailroom(skb); in skb_dump()
931 has_mac = skb_mac_header_was_set(skb); in skb_dump()
932 has_trans = skb_transport_header_was_set(skb); in skb_dump()
939 level, skb->len, headroom, skb_headlen(skb), tailroom, in skb_dump()
940 has_mac ? skb->mac_header : -1, in skb_dump()
941 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
942 skb->network_header, in skb_dump()
943 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
944 has_trans ? skb->transport_header : -1, in skb_dump()
947 skb->csum, skb->ip_summed, skb->csum_complete_sw, in skb_dump()
948 skb->csum_valid, skb->csum_level, in skb_dump()
949 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
950 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); in skb_dump()
961 16, 1, skb->head, headroom, false); in skb_dump()
963 seg_len = min_t(int, skb_headlen(skb), len); in skb_dump()
966 16, 1, skb->data, seg_len, false); in skb_dump()
971 16, 1, skb_tail_pointer(skb), tailroom, false); in skb_dump()
973 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
974 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
994 if (full_pkt && skb_has_frag_list(skb)) { in skb_dump()
996 skb_walk_frags(skb, list_skb) in skb_dump()
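
skb_dump() prints lengths, header offsets, checksum state and, on request, hex-dumps headroom, linear data, tailroom and every fragment. A typical one-off debugging call, sketched:

    #include <linux/skbuff.h>

    static void debug_dump(const struct sk_buff *skb)
    {
            /* full_pkt=true additionally dumps payload and frag pages */
            skb_dump(KERN_ERR, skb, false);
    }
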
1009 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
1011 if (skb) { in skb_tx_error()
1012 skb_zcopy_downgrade_managed(skb); in skb_tx_error()
1013 skb_zcopy_clear(skb, true); in skb_tx_error()
1027 void consume_skb(struct sk_buff *skb) in consume_skb() argument
1029 if (!skb_unref(skb)) in consume_skb()
1032 trace_consume_skb(skb); in consume_skb()
1033 __kfree_skb(skb); in consume_skb()
1045 void __consume_stateless_skb(struct sk_buff *skb) in __consume_stateless_skb() argument
1047 trace_consume_skb(skb); in __consume_stateless_skb()
1048 skb_release_data(skb); in __consume_stateless_skb()
1049 kfree_skbmem(skb); in __consume_stateless_skb()
1052 static void napi_skb_cache_put(struct sk_buff *skb) in napi_skb_cache_put() argument
1057 kasan_poison_object_data(skbuff_head_cache, skb); in napi_skb_cache_put()
1058 nc->skb_cache[nc->skb_count++] = skb; in napi_skb_cache_put()
1071 void __kfree_skb_defer(struct sk_buff *skb) in __kfree_skb_defer() argument
1073 skb_release_all(skb); in __kfree_skb_defer()
1074 napi_skb_cache_put(skb); in __kfree_skb_defer()
1077 void napi_skb_free_stolen_head(struct sk_buff *skb) in napi_skb_free_stolen_head() argument
1079 if (unlikely(skb->slow_gro)) { in napi_skb_free_stolen_head()
1080 nf_reset_ct(skb); in napi_skb_free_stolen_head()
1081 skb_dst_drop(skb); in napi_skb_free_stolen_head()
1082 skb_ext_put(skb); in napi_skb_free_stolen_head()
1083 skb_orphan(skb); in napi_skb_free_stolen_head()
1084 skb->slow_gro = 0; in napi_skb_free_stolen_head()
1086 napi_skb_cache_put(skb); in napi_skb_free_stolen_head()
1089 void napi_consume_skb(struct sk_buff *skb, int budget) in napi_consume_skb() argument
1093 dev_consume_skb_any(skb); in napi_consume_skb()
1099 if (!skb_unref(skb)) in napi_consume_skb()
1103 trace_consume_skb(skb); in napi_consume_skb()
1106 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
1107 __kfree_skb(skb); in napi_consume_skb()
1111 skb_release_all(skb); in napi_consume_skb()
1112 napi_skb_cache_put(skb); in napi_consume_skb()
1172 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
1174 #define C(x) n->x = skb->x in __skb_clone()
1178 __copy_skb_header(n, skb); in __skb_clone()
1183 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1198 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1199 skb->cloned = 1; in __skb_clone()
1290 struct sk_buff *skb; in msg_zerocopy_alloc() local
1294 skb = sock_omalloc(sk, 0, GFP_KERNEL); in msg_zerocopy_alloc()
1295 if (!skb) in msg_zerocopy_alloc()
1298 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in msg_zerocopy_alloc()
1299 uarg = (void *)skb->cb; in msg_zerocopy_alloc()
1303 kfree_skb(skb); in msg_zerocopy_alloc()
1374 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) in skb_zerocopy_notify_extend() argument
1376 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); in skb_zerocopy_notify_extend()
1396 struct sk_buff *tail, *skb = skb_from_uarg(uarg); in __msg_zerocopy_callback() local
1398 struct sock *sk = skb->sk; in __msg_zerocopy_callback()
1418 serr = SKB_EXT_ERR(skb); in __msg_zerocopy_callback()
1432 __skb_queue_tail(q, skb); in __msg_zerocopy_callback()
1433 skb = NULL; in __msg_zerocopy_callback()
1440 consume_skb(skb); in __msg_zerocopy_callback()
1444 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, in msg_zerocopy_callback() argument
1468 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, in skb_zerocopy_iter_stream() argument
1472 struct ubuf_info *orig_uarg = skb_zcopy(skb); in skb_zerocopy_iter_stream()
1473 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1481 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_stream()
1482 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1483 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1486 iov_iter_revert(&msg->msg_iter, skb->len - orig_len); in skb_zerocopy_iter_stream()
1487 skb->sk = sk; in skb_zerocopy_iter_stream()
1488 ___pskb_trim(skb, orig_len); in skb_zerocopy_iter_stream()
1489 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1493 skb_zcopy_set(skb, uarg, NULL); in skb_zerocopy_iter_stream()
1494 return skb->len - orig_len; in skb_zerocopy_iter_stream()
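
skb_zerocopy_iter_stream() is the kernel half of MSG_ZEROCOPY for stream sockets: on -EFAULT, or an -EMSGSIZE that added no bytes, it rolls both the skb and the iterator back to orig_len. For orientation, the matching userspace flow (per Documentation/networking/msg_zerocopy.rst) looks roughly like the sketch below; error handling and notification parsing are elided, and header availability of the MSG_ZEROCOPY/SO_ZEROCOPY macros depends on the libc:

    #include <sys/socket.h>

    /* Userspace sketch: opt in, send without copying, reap the
     * completion from the socket error queue.
     */
    static void zc_send(int fd, const void *buf, size_t len)
    {
            int one = 1;
            char ctrl[128];
            struct msghdr msg = {
                    .msg_control = ctrl,
                    .msg_controllen = sizeof(ctrl),
            };

            setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
            send(fd, buf, len, MSG_ZEROCOPY);       /* pages pinned, not copied */
            recvmsg(fd, &msg, MSG_ERRQUEUE);        /* completion notification */
    }
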
1498 void __skb_zcopy_downgrade_managed(struct sk_buff *skb) in __skb_zcopy_downgrade_managed() argument
1502 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; in __skb_zcopy_downgrade_managed()
1503 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in __skb_zcopy_downgrade_managed()
1504 skb_frag_ref(skb, i); in __skb_zcopy_downgrade_managed()
1543 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
1545 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
1550 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) in skb_copy_ubufs()
1556 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; in skb_copy_ubufs()
1574 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
1601 skb_frag_unref(skb, i); in skb_copy_ubufs()
1605 __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); in skb_copy_ubufs()
1608 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); in skb_copy_ubufs()
1609 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
1612 skb_zcopy_clear(skb, false); in skb_copy_ubufs()
1631 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
1633 struct sk_buff_fclones *fclones = container_of(skb, in skb_clone()
1638 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
1641 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
1647 if (skb_pfmemalloc(skb)) in skb_clone()
1657 return __skb_clone(n, skb); in skb_clone()
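
skb_clone() duplicates only the sk_buff header, preferring the companion fclone slot when the original was allocated with SKB_ALLOC_FCLONE; the payload stays shared and shinfo->dataref is bumped in __skb_clone() above. Anyone wanting to write the data afterwards must take a private copy first, as in this sketch (edit_shared() is illustrative):

    #include <linux/skbuff.h>

    /* Sketch: clones share payload bytes, so unclone before writing. */
    static int edit_shared(struct sk_buff *skb)
    {
            if (skb_unclone(skb, GFP_ATOMIC))       /* copies head if cloned */
                    return -ENOMEM;
            skb->data[0] ^= 0x01;                   /* now safe to modify */
            return 0;
    }
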
1661 void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
1664 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
1665 skb->csum_start += off; in skb_headers_offset_update()
1667 skb->transport_header += off; in skb_headers_offset_update()
1668 skb->network_header += off; in skb_headers_offset_update()
1669 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
1670 skb->mac_header += off; in skb_headers_offset_update()
1671 skb->inner_transport_header += off; in skb_headers_offset_update()
1672 skb->inner_network_header += off; in skb_headers_offset_update()
1673 skb->inner_mac_header += off; in skb_headers_offset_update()
1687 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
1689 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
1711 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
1713 int headerlen = skb_headroom(skb); in skb_copy()
1714 unsigned int size = skb_end_offset(skb) + skb->data_len; in skb_copy()
1716 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
1724 skb_put(n, skb->len); in skb_copy()
1726 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
1728 skb_copy_header(n, skb); in skb_copy()
1750 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, in __pskb_copy_fclone() argument
1753 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy_fclone()
1754 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); in __pskb_copy_fclone()
1763 skb_put(n, skb_headlen(skb)); in __pskb_copy_fclone()
1765 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
1767 n->truesize += skb->data_len; in __pskb_copy_fclone()
1768 n->data_len = skb->data_len; in __pskb_copy_fclone()
1769 n->len = skb->len; in __pskb_copy_fclone()
1771 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
1774 if (skb_orphan_frags(skb, gfp_mask) || in __pskb_copy_fclone()
1775 skb_zerocopy_clone(n, skb, gfp_mask)) { in __pskb_copy_fclone()
1780 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
1781 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
1782 skb_frag_ref(skb, i); in __pskb_copy_fclone()
1787 if (skb_has_frag_list(skb)) { in __pskb_copy_fclone()
1788 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
1792 skb_copy_header(n, skb); in __pskb_copy_fclone()
1814 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
1817 int i, osize = skb_end_offset(skb); in pskb_expand_head()
1824 BUG_ON(skb_shared(skb)); in pskb_expand_head()
1826 skb_zcopy_downgrade_managed(skb); in pskb_expand_head()
1830 if (skb_pfmemalloc(skb)) in pskb_expand_head()
1841 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
1844 skb_shinfo(skb), in pskb_expand_head()
1845 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
1852 if (skb_cloned(skb)) { in pskb_expand_head()
1853 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
1855 if (skb_zcopy(skb)) in pskb_expand_head()
1856 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
1857 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
1858 skb_frag_ref(skb, i); in pskb_expand_head()
1860 if (skb_has_frag_list(skb)) in pskb_expand_head()
1861 skb_clone_fraglist(skb); in pskb_expand_head()
1863 skb_release_data(skb); in pskb_expand_head()
1865 skb_free_head(skb); in pskb_expand_head()
1867 off = (data + nhead) - skb->head; in pskb_expand_head()
1869 skb->head = data; in pskb_expand_head()
1870 skb->head_frag = 0; in pskb_expand_head()
1871 skb->data += off; in pskb_expand_head()
1873 skb_set_end_offset(skb, size); in pskb_expand_head()
1877 skb->tail += off; in pskb_expand_head()
1878 skb_headers_offset_update(skb, nhead); in pskb_expand_head()
1879 skb->cloned = 0; in pskb_expand_head()
1880 skb->hdr_len = 0; in pskb_expand_head()
1881 skb->nohdr = 0; in pskb_expand_head()
1882 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
1884 skb_metadata_clear(skb); in pskb_expand_head()
1890 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
1891 skb->truesize += size - osize; in pskb_expand_head()
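
pskb_expand_head() reallocates the head with nhead/ntail extra bytes, re-references frags for clones and fixes every header offset via skb_headers_offset_update(). Most callers reach it through helpers such as skb_cow_head(); a sketch of the usual "make room, then push" pattern (HDR_LEN and push_outer_header() are illustrative):

    #include <linux/skbuff.h>
    #include <linux/string.h>

    #define HDR_LEN 8       /* illustrative encapsulation header size */

    /* Sketch: guarantee writable headroom before pushing a new header. */
    static int push_outer_header(struct sk_buff *skb, const void *hdr)
    {
            int err = skb_cow_head(skb, HDR_LEN);   /* may expand the head */

            if (err)
                    return err;
            memcpy(skb_push(skb, HDR_LEN), hdr, HDR_LEN);
            return 0;
    }
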
1904 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
1907 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
1910 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
1912 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
1923 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) in __skb_unclone_keeptruesize() argument
1929 saved_end_offset = skb_end_offset(skb); in __skb_unclone_keeptruesize()
1930 saved_truesize = skb->truesize; in __skb_unclone_keeptruesize()
1932 res = pskb_expand_head(skb, 0, 0, pri); in __skb_unclone_keeptruesize()
1936 skb->truesize = saved_truesize; in __skb_unclone_keeptruesize()
1938 if (likely(skb_end_offset(skb) == saved_end_offset)) in __skb_unclone_keeptruesize()
1941 shinfo = skb_shinfo(skb); in __skb_unclone_keeptruesize()
1946 memmove(skb->head + saved_end_offset, in __skb_unclone_keeptruesize()
1950 skb_set_end_offset(skb, saved_end_offset); in __skb_unclone_keeptruesize()
1967 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) in skb_expand_head() argument
1969 int delta = headroom - skb_headroom(skb); in skb_expand_head()
1970 int osize = skb_end_offset(skb); in skb_expand_head()
1971 struct sock *sk = skb->sk; in skb_expand_head()
1975 return skb; in skb_expand_head()
1979 if (skb_shared(skb) || !is_skb_wmem(skb)) { in skb_expand_head()
1980 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); in skb_expand_head()
1987 consume_skb(skb); in skb_expand_head()
1988 skb = nskb; in skb_expand_head()
1990 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) in skb_expand_head()
1993 if (sk && is_skb_wmem(skb)) { in skb_expand_head()
1994 delta = skb_end_offset(skb) - osize; in skb_expand_head()
1996 skb->truesize += delta; in skb_expand_head()
1998 return skb; in skb_expand_head()
2001 kfree_skb(skb); in skb_expand_head()
2024 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
2031 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
2032 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
2034 int oldheadroom = skb_headroom(skb); in skb_copy_expand()
2043 skb_put(n, skb->len); in skb_copy_expand()
2053 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
2054 skb->len + head_copy_len)); in skb_copy_expand()
2056 skb_copy_header(n, skb); in skb_copy_expand()
2078 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) in __skb_pad() argument
2084 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in __skb_pad()
2085 memset(skb->data+skb->len, 0, pad); in __skb_pad()
2089 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
2090 if (likely(skb_cloned(skb) || ntail > 0)) { in __skb_pad()
2091 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in __skb_pad()
2099 err = skb_linearize(skb); in __skb_pad()
2103 memset(skb->data + skb->len, 0, pad); in __skb_pad()
2108 kfree_skb(skb); in __skb_pad()
2126 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) in pskb_put() argument
2128 if (tail != skb) { in pskb_put()
2129 skb->data_len += len; in pskb_put()
2130 skb->len += len; in pskb_put()
2145 void *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
2147 void *tmp = skb_tail_pointer(skb); in skb_put()
2148 SKB_LINEAR_ASSERT(skb); in skb_put()
2149 skb->tail += len; in skb_put()
2150 skb->len += len; in skb_put()
2151 if (unlikely(skb->tail > skb->end)) in skb_put()
2152 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
2166 void *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
2168 skb->data -= len; in skb_push()
2169 skb->len += len; in skb_push()
2170 if (unlikely(skb->data < skb->head)) in skb_push()
2171 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
2172 return skb->data; in skb_push()
2186 void *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
2188 return skb_pull_inline(skb, len); in skb_pull()
2203 void *skb_pull_data(struct sk_buff *skb, size_t len) in skb_pull_data() argument
2205 void *data = skb->data; in skb_pull_data()
2207 if (skb->len < len) in skb_pull_data()
2210 skb_pull(skb, len); in skb_pull_data()
2225 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
2227 if (skb->len > len) in skb_trim()
2228 __skb_trim(skb, len); in skb_trim()
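
Taken together, the pointer movers above maintain the invariant head <= data <= tail <= end on the linear area; a summary sketch of what each one shifts:

    /*
     *   head ... data ............ tail ... end
     *            |<-- skb->len -->|         (linear part only)
     *
     *   skb_reserve(skb, n)   data += n, tail += n   (empty skb only)
     *   skb_push(skb, n)      data -= n, len += n    (skb_under_panic below head)
     *   skb_put(skb, n)       tail += n, len += n    (skb_over_panic past end)
     *   skb_pull(skb, n)      data += n, len -= n
     *   skb_trim(skb, n)      tail and len cut back to n (if longer)
     */
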
2235 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
2239 int offset = skb_headlen(skb); in ___pskb_trim()
2240 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
2244 if (skb_cloned(skb) && in ___pskb_trim()
2245 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
2253 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
2260 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
2263 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
2266 skb_frag_unref(skb, i); in ___pskb_trim()
2268 if (skb_has_frag_list(skb)) in ___pskb_trim()
2269 skb_drop_fraglist(skb); in ___pskb_trim()
2273 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
2305 if (len > skb_headlen(skb)) { in ___pskb_trim()
2306 skb->data_len -= skb->len - len; in ___pskb_trim()
2307 skb->len = len; in ___pskb_trim()
2309 skb->len = len; in ___pskb_trim()
2310 skb->data_len = 0; in ___pskb_trim()
2311 skb_set_tail_pointer(skb, len); in ___pskb_trim()
2314 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2315 skb_condense(skb); in ___pskb_trim()
2322 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) in pskb_trim_rcsum_slow() argument
2324 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2325 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2327 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2328 skb_checksum(skb, len, delta, 0), in pskb_trim_rcsum_slow()
2330 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in pskb_trim_rcsum_slow()
2331 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; in pskb_trim_rcsum_slow()
2332 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow()
2337 return __pskb_trim(skb, len); in pskb_trim_rcsum_slow()
2366 void *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
2372 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2374 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
2375 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
2380 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), in __pskb_pull_tail()
2381 skb_tail_pointer(skb), delta)); in __pskb_pull_tail()
2386 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
2391 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2392 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2407 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2441 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2442 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2448 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2456 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2457 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2460 skb_frag_unref(skb, i); in __pskb_pull_tail()
2463 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2465 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2476 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2479 skb->tail += delta; in __pskb_pull_tail()
2480 skb->data_len -= delta; in __pskb_pull_tail()
2482 if (!skb->data_len) in __pskb_pull_tail()
2483 skb_zcopy_clear(skb, false); in __pskb_pull_tail()
2485 return skb_tail_pointer(skb); in __pskb_pull_tail()
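
__pskb_pull_tail() is the slow path behind pskb_may_pull(): it copies delta bytes from the frags (or frag_list) into the linear area, expanding the head when needed. Parsers use the fast wrapper to guarantee linearity before dereferencing, as in this sketch (parse_ipv4() is illustrative and assumes the IP header starts at skb->data):

    #include <linux/ip.h>
    #include <linux/skbuff.h>

    /* Sketch: never dereference header fields before making them linear. */
    static int parse_ipv4(struct sk_buff *skb)
    {
            const struct iphdr *iph;

            if (!pskb_may_pull(skb, sizeof(*iph)))
                    return -EINVAL;                 /* runt or pull failed */

            iph = (const struct iphdr *)skb->data;  /* linear now */
            return iph->protocol;
    }
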
2504 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
2506 int start = skb_headlen(skb); in skb_copy_bits()
2510 if (offset > (int)skb->len - len) in skb_copy_bits()
2517 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
2524 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
2526 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
2555 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
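
skb_copy_bits() gathers an arbitrary byte range from the linear head, the page frags and the frag_list into a flat buffer; negative offsets reach back into the headroom, which skb_copy() above relies on. A small debugging sketch (log_prefix() is illustrative):

    #include <linux/kernel.h>
    #include <linux/printk.h>
    #include <linux/skbuff.h>

    /* Sketch: peek at the first bytes without linearizing the skb. */
    static void log_prefix(const struct sk_buff *skb)
    {
            u8 buf[64];
            int n = min_t(int, skb->len, sizeof(buf));

            if (skb_copy_bits(skb, 0, buf, n) == 0)
                    print_hex_dump_bytes("pkt: ", DUMP_PREFIX_OFFSET, buf, n);
    }
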
2689 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
2701 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
2702 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
2703 skb_headlen(skb), in __skb_splice_bits()
2705 skb_head_is_locked(skb), in __skb_splice_bits()
2712 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
2713 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
2721 skb_walk_frags(skb, iter) { in __skb_splice_bits()
2741 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, in skb_splice_bits() argument
2756 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); in skb_splice_bits()
2789 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, in __skb_send_sock() argument
2793 struct sk_buff *head = skb; in __skb_send_sock()
2800 while (offset < skb_headlen(skb) && len) { in __skb_send_sock()
2804 slen = min_t(int, len, skb_headlen(skb) - offset); in __skb_send_sock()
2805 kv.iov_base = skb->data + offset; in __skb_send_sock()
2824 offset -= skb_headlen(skb); in __skb_send_sock()
2827 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
2828 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
2836 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
2837 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
2861 if (skb == head) { in __skb_send_sock()
2862 if (skb_has_frag_list(skb)) { in __skb_send_sock()
2863 skb = skb_shinfo(skb)->frag_list; in __skb_send_sock()
2866 } else if (skb->next) { in __skb_send_sock()
2867 skb = skb->next; in __skb_send_sock()
2880 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, in skb_send_sock_locked() argument
2883 return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked, in skb_send_sock_locked()
2889 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) in skb_send_sock() argument
2891 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, in skb_send_sock()
2907 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
2909 int start = skb_headlen(skb); in skb_store_bits()
2913 if (offset > (int)skb->len - len) in skb_store_bits()
2919 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
2926 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
2927 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
2957 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
2985 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, in __skb_checksum() argument
2988 int start = skb_headlen(skb); in __skb_checksum()
2998 skb->data + offset, copy, csum); in __skb_checksum()
3005 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
3007 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
3042 skb_walk_frags(skb, frag_iter) { in __skb_checksum()
3069 __wsum skb_checksum(const struct sk_buff *skb, int offset, in skb_checksum() argument
3077 return __skb_checksum(skb, offset, len, csum, &ops); in skb_checksum()
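
skb_checksum() runs the same head/frags/frag_list walk but feeds each chunk through csum_partial()-style ops instead of copying. A sketch of checksumming a payload region; the offset is assumed already validated:

    #include <linux/skbuff.h>
    #include <net/checksum.h>

    /* Sketch: fold a checksum over everything past "off". */
    static __sum16 payload_csum(const struct sk_buff *skb, int off)
    {
            return csum_fold(skb_checksum(skb, off, skb->len - off, 0));
    }
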
3083 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
3086 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
3096 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
3105 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
3110 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
3112 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
3141 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
3168 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) in __skb_checksum_complete_head() argument
3172 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
3175 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
3176 !skb->csum_complete_sw) in __skb_checksum_complete_head()
3177 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
3179 if (!skb_shared(skb)) in __skb_checksum_complete_head()
3180 skb->csum_valid = !sum; in __skb_checksum_complete_head()
3194 __sum16 __skb_checksum_complete(struct sk_buff *skb) in __skb_checksum_complete() argument
3199 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
3201 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
3210 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
3211 !skb->csum_complete_sw) in __skb_checksum_complete()
3212 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
3215 if (!skb_shared(skb)) { in __skb_checksum_complete()
3217 skb->csum = csum; in __skb_checksum_complete()
3218 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
3219 skb->csum_complete_sw = 1; in __skb_checksum_complete()
3220 skb->csum_valid = !sum; in __skb_checksum_complete()
3357 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
3362 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3363 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
3365 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
3367 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
3369 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
3372 if (csstart != skb->len) in skb_copy_and_csum_dev()
3373 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
3374 skb->len - csstart); in skb_copy_and_csum_dev()
3376 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3377 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
3435 struct sk_buff *skb; in skb_queue_purge() local
3436 while ((skb = skb_dequeue(list)) != NULL) in skb_queue_purge()
3437 kfree_skb(skb); in skb_queue_purge()
3457 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); in skb_rbtree_purge() local
3460 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
3461 sum += skb->truesize; in skb_rbtree_purge()
3462 kfree_skb(skb); in skb_rbtree_purge()
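
skb_queue_purge() and skb_rbtree_purge() are the teardown ends of the two stock containers; the list variant pairs with the locked sk_buff_head API, sketched here (queue_demo() is illustrative):

    #include <linux/skbuff.h>

    /* Sketch: the sk_buff_head lifecycle that skb_queue_purge() closes. */
    static void queue_demo(struct sk_buff *skb)
    {
            struct sk_buff_head q;

            skb_queue_head_init(&q);
            skb_queue_tail(&q, skb);        /* takes q.lock internally */

            /* a consumer would skb_dequeue(&q) here ... */

            skb_queue_purge(&q);            /* frees whatever is left */
    }
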
3519 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
3524 __skb_unlink(skb, list); in skb_unlink()
3549 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
3555 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
3558 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
3559 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
3561 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
3562 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
3563 skb1->data_len = skb->data_len; in skb_split_inside_header()
3565 skb->data_len = 0; in skb_split_inside_header()
3566 skb->len = len; in skb_split_inside_header()
3567 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
3570 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
3575 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
3577 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
3578 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
3579 skb->len = len; in skb_split_no_header()
3580 skb->data_len = len - pos; in skb_split_no_header()
3583 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
3586 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
3597 skb_frag_ref(skb, i); in skb_split_no_header()
3600 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
3601 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3605 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3617 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
3619 int pos = skb_headlen(skb); in skb_split()
3622 skb_zcopy_downgrade_managed(skb); in skb_split()
3624 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; in skb_split()
3625 skb_zerocopy_clone(skb1, skb, 0); in skb_split()
3627 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
3629 skb_split_no_header(skb, skb1, len, pos); in skb_split()
3637 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
3639 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); in skb_prepare_for_shift()
3660 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
3665 BUG_ON(shiftlen > skb->len); in skb_shift()
3667 if (skb_headlen(skb)) in skb_shift()
3669 if (skb_zcopy(tgt) || skb_zcopy(skb)) in skb_shift()
3675 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3689 if (skb_prepare_for_shift(skb) || in skb_shift()
3694 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3708 if ((shiftlen == skb->len) && in skb_shift()
3709 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
3712 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
3715 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
3719 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3747 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
3751 __skb_frag_unref(fragfrom, skb->pp_recycle); in skb_shift()
3756 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
3757 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
3758 skb_shinfo(skb)->nr_frags = to; in skb_shift()
3760 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
3767 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
3769 skb_len_add(skb, -shiftlen); in skb_shift()
3785 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
3790 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
3946 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
3957 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); in skb_find_text()
3964 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, in skb_append_pagefrags() argument
3967 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
3969 if (skb_can_coalesce(skb, i, page, offset)) { in skb_append_pagefrags()
3970 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
3972 skb_zcopy_downgrade_managed(skb); in skb_append_pagefrags()
3974 skb_fill_page_desc_noacc(skb, i, page, offset, size); in skb_append_pagefrags()
3994 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
3996 unsigned char *data = skb->data; in skb_pull_rcsum()
3998 BUG_ON(len > skb->len); in skb_pull_rcsum()
3999 __skb_pull(skb, len); in skb_pull_rcsum()
4000 skb_postpull_rcsum(skb, data, len); in skb_pull_rcsum()
4001 return skb->data; in skb_pull_rcsum()
4018 struct sk_buff *skb_segment_list(struct sk_buff *skb, in skb_segment_list() argument
4022 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; in skb_segment_list()
4023 unsigned int tnl_hlen = skb_tnl_header_len(skb); in skb_segment_list()
4030 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
4032 skb_shinfo(skb)->frag_list = NULL; in skb_segment_list()
4052 skb->next = nskb; in skb_segment_list()
4068 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); in skb_segment_list()
4069 __copy_skb_header(nskb, skb); in skb_segment_list()
4071 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); in skb_segment_list()
4073 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment_list()
4083 skb->truesize = skb->truesize - delta_truesize; in skb_segment_list()
4084 skb->data_len = skb->data_len - delta_len; in skb_segment_list()
4085 skb->len = skb->len - delta_len; in skb_segment_list()
4087 skb_gso_reset(skb); in skb_segment_list()
4089 skb->prev = tail; in skb_segment_list()
4091 if (skb_needs_linearize(skb, features) && in skb_segment_list()
4092 __skb_linearize(skb)) in skb_segment_list()
4095 skb_get(skb); in skb_segment_list()
4097 return skb; in skb_segment_list()
4100 kfree_skb_list(skb->next); in skb_segment_list()
4101 skb->next = NULL; in skb_segment_list()
4535 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, in __skb_to_sgvec() argument
4538 int start = skb_headlen(skb); in __skb_to_sgvec()
4549 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
4556 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
4561 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
4563 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
4579 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
4618 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
4620 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec()
4650 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, in skb_to_sgvec_nomark() argument
4653 return __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec_nomark()
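
skb_to_sgvec() maps the linear head, each frag and any frag_list into scatterlist entries, which is how IPsec and similar users hand packets to the crypto layer; skb_to_sgvec_nomark() leaves the end marker off so several ranges can be chained. Sketch (map_skb() and the nents sizing are assumptions):

    #include <linux/scatterlist.h>
    #include <linux/skbuff.h>

    /* Sketch: map a whole skb for crypto/DMA; returns entries used. */
    static int map_skb(struct sk_buff *skb, struct scatterlist *sg, int nents)
    {
            int nsg;

            sg_init_table(sg, nents);
            nsg = skb_to_sgvec(skb, sg, 0, skb->len);
            return nsg;     /* negative (e.g. -EMSGSIZE) if sg was too small */
    }
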
4676 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
4686 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
4687 !__pskb_pull_tail(skb, __skb_pagelen(skb))) in skb_cow_data()
4691 if (!skb_has_frag_list(skb)) { in skb_cow_data()
4697 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
4698 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
4702 *trailer = skb; in skb_cow_data()
4709 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
4769 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
4771 struct sock *sk = skb->sk; in sock_rmem_free()
4773 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
4776 static void skb_set_err_queue(struct sk_buff *skb) in skb_set_err_queue() argument
4781 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
4788 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
4790 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
4794 skb_orphan(skb); in sock_queue_err_skb()
4795 skb->sk = sk; in sock_queue_err_skb()
4796 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
4797 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
4798 skb_set_err_queue(skb); in sock_queue_err_skb()
4801 skb_dst_force(skb); in sock_queue_err_skb()
4803 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
4810 static bool is_icmp_err_skb(const struct sk_buff *skb) in is_icmp_err_skb() argument
4812 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
4813 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
4819 struct sk_buff *skb, *skb_next = NULL; in sock_dequeue_err_skb() local
4824 skb = __skb_dequeue(q); in sock_dequeue_err_skb()
4825 if (skb && (skb_next = skb_peek(q))) { in sock_dequeue_err_skb()
4832 if (is_icmp_err_skb(skb) && !icmp_next) in sock_dequeue_err_skb()
4838 return skb; in sock_dequeue_err_skb()
4855 struct sk_buff *skb_clone_sk(struct sk_buff *skb) in skb_clone_sk() argument
4857 struct sock *sk = skb->sk; in skb_clone_sk()
4863 clone = skb_clone(skb, GFP_ATOMIC); in skb_clone_sk()
4876 static void __skb_complete_tx_timestamp(struct sk_buff *skb, in __skb_complete_tx_timestamp() argument
4884 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
4886 serr = SKB_EXT_ERR(skb); in __skb_complete_tx_timestamp()
4892 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
4894 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
4899 err = sock_queue_err_skb(sk, skb); in __skb_complete_tx_timestamp()
4902 kfree_skb(skb); in __skb_complete_tx_timestamp()
4919 void skb_complete_tx_timestamp(struct sk_buff *skb, in skb_complete_tx_timestamp() argument
4922 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
4931 *skb_hwtstamps(skb) = *hwtstamps; in skb_complete_tx_timestamp()
4932 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); in skb_complete_tx_timestamp()
4938 kfree_skb(skb); in skb_complete_tx_timestamp()
4947 struct sk_buff *skb; in __skb_tstamp_tx() local
4965 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, in __skb_tstamp_tx()
4970 skb = alloc_skb(0, GFP_ATOMIC); in __skb_tstamp_tx()
4972 skb = skb_clone(orig_skb, GFP_ATOMIC); in __skb_tstamp_tx()
4974 if (!skb) in __skb_tstamp_tx()
4978 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
4980 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
4984 *skb_hwtstamps(skb) = *hwtstamps; in __skb_tstamp_tx()
4986 __net_timestamp(skb); in __skb_tstamp_tx()
4988 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); in __skb_tstamp_tx()
5000 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
5002 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
5006 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
5007 skb->wifi_acked = acked; in skb_complete_wifi_ack()
5009 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
5018 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
5022 kfree_skb(skb); in skb_complete_wifi_ack()
5038 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
5041 u32 csum_start = skb_headroom(skb) + (u32)start; in skb_partial_csum_set()
5043 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { in skb_partial_csum_set()
5045 start, off, skb_headroom(skb), skb_headlen(skb)); in skb_partial_csum_set()
5048 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
5049 skb->csum_start = csum_start; in skb_partial_csum_set()
5050 skb->csum_offset = off; in skb_partial_csum_set()
5051 skb_set_transport_header(skb, start); in skb_partial_csum_set()
5056 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, in skb_maybe_pull_tail() argument
5059 if (skb_headlen(skb) >= len) in skb_maybe_pull_tail()
5065 if (max > skb->len) in skb_maybe_pull_tail()
5066 max = skb->len; in skb_maybe_pull_tail()
5068 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
5071 if (skb_headlen(skb) < len) in skb_maybe_pull_tail()
5079 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, in skb_checksum_setup_ip() argument
5087 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), in skb_checksum_setup_ip()
5089 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5093 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
5096 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), in skb_checksum_setup_ip()
5098 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5102 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
5113 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv4() argument
5122 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv4()
5128 if (ip_is_fragment(ip_hdr(skb))) in skb_checksum_setup_ipv4()
5131 off = ip_hdrlen(skb); in skb_checksum_setup_ipv4()
5138 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
5143 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
5144 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
5145 skb->len - off, in skb_checksum_setup_ipv4()
5146 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
5158 #define OPT_HDR(type, skb, off) \ argument
5159 (type *)(skb_network_header(skb) + (off))
5161 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv6() argument
5176 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); in skb_checksum_setup_ipv6()
5180 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
5182 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
5190 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5197 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); in skb_checksum_setup_ipv6()
5205 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5212 hp = OPT_HDR(struct ip_auth_hdr, skb, off); in skb_checksum_setup_ipv6()
5220 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5227 hp = OPT_HDR(struct frag_hdr, skb, off); in skb_checksum_setup_ipv6()
5247 csum = skb_checksum_setup_ip(skb, nexthdr, off); in skb_checksum_setup_ipv6()
5252 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
5253 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
5254 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
5266 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) in skb_checksum_setup() argument
5270 switch (skb->protocol) { in skb_checksum_setup()
5272 err = skb_checksum_setup_ipv4(skb, recalculate); in skb_checksum_setup()
5276 err = skb_checksum_setup_ipv6(skb, recalculate); in skb_checksum_setup()
5301 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, in skb_checksum_maybe_trim() argument
5305 unsigned int len = skb_transport_offset(skb) + transport_len; in skb_checksum_maybe_trim()
5308 if (skb->len < len) in skb_checksum_maybe_trim()
5310 else if (skb->len == len) in skb_checksum_maybe_trim()
5311 return skb; in skb_checksum_maybe_trim()
5313 skb_chk = skb_clone(skb, GFP_ATOMIC); in skb_checksum_maybe_trim()
5341 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, in skb_checksum_trimmed() argument
5343 __sum16(*skb_chkf)(struct sk_buff *skb)) in skb_checksum_trimmed() argument
5346 unsigned int offset = skb_transport_offset(skb); in skb_checksum_trimmed()
5349 skb_chk = skb_checksum_maybe_trim(skb, transport_len); in skb_checksum_trimmed()
5366 if (skb_chk && skb_chk != skb) in skb_checksum_trimmed()
5374 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
5377 skb->dev->name); in __skb_warn_lro_forwarding()
5381 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
5384 skb_release_head_state(skb); in kfree_skb_partial()
5385 kmem_cache_free(skbuff_head_cache, skb); in kfree_skb_partial()
5387 __kfree_skb(skb); in kfree_skb_partial()
5503 void skb_scrub_packet(struct sk_buff *skb, bool xnet) in skb_scrub_packet() argument
5505 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
5506 skb->skb_iif = 0; in skb_scrub_packet()
5507 skb->ignore_df = 0; in skb_scrub_packet()
5508 skb_dst_drop(skb); in skb_scrub_packet()
5509 skb_ext_reset(skb); in skb_scrub_packet()
5510 nf_reset_ct(skb); in skb_scrub_packet()
5511 nf_reset_trace(skb); in skb_scrub_packet()
5514 skb->offload_fwd_mark = 0; in skb_scrub_packet()
5515 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
5521 ipvs_reset(skb); in skb_scrub_packet()
5522 skb->mark = 0; in skb_scrub_packet()
5523 skb_clear_tstamp(skb); in skb_scrub_packet()
5537 static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) in skb_gso_transport_seglen() argument
5539 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_transport_seglen()
5542 if (skb->encapsulation) { in skb_gso_transport_seglen()
5543 thlen = skb_inner_transport_header(skb) - in skb_gso_transport_seglen()
5544 skb_transport_header(skb); in skb_gso_transport_seglen()
5547 thlen += inner_tcp_hdrlen(skb); in skb_gso_transport_seglen()
5549 thlen = tcp_hdrlen(skb); in skb_gso_transport_seglen()
5550 } else if (unlikely(skb_is_gso_sctp(skb))) { in skb_gso_transport_seglen()
5572 static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) in skb_gso_network_seglen() argument
5574 unsigned int hdr_len = skb_transport_header(skb) - in skb_gso_network_seglen()
5575 skb_network_header(skb); in skb_gso_network_seglen()
5577 return hdr_len + skb_gso_transport_seglen(skb); in skb_gso_network_seglen()
5589 static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) in skb_gso_mac_seglen() argument
5591 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); in skb_gso_mac_seglen()
5593 return hdr_len + skb_gso_transport_seglen(skb); in skb_gso_mac_seglen()
5617 static inline bool skb_gso_size_check(const struct sk_buff *skb, in skb_gso_size_check() argument
5620 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_size_check()
5629 skb_walk_frags(skb, iter) { in skb_gso_size_check()
5647 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) in skb_gso_validate_network_len() argument
5649 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); in skb_gso_validate_network_len()
5662 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) in skb_gso_validate_mac_len() argument
5664 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); in skb_gso_validate_mac_len()
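
The two validators answer "would every segment fit?" for GSO packets, where skb->len alone says nothing useful; the IPv4 forwarding path checks MTU in essentially this shape (fits_mtu() is a sketch):

    #include <linux/skbuff.h>

    /* Sketch: GSO-aware MTU check in the style of the forwarding path. */
    static bool fits_mtu(const struct sk_buff *skb, unsigned int mtu)
    {
            if (skb->len <= mtu)
                    return true;
            return skb_is_gso(skb) &&
                   skb_gso_validate_network_len(skb, mtu);
    }
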
5668 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) in skb_reorder_vlan_header() argument
5673 if (skb_cow(skb, skb_headroom(skb)) < 0) { in skb_reorder_vlan_header()
5674 kfree_skb(skb); in skb_reorder_vlan_header()
5678 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
5680 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), in skb_reorder_vlan_header()
5684 meta_len = skb_metadata_len(skb); in skb_reorder_vlan_header()
5686 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
5690 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
5691 return skb; in skb_reorder_vlan_header()
5694 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) in skb_vlan_untag() argument
5699 if (unlikely(skb_vlan_tag_present(skb))) { in skb_vlan_untag()
5701 return skb; in skb_vlan_untag()
5704 skb = skb_share_check(skb, GFP_ATOMIC); in skb_vlan_untag()
5705 if (unlikely(!skb)) in skb_vlan_untag()
5708 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) in skb_vlan_untag()
5711 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
5713 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
5715 skb_pull_rcsum(skb, VLAN_HLEN); in skb_vlan_untag()
5716 vlan_set_encap_proto(skb, vhdr); in skb_vlan_untag()
5718 skb = skb_reorder_vlan_header(skb); in skb_vlan_untag()
5719 if (unlikely(!skb)) in skb_vlan_untag()
5722 skb_reset_network_header(skb); in skb_vlan_untag()
5723 if (!skb_transport_header_was_set(skb)) in skb_vlan_untag()
5724 skb_reset_transport_header(skb); in skb_vlan_untag()
5725 skb_reset_mac_len(skb); in skb_vlan_untag()
5727 return skb; in skb_vlan_untag()
5730 kfree_skb(skb); in skb_vlan_untag()
5735 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) in skb_ensure_writable() argument
5737 if (!pskb_may_pull(skb, write_len)) in skb_ensure_writable()
5740 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) in skb_ensure_writable()
5743 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_ensure_writable()
5750 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) in __skb_vlan_pop() argument
5753 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
5762 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); in __skb_vlan_pop()
5766 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
5768 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); in __skb_vlan_pop()
5771 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); in __skb_vlan_pop()
5772 __skb_pull(skb, VLAN_HLEN); in __skb_vlan_pop()
5774 vlan_set_encap_proto(skb, vhdr); in __skb_vlan_pop()
5775 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
5777 if (skb_network_offset(skb) < ETH_HLEN) in __skb_vlan_pop()
5778 skb_set_network_header(skb, ETH_HLEN); in __skb_vlan_pop()
5780 skb_reset_mac_len(skb); in __skb_vlan_pop()
5789 int skb_vlan_pop(struct sk_buff *skb) in skb_vlan_pop() argument
5795 if (likely(skb_vlan_tag_present(skb))) { in skb_vlan_pop()
5796 __vlan_hwaccel_clear_tag(skb); in skb_vlan_pop()
5798 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
5801 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
5806 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
5809 vlan_proto = skb->protocol; in skb_vlan_pop()
5810 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
5814 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_pop()
5822 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) in skb_vlan_push() argument
5824 if (skb_vlan_tag_present(skb)) { in skb_vlan_push()
5825 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
5834 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
5835 skb_vlan_tag_get(skb)); in skb_vlan_push()
5839 skb->protocol = skb->vlan_proto; in skb_vlan_push()
5840 skb->mac_len += VLAN_HLEN; in skb_vlan_push()
5842 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
5844 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_push()
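skb_vlan_pop() and skb_vlan_push() are designed to compose: a VLAN tag rewrite, as an OVS or tc action might perform it, is a pop followed by a push. A hedged sketch (hypothetical helper):

    #include <linux/if_vlan.h>

    static int my_vlan_rewrite(struct sk_buff *skb, u16 new_vid)
    {
        int err = skb_vlan_pop(skb);    /* hw-accel tag or in-payload tag */

        if (err)
            return err;
        return skb_vlan_push(skb, htons(ETH_P_8021Q),
                             new_vid & VLAN_VID_MASK);
    }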
5861 int skb_eth_pop(struct sk_buff *skb) in skb_eth_pop() argument
5863 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || in skb_eth_pop()
5864 skb_network_offset(skb) < ETH_HLEN) in skb_eth_pop()
5867 skb_pull_rcsum(skb, ETH_HLEN); in skb_eth_pop()
5868 skb_reset_mac_header(skb); in skb_eth_pop()
5869 skb_reset_mac_len(skb); in skb_eth_pop()
5888 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, in skb_eth_push() argument
5894 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) in skb_eth_push()
5897 err = skb_cow_head(skb, sizeof(*eth)); in skb_eth_push()
5901 skb_push(skb, sizeof(*eth)); in skb_eth_push()
5902 skb_reset_mac_header(skb); in skb_eth_push()
5903 skb_reset_mac_len(skb); in skb_eth_push()
5905 eth = eth_hdr(skb); in skb_eth_push()
5908 eth->h_proto = skb->protocol; in skb_eth_push()
5910 skb_postpush_rcsum(skb, eth, sizeof(*eth)); in skb_eth_push()
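skb_eth_pop() and skb_eth_push() convert between an L2 frame and a bare L3 packet, e.g. around an L3 tunnel. A sketch under the assumption that skb->data sits at the mac header and the frame carries no VLAN tag (hypothetical helper; addresses are placeholders supplied by the caller):

    static int my_l2_to_l3_and_back(struct sk_buff *skb,
                                    const unsigned char *new_dst,
                                    const unsigned char *new_src)
    {
        int err = skb_eth_pop(skb);     /* strip the Ethernet header */

        if (err)
            return err;
        /* ... hand the bare L3 packet to a tunnel, then re-frame ... */
        return skb_eth_push(skb, new_dst, new_src);
    }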
5917 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, in skb_mod_eth_type() argument
5920 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
5923 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
5944 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, in skb_mpls_push() argument
5954 if (skb->encapsulation) in skb_mpls_push()
5957 err = skb_cow_head(skb, MPLS_HLEN); in skb_mpls_push()
5961 if (!skb->inner_protocol) { in skb_mpls_push()
5962 skb_set_inner_network_header(skb, skb_network_offset(skb)); in skb_mpls_push()
5963 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
5966 skb_push(skb, MPLS_HLEN); in skb_mpls_push()
5967 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
5969 skb_reset_mac_header(skb); in skb_mpls_push()
5970 skb_set_network_header(skb, mac_len); in skb_mpls_push()
5971 skb_reset_mac_len(skb); in skb_mpls_push()
5973 lse = mpls_hdr(skb); in skb_mpls_push()
5975 skb_postpush_rcsum(skb, lse, MPLS_HLEN); in skb_mpls_push()
5978 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); in skb_mpls_push()
5979 skb->protocol = mpls_proto; in skb_mpls_push()
5997 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, in skb_mpls_pop() argument
6002 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
6005 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); in skb_mpls_pop()
6009 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); in skb_mpls_pop()
6010 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), in skb_mpls_pop()
6013 __skb_pull(skb, MPLS_HLEN); in skb_mpls_pop()
6014 skb_reset_mac_header(skb); in skb_mpls_pop()
6015 skb_set_network_header(skb, mac_len); in skb_mpls_pop()
6021 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
6022 skb_mod_eth_type(skb, hdr, next_proto); in skb_mpls_pop()
6024 skb->protocol = next_proto; in skb_mpls_pop()
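The MPLS push/pop pair keeps mac_header, network_header, and a CHECKSUM_COMPLETE csum consistent across label imposition and disposition (skb_mod_eth_type() above handles the ethertype's checksum delta). A sketch imposing one bottom-of-stack label on an IPv4 packet and removing it again (hypothetical helper and values):

    #include <linux/if_arp.h>
    #include <linux/mpls.h>
    #include <net/mpls.h>

    static int my_mpls_roundtrip(struct sk_buff *skb, u32 label, u8 ttl)
    {
        bool ether = skb->dev->type == ARPHRD_ETHER;
        /* LSE layout: label(20) | tc(3) | bos(1) | ttl(8) */
        __be32 lse = cpu_to_be32(label << MPLS_LS_LABEL_SHIFT |
                                 1 << MPLS_LS_S_SHIFT |   /* bottom of stack */
                                 (u32)ttl << MPLS_LS_TTL_SHIFT);
        int err;

        err = skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
                            skb->mac_len, ether);
        if (err)
            return err;
        return skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len, ether);
    }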
6040 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) in skb_mpls_update_lse() argument
6044 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
6047 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
6051 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
6052 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
6054 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
6057 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
6072 int skb_mpls_dec_ttl(struct sk_buff *skb) in skb_mpls_dec_ttl() argument
6077 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
6080 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) in skb_mpls_dec_ttl()
6083 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
6091 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); in skb_mpls_dec_ttl()
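A reconstructed sketch of skb_mpls_dec_ttl()'s arithmetic, from the fragments above: the TTL occupies the low eight bits of the label stack entry, and a decrement to zero is an error. (The exported helper additionally validates skb->protocol and pullability first, as shown above.)

    #include <linux/mpls.h>
    #include <net/mpls.h>

    static int my_mpls_dec_ttl(struct sk_buff *skb)
    {
        u32 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
        u32 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;

        if (!--ttl)
            return -EINVAL;         /* TTL expired: drop, don't forward */

        lse &= ~MPLS_LS_TTL_MASK;
        lse |= ttl << MPLS_LS_TTL_SHIFT;
        return skb_mpls_update_lse(skb, cpu_to_be32(lse));
    }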
6114 struct sk_buff *skb; in alloc_skb_with_frags() local
6126 skb = alloc_skb(header_len, gfp_mask); in alloc_skb_with_frags()
6127 if (!skb) in alloc_skb_with_frags()
6130 skb->truesize += npages << PAGE_SHIFT; in alloc_skb_with_frags()
6155 skb_fill_page_desc(skb, i, page, 0, chunk); in alloc_skb_with_frags()
6159 return skb; in alloc_skb_with_frags()
6162 kfree_skb(skb); in alloc_skb_with_frags()
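alloc_skb_with_frags() builds an skb with a linear header area plus payload capacity spread across page fragments, as sock_alloc_send_pskb() does for large datagrams. Note it only attaches the pages and bumps truesize; the caller still accounts the payload into skb->len/skb->data_len (or copies data in) afterwards. A sketch (hypothetical wrapper):

    #include <linux/err.h>
    #include <linux/skbuff.h>

    static struct sk_buff *my_alloc_pskb(unsigned long header_len,
                                         unsigned long data_len)
    {
        int errcode;
        struct sk_buff *skb;

        /* max_page_order bounds each fragment; 0 forces order-0 pages */
        skb = alloc_skb_with_frags(header_len, data_len, 0,
                                   &errcode, GFP_KERNEL);
        return skb ?: ERR_PTR(errcode);     /* errcode is negative */
    }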
6168 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, in pskb_carve_inside_header() argument
6172 int size = skb_end_offset(skb); in pskb_carve_inside_header()
6178 if (skb_pfmemalloc(skb)) in pskb_carve_inside_header()
6189 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); in pskb_carve_inside_header()
6190 skb->len -= off; in pskb_carve_inside_header()
6193 skb_shinfo(skb), in pskb_carve_inside_header()
6195 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
6196 if (skb_cloned(skb)) { in pskb_carve_inside_header()
6198 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_header()
6202 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
6203 skb_frag_ref(skb, i); in pskb_carve_inside_header()
6204 if (skb_has_frag_list(skb)) in pskb_carve_inside_header()
6205 skb_clone_fraglist(skb); in pskb_carve_inside_header()
6206 skb_release_data(skb); in pskb_carve_inside_header()
6211 skb_free_head(skb); in pskb_carve_inside_header()
6214 skb->head = data; in pskb_carve_inside_header()
6215 skb->data = data; in pskb_carve_inside_header()
6216 skb->head_frag = 0; in pskb_carve_inside_header()
6217 skb_set_end_offset(skb, size); in pskb_carve_inside_header()
6218 skb_set_tail_pointer(skb, skb_headlen(skb)); in pskb_carve_inside_header()
6219 skb_headers_offset_update(skb, 0); in pskb_carve_inside_header()
6220 skb->cloned = 0; in pskb_carve_inside_header()
6221 skb->hdr_len = 0; in pskb_carve_inside_header()
6222 skb->nohdr = 0; in pskb_carve_inside_header()
6223 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
6228 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6233 static int pskb_carve_frag_list(struct sk_buff *skb, in pskb_carve_frag_list() argument
6287 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, in pskb_carve_inside_nonlinear() argument
6291 int size = skb_end_offset(skb); in pskb_carve_inside_nonlinear()
6293 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
6298 if (skb_pfmemalloc(skb)) in pskb_carve_inside_nonlinear()
6309 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); in pskb_carve_inside_nonlinear()
6310 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_nonlinear()
6316 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
6319 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
6333 skb_frag_ref(skb, i); in pskb_carve_inside_nonlinear()
6339 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6340 skb_clone_fraglist(skb); in pskb_carve_inside_nonlinear()
6343 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { in pskb_carve_inside_nonlinear()
6345 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6346 kfree_skb_list(skb_shinfo(skb)->frag_list); in pskb_carve_inside_nonlinear()
6350 skb_release_data(skb); in pskb_carve_inside_nonlinear()
6352 skb->head = data; in pskb_carve_inside_nonlinear()
6353 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
6354 skb->data = data; in pskb_carve_inside_nonlinear()
6355 skb_set_end_offset(skb, size); in pskb_carve_inside_nonlinear()
6356 skb_reset_tail_pointer(skb); in pskb_carve_inside_nonlinear()
6357 skb_headers_offset_update(skb, 0); in pskb_carve_inside_nonlinear()
6358 skb->cloned = 0; in pskb_carve_inside_nonlinear()
6359 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
6360 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
6361 skb->len -= off; in pskb_carve_inside_nonlinear()
6362 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6363 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
6368 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) in pskb_carve() argument
6370 int headlen = skb_headlen(skb); in pskb_carve()
6373 return pskb_carve_inside_header(skb, len, headlen, gfp); in pskb_carve()
6375 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); in pskb_carve()
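pskb_carve() is the dispatcher for the two carve variants above. Reconstructed from the fragments, the split is simply whether the carve point lands inside the linear header:

    static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
    {
        int headlen = skb_headlen(skb);

        if (len < headlen)      /* carve point inside the linear area */
            return pskb_carve_inside_header(skb, len, headlen, gfp);
        return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
    }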
6381 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, in pskb_extract() argument
6384 struct sk_buff *clone = skb_clone(skb, gfp); in pskb_extract()
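pskb_extract() composes the pieces above: clone the skb, carve the clone so it starts at off, then trim it to to_copy bytes. Frag pages are reference-counted rather than copied, which keeps the extraction cheap for nonlinear data. A reconstructed sketch of that composition:

    static struct sk_buff *my_pskb_extract(struct sk_buff *skb, int off,
                                           int to_copy, gfp_t gfp)
    {
        struct sk_buff *clone = skb_clone(skb, gfp);

        if (!clone)
            return NULL;
        if (pskb_carve(clone, off, gfp) < 0 ||  /* drop bytes before off */
            pskb_trim(clone, to_copy)) {        /* drop bytes after range */
            kfree_skb(clone);
            return NULL;                /* original skb is untouched */
        }
        return clone;
    }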
6410 void skb_condense(struct sk_buff *skb) in skb_condense() argument
6412 if (skb->data_len) { in skb_condense()
6413 if (skb->data_len > skb->end - skb->tail || in skb_condense()
6414 skb_cloned(skb)) in skb_condense()
6418 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
6427 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
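skb_condense() trims truesize for skbs that are about to sit in a queue: if the remaining frag data fits in the tailroom, it is pulled linear and the page references dropped, and truesize is then recomputed from the real end offset. A sketch of the intended call pattern (hypothetical helper; TCP does the equivalent when queueing for a socket):

    #include <net/sock.h>

    static void my_queue_for_later(struct sock *sk, struct sk_buff *skb)
    {
        skb_condense(skb);          /* shrink truesize while it's cheap */
        skb_set_owner_r(skb, sk);   /* charge the reduced truesize */
        __skb_queue_tail(&sk->sk_receive_queue, skb);
    }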
6495 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, in __skb_ext_set() argument
6500 skb_ext_put(skb); in __skb_ext_set()
6504 skb->extensions = ext; in __skb_ext_set()
6505 skb->active_extensions = 1 << id; in __skb_ext_set()
6523 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) in skb_ext_add() argument
6528 if (skb->active_extensions) { in skb_ext_add()
6529 old = skb->extensions; in skb_ext_add()
6531 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
6551 skb->slow_gro = 1; in skb_ext_add()
6552 skb->extensions = new; in skb_ext_add()
6553 skb->active_extensions |= 1 << id; in skb_ext_add()
6576 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) in __skb_ext_del() argument
6578 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
6580 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
6581 if (skb->active_extensions == 0) { in __skb_ext_del()
6582 skb->extensions = NULL; in __skb_ext_del()
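The extension API above (__skb_ext_set/skb_ext_add/__skb_ext_del) manages a refcounted side block keyed by skb_ext_id; skb_ext_add() copies a shared block before returning, so the returned memory is safe to write. A lifecycle sketch using TC_SKB_EXT as the id (assumes CONFIG_NET_TC_SKB_EXT; hypothetical helper):

    #include <linux/skbuff.h>

    static int my_mark_chain(struct sk_buff *skb, u32 chain)
    {
        struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);

        if (!ext)
            return -ENOMEM;
        ext->chain = chain;     /* private per-extension payload */
        return 0;
    }

A later skb_ext_del(skb, TC_SKB_EXT) releases the slot again; as __skb_ext_del() above shows, the whole block goes away once the last active extension is removed.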
6629 void skb_attempt_defer_free(struct sk_buff *skb) in skb_attempt_defer_free() argument
6631 int cpu = skb->alloc_cpu; in skb_attempt_defer_free()
6640 nodefer: __kfree_skb(skb); in skb_attempt_defer_free()
6655 skb->next = sd->defer_list; in skb_attempt_defer_free()
6657 WRITE_ONCE(sd->defer_list, skb); in skb_attempt_defer_free()
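skb_attempt_defer_free() queues the skb back onto skb->alloc_cpu's per-CPU defer list so the head returns to the cache it was allocated from, falling back to an immediate __kfree_skb() (the nodefer label above) when deferral isn't profitable. A sketch mirroring how TCP releases receive-queue skbs (hypothetical helper):

    #include <net/sock.h>

    static void my_eat_rcv_skb(struct sock *sk, struct sk_buff *skb)
    {
        __skb_unlink(skb, &sk->sk_receive_queue);
        if (likely(skb->destructor == sock_rfree)) {
            /* Uncharge now so the deferred free touches no socket state. */
            sock_rfree(skb);
            skb->destructor = NULL;
            skb->sk = NULL;
            skb_attempt_defer_free(skb);
            return;
        }
        __kfree_skb(skb);   /* unexpected destructor: free locally */
    }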