| /Linux-v6.1/net/ipv4/ |
| D | tcp_offload.c |
|     20  skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;  in tcp_gso_tstamp()
|     21  skb_shinfo(skb)->tskey = ts_seq;  in tcp_gso_tstamp()
|     33  if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))  in tcp4_gso_segment()
|     81  mss = skb_shinfo(skb)->gso_size;  in tcp_gso_segment()
|     88  skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);  in tcp_gso_segment()
|    111  mss *= skb_shinfo(segs)->gso_segs;  in tcp_gso_segment()
|    119  if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))  in tcp_gso_segment()
|    120  tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);  in tcp_gso_segment()
|    256  mss = skb_shinfo(p)->gso_size;  in tcp_gro_receive()
|    263  flush |= (mss != skb_shinfo(skb)->gso_size);  in tcp_gro_receive()
|    [all …]
|
| D | udp_offload.c |
|     41  if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)  in __skb_udp_tunnel_segment()
|     57  need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);  in __skb_udp_tunnel_segment()
|     60  remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);  in __skb_udp_tunnel_segment()
|     93  gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);  in __skb_udp_tunnel_segment()
|    125  uh->len = htons(skb_shinfo(skb)->gso_size +  in __skb_udp_tunnel_segment()
|    253  unsigned int mss = skb_shinfo(skb)->gso_size;  in __udp_gso_segment_list()
|    276  if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)  in __udp_gso_segment()
|    279  mss = skb_shinfo(gso_skb)->gso_size;  in __udp_gso_segment()
|    302  mss *= skb_shinfo(segs)->gso_segs;  in __udp_gso_segment()
|    308  skb_shinfo(seg)->tskey = skb_shinfo(gso_skb)->tskey;  in __udp_gso_segment()
|    [all …]
|
| D | gre_offload.c |
|     44  need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);  in gre_gso_segment()
|     64  gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);  in gre_gso_segment()
|    102  skb_shinfo(skb)->gso_size;  in gre_gso_segment()
|    244  skb_shinfo(skb)->gso_type = SKB_GSO_GRE;  in gre_gro_complete()
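
The three ipv4 offload files above share one idiom: a segmentation path reads the MSS out of skb_shinfo(skb)->gso_size and derives the segment count from it. A minimal sketch of that arithmetic, mirroring the tcp_gso_segment() lines above; the helper name is hypothetical, and in the real function skb->len has already had the headers pulled off before the division:

```c
#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Hypothetical helper mirroring tcp_offload.c lines 81/88 above:
 * gso_size is the MSS, and gso_segs is the payload length divided
 * by it, rounded up.
 */
static unsigned int sketch_gso_seg_count(const struct sk_buff *skb)
{
        unsigned int mss = skb_shinfo(skb)->gso_size;

        if (!mss)
                return 1;       /* not a GSO skb */
        return DIV_ROUND_UP(skb->len, mss);
}
```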
|
| /Linux-v6.1/net/core/ |
| D | skbuff.c |
|    292  shinfo = skb_shinfo(skb);  in __build_skb_around()
|    723  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in skb_coalesce_rx_frag()
|    740  skb_drop_list(&skb_shinfo(skb)->frag_list);  in skb_drop_fraglist()
|    766  struct skb_shared_info *shinfo = skb_shinfo(skb);  in skb_release_data()
|    915  struct skb_shared_info *sh = skb_shinfo(skb);  in skb_dump()
|    973  for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {  in skb_dump()
|    974  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in skb_dump()
|   1198  atomic_inc(&(skb_shinfo(skb)->dataref));  in __skb_clone()
|   1221  skb_shinfo(n)->frag_list = first;  in alloc_skb_for_msg()
|   1502  skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS;  in __skb_zcopy_downgrade_managed()
|    [all …]
|
| D | tso.c |
|     12  return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;  in tso_count_descs()
|     61  (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {  in tso_build_data()
|     62  skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];  in tso_build_data()
|     87  (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {  in tso_start()
|     88  skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];  in tso_start()
|
| D | gro.c |
|    155  struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);  in skb_gro_receive()
|    181  pinfo = skb_shinfo(lp);  in skb_gro_receive()
|    262  skb_shinfo(p)->frag_list = skb;  in skb_gro_receive()
|    294  skb_shinfo(skb)->gso_size = 0;  in napi_gro_complete()
|    416  const struct skb_shared_info *pinfo = skb_shinfo(skb);  in skb_gro_reset_offset()
|    435  struct skb_shared_info *pinfo = skb_shinfo(skb);  in gro_pull_from_frag0()
|    505  NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;  in dev_gro_receive()
|    560  skb_shinfo(skb)->gso_size = skb_gro_len(skb);  in dev_gro_receive()
|    671  skb_shinfo(skb)->gso_type = 0;  in napi_reuse_skb()
|    672  skb_shinfo(skb)->gso_size = 0;  in napi_reuse_skb()
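
The tso.c matches above encode a simple descriptor budget that NIC drivers reuse when sizing their TX rings. A sketch of that rule under a hypothetical helper name:

```c
#include <linux/skbuff.h>

/* Mirrors net/core/tso.c line 12 above: one header descriptor plus
 * one payload descriptor per segment, and each page fragment may
 * add one more.
 */
static int sketch_tso_count_descs(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
}
```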
|
| /Linux-v6.1/drivers/net/ethernet/sfc/siena/ |
| D | tx.h |
|     28  if (skb_shinfo(skb)->gso_segs > 1 &&  in efx_tx_csum_type_skb()
|     29  !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&  in efx_tx_csum_type_skb()
|     30  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))  in efx_tx_csum_type_skb()
|
| /Linux-v6.1/drivers/net/ethernet/sfc/ |
| D | tx.h |
|     35  if (skb_shinfo(skb)->gso_segs > 1 &&  in efx_tx_csum_type_skb()
|     36  !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&  in efx_tx_csum_type_skb()
|     37  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))  in efx_tx_csum_type_skb()
|
| D | tx_tso.c |
|    291  bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;  in tso_start_new_packet()
|    295  st->packet_space = skb_shinfo(skb)->gso_size;  in tso_start_new_packet()
|    341  st->seqnum += skb_shinfo(skb)->gso_size;  in tso_start_new_packet()
|    386  EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->nr_frags < 1);  in efx_enqueue_skb_tso()
|    389  skb_shinfo(skb)->frags + frag_i);  in efx_enqueue_skb_tso()
|    408  if (++frag_i >= skb_shinfo(skb)->nr_frags)  in efx_enqueue_skb_tso()
|    412  skb_shinfo(skb)->frags + frag_i);  in efx_enqueue_skb_tso()
|
| D | ef100_tx.c |
|     67  mss = skb_shinfo(skb)->gso_size;  in ef100_tx_can_tso()
|     77  if (skb_shinfo(skb)->gso_segs > nic_data->tso_max_payload_num_segs) {  in ef100_tx_can_tso()
|    190  bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;  in ef100_make_tso_desc()
|    195  u32 mss = skb_shinfo(skb)->gso_size;  in ef100_make_tso_desc()
|    203  if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)  in ef100_make_tso_desc()
|    216  if (skb_shinfo(skb)->gso_type &  in ef100_make_tso_desc()
|    224  outer_csum = skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM;  in ef100_make_tso_desc()
|    391  segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;  in __ef100_enqueue_skb()
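
Both sfc copies of tx.h above (the siena one and the ef10 one) test the same three shared-info fields before choosing a checksum offload type. A sketch of that predicate with a hypothetical name:

```c
#include <linux/skbuff.h>

/* Mirrors the efx_tx_csum_type_skb() condition above: outer checksum
 * offload only matters for a genuine multi-segment GSO skb that is
 * not GSO_PARTIAL and whose UDP tunnel header is checksummed.
 */
static bool sketch_needs_outer_csum(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_segs > 1 &&
               !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
               (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
}
```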
|
| /Linux-v6.1/drivers/net/xen-netback/ |
| D | netback.c |
|    371  skb_shinfo(skb)->destructor_arg = NULL;  in xenvif_alloc_skb()
|    387  struct skb_shared_info *shinfo = skb_shinfo(skb);  in xenvif_get_requests()
|    467  shinfo = skb_shinfo(nskb);  in xenvif_get_requests()
|    480  skb_shinfo(skb)->frag_list = nskb;  in xenvif_get_requests()
|    524  struct skb_shared_info *shinfo = skb_shinfo(skb);  in xenvif_tx_check_gop()
|    634  shinfo = skb_shinfo(shinfo->frag_list);  in xenvif_tx_check_gop()
|    646  struct skb_shared_info *shinfo = skb_shinfo(skb);  in xenvif_fill_frags()
|    661  skb_shinfo(skb)->destructor_arg =  in xenvif_fill_frags()
|    728  skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;  in xenvif_set_skb_gso()
|    731  skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;  in xenvif_set_skb_gso()
|    [all …]
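
netback.c above both reads and writes the shared info: it chains an overflow skb onto frag_list and translates the guest's GSO request into gso_type bits. A sketch of the two writes, with hypothetical helper names and a plain bool standing in for the Xen netif GSO constants:

```c
#include <linux/skbuff.h>

/* Mirrors netback.c line 480 above: overflow frags live on a second
 * skb chained via the shared-info frag_list.
 */
static void sketch_chain_overflow(struct sk_buff *skb, struct sk_buff *nskb)
{
        skb_shinfo(skb)->frag_list = nskb;
}

/* Mirrors xenvif_set_skb_gso() lines 728/731 above. */
static void sketch_set_gso(struct sk_buff *skb, bool ipv6)
{
        skb_shinfo(skb)->gso_type = ipv6 ? SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
}
```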
|
| /Linux-v6.1/net/ipv6/ |
| D | udp_offload.c |
|     32  if (skb->encapsulation && skb_shinfo(skb)->gso_type &  in udp6_ufo_fragment()
|     39  if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))  in udp6_ufo_fragment()
|     45  if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)  in udp6_ufo_fragment()
|     48  mss = skb_shinfo(skb)->gso_size;  in udp6_ufo_fragment()
|    169  skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);  in udp6_gro_complete()
|    170  skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;  in udp6_gro_complete()
|
| D | ip6_offload.c |
|    128  skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))  in ipv6_gso_segment()
|    130  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);  in ipv6_gso_segment()
|    133  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);  in ipv6_gso_segment()
|    146  gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);  in ipv6_gso_segment()
|    151  payload_len = skb_shinfo(skb)->gso_size +  in ipv6_gso_segment()
|    394  skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;  in sit_gro_complete()
|    401  skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;  in ip6ip6_gro_complete()
|    408  skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;  in ip4ip6_gro_complete()
|    424  if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))  in sit_gso_segment()
|    433  if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))  in ip4ip6_gso_segment()
|    [all …]
|
| D | tcpv6_offload.c |
|     37  skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;  in tcp6_gro_complete()
|     47  if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))  in tcp6_gso_segment()
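
The ipv6 offload files above show the defensive half of the pattern: every *_gso_segment() first checks that gso_type names its own protocol before touching the packet. A sketch of that guard under a hypothetical name; the real tcp6_gso_segment() goes on to do the actual segmentation:

```c
#include <linux/err.h>
#include <linux/skbuff.h>

static struct sk_buff *sketch_tcp6_guard(struct sk_buff *skb)
{
        /* mirrors tcpv6_offload.c line 47 above */
        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
                return ERR_PTR(-EINVAL);
        return skb;
}
```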
|
| /Linux-v6.1/net/openvswitch/ |
| D | openvswitch_trace.h |
|     52  __entry->nr_frags = skb_shinfo(skb)->nr_frags;
|     53  __entry->gso_size = skb_shinfo(skb)->gso_size;
|     54  __entry->gso_type = skb_shinfo(skb)->gso_type;
|    122  __entry->nr_frags = skb_shinfo(skb)->nr_frags;
|    123  __entry->gso_size = skb_shinfo(skb)->gso_size;
|    124  __entry->gso_type = skb_shinfo(skb)->gso_type;
|
| /Linux-v6.1/include/linux/ |
| D | udp.h |
|    124  if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {  in udp_cmsg_recv()
|    125  gso_size = skb_shinfo(skb)->gso_size;  in udp_cmsg_recv()
|    135  if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)  in udp_unexpected_gso()
|    138  if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)  in udp_unexpected_gso()
|
| D | skbuff.h |
|    562  #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
|   1632  #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))  macro
|   1636  return &skb_shinfo(skb)->hwtstamps;  in skb_hwtstamps()
|   1641  bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;  in skb_zcopy()
|   1648  return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY;  in skb_zcopy_pure()
|   1653  return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS;  in skb_zcopy_managed()
|   1669  skb_shinfo(skb)->destructor_arg = uarg;  in skb_zcopy_init()
|   1670  skb_shinfo(skb)->flags |= uarg->flags;  in skb_zcopy_init()
|   1687  skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);  in skb_zcopy_set_nouarg()
|   1688  skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;  in skb_zcopy_set_nouarg()
|    [all …]
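
The skbuff.h match at 1632 is the definition everything else in this listing hangs off: skb_shinfo() simply casts the end of the skb's data buffer, skb_end_pointer(skb), to struct skb_shared_info. A sketch of the zerocopy test from skb_zcopy() above, under a hypothetical name:

```c
#include <linux/skbuff.h>

/* Mirrors skbuff.h line 1641 above: an skb is zerocopy when the
 * shared-info flags carry SKBFL_ZEROCOPY_ENABLE.
 */
static bool sketch_is_zerocopy(const struct sk_buff *skb)
{
        return skb && (skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE);
}
```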
|
| /Linux-v6.1/drivers/net/ethernet/hisilicon/hns3/ |
| D | hns3_trace.h |
|     35  __entry->nr_frags = skb_shinfo(skb)->nr_frags;
|     36  __entry->gso_size = skb_shinfo(skb)->gso_size;
|     37  __entry->gso_segs = skb_shinfo(skb)->gso_segs;
|     38  __entry->gso_type = skb_shinfo(skb)->gso_type;
|     43  hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
|
| /Linux-v6.1/include/trace/events/ |
| D | net.h |
|     55  __entry->tx_flags = skb_shinfo(skb)->tx_flags;
|     56  __entry->gso_size = skb_shinfo(skb)->gso_size;
|     57  __entry->gso_segs = skb_shinfo(skb)->gso_segs;
|     58  __entry->gso_type = skb_shinfo(skb)->gso_type;
|    213  __entry->nr_frags = skb_shinfo(skb)->nr_frags;
|    214  __entry->gso_size = skb_shinfo(skb)->gso_size;
|    215  __entry->gso_type = skb_shinfo(skb)->gso_type;
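
The openvswitch, hns3 and core net tracepoints above all snapshot the same shared-info fields into their ring-buffer entries. A sketch of that capture written as a plain struct rather than a TRACE_EVENT(); both names are hypothetical:

```c
#include <linux/skbuff.h>

struct sketch_gso_fields {
        unsigned int nr_frags;
        unsigned int gso_size;
        unsigned int gso_segs;
        unsigned int gso_type;
};

/* Copies the fields the __entry assignments above record. */
static void sketch_capture(const struct sk_buff *skb,
                           struct sketch_gso_fields *f)
{
        f->nr_frags = skb_shinfo(skb)->nr_frags;
        f->gso_size = skb_shinfo(skb)->gso_size;
        f->gso_segs = skb_shinfo(skb)->gso_segs;
        f->gso_type = skb_shinfo(skb)->gso_type;
}
```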
|
| /Linux-v6.1/drivers/net/ethernet/marvell/octeontx2/nic/ |
| D | otx2_txrx.c |
|     94  frag = &skb_shinfo(skb)->frags[seg - 1];  in otx2_dma_map_skb_frag()
|    154  if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {  in otx2_snd_pkt_handler()
|    202  if (likely(!skb_shinfo(skb)->nr_frags)) {  in otx2_skb_add_frag()
|    215  if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {  in otx2_skb_add_frag()
|    216  skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,  in otx2_skb_add_frag()
|    637  if (skb_shinfo(skb)->gso_size) {  in otx2_sqe_add_ext()
|    640  ext->lso_mps = skb_shinfo(skb)->gso_size;  in otx2_sqe_add_ext()
|    643  if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {  in otx2_sqe_add_ext()
|    652  } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {  in otx2_sqe_add_ext()
|    657  } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {  in otx2_sqe_add_ext()
|    [all …]
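
otx2_sqe_add_ext() above picks a hardware LSO format from the gso_type bits and programs the MSS from gso_size. A simplified sketch of that dispatch; the enum and helper are hypothetical, not the driver's real descriptor fields:

```c
#include <linux/skbuff.h>

enum sketch_lso { SKETCH_LSO_NONE, SKETCH_LSO_TCPV4,
                  SKETCH_LSO_TCPV6, SKETCH_LSO_UDP };

static enum sketch_lso sketch_pick_lso(const struct sk_buff *skb,
                                       unsigned int *mss)
{
        if (!skb_shinfo(skb)->gso_size)
                return SKETCH_LSO_NONE;         /* not offloaded */

        *mss = skb_shinfo(skb)->gso_size;       /* lso_mps above */
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                return SKETCH_LSO_TCPV4;
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                return SKETCH_LSO_TCPV6;
        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
                return SKETCH_LSO_UDP;
        return SKETCH_LSO_NONE;
}
```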
|
| /Linux-v6.1/net/tls/ |
| D | tls_strp.c |
|     29  struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);  in tls_strp_anchor_free()
|     50  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {  in tls_strp_msg_make_copy()
|     51  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in tls_strp_msg_make_copy()
|    128  struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);  in tls_strp_msg_hold()
|    175  struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);  in tls_strp_flush_anchor_copy()
|    199  frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];  in tls_strp_copyin()
|    289  shinfo = skb_shinfo(strp->anchor);  in tls_strp_read_copy()
|    324  skb = skb_shinfo(strp->anchor)->frag_list;  in tls_strp_check_no_dup()
|    354  skb_shinfo(strp->anchor)->frag_list = first;  in tls_strp_load_anchor_with_queue()
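
tls_strp.c above repeatedly walks the anchor skb's fragment array; tls_strp_msg_make_copy() is the clearest example. A sketch of that walk that just totals the paged bytes, with a hypothetical name:

```c
#include <linux/skbuff.h>

static unsigned int sketch_paged_bytes(const struct sk_buff *skb)
{
        unsigned int i, total = 0;

        /* mirrors the loop at tls_strp.c lines 50-51 above */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                total += skb_frag_size(&skb_shinfo(skb)->frags[i]);
        return total;
}
```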
|
| /Linux-v6.1/drivers/net/ethernet/chelsio/cxgb4/ |
| D | cxgb4_ptp.h |
|     57  return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;  in cxgb4_xmit_with_hwtstamp()
|     62  skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;  in cxgb4_xmit_hwtstamp_pending()
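
The two cxgb4_ptp.h one-liners above are the canonical hardware-timestamp handshake: tx_flags carries the request, and SKBTX_IN_PROGRESS marks the skb once a stamp has been asked of the NIC. A sketch with hypothetical names:

```c
#include <linux/skbuff.h>

/* Mirrors cxgb4_ptp.h line 57 above. */
static bool sketch_wants_hwtstamp(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
}

/* Mirrors cxgb4_ptp.h line 62 above. */
static void sketch_mark_in_progress(struct sk_buff *skb)
{
        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
```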
|
| /Linux-v6.1/drivers/net/ethernet/pensando/ionic/ |
| D | ionic_txrx.c |
|    156  skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,  in ionic_rx_frags()
|    642  frag = skb_shinfo(skb)->frags;  in ionic_tx_map_skb()
|    643  nfrags = skb_shinfo(skb)->nr_frags;  in ionic_tx_map_skb()
|    725  skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;  in ionic_tx_clean()
|    927  mss = skb_shinfo(skb)->gso_size;  in ionic_tx_tso()
|    928  outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||  in ionic_tx_tso()
|    929  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);  in ionic_tx_tso()
|   1035  flags, skb_shinfo(skb)->nr_frags,  in ionic_tx_calc_csum()
|   1072  flags, skb_shinfo(skb)->nr_frags,  in ionic_tx_calc_no_csum()
|   1097  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {  in ionic_tx_skb_frags()
|    [all …]
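
ionic_tx_map_skb() above walks the frag array to DMA-map each fragment for the descriptor ring. A sketch of that loop under a hypothetical name, with error unwinding of earlier mappings omitted:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int sketch_map_frags(struct device *dev, struct sk_buff *skb,
                            dma_addr_t *addrs)
{
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                addrs[i] = skb_frag_dma_map(dev, frag, 0,
                                            skb_frag_size(frag),
                                            DMA_TO_DEVICE);
                if (dma_mapping_error(dev, addrs[i]))
                        return -EIO;    /* caller must unmap 0..i-1 */
        }
        return 0;
}
```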
|
| /Linux-v6.1/drivers/staging/octeon/ |
| D | ethernet-tx.c |
|    191  if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {  in cvm_oct_xmit()
|    263  if (skb_shinfo(skb)->nr_frags == 0) {  in cvm_oct_xmit()
|    272  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {  in cvm_oct_xmit()
|    273  skb_frag_t *fs = skb_shinfo(skb)->frags + i;  in cvm_oct_xmit()
|    282  hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;  in cvm_oct_xmit()
|    283  pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;  in cvm_oct_xmit()
|    325  if (unlikely(skb_shinfo(skb)->nr_frags)) {  in cvm_oct_xmit()
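
The octeon matches above size a hardware gather list as the linear data plus one entry per fragment, falling back to linearizing when the skb carries more than five frags. A sketch of that policy with a hypothetical name:

```c
#include <linux/errno.h>
#include <linux/skbuff.h>

static int sketch_gather_segs(struct sk_buff *skb)
{
        /* mirrors ethernet-tx.c line 191: too many frags, flatten */
        if (unlikely(skb_shinfo(skb)->nr_frags > 5) && skb_linearize(skb))
                return -ENOMEM;
        /* mirrors lines 282-283: linear part + one per frag */
        return skb_shinfo(skb)->nr_frags + 1;
}
```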
|
| /Linux-v6.1/drivers/net/ethernet/sun/ |
| D | sunvnet_common.c |
|   1083  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {  in vnet_skb_map()
|   1084  skb_frag_t *f = &skb_shinfo(skb)->frags[i];  in vnet_skb_map()
|   1123  docopy = skb_shinfo(skb)->nr_frags >= ncookies;  in vnet_skb_shape()
|   1124  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {  in vnet_skb_shape()
|   1125  skb_frag_t *f = &skb_shinfo(skb)->frags[i];  in vnet_skb_shape()
|   1209  skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;  in vnet_skb_shape()
|   1210  skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;  in vnet_skb_shape()
|   1249  gso_size = skb_shinfo(skb)->gso_size;  in vnet_handle_offloads()
|   1250  gso_type = skb_shinfo(skb)->gso_type;  in vnet_handle_offloads()
|   1251  gso_segs = skb_shinfo(skb)->gso_segs;  in vnet_handle_offloads()
|    [all …]
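
When sunvnet_common.c rebuilds an skb in vnet_skb_shape(), lines 1209-1210 above show that the GSO metadata has to be copied to the new head or the segmentation state is lost. A sketch of that copy with a hypothetical name:

```c
#include <linux/skbuff.h>

static void sketch_copy_gso_meta(struct sk_buff *nskb,
                                 const struct sk_buff *skb)
{
        skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
        skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
}
```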
|