/Linux-v4.19/net/core/

D | skbuff.c |
    233   shinfo = skb_shinfo(skb);   in __alloc_skb()
    298   shinfo = skb_shinfo(skb);   in __build_skb()
    515   skb_frag_t *frag = &skb_shinfo(skb)->frags[i];   in skb_coalesce_rx_frag()
    532   skb_drop_list(&skb_shinfo(skb)->frag_list);   in skb_drop_fraglist()
    555   struct skb_shared_info *shinfo = skb_shinfo(skb);   in skb_release_data()
    871   atomic_inc(&(skb_shinfo(skb)->dataref));   in __skb_clone()
    1176  int num_frags = skb_shinfo(skb)->nr_frags;   in skb_copy_ubufs()
    1205  skb_frag_t *f = &skb_shinfo(skb)->frags[i];   in skb_copy_ubufs()
    1240  skb_shinfo(skb)->nr_frags = new_frags;   in skb_copy_ubufs()
    1311  skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;   in skb_copy_header()
    [all …]
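The net/core hits above repeat one core pattern: fetch the shared info block once, then index its frags[] array. A minimal illustrative sketch of that walk (the helper name walk_skb_frags is invented, not from the tree):

#include <linux/skbuff.h>

/* Visit every paged fragment of an skb -- the loop shape used by
 * skb_coalesce_rx_frag() and skb_copy_ubufs() above.  Returns the
 * number of bytes held in page fragments.
 */
static unsigned int walk_skb_frags(const struct sk_buff *skb)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        unsigned int i, total = 0;

        for (i = 0; i < shinfo->nr_frags; i++) {
                const skb_frag_t *frag = &shinfo->frags[i];

                /* each frag is a page + offset + length triple */
                total += skb_frag_size(frag);
        }
        return total;
}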
D | tso.c |
    12  return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;   in tso_count_descs()
    54  (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {   in tso_build_data()
    55  skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];   in tso_build_data()
    78  (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {   in tso_start()
    79  skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];   in tso_start()
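tso.c:12 is the whole sizing rule: in the worst case each segment needs one DMA descriptor for its headers and one for linear payload, plus one per page fragment. A sketch that mirrors tso_count_descs() (the name tso_desc_estimate is invented):

#include <linux/skbuff.h>

/* Worst-case descriptor count for a TSO skb: headers + linear data per
 * segment, plus one descriptor per page fragment.
 */
static int tso_desc_estimate(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
}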
D | datagram.c |
    440  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {   in skb_copy_datagram_iter()
    442  const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];   in skb_copy_datagram_iter()
    529  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {   in skb_copy_datagram_from_iter()
    531  const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];   in skb_copy_datagram_from_iter()
    584  int frag = skb_shinfo(skb)->nr_frags;   in __zerocopy_sg_from_iter()
    671  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {   in skb_copy_and_csum_datagram()
    673  const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];   in skb_copy_and_csum_datagram()
/Linux-v4.19/net/ipv4/

D | tcp_offload.c |
    22   skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;   in tcp_gso_tstamp()
    23   skb_shinfo(skb)->tskey = ts_seq;   in tcp_gso_tstamp()
    35   if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))   in tcp4_gso_segment()
    83   mss = skb_shinfo(skb)->gso_size;   in tcp_gso_segment()
    90   skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);   in tcp_gso_segment()
    113  mss *= skb_shinfo(segs)->gso_segs;   in tcp_gso_segment()
    121  if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))   in tcp_gso_segment()
    122  tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);   in tcp_gso_segment()
    261  mss = skb_shinfo(p)->gso_size;   in tcp_gro_receive()
    299  skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;   in tcp_gro_complete()
    [all …]
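The tcp_gso_segment() hits show where the segment count comes from: gso_size holds the MSS, and gso_segs is the payload divided by it, rounded up. A simplified sketch, assuming headers have already been pulled so skb->len is pure payload (the real function also accounts for header bytes; tcp_gso_fixup_segs is an invented name):

#include <linux/kernel.h>
#include <linux/skbuff.h>

static void tcp_gso_fixup_segs(struct sk_buff *skb)
{
        unsigned int mss = skb_shinfo(skb)->gso_size;

        /* mirrors tcp_offload.c:83/90 */
        if (mss)
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
}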
D | udp_offload.c |
    43   if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)   in __skb_udp_tunnel_segment()
    58   need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);   in __skb_udp_tunnel_segment()
    61   remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);   in __skb_udp_tunnel_segment()
    92   gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);   in __skb_udp_tunnel_segment()
    124  uh->len = htons(skb_shinfo(skb)->gso_size +   in __skb_udp_tunnel_segment()
    202  mss = skb_shinfo(gso_skb)->gso_size;   in __udp_gso_segment()
    225  mss *= skb_shinfo(segs)->gso_segs;   in __udp_gso_segment()
    296  (skb_shinfo(skb)->gso_type &   in udp4_ufo_fragment()
    302  if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))   in udp4_ufo_fragment()
    308  if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4   in udp4_ufo_fragment()
    [all …]
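udp4_ufo_fragment() gates on gso_type bits before doing any work; SKB_GSO_UDP_L4 selects the "many equal-size datagrams" UDP segmentation path. A sketch of just that check (udp_gso_type_ok is an invented name):

#include <linux/skbuff.h>

/* Reject skbs whose GSO type is not a UDP flavour, as the hit at
 * udp_offload.c:302 does.
 */
static bool udp_gso_type_ok(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4);
}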
D | gre_offload.c |
    47   need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);   in gre_gso_segment()
    60   gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);   in gre_gso_segment()
    98   skb_shinfo(skb)->gso_size;   in gre_gso_segment()
    240  skb_shinfo(skb)->gso_type = SKB_GSO_GRE;   in gre_gro_complete()
/Linux-v4.19/include/linux/

D | virtio_net.h |
    65  skb_shinfo(skb)->gso_size = gso_size;   in virtio_net_hdr_to_skb()
    66  skb_shinfo(skb)->gso_type = gso_type;   in virtio_net_hdr_to_skb()
    69  skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;   in virtio_net_hdr_to_skb()
    70  skb_shinfo(skb)->gso_segs = 0;   in virtio_net_hdr_to_skb()
    85  struct skb_shared_info *sinfo = skb_shinfo(skb);   in virtio_net_hdr_from_skb()
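virtio_net_hdr_to_skb() is where guest-supplied GSO metadata enters the stack: size and type are copied from the device header, SKB_GSO_DODGY is OR'd in so the stack revalidates them, and gso_segs is left 0 to be recomputed during segmentation. A reduced sketch (sketch_hdr_gso_to_skb is an invented name; the real helper also validates headers and checksum offsets):

#include <linux/skbuff.h>

static void sketch_hdr_gso_to_skb(struct sk_buff *skb, u16 gso_size,
                                  unsigned int gso_type)
{
        skb_shinfo(skb)->gso_size = gso_size;
        skb_shinfo(skb)->gso_type = gso_type | SKB_GSO_DODGY;
        skb_shinfo(skb)->gso_segs = 0;   /* recomputed by the GSO code */
}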
D | skbuff.h |
    467   #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
    1291  #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))   (macro)
    1295  return &skb_shinfo(skb)->hwtstamps;   in skb_hwtstamps()
    1300  bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;   in skb_zcopy()
    1309  skb_shinfo(skb)->destructor_arg = uarg;   in skb_zcopy_set()
    1310  skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;   in skb_zcopy_set()
    1327  skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;   in skb_zcopy_clear()
    1338  skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;   in skb_zcopy_abort()
    1443  (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;   in skb_cloned()
    1470  dataref = atomic_read(&skb_shinfo(skb)->dataref);   in skb_header_cloned()
    [all …]
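The definition at skbuff.h:1291 is the key fact behind every other hit in this list: the shared info block lives at skb_end_pointer(), directly after the packet data, so all clones of an skb see the same frags, GSO fields and dataref count. That is why writers must consult dataref first; a sketch of the test skb_cloned() performs at line 1443 (sketch_payload_shared is an invented name):

#include <linux/skbuff.h>

/* True when the data (and thus the shared info) is referenced by more
 * than one skb and must not be modified in place.
 */
static bool sketch_payload_shared(const struct sk_buff *skb)
{
        return skb->cloned &&
               (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}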
/Linux-v4.19/drivers/net/xen-netback/

D | netback.c |
    355  skb_shinfo(skb)->destructor_arg = NULL;   in xenvif_alloc_skb()
    367  struct skb_shared_info *shinfo = skb_shinfo(skb);   in xenvif_get_requests()
    389  shinfo = skb_shinfo(nskb);   in xenvif_get_requests()
    402  skb_shinfo(skb)->frag_list = nskb;   in xenvif_get_requests()
    445  struct skb_shared_info *shinfo = skb_shinfo(skb);   in xenvif_tx_check_gop()
    549  first_shinfo = skb_shinfo(skb);   in xenvif_tx_check_gop()
    550  shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);   in xenvif_tx_check_gop()
    562  struct skb_shared_info *shinfo = skb_shinfo(skb);   in xenvif_fill_frags()
    577  skb_shinfo(skb)->destructor_arg =   in xenvif_fill_frags()
    644  skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;   in xenvif_set_skb_gso()
    [all …]
/Linux-v4.19/net/ipv6/

D | udp_offload.c |
    34   mss = skb_shinfo(skb)->gso_size;   in udp6_ufo_fragment()
    38   if (skb->encapsulation && skb_shinfo(skb)->gso_type &   in udp6_ufo_fragment()
    45   if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))   in udp6_ufo_fragment()
    51   if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)   in udp6_ufo_fragment()
    151  skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;   in udp6_gro_complete()
    155  skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;   in udp6_gro_complete()
D | ip6_offload.c |
    90   skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))   in ipv6_gso_segment()
    92   (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);   in ipv6_gso_segment()
    95   (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);   in ipv6_gso_segment()
    106  gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);   in ipv6_gso_segment()
    111  payload_len = skb_shinfo(skb)->gso_size +   in ipv6_gso_segment()
    327  skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;   in sit_gro_complete()
    334  skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;   in ip6ip6_gro_complete()
    341  skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;   in ip4ip6_gro_complete()
D | tcpv6_offload.c |
    39  skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;   in tcp6_gro_complete()
    49  if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))   in tcp6_gso_segment()
/Linux-v4.19/include/trace/events/

D | net.h |
    55   __entry->tx_flags = skb_shinfo(skb)->tx_flags;
    56   __entry->gso_size = skb_shinfo(skb)->gso_size;
    57   __entry->gso_segs = skb_shinfo(skb)->gso_segs;
    58   __entry->gso_type = skb_shinfo(skb)->gso_type;
    190  __entry->nr_frags = skb_shinfo(skb)->nr_frags;
    191  __entry->gso_size = skb_shinfo(skb)->gso_size;
    192  __entry->gso_type = skb_shinfo(skb)->gso_type;
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/

D | en_tx.c |
    231  if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)   in mlx5e_tx_get_gso_ihs()
    266  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {   in mlx5e_txwqe_build_dsegs()
    267  struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];   in mlx5e_txwqe_build_dsegs()
    325  if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))   in mlx5e_txwqe_complete()
    326  skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;   in mlx5e_txwqe_complete()
    361  mss = cpu_to_be16(skb_shinfo(skb)->gso_size);   in mlx5e_sq_xmit()
    363  num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;   in mlx5e_sq_xmit()
    364  stats->packets += skb_shinfo(skb)->gso_segs;   in mlx5e_sq_xmit()
    378  ds_cnt += skb_shinfo(skb)->nr_frags;   in mlx5e_sq_xmit()
    538  if (unlikely(skb_shinfo(skb)->tx_flags &   in mlx5e_poll_tx_cq()
    [all …]
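en_tx.c:325-326 shows the usual TX-timestamp handshake (cxgb4_ptp.h further down does the same): if the socket requested a hardware timestamp, the driver marks the skb in-progress before it reaches hardware and reports the stamp from the completion path. A sketch (sketch_mark_hw_tstamp is an invented name):

#include <linux/skbuff.h>

static void sketch_mark_hw_tstamp(struct sk_buff *skb)
{
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}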
/Linux-v4.19/drivers/net/ethernet/sfc/

D | tx_tso.c |
    294  bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;   in tso_start_new_packet()
    298  st->packet_space = skb_shinfo(skb)->gso_size;   in tso_start_new_packet()
    344  st->seqnum += skb_shinfo(skb)->gso_size;   in tso_start_new_packet()
    389  EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->nr_frags < 1);   in efx_enqueue_skb_tso()
    392  skb_shinfo(skb)->frags + frag_i);   in efx_enqueue_skb_tso()
    411  if (++frag_i >= skb_shinfo(skb)->nr_frags)   in efx_enqueue_skb_tso()
    415  skb_shinfo(skb)->frags + frag_i);   in efx_enqueue_skb_tso()
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/en_accel/

D | tls_rxtx.c |
    168  skb_shinfo(nskb)->gso_size = 0;   in mlx5e_tls_complete_sync_skb()
    170  skb_shinfo(nskb)->gso_size = mss;   in mlx5e_tls_complete_sync_skb()
    171  skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);   in mlx5e_tls_complete_sync_skb()
    173  skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;   in mlx5e_tls_complete_sync_skb()
    243  skb_shinfo(nskb)->frags[i] = info.frags[i];   in mlx5e_tls_handle_ooo()
    245  skb_shinfo(nskb)->nr_frags = info.nr_frags;   in mlx5e_tls_handle_ooo()
D | en_accel.h |
    46  int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);   in mlx5e_udp_gso_handle_tx_skb()
    74  if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)   in mlx5e_accel_handle_tx()
/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb4/

D | cxgb4_ptp.h |
    57  return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;   in cxgb4_xmit_with_hwtstamp()
    62  skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;   in cxgb4_xmit_hwtstamp_pending()
/Linux-v4.19/net/sctp/

D | inqueue.c |
    140  chunk->skb = skb_shinfo(chunk->skb)->frag_list;   in sctp_inq_pop()
    177  if (skb_shinfo(chunk->skb)->frag_list)   in sctp_inq_pop()
    182  chunk->skb = skb_shinfo(chunk->skb)->frag_list;   in sctp_inq_pop()
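sctp_inq_pop() consumes whole sub-skbs chained on shinfo->frag_list (the chain GRO builds). An illustrative walker over that list (sketch_count_fraglist is an invented name):

#include <linux/skbuff.h>

/* frag_list entries are complete skbs linked through ->next. */
static unsigned int sketch_count_fraglist(const struct sk_buff *skb)
{
        const struct sk_buff *p;
        unsigned int n = 0;

        for (p = skb_shinfo(skb)->frag_list; p; p = p->next)
                n++;
        return n;
}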
/Linux-v4.19/drivers/net/ethernet/qlogic/qede/

D | qede_fp.c |
    136   for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {   in qede_free_tx_pkt()
    216   unsigned short gso_type = skb_shinfo(skb)->gso_type;   in qede_xmit_type()
    310   return (skb_shinfo(skb)->nr_frags > allowed_frags);   in qede_pkt_req_lin()
    641   skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;   in qede_set_gro_params()
    643   skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;   in qede_set_gro_params()
    645   skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -   in qede_set_gro_params()
    911   skb_shinfo(skb)->gso_type = 0;   in qede_gro_receive()
    912   skb_shinfo(skb)->gso_size = 0;   in qede_gro_receive()
    917   if (skb_shinfo(skb)->gso_size) {   in qede_gro_receive()
    1172  skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,   in qede_rx_build_jumbo()
    [all …]
/Linux-v4.19/drivers/net/ethernet/huawei/hinic/

D | hinic_tx.c |
    130  for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) {   in tx_map_skb()
    131  frag = &skb_shinfo(skb)->frags[i];   in tx_map_skb()
    170  for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++)   in tx_unmap_skb()
    201  nr_sges = skb_shinfo(skb)->nr_frags + 1;   in hinic_xmit_frame()
    291  nr_sges = skb_shinfo(skb)->nr_frags + 1;   in free_all_tx_skbs()
    344  nr_sges = skb_shinfo(skb)->nr_frags + 1;   in free_tx_poll()
/Linux-v4.19/drivers/net/

D | xen-netfront.c |
    316  page = skb_frag_page(&skb_shinfo(skb)->frags[0]);   in xennet_alloc_rx_buffers()
    527  int i, frags = skb_shinfo(skb)->nr_frags;   in xennet_count_skb_slots()
    534  skb_frag_t *frag = skb_shinfo(skb)->frags + i;   in xennet_count_skb_slots()
    655  if (skb_shinfo(skb)->gso_size) {   in xennet_start_xmit()
    663  gso->u.gso.size = skb_shinfo(skb)->gso_size;   in xennet_start_xmit()
    664  gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?   in xennet_start_xmit()
    678  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {   in xennet_start_xmit()
    679  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];   in xennet_start_xmit()
    880  skb_shinfo(skb)->gso_size = gso->u.gso.size;   in xennet_set_skb_gso()
    881  skb_shinfo(skb)->gso_type =   in xennet_set_skb_gso()
    [all …]
/Linux-v4.19/drivers/net/ethernet/sun/

D | sunvnet_common.c |
    1083  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {   in vnet_skb_map()
    1084  skb_frag_t *f = &skb_shinfo(skb)->frags[i];   in vnet_skb_map()
    1123  docopy = skb_shinfo(skb)->nr_frags >= ncookies;   in vnet_skb_shape()
    1124  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {   in vnet_skb_shape()
    1125  skb_frag_t *f = &skb_shinfo(skb)->frags[i];   in vnet_skb_shape()
    1209  skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;   in vnet_skb_shape()
    1210  skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;   in vnet_skb_shape()
    1248  gso_size = skb_shinfo(skb)->gso_size;   in vnet_handle_offloads()
    1249  gso_type = skb_shinfo(skb)->gso_type;   in vnet_handle_offloads()
    1250  gso_segs = skb_shinfo(skb)->gso_segs;   in vnet_handle_offloads()
    [all …]
/Linux-v4.19/drivers/staging/octeon/

D | ethernet-tx.c |
    201  if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {   in cvm_oct_xmit()
    271  if (skb_shinfo(skb)->nr_frags == 0) {   in cvm_oct_xmit()
    280  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {   in cvm_oct_xmit()
    281  struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;   in cvm_oct_xmit()
    290  hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;   in cvm_oct_xmit()
    291  pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;   in cvm_oct_xmit()
    333  if (unlikely(skb_shinfo(skb)->nr_frags)) {   in cvm_oct_xmit()
/Linux-v4.19/net/xfrm/

D | xfrm_ipcomp.c |
    76   if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))   in ipcomp_decompress()
    79   frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;   in ipcomp_decompress()
    100  skb_shinfo(skb)->nr_frags++;   in ipcomp_decompress()
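ipcomp_decompress() appends freshly allocated pages as new fragments, bounded by MAX_SKB_FRAGS. A sketch of that append pattern, assuming the caller owns an uncloned skb (sketch_append_frag is an invented name; allocation and length accounting in the real function differ in detail):

#include <linux/errno.h>
#include <linux/skbuff.h>

static int sketch_append_frag(struct sk_buff *skb, struct page *page,
                              unsigned int len)
{
        if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
                return -EMSGSIZE;

        /* fills frags[nr_frags] and bumps nr_frags */
        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, len);
        skb->len += len;
        skb->data_len += len;
        skb->truesize += len;
        return 0;
}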