Searched refs:skb_shinfo (Results 1 – 25 of 385) sorted by relevance

/Linux-v6.6/net/ipv4/
tcp_offload.c
21 skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP; in tcp_gso_tstamp()
22 skb_shinfo(skb)->tskey = ts_seq; in tcp_gso_tstamp()
34 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)) in tcp4_gso_segment()
82 mss = skb_shinfo(skb)->gso_size; in tcp_gso_segment()
89 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); in tcp_gso_segment()
112 mss *= skb_shinfo(segs)->gso_segs; in tcp_gso_segment()
120 if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP)) in tcp_gso_segment()
121 tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); in tcp_gso_segment()
256 mss = skb_shinfo(p)->gso_size; in tcp_gro_receive()
263 flush |= (mss != skb_shinfo(skb)->gso_size); in tcp_gro_receive()
[all …]
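
The tcp_offload.c hits above show the usual GSO bookkeeping: skb_shinfo(skb)->gso_size is read as the MSS and gso_segs is derived from the total length. A minimal sketch of that calculation, assuming only kernel headers; the helper name is hypothetical, not from the listing:

```c
#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Hypothetical helper mirroring the gso_size/gso_segs usage above:
 * treat gso_size as the MSS and round the payload up to whole segments. */
static unsigned int example_count_gso_segs(const struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;

	if (!mss)			/* not a GSO packet */
		return 1;

	return DIV_ROUND_UP(skb->len, mss);
}
```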
udp_offload.c
42 if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) in __skb_udp_tunnel_segment()
58 need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM); in __skb_udp_tunnel_segment()
61 remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); in __skb_udp_tunnel_segment()
94 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in __skb_udp_tunnel_segment()
126 uh->len = htons(skb_shinfo(skb)->gso_size + in __skb_udp_tunnel_segment()
254 unsigned int mss = skb_shinfo(skb)->gso_size; in __udp_gso_segment_list()
277 mss = skb_shinfo(gso_skb)->gso_size; in __udp_gso_segment()
283 skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh), in __udp_gso_segment()
288 if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) in __udp_gso_segment()
310 mss *= skb_shinfo(segs)->gso_segs; in __udp_gso_segment()
[all …]
gre_offload.c
45 need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); in gre_gso_segment()
65 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in gre_gso_segment()
103 skb_shinfo(skb)->gso_size; in gre_gso_segment()
245 skb_shinfo(skb)->gso_type = SKB_GSO_GRE; in gre_gro_complete()
/Linux-v6.6/net/core/
skbuff.c
362 shinfo = skb_shinfo(skb); in __finalize_skb_around()
860 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
877 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
964 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
1165 struct skb_shared_info *sh = skb_shinfo(skb); in skb_dump()
1223 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
1224 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
1448 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1471 skb_shinfo(n)->frag_list = first; in alloc_skb_for_msg()
1755 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; in __skb_zcopy_downgrade_managed()
[all …]
tso.c
53 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { in tso_build_data()
54 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; in tso_build_data()
79 (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) { in tso_start()
80 skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx]; in tso_start()
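
tso.c walks the paged fragments through skb_shinfo(skb)->nr_frags and the frags[] array. A hedged sketch of that walk, assuming <linux/skbuff.h>; example_walk_frags() is a made-up name:

```c
#include <linux/skbuff.h>

/* Made-up helper: sum the bytes held in the skb's page fragments,
 * iterating the same shared-info fields the tso.c hits above use. */
static unsigned int example_walk_frags(const struct sk_buff *skb)
{
	unsigned int i, total = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		total += skb_frag_size(frag);
	}
	return total;
}
```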
gro.c
98 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); in skb_gro_receive()
135 pinfo = skb_shinfo(lp); in skb_gro_receive()
214 skb_shinfo(p)->frag_list = skb; in skb_gro_receive()
246 skb_shinfo(skb)->gso_size = 0; in napi_gro_complete()
372 const struct skb_shared_info *pinfo = skb_shinfo(skb); in skb_gro_reset_offset()
391 struct skb_shared_info *pinfo = skb_shinfo(skb); in gro_pull_from_frag0()
472 NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs; in dev_gro_receive()
475 (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)) in dev_gro_receive()
526 skb_shinfo(skb)->gso_size = skb_gro_len(skb); in dev_gro_receive()
633 skb_shinfo(skb)->gso_type = 0; in napi_reuse_skb()
[all …]
/Linux-v6.6/drivers/net/ethernet/sfc/siena/
tx.h
28 if (skb_shinfo(skb)->gso_segs > 1 && in efx_tx_csum_type_skb()
29 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in efx_tx_csum_type_skb()
30 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) in efx_tx_csum_type_skb()
/Linux-v6.6/drivers/net/xen-netback/
netback.c
374 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_alloc_skb()
390 struct skb_shared_info *shinfo = skb_shinfo(skb); in xenvif_get_requests()
481 shinfo = skb_shinfo(nskb); in xenvif_get_requests()
494 skb_shinfo(skb)->frag_list = nskb; in xenvif_get_requests()
543 struct skb_shared_info *shinfo = skb_shinfo(skb); in xenvif_tx_check_gop()
660 shinfo = skb_shinfo(shinfo->frag_list); in xenvif_tx_check_gop()
672 struct skb_shared_info *shinfo = skb_shinfo(skb); in xenvif_fill_frags()
687 skb_shinfo(skb)->destructor_arg = in xenvif_fill_frags()
754 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in xenvif_set_skb_gso()
757 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in xenvif_set_skb_gso()
[all …]
/Linux-v6.6/drivers/net/ethernet/sfc/
tx.h
35 if (skb_shinfo(skb)->gso_segs > 1 && in efx_tx_csum_type_skb()
36 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in efx_tx_csum_type_skb()
37 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) in efx_tx_csum_type_skb()
tx_tso.c
291 bool is_last = st->out_len <= skb_shinfo(skb)->gso_size; in tso_start_new_packet()
295 st->packet_space = skb_shinfo(skb)->gso_size; in tso_start_new_packet()
341 st->seqnum += skb_shinfo(skb)->gso_size; in tso_start_new_packet()
386 EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->nr_frags < 1); in efx_enqueue_skb_tso()
389 skb_shinfo(skb)->frags + frag_i); in efx_enqueue_skb_tso()
408 if (++frag_i >= skb_shinfo(skb)->nr_frags) in efx_enqueue_skb_tso()
412 skb_shinfo(skb)->frags + frag_i); in efx_enqueue_skb_tso()
ef100_tx.c
67 mss = skb_shinfo(skb)->gso_size; in ef100_tx_can_tso()
77 if (skb_shinfo(skb)->gso_segs > nic_data->tso_max_payload_num_segs) { in ef100_tx_can_tso()
190 bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; in ef100_make_tso_desc()
195 u32 mss = skb_shinfo(skb)->gso_size; in ef100_make_tso_desc()
203 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID) in ef100_make_tso_desc()
216 if (skb_shinfo(skb)->gso_type & in ef100_make_tso_desc()
224 outer_csum = skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM; in ef100_make_tso_desc()
392 segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; in __ef100_enqueue_skb()
/Linux-v6.6/net/ipv6/
ip6_offload.c
110 skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6)) in ipv6_gso_segment()
112 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP); in ipv6_gso_segment()
115 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP); in ipv6_gso_segment()
128 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in ipv6_gso_segment()
133 payload_len = skb_shinfo(skb)->gso_size + in ipv6_gso_segment()
376 skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4; in sit_gro_complete()
383 skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6; in ip6ip6_gro_complete()
390 skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6; in ip4ip6_gro_complete()
406 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4)) in sit_gso_segment()
415 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6)) in ip4ip6_gso_segment()
[all …]
udp_offload.c
33 if (skb->encapsulation && skb_shinfo(skb)->gso_type & in udp6_ufo_fragment()
40 if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4))) in udp6_ufo_fragment()
46 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in udp6_ufo_fragment()
49 mss = skb_shinfo(skb)->gso_size; in udp6_ufo_fragment()
174 skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4); in udp6_gro_complete()
175 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; in udp6_gro_complete()
tcpv6_offload.c
37 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; in tcp6_gro_complete()
48 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) in tcp6_gso_segment()
/Linux-v6.6/net/openvswitch/
openvswitch_trace.h
52 __entry->nr_frags = skb_shinfo(skb)->nr_frags;
53 __entry->gso_size = skb_shinfo(skb)->gso_size;
54 __entry->gso_type = skb_shinfo(skb)->gso_type;
122 __entry->nr_frags = skb_shinfo(skb)->nr_frags;
123 __entry->gso_size = skb_shinfo(skb)->gso_size;
124 __entry->gso_type = skb_shinfo(skb)->gso_type;
/Linux-v6.6/drivers/net/ethernet/hisilicon/hns3/
hns3_trace.h
35 __entry->nr_frags = skb_shinfo(skb)->nr_frags;
36 __entry->gso_size = skb_shinfo(skb)->gso_size;
37 __entry->gso_segs = skb_shinfo(skb)->gso_segs;
38 __entry->gso_type = skb_shinfo(skb)->gso_type;
43 hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
/Linux-v6.6/include/trace/events/
net.h
56 __entry->tx_flags = skb_shinfo(skb)->tx_flags;
57 __entry->gso_size = skb_shinfo(skb)->gso_size;
58 __entry->gso_segs = skb_shinfo(skb)->gso_segs;
59 __entry->gso_type = skb_shinfo(skb)->gso_type;
214 __entry->nr_frags = skb_shinfo(skb)->nr_frags;
215 __entry->gso_size = skb_shinfo(skb)->gso_size;
216 __entry->gso_type = skb_shinfo(skb)->gso_type;
/Linux-v6.6/include/linux/
udp.h
127 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in udp_cmsg_recv()
128 gso_size = skb_shinfo(skb)->gso_size; in udp_cmsg_recv()
138 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4) in udp_unexpected_gso()
141 if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist) in udp_unexpected_gso()
skbuff.h
562 #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
1661 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) macro
1665 return &skb_shinfo(skb)->hwtstamps; in skb_hwtstamps()
1670 bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE; in skb_zcopy()
1677 return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY; in skb_zcopy_pure()
1682 return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS; in skb_zcopy_managed()
1698 skb_shinfo(skb)->destructor_arg = uarg; in skb_zcopy_init()
1699 skb_shinfo(skb)->flags |= uarg->flags; in skb_zcopy_init()
1716 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); in skb_zcopy_set_nouarg()
1717 skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG; in skb_zcopy_set_nouarg()
[all …]
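
skbuff.h is where skb_shinfo() itself is defined (line 1661 above): the shared info block sits at skb_end_pointer(), right after the packet data, and is shared between clones. The zero-copy helpers shown above just test bits in skb_shinfo(skb)->flags; a sketch of the same test, assuming <linux/skbuff.h> (the kernel's own skb_zcopy() already provides this, the helper name here is hypothetical):

```c
#include <linux/skbuff.h>

/* Restatement of the flag test used by skb_zcopy() above; the helper
 * name is invented, the flag is the real SKBFL_ZEROCOPY_ENABLE bit. */
static bool example_skb_has_zerocopy(const struct sk_buff *skb)
{
	return skb && (skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE);
}
```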
/Linux-v6.6/net/tls/
tls_strp.c
31 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); in tls_strp_anchor_free()
52 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in tls_strp_skb_copy()
53 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in tls_strp_skb_copy()
146 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); in tls_strp_msg_hold()
193 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); in tls_strp_flush_anchor_copy()
217 frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE]; in tls_strp_copyin_frag()
298 shinfo = skb_shinfo(skb); in tls_strp_copyin_skb()
397 shinfo = skb_shinfo(strp->anchor); in tls_strp_read_copy()
432 first = skb_shinfo(strp->anchor)->frag_list; in tls_strp_check_queue_ok()
468 skb_shinfo(strp->anchor)->frag_list = first; in tls_strp_load_anchor_with_queue()
/Linux-v6.6/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
96 frag = &skb_shinfo(skb)->frags[seg - 1]; in otx2_dma_map_skb_frag()
156 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { in otx2_snd_pkt_handler()
204 if (likely(!skb_shinfo(skb)->nr_frags)) { in otx2_skb_add_frag()
217 if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) { in otx2_skb_add_frag()
218 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, in otx2_skb_add_frag()
666 if (skb_shinfo(skb)->gso_size) { in otx2_sqe_add_ext()
669 ext->lso_mps = skb_shinfo(skb)->gso_size; in otx2_sqe_add_ext()
672 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { in otx2_sqe_add_ext()
681 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { in otx2_sqe_add_ext()
684 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in otx2_sqe_add_ext()
[all …]
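
The otx2_sqe_add_ext() hits show a common driver pattern: dispatch on skb_shinfo(skb)->gso_type to pick a hardware LSO format. A sketch under the assumption of invented format constants; only the skb_is_gso()/skb_shinfo() tests are real kernel API:

```c
#include <linux/skbuff.h>

/* Invented format enum; the gso_type tests match the otx2 hits above. */
enum example_lso_fmt {
	EXAMPLE_LSO_NONE,
	EXAMPLE_LSO_TCPV4,
	EXAMPLE_LSO_TCPV6,
	EXAMPLE_LSO_UDP,
};

static enum example_lso_fmt example_pick_lso_fmt(const struct sk_buff *skb)
{
	if (!skb_is_gso(skb))
		return EXAMPLE_LSO_NONE;
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return EXAMPLE_LSO_TCPV4;
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return EXAMPLE_LSO_TCPV6;
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		return EXAMPLE_LSO_UDP;
	return EXAMPLE_LSO_NONE;
}
```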
/Linux-v6.6/include/net/
tso.h
23 return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; in tso_count_descs()
/Linux-v6.6/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_ptp.h
57 return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP; in cxgb4_xmit_with_hwtstamp()
62 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in cxgb4_xmit_hwtstamp_pending()
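
cxgb4_ptp.h shows the TX-timestamp handshake carried in skb_shinfo(skb)->tx_flags: the stack sets SKBTX_HW_TSTAMP when userspace requested a hardware timestamp, and the driver marks SKBTX_IN_PROGRESS before queuing the skb. A minimal sketch with a hypothetical helper name:

```c
#include <linux/skbuff.h>

/* Hypothetical driver helper: if a hardware TX timestamp was requested,
 * flag the skb as in-progress so the stack waits for the completion. */
static void example_mark_hw_tstamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
```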
/Linux-v6.6/drivers/net/ethernet/mellanox/mlx5/core/
en_tx.c
160 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in mlx5e_tx_get_gso_ihs()
200 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mlx5e_txwqe_build_dsegs()
201 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in mlx5e_txwqe_build_dsegs()
274 .mss = cpu_to_be16(skb_shinfo(skb)->gso_size), in mlx5e_sq_xmit_prepare()
276 .num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs, in mlx5e_sq_xmit_prepare()
281 stats->packets += skb_shinfo(skb)->gso_segs; in mlx5e_sq_xmit_prepare()
314 ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids; in mlx5e_sq_calc_wqe_attr()
338 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in mlx5e_tx_skb_update_hwts_flags()
339 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in mlx5e_tx_skb_update_hwts_flags()
398 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) { in mlx5e_txwqe_complete()
[all …]
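
The mlx5e_sq_xmit_prepare() hits show wire-byte accounting for a GSO skb: every segment after the first repeats the inner headers, so the driver adds (gso_segs - 1) * ihs. A sketch of the same arithmetic; the function name and the ihs (inner header size) parameter are assumptions, not taken from the listing:

```c
#include <linux/skbuff.h>

/* Assumed helper: bytes that will actually hit the wire once the NIC
 * segments the skb, given the inner header size (ihs) it replicates. */
static unsigned int example_wire_bytes(const struct sk_buff *skb,
				       unsigned int ihs)
{
	if (!skb_is_gso(skb))
		return skb->len;

	return skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
}
```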
/Linux-v6.6/drivers/staging/octeon/
ethernet-tx.c
189 if (unlikely(skb_shinfo(skb)->nr_frags > 5)) { in cvm_oct_xmit()
261 if (skb_shinfo(skb)->nr_frags == 0) { in cvm_oct_xmit()
270 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in cvm_oct_xmit()
271 skb_frag_t *fs = skb_shinfo(skb)->frags + i; in cvm_oct_xmit()
280 hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1; in cvm_oct_xmit()
281 pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; in cvm_oct_xmit()
323 if (unlikely(skb_shinfo(skb)->nr_frags)) { in cvm_oct_xmit()
