Matches for "shinfo" (struct skb_shared_info) in the Linux v5.15 sources:

/Linux-v5.15/drivers/net/ethernet/mellanox/mlx4/

en_tx.c
  601  const struct skb_shared_info *shinfo,  (in is_inline(), argument)
  609  if (shinfo->nr_frags == 1) {  (in is_inline())
  610  ptr = skb_frag_address_safe(&shinfo->frags[0]);  (in is_inline())
  616  if (shinfo->nr_frags)  (in is_inline())
  633  const struct skb_shared_info *shinfo,  (in get_real_size(), argument)
  642  if (shinfo->gso_size) {  (in get_real_size())
  648  real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +  (in get_real_size())
  664  shinfo, pfrag);  (in get_real_size())
  670  (shinfo->nr_frags + 1) * DS_SIZE;  (in get_real_size())
  678  const struct skb_shared_info *shinfo,  (in build_inline_wqe(), argument)
  [all …]
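
The mlx4 TX hits show a driver deciding, from shinfo->nr_frags and the fragment addresses, whether a small packet can be copied inline into the send descriptor or must be scattered across one data segment per fragment. A minimal sketch of that kind of decision using only generic skb helpers; the threshold and segment size below are invented placeholders, not mlx4's CTRL_SIZE/DS_SIZE values:

#include <linux/skbuff.h>

/* Hypothetical limits, for illustration only. */
#define EXAMPLE_INLINE_THRESHOLD 104     /* bytes we are willing to copy inline */
#define EXAMPLE_SEG_SIZE         16      /* bytes per gather entry in the ring */

/* True if the whole packet could be copied into the descriptor. */
static bool example_can_inline(const struct sk_buff *skb)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);

        if (skb->len > EXAMPLE_INLINE_THRESHOLD)
                return false;

        if (shinfo->nr_frags == 1) {
                /* skb_frag_address_safe() returns NULL for pages that have
                 * no kernel mapping (e.g. highmem), so refuse those. */
                if (!skb_frag_address_safe(&shinfo->frags[0]))
                        return false;
        } else if (shinfo->nr_frags) {
                return false;   /* keep the example to at most one fragment */
        }

        return true;
}

/* Descriptor bytes needed when not inlining: one gather entry for the
 * linear area plus one per page fragment. */
static unsigned int example_desc_size(const struct sk_buff *skb)
{
        return (skb_shinfo(skb)->nr_frags + 1) * EXAMPLE_SEG_SIZE;
}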

/Linux-v5.15/include/linux/

virtio_net.h
  123  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in virtio_net_hdr_to_skb(), local)
  127  shinfo->gso_size = gso_size;  (in virtio_net_hdr_to_skb())
  128  shinfo->gso_type = gso_type;  (in virtio_net_hdr_to_skb())
  131  shinfo->gso_type |= SKB_GSO_DODGY;  (in virtio_net_hdr_to_skb())
  132  shinfo->gso_segs = 0;  (in virtio_net_hdr_to_skb())
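
virtio_net_hdr_to_skb() is the canonical example of attaching GSO metadata that came from an untrusted source: gso_size and gso_type are copied into the shared info, SKB_GSO_DODGY marks them as unverified, and gso_segs is left at 0 so the stack recomputes it. A minimal sketch of the same pattern, assuming the caller has already sanity-checked hdr_gso_size against the packet; the function name and parameters are illustrative, not taken from virtio_net.h:

#include <linux/skbuff.h>

/* Attach guest-supplied segmentation hints to @skb.  @hdr_gso_size and
 * @is_ipv6 stand in for fields parsed from a device-specific header. */
static void example_set_untrusted_gso(struct sk_buff *skb,
                                      u16 hdr_gso_size, bool is_ipv6)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        shinfo->gso_size = hdr_gso_size;
        shinfo->gso_type = is_ipv6 ? SKB_GSO_TCPV6 : SKB_GSO_TCPV4;

        /* The values came from outside the kernel: flag them so the GSO
         * layer revalidates the headers before segmenting. */
        shinfo->gso_type |= SKB_GSO_DODGY;

        /* Let the stack compute the segment count itself. */
        shinfo->gso_segs = 0;
}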

skbuff.h
  4572  static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,  (in skb_increase_gso_size(), argument)
  4575  if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))  (in skb_increase_gso_size())
  4577  shinfo->gso_size += increment;  (in skb_increase_gso_size())
  4580  static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,  (in skb_decrease_gso_size(), argument)
  4583  if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))  (in skb_decrease_gso_size())
  4585  shinfo->gso_size -= decrement;  (in skb_decrease_gso_size())
  4594  const struct skb_shared_info *shinfo = skb_shinfo(skb);  (in skb_warn_if_lro(), local)
  4596  if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&  (in skb_warn_if_lro())
  4597  unlikely(shinfo->gso_type == 0)) {  (in skb_warn_if_lro())
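
skb_increase_gso_size() and skb_decrease_gso_size() exist so code that changes how much payload each segment will carry (typically after pushing or pulling an encapsulation header inside the GSO payload) adjusts gso_size through a helper that refuses to touch the GSO_BY_FRAGS sentinel. A hedged sketch of a typical caller, modelled loosely on bpf_skb_net_grow() and lwt_bpf's handle_gso_type(); the function name and encap_len parameter are illustrative:

#include <linux/skbuff.h>

/* After growing the inner headers of a GSO skb by @encap_len bytes,
 * shrink the per-segment payload so the resulting segments still fit. */
static void example_account_encap(struct sk_buff *skb, unsigned int encap_len)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        if (!skb_is_gso(skb))
                return;

        /* The helper WARNs and bails out if gso_size is GSO_BY_FRAGS. */
        skb_decrease_gso_size(shinfo, encap_len);

        /* The header layout changed behind the stack's back: mark the
         * metadata untrusted and let gso_segs be recomputed. */
        shinfo->gso_type |= SKB_GSO_DODGY;
        shinfo->gso_segs = 0;
}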

/Linux-v5.15/drivers/net/xen-netback/

netback.c
  378  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in xenvif_get_requests(), local)
  379  skb_frag_t *frags = shinfo->frags;  (in xenvif_get_requests())
  385  nr_slots = shinfo->nr_frags;  (in xenvif_get_requests())
  388  start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);  (in xenvif_get_requests())
  390  for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;  (in xenvif_get_requests())
  391  shinfo->nr_frags++, txp++, gop++) {  (in xenvif_get_requests())
  395  frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);  (in xenvif_get_requests())
  400  shinfo = skb_shinfo(nskb);  (in xenvif_get_requests())
  401  frags = shinfo->frags;  (in xenvif_get_requests())
  403  for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;  (in xenvif_get_requests())
  [all …]

/Linux-v5.15/drivers/net/ethernet/google/gve/

gve_tx_dqo.c
  456  const struct skb_shared_info *shinfo = skb_shinfo(skb);  (in gve_tx_add_skb_no_copy_dqo(), local)
  511  /*eop=*/shinfo->nr_frags == 0, is_gso);  (in gve_tx_add_skb_no_copy_dqo())
  514  for (i = 0; i < shinfo->nr_frags; i++) {  (in gve_tx_add_skb_no_copy_dqo())
  517  const skb_frag_t *frag = &shinfo->frags[i];  (in gve_tx_add_skb_no_copy_dqo())
  518  bool is_eop = i == (shinfo->nr_frags - 1);  (in gve_tx_add_skb_no_copy_dqo())
  582  const struct skb_shared_info *shinfo = skb_shinfo(skb);  (in gve_num_buffer_descs_needed(), local)
  588  for (i = 0; i < shinfo->nr_frags; i++) {  (in gve_num_buffer_descs_needed())
  589  unsigned int frag_size = skb_frag_size(&shinfo->frags[i]);  (in gve_num_buffer_descs_needed())
  608  const struct skb_shared_info *shinfo = skb_shinfo(skb);  (in gve_can_send_tso(), local)
  609  const int gso_size = shinfo->gso_size;  (in gve_can_send_tso())
  [all …]
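
The gve DQO TX path is a typical "one descriptor for the linear area, then one per page fragment" walk over shinfo->frags, with end-of-packet set on the last buffer. A stripped-down sketch of that walk; example_emit_desc_t and example_map_skb() are hypothetical, and a real driver would DMA-map each buffer with skb_frag_dma_map() rather than use its kernel virtual address:

#include <linux/skbuff.h>

/* Illustrative callback: consume @len bytes at @addr as one TX
 * descriptor, flagging the packet's last buffer with @is_eop. */
typedef int (*example_emit_desc_t)(void *ring, void *addr,
                                   unsigned int len, bool is_eop);

static int example_map_skb(void *ring, struct sk_buff *skb,
                           example_emit_desc_t emit)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        int i, err;

        /* Linear part first; it is the whole packet when nr_frags == 0. */
        err = emit(ring, skb->data, skb_headlen(skb),
                   /*is_eop=*/shinfo->nr_frags == 0);
        if (err)
                return err;

        /* Then one descriptor per page fragment. */
        for (i = 0; i < shinfo->nr_frags; i++) {
                const skb_frag_t *frag = &shinfo->frags[i];
                bool is_eop = i == (shinfo->nr_frags - 1);

                err = emit(ring, skb_frag_address(frag),
                           skb_frag_size(frag), is_eop);
                if (err)
                        return err;
        }

        return 0;
}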

gve_tx.c
  488  const struct skb_shared_info *shinfo = skb_shinfo(skb);  (in gve_tx_add_skb_no_copy(), local)
  522  payload_nfrags = shinfo->nr_frags;  (in gve_tx_add_skb_no_copy())
  541  for (i = 0; i < shinfo->nr_frags; i++) {  (in gve_tx_add_skb_no_copy())
  542  const skb_frag_t *frag = &shinfo->frags[i];  (in gve_tx_add_skb_no_copy())
  563  i += (payload_nfrags == shinfo->nr_frags ? 1 : 2);  (in gve_tx_add_skb_no_copy())

gve_rx_dqo.c
  608  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in gve_rx_complete_rsc(), local)
  616  shinfo->gso_type = SKB_GSO_TCPV4;  (in gve_rx_complete_rsc())
  619  shinfo->gso_type = SKB_GSO_TCPV6;  (in gve_rx_complete_rsc())
  625  shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len);  (in gve_rx_complete_rsc())

/Linux-v5.15/net/core/

lwt_bpf.c
  527  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in handle_gso_type(), local)
  530  shinfo->gso_type |= gso_type;  (in handle_gso_type())
  531  skb_decrease_gso_size(shinfo, encap_len);  (in handle_gso_type())
  532  shinfo->gso_segs = 0;  (in handle_gso_type())

skbuff.c
  195  struct skb_shared_info *shinfo;  (in __build_skb_around(), local)
  211  shinfo = skb_shinfo(skb);  (in __build_skb_around())
  212  memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));  (in __build_skb_around())
  213  atomic_set(&shinfo->dataref, 1);  (in __build_skb_around())
  661  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in skb_release_data(), local)
  666  &shinfo->dataref))  (in skb_release_data())
  671  for (i = 0; i < shinfo->nr_frags; i++)  (in skb_release_data())
  672  __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);  (in skb_release_data())
  674  if (shinfo->frag_list)  (in skb_release_data())
  675  kfree_skb_list(shinfo->frag_list);  (in skb_release_data())
  [all …]
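
These hits show the two ends of the shared info's lifetime: __build_skb_around() zeroes everything up to dataref and sets dataref to 1, and skb_release_data() drops that reference, unrefs each page fragment and frees the frag_list chain. For a read-only consumer, the same three places (linear area, frags[], frag_list) are all there is to walk; a hedged sketch that just totals the payload, using the skb_walk_frags() helper for the chained skbs (the function is illustrative, and for a well-formed skb the result simply equals skb->len):

#include <linux/skbuff.h>

/* Count the bytes reachable from @skb: the linear area, each page
 * fragment, and every skb chained on shinfo->frag_list. */
static unsigned int example_count_bytes(const struct sk_buff *skb)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        const struct sk_buff *frag_skb;
        unsigned int bytes = skb_headlen(skb);
        int i;

        for (i = 0; i < shinfo->nr_frags; i++)
                bytes += skb_frag_size(&shinfo->frags[i]);

        skb_walk_frags(skb, frag_skb)
                bytes += frag_skb->len;

        return bytes;
}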

filter.c
  3237  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in bpf_skb_proto_4_to_6(), local)
  3240  if (shinfo->gso_type & SKB_GSO_TCPV4) {  (in bpf_skb_proto_4_to_6())
  3241  shinfo->gso_type &= ~SKB_GSO_TCPV4;  (in bpf_skb_proto_4_to_6())
  3242  shinfo->gso_type |= SKB_GSO_TCPV6;  (in bpf_skb_proto_4_to_6())
  3267  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in bpf_skb_proto_6_to_4(), local)
  3270  if (shinfo->gso_type & SKB_GSO_TCPV6) {  (in bpf_skb_proto_6_to_4())
  3271  shinfo->gso_type &= ~SKB_GSO_TCPV6;  (in bpf_skb_proto_6_to_4())
  3272  shinfo->gso_type |= SKB_GSO_TCPV4;  (in bpf_skb_proto_6_to_4())
  3470  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in bpf_skb_net_grow(), local)
  3474  skb_decrease_gso_size(shinfo, len_diff);  (in bpf_skb_net_grow())
  [all …]

dev.c
  3732  const struct skb_shared_info *shinfo = skb_shinfo(skb);  (in qdisc_pkt_len_init(), local)
  3739  if (shinfo->gso_size && skb_transport_header_was_set(skb)) {  (in qdisc_pkt_len_init())
  3741  u16 gso_segs = shinfo->gso_segs;  (in qdisc_pkt_len_init())
  3747  if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {  (in qdisc_pkt_len_init())
  3763  if (shinfo->gso_type & SKB_GSO_DODGY)  (in qdisc_pkt_len_init())
  3765  shinfo->gso_size);  (in qdisc_pkt_len_init())
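
qdisc_pkt_len_init() needs an on-the-wire length estimate for a GSO skb before it is segmented, and it cannot trust gso_segs when SKB_GSO_DODGY is set. A hedged sketch of that estimate with the header accounting collapsed into a single hdr_len parameter (the real function derives it from the transport header; the function name is illustrative):

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Rough wire length of a GSO skb once segmented: every segment repeats
 * @hdr_len bytes of headers.  @hdr_len is assumed to be supplied by the
 * caller here. */
static unsigned int example_wire_len(const struct sk_buff *skb,
                                     unsigned int hdr_len)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        u16 gso_segs = shinfo->gso_segs;

        if (!shinfo->gso_size)
                return skb->len;

        /* Untrusted (e.g. virtio) senders may report a stale count:
         * recompute it from the payload and gso_size. */
        if ((shinfo->gso_type & SKB_GSO_DODGY) || !gso_segs)
                gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
                                        shinfo->gso_size);

        return skb->len + (gso_segs - 1) * hdr_len;
}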

/Linux-v5.15/net/ipv4/

tcp_output.c
  1495  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in tcp_fragment_tstamp(), local)
  1498  !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {  (in tcp_fragment_tstamp())
  1500  u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;  (in tcp_fragment_tstamp())
  1502  shinfo->tx_flags &= ~tsflags;  (in tcp_fragment_tstamp())
  1504  swap(shinfo->tskey, shinfo2->tskey);  (in tcp_fragment_tstamp())
  1634  struct skb_shared_info *shinfo;  (in __pskb_trim_head(), local)
  1646  shinfo = skb_shinfo(skb);  (in __pskb_trim_head())
  1647  for (i = 0; i < shinfo->nr_frags; i++) {  (in __pskb_trim_head())
  1648  int size = skb_frag_size(&shinfo->frags[i]);  (in __pskb_trim_head())
  1654  shinfo->frags[k] = shinfo->frags[i];  (in __pskb_trim_head())
  [all …]

tcp_ipv4.c
  1802  struct skb_shared_info *shinfo;  (in tcp_add_backlog(), local)
  1859  shinfo = skb_shinfo(skb);  (in tcp_add_backlog())
  1860  gso_size = shinfo->gso_size ?: skb->len;  (in tcp_add_backlog())
  1861  gso_segs = shinfo->gso_segs ?: 1;  (in tcp_add_backlog())
  1863  shinfo = skb_shinfo(tail);  (in tcp_add_backlog())
  1864  tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);  (in tcp_add_backlog())
  1865  tail_gso_segs = shinfo->gso_segs ?: 1;  (in tcp_add_backlog())
  1893  shinfo->gso_size = max(gso_size, tail_gso_size);  (in tcp_add_backlog())
  1894  shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);  (in tcp_add_backlog())
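
When tcp_add_backlog() coalesces a freshly received skb into the tail of the socket backlog, it keeps the tail's GSO accounting plausible: gso_size becomes the larger of the two values (falling back to the raw payload length for packets that were never GSO) and gso_segs is the capped sum. A small sketch of just that bookkeeping; the payload lengths are passed in directly instead of being derived from the headers as the real code does, and the function name is illustrative:

#include <linux/minmax.h>
#include <linux/skbuff.h>

/* Update @tail's GSO accounting after absorbing @skb's payload.
 * @skb_payload and @tail_payload are assumed to be the TCP payload
 * lengths computed by the caller. */
static void example_merge_gso_stats(struct sk_buff *tail, struct sk_buff *skb,
                                    u32 skb_payload, u32 tail_payload)
{
        u32 gso_size = skb_shinfo(skb)->gso_size ?: skb_payload;
        u32 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
        u32 tail_gso_size = skb_shinfo(tail)->gso_size ?: tail_payload;
        u32 tail_gso_segs = skb_shinfo(tail)->gso_segs ?: 1;

        skb_shinfo(tail)->gso_size = max(gso_size, tail_gso_size);
        /* gso_segs is a 16-bit field in skb_shared_info: cap the sum. */
        skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs + tail_gso_segs,
                                           0xFFFF);
}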

tcp.c
  474  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in tcp_tx_timestamp(), local)
  477  sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);  (in tcp_tx_timestamp())
  481  shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;  (in tcp_tx_timestamp())

tcp_input.c
  3190  const struct skb_shared_info *shinfo;  (in tcp_ack_tstamp(), local)
  3196  shinfo = skb_shinfo(skb);  (in tcp_ack_tstamp())
  3197  if (!before(shinfo->tskey, prior_snd_una) &&  (in tcp_ack_tstamp())
  3198  before(shinfo->tskey, tcp_sk(sk)->snd_una)) {  (in tcp_ack_tstamp())

/Linux-v5.15/drivers/net/wireless/mediatek/mt76/

dma.c
  530  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in mt76_add_fragment(), local)
  531  int nr_frags = shinfo->nr_frags;  (in mt76_add_fragment())
  533  if (nr_frags < ARRAY_SIZE(shinfo->frags)) {  (in mt76_add_fragment())
  546  if (nr_frags < ARRAY_SIZE(shinfo->frags))  (in mt76_add_fragment())
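
mt76_add_fragment() checks every append against the fixed size of shinfo->frags[] (MAX_SKB_FRAGS slots) before attaching another receive buffer. A hedged sketch of the same guard using skb_add_rx_frag(), which also updates skb->len, data_len and truesize for the caller; the function name is illustrative:

#include <linux/skbuff.h>

/* Attach one more receive buffer to @skb as a page fragment.  Returns
 * false (leaving the page to the caller) when frags[] is already full. */
static bool example_add_rx_fragment(struct sk_buff *skb, struct page *page,
                                    unsigned int offset, unsigned int len,
                                    unsigned int truesize)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;

        if (nr_frags >= ARRAY_SIZE(shinfo->frags))
                return false;   /* no room left: drop or flush upstream */

        skb_add_rx_frag(skb, nr_frags, page, offset, len, truesize);
        return true;
}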

/Linux-v5.15/drivers/net/ethernet/freescale/enetc/

enetc.c
  1017  struct skb_shared_info *shinfo;  (in enetc_xdp_frame_to_xdp_tx_swbd(), local)
  1041  shinfo = xdp_get_shared_info_from_frame(xdp_frame);  (in enetc_xdp_frame_to_xdp_tx_swbd())
  1043  for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;  (in enetc_xdp_frame_to_xdp_tx_swbd())
  1124  struct skb_shared_info *shinfo;  (in enetc_map_rx_buff_to_xdp(), local)
  1132  shinfo = xdp_get_shared_info_from_buff(xdp_buff);  (in enetc_map_rx_buff_to_xdp())
  1133  shinfo->nr_frags = 0;  (in enetc_map_rx_buff_to_xdp())
  1139  struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);  (in enetc_add_rx_buff_to_xdp(), local)
  1141  skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];  (in enetc_add_rx_buff_to_xdp())
  1150  shinfo->nr_frags++;  (in enetc_add_rx_buff_to_xdp())
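
enetc reuses struct skb_shared_info for XDP multi-buffer frames: xdp_get_shared_info_from_buff() points into the buffer's tailroom, nr_frags is reset when the first buffer is mapped, and each further RX buffer becomes one skb_frag_t. A hedged sketch of appending one buffer, assuming the xdp_buff was set up with room for the shared info; the function name and parameters are illustrative:

#include <linux/skbuff.h>
#include <net/xdp.h>

/* Record one extra RX buffer as a fragment of a multi-buffer xdp_buff. */
static bool example_xdp_add_frag(struct xdp_buff *xdp, struct page *page,
                                 unsigned int offset, unsigned int len)
{
        struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
        skb_frag_t *frag;

        if (shinfo->nr_frags >= MAX_SKB_FRAGS)
                return false;

        frag = &shinfo->frags[shinfo->nr_frags];
        __skb_frag_set_page(frag, page);
        skb_frag_off_set(frag, offset);
        skb_frag_size_set(frag, len);
        shinfo->nr_frags++;

        return true;
}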

/Linux-v5.15/net/sched/

sch_cake.c
  1351  const struct skb_shared_info *shinfo = skb_shinfo(skb);  (in cake_overhead(), local)
  1359  if (!shinfo->gso_size)  (in cake_overhead())
  1366  if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |  (in cake_overhead())
  1383  if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))  (in cake_overhead())
  1385  shinfo->gso_size);  (in cake_overhead())
  1387  segs = shinfo->gso_segs;  (in cake_overhead())
  1389  len = shinfo->gso_size + hdr_len;  (in cake_overhead())
  1390  last_len = skb->len - shinfo->gso_size * (segs - 1);  (in cake_overhead())

/Linux-v5.15/drivers/net/ethernet/hisilicon/hns3/

hns3_enet.h
  718  void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);

hns3_enet.c
  1840  void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)  (in hns3_shinfo_pack(), argument)
  1845  size[i] = skb_frag_size(&shinfo->frags[i]);  (in hns3_shinfo_pack())

/Linux-v5.15/drivers/net/ethernet/broadcom/

bnx2.c
  2954  struct skb_shared_info *shinfo;  (in bnx2_reuse_rx_skb_pages(), local)
  2956  shinfo = skb_shinfo(skb);  (in bnx2_reuse_rx_skb_pages())
  2957  shinfo->nr_frags--;  (in bnx2_reuse_rx_skb_pages())
  2958  page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);  (in bnx2_reuse_rx_skb_pages())
  2959  __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);  (in bnx2_reuse_rx_skb_pages())

/Linux-v5.15/drivers/net/ethernet/realtek/

r8169_main.c
  4181  struct skb_shared_info *shinfo = skb_shinfo(skb);  (in rtl8169_tso_csum_v2(), local)
  4182  u32 mss = shinfo->gso_size;  (in rtl8169_tso_csum_v2())
  4185  if (shinfo->gso_type & SKB_GSO_TCPV4) {  (in rtl8169_tso_csum_v2())
  4187  } else if (shinfo->gso_type & SKB_GSO_TCPV6) {  (in rtl8169_tso_csum_v2())
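
rtl8169_tso_csum_v2() is the consumer side of the GSO metadata: a driver advertising TSO reads gso_size as the MSS to program into the TX descriptor and branches on SKB_GSO_TCPV4 versus SKB_GSO_TCPV6. A hedged sketch of that decision; the descriptor flag values and function name are invented for illustration:

#include <linux/bits.h>
#include <linux/skbuff.h>

/* Hypothetical per-descriptor offload bits for an example device. */
#define EXAMPLE_DESC_TSO_V4     BIT(0)
#define EXAMPLE_DESC_TSO_V6     BIT(1)

/* Translate the skb's GSO metadata into example descriptor bits;
 * returns 0 when the packet should not use hardware TSO. */
static u32 example_tso_flags(const struct sk_buff *skb, u32 *mss)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);

        if (!skb_is_gso(skb))
                return 0;

        *mss = shinfo->gso_size;

        if (shinfo->gso_type & SKB_GSO_TCPV4)
                return EXAMPLE_DESC_TSO_V4;
        if (shinfo->gso_type & SKB_GSO_TCPV6)
                return EXAMPLE_DESC_TSO_V6;

        return 0;       /* unsupported type: fall back to software GSO */
}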

/Linux-v5.15/drivers/net/ethernet/intel/e1000e/

netdev.c
  1524  struct skb_shared_info *shinfo;  (in e1000_clean_jumbo_rx_irq(), local)
  1580  shinfo = skb_shinfo(rxtop);  (in e1000_clean_jumbo_rx_irq())
  1581  skb_fill_page_desc(rxtop, shinfo->nr_frags,  (in e1000_clean_jumbo_rx_irq())
  1592  shinfo = skb_shinfo(rxtop);  (in e1000_clean_jumbo_rx_irq())
  1593  skb_fill_page_desc(rxtop, shinfo->nr_frags,  (in e1000_clean_jumbo_rx_irq())
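
e1000e assembles a jumbo frame by appending each receive buffer page to the head skb at index shinfo->nr_frags with skb_fill_page_desc(). Unlike skb_add_rx_frag(), that helper only fills the frag slot and bumps nr_frags, so the caller still has to account the bytes itself. A hedged sketch of one append step; the function name is illustrative and it assumes one full page of truesize per RX buffer:

#include <linux/skbuff.h>

/* Append a @length-byte receive buffer page to the head skb of a jumbo
 * frame being reassembled. */
static void example_jumbo_append(struct sk_buff *rxtop, struct page *page,
                                 unsigned int length)
{
        struct skb_shared_info *shinfo = skb_shinfo(rxtop);

        /* Fill the next free frag slot and increment nr_frags. */
        skb_fill_page_desc(rxtop, shinfo->nr_frags, page, 0, length);

        /* skb_fill_page_desc() does not touch the length accounting. */
        rxtop->len += length;
        rxtop->data_len += length;
        rxtop->truesize += PAGE_SIZE;   /* assumes a full page per buffer */
}

A production path would also bound nr_frags by MAX_SKB_FRAGS before filling the slot.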

/Linux-v5.15/drivers/net/ethernet/broadcom/bnxt/

bnxt.c
  1130  struct skb_shared_info *shinfo;  (in bnxt_rx_pages(), local)
  1133  shinfo = skb_shinfo(skb);  (in bnxt_rx_pages())
  1134  nr_frags = --shinfo->nr_frags;  (in bnxt_rx_pages())
  1135  __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);  (in bnxt_rx_pages())