/Linux-v5.4/net/sctp/

D | offload.c
      37  struct sk_buff *segs = ERR_PTR(-EINVAL);  in sctp_gso_segment() local
      63  segs = NULL;  in sctp_gso_segment()
      67  segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);  in sctp_gso_segment()
      68  if (IS_ERR(segs))  in sctp_gso_segment()
      73  for (skb = segs; skb; skb = skb->next) {  in sctp_gso_segment()
      82  return segs;  in sctp_gso_segment()

/Linux-v5.4/net/ipv4/

D | udp_offload.c
      22  struct sk_buff *segs = ERR_PTR(-EINVAL);  in __skb_udp_tunnel_segment() local
      82  segs = gso_inner_segment(skb, features);  in __skb_udp_tunnel_segment()
      83  if (IS_ERR_OR_NULL(segs)) {  in __skb_udp_tunnel_segment()
      89  gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);  in __skb_udp_tunnel_segment()
      93  skb = segs;  in __skb_udp_tunnel_segment()
     145  return segs;  in __skb_udp_tunnel_segment()
     155  struct sk_buff *segs = ERR_PTR(-EINVAL);  in skb_udp_tunnel_segment() local
     177  segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,  in skb_udp_tunnel_segment()
     183  return segs;  in skb_udp_tunnel_segment()
     192  struct sk_buff *segs, *seg;  in __udp_gso_segment() local
     [all …]

D | tcp_offload.c
      57  struct sk_buff *segs = ERR_PTR(-EINVAL);  in tcp_gso_segment() local
      89  segs = NULL;  in tcp_gso_segment()
      98  segs = skb_segment(skb, features);  in tcp_gso_segment()
      99  if (IS_ERR(segs))  in tcp_gso_segment()
     103  segs->ooo_okay = ooo_okay;  in tcp_gso_segment()
     109  if (skb_is_gso(segs))  in tcp_gso_segment()
     110  mss *= skb_shinfo(segs)->gso_segs;  in tcp_gso_segment()
     114  skb = segs;  in tcp_gso_segment()
     119  tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);  in tcp_gso_segment()
     177  return segs;  in tcp_gso_segment()

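The sctp and tcp hits above all trace the same callback shape: a protocol's .gso_segment handler validates the GSO metadata, hands the oversized skb to skb_segment(), then walks the returned list patching each segment's headers before returning it. The sketch below is a condensed, hypothetical handler in that style; fixup_seg_header() is invented for illustration, and this is not the actual v5.4 tcp_gso_segment()/sctp_gso_segment() code.

```c
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-segment fixup; a real handler would rewrite sequence
 * numbers, lengths and checksums here. */
static void fixup_seg_header(struct sk_buff *seg)
{
}

static struct sk_buff *proto_gso_segment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);

	/* Refuse skbs whose GSO metadata does not match this protocol. */
	if (!skb_shinfo(skb)->gso_size)
		goto out;

	/* Let the generic code slice the payload into MSS-sized skbs. */
	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Walk the singly linked list of segments via skb->next. */
	for (skb = segs; skb; skb = skb->next)
		fixup_seg_header(skb);
out:
	return segs;
}
```
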
D | gre_offload.c
      18  struct sk_buff *segs = ERR_PTR(-EINVAL);  in gre_gso_segment() local
      49  segs = skb_mac_gso_segment(skb, features);  in gre_gso_segment()
      50  if (IS_ERR_OR_NULL(segs)) {  in gre_gso_segment()
      56  gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);  in gre_gso_segment()
      60  skb = segs;  in gre_gso_segment()
     104  return segs;  in gre_gso_segment()

D | esp4_offload.c
     122  struct sk_buff *segs = ERR_PTR(-EINVAL);  in xfrm4_transport_gso_segment() local
     128  segs = ops->callbacks.gso_segment(skb, features);  in xfrm4_transport_gso_segment()
     130  return segs;  in xfrm4_transport_gso_segment()

/Linux-v5.4/net/mpls/

D | mpls_gso.c
      21  struct sk_buff *segs = ERR_PTR(-EINVAL);  in mpls_gso_segment() local
      44  segs = skb_mac_gso_segment(skb, mpls_features);  in mpls_gso_segment()
      45  if (IS_ERR_OR_NULL(segs)) {  in mpls_gso_segment()
      50  skb = segs;  in mpls_gso_segment()
      66  return segs;  in mpls_gso_segment()

/Linux-v5.4/net/nsh/

D | nsh.c
      79  struct sk_buff *segs = ERR_PTR(-EINVAL);  in nsh_gso_segment() local
     108  segs = skb_mac_gso_segment(skb, features);  in nsh_gso_segment()
     109  if (IS_ERR_OR_NULL(segs)) {  in nsh_gso_segment()
     116  for (skb = segs; skb; skb = skb->next) {  in nsh_gso_segment()
     125  return segs;  in nsh_gso_segment()

/Linux-v5.4/net/sched/

D | sch_tbf.c
     147  struct sk_buff *segs, *nskb;  in tbf_segment() local
     152  segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);  in tbf_segment()
     154  if (IS_ERR_OR_NULL(segs))  in tbf_segment()
     158  while (segs) {  in tbf_segment()
     159  nskb = segs->next;  in tbf_segment()
     160  skb_mark_not_on_list(segs);  in tbf_segment()
     161  qdisc_skb_cb(segs)->pkt_len = segs->len;  in tbf_segment()
     162  len += segs->len;  in tbf_segment()
     163  ret = qdisc_enqueue(segs, q->qdisc, to_free);  in tbf_segment()
     170  segs = nskb;  in tbf_segment()

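This sch_tbf entry and the sch_netem/sch_cake entries that follow share the queueing-discipline idiom: software-segment a GSO skb with skb_gso_segment(), unlink each segment from the list, set its qdisc cb pkt_len, and enqueue it individually on a child qdisc. A minimal sketch of that loop, shaped after tbf_segment() with the length/statistics bookkeeping elided; it is an illustrative condensation, not the v5.4 function.

```c
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Segment a GSO skb and enqueue each piece on a child qdisc. */
static int segment_and_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct Qdisc *child, struct sk_buff **to_free)
{
	netdev_features_t features = netif_skb_features(skb);
	struct sk_buff *segs, *nskb;
	unsigned int nb = 0;
	int ret;

	/* Clear the GSO feature bits so the skb is really split in software. */
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	while (segs) {
		nskb = segs->next;
		skb_mark_not_on_list(segs);		/* detach from the list */
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		ret = qdisc_enqueue(segs, child, to_free);
		if (ret == NET_XMIT_SUCCESS)
			nb++;				/* drop stats elided */
		segs = nskb;
	}
	consume_skb(skb);	/* the original GSO skb is no longer needed */
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
```
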
D | sch_netem.c
     415  struct sk_buff *segs;  in netem_segment() local
     418  segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);  in netem_segment()
     420  if (IS_ERR_OR_NULL(segs)) {  in netem_segment()
     425  return segs;  in netem_segment()
     441  struct sk_buff *segs = NULL;  in netem_enqueue() local
     499  segs = skb->next;  in netem_enqueue()
     522  skb->next = segs;  in netem_enqueue()
     595  if (segs) {  in netem_enqueue()
     602  while (segs) {  in netem_enqueue()
     603  skb2 = segs->next;  in netem_enqueue()
     [all …]

D | sch_cake.c
    1315  u16 segs = 1;  in cake_overhead() local
    1344  segs = DIV_ROUND_UP(skb->len - hdr_len,  in cake_overhead()
    1347  segs = shinfo->gso_segs;  in cake_overhead()
    1350  last_len = skb->len - shinfo->gso_size * (segs - 1);  in cake_overhead()
    1352  return (cake_calc_overhead(q, len, off) * (segs - 1) +  in cake_overhead()
    1678  struct sk_buff *segs, *nskb;  in cake_enqueue() local
    1682  segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);  in cake_enqueue()
    1683  if (IS_ERR_OR_NULL(segs))  in cake_enqueue()
    1686  while (segs) {  in cake_enqueue()
    1687  nskb = segs->next;  in cake_enqueue()
     [all …]

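cake_overhead() is the one place in the sched hits where segs is a count rather than an skb list: the per-packet framing overhead of a GSO super-packet is charged once per segment, with the last (usually shorter) segment costed separately. A small standalone restatement of that arithmetic, with cake_calc_overhead() replaced by a hypothetical flat framing cost purely for illustration.

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Hypothetical stand-in for cake_calc_overhead(): pad a segment's length
 * by a fixed framing overhead (the value is illustrative only). */
static unsigned int calc_overhead(unsigned int len)
{
	return len + 38;
}

/* A GSO super-packet of skb_len bytes, hdr_len of headers and gso_size
 * payload slices costs (segs - 1) full segments plus one shorter tail. */
static unsigned int gso_wire_cost(unsigned int skb_len, unsigned int hdr_len,
				  unsigned int gso_size)
{
	unsigned int segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);
	unsigned int last_len = skb_len - gso_size * (segs - 1);

	return calc_overhead(gso_size + hdr_len) * (segs - 1) +
	       calc_overhead(last_len);
}

int main(void)
{
	/* 64 KiB TSO packet, 54 bytes of headers, 1448-byte MSS. */
	printf("%u\n", gso_wire_cost(65536, 54, 1448));
	return 0;
}
```
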
/Linux-v5.4/net/ipv6/

D | ip6_offload.c
      74  struct sk_buff *segs = ERR_PTR(-EINVAL);  in ipv6_gso_segment() local
      98  segs = ERR_PTR(-EPROTONOSUPPORT);  in ipv6_gso_segment()
     113  segs = ops->callbacks.gso_segment(skb, features);  in ipv6_gso_segment()
     116  if (IS_ERR_OR_NULL(segs))  in ipv6_gso_segment()
     119  gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);  in ipv6_gso_segment()
     121  for (skb = segs; skb; skb = skb->next) {  in ipv6_gso_segment()
     136  kfree_skb_list(segs);  in ipv6_gso_segment()
     151  return segs;  in ipv6_gso_segment()

D | udp_offload.c
      20  struct sk_buff *segs = ERR_PTR(-EINVAL);  in udp6_ufo_fragment() local
      37  segs = skb_udp_tunnel_segment(skb, features, true);  in udp6_ufo_fragment()
     107  segs = skb_segment(skb, features);  in udp6_ufo_fragment()
     111  return segs;  in udp6_ufo_fragment()

D | esp6_offload.c
     149  struct sk_buff *segs = ERR_PTR(-EINVAL);  in xfrm6_transport_gso_segment() local
     155  segs = ops->callbacks.gso_segment(skb, features);  in xfrm6_transport_gso_segment()
     157  return segs;  in xfrm6_transport_gso_segment()

/Linux-v5.4/drivers/infiniband/sw/rdmavt/

D | mr.c
     423  mr->mr.map[m]->segs[n].vaddr = vaddr;  in rvt_reg_user_mr()
     424  mr->mr.map[m]->segs[n].length = PAGE_SIZE;  in rvt_reg_user_mr()
     613  mr->mr.map[m]->segs[n].vaddr = (void *)addr;  in rvt_set_page()
     614  mr->mr.map[m]->segs[n].length = ps;  in rvt_set_page()
     643  mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;  in rvt_map_mr_sg()
     811  fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];  in rvt_map_phys_fmr()
     812  fmr->mr.map[m]->segs[n].length = ps;  in rvt_map_phys_fmr()
     985  while (off >= mr->map[m]->segs[n].length) {  in rvt_lkey_ok()
     986  off -= mr->map[m]->segs[n].length;  in rvt_lkey_ok()
     995  isge->vaddr = mr->map[m]->segs[n].vaddr + off;  in rvt_lkey_ok()
     [all …]

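In the rdmavt hits, segs has nothing to do with skbs: a memory region keeps a two-level table of segment descriptors, map[m]->segs[n], each covering one chunk of the registration, and a lookup advances the (m, n) pair until the requested byte offset lands inside a segment. A self-contained sketch of that walk, using simplified stand-in types (seg, seg_map, SEGSZ) rather than the real struct rvt_seg/RVT_SEGSZ definitions, and with the bounds check against the region length elided.

```c
#include <stddef.h>
#include <stdint.h>

#define SEGSZ 8			/* stand-in for RVT_SEGSZ */

/* Simplified stand-ins for struct rvt_seg and its containing map array. */
struct seg {
	void *vaddr;
	size_t length;
};

struct seg_map {
	struct seg segs[SEGSZ];
};

/* Resolve a byte offset inside the two-level segment table, the way
 * rvt_lkey_ok() advances (m, n) until "off" falls within one segment. */
static void *resolve_offset(struct seg_map **map, size_t off)
{
	size_t m = 0, n = 0;

	while (off >= map[m]->segs[n].length) {
		off -= map[m]->segs[n].length;
		if (++n >= SEGSZ) {
			m++;
			n = 0;
		}
	}
	return (uint8_t *)map[m]->segs[n].vaddr + off;
}
```
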
/Linux-v5.4/net/xfrm/

D | xfrm_output.c
     536  struct sk_buff *segs;  in xfrm_output_gso() local
     540  segs = skb_gso_segment(skb, 0);  in xfrm_output_gso()
     542  if (IS_ERR(segs))  in xfrm_output_gso()
     543  return PTR_ERR(segs);  in xfrm_output_gso()
     544  if (segs == NULL)  in xfrm_output_gso()
     548  struct sk_buff *nskb = segs->next;  in xfrm_output_gso()
     551  skb_mark_not_on_list(segs);  in xfrm_output_gso()
     552  err = xfrm_output2(net, sk, segs);  in xfrm_output_gso()
     559  segs = nskb;  in xfrm_output_gso()
     560  } while (segs);  in xfrm_output_gso()

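xfrm_output_gso() is the transmit-side variant of the same list walk: segment first, then push each resulting segment through the rest of the output path, aborting and freeing whatever is left if any segment fails. A minimal sketch of that loop, with the per-segment transmit reduced to a hypothetical output_one() callback instead of the real xfrm_output2().

```c
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Segment a GSO skb and push every resulting segment through a
 * per-packet output function, stopping on the first error. */
static int output_gso(struct sk_buff *skb,
		      int (*output_one)(struct sk_buff *skb))
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);			/* the super-packet itself is done */
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)
		return -EINVAL;

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		skb_mark_not_on_list(segs);
		err = output_one(segs);
		if (err) {
			kfree_skb_list(nskb);	/* drop the unsent remainder */
			return err;
		}
		segs = nskb;
	} while (segs);

	return 0;
}
```
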
D | xfrm_device.c
     112  struct sk_buff *segs;  in validate_xmit_xfrm() local
     118  segs = skb_gso_segment(skb, esp_features);  in validate_xmit_xfrm()
     119  if (IS_ERR(segs)) {  in validate_xmit_xfrm()
     125  skb = segs;  in validate_xmit_xfrm()

/Linux-v5.4/include/rdma/

D | rdmavt_mr.h
      70  struct rvt_seg segs[RVT_SEGSZ];  member
     175  sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;  in rvt_update_sge()
     176  sge->length = sge->mr->map[sge->m]->segs[sge->n].length;  in rvt_update_sge()

/Linux-v5.4/fs/

D | binfmt_elf_fdpic.c
     764  seg = loadmap->segs;  in elf_fdpic_map_file()
     785  seg = loadmap->segs;  in elf_fdpic_map_file()
     810  seg = loadmap->segs;  in elf_fdpic_map_file()
     831  seg = loadmap->segs;  in elf_fdpic_map_file()
     867  mseg = loadmap->segs;  in elf_fdpic_map_file()
     895  seg = loadmap->segs;  in elf_fdpic_map_file()
     926  seg = params->loadmap->segs;  in elf_fdpic_map_file_constdisp_on_uclinux()
    1017  seg = params->loadmap->segs;  in elf_fdpic_map_file_by_direct_mmap()
    1290  static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)  in fill_elf_fdpic_header() argument
    1308  elf->e_phnum = segs;  in fill_elf_fdpic_header()
     [all …]

D | binfmt_elf.c
    1426  static void fill_elf_header(struct elfhdr *elf, int segs,  in fill_elf_header() argument
    1444  elf->e_phnum = segs;  in fill_elf_header()
    2162  elf_addr_t e_shoff, int segs)  in fill_extnum_info() argument
    2174  shdr4extnum->sh_info = segs;  in fill_extnum_info()
    2188  int segs, i;  in elf_core_dump() local
    2220  segs = current->mm->map_count;  in elf_core_dump()
    2221  segs += elf_core_extra_phdrs();  in elf_core_dump()
    2225  segs++;  in elf_core_dump()
    2228  segs++;  in elf_core_dump()
    2233  e_phnum = segs > PN_XNUM ? PN_XNUM : segs;  in elf_core_dump()
     [all …]

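In the ELF code, segs counts program headers for a core dump: one per VMA plus architecture extras and the note segment, and because e_phnum is only 16 bits the count is clamped to PN_XNUM, with the true value spilled into section header 0's sh_info (fill_extnum_info() in the kernel). A compact userspace illustration of that clamp under the standard <elf.h> constants; the simplification of always filling sh_info is mine.

```c
#include <elf.h>
#include <stdio.h>

/* Decide what goes into e_phnum for a dump with "segs" program headers,
 * mirroring the binfmt_elf.c clamp: counts that do not fit in the 16-bit
 * field are replaced by PN_XNUM, and the real count travels in sh_info. */
static Elf64_Half phnum_for(int segs, Elf64_Word *sh_info_out)
{
	*sh_info_out = (Elf64_Word)segs;	/* always the true count here */
	return segs > PN_XNUM ? PN_XNUM : (Elf64_Half)segs;
}

int main(void)
{
	Elf64_Word sh_info;
	int segs = 70000;			/* e.g. a process with ~70k VMAs */
	Elf64_Half e_phnum = phnum_for(segs, &sh_info);

	printf("e_phnum=%u sh_info=%u\n", (unsigned int)e_phnum,
	       (unsigned int)sh_info);
	return 0;
}
```
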
/Linux-v5.4/drivers/staging/wusbcore/

D | wa-xfer.c
     143  u8 segs, segs_submitted, segs_done;  member
     177  for (cnt = 0; cnt < xfer->segs; cnt++) {  in wa_xfer_destroy()
     335  for (cnt = 0; cnt < xfer->segs; cnt++) {  in __wa_xfer_is_done()
     349  && cnt != xfer->segs-1)  in __wa_xfer_is_done()
     455  while (seg_index < xfer->segs) {  in __wa_xfer_abort_cb()
     626  xfer->segs = 0;  in __wa_xfer_setup_sizes()
     635  ++xfer->segs;  in __wa_xfer_setup_sizes()
     638  xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,  in __wa_xfer_setup_sizes()
     640  if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)  in __wa_xfer_setup_sizes()
     641  xfer->segs = 1;  in __wa_xfer_setup_sizes()
     [all …]

/Linux-v5.4/include/uapi/linux/

D | elf-fdpic.h
      30  struct elf32_fdpic_loadseg segs[];  member

/Linux-v5.4/include/net/

D | udp.h
     471  struct sk_buff *segs;  in udp_rcv_segment() local
     482  segs = __skb_gso_segment(skb, features, false);  in udp_rcv_segment()
     483  if (IS_ERR_OR_NULL(segs)) {  in udp_rcv_segment()
     493  return segs;  in udp_rcv_segment()

D | ip.h
     494  u32 ip_idents_reserve(u32 hash, int segs);
     495  void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
     498  struct sock *sk, int segs)  in ip_select_ident_segs() argument
     510  inet_sk(sk)->inet_id += segs;  in ip_select_ident_segs()
     515  __ip_select_ident(net, iph, segs);  in ip_select_ident_segs()

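In ip.h, segs is again a count: when one GSO skb will become several IP packets on the wire, ip_select_ident_segs() reserves that many IP IDs in one go, either by bumping the connected socket's inet_id counter or by asking the shared generator via __ip_select_ident()/ip_idents_reserve(). A reduced standalone sketch of that reserve-a-block idea, using a single plain atomic counter in place of the kernel's hashed, per-destination ip_idents array.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's IP ID generator: reserve "segs" consecutive
 * IDs at once and return the first of the block, so a GSO packet gets
 * one ID per resulting segment. */
static atomic_uint ip_ident_pool;

static uint16_t ident_reserve(int segs)
{
	/* fetch_add returns the old value, so this caller owns
	 * [old, old + segs) and the next caller starts after it. */
	unsigned int old = atomic_fetch_add(&ip_ident_pool,
					    (unsigned int)segs);

	return (uint16_t)old;
}

int main(void)
{
	unsigned int first = ident_reserve(3);	/* 3-segment GSO packet */
	unsigned int next = ident_reserve(1);	/* following single packet */

	/* "first" owns IDs first..first+2; "next" begins right after them. */
	printf("first=%u next=%u\n", first, next);
	return 0;
}
```
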
/Linux-v5.4/drivers/net/

D | tap.c
     343  struct sk_buff *segs = __skb_gso_segment(skb, features, false);  in tap_handle_frame() local
     345  if (IS_ERR(segs))  in tap_handle_frame()
     348  if (!segs) {  in tap_handle_frame()
     355  while (segs) {  in tap_handle_frame()
     356  struct sk_buff *nskb = segs->next;  in tap_handle_frame()
     358  segs->next = NULL;  in tap_handle_frame()
     359  if (ptr_ring_produce(&q->ring, segs)) {  in tap_handle_frame()
     360  kfree_skb(segs);  in tap_handle_frame()
     364  segs = nskb;  in tap_handle_frame()

/Linux-v5.4/drivers/net/ethernet/broadcom/bnxt/

D | bnxt_ethtool.h
      40  u16 segs;  member