/Linux-v5.4/net/sched/ |
D | sch_choke.c |
      155  static bool choke_match_flow(struct sk_buff *skb1,    in choke_match_flow() argument
      160  if (skb1->protocol != skb2->protocol)    in choke_match_flow()
      163  if (!choke_skb_cb(skb1)->keys_valid) {    in choke_match_flow()
      164  choke_skb_cb(skb1)->keys_valid = 1;    in choke_match_flow()
      165  skb_flow_dissect_flow_keys(skb1, &temp, 0);    in choke_match_flow()
      166  make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);    in choke_match_flow()
      175  return !memcmp(&choke_skb_cb(skb1)->keys,    in choke_match_flow()
      177  sizeof(choke_skb_cb(skb1)->keys));    in choke_match_flow()
|
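
The sch_choke.c hit shows how the CHOKe qdisc decides whether two packets belong to the same flow: each skb is dissected into flow keys, a digest is cached in the skb control block, and the digests are compared with memcmp(). A minimal sketch of that comparison, without the choke_skb_cb() caching layer (the helper name and on-stack digests here are illustrative, not the scheduler's code):

#include <linux/skbuff.h>
#include <net/flow_dissector.h>
#include <linux/string.h>

/* Illustrative helper: true if two skbs hash to the same flow digest.
 * Unlike sch_choke.c, this recomputes the digest on every call instead
 * of caching it in the skb cb[], so it is a sketch of the idea only.
 */
static bool same_flow(struct sk_buff *a, struct sk_buff *b)
{
	struct flow_keys keys;
	struct flow_keys_digest da, db;

	if (a->protocol != b->protocol)
		return false;

	skb_flow_dissect_flow_keys(a, &keys, 0);
	make_flow_keys_digest(&da, &keys);

	skb_flow_dissect_flow_keys(b, &keys, 0);
	make_flow_keys_digest(&db, &keys);

	return !memcmp(&da, &db, sizeof(da));
}
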
/Linux-v5.4/net/core/ |
D | skbuff.c |
      244   fclones = container_of(skb, struct sk_buff_fclones, skb1);    in __alloc_skb()
      627   fclones = container_of(skb, struct sk_buff_fclones, skb1);    in kfree_skbmem()
      1434  skb1);    in skb_clone()
      3191  struct sk_buff* skb1,    in skb_split_inside_header() argument
      3196  skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),    in skb_split_inside_header()
      3200  skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];    in skb_split_inside_header()
      3202  skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;    in skb_split_inside_header()
      3204  skb1->data_len = skb->data_len;    in skb_split_inside_header()
      3205  skb1->len += skb1->data_len;    in skb_split_inside_header()
      3212  struct sk_buff* skb1,    in skb_split_no_header() argument
      [all …]
|
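
In the skbuff.c hits, skb1 is the second half of a split: skb_split() (declared in skbuff.h, see the include/linux entry further down) leaves the first len bytes in the original skb and moves the rest into skb1, with skb_split_inside_header() handling the case where the split point falls inside the linear area. A hedged sketch of how a caller might use the exported skb_split(); the wrapper name, the allocation size and the early-exit check are assumptions, not taken from skbuff.c:

#include <linux/skbuff.h>

/* Illustrative only: split 'skb' so that the first 'hdr_len' bytes stay
 * in 'skb' and the remainder ends up in a freshly allocated skb.
 * Callers such as tcp_fragment() in the TCP output path use skb_split()
 * in a similar way, with their own sizing of the second buffer.
 */
static struct sk_buff *split_at(struct sk_buff *skb, u32 hdr_len)
{
	struct sk_buff *tail;

	if (skb->len <= hdr_len)
		return NULL;		/* nothing to move */

	/* Sized so the linear-header case always has enough tailroom. */
	tail = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
	if (!tail)
		return NULL;

	skb_split(skb, tail, hdr_len);	/* tail now holds bytes [hdr_len, len) */
	return tail;
}
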
/Linux-v5.4/net/llc/ |
D | llc_sap.c |
      363  struct sk_buff *skb1;    in llc_do_mcast() local
      367  skb1 = skb_clone(skb, GFP_ATOMIC);    in llc_do_mcast()
      368  if (!skb1) {    in llc_do_mcast()
      373  llc_sap_rcv(sap, skb1, stack[i]);    in llc_do_mcast()
|
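
llc_do_mcast() shows the standard fan-out pattern for handing one frame to several consumers: the skb is cloned once per receiver and each clone is delivered independently, with a failed clone simply skipping that receiver. The batman-adv and vxlan hits further down use the same idiom on the transmit side. A reduced sketch of the pattern (the deliver() callback and the receiver array are placeholders, not LLC code):

#include <linux/skbuff.h>
#include <linux/gfp.h>

/* Illustrative fan-out: hand a private clone of 'skb' to each of 'n'
 * receivers.  skb_clone() shares the packet data, so no payload copy is
 * made, but every receiver gets an sk_buff it may consume or free on
 * its own.
 */
static void fanout(struct sk_buff *skb, void *receivers[], int n,
		   void (*deliver)(struct sk_buff *, void *))
{
	int i;

	for (i = 0; i < n; i++) {
		struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

		if (!clone)
			continue;	/* out of memory: skip this receiver only */
		deliver(clone, receivers[i]);
	}
	kfree_skb(skb);			/* drop our own reference to the original */
}
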
/Linux-v5.4/net/batman-adv/ |
D | send.c |
      856  struct sk_buff *skb1;    in batadv_send_outstanding_bcast_packet() local
      939  skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);    in batadv_send_outstanding_bcast_packet()
      940  if (skb1)    in batadv_send_outstanding_bcast_packet()
      941  batadv_send_broadcast_skb(skb1, hard_iface);    in batadv_send_outstanding_bcast_packet()
|
/Linux-v5.4/net/ipv4/ |
D | tcp_input.c |
      4544  struct sk_buff *skb1;    in tcp_data_queue_ofo() local
      4601  skb1 = rb_to_skb(parent);    in tcp_data_queue_ofo()
      4602  if (before(seq, TCP_SKB_CB(skb1)->seq)) {    in tcp_data_queue_ofo()
      4606  if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {    in tcp_data_queue_ofo()
      4607  if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {    in tcp_data_queue_ofo()
      4616  if (after(seq, TCP_SKB_CB(skb1)->seq)) {    in tcp_data_queue_ofo()
      4618  tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);    in tcp_data_queue_ofo()
      4623  rb_replace_node(&skb1->rbnode, &skb->rbnode,    in tcp_data_queue_ofo()
      4626  TCP_SKB_CB(skb1)->seq,    in tcp_data_queue_ofo()
      4627  TCP_SKB_CB(skb1)->end_seq);    in tcp_data_queue_ofo()
      [all …]
|
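
tcp_data_queue_ofo() keeps out-of-order segments in an rbtree ordered by sequence number; skb1 is the existing node the arriving segment is compared against using the wrap-safe before()/after() helpers, with already-covered data reported via tcp_dsack_set() and a fully-covered node swapped out with rb_replace_node(). A stripped-down sketch of just the ordered-insert walk, leaving out the duplicate, overlap and coalescing handling the real function performs:

#include <linux/rbtree.h>
#include <net/tcp.h>		/* before(), TCP_SKB_CB(), rb_to_skb() */

/* Illustrative only: insert 'skb' into an rbtree of segments keyed by
 * TCP_SKB_CB(skb)->seq.  tcp_data_queue_ofo() additionally detects
 * duplicates, partial overlaps and merge opportunities along the way.
 */
static void ofo_insert(struct rb_root *root, struct sk_buff *skb)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	u32 seq = TCP_SKB_CB(skb)->seq;

	while (*p) {
		struct sk_buff *skb1;

		parent = *p;
		skb1 = rb_to_skb(parent);
		if (before(seq, TCP_SKB_CB(skb1)->seq))
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, root);
}
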
D | icmp.c |
      379  struct sk_buff *skb1;    in icmp_push_reply() local
      381  skb_queue_walk(&sk->sk_write_queue, skb1) {    in icmp_push_reply()
      382  csum = csum_add(csum, skb1->csum);    in icmp_push_reply()
|
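
icmp_push_reply() finishes a reply that was built with ip_append_data(): when the data ended up spread over more than one buffer on the socket write queue, it walks the queue with skb_queue_walk() and folds each fragment's ->csum into a running checksum with csum_add() before the final ICMP checksum is written. A minimal sketch of that accumulation step (the function name and parameters are placeholders; the caller must serialize against concurrent queue changes):

#include <linux/skbuff.h>
#include <net/checksum.h>

/* Illustrative: fold the software checksum of every skb on 'queue'
 * into 'csum'.  Mirrors the accumulation loop in icmp_push_reply(),
 * minus the ICMP header fixup around it.
 */
static __wsum sum_queue(struct sk_buff_head *queue, __wsum csum)
{
	struct sk_buff *skb;

	skb_queue_walk(queue, skb)
		csum = csum_add(csum, skb->csum);

	return csum;
}
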
/Linux-v5.4/Documentation/networking/ |
D | x25-iface.txt |
      78  preserved. Even if a device driver calls netif_rx(skb1) and later
      80  earlier that skb1.
|
D | snmp_counter.rst |
      729  10 to 15, skb1 has seq 10 to 13, skb2 has seq 14 to 20. The seq 14 and
      730  15 in skb2 would be moved to skb1. This operation is 'shift'. If a
      731  SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, skb2 has
      732  seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
|
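
The snmp_counter.rst excerpt describes the TCPSackShifted and TCPSackMerged counters: when a SACK block covers only a prefix of the next skb, the acknowledged bytes are shifted into the previous skb; when it covers all of it, the two skbs are merged. A toy illustration of that decision using plain sequence ranges rather than real skbs (all names here are made up; the kernel logic lives around tcp_shift_skb_data() and applies many more conditions, including wrap-safe comparisons):

#include <stdint.h>
#include <stdio.h>

/* Toy model of the shift/merge decision described above.  Ranges are
 * [start, end) in sequence-number space; wraparound is ignored here.
 */
struct seg { uint32_t start, end; };

static void sack_one(struct seg *prev, struct seg *next, uint32_t sack_end)
{
	if (sack_end >= next->end) {
		/* SACK covers all of 'next': merge it into 'prev' (TCPSackMerged). */
		prev->end = next->end;
		next->start = next->end = 0;
	} else if (sack_end > next->start) {
		/* SACK covers only a prefix of 'next': shift those bytes (TCPSackShifted). */
		prev->end = sack_end;
		next->start = sack_end;
	}
}

int main(void)
{
	struct seg skb1 = { 10, 14 };	/* seq 10..13 */
	struct seg skb2 = { 14, 21 };	/* seq 14..20 */

	sack_one(&skb1, &skb2, 16);	/* SACK 10..15: seq 14 and 15 move to skb1 */
	printf("skb1 [%u,%u) skb2 [%u,%u)\n",
	       skb1.start, skb1.end, skb2.start, skb2.end);
	return 0;
}
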
/Linux-v5.4/drivers/net/wireless/ath/ath6kl/ |
D | txrx.c |
      1315  struct sk_buff *skb1 = NULL;    in ath6kl_rx() local
      1572  skb1 = skb_copy(skb, GFP_ATOMIC);    in ath6kl_rx()
      1583  skb1 = skb;    in ath6kl_rx()
      1590  if (skb1)    in ath6kl_rx()
      1591  ath6kl_data_tx(skb1, vif->ndev);    in ath6kl_rx()
|
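
The ath6kl receive path shows a copy-or-reuse decision for AP-mode intra-BSS forwarding: when the frame has to go both up the local stack and back out over the air, it is duplicated with skb_copy() (a deep copy, since the transmit path may modify headers); when it only needs to be retransmitted to another station, the original skb is reused and nothing is delivered locally. A condensed sketch of that decision (the flag and the deliver/transmit callbacks are placeholders for the driver's own logic):

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Illustrative only: decide between copy-and-forward and pure forwarding
 * for a frame received in AP mode.
 */
static void rx_forward(struct sk_buff *skb, struct net_device *dev,
		       bool deliver_locally_too,
		       void (*deliver)(struct sk_buff *, struct net_device *),
		       void (*transmit)(struct sk_buff *, struct net_device *))
{
	struct sk_buff *txskb;

	if (deliver_locally_too) {
		/* Frame goes both up the stack and back over the air:
		 * deep-copy for transmit, keep the original for receive.
		 */
		txskb = skb_copy(skb, GFP_ATOMIC);
	} else {
		/* Pure forwarding: reuse the original, deliver nothing locally. */
		txskb = skb;
		skb = NULL;
	}

	if (txskb)
		transmit(txskb, dev);
	if (skb)
		deliver(skb, dev);
}
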
/Linux-v5.4/drivers/atm/ |
D | iphase.c |
      641  struct sk_buff *skb = NULL, *skb1 = NULL;    in ia_tx_poll() local
      666  skb1 = skb_dequeue(&iavcc->txing_skb);    in ia_tx_poll()
      667  while (skb1 && (skb1 != skb)) {    in ia_tx_poll()
      668  if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {    in ia_tx_poll()
      672  if ((vcc->pop) && (skb1->len != 0))    in ia_tx_poll()
      674  vcc->pop(vcc, skb1);    in ia_tx_poll()
      676  (long)skb1);)    in ia_tx_poll()
      679  dev_kfree_skb_any(skb1);    in ia_tx_poll()
      680  skb1 = skb_dequeue(&iavcc->txing_skb);    in ia_tx_poll()
      682  if (!skb1) {    in ia_tx_poll()
|
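
ia_tx_poll() reclaims completed transmit buffers by dequeuing entries from the per-VCC txing_skb list until it reaches the skb the hardware just reported as done, warning about entries not yet marked IA_TX_DONE and returning each one through vcc->pop() or dev_kfree_skb_any(). A reduced sketch of that drain-until-target loop, with the driver-specific state checks and vcc->pop() bookkeeping replaced by a plain free (names here are placeholders):

#include <linux/skbuff.h>

/* Illustrative: free every skb queued ahead of 'done' and stop once
 * 'done' itself has been dequeued.  Returns true if 'done' was found.
 */
static bool reap_until(struct sk_buff_head *txing, struct sk_buff *done)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(txing)) != NULL) {
		if (skb == done) {
			dev_kfree_skb_any(skb);
			return true;
		}
		/* An earlier buffer is being reclaimed out of order; the
		 * real driver logs this case before releasing the skb.
		 */
		dev_kfree_skb_any(skb);
	}
	return false;		/* 'done' was not on the queue */
}
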
/Linux-v5.4/drivers/net/ethernet/amd/ |
D | ni65.c |
      1098  struct sk_buff *skb1 = p->recv_skb[p->rmdnum];    in ni65_recv_intr() local
      1102  skb = skb1;    in ni65_recv_intr()
|
/Linux-v5.4/drivers/net/ethernet/qlogic/ |
D | qla3xxx.c |
      2046  struct sk_buff *skb1 = NULL, *skb2;    in ql_process_macip_rx_intr() local
      2060  skb1 = lrg_buf_cb1->skb;    in ql_process_macip_rx_intr()
      2062  if (*((u16 *) skb1->data) != 0xFFFF)    in ql_process_macip_rx_intr()
      2083  skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,    in ql_process_macip_rx_intr()
|
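
In ql_process_macip_rx_intr() a received frame is spread over two large buffers: skb1 holds the header portion, the driver peeks at its first 16 bits, and then copies header bytes out of skb1 into the data buffer with skb_copy_from_linear_data_offset(). A small sketch of that helper's use; the wrapper, offsets and lengths here are illustrative, not the driver's exact layout:

#include <linux/skbuff.h>

/* Illustrative: copy 'len' bytes that start 'offset' bytes into the
 * linear data of 'hdr_skb' to the front of 'data_skb'.  Assumes
 * data_skb has at least 'len' bytes of headroom, as the receive path
 * would have to arrange.
 */
static void prepend_header(const struct sk_buff *hdr_skb,
			   struct sk_buff *data_skb,
			   int offset, unsigned int len)
{
	skb_copy_from_linear_data_offset(hdr_skb, offset,
					 skb_push(data_skb, len), len);
}
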
/Linux-v5.4/include/linux/ |
D | skbuff.h |
      1061  struct sk_buff skb1;    member
      1082  fclones = container_of(skb, struct sk_buff_fclones, skb1);    in skb_fclone_busy()
      3518  void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
|
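
skbuff.h defines struct sk_buff_fclones, which embeds two sk_buffs (skb1 and skb2) plus a reference count in one allocation so that a "fast clone" does not need a second allocation; given a pointer to the embedded skb1, container_of() recovers the surrounding structure, which is what __alloc_skb(), kfree_skbmem() and skb_fclone_busy() do in the hits above. A minimal sketch of that recovery (the wrapper function is illustrative):

#include <linux/skbuff.h>
#include <linux/kernel.h>	/* container_of() */

/* Illustrative: map the first embedded skb of a fast-clone pair back to
 * its containing sk_buff_fclones.  Only meaningful for skbs that were
 * allocated with the fclone flag, as __alloc_skb() can do.
 */
static struct sk_buff_fclones *to_fclones(struct sk_buff *skb)
{
	return container_of(skb, struct sk_buff_fclones, skb1);
}

skb_fclone_busy() (line 1082 above) performs exactly this container_of() step before checking whether the companion clone is still in use.
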
/Linux-v5.4/drivers/net/ |
D | vxlan.c |
      2698  struct sk_buff *skb1;    in vxlan_xmit() local
      2704  skb1 = skb_clone(skb, GFP_ATOMIC);    in vxlan_xmit()
      2705  if (skb1)    in vxlan_xmit()
      2706  vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);    in vxlan_xmit()
|