Searched refs:TCP_SKB_CB (Results 1 – 16 of 16), sorted by relevance
/Linux-v5.4/net/ipv4/
tcp_input.c
   276  switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {   in __tcp_ecn_check_ce()
   585  if (TCP_SKB_CB(skb)->end_seq -   in tcp_rcv_rtt_measure_ts()
   586  TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {   in tcp_rcv_rtt_measure_ts()
   919  before(TCP_SKB_CB(skb)->seq,   in tcp_verify_retransmit_hint()
   920  TCP_SKB_CB(tp->retransmit_skb_hint)->seq))   in tcp_verify_retransmit_hint()
   933  __u8 sacked = TCP_SKB_CB(skb)->sacked;   in tcp_sum_lost()
   942  if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {   in tcp_skb_mark_lost()
   947  TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;   in tcp_skb_mark_lost()
   956  if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {   in tcp_skb_mark_lost_uncond_verify()
   958  TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;   in tcp_skb_mark_lost_uncond_verify()
   [all …]
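The tcp_skb_mark_lost() hits above are the retransmission scoreboard: every queued skb carries its own TCPCB_* state bits in the control block, and marking a segment lost is a per-skb flag flip plus a counter update. A minimal userspace sketch of that bookkeeping; the flag values mirror the kernel's TCPCB_* bits, but the structs are simplified stand-ins rather than the kernel's sk_buff/tcp_sock:

    /* Simplified model of the loss-marking pattern shown above. */
    #include <stdint.h>

    #define TCPCB_SACKED_ACKED 0x01   /* peer acknowledged this segment via SACK */
    #define TCPCB_LOST         0x04   /* segment is presumed lost */

    struct seg_cb {                   /* stand-in for per-skb tcp_skb_cb state */
        uint32_t seq, end_seq;
        uint8_t  sacked;              /* TCPCB_* scoreboard bits */
    };

    struct conn_counters {            /* stand-in for the tcp_sock counters */
        uint32_t lost_out;            /* packets currently marked lost */
    };

    /* Mark a segment lost at most once, and never if SACK already covered it. */
    static void mark_lost(struct conn_counters *tp, struct seg_cb *cb,
                          uint32_t pcount)
    {
        if (!(cb->sacked & (TCPCB_LOST | TCPCB_SACKED_ACKED))) {
            tp->lost_out += pcount;
            cb->sacked |= TCPCB_LOST;
        }
    }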
tcp_output.c
    70  WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);   in tcp_event_new_data_sent()
   309  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;   in tcp_ecn_send_synack()
   311  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;   in tcp_ecn_send_synack()
   335  TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;   in tcp_ecn_send_syn()
   348  TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);   in tcp_ecn_clear_syn()
   369  !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {   in tcp_ecn_send()
   392  TCP_SKB_CB(skb)->tcp_flags = flags;   in tcp_init_nondata_skb()
   393  TCP_SKB_CB(skb)->sacked = 0;   in tcp_init_nondata_skb()
   397  TCP_SKB_CB(skb)->seq = seq;   in tcp_init_nondata_skb()
   400  TCP_SKB_CB(skb)->end_seq = seq;   in tcp_init_nondata_skb()
   [all …]
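On the send side the control block is where the eventual TCP header flags and the sequence range covered by the skb are staged before the header is built. The tcp_init_nondata_skb() hits set up a data-less segment (pure ACK, RST, SYN); a sketch of that pattern with stand-in types, assuming (as TCP itself requires) that SYN and FIN each consume one unit of sequence space:

    /* Model of initializing the control block for a data-less segment. */
    #include <stdint.h>

    #define TCPHDR_FIN 0x01
    #define TCPHDR_SYN 0x02
    #define TCPHDR_ACK 0x10

    struct seg_cb {
        uint32_t seq, end_seq;
        uint8_t  tcp_flags;           /* TCPHDR_* bits to put in the header */
        uint8_t  sacked;
    };

    static void init_nondata_cb(struct seg_cb *cb, uint32_t seq, uint8_t flags)
    {
        cb->tcp_flags = flags;
        cb->sacked = 0;
        cb->seq = seq;
        if (flags & (TCPHDR_SYN | TCPHDR_FIN))
            seq++;                    /* SYN/FIN occupy sequence space */
        cb->end_seq = seq;            /* for a pure ACK: end_seq == seq */
    }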
tcp_minisocks.c
   117  !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,   in tcp_timewait_state_process()
   126  if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))   in tcp_timewait_state_process()
   131  !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||   in tcp_timewait_state_process()
   132  TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {   in tcp_timewait_state_process()
   141  TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)   in tcp_timewait_state_process()
   146  tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;   in tcp_timewait_state_process()
   174  (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&   in tcp_timewait_state_process()
   175  (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {   in tcp_timewait_state_process()
   219  (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||   in tcp_timewait_state_process()
   225  TCP_SKB_CB(skb)->tcp_tw_isn = isn;   in tcp_timewait_state_process()
   [all …]
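All of these TIME-WAIT checks compare 32-bit sequence numbers that wrap, so ordering is decided by a signed difference, which is what before() and after() in tcp.h do. A small self-contained sketch of that comparison and of the "old or duplicate segment" test visible in the end_seq/rcv_nxt hits above:

    /* Wraparound-safe sequence comparison, plus a TIME-WAIT style test. */
    #include <stdint.h>
    #include <stdbool.h>

    static bool seq_before(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;   /* same idea as before() in tcp.h */
    }

    static bool seq_after(uint32_t a, uint32_t b)
    {
        return seq_before(b, a);
    }

    /* Old or pure-duplicate segment: nothing beyond what the TIME-WAIT
     * socket already received, or an empty segment (seq == end_seq). */
    static bool tw_dup_or_old(uint32_t seq, uint32_t end_seq, uint32_t rcv_nxt)
    {
        return !seq_after(end_seq, rcv_nxt) || end_seq == seq;
    }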
tcp_rate.c
    65  TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;   in tcp_rate_skb_sent()
    66  TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;   in tcp_rate_skb_sent()
    67  TCP_SKB_CB(skb)->tx.delivered = tp->delivered;   in tcp_rate_skb_sent()
    68  TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;   in tcp_rate_skb_sent()
    82  struct tcp_skb_cb *scb = TCP_SKB_CB(skb);   in tcp_rate_skb_delivered()
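tcp_rate_skb_sent() snapshots the connection's delivery counters into the skb's control block at transmit time; when that skb is later ACKed, the delta between the live counters and the snapshot yields a bandwidth sample. A rough userspace sketch of the idea, deliberately simplified (the kernel's rate generation is more careful about which time interval it divides by, so treat this as an assumption-laden illustration):

    /* Per-skb delivery snapshot and a naive rate sample at ACK time. */
    #include <stdint.h>

    struct tx_sample {                 /* like the tx fields shown above */
        uint64_t delivered_mstamp;     /* timestamp of last delivery update */
        uint32_t delivered;            /* packets delivered so far */
        uint8_t  is_app_limited;       /* sender had no data to send? */
    };

    struct rate_state {                /* stand-in for the tcp_sock counters */
        uint64_t delivered_mstamp;
        uint32_t delivered;
        uint32_t app_limited;
    };

    static void rate_skb_sent(const struct rate_state *tp, struct tx_sample *tx)
    {
        tx->delivered_mstamp = tp->delivered_mstamp;
        tx->delivered        = tp->delivered;
        tx->is_app_limited   = tp->app_limited ? 1 : 0;
    }

    /* On ACK: packets delivered since this skb was sent, per microsecond. */
    static double rate_sample(const struct rate_state *tp,
                              const struct tx_sample *tx, uint64_t now_us)
    {
        uint32_t delivered   = tp->delivered - tx->delivered;
        uint64_t interval_us = now_us - tx->delivered_mstamp;
        return interval_us ? (double)delivered / (double)interval_us : 0.0;
    }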
tcp_fastopen.c
   167  if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)   in tcp_fastopen_add_skb()
   187  TCP_SKB_CB(skb)->seq++;   in tcp_fastopen_add_skb()
   188  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;   in tcp_fastopen_add_skb()
   190  tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;   in tcp_fastopen_add_skb()
   199  if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)   in tcp_fastopen_add_skb()
   277  tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;   in tcp_fastopen_create_child()
   343  bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;   in tcp_try_fastopen()
   534  if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {   in tcp_fastopen_active_disable_ofo_check()
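The tcp_fastopen_add_skb() hits show the control block being edited in place: a SYN that carried data has its SYN bit and its one unit of sequence space stripped so the remaining payload can be queued as ordinary in-order data, and rcv_nxt jumps to end_seq. A sketch of that adjustment with the same stand-in types as the earlier examples:

    /* Simplified model of consuming the SYN from a TFO SYN+data segment. */
    #include <stdint.h>

    #define TCPHDR_SYN 0x02

    struct seg_cb {
        uint32_t seq, end_seq;
        uint8_t  tcp_flags;
    };

    static void fastopen_consume_syn(struct seg_cb *cb, uint32_t *rcv_nxt)
    {
        cb->seq++;                       /* step past the SYN's sequence slot */
        cb->tcp_flags &= ~TCPHDR_SYN;    /* what remains is plain data */
        *rcv_nxt = cb->end_seq;          /* receiver now expects the next byte */
    }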
tcp_ipv4.c
   782  skb, &TCP_SKB_CB(skb)->header.h4.opt,   in tcp_v4_send_reset()
   878  skb, &TCP_SKB_CB(skb)->header.h4.opt,   in tcp_v4_send_ack()
  1696  if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||   in tcp_add_backlog()
  1697  TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||   in tcp_add_backlog()
  1698  ((TCP_SKB_CB(tail)->tcp_flags |   in tcp_add_backlog()
  1699  TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||   in tcp_add_backlog()
  1700  !((TCP_SKB_CB(tail)->tcp_flags &   in tcp_add_backlog()
  1701  TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||   in tcp_add_backlog()
  1702  ((TCP_SKB_CB(tail)->tcp_flags ^   in tcp_add_backlog()
  1703  TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||   in tcp_add_backlog()
   [all …]
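The long condition in tcp_add_backlog() decides whether a segment arriving while the socket is owned by the user can be merged into the backlog tail instead of being queued separately: it must start exactly where the tail ends, carry the same DSCP/ECN byte, be a plain ACK-bearing segment (no SYN/RST/URG), and agree on the ECN echo bits. A boolean sketch of just the checks visible above (the real function also looks at truncation, memory limits, TCP options and more):

    /* Simplified coalescing gate modeled on the conditions listed above. */
    #include <stdint.h>
    #include <stdbool.h>

    #define TCPHDR_SYN 0x02
    #define TCPHDR_RST 0x04
    #define TCPHDR_ACK 0x10
    #define TCPHDR_URG 0x20
    #define TCPHDR_ECE 0x40
    #define TCPHDR_CWR 0x80

    struct seg_cb {
        uint32_t seq, end_seq;
        uint8_t  tcp_flags;
        uint8_t  ip_dsfield;           /* TOS/DSCP + ECN bits from the IP header */
    };

    static bool can_coalesce(const struct seg_cb *tail, const struct seg_cb *skb)
    {
        if (tail->end_seq != skb->seq)                         /* must be contiguous  */
            return false;
        if (tail->ip_dsfield != skb->ip_dsfield)               /* same DSCP/ECN byte  */
            return false;
        if ((tail->tcp_flags | skb->tcp_flags) &
            (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG))            /* no special segments */
            return false;
        if (!((tail->tcp_flags & skb->tcp_flags) & TCPHDR_ACK))/* both must carry ACK */
            return false;
        if ((tail->tcp_flags ^ skb->tcp_flags) &
            (TCPHDR_ECE | TCPHDR_CWR))                         /* ECN echo must match */
            return false;
        return true;
    }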
tcp_recovery.c
    10  if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {   in tcp_mark_skb_lost()
    12  TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;   in tcp_mark_skb_lost()
    86  struct tcp_skb_cb *scb = TCP_SKB_CB(skb);   in tcp_rack_detect_loss()
   241  if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)   in tcp_newreno_mark_lost()
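tcp_mark_skb_lost() handles the case where the copy that was already retransmitted is itself presumed lost: the TCPCB_SACKED_RETRANS bit is cleared so the segment becomes eligible for another retransmission, and the retransmit accounting is rolled back. A sketch of that rollback; the retrans_out adjustment follows my reading of the kernel and should be taken as an assumption, not the exact function:

    /* Simplified model of "the retransmission was lost too". */
    #include <stdint.h>

    #define TCPCB_SACKED_RETRANS 0x02   /* segment was retransmitted */

    struct seg_cb { uint8_t sacked; };
    struct retrans_counters { uint32_t retrans_out; };

    static void undo_retrans_mark(struct retrans_counters *tp, struct seg_cb *cb,
                                  uint32_t pcount)
    {
        if (cb->sacked & TCPCB_SACKED_RETRANS) {
            cb->sacked &= ~TCPCB_SACKED_RETRANS;   /* allow retransmitting again */
            tp->retrans_out -= pcount;             /* that retransmit is gone */
        }
    }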
tcp.c
   467  struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);   in tcp_tx_timestamp()
   473  shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;   in tcp_tx_timestamp()
   642  TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;   in tcp_mark_push()
   654  struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);   in skb_entail()
   867  memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));   in sk_stream_alloc_skb()
  1041  TCP_SKB_CB(skb)->end_seq += copy;   in do_tcp_sendpages()
  1045  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;   in do_tcp_sendpages()
  1301  TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;   in tcp_sendmsg_locked()
  1365  TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;   in tcp_sendmsg_locked()
  1368  TCP_SKB_CB(skb)->end_seq += copy;   in tcp_sendmsg_locked()
   [all …]
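In the sendmsg/sendpages hits the control block of the queue tail simply grows as user data is copied in (end_seq += copy), with PSH withheld while more data is expected, and tcp_tx_timestamp() keys a requested TX timestamp to the sequence number of the skb's last byte. A sketch of both with stand-in types:

    /* Simplified model of growing the queue-tail coverage and of the
     * tx-timestamp key shown above. */
    #include <stdint.h>

    #define TCPHDR_PSH 0x08

    struct seg_cb {
        uint32_t seq, end_seq;
        uint8_t  tcp_flags;
    };

    /* Extend the tail skb over `copy` newly queued bytes. */
    static void grow_tail(struct seg_cb *cb, uint32_t copy, int more_to_come)
    {
        cb->end_seq += copy;
        if (more_to_come)
            cb->tcp_flags &= ~TCPHDR_PSH;   /* don't push a partial write yet */
    }

    /* The timestamp completion is reported against the skb's last byte. */
    static uint32_t tx_timestamp_key(const struct seg_cb *cb, uint32_t skb_len)
    {
        return cb->seq + skb_len - 1;
    }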
syncookies.c
   285  struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;   in cookie_v4_check()
/Linux-v5.4/include/net/
tcp.h
   865  #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))   macro
   869  TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);   in bpf_compute_data_end_sk_skb()
   874  return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;   in tcp_skb_bpf_ingress()
   879  return TCP_SKB_CB(skb)->bpf.sk_redir;   in tcp_skb_bpf_redirect_fetch()
   884  TCP_SKB_CB(skb)->bpf.sk_redir = NULL;   in tcp_skb_bpf_redirect_clear()
   893  return TCP_SKB_CB(skb)->header.h6.iif;   in tcp_v6_iif()
   898  bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);   in tcp_v6_iif_l3_slave()
   900  return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;   in tcp_v6_iif_l3_slave()
   907  if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))   in tcp_v6_sdif()
   908  return TCP_SKB_CB(skb)->header.h6.iif;   in tcp_v6_sdif()
   [all …]
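The tcp.h hit is the definition itself: TCP_SKB_CB() does nothing more than reinterpret the skb's generic cb[] scratch area as struct tcp_skb_cb, which is why every TCP-layer file above can stash per-segment state (seq/end_seq, header flags, SACK scoreboard bits, saved IPv4/IPv6 receive options, rate-sample data, BPF redirect state) directly in the skb. A compilable userspace sketch of the overlay pattern; the struct below is a simplified stand-in, not the kernel's real layout:

    /* Stand-in model of overlaying a private control block on skb->cb[]. */
    #include <stdint.h>
    #include <stdio.h>

    #define SKB_CB_SIZE 48            /* size of the kernel's skb->cb[] scratch area */

    struct fake_skb {                 /* stand-in for struct sk_buff */
        unsigned int len;
        char cb[SKB_CB_SIZE];         /* opaque per-layer scratch space */
    };

    struct fake_tcp_cb {              /* stand-in for struct tcp_skb_cb */
        uint32_t seq;                 /* first sequence number in the skb */
        uint32_t end_seq;             /* seq + SYN + FIN + payload length */
        uint8_t  tcp_flags;           /* TCPHDR_* bits */
        uint8_t  sacked;              /* TCPCB_* scoreboard bits */
    };

    /* Same trick as TCP_SKB_CB(): the overlay must fit in the scratch area
     * (the kernel enforces this with a build-time check). */
    _Static_assert(sizeof(struct fake_tcp_cb) <= SKB_CB_SIZE,
                   "control block larger than cb[]");
    #define FAKE_TCP_CB(skb) ((struct fake_tcp_cb *)&((skb)->cb[0]))

    int main(void)
    {
        struct fake_skb skb = { .len = 100 };

        FAKE_TCP_CB(&skb)->seq = 1000;
        FAKE_TCP_CB(&skb)->end_seq = 1000 + skb.len;
        printf("seq=%u end_seq=%u\n",
               FAKE_TCP_CB(&skb)->seq, FAKE_TCP_CB(&skb)->end_seq);
        return 0;
    }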
/Linux-v5.4/net/ipv6/
tcp_ipv6.c
   749  bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);   in tcp_v6_init_req()
   761  if (!TCP_SKB_CB(skb)->tcp_tw_isn &&   in tcp_v6_init_req()
   762  (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||   in tcp_v6_init_req()
  1110  memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,   in tcp_v6_restore_cb()
  1431  if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&   in tcp_v6_do_rcv()
  1441  if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {   in tcp_v6_do_rcv()
  1463  memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),   in tcp_v6_fill_cb()
  1467  TCP_SKB_CB(skb)->seq = ntohl(th->seq);   in tcp_v6_fill_cb()
  1468  TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +   in tcp_v6_fill_cb()
  1470  TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);   in tcp_v6_fill_cb()
   [all …]
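tcp_v6_fill_cb() is where the IPv6 receive path populates the control block from the TCP header: the IPv6 receive options are first saved into header.h6 (because cb[] is about to be reused by TCP), then seq and ack_seq are byte-swapped and end_seq is computed as the payload length plus one unit each for SYN and FIN. A userspace sketch of that arithmetic with simplified header fields (uses POSIX ntohl()):

    /* Simplified model of deriving the control block from a received header. */
    #include <stdint.h>
    #include <arpa/inet.h>            /* ntohl() */

    struct tcp_hdr_view {             /* just the fields the sketch needs */
        uint32_t seq;                 /* network byte order */
        uint32_t ack_seq;             /* network byte order */
        uint8_t  doff;                /* header length in 32-bit words */
        uint8_t  syn, fin;            /* flag bits, 0 or 1 */
    };

    struct seg_cb { uint32_t seq, end_seq, ack_seq; };

    static void fill_cb(struct seg_cb *cb, const struct tcp_hdr_view *th,
                        uint32_t skb_len)
    {
        cb->seq     = ntohl(th->seq);
        cb->end_seq = cb->seq + th->syn + th->fin +
                      (skb_len - th->doff * 4);    /* payload bytes */
        cb->ack_seq = ntohl(th->ack_seq);
    }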
syncookies.c
   189  if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||   in cookie_v6_check()
/Linux-v5.4/net/core/
sock_map.c
   456  struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);   in BPF_CALL_4()
   905  struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);   in BPF_CALL_4()
skmsg.c
   705  struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);   in sk_psock_verdict_apply()
/Linux-v5.4/net/tls/
tls_device.c
   215  TCP_SKB_CB(skb)->eor = 1;   in tls_device_resync_tx()
   992  TCP_SKB_CB(skb)->eor = 1;   in tls_set_device_offload()
tls_sw.c
  2020  TCP_SKB_CB(skb)->seq + rxm->offset);   in tls_read_size()