/Linux-v4.19/net/ipv4/

D | tcp_input.c |
     598  if (TCP_SKB_CB(skb)->end_seq -  in tcp_rcv_rtt_measure_ts()
    1063  u32 start_seq, u32 end_seq)  in tcp_is_sackblock_valid() argument
    1066  if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))  in tcp_is_sackblock_valid()
    1083  if (after(end_seq, tp->snd_una))  in tcp_is_sackblock_valid()
    1090  if (!after(end_seq, tp->undo_marker))  in tcp_is_sackblock_valid()
    1096  return !before(start_seq, end_seq - tp->max_window);  in tcp_is_sackblock_valid()
    1105  u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);  in tcp_check_dsack()
    1113  u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);  in tcp_check_dsack()
    1156  u32 start_seq, u32 end_seq)  in tcp_match_skb_to_sack() argument
    1164  !before(end_seq, TCP_SKB_CB(skb)->end_seq);  in tcp_match_skb_to_sack()
    [all …]
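
All of the tcp_input.c checks above are built on TCP's wrap-safe sequence comparisons. Below is a minimal, self-contained userspace sketch of that arithmetic: before() and after() mirror the helpers declared in include/net/tcp.h, while is_sack_block_plausible() is an illustrative stand-in for the first test shown in tcp_is_sackblock_valid(), not a kernel function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe comparison: the signed difference keeps working across
 * 32-bit sequence-number wrap-around. */
static bool before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

/* Illustrative stand-in for the first check in tcp_is_sackblock_valid():
 * a SACK block must not reach past snd_nxt and must cover at least one
 * sequence number. */
static bool is_sack_block_plausible(uint32_t start_seq, uint32_t end_seq,
                                    uint32_t snd_nxt)
{
        if (after(end_seq, snd_nxt) || !before(start_seq, end_seq))
                return false;
        return true;
}

int main(void)
{
        /* after() still holds across wrap: 0x10 comes "after" 0xfffffff0. */
        printf("%d\n", after(0x00000010u, 0xfffffff0u));         /* 1 */
        printf("%d\n", is_sack_block_plausible(100, 200, 300));  /* 1 */
        printf("%d\n", is_sack_block_plausible(200, 200, 300));  /* 0, empty block */
        return 0;
}
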
D | tcp_recovery.c |
      95  tp->rack.end_seq, scb->end_seq))  in tcp_rack_detect_loss()
     134  void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,  in tcp_rack_advance() argument
     156  end_seq, tp->rack.end_seq)) {  in tcp_rack_advance()
     158  tp->rack.end_seq = end_seq;  in tcp_rack_advance()
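
The tcp_recovery.c hits show RACK keeping, per connection, the transmit time and end_seq of the most recently delivered segment, and only ever moving that edge forward. A hedged sketch of that ordering follows; rack_state, sent_after() and rack_advance() are illustrative names, and the tie-break on end_seq reflects how the "sent after" test is commonly described, not a verbatim copy of the kernel code.

#include <stdbool.h>
#include <stdint.h>

/* Same wrap-safe helpers as in the tcp_input.c sketch above. */
static bool before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

struct rack_state {
        uint64_t mstamp;        /* send time of the most recently delivered segment */
        uint32_t end_seq;       /* its right edge */
};

/* "Sent after": later timestamp wins, higher end_seq breaks ties. */
static bool sent_after(uint64_t t1, uint64_t t2, uint32_t seq1, uint32_t seq2)
{
        return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* Advance the RACK edge only when the newly (s)acked segment was sent
 * later than the one currently recorded. */
static void rack_advance(struct rack_state *rack, uint64_t xmit_time,
                         uint32_t end_seq)
{
        if (sent_after(xmit_time, rack->mstamp, end_seq, rack->end_seq)) {
                rack->mstamp = xmit_time;
                rack->end_seq = end_seq;
        }
}
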
D | tcp_minisocks.c |
      32  static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)  in tcp_in_window() argument
      36  if (after(end_seq, s_win) && before(seq, e_win))  in tcp_in_window()
      38  return seq == e_win && seq == end_seq;  in tcp_in_window()
     116  !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,  in tcp_timewait_state_process()
     130  !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||  in tcp_timewait_state_process()
     131  TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {  in tcp_timewait_state_process()
     140  TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)  in tcp_timewait_state_process()
     145  tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;  in tcp_timewait_state_process()
     174  (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {  in tcp_timewait_state_process()
     725  if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,  in tcp_check_req()
     [all …]
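
tcp_in_window() is small enough that the hits above show most of it: a segment [seq, end_seq) is acceptable in TIME-WAIT only if it overlaps the receive window [s_win, e_win), with a special case for a zero-length segment sitting exactly at the right edge. A standalone adaptation of those visible lines, not a verbatim copy:

#include <stdbool.h>
#include <stdint.h>

static bool before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

static bool segment_in_window(uint32_t seq, uint32_t end_seq,
                              uint32_t s_win, uint32_t e_win)
{
        /* Normal case: the segment and the window intersect. */
        if (after(end_seq, s_win) && before(seq, e_win))
                return true;
        /* Zero-length segment (e.g. a bare ACK) exactly at the right edge. */
        return seq == e_win && seq == end_seq;
}
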
D | tcp_output.c |
      58  tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;  in tcp_event_new_data_sent()
     404  TCP_SKB_CB(skb)->end_seq = seq;  in tcp_init_nondata_skb()
     522  *ptr++ = htonl(sp[this_sack].end_seq);  in tcp_options_write()
    1034  TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq  in __tcp_transmit_skb()
    1143  if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)  in __tcp_transmit_skb()
    1189  tp->write_seq = TCP_SKB_CB(skb)->end_seq;  in tcp_queue_skb()
    1318  TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;  in tcp_fragment()
    1319  TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;  in tcp_fragment()
    1347  if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {  in tcp_fragment()
    1675  tp->snd_sml = TCP_SKB_CB(skb)->end_seq;  in tcp_minshall_update()
    [all …]
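
The two tcp_fragment() lines (1318, 1319) capture the whole sequence-number side of splitting a segment: the new tail inherits the original right edge, and the head is truncated so that its end_seq becomes the tail's seq. A sketch of just that bookkeeping; struct seg is a stand-in for TCP_SKB_CB(skb), not a kernel type.

#include <stdint.h>

struct seg {
        uint32_t seq;           /* first sequence number covered */
        uint32_t end_seq;       /* one past the last covered sequence number */
};

/* Split head at head_len bytes; afterwards head covers [seq, seq + head_len)
 * and tail covers [seq + head_len, original end_seq). */
static void split_seg(struct seg *head, struct seg *tail, uint32_t head_len)
{
        tail->seq = head->seq + head_len;
        tail->end_seq = head->end_seq;  /* tail keeps the original right edge */
        head->end_seq = tail->seq;      /* head now ends where the tail starts */
}
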
D | tcp_illinois.c |
      48  u32 end_seq; /* right edge of current RTT */  member
      61  ca->end_seq = tp->snd_nxt;  in rtt_reset()
     264  if (after(ack, ca->end_seq))  in tcp_illinois_cong_avoid()
D | tcp_cubic.c |
      99  u32 end_seq; /* end_seq of the round */  member
     134  ca->end_seq = tp->snd_nxt;  in bictcp_hystart_reset()
     343  if (hystart && after(ack, ca->end_seq))  in bictcp_cong_avoid()
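
tcp_illinois.c and tcp_cubic.c use end_seq the same way: at the start of a measurement round they snapshot snd_nxt, and the round is over once an ACK moves past that snapshot. A self-contained sketch of the pattern; round_state, round_start() and round_done() are illustrative names, not kernel symbols.

#include <stdbool.h>
#include <stdint.h>

static bool before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

struct round_state {
        uint32_t end_seq;       /* right edge of the current round */
};

static void round_start(struct round_state *rs, uint32_t snd_nxt)
{
        rs->end_seq = snd_nxt;  /* everything sent so far belongs to this round */
}

static bool round_done(const struct round_state *rs, uint32_t ack)
{
        return after(ack, rs->end_seq);  /* the whole round has been ACKed */
}
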
D | tcp_fastopen.c |
     179  if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)  in tcp_fastopen_add_skb()
     202  tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;  in tcp_fastopen_add_skb()
     330  bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;  in tcp_try_fastopen()
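
The tcp_try_fastopen() hit (line 330) relies on the fact that a SYN consumes exactly one sequence number, so any payload carried on the SYN shows up as end_seq advancing past seq + 1. The helper below restates that test; it is illustrative, not kernel code.

#include <stdbool.h>
#include <stdint.h>

/* True when a SYN segment [seq, end_seq) carries Fast Open data:
 * the SYN itself accounts for seq + 1, anything beyond that is payload. */
static bool syn_carries_data(uint32_t seq, uint32_t end_seq)
{
        return end_seq != seq + 1;
}
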
D | tcp.c |
     666  tcb->seq = tcb->end_seq = tp->write_seq;  in skb_entail()
    1015  TCP_SKB_CB(skb)->end_seq += copy;  in do_tcp_sendpages()
    1369  TCP_SKB_CB(skb)->end_seq += copy;  in tcp_sendmsg_locked()
    1537  WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),  in tcp_cleanup_rbuf()
    1539  tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);  in tcp_cleanup_rbuf()
    2334  u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;  in tcp_close()
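
On the send side, the tcp.c hits show a freshly entailed skb starting out empty (seq == end_seq == write_seq) and growing as data is copied into it (end_seq += copy), while tcp_queue_skb() in tcp_output.c (line 1189 above) then moves write_seq up to the segment's end_seq. A hedged sketch using the same struct seg stand-in as in the tcp_fragment() example:

#include <stdint.h>

struct seg {
        uint32_t seq;
        uint32_t end_seq;
};

/* New segment at the tail of the write queue: covers zero bytes so far. */
static void entail(struct seg *s, uint32_t write_seq)
{
        s->seq = s->end_seq = write_seq;
}

/* Copy `copy` more bytes into the segment. */
static void append_payload(struct seg *s, uint32_t copy)
{
        s->end_seq += copy;
}

/* Queue the segment: the next data will start right after it. */
static void queue_segment(const struct seg *s, uint32_t *write_seq)
{
        *write_seq = s->end_seq;
}
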
D | tcp_ipv4.c |
    1654  TCP_SKB_CB(skb)->end_seq -= eaten;  in tcp_filter()
    1677  TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +  in tcp_v4_fill_cb()
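
tcp_v4_fill_cb() (line 1677, and its IPv6 counterpart in tcp_ipv6.c further down) derives end_seq from the header, matching the "SEQ + FIN + SYN + datalen" comment on the tcp_skb_cb member: SYN and FIN each occupy one sequence number on top of the payload. The standalone helper below simply restates that arithmetic.

#include <stdint.h>

/* end_seq = SEQ + SYN + FIN + datalen; syn and fin are 0 or 1. */
static uint32_t tcp_end_seq(uint32_t seq, unsigned int syn, unsigned int fin,
                            uint32_t datalen)
{
        return seq + syn + fin + datalen;
}
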
/Linux-v4.19/net/netfilter/

D | nf_conntrack_seqadj.c |
      93  if (after(ntohl(sack->end_seq) - seq->offset_before,  in nf_ct_sack_block_adjust()
      95  new_end_seq = htonl(ntohl(sack->end_seq) -  in nf_ct_sack_block_adjust()
      98  new_end_seq = htonl(ntohl(sack->end_seq) -  in nf_ct_sack_block_adjust()
     103  ntohl(sack->end_seq), ntohl(new_end_seq));  in nf_ct_sack_block_adjust()
     108  sack->end_seq, new_end_seq, false);  in nf_ct_sack_block_adjust()
     110  sack->end_seq = new_end_seq;  in nf_ct_sack_block_adjust()
/Linux-v4.19/net/tls/

D | tls_device.c |
     175  if (info && !before(acked_seq, info->end_seq)) {  in tls_icsk_clean_acked()
     183  if (before(acked_seq, info->end_seq))  in tls_icsk_clean_acked()
     267  record->end_seq = tp->write_seq + record->len;  in tls_push_record()
     510  before(seq, info->end_seq - info->len)) {  in tls_get_record()
     520  if (before(seq, info->end_seq)) {  in tls_get_record()
     522  after(info->end_seq,  in tls_get_record()
     523  context->retransmit_hint->end_seq)) {  in tls_get_record()
     733  start_marker_record->end_seq = tcp_sk(sk)->write_seq;  in tls_set_device_offload()
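
The tls_device.c hits treat every offloaded record as covering the TCP range [end_seq - len, end_seq), which is also what tls_record_start_seq() in include/net/tls.h computes. A hedged sketch of looking up the record that contains a given sequence number; struct rec and find_record() are illustrative, not the kernel structures.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

struct rec {
        uint32_t end_seq;       /* first sequence number after the record */
        uint32_t len;           /* record length in bytes */
        struct rec *next;
};

static uint32_t rec_start_seq(const struct rec *r)
{
        return r->end_seq - r->len;     /* mirrors tls_record_start_seq() */
}

/* Walk the record list and return the record whose range contains seq. */
static struct rec *find_record(struct rec *head, uint32_t seq)
{
        for (struct rec *r = head; r; r = r->next) {
                if (!before(seq, rec_start_seq(r)) && before(seq, r->end_seq))
                        return r;       /* seq falls inside [start, end_seq) */
        }
        return NULL;
}
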
/Linux-v4.19/include/linux/

D | tcp.h |
      78  __be32 end_seq;  member
      83  u32 end_seq;  member
     217  u32 end_seq; /* Ending TCP sequence of the skb */  member
/Linux-v4.19/include/net/

D | tls.h |
     131  u32 end_seq;  member
     273  return rec->end_seq - rec->len;  in tls_record_start_seq()
D | tcp.h |
     799  __u32 end_seq; /* SEQ + FIN + SYN + datalen */  member
    1933  extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
/Linux-v4.19/net/ipv6/

D | tcp_ipv6.c |
    1379  if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&  in tcp_v6_do_rcv()
    1416  TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +  in tcp_v6_fill_cb()
/Linux-v4.19/net/sched/

D | sch_cake.c |
     979  u32 end_a = get_unaligned_be32(&sack_a->end_seq);  in cake_tcph_sack_compare()
     991  u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);  in cake_tcph_sack_compare()
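
cake_tcph_sack_compare() reads SACK edges straight out of the TCP option bytes, which are neither aligned nor in host byte order; in the kernel, get_unaligned_be32() handles both. The sketch below shows a userspace equivalent of that read plus a simple coverage test between two blocks; read_be32() and sack_covers() are illustrative helpers, not the cake algorithm itself.

#include <stdbool.h>
#include <stdint.h>

/* Byte-wise load: safe for any alignment, converts from network order. */
static uint32_t read_be32(const unsigned char *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static bool before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1) before(seq1, seq2)

/* Does block A, [start_a, end_a), fully cover block B? */
static bool sack_covers(uint32_t start_a, uint32_t end_a,
                        uint32_t start_b, uint32_t end_b)
{
        return !after(start_a, start_b) && !before(end_a, end_b);
}
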