/* Excerpts from net/ipv4/tcp_recovery.c (Linux TCP RACK loss detection).
 * Non-contiguous source lines; elided code is marked with "...".
 */
// SPDX-License-Identifier: GPL-2.0
/* In tcp_rack_reo_wnd(): */
	if (!tp->reord_seen) {
		...
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more reordering resilient, allow min_rtt/4 settling delay.
	 * ...
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
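/*
 * Illustrative standalone sketch (not kernel code) of the formula above:
 * the reordering window is reo_wnd_steps multiples of min_rtt/4, capped by
 * the smoothed RTT. In the kernel tp->srtt_us stores srtt << 3, hence the
 * ">> 3" above; this sketch assumes plain microsecond inputs.
 */
#include <stdint.h>

static uint32_t reo_wnd_sketch(uint32_t min_rtt_us, uint32_t reo_wnd_steps,
			       uint32_t srtt_us)
{
	uint32_t wnd = (min_rtt_us >> 2) * reo_wnd_steps; /* min_rtt/4 per step */

	return wnd < srtt_us ? wnd : srtt_us; /* never exceed smoothed RTT */
}
/* Example: min_rtt_us = 40000, one step, srtt_us = 50000 -> 10000us window. */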
/* In tcp_rack_skb_timeout(): */
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
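/*
 * Sketch (illustrative, not kernel code) of the arithmetic above: the time
 * left before a packet may be marked lost is its RACK RTT plus the
 * reordering window, minus how long ago it was (re)sent. A result <= 0
 * means the packet can be marked lost immediately.
 */
#include <stdint.h>

static int64_t rack_skb_timeout_sketch(uint64_t now_us, uint64_t xmit_us,
				       uint32_t rack_rtt_us, uint32_t reo_wnd_us)
{
	int64_t elapsed_us = (int64_t)(now_us - xmit_us);

	return (int64_t)rack_rtt_us + (int64_t)reo_wnd_us - elapsed_us;
}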
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 * ...
 * RACK: sent time delta to the latest delivered packet (time domain)
 * ...
 * is being more resilient to reordering by simply allowing some
 * "settling delay", instead of tweaking the dupthresh.
 * ...
 * make us enter the CA_Recovery state.
 */
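/*
 * Sketch of the time-domain ordering RACK is built on (an illustrative
 * model of the kernel's tcp_rack_sent_after() used below): packet 1 was
 * logically sent after packet 2 if its send timestamp is later, with the
 * end sequence breaking exact-timestamp ties via a wraparound-safe
 * comparison.
 */
#include <stdbool.h>
#include <stdint.h>

static bool sent_after_sketch(uint64_t t1, uint64_t t2,
			      uint32_t seq1, uint32_t seq2)
{
	return t1 > t2 || (t1 == t2 && (int32_t)(seq1 - seq2) > 0);
}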
/* In tcp_rack_detect_loss(): */
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		...
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_rack_sent_after(tp->rack.mstamp,
					 tcp_skb_timestamp_us(skb),
					 tp->rack.end_seq, scb->end_seq))
			break;
		...
		list_del_init(&skb->tcp_tsorted_anchor);
		...
		/* Record maximum wait time */
		...
	}
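/*
 * Putting the loop above together as an illustrative, self-contained
 * predicate (names are assumptions, not the kernel's): a packet is a loss
 * candidate only if the most recently delivered packet was sent after it,
 * and it is declared lost once it has waited longer than the RACK RTT
 * plus the reordering window.
 */
#include <stdbool.h>
#include <stdint.h>

struct rack_pkt_sketch {
	uint64_t xmit_us;  /* last (re)transmit time of this packet */
	uint32_t end_seq;  /* sequence number after its last byte */
};

static bool rack_is_lost_sketch(const struct rack_pkt_sketch *pkt,
				uint64_t rack_mstamp, uint32_t rack_end_seq,
				uint64_t now_us, uint32_t rack_rtt_us,
				uint32_t reo_wnd_us)
{
	bool newer_delivered =
		rack_mstamp > pkt->xmit_us ||
		(rack_mstamp == pkt->xmit_us &&
		 (int32_t)(rack_end_seq - pkt->end_seq) > 0);

	if (!newer_delivered)
		return false; /* nothing sent later has been (s)acked yet */

	return now_us - pkt->xmit_us > (uint64_t)rack_rtt_us + reo_wnd_us;
}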
/* In tcp_rack_mark_lost(): */
	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	...
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
				  timeout, inet_csk(sk)->icsk_rto);
/* Record the most recently (re)sent time among the (s)acked packets
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
/* In tcp_rack_advance(): */
	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	...
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
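/*
 * Illustrative model of the state update above (the struct is a simplified
 * stand-in for the kernel's tp->rack): remember the RTT, send time and end
 * sequence of the most recently sent packet that has been (s)acked. The
 * kernel additionally discards ambiguous ACKs of retransmitted packets,
 * which this sketch omits.
 */
#include <stdbool.h>
#include <stdint.h>

struct rack_state_sketch {
	uint64_t mstamp;   /* send time of most recently sent (s)acked pkt */
	uint32_t end_seq;  /* its end sequence (timestamp tie breaker) */
	uint32_t rtt_us;   /* RTT measured from that packet */
	bool advanced;     /* new info since the last loss-detection pass */
};

static void rack_advance_sketch(struct rack_state_sketch *r, uint64_t now_us,
				uint64_t xmit_us, uint32_t end_seq)
{
	r->advanced = true;
	r->rtt_us = (uint32_t)(now_us - xmit_us);
	if (xmit_us > r->mstamp ||
	    (xmit_us == r->mstamp && (int32_t)(end_seq - r->end_seq) > 0)) {
		r->mstamp = xmit_us;
		r->end_seq = end_seq;
	}
}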
/* In tcp_rack_reo_timeout(): */
	u32 lost = tp->lost;
	...
	if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
		tcp_enter_recovery(sk, false);
		if (!inet_csk(sk)->icsk_ca_ops->cong_control)
			tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
	}
	...
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
/* From the comment above tcp_rack_update_reo_wnd():
 * ...
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * no. of successful recoveries (accounts for full DSACK-based loss
 * recovery undo). After that, reset it to default (min_rtt/4).
 * ...
 * At max, reo_wnd is incremented only once per rtt. So that the new
 * DSACK on which we are reacting, is due to the spurious retx (approx)
 * after the reo_wnd has been updated last time.
 * ...
 */
/* In tcp_rack_update_reo_wnd(): */
	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if a rtt has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
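/*
 * Illustrative model of the adaptive policy above (names are assumptions):
 * a DSACK that survives the once-per-RTT filter widens reo_wnd by one
 * min_rtt/4 step, saturating at 0xFF, and the widened window persists for
 * 16 successful recoveries. The persist counter is decremented elsewhere,
 * once per recovery; when it reaches zero the window resets to one step.
 */
#include <stdbool.h>
#include <stdint.h>

#define RACK_RECOVERY_THRESH_SKETCH 16

struct reo_wnd_state_sketch {
	uint8_t steps;    /* reo_wnd in units of min_rtt/4 */
	uint8_t persist;  /* recoveries left before resetting to one step */
};

static void reo_wnd_update_sketch(struct reo_wnd_state_sketch *w,
				  bool dsack_seen)
{
	if (dsack_seen) {
		if (w->steps < 0xFF)
			w->steps++;                       /* widen one step */
		w->persist = RACK_RECOVERY_THRESH_SKETCH; /* keep it a while */
	} else if (!w->persist) {
		w->steps = 1;                             /* back to default */
	}
}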
/* RFC6582 NewReno recovery for non-SACK connection. It simply retransmits
 * the head sequence first because it's the most plausible packet to be lost.
 * This avoids unneeded retransmits in disordered networks.
 */
/* In tcp_newreno_mark_lost(): */
	const u8 state = inet_csk(sk)->icsk_ca_state;
	...
	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		...
		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;
		...
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			...
	}
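/*
 * Illustrative model of the trigger condition above (names are
 * assumptions): without SACK, only the head of the retransmit queue is
 * marked lost, either when the dupack count reaches the reordering
 * threshold outside recovery, or when snd_una advances while already in
 * recovery, i.e. one retransmission per round trip.
 */
#include <stdbool.h>
#include <stdint.h>

enum ca_state_sketch { CA_OPEN_SKETCH = 0, CA_RECOVERY_SKETCH = 1 };

static bool newreno_should_mark_head_sketch(enum ca_state_sketch state,
					    uint32_t dupacks,
					    uint32_t reordering,
					    bool snd_una_advanced)
{
	return (state < CA_RECOVERY_SKETCH && dupacks >= reordering) ||
	       (state == CA_RECOVERY_SKETCH && snd_una_advanced);
}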