Lines matching +full:max +full:-retries in net/ipv4/tcp_timer.c; each entry shows the source line number, the matching code, and its enclosing function.

1 // SPDX-License-Identifier: GPL-2.0-only
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Florian La Roche, <flla@stud.uni-sb.de>
32 start_ts = tcp_sk(sk)->retrans_stamp; in tcp_clamp_rto_to_user_timeout()
33 if (!icsk->icsk_user_timeout) in tcp_clamp_rto_to_user_timeout()
34 return icsk->icsk_rto; in tcp_clamp_rto_to_user_timeout()
35 elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts; in tcp_clamp_rto_to_user_timeout()
36 remaining = icsk->icsk_user_timeout - elapsed; in tcp_clamp_rto_to_user_timeout()
40 return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining)); in tcp_clamp_rto_to_user_timeout()
49 if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp) in tcp_clamp_probe0_to_user_timeout()
52 elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp; in tcp_clamp_probe0_to_user_timeout()
55 remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed; in tcp_clamp_probe0_to_user_timeout()
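Both clamping helpers above follow the same pattern: when a user timeout (the TCP_USER_TIMEOUT socket option, in milliseconds) is set, the next timer interval is capped by whatever is left of that overall deadline. A minimal sketch of the pattern, simplified to milliseconds throughout (the kernel mixes jiffies and ms and converts with msecs_to_jiffies(); the helper name is hypothetical):

/* Sketch: clamp the next retransmit/probe interval to what remains of a
 * user-supplied overall timeout.  All values in milliseconds here. */
unsigned int clamp_to_user_timeout(unsigned int rto_ms,
                                   unsigned int user_timeout_ms,
                                   unsigned int elapsed_ms)
{
        int remaining;

        if (!user_timeout_ms)           /* no user timeout: leave RTO alone */
                return rto_ms;
        remaining = (int)(user_timeout_ms - elapsed_ms);
        if (remaining <= 0)             /* deadline already passed: fire ASAP */
                return 1;
        return rto_ms < (unsigned int)remaining ? rto_ms
                                                : (unsigned int)remaining;
}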
62 * tcp_write_err() - close socket and save error info
70 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; in tcp_write_err()
79 * tcp_out_of_resources() - Close socket if out of resources
109 if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) in tcp_out_of_resources()
113 if (sk->sk_err_soft) in tcp_out_of_resources()
119 if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN || in tcp_out_of_resources()
121 (!tp->snd_wnd && !tp->packets_out)) in tcp_out_of_resources()
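The age checks above use the wraparound-safe idiom seen throughout this file: subtract on the unsigned 32-bit jiffies clock, cast the difference to s32, and compare against the span. This stays correct across clock wrap as long as the spans involved are below 2^31 ticks. Isolated as a minimal user-space sketch (hypothetical helper):

#include <stdint.h>

/* Sketch: has 'now' advanced at least 'span' past 'start' on a wrapping
 * 32-bit clock?  Subtracting first and casting to signed makes the test
 * immune to wraparound for spans under 2^31. */
static int deadline_passed(uint32_t now, uint32_t start, uint32_t span)
{
        return (int32_t)(now - start - span) >= 0;
}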
140 * tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
146 int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */ in tcp_orphan_retries() local
149 if (sk->sk_err_soft && !alive) in tcp_orphan_retries()
150 retries = 0; in tcp_orphan_retries()
153 * number of retries. 8 corresponds to >100 seconds with minimal in tcp_orphan_retries()
155 if (retries == 0 && alive) in tcp_orphan_retries()
156 retries = 8; in tcp_orphan_retries()
157 return retries; in tcp_orphan_retries()
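Condensed, the logic above reads: a connection that looks dead (a soft error was seen and the RTO has already saturated at TCP_RTO_MAX) gets zero retries, while a configured value of zero on a still-alive connection falls back to a default of 8. A hedged restatement with the sysctl passed in as a plain parameter:

/* Sketch of the effective orphan-retries computation.  'alive' stands
 * for icsk_rto < TCP_RTO_MAX in the kernel, i.e. backoff has not yet
 * saturated. */
int effective_orphan_retries(int sysctl_orphan_retries, int soft_error,
                             int alive)
{
        int retries = sysctl_orphan_retries;    /* may be zero */

        if (soft_error && !alive)       /* peer unreachable, RTO saturated */
                retries = 0;
        if (retries == 0 && alive)      /* 0 means "use default" while alive */
                retries = 8;            /* > 100 s with minimal RTO */
        return retries;
}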
166 if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing)) in tcp_mtu_probing()
169 if (!icsk->icsk_mtup.enabled) { in tcp_mtu_probing()
170 icsk->icsk_mtup.enabled = 1; in tcp_mtu_probing()
171 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtu_probing()
173 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; in tcp_mtu_probing()
174 mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss); in tcp_mtu_probing()
175 mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor)); in tcp_mtu_probing()
176 mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss)); in tcp_mtu_probing()
177 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_probing()
179 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtu_probing()
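When repeated retransmissions suggest a PMTU black hole, the probing search floor is halved (expressed as an MSS) and clamped: never above tcp_base_mss, never below the configured floors. A standalone model of just that arithmetic, with the sysctl values passed in as parameters (hypothetical helper, not the kernel API):

/* Sketch: next MSS to try when MTU probing reacts to a suspected
 * black hole. */
static unsigned int next_blackhole_mss(unsigned int search_low_mss,
                                       unsigned int base_mss,
                                       unsigned int mtu_probe_floor,
                                       unsigned int min_snd_mss)
{
        unsigned int mss = search_low_mss >> 1; /* halve the search floor */

        if (mss > base_mss)             /* cap at tcp_base_mss */
                mss = base_mss;
        if (mss < mtu_probe_floor)      /* floors are applied last, so they win */
                mss = mtu_probe_floor;
        if (mss < min_snd_mss)
                mss = min_snd_mss;
        return mss;
}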
190 timeout = ((2 << boundary) - 1) * rto_base; in tcp_model_timeout()
192 timeout = ((2 << linear_backoff_thresh) - 1) * rto_base + in tcp_model_timeout()
193 (boundary - linear_backoff_thresh) * TCP_RTO_MAX; in tcp_model_timeout()
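The two branches above are the closed form of doubling the RTO each round: while boundary is at or below linear_backoff_thresh, the total after boundary rounds is the geometric series rto_base * (2^(boundary+1) - 1); beyond that threshold the timer is pinned at TCP_RTO_MAX, so each further round adds a flat TCP_RTO_MAX. A user-space rendering for clarity (rto_max passed in rather than taken from a macro):

/* Sketch of tcp_model_timeout(): total time consumed by 'boundary'
 * retransmissions when the timer doubles from rto_base up to rto_max,
 * then stays there (linear tail). */
unsigned long model_timeout(unsigned int boundary,
                            unsigned int linear_backoff_thresh,
                            unsigned long rto_base, unsigned long rto_max)
{
        if (boundary <= linear_backoff_thresh)
                /* geometric series: rto_base * (2^(boundary+1) - 1) */
                return ((2UL << boundary) - 1) * rto_base;
        return ((2UL << linear_backoff_thresh) - 1) * rto_base +
               (boundary - linear_backoff_thresh) * rto_max;
}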
197 * retransmits_timed_out() - returns true if this connection has timed out
199 * @boundary: max number of retransmissions
206 * after "boundary" unsuccessful, exponentially backed-off
215 if (!inet_csk(sk)->icsk_retransmits) in retransmits_timed_out()
218 start_ts = tcp_sk(sk)->retrans_stamp; in retransmits_timed_out()
222 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in retransmits_timed_out()
227 return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0; in retransmits_timed_out()
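The final comparison glues the two pieces together: compute the modeled total timeout for "boundary" backed-off retransmissions, then apply the wraparound-safe elapsed check against retrans_stamp. Using the two helpers sketched above (deadline_passed() and model_timeout(), both hypothetical):

/* Sketch of the retransmits_timed_out() composition.  In the kernel,
 * linear_thresh is derived as ilog2(TCP_RTO_MAX / rto_base). */
static int conn_timed_out(uint32_t now, uint32_t start_ts,
                          unsigned int boundary, unsigned long rto_base,
                          unsigned long rto_max, unsigned int linear_thresh)
{
        uint32_t timeout = (uint32_t)model_timeout(boundary, linear_thresh,
                                                   rto_base, rto_max);

        return deadline_passed(now, start_ts, timeout);
}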
239 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { in tcp_write_timeout()
240 if (icsk->icsk_retransmits) in tcp_write_timeout()
242 retry_until = icsk->icsk_syn_retries ? : in tcp_write_timeout()
243 READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); in tcp_write_timeout()
244 expired = icsk->icsk_retransmits >= retry_until; in tcp_write_timeout()
246 if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) { in tcp_write_timeout()
253 retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2); in tcp_write_timeout()
255 const bool alive = icsk->icsk_rto < TCP_RTO_MAX; in tcp_write_timeout()
267 icsk->icsk_user_timeout); in tcp_write_timeout()
272 icsk->icsk_retransmits, in tcp_write_timeout()
273 icsk->icsk_rto, (int)expired); in tcp_write_timeout()
282 tp->timeout_rehash++; in tcp_write_timeout()
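The retry budget therefore depends on connection state: sockets still in the handshake use tcp_syn_retries (or a per-socket override), established ones use tcp_retries2, and an orphaned socket may have that reduced further by tcp_orphan_retries(), shown earlier. A compressed restatement of the first part (hypothetical helper, not kernel code):

/* Sketch: pick the retransmission budget the way tcp_write_timeout()
 * does.  'in_handshake' covers SYN_SENT/SYN_RECV. */
static int pick_retry_budget(int in_handshake, int socket_syn_retries,
                             int sysctl_syn_retries, int sysctl_retries2)
{
        if (in_handshake)
                return socket_syn_retries ? socket_syn_retries
                                          : sysctl_syn_retries;
        return sysctl_retries2;
}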
294 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || in tcp_delack_timer_handler()
295 !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) in tcp_delack_timer_handler()
298 if (time_after(icsk->icsk_ack.timeout, jiffies)) { in tcp_delack_timer_handler()
299 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); in tcp_delack_timer_handler()
302 icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; in tcp_delack_timer_handler()
307 icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto); in tcp_delack_timer_handler()
313 icsk->icsk_ack.ato = TCP_ATO_MIN; in tcp_delack_timer_handler()
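Note the backoff on line 307: each time the delayed-ACK timer expires while the ACK is still deferred, the ACK-timeout estimate (ato) is doubled, capped at the current RTO, so deferred ACKs back off rather than fire in a tight loop. Isolated as a sketch:

/* Sketch: delayed-ACK timeout backoff from tcp_delack_timer_handler();
 * ato doubles per deferred expiry but never exceeds the current RTO. */
static unsigned long next_ack_timeout(unsigned long ato, unsigned long rto)
{
        return (ato << 1) < rto ? ato << 1 : rto;
}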
323 * tcp_delack_timer() - The TCP delayed ACK timeout handler
335 struct sock *sk = &icsk->icsk_inet.sk; in tcp_delack_timer()
343 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags)) in tcp_delack_timer()
357 if (tp->packets_out || !skb) { in tcp_probe_timer()
358 icsk->icsk_probes_out = 0; in tcp_probe_timer()
359 icsk->icsk_probes_tstamp = 0; in tcp_probe_timer()
371 if (!icsk->icsk_probes_tstamp) in tcp_probe_timer()
372 icsk->icsk_probes_tstamp = tcp_jiffies32; in tcp_probe_timer()
373 else if (icsk->icsk_user_timeout && in tcp_probe_timer()
374 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >= in tcp_probe_timer()
375 msecs_to_jiffies(icsk->icsk_user_timeout)) in tcp_probe_timer()
378 max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); in tcp_probe_timer()
383 if (!alive && icsk->icsk_backoff >= max_probes) in tcp_probe_timer()
389 if (icsk->icsk_probes_out >= max_probes) { in tcp_probe_timer()
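Zero-window probing gives up on the same kind of dual budget as retransmission: either the user timeout has elapsed since probing started, or the probe count reaches the (possibly orphan-reduced) tcp_retries2 limit. Sketched with time values simplified to milliseconds (in the kernel they are jiffies compared against msecs_to_jiffies()):

/* Sketch of the give-up test in tcp_probe_timer(). */
static int probe_should_abort(unsigned int user_timeout_ms,
                              unsigned int probing_elapsed_ms,
                              int probes_out, int max_probes)
{
        if (user_timeout_ms && probing_elapsed_ms >= user_timeout_ms)
                return 1;                       /* user deadline exhausted */
        return probes_out >= max_probes;        /* probe budget exhausted */
}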
407 req->rsk_ops->syn_ack_timeout(req); in tcp_fastopen_synack_timer()
410 max_retries = icsk->icsk_syn_retries ? : in tcp_fastopen_synack_timer()
411 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1; in tcp_fastopen_synack_timer()
413 if (req->num_timeout >= max_retries) { in tcp_fastopen_synack_timer()
418 if (icsk->icsk_retransmits == 1) in tcp_fastopen_synack_timer()
420 /* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error in tcp_fastopen_synack_timer()
426 req->num_timeout++; in tcp_fastopen_synack_timer()
427 icsk->icsk_retransmits++; in tcp_fastopen_synack_timer()
428 if (!tp->retrans_stamp) in tcp_fastopen_synack_timer()
429 tp->retrans_stamp = tcp_time_stamp(tp); in tcp_fastopen_synack_timer()
431 req->timeout << req->num_timeout, TCP_RTO_MAX); in tcp_fastopen_synack_timer()
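Note the backoff form on line 431: the base request timeout is left-shifted by the number of timeouts so far, i.e. doubled per round, and capped at TCP_RTO_MAX. The shift cannot overflow in practice because num_timeout is bounded by max_retries a few lines earlier. In isolation (hypothetical helper):

/* Sketch: per-round SYN-ACK retransmit interval for a fast-open
 * request; base_timeout << num_timeout doubles each round, capped. */
static unsigned long synack_rexmit_interval(unsigned long base_timeout,
                                            int num_timeout,
                                            unsigned long rto_max)
{
        unsigned long t = base_timeout << num_timeout;

        return t < rto_max ? t : rto_max;
}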
436 * tcp_retransmit_timer() - The TCP retransmit timeout handler
454 req = rcu_dereference_protected(tp->fastopen_rsk, in tcp_retransmit_timer()
457 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && in tcp_retransmit_timer()
458 sk->sk_state != TCP_FIN_WAIT1); in tcp_retransmit_timer()
460 /* Before we receive ACK to our SYN-ACK don't retransmit in tcp_retransmit_timer()
466 if (!tp->packets_out) in tcp_retransmit_timer()
473 tp->tlp_high_seq = 0; in tcp_retransmit_timer()
475 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) && in tcp_retransmit_timer()
476 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) { in tcp_retransmit_timer()
483 if (sk->sk_family == AF_INET) { in tcp_retransmit_timer()
485 &inet->inet_daddr, in tcp_retransmit_timer()
486 ntohs(inet->inet_dport), in tcp_retransmit_timer()
487 inet->inet_num, in tcp_retransmit_timer()
488 tp->snd_una, tp->snd_nxt); in tcp_retransmit_timer()
491 else if (sk->sk_family == AF_INET6) { in tcp_retransmit_timer()
493 &sk->sk_v6_daddr, in tcp_retransmit_timer()
494 ntohs(inet->inet_dport), in tcp_retransmit_timer()
495 inet->inet_num, in tcp_retransmit_timer()
496 tp->snd_una, tp->snd_nxt); in tcp_retransmit_timer()
499 if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) { in tcp_retransmit_timer()
513 if (icsk->icsk_retransmits == 0) { in tcp_retransmit_timer()
516 if (icsk->icsk_ca_state == TCP_CA_Recovery) { in tcp_retransmit_timer()
521 } else if (icsk->icsk_ca_state == TCP_CA_Loss) { in tcp_retransmit_timer()
523 } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) || in tcp_retransmit_timer()
524 tp->sacked_out) { in tcp_retransmit_timer()
536 icsk->icsk_retransmits++; in tcp_retransmit_timer()
562 icsk->icsk_backoff++; in tcp_retransmit_timer()
572 * linear-timeout retransmissions into a black hole in tcp_retransmit_timer()
574 if (sk->sk_state == TCP_ESTABLISHED && in tcp_retransmit_timer()
575 (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) && in tcp_retransmit_timer()
577 icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) { in tcp_retransmit_timer()
578 icsk->icsk_backoff = 0; in tcp_retransmit_timer()
579 icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX); in tcp_retransmit_timer()
582 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); in tcp_retransmit_timer()
586 if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0)) in tcp_retransmit_timer()
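The tail of tcp_retransmit_timer() is the core backoff policy: thin streams (fewer than four segments in flight) on established connections keep a fresh, un-backed-off RTO for the first TCP_THIN_LINEAR_RETRIES attempts, so they retransmit linearly; everyone else doubles the RTO, capped at TCP_RTO_MAX. Restated as a sketch, where fresh_rto stands in for the srtt-based value __tcp_set_rto() would return:

/* Sketch of the RTO update at the end of tcp_retransmit_timer(). */
static unsigned long next_rto(unsigned long rto, unsigned long fresh_rto,
                              unsigned long rto_max, int thin_linear,
                              int retransmits, int thin_linear_retries)
{
        if (thin_linear && retransmits <= thin_linear_retries)
                /* thin stream: linear timeouts, no exponential backoff
                 * (the kernel also resets icsk_backoff to 0 here) */
                return fresh_rto < rto_max ? fresh_rto : rto_max;
        /* everyone else: exponential backoff, capped */
        return (rto << 1) < rto_max ? rto << 1 : rto_max;
}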
592 /* Called with bottom-half processing disabled.
599 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || in tcp_write_timer_handler()
600 !icsk->icsk_pending) in tcp_write_timer_handler()
603 if (time_after(icsk->icsk_timeout, jiffies)) { in tcp_write_timer_handler()
604 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); in tcp_write_timer_handler()
609 event = icsk->icsk_pending; in tcp_write_timer_handler()
619 icsk->icsk_pending = 0; in tcp_write_timer_handler()
623 icsk->icsk_pending = 0; in tcp_write_timer_handler()
633 struct sock *sk = &icsk->icsk_inet.sk; in tcp_write_timer()
640 if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags)) in tcp_write_timer()
649 struct net *net = read_pnet(&inet_rsk(req)->ireq_net); in tcp_syn_ack_timeout()
657 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) in tcp_set_keepalive()
683 if (sk->sk_state == TCP_LISTEN) { in tcp_keepalive_timer()
689 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { in tcp_keepalive_timer()
690 if (tp->linger2 >= 0) { in tcp_keepalive_timer()
691 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; in tcp_keepalive_timer()
703 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))) in tcp_keepalive_timer()
709 if (tp->packets_out || !tcp_write_queue_empty(sk)) in tcp_keepalive_timer()
718 if ((icsk->icsk_user_timeout != 0 && in tcp_keepalive_timer()
719 elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) && in tcp_keepalive_timer()
720 icsk->icsk_probes_out > 0) || in tcp_keepalive_timer()
721 (icsk->icsk_user_timeout == 0 && in tcp_keepalive_timer()
722 icsk->icsk_probes_out >= keepalive_probes(tp))) { in tcp_keepalive_timer()
728 icsk->icsk_probes_out++; in tcp_keepalive_timer()
737 /* It is tp->rcv_tstamp + keepalive_time_when(tp) */ in tcp_keepalive_timer()
738 elapsed = keepalive_time_when(tp) - elapsed; in tcp_keepalive_timer()
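The keepalive arithmetic runs on a single quantity, the elapsed idle time: if it has reached keepalive_time_when(tp), a probe is due (or the connection is killed once the probe/user-timeout budget is spent, per lines 718-722); otherwise the timer is simply re-armed for the remainder, keepalive_time_when(tp) - elapsed. A simplified sketch of that decision, all values in milliseconds and ignoring the pending-write-queue special case:

/* Sketch of the keepalive scheduling decision.  Returns the delay until
 * the timer should fire again; actions are signalled via out-parameters. */
static unsigned int keepalive_step(unsigned int idle_ms,
                                   unsigned int keepalive_time_ms,
                                   unsigned int keepalive_intvl_ms,
                                   unsigned int user_timeout_ms,
                                   int probes_out, int max_probes,
                                   int *send_probe, int *give_up)
{
        *send_probe = 0;
        *give_up = 0;
        if (idle_ms < keepalive_time_ms)        /* not idle long enough yet */
                return keepalive_time_ms - idle_ms;
        if ((user_timeout_ms && idle_ms >= user_timeout_ms && probes_out > 0) ||
            (!user_timeout_ms && probes_out >= max_probes)) {
                *give_up = 1;                   /* budget exhausted: kill it */
                return 0;
        }
        *send_probe = 1;                        /* probe, retry after intvl */
        return keepalive_intvl_ms;
}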
760 if (tp->compressed_ack) { in tcp_compressed_ack_kick()
762 * subtract one from tp->compressed_ack to keep in tcp_compressed_ack_kick()
765 tp->compressed_ack--; in tcp_compressed_ack_kick()
770 &sk->sk_tsq_flags)) in tcp_compressed_ack_kick()
784 hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC, in tcp_init_xmit_timers()
786 tcp_sk(sk)->pacing_timer.function = tcp_pace_kick; in tcp_init_xmit_timers()
788 hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC, in tcp_init_xmit_timers()
790 tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick; in tcp_init_xmit_timers()
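For completeness, the pacing and compressed-ACK timers are ordinary hrtimers: initialized once with a clock and mode, given a callback, and armed elsewhere with hrtimer_start(). A minimal hedged sketch of the same pattern in kernel context (standalone demo fields, not the real tcp_sock layout; modes and expiry are illustrative):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

/* Callback contract: runs in (soft)irq context, must not block, and
 * returns whether to re-arm.  tcp_pace_kick() returns HRTIMER_NORESTART
 * and relies on being re-armed explicitly, as sketched here. */
static enum hrtimer_restart demo_fire(struct hrtimer *t)
{
        /* per-expiry work goes here */
        return HRTIMER_NORESTART;       /* one-shot */
}

static void demo_init(void)
{
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_timer.function = demo_fire;
        /* armed later, e.g.:
         * hrtimer_start(&demo_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
         */
}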