Lines Matching refs:sk (cross-reference listing of sk uses in net/ipv4/tcp_timer.c)
26 static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk) in tcp_clamp_rto_to_user_timeout() argument
28 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_rto_to_user_timeout()
32 start_ts = tcp_sk(sk)->retrans_stamp; in tcp_clamp_rto_to_user_timeout()
36 elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts; in tcp_clamp_rto_to_user_timeout()
44 u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when) in tcp_clamp_probe0_to_user_timeout() argument
46 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_probe0_to_user_timeout()
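Both clamp helpers above cap a pending timer (the RTO, or the zero-window probe interval) at whatever remains of the user-configured TCP_USER_TIMEOUT, measured from retrans_stamp. A minimal userspace model of that arithmetic, in milliseconds; the names are illustrative, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace model of the clamp logic; all names here are illustrative.
     * Times are in milliseconds. */
    static uint32_t clamp_to_user_timeout(uint32_t timer_ms,       /* pending RTO or probe0 */
                                          uint32_t user_timeout_ms,/* 0 means "not set" */
                                          uint32_t elapsed_ms)     /* since retrans_stamp */
    {
        uint32_t remaining;

        if (user_timeout_ms == 0)          /* no user timeout configured */
            return timer_ms;
        if (elapsed_ms >= user_timeout_ms) /* already expired: fire ASAP */
            return 1;
        remaining = user_timeout_ms - elapsed_ms;
        return timer_ms < remaining ? timer_ms : remaining;
    }

    int main(void)
    {
        /* 3.2 s RTO pending, 10 s user timeout, 8 s already elapsed:
         * the timer is clamped to the 2 s that remain. */
        printf("%u ms\n", clamp_to_user_timeout(3200, 10000, 8000));
        return 0;
    }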
70 static void tcp_write_err(struct sock *sk) in tcp_write_err() argument
72 WRITE_ONCE(sk->sk_err, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT); in tcp_write_err()
73 sk_error_report(sk); in tcp_write_err()
75 tcp_write_queue_purge(sk); in tcp_write_err()
76 tcp_done(sk); in tcp_write_err()
77 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); in tcp_write_err()
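tcp_write_err() is the common give-up path: it latches ETIMEDOUT (or a pending soft error) into sk_err, wakes the application via sk_error_report(), purges the unsent write queue, finishes the socket with tcp_done(), and counts TCPABORTONTIMEOUT. From userspace the latched error surfaces on the next socket call or via SO_ERROR; a sketch using the real TCP_USER_TIMEOUT and SO_ERROR options (no actual connection is made here):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        unsigned int ms = 5000;   /* give up after 5 s without ACKed progress */

        /* Bounds how long transmitted data may stay unacknowledged before
         * the kernel aborts the connection via the tcp_write_err() path. */
        setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &ms, sizeof(ms));

        /* ... connect(), write(), then later a send() fails ... */

        int err = 0;
        socklen_t len = sizeof(err);
        getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len); /* reads sk->sk_err */
        if (err)
            fprintf(stderr, "socket error: %s\n", strerror(err)); /* e.g. ETIMEDOUT */
        close(fd);
        return 0;
    }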
104 static int tcp_out_of_resources(struct sock *sk, bool do_reset) in tcp_out_of_resources() argument
106 struct tcp_sock *tp = tcp_sk(sk); in tcp_out_of_resources()
115 if (READ_ONCE(sk->sk_err_soft)) in tcp_out_of_resources()
118 if (tcp_check_oom(sk, shift)) { in tcp_out_of_resources()
126 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_out_of_resources()
127 tcp_done(sk); in tcp_out_of_resources()
128 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); in tcp_out_of_resources()
132 if (!check_net(sock_net(sk))) { in tcp_out_of_resources()
134 tcp_done(sk); in tcp_out_of_resources()
146 static int tcp_orphan_retries(struct sock *sk, bool alive) in tcp_orphan_retries() argument
148 int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */ in tcp_orphan_retries()
151 if (READ_ONCE(sk->sk_err_soft) && !alive) in tcp_orphan_retries()
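tcp_out_of_resources() reaps orphaned sockets under memory pressure (counting TCPABORTONMEMORY), and tcp_orphan_retries() picks the retry budget those orphans get. Reading the matched lines alongside the kernel's comments, the budget logic is roughly the model below; treat it as a paraphrase, not the exact source:

    #include <stdio.h>

    /* Model of tcp_orphan_retries(): pick the retry budget for a socket the
     * application has already closed. 'alive' means the backed-off RTO is
     * still below TCP_RTO_MAX, per the callers above. */
    static int orphan_retries_model(int sysctl_orphan_retries, int soft_err, int alive)
    {
        int retries = sysctl_orphan_retries; /* net.ipv4.tcp_orphan_retries, may be 0 */

        if (soft_err && !alive)    /* an ICMP hinted trouble and RTO is maxed: quit now */
            retries = 0;
        if (retries == 0 && alive) /* 0 means "choose a safe default" while still alive */
            retries = 8;           /* ~100+ s with the 200 ms minimum RTO */
        return retries;
    }

    int main(void)
    {
        printf("budget: %d\n", orphan_retries_model(0, 0, 1)); /* prints 8 */
        return 0;
    }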
162 static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) in tcp_mtu_probing() argument
164 const struct net *net = sock_net(sk); in tcp_mtu_probing()
175 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; in tcp_mtu_probing()
179 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_probing()
181 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtu_probing()
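When tcp_mtu_probing() runs (on persistent retransmission, see tcp_write_timeout() below), it assumes a path-MTU black hole and halves the probe MSS before resyncing with tcp_sync_mss(). A userspace model of the shrink step; the sysctl names are real, but the clamp order and the fake 40-byte header overhead here are assumptions:

    #include <stdio.h>

    static int min_i(int a, int b) { return a < b ? a : b; }
    static int max_i(int a, int b) { return a > b ? a : b; }

    /* Stand-ins for tcp_mtu_to_mss()/tcp_mss_to_mtu(), faked with a fixed
     * 40-byte header overhead. */
    static int mtu_to_mss(int mtu) { return mtu - 40; }
    static int mss_to_mtu(int mss) { return mss + 40; }

    static int shrink_search_low(int search_low, int base_mss,
                                 int probe_floor, int min_snd_mss)
    {
        int mss = mtu_to_mss(search_low) >> 1;  /* halve the probe MSS */
        mss = min_i(base_mss, mss);             /* net.ipv4.tcp_base_mss */
        mss = max_i(probe_floor, mss);          /* net.ipv4.tcp_mtu_probe_floor */
        mss = max_i(min_snd_mss, mss);          /* net.ipv4.tcp_min_snd_mss */
        return mss_to_mtu(mss);
    }

    int main(void)
    {
        printf("new search_low: %d\n", shrink_search_low(1500, 1024, 48, 48));
        return 0;
    }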
184 static unsigned int tcp_model_timeout(struct sock *sk, in tcp_model_timeout() argument
211 static bool retransmits_timed_out(struct sock *sk, in retransmits_timed_out() argument
217 if (!inet_csk(sk)->icsk_retransmits) in retransmits_timed_out()
220 start_ts = tcp_sk(sk)->retrans_stamp; in retransmits_timed_out()
224 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in retransmits_timed_out()
225 rto_base = tcp_timeout_init(sk); in retransmits_timed_out()
226 timeout = tcp_model_timeout(sk, boundary, rto_base); in retransmits_timed_out()
229 return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0; in retransmits_timed_out()
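retransmits_timed_out() converts a retry count ("boundary") into a wall-clock budget via tcp_model_timeout(): the RTO doubles from rto_base until it saturates at TCP_RTO_MAX (120 s), and each further round costs a flat TCP_RTO_MAX. A runnable model of that closed form, assuming the kernel's default constants:

    #include <stdio.h>

    #define TCP_RTO_MAX_MS 120000u   /* 120 s */

    static unsigned ilog2_u(unsigned v) { unsigned l = 0; while (v >>= 1) l++; return l; }

    /* Model of tcp_model_timeout(): total time consumed by `boundary`
     * retransmissions starting from rto_base_ms, doubling up to TCP_RTO_MAX,
     * then growing linearly. Returns milliseconds. */
    static unsigned model_timeout_ms(unsigned boundary, unsigned rto_base_ms)
    {
        unsigned thresh = ilog2_u(TCP_RTO_MAX_MS / rto_base_ms);

        if (boundary <= thresh)
            return ((2u << boundary) - 1) * rto_base_ms;
        return ((2u << thresh) - 1) * rto_base_ms +
               (boundary - thresh) * TCP_RTO_MAX_MS;
    }

    int main(void)
    {
        /* Defaults: 200 ms minimum RTO and net.ipv4.tcp_retries2 = 15
         * reproduce the well-known ~924.6 s give-up time. */
        printf("%.1f s\n", model_timeout_ms(15, 200) / 1000.0);
        return 0;
    }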
233 static int tcp_write_timeout(struct sock *sk) in tcp_write_timeout() argument
235 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timeout()
236 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_timeout()
237 struct net *net = sock_net(sk); in tcp_write_timeout()
241 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { in tcp_write_timeout()
243 __dst_negative_advice(sk); in tcp_write_timeout()
249 if (sk->sk_state == TCP_SYN_SENT) in tcp_write_timeout()
254 if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) { in tcp_write_timeout()
256 tcp_mtu_probing(icsk, sk); in tcp_write_timeout()
258 __dst_negative_advice(sk); in tcp_write_timeout()
262 if (sock_flag(sk, SOCK_DEAD)) { in tcp_write_timeout()
265 retry_until = tcp_orphan_retries(sk, alive); in tcp_write_timeout()
267 !retransmits_timed_out(sk, retry_until, 0); in tcp_write_timeout()
269 if (tcp_out_of_resources(sk, do_reset)) in tcp_write_timeout()
274 expired = retransmits_timed_out(sk, retry_until, in tcp_write_timeout()
276 tcp_fastopen_active_detect_blackhole(sk, expired); in tcp_write_timeout()
279 tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB, in tcp_write_timeout()
285 tcp_write_err(sk); in tcp_write_timeout()
289 if (sk_rethink_txhash(sk)) { in tcp_write_timeout()
291 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH); in tcp_write_timeout()
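Taken together, tcp_write_timeout() walks a ladder: SYN states expire against tcp_syn_retries; crossing net.ipv4.tcp_retries1 triggers MTU probing and dst renegotiation without yet aborting; orphaned sockets get the tcp_orphan_retries() budget and may be reaped early by tcp_out_of_resources(); everything else expires against net.ipv4.tcp_retries2, clamped by TCP_USER_TIMEOUT. On expiry the RTO_CB BPF hook runs and tcp_write_err() aborts; a surviving timeout rehashes the flow (TCPTIMEOUTREHASH). A compressed model of the ladder; it paraphrases the control flow and substitutes a retry count for the real elapsed-time check:

    #include <stdbool.h>
    #include <stdio.h>

    /* All inputs are stand-ins for kernel state; timed_out(budget) models
     * retransmits_timed_out() against that retry budget. */
    struct state {
        bool syn_state;   /* SYN_SENT / SYN_RECV */
        bool orphaned;    /* SOCK_DEAD: closed by the application */
        int  retransmits; /* icsk_retransmits */
    };

    static bool timed_out(const struct state *s, int budget)
    {
        return s->retransmits > budget; /* real code compares elapsed time */
    }

    static bool write_timeout_model(const struct state *s,
                                    int syn_retries, int retries1, int retries2,
                                    int orphan_budget)
    {
        if (s->syn_state)
            return timed_out(s, syn_retries);   /* give up the handshake */
        if (timed_out(s, retries1)) {
            /* past retries1: suspect a PMTU black hole, re-route:
             * tcp_mtu_probing(); __dst_negative_advice(); */
        }
        if (s->orphaned)
            return timed_out(s, orphan_budget); /* or tcp_out_of_resources() */
        return timed_out(s, retries2);          /* incl. TCP_USER_TIMEOUT clamp */
    }

    int main(void)
    {
        struct state s = { false, false, 16 };
        printf("expired: %d\n", write_timeout_model(&s, 6, 3, 15, 8));
        return 0;
    }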
298 void tcp_delack_timer_handler(struct sock *sk) in tcp_delack_timer_handler() argument
300 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_delack_timer_handler()
301 struct tcp_sock *tp = tcp_sk(sk); in tcp_delack_timer_handler()
303 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) in tcp_delack_timer_handler()
309 tcp_sack_compress_send_ack(sk); in tcp_delack_timer_handler()
317 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); in tcp_delack_timer_handler()
322 if (inet_csk_ack_scheduled(sk)) { in tcp_delack_timer_handler()
323 if (!inet_csk_in_pingpong_mode(sk)) { in tcp_delack_timer_handler()
330 inet_csk_exit_pingpong_mode(sk); in tcp_delack_timer_handler()
334 tcp_send_ack(sk); in tcp_delack_timer_handler()
335 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); in tcp_delack_timer_handler()
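The delayed-ACK handler either re-arms itself if it fired before icsk_ack.timeout, leaves pingpong (interactive) mode to shorten future ACK delays, or sends the pending ACK and counts DELAYEDACKS. Applications that cannot tolerate the delay can request immediate ACKs with the real, non-sticky TCP_QUICKACK option:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* Request immediate ACKs on fd for the current burst; the kernel may
     * re-enter delayed-ACK (pingpong) mode later, so latency-critical code
     * typically re-asserts this around each receive. */
    static int quickack(int fd)
    {
        int one = 1;
        return setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
    }

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        return quickack(fd);
    }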
353 struct sock *sk = &icsk->icsk_inet.sk; in tcp_delack_timer() local
355 bh_lock_sock(sk); in tcp_delack_timer()
356 if (!sock_owned_by_user(sk)) { in tcp_delack_timer()
357 tcp_delack_timer_handler(sk); in tcp_delack_timer()
359 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); in tcp_delack_timer()
361 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags)) in tcp_delack_timer()
362 sock_hold(sk); in tcp_delack_timer()
364 bh_unlock_sock(sk); in tcp_delack_timer()
365 sock_put(sk); in tcp_delack_timer()
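tcp_delack_timer() shows the standard timer-vs-process-context dance: grab the bottom-half socket lock, run the handler only if the application does not currently own the socket, otherwise set a deferred bit in sk_tsq_flags (taking a reference) so the work runs when the owner releases the socket. A userspace analogue of that try-lock-or-defer pattern, assuming pthreads:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_int deferred;      /* models a bit in sk->sk_tsq_flags */

    static void handler(void) { puts("timer work"); }

    /* Timer context: run now if the 'socket' is free, else defer. */
    static void timer_fires(void)
    {
        if (pthread_mutex_trylock(&sock_lock) == 0) {
            handler();
            pthread_mutex_unlock(&sock_lock);
        } else {
            atomic_store(&deferred, 1); /* owner will pick this up */
        }
    }

    /* Process context: on 'release_sock', run any deferred work. */
    static void release_sock_model(void)
    {
        if (atomic_exchange(&deferred, 0))
            handler();
        pthread_mutex_unlock(&sock_lock);
    }

    int main(void)
    {
        pthread_mutex_lock(&sock_lock); /* the app owns the socket */
        timer_fires();                  /* defers */
        release_sock_model();           /* runs the deferred handler */
        return 0;
    }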
368 static void tcp_probe_timer(struct sock *sk) in tcp_probe_timer() argument
370 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_probe_timer()
371 struct sk_buff *skb = tcp_send_head(sk); in tcp_probe_timer()
372 struct tcp_sock *tp = tcp_sk(sk); in tcp_probe_timer()
399 max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); in tcp_probe_timer()
400 if (sock_flag(sk, SOCK_DEAD)) { in tcp_probe_timer()
403 max_probes = tcp_orphan_retries(sk, alive); in tcp_probe_timer()
406 if (tcp_out_of_resources(sk, true)) in tcp_probe_timer()
411 abort: tcp_write_err(sk); in tcp_probe_timer()
414 tcp_send_probe0(sk); in tcp_probe_timer()
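tcp_probe_timer() drives zero-window probing: the probe budget comes from net.ipv4.tcp_retries2 (or the orphan budget for dead sockets), TCP_USER_TIMEOUT can cut it short, and exhaustion aborts via tcp_write_err(). A rough sketch of the exit conditions; the exact comparisons are assumptions:

    #include <stdio.h>

    /* Model of tcp_probe_timer()'s exit conditions for the 0-window probe. */
    static int probe_timer_model(int probes_out, int max_probes,
                                 int user_timeout_expired)
    {
        if (user_timeout_expired)     /* TCP_USER_TIMEOUT elapsed with data pending */
            return -1;                /* abort: tcp_write_err() */
        if (probes_out >= max_probes) /* budget from tcp_retries2 / orphan retries */
            return -1;                /* abort */
        return 0;                     /* send another probe: tcp_send_probe0() */
    }

    int main(void)
    {
        printf("%d\n", probe_timer_model(5, 15, 0)); /* keep probing */
        return 0;
    }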
422 static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req) in tcp_fastopen_synack_timer() argument
424 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastopen_synack_timer()
425 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastopen_synack_timer()
434 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1; in tcp_fastopen_synack_timer()
437 tcp_write_err(sk); in tcp_fastopen_synack_timer()
442 tcp_enter_loss(sk); in tcp_fastopen_synack_timer()
448 inet_rtx_syn_ack(sk, req); in tcp_fastopen_synack_timer()
453 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_fastopen_synack_timer()
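For a Fast Open connection still awaiting the final ACK of the handshake, the SYNACK is retransmitted via inet_rtx_syn_ack(), with one extra retry beyond net.ipv4.tcp_synack_retries granted for Fast Open, before tcp_write_err() gives up. Server-side TFO itself is enabled with the real TCP_FASTOPEN option:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int qlen = 16;  /* max pending TFO requests not yet fully ACKed */

        /* Enable Fast Open on a listener; set before listen(). */
        setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
        /* ... bind(), listen(), accept() as usual ... */
        return 0;
    }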
457 static bool tcp_rtx_probe0_timed_out(const struct sock *sk, in tcp_rtx_probe0_timed_out() argument
460 const struct tcp_sock *tp = tcp_sk(sk); in tcp_rtx_probe0_timed_out()
464 rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp; in tcp_rtx_probe0_timed_out()
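tcp_rtx_probe0_timed_out() keeps a zero-window retransmit stall from being declared dead too eagerly: the connection must have been silent in both directions for an extended period. A model of the check; the 2 * TCP_RTO_MAX bound is inferred from the kernel's logic around the matched line and should be treated as an assumption:

    #include <stdio.h>

    /* Model: only give up on the stalled 0-window retransmit if both
     * receiving and retransmitting have been dead for more than
     * 2 * TCP_RTO_MAX (240 s). */
    static int rtx_probe0_timed_out_model(long ms_since_last_rcv,
                                          long ms_since_first_rtx)
    {
        const long timeout_ms = 2L * 120000;  /* 2 * TCP_RTO_MAX */

        if (ms_since_last_rcv <= timeout_ms)  /* peer still talks to us */
            return 0;
        return ms_since_first_rtx > timeout_ms;
    }

    int main(void)
    {
        printf("%d\n", rtx_probe0_timed_out_model(300000, 300000)); /* 1 */
        return 0;
    }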
485 void tcp_retransmit_timer(struct sock *sk) in tcp_retransmit_timer() argument
487 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_timer()
488 struct net *net = sock_net(sk); in tcp_retransmit_timer()
489 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_retransmit_timer()
494 lockdep_sock_is_held(sk)); in tcp_retransmit_timer()
496 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && in tcp_retransmit_timer()
497 sk->sk_state != TCP_FIN_WAIT1); in tcp_retransmit_timer()
498 tcp_fastopen_synack_timer(sk, req); in tcp_retransmit_timer()
508 skb = tcp_rtx_queue_head(sk); in tcp_retransmit_timer()
514 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) && in tcp_retransmit_timer()
515 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) { in tcp_retransmit_timer()
521 struct inet_sock *inet = inet_sk(sk); in tcp_retransmit_timer()
525 if (sk->sk_family == AF_INET) { in tcp_retransmit_timer()
533 else if (sk->sk_family == AF_INET6) { in tcp_retransmit_timer()
535 &sk->sk_v6_daddr, ntohs(inet->inet_dport), in tcp_retransmit_timer()
541 if (tcp_rtx_probe0_timed_out(sk, skb)) { in tcp_retransmit_timer()
542 tcp_write_err(sk); in tcp_retransmit_timer()
545 tcp_enter_loss(sk); in tcp_retransmit_timer()
546 tcp_retransmit_skb(sk, skb, 1); in tcp_retransmit_timer()
547 __sk_dst_reset(sk); in tcp_retransmit_timer()
551 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS); in tcp_retransmit_timer()
552 if (tcp_write_timeout(sk)) in tcp_retransmit_timer()
573 __NET_INC_STATS(sock_net(sk), mib_idx); in tcp_retransmit_timer()
576 tcp_enter_loss(sk); in tcp_retransmit_timer()
579 if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) { in tcp_retransmit_timer()
583 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_retransmit_timer()
616 if (sk->sk_state == TCP_ESTABLISHED && in tcp_retransmit_timer()
622 tcp_rto_min(sk), in tcp_retransmit_timer()
624 } else if (sk->sk_state != TCP_SYN_SENT || in tcp_retransmit_timer()
632 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_retransmit_timer()
633 tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX); in tcp_retransmit_timer()
634 if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0)) in tcp_retransmit_timer()
635 __sk_dst_reset(sk); in tcp_retransmit_timer()
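The tail of tcp_retransmit_timer() re-arms the timer with exponential backoff: icsk_backoff and icsk_rto grow on each expiry, capped at TCP_RTO_MAX and clamped by tcp_clamp_rto_to_user_timeout(); past tcp_retries1 + 1 the cached route is also dropped (__sk_dst_reset). A runnable illustration of the raw backoff schedule, assuming a 200 ms starting RTO:

    #include <stdio.h>

    #define TCP_RTO_MAX_MS 120000u

    int main(void)
    {
        /* The re-arm at the end of tcp_retransmit_timer(): each expiry
         * doubles the RTO up to TCP_RTO_MAX (before any user-timeout clamp). */
        unsigned rto = 200;  /* assumed initial RTO in ms */
        for (int attempt = 1; attempt <= 15; attempt++) {
            printf("attempt %2d: rto = %6u ms\n", attempt, rto);
            rto <<= 1;
            if (rto > TCP_RTO_MAX_MS)
                rto = TCP_RTO_MAX_MS;
        }
        return 0;
    }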
642 void tcp_write_timer_handler(struct sock *sk) in tcp_write_timer_handler() argument
644 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timer_handler()
647 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || in tcp_write_timer_handler()
652 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); in tcp_write_timer_handler()
656 tcp_mstamp_refresh(tcp_sk(sk)); in tcp_write_timer_handler()
661 tcp_rack_reo_timeout(sk); in tcp_write_timer_handler()
664 tcp_send_loss_probe(sk); in tcp_write_timer_handler()
668 tcp_retransmit_timer(sk); in tcp_write_timer_handler()
672 tcp_probe_timer(sk); in tcp_write_timer_handler()
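tcp_write_timer_handler() multiplexes one kernel timer across four events, dispatching on icsk_pending: RACK reordering timeout, tail-loss probe, RTO retransmit, and zero-window probe. A sketch of that dispatch; the enum stands in for the kernel's ICSK_TIME_* constants:

    #include <stdio.h>

    enum timer_event { EV_REO, EV_LOSS_PROBE, EV_RETRANS, EV_PROBE0 };

    static const char *dispatch(enum timer_event ev)
    {
        switch (ev) {
        case EV_REO:        return "tcp_rack_reo_timeout"; /* RACK reorder window */
        case EV_LOSS_PROBE: return "tcp_send_loss_probe";  /* tail-loss probe */
        case EV_RETRANS:    return "tcp_retransmit_timer"; /* RTO expired */
        case EV_PROBE0:     return "tcp_probe_timer";      /* zero-window probe */
        }
        return "?";
    }

    int main(void)
    {
        printf("%s\n", dispatch(EV_RETRANS));
        return 0;
    }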
681 struct sock *sk = &icsk->icsk_inet.sk; in tcp_write_timer() local
683 bh_lock_sock(sk); in tcp_write_timer()
684 if (!sock_owned_by_user(sk)) { in tcp_write_timer()
685 tcp_write_timer_handler(sk); in tcp_write_timer()
688 if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags)) in tcp_write_timer()
689 sock_hold(sk); in tcp_write_timer()
691 bh_unlock_sock(sk); in tcp_write_timer()
692 sock_put(sk); in tcp_write_timer()
703 void tcp_set_keepalive(struct sock *sk, int val) in tcp_set_keepalive() argument
705 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) in tcp_set_keepalive()
708 if (val && !sock_flag(sk, SOCK_KEEPOPEN)) in tcp_set_keepalive()
709 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); in tcp_set_keepalive()
711 inet_csk_delete_keepalive_timer(sk); in tcp_set_keepalive()
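tcp_set_keepalive() is what runs beneath setsockopt(SO_KEEPALIVE): enabling it arms sk_timer to fire after keepalive_time_when(), disabling deletes the timer. The full userspace knob set, all real Linux options:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int on = 1, idle = 60, intvl = 10, cnt = 5;

        setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));        /* arm sk_timer */
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));   /* first probe after 60 s idle */
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));/* then every 10 s */
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));      /* give up after 5 unanswered */
        return 0;
    }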
718 struct sock *sk = from_timer(sk, t, sk_timer); in tcp_keepalive_timer() local
719 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_keepalive_timer()
720 struct tcp_sock *tp = tcp_sk(sk); in tcp_keepalive_timer()
724 bh_lock_sock(sk); in tcp_keepalive_timer()
725 if (sock_owned_by_user(sk)) { in tcp_keepalive_timer()
727 inet_csk_reset_keepalive_timer (sk, HZ/20); in tcp_keepalive_timer()
731 if (sk->sk_state == TCP_LISTEN) { in tcp_keepalive_timer()
737 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { in tcp_keepalive_timer()
739 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; in tcp_keepalive_timer()
742 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); in tcp_keepalive_timer()
746 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_keepalive_timer()
750 if (!sock_flag(sk, SOCK_KEEPOPEN) || in tcp_keepalive_timer()
751 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))) in tcp_keepalive_timer()
757 if (tp->packets_out || !tcp_write_queue_empty(sk)) in tcp_keepalive_timer()
773 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_keepalive_timer()
774 tcp_write_err(sk); in tcp_keepalive_timer()
777 if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) { in tcp_keepalive_timer()
792 inet_csk_reset_keepalive_timer (sk, elapsed); in tcp_keepalive_timer()
796 tcp_done(sk); in tcp_keepalive_timer()
799 bh_unlock_sock(sk); in tcp_keepalive_timer()
800 sock_put(sk); in tcp_keepalive_timer()
806 struct sock *sk = (struct sock *)tp; in tcp_compressed_ack_kick() local
808 bh_lock_sock(sk); in tcp_compressed_ack_kick()
809 if (!sock_owned_by_user(sk)) { in tcp_compressed_ack_kick()
816 tcp_send_ack(sk); in tcp_compressed_ack_kick()
820 &sk->sk_tsq_flags)) in tcp_compressed_ack_kick()
821 sock_hold(sk); in tcp_compressed_ack_kick()
823 bh_unlock_sock(sk); in tcp_compressed_ack_kick()
825 sock_put(sk); in tcp_compressed_ack_kick()
830 void tcp_init_xmit_timers(struct sock *sk) in tcp_init_xmit_timers() argument
832 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer, in tcp_init_xmit_timers()
834 hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC, in tcp_init_xmit_timers()
836 tcp_sk(sk)->pacing_timer.function = tcp_pace_kick; in tcp_init_xmit_timers()
838 hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC, in tcp_init_xmit_timers()
840 tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick; in tcp_init_xmit_timers()
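tcp_init_xmit_timers() wires up the classic retransmit/delack/keepalive timers plus two CLOCK_MONOTONIC hrtimers for pacing and compressed ACKs. A loose userspace analogue of one such monotonic one-shot timer using timerfd (a real Linux API; the kernel itself uses hrtimers directly):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/timerfd.h>
    #include <unistd.h>

    int main(void)
    {
        /* One-shot 200 ms monotonic timer, loosely mirroring how the TCP
         * pacing/compressed-ACK hrtimers are armed on CLOCK_MONOTONIC. */
        int tfd = timerfd_create(CLOCK_MONOTONIC, 0);
        struct itimerspec its = { .it_value = { .tv_sec = 0, .tv_nsec = 200000000 } };
        uint64_t expirations;

        timerfd_settime(tfd, 0, &its, NULL);
        read(tfd, &expirations, sizeof(expirations)); /* blocks until it fires */
        printf("fired %llu time(s)\n", (unsigned long long)expirations);
        close(tfd);
        return 0;
    }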