Searched refs:icsk_ca_state (Results 1 – 15 of 15) sorted by relevance
32   if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)  in tcp_rack_reo_wnd()
174  if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {  in tcp_rack_reo_timeout()
233  const u8 state = inet_csk(sk)->icsk_ca_state;  in tcp_newreno_mark_lost()
92   if (icsk->icsk_ca_state == TCP_CA_Open) {  in measure_rtt()
109  if (icsk->icsk_ca_state == TCP_CA_Open)  in measure_achieved_throughput()
119  if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_Disorder))) {  in measure_achieved_throughput()
256  if (icsk->icsk_ca_state != TCP_CA_Open &&  in tcpnv_acked()
257  icsk->icsk_ca_state != TCP_CA_Disorder)  in tcpnv_acked()
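The H-TCP and TCP-NV hits above (and the Yeah/BIC hits further down) share one pattern: a congestion-control module only trusts its RTT/throughput samples while no loss recovery is in progress. A minimal sketch of that guard in a hypothetical pkts_acked hook (example_pkts_acked is not in the tree; the ack_sample-based hook signature of recent kernels is assumed):

#include <net/tcp.h>

/* Hypothetical example, not in the tree: the guard a CC module typically
 * places at the top of its pkts_acked hook. Samples taken in CWR, Recovery
 * or Loss are skipped because retransmissions and reordering skew them. */
static void example_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_Disorder)))
		return;

	if (sample->rtt_us > 0) {
		/* ... feed sample->rtt_us into the module's RTT estimator ... */
	}
}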
899   tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,  in tcp_check_sack_reordering()
1839  if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker)  in tcp_sacktag_write_queue()
1990  bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;  in tcp_enter_loss()
1995  if (icsk->icsk_ca_state <= TCP_CA_Disorder ||  in tcp_enter_loss()
1997  (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {  in tcp_enter_loss()
2011  if (icsk->icsk_ca_state <= TCP_CA_Disorder &&  in tcp_enter_loss()
2388  DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");  in tcp_try_undo_recovery()
2390  if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)  in tcp_try_undo_recovery()
2510  (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {  in tcp_end_cwnd_reduction()
2523  if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {  in tcp_enter_cwr()
[all …]
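The range comparisons in these tcp_input.c hits (< TCP_CA_Recovery, <= TCP_CA_Disorder, < TCP_CA_CWR) work because enum tcp_ca_state is ordered by increasing severity: Open(0) < Disorder(1) < CWR(2) < Recovery(3) < Loss(4). A sketch of the idiom, assuming that ordering (example_in_recovery_or_loss is a hypothetical name):

#include <net/tcp.h>

/* Hypothetical helper illustrating the ordering-based test used above:
 * true for TCP_CA_Recovery and TCP_CA_Loss, false for Open/Disorder/CWR. */
static inline bool example_in_recovery_or_loss(const struct sock *sk)
{
	return inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery;
}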
495  if (icsk->icsk_ca_state == TCP_CA_Recovery) {  in tcp_retransmit_timer()
500  } else if (icsk->icsk_ca_state == TCP_CA_Loss) {  in tcp_retransmit_timer()
502  } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||  in tcp_retransmit_timer()
166 inet_csk(sk)->icsk_ca_state, in tcp_rate_gen()
66 if (icsk->icsk_ca_state == TCP_CA_Open) in tcp_yeah_pkts_acked()
193 if (icsk->icsk_ca_state == TCP_CA_Open) { in bictcp_acked()
156 new_state != inet_csk(sk)->icsk_ca_state) in dctcp_state()
1624  if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&  in tcp_cwnd_application_limited()
1943  if (icsk->icsk_ca_state >= TCP_CA_Recovery)  in tcp_tso_should_defer()
2104  inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||  in tcp_mtu_probe()
2494  (icsk->icsk_ca_state != TCP_CA_Open &&  in tcp_schedule_loss_probe()
2495  icsk->icsk_ca_state != TCP_CA_CWR))  in tcp_schedule_loss_probe()
3068  if (icsk->icsk_ca_state != TCP_CA_Loss)  in tcp_xmit_retransmit_queue()
401 icsk->icsk_ca_state == TCP_CA_Open) { in tcp_update_metrics()
481 u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state; in bbr_set_cwnd_to_recover_or_restore()
3234  info->tcpi_ca_state = icsk->icsk_ca_state;  in tcp_get_info()
3382  nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);  in tcp_get_timestamping_opt_stats()
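tcp_get_info() exports icsk_ca_state to userspace as tcpi_ca_state in struct tcp_info, so the current state of a live connection can be read with getsockopt(TCP_INFO). A minimal userspace sketch (print_ca_state is a hypothetical helper; fd is assumed to be a connected TCP socket):

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Read the congestion-avoidance state that tcp_get_info() copied out of
 * icsk_ca_state: 0=Open 1=Disorder 2=CWR 3=Recovery 4=Loss. */
static int print_ca_state(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	memset(&ti, 0, sizeof(ti));
	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0)
		return -1;

	printf("tcpi_ca_state = %u\n", (unsigned int)ti.tcpi_ca_state);
	return 0;
}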
104 __u8 icsk_ca_state:6, member
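The 6-bit icsk_ca_state bitfield in struct inet_connection_sock holds one of the enum tcp_ca_state values. For orientation, the definition as it appears in include/net/tcp.h of the mainline tree (reproduced from memory; the TCPF_CA_* macros are the bitmask forms used for set-membership tests in the hits above):

enum tcp_ca_state {
	TCP_CA_Open = 0,
#define TCPF_CA_Open	(1 << TCP_CA_Open)
	TCP_CA_Disorder = 1,
#define TCPF_CA_Disorder (1 << TCP_CA_Disorder)
	TCP_CA_CWR = 2,
#define TCPF_CA_CWR	(1 << TCP_CA_CWR)
	TCP_CA_Recovery = 3,
#define TCPF_CA_Recovery (1 << TCP_CA_Recovery)
	TCP_CA_Loss = 4
#define TCPF_CA_Loss	(1 << TCP_CA_Loss)
};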
1111  icsk->icsk_ca_state = ca_state;  in tcp_set_ca_state()
1186  (1 << inet_csk(sk)->icsk_ca_state);  in tcp_in_cwnd_reduction()
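tcp_set_ca_state() (hit 1111) is the single writer of the field; it invokes the congestion-control module's set_state callback before updating icsk_ca_state, which is why dctcp_state() above can compare new_state against the still-current value. A hedged sketch of such a hook (example_set_state is hypothetical; the signature follows the set_state member of struct tcp_congestion_ops):

#include <net/tcp.h>

/* Hypothetical .set_state hook: called by tcp_set_ca_state() with the
 * state about to be entered, while icsk_ca_state still holds the old one. */
static void example_set_state(struct sock *sk, u8 new_state)
{
	if (new_state != inet_csk(sk)->icsk_ca_state) {
		/* ... e.g. snapshot cwnd/ssthresh when entering TCP_CA_Loss ... */
	}
}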