Lines matching refs: icsk — uses of struct inet_connection_sock *icsk in the Linux TCP input path (net/ipv4/tcp_input.c)

119 void clean_acked_data_enable(struct inet_connection_sock *icsk,  in clean_acked_data_enable()  argument
122 icsk->icsk_clean_acked = cad; in clean_acked_data_enable()
127 void clean_acked_data_disable(struct inet_connection_sock *icsk) in clean_acked_data_disable() argument
130 icsk->icsk_clean_acked = NULL; in clean_acked_data_disable()
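These two helpers expose a per-socket hook, icsk_clean_acked, that a caller such as TLS device offload can register; tcp_ack() then invokes it on each cumulative ACK (see lines 3722-3723 below) so already-acknowledged data can be released early. A minimal userspace sketch of the same enable/invoke/disable pattern — all names here (fake_sock, on_acked, clean_acked_enable) are invented for illustration, not kernel API:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the socket: one function-pointer slot, like icsk_clean_acked. */
struct fake_sock {
    void (*clean_acked)(struct fake_sock *sk, unsigned int acked_seq);
};

static void clean_acked_enable(struct fake_sock *sk,
                               void (*cb)(struct fake_sock *, unsigned int))
{
    sk->clean_acked = cb;          /* register the hook */
}

static void clean_acked_disable(struct fake_sock *sk)
{
    sk->clean_acked = NULL;        /* unregister */
}

/* Example consumer: would free per-socket state covered by acked_seq. */
static void on_acked(struct fake_sock *sk, unsigned int acked_seq)
{
    printf("release data up to seq %u\n", acked_seq);
}

int main(void)
{
    struct fake_sock sk = { 0 };

    clean_acked_enable(&sk, on_acked);
    if (sk.clean_acked)            /* mirrors the NULL check in tcp_ack() */
        sk.clean_acked(&sk, 1000);
    clean_acked_disable(&sk);
    return 0;
}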
228 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss() local
229 const unsigned int lss = icsk->icsk_ack.last_seg_size; in tcp_measure_rcv_mss()
232 icsk->icsk_ack.last_seg_size = 0; in tcp_measure_rcv_mss()
238 if (len >= icsk->icsk_ack.rcv_mss) { in tcp_measure_rcv_mss()
239 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len, in tcp_measure_rcv_mss()
242 if (unlikely(len > icsk->icsk_ack.rcv_mss + in tcp_measure_rcv_mss()
265 icsk->icsk_ack.last_seg_size = len; in tcp_measure_rcv_mss()
267 icsk->icsk_ack.rcv_mss = len; in tcp_measure_rcv_mss()
271 if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) in tcp_measure_rcv_mss()
272 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2; in tcp_measure_rcv_mss()
273 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; in tcp_measure_rcv_mss()
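tcp_measure_rcv_mss() keeps a running estimate of the sender's effective MSS (icsk_ack.rcv_mss) from observed payload sizes; that estimate drives delayed-ACK decisions. A rough standalone sketch of the estimator under assumed names (rcv_mss_estimate, observe_segment) — the real function also accounts for header overhead, the last_seg_size heuristic, and the ICSK_ACK_PUSHED flags shown above:

#include <stdio.h>

#define TCP_MSS_DEFAULT 536u   /* conservative initial guess, as in TCP */

static unsigned int rcv_mss_estimate = TCP_MSS_DEFAULT;
static unsigned int last_seg_size;

/* Feed one received payload length into the estimator. */
static void observe_segment(unsigned int len, unsigned int mss_cap)
{
    last_seg_size = 0;
    if (len >= rcv_mss_estimate) {
        /* A segment at least as big as the estimate: grow it (capped). */
        rcv_mss_estimate = len < mss_cap ? len : mss_cap;
    } else {
        /* Smaller segment: remember it, it may be the sender's real MSS. */
        last_seg_size = len;
        rcv_mss_estimate = len;   /* the kernel applies extra checks here */
    }
}

int main(void)
{
    unsigned int samples[] = { 512, 1448, 1448, 700 };
    for (unsigned int i = 0; i < 4; i++) {
        observe_segment(samples[i], 1460);
        printf("len=%u -> rcv_mss=%u\n", samples[i], rcv_mss_estimate);
    }
    return 0;
}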
279 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack() local
280 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); in tcp_incr_quickack()
285 if (quickacks > icsk->icsk_ack.quick) in tcp_incr_quickack()
286 icsk->icsk_ack.quick = quickacks; in tcp_incr_quickack()
291 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode() local
295 icsk->icsk_ack.ato = TCP_ATO_MIN; in tcp_enter_quickack_mode()
305 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode() local
309 (icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk)); in tcp_in_quickack_mode()
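The quickack budget at line 280 is sized so that roughly one immediate ACK can be sent per two full-sized segments across the receive window: quickacks = rcv_wnd / (2 * rcv_mss), then clamped to at least a couple and to a maximum the callers pass in. A small sketch of that accounting (the cap of 16 is an assumption standing in for the kernel's TCP_MAX_QUICKACKS):

#include <stdio.h>

#define MAX_QUICKACKS 16u   /* assumed cap, analogous to TCP_MAX_QUICKACKS */

/* How many immediate ACKs to allow, given window and estimated MSS. */
static unsigned int quickack_budget(unsigned int rcv_wnd, unsigned int rcv_mss)
{
    unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

    if (quickacks == 0)
        quickacks = 2;
    return quickacks < MAX_QUICKACKS ? quickacks : MAX_QUICKACKS;
}

int main(void)
{
    /* e.g. a 64 KB window with 1448-byte segments -> ~22, capped at 16 */
    printf("budget = %u\n", quickack_budget(65536, 1448));
    return 0;
}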
544 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window() local
547 icsk->icsk_ack.quick = 0; in tcp_clamp_window()
745 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_recv() local
756 if (!icsk->icsk_ack.ato) { in tcp_event_data_recv()
761 icsk->icsk_ack.ato = TCP_ATO_MIN; in tcp_event_data_recv()
763 int m = now - icsk->icsk_ack.lrcvtime; in tcp_event_data_recv()
767 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; in tcp_event_data_recv()
768 } else if (m < icsk->icsk_ack.ato) { in tcp_event_data_recv()
769 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; in tcp_event_data_recv()
770 if (icsk->icsk_ack.ato > icsk->icsk_rto) in tcp_event_data_recv()
771 icsk->icsk_ack.ato = icsk->icsk_rto; in tcp_event_data_recv()
772 } else if (m > icsk->icsk_rto) { in tcp_event_data_recv()
780 icsk->icsk_ack.lrcvtime = now; in tcp_event_data_recv()
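In tcp_event_data_recv() the delayed-ACK timeout (icsk_ack.ato) adapts to the packet inter-arrival gap m = now - lrcvtime: tight gaps pull it toward TCP_ATO_MIN, moderate gaps blend it as ato/2 + m capped at the RTO, and a gap larger than the RTO pushes the connection back into quickack mode. A standalone sketch of that update rule, with ATO_MIN and the rto value chosen as illustrative milliseconds rather than the kernel's jiffies-based constants:

#include <stdio.h>

#define ATO_MIN 40u     /* illustrative; the kernel's TCP_ATO_MIN is HZ/25 */

/* Update the delayed-ACK timeout from one inter-arrival gap (all in ms). */
static unsigned int update_ato(unsigned int ato, unsigned int m, unsigned int rto)
{
    if (m <= ATO_MIN / 2) {
        /* Packets arriving back to back: converge toward the minimum. */
        ato = (ato >> 1) + ATO_MIN / 2;
    } else if (m < ato) {
        /* Moderate gap: blend it in, never exceeding the RTO. */
        ato = (ato >> 1) + m;
        if (ato > rto)
            ato = rto;
    }
    /* m > rto would instead trigger quickack mode in the kernel. */
    return ato;
}

int main(void)
{
    unsigned int ato = 200, gaps[] = { 5, 30, 120 };
    for (int i = 0; i < 3; i++) {
        ato = update_ato(ato, gaps[i], 300);
        printf("gap=%ums -> ato=%ums\n", gaps[i], ato);
    }
    return 0;
}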
2096 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_loss() local
2099 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; in tcp_enter_loss()
2104 if (icsk->icsk_ca_state <= TCP_CA_Disorder || in tcp_enter_loss()
2106 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { in tcp_enter_loss()
2109 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
2120 if (icsk->icsk_ca_state <= TCP_CA_Disorder && in tcp_enter_loss()
2133 (new_recovery || icsk->icsk_retransmits) && in tcp_enter_loss()
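The comparisons in tcp_enter_loss() (icsk_ca_state < TCP_CA_Recovery, <= TCP_CA_Disorder) work because the congestion-avoidance states form an ordered enum, Open < Disorder < CWR < Recovery < Loss, so "not yet in recovery" is a single integer compare. A minimal illustration of that idiom; the helper names are made up, and the middle clause of the kernel's full condition (the high_seq check) is omitted:

#include <stdio.h>
#include <stdbool.h>

/* Ordered as in the Linux tcp_ca_state enum. */
enum ca_state { CA_Open, CA_Disorder, CA_CWR, CA_Recovery, CA_Loss };

static bool is_new_recovery(enum ca_state st)
{
    /* Same shape as: new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery */
    return st < CA_Recovery;
}

static bool should_reduce_ssthresh(enum ca_state st, int retransmits)
{
    /* Same shape as the guard on lines 2104-2106 above (high_seq test omitted). */
    return st <= CA_Disorder || (st == CA_Loss && !retransmits);
}

int main(void)
{
    printf("%d %d\n", is_new_recovery(CA_CWR), should_reduce_ssthresh(CA_Loss, 0));
    return 0;
}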
2451 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_undo_cwnd_reduction() local
2453 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); in tcp_undo_cwnd_reduction()
2657 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_failed() local
2659 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; in tcp_mtup_probe_failed()
2660 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_probe_failed()
2667 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_success() local
2673 icsk->icsk_mtup.probe_size; in tcp_mtup_probe_success()
2678 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; in tcp_mtup_probe_success()
2679 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_probe_success()
2680 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtup_probe_success()
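tcp_mtup_probe_failed() and tcp_mtup_probe_success() are the two halves of a binary-search-like walk over the path MTU: a failed probe lowers search_high to probe_size - 1, a successful one raises search_low to probe_size, and tcp_sync_mss() then re-derives the effective MSS from the PMTU cookie. A simplified sketch of that interval update; picking the midpoint as the next probe size is an assumption here, not necessarily the kernel's exact policy:

#include <stdio.h>

struct mtu_probe {
    int search_low;    /* largest size known to work */
    int search_high;   /* current upper bound on what might work */
    int probe_size;    /* size currently being probed, 0 if none in flight */
};

static int next_probe(struct mtu_probe *p)
{
    p->probe_size = (p->search_low + p->search_high) / 2;  /* midpoint guess */
    return p->probe_size;
}

static void probe_failed(struct mtu_probe *p)
{
    p->search_high = p->probe_size - 1;   /* as in tcp_mtup_probe_failed() */
    p->probe_size = 0;
}

static void probe_success(struct mtu_probe *p)
{
    p->search_low = p->probe_size;        /* as in tcp_mtup_probe_success() */
    p->probe_size = 0;
}

int main(void)
{
    struct mtu_probe p = { .search_low = 1024, .search_high = 9000 };

    while (p.search_low < p.search_high - 1) {
        int size = next_probe(&p);
        /* Pretend the path actually supports up to 1500 bytes. */
        if (size <= 1500)
            probe_success(&p);
        else
            probe_failed(&p);
        printf("probe=%d -> [%d, %d]\n", size, p.search_low, p.search_high);
    }
    return 0;
}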
2690 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_simple_retransmit() local
2715 if (icsk->icsk_ca_state != TCP_CA_Loss) { in tcp_simple_retransmit()
2877 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastretrans_alert() local
2901 if (icsk->icsk_ca_state == TCP_CA_Open) { in tcp_fastretrans_alert()
2905 switch (icsk->icsk_ca_state) { in tcp_fastretrans_alert()
2926 switch (icsk->icsk_ca_state) { in tcp_fastretrans_alert()
2946 if (!(icsk->icsk_ca_state == TCP_CA_Open || in tcp_fastretrans_alert()
2958 if (icsk->icsk_ca_state <= TCP_CA_Disorder) in tcp_fastretrans_alert()
2968 if (icsk->icsk_ca_state < TCP_CA_CWR && in tcp_fastretrans_alert()
2969 icsk->icsk_mtup.probe_size && in tcp_fastretrans_alert()
3067 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cong_avoid() local
3069 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); in tcp_cong_avoid()
3078 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rearm_rto() local
3092 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || in tcp_rearm_rto()
3093 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { in tcp_rearm_rto()
3159 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clean_rtx_queue() local
3292 if (unlikely(icsk->icsk_mtup.probe_size && in tcp_clean_rtx_queue()
3328 if (icsk->icsk_ca_ops->pkts_acked) { in tcp_clean_rtx_queue()
3333 icsk->icsk_ca_ops->pkts_acked(sk, &sample); in tcp_clean_rtx_queue()
3341 icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3344 tp->lost_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3349 tp->sacked_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3354 tp->retrans_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3364 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack_probe() local
3372 icsk->icsk_backoff = 0; in tcp_ack_probe()
3414 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cong_control() local
3416 if (icsk->icsk_ca_ops->cong_control) { in tcp_cong_control()
3417 icsk->icsk_ca_ops->cong_control(sk, rs); in tcp_cong_control()
3630 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_ack_event() local
3632 if (icsk->icsk_ca_ops->in_ack_event) in tcp_in_ack_event()
3633 icsk->icsk_ca_ops->in_ack_event(sk, flags); in tcp_in_ack_event()
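Most of the remaining references dispatch through icsk->icsk_ca_ops, the per-socket table of congestion-control callbacks: mandatory hooks such as cong_avoid() are called directly, while optional ones (cong_control, in_ack_event, pkts_acked) are NULL-checked first, exactly as on lines 3416 and 3632 above. A compact sketch of that vtable pattern, with invented hook names and a placeholder growth rule rather than a real congestion-control algorithm:

#include <stdio.h>
#include <stddef.h>

struct toy_sock;

/* Shaped after struct tcp_congestion_ops: some hooks are optional. */
struct cc_ops {
    void (*cong_avoid)(struct toy_sock *sk, unsigned int acked);   /* required */
    void (*in_ack_event)(struct toy_sock *sk, int flags);          /* optional */
};

struct toy_sock {
    const struct cc_ops *ca_ops;   /* plays the role of icsk_ca_ops */
    unsigned int cwnd;
};

static void toy_cong_avoid(struct toy_sock *sk, unsigned int acked)
{
    sk->cwnd += acked;             /* placeholder growth rule, not real Reno */
}

static const struct cc_ops toy_cc = {
    .cong_avoid = toy_cong_avoid,
    /* .in_ack_event left NULL: the caller must check before invoking */
};

static void on_ack(struct toy_sock *sk, unsigned int acked, int flags)
{
    sk->ca_ops->cong_avoid(sk, acked);       /* like tcp_cong_avoid() */
    if (sk->ca_ops->in_ack_event)            /* like tcp_in_ack_event() */
        sk->ca_ops->in_ack_event(sk, flags);
}

int main(void)
{
    struct toy_sock sk = { .ca_ops = &toy_cc, .cwnd = 10 };

    on_ack(&sk, 2, 0);
    printf("cwnd = %u\n", sk.cwnd);
    return 0;
}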
3675 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack() local
3718 icsk->icsk_retransmits = 0; in tcp_ack()
3722 if (icsk->icsk_clean_acked) in tcp_ack()
3723 icsk->icsk_clean_acked(sk, ack); in tcp_ack()
3791 icsk->icsk_probes_out = 0; in tcp_ack()
5886 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_transfer() local
5890 icsk->icsk_af_ops->rebuild_header(sk); in tcp_init_transfer()
5905 icsk->icsk_ca_initialized = 0; in tcp_init_transfer()
5907 if (!icsk->icsk_ca_initialized) in tcp_init_transfer()
5915 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_finish_connect() local
5918 icsk->icsk_ack.lrcvtime = tcp_jiffies32; in tcp_finish_connect()
5921 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_finish_connect()
6035 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_synsent_state_process() local
6057 if (icsk->icsk_retransmits == 0) in tcp_rcv_synsent_state_process()
6134 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
6158 icsk->icsk_accept_queue.rskq_defer_accept || in tcp_rcv_synsent_state_process()
6228 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
6306 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_state_process() local
6331 acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0; in tcp_rcv_state_process()