Lines matching refs: tp — identifier cross-reference hits in the Linux TCP input path (net/ipv4/tcp_input.c). Each entry shows the source line number, the matching line of code, the enclosing function, and whether tp is bound as an argument or a local variable.

242 static void tcp_ecn_queue_cwr(struct tcp_sock *tp)  in tcp_ecn_queue_cwr()  argument
244 if (tp->ecn_flags & TCP_ECN_OK) in tcp_ecn_queue_cwr()
245 tp->ecn_flags |= TCP_ECN_QUEUE_CWR; in tcp_ecn_queue_cwr()
261 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) in tcp_ecn_withdraw_cwr() argument
263 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; in tcp_ecn_withdraw_cwr()
268 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ecn_check_ce() local
276 if (tp->ecn_flags & TCP_ECN_SEEN) in __tcp_ecn_check_ce()
283 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { in __tcp_ecn_check_ce()
286 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; in __tcp_ecn_check_ce()
288 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
293 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
304 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_synack() argument
306 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) in tcp_ecn_rcv_synack()
307 tp->ecn_flags &= ~TCP_ECN_OK; in tcp_ecn_rcv_synack()
310 static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_syn() argument
312 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) in tcp_ecn_rcv_syn()
313 tp->ecn_flags &= ~TCP_ECN_OK; in tcp_ecn_rcv_syn()
316 static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_ecn_echo() argument
318 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_rcv_ecn_echo()
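The three handshake helpers above encode the RFC 3168 ECN negotiation rules: a SYN must carry both ECE and CWR to request ECN, a SYN-ACK must echo ECE without CWR for ECN to stay enabled, and a later ECE only counts as congestion feedback once ECN was negotiated (and never on a SYN). A minimal user-space sketch of those predicates, using simplified structs of my own rather than the kernel's struct tcphdr/tcp_sock:

#include <stdbool.h>

struct ecn_state { bool ecn_ok; };           /* stands in for TCP_ECN_OK in tp->ecn_flags */
struct hdr_flags { bool syn, ece, cwr; };    /* the relevant TCP header bits */

/* SYN-ACK received: keep ECN only if the peer set ECE and cleared CWR. */
static void ecn_rcv_synack(struct ecn_state *s, const struct hdr_flags *f)
{
    if (s->ecn_ok && (!f->ece || f->cwr))
        s->ecn_ok = false;
}

/* SYN received: the active opener must have set both ECE and CWR. */
static void ecn_rcv_syn(struct ecn_state *s, const struct hdr_flags *f)
{
    if (s->ecn_ok && (!f->ece || !f->cwr))
        s->ecn_ok = false;
}

/* Data/ACK received: ECE is congestion feedback only on an ECN connection. */
static bool ecn_rcv_ecn_echo(const struct ecn_state *s, const struct hdr_flags *f)
{
    return f->ece && !f->syn && s->ecn_ok;
}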
330 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand() local
338 per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + in tcp_sndbuf_expand()
345 nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd); in tcp_sndbuf_expand()
346 nr_segs = max_t(u32, nr_segs, tp->reordering + 1); in tcp_sndbuf_expand()
387 struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window() local
392 while (tp->rcv_ssthresh <= window) { in __tcp_grow_window()
404 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window() local
407 if (tp->rcv_ssthresh < tp->window_clamp && in tcp_grow_window()
408 (int)tp->rcv_ssthresh < tcp_space(sk) && in tcp_grow_window()
416 incr = 2 * tp->advmss; in tcp_grow_window()
422 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, in tcp_grow_window()
423 tp->window_clamp); in tcp_grow_window()
454 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space() local
462 tp->rcvq_space.space = tp->rcv_wnd; in tcp_init_buffer_space()
463 tcp_mstamp_refresh(tp); in tcp_init_buffer_space()
464 tp->rcvq_space.time = tp->tcp_mstamp; in tcp_init_buffer_space()
465 tp->rcvq_space.seq = tp->copied_seq; in tcp_init_buffer_space()
469 if (tp->window_clamp >= maxwin) { in tcp_init_buffer_space()
470 tp->window_clamp = maxwin; in tcp_init_buffer_space()
472 if (tcp_app_win && maxwin > 4 * tp->advmss) in tcp_init_buffer_space()
473 tp->window_clamp = max(maxwin - in tcp_init_buffer_space()
475 4 * tp->advmss); in tcp_init_buffer_space()
480 tp->window_clamp > 2 * tp->advmss && in tcp_init_buffer_space()
481 tp->window_clamp + tp->advmss > maxwin) in tcp_init_buffer_space()
482 tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss); in tcp_init_buffer_space()
484 tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); in tcp_init_buffer_space()
485 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_init_buffer_space()
491 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window() local
505 tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); in tcp_clamp_window()
517 const struct tcp_sock *tp = tcp_sk(sk); in tcp_initialize_rcv_mss() local
518 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); in tcp_initialize_rcv_mss()
520 hint = min(hint, tp->rcv_wnd / 2); in tcp_initialize_rcv_mss()
539 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) in tcp_rcv_rtt_update() argument
541 u32 new_sample = tp->rcv_rtt_est.rtt_us; in tcp_rcv_rtt_update()
568 tp->rcv_rtt_est.rtt_us = new_sample; in tcp_rcv_rtt_update()
571 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) in tcp_rcv_rtt_measure() argument
575 if (tp->rcv_rtt_est.time == 0) in tcp_rcv_rtt_measure()
577 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) in tcp_rcv_rtt_measure()
579 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); in tcp_rcv_rtt_measure()
582 tcp_rcv_rtt_update(tp, delta_us, 1); in tcp_rcv_rtt_measure()
585 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; in tcp_rcv_rtt_measure()
586 tp->rcv_rtt_est.time = tp->tcp_mstamp; in tcp_rcv_rtt_measure()
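tcp_rcv_rtt_update() maintains a receiver-side RTT estimate; tcp_rcv_rtt_measure() feeds it one sample per received window of data (the time taken to advance from rcv_nxt to rcv_nxt + rcv_wnd). Ignoring the kernel's win_dep damping and fixed-point details, the filter is roughly an EWMA with gain 1/8 — a sketch under that simplification:

#include <stdint.h>

/* est_us: current estimate (0 = none yet); sample_us: new per-window RTT sample.
 * Returns the updated estimate: new = old + (sample - old)/8. */
static uint32_t rcv_rtt_ewma(uint32_t est_us, uint32_t sample_us)
{
    if (est_us == 0)
        return sample_us;                          /* first sample seeds the estimate */
    int64_t err = (int64_t)sample_us - (int64_t)est_us;
    return (uint32_t)((int64_t)est_us + err / 8);
}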
592 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts() local
594 if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr) in tcp_rcv_rtt_measure_ts()
596 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; in tcp_rcv_rtt_measure_ts()
600 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; in tcp_rcv_rtt_measure_ts()
606 tcp_rcv_rtt_update(tp, delta_us, 0); in tcp_rcv_rtt_measure_ts()
616 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust() local
622 tcp_mstamp_refresh(tp); in tcp_rcv_space_adjust()
623 time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); in tcp_rcv_space_adjust()
624 if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0) in tcp_rcv_space_adjust()
628 copied = tp->copied_seq - tp->rcvq_space.seq; in tcp_rcv_space_adjust()
629 if (copied <= tp->rcvq_space.space) in tcp_rcv_space_adjust()
649 rcvwin = ((u64)copied << 1) + 16 * tp->advmss; in tcp_rcv_space_adjust()
652 grow = rcvwin * (copied - tp->rcvq_space.space); in tcp_rcv_space_adjust()
653 do_div(grow, tp->rcvq_space.space); in tcp_rcv_space_adjust()
656 rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER); in tcp_rcv_space_adjust()
657 while (tcp_win_from_space(sk, rcvmem) < tp->advmss) in tcp_rcv_space_adjust()
660 do_div(rcvwin, tp->advmss); in tcp_rcv_space_adjust()
667 tp->window_clamp = tcp_win_from_space(sk, rcvbuf); in tcp_rcv_space_adjust()
670 tp->rcvq_space.space = copied; in tcp_rcv_space_adjust()
673 tp->rcvq_space.seq = tp->copied_seq; in tcp_rcv_space_adjust()
674 tp->rcvq_space.time = tp->tcp_mstamp; in tcp_rcv_space_adjust()
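tcp_rcv_space_adjust() is the receive-buffer autotuning (DRS): once per RTT it compares how much data the application copied with the previous per-RTT estimate and raises the window target to roughly twice the copied amount plus slack, with an extra boost proportional to how much the rate grew. A simplified sketch of that target computation (names are mine, not the kernel's):

#include <stdint.h>

/* copied: bytes consumed by the application during the last RTT
 * prev_space: the previous per-RTT estimate (rcvq_space.space)
 * advmss: advertised MSS.  Returns a receive-window target in bytes. */
static uint64_t drs_rcvwin_target(uint64_t copied, uint64_t prev_space, uint32_t advmss)
{
    uint64_t rcvwin = (copied << 1) + 16ULL * advmss;   /* 2x copied + slack */

    if (prev_space && copied > prev_space) {
        /* accelerate while the consumption rate is still growing */
        uint64_t grow = rcvwin * (copied - prev_space) / prev_space;
        rcvwin += grow << 1;
    }
    return rcvwin;   /* the kernel then converts this to an sk_rcvbuf size and clamps it */
}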
689 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv() local
697 tcp_rcv_rtt_measure(tp); in tcp_event_data_recv()
744 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator() local
746 u32 srtt = tp->srtt_us; in tcp_rtt_estimator()
769 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
781 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
783 tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ in tcp_rtt_estimator()
784 if (tp->mdev_us > tp->mdev_max_us) { in tcp_rtt_estimator()
785 tp->mdev_max_us = tp->mdev_us; in tcp_rtt_estimator()
786 if (tp->mdev_max_us > tp->rttvar_us) in tcp_rtt_estimator()
787 tp->rttvar_us = tp->mdev_max_us; in tcp_rtt_estimator()
789 if (after(tp->snd_una, tp->rtt_seq)) { in tcp_rtt_estimator()
790 if (tp->mdev_max_us < tp->rttvar_us) in tcp_rtt_estimator()
791 tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2; in tcp_rtt_estimator()
792 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
793 tp->mdev_max_us = tcp_rto_min_us(sk); in tcp_rtt_estimator()
798 tp->mdev_us = m << 1; /* make sure rto = 3*rtt */ in tcp_rtt_estimator()
799 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); in tcp_rtt_estimator()
800 tp->mdev_max_us = tp->rttvar_us; in tcp_rtt_estimator()
801 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
803 tp->srtt_us = max(1U, srtt); in tcp_rtt_estimator()
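tcp_rtt_estimator() is the Jacobson/Karels smoothed-RTT filter of RFC 6298, kept in fixed point: srtt_us stores 8*SRTT and mdev_us stores 4*mdev so the gains 1/8 and 1/4 become shifts, and the mdev_max/rttvar bookkeeping keeps the variance from collapsing too quickly. Stripped of the scaling, the core update is roughly the textbook form below (floating point only for clarity):

#include <math.h>

struct rtt_est { double srtt, rttvar; };   /* seconds; srtt == 0 means "no sample yet" */

static void rtt_update(struct rtt_est *e, double m /* new RTT sample */)
{
    if (e->srtt == 0) {                    /* first measurement */
        e->srtt   = m;
        e->rttvar = m / 2;
        return;
    }
    double err = m - e->srtt;
    e->srtt   += err / 8;                          /* srtt   = 7/8*srtt   + 1/8*m          */
    e->rttvar += (fabs(err) - e->rttvar) / 4;      /* rttvar = 3/4*rttvar + 1/4*|m - srtt| */
}
/* The RTO derived from this is srtt + 4*rttvar, clamped between the minimum RTO
 * and TCP_RTO_MAX (see tcp_set_rto()/__tcp_set_rto()). */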
808 const struct tcp_sock *tp = tcp_sk(sk); in tcp_update_pacing_rate() local
812 rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3); in tcp_update_pacing_rate()
822 if (tp->snd_cwnd < tp->snd_ssthresh / 2) in tcp_update_pacing_rate()
827 rate *= max(tp->snd_cwnd, tp->packets_out); in tcp_update_pacing_rate()
829 if (likely(tp->srtt_us)) in tcp_update_pacing_rate()
830 do_div(rate, tp->srtt_us); in tcp_update_pacing_rate()
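tcp_update_pacing_rate() sets sk_pacing_rate to roughly mss * cwnd / srtt scaled by a gain: a larger ratio while snd_cwnd is still below half of snd_ssthresh (slow start) and a smaller one afterwards, so pacing keeps some headroom over the nominal cwnd-per-RTT rate. The ratios come from the tcp_pacing_ss_ratio (default 200%) and tcp_pacing_ca_ratio (default 120%) sysctls. An all-integer sketch of the arithmetic, using an unscaled srtt in microseconds:

#include <stdint.h>

/* Returns a pacing rate in bytes per second.
 * ratio_percent: e.g. 200 in slow start, 120 in congestion avoidance. */
static uint64_t pacing_rate_bps(uint32_t mss, uint32_t cwnd, uint32_t srtt_us,
                                uint32_t ratio_percent)
{
    if (!srtt_us)
        return UINT64_MAX;                 /* no RTT sample yet: effectively unpaced */

    uint64_t rate = (uint64_t)mss * 1000000ULL;   /* mss bytes per second           */
    rate = rate * ratio_percent / 100;            /* apply the slow-start/CA gain   */
    rate *= cwnd;                                 /* cwnd segments per RTT          */
    return rate / srtt_us;                        /* ... divided by the RTT         */
}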
845 const struct tcp_sock *tp = tcp_sk(sk); in tcp_set_rto() local
856 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); in tcp_set_rto()
870 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) in tcp_init_cwnd() argument
876 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); in tcp_init_cwnd()
880 static void tcp_dsack_seen(struct tcp_sock *tp) in tcp_dsack_seen() argument
882 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; in tcp_dsack_seen()
883 tp->rack.dsack_seen = 1; in tcp_dsack_seen()
884 tp->dsack_dups++; in tcp_dsack_seen()
894 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reordering() local
895 const u32 mss = tp->mss_cache; in tcp_check_sack_reordering()
898 fack = tcp_highest_sack_seq(tp); in tcp_check_sack_reordering()
903 if ((metric > tp->reordering * mss) && mss) { in tcp_check_sack_reordering()
906 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, in tcp_check_sack_reordering()
907 tp->reordering, in tcp_check_sack_reordering()
909 tp->sacked_out, in tcp_check_sack_reordering()
910 tp->undo_marker ? tp->undo_retrans : 0); in tcp_check_sack_reordering()
912 tp->reordering = min_t(u32, (metric + mss - 1) / mss, in tcp_check_sack_reordering()
917 tp->reord_seen++; in tcp_check_sack_reordering()
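tcp_check_sack_reordering() raises the reordering estimate when a never-retransmitted hole below the highest SACKed sequence is finally ACKed: the hole's size, rounded up to whole segments, becomes the new reordering degree (capped by tcp_max_reordering), which in turn raises the dupack threshold. A small sketch of that metric; the cap constant here is a stand-in for the sysctl:

#include <stdint.h>

#define REORDERING_CAP 300   /* stand-in for the tcp_max_reordering sysctl default */

/* fack: highest SACKed sequence; low_seq: newly ACKed, never-retransmitted sequence
 * below it; mss: segment size; reordering: current estimate in packets. */
static void update_reordering(uint32_t *reordering, uint32_t fack, uint32_t low_seq,
                              uint32_t mss)
{
    uint32_t metric = fack - low_seq;            /* size of the reordered span, in bytes */

    if (mss && metric > *reordering * mss) {
        uint32_t pkts = (metric + mss - 1) / mss;          /* round up to packets */
        *reordering = pkts < REORDERING_CAP ? pkts : REORDERING_CAP;
    }
}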
923 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) in tcp_verify_retransmit_hint() argument
925 if (!tp->retransmit_skb_hint || in tcp_verify_retransmit_hint()
927 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) in tcp_verify_retransmit_hint()
928 tp->retransmit_skb_hint = skb; in tcp_verify_retransmit_hint()
938 static void tcp_sum_lost(struct tcp_sock *tp, struct sk_buff *skb) in tcp_sum_lost() argument
944 tp->lost += tcp_skb_pcount(skb); in tcp_sum_lost()
947 static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb) in tcp_skb_mark_lost() argument
950 tcp_verify_retransmit_hint(tp, skb); in tcp_skb_mark_lost()
952 tp->lost_out += tcp_skb_pcount(skb); in tcp_skb_mark_lost()
953 tcp_sum_lost(tp, skb); in tcp_skb_mark_lost()
958 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb) in tcp_skb_mark_lost_uncond_verify() argument
960 tcp_verify_retransmit_hint(tp, skb); in tcp_skb_mark_lost_uncond_verify()
962 tcp_sum_lost(tp, skb); in tcp_skb_mark_lost_uncond_verify()
964 tp->lost_out += tcp_skb_pcount(skb); in tcp_skb_mark_lost_uncond_verify()
1062 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, in tcp_is_sackblock_valid() argument
1066 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) in tcp_is_sackblock_valid()
1070 if (!before(start_seq, tp->snd_nxt)) in tcp_is_sackblock_valid()
1076 if (after(start_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1079 if (!is_dsack || !tp->undo_marker) in tcp_is_sackblock_valid()
1083 if (after(end_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1086 if (!before(start_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1090 if (!after(end_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1096 return !before(start_seq, end_seq - tp->max_window); in tcp_is_sackblock_valid()
1103 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_dsack() local
1110 tcp_dsack_seen(tp); in tcp_check_dsack()
1119 tcp_dsack_seen(tp); in tcp_check_dsack()
1126 if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 && in tcp_check_dsack()
1128 after(end_seq_0, tp->undo_marker)) in tcp_check_dsack()
1129 tp->undo_retrans--; in tcp_check_dsack()
1210 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_one() local
1214 if (tp->undo_marker && tp->undo_retrans > 0 && in tcp_sacktag_one()
1215 after(end_seq, tp->undo_marker)) in tcp_sacktag_one()
1216 tp->undo_retrans--; in tcp_sacktag_one()
1223 if (!after(end_seq, tp->snd_una)) in tcp_sacktag_one()
1227 tcp_rack_advance(tp, sacked, end_seq, xmit_time); in tcp_sacktag_one()
1236 tp->lost_out -= pcount; in tcp_sacktag_one()
1237 tp->retrans_out -= pcount; in tcp_sacktag_one()
1245 tcp_highest_sack_seq(tp)) && in tcp_sacktag_one()
1249 if (!after(end_seq, tp->high_seq)) in tcp_sacktag_one()
1258 tp->lost_out -= pcount; in tcp_sacktag_one()
1264 tp->sacked_out += pcount; in tcp_sacktag_one()
1265 tp->delivered += pcount; /* Out-of-order packets delivered */ in tcp_sacktag_one()
1268 if (tp->lost_skb_hint && in tcp_sacktag_one()
1269 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) in tcp_sacktag_one()
1270 tp->lost_cnt_hint += pcount; in tcp_sacktag_one()
1279 tp->retrans_out -= pcount; in tcp_sacktag_one()
1294 struct tcp_sock *tp = tcp_sk(sk); in tcp_shifted_skb() local
1311 if (skb == tp->lost_skb_hint) in tcp_shifted_skb()
1312 tp->lost_cnt_hint += pcount; in tcp_shifted_skb()
1344 if (skb == tp->retransmit_skb_hint) in tcp_shifted_skb()
1345 tp->retransmit_skb_hint = prev; in tcp_shifted_skb()
1346 if (skb == tp->lost_skb_hint) { in tcp_shifted_skb()
1347 tp->lost_skb_hint = prev; in tcp_shifted_skb()
1348 tp->lost_cnt_hint -= tcp_skb_pcount(prev); in tcp_shifted_skb()
1392 struct tcp_sock *tp = tcp_sk(sk); in tcp_shift_skb_data() local
1406 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in tcp_shift_skb_data()
1486 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) in tcp_shift_skb_data()
1530 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_walk() local
1589 tcp_highest_sack_seq(tp))) in tcp_sacktag_walk()
1648 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) in tcp_sack_cache_ok() argument
1650 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sack_cache_ok()
1657 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_write_queue() local
1671 state->reord = tp->snd_nxt; in tcp_sacktag_write_queue()
1673 if (!tp->sacked_out) in tcp_sacktag_write_queue()
1680 tp->delivered++; /* A spurious retransmission is delivered */ in tcp_sacktag_write_queue()
1687 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) in tcp_sacktag_write_queue()
1690 if (!tp->packets_out) in tcp_sacktag_write_queue()
1701 if (!tcp_is_sackblock_valid(tp, dup_sack, in tcp_sacktag_write_queue()
1707 if (!tp->undo_marker) in tcp_sacktag_write_queue()
1713 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && in tcp_sacktag_write_queue()
1714 !after(sp[used_sacks].end_seq, tp->snd_una)) in tcp_sacktag_write_queue()
1749 if (!tp->sacked_out) { in tcp_sacktag_write_queue()
1751 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sacktag_write_queue()
1753 cache = tp->recv_sack_cache; in tcp_sacktag_write_queue()
1755 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && in tcp_sacktag_write_queue()
1770 while (tcp_sack_cache_ok(tp, cache) && in tcp_sacktag_write_queue()
1775 if (tcp_sack_cache_ok(tp, cache) && !dup_sack && in tcp_sacktag_write_queue()
1798 if (tcp_highest_sack_seq(tp) == cache->end_seq) { in tcp_sacktag_write_queue()
1813 if (!before(start_seq, tcp_highest_sack_seq(tp))) { in tcp_sacktag_write_queue()
1829 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { in tcp_sacktag_write_queue()
1830 tp->recv_sack_cache[i].start_seq = 0; in tcp_sacktag_write_queue()
1831 tp->recv_sack_cache[i].end_seq = 0; in tcp_sacktag_write_queue()
1834 tp->recv_sack_cache[i++] = sp[j]; in tcp_sacktag_write_queue()
1836 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker) in tcp_sacktag_write_queue()
1839 tcp_verify_left_out(tp); in tcp_sacktag_write_queue()
1843 WARN_ON((int)tp->sacked_out < 0); in tcp_sacktag_write_queue()
1844 WARN_ON((int)tp->lost_out < 0); in tcp_sacktag_write_queue()
1845 WARN_ON((int)tp->retrans_out < 0); in tcp_sacktag_write_queue()
1846 WARN_ON((int)tcp_packets_in_flight(tp) < 0); in tcp_sacktag_write_queue()
1854 static bool tcp_limit_reno_sacked(struct tcp_sock *tp) in tcp_limit_reno_sacked() argument
1858 holes = max(tp->lost_out, 1U); in tcp_limit_reno_sacked()
1859 holes = min(holes, tp->packets_out); in tcp_limit_reno_sacked()
1861 if ((tp->sacked_out + holes) > tp->packets_out) { in tcp_limit_reno_sacked()
1862 tp->sacked_out = tp->packets_out - holes; in tcp_limit_reno_sacked()
1874 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_reno_reordering() local
1876 if (!tcp_limit_reno_sacked(tp)) in tcp_check_reno_reordering()
1879 tp->reordering = min_t(u32, tp->packets_out + addend, in tcp_check_reno_reordering()
1881 tp->reord_seen++; in tcp_check_reno_reordering()
1889 struct tcp_sock *tp = tcp_sk(sk); in tcp_add_reno_sack() local
1890 u32 prior_sacked = tp->sacked_out; in tcp_add_reno_sack()
1892 tp->sacked_out++; in tcp_add_reno_sack()
1894 if (tp->sacked_out > prior_sacked) in tcp_add_reno_sack()
1895 tp->delivered++; /* Some out-of-order packet is delivered */ in tcp_add_reno_sack()
1896 tcp_verify_left_out(tp); in tcp_add_reno_sack()
1903 struct tcp_sock *tp = tcp_sk(sk); in tcp_remove_reno_sacks() local
1907 tp->delivered += max_t(int, acked - tp->sacked_out, 1); in tcp_remove_reno_sacks()
1908 if (acked - 1 >= tp->sacked_out) in tcp_remove_reno_sacks()
1909 tp->sacked_out = 0; in tcp_remove_reno_sacks()
1911 tp->sacked_out -= acked - 1; in tcp_remove_reno_sacks()
1914 tcp_verify_left_out(tp); in tcp_remove_reno_sacks()
1917 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) in tcp_reset_reno_sack() argument
1919 tp->sacked_out = 0; in tcp_reset_reno_sack()
1922 void tcp_clear_retrans(struct tcp_sock *tp) in tcp_clear_retrans() argument
1924 tp->retrans_out = 0; in tcp_clear_retrans()
1925 tp->lost_out = 0; in tcp_clear_retrans()
1926 tp->undo_marker = 0; in tcp_clear_retrans()
1927 tp->undo_retrans = -1; in tcp_clear_retrans()
1928 tp->sacked_out = 0; in tcp_clear_retrans()
1931 static inline void tcp_init_undo(struct tcp_sock *tp) in tcp_init_undo() argument
1933 tp->undo_marker = tp->snd_una; in tcp_init_undo()
1935 tp->undo_retrans = tp->retrans_out ? : -1; in tcp_init_undo()
1949 struct tcp_sock *tp = tcp_sk(sk); in tcp_timeout_mark_lost() local
1957 tp->sacked_out = 0; in tcp_timeout_mark_lost()
1959 tp->is_sack_reneg = 1; in tcp_timeout_mark_lost()
1960 } else if (tcp_is_reno(tp)) { in tcp_timeout_mark_lost()
1961 tcp_reset_reno_sack(tp); in tcp_timeout_mark_lost()
1969 tcp_rack_skb_timeout(tp, skb, 0) > 0) in tcp_timeout_mark_lost()
1973 tcp_verify_left_out(tp); in tcp_timeout_mark_lost()
1974 tcp_clear_all_retrans_hints(tp); in tcp_timeout_mark_lost()
1981 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_loss() local
1989 !after(tp->high_seq, tp->snd_una) || in tcp_enter_loss()
1991 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_loss()
1992 tp->prior_cwnd = tp->snd_cwnd; in tcp_enter_loss()
1993 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
1995 tcp_init_undo(tp); in tcp_enter_loss()
1997 tp->snd_cwnd = tcp_packets_in_flight(tp) + 1; in tcp_enter_loss()
1998 tp->snd_cwnd_cnt = 0; in tcp_enter_loss()
1999 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_enter_loss()
2005 tp->sacked_out >= net->ipv4.sysctl_tcp_reordering) in tcp_enter_loss()
2006 tp->reordering = min_t(unsigned int, tp->reordering, in tcp_enter_loss()
2009 tp->high_seq = tp->snd_nxt; in tcp_enter_loss()
2010 tcp_ecn_queue_cwr(tp); in tcp_enter_loss()
2016 tp->frto = net->ipv4.sysctl_tcp_frto && in tcp_enter_loss()
2034 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reneging() local
2035 unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), in tcp_check_sack_reneging()
2056 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) in tcp_dupack_heuristics() argument
2058 return tp->sacked_out + 1; in tcp_dupack_heuristics()
2160 struct tcp_sock *tp = tcp_sk(sk); in tcp_time_to_recover() local
2163 if (tp->lost_out) in tcp_time_to_recover()
2167 if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering) in tcp_time_to_recover()
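tcp_time_to_recover() is the trigger for entering fast recovery: either some segment is already marked lost (e.g. by RACK or an RTO), or, when RACK is not in charge of loss detection, the number of SACKed/duplicate-ACKed segments exceeds the current reordering degree — the generalized "three duplicate ACKs" rule. A sketch of that decision:

#include <stdbool.h>
#include <stdint.h>

static bool time_to_recover(uint32_t lost_out, uint32_t sacked_out, uint32_t reordering)
{
    if (lost_out)
        return true;                         /* losses already marked */
    /* dupack heuristic: tcp_dupack_heuristics() is sacked_out + 1 */
    return sacked_out + 1 > reordering;
}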
2181 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_head_lost() local
2186 const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq; in tcp_mark_head_lost()
2188 WARN_ON(packets > tp->packets_out); in tcp_mark_head_lost()
2189 skb = tp->lost_skb_hint; in tcp_mark_head_lost()
2192 if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una)) in tcp_mark_head_lost()
2194 cnt = tp->lost_cnt_hint; in tcp_mark_head_lost()
2203 tp->lost_skb_hint = skb; in tcp_mark_head_lost()
2204 tp->lost_cnt_hint = cnt; in tcp_mark_head_lost()
2210 if (tcp_is_reno(tp) || in tcp_mark_head_lost()
2215 if (tcp_is_sack(tp) || in tcp_mark_head_lost()
2230 tcp_skb_mark_lost(tp, skb); in tcp_mark_head_lost()
2235 tcp_verify_left_out(tp); in tcp_mark_head_lost()
2242 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_scoreboard() local
2244 if (tcp_is_sack(tp)) { in tcp_update_scoreboard()
2245 int sacked_upto = tp->sacked_out - tp->reordering; in tcp_update_scoreboard()
2253 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when) in tcp_tsopt_ecr_before() argument
2255 return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_tsopt_ecr_before()
2256 before(tp->rx_opt.rcv_tsecr, when); in tcp_tsopt_ecr_before()
2262 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp, in tcp_skb_spurious_retrans() argument
2266 tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb)); in tcp_skb_spurious_retrans()
2272 static inline bool tcp_packet_delayed(const struct tcp_sock *tp) in tcp_packet_delayed() argument
2274 return !tp->retrans_stamp || in tcp_packet_delayed()
2275 tcp_tsopt_ecr_before(tp, tp->retrans_stamp); in tcp_packet_delayed()
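The undo helpers above use the timestamp echo to recognise spurious retransmissions (the Eifel idea): if the ACK's echoed timestamp predates the timestamp carried by our first retransmission, the ACK must have been triggered by the original transmission, so the congestion-window reduction can be undone. A minimal sketch of the comparison; the kernel additionally requires that a timestamp was actually present on the ACK:

#include <stdbool.h>
#include <stdint.h>

/* rcv_tsecr: timestamp value echoed by the incoming ACK
 * retrans_stamp: timestamp we placed in the first retransmission (0 = none yet) */
static bool packet_delayed_not_lost(uint32_t rcv_tsecr, uint32_t retrans_stamp)
{
    if (!retrans_stamp)
        return true;                                   /* nothing retransmitted yet */
    return (int32_t)(rcv_tsecr - retrans_stamp) < 0;   /* echo predates the retransmit */
}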
2296 const struct tcp_sock *tp = tcp_sk(sk); in tcp_any_retrans_done() local
2299 if (tp->retrans_out) in tcp_any_retrans_done()
2312 struct tcp_sock *tp = tcp_sk(sk); in DBGUNDO() local
2319 tp->snd_cwnd, tcp_left_out(tp), in DBGUNDO()
2320 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2321 tp->packets_out); in DBGUNDO()
2328 tp->snd_cwnd, tcp_left_out(tp), in DBGUNDO()
2329 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2330 tp->packets_out); in DBGUNDO()
2338 struct tcp_sock *tp = tcp_sk(sk); in tcp_undo_cwnd_reduction() local
2346 tp->lost_out = 0; in tcp_undo_cwnd_reduction()
2347 tcp_clear_all_retrans_hints(tp); in tcp_undo_cwnd_reduction()
2350 if (tp->prior_ssthresh) { in tcp_undo_cwnd_reduction()
2353 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); in tcp_undo_cwnd_reduction()
2355 if (tp->prior_ssthresh > tp->snd_ssthresh) { in tcp_undo_cwnd_reduction()
2356 tp->snd_ssthresh = tp->prior_ssthresh; in tcp_undo_cwnd_reduction()
2357 tcp_ecn_withdraw_cwr(tp); in tcp_undo_cwnd_reduction()
2360 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_undo_cwnd_reduction()
2361 tp->undo_marker = 0; in tcp_undo_cwnd_reduction()
2362 tp->rack.advanced = 1; /* Force RACK to re-exam losses */ in tcp_undo_cwnd_reduction()
2365 static inline bool tcp_may_undo(const struct tcp_sock *tp) in tcp_may_undo() argument
2367 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); in tcp_may_undo()
2373 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_recovery() local
2375 if (tcp_may_undo(tp)) { in tcp_try_undo_recovery()
2389 } else if (tp->rack.reo_wnd_persist) { in tcp_try_undo_recovery()
2390 tp->rack.reo_wnd_persist--; in tcp_try_undo_recovery()
2392 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { in tcp_try_undo_recovery()
2397 tp->retrans_stamp = 0; in tcp_try_undo_recovery()
2401 tp->is_sack_reneg = 0; in tcp_try_undo_recovery()
2408 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_dsack() local
2410 if (tp->undo_marker && !tp->undo_retrans) { in tcp_try_undo_dsack()
2411 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH, in tcp_try_undo_dsack()
2412 tp->rack.reo_wnd_persist + 1); in tcp_try_undo_dsack()
2424 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_loss() local
2426 if (frto_undo || tcp_may_undo(tp)) { in tcp_try_undo_loss()
2435 if (frto_undo || tcp_is_sack(tp)) { in tcp_try_undo_loss()
2437 tp->is_sack_reneg = 0; in tcp_try_undo_loss()
2455 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_cwnd_reduction() local
2457 tp->high_seq = tp->snd_nxt; in tcp_init_cwnd_reduction()
2458 tp->tlp_high_seq = 0; in tcp_init_cwnd_reduction()
2459 tp->snd_cwnd_cnt = 0; in tcp_init_cwnd_reduction()
2460 tp->prior_cwnd = tp->snd_cwnd; in tcp_init_cwnd_reduction()
2461 tp->prr_delivered = 0; in tcp_init_cwnd_reduction()
2462 tp->prr_out = 0; in tcp_init_cwnd_reduction()
2463 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); in tcp_init_cwnd_reduction()
2464 tcp_ecn_queue_cwr(tp); in tcp_init_cwnd_reduction()
2469 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_reduction() local
2471 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); in tcp_cwnd_reduction()
2473 if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd)) in tcp_cwnd_reduction()
2476 tp->prr_delivered += newly_acked_sacked; in tcp_cwnd_reduction()
2478 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + in tcp_cwnd_reduction()
2479 tp->prior_cwnd - 1; in tcp_cwnd_reduction()
2480 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; in tcp_cwnd_reduction()
2484 max_t(int, tp->prr_delivered - tp->prr_out, in tcp_cwnd_reduction()
2490 sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1)); in tcp_cwnd_reduction()
2491 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; in tcp_cwnd_reduction()
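tcp_init_cwnd_reduction() and tcp_cwnd_reduction() implement Proportional Rate Reduction (RFC 6937): while packets in flight still exceed ssthresh, each newly delivered packet earns roughly ssthresh/prior_cwnd new transmissions, so cwnd glides down to ssthresh over about one round trip instead of being cut at once; once flight drops to ssthresh the quota is bounded so flight does not grow past it again. A simplified sketch of the per-ACK quota (it omits the kernel's special case for ACKed retransmissions):

#include <stdint.h>

struct prr {
    uint32_t prior_cwnd, ssthresh;     /* cwnd when the reduction started, and its target */
    uint32_t prr_delivered, prr_out;   /* delivered and transmitted since it started      */
};

/* newly_delivered: packets newly delivered (cumulatively ACKed or SACKed) by this ACK
 * in_flight: packets currently outstanding.  Returns how many packets may be sent now. */
static uint32_t prr_sndcnt(struct prr *p, uint32_t newly_delivered, uint32_t in_flight)
{
    int sndcnt;

    if (!newly_delivered || !p->prior_cwnd)
        return 0;

    p->prr_delivered += newly_delivered;
    if (in_flight > p->ssthresh) {
        /* proportional phase: ~ssthresh/prior_cwnd sends per delivered packet */
        uint64_t dividend = (uint64_t)p->ssthresh * p->prr_delivered + p->prior_cwnd - 1;
        sndcnt = (int)(dividend / p->prior_cwnd) - (int)p->prr_out;
    } else {
        /* reduction-bound phase: never push flight back above ssthresh */
        int delta = (int)(p->ssthresh - in_flight);
        sndcnt = delta < (int)newly_delivered ? delta : (int)newly_delivered;
    }
    if (sndcnt < (p->prr_out ? 0 : 1))
        sndcnt = p->prr_out ? 0 : 1;             /* always allow the first fast retransmit */
    return (uint32_t)sndcnt;   /* new cwnd = in_flight + sndcnt; prr_out grows as these are sent */
}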
2496 struct tcp_sock *tp = tcp_sk(sk); in tcp_end_cwnd_reduction() local
2502 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && in tcp_end_cwnd_reduction()
2503 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { in tcp_end_cwnd_reduction()
2504 tp->snd_cwnd = tp->snd_ssthresh; in tcp_end_cwnd_reduction()
2505 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_end_cwnd_reduction()
2513 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_cwr() local
2515 tp->prior_ssthresh = 0; in tcp_enter_cwr()
2517 tp->undo_marker = 0; in tcp_enter_cwr()
2526 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_keep_open() local
2529 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) in tcp_try_keep_open()
2534 tp->high_seq = tp->snd_nxt; in tcp_try_keep_open()
2540 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_to_open() local
2542 tcp_verify_left_out(tp); in tcp_try_to_open()
2545 tp->retrans_stamp = 0; in tcp_try_to_open()
2566 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_probe_success() local
2570 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2571 tp->snd_cwnd = tp->snd_cwnd * in tcp_mtup_probe_success()
2572 tcp_mss_to_mtu(sk, tp->mss_cache) / in tcp_mtup_probe_success()
2574 tp->snd_cwnd_cnt = 0; in tcp_mtup_probe_success()
2575 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_mtup_probe_success()
2576 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2591 struct tcp_sock *tp = tcp_sk(sk); in tcp_simple_retransmit() local
2600 tp->retrans_out -= tcp_skb_pcount(skb); in tcp_simple_retransmit()
2602 tcp_skb_mark_lost_uncond_verify(tp, skb); in tcp_simple_retransmit()
2606 tcp_clear_retrans_hints_partial(tp); in tcp_simple_retransmit()
2608 if (!tp->lost_out) in tcp_simple_retransmit()
2611 if (tcp_is_reno(tp)) in tcp_simple_retransmit()
2612 tcp_limit_reno_sacked(tp); in tcp_simple_retransmit()
2614 tcp_verify_left_out(tp); in tcp_simple_retransmit()
2622 tp->high_seq = tp->snd_nxt; in tcp_simple_retransmit()
2623 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_simple_retransmit()
2624 tp->prior_ssthresh = 0; in tcp_simple_retransmit()
2625 tp->undo_marker = 0; in tcp_simple_retransmit()
2634 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_recovery() local
2637 if (tcp_is_reno(tp)) in tcp_enter_recovery()
2644 tp->prior_ssthresh = 0; in tcp_enter_recovery()
2645 tcp_init_undo(tp); in tcp_enter_recovery()
2649 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_recovery()
2661 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_loss() local
2662 bool recovered = !before(tp->snd_una, tp->high_seq); in tcp_process_loss()
2668 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ in tcp_process_loss()
2676 if (after(tp->snd_nxt, tp->high_seq)) { in tcp_process_loss()
2678 tp->frto = 0; /* Step 3.a. loss was real */ in tcp_process_loss()
2680 tp->high_seq = tp->snd_nxt; in tcp_process_loss()
2686 after(tcp_wnd_end(tp), tp->snd_nxt)) { in tcp_process_loss()
2690 tp->frto = 0; in tcp_process_loss()
2699 if (tcp_is_reno(tp)) { in tcp_process_loss()
2703 if (after(tp->snd_nxt, tp->high_seq) && is_dupack) in tcp_process_loss()
2706 tcp_reset_reno_sack(tp); in tcp_process_loss()
2714 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_partial() local
2716 if (tp->undo_marker && tcp_packet_delayed(tp)) { in tcp_try_undo_partial()
2727 if (tp->retrans_out) in tcp_try_undo_partial()
2731 tp->retrans_stamp = 0; in tcp_try_undo_partial()
2744 struct tcp_sock *tp = tcp_sk(sk); in tcp_identify_packet_loss() local
2749 if (unlikely(tcp_is_reno(tp))) { in tcp_identify_packet_loss()
2752 u32 prior_retrans = tp->retrans_out; in tcp_identify_packet_loss()
2755 if (prior_retrans > tp->retrans_out) in tcp_identify_packet_loss()
2762 struct tcp_sock *tp = tcp_sk(sk); in tcp_force_fast_retransmit() local
2764 return after(tcp_highest_sack_seq(tp), in tcp_force_fast_retransmit()
2765 tp->snd_una + tp->reordering * tp->mss_cache); in tcp_force_fast_retransmit()
2784 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastretrans_alert() local
2789 if (!tp->packets_out && tp->sacked_out) in tcp_fastretrans_alert()
2790 tp->sacked_out = 0; in tcp_fastretrans_alert()
2795 tp->prior_ssthresh = 0; in tcp_fastretrans_alert()
2802 tcp_verify_left_out(tp); in tcp_fastretrans_alert()
2807 WARN_ON(tp->retrans_out != 0); in tcp_fastretrans_alert()
2808 tp->retrans_stamp = 0; in tcp_fastretrans_alert()
2809 } else if (!before(tp->snd_una, tp->high_seq)) { in tcp_fastretrans_alert()
2814 if (tp->snd_una != tp->high_seq) { in tcp_fastretrans_alert()
2821 if (tcp_is_reno(tp)) in tcp_fastretrans_alert()
2822 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
2834 if (tcp_is_reno(tp) && is_dupack) in tcp_fastretrans_alert()
2840 do_lost = tcp_is_reno(tp) || in tcp_fastretrans_alert()
2858 if (tcp_is_reno(tp)) { in tcp_fastretrans_alert()
2860 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
2877 tp->snd_una == tp->mtu_probe.probe_seq_start) { in tcp_fastretrans_alert()
2880 tp->snd_cwnd++; in tcp_fastretrans_alert()
2898 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_rtt_min() local
2900 if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) { in tcp_update_rtt_min()
2907 minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32, in tcp_update_rtt_min()
2915 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_rtt() local
2931 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_ack_update_rtt()
2933 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; in tcp_ack_update_rtt()
2982 struct tcp_sock *tp = tcp_sk(sk); in tcp_rearm_rto() local
2987 if (tp->fastopen_rsk) in tcp_rearm_rto()
2990 if (!tp->packets_out) { in tcp_rearm_rto()
3018 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_acked() local
3021 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); in tcp_tso_acked()
3024 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3064 struct tcp_sock *tp = tcp_sk(sk); in tcp_clean_rtx_queue() local
3065 u32 prior_sacked = tp->sacked_out; in tcp_clean_rtx_queue()
3066 u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */ in tcp_clean_rtx_queue()
3088 if (after(scb->end_seq, tp->snd_una)) { in tcp_clean_rtx_queue()
3090 !after(tp->snd_una, scb->seq)) in tcp_clean_rtx_queue()
3103 tp->retrans_out -= acked_pcount; in tcp_clean_rtx_queue()
3114 if (!after(scb->end_seq, tp->high_seq)) in tcp_clean_rtx_queue()
3119 tp->sacked_out -= acked_pcount; in tcp_clean_rtx_queue()
3120 } else if (tcp_is_sack(tp)) { in tcp_clean_rtx_queue()
3121 tp->delivered += acked_pcount; in tcp_clean_rtx_queue()
3122 if (!tcp_skb_spurious_retrans(tp, skb)) in tcp_clean_rtx_queue()
3123 tcp_rack_advance(tp, sacked, scb->end_seq, in tcp_clean_rtx_queue()
3127 tp->lost_out -= acked_pcount; in tcp_clean_rtx_queue()
3129 tp->packets_out -= acked_pcount; in tcp_clean_rtx_queue()
3144 tp->retrans_stamp = 0; in tcp_clean_rtx_queue()
3151 if (unlikely(skb == tp->retransmit_skb_hint)) in tcp_clean_rtx_queue()
3152 tp->retransmit_skb_hint = NULL; in tcp_clean_rtx_queue()
3153 if (unlikely(skb == tp->lost_skb_hint)) in tcp_clean_rtx_queue()
3154 tp->lost_skb_hint = NULL; in tcp_clean_rtx_queue()
3161 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) in tcp_clean_rtx_queue()
3162 tp->snd_up = tp->snd_una; in tcp_clean_rtx_queue()
3168 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt); in tcp_clean_rtx_queue()
3169 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt); in tcp_clean_rtx_queue()
3171 if (pkts_acked == 1 && last_in_flight < tp->mss_cache && in tcp_clean_rtx_queue()
3173 sack->rate->prior_delivered + 1 == tp->delivered && in tcp_clean_rtx_queue()
3183 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt); in tcp_clean_rtx_queue()
3184 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt); in tcp_clean_rtx_queue()
3192 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { in tcp_clean_rtx_queue()
3196 if (tcp_is_reno(tp)) { in tcp_clean_rtx_queue()
3214 delta = prior_sacked - tp->sacked_out; in tcp_clean_rtx_queue()
3215 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); in tcp_clean_rtx_queue()
3218 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) { in tcp_clean_rtx_queue()
3235 WARN_ON((int)tp->sacked_out < 0); in tcp_clean_rtx_queue()
3236 WARN_ON((int)tp->lost_out < 0); in tcp_clean_rtx_queue()
3237 WARN_ON((int)tp->retrans_out < 0); in tcp_clean_rtx_queue()
3238 if (!tp->packets_out && tcp_is_sack(tp)) { in tcp_clean_rtx_queue()
3240 if (tp->lost_out) { in tcp_clean_rtx_queue()
3242 tp->lost_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3243 tp->lost_out = 0; in tcp_clean_rtx_queue()
3245 if (tp->sacked_out) { in tcp_clean_rtx_queue()
3247 tp->sacked_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3248 tp->sacked_out = 0; in tcp_clean_rtx_queue()
3250 if (tp->retrans_out) { in tcp_clean_rtx_queue()
3252 tp->retrans_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3253 tp->retrans_out = 0; in tcp_clean_rtx_queue()
3264 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_probe() local
3269 if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) { in tcp_ack_probe()
3332 static inline bool tcp_may_update_window(const struct tcp_sock *tp, in tcp_may_update_window() argument
3336 return after(ack, tp->snd_una) || in tcp_may_update_window()
3337 after(ack_seq, tp->snd_wl1) || in tcp_may_update_window()
3338 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd); in tcp_may_update_window()
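tcp_may_update_window() is RFC 793's rule for when the window field of an ACK may be adopted: the ACK advances snd_una, or it acknowledges newer data than the last window update (ack_seq beyond snd_wl1), or it is for the same data but advertises a larger window. A self-contained sketch using wraparound-safe 32-bit comparisons:

#include <stdbool.h>
#include <stdint.h>

static bool may_update_window(uint32_t ack, uint32_t ack_seq, uint32_t nwin,
                              uint32_t snd_una, uint32_t snd_wl1, uint32_t snd_wnd)
{
    return (int32_t)(ack - snd_una) > 0 ||         /* ACK advances snd_una       */
           (int32_t)(ack_seq - snd_wl1) > 0 ||     /* segment carries newer data */
           (ack_seq == snd_wl1 && nwin > snd_wnd); /* same data, larger window   */
}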
3342 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) in tcp_snd_una_update() argument
3344 u32 delta = ack - tp->snd_una; in tcp_snd_una_update()
3346 sock_owned_by_me((struct sock *)tp); in tcp_snd_una_update()
3347 tp->bytes_acked += delta; in tcp_snd_una_update()
3348 tp->snd_una = ack; in tcp_snd_una_update()
3352 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) in tcp_rcv_nxt_update() argument
3354 u32 delta = seq - tp->rcv_nxt; in tcp_rcv_nxt_update()
3356 sock_owned_by_me((struct sock *)tp); in tcp_rcv_nxt_update()
3357 tp->bytes_received += delta; in tcp_rcv_nxt_update()
3358 tp->rcv_nxt = seq; in tcp_rcv_nxt_update()
3369 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_window() local
3374 nwin <<= tp->rx_opt.snd_wscale; in tcp_ack_update_window()
3376 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { in tcp_ack_update_window()
3378 tcp_update_wl(tp, ack_seq); in tcp_ack_update_window()
3380 if (tp->snd_wnd != nwin) { in tcp_ack_update_window()
3381 tp->snd_wnd = nwin; in tcp_ack_update_window()
3386 tp->pred_flags = 0; in tcp_ack_update_window()
3392 if (nwin > tp->max_window) { in tcp_ack_update_window()
3393 tp->max_window = nwin; in tcp_ack_update_window()
3399 tcp_snd_una_update(tp, ack); in tcp_ack_update_window()
3445 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_challenge_ack() local
3452 &tp->last_oow_ack_time)) in tcp_send_challenge_ack()
3472 static void tcp_store_ts_recent(struct tcp_sock *tp) in tcp_store_ts_recent() argument
3474 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; in tcp_store_ts_recent()
3475 tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); in tcp_store_ts_recent()
3478 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) in tcp_replace_ts_recent() argument
3480 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { in tcp_replace_ts_recent()
3488 if (tcp_paws_check(&tp->rx_opt, 0)) in tcp_replace_ts_recent()
3489 tcp_store_ts_recent(tp); in tcp_replace_ts_recent()
3500 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_tlp_ack() local
3502 if (before(ack, tp->tlp_high_seq)) in tcp_process_tlp_ack()
3507 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3508 } else if (after(ack, tp->tlp_high_seq)) { in tcp_process_tlp_ack()
3521 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3539 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_recovery() local
3547 if (after(tp->snd_nxt, tp->high_seq)) in tcp_xmit_recovery()
3549 tp->frto = 0; in tcp_xmit_recovery()
3558 struct tcp_sock *tp = tcp_sk(sk); in tcp_newly_delivered() local
3561 delivered = tp->delivered - prior_delivered; in tcp_newly_delivered()
3564 tp->delivered_ce += delivered; in tcp_newly_delivered()
3574 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack() local
3577 u32 prior_snd_una = tp->snd_una; in tcp_ack()
3578 bool is_sack_reneg = tp->is_sack_reneg; in tcp_ack()
3582 int prior_packets = tp->packets_out; in tcp_ack()
3583 u32 delivered = tp->delivered; in tcp_ack()
3584 u32 lost = tp->lost; in tcp_ack()
3599 if (before(ack, prior_snd_una - tp->max_window)) { in tcp_ack()
3610 if (after(ack, tp->snd_nxt)) in tcp_ack()
3624 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una; in tcp_ack()
3625 rs.prior_in_flight = tcp_packets_in_flight(tp); in tcp_ack()
3631 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); in tcp_ack()
3638 tcp_update_wl(tp, ack_seq); in tcp_ack()
3639 tcp_snd_una_update(tp, ack); in tcp_ack()
3659 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { in tcp_ack()
3675 tp->rcv_tstamp = tcp_jiffies32; in tcp_ack()
3684 if (tp->tlp_high_seq) in tcp_ack()
3700 lost = tp->lost - lost; /* freshly marked lost */ in tcp_ack()
3720 if (tp->tlp_high_seq) in tcp_ack()
3725 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3741 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); in tcp_ack()
3897 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) in tcp_parse_aligned_timestamp() argument
3903 tp->rx_opt.saw_tstamp = 1; in tcp_parse_aligned_timestamp()
3905 tp->rx_opt.rcv_tsval = ntohl(*ptr); in tcp_parse_aligned_timestamp()
3908 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; in tcp_parse_aligned_timestamp()
3910 tp->rx_opt.rcv_tsecr = 0; in tcp_parse_aligned_timestamp()
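tcp_parse_aligned_timestamp() is the fast path for the common case where the only TCP option is a timestamp padded with two NOPs: the first option word then equals one fixed 32-bit pattern and the next two words are TSval and TSecr. The caller checks that the header length matches (20 bytes plus the 12-byte aligned option) before trying it. A standalone sketch of the same check:

#include <arpa/inet.h>    /* htonl, ntohl */
#include <stdbool.h>
#include <stdint.h>

#define TCPOPT_NOP        1
#define TCPOPT_TIMESTAMP  8
#define TCPOLEN_TIMESTAMP 10

/* opt points at the first option word of a header whose data offset is 8 (32 bytes). */
static bool parse_aligned_timestamp(const uint32_t *opt, uint32_t *tsval, uint32_t *tsecr)
{
    if (*opt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                      (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
        return false;                 /* options are not the aligned-timestamp pattern */
    *tsval = ntohl(opt[1]);           /* sender's timestamp value */
    *tsecr = ntohl(opt[2]);           /* echoed timestamp (the kernel then subtracts tsoffset) */
    return true;
}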
3921 const struct tcphdr *th, struct tcp_sock *tp) in tcp_fast_parse_options() argument
3927 tp->rx_opt.saw_tstamp = 0; in tcp_fast_parse_options()
3929 } else if (tp->rx_opt.tstamp_ok && in tcp_fast_parse_options()
3931 if (tcp_parse_aligned_timestamp(tp, th)) in tcp_fast_parse_options()
3935 tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL); in tcp_fast_parse_options()
3936 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_fast_parse_options()
3937 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_fast_parse_options()
4002 const struct tcp_sock *tp = tcp_sk(sk); in tcp_disordered_ack() local
4008 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && in tcp_disordered_ack()
4011 ack == tp->snd_una && in tcp_disordered_ack()
4014 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && in tcp_disordered_ack()
4017 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); in tcp_disordered_ack()
4023 const struct tcp_sock *tp = tcp_sk(sk); in tcp_paws_discard() local
4025 return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && in tcp_paws_discard()
4042 static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) in tcp_sequence() argument
4044 return !before(end_seq, tp->rcv_wup) && in tcp_sequence()
4045 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); in tcp_sequence()
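tcp_sequence() is the in-window acceptance test for an incoming segment: its end sequence must not lie entirely before rcv_wup (the edge already covered by our last window advertisement), and its start must not lie beyond the right edge of the advertised window (rcv_nxt plus the remaining receive window). The before()/after() helpers used throughout this file are wraparound-safe 32-bit sequence comparisons; a self-contained sketch:

#include <stdbool.h>
#include <stdint.h>

static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
#define seq_after(b, a) seq_before(a, b)

/* wnd_end: right edge of the advertised window (rcv_nxt + tcp_receive_window()
 * in kernel terms).  Accept a segment [seq, end_seq] overlapping [rcv_wup, wnd_end]. */
static bool segment_in_window(uint32_t seq, uint32_t end_seq,
                              uint32_t rcv_wup, uint32_t wnd_end)
{
    return !seq_before(end_seq, rcv_wup) &&
           !seq_after(seq, wnd_end);
}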
4092 struct tcp_sock *tp = tcp_sk(sk); in tcp_fin() local
4142 skb_rbtree_purge(&tp->out_of_order_queue); in tcp_fin()
4143 if (tcp_is_sack(tp)) in tcp_fin()
4144 tcp_sack_reset(&tp->rx_opt); in tcp_fin()
4174 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_set() local
4176 if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) { in tcp_dsack_set()
4179 if (before(seq, tp->rcv_nxt)) in tcp_dsack_set()
4186 tp->rx_opt.dsack = 1; in tcp_dsack_set()
4187 tp->duplicate_sack[0].start_seq = seq; in tcp_dsack_set()
4188 tp->duplicate_sack[0].end_seq = end_seq; in tcp_dsack_set()
4194 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_extend() local
4196 if (!tp->rx_opt.dsack) in tcp_dsack_extend()
4199 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); in tcp_dsack_extend()
4204 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_dupack() local
4207 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_send_dupack()
4211 if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) { in tcp_send_dupack()
4214 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) in tcp_send_dupack()
4215 end_seq = tp->rcv_nxt; in tcp_send_dupack()
4226 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) in tcp_sack_maybe_coalesce() argument
4229 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_maybe_coalesce()
4235 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { in tcp_sack_maybe_coalesce()
4242 tp->rx_opt.num_sacks--; in tcp_sack_maybe_coalesce()
4243 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) in tcp_sack_maybe_coalesce()
4253 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_new_ofo_skb() local
4254 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_new_ofo_skb()
4255 int cur_sacks = tp->rx_opt.num_sacks; in tcp_sack_new_ofo_skb()
4267 tcp_sack_maybe_coalesce(tp); in tcp_sack_new_ofo_skb()
4279 if (tp->compressed_ack) in tcp_sack_new_ofo_skb()
4282 tp->rx_opt.num_sacks--; in tcp_sack_new_ofo_skb()
4292 tp->rx_opt.num_sacks++; in tcp_sack_new_ofo_skb()
4297 static void tcp_sack_remove(struct tcp_sock *tp) in tcp_sack_remove() argument
4299 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_remove()
4300 int num_sacks = tp->rx_opt.num_sacks; in tcp_sack_remove()
4304 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_sack_remove()
4305 tp->rx_opt.num_sacks = 0; in tcp_sack_remove()
4311 if (!before(tp->rcv_nxt, sp->start_seq)) { in tcp_sack_remove()
4315 WARN_ON(before(tp->rcv_nxt, sp->end_seq)); in tcp_sack_remove()
4319 tp->selective_acks[i-1] = tp->selective_acks[i]; in tcp_sack_remove()
4326 tp->rx_opt.num_sacks = num_sacks; in tcp_sack_remove()
4407 struct tcp_sock *tp = tcp_sk(sk); in tcp_ofo_queue() local
4408 __u32 dsack_high = tp->rcv_nxt; in tcp_ofo_queue()
4413 p = rb_first(&tp->out_of_order_queue); in tcp_ofo_queue()
4416 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_ofo_queue()
4426 rb_erase(&skb->rbnode, &tp->out_of_order_queue); in tcp_ofo_queue()
4428 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) { in tcp_ofo_queue()
4434 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, in tcp_ofo_queue()
4439 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_ofo_queue()
4478 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue_ofo() local
4493 tp->pred_flags = 0; in tcp_data_queue_ofo()
4500 tp->rcv_nxt, seq, end_seq); in tcp_data_queue_ofo()
4502 p = &tp->out_of_order_queue.rb_node; in tcp_data_queue_ofo()
4503 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_data_queue_ofo()
4505 if (tcp_is_sack(tp)) { in tcp_data_queue_ofo()
4506 tp->rx_opt.num_sacks = 1; in tcp_data_queue_ofo()
4507 tp->selective_acks[0].start_seq = seq; in tcp_data_queue_ofo()
4508 tp->selective_acks[0].end_seq = end_seq; in tcp_data_queue_ofo()
4511 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4512 tp->ooo_last_skb = skb; in tcp_data_queue_ofo()
4519 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, in tcp_data_queue_ofo()
4528 if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) { in tcp_data_queue_ofo()
4529 parent = &tp->ooo_last_skb->rbnode; in tcp_data_queue_ofo()
4561 &tp->out_of_order_queue); in tcp_data_queue_ofo()
4579 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4591 rb_erase(&skb1->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4599 tp->ooo_last_skb = skb; in tcp_data_queue_ofo()
4602 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
4684 const struct tcp_sock *tp = tcp_sk(sk); in tcp_data_ready() local
4685 int avail = tp->rcv_nxt - tp->copied_seq; in tcp_data_ready()
4695 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue() local
4708 tp->rx_opt.dsack = 0; in tcp_data_queue()
4714 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { in tcp_data_queue()
4715 if (tcp_receive_window(tp) == 0) { in tcp_data_queue()
4735 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_data_queue()
4741 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) in tcp_data_queue()
4745 if (tp->rx_opt.num_sacks) in tcp_data_queue()
4746 tcp_sack_remove(tp); in tcp_data_queue()
4757 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { in tcp_data_queue()
4771 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) in tcp_data_queue()
4774 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_data_queue()
4777 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, in tcp_data_queue()
4780 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
4785 if (!tcp_receive_window(tp)) { in tcp_data_queue()
4953 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_ofo_queue() local
4958 skb = skb_rb_first(&tp->out_of_order_queue); in tcp_collapse_ofo_queue()
4961 tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue); in tcp_collapse_ofo_queue()
4980 tcp_collapse(sk, NULL, &tp->out_of_order_queue, in tcp_collapse_ofo_queue()
5011 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_ofo_queue() local
5015 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) in tcp_prune_ofo_queue()
5020 node = &tp->ooo_last_skb->rbnode; in tcp_prune_ofo_queue()
5023 rb_erase(node, &tp->out_of_order_queue); in tcp_prune_ofo_queue()
5035 tp->ooo_last_skb = rb_to_skb(prev); in tcp_prune_ofo_queue()
5042 if (tp->rx_opt.sack_ok) in tcp_prune_ofo_queue()
5043 tcp_sack_reset(&tp->rx_opt); in tcp_prune_ofo_queue()
5056 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_queue() local
5058 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); in tcp_prune_queue()
5065 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); in tcp_prune_queue()
5075 tp->copied_seq, tp->rcv_nxt); in tcp_prune_queue()
5096 tp->pred_flags = 0; in tcp_prune_queue()
5102 const struct tcp_sock *tp = tcp_sk(sk); in tcp_should_expand_sndbuf() local
5119 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) in tcp_should_expand_sndbuf()
5133 struct tcp_sock *tp = tcp_sk(sk); in tcp_new_space() local
5137 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_new_space()
5169 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ack_snd_check() local
5173 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && in __tcp_ack_snd_check()
5179 (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat || in __tcp_ack_snd_check()
5180 __tcp_select_window(sk) >= tp->rcv_wnd)) || in __tcp_ack_snd_check()
5190 if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in __tcp_ack_snd_check()
5195 if (!tcp_is_sack(tp) || in __tcp_ack_snd_check()
5196 tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr) in __tcp_ack_snd_check()
5198 tp->compressed_ack++; in __tcp_ack_snd_check()
5200 if (hrtimer_is_queued(&tp->compressed_ack_timer)) in __tcp_ack_snd_check()
5205 rtt = tp->rcv_rtt_est.rtt_us; in __tcp_ack_snd_check()
5206 if (tp->srtt_us && tp->srtt_us < rtt) in __tcp_ack_snd_check()
5207 rtt = tp->srtt_us; in __tcp_ack_snd_check()
5212 hrtimer_start(&tp->compressed_ack_timer, ns_to_ktime(delay), in __tcp_ack_snd_check()
5237 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_urg() local
5245 if (after(tp->copied_seq, ptr)) in tcp_check_urg()
5258 if (before(ptr, tp->rcv_nxt)) in tcp_check_urg()
5262 if (tp->urg_data && !after(ptr, tp->urg_seq)) in tcp_check_urg()
5283 if (tp->urg_seq == tp->copied_seq && tp->urg_data && in tcp_check_urg()
5284 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { in tcp_check_urg()
5286 tp->copied_seq++; in tcp_check_urg()
5287 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_check_urg()
5293 tp->urg_data = TCP_URG_NOTYET; in tcp_check_urg()
5294 tp->urg_seq = ptr; in tcp_check_urg()
5297 tp->pred_flags = 0; in tcp_check_urg()
5303 struct tcp_sock *tp = tcp_sk(sk); in tcp_urg() local
5310 if (tp->urg_data == TCP_URG_NOTYET) { in tcp_urg()
5311 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - in tcp_urg()
5319 tp->urg_data = TCP_URG_VALID | tmp; in tcp_urg()
5336 struct tcp_sock *tp = tcp_sk(sk); in tcp_reset_check() local
5338 return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) && in tcp_reset_check()
5349 struct tcp_sock *tp = tcp_sk(sk); in tcp_validate_incoming() local
5353 if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) && in tcp_validate_incoming()
5354 tp->rx_opt.saw_tstamp && in tcp_validate_incoming()
5360 &tp->last_oow_ack_time)) in tcp_validate_incoming()
5368 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_validate_incoming()
5380 &tp->last_oow_ack_time)) in tcp_validate_incoming()
5399 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt || in tcp_validate_incoming()
5402 } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) { in tcp_validate_incoming()
5403 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_validate_incoming()
5407 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; in tcp_validate_incoming()
5425 if (tp->syn_fastopen && !tp->data_segs_in && in tcp_validate_incoming()
5480 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_established() local
5486 tcp_mstamp_refresh(tp); in tcp_rcv_established()
5504 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_established()
5515 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && in tcp_rcv_established()
5516 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && in tcp_rcv_established()
5517 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_established()
5518 int tcp_header_len = tp->tcp_header_len; in tcp_rcv_established()
5528 if (!tcp_parse_aligned_timestamp(tp, th)) in tcp_rcv_established()
5532 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) in tcp_rcv_established()
5551 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
5552 tcp_store_ts_recent(tp); in tcp_rcv_established()
5564 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; in tcp_rcv_established()
5586 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
5587 tcp_store_ts_recent(tp); in tcp_rcv_established()
5599 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { in tcp_rcv_established()
5657 struct tcp_sock *tp = tcp_sk(sk); in tcp_finish_connect() local
5674 tp->lsndtime = tcp_jiffies32; in tcp_finish_connect()
5677 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); in tcp_finish_connect()
5679 if (!tp->rx_opt.snd_wscale) in tcp_finish_connect()
5680 __tcp_fast_path_on(tp, tp->snd_wnd); in tcp_finish_connect()
5682 tp->pred_flags = 0; in tcp_finish_connect()
5688 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_fastopen_synack() local
5689 struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL; in tcp_rcv_fastopen_synack()
5690 u16 mss = tp->rx_opt.mss_clamp, try_exp = 0; in tcp_rcv_fastopen_synack()
5693 if (mss == tp->rx_opt.user_mss) { in tcp_rcv_fastopen_synack()
5703 if (!tp->syn_fastopen) { in tcp_rcv_fastopen_synack()
5706 } else if (tp->total_retrans) { in tcp_rcv_fastopen_synack()
5713 } else if (cookie->len < 0 && !tp->syn_data) { in tcp_rcv_fastopen_synack()
5718 try_exp = tp->syn_fastopen_exp ? 2 : 1; in tcp_rcv_fastopen_synack()
5733 tp->syn_data_acked = tp->syn_data; in tcp_rcv_fastopen_synack()
5734 if (tp->syn_data_acked) { in tcp_rcv_fastopen_synack()
5737 if (tp->delivered > 1) in tcp_rcv_fastopen_synack()
5738 --tp->delivered; in tcp_rcv_fastopen_synack()
5746 static void smc_check_reset_syn(struct tcp_sock *tp) in smc_check_reset_syn() argument
5750 if (tp->syn_smc && !tp->rx_opt.smc_ok) in smc_check_reset_syn()
5751 tp->syn_smc = 0; in smc_check_reset_syn()
5760 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synsent_state_process() local
5762 int saved_clamp = tp->rx_opt.mss_clamp; in tcp_rcv_synsent_state_process()
5765 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc); in tcp_rcv_synsent_state_process()
5766 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_rcv_synsent_state_process()
5767 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_rcv_synsent_state_process()
5778 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || in tcp_rcv_synsent_state_process()
5779 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) in tcp_rcv_synsent_state_process()
5782 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_rcv_synsent_state_process()
5783 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, in tcp_rcv_synsent_state_process()
5784 tcp_time_stamp(tp))) { in tcp_rcv_synsent_state_process()
5820 tcp_ecn_rcv_synack(tp, th); in tcp_rcv_synsent_state_process()
5822 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_synsent_state_process()
5828 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5829 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5834 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
5836 if (!tp->rx_opt.wscale_ok) { in tcp_rcv_synsent_state_process()
5837 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; in tcp_rcv_synsent_state_process()
5838 tp->window_clamp = min(tp->window_clamp, 65535U); in tcp_rcv_synsent_state_process()
5841 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
5842 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
5843 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
5845 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_synsent_state_process()
5846 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
5848 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
5857 tp->copied_seq = tp->rcv_nxt; in tcp_rcv_synsent_state_process()
5859 smc_check_reset_syn(tp); in tcp_rcv_synsent_state_process()
5865 fastopen_fail = (tp->syn_fastopen || tp->syn_data) && in tcp_rcv_synsent_state_process()
5911 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && in tcp_rcv_synsent_state_process()
5912 tcp_paws_reject(&tp->rx_opt, 0)) in tcp_rcv_synsent_state_process()
5922 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
5923 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
5924 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
5925 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
5928 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
5931 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5932 tp->copied_seq = tp->rcv_nxt; in tcp_rcv_synsent_state_process()
5933 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
5938 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
5939 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; in tcp_rcv_synsent_state_process()
5940 tp->max_window = tp->snd_wnd; in tcp_rcv_synsent_state_process()
5942 tcp_ecn_rcv_syn(tp, th); in tcp_rcv_synsent_state_process()
5971 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
5972 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
5976 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
5977 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
5990 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_state_process() local
6028 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_state_process()
6029 tcp_mstamp_refresh(tp); in tcp_rcv_state_process()
6041 tcp_mstamp_refresh(tp); in tcp_rcv_state_process()
6042 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_state_process()
6043 req = tp->fastopen_rsk; in tcp_rcv_state_process()
6073 tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */ in tcp_rcv_state_process()
6074 if (!tp->srtt_us) in tcp_rcv_state_process()
6094 tp->copied_seq = tp->rcv_nxt; in tcp_rcv_state_process()
6107 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; in tcp_rcv_state_process()
6108 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; in tcp_rcv_state_process()
6109 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_state_process()
6111 if (tp->rx_opt.tstamp_ok) in tcp_rcv_state_process()
6112 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_state_process()
6118 tp->lsndtime = tcp_jiffies32; in tcp_rcv_state_process()
6121 tcp_fast_path_on(tp); in tcp_rcv_state_process()
6137 if (tp->snd_una != tp->write_seq) in tcp_rcv_state_process()
6151 if (tp->linger2 < 0) { in tcp_rcv_state_process()
6157 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
6159 if (tp->syn_fastopen && th->fin) in tcp_rcv_state_process()
6185 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
6192 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
6208 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_rcv_state_process()
6219 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
6402 struct tcp_sock *tp = tcp_sk(sk); in tcp_conn_request() local
6435 tmp_opt.user_mss = tp->rx_opt.user_mss; in tcp_conn_request()