Lines matching refs: tp (struct tcp_sock *tp) in net/ipv4/tcp_input.c
312 static void tcp_ecn_queue_cwr(struct tcp_sock *tp) in tcp_ecn_queue_cwr() argument
314 if (tp->ecn_flags & TCP_ECN_OK) in tcp_ecn_queue_cwr()
315 tp->ecn_flags |= TCP_ECN_QUEUE_CWR; in tcp_ecn_queue_cwr()
332 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) in tcp_ecn_withdraw_cwr() argument
334 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_withdraw_cwr()
339 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ecn_check_ce() local
347 if (tp->ecn_flags & TCP_ECN_SEEN) in __tcp_ecn_check_ce()
354 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { in __tcp_ecn_check_ce()
357 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; in __tcp_ecn_check_ce()
359 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
364 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
375 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_synack() argument
377 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) in tcp_ecn_rcv_synack()
378 tp->ecn_flags &= ~TCP_ECN_OK; in tcp_ecn_rcv_synack()
381 static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_syn() argument
383 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) in tcp_ecn_rcv_syn()
384 tp->ecn_flags &= ~TCP_ECN_OK; in tcp_ecn_rcv_syn()
387 static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_ecn_echo() argument
389 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_rcv_ecn_echo()
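The tcp_ecn_rcv_synack(), tcp_ecn_rcv_syn() and tcp_ecn_rcv_ecn_echo() checks above encode the RFC 3168 negotiation rule: an ECN-setup SYN must carry ECE and CWR, an ECN-setup SYN-ACK must carry ECE without CWR, and once negotiation has stuck, ECE on a later non-SYN segment is a congestion echo. A minimal standalone sketch of that rule (struct and helper names are illustrative, not kernel identifiers):

    #include <stdbool.h>
    #include <stdio.h>

    struct tcp_flags { bool syn, ece, cwr; };

    /* RFC 3168: ECN-setup SYN has ECE=1,CWR=1; ECN-setup SYN-ACK has ECE=1,CWR=0;
     * after negotiation, ECE on a non-SYN segment echoes a CE mark. */
    static bool ecn_ok_after_syn(struct tcp_flags f)    { return f.ece && f.cwr;  }
    static bool ecn_ok_after_synack(struct tcp_flags f) { return f.ece && !f.cwr; }
    static bool ecn_congestion_echo(bool ecn_ok, struct tcp_flags f)
    {
            return ecn_ok && f.ece && !f.syn;
    }

    int main(void)
    {
            struct tcp_flags syn    = { .syn = true, .ece = true, .cwr = true  };
            struct tcp_flags synack = { .syn = true, .ece = true, .cwr = false };
            struct tcp_flags data   = { .ece = true };

            printf("server keeps ECN: %d\n", ecn_ok_after_syn(syn));            /* 1 */
            printf("client keeps ECN: %d\n", ecn_ok_after_synack(synack));      /* 1 */
            printf("CE echoed on data: %d\n", ecn_congestion_echo(true, data)); /* 1 */
            return 0;
    }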
401 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand() local
409 per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + in tcp_sndbuf_expand()
416 nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd); in tcp_sndbuf_expand()
417 nr_segs = max_t(u32, nr_segs, tp->reordering + 1); in tcp_sndbuf_expand()
459 struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window() local
464 while (tp->rcv_ssthresh <= window) { in __tcp_grow_window()
476 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window() local
479 room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh; in tcp_grow_window()
489 incr = 2 * tp->advmss; in tcp_grow_window()
495 tp->rcv_ssthresh += min(room, incr); in tcp_grow_window()
507 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space() local
513 tcp_mstamp_refresh(tp); in tcp_init_buffer_space()
514 tp->rcvq_space.time = tp->tcp_mstamp; in tcp_init_buffer_space()
515 tp->rcvq_space.seq = tp->copied_seq; in tcp_init_buffer_space()
519 if (tp->window_clamp >= maxwin) { in tcp_init_buffer_space()
520 tp->window_clamp = maxwin; in tcp_init_buffer_space()
522 if (tcp_app_win && maxwin > 4 * tp->advmss) in tcp_init_buffer_space()
523 tp->window_clamp = max(maxwin - in tcp_init_buffer_space()
525 4 * tp->advmss); in tcp_init_buffer_space()
530 tp->window_clamp > 2 * tp->advmss && in tcp_init_buffer_space()
531 tp->window_clamp + tp->advmss > maxwin) in tcp_init_buffer_space()
532 tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss); in tcp_init_buffer_space()
534 tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); in tcp_init_buffer_space()
535 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_init_buffer_space()
536 tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd, in tcp_init_buffer_space()
537 (u32)TCP_INIT_CWND * tp->advmss); in tcp_init_buffer_space()
543 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window() local
558 tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); in tcp_clamp_window()
570 const struct tcp_sock *tp = tcp_sk(sk); in tcp_initialize_rcv_mss() local
571 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); in tcp_initialize_rcv_mss()
573 hint = min(hint, tp->rcv_wnd / 2); in tcp_initialize_rcv_mss()
592 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) in tcp_rcv_rtt_update() argument
594 u32 new_sample = tp->rcv_rtt_est.rtt_us; in tcp_rcv_rtt_update()
621 tp->rcv_rtt_est.rtt_us = new_sample; in tcp_rcv_rtt_update()
624 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) in tcp_rcv_rtt_measure() argument
628 if (tp->rcv_rtt_est.time == 0) in tcp_rcv_rtt_measure()
630 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) in tcp_rcv_rtt_measure()
632 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); in tcp_rcv_rtt_measure()
635 tcp_rcv_rtt_update(tp, delta_us, 1); in tcp_rcv_rtt_measure()
638 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; in tcp_rcv_rtt_measure()
639 tp->rcv_rtt_est.time = tp->tcp_mstamp; in tcp_rcv_rtt_measure()
645 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts() local
647 if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr) in tcp_rcv_rtt_measure_ts()
649 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; in tcp_rcv_rtt_measure_ts()
653 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; in tcp_rcv_rtt_measure_ts()
660 tcp_rcv_rtt_update(tp, delta_us, 0); in tcp_rcv_rtt_measure_ts()
671 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust() local
677 tcp_mstamp_refresh(tp); in tcp_rcv_space_adjust()
678 time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); in tcp_rcv_space_adjust()
679 if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0) in tcp_rcv_space_adjust()
683 copied = tp->copied_seq - tp->rcvq_space.seq; in tcp_rcv_space_adjust()
684 if (copied <= tp->rcvq_space.space) in tcp_rcv_space_adjust()
704 rcvwin = ((u64)copied << 1) + 16 * tp->advmss; in tcp_rcv_space_adjust()
707 grow = rcvwin * (copied - tp->rcvq_space.space); in tcp_rcv_space_adjust()
708 do_div(grow, tp->rcvq_space.space); in tcp_rcv_space_adjust()
711 rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER); in tcp_rcv_space_adjust()
712 while (tcp_win_from_space(sk, rcvmem) < tp->advmss) in tcp_rcv_space_adjust()
715 do_div(rcvwin, tp->advmss); in tcp_rcv_space_adjust()
722 tp->window_clamp = tcp_win_from_space(sk, rcvbuf); in tcp_rcv_space_adjust()
725 tp->rcvq_space.space = copied; in tcp_rcv_space_adjust()
728 tp->rcvq_space.seq = tp->copied_seq; in tcp_rcv_space_adjust()
729 tp->rcvq_space.time = tp->tcp_mstamp; in tcp_rcv_space_adjust()
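tcp_rcv_space_adjust() above is the receive-buffer autotuning (DRS) step: once per RTT it compares how much the application copied against the previous estimate and, when the tcp_moderate_rcvbuf path is taken, targets a window of twice that amount plus slack, inflated by the observed growth. A rough sketch of just that target arithmetic, using the 2x and 16*advmss terms visible above and leaving out the truesize conversion and the tcp_rmem clamping:

    #include <stdint.h>
    #include <stdio.h>

    /* Target receive window after one RTT of measurement; simplified, the real
     * code also converts this to a buffer size and clamps it to tcp_rmem[2]. */
    static uint64_t drs_target_rcvwin(uint64_t copied, uint64_t prev_space,
                                      uint32_t advmss)
    {
            uint64_t rcvwin = 2 * copied + 16 * advmss;   /* 2x last RTT's copy */
            uint64_t grow = rcvwin * (copied - prev_space) / prev_space;

            return rcvwin + 2 * grow;                     /* inflate by growth rate */
    }

    int main(void)
    {
            /* app copied 300 kB this RTT, previous estimate 200 kB, MSS 1460 */
            printf("target rcvwin ~ %llu bytes\n",
                   (unsigned long long)drs_target_rcvwin(300000, 200000, 1460));
            return 0;
    }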
744 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv() local
752 tcp_rcv_rtt_measure(tp); in tcp_event_data_recv()
799 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator() local
801 u32 srtt = tp->srtt_us; in tcp_rtt_estimator()
824 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
836 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
838 tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ in tcp_rtt_estimator()
839 if (tp->mdev_us > tp->mdev_max_us) { in tcp_rtt_estimator()
840 tp->mdev_max_us = tp->mdev_us; in tcp_rtt_estimator()
841 if (tp->mdev_max_us > tp->rttvar_us) in tcp_rtt_estimator()
842 tp->rttvar_us = tp->mdev_max_us; in tcp_rtt_estimator()
844 if (after(tp->snd_una, tp->rtt_seq)) { in tcp_rtt_estimator()
845 if (tp->mdev_max_us < tp->rttvar_us) in tcp_rtt_estimator()
846 tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2; in tcp_rtt_estimator()
847 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
848 tp->mdev_max_us = tcp_rto_min_us(sk); in tcp_rtt_estimator()
855 tp->mdev_us = m << 1; /* make sure rto = 3*rtt */ in tcp_rtt_estimator()
856 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); in tcp_rtt_estimator()
857 tp->mdev_max_us = tp->rttvar_us; in tcp_rtt_estimator()
858 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
862 tp->srtt_us = max(1U, srtt); in tcp_rtt_estimator()
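tcp_rtt_estimator() above is the Van Jacobson / RFC 6298 smoother kept in fixed point (srtt_us left-shifted by 3, mdev_us by 2), plus the extra mdev_max/rttvar damping. An unscaled sketch of the core update, assuming the standard alpha = 1/8 and beta = 1/4:

    #include <math.h>
    #include <stdio.h>

    /* RFC 6298 EWMA, floating point for clarity; the kernel does the same
     * update in scaled integer microseconds. */
    struct rtt_est { double srtt, rttvar; int init; };

    static void rtt_sample(struct rtt_est *e, double m /* measured RTT, usec */)
    {
            if (!e->init) {
                    e->srtt = m;
                    e->rttvar = m / 2;
                    e->init = 1;
                    return;
            }
            e->rttvar = 0.75 * e->rttvar + 0.25 * fabs(m - e->srtt);
            e->srtt   = 0.875 * e->srtt  + 0.125 * m;
    }

    int main(void)
    {
            struct rtt_est e = { 0 };
            double samples[] = { 20000, 24000, 18000, 60000 };

            for (int i = 0; i < 4; i++) {
                    rtt_sample(&e, samples[i]);
                    printf("srtt=%.0f rttvar=%.0f rto~%.0f usec\n",
                           e.srtt, e.rttvar, e.srtt + 4 * e.rttvar);
            }
            return 0;
    }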
867 const struct tcp_sock *tp = tcp_sk(sk); in tcp_update_pacing_rate() local
871 rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3); in tcp_update_pacing_rate()
881 if (tp->snd_cwnd < tp->snd_ssthresh / 2) in tcp_update_pacing_rate()
886 rate *= max(tp->snd_cwnd, tp->packets_out); in tcp_update_pacing_rate()
888 if (likely(tp->srtt_us)) in tcp_update_pacing_rate()
889 do_div(rate, tp->srtt_us); in tcp_update_pacing_rate()
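tcp_update_pacing_rate() above boils down to rate = ratio * cwnd * mss / srtt, with the shift by 3 cancelling against the scaled srtt_us. A worked sketch assuming the documented default ratios (200% while cwnd < ssthresh/2, 120% afterwards); the ratio values are sysctl defaults, not something read from this listing:

    #include <stdint.h>
    #include <stdio.h>

    /* Pacing rate in bytes/sec = ratio% * cwnd * mss / srtt. */
    static uint64_t pacing_rate(uint32_t mss, uint32_t cwnd, uint32_t ssthresh,
                                uint32_t srtt_us)
    {
            uint64_t ratio = (cwnd < ssthresh / 2) ? 200 : 120; /* assumed defaults */

            return (uint64_t)mss * cwnd * ratio * 1000000ull / 100 / srtt_us;
    }

    int main(void)
    {
            /* mss 1448, cwnd 10, congestion avoidance, srtt 20 ms -> ~868800 B/s */
            printf("%llu bytes/sec\n",
                   (unsigned long long)pacing_rate(1448, 10, 10, 20000));
            return 0;
    }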
904 const struct tcp_sock *tp = tcp_sk(sk); in tcp_set_rto() local
915 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); in tcp_set_rto()
929 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) in tcp_init_cwnd() argument
935 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); in tcp_init_cwnd()
958 static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq, in tcp_dsack_seen() argument
968 if (seq_len > tp->max_window) in tcp_dsack_seen()
970 if (seq_len > tp->mss_cache) in tcp_dsack_seen()
971 dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache); in tcp_dsack_seen()
973 tp->dsack_dups += dup_segs; in tcp_dsack_seen()
975 if (tp->dsack_dups > tp->total_retrans) in tcp_dsack_seen()
978 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; in tcp_dsack_seen()
979 tp->rack.dsack_seen = 1; in tcp_dsack_seen()
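tcp_dsack_seen() above turns one D-SACK block into a duplicate-segment count: one per MSS-sized chunk of the reported range, with a range wider than the peer's largest advertised window treated as bogus. A simplified sketch that ignores sequence wrap-around:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t dsack_dup_segs(uint32_t start_seq, uint32_t end_seq,
                                   uint32_t mss, uint32_t max_window)
    {
            uint32_t seq_len = end_seq - start_seq;

            if (end_seq <= start_seq || seq_len > max_window)
                    return 0;                      /* empty or implausible block */
            return (seq_len + mss - 1) / mss;      /* DIV_ROUND_UP(seq_len, mss) */
    }

    int main(void)
    {
            printf("%u\n", dsack_dup_segs(1000, 4000, 1448, 65535)); /* 3 dup segs */
            return 0;
    }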
995 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reordering() local
996 const u32 mss = tp->mss_cache; in tcp_check_sack_reordering()
999 fack = tcp_highest_sack_seq(tp); in tcp_check_sack_reordering()
1004 if ((metric > tp->reordering * mss) && mss) { in tcp_check_sack_reordering()
1007 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, in tcp_check_sack_reordering()
1008 tp->reordering, in tcp_check_sack_reordering()
1010 tp->sacked_out, in tcp_check_sack_reordering()
1011 tp->undo_marker ? tp->undo_retrans : 0); in tcp_check_sack_reordering()
1013 tp->reordering = min_t(u32, (metric + mss - 1) / mss, in tcp_check_sack_reordering()
1018 tp->reord_seen++; in tcp_check_sack_reordering()
1028 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) in tcp_verify_retransmit_hint() argument
1030 if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) || in tcp_verify_retransmit_hint()
1031 (tp->retransmit_skb_hint && in tcp_verify_retransmit_hint()
1033 TCP_SKB_CB(tp->retransmit_skb_hint)->seq))) in tcp_verify_retransmit_hint()
1034 tp->retransmit_skb_hint = skb; in tcp_verify_retransmit_hint()
1040 static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_notify_skb_loss_event() argument
1042 tp->lost += tcp_skb_pcount(skb); in tcp_notify_skb_loss_event()
1048 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_skb_lost() local
1053 tcp_verify_retransmit_hint(tp, skb); in tcp_mark_skb_lost()
1058 tp->retrans_out -= tcp_skb_pcount(skb); in tcp_mark_skb_lost()
1061 tcp_notify_skb_loss_event(tp, skb); in tcp_mark_skb_lost()
1064 tp->lost_out += tcp_skb_pcount(skb); in tcp_mark_skb_lost()
1066 tcp_notify_skb_loss_event(tp, skb); in tcp_mark_skb_lost()
1071 static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, in tcp_count_delivered() argument
1074 tp->delivered += delivered; in tcp_count_delivered()
1076 tp->delivered_ce += delivered; in tcp_count_delivered()
1172 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, in tcp_is_sackblock_valid() argument
1176 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) in tcp_is_sackblock_valid()
1180 if (!before(start_seq, tp->snd_nxt)) in tcp_is_sackblock_valid()
1186 if (after(start_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1189 if (!is_dsack || !tp->undo_marker) in tcp_is_sackblock_valid()
1193 if (after(end_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1196 if (!before(start_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1200 if (!after(end_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1206 return !before(start_seq, end_seq - tp->max_window); in tcp_is_sackblock_valid()
1213 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_dsack() local
1231 dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state); in tcp_check_dsack()
1240 if (tp->undo_marker && tp->undo_retrans > 0 && in tcp_check_dsack()
1242 after(end_seq_0, tp->undo_marker)) in tcp_check_dsack()
1243 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - dup_segs); in tcp_check_dsack()
1311 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_one() local
1315 if (tp->undo_marker && tp->undo_retrans > 0 && in tcp_sacktag_one()
1316 after(end_seq, tp->undo_marker)) in tcp_sacktag_one()
1317 tp->undo_retrans--; in tcp_sacktag_one()
1324 if (!after(end_seq, tp->snd_una)) in tcp_sacktag_one()
1328 tcp_rack_advance(tp, sacked, end_seq, xmit_time); in tcp_sacktag_one()
1337 tp->lost_out -= pcount; in tcp_sacktag_one()
1338 tp->retrans_out -= pcount; in tcp_sacktag_one()
1346 tcp_highest_sack_seq(tp)) && in tcp_sacktag_one()
1350 if (!after(end_seq, tp->high_seq)) in tcp_sacktag_one()
1359 tp->lost_out -= pcount; in tcp_sacktag_one()
1365 tp->sacked_out += pcount; in tcp_sacktag_one()
1370 if (tp->lost_skb_hint && in tcp_sacktag_one()
1371 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) in tcp_sacktag_one()
1372 tp->lost_cnt_hint += pcount; in tcp_sacktag_one()
1381 tp->retrans_out -= pcount; in tcp_sacktag_one()
1396 struct tcp_sock *tp = tcp_sk(sk); in tcp_shifted_skb() local
1413 if (skb == tp->lost_skb_hint) in tcp_shifted_skb()
1414 tp->lost_cnt_hint += pcount; in tcp_shifted_skb()
1446 if (skb == tp->retransmit_skb_hint) in tcp_shifted_skb()
1447 tp->retransmit_skb_hint = prev; in tcp_shifted_skb()
1448 if (skb == tp->lost_skb_hint) { in tcp_shifted_skb()
1449 tp->lost_skb_hint = prev; in tcp_shifted_skb()
1450 tp->lost_cnt_hint -= tcp_skb_pcount(prev); in tcp_shifted_skb()
1509 struct tcp_sock *tp = tcp_sk(sk); in tcp_shift_skb_data() local
1523 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in tcp_shift_skb_data()
1603 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) in tcp_shift_skb_data()
1646 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_walk() local
1705 tcp_highest_sack_seq(tp))) in tcp_sacktag_walk()
1761 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) in tcp_sack_cache_ok() argument
1763 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sack_cache_ok()
1770 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_write_queue() local
1784 state->reord = tp->snd_nxt; in tcp_sacktag_write_queue()
1786 if (!tp->sacked_out) in tcp_sacktag_write_queue()
1796 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) in tcp_sacktag_write_queue()
1799 if (!tp->packets_out) in tcp_sacktag_write_queue()
1810 if (!tcp_is_sackblock_valid(tp, dup_sack, in tcp_sacktag_write_queue()
1816 if (!tp->undo_marker) in tcp_sacktag_write_queue()
1822 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && in tcp_sacktag_write_queue()
1823 !after(sp[used_sacks].end_seq, tp->snd_una)) in tcp_sacktag_write_queue()
1861 if (!tp->sacked_out) { in tcp_sacktag_write_queue()
1863 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sacktag_write_queue()
1865 cache = tp->recv_sack_cache; in tcp_sacktag_write_queue()
1867 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && in tcp_sacktag_write_queue()
1882 while (tcp_sack_cache_ok(tp, cache) && in tcp_sacktag_write_queue()
1887 if (tcp_sack_cache_ok(tp, cache) && !dup_sack && in tcp_sacktag_write_queue()
1909 if (tcp_highest_sack_seq(tp) == cache->end_seq) { in tcp_sacktag_write_queue()
1924 if (!before(start_seq, tcp_highest_sack_seq(tp))) { in tcp_sacktag_write_queue()
1940 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { in tcp_sacktag_write_queue()
1941 tp->recv_sack_cache[i].start_seq = 0; in tcp_sacktag_write_queue()
1942 tp->recv_sack_cache[i].end_seq = 0; in tcp_sacktag_write_queue()
1945 tp->recv_sack_cache[i++] = sp[j]; in tcp_sacktag_write_queue()
1947 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker) in tcp_sacktag_write_queue()
1950 tcp_verify_left_out(tp); in tcp_sacktag_write_queue()
1954 WARN_ON((int)tp->sacked_out < 0); in tcp_sacktag_write_queue()
1955 WARN_ON((int)tp->lost_out < 0); in tcp_sacktag_write_queue()
1956 WARN_ON((int)tp->retrans_out < 0); in tcp_sacktag_write_queue()
1957 WARN_ON((int)tcp_packets_in_flight(tp) < 0); in tcp_sacktag_write_queue()
1965 static bool tcp_limit_reno_sacked(struct tcp_sock *tp) in tcp_limit_reno_sacked() argument
1969 holes = max(tp->lost_out, 1U); in tcp_limit_reno_sacked()
1970 holes = min(holes, tp->packets_out); in tcp_limit_reno_sacked()
1972 if ((tp->sacked_out + holes) > tp->packets_out) { in tcp_limit_reno_sacked()
1973 tp->sacked_out = tp->packets_out - holes; in tcp_limit_reno_sacked()
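For non-SACK (NewReno) flows sacked_out is only inferred from duplicate ACKs, so tcp_limit_reno_sacked() above clamps it: the inferred count plus at least one hole can never exceed what is actually in flight, and hitting the clamp is treated as a reordering signal. A standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool limit_reno_sacked(uint32_t *sacked_out, uint32_t lost_out,
                                  uint32_t packets_out)
    {
            uint32_t holes = lost_out > 1 ? lost_out : 1;

            if (holes > packets_out)
                    holes = packets_out;
            if (*sacked_out + holes > packets_out) {
                    *sacked_out = packets_out - holes;
                    return true;   /* caller bumps the reordering estimate */
            }
            return false;
    }

    int main(void)
    {
            uint32_t sacked = 12;

            limit_reno_sacked(&sacked, 0, 10);
            printf("%u\n", sacked);   /* clamped to 9 */
            return 0;
    }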
1985 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_reno_reordering() local
1987 if (!tcp_limit_reno_sacked(tp)) in tcp_check_reno_reordering()
1990 tp->reordering = min_t(u32, tp->packets_out + addend, in tcp_check_reno_reordering()
1992 tp->reord_seen++; in tcp_check_reno_reordering()
2001 struct tcp_sock *tp = tcp_sk(sk); in tcp_add_reno_sack() local
2002 u32 prior_sacked = tp->sacked_out; in tcp_add_reno_sack()
2005 tp->sacked_out += num_dupack; in tcp_add_reno_sack()
2007 delivered = tp->sacked_out - prior_sacked; in tcp_add_reno_sack()
2009 tcp_count_delivered(tp, delivered, ece_ack); in tcp_add_reno_sack()
2010 tcp_verify_left_out(tp); in tcp_add_reno_sack()
2018 struct tcp_sock *tp = tcp_sk(sk); in tcp_remove_reno_sacks() local
2022 tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1), in tcp_remove_reno_sacks()
2024 if (acked - 1 >= tp->sacked_out) in tcp_remove_reno_sacks()
2025 tp->sacked_out = 0; in tcp_remove_reno_sacks()
2027 tp->sacked_out -= acked - 1; in tcp_remove_reno_sacks()
2030 tcp_verify_left_out(tp); in tcp_remove_reno_sacks()
2033 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) in tcp_reset_reno_sack() argument
2035 tp->sacked_out = 0; in tcp_reset_reno_sack()
2038 void tcp_clear_retrans(struct tcp_sock *tp) in tcp_clear_retrans() argument
2040 tp->retrans_out = 0; in tcp_clear_retrans()
2041 tp->lost_out = 0; in tcp_clear_retrans()
2042 tp->undo_marker = 0; in tcp_clear_retrans()
2043 tp->undo_retrans = -1; in tcp_clear_retrans()
2044 tp->sacked_out = 0; in tcp_clear_retrans()
2047 static inline void tcp_init_undo(struct tcp_sock *tp) in tcp_init_undo() argument
2049 tp->undo_marker = tp->snd_una; in tcp_init_undo()
2051 tp->undo_retrans = tp->retrans_out ? : -1; in tcp_init_undo()
2065 struct tcp_sock *tp = tcp_sk(sk); in tcp_timeout_mark_lost() local
2073 tp->sacked_out = 0; in tcp_timeout_mark_lost()
2075 tp->is_sack_reneg = 1; in tcp_timeout_mark_lost()
2076 } else if (tcp_is_reno(tp)) { in tcp_timeout_mark_lost()
2077 tcp_reset_reno_sack(tp); in tcp_timeout_mark_lost()
2085 tcp_rack_skb_timeout(tp, skb, 0) > 0) in tcp_timeout_mark_lost()
2089 tcp_verify_left_out(tp); in tcp_timeout_mark_lost()
2090 tcp_clear_all_retrans_hints(tp); in tcp_timeout_mark_lost()
2097 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_loss() local
2105 !after(tp->high_seq, tp->snd_una) || in tcp_enter_loss()
2107 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_loss()
2108 tp->prior_cwnd = tp->snd_cwnd; in tcp_enter_loss()
2109 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
2111 tcp_init_undo(tp); in tcp_enter_loss()
2113 tp->snd_cwnd = tcp_packets_in_flight(tp) + 1; in tcp_enter_loss()
2114 tp->snd_cwnd_cnt = 0; in tcp_enter_loss()
2115 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_enter_loss()
2121 tp->sacked_out >= net->ipv4.sysctl_tcp_reordering) in tcp_enter_loss()
2122 tp->reordering = min_t(unsigned int, tp->reordering, in tcp_enter_loss()
2125 tp->high_seq = tp->snd_nxt; in tcp_enter_loss()
2126 tcp_ecn_queue_cwr(tp); in tcp_enter_loss()
2132 tp->frto = net->ipv4.sysctl_tcp_frto && in tcp_enter_loss()
2150 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reneging() local
2151 unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), in tcp_check_sack_reneging()
2172 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) in tcp_dupack_heuristics() argument
2174 return tp->sacked_out + 1; in tcp_dupack_heuristics()
2276 struct tcp_sock *tp = tcp_sk(sk); in tcp_time_to_recover() local
2279 if (tp->lost_out) in tcp_time_to_recover()
2283 if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering) in tcp_time_to_recover()
2296 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_head_lost() local
2300 const u32 loss_high = tp->snd_nxt; in tcp_mark_head_lost()
2302 WARN_ON(packets > tp->packets_out); in tcp_mark_head_lost()
2303 skb = tp->lost_skb_hint; in tcp_mark_head_lost()
2306 if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una)) in tcp_mark_head_lost()
2308 cnt = tp->lost_cnt_hint; in tcp_mark_head_lost()
2317 tp->lost_skb_hint = skb; in tcp_mark_head_lost()
2318 tp->lost_cnt_hint = cnt; in tcp_mark_head_lost()
2335 tcp_verify_left_out(tp); in tcp_mark_head_lost()
2342 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_scoreboard() local
2344 if (tcp_is_sack(tp)) { in tcp_update_scoreboard()
2345 int sacked_upto = tp->sacked_out - tp->reordering; in tcp_update_scoreboard()
2353 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when) in tcp_tsopt_ecr_before() argument
2355 return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_tsopt_ecr_before()
2356 before(tp->rx_opt.rcv_tsecr, when); in tcp_tsopt_ecr_before()
2362 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp, in tcp_skb_spurious_retrans() argument
2366 tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb)); in tcp_skb_spurious_retrans()
2372 static inline bool tcp_packet_delayed(const struct tcp_sock *tp) in tcp_packet_delayed() argument
2374 return tp->retrans_stamp && in tcp_packet_delayed()
2375 tcp_tsopt_ecr_before(tp, tp->retrans_stamp); in tcp_packet_delayed()
2396 const struct tcp_sock *tp = tcp_sk(sk); in tcp_any_retrans_done() local
2399 if (tp->retrans_out) in tcp_any_retrans_done()
2412 struct tcp_sock *tp = tcp_sk(sk); in DBGUNDO() local
2419 tp->snd_cwnd, tcp_left_out(tp), in DBGUNDO()
2420 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2421 tp->packets_out); in DBGUNDO()
2428 tp->snd_cwnd, tcp_left_out(tp), in DBGUNDO()
2429 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2430 tp->packets_out); in DBGUNDO()
2438 struct tcp_sock *tp = tcp_sk(sk); in tcp_undo_cwnd_reduction() local
2446 tp->lost_out = 0; in tcp_undo_cwnd_reduction()
2447 tcp_clear_all_retrans_hints(tp); in tcp_undo_cwnd_reduction()
2450 if (tp->prior_ssthresh) { in tcp_undo_cwnd_reduction()
2453 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); in tcp_undo_cwnd_reduction()
2455 if (tp->prior_ssthresh > tp->snd_ssthresh) { in tcp_undo_cwnd_reduction()
2456 tp->snd_ssthresh = tp->prior_ssthresh; in tcp_undo_cwnd_reduction()
2457 tcp_ecn_withdraw_cwr(tp); in tcp_undo_cwnd_reduction()
2460 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_undo_cwnd_reduction()
2461 tp->undo_marker = 0; in tcp_undo_cwnd_reduction()
2462 tp->rack.advanced = 1; /* Force RACK to re-exam losses */ in tcp_undo_cwnd_reduction()
2465 static inline bool tcp_may_undo(const struct tcp_sock *tp) in tcp_may_undo() argument
2467 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); in tcp_may_undo()
2473 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_recovery() local
2475 if (tcp_may_undo(tp)) { in tcp_try_undo_recovery()
2489 } else if (tp->rack.reo_wnd_persist) { in tcp_try_undo_recovery()
2490 tp->rack.reo_wnd_persist--; in tcp_try_undo_recovery()
2492 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { in tcp_try_undo_recovery()
2497 tp->retrans_stamp = 0; in tcp_try_undo_recovery()
2501 tp->is_sack_reneg = 0; in tcp_try_undo_recovery()
2508 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_dsack() local
2510 if (tp->undo_marker && !tp->undo_retrans) { in tcp_try_undo_dsack()
2511 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH, in tcp_try_undo_dsack()
2512 tp->rack.reo_wnd_persist + 1); in tcp_try_undo_dsack()
2524 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_loss() local
2526 if (frto_undo || tcp_may_undo(tp)) { in tcp_try_undo_loss()
2535 if (frto_undo || tcp_is_sack(tp)) { in tcp_try_undo_loss()
2537 tp->is_sack_reneg = 0; in tcp_try_undo_loss()
2555 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_cwnd_reduction() local
2557 tp->high_seq = tp->snd_nxt; in tcp_init_cwnd_reduction()
2558 tp->tlp_high_seq = 0; in tcp_init_cwnd_reduction()
2559 tp->snd_cwnd_cnt = 0; in tcp_init_cwnd_reduction()
2560 tp->prior_cwnd = tp->snd_cwnd; in tcp_init_cwnd_reduction()
2561 tp->prr_delivered = 0; in tcp_init_cwnd_reduction()
2562 tp->prr_out = 0; in tcp_init_cwnd_reduction()
2563 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); in tcp_init_cwnd_reduction()
2564 tcp_ecn_queue_cwr(tp); in tcp_init_cwnd_reduction()
2569 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_reduction() local
2571 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); in tcp_cwnd_reduction()
2573 if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd)) in tcp_cwnd_reduction()
2576 tp->prr_delivered += newly_acked_sacked; in tcp_cwnd_reduction()
2578 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + in tcp_cwnd_reduction()
2579 tp->prior_cwnd - 1; in tcp_cwnd_reduction()
2580 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; in tcp_cwnd_reduction()
2584 max_t(int, tp->prr_delivered - tp->prr_out, in tcp_cwnd_reduction()
2590 sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1)); in tcp_cwnd_reduction()
2591 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; in tcp_cwnd_reduction()
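tcp_cwnd_reduction() above is RFC 6937 Proportional Rate Reduction: while packets in flight still exceed ssthresh, sending is rationed to roughly ssthresh/prior_cwnd of what has been newly delivered. A simplified sketch of that main branch only (it leaves out the slow-start-back path and the forced first retransmit):

    #include <stdint.h>
    #include <stdio.h>

    /* PRR budget: ceil(ssthresh * prr_delivered / prior_cwnd) - prr_out */
    static uint32_t prr_sndcnt(uint32_t ssthresh, uint32_t prior_cwnd,
                               uint32_t prr_delivered, uint32_t prr_out)
    {
            uint64_t budget = ((uint64_t)ssthresh * prr_delivered + prior_cwnd - 1)
                              / prior_cwnd;

            return budget > prr_out ? (uint32_t)(budget - prr_out) : 0;
    }

    int main(void)
    {
            /* prior cwnd 20, ssthresh 10: about one new segment per two delivered */
            printf("%u\n", prr_sndcnt(10, 20, 4, 1));   /* ceil(40/20) - 1 = 1 */
            return 0;
    }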
2596 struct tcp_sock *tp = tcp_sk(sk); in tcp_end_cwnd_reduction() local
2602 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && in tcp_end_cwnd_reduction()
2603 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { in tcp_end_cwnd_reduction()
2604 tp->snd_cwnd = tp->snd_ssthresh; in tcp_end_cwnd_reduction()
2605 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_end_cwnd_reduction()
2613 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_cwr() local
2615 tp->prior_ssthresh = 0; in tcp_enter_cwr()
2617 tp->undo_marker = 0; in tcp_enter_cwr()
2626 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_keep_open() local
2629 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) in tcp_try_keep_open()
2634 tp->high_seq = tp->snd_nxt; in tcp_try_keep_open()
2640 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_to_open() local
2642 tcp_verify_left_out(tp); in tcp_try_to_open()
2645 tp->retrans_stamp = 0; in tcp_try_to_open()
2666 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_probe_success() local
2670 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2671 tp->snd_cwnd = tp->snd_cwnd * in tcp_mtup_probe_success()
2672 tcp_mss_to_mtu(sk, tp->mss_cache) / in tcp_mtup_probe_success()
2674 tp->snd_cwnd_cnt = 0; in tcp_mtup_probe_success()
2675 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_mtup_probe_success()
2676 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2691 struct tcp_sock *tp = tcp_sk(sk); in tcp_simple_retransmit() local
2700 tcp_clear_retrans_hints_partial(tp); in tcp_simple_retransmit()
2702 if (!tp->lost_out) in tcp_simple_retransmit()
2705 if (tcp_is_reno(tp)) in tcp_simple_retransmit()
2706 tcp_limit_reno_sacked(tp); in tcp_simple_retransmit()
2708 tcp_verify_left_out(tp); in tcp_simple_retransmit()
2716 tp->high_seq = tp->snd_nxt; in tcp_simple_retransmit()
2717 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_simple_retransmit()
2718 tp->prior_ssthresh = 0; in tcp_simple_retransmit()
2719 tp->undo_marker = 0; in tcp_simple_retransmit()
2728 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_recovery() local
2731 if (tcp_is_reno(tp)) in tcp_enter_recovery()
2738 tp->prior_ssthresh = 0; in tcp_enter_recovery()
2739 tcp_init_undo(tp); in tcp_enter_recovery()
2743 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_recovery()
2755 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_loss() local
2756 bool recovered = !before(tp->snd_una, tp->high_seq); in tcp_process_loss()
2758 if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) && in tcp_process_loss()
2762 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ in tcp_process_loss()
2770 if (after(tp->snd_nxt, tp->high_seq)) { in tcp_process_loss()
2772 tp->frto = 0; /* Step 3.a. loss was real */ in tcp_process_loss()
2774 tp->high_seq = tp->snd_nxt; in tcp_process_loss()
2780 after(tcp_wnd_end(tp), tp->snd_nxt)) { in tcp_process_loss()
2784 tp->frto = 0; in tcp_process_loss()
2793 if (tcp_is_reno(tp)) { in tcp_process_loss()
2797 if (after(tp->snd_nxt, tp->high_seq) && num_dupack) in tcp_process_loss()
2800 tcp_reset_reno_sack(tp); in tcp_process_loss()
2808 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_partial() local
2810 if (tp->undo_marker && tcp_packet_delayed(tp)) { in tcp_try_undo_partial()
2821 if (tp->retrans_out) in tcp_try_undo_partial()
2825 tp->retrans_stamp = 0; in tcp_try_undo_partial()
2838 struct tcp_sock *tp = tcp_sk(sk); in tcp_identify_packet_loss() local
2843 if (unlikely(tcp_is_reno(tp))) { in tcp_identify_packet_loss()
2846 u32 prior_retrans = tp->retrans_out; in tcp_identify_packet_loss()
2849 if (prior_retrans > tp->retrans_out) in tcp_identify_packet_loss()
2856 struct tcp_sock *tp = tcp_sk(sk); in tcp_force_fast_retransmit() local
2858 return after(tcp_highest_sack_seq(tp), in tcp_force_fast_retransmit()
2859 tp->snd_una + tp->reordering * tp->mss_cache); in tcp_force_fast_retransmit()
2878 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastretrans_alert() local
2884 if (!tp->packets_out && tp->sacked_out) in tcp_fastretrans_alert()
2885 tp->sacked_out = 0; in tcp_fastretrans_alert()
2890 tp->prior_ssthresh = 0; in tcp_fastretrans_alert()
2897 tcp_verify_left_out(tp); in tcp_fastretrans_alert()
2902 WARN_ON(tp->retrans_out != 0); in tcp_fastretrans_alert()
2903 tp->retrans_stamp = 0; in tcp_fastretrans_alert()
2904 } else if (!before(tp->snd_una, tp->high_seq)) { in tcp_fastretrans_alert()
2909 if (tp->snd_una != tp->high_seq) { in tcp_fastretrans_alert()
2916 if (tcp_is_reno(tp)) in tcp_fastretrans_alert()
2917 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
2929 if (tcp_is_reno(tp)) in tcp_fastretrans_alert()
2952 if (tcp_is_reno(tp)) { in tcp_fastretrans_alert()
2954 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
2970 tp->snd_una == tp->mtu_probe.probe_seq_start) { in tcp_fastretrans_alert()
2973 tp->snd_cwnd++; in tcp_fastretrans_alert()
2991 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_rtt_min() local
2993 if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) { in tcp_update_rtt_min()
3000 minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32, in tcp_update_rtt_min()
3008 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_rtt() local
3024 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_ack_update_rtt()
3026 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; in tcp_ack_update_rtt()
3079 struct tcp_sock *tp = tcp_sk(sk); in tcp_rearm_rto() local
3084 if (rcu_access_pointer(tp->fastopen_rsk)) in tcp_rearm_rto()
3087 if (!tp->packets_out) { in tcp_rearm_rto()
3115 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_acked() local
3118 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); in tcp_tso_acked()
3121 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3161 struct tcp_sock *tp = tcp_sk(sk); in tcp_clean_rtx_queue() local
3162 u32 prior_sacked = tp->sacked_out; in tcp_clean_rtx_queue()
3163 u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */ in tcp_clean_rtx_queue()
3183 if (after(scb->end_seq, tp->snd_una)) { in tcp_clean_rtx_queue()
3185 !after(tp->snd_una, scb->seq)) in tcp_clean_rtx_queue()
3198 tp->retrans_out -= acked_pcount; in tcp_clean_rtx_queue()
3209 if (!after(scb->end_seq, tp->high_seq)) in tcp_clean_rtx_queue()
3214 tp->sacked_out -= acked_pcount; in tcp_clean_rtx_queue()
3215 } else if (tcp_is_sack(tp)) { in tcp_clean_rtx_queue()
3216 tcp_count_delivered(tp, acked_pcount, ece_ack); in tcp_clean_rtx_queue()
3217 if (!tcp_skb_spurious_retrans(tp, skb)) in tcp_clean_rtx_queue()
3218 tcp_rack_advance(tp, sacked, scb->end_seq, in tcp_clean_rtx_queue()
3222 tp->lost_out -= acked_pcount; in tcp_clean_rtx_queue()
3224 tp->packets_out -= acked_pcount; in tcp_clean_rtx_queue()
3239 tp->retrans_stamp = 0; in tcp_clean_rtx_queue()
3248 if (unlikely(skb == tp->retransmit_skb_hint)) in tcp_clean_rtx_queue()
3249 tp->retransmit_skb_hint = NULL; in tcp_clean_rtx_queue()
3250 if (unlikely(skb == tp->lost_skb_hint)) in tcp_clean_rtx_queue()
3251 tp->lost_skb_hint = NULL; in tcp_clean_rtx_queue()
3259 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) in tcp_clean_rtx_queue()
3260 tp->snd_up = tp->snd_una; in tcp_clean_rtx_queue()
3269 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt); in tcp_clean_rtx_queue()
3270 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt); in tcp_clean_rtx_queue()
3272 if (pkts_acked == 1 && last_in_flight < tp->mss_cache && in tcp_clean_rtx_queue()
3274 sack->rate->prior_delivered + 1 == tp->delivered && in tcp_clean_rtx_queue()
3284 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt); in tcp_clean_rtx_queue()
3285 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt); in tcp_clean_rtx_queue()
3293 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { in tcp_clean_rtx_queue()
3297 if (tcp_is_reno(tp)) { in tcp_clean_rtx_queue()
3315 delta = prior_sacked - tp->sacked_out; in tcp_clean_rtx_queue()
3316 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); in tcp_clean_rtx_queue()
3319 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, in tcp_clean_rtx_queue()
3337 WARN_ON((int)tp->sacked_out < 0); in tcp_clean_rtx_queue()
3338 WARN_ON((int)tp->lost_out < 0); in tcp_clean_rtx_queue()
3339 WARN_ON((int)tp->retrans_out < 0); in tcp_clean_rtx_queue()
3340 if (!tp->packets_out && tcp_is_sack(tp)) { in tcp_clean_rtx_queue()
3342 if (tp->lost_out) { in tcp_clean_rtx_queue()
3344 tp->lost_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3345 tp->lost_out = 0; in tcp_clean_rtx_queue()
3347 if (tp->sacked_out) { in tcp_clean_rtx_queue()
3349 tp->sacked_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3350 tp->sacked_out = 0; in tcp_clean_rtx_queue()
3352 if (tp->retrans_out) { in tcp_clean_rtx_queue()
3354 tp->retrans_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3355 tp->retrans_out = 0; in tcp_clean_rtx_queue()
3366 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_probe() local
3371 if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) { in tcp_ack_probe()
3434 static inline bool tcp_may_update_window(const struct tcp_sock *tp, in tcp_may_update_window() argument
3438 return after(ack, tp->snd_una) || in tcp_may_update_window()
3439 after(ack_seq, tp->snd_wl1) || in tcp_may_update_window()
3440 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd); in tcp_may_update_window()
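tcp_may_update_window() above is the RFC 793 guard against taking a window update from a stale segment: accept it if the ACK advances snd_una, if the segment is newer than the one that last updated the window (snd_wl1), or if it is the same segment but advertises more space. A sketch using the same wrap-safe signed comparison idea as the kernel's after()/before():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool seq_after(uint32_t a, uint32_t b) { return (int32_t)(a - b) > 0; }

    static bool may_update_window(uint32_t ack, uint32_t ack_seq, uint32_t nwin,
                                  uint32_t snd_una, uint32_t snd_wl1,
                                  uint32_t snd_wnd)
    {
            return seq_after(ack, snd_una) ||              /* ACK advances snd_una */
                   seq_after(ack_seq, snd_wl1) ||          /* newer segment seen   */
                   (ack_seq == snd_wl1 && nwin > snd_wnd); /* same seg, larger win */
    }

    int main(void)
    {
            /* same ack and seq as the last update but a larger window: accept */
            printf("%d\n", may_update_window(1000, 500, 64240, 1000, 500, 29200));
            return 0;
    }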
3444 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) in tcp_snd_una_update() argument
3446 u32 delta = ack - tp->snd_una; in tcp_snd_una_update()
3448 sock_owned_by_me((struct sock *)tp); in tcp_snd_una_update()
3449 tp->bytes_acked += delta; in tcp_snd_una_update()
3450 tp->snd_una = ack; in tcp_snd_una_update()
3454 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) in tcp_rcv_nxt_update() argument
3456 u32 delta = seq - tp->rcv_nxt; in tcp_rcv_nxt_update()
3458 sock_owned_by_me((struct sock *)tp); in tcp_rcv_nxt_update()
3459 tp->bytes_received += delta; in tcp_rcv_nxt_update()
3460 WRITE_ONCE(tp->rcv_nxt, seq); in tcp_rcv_nxt_update()
3471 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_window() local
3476 nwin <<= tp->rx_opt.snd_wscale; in tcp_ack_update_window()
3478 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { in tcp_ack_update_window()
3480 tcp_update_wl(tp, ack_seq); in tcp_ack_update_window()
3482 if (tp->snd_wnd != nwin) { in tcp_ack_update_window()
3483 tp->snd_wnd = nwin; in tcp_ack_update_window()
3488 tp->pred_flags = 0; in tcp_ack_update_window()
3494 if (nwin > tp->max_window) { in tcp_ack_update_window()
3495 tp->max_window = nwin; in tcp_ack_update_window()
3501 tcp_snd_una_update(tp, ack); in tcp_ack_update_window()
3547 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_challenge_ack() local
3554 &tp->last_oow_ack_time)) in tcp_send_challenge_ack()
3574 static void tcp_store_ts_recent(struct tcp_sock *tp) in tcp_store_ts_recent() argument
3576 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; in tcp_store_ts_recent()
3577 tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); in tcp_store_ts_recent()
3580 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) in tcp_replace_ts_recent() argument
3582 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { in tcp_replace_ts_recent()
3590 if (tcp_paws_check(&tp->rx_opt, 0)) in tcp_replace_ts_recent()
3591 tcp_store_ts_recent(tp); in tcp_replace_ts_recent()
3600 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_tlp_ack() local
3602 if (before(ack, tp->tlp_high_seq)) in tcp_process_tlp_ack()
3605 if (!tp->tlp_retrans) { in tcp_process_tlp_ack()
3607 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3610 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3611 } else if (after(ack, tp->tlp_high_seq)) { in tcp_process_tlp_ack()
3624 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3642 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_recovery() local
3650 if (after(tp->snd_nxt, tp->high_seq)) in tcp_xmit_recovery()
3652 tp->frto = 0; in tcp_xmit_recovery()
3661 struct tcp_sock *tp = tcp_sk(sk); in tcp_newly_delivered() local
3664 delivered = tp->delivered - prior_delivered; in tcp_newly_delivered()
3676 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack() local
3679 u32 prior_snd_una = tp->snd_una; in tcp_ack()
3680 bool is_sack_reneg = tp->is_sack_reneg; in tcp_ack()
3684 int prior_packets = tp->packets_out; in tcp_ack()
3685 u32 delivered = tp->delivered; in tcp_ack()
3686 u32 lost = tp->lost; in tcp_ack()
3702 if (before(ack, prior_snd_una - tp->max_window)) { in tcp_ack()
3713 if (after(ack, tp->snd_nxt)) in tcp_ack()
3727 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una; in tcp_ack()
3728 rs.prior_in_flight = tcp_packets_in_flight(tp); in tcp_ack()
3734 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); in tcp_ack()
3742 tcp_update_wl(tp, ack_seq); in tcp_ack()
3743 tcp_snd_una_update(tp, ack); in tcp_ack()
3763 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { in tcp_ack()
3769 tcp_count_delivered(tp, sack_state.sack_delivered, in tcp_ack()
3792 tp->rcv_tstamp = tcp_jiffies32; in tcp_ack()
3802 if (tp->tlp_high_seq) in tcp_ack()
3823 lost = tp->lost - lost; /* freshly marked lost */ in tcp_ack()
3843 if (tp->tlp_high_seq) in tcp_ack()
4070 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) in tcp_parse_aligned_timestamp() argument
4076 tp->rx_opt.saw_tstamp = 1; in tcp_parse_aligned_timestamp()
4078 tp->rx_opt.rcv_tsval = ntohl(*ptr); in tcp_parse_aligned_timestamp()
4081 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; in tcp_parse_aligned_timestamp()
4083 tp->rx_opt.rcv_tsecr = 0; in tcp_parse_aligned_timestamp()
4094 const struct tcphdr *th, struct tcp_sock *tp) in tcp_fast_parse_options() argument
4100 tp->rx_opt.saw_tstamp = 0; in tcp_fast_parse_options()
4102 } else if (tp->rx_opt.tstamp_ok && in tcp_fast_parse_options()
4104 if (tcp_parse_aligned_timestamp(tp, th)) in tcp_fast_parse_options()
4108 tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL); in tcp_fast_parse_options()
4109 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_fast_parse_options()
4110 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_fast_parse_options()
4175 const struct tcp_sock *tp = tcp_sk(sk); in tcp_disordered_ack() local
4181 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && in tcp_disordered_ack()
4184 ack == tp->snd_una && in tcp_disordered_ack()
4187 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && in tcp_disordered_ack()
4190 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); in tcp_disordered_ack()
4196 const struct tcp_sock *tp = tcp_sk(sk); in tcp_paws_discard() local
4198 return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && in tcp_paws_discard()
4215 static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) in tcp_sequence() argument
4217 return !before(end_seq, tp->rcv_wup) && in tcp_sequence()
4218 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); in tcp_sequence()
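tcp_sequence() above is the segment acceptability test: the data must not end before the left edge the receiver has committed to (rcv_wup) and must not start beyond the right edge of the offered window. A sketch that simplifies the right edge to rcv_nxt + rcv_wnd (the real code derives it from tcp_receive_window()):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

    static bool segment_in_window(uint32_t seq, uint32_t end_seq, uint32_t rcv_wup,
                                  uint32_t rcv_nxt, uint32_t rcv_wnd)
    {
            return !seq_before(end_seq, rcv_wup) &&      /* not entirely old data */
                   !seq_before(rcv_nxt + rcv_wnd, seq);  /* not beyond right edge */
    }

    int main(void)
    {
            /* rcv_nxt 1000, window 1000: 900..1100 overlaps, 2500..2600 does not */
            printf("%d %d\n",
                   segment_in_window(900, 1100, 1000, 1000, 1000),    /* 1 */
                   segment_in_window(2500, 2600, 1000, 1000, 1000));  /* 0 */
            return 0;
    }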
4265 struct tcp_sock *tp = tcp_sk(sk); in tcp_fin() local
4315 skb_rbtree_purge(&tp->out_of_order_queue); in tcp_fin()
4316 if (tcp_is_sack(tp)) in tcp_fin()
4317 tcp_sack_reset(&tp->rx_opt); in tcp_fin()
4347 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_set() local
4349 if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) { in tcp_dsack_set()
4352 if (before(seq, tp->rcv_nxt)) in tcp_dsack_set()
4359 tp->rx_opt.dsack = 1; in tcp_dsack_set()
4360 tp->duplicate_sack[0].start_seq = seq; in tcp_dsack_set()
4361 tp->duplicate_sack[0].end_seq = end_seq; in tcp_dsack_set()
4367 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_extend() local
4369 if (!tp->rx_opt.dsack) in tcp_dsack_extend()
4372 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); in tcp_dsack_extend()
4390 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_dupack() local
4393 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_send_dupack()
4397 if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) { in tcp_send_dupack()
4401 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) in tcp_send_dupack()
4402 end_seq = tp->rcv_nxt; in tcp_send_dupack()
4413 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) in tcp_sack_maybe_coalesce() argument
4416 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_maybe_coalesce()
4422 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { in tcp_sack_maybe_coalesce()
4429 tp->rx_opt.num_sacks--; in tcp_sack_maybe_coalesce()
4430 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) in tcp_sack_maybe_coalesce()
4441 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_compress_send_ack() local
4443 if (!tp->compressed_ack) in tcp_sack_compress_send_ack()
4446 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) in tcp_sack_compress_send_ack()
4454 tp->compressed_ack - 1); in tcp_sack_compress_send_ack()
4456 tp->compressed_ack = 0; in tcp_sack_compress_send_ack()
4468 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_new_ofo_skb() local
4469 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_new_ofo_skb()
4470 int cur_sacks = tp->rx_opt.num_sacks; in tcp_sack_new_ofo_skb()
4484 tcp_sack_maybe_coalesce(tp); in tcp_sack_new_ofo_skb()
4500 tp->rx_opt.num_sacks--; in tcp_sack_new_ofo_skb()
4510 tp->rx_opt.num_sacks++; in tcp_sack_new_ofo_skb()
4515 static void tcp_sack_remove(struct tcp_sock *tp) in tcp_sack_remove() argument
4517 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_remove()
4518 int num_sacks = tp->rx_opt.num_sacks; in tcp_sack_remove()
4522 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_sack_remove()
4523 tp->rx_opt.num_sacks = 0; in tcp_sack_remove()
4529 if (!before(tp->rcv_nxt, sp->start_seq)) { in tcp_sack_remove()
4533 WARN_ON(before(tp->rcv_nxt, sp->end_seq)); in tcp_sack_remove()
4537 tp->selective_acks[i-1] = tp->selective_acks[i]; in tcp_sack_remove()
4544 tp->rx_opt.num_sacks = num_sacks; in tcp_sack_remove()
4628 struct tcp_sock *tp = tcp_sk(sk); in tcp_ofo_queue() local
4629 __u32 dsack_high = tp->rcv_nxt; in tcp_ofo_queue()
4634 p = rb_first(&tp->out_of_order_queue); in tcp_ofo_queue()
4637 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_ofo_queue()
4647 rb_erase(&skb->rbnode, &tp->out_of_order_queue); in tcp_ofo_queue()
4649 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) { in tcp_ofo_queue()
4656 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_ofo_queue()
4695 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue_ofo() local
4711 tp->pred_flags = 0; in tcp_data_queue_ofo()
4714 tp->rcv_ooopack += max_t(u16, 1, skb_shinfo(skb)->gso_segs); in tcp_data_queue_ofo()
4719 p = &tp->out_of_order_queue.rb_node; in tcp_data_queue_ofo()
4720 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_data_queue_ofo()
4722 if (tcp_is_sack(tp)) { in tcp_data_queue_ofo()
4723 tp->rx_opt.num_sacks = 1; in tcp_data_queue_ofo()
4724 tp->selective_acks[0].start_seq = seq; in tcp_data_queue_ofo()
4725 tp->selective_acks[0].end_seq = end_seq; in tcp_data_queue_ofo()
4728 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4729 tp->ooo_last_skb = skb; in tcp_data_queue_ofo()
4736 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, in tcp_data_queue_ofo()
4742 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
4749 if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) { in tcp_data_queue_ofo()
4750 parent = &tp->ooo_last_skb->rbnode; in tcp_data_queue_ofo()
4782 &tp->out_of_order_queue); in tcp_data_queue_ofo()
4800 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4812 rb_erase(&skb1->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
4820 tp->ooo_last_skb = skb; in tcp_data_queue_ofo()
4823 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
4830 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
4908 const struct tcp_sock *tp = tcp_sk(sk); in tcp_data_ready() local
4909 int avail = tp->rcv_nxt - tp->copied_seq; in tcp_data_ready()
4913 tcp_receive_window(tp) > inet_csk(sk)->icsk_ack.rcv_mss) in tcp_data_ready()
4921 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue() local
4935 tp->rx_opt.dsack = 0; in tcp_data_queue()
4941 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { in tcp_data_queue()
4942 if (tcp_receive_window(tp) == 0) { in tcp_data_queue()
4963 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_data_queue()
4969 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) in tcp_data_queue()
4973 if (tp->rx_opt.num_sacks) in tcp_data_queue()
4974 tcp_sack_remove(tp); in tcp_data_queue()
4985 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { in tcp_data_queue()
5000 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) in tcp_data_queue()
5003 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_data_queue()
5005 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
5010 if (!tcp_receive_window(tp)) { in tcp_data_queue()
5180 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_ofo_queue() local
5185 skb = skb_rb_first(&tp->out_of_order_queue); in tcp_collapse_ofo_queue()
5188 tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue); in tcp_collapse_ofo_queue()
5207 tcp_collapse(sk, NULL, &tp->out_of_order_queue, in tcp_collapse_ofo_queue()
5238 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_ofo_queue() local
5242 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) in tcp_prune_ofo_queue()
5247 node = &tp->ooo_last_skb->rbnode; in tcp_prune_ofo_queue()
5250 rb_erase(node, &tp->out_of_order_queue); in tcp_prune_ofo_queue()
5262 tp->ooo_last_skb = rb_to_skb(prev); in tcp_prune_ofo_queue()
5269 if (tp->rx_opt.sack_ok) in tcp_prune_ofo_queue()
5270 tcp_sack_reset(&tp->rx_opt); in tcp_prune_ofo_queue()
5283 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_queue() local
5290 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); in tcp_prune_queue()
5300 tp->copied_seq, tp->rcv_nxt); in tcp_prune_queue()
5321 tp->pred_flags = 0; in tcp_prune_queue()
5327 const struct tcp_sock *tp = tcp_sk(sk); in tcp_should_expand_sndbuf() local
5344 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) in tcp_should_expand_sndbuf()
5352 struct tcp_sock *tp = tcp_sk(sk); in tcp_new_space() local
5356 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_new_space()
5385 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ack_snd_check() local
5389 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && in __tcp_ack_snd_check()
5395 (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat || in __tcp_ack_snd_check()
5396 __tcp_select_window(sk) >= tp->rcv_wnd)) || in __tcp_ack_snd_check()
5406 if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in __tcp_ack_snd_check()
5411 if (!tcp_is_sack(tp) || in __tcp_ack_snd_check()
5412 tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr) in __tcp_ack_snd_check()
5415 if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) { in __tcp_ack_snd_check()
5416 tp->compressed_ack_rcv_nxt = tp->rcv_nxt; in __tcp_ack_snd_check()
5417 tp->dup_ack_counter = 0; in __tcp_ack_snd_check()
5419 if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) { in __tcp_ack_snd_check()
5420 tp->dup_ack_counter++; in __tcp_ack_snd_check()
5423 tp->compressed_ack++; in __tcp_ack_snd_check()
5424 if (hrtimer_is_queued(&tp->compressed_ack_timer)) in __tcp_ack_snd_check()
5429 rtt = tp->rcv_rtt_est.rtt_us; in __tcp_ack_snd_check()
5430 if (tp->srtt_us && tp->srtt_us < rtt) in __tcp_ack_snd_check()
5431 rtt = tp->srtt_us; in __tcp_ack_snd_check()
5436 hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay), in __tcp_ack_snd_check()
5462 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_urg() local
5470 if (after(tp->copied_seq, ptr)) in tcp_check_urg()
5483 if (before(ptr, tp->rcv_nxt)) in tcp_check_urg()
5487 if (tp->urg_data && !after(ptr, tp->urg_seq)) in tcp_check_urg()
5508 if (tp->urg_seq == tp->copied_seq && tp->urg_data && in tcp_check_urg()
5509 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { in tcp_check_urg()
5511 tp->copied_seq++; in tcp_check_urg()
5512 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_check_urg()
5518 tp->urg_data = TCP_URG_NOTYET; in tcp_check_urg()
5519 WRITE_ONCE(tp->urg_seq, ptr); in tcp_check_urg()
5522 tp->pred_flags = 0; in tcp_check_urg()
5528 struct tcp_sock *tp = tcp_sk(sk); in tcp_urg() local
5535 if (tp->urg_data == TCP_URG_NOTYET) { in tcp_urg()
5536 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - in tcp_urg()
5544 tp->urg_data = TCP_URG_VALID | tmp; in tcp_urg()
5561 struct tcp_sock *tp = tcp_sk(sk); in tcp_reset_check() local
5563 return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) && in tcp_reset_check()
5574 struct tcp_sock *tp = tcp_sk(sk); in tcp_validate_incoming() local
5578 if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) && in tcp_validate_incoming()
5579 tp->rx_opt.saw_tstamp && in tcp_validate_incoming()
5585 &tp->last_oow_ack_time)) in tcp_validate_incoming()
5593 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_validate_incoming()
5605 &tp->last_oow_ack_time)) in tcp_validate_incoming()
5624 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt || in tcp_validate_incoming()
5627 } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) { in tcp_validate_incoming()
5628 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_validate_incoming()
5632 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; in tcp_validate_incoming()
5650 if (tp->syn_fastopen && !tp->data_segs_in && in tcp_validate_incoming()
5707 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_established() local
5713 tcp_mstamp_refresh(tp); in tcp_rcv_established()
5731 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_established()
5742 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && in tcp_rcv_established()
5743 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && in tcp_rcv_established()
5744 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_established()
5745 int tcp_header_len = tp->tcp_header_len; in tcp_rcv_established()
5755 if (!tcp_parse_aligned_timestamp(tp, th)) in tcp_rcv_established()
5759 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) in tcp_rcv_established()
5778 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
5779 tcp_store_ts_recent(tp); in tcp_rcv_established()
5791 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; in tcp_rcv_established()
5813 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
5814 tcp_store_ts_recent(tp); in tcp_rcv_established()
5826 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { in tcp_rcv_established()
5833 tcp_update_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_established()
5887 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_transfer() local
5899 if (tp->total_retrans > 1 && tp->undo_marker) in tcp_init_transfer()
5900 tp->snd_cwnd = 1; in tcp_init_transfer()
5902 tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_init_transfer()
5903 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_init_transfer()
5914 struct tcp_sock *tp = tcp_sk(sk); in tcp_finish_connect() local
5931 tp->lsndtime = tcp_jiffies32; in tcp_finish_connect()
5934 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); in tcp_finish_connect()
5936 if (!tp->rx_opt.snd_wscale) in tcp_finish_connect()
5937 __tcp_fast_path_on(tp, tp->snd_wnd); in tcp_finish_connect()
5939 tp->pred_flags = 0; in tcp_finish_connect()
5945 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_fastopen_synack() local
5946 struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL; in tcp_rcv_fastopen_synack()
5947 u16 mss = tp->rx_opt.mss_clamp, try_exp = 0; in tcp_rcv_fastopen_synack()
5950 if (mss == tp->rx_opt.user_mss) { in tcp_rcv_fastopen_synack()
5960 if (!tp->syn_fastopen) { in tcp_rcv_fastopen_synack()
5963 } else if (tp->total_retrans) { in tcp_rcv_fastopen_synack()
5970 } else if (cookie->len < 0 && !tp->syn_data) { in tcp_rcv_fastopen_synack()
5975 try_exp = tp->syn_fastopen_exp ? 2 : 1; in tcp_rcv_fastopen_synack()
5981 if (tp->total_retrans) in tcp_rcv_fastopen_synack()
5982 tp->fastopen_client_fail = TFO_SYN_RETRANSMITTED; in tcp_rcv_fastopen_synack()
5984 tp->fastopen_client_fail = TFO_DATA_NOT_ACKED; in tcp_rcv_fastopen_synack()
5994 tp->syn_data_acked = tp->syn_data; in tcp_rcv_fastopen_synack()
5995 if (tp->syn_data_acked) { in tcp_rcv_fastopen_synack()
5998 if (tp->delivered > 1) in tcp_rcv_fastopen_synack()
5999 --tp->delivered; in tcp_rcv_fastopen_synack()
6007 static void smc_check_reset_syn(struct tcp_sock *tp) in smc_check_reset_syn() argument
6011 if (tp->syn_smc && !tp->rx_opt.smc_ok) in smc_check_reset_syn()
6012 tp->syn_smc = 0; in smc_check_reset_syn()
6019 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_spurious_syn() local
6026 syn_stamp = tp->retrans_stamp; in tcp_try_undo_spurious_syn()
6027 if (tp->undo_marker && syn_stamp && tp->rx_opt.saw_tstamp && in tcp_try_undo_spurious_syn()
6028 syn_stamp == tp->rx_opt.rcv_tsecr) in tcp_try_undo_spurious_syn()
6029 tp->undo_marker = 0; in tcp_try_undo_spurious_syn()
6036 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synsent_state_process() local
6038 int saved_clamp = tp->rx_opt.mss_clamp; in tcp_rcv_synsent_state_process()
6041 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc); in tcp_rcv_synsent_state_process()
6042 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_rcv_synsent_state_process()
6043 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_rcv_synsent_state_process()
6054 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || in tcp_rcv_synsent_state_process()
6055 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_synsent_state_process()
6064 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_rcv_synsent_state_process()
6065 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, in tcp_rcv_synsent_state_process()
6066 tcp_time_stamp(tp))) { in tcp_rcv_synsent_state_process()
6102 tcp_ecn_rcv_synack(tp, th); in tcp_rcv_synsent_state_process()
6104 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_synsent_state_process()
6111 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); in tcp_rcv_synsent_state_process()
6112 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
6117 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
6119 if (!tp->rx_opt.wscale_ok) { in tcp_rcv_synsent_state_process()
6120 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; in tcp_rcv_synsent_state_process()
6121 tp->window_clamp = min(tp->window_clamp, 65535U); in tcp_rcv_synsent_state_process()
6124 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
6125 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
6126 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
6128 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_synsent_state_process()
6129 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
6131 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
6140 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_synsent_state_process()
6142 smc_check_reset_syn(tp); in tcp_rcv_synsent_state_process()
6148 fastopen_fail = (tp->syn_fastopen || tp->syn_data) && in tcp_rcv_synsent_state_process()
6194 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && in tcp_rcv_synsent_state_process()
6195 tcp_paws_reject(&tp->rx_opt, 0)) in tcp_rcv_synsent_state_process()
6205 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
6206 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
6207 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
6208 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
6211 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
6214 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); in tcp_rcv_synsent_state_process()
6215 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_synsent_state_process()
6216 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
6221 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
6222 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; in tcp_rcv_synsent_state_process()
6223 tp->max_window = tp->snd_wnd; in tcp_rcv_synsent_state_process()
6225 tcp_ecn_rcv_syn(tp, th); in tcp_rcv_synsent_state_process()
6254 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
6255 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
6259 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
6260 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
6305 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_state_process() local
6343 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_state_process()
6344 tcp_mstamp_refresh(tp); in tcp_rcv_state_process()
6356 tcp_mstamp_refresh(tp); in tcp_rcv_state_process()
6357 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_state_process()
6358 req = rcu_dereference_protected(tp->fastopen_rsk, in tcp_rcv_state_process()
6389 tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */ in tcp_rcv_state_process()
6390 if (!tp->srtt_us) in tcp_rcv_state_process()
6397 tp->retrans_stamp = 0; in tcp_rcv_state_process()
6400 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_state_process()
6413 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; in tcp_rcv_state_process()
6414 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; in tcp_rcv_state_process()
6415 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_state_process()
6417 if (tp->rx_opt.tstamp_ok) in tcp_rcv_state_process()
6418 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_state_process()
6424 tp->lsndtime = tcp_jiffies32; in tcp_rcv_state_process()
6427 tcp_fast_path_on(tp); in tcp_rcv_state_process()
6436 if (tp->snd_una != tp->write_seq) in tcp_rcv_state_process()
6450 if (tp->linger2 < 0) { in tcp_rcv_state_process()
6456 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
6458 if (tp->syn_fastopen && th->fin) in tcp_rcv_state_process()
6484 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
6491 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
6507 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_rcv_state_process()
6521 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
6720 struct tcp_sock *tp = tcp_sk(sk); in tcp_get_syncookie_mss() local
6735 mss = tcp_parse_mss_option(th, tp->rx_opt.user_mss); in tcp_get_syncookie_mss()
6750 struct tcp_sock *tp = tcp_sk(sk); in tcp_conn_request() local
6787 tmp_opt.user_mss = tp->rx_opt.user_mss; in tcp_conn_request()