Lines matching references to tp (the struct tcp_sock pointer in most of the functions below; in tcp_md5_hash_skb_data() it is a struct tcphdr pointer)

390 static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)  in tcp_compute_delivery_rate()  argument
392 u32 rate = READ_ONCE(tp->rate_delivered); in tcp_compute_delivery_rate()
393 u32 intv = READ_ONCE(tp->rate_interval_us); in tcp_compute_delivery_rate()
397 rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC; in tcp_compute_delivery_rate()
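
tcp_compute_delivery_rate() converts the most recent rate sample (segments delivered over an interval in microseconds) into bytes per second. A minimal user-space sketch of the arithmetic, assuming the truncated line above is followed by a division by the interval (do_div() in the kernel):

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL

/*
 * Bytes per second from the last rate sample: segments delivered, times the
 * cached MSS, scaled from the microsecond interval up to one second.
 */
static uint64_t compute_delivery_rate(uint32_t rate_delivered,
                                      uint32_t rate_interval_us,
                                      uint32_t mss_cache)
{
        uint64_t rate64 = 0;

        if (rate_delivered && rate_interval_us) {
                rate64 = (uint64_t)rate_delivered * mss_cache * USEC_PER_SEC;
                rate64 /= rate_interval_us;     /* the kernel uses do_div() here */
        }
        return rate64;
}

int main(void)
{
        /* 100 segments of 1448 bytes delivered over 10 ms -> ~14.5 MB/s. */
        printf("%llu bytes/sec\n",
               (unsigned long long)compute_delivery_rate(100, 10000, 1448));
        return 0;
}
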
411 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock() local
413 tp->out_of_order_queue = RB_ROOT; in tcp_init_sock()
416 INIT_LIST_HEAD(&tp->tsq_node); in tcp_init_sock()
417 INIT_LIST_HEAD(&tp->tsorted_sent_queue); in tcp_init_sock()
420 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); in tcp_init_sock()
421 minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U); in tcp_init_sock()
428 tp->snd_cwnd = TCP_INIT_CWND; in tcp_init_sock()
431 tp->app_limited = ~0U; in tcp_init_sock()
436 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; in tcp_init_sock()
437 tp->snd_cwnd_clamp = ~0; in tcp_init_sock()
438 tp->mss_cache = TCP_MSS_DEFAULT; in tcp_init_sock()
440 tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering; in tcp_init_sock()
443 tp->tsoffset = 0; in tcp_init_sock()
444 tp->rack.reo_wnd_steps = 1; in tcp_init_sock()
477 static inline bool tcp_stream_is_readable(const struct tcp_sock *tp, in tcp_stream_is_readable() argument
480 return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) || in tcp_stream_is_readable()
496 const struct tcp_sock *tp = tcp_sk(sk); in tcp_poll() local
546 (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) { in tcp_poll()
549 if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) && in tcp_poll()
551 tp->urg_data) in tcp_poll()
554 if (tcp_stream_is_readable(tp, target, sk)) in tcp_poll()
576 if (tp->urg_data & TCP_URG_VALID) in tcp_poll()
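
tcp_stream_is_readable() supplies the data-ready test used by tcp_poll(): the socket is readable once at least target bytes sit between what the application has consumed (copied_seq) and what the stack has received (rcv_nxt). A sketch of just the sequence test shown above (the real function returns true in further cases elided by the listing), relying on unsigned 32-bit wraparound:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Readability test sketched from tcp_stream_is_readable(): the u32
 * subtraction stays correct even after the sequence space wraps.
 */
static bool stream_is_readable(uint32_t rcv_nxt, uint32_t copied_seq,
                               uint32_t target)
{
        return rcv_nxt - copied_seq >= target;
}

int main(void)
{
        /* Works even when rcv_nxt has wrapped past 2^32. */
        printf("%d\n", stream_is_readable(0x00000010u, 0xfffffff0u, 32)); /* 1 */
        printf("%d\n", stream_is_readable(0x00000010u, 0xfffffff0u, 64)); /* 0 */
        return 0;
}
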
596 struct tcp_sock *tp = tcp_sk(sk); in tcp_ioctl() local
610 answ = tp->urg_data && in tcp_ioctl()
611 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq); in tcp_ioctl()
620 answ = READ_ONCE(tp->write_seq) - tp->snd_una; in tcp_ioctl()
629 answ = READ_ONCE(tp->write_seq) - in tcp_ioctl()
630 READ_ONCE(tp->snd_nxt); in tcp_ioctl()
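
The last two answers computed in tcp_ioctl() above are plain sequence-number differences; they appear to back the usual SIOCOUTQ (queued but not yet acknowledged) and SIOCOUTQNSD (queued but not yet sent) queries, though the ioctl names themselves are not part of the listing. A sketch:

#include <stdint.h>
#include <stdio.h>

/* Bytes queued but not yet acknowledged by the peer. */
static uint32_t outq_bytes(uint32_t write_seq, uint32_t snd_una)
{
        return write_seq - snd_una;
}

/* Bytes queued but not yet sent at all. */
static uint32_t outq_not_sent_bytes(uint32_t write_seq, uint32_t snd_nxt)
{
        return write_seq - snd_nxt;
}

int main(void)
{
        uint32_t snd_una = 1000, snd_nxt = 1600, write_seq = 2000;

        printf("outstanding: %u, unsent: %u\n",
               outq_bytes(write_seq, snd_una),
               outq_not_sent_bytes(write_seq, snd_nxt)); /* 1000, 400 */
        return 0;
}
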
640 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) in tcp_mark_push() argument
643 tp->pushed_seq = tp->write_seq; in tcp_mark_push()
646 static inline bool forced_push(const struct tcp_sock *tp) in forced_push() argument
648 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); in forced_push()
653 struct tcp_sock *tp = tcp_sk(sk); in skb_entail() local
657 tcb->seq = tcb->end_seq = tp->write_seq; in skb_entail()
664 if (tp->nonagle & TCP_NAGLE_PUSH) in skb_entail()
665 tp->nonagle &= ~TCP_NAGLE_PUSH; in skb_entail()
670 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags) in tcp_mark_urg() argument
673 tp->snd_up = tp->write_seq; in tcp_mark_urg()
698 struct tcp_sock *tp = tcp_sk(sk); in tcp_push() local
704 if (!(flags & MSG_MORE) || forced_push(tp)) in tcp_push()
705 tcp_mark_push(tp, skb); in tcp_push()
707 tcp_mark_urg(tp, flags); in tcp_push()
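
forced_push() is the escape hatch visible in tcp_push() and do_tcp_sendpages() above: once more than half of the largest window the peer has ever advertised has been queued since the last PSH mark, the data is pushed even if the caller asked for MSG_MORE-style coalescing. A minimal user-space sketch, assuming after() expands to the usual wrap-safe signed 32-bit comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "seq1 is strictly after seq2", the classic before()/after() idiom. */
static bool seq_after(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) > 0;
}

/*
 * forced_push() from the listing: push once more than half of max_window
 * has accumulated since pushed_seq was last marked.
 */
static bool forced_push(uint32_t write_seq, uint32_t pushed_seq,
                        uint32_t max_window)
{
        return seq_after(write_seq, pushed_seq + (max_window >> 1));
}

int main(void)
{
        /* 40 KB queued since the last push against a 64 KB max window. */
        printf("%d\n", forced_push(140000, 100000, 65535)); /* 1 */
        printf("%d\n", forced_push(110000, 100000, 65535)); /* 0 */
        return 0;
}
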
908 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_size_goal() local
916 new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); in tcp_xmit_size_goal()
919 size_goal = tp->gso_segs * mss_now; in tcp_xmit_size_goal()
922 tp->gso_segs = min_t(u16, new_size_goal / mss_now, in tcp_xmit_size_goal()
924 size_goal = tp->gso_segs * mss_now; in tcp_xmit_size_goal()
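
tcp_xmit_size_goal() keeps the per-skb size goal as a whole number of MSS-sized segments (tp->gso_segs). A sketch of the rounding shown above; the cap on the segment count (the second argument of min_t, cut off in the listing) is treated here as an assumed parameter:

#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/*
 * Sketch of the size-goal rounding: express the goal as a count of
 * MSS-sized segments, cap the count, and return count * MSS bytes.
 */
static uint32_t xmit_size_goal(uint32_t new_size_goal, uint32_t mss_now,
                               uint16_t max_segs)
{
        uint16_t gso_segs = (uint16_t)min_u32(new_size_goal / mss_now, max_segs);

        return (uint32_t)gso_segs * mss_now;
}

int main(void)
{
        /* A 64 KB goal with a 1448-byte MSS rounds down to 45 segments. */
        printf("%u\n", xmit_size_goal(65536, 1448, 1024)); /* 65160 */
        return 0;
}
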
959 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_sendpages() local
1018 tcp_mark_push(tp, skb); in do_tcp_sendpages()
1040 WRITE_ONCE(tp->write_seq, tp->write_seq + copy); in do_tcp_sendpages()
1056 if (forced_push(tp)) { in do_tcp_sendpages()
1057 tcp_mark_push(tp, skb); in do_tcp_sendpages()
1080 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in do_tcp_sendpages()
1124 void tcp_free_fastopen_req(struct tcp_sock *tp) in tcp_free_fastopen_req() argument
1126 if (tp->fastopen_req) { in tcp_free_fastopen_req()
1127 kfree(tp->fastopen_req); in tcp_free_fastopen_req()
1128 tp->fastopen_req = NULL; in tcp_free_fastopen_req()
1136 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_fastopen() local
1145 if (tp->fastopen_req) in tcp_sendmsg_fastopen()
1148 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), in tcp_sendmsg_fastopen()
1150 if (unlikely(!tp->fastopen_req)) in tcp_sendmsg_fastopen()
1152 tp->fastopen_req->data = msg; in tcp_sendmsg_fastopen()
1153 tp->fastopen_req->size = size; in tcp_sendmsg_fastopen()
1154 tp->fastopen_req->uarg = uarg; in tcp_sendmsg_fastopen()
1171 if (tp->fastopen_req) { in tcp_sendmsg_fastopen()
1172 *copied = tp->fastopen_req->copied; in tcp_sendmsg_fastopen()
1173 tcp_free_fastopen_req(tp); in tcp_sendmsg_fastopen()
1181 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_locked() local
1207 !tp->repair) { in tcp_sendmsg_locked()
1230 if (unlikely(tp->repair)) { in tcp_sendmsg_locked()
1231 if (tp->repair_queue == TCP_RECV_QUEUE) { in tcp_sendmsg_locked()
1237 if (tp->repair_queue == TCP_NO_QUEUE) in tcp_sendmsg_locked()
1300 if (tp->repair) in tcp_sendmsg_locked()
1326 tcp_mark_push(tp, skb); in tcp_sendmsg_locked()
1356 tcp_mark_push(tp, skb); in tcp_sendmsg_locked()
1367 WRITE_ONCE(tp->write_seq, tp->write_seq + copy); in tcp_sendmsg_locked()
1378 if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) in tcp_sendmsg_locked()
1381 if (forced_push(tp)) { in tcp_sendmsg_locked()
1382 tcp_mark_push(tp, skb); in tcp_sendmsg_locked()
1405 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in tcp_sendmsg_locked()
1450 struct tcp_sock *tp = tcp_sk(sk); in tcp_recv_urg() local
1453 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || in tcp_recv_urg()
1454 tp->urg_data == TCP_URG_READ) in tcp_recv_urg()
1460 if (tp->urg_data & TCP_URG_VALID) { in tcp_recv_urg()
1462 char c = tp->urg_data; in tcp_recv_urg()
1465 tp->urg_data = TCP_URG_READ; in tcp_recv_urg()
1525 struct tcp_sock *tp = tcp_sk(sk); in tcp_cleanup_rbuf() local
1530 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), in tcp_cleanup_rbuf()
1532 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); in tcp_cleanup_rbuf()
1540 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || in tcp_cleanup_rbuf()
1562 __u32 rcv_window_now = tcp_receive_window(tp); in tcp_cleanup_rbuf()
1565 if (2*rcv_window_now <= tp->window_clamp) { in tcp_cleanup_rbuf()
1620 struct tcp_sock *tp = tcp_sk(sk); in tcp_read_sock() local
1621 u32 seq = tp->copied_seq; in tcp_read_sock()
1634 if (tp->urg_data) { in tcp_read_sock()
1635 u32 urg_offset = tp->urg_seq - seq; in tcp_read_sock()
1673 WRITE_ONCE(tp->copied_seq, seq); in tcp_read_sock()
1675 WRITE_ONCE(tp->copied_seq, seq); in tcp_read_sock()
1748 struct tcp_sock *tp; in tcp_zerocopy_receive() local
1768 tp = tcp_sk(sk); in tcp_zerocopy_receive()
1769 seq = tp->copied_seq; in tcp_zerocopy_receive()
1824 WRITE_ONCE(tp->copied_seq, seq); in tcp_zerocopy_receive()
1920 const struct tcp_sock *tp = tcp_sk(sk); in tcp_inq_hint() local
1921 u32 copied_seq = READ_ONCE(tp->copied_seq); in tcp_inq_hint()
1922 u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); in tcp_inq_hint()
1926 if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { in tcp_inq_hint()
1928 inq = tp->rcv_nxt - tp->copied_seq; in tcp_inq_hint()
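
tcp_inq_hint() computes the "bytes available to read" hint without taking the socket lock: it samples copied_seq and rcv_nxt, and only falls back to recomputing if the racy sample looks inconsistent (a negative result, or copied_seq moved underneath it). A sketch of the pattern, with the kernel's locked fallback only simulated by a second read:

#include <stdint.h>
#include <stdio.h>

/*
 * Lockless inq hint sketched from tcp_inq_hint().  The volatile reads stand
 * in for READ_ONCE(); the kernel re-reads under lock_sock() in the fallback.
 */
static int inq_hint(const volatile uint32_t *copied_seq_p,
                    const volatile uint32_t *rcv_nxt_p)
{
        uint32_t copied_seq = *copied_seq_p;
        uint32_t rcv_nxt = *rcv_nxt_p;
        int inq = (int)(rcv_nxt - copied_seq);

        if (inq < 0 || copied_seq != *copied_seq_p) {
                /* Racy sample: recompute (locked in the kernel). */
                inq = (int)(*rcv_nxt_p - *copied_seq_p);
        }
        return inq;
}

int main(void)
{
        volatile uint32_t copied_seq = 1000, rcv_nxt = 1500;

        printf("%d bytes queued\n", inq_hint(&copied_seq, &rcv_nxt)); /* 500 */
        return 0;
}
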
1950 struct tcp_sock *tp = tcp_sk(sk); in tcp_recvmsg() local
1977 has_cmsg = tp->recvmsg_inq; in tcp_recvmsg()
1984 if (unlikely(tp->repair)) { in tcp_recvmsg()
1989 if (tp->repair_queue == TCP_SEND_QUEUE) in tcp_recvmsg()
1993 if (tp->repair_queue == TCP_NO_QUEUE) in tcp_recvmsg()
1999 seq = &tp->copied_seq; in tcp_recvmsg()
2001 peek_seq = tp->copied_seq; in tcp_recvmsg()
2011 if (tp->urg_data && tp->urg_seq == *seq) { in tcp_recvmsg()
2030 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, in tcp_recvmsg()
2045 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); in tcp_recvmsg()
2102 (peek_seq - copied - urg_hole != tp->copied_seq)) { in tcp_recvmsg()
2106 peek_seq = tp->copied_seq; in tcp_recvmsg()
2117 if (tp->urg_data) { in tcp_recvmsg()
2118 u32 urg_offset = tp->urg_seq - *seq; in tcp_recvmsg()
2151 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { in tcp_recvmsg()
2152 tp->urg_data = 0; in tcp_recvmsg()
2189 if (tp->recvmsg_inq) { in tcp_recvmsg()
2460 struct tcp_sock *tp = tcp_sk(sk); in tcp_close() local
2461 if (tp->linger2 < 0) { in tcp_close()
2565 struct tcp_sock *tp = tcp_sk(sk); in tcp_disconnect() local
2575 } else if (unlikely(tp->repair)) { in tcp_disconnect()
2578 (tp->snd_nxt != tp->write_seq && in tcp_disconnect()
2594 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_disconnect()
2595 tp->urg_data = 0; in tcp_disconnect()
2598 skb_rbtree_purge(&tp->out_of_order_queue); in tcp_disconnect()
2607 tp->srtt_us = 0; in tcp_disconnect()
2608 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); in tcp_disconnect()
2609 tp->rcv_rtt_last_tsecr = 0; in tcp_disconnect()
2611 seq = tp->write_seq + tp->max_window + 2; in tcp_disconnect()
2614 WRITE_ONCE(tp->write_seq, seq); in tcp_disconnect()
2617 tp->snd_cwnd = 2; in tcp_disconnect()
2620 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; in tcp_disconnect()
2621 tp->snd_cwnd = TCP_INIT_CWND; in tcp_disconnect()
2622 tp->snd_cwnd_cnt = 0; in tcp_disconnect()
2623 tp->window_clamp = 0; in tcp_disconnect()
2624 tp->delivered_ce = 0; in tcp_disconnect()
2626 tp->is_sack_reneg = 0; in tcp_disconnect()
2627 tcp_clear_retrans(tp); in tcp_disconnect()
2633 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); in tcp_disconnect()
2637 tcp_saved_syn_free(tp); in tcp_disconnect()
2638 tp->compressed_ack = 0; in tcp_disconnect()
2639 tp->bytes_sent = 0; in tcp_disconnect()
2640 tp->bytes_acked = 0; in tcp_disconnect()
2641 tp->bytes_received = 0; in tcp_disconnect()
2642 tp->bytes_retrans = 0; in tcp_disconnect()
2643 tp->duplicate_sack[0].start_seq = 0; in tcp_disconnect()
2644 tp->duplicate_sack[0].end_seq = 0; in tcp_disconnect()
2645 tp->dsack_dups = 0; in tcp_disconnect()
2646 tp->reord_seen = 0; in tcp_disconnect()
2647 tp->retrans_out = 0; in tcp_disconnect()
2648 tp->sacked_out = 0; in tcp_disconnect()
2649 tp->tlp_high_seq = 0; in tcp_disconnect()
2650 tp->last_oow_ack_time = 0; in tcp_disconnect()
2652 tp->app_limited = ~0U; in tcp_disconnect()
2653 tp->rack.mstamp = 0; in tcp_disconnect()
2654 tp->rack.advanced = 0; in tcp_disconnect()
2655 tp->rack.reo_wnd_steps = 1; in tcp_disconnect()
2656 tp->rack.last_delivered = 0; in tcp_disconnect()
2657 tp->rack.reo_wnd_persist = 0; in tcp_disconnect()
2658 tp->rack.dsack_seen = 0; in tcp_disconnect()
2659 tp->syn_data_acked = 0; in tcp_disconnect()
2660 tp->rx_opt.saw_tstamp = 0; in tcp_disconnect()
2661 tp->rx_opt.dsack = 0; in tcp_disconnect()
2662 tp->rx_opt.num_sacks = 0; in tcp_disconnect()
2663 tp->rcv_ooopack = 0; in tcp_disconnect()
2667 tcp_free_fastopen_req(tp); in tcp_disconnect()
2689 static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len) in tcp_repair_set_window() argument
2693 if (!tp->repair) in tcp_repair_set_window()
2705 if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) in tcp_repair_set_window()
2708 if (after(opt.rcv_wup, tp->rcv_nxt)) in tcp_repair_set_window()
2711 tp->snd_wl1 = opt.snd_wl1; in tcp_repair_set_window()
2712 tp->snd_wnd = opt.snd_wnd; in tcp_repair_set_window()
2713 tp->max_window = opt.max_window; in tcp_repair_set_window()
2715 tp->rcv_wnd = opt.rcv_wnd; in tcp_repair_set_window()
2716 tp->rcv_wup = opt.rcv_wup; in tcp_repair_set_window()
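
tcp_repair_set_window() validates the window state supplied from user space before restoring it: snd_wl1 may not lie beyond rcv_nxt + rcv_wnd, and rcv_wup may not lie beyond rcv_nxt. A sketch of those two checks (error values are illustrative, not taken from the listing):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "seq1 is strictly after seq2". */
static bool seq_after(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) > 0;
}

struct repair_window {
        uint32_t snd_wl1;
        uint32_t snd_wnd;
        uint32_t max_window;
        uint32_t rcv_wnd;
        uint32_t rcv_wup;
};

/* Reject state that would point beyond what the receive side has seen. */
static int repair_window_valid(const struct repair_window *opt, uint32_t rcv_nxt)
{
        if (seq_after(opt->snd_wl1, rcv_nxt + opt->rcv_wnd))
                return -1;
        if (seq_after(opt->rcv_wup, rcv_nxt))
                return -1;
        return 0;
}

int main(void)
{
        struct repair_window ok  = { .snd_wl1 = 2000, .rcv_wnd = 1000, .rcv_wup = 1400 };
        struct repair_window bad = { .snd_wl1 = 9000, .rcv_wnd = 1000, .rcv_wup = 1400 };

        printf("%d %d\n", repair_window_valid(&ok, 1500),
               repair_window_valid(&bad, 1500)); /* 0 -1 */
        return 0;
}
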
2724 struct tcp_sock *tp = tcp_sk(sk); in tcp_repair_options_est() local
2736 tp->rx_opt.mss_clamp = opt.opt_val; in tcp_repair_options_est()
2747 tp->rx_opt.snd_wscale = snd_wscale; in tcp_repair_options_est()
2748 tp->rx_opt.rcv_wscale = rcv_wscale; in tcp_repair_options_est()
2749 tp->rx_opt.wscale_ok = 1; in tcp_repair_options_est()
2756 tp->rx_opt.sack_ok |= TCP_SACK_SEEN; in tcp_repair_options_est()
2762 tp->rx_opt.tstamp_ok = 1; in tcp_repair_options_est()
2791 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_setsockopt() local
2878 tp->rx_opt.user_mss = val; in do_tcp_setsockopt()
2891 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; in do_tcp_setsockopt()
2894 tp->nonagle &= ~TCP_NAGLE_OFF; in do_tcp_setsockopt()
2902 tp->thin_lto = val; in do_tcp_setsockopt()
2914 tp->repair = 1; in do_tcp_setsockopt()
2916 tp->repair_queue = TCP_NO_QUEUE; in do_tcp_setsockopt()
2918 tp->repair = 0; in do_tcp_setsockopt()
2922 tp->repair = 0; in do_tcp_setsockopt()
2930 if (!tp->repair) in do_tcp_setsockopt()
2933 tp->repair_queue = val; in do_tcp_setsockopt()
2941 else if (tp->repair_queue == TCP_SEND_QUEUE) in do_tcp_setsockopt()
2942 WRITE_ONCE(tp->write_seq, val); in do_tcp_setsockopt()
2943 else if (tp->repair_queue == TCP_RECV_QUEUE) in do_tcp_setsockopt()
2944 WRITE_ONCE(tp->rcv_nxt, val); in do_tcp_setsockopt()
2950 if (!tp->repair) in do_tcp_setsockopt()
2973 tp->nonagle |= TCP_NAGLE_CORK; in do_tcp_setsockopt()
2975 tp->nonagle &= ~TCP_NAGLE_CORK; in do_tcp_setsockopt()
2976 if (tp->nonagle&TCP_NAGLE_OFF) in do_tcp_setsockopt()
2977 tp->nonagle |= TCP_NAGLE_PUSH; in do_tcp_setsockopt()
2986 tp->keepalive_time = val * HZ; in do_tcp_setsockopt()
2990 u32 elapsed = keepalive_time_elapsed(tp); in do_tcp_setsockopt()
2991 if (tp->keepalive_time > elapsed) in do_tcp_setsockopt()
2992 elapsed = tp->keepalive_time - elapsed; in do_tcp_setsockopt()
3003 tp->keepalive_intvl = val * HZ; in do_tcp_setsockopt()
3009 tp->keepalive_probes = val; in do_tcp_setsockopt()
3022 tp->save_syn = val; in do_tcp_setsockopt()
3027 tp->linger2 = -1; in do_tcp_setsockopt()
3029 tp->linger2 = 0; in do_tcp_setsockopt()
3031 tp->linger2 = val * HZ; in do_tcp_setsockopt()
3047 tp->window_clamp = 0; in do_tcp_setsockopt()
3049 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? in do_tcp_setsockopt()
3073 err = tp->af_specific->md5_parse(sk, optname, optval, optlen); in do_tcp_setsockopt()
3103 tp->fastopen_connect = val; in do_tcp_setsockopt()
3116 tp->fastopen_no_cookie = val; in do_tcp_setsockopt()
3119 if (!tp->repair) in do_tcp_setsockopt()
3122 tp->tsoffset = val - tcp_time_stamp_raw(); in do_tcp_setsockopt()
3125 err = tcp_repair_set_window(tp, optval, optlen); in do_tcp_setsockopt()
3128 tp->notsent_lowat = val; in do_tcp_setsockopt()
3135 tp->recvmsg_inq = val; in do_tcp_setsockopt()
3140 tp->tcp_tx_delay = val; in do_tcp_setsockopt()
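
Among the setsockopt cases above, the TCP_KEEPIDLE lines (2986-2992) re-arm the keepalive timer: the new idle time is stored and the timer is set to whatever part of it has not yet elapsed since the last activity. A sketch of that arithmetic in seconds rather than jiffies; the "already overdue, probe now" branch is truncated in the listing, so clamping to zero is an assumption:

#include <stdint.h>
#include <stdio.h>

/*
 * Remaining delay before the next keepalive probe after the idle time is
 * changed.  'elapsed' is the time since the connection was last active
 * (keepalive_time_elapsed() in the kernel).
 */
static uint32_t keepalive_rearm_delay(uint32_t keepalive_time, uint32_t elapsed)
{
        if (keepalive_time > elapsed)
                return keepalive_time - elapsed;
        return 0;   /* assumed: already overdue, probe as soon as possible */
}

int main(void)
{
        /* New idle time 7200 s, connection idle for 1800 s -> probe in 5400 s. */
        printf("%u\n", keepalive_rearm_delay(7200, 1800)); /* 5400 */
        /* Already idle longer than the new setting -> probe immediately. */
        printf("%u\n", keepalive_rearm_delay(60, 300));    /* 0 */
        return 0;
}
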
3175 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, in tcp_get_info_chrono_stats() argument
3182 stats[i] = tp->chrono_stat[i - 1]; in tcp_get_info_chrono_stats()
3183 if (i == tp->chrono_type) in tcp_get_info_chrono_stats()
3184 stats[i] += tcp_jiffies32 - tp->chrono_start; in tcp_get_info_chrono_stats()
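
tcp_get_info_chrono_stats() reports how long the sender has been limited by each cause: every bucket holds the time already charged to that state, and the state that is still active additionally gets now - chrono_start. A simplified sketch (enum names are illustrative; the kernel also converts from jiffies to microseconds and sums a total, which is omitted here):

#include <stdint.h>
#include <stdio.h>

enum chrono { CHRONO_UNSPEC, CHRONO_BUSY, CHRONO_RWND_LIMITED,
              CHRONO_SNDBUF_LIMITED, CHRONO_MAX };

/*
 * chrono_stat[] counts completed time per state (indexed from CHRONO_BUSY,
 * hence the i - 1); the currently running state is topped up with the time
 * since it was entered.
 */
static void get_chrono_stats(const uint32_t chrono_stat[CHRONO_MAX - 1],
                             enum chrono chrono_type, uint32_t chrono_start,
                             uint32_t now, uint64_t stats[CHRONO_MAX])
{
        enum chrono i;

        stats[CHRONO_UNSPEC] = 0;
        for (i = CHRONO_BUSY; i < CHRONO_MAX; ++i) {
                stats[i] = chrono_stat[i - 1];
                if (i == chrono_type)
                        stats[i] += now - chrono_start;
        }
}

int main(void)
{
        uint32_t chrono_stat[CHRONO_MAX - 1] = { 100, 40, 0 };
        uint64_t stats[CHRONO_MAX];

        /* Currently busy since t=500, sampled at t=530 -> busy = 100 + 30. */
        get_chrono_stats(chrono_stat, CHRONO_BUSY, 500, 530, stats);
        printf("busy=%llu rwnd=%llu sndbuf=%llu\n",
               (unsigned long long)stats[CHRONO_BUSY],
               (unsigned long long)stats[CHRONO_RWND_LIMITED],
               (unsigned long long)stats[CHRONO_SNDBUF_LIMITED]); /* 130 40 0 */
        return 0;
}
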
3197 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ in tcp_get_info() local
3219 info->tcpi_reordering = tp->reordering; in tcp_get_info()
3220 info->tcpi_snd_cwnd = tp->snd_cwnd; in tcp_get_info()
3239 if (tp->rx_opt.tstamp_ok) in tcp_get_info()
3241 if (tcp_is_sack(tp)) in tcp_get_info()
3243 if (tp->rx_opt.wscale_ok) { in tcp_get_info()
3245 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; in tcp_get_info()
3246 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; in tcp_get_info()
3249 if (tp->ecn_flags & TCP_ECN_OK) in tcp_get_info()
3251 if (tp->ecn_flags & TCP_ECN_SEEN) in tcp_get_info()
3253 if (tp->syn_data_acked) in tcp_get_info()
3258 info->tcpi_snd_mss = tp->mss_cache; in tcp_get_info()
3261 info->tcpi_unacked = tp->packets_out; in tcp_get_info()
3262 info->tcpi_sacked = tp->sacked_out; in tcp_get_info()
3264 info->tcpi_lost = tp->lost_out; in tcp_get_info()
3265 info->tcpi_retrans = tp->retrans_out; in tcp_get_info()
3268 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); in tcp_get_info()
3270 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); in tcp_get_info()
3273 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; in tcp_get_info()
3274 info->tcpi_rtt = tp->srtt_us >> 3; in tcp_get_info()
3275 info->tcpi_rttvar = tp->mdev_us >> 2; in tcp_get_info()
3276 info->tcpi_snd_ssthresh = tp->snd_ssthresh; in tcp_get_info()
3277 info->tcpi_advmss = tp->advmss; in tcp_get_info()
3279 info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; in tcp_get_info()
3280 info->tcpi_rcv_space = tp->rcvq_space.space; in tcp_get_info()
3282 info->tcpi_total_retrans = tp->total_retrans; in tcp_get_info()
3284 info->tcpi_bytes_acked = tp->bytes_acked; in tcp_get_info()
3285 info->tcpi_bytes_received = tp->bytes_received; in tcp_get_info()
3286 info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); in tcp_get_info()
3287 tcp_get_info_chrono_stats(tp, info); in tcp_get_info()
3289 info->tcpi_segs_out = tp->segs_out; in tcp_get_info()
3290 info->tcpi_segs_in = tp->segs_in; in tcp_get_info()
3292 info->tcpi_min_rtt = tcp_min_rtt(tp); in tcp_get_info()
3293 info->tcpi_data_segs_in = tp->data_segs_in; in tcp_get_info()
3294 info->tcpi_data_segs_out = tp->data_segs_out; in tcp_get_info()
3296 info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0; in tcp_get_info()
3297 rate64 = tcp_compute_delivery_rate(tp); in tcp_get_info()
3300 info->tcpi_delivered = tp->delivered; in tcp_get_info()
3301 info->tcpi_delivered_ce = tp->delivered_ce; in tcp_get_info()
3302 info->tcpi_bytes_sent = tp->bytes_sent; in tcp_get_info()
3303 info->tcpi_bytes_retrans = tp->bytes_retrans; in tcp_get_info()
3304 info->tcpi_dsack_dups = tp->dsack_dups; in tcp_get_info()
3305 info->tcpi_reord_seen = tp->reord_seen; in tcp_get_info()
3306 info->tcpi_rcv_ooopack = tp->rcv_ooopack; in tcp_get_info()
3307 info->tcpi_snd_wnd = tp->snd_wnd; in tcp_get_info()
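
Two details worth calling out in the tcp_get_info() lines above: srtt_us and mdev_us are kept in the classic scaled fixed-point form (x8 and x4 respectively), so exporting them shifts the scale back out, and tcpi_notsent_bytes is clamped at zero in case the unlocked snapshot of write_seq and snd_nxt is momentarily inconsistent. A sketch of those conversions:

#include <stdint.h>
#include <stdio.h>

/* srtt is stored scaled by 8; exporting divides the scale back out. */
static uint32_t export_rtt_us(uint32_t srtt_scaled_by_8)
{
        return srtt_scaled_by_8 >> 3;
}

/* mdev (rttvar) is stored scaled by 4. */
static uint32_t export_rttvar_us(uint32_t mdev_scaled_by_4)
{
        return mdev_scaled_by_4 >> 2;
}

/* Clamp at zero, mirroring max_t(int, 0, write_seq - snd_nxt). */
static uint32_t notsent_bytes(uint32_t write_seq, uint32_t snd_nxt)
{
        int32_t diff = (int32_t)(write_seq - snd_nxt);

        return diff > 0 ? (uint32_t)diff : 0;
}

int main(void)
{
        printf("rtt=%uus rttvar=%uus notsent=%u\n",
               export_rtt_us(8 * 30000),   /* 30 ms smoothed RTT */
               export_rttvar_us(4 * 5000), /* 5 ms variance */
               notsent_bytes(5000, 4200)); /* 800 bytes queued, unsent */
        return 0;
}
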
3342 const struct tcp_sock *tp = tcp_sk(sk); in tcp_get_timestamping_opt_stats() local
3352 tcp_get_info_chrono_stats(tp, &info); in tcp_get_timestamping_opt_stats()
3360 tp->data_segs_out, TCP_NLA_PAD); in tcp_get_timestamping_opt_stats()
3362 tp->total_retrans, TCP_NLA_PAD); in tcp_get_timestamping_opt_stats()
3368 rate64 = tcp_compute_delivery_rate(tp); in tcp_get_timestamping_opt_stats()
3371 nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd); in tcp_get_timestamping_opt_stats()
3372 nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); in tcp_get_timestamping_opt_stats()
3373 nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); in tcp_get_timestamping_opt_stats()
3376 nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); in tcp_get_timestamping_opt_stats()
3377 nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); in tcp_get_timestamping_opt_stats()
3378 nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); in tcp_get_timestamping_opt_stats()
3379 nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); in tcp_get_timestamping_opt_stats()
3381 nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); in tcp_get_timestamping_opt_stats()
3384 nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, in tcp_get_timestamping_opt_stats()
3386 nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, in tcp_get_timestamping_opt_stats()
3388 nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); in tcp_get_timestamping_opt_stats()
3389 nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); in tcp_get_timestamping_opt_stats()
3390 nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); in tcp_get_timestamping_opt_stats()
3399 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_getsockopt() local
3413 val = tp->mss_cache; in do_tcp_getsockopt()
3415 val = tp->rx_opt.user_mss; in do_tcp_getsockopt()
3416 if (tp->repair) in do_tcp_getsockopt()
3417 val = tp->rx_opt.mss_clamp; in do_tcp_getsockopt()
3420 val = !!(tp->nonagle&TCP_NAGLE_OFF); in do_tcp_getsockopt()
3423 val = !!(tp->nonagle&TCP_NAGLE_CORK); in do_tcp_getsockopt()
3426 val = keepalive_time_when(tp) / HZ; in do_tcp_getsockopt()
3429 val = keepalive_intvl_when(tp) / HZ; in do_tcp_getsockopt()
3432 val = keepalive_probes(tp); in do_tcp_getsockopt()
3438 val = tp->linger2; in do_tcp_getsockopt()
3447 val = tp->window_clamp; in do_tcp_getsockopt()
3538 val = tp->thin_lto; in do_tcp_getsockopt()
3546 val = tp->repair; in do_tcp_getsockopt()
3550 if (tp->repair) in do_tcp_getsockopt()
3551 val = tp->repair_queue; in do_tcp_getsockopt()
3565 if (!tp->repair) in do_tcp_getsockopt()
3568 opt.snd_wl1 = tp->snd_wl1; in do_tcp_getsockopt()
3569 opt.snd_wnd = tp->snd_wnd; in do_tcp_getsockopt()
3570 opt.max_window = tp->max_window; in do_tcp_getsockopt()
3571 opt.rcv_wnd = tp->rcv_wnd; in do_tcp_getsockopt()
3572 opt.rcv_wup = tp->rcv_wup; in do_tcp_getsockopt()
3579 if (tp->repair_queue == TCP_SEND_QUEUE) in do_tcp_getsockopt()
3580 val = tp->write_seq; in do_tcp_getsockopt()
3581 else if (tp->repair_queue == TCP_RECV_QUEUE) in do_tcp_getsockopt()
3582 val = tp->rcv_nxt; in do_tcp_getsockopt()
3596 val = tp->fastopen_connect; in do_tcp_getsockopt()
3600 val = tp->fastopen_no_cookie; in do_tcp_getsockopt()
3604 val = tp->tcp_tx_delay; in do_tcp_getsockopt()
3608 val = tcp_time_stamp_raw() + tp->tsoffset; in do_tcp_getsockopt()
3611 val = tp->notsent_lowat; in do_tcp_getsockopt()
3614 val = tp->recvmsg_inq; in do_tcp_getsockopt()
3617 val = tp->save_syn; in do_tcp_getsockopt()
3624 if (tp->saved_syn) { in do_tcp_getsockopt()
3625 if (len < tp->saved_syn[0]) { in do_tcp_getsockopt()
3626 if (put_user(tp->saved_syn[0], optlen)) { in do_tcp_getsockopt()
3633 len = tp->saved_syn[0]; in do_tcp_getsockopt()
3638 if (copy_to_user(optval, tp->saved_syn + 1, len)) { in do_tcp_getsockopt()
3642 tcp_saved_syn_free(tp); in do_tcp_getsockopt()
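
The TCP_TIMESTAMP pair spans both option handlers: setting it (line 3122 above) stores the difference between the requested value and the raw timestamp clock, and reading it back (line 3608) adds the offset to the current clock, so the caller sees the value it set, advanced by whatever time has passed. A sketch with tcp_time_stamp_raw() modelled as a plain counter:

#include <stdint.h>
#include <stdio.h>

static uint32_t tsoffset;

/* TCP_TIMESTAMP set: remember how far the requested value is from "now". */
static void set_timestamp(uint32_t now_raw, uint32_t val)
{
        tsoffset = val - now_raw;
}

/* TCP_TIMESTAMP get: the stored offset applied to the current clock. */
static uint32_t get_timestamp(uint32_t now_raw)
{
        return now_raw + tsoffset;
}

int main(void)
{
        set_timestamp(1000, 50000);          /* user asks for timestamp 50000 */
        printf("%u\n", get_timestamp(1000)); /* 50000 */
        printf("%u\n", get_timestamp(1250)); /* 50250: advanced by 250 ticks */
        return 0;
}
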
3794 const struct tcphdr *tp = tcp_hdr(skb); in tcp_md5_hash_skb_data() local
3804 sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); in tcp_md5_hash_skb_data()