Lines matching refs: tp (references to the identifier tp in net/ipv4/tcp.c; each entry gives the source line number, the matching line, and the enclosing function, with declaration sites tagged as argument or local)

391 static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)  in tcp_compute_delivery_rate()  argument
393 u32 rate = READ_ONCE(tp->rate_delivered); in tcp_compute_delivery_rate()
394 u32 intv = READ_ONCE(tp->rate_interval_us); in tcp_compute_delivery_rate()
398 rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC; in tcp_compute_delivery_rate()
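
The tcp_compute_delivery_rate() lines above imply the rate formula: packets delivered in the last sampling interval, times the cached MSS, scaled from microseconds up to one second. A minimal standalone sketch of that arithmetic, with a zero-interval guard as in the kernel source and purely hypothetical sample values:

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL

/* bytes/sec = rate_delivered (packets) * mss_cache (bytes) * USEC_PER_SEC
 *             / rate_interval_us, guarding against an empty sample. */
static uint64_t delivery_rate_bytes_per_sec(uint32_t rate_delivered,
                                            uint32_t rate_interval_us,
                                            uint32_t mss_cache)
{
        if (!rate_delivered || !rate_interval_us)
                return 0;
        return (uint64_t)rate_delivered * mss_cache * USEC_PER_SEC /
               rate_interval_us;
}

int main(void)
{
        /* Hypothetical sample: 100 packets of 1448 bytes over 10 ms. */
        printf("%llu bytes/sec\n", (unsigned long long)
               delivery_rate_bytes_per_sec(100, 10000, 1448));
        return 0;
}
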
412 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock() local
414 tp->out_of_order_queue = RB_ROOT; in tcp_init_sock()
417 INIT_LIST_HEAD(&tp->tsq_node); in tcp_init_sock()
418 INIT_LIST_HEAD(&tp->tsorted_sent_queue); in tcp_init_sock()
423 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); in tcp_init_sock()
424 minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U); in tcp_init_sock()
431 tp->snd_cwnd = TCP_INIT_CWND; in tcp_init_sock()
434 tp->app_limited = ~0U; in tcp_init_sock()
439 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; in tcp_init_sock()
440 tp->snd_cwnd_clamp = ~0; in tcp_init_sock()
441 tp->mss_cache = TCP_MSS_DEFAULT; in tcp_init_sock()
443 tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering; in tcp_init_sock()
446 tp->tsoffset = 0; in tcp_init_sock()
447 tp->rack.reo_wnd_steps = 1; in tcp_init_sock()
478 static inline bool tcp_stream_is_readable(const struct tcp_sock *tp, in tcp_stream_is_readable() argument
481 int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq); in tcp_stream_is_readable()
488 if (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss) in tcp_stream_is_readable()
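
tcp_stream_is_readable() above measures unread data as rcv_nxt - copied_seq and also wakes a reader whose receive window has collapsed to at most one rcv_mss. A simplified standalone sketch of that test; it omits the kernel's memory-pressure and protocol-hook branches, and the parameters stand in for the tcp_sock/inet_csk fields in the listing:

#include <stdbool.h>
#include <stdint.h>

static bool stream_is_readable_sketch(uint32_t rcv_nxt, uint32_t copied_seq,
                                      int target, uint32_t receive_window,
                                      uint32_t rcv_mss)
{
        int avail = (int)(rcv_nxt - copied_seq);   /* unread bytes */

        if (avail <= 0)
                return false;
        if (avail >= target)                       /* low-water mark reached */
                return true;
        /* Tiny advertised window: wake the reader so it drains the socket. */
        return receive_window <= rcv_mss;
}
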
507 const struct tcp_sock *tp = tcp_sk(sk); in tcp_poll() local
557 (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) { in tcp_poll()
560 if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) && in tcp_poll()
562 tp->urg_data) in tcp_poll()
565 if (tcp_stream_is_readable(tp, target, sk)) in tcp_poll()
587 if (tp->urg_data & TCP_URG_VALID) in tcp_poll()
607 struct tcp_sock *tp = tcp_sk(sk); in tcp_ioctl() local
621 answ = tp->urg_data && in tcp_ioctl()
622 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq); in tcp_ioctl()
631 answ = READ_ONCE(tp->write_seq) - tp->snd_una; in tcp_ioctl()
640 answ = READ_ONCE(tp->write_seq) - in tcp_ioctl()
641 READ_ONCE(tp->snd_nxt); in tcp_ioctl()
651 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) in tcp_mark_push() argument
654 tp->pushed_seq = tp->write_seq; in tcp_mark_push()
657 static inline bool forced_push(const struct tcp_sock *tp) in forced_push() argument
659 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); in forced_push()
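
forced_push() above relies on the wraparound-safe sequence comparison used throughout TCP: after(a, b) is true when a is newer than b modulo 2^32, computed as a signed 32-bit difference. A hedged standalone sketch of that helper and of the half-of-max_window push heuristic shown in the listing (seq_before/seq_after are local stand-ins for the kernel's before()/after()):

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "seq1 is before seq2": the signed 32-bit difference keeps the
 * comparison correct across sequence-number wraparound. */
static bool seq_before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define seq_after(seq2, seq1) seq_before(seq1, seq2)

/* Mirror of the forced_push() test above: force a push once more than half of
 * the largest window the peer ever advertised is outstanding and unpushed. */
static bool forced_push_sketch(uint32_t write_seq, uint32_t pushed_seq,
                               uint32_t max_window)
{
        return seq_after(write_seq, pushed_seq + (max_window >> 1));
}
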
664 struct tcp_sock *tp = tcp_sk(sk); in skb_entail() local
668 tcb->seq = tcb->end_seq = tp->write_seq; in skb_entail()
675 if (tp->nonagle & TCP_NAGLE_PUSH) in skb_entail()
676 tp->nonagle &= ~TCP_NAGLE_PUSH; in skb_entail()
681 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags) in tcp_mark_urg() argument
684 tp->snd_up = tp->write_seq; in tcp_mark_urg()
709 struct tcp_sock *tp = tcp_sk(sk); in tcp_push() local
715 if (!(flags & MSG_MORE) || forced_push(tp)) in tcp_push()
716 tcp_mark_push(tp, skb); in tcp_push()
718 tcp_mark_urg(tp, flags); in tcp_push()
919 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_size_goal() local
927 new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); in tcp_xmit_size_goal()
930 size_goal = tp->gso_segs * mss_now; in tcp_xmit_size_goal()
933 tp->gso_segs = min_t(u16, new_size_goal / mss_now, in tcp_xmit_size_goal()
935 size_goal = tp->gso_segs * mss_now; in tcp_xmit_size_goal()
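
tcp_xmit_size_goal() above keeps the transmit size goal as a whole multiple of the current MSS (tp->gso_segs * mss_now) and only recomputes the cached segment count when the bounded goal has drifted by at least one MSS, so the common path avoids a divide. A rough sketch of that caching idea under simplified assumptions; it is not the kernel function and ignores the window bounding shown at line 927:

#include <stdint.h>

struct size_goal_cache {
        uint16_t gso_segs;      /* cached segment count, as tp->gso_segs above */
};

static uint32_t xmit_size_goal_sketch(struct size_goal_cache *c,
                                      uint32_t new_size_goal, uint32_t mss_now,
                                      uint16_t gso_max_segs)
{
        uint32_t size_goal = (uint32_t)c->gso_segs * mss_now;

        /* Recompute only when the goal moved by at least one MSS. */
        if (new_size_goal < size_goal || new_size_goal >= size_goal + mss_now) {
                uint32_t segs = new_size_goal / mss_now;

                c->gso_segs = segs < gso_max_segs ? (uint16_t)segs : gso_max_segs;
                size_goal = (uint32_t)c->gso_segs * mss_now;
        }
        return size_goal > mss_now ? size_goal : mss_now;
}
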
970 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_sendpages() local
1030 tcp_mark_push(tp, skb); in do_tcp_sendpages()
1052 WRITE_ONCE(tp->write_seq, tp->write_seq + copy); in do_tcp_sendpages()
1068 if (forced_push(tp)) { in do_tcp_sendpages()
1069 tcp_mark_push(tp, skb); in do_tcp_sendpages()
1091 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in do_tcp_sendpages()
1134 void tcp_free_fastopen_req(struct tcp_sock *tp) in tcp_free_fastopen_req() argument
1136 if (tp->fastopen_req) { in tcp_free_fastopen_req()
1137 kfree(tp->fastopen_req); in tcp_free_fastopen_req()
1138 tp->fastopen_req = NULL; in tcp_free_fastopen_req()
1146 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_fastopen() local
1155 if (tp->fastopen_req) in tcp_sendmsg_fastopen()
1158 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), in tcp_sendmsg_fastopen()
1160 if (unlikely(!tp->fastopen_req)) in tcp_sendmsg_fastopen()
1162 tp->fastopen_req->data = msg; in tcp_sendmsg_fastopen()
1163 tp->fastopen_req->size = size; in tcp_sendmsg_fastopen()
1164 tp->fastopen_req->uarg = uarg; in tcp_sendmsg_fastopen()
1181 if (tp->fastopen_req) { in tcp_sendmsg_fastopen()
1182 *copied = tp->fastopen_req->copied; in tcp_sendmsg_fastopen()
1183 tcp_free_fastopen_req(tp); in tcp_sendmsg_fastopen()
1191 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_locked() local
1217 !tp->repair) { in tcp_sendmsg_locked()
1240 if (unlikely(tp->repair)) { in tcp_sendmsg_locked()
1241 if (tp->repair_queue == TCP_RECV_QUEUE) { in tcp_sendmsg_locked()
1247 if (tp->repair_queue == TCP_NO_QUEUE) in tcp_sendmsg_locked()
1310 if (tp->repair) in tcp_sendmsg_locked()
1336 tcp_mark_push(tp, skb); in tcp_sendmsg_locked()
1366 tcp_mark_push(tp, skb); in tcp_sendmsg_locked()
1377 WRITE_ONCE(tp->write_seq, tp->write_seq + copy); in tcp_sendmsg_locked()
1388 if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) in tcp_sendmsg_locked()
1391 if (forced_push(tp)) { in tcp_sendmsg_locked()
1392 tcp_mark_push(tp, skb); in tcp_sendmsg_locked()
1414 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in tcp_sendmsg_locked()
1458 struct tcp_sock *tp = tcp_sk(sk); in tcp_recv_urg() local
1461 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || in tcp_recv_urg()
1462 tp->urg_data == TCP_URG_READ) in tcp_recv_urg()
1468 if (tp->urg_data & TCP_URG_VALID) { in tcp_recv_urg()
1470 char c = tp->urg_data; in tcp_recv_urg()
1473 tp->urg_data = TCP_URG_READ; in tcp_recv_urg()
1533 struct tcp_sock *tp = tcp_sk(sk); in tcp_cleanup_rbuf() local
1538 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), in tcp_cleanup_rbuf()
1540 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); in tcp_cleanup_rbuf()
1546 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || in tcp_cleanup_rbuf()
1568 __u32 rcv_window_now = tcp_receive_window(tp); in tcp_cleanup_rbuf()
1571 if (2*rcv_window_now <= tp->window_clamp) { in tcp_cleanup_rbuf()
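
tcp_cleanup_rbuf() above decides whether consuming data should trigger an immediate window update: the test at line 1571 only fires once the advertised free window has shrunk to half of window_clamp or less, and the update is worth sending when the reopened window would at least double. A small sketch of that decision; new_window stands in for the value the kernel would obtain from __tcp_select_window():

#include <stdbool.h>
#include <stdint.h>

static bool time_to_ack_for_window_update(uint32_t rcv_window_now,
                                          uint32_t window_clamp,
                                          uint32_t new_window)
{
        /* Window still comfortably open: no need for an explicit update. */
        if (2 * rcv_window_now > window_clamp)
                return false;
        /* The read freed "lots" of space: the window can at least double. */
        return new_window && new_window >= 2 * rcv_window_now;
}
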
1626 struct tcp_sock *tp = tcp_sk(sk); in tcp_read_sock() local
1627 u32 seq = tp->copied_seq; in tcp_read_sock()
1640 if (tp->urg_data) { in tcp_read_sock()
1641 u32 urg_offset = tp->urg_seq - seq; in tcp_read_sock()
1679 WRITE_ONCE(tp->copied_seq, seq); in tcp_read_sock()
1681 WRITE_ONCE(tp->copied_seq, seq); in tcp_read_sock()
1788 struct tcp_sock *tp; in tcp_zerocopy_receive() local
1800 tp = tcp_sk(sk); in tcp_zerocopy_receive()
1811 seq = tp->copied_seq; in tcp_zerocopy_receive()
1889 WRITE_ONCE(tp->copied_seq, seq); in tcp_zerocopy_receive()
1989 const struct tcp_sock *tp = tcp_sk(sk); in tcp_inq_hint() local
1990 u32 copied_seq = READ_ONCE(tp->copied_seq); in tcp_inq_hint()
1991 u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); in tcp_inq_hint()
1995 if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { in tcp_inq_hint()
1997 inq = tp->rcv_nxt - tp->copied_seq; in tcp_inq_hint()
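
tcp_inq_hint() above samples copied_seq and rcv_nxt without the socket lock, then falls back to a consistent reread when the lockless snapshot is clearly stale (a negative count, or copied_seq having moved during the sample). A standalone sketch of that retry pattern using C11 atomics; the struct and the commented-out lock calls are illustrative stand-ins for the socket and lock_sock()/release_sock():

#include <stdatomic.h>
#include <stdint.h>

struct sk_like {
        _Atomic uint32_t copied_seq;
        _Atomic uint32_t rcv_nxt;
};

static int32_t inq_hint_sketch(struct sk_like *sk)
{
        uint32_t copied = atomic_load(&sk->copied_seq);
        uint32_t rcv_nxt = atomic_load(&sk->rcv_nxt);
        int32_t inq = (int32_t)(rcv_nxt - copied);

        /* Stale or torn snapshot: redo the read under the (placeholder) lock. */
        if (inq < 0 || copied != atomic_load(&sk->copied_seq)) {
                /* lock(sk); */
                inq = (int32_t)(atomic_load(&sk->rcv_nxt) -
                                atomic_load(&sk->copied_seq));
                /* unlock(sk); */
        }
        return inq;
}
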
2019 struct tcp_sock *tp = tcp_sk(sk); in tcp_recvmsg() local
2045 cmsg_flags = tp->recvmsg_inq ? 1 : 0; in tcp_recvmsg()
2052 if (unlikely(tp->repair)) { in tcp_recvmsg()
2057 if (tp->repair_queue == TCP_SEND_QUEUE) in tcp_recvmsg()
2061 if (tp->repair_queue == TCP_NO_QUEUE) in tcp_recvmsg()
2067 seq = &tp->copied_seq; in tcp_recvmsg()
2069 peek_seq = tp->copied_seq; in tcp_recvmsg()
2079 if (tp->urg_data && tp->urg_seq == *seq) { in tcp_recvmsg()
2098 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, in tcp_recvmsg()
2113 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); in tcp_recvmsg()
2170 (peek_seq - copied - urg_hole != tp->copied_seq)) { in tcp_recvmsg()
2174 peek_seq = tp->copied_seq; in tcp_recvmsg()
2185 if (tp->urg_data) { in tcp_recvmsg()
2186 u32 urg_offset = tp->urg_seq - *seq; in tcp_recvmsg()
2219 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { in tcp_recvmsg()
2220 tp->urg_data = 0; in tcp_recvmsg()
2529 struct tcp_sock *tp = tcp_sk(sk); in tcp_close() local
2530 if (tp->linger2 < 0) { in tcp_close()
2635 struct tcp_sock *tp = tcp_sk(sk); in tcp_disconnect() local
2645 } else if (unlikely(tp->repair)) { in tcp_disconnect()
2648 (tp->snd_nxt != tp->write_seq && in tcp_disconnect()
2664 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_disconnect()
2665 tp->urg_data = 0; in tcp_disconnect()
2668 skb_rbtree_purge(&tp->out_of_order_queue); in tcp_disconnect()
2677 tp->srtt_us = 0; in tcp_disconnect()
2678 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); in tcp_disconnect()
2679 tp->rcv_rtt_last_tsecr = 0; in tcp_disconnect()
2681 seq = tp->write_seq + tp->max_window + 2; in tcp_disconnect()
2684 WRITE_ONCE(tp->write_seq, seq); in tcp_disconnect()
2691 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; in tcp_disconnect()
2692 tp->snd_cwnd = TCP_INIT_CWND; in tcp_disconnect()
2693 tp->snd_cwnd_cnt = 0; in tcp_disconnect()
2694 tp->window_clamp = 0; in tcp_disconnect()
2695 tp->delivered = 0; in tcp_disconnect()
2696 tp->delivered_ce = 0; in tcp_disconnect()
2702 tp->is_sack_reneg = 0; in tcp_disconnect()
2703 tcp_clear_retrans(tp); in tcp_disconnect()
2704 tp->total_retrans = 0; in tcp_disconnect()
2710 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); in tcp_disconnect()
2714 tcp_saved_syn_free(tp); in tcp_disconnect()
2715 tp->compressed_ack = 0; in tcp_disconnect()
2716 tp->segs_in = 0; in tcp_disconnect()
2717 tp->segs_out = 0; in tcp_disconnect()
2718 tp->bytes_sent = 0; in tcp_disconnect()
2719 tp->bytes_acked = 0; in tcp_disconnect()
2720 tp->bytes_received = 0; in tcp_disconnect()
2721 tp->bytes_retrans = 0; in tcp_disconnect()
2722 tp->data_segs_in = 0; in tcp_disconnect()
2723 tp->data_segs_out = 0; in tcp_disconnect()
2724 tp->duplicate_sack[0].start_seq = 0; in tcp_disconnect()
2725 tp->duplicate_sack[0].end_seq = 0; in tcp_disconnect()
2726 tp->dsack_dups = 0; in tcp_disconnect()
2727 tp->reord_seen = 0; in tcp_disconnect()
2728 tp->retrans_out = 0; in tcp_disconnect()
2729 tp->sacked_out = 0; in tcp_disconnect()
2730 tp->tlp_high_seq = 0; in tcp_disconnect()
2731 tp->last_oow_ack_time = 0; in tcp_disconnect()
2733 tp->app_limited = ~0U; in tcp_disconnect()
2734 tp->rack.mstamp = 0; in tcp_disconnect()
2735 tp->rack.advanced = 0; in tcp_disconnect()
2736 tp->rack.reo_wnd_steps = 1; in tcp_disconnect()
2737 tp->rack.last_delivered = 0; in tcp_disconnect()
2738 tp->rack.reo_wnd_persist = 0; in tcp_disconnect()
2739 tp->rack.dsack_seen = 0; in tcp_disconnect()
2740 tp->syn_data_acked = 0; in tcp_disconnect()
2741 tp->rx_opt.saw_tstamp = 0; in tcp_disconnect()
2742 tp->rx_opt.dsack = 0; in tcp_disconnect()
2743 tp->rx_opt.num_sacks = 0; in tcp_disconnect()
2744 tp->rcv_ooopack = 0; in tcp_disconnect()
2748 tcp_free_fastopen_req(tp); in tcp_disconnect()
2750 tp->fastopen_client_fail = 0; in tcp_disconnect()
2771 static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) in tcp_repair_set_window() argument
2775 if (!tp->repair) in tcp_repair_set_window()
2787 if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) in tcp_repair_set_window()
2790 if (after(opt.rcv_wup, tp->rcv_nxt)) in tcp_repair_set_window()
2793 tp->snd_wl1 = opt.snd_wl1; in tcp_repair_set_window()
2794 tp->snd_wnd = opt.snd_wnd; in tcp_repair_set_window()
2795 tp->max_window = opt.max_window; in tcp_repair_set_window()
2797 tp->rcv_wnd = opt.rcv_wnd; in tcp_repair_set_window()
2798 tp->rcv_wup = opt.rcv_wup; in tcp_repair_set_window()
2806 struct tcp_sock *tp = tcp_sk(sk); in tcp_repair_options_est() local
2819 tp->rx_opt.mss_clamp = opt.opt_val; in tcp_repair_options_est()
2830 tp->rx_opt.snd_wscale = snd_wscale; in tcp_repair_options_est()
2831 tp->rx_opt.rcv_wscale = rcv_wscale; in tcp_repair_options_est()
2832 tp->rx_opt.wscale_ok = 1; in tcp_repair_options_est()
2839 tp->rx_opt.sack_ok |= TCP_SACK_SEEN; in tcp_repair_options_est()
2845 tp->rx_opt.tstamp_ok = 1; in tcp_repair_options_est()
2879 struct tcp_sock *tp = tcp_sk(sk); in __tcp_sock_set_cork() local
2882 tp->nonagle |= TCP_NAGLE_CORK; in __tcp_sock_set_cork()
2884 tp->nonagle &= ~TCP_NAGLE_CORK; in __tcp_sock_set_cork()
2885 if (tp->nonagle & TCP_NAGLE_OFF) in __tcp_sock_set_cork()
2886 tp->nonagle |= TCP_NAGLE_PUSH; in __tcp_sock_set_cork()
2970 struct tcp_sock *tp = tcp_sk(sk); in tcp_sock_set_keepidle_locked() local
2975 tp->keepalive_time = val * HZ; in tcp_sock_set_keepidle_locked()
2978 u32 elapsed = keepalive_time_elapsed(tp); in tcp_sock_set_keepidle_locked()
2980 if (tp->keepalive_time > elapsed) in tcp_sock_set_keepidle_locked()
2981 elapsed = tp->keepalive_time - elapsed; in tcp_sock_set_keepidle_locked()
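
tcp_sock_set_keepidle_locked() above rearms an already-running keepalive timer: the next probe should fire after the new idle time minus the time the connection has already been idle, or immediately if that idle time is already exceeded. A tiny sketch of the remaining-time arithmetic, expressed in plain seconds rather than HZ-scaled jiffies:

#include <stdint.h>

/* Remaining time until the next keepalive probe after TCP_KEEPIDLE changes:
 * new idle limit minus time already idle, clamped at zero ("fire now"). */
static uint32_t keepidle_remaining_secs(uint32_t keepalive_time,
                                        uint32_t elapsed)
{
        return keepalive_time > elapsed ? keepalive_time - elapsed : 0;
}
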
3031 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_setsockopt() local
3118 tp->rx_opt.user_mss = val; in do_tcp_setsockopt()
3129 tp->thin_lto = val; in do_tcp_setsockopt()
3141 tp->repair = 1; in do_tcp_setsockopt()
3143 tp->repair_queue = TCP_NO_QUEUE; in do_tcp_setsockopt()
3145 tp->repair = 0; in do_tcp_setsockopt()
3149 tp->repair = 0; in do_tcp_setsockopt()
3157 if (!tp->repair) in do_tcp_setsockopt()
3160 tp->repair_queue = val; in do_tcp_setsockopt()
3168 else if (tp->repair_queue == TCP_SEND_QUEUE) in do_tcp_setsockopt()
3169 WRITE_ONCE(tp->write_seq, val); in do_tcp_setsockopt()
3170 else if (tp->repair_queue == TCP_RECV_QUEUE) { in do_tcp_setsockopt()
3171 WRITE_ONCE(tp->rcv_nxt, val); in do_tcp_setsockopt()
3172 WRITE_ONCE(tp->copied_seq, val); in do_tcp_setsockopt()
3179 if (!tp->repair) in do_tcp_setsockopt()
3198 tp->keepalive_intvl = val * HZ; in do_tcp_setsockopt()
3204 tp->keepalive_probes = val; in do_tcp_setsockopt()
3218 tp->save_syn = val; in do_tcp_setsockopt()
3223 tp->linger2 = -1; in do_tcp_setsockopt()
3225 tp->linger2 = TCP_FIN_TIMEOUT_MAX; in do_tcp_setsockopt()
3227 tp->linger2 = val * HZ; in do_tcp_setsockopt()
3243 tp->window_clamp = 0; in do_tcp_setsockopt()
3245 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? in do_tcp_setsockopt()
3256 err = tp->af_specific->md5_parse(sk, optname, optval, optlen); in do_tcp_setsockopt()
3284 tp->fastopen_connect = val; in do_tcp_setsockopt()
3297 tp->fastopen_no_cookie = val; in do_tcp_setsockopt()
3300 if (!tp->repair) in do_tcp_setsockopt()
3303 tp->tsoffset = val - tcp_time_stamp_raw(); in do_tcp_setsockopt()
3306 err = tcp_repair_set_window(tp, optval, optlen); in do_tcp_setsockopt()
3309 tp->notsent_lowat = val; in do_tcp_setsockopt()
3316 tp->recvmsg_inq = val; in do_tcp_setsockopt()
3321 tp->tcp_tx_delay = val; in do_tcp_setsockopt()
3344 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, in tcp_get_info_chrono_stats() argument
3351 stats[i] = tp->chrono_stat[i - 1]; in tcp_get_info_chrono_stats()
3352 if (i == tp->chrono_type) in tcp_get_info_chrono_stats()
3353 stats[i] += tcp_jiffies32 - tp->chrono_start; in tcp_get_info_chrono_stats()
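
tcp_get_info_chrono_stats() above copies the accumulated per-state busy-time counters out of the socket and tops up the bucket for the currently active chrono type with the time elapsed since chrono_start, so the exported value is current. A sketch of that readout loop; the enum and field names are simplified stand-ins for the kernel's tcp_chrono values, and the kernel additionally converts the jiffies counts to microseconds:

#include <stdint.h>

enum chrono {
        CHRONO_UNSPEC,          /* bucket 0 is unused */
        CHRONO_BUSY,
        CHRONO_RWND_LIMITED,
        CHRONO_SNDBUF_LIMITED,
        CHRONO_MAX
};

static void chrono_stats_sketch(const uint32_t chrono_stat[CHRONO_MAX - 1],
                                enum chrono active_type, uint32_t chrono_start,
                                uint32_t now, uint64_t stats[CHRONO_MAX])
{
        stats[CHRONO_UNSPEC] = 0;
        for (int i = CHRONO_BUSY; i < CHRONO_MAX; i++) {
                stats[i] = chrono_stat[i - 1];
                if (i == (int)active_type)      /* still ticking: add the open interval */
                        stats[i] += now - chrono_start;
        }
}
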
3366 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ in tcp_get_info() local
3388 info->tcpi_reordering = tp->reordering; in tcp_get_info()
3389 info->tcpi_snd_cwnd = tp->snd_cwnd; in tcp_get_info()
3408 if (tp->rx_opt.tstamp_ok) in tcp_get_info()
3410 if (tcp_is_sack(tp)) in tcp_get_info()
3412 if (tp->rx_opt.wscale_ok) { in tcp_get_info()
3414 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; in tcp_get_info()
3415 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; in tcp_get_info()
3418 if (tp->ecn_flags & TCP_ECN_OK) in tcp_get_info()
3420 if (tp->ecn_flags & TCP_ECN_SEEN) in tcp_get_info()
3422 if (tp->syn_data_acked) in tcp_get_info()
3427 info->tcpi_snd_mss = tp->mss_cache; in tcp_get_info()
3430 info->tcpi_unacked = tp->packets_out; in tcp_get_info()
3431 info->tcpi_sacked = tp->sacked_out; in tcp_get_info()
3433 info->tcpi_lost = tp->lost_out; in tcp_get_info()
3434 info->tcpi_retrans = tp->retrans_out; in tcp_get_info()
3437 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); in tcp_get_info()
3439 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); in tcp_get_info()
3442 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; in tcp_get_info()
3443 info->tcpi_rtt = tp->srtt_us >> 3; in tcp_get_info()
3444 info->tcpi_rttvar = tp->mdev_us >> 2; in tcp_get_info()
3445 info->tcpi_snd_ssthresh = tp->snd_ssthresh; in tcp_get_info()
3446 info->tcpi_advmss = tp->advmss; in tcp_get_info()
3448 info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; in tcp_get_info()
3449 info->tcpi_rcv_space = tp->rcvq_space.space; in tcp_get_info()
3451 info->tcpi_total_retrans = tp->total_retrans; in tcp_get_info()
3453 info->tcpi_bytes_acked = tp->bytes_acked; in tcp_get_info()
3454 info->tcpi_bytes_received = tp->bytes_received; in tcp_get_info()
3455 info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); in tcp_get_info()
3456 tcp_get_info_chrono_stats(tp, info); in tcp_get_info()
3458 info->tcpi_segs_out = tp->segs_out; in tcp_get_info()
3459 info->tcpi_segs_in = tp->segs_in; in tcp_get_info()
3461 info->tcpi_min_rtt = tcp_min_rtt(tp); in tcp_get_info()
3462 info->tcpi_data_segs_in = tp->data_segs_in; in tcp_get_info()
3463 info->tcpi_data_segs_out = tp->data_segs_out; in tcp_get_info()
3465 info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0; in tcp_get_info()
3466 rate64 = tcp_compute_delivery_rate(tp); in tcp_get_info()
3469 info->tcpi_delivered = tp->delivered; in tcp_get_info()
3470 info->tcpi_delivered_ce = tp->delivered_ce; in tcp_get_info()
3471 info->tcpi_bytes_sent = tp->bytes_sent; in tcp_get_info()
3472 info->tcpi_bytes_retrans = tp->bytes_retrans; in tcp_get_info()
3473 info->tcpi_dsack_dups = tp->dsack_dups; in tcp_get_info()
3474 info->tcpi_reord_seen = tp->reord_seen; in tcp_get_info()
3475 info->tcpi_rcv_ooopack = tp->rcv_ooopack; in tcp_get_info()
3476 info->tcpi_snd_wnd = tp->snd_wnd; in tcp_get_info()
3477 info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; in tcp_get_info()
3516 const struct tcp_sock *tp = tcp_sk(sk); in tcp_get_timestamping_opt_stats() local
3526 tcp_get_info_chrono_stats(tp, &info); in tcp_get_timestamping_opt_stats()
3534 tp->data_segs_out, TCP_NLA_PAD); in tcp_get_timestamping_opt_stats()
3536 tp->total_retrans, TCP_NLA_PAD); in tcp_get_timestamping_opt_stats()
3542 rate64 = tcp_compute_delivery_rate(tp); in tcp_get_timestamping_opt_stats()
3545 nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd); in tcp_get_timestamping_opt_stats()
3546 nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); in tcp_get_timestamping_opt_stats()
3547 nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); in tcp_get_timestamping_opt_stats()
3550 nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); in tcp_get_timestamping_opt_stats()
3551 nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); in tcp_get_timestamping_opt_stats()
3552 nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); in tcp_get_timestamping_opt_stats()
3553 nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); in tcp_get_timestamping_opt_stats()
3555 nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); in tcp_get_timestamping_opt_stats()
3558 nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, in tcp_get_timestamping_opt_stats()
3560 nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, in tcp_get_timestamping_opt_stats()
3562 nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); in tcp_get_timestamping_opt_stats()
3563 nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); in tcp_get_timestamping_opt_stats()
3564 nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); in tcp_get_timestamping_opt_stats()
3565 nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); in tcp_get_timestamping_opt_stats()
3567 max_t(int, 0, tp->write_seq - tp->snd_nxt)); in tcp_get_timestamping_opt_stats()
3578 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_getsockopt() local
3592 val = tp->mss_cache; in do_tcp_getsockopt()
3594 val = tp->rx_opt.user_mss; in do_tcp_getsockopt()
3595 if (tp->repair) in do_tcp_getsockopt()
3596 val = tp->rx_opt.mss_clamp; in do_tcp_getsockopt()
3599 val = !!(tp->nonagle&TCP_NAGLE_OFF); in do_tcp_getsockopt()
3602 val = !!(tp->nonagle&TCP_NAGLE_CORK); in do_tcp_getsockopt()
3605 val = keepalive_time_when(tp) / HZ; in do_tcp_getsockopt()
3608 val = keepalive_intvl_when(tp) / HZ; in do_tcp_getsockopt()
3611 val = keepalive_probes(tp); in do_tcp_getsockopt()
3617 val = tp->linger2; in do_tcp_getsockopt()
3626 val = tp->window_clamp; in do_tcp_getsockopt()
3709 val = tp->thin_lto; in do_tcp_getsockopt()
3717 val = tp->repair; in do_tcp_getsockopt()
3721 if (tp->repair) in do_tcp_getsockopt()
3722 val = tp->repair_queue; in do_tcp_getsockopt()
3736 if (!tp->repair) in do_tcp_getsockopt()
3739 opt.snd_wl1 = tp->snd_wl1; in do_tcp_getsockopt()
3740 opt.snd_wnd = tp->snd_wnd; in do_tcp_getsockopt()
3741 opt.max_window = tp->max_window; in do_tcp_getsockopt()
3742 opt.rcv_wnd = tp->rcv_wnd; in do_tcp_getsockopt()
3743 opt.rcv_wup = tp->rcv_wup; in do_tcp_getsockopt()
3750 if (tp->repair_queue == TCP_SEND_QUEUE) in do_tcp_getsockopt()
3751 val = tp->write_seq; in do_tcp_getsockopt()
3752 else if (tp->repair_queue == TCP_RECV_QUEUE) in do_tcp_getsockopt()
3753 val = tp->rcv_nxt; in do_tcp_getsockopt()
3767 val = tp->fastopen_connect; in do_tcp_getsockopt()
3771 val = tp->fastopen_no_cookie; in do_tcp_getsockopt()
3775 val = tp->tcp_tx_delay; in do_tcp_getsockopt()
3779 val = tcp_time_stamp_raw() + tp->tsoffset; in do_tcp_getsockopt()
3782 val = tp->notsent_lowat; in do_tcp_getsockopt()
3785 val = tp->recvmsg_inq; in do_tcp_getsockopt()
3788 val = tp->save_syn; in do_tcp_getsockopt()
3795 if (tp->saved_syn) { in do_tcp_getsockopt()
3796 if (len < tcp_saved_syn_len(tp->saved_syn)) { in do_tcp_getsockopt()
3797 if (put_user(tcp_saved_syn_len(tp->saved_syn), in do_tcp_getsockopt()
3805 len = tcp_saved_syn_len(tp->saved_syn); in do_tcp_getsockopt()
3810 if (copy_to_user(optval, tp->saved_syn->data, len)) { in do_tcp_getsockopt()
3814 tcp_saved_syn_free(tp); in do_tcp_getsockopt()
3976 const struct tcphdr *tp = tcp_hdr(skb); in tcp_md5_hash_skb_data() local
3986 sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); in tcp_md5_hash_skb_data()