Lines matching refs: sk
(cross-referencer output: the leading numbers are the source file's own line numbers, apparently from include/net/tcp.h, and the trailing "in foo()" / "argument" tags mark where each matched reference occurs)
51 void tcp_time_wait(struct sock *sk, int state, int timeo);
255 static inline bool tcp_under_memory_pressure(const struct sock *sk) in tcp_under_memory_pressure() argument
257 if (mem_cgroup_sockets_enabled && sk->sk_memcg && in tcp_under_memory_pressure()
258 mem_cgroup_under_socket_pressure(sk->sk_memcg)) in tcp_under_memory_pressure()
280 static inline bool tcp_out_of_memory(struct sock *sk) in tcp_out_of_memory() argument
282 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && in tcp_out_of_memory()
283 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) in tcp_out_of_memory()
288 void sk_forced_mem_schedule(struct sock *sk, int size);
290 static inline bool tcp_too_many_orphans(struct sock *sk, int shift) in tcp_too_many_orphans() argument
292 struct percpu_counter *ocp = sk->sk_prot->orphan_count; in tcp_too_many_orphans()
303 bool tcp_check_oom(struct sock *sk, int shift);
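The fragments above pair a soft-pressure check with a hard limit. A minimal reconstruction of tcp_out_of_memory() consistent with the matched lines (a sketch, not authoritative):

    static inline bool tcp_out_of_memory(struct sock *sk)
    {
    	/* Truly out of memory: this socket still queues more than the
    	 * minimal send buffer, yet TCP globally sits above its hard
    	 * limit (index 2 of the tcp_mem sysctl triple). */
    	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
    	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
    		return true;
    	return false;
    }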
317 void tcp_shutdown(struct sock *sk, int how);
323 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
324 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
325 int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
327 int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
329 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
331 void tcp_release_cb(struct sock *sk);
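tcp_sendmsg() and tcp_sendmsg_locked() split the work so callers that already own the socket lock (ULPs, for instance) can skip relocking; in this kernel generation the unlocked entry point is essentially a lock_sock() wrapper. A sketch:

    int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
    {
    	int ret;

    	/* Take the socket lock, then defer to the _locked variant. */
    	lock_sock(sk);
    	ret = tcp_sendmsg_locked(sk, msg, size);
    	release_sock(sk);

    	return ret;
    }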
333 void tcp_write_timer_handler(struct sock *sk);
334 void tcp_delack_timer_handler(struct sock *sk);
335 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
336 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
337 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
338 void tcp_rcv_space_adjust(struct sock *sk);
339 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
340 void tcp_twsk_destructor(struct sock *sk);
341 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
345 void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
346 static inline void tcp_dec_quickack_mode(struct sock *sk, in tcp_dec_quickack_mode() argument
349 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_dec_quickack_mode()
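tcp_dec_quickack_mode() burns down the quick-ACK budget as ACKs go out; once it reaches zero the socket falls back to delayed ACKs. A reconstruction around the matched fragment (field names from struct inet_connection_sock):

    static inline void tcp_dec_quickack_mode(struct sock *sk,
    					     const unsigned int pkts)
    {
    	struct inet_connection_sock *icsk = inet_csk(sk);

    	if (icsk->icsk_ack.quick) {
    		if (pkts >= icsk->icsk_ack.quick) {
    			icsk->icsk_ack.quick = 0;
    			/* Leaving quickack mode we deflate ATO. */
    			icsk->icsk_ack.ato = TCP_ATO_MIN;
    		} else
    			icsk->icsk_ack.quick -= pkts;
    	}
    }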
377 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
382 void tcp_enter_loss(struct sock *sk);
383 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
385 void tcp_update_metrics(struct sock *sk);
386 void tcp_init_metrics(struct sock *sk);
389 void tcp_close(struct sock *sk, long timeout);
390 void tcp_init_sock(struct sock *sk);
391 void tcp_init_transfer(struct sock *sk, int bpf_op);
394 int tcp_getsockopt(struct sock *sk, int level, int optname,
396 int tcp_setsockopt(struct sock *sk, int level, int optname,
398 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
400 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
402 void tcp_set_keepalive(struct sock *sk, int val);
404 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
406 int tcp_set_rcvlowat(struct sock *sk, int val);
407 void tcp_data_ready(struct sock *sk);
420 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
422 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
426 struct sock *sk, struct tcphdr *th);
431 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
432 void tcp_v4_mtu_reduced(struct sock *sk);
433 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
434 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
435 struct sock *tcp_create_openreq_child(const struct sock *sk,
438 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
439 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
444 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
445 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
446 int tcp_connect(struct sock *sk);
452 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
456 int tcp_disconnect(struct sock *sk, int flags);
458 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
459 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
460 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
463 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
468 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
486 static inline void tcp_synq_overflow(const struct sock *sk) in tcp_synq_overflow() argument
491 if (sk->sk_reuseport) { in tcp_synq_overflow()
494 reuse = rcu_dereference(sk->sk_reuseport_cb); in tcp_synq_overflow()
503 last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; in tcp_synq_overflow()
505 tcp_sk(sk)->rx_opt.ts_recent_stamp = now; in tcp_synq_overflow()
509 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) in tcp_synq_no_recent_overflow() argument
514 if (sk->sk_reuseport) { in tcp_synq_no_recent_overflow()
517 reuse = rcu_dereference(sk->sk_reuseport_cb); in tcp_synq_no_recent_overflow()
525 last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; in tcp_synq_no_recent_overflow()
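Both helpers above rate-limit access to a per-listener SYN-queue overflow stamp; on a plain listener it is parked in rx_opt.ts_recent_stamp, which listen sockets do not otherwise use. A sketch of the non-reuseport path, assuming jiffies granularity and time_between32() for wraparound-safe comparison (the reuseport branch visible in the fragments keeps the stamp in sk_reuseport_cb instead and is elided here):

    static inline void tcp_synq_overflow(const struct sock *sk)
    {
    	unsigned int last_overflow;
    	unsigned int now = jiffies;

    	/* Update at most once per HZ to keep the cacheline quiet. */
    	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
    	if (!time_between32(now, last_overflow, last_overflow + HZ))
    		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
    }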
549 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
557 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
559 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
560 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
561 void tcp_retransmit_timer(struct sock *sk);
564 void tcp_enter_recovery(struct sock *sk, bool ece_ack);
570 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
577 void tcp_send_fin(struct sock *sk);
578 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
581 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
582 void tcp_send_ack(struct sock *sk);
583 void tcp_send_delayed_ack(struct sock *sk);
584 void tcp_send_loss_probe(struct sock *sk);
585 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
590 void tcp_rearm_rto(struct sock *sk);
591 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
592 void tcp_reset(struct sock *sk);
594 void tcp_fin(struct sock *sk);
598 static inline void tcp_clear_xmit_timers(struct sock *sk) in tcp_clear_xmit_timers() argument
600 if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1) in tcp_clear_xmit_timers()
601 __sock_put(sk); in tcp_clear_xmit_timers()
603 if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1) in tcp_clear_xmit_timers()
604 __sock_put(sk); in tcp_clear_xmit_timers()
606 inet_csk_clear_xmit_timers(sk); in tcp_clear_xmit_timers()
609 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
610 unsigned int tcp_current_mss(struct sock *sk);
639 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
642 void tcp_initialize_rcv_mss(struct sock *sk);
644 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
645 int tcp_mss_to_mtu(struct sock *sk, int mss);
646 void tcp_mtup_init(struct sock *sk);
647 void tcp_init_buffer_space(struct sock *sk);
649 static inline void tcp_bound_rto(const struct sock *sk) in tcp_bound_rto() argument
651 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) in tcp_bound_rto()
652 inet_csk(sk)->icsk_rto = TCP_RTO_MAX; in tcp_bound_rto()
672 static inline void tcp_fast_path_check(struct sock *sk) in tcp_fast_path_check() argument
674 struct tcp_sock *tp = tcp_sk(sk); in tcp_fast_path_check()
678 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && in tcp_fast_path_check()
684 static inline u32 tcp_rto_min(struct sock *sk) in tcp_rto_min() argument
686 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_rto_min()
694 static inline u32 tcp_rto_min_us(struct sock *sk) in tcp_rto_min_us() argument
696 return jiffies_to_usecs(tcp_rto_min(sk)); in tcp_rto_min_us()
727 u32 __tcp_select_window(struct sock *sk);
729 void tcp_send_window_probe(struct sock *sk);
1032 void (*init)(struct sock *sk);
1034 void (*release)(struct sock *sk);
1037 u32 (*ssthresh)(struct sock *sk);
1039 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1041 void (*set_state)(struct sock *sk, u8 new_state);
1043 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1045 void (*in_ack_event)(struct sock *sk, u32 flags);
1047 u32 (*undo_cwnd)(struct sock *sk);
1049 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1051 u32 (*min_tso_segs)(struct sock *sk);
1053 u32 (*sndbuf_expand)(struct sock *sk);
1057 void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
1059 size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1069 void tcp_assign_congestion_control(struct sock *sk);
1070 void tcp_init_congestion_control(struct sock *sk);
1071 void tcp_cleanup_congestion_control(struct sock *sk);
1077 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1082 u32 tcp_reno_ssthresh(struct sock *sk);
1083 u32 tcp_reno_undo_cwnd(struct sock *sk);
1084 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
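The callbacks listed above make up struct tcp_congestion_ops; ssthresh, undo_cwnd and one of cong_avoid/cong_control are mandatory, the rest optional. A minimal, hypothetical module ("example" and the function names are invented) that registers a Reno-behaved algorithm using the helpers declared above:

    #include <linux/module.h>
    #include <net/tcp.h>

    static struct tcp_congestion_ops tcp_example __read_mostly = {
    	/* Borrow Reno's behaviour for every mandatory hook. */
    	.ssthresh	= tcp_reno_ssthresh,
    	.cong_avoid	= tcp_reno_cong_avoid,
    	.undo_cwnd	= tcp_reno_undo_cwnd,
    	.name		= "example",
    	.owner		= THIS_MODULE,
    };

    static int __init tcp_example_init(void)
    {
    	return tcp_register_congestion_control(&tcp_example);
    }

    static void __exit tcp_example_exit(void)
    {
    	tcp_unregister_congestion_control(&tcp_example);
    }

    module_init(tcp_example_init);
    module_exit(tcp_example_exit);
    MODULE_LICENSE("GPL");

Once loaded, a socket can opt in with setsockopt(IPPROTO_TCP, TCP_CONGESTION, "example"), which lands in tcp_set_congestion_control() above.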
1098 static inline bool tcp_ca_needs_ecn(const struct sock *sk) in tcp_ca_needs_ecn() argument
1100 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_needs_ecn()
1105 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) in tcp_set_ca_state() argument
1107 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_set_ca_state()
1110 icsk->icsk_ca_ops->set_state(sk, ca_state); in tcp_set_ca_state()
1114 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) in tcp_ca_event() argument
1116 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_event()
1119 icsk->icsk_ca_ops->cwnd_event(sk, event); in tcp_ca_event()
1123 void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1124 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1126 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1128 void tcp_rate_check_app_limited(struct sock *sk);
1183 static inline bool tcp_in_cwnd_reduction(const struct sock *sk) in tcp_in_cwnd_reduction() argument
1186 (1 << inet_csk(sk)->icsk_ca_state); in tcp_in_cwnd_reduction()
1193 static inline __u32 tcp_current_ssthresh(const struct sock *sk) in tcp_current_ssthresh() argument
1195 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_ssthresh()
1197 if (tcp_in_cwnd_reduction(sk)) in tcp_current_ssthresh()
1208 void tcp_enter_cwr(struct sock *sk);
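Outside of cwnd reduction, tcp_current_ssthresh() reports roughly three quarters of the current cwnd (never less than snd_ssthresh), so a later cwnd collapse does not erase recent progress. A reconstruction completing the matched fragments:

    static inline __u32 tcp_current_ssthresh(const struct sock *sk)
    {
    	const struct tcp_sock *tp = tcp_sk(sk);

    	if (tcp_in_cwnd_reduction(sk))
    		return tp->snd_ssthresh;

    	/* max(snd_ssthresh, 3/4 * snd_cwnd) */
    	return max(tp->snd_ssthresh,
    		   ((tp->snd_cwnd >> 1) + (tp->snd_cwnd >> 2)));
    }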
1238 static inline bool tcp_is_cwnd_limited(const struct sock *sk) in tcp_is_cwnd_limited() argument
1240 const struct tcp_sock *tp = tcp_sk(sk); in tcp_is_cwnd_limited()
1255 static inline bool tcp_needs_internal_pacing(const struct sock *sk) in tcp_needs_internal_pacing() argument
1257 return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; in tcp_needs_internal_pacing()
1263 static inline unsigned long tcp_pacing_delay(const struct sock *sk, in tcp_pacing_delay() argument
1266 s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns; in tcp_pacing_delay()
1268 pacing_delay -= tcp_sk(sk)->tcp_clock_cache; in tcp_pacing_delay()
1273 static inline void tcp_reset_xmit_timer(struct sock *sk, in tcp_reset_xmit_timer() argument
1279 inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb), in tcp_reset_xmit_timer()
1289 static inline unsigned long tcp_probe0_base(const struct sock *sk) in tcp_probe0_base() argument
1291 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN); in tcp_probe0_base()
1295 static inline unsigned long tcp_probe0_when(const struct sock *sk, in tcp_probe0_when() argument
1298 u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff; in tcp_probe0_when()
1303 static inline void tcp_check_probe_timer(struct sock *sk) in tcp_check_probe_timer() argument
1305 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) in tcp_check_probe_timer()
1306 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, in tcp_check_probe_timer()
1307 tcp_probe0_base(sk), TCP_RTO_MAX, in tcp_check_probe_timer()
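The zero-window probe timer starts from tcp_probe0_base() (the RTO floored at TCP_RTO_MIN) and backs off exponentially with icsk_backoff. A sketch of tcp_probe0_when() consistent with the fragment above:

    static inline unsigned long tcp_probe0_when(const struct sock *sk,
    					        unsigned long max_when)
    {
    	/* Exponential backoff, clamped to the caller's ceiling. */
    	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;

    	return (unsigned long)min_t(u64, when, max_when);
    }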
1336 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1337 int tcp_filter(struct sock *sk, struct sk_buff *skb);
1338 void tcp_set_state(struct sock *sk, int state);
1339 void tcp_done(struct sock *sk);
1340 int tcp_abort(struct sock *sk, int err);
1349 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1351 static inline void tcp_slow_start_after_idle_check(struct sock *sk) in tcp_slow_start_after_idle_check() argument
1353 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_slow_start_after_idle_check()
1354 struct tcp_sock *tp = tcp_sk(sk); in tcp_slow_start_after_idle_check()
1357 if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out || in tcp_slow_start_after_idle_check()
1361 if (delta > inet_csk(sk)->icsk_rto) in tcp_slow_start_after_idle_check()
1362 tcp_cwnd_restart(sk, delta); in tcp_slow_start_after_idle_check()
1366 void tcp_select_initial_window(const struct sock *sk, int __space,
1371 static inline int tcp_win_from_space(const struct sock *sk, int space) in tcp_win_from_space() argument
1373 int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale; in tcp_win_from_space()
1381 static inline int tcp_space(const struct sock *sk) in tcp_space() argument
1383 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - in tcp_space()
1384 READ_ONCE(sk->sk_backlog.len) - in tcp_space()
1385 atomic_read(&sk->sk_rmem_alloc)); in tcp_space()
1388 static inline int tcp_full_space(const struct sock *sk) in tcp_full_space() argument
1390 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); in tcp_full_space()
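tcp_win_from_space() converts raw receive-buffer space into advertisable window using the tcp_adv_win_scale sysctl: a positive scale reserves space/2^scale for skb overhead, a non-positive scale advertises space/2^(-scale). A reconstruction consistent with the matched line:

    static inline int tcp_win_from_space(const struct sock *sk, int space)
    {
    	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;

    	return tcp_adv_win_scale <= 0 ?
    		(space >> (-tcp_adv_win_scale)) :
    		space - (space >> tcp_adv_win_scale);
    }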
1397 void tcp_enter_memory_pressure(struct sock *sk);
1398 void tcp_leave_memory_pressure(struct sock *sk);
1429 static inline int tcp_fin_time(const struct sock *sk) in tcp_fin_time() argument
1431 int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout; in tcp_fin_time()
1432 const int rto = inet_csk(sk)->icsk_rto; in tcp_fin_time()
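tcp_fin_time() floors the FIN-WAIT-2 timeout at 3.5 * RTO so a peer's retransmitted FIN still has a chance to arrive before the socket is torn down. A reconstruction of the clamping that follows the matched lines:

    static inline int tcp_fin_time(const struct sock *sk)
    {
    	int fin_timeout = tcp_sk(sk)->linger2 ? :
    			  sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
    	const int rto = inet_csk(sk)->icsk_rto;

    	/* (rto << 2) - (rto >> 1) == 3.5 * rto */
    	if (fin_timeout < (rto << 2) - (rto >> 1))
    		fin_timeout = (rto << 2) - (rto >> 1);

    	return fin_timeout;
    }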
1561 const struct sock *sk, const struct sk_buff *skb);
1562 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1565 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1567 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1573 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
1577 tcp_md5_do_lookup(const struct sock *sk, in tcp_md5_do_lookup() argument
1583 return __tcp_md5_do_lookup(sk, addr, family); in tcp_md5_do_lookup()
1588 static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, in tcp_md5_do_lookup() argument
1611 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1613 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1625 void tcp_fastopen_destroy_cipher(struct sock *sk);
1627 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1629 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1630 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1635 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1637 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1651 void tcp_fastopen_active_disable(struct sock *sk);
1652 bool tcp_fastopen_active_should_disable(struct sock *sk);
1653 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1654 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1658 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk) in tcp_fastopen_get_ctx() argument
1662 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx); in tcp_fastopen_get_ctx()
1664 ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx); in tcp_fastopen_get_ctx()
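TCP Fast Open keys live in an RCU-managed tcp_fastopen_context, looked up per listener first and per network namespace second, exactly as the two rcu_dereference() fragments suggest. A reconstruction:

    static inline struct tcp_fastopen_context *
    tcp_fastopen_get_ctx(const struct sock *sk)
    {
    	struct tcp_fastopen_context *ctx;

    	/* Prefer a key installed on this listener (TCP_FASTOPEN_KEY);
    	 * fall back to the per-netns tcp_fastopen_key sysctl. */
    	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
    	if (!ctx)
    		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
    	return ctx;
    }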
1696 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1697 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1716 void tcp_write_queue_purge(struct sock *sk);
1718 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk) in tcp_rtx_queue_head() argument
1720 return skb_rb_first(&sk->tcp_rtx_queue); in tcp_rtx_queue_head()
1723 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk) in tcp_rtx_queue_tail() argument
1725 return skb_rb_last(&sk->tcp_rtx_queue); in tcp_rtx_queue_tail()
1728 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) in tcp_write_queue_head() argument
1730 return skb_peek(&sk->sk_write_queue); in tcp_write_queue_head()
1733 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) in tcp_write_queue_tail() argument
1735 return skb_peek_tail(&sk->sk_write_queue); in tcp_write_queue_tail()
1738 #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ argument
1739 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1741 static inline struct sk_buff *tcp_send_head(const struct sock *sk) in tcp_send_head() argument
1743 return skb_peek(&sk->sk_write_queue); in tcp_send_head()
1746 static inline bool tcp_skb_is_last(const struct sock *sk, in tcp_skb_is_last() argument
1749 return skb_queue_is_last(&sk->sk_write_queue, skb); in tcp_skb_is_last()
1752 static inline bool tcp_write_queue_empty(const struct sock *sk) in tcp_write_queue_empty() argument
1754 return skb_queue_empty(&sk->sk_write_queue); in tcp_write_queue_empty()
1757 static inline bool tcp_rtx_queue_empty(const struct sock *sk) in tcp_rtx_queue_empty() argument
1759 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue); in tcp_rtx_queue_empty()
1762 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk) in tcp_rtx_and_write_queues_empty() argument
1764 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk); in tcp_rtx_and_write_queues_empty()
1767 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) in tcp_add_write_queue_tail() argument
1769 __skb_queue_tail(&sk->sk_write_queue, skb); in tcp_add_write_queue_tail()
1772 if (sk->sk_write_queue.next == skb) in tcp_add_write_queue_tail()
1773 tcp_chrono_start(sk, TCP_CHRONO_BUSY); in tcp_add_write_queue_tail()
1779 struct sock *sk) in tcp_insert_write_queue_before() argument
1781 __skb_queue_before(&sk->sk_write_queue, skb, new); in tcp_insert_write_queue_before()
1784 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) in tcp_unlink_write_queue() argument
1787 __skb_unlink(skb, &sk->sk_write_queue); in tcp_unlink_write_queue()
1792 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk) in tcp_rtx_queue_unlink() argument
1795 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue); in tcp_rtx_queue_unlink()
1798 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk) in tcp_rtx_queue_unlink_and_free() argument
1801 tcp_rtx_queue_unlink(skb, sk); in tcp_rtx_queue_unlink_and_free()
1802 sk_wmem_free_skb(sk, skb); in tcp_rtx_queue_unlink_and_free()
1805 static inline void tcp_push_pending_frames(struct sock *sk) in tcp_push_pending_frames() argument
1807 if (tcp_send_head(sk)) { in tcp_push_pending_frames()
1808 struct tcp_sock *tp = tcp_sk(sk); in tcp_push_pending_frames()
1810 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle); in tcp_push_pending_frames()
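Unsent data lives on sk_write_queue (a plain skb list) while sent-but-unacked data lives on the tcp_rtx_queue rb-tree, which is why the accessors above come in two flavours. A hypothetical helper (not part of the header) showing how the rtx queue is walked with them:

    /* Count skbs currently on the retransmit queue; illustration only. */
    static unsigned int tcp_rtx_queue_len(struct sock *sk)
    {
    	struct sk_buff *skb;
    	unsigned int len = 0;

    	for (skb = tcp_rtx_queue_head(sk); skb; skb = skb_rb_next(skb))
    		len++;
    	return len;
    }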
1829 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) in tcp_advance_highest_sack() argument
1831 tcp_sk(sk)->highest_sack = skb_rb_next(skb); in tcp_advance_highest_sack()
1834 static inline struct sk_buff *tcp_highest_sack(struct sock *sk) in tcp_highest_sack() argument
1836 return tcp_sk(sk)->highest_sack; in tcp_highest_sack()
1839 static inline void tcp_highest_sack_reset(struct sock *sk) in tcp_highest_sack_reset() argument
1841 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk); in tcp_highest_sack_reset()
1845 static inline void tcp_highest_sack_replace(struct sock *sk, in tcp_highest_sack_replace() argument
1849 if (old == tcp_highest_sack(sk)) in tcp_highest_sack_replace()
1850 tcp_sk(sk)->highest_sack = new; in tcp_highest_sack_replace()
1854 static inline bool inet_sk_transparent(const struct sock *sk) in inet_sk_transparent() argument
1856 switch (sk->sk_state) { in inet_sk_transparent()
1858 return inet_twsk(sk)->tw_transparent; in inet_sk_transparent()
1860 return inet_rsk(inet_reqsk(sk))->no_srccheck; in inet_sk_transparent()
1862 return inet_sk(sk)->transparent; in inet_sk_transparent()
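inet_sk_transparent() must cope with three socket flavours, since a packet can match a timewait or request socket that lacks the full inet_sock layout. A reconstruction, assuming TCP_TIME_WAIT and TCP_NEW_SYN_RECV as the switch labels behind the matched returns:

    static inline bool inet_sk_transparent(const struct sock *sk)
    {
    	switch (sk->sk_state) {
    	case TCP_TIME_WAIT:
    		return inet_twsk(sk)->tw_transparent;
    	case TCP_NEW_SYN_RECV:
    		return inet_rsk(inet_reqsk(sk))->no_srccheck;
    	}
    	return inet_sk(sk)->transparent;
    }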
1898 void tcp_v4_destroy_sock(struct sock *sk);
1917 static inline bool tcp_stream_memory_free(const struct sock *sk, int wake) in tcp_stream_memory_free() argument
1919 const struct tcp_sock *tp = tcp_sk(sk); in tcp_stream_memory_free()
1931 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
1934 struct sock *sk, struct sk_buff *skb);
1939 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
1943 const struct sock *sk,
1945 int (*md5_parse)(struct sock *sk,
1955 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1959 const struct sock *sk,
1969 struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1973 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1981 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence() argument
1984 tcp_synq_overflow(sk); in cookie_init_sequence()
1985 __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); in cookie_init_sequence()
1990 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence() argument
2003 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2004 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2007 extern void tcp_rack_mark_lost(struct sock *sk);
2010 extern void tcp_rack_reo_timeout(struct sock *sk);
2011 extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2014 static inline s64 tcp_rto_delta_us(const struct sock *sk) in tcp_rto_delta_us() argument
2016 const struct sk_buff *skb = tcp_rtx_queue_head(sk); in tcp_rto_delta_us()
2017 u32 rto = inet_csk(sk)->icsk_rto; in tcp_rto_delta_us()
2020 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; in tcp_rto_delta_us()
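tcp_rto_delta_us() answers how far in the future (or past, if negative) the RTO would fire for the earliest in-flight skb; the loss-probe code consults it when arming timers. A reconstruction completing the fragment:

    static inline s64 tcp_rto_delta_us(const struct sock *sk)
    {
    	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
    	u32 rto = inet_csk(sk)->icsk_rto;
    	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) +
    				jiffies_to_usecs(rto);

    	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
    }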
2059 static inline int tcp_inq(struct sock *sk) in tcp_inq() argument
2061 struct tcp_sock *tp = tcp_sk(sk); in tcp_inq()
2064 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { in tcp_inq()
2066 } else if (sock_flag(sk, SOCK_URGINLINE) || in tcp_inq()
2074 if (answ && sock_flag(sk, SOCK_DONE)) in tcp_inq()
2102 static inline void tcp_listendrop(const struct sock *sk) in tcp_listendrop() argument
2104 atomic_inc(&((struct sock *)sk)->sk_drops); in tcp_listendrop()
2105 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_listendrop()
2122 int (*init)(struct sock *sk);
2124 void (*update)(struct sock *sk, struct proto *p);
2126 void (*release)(struct sock *sk);
2128 int (*get_info)(const struct sock *sk, struct sk_buff *skb);
2129 size_t (*get_info_size)(const struct sock *sk);
2136 int tcp_set_ulp(struct sock *sk, const char *name);
2138 void tcp_cleanup_ulp(struct sock *sk);
2139 void tcp_update_ulp(struct sock *sk, struct proto *p);
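These callbacks form struct tcp_ulp_ops, the hook kTLS uses to layer itself over an established TCP socket. A minimal, hypothetical ULP module (the "example" name and function names are invented); selecting it with setsockopt(IPPROTO_TCP, TCP_ULP, "example") reaches tcp_set_ulp() above:

    #include <linux/module.h>
    #include <net/tcp.h>

    static int example_ulp_init(struct sock *sk)
    {
    	/* A real ULP (e.g. tls) would swap in its own sk->sk_prot here. */
    	return 0;
    }

    static void example_ulp_release(struct sock *sk)
    {
    	/* Undo whatever init did; called from tcp_cleanup_ulp(). */
    }

    static struct tcp_ulp_ops example_ulp_ops __read_mostly = {
    	.name		= "example",
    	.owner		= THIS_MODULE,
    	.init		= example_ulp_init,
    	.release	= example_ulp_release,
    };

    static int __init example_ulp_load(void)
    {
    	return tcp_register_ulp(&example_ulp_ops);
    }

    static void __exit example_ulp_unload(void)
    {
    	tcp_unregister_ulp(&example_ulp_ops);
    }

    module_init(example_ulp_load);
    module_exit(example_ulp_unload);
    MODULE_LICENSE("GPL");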
2148 int tcp_bpf_init(struct sock *sk);
2149 void tcp_bpf_reinit(struct sock *sk);
2150 int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
2152 int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2154 int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
2163 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) in tcp_call_bpf() argument
2169 if (sk_fullsock(sk)) { in tcp_call_bpf()
2171 sock_owned_by_me(sk); in tcp_call_bpf()
2174 sock_ops.sk = sk; in tcp_call_bpf()
2187 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) in tcp_call_bpf_2arg() argument
2191 return tcp_call_bpf(sk, op, 2, args); in tcp_call_bpf_2arg()
2194 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, in tcp_call_bpf_3arg() argument
2199 return tcp_call_bpf(sk, op, 3, args); in tcp_call_bpf_3arg()
2203 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) in tcp_call_bpf() argument
2208 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) in tcp_call_bpf_2arg() argument
2213 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, in tcp_call_bpf_3arg() argument
2221 static inline u32 tcp_timeout_init(struct sock *sk) in tcp_timeout_init() argument
2225 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL); in tcp_timeout_init()
2232 static inline u32 tcp_rwnd_init_bpf(struct sock *sk) in tcp_rwnd_init_bpf() argument
2236 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL); in tcp_rwnd_init_bpf()
2243 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) in tcp_bpf_ca_needs_ecn() argument
2245 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1); in tcp_bpf_ca_needs_ecn()
2248 static inline void tcp_bpf_rtt(struct sock *sk) in tcp_bpf_rtt() argument
2250 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG)) in tcp_bpf_rtt()
2251 tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL); in tcp_bpf_rtt()
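tcp_call_bpf() runs the BPF_PROG_TYPE_SOCK_OPS program attached to the socket's cgroup (the stub variants above simply report failure when BPF is compiled out), and the small *_init helpers fall back to compiled-in defaults when the program declines. A reconstruction of tcp_timeout_init() around the matched call:

    static inline u32 tcp_timeout_init(struct sock *sk)
    {
    	int timeout;

    	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);

    	/* No program, or a non-positive answer: use the default. */
    	if (timeout <= 0)
    		timeout = TCP_TIMEOUT_INIT;
    	return timeout;
    }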
2260 void (*cad)(struct sock *sk, u32 ack_seq));
2276 static inline u64 tcp_transmit_time(const struct sock *sk) in tcp_transmit_time() argument
2279 u32 delay = (sk->sk_state == TCP_TIME_WAIT) ? in tcp_transmit_time()
2280 tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay; in tcp_transmit_time()
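tcp_transmit_time() supports the TCP_TX_DELAY socket option: when the feature's static key is enabled it returns an absolute transmit timestamp, delaying packets by a per-socket (or per-timewait) microsecond value. A reconstruction, assuming the tcp_tx_delay_enabled static branch guards the matched fragment:

    static inline u64 tcp_transmit_time(const struct sock *sk)
    {
    	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
    		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
    			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

    		/* Absolute time in ns at which to release the packet. */
    		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
    	}
    	return 0;
    }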