/Linux-v5.4/include/net/

D  inet_connection_sock.h
    149  static inline struct inet_connection_sock *inet_csk(const struct sock *sk)   in inet_csk() function
    156  return (void *)inet_csk(sk)->icsk_ca_priv;   in inet_csk_ca()
    179  inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;   in inet_csk_schedule_ack()
    184  return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;   in inet_csk_ack_scheduled()
    189  memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));   in inet_csk_delack_init()
    197  struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_clear_xmit_timer()
    221  struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_reset_xmit_timer()
    274  reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);   in inet_csk_reqsk_queue_added()
    279  return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);   in inet_csk_reqsk_queue_len()
    298  return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?   in inet_csk_listen_poll()
    [all …]
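Taken together, these header hits show the core accessor pattern: `inet_csk()` is a pure pointer cast, valid because `struct inet_connection_sock` begins with `struct inet_sock`, which in turn begins with `struct sock`. A minimal sketch of the two accessors, reassembled from the hits at lines 149 and 156 (abridged; this matches the v5.4 `include/net/inet_connection_sock.h`, so no extra includes are shown to avoid redefinition):

```c
/* The cast is safe: struct inet_connection_sock starts with
 * struct inet_sock, whose first member is struct sock. */
static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
	return (struct inet_connection_sock *)sk;
}

/* Scratch area reserved in the icsk for the congestion-control module. */
static inline void *inet_csk_ca(const struct sock *sk)
{
	return (void *)inet_csk(sk)->icsk_ca_priv;
}
```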
|
D  tcp.h
    349  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_dec_quickack_mode()
    651  if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)   in tcp_bound_rto()
    652  inet_csk(sk)->icsk_rto = TCP_RTO_MAX;   in tcp_bound_rto()
    1100  const struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_ca_needs_ecn()
    1107  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_set_ca_state()
    1116  const struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_ca_event()
    1186  (1 << inet_csk(sk)->icsk_ca_state);   in tcp_in_cwnd_reduction()
    1291  return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);   in tcp_probe0_base()
    1298  u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;   in tcp_probe0_when()
    1305  if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)   in tcp_check_probe_timer()
    [all …]
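The two tcp_bound_rto() hits reassemble into a complete helper; a sketch mirroring the v5.4 `include/net/tcp.h` lines 651-652:

```c
/* Reassembled from the tcp.h hits at lines 651-652: clamp the
 * retransmission timeout to the global ceiling TCP_RTO_MAX. */
static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}
```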
|
/Linux-v5.4/net/ipv4/

D  tcp_recovery.c
    32  if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)   in tcp_rack_reo_wnd()
    127  timeout, inet_csk(sk)->icsk_rto);   in tcp_rack_mark_lost()
    174  if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {   in tcp_rack_reo_timeout()
    176  if (!inet_csk(sk)->icsk_ca_ops->cong_control)   in tcp_rack_reo_timeout()
    181  if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)   in tcp_rack_reo_timeout()
    233  const u8 state = inet_csk(sk)->icsk_ca_state;   in tcp_newreno_mark_lost()
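A pattern worth noting in these hits: RACK consults the congestion-avoidance state machine through the icsk rather than through tcp_sock. A hedged sketch of the gate at line 32 (hypothetical helper name; not the full tcp_rack_reo_wnd() logic):

```c
#include <net/tcp.h>

/* Hypothetical helper illustrating the check at tcp_recovery.c:32:
 * TCP_CA_Recovery and TCP_CA_Loss are the two highest CA states, so
 * ">= TCP_CA_Recovery" means "currently recovering from loss". */
static bool example_rack_in_recovery(const struct sock *sk)
{
	return inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery;
}
```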
|
D  inet_connection_sock.c
    374  if (!inet_csk(sk)->icsk_bind_hash)   in inet_csk_get_port()
    376  WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);   in inet_csk_get_port()
    391  struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_wait_for_connect()
    439  struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_accept()
    508  struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_init_xmit_timers()
    519  struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_clear_xmit_timers()
    672  reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);   in inet_csk_reqsk_queue_drop()
    690  struct inet_connection_sock *icsk = inet_csk(sk_listener);   in reqsk_timer_handler()
    788  struct inet_connection_sock *newicsk = inet_csk(newsk);   in inet_csk_clone_lock()
    834  WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);   in inet_csk_destroy_sock()
    [all …]
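inet_csk_init_xmit_timers() (line 508) is where the per-connection protocol timers get wired into the icsk. A hedged sketch of that pattern, assuming v5.4 field names (example_* is hypothetical; the real function also sets up the delayed-ACK and keepalive timers):

```c
#include <net/inet_connection_sock.h>
#include <linux/timer.h>

/* Hypothetical reduction of the inet_csk_init_xmit_timers() pattern:
 * protocol timers live inside the icsk and are armed with timer_setup(). */
static void example_init_retransmit_timer(struct sock *sk,
					  void (*handler)(struct timer_list *))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	timer_setup(&icsk->icsk_retransmit_timer, handler, 0);
	icsk->icsk_pending = 0;		/* no timer event scheduled yet */
}
```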
|
D  tcp_ulp.c
    101  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_update_ulp()
    114  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_cleanup_ulp()
    132  struct inet_connection_sock *icsk = inet_csk(sk);   in __tcp_set_ulp()
|
D  tcp_dctcp.h
    29  if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {   in dctcp_ece_ack_update()
    33  inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;   in dctcp_ece_ack_update()
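The two hits reassemble into DCTCP's delayed-ACK override. A sketch of the idiom with the surrounding CE-state bookkeeping elided (hypothetical helper name):

```c
#include <net/inet_connection_sock.h>

/* Hypothetical reduction of dctcp_ece_ack_update(): if a delayed-ACK
 * timer is pending, flag an immediate ACK so the ECN echo reaches the
 * sender without the delayed-ACK latency. */
static void example_force_ecn_ack(struct sock *sk)
{
	if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
}
```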
|
D  tcp_timer.c
    28  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_clamp_rto_to_user_timeout()
    197  if (!inet_csk(sk)->icsk_retransmits)   in retransmits_timed_out()
    215  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_write_timeout()
    273  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_delack_timer_handler()
    340  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_probe_timer()
    391  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_fastopen_synack_timer()
    435  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_retransmit_timer()
    575  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_write_timer_handler()
    653  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_keepalive_timer()
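Every handler here opens the same way: resolve the icsk once, then work through the local pointer. As a sketch, the exponential RTO backoff from the retransmit path (example_* is hypothetical; the min() clamp is the same statement tcp_retransmit_timer() uses):

```c
#include <net/tcp.h>

/* Hypothetical skeleton of a tcp_timer.c handler body: take the icsk
 * once, then back off the RTO exponentially, clamped at TCP_RTO_MAX. */
static void example_backoff_rto(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_retransmits++;
	icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
}
```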
|
D  inet_hashtables.c
    97  inet_csk(sk)->icsk_bind_hash = tb;   in inet_bind_hash()
    112  tb = inet_csk(sk)->icsk_bind_hash;   in __inet_put_port()
    114  inet_csk(sk)->icsk_bind_hash = NULL;   in __inet_put_port()
    139  tb = inet_csk(sk)->icsk_bind_hash;   in __inet_inherit_port()
    203  hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,   in inet_hash2()
    206  hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,   in inet_hash2()
    217  WARN_ON_ONCE(hlist_unhashed(&inet_csk(sk)->icsk_listen_portaddr_node)))   in inet_unhash2()
    223  hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);   in inet_unhash2()
    518  struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;   in inet_reuseport_add_sock()
    527  inet_csk(sk2)->icsk_bind_hash == tb &&   in inet_reuseport_add_sock()
    [all …]
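The bind-bucket hits are symmetric: binding caches the bucket pointer in the icsk, and releasing the port clears it. A sketch of the pair (hypothetical names; the real functions also maintain the bucket's owner list and refcounting):

```c
#include <net/inet_hashtables.h>

/* Hypothetical reduction of inet_bind_hash() / __inet_put_port():
 * the icsk caches which local-port bucket owns this socket. */
static void example_bind_hash(struct sock *sk, struct inet_bind_bucket *tb)
{
	inet_csk(sk)->icsk_bind_hash = tb;	/* as at line 97 */
}

static void example_put_port(struct sock *sk)
{
	inet_csk(sk)->icsk_bind_hash = NULL;	/* as at line 114 */
}
```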
|
D  tcp_cong.c
    160  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_assign_congestion_control()
    179  const struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_init_congestion_control()
    193  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_reinit_congestion_control()
    207  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_cleanup_congestion_control()
    339  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_set_congestion_control()
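All five hits manage the same two icsk fields: icsk_ca_ops points at the active algorithm, and icsk_ca_priv (reached via inet_csk_ca()) holds its private state. A hedged sketch of the install-then-init sequence (hypothetical name; the real functions do additional bookkeeping such as module refcounting):

```c
#include <net/tcp.h>

/* Hypothetical reduction of the tcp_cong.c pattern: install an
 * algorithm's ops vector, zero its private scratch area, then let it
 * initialize itself. */
static void example_install_ca(struct sock *sk,
			       const struct tcp_congestion_ops *ops)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ca_ops = ops;
	memset(inet_csk_ca(sk), 0, sizeof(icsk->icsk_ca_priv));
	if (ops->init)
		ops->init(sk);
}
```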
|
D  tcp_output.c
    66  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_event_new_data_sent()
    148  while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)   in tcp_cwnd_restart()
    159  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_event_data_sent()
    892  inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);   in tcp_release_cb()
    1016  const struct inet_connection_sock *icsk = inet_csk(sk);   in __tcp_transmit_skb()
    1462  const struct inet_connection_sock *icsk = inet_csk(sk);   in __tcp_mtu_to_mss()
    1502  const struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_mss_to_mtu()
    1525  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_mtup_init()
    1563  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_sync_mss()
    1598  if (mtu != inet_csk(sk)->icsk_pmtu_cookie)   in tcp_current_mss()
    [all …]
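The MTU-related hits pivot on icsk_pmtu_cookie, the cached path MTU. The check at line 1598 is the telling one: tcp_current_mss() resynchronizes the MSS whenever the route's MTU no longer matches the cookie. A sketch (hypothetical wrapper):

```c
#include <net/tcp.h>

/* Hypothetical reduction of the check in tcp_current_mss()
 * (tcp_output.c:1598): if the cached path-MTU cookie is stale,
 * recompute the MSS from the new MTU via tcp_sync_mss(). */
static void example_refresh_mss(struct sock *sk, u32 mtu)
{
	if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
		tcp_sync_mss(sk, mtu);
}
```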
|
D  tcp_fastopen.c
    49  inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);   in tcp_fastopen_destroy_cipher()
    94  q = &inet_csk(sk)->icsk_accept_queue.fastopenq;   in tcp_fastopen_reset_cipher()
    237  struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;   in tcp_fastopen_create_child()
    241  child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,   in tcp_fastopen_create_child()
    303  fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;   in tcp_fastopen_queue_check()
    550  u32 timeouts = inet_csk(sk)->icsk_retransmits;   in tcp_fastopen_active_detect_blackhole()
|
D  tcp_input.c
    164  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_measure_rcv_mss()
    215  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_incr_quickack()
    227  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_enter_quickack_mode()
    241  const struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_in_quickack_mode()
    263  inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;   in tcp_ecn_accept_cwr()
    337  const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;   in tcp_sndbuf_expand()
    401  return 2 * inet_csk(sk)->icsk_ack.rcv_mss;   in __tcp_grow_window()
    431  inet_csk(sk)->icsk_ack.quick |= 1;   in tcp_grow_window()
    478  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_clamp_window()
    511  inet_csk(sk)->icsk_ack.rcv_mss = hint;   in tcp_initialize_rcv_mss()
    [all …]
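The quick-ACK hits all poke the icsk_ack sub-struct. Two of the statements above combine into a plausible "ACK now" sketch (hypothetical helper; the real quickack entry points also adjust the ACK timeout and ping-pong mode):

```c
#include <net/inet_connection_sock.h>

/* Hypothetical combination of two statements from the hits above:
 * grant at least one quick ACK (tcp_grow_window(), line 431) and
 * flag an immediate ACK (tcp_ecn_accept_cwr(), line 263). */
static void example_ack_now(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick |= 1;
	icsk->icsk_ack.pending |= ICSK_ACK_NOW;
}
```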
|
D  tcp_dctcp.c
    98  inet_csk(sk)->icsk_ca_ops = &dctcp_reno;   in dctcp_init()
    156  new_state != inet_csk(sk)->icsk_ca_state)   in dctcp_state()
    193  if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) {   in dctcp_get_info()
|
D  tcp_minisocks.c
    255  const struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_time_wait()
    408  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_ca_openreq_child()
    470  newicsk = inet_csk(newsk);   in tcp_create_openreq_child()
    757  if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&   in tcp_check_req()
    770  child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,   in tcp_check_req()
|
D  tcp_diag.c
    115  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_diag_get_aux()
    146  struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_diag_get_aux_size()
|
D  tcp_htcp.c
    84  const struct inet_connection_sock *icsk = inet_csk(sk);   in measure_rtt()
    104  const struct inet_connection_sock *icsk = inet_csk(sk);   in measure_achieved_throughput()
|
/Linux-v5.4/net/dccp/

D  output.c
    46  const struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_transmit_skb()
    161  struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_sync_mss()
    268  inet_csk(sk)->icsk_rto,   in dccp_xmit_packet()
    381  if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)   in dccp_retransmit_skb()
    385  inet_csk(sk)->icsk_retransmits++;   in dccp_retransmit_skb()
    512  int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);   in dccp_send_reset()
    537  struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_connect()
    581  inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;   in dccp_send_ack()
    601  struct inet_connection_sock *icsk = inet_csk(sk);
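Note how DCCP reuses the TCP connection-sock machinery wholesale, including icsk_af_ops for the address-family split. A sketch of that indirection, per the hits at lines 381 and 512 (hypothetical wrapper):

```c
#include <net/inet_connection_sock.h>

/* Hypothetical reduction of the dccp_retransmit_skb() / dccp_send_reset()
 * pattern: family-specific header rebuilding sits behind icsk_af_ops,
 * so common DCCP code never tests IPv4 vs IPv6 directly. */
static int example_rebuild_header(struct sock *sk)
{
	return inet_csk(sk)->icsk_af_ops->rebuild_header(sk);
}
```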
|
D  minisocks.c
    38  const struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_time_wait()
    91  struct inet_connection_sock *newicsk = inet_csk(newsk);   in dccp_create_openreq_child()
    192  child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,   in dccp_check_req()
|
D  timer.c
    33  const struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_write_timeout()
    85  struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_retransmit_timer()
|
D  ipv6.c
    149  if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))   in dccp_v6_err()
    429  inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;   in dccp_v6_request_recv_sock()
    449  dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);   in dccp_v6_request_recv_sock()
    521  inet_csk(newsk)->icsk_ext_hdr_len = 0;   in dccp_v6_request_recv_sock()
    523  inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +   in dccp_v6_request_recv_sock()
    807  struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_v6_connect()
    1010  inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;   in dccp_v6_init_sock()
|
D  proto.c
    101  if (inet_csk(sk)->icsk_bind_hash != NULL &&   in dccp_set_state()
    186  struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_init_sock()
    222  if (inet_csk(sk)->icsk_bind_hash != NULL)   in dccp_destroy_sock()
    260  struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_disconnect()
    570  return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,   in dccp_setsockopt()
    691  return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,   in dccp_getsockopt()
|
D  diag.c
    19  const struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_get_info()
|
D  ipv4.c
    92  inet_csk(sk)->icsk_ext_hdr_len = 0;   in dccp_v4_connect()
    94  inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;   in dccp_v4_connect()
    172  inet_csk(sk)->icsk_pmtu_cookie > mtu) {   in dccp_do_pmtu_discovery()
    928  inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;   in dccp_v4_init_sock()
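Both DCCP connect paths (here and in ipv6.c above) account for IP options and extension headers through icsk_ext_hdr_len, which later feeds the MPS computation in dccp_sync_mss(). A sketch (hypothetical helper):

```c
#include <net/inet_connection_sock.h>

/* Hypothetical reduction of the connect-path hits: record how many
 * bytes of IP options (or IPv6 extension headers) each packet will
 * carry, so the MSS/MPS math can subtract them. */
static void example_set_ext_hdr_len(struct sock *sk, u16 optlen)
{
	inet_csk(sk)->icsk_ext_hdr_len = optlen;	/* 0 when no options */
}
```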
|
/Linux-v5.4/net/core/

D  request_sock.c
    97  fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;   in reqsk_fastopen_remove()
|
/Linux-v5.4/net/tls/

D  tls_main.c
    302  struct inet_connection_sock *icsk = inet_csk(sk);   in tls_sk_proto_close()
    609  struct inet_connection_sock *icsk = inet_csk(sk);   in create_ctx()
    651  struct inet_connection_sock *icsk = inet_csk(sk);   in tls_hw_sk_destruct()
    833  ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);   in tls_get_info()
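kTLS stores its per-socket context in the icsk's ULP slot. The line-833 hit shows the lockless read side; a sketch, assuming an RCU read-side critical section is held by the caller (hypothetical helper, returning void * to stay self-contained):

```c
#include <net/inet_connection_sock.h>
#include <linux/rcupdate.h>

/* Hypothetical reduction of the tls_get_info() read (tls_main.c:833):
 * icsk_ulp_data is RCU-protected in v5.4, so lockless readers fetch
 * it with rcu_dereference() under rcu_read_lock(). */
static void *example_get_ulp_ctx(const struct sock *sk)
{
	return rcu_dereference(inet_csk(sk)->icsk_ulp_data);
}
```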
|