Searched refs:inet_csk (Results 1 – 25 of 48) sorted by relevance

/Linux-v4.19/include/net/
inet_connection_sock.h
153 static inline struct inet_connection_sock *inet_csk(const struct sock *sk) in inet_csk() function
160 return (void *)inet_csk(sk)->icsk_ca_priv; in inet_csk_ca()
183 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED; in inet_csk_schedule_ack()
188 return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED; in inet_csk_ack_scheduled()
193 memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); in inet_csk_delack_init()
201 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timer()
225 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_reset_xmit_timer()
278 reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_added()
283 return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_len()
302 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? in inet_csk_listen_poll()
tcp.h
350 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_dec_quickack_mode()
640 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) in tcp_bound_rto()
641 inet_csk(sk)->icsk_rto = TCP_RTO_MAX; in tcp_bound_rto()
1077 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_needs_ecn()
1084 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_set_ca_state()
1093 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_event()
1163 (1 << inet_csk(sk)->icsk_ca_state); in tcp_in_cwnd_reduction()
1245 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN); in tcp_probe0_base()
1252 u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff; in tcp_probe0_when()
1259 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) in tcp_check_probe_timer()
[all …]
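
The include/net/ hits above show the basic inet_csk() pattern: struct inet_connection_sock embeds the generic socket at its start, so the accessor boils down to a pointer cast, and callers then read or write icsk_* fields (icsk_ack, icsk_rto, icsk_accept_queue, ...) directly on the same object. The userspace sketch below mirrors that layout only for illustration; the *_stub names and fields are simplified stand-ins, not the kernel definitions.

/* Minimal userspace sketch of the inet_csk()-style accessor: the
 * connection-level struct embeds the generic socket as its first member,
 * so converting a socket pointer back to the outer struct is a plain cast.
 * Names below are illustrative stand-ins, not the kernel's definitions. */
#include <stdio.h>

struct sock_stub {                /* stands in for struct sock           */
	int sk_state;
};

struct icsk_stub {                /* stands in for inet_connection_sock  */
	struct sock_stub sk;      /* must be the first member            */
	unsigned long icsk_rto;   /* e.g. retransmission timeout         */
	unsigned char icsk_ca_state;
};

static inline struct icsk_stub *icsk_of(const struct sock_stub *sk)
{
	return (struct icsk_stub *)sk;   /* same trick as inet_csk(sk)   */
}

int main(void)
{
	struct icsk_stub conn = { .icsk_rto = 200, .icsk_ca_state = 0 };
	struct sock_stub *sk = &conn.sk;     /* code mostly passes sock * */

	icsk_of(sk)->icsk_rto += 100;        /* field access via the cast */
	printf("rto=%lu ca_state=%u\n",
	       icsk_of(sk)->icsk_rto, (unsigned)icsk_of(sk)->icsk_ca_state);
	return 0;
}

The cast is only safe because the connection-level struct is guaranteed to start with the generic socket, which is how sock, inet_sock, inet_connection_sock and tcp_sock nest in the kernel.
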
/Linux-v4.19/net/ipv4/
tcp_dctcp.c
108 inet_csk(sk)->icsk_ca_ops = &dctcp_reno; in dctcp_init()
137 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) in dctcp_ce_state_0_to_1()
139 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; in dctcp_ce_state_0_to_1()
158 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) in dctcp_ce_state_1_to_0()
160 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; in dctcp_ce_state_1_to_0()
179 acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss; in dctcp_update_alpha()
257 if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) { in dctcp_get_info()
tcp_recovery.c
32 if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery) in tcp_rack_reo_wnd()
126 timeout, inet_csk(sk)->icsk_rto); in tcp_rack_mark_lost()
173 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) { in tcp_rack_reo_timeout()
175 if (!inet_csk(sk)->icsk_ca_ops->cong_control) in tcp_rack_reo_timeout()
180 if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS) in tcp_rack_reo_timeout()
232 const u8 state = inet_csk(sk)->icsk_ca_state; in tcp_newreno_mark_lost()
inet_connection_sock.c
371 if (!inet_csk(sk)->icsk_bind_hash) in inet_csk_get_port()
373 WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); in inet_csk_get_port()
388 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_wait_for_connect()
436 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_accept()
505 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_init_xmit_timers()
516 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timers()
669 if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) { in inet_csk_reqsk_queue_drop()
670 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); in inet_csk_reqsk_queue_drop()
688 struct inet_connection_sock *icsk = inet_csk(sk_listener); in reqsk_timer_handler()
790 struct inet_connection_sock *newicsk = inet_csk(newsk); in inet_csk_clone_lock()
[all …]
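
Several of the hits in inet_connection_sock.h and inet_connection_sock.c above (inet_csk_reqsk_queue_added(), reqsk_queue_len(), inet_csk_accept(), ...) reach the listener's queue of pending connection requests through inet_csk(sk)->icsk_accept_queue. The mock below sketches only that bookkeeping in plain C; the names are simplified stand-ins, and the real helpers use atomics and locking.

/* Userspace mock of the icsk_accept_queue bookkeeping seen in the
 * inet_csk_reqsk_queue_*() helpers: the listener carries a request
 * queue, and small helpers adjust or read its length.
 * All names are illustrative stand-ins, not kernel definitions. */
#include <stdio.h>

struct reqsk_queue_stub {
	int qlen;                        /* queued, not-yet-accepted requests */
};

struct listener_stub {
	struct reqsk_queue_stub icsk_accept_queue;
};

static void reqsk_queue_added(struct reqsk_queue_stub *q)       { q->qlen++; }
static void reqsk_queue_removed(struct reqsk_queue_stub *q)     { q->qlen--; }
static int  reqsk_queue_len(const struct reqsk_queue_stub *q)   { return q->qlen; }
static int  reqsk_queue_empty(const struct reqsk_queue_stub *q) { return q->qlen == 0; }

int main(void)
{
	struct listener_stub lsk = { { 0 } };

	reqsk_queue_added(&lsk.icsk_accept_queue);      /* new request queued     */
	printf("len=%d empty=%d\n",
	       reqsk_queue_len(&lsk.icsk_accept_queue),
	       reqsk_queue_empty(&lsk.icsk_accept_queue));

	reqsk_queue_removed(&lsk.icsk_accept_queue);    /* request removed again  */
	printf("len=%d empty=%d\n",
	       reqsk_queue_len(&lsk.icsk_accept_queue),
	       reqsk_queue_empty(&lsk.icsk_accept_queue));
	return 0;
}
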
tcp_timer.c
41 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_rto_to_user_timeout()
195 if (!inet_csk(sk)->icsk_retransmits) in retransmits_timed_out()
218 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timeout()
275 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_delack_timer_handler()
342 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_probe_timer()
393 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastopen_synack_timer()
433 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_retransmit_timer()
574 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timer_handler()
652 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_keepalive_timer()
tcp_ulp.c
124 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cleanup_ulp()
139 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_set_ulp()
167 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_set_ulp_id()
tcp_fastopen.c
49 inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1); in tcp_fastopen_destroy_cipher()
98 q = &inet_csk(sk)->icsk_accept_queue.fastopenq; in tcp_fastopen_reset_cipher()
122 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx); in __tcp_fastopen_cookie_gen()
220 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; in tcp_fastopen_create_child()
228 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, in tcp_fastopen_create_child()
290 fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq; in tcp_fastopen_queue_check()
523 u32 timeouts = inet_csk(sk)->icsk_retransmits; in tcp_fastopen_active_detect_blackhole()
inet_hashtables.c
99 inet_csk(sk)->icsk_bind_hash = tb; in inet_bind_hash()
114 tb = inet_csk(sk)->icsk_bind_hash; in __inet_put_port()
116 inet_csk(sk)->icsk_bind_hash = NULL; in __inet_put_port()
140 tb = inet_csk(sk)->icsk_bind_hash; in __inet_inherit_port()
201 hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node, in inet_hash2()
204 hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node, in inet_hash2()
215 WARN_ON_ONCE(hlist_unhashed(&inet_csk(sk)->icsk_listen_portaddr_node))) in inet_unhash2()
221 hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node); in inet_unhash2()
562 struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; in inet_reuseport_add_sock()
571 inet_csk(sk2)->icsk_bind_hash == tb && in inet_reuseport_add_sock()
[all …]
tcp_cong.c
159 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_assign_congestion_control()
178 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_congestion_control()
192 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_reinit_congestion_control()
206 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cleanup_congestion_control()
337 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_set_congestion_control()
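
The tcp_cong.c, tcp_dctcp.c and tcp.h hits illustrate the pluggable congestion-control indirection: an ops table hangs off inet_csk(sk)->icsk_ca_ops (dctcp_init() even falls back to &dctcp_reno), per-algorithm state lives in the icsk_ca_priv scratch area (see inet_csk_ca() at inet_connection_sock.h:160), and generic TCP code only calls through the ops pointers. A self-contained sketch of that shape follows; the toy_* names and callbacks are made up for illustration.

/* Userspace sketch of the icsk_ca_ops indirection: generic code calls
 * through an ops table attached to the connection socket, and each
 * algorithm keeps private state in a small scratch area (icsk_ca_priv
 * in the kernel).  Names are illustrative, not the kernel's. */
#include <stdio.h>
#include <string.h>

struct conn_stub;

struct ca_ops_stub {
	const char *name;
	void (*init)(struct conn_stub *c);
	void (*on_ack)(struct conn_stub *c, int acked);
};

struct conn_stub {
	const struct ca_ops_stub *icsk_ca_ops;
	unsigned char icsk_ca_priv[16];         /* per-algorithm scratch   */
	unsigned int snd_cwnd;
};

/* A toy algorithm: counts ACKs in its private area, grows cwnd naively. */
struct toy_priv { int acks; };

static void toy_init(struct conn_stub *c)
{
	memset(c->icsk_ca_priv, 0, sizeof(c->icsk_ca_priv));
}

static void toy_on_ack(struct conn_stub *c, int acked)
{
	struct toy_priv *p = (struct toy_priv *)c->icsk_ca_priv;

	p->acks += acked;
	c->snd_cwnd++;                          /* demo only               */
}

static const struct ca_ops_stub toy_ops = {
	.name = "toy", .init = toy_init, .on_ack = toy_on_ack,
};

int main(void)
{
	struct conn_stub c = { .icsk_ca_ops = &toy_ops, .snd_cwnd = 10 };

	c.icsk_ca_ops->init(&c);        /* like tcp_init_congestion_control() */
	c.icsk_ca_ops->on_ack(&c, 3);   /* generic code dispatches via ops    */
	printf("%s: cwnd=%u\n", c.icsk_ca_ops->name, c.snd_cwnd);
	return 0;
}
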
tcp_output.c
54 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_new_data_sent()
136 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
147 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_sent()
894 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
1019 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_transmit_skb()
1436 const struct inet_connection_sock *icsk = inet_csk(sk); in __tcp_mtu_to_mss()
1477 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mss_to_mtu()
1500 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_init()
1538 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_sync_mss()
1573 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
[all …]
tcp_input.c
158 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss()
209 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack()
221 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode()
235 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode()
257 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; in tcp_ecn_accept_cwr()
331 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_sndbuf_expand()
394 return 2 * inet_csk(sk)->icsk_ack.rcv_mss; in __tcp_grow_window()
424 inet_csk(sk)->icsk_ack.quick |= 1; in tcp_grow_window()
492 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window()
524 inet_csk(sk)->icsk_ack.rcv_mss = hint; in tcp_initialize_rcv_mss()
[all …]
tcp_minisocks.c
254 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_time_wait()
400 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_openreq_child()
461 newicsk = inet_csk(newsk); in tcp_create_openreq_child()
773 if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && in tcp_check_req()
786 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, in tcp_check_req()
tcp_htcp.c
83 const struct inet_connection_sock *icsk = inet_csk(sk); in measure_rtt()
103 const struct inet_connection_sock *icsk = inet_csk(sk); in measure_achieved_throughput()
tcp_rate.c
164 inet_csk(sk)->icsk_ca_state, in tcp_rate_gen()
tcp_ipv4.c
267 inet_csk(sk)->icsk_ext_hdr_len = 0; in tcp_v4_connect()
269 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; in tcp_v4_connect()
363 inet_csk(sk)->icsk_pmtu_cookie > mtu) { in tcp_v4_mtu_reduced()
482 icsk = inet_csk(sk); in tcp_v4_err()
1435 inet_csk(newsk)->icsk_ext_hdr_len = 0; in tcp_v4_syn_recv_sock()
1437 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; in tcp_v4_syn_recv_sock()
1957 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_v4_init_sock()
2001 if (inet_csk(sk)->icsk_bind_hash) in tcp_v4_destroy_sock()
2305 const struct inet_connection_sock *icsk = inet_csk(sk); in get_tcp4_sock()
/Linux-v4.19/net/dccp/
output.c
50 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_transmit_skb()
165 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_sync_mss()
272 inet_csk(sk)->icsk_rto, in dccp_xmit_packet()
385 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0) in dccp_retransmit_skb()
389 inet_csk(sk)->icsk_retransmits++; in dccp_retransmit_skb()
516 int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk); in dccp_send_reset()
541 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_connect()
585 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; in dccp_send_ack()
605 struct inet_connection_sock *icsk = inet_csk(sk);
minisocks.c
42 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_time_wait()
95 struct inet_connection_sock *newicsk = inet_csk(newsk); in dccp_create_openreq_child()
196 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, in dccp_check_req()
timer.c
37 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_write_timeout()
89 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_retransmit_timer()
ipv6.c
151 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) in dccp_v6_err()
429 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped; in dccp_v6_request_recv_sock()
449 dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); in dccp_v6_request_recv_sock()
521 inet_csk(newsk)->icsk_ext_hdr_len = 0; in dccp_v6_request_recv_sock()
523 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + in dccp_v6_request_recv_sock()
807 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_v6_connect()
1010 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops; in dccp_v6_init_sock()
proto.c
104 if (inet_csk(sk)->icsk_bind_hash != NULL && in dccp_set_state()
189 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_init_sock()
225 if (inet_csk(sk)->icsk_bind_hash != NULL) in dccp_destroy_sock()
263 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_disconnect()
574 return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level, in dccp_setsockopt()
695 return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level, in dccp_getsockopt()
diag.c
22 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_get_info()
ipv4.c
96 inet_csk(sk)->icsk_ext_hdr_len = 0; in dccp_v4_connect()
98 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; in dccp_v4_connect()
176 inet_csk(sk)->icsk_pmtu_cookie > mtu) { in dccp_do_pmtu_discovery()
929 inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops; in dccp_v4_init_sock()
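
A second ops table shows up throughout the net/dccp/ hits: inet_csk(sk)->icsk_af_ops is a per-address-family vtable, installed once at socket init (dccp_v4_init_sock() sets &dccp_ipv4_af_ops, dccp_v6_init_sock() sets &dccp_ipv6_af_ops, and dccp_v6_request_recv_sock() can swap in &dccp_ipv6_mapped), after which family-agnostic code calls ->rebuild_header(), ->setsockopt() or ->syn_recv_sock() without caring which family it is. The stand-alone sketch below shows only that dispatch pattern; the struct and function names are stand-ins.

/* Userspace sketch of the icsk_af_ops pattern: the socket carries a
 * pointer to address-family operations chosen at init time, and shared
 * protocol code dispatches through it.  Names are stand-ins. */
#include <stdio.h>

struct sock_stub;

struct af_ops_stub {
	const char *family;
	int (*rebuild_header)(struct sock_stub *sk);
};

struct sock_stub {
	const struct af_ops_stub *icsk_af_ops;
};

static int v4_rebuild_header(struct sock_stub *sk)
{
	(void)sk;
	printf("rebuilding an IPv4 header\n");
	return 0;
}

static int v6_rebuild_header(struct sock_stub *sk)
{
	(void)sk;
	printf("rebuilding an IPv6 header\n");
	return 0;
}

static const struct af_ops_stub v4_af_ops = { "ipv4", v4_rebuild_header };
static const struct af_ops_stub v6_af_ops = { "ipv6", v6_rebuild_header };

/* Shared retransmit path: family-agnostic, like dccp_retransmit_skb(). */
static int retransmit(struct sock_stub *sk)
{
	return sk->icsk_af_ops->rebuild_header(sk);
}

int main(void)
{
	struct sock_stub s4 = { &v4_af_ops };   /* init picks the ops table */
	struct sock_stub s6 = { &v6_af_ops };

	retransmit(&s4);
	retransmit(&s6);
	return 0;
}
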
/Linux-v4.19/net/core/
request_sock.c
101 fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq; in reqsk_fastopen_remove()
/Linux-v4.19/net/ipv6/
tcp_ipv6.c
140 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_v6_connect()
346 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { in tcp_v6_mtu_reduced()
1100 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; in tcp_v6_syn_recv_sock()
1127 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); in tcp_v6_syn_recv_sock()
1204 inet_csk(newsk)->icsk_ext_hdr_len = 0; in tcp_v6_syn_recv_sock()
1206 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + in tcp_v6_syn_recv_sock()
1747 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_v6_init_sock()
1808 const struct inet_connection_sock *icsk = inet_csk(sp); in get_tcp6_sock()
