Lines matching refs:sk in include/net/inet_connection_sock.h (file line numbers and enclosing functions shown)
35 int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
36 void (*send_check)(struct sock *sk, struct sk_buff *skb);
37 int (*rebuild_header)(struct sock *sk);
38 void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
39 int (*conn_request)(struct sock *sk, struct sk_buff *skb);
40 struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
48 int (*setsockopt)(struct sock *sk, int level, int optname,
50 int (*getsockopt)(struct sock *sk, int level, int optname,
52 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
53 void (*mtu_reduced)(struct sock *sk);
96 void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
98 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
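The function pointers above belong to struct inet_connection_sock_af_ops (the icsk_clean_acked and icsk_sync_mss hooks live directly in struct inet_connection_sock); each address family supplies an instance of the ops table. A trimmed sketch of the IPv4 TCP one, modeled on ipv4_specific in net/ipv4/tcp_ipv4.c (fields abridged):

	const struct inet_connection_sock_af_ops ipv4_specific = {
		.queue_xmit	= ip_queue_xmit,
		.send_check	= tcp_v4_send_check,
		.rebuild_header	= inet_sk_rebuild_header,
		.sk_rx_dst_set	= inet_sk_rx_dst_set,
		.conn_request	= tcp_v4_conn_request,
		.syn_recv_sock	= tcp_v4_syn_recv_sock,
		.setsockopt	= ip_setsockopt,
		.getsockopt	= ip_getsockopt,
		.addr2sockaddr	= inet_csk_addr2sockaddr,
		.mtu_reduced	= tcp_v4_mtu_reduced,
	};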
145 static inline struct inet_connection_sock *inet_csk(const struct sock *sk) in inet_csk() argument
147 return (struct inet_connection_sock *)sk; in inet_csk()
150 static inline void *inet_csk_ca(const struct sock *sk) in inet_csk_ca() argument
152 return (void *)inet_csk(sk)->icsk_ca_priv; in inet_csk_ca()
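inet_csk() is a plain downcast, valid because struct inet_connection_sock embeds struct inet_sock (which embeds struct sock) as its first member. inet_csk_ca() hands back the icsk_ca_priv scratch area that congestion-control modules use for per-connection state. A minimal sketch of that pattern, with a hypothetical my_ca state struct:

	/* Hypothetical per-connection state for a congestion-control
	 * module; it must fit within ICSK_CA_PRIV_SIZE bytes.
	 */
	struct my_ca {
		u32 epoch_start;
		u32 cnt;
	};

	static void my_ca_init(struct sock *sk)
	{
		struct my_ca *ca = inet_csk_ca(sk);	/* points into icsk_ca_priv */

		memset(ca, 0, sizeof(*ca));
	}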
155 struct sock *inet_csk_clone_lock(const struct sock *sk,
167 void inet_csk_init_xmit_timers(struct sock *sk,
171 void inet_csk_clear_xmit_timers(struct sock *sk);
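inet_csk_init_xmit_timers() installs the three per-socket timers (retransmit, delayed ACK, keepalive). TCP registers its handlers roughly as tcp_init_xmit_timers() in net/ipv4/tcp_timer.c does (the real function also sets up the pacing hrtimer in recent kernels):

	/* Sketch modeled on tcp_init_xmit_timers(): register TCP's
	 * handlers for the retransmit, delayed-ACK and keepalive timers.
	 */
	void tcp_init_xmit_timers(struct sock *sk)
	{
		inet_csk_init_xmit_timers(sk, &tcp_write_timer,
					  &tcp_delack_timer,
					  &tcp_keepalive_timer);
	}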
173 static inline void inet_csk_schedule_ack(struct sock *sk) in inet_csk_schedule_ack() argument
175 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED; in inet_csk_schedule_ack()
178 static inline int inet_csk_ack_scheduled(const struct sock *sk) in inet_csk_ack_scheduled() argument
180 return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED; in inet_csk_ack_scheduled()
183 static inline void inet_csk_delack_init(struct sock *sk) in inet_csk_delack_init() argument
185 memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); in inet_csk_delack_init()
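inet_csk_schedule_ack() only sets the ICSK_ACK_SCHED bit; a later send-check decides between an immediate ACK and the delayed-ACK timer, and inet_csk_delack_init() wipes the whole delayed-ACK state. A sketch of that receive-path split, loosely modeled on tcp_event_data_recv() and tcp_ack_snd_check():

	static void data_received(struct sock *sk)
	{
		inet_csk_schedule_ack(sk);	/* an ACK is now owed */
	}

	static void maybe_send_ack(struct sock *sk)
	{
		if (!inet_csk_ack_scheduled(sk))
			return;
		/* send the ACK now, or arm ICSK_TIME_DACK to delay it */
	}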
188 void inet_csk_delete_keepalive_timer(struct sock *sk);
189 void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
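TCP arms and disarms the keepalive timer when SO_KEEPALIVE is toggled; a sketch close to tcp_set_keepalive() in net/ipv4/tcp_timer.c:

	/* Sketch modeled on tcp_set_keepalive(): toggling SO_KEEPALIVE
	 * arms the timer for the configured idle time, or removes it.
	 */
	void tcp_set_keepalive(struct sock *sk, int val)
	{
		if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
			return;

		if (val && !sock_flag(sk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(sk,
					keepalive_time_when(tcp_sk(sk)));
		else if (!val)
			inet_csk_delete_keepalive_timer(sk);
	}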
191 static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) in inet_csk_clear_xmit_timer() argument
193 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timer()
198 sk_stop_timer(sk, &icsk->icsk_retransmit_timer); in inet_csk_clear_xmit_timer()
204 sk_stop_timer(sk, &icsk->icsk_delack_timer); in inet_csk_clear_xmit_timer()
214 static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, in inet_csk_reset_xmit_timer() argument
218 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_reset_xmit_timer()
222 sk, what, when, (void *)_THIS_IP_); in inet_csk_reset_xmit_timer()
231 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); in inet_csk_reset_xmit_timer()
235 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); in inet_csk_reset_xmit_timer()
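The what argument selects the timer: ICSK_TIME_RETRANS (and its loss-probe/reordering variants) shares icsk_retransmit_timer, while ICSK_TIME_DACK uses icsk_delack_timer; when is clamped to max_when. Rearming the RTO after new data is acknowledged looks like this (cf. tcp_rearm_rto()):

	/* Sketch modeled on tcp_rearm_rto(): (re)arm the retransmission
	 * timer for the current smoothed RTO, capped at TCP_RTO_MAX.
	 */
	static void rearm_rto(struct sock *sk)
	{
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
	}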
250 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);
252 int inet_csk_get_port(struct sock *sk, unsigned short snum);
254 struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
256 struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
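inet_csk_route_req() routes a reply on behalf of a request socket that does not yet have a full identity; the SYN-ACK path uses it like this (a sketch modeled on tcp_v4_send_synack()):

	/* Route the SYN-ACK for a request socket before building the
	 * reply skb.
	 */
	static int send_synack(const struct sock *sk, struct request_sock *req)
	{
		struct flowi4 fl4;
		struct dst_entry *dst = inet_csk_route_req(sk, &fl4, req);

		if (!dst)
			return -EHOSTUNREACH;
		/* ... build the SYN-ACK and transmit it along dst ... */
		return 0;
	}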
260 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
263 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
265 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
269 static inline void inet_csk_reqsk_queue_added(struct sock *sk) in inet_csk_reqsk_queue_added() argument
271 reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_added()
274 static inline int inet_csk_reqsk_queue_len(const struct sock *sk) in inet_csk_reqsk_queue_len() argument
276 return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_len()
279 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) in inet_csk_reqsk_queue_is_full() argument
281 return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog; in inet_csk_reqsk_queue_is_full()
284 void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
285 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
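A passive open threads these helpers together: the listener refuses (or cookies) new SYNs when the request queue is full, otherwise hashes the request socket, which also starts its SYN-ACK retransmit timer. A sketch modeled on tcp_conn_request():

	/* Reject a SYN when the request queue is full (real code may
	 * fall back to SYN cookies), otherwise publish the request
	 * socket in the ehash and arm its timer.
	 */
	static int conn_request(struct sock *sk, struct request_sock *req)
	{
		if (inet_csk_reqsk_queue_is_full(sk))
			return -ENOBUFS;

		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		return 0;
	}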
287 static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk) in inet_csk_prepare_for_destroy_sock() argument
290 sock_set_flag(sk, SOCK_DEAD); in inet_csk_prepare_for_destroy_sock()
291 percpu_counter_inc(sk->sk_prot->orphan_count); in inet_csk_prepare_for_destroy_sock()
294 void inet_csk_destroy_sock(struct sock *sk);
295 void inet_csk_prepare_forced_close(struct sock *sk);
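When a freshly cloned child socket cannot be completed (for example, hashing it fails), the error path marks it dead and destroys it. A sketch of the idiom used in tcp_v4_syn_recv_sock():

	/* Error-path sketch from a ->syn_recv_sock() implementation:
	 * drop the clone's lock and extra reference, mark it orphaned,
	 * then let tcp_done()/inet_csk_destroy_sock() free it.
	 */
	static void abort_child(struct sock *newsk)
	{
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
	}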
300 static inline __poll_t inet_csk_listen_poll(const struct sock *sk) in inet_csk_listen_poll() argument
302 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? in inet_csk_listen_poll()
306 int inet_csk_listen_start(struct sock *sk, int backlog);
307 void inet_csk_listen_stop(struct sock *sk);
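inet_csk_listen_start() is what actually moves a bound socket into the listening state: it allocates the accept queue and, on success, hashes the socket as a listener so incoming SYNs can find it. A sketch of the listen(2) path, modeled on inet_listen():

	static int do_listen(struct sock *sk, int backlog)
	{
		int err = inet_csk_listen_start(sk, backlog);

		if (!err)
			sk->sk_max_ack_backlog = backlog;
		return err;
	}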
309 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
313 struct sock *sk);
315 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
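inet_csk_update_pmtu() revalidates the cached route after an ICMP "fragmentation needed"; if the new path MTU is smaller, the icsk_sync_mss hook (tcp_sync_mss for TCP) shrinks the MSS. A sketch modeled on tcp_v4_mtu_reduced():

	static void mtu_reduced(struct sock *sk, u32 mtu)
	{
		struct dst_entry *dst = inet_csk_update_pmtu(sk, mtu);

		if (!dst)
			return;
		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
			inet_csk(sk)->icsk_sync_mss(sk, dst_mtu(dst));
	}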
319 static inline void inet_csk_enter_pingpong_mode(struct sock *sk) in inet_csk_enter_pingpong_mode() argument
321 inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH; in inet_csk_enter_pingpong_mode()
324 static inline void inet_csk_exit_pingpong_mode(struct sock *sk) in inet_csk_exit_pingpong_mode() argument
326 inet_csk(sk)->icsk_ack.pingpong = 0; in inet_csk_exit_pingpong_mode()
329 static inline bool inet_csk_in_pingpong_mode(struct sock *sk) in inet_csk_in_pingpong_mode() argument
331 return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; in inet_csk_in_pingpong_mode()
334 static inline void inet_csk_inc_pingpong_cnt(struct sock *sk) in inet_csk_inc_pingpong_cnt() argument
336 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_inc_pingpong_cnt()
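Pingpong mode marks a connection as interactive request/response traffic so ACKs can be delayed and piggybacked on data; since TCP_PINGPONG_THRESH was introduced, the counter must reach the threshold before the mode tests true. The sender bumps the count when it replies quickly after receiving data (cf. tcp_event_data_sent()):

	/* Sketch modeled on tcp_event_data_sent(): sending within one
	 * ato of the last receive counts toward pingpong mode.
	 */
	static void on_data_sent(struct sock *sk, u32 now)
	{
		struct inet_connection_sock *icsk = inet_csk(sk);

		if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
			inet_csk_inc_pingpong_cnt(sk);
	}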
342 static inline bool inet_csk_has_ulp(struct sock *sk) in inet_csk_has_ulp() argument
344 return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops; in inet_csk_has_ulp()
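inet_csk_has_ulp() lets layers that replace socket callbacks avoid stacking on top of an upper-layer protocol such as kTLS; a sketch of that guard (the sockmap/psock code uses this pattern):

	/* Refuse to take over sk->sk_prot callbacks when a ULP
	 * (e.g. tls) is already attached.
	 */
	static int take_over_callbacks(struct sock *sk)
	{
		if (inet_csk_has_ulp(sk))
			return -EINVAL;
		/* ... safe to install replacement callbacks ... */
		return 0;
	}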