Lines matching refs: ssk

336 static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)  in mptcp_rmem_schedule()  argument
354 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, in __mptcp_move_skb() argument
358 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skb()
363 __skb_unlink(skb, &ssk->sk_receive_queue); in __mptcp_move_skb()
369 if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) in __mptcp_move_skb()
498 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_timeout_from_subflow() local
500 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? in mptcp_timeout_from_subflow()
501 inet_csk(ssk)->icsk_timeout - jiffies : 0; in mptcp_timeout_from_subflow()
514 static inline bool tcp_can_send_ack(const struct sock *ssk) in tcp_can_send_ack() argument
516 return !((1 << inet_sk_state_load(ssk)) & in tcp_can_send_ack()
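The continuation of that return expression does not mention ssk, so the listing cuts it off after the "&". The full helper in the kernel tree reads roughly as below; the exact TCPF_* mask is reconstructed from memory and may differ slightly between kernel versions:

	static inline bool tcp_can_send_ack(const struct sock *ssk)
	{
		/* an ACK may only be sent on a subflow whose TCP state allows it */
		return !((1 << inet_sk_state_load(ssk)) &
		       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
	}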
520 void __mptcp_subflow_send_ack(struct sock *ssk) in __mptcp_subflow_send_ack() argument
522 if (tcp_can_send_ack(ssk)) in __mptcp_subflow_send_ack()
523 tcp_send_ack(ssk); in __mptcp_subflow_send_ack()
526 static void mptcp_subflow_send_ack(struct sock *ssk) in mptcp_subflow_send_ack() argument
530 slow = lock_sock_fast(ssk); in mptcp_subflow_send_ack()
531 __mptcp_subflow_send_ack(ssk); in mptcp_subflow_send_ack()
532 unlock_sock_fast(ssk, slow); in mptcp_subflow_send_ack()
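Callers that never touch ssk directly are filtered out of this listing; a typical driver of the two ack helpers above is the per-subflow loop in mptcp_send_ack() in the same file, sketched here (shape assumed, not quoted verbatim):

	void mptcp_send_ack(struct mptcp_sock *msk)
	{
		struct mptcp_subflow_context *subflow;

		/* send a pure ACK on every subflow of this MPTCP connection */
		mptcp_for_each_subflow(msk, subflow)
			mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
	}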
543 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk) in mptcp_subflow_cleanup_rbuf() argument
547 slow = lock_sock_fast(ssk); in mptcp_subflow_cleanup_rbuf()
548 if (tcp_can_send_ack(ssk)) in mptcp_subflow_cleanup_rbuf()
549 tcp_cleanup_rbuf(ssk, 1); in mptcp_subflow_cleanup_rbuf()
550 unlock_sock_fast(ssk, slow); in mptcp_subflow_cleanup_rbuf()
553 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty) in mptcp_subflow_could_cleanup() argument
555 const struct inet_connection_sock *icsk = inet_csk(ssk); in mptcp_subflow_could_cleanup()
557 const struct tcp_sock *tp = tcp_sk(ssk); in mptcp_subflow_could_cleanup()
578 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_cleanup_rbuf() local
580 if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty)) in mptcp_cleanup_rbuf()
581 mptcp_subflow_cleanup_rbuf(ssk); in mptcp_cleanup_rbuf()
636 struct sock *ssk, in __mptcp_move_skbs_from_subflow() argument
639 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skbs_from_subflow()
650 int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); in __mptcp_move_skbs_from_subflow()
658 pr_debug("msk=%p ssk=%p", msk, ssk); in __mptcp_move_skbs_from_subflow()
659 tp = tcp_sk(ssk); in __mptcp_move_skbs_from_subflow()
670 skb = skb_peek(&ssk->sk_receive_queue); in __mptcp_move_skbs_from_subflow()
703 if (__mptcp_move_skb(msk, ssk, skb, offset, len)) in __mptcp_move_skbs_from_subflow()
711 sk_eat_skb(ssk, skb); in __mptcp_move_skbs_from_subflow()
716 more_data_avail = mptcp_subflow_data_available(ssk); in __mptcp_move_skbs_from_subflow()
773 static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) in __mptcp_subflow_error_report() argument
775 int err = sock_error(ssk); in __mptcp_subflow_error_report()
792 ssk_state = inet_sk_state_load(ssk); in __mptcp_subflow_error_report()
816 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) in move_skbs_to_msk() argument
821 __mptcp_move_skbs_from_subflow(msk, ssk, &moved); in move_skbs_to_msk()
823 if (unlikely(ssk->sk_err)) { in move_skbs_to_msk()
840 void mptcp_data_ready(struct sock *sk, struct sock *ssk) in mptcp_data_ready() argument
842 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_data_ready()
853 ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); in mptcp_data_ready()
866 if (move_skbs_to_msk(msk, ssk)) in mptcp_data_ready()
872 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk) in mptcp_subflow_joined() argument
874 mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq); in mptcp_subflow_joined()
876 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); in mptcp_subflow_joined()
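mptcp_subflow_joined() has one more statement that the ssk filter hides because it only touches msk state; a plausible reconstruction of the whole helper, assuming the allow_infinite_fallback update sits between the two lines shown above, is:

	static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
	{
		/* align the new subflow's mapping with the current msk-level ack_seq */
		mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
		WRITE_ONCE(msk->allow_infinite_fallback, false);
		mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
	}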
879 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) in __mptcp_finish_join() argument
889 if (sk->sk_socket && !ssk->sk_socket) in __mptcp_finish_join()
890 mptcp_sock_graft(ssk, sk->sk_socket); in __mptcp_finish_join()
892 mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++; in __mptcp_finish_join()
893 mptcp_sockopt_sync_locked(msk, ssk); in __mptcp_finish_join()
894 mptcp_subflow_joined(msk, ssk); in __mptcp_finish_join()
905 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_flush_join_list() local
906 bool slow = lock_sock_fast(ssk); in __mptcp_flush_join_list()
909 if (!__mptcp_finish_join(msk, ssk)) in __mptcp_flush_join_list()
910 mptcp_subflow_reset(ssk); in __mptcp_flush_join_list()
911 unlock_sock_fast(ssk, slow); in __mptcp_flush_join_list()
1084 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_enter_memory_pressure() local
1087 tcp_enter_memory_pressure(ssk); in mptcp_enter_memory_pressure()
1088 sk_stream_moderate_sndbuf(ssk); in mptcp_enter_memory_pressure()
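Lines 1084-1088 are the body of the subflow loop in mptcp_enter_memory_pressure(); the enclosing function, reconstructed here with the declarations and the "first" guard assumed from the usual upstream shape, propagates the pressure signal from the msk down to each ssk:

	static void mptcp_enter_memory_pressure(struct sock *sk)
	{
		struct mptcp_subflow_context *subflow;
		struct mptcp_sock *msk = mptcp_sk(sk);
		bool first = true;

		sk_stream_moderate_sndbuf(sk);
		mptcp_for_each_subflow(msk, subflow) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			/* enter TCP memory pressure once, then moderate every subflow */
			if (first)
				tcp_enter_memory_pressure(ssk);
			sk_stream_moderate_sndbuf(ssk);
			first = false;
		}
	}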
1133 static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk, in mptcp_check_allowed_size() argument
1145 if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) { in mptcp_check_allowed_size()
1146 tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd); in mptcp_check_allowed_size()
1147 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED); in mptcp_check_allowed_size()
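Lines 1145-1147 widen the subflow's snd_wnd when it is smaller than the MPTCP-level send window, so one subflow can use send space announced at the connection level, and the SNDWNDSHARED counter tracks how often that happens. A hedged sketch of the surrounding check, with the mptcp_wnd_end() computation and the fallback early-return assumed, is:

	static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk,
					    u64 data_seq, int avail_size)
	{
		u64 window_end = mptcp_wnd_end(msk);
		u64 mptcp_snd_wnd;

		if (__mptcp_check_fallback(msk))
			return avail_size;

		/* never send past the MPTCP-level window edge */
		mptcp_snd_wnd = window_end - data_seq;
		avail_size = min_t(unsigned int, mptcp_snd_wnd, avail_size);

		if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) {
			tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd);
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED);
		}

		return avail_size;
	}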
1182 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp) in __mptcp_alloc_tx_skb() argument
1190 if (likely(sk_wmem_schedule(ssk, skb->truesize))) { in __mptcp_alloc_tx_skb()
1191 tcp_skb_entail(ssk, skb); in __mptcp_alloc_tx_skb()
1199 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held) in mptcp_alloc_tx_skb() argument
1203 return __mptcp_alloc_tx_skb(sk, ssk, gfp); in mptcp_alloc_tx_skb()
1219 struct sock *ssk, in mptcp_update_infinite_map() argument
1228 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX); in mptcp_update_infinite_map()
1229 mptcp_subflow_ctx(ssk)->send_infinite_map = 0; in mptcp_update_infinite_map()
1231 mptcp_do_fallback(ssk); in mptcp_update_infinite_map()
1234 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, in mptcp_sendmsg_frag() argument
1250 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); in mptcp_sendmsg_frag()
1256 if (unlikely(!__tcp_can_send(ssk))) in mptcp_sendmsg_frag()
1260 info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags); in mptcp_sendmsg_frag()
1263 skb = tcp_write_queue_tail(ssk); in mptcp_sendmsg_frag()
1280 tcp_mark_push(tcp_sk(ssk), skb); in mptcp_sendmsg_frag()
1287 skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held); in mptcp_sendmsg_frag()
1297 copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy); in mptcp_sendmsg_frag()
1301 if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { in mptcp_sendmsg_frag()
1302 tcp_remove_empty_skb(ssk); in mptcp_sendmsg_frag()
1312 if (!sk_wmem_schedule(ssk, copy)) { in mptcp_sendmsg_frag()
1313 tcp_remove_empty_skb(ssk); in mptcp_sendmsg_frag()
1327 sk_wmem_queued_add(ssk, copy); in mptcp_sendmsg_frag()
1328 sk_mem_charge(ssk, copy); in mptcp_sendmsg_frag()
1329 WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy); in mptcp_sendmsg_frag()
1342 mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq; in mptcp_sendmsg_frag()
1352 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; in mptcp_sendmsg_frag()
1356 tcp_push_pending_frames(ssk); in mptcp_sendmsg_frag()
1362 if (mptcp_subflow_ctx(ssk)->send_infinite_map) in mptcp_sendmsg_frag()
1363 mptcp_update_infinite_map(msk, ssk, mpext); in mptcp_sendmsg_frag()
1365 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; in mptcp_sendmsg_frag()
1376 struct sock *ssk; member
1417 struct sock *ssk; in mptcp_subflow_get_send() local
1423 send_info[i].ssk = NULL; in mptcp_subflow_get_send()
1429 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_send()
1438 subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate); in mptcp_subflow_get_send()
1444 linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace); in mptcp_subflow_get_send()
1446 send_info[subflow->backup].ssk = ssk; in mptcp_subflow_get_send()
1454 send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk; in mptcp_subflow_get_send()
1467 ssk = send_info[SSK_MODE_ACTIVE].ssk; in mptcp_subflow_get_send()
1468 if (!ssk || !sk_stream_memory_free(ssk)) in mptcp_subflow_get_send()
1472 wmem = READ_ONCE(ssk->sk_wmem_queued); in mptcp_subflow_get_send()
1474 return ssk; in mptcp_subflow_get_send()
1476 subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_get_send()
1478 READ_ONCE(ssk->sk_pacing_rate) * burst, in mptcp_subflow_get_send()
1481 return ssk; in mptcp_subflow_get_send()
1484 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info) in mptcp_push_release() argument
1486 tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal); in mptcp_push_release()
1487 release_sock(ssk); in mptcp_push_release()
1523 static int __subflow_push_pending(struct sock *sk, struct sock *ssk, in __subflow_push_pending() argument
1537 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info); in __subflow_push_pending()
1552 !sk_stream_memory_free(ssk) || in __subflow_push_pending()
1553 !mptcp_subflow_active(mptcp_subflow_ctx(ssk))) { in __subflow_push_pending()
1567 struct sock *prev_ssk = NULL, *ssk = NULL; in __mptcp_push_pending() local
1588 prev_ssk = ssk; in __mptcp_push_pending()
1589 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_push_pending()
1590 if (ssk != prev_ssk) { in __mptcp_push_pending()
1601 lock_sock(ssk); in __mptcp_push_pending()
1606 ret = __subflow_push_pending(sk, ssk, &info); in __mptcp_push_pending()
1609 (1 << ssk->sk_state) & in __mptcp_push_pending()
1620 if (ssk) in __mptcp_push_pending()
1621 mptcp_push_release(ssk, &info); in __mptcp_push_pending()
1630 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first) in __mptcp_subflow_push_pending() argument
1642 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_subflow_push_pending()
1650 ret = __subflow_push_pending(sk, ssk, &info); in __mptcp_subflow_push_pending()
1663 ret = __subflow_push_pending(sk, ssk, &info); in __mptcp_subflow_push_pending()
1672 if (xmit_ssk != ssk) { in __mptcp_subflow_push_pending()
1686 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, in __mptcp_subflow_push_pending()
1713 struct sock *ssk; in mptcp_sendmsg_fastopen() local
1723 ssk = __mptcp_nmpc_sk(msk); in mptcp_sendmsg_fastopen()
1724 if (IS_ERR(ssk)) in mptcp_sendmsg_fastopen()
1725 return PTR_ERR(ssk); in mptcp_sendmsg_fastopen()
1730 ssk = msk->first; in mptcp_sendmsg_fastopen()
1732 lock_sock(ssk); in mptcp_sendmsg_fastopen()
1735 ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL); in mptcp_sendmsg_fastopen()
1738 release_sock(ssk); in mptcp_sendmsg_fastopen()
2021 struct sock *ssk; in mptcp_rcv_space_adjust() local
2024 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_rcv_space_adjust()
2025 slow = lock_sock_fast(ssk); in mptcp_rcv_space_adjust()
2026 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); in mptcp_rcv_space_adjust()
2027 tcp_sk(ssk)->window_clamp = window_clamp; in mptcp_rcv_space_adjust()
2028 tcp_cleanup_rbuf(ssk, 1); in mptcp_rcv_space_adjust()
2029 unlock_sock_fast(ssk, slow); in mptcp_rcv_space_adjust()
2066 struct sock *ssk = mptcp_subflow_recv_lookup(msk); in __mptcp_move_skbs() local
2073 if (likely(!ssk)) in __mptcp_move_skbs()
2076 slowpath = lock_sock_fast(ssk); in __mptcp_move_skbs()
2079 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved); in __mptcp_move_skbs()
2082 if (unlikely(ssk->sk_err)) in __mptcp_move_skbs()
2084 unlock_sock_fast(ssk, slowpath); in __mptcp_move_skbs()
2278 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_retrans() local
2284 if (!tcp_rtx_and_write_queues_empty(ssk)) { in mptcp_subflow_get_retrans()
2285 mptcp_pm_subflow_chk_stale(msk, ssk); in mptcp_subflow_get_retrans()
2292 backup = ssk; in mptcp_subflow_get_retrans()
2297 pick = ssk; in mptcp_subflow_get_retrans()
2355 static void __mptcp_subflow_disconnect(struct sock *ssk, in __mptcp_subflow_disconnect() argument
2359 if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || in __mptcp_subflow_disconnect()
2364 WARN_ON_ONCE(tcp_disconnect(ssk, 0)); in __mptcp_subflow_disconnect()
2367 tcp_shutdown(ssk, SEND_SHUTDOWN); in __mptcp_subflow_disconnect()
2379 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, in __mptcp_close_ssk() argument
2391 if (msk->in_accept_queue && msk->first == ssk && in __mptcp_close_ssk()
2392 (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) { in __mptcp_close_ssk()
2396 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); in __mptcp_close_ssk()
2397 mptcp_subflow_drop_ctx(ssk); in __mptcp_close_ssk()
2401 dispose_it = msk->free_first || ssk != msk->first; in __mptcp_close_ssk()
2405 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); in __mptcp_close_ssk()
2411 ssk->sk_lingertime = 0; in __mptcp_close_ssk()
2412 sock_set_flag(ssk, SOCK_LINGER); in __mptcp_close_ssk()
2418 __mptcp_subflow_disconnect(ssk, subflow, flags); in __mptcp_close_ssk()
2419 release_sock(ssk); in __mptcp_close_ssk()
2430 if (!inet_csk(ssk)->icsk_ulp_ops) { in __mptcp_close_ssk()
2431 WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD)); in __mptcp_close_ssk()
2435 __tcp_close(ssk, 0); in __mptcp_close_ssk()
2438 __sock_put(ssk); in __mptcp_close_ssk()
2442 __mptcp_subflow_error_report(sk, ssk); in __mptcp_close_ssk()
2443 release_sock(ssk); in __mptcp_close_ssk()
2445 sock_put(ssk); in __mptcp_close_ssk()
2447 if (ssk == msk->first) in __mptcp_close_ssk()
2471 void mptcp_close_ssk(struct sock *sk, struct sock *ssk, in mptcp_close_ssk() argument
2475 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); in mptcp_close_ssk()
2480 mptcp_pm_subflow_check_next(mptcp_sk(sk), ssk, subflow); in mptcp_close_ssk()
2482 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH); in mptcp_close_ssk()
2498 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close_subflow() local
2500 if (inet_sk_state_load(ssk) != TCP_CLOSE) in __mptcp_close_subflow()
2504 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue)) in __mptcp_close_subflow()
2507 mptcp_close_ssk(sk, ssk, subflow); in __mptcp_close_subflow()
2577 struct sock *ssk; in __mptcp_retrans() local
2612 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_retrans()
2614 lock_sock(ssk); in __mptcp_retrans()
2621 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); in __mptcp_retrans()
2631 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, in __mptcp_retrans()
2636 release_sock(ssk); in __mptcp_retrans()
2674 struct sock *ssk = msk->first; in mptcp_mp_fail_no_response() local
2677 if (!ssk) in mptcp_mp_fail_no_response()
2682 slow = lock_sock_fast(ssk); in mptcp_mp_fail_no_response()
2683 mptcp_subflow_reset(ssk); in mptcp_mp_fail_no_response()
2684 WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0); in mptcp_mp_fail_no_response()
2685 unlock_sock_fast(ssk, slow); in mptcp_mp_fail_no_response()
2835 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) in mptcp_subflow_shutdown() argument
2837 lock_sock(ssk); in mptcp_subflow_shutdown()
2839 switch (ssk->sk_state) { in mptcp_subflow_shutdown()
2845 WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK)); in mptcp_subflow_shutdown()
2850 ssk->sk_shutdown |= how; in mptcp_subflow_shutdown()
2851 tcp_shutdown(ssk, how); in mptcp_subflow_shutdown()
2859 pr_debug("Sending DATA_FIN on subflow %p", ssk); in mptcp_subflow_shutdown()
2860 tcp_send_ack(ssk); in mptcp_subflow_shutdown()
2867 release_sock(ssk); in mptcp_subflow_shutdown()
2981 struct sock *ssk; in mptcp_check_listen_stop() local
2987 ssk = mptcp_sk(sk)->first; in mptcp_check_listen_stop()
2988 if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN)) in mptcp_check_listen_stop()
2991 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); in mptcp_check_listen_stop()
2992 tcp_set_state(ssk, TCP_CLOSE); in mptcp_check_listen_stop()
2993 mptcp_subflow_queue_clean(sk, ssk); in mptcp_check_listen_stop()
2994 inet_csk_listen_stop(ssk); in mptcp_check_listen_stop()
2995 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED); in mptcp_check_listen_stop()
2996 release_sock(ssk); in mptcp_check_listen_stop()
3029 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close() local
3030 bool slow = lock_sock_fast_nested(ssk); in __mptcp_close()
3032 subflows_alive += ssk->sk_state != TCP_CLOSE; in __mptcp_close()
3037 if (ssk == msk->first) in __mptcp_close()
3043 ssk->sk_socket = NULL; in __mptcp_close()
3044 ssk->sk_wq = NULL; in __mptcp_close()
3045 unlock_sock_fast(ssk, slow); in __mptcp_close()
3084 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) in mptcp_copy_inaddrs() argument
3087 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); in mptcp_copy_inaddrs()
3090 msk->sk_v6_daddr = ssk->sk_v6_daddr; in mptcp_copy_inaddrs()
3091 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; in mptcp_copy_inaddrs()
3099 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; in mptcp_copy_inaddrs()
3100 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; in mptcp_copy_inaddrs()
3101 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; in mptcp_copy_inaddrs()
3102 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; in mptcp_copy_inaddrs()
3103 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; in mptcp_copy_inaddrs()
3104 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; in mptcp_copy_inaddrs()
3165 struct sock *ssk, in mptcp_sk_clone_init() argument
3209 WRITE_ONCE(msk->first, ssk); in mptcp_sk_clone_init()
3210 list_add(&mptcp_subflow_ctx(ssk)->node, &msk->conn_list); in mptcp_sk_clone_init()
3211 sock_hold(ssk); in mptcp_sk_clone_init()
3221 mptcp_copy_inaddrs(nsk, ssk); in mptcp_sk_clone_init()
3222 mptcp_propagate_sndbuf(nsk, ssk); in mptcp_sk_clone_init()
3224 mptcp_rcv_space_init(msk, ssk); in mptcp_sk_clone_init()
3231 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) in mptcp_rcv_space_init() argument
3233 const struct tcp_sock *tp = tcp_sk(ssk); in mptcp_rcv_space_init()
3246 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); in mptcp_rcv_space_init()
3249 static struct sock *mptcp_accept(struct sock *ssk, int flags, int *err, in mptcp_accept() argument
3254 pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk)); in mptcp_accept()
3255 newsk = inet_csk_accept(ssk, flags, err, kern); in mptcp_accept()
3276 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK); in mptcp_accept()
3278 MPTCP_INC_STATS(sock_net(ssk), in mptcp_accept()
3336 void __mptcp_check_push(struct sock *sk, struct sock *ssk) in __mptcp_check_push() argument
3342 __mptcp_subflow_push_pending(sk, ssk, false); in __mptcp_check_push()
3409 static void schedule_3rdack_retransmission(struct sock *ssk) in schedule_3rdack_retransmission() argument
3411 struct inet_connection_sock *icsk = inet_csk(ssk); in schedule_3rdack_retransmission()
3412 struct tcp_sock *tp = tcp_sk(ssk); in schedule_3rdack_retransmission()
3415 if (mptcp_subflow_ctx(ssk)->fully_established) in schedule_3rdack_retransmission()
3428 sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); in schedule_3rdack_retransmission()
3431 void mptcp_subflow_process_delegated(struct sock *ssk, long status) in mptcp_subflow_process_delegated() argument
3433 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_process_delegated()
3439 __mptcp_subflow_push_pending(sk, ssk, true); in mptcp_subflow_process_delegated()
3445 schedule_3rdack_retransmission(ssk); in mptcp_subflow_process_delegated()
3473 void mptcp_finish_connect(struct sock *ssk) in mptcp_finish_connect() argument
3479 subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_connect()
3496 mptcp_pm_new_connection(msk, ssk, 0); in mptcp_finish_connect()
3498 mptcp_rcv_space_init(msk, ssk); in mptcp_finish_connect()
3510 bool mptcp_finish_join(struct sock *ssk) in mptcp_finish_join() argument
3512 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_join()
3527 mptcp_subflow_joined(msk, ssk); in mptcp_finish_join()
3539 ret = __mptcp_finish_join(msk, ssk); in mptcp_finish_join()
3541 sock_hold(ssk); in mptcp_finish_join()
3545 sock_hold(ssk); in mptcp_finish_join()
3647 struct sock *ssk; in mptcp_connect() local
3649 ssk = __mptcp_nmpc_sk(msk); in mptcp_connect()
3650 if (IS_ERR(ssk)) in mptcp_connect()
3651 return PTR_ERR(ssk); in mptcp_connect()
3654 subflow = mptcp_subflow_ctx(ssk); in mptcp_connect()
3659 if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info)) in mptcp_connect()
3662 if (subflow->request_mptcp && mptcp_token_new_connect(ssk)) { in mptcp_connect()
3663 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT); in mptcp_connect()
3673 lock_sock(ssk); in mptcp_connect()
3678 if (ssk->sk_state != TCP_CLOSE) in mptcp_connect()
3681 if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) { in mptcp_connect()
3682 err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len); in mptcp_connect()
3687 err = ssk->sk_prot->connect(ssk, uaddr, addr_len); in mptcp_connect()
3691 inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk)); in mptcp_connect()
3695 release_sock(ssk); in mptcp_connect()
3707 mptcp_copy_inaddrs(sk, ssk); in mptcp_connect()
3748 struct sock *ssk, *sk = sock->sk; in mptcp_bind() local
3752 ssk = __mptcp_nmpc_sk(msk); in mptcp_bind()
3753 if (IS_ERR(ssk)) { in mptcp_bind()
3754 err = PTR_ERR(ssk); in mptcp_bind()
3759 err = inet_bind_sk(ssk, uaddr, addr_len); in mptcp_bind()
3762 err = inet6_bind_sk(ssk, uaddr, addr_len); in mptcp_bind()
3765 mptcp_copy_inaddrs(sk, ssk); in mptcp_bind()
3776 struct sock *ssk; in mptcp_listen() local
3787 ssk = __mptcp_nmpc_sk(msk); in mptcp_listen()
3788 if (IS_ERR(ssk)) { in mptcp_listen()
3789 err = PTR_ERR(ssk); in mptcp_listen()
3796 lock_sock(ssk); in mptcp_listen()
3797 err = __inet_listen_sk(ssk, backlog); in mptcp_listen()
3798 release_sock(ssk); in mptcp_listen()
3799 inet_sk_state_store(sk, inet_sk_state_load(ssk)); in mptcp_listen()
3803 mptcp_copy_inaddrs(sk, ssk); in mptcp_listen()
3804 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED); in mptcp_listen()
3816 struct sock *ssk, *newsk; in mptcp_stream_accept() local
3824 ssk = READ_ONCE(msk->first); in mptcp_stream_accept()
3825 if (!ssk) in mptcp_stream_accept()
3828 newsk = mptcp_accept(ssk, flags, &err, kern); in mptcp_stream_accept()
3846 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_stream_accept() local
3848 if (!ssk->sk_socket) in mptcp_stream_accept()
3849 mptcp_sock_graft(ssk, newsock); in mptcp_stream_accept()
3897 struct sock *ssk = READ_ONCE(msk->first); in mptcp_poll() local
3899 if (WARN_ON_ONCE(!ssk)) in mptcp_poll()
3902 return inet_csk_listen_poll(ssk); in mptcp_poll()
3968 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_napi_poll() local
3970 bh_lock_sock_nested(ssk); in mptcp_napi_poll()
3971 if (!sock_owned_by_user(ssk)) { in mptcp_napi_poll()
3972 mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0)); in mptcp_napi_poll()
3982 bh_unlock_sock(ssk); in mptcp_napi_poll()
3983 sock_put(ssk); in mptcp_napi_poll()