Lines Matching refs:ssk

327 static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size) in mptcp_rmem_schedule() argument
345 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, in __mptcp_move_skb() argument
349 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skb()
354 __skb_unlink(skb, &ssk->sk_receive_queue); in __mptcp_move_skb()
360 if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) in __mptcp_move_skb()
489 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_timeout_from_subflow() local
491 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? in mptcp_timeout_from_subflow()
492 inet_csk(ssk)->icsk_timeout - jiffies : 0; in mptcp_timeout_from_subflow()
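
A minimal sketch of how the value computed by mptcp_timeout_from_subflow() above can be aggregated across all subflows, written as it could sit in net/mptcp/protocol.c next to the helper. The example_* name is hypothetical; msk->timer_ival and TCP_RTO_MIN are assumed as the storage field and fallback.

/* Hypothetical aggregation of per-subflow retransmit deadlines:
 * keep the largest pending timeout, or fall back to the TCP
 * minimum RTO when no subflow has a timer pending.
 */
static void example_set_timeout(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	long tout = 0;

	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
		tout = max(tout, mptcp_timeout_from_subflow(subflow));

	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}
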
505 static inline bool tcp_can_send_ack(const struct sock *ssk) in tcp_can_send_ack() argument
507 return !((1 << inet_sk_state_load(ssk)) & in tcp_can_send_ack()
511 void __mptcp_subflow_send_ack(struct sock *ssk) in __mptcp_subflow_send_ack() argument
513 if (tcp_can_send_ack(ssk)) in __mptcp_subflow_send_ack()
514 tcp_send_ack(ssk); in __mptcp_subflow_send_ack()
517 static void mptcp_subflow_send_ack(struct sock *ssk) in mptcp_subflow_send_ack() argument
521 slow = lock_sock_fast(ssk); in mptcp_subflow_send_ack()
522 __mptcp_subflow_send_ack(ssk); in mptcp_subflow_send_ack()
523 unlock_sock_fast(ssk, slow); in mptcp_subflow_send_ack()
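
The lock_sock_fast()/unlock_sock_fast() pairing above is the recurring pattern for touching one subflow socket from msk context. A minimal sketch of applying it across a whole connection, assuming the msk socket lock is already held by the caller (the example_* name is hypothetical):

static void example_send_ack_all(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	/* ping every subflow; mptcp_subflow_send_ack() takes and
	 * releases the per-subflow fast lock internally
	 */
	mptcp_for_each_subflow(msk, subflow)
		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}
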
534 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk) in mptcp_subflow_cleanup_rbuf() argument
538 slow = lock_sock_fast(ssk); in mptcp_subflow_cleanup_rbuf()
539 if (tcp_can_send_ack(ssk)) in mptcp_subflow_cleanup_rbuf()
540 tcp_cleanup_rbuf(ssk, 1); in mptcp_subflow_cleanup_rbuf()
541 unlock_sock_fast(ssk, slow); in mptcp_subflow_cleanup_rbuf()
544 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty) in mptcp_subflow_could_cleanup() argument
546 const struct inet_connection_sock *icsk = inet_csk(ssk); in mptcp_subflow_could_cleanup()
548 const struct tcp_sock *tp = tcp_sk(ssk); in mptcp_subflow_could_cleanup()
569 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_cleanup_rbuf() local
571 if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty)) in mptcp_cleanup_rbuf()
572 mptcp_subflow_cleanup_rbuf(ssk); in mptcp_cleanup_rbuf()
629 struct sock *ssk, in __mptcp_move_skbs_from_subflow() argument
632 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skbs_from_subflow()
643 int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); in __mptcp_move_skbs_from_subflow()
651 pr_debug("msk=%p ssk=%p", msk, ssk); in __mptcp_move_skbs_from_subflow()
652 tp = tcp_sk(ssk); in __mptcp_move_skbs_from_subflow()
663 skb = skb_peek(&ssk->sk_receive_queue); in __mptcp_move_skbs_from_subflow()
696 if (__mptcp_move_skb(msk, ssk, skb, offset, len)) in __mptcp_move_skbs_from_subflow()
704 sk_eat_skb(ssk, skb); in __mptcp_move_skbs_from_subflow()
709 more_data_avail = mptcp_subflow_data_available(ssk); in __mptcp_move_skbs_from_subflow()
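
A heavily simplified sketch of the loop shape referenced above: peek each skb on the subflow receive queue and promote it to the msk. __mptcp_move_skb() always unlinks the skb first (line 354 above), so the loop terminates. Mapping validation, partially consumed skbs, urgent data and buffer-space limits from the real function are omitted; the example_* name is hypothetical.

static void example_drain_subflow(struct mptcp_sock *msk, struct sock *ssk,
				  unsigned int *moved)
{
	struct tcp_sock *tp = tcp_sk(ssk);
	struct sk_buff *skb;

	while ((skb = skb_peek(&ssk->sk_receive_queue))) {
		size_t len = skb->len;

		/* promote to msk level; the skb is unlinked either way */
		if (__mptcp_move_skb(msk, ssk, skb, 0, len))
			*moved += len;

		/* the subflow-level stream advances regardless */
		WRITE_ONCE(tp->copied_seq, tp->copied_seq + len);
	}
}
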
768 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) in move_skbs_to_msk() argument
773 __mptcp_move_skbs_from_subflow(msk, ssk, &moved); in move_skbs_to_msk()
775 if (unlikely(ssk->sk_err)) { in move_skbs_to_msk()
792 void mptcp_data_ready(struct sock *sk, struct sock *ssk) in mptcp_data_ready() argument
794 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_data_ready()
805 ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); in mptcp_data_ready()
818 if (move_skbs_to_msk(msk, ssk)) in mptcp_data_ready()
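
A minimal sketch of the wakeup step in mptcp_data_ready() above: data is promoted under the msk data lock, and msk-level readers are woken only when something actually moved in sequence (example_* name hypothetical):

static void example_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_data_lock(sk);
	if (move_skbs_to_msk(msk, ssk))
		sk->sk_data_ready(sk);
	mptcp_data_unlock(sk);
}
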
824 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) in __mptcp_finish_join() argument
834 if (sk->sk_socket && !ssk->sk_socket) in __mptcp_finish_join()
835 mptcp_sock_graft(ssk, sk->sk_socket); in __mptcp_finish_join()
837 mptcp_propagate_sndbuf((struct sock *)msk, ssk); in __mptcp_finish_join()
838 mptcp_sockopt_sync_locked(msk, ssk); in __mptcp_finish_join()
848 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_flush_join_list() local
849 bool slow = lock_sock_fast(ssk); in __mptcp_flush_join_list()
852 if (!__mptcp_finish_join(msk, ssk)) in __mptcp_flush_join_list()
853 mptcp_subflow_reset(ssk); in __mptcp_flush_join_list()
854 unlock_sock_fast(ssk, slow); in __mptcp_flush_join_list()
1077 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_enter_memory_pressure() local
1080 tcp_enter_memory_pressure(ssk); in mptcp_enter_memory_pressure()
1081 sk_stream_moderate_sndbuf(ssk); in mptcp_enter_memory_pressure()
1126 static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk, in mptcp_check_allowed_size() argument
1138 if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) { in mptcp_check_allowed_size()
1139 tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd); in mptcp_check_allowed_size()
1140 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED); in mptcp_check_allowed_size()
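
A minimal sketch of the clamping idea in mptcp_check_allowed_size(): a transmit on one subflow may never run past the MPTCP-level window end, while the stats bump above records the cases where the shared window is wider than the subflow's cached snd_wnd. mptcp_wnd_end() is the in-tree window-end accessor; fallback handling is omitted and the example_* name is hypothetical.

static int example_allowed_size(const struct mptcp_sock *msk, u64 data_seq,
				int avail_size)
{
	u64 mptcp_snd_wnd = mptcp_wnd_end(msk) - data_seq;

	/* the connection-level window is the hard upper bound */
	return min_t(unsigned int, mptcp_snd_wnd, avail_size);
}
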
1175 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp) in __mptcp_alloc_tx_skb() argument
1183 if (likely(sk_wmem_schedule(ssk, skb->truesize))) { in __mptcp_alloc_tx_skb()
1184 tcp_skb_entail(ssk, skb); in __mptcp_alloc_tx_skb()
1192 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held) in mptcp_alloc_tx_skb() argument
1196 return __mptcp_alloc_tx_skb(sk, ssk, gfp); in mptcp_alloc_tx_skb()
1212 struct sock *ssk, in mptcp_update_infinite_map() argument
1221 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX); in mptcp_update_infinite_map()
1222 mptcp_subflow_ctx(ssk)->send_infinite_map = 0; in mptcp_update_infinite_map()
1224 mptcp_do_fallback(ssk); in mptcp_update_infinite_map()
1227 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, in mptcp_sendmsg_frag() argument
1243 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); in mptcp_sendmsg_frag()
1249 if (unlikely(!__tcp_can_send(ssk))) in mptcp_sendmsg_frag()
1253 info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags); in mptcp_sendmsg_frag()
1256 skb = tcp_write_queue_tail(ssk); in mptcp_sendmsg_frag()
1273 tcp_mark_push(tcp_sk(ssk), skb); in mptcp_sendmsg_frag()
1280 skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held); in mptcp_sendmsg_frag()
1290 copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy); in mptcp_sendmsg_frag()
1295 tcp_remove_empty_skb(ssk); in mptcp_sendmsg_frag()
1310 if (!sk_wmem_schedule(ssk, copy)) { in mptcp_sendmsg_frag()
1311 tcp_remove_empty_skb(ssk); in mptcp_sendmsg_frag()
1325 sk_wmem_queued_add(ssk, copy); in mptcp_sendmsg_frag()
1326 sk_mem_charge(ssk, copy); in mptcp_sendmsg_frag()
1327 WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy); in mptcp_sendmsg_frag()
1341 mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq; in mptcp_sendmsg_frag()
1351 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; in mptcp_sendmsg_frag()
1355 tcp_push_pending_frames(ssk); in mptcp_sendmsg_frag()
1361 if (mptcp_subflow_ctx(ssk)->send_infinite_map) in mptcp_sendmsg_frag()
1362 mptcp_update_infinite_map(msk, ssk, mpext); in mptcp_sendmsg_frag()
1364 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; in mptcp_sendmsg_frag()
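
A minimal sketch of the DSS mapping that mptcp_sendmsg_frag() attaches to freshly queued data, matching the subflow_seq assignment above; reuse of an existing mapping, checksums and the infinite-map case are omitted (example_* name hypothetical):

static void example_fill_dss(struct sock *ssk, struct mptcp_ext *mpext,
			     u64 data_seq, int copy)
{
	mpext->data_seq = data_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = copy;
	mpext->use_map = 1;
	mpext->dsn64 = 1;
}
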
1375 struct sock *ssk; member
1416 struct sock *ssk; in mptcp_subflow_get_send() local
1439 send_info[i].ssk = NULL; in mptcp_subflow_get_send()
1445 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_send()
1454 subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate); in mptcp_subflow_get_send()
1460 linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace); in mptcp_subflow_get_send()
1462 send_info[subflow->backup].ssk = ssk; in mptcp_subflow_get_send()
1470 send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk; in mptcp_subflow_get_send()
1483 ssk = send_info[SSK_MODE_ACTIVE].ssk; in mptcp_subflow_get_send()
1484 if (!ssk || !sk_stream_memory_free(ssk)) in mptcp_subflow_get_send()
1488 wmem = READ_ONCE(ssk->sk_wmem_queued); in mptcp_subflow_get_send()
1491 return ssk; in mptcp_subflow_get_send()
1494 subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_get_send()
1496 READ_ONCE(ssk->sk_pacing_rate) * burst, in mptcp_subflow_get_send()
1498 msk->last_snd = ssk; in mptcp_subflow_get_send()
1500 return ssk; in mptcp_subflow_get_send()
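
A simplified sketch of the selection idea in mptcp_subflow_get_send() above: estimate, in 32.32 fixed point, how long each active subflow would take to drain its queued bytes at its pacing rate, then pick the shortest. The backup/active split, the burst allowance and the msk->last_snd caching of the real function are omitted; the example_* name is hypothetical.

static struct sock *example_pick_subflow(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	u64 best_linger = U64_MAX;
	struct sock *best = NULL;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		u64 pace = READ_ONCE(ssk->sk_pacing_rate);
		u64 linger_time;

		if (!mptcp_subflow_active(subflow) || !pace)
			continue;

		/* time-to-drain estimate: queued bytes / pacing rate */
		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32,
				      pace);
		if (linger_time < best_linger) {
			best_linger = linger_time;
			best = ssk;
		}
	}
	return best;
}
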
1503 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info) in mptcp_push_release() argument
1505 tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal); in mptcp_push_release()
1506 release_sock(ssk); in mptcp_push_release()
1542 struct sock *prev_ssk = NULL, *ssk = NULL; in __mptcp_push_pending() local
1558 prev_ssk = ssk; in __mptcp_push_pending()
1559 ssk = mptcp_subflow_get_send(msk); in __mptcp_push_pending()
1564 if (ssk != prev_ssk && prev_ssk) in __mptcp_push_pending()
1566 if (!ssk) in __mptcp_push_pending()
1573 if (ssk != prev_ssk) in __mptcp_push_pending()
1574 lock_sock(ssk); in __mptcp_push_pending()
1576 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); in __mptcp_push_pending()
1580 mptcp_push_release(ssk, &info); in __mptcp_push_pending()
1594 if (ssk) in __mptcp_push_pending()
1595 mptcp_push_release(ssk, &info); in __mptcp_push_pending()
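
A condensed sketch of the loop in __mptcp_push_pending() above: the scheduler is re-run for every chunk, and the subflow lock is released and re-taken only when the chosen socket changes. The -EAGAIN retry and the msk-level copied accounting are simplified away; the example_* name is hypothetical.

static void example_push_pending(struct sock *sk)
{
	struct sock *prev_ssk = NULL, *ssk = NULL;
	struct mptcp_sendmsg_info info = { .flags = 0 };
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dfrag;

	while ((dfrag = mptcp_send_head(sk))) {
		int len = dfrag->data_len - dfrag->already_sent;

		info.sent = dfrag->already_sent;
		info.limit = dfrag->data_len;

		while (len > 0) {
			int ret;

			prev_ssk = ssk;
			ssk = mptcp_subflow_get_send(msk);

			/* the scheduler moved us away: flush and drop the
			 * old subflow lock before taking the new one
			 */
			if (ssk != prev_ssk && prev_ssk)
				mptcp_push_release(prev_ssk, &info);
			if (!ssk)
				goto out;
			if (ssk != prev_ssk)
				lock_sock(ssk);

			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
			if (ret <= 0)
				goto out;

			info.sent += ret;
			len -= ret;
			mptcp_update_post_push(msk, dfrag, ret);
		}
		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
	}

out:
	if (ssk)
		mptcp_push_release(ssk, &info);
}
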
1605 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk) in __mptcp_subflow_push_pending() argument
1628 xmit_ssk = first ? ssk : mptcp_subflow_get_send(mptcp_sk(sk)); in __mptcp_subflow_push_pending()
1631 if (xmit_ssk != ssk) { in __mptcp_subflow_push_pending()
1637 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); in __mptcp_subflow_push_pending()
1656 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, in __mptcp_subflow_push_pending()
1676 static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msghdr *msg, in mptcp_sendmsg_fastopen() argument
1683 lock_sock(ssk); in mptcp_sendmsg_fastopen()
1687 ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL); in mptcp_sendmsg_fastopen()
1690 release_sock(ssk); in mptcp_sendmsg_fastopen()
1971 struct sock *ssk; in mptcp_rcv_space_adjust() local
1974 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_rcv_space_adjust()
1975 slow = lock_sock_fast(ssk); in mptcp_rcv_space_adjust()
1976 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); in mptcp_rcv_space_adjust()
1977 tcp_sk(ssk)->window_clamp = window_clamp; in mptcp_rcv_space_adjust()
1978 tcp_cleanup_rbuf(ssk, 1); in mptcp_rcv_space_adjust()
1979 unlock_sock_fast(ssk, slow); in mptcp_rcv_space_adjust()
2016 struct sock *ssk = mptcp_subflow_recv_lookup(msk); in __mptcp_move_skbs() local
2023 if (likely(!ssk)) in __mptcp_move_skbs()
2026 slowpath = lock_sock_fast(ssk); in __mptcp_move_skbs()
2029 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved); in __mptcp_move_skbs()
2032 if (unlikely(ssk->sk_err)) in __mptcp_move_skbs()
2034 unlock_sock_fast(ssk, slowpath); in __mptcp_move_skbs()
2236 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_retrans() local
2242 if (!tcp_rtx_and_write_queues_empty(ssk)) { in mptcp_subflow_get_retrans()
2243 mptcp_pm_subflow_chk_stale(msk, ssk); in mptcp_subflow_get_retrans()
2250 backup = ssk; in mptcp_subflow_get_retrans()
2255 pick = ssk; in mptcp_subflow_get_retrans()
2325 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, in __mptcp_close_ssk() argument
2332 dispose_it = !msk->subflow || ssk != msk->subflow->sk; in __mptcp_close_ssk()
2336 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); in __mptcp_close_ssk()
2342 ssk->sk_lingertime = 0; in __mptcp_close_ssk()
2343 sock_set_flag(ssk, SOCK_LINGER); in __mptcp_close_ssk()
2349 tcp_disconnect(ssk, 0); in __mptcp_close_ssk()
2352 release_sock(ssk); in __mptcp_close_ssk()
2357 sock_orphan(ssk); in __mptcp_close_ssk()
2364 if (!inet_csk(ssk)->icsk_ulp_ops) { in __mptcp_close_ssk()
2368 if (ssk->sk_state == TCP_LISTEN) { in __mptcp_close_ssk()
2369 tcp_set_state(ssk, TCP_CLOSE); in __mptcp_close_ssk()
2370 mptcp_subflow_queue_clean(ssk); in __mptcp_close_ssk()
2371 inet_csk_listen_stop(ssk); in __mptcp_close_ssk()
2373 __tcp_close(ssk, 0); in __mptcp_close_ssk()
2376 __sock_put(ssk); in __mptcp_close_ssk()
2378 release_sock(ssk); in __mptcp_close_ssk()
2380 sock_put(ssk); in __mptcp_close_ssk()
2382 if (ssk == msk->first) in __mptcp_close_ssk()
2386 if (ssk == msk->last_snd) in __mptcp_close_ssk()
2393 void mptcp_close_ssk(struct sock *sk, struct sock *ssk, in mptcp_close_ssk() argument
2397 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); in mptcp_close_ssk()
2402 mptcp_pm_subflow_check_next(mptcp_sk(sk), ssk, subflow); in mptcp_close_ssk()
2404 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH); in mptcp_close_ssk()
2419 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close_subflow() local
2421 if (inet_sk_state_load(ssk) != TCP_CLOSE) in __mptcp_close_subflow()
2425 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue)) in __mptcp_close_subflow()
2428 mptcp_close_ssk((struct sock *)msk, ssk, subflow); in __mptcp_close_subflow()
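
A minimal sketch of the scan performed by __mptcp_close_subflow() above: dispose of every subflow that already reached TCP_CLOSE and has nothing left for the msk to consume (example_* name hypothetical):

static void example_reap_closed_subflows(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow, *tmp;

	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (inet_sk_state_load(ssk) != TCP_CLOSE)
			continue;

		/* skip subflows with unprocessed in-sequence data */
		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
			continue;

		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
	}
}
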
2506 struct sock *ssk; in __mptcp_retrans() local
2512 ssk = mptcp_subflow_get_retrans(msk); in __mptcp_retrans()
2531 if (!ssk) in __mptcp_retrans()
2534 lock_sock(ssk); in __mptcp_retrans()
2540 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); in __mptcp_retrans()
2550 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, in __mptcp_retrans()
2555 release_sock(ssk); in __mptcp_retrans()
2587 struct sock *ssk = msk->first; in mptcp_mp_fail_no_response() local
2590 if (!ssk) in mptcp_mp_fail_no_response()
2595 slow = lock_sock_fast(ssk); in mptcp_mp_fail_no_response()
2596 mptcp_subflow_reset(ssk); in mptcp_mp_fail_no_response()
2597 WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0); in mptcp_mp_fail_no_response()
2598 unlock_sock_fast(ssk, slow); in mptcp_mp_fail_no_response()
2758 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) in mptcp_subflow_shutdown() argument
2760 lock_sock(ssk); in mptcp_subflow_shutdown()
2762 switch (ssk->sk_state) { in mptcp_subflow_shutdown()
2768 tcp_disconnect(ssk, O_NONBLOCK); in mptcp_subflow_shutdown()
2773 ssk->sk_shutdown |= how; in mptcp_subflow_shutdown()
2774 tcp_shutdown(ssk, how); in mptcp_subflow_shutdown()
2776 pr_debug("Sending DATA_FIN on subflow %p", ssk); in mptcp_subflow_shutdown()
2777 tcp_send_ack(ssk); in mptcp_subflow_shutdown()
2784 release_sock(ssk); in mptcp_subflow_shutdown()
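
A minimal sketch of driving mptcp_subflow_shutdown() above across the whole connection, roughly the shape of the msk write-side shutdown that propagates a DATA_FIN to every subflow (example_* name hypothetical):

static void example_shutdown_all_subflows(struct sock *sk, int how)
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
		mptcp_subflow_shutdown(sk, mptcp_subflow_tcp_sock(subflow),
				       how);
}
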
2929 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close() local
2930 bool slow = lock_sock_fast_nested(ssk); in __mptcp_close()
2935 if (ssk == msk->first) in __mptcp_close()
2941 ssk->sk_socket = NULL; in __mptcp_close()
2942 ssk->sk_wq = NULL; in __mptcp_close()
2943 unlock_sock_fast(ssk, slow); in __mptcp_close()
2976 void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) in mptcp_copy_inaddrs() argument
2979 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); in mptcp_copy_inaddrs()
2982 msk->sk_v6_daddr = ssk->sk_v6_daddr; in mptcp_copy_inaddrs()
2983 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; in mptcp_copy_inaddrs()
2991 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; in mptcp_copy_inaddrs()
2992 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; in mptcp_copy_inaddrs()
2993 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; in mptcp_copy_inaddrs()
2994 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; in mptcp_copy_inaddrs()
2995 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; in mptcp_copy_inaddrs()
2996 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; in mptcp_copy_inaddrs()
3098 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) in mptcp_rcv_space_init() argument
3100 const struct tcp_sock *tp = tcp_sk(ssk); in mptcp_rcv_space_init()
3113 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); in mptcp_rcv_space_init()
3215 void __mptcp_check_push(struct sock *sk, struct sock *ssk) in __mptcp_check_push() argument
3223 if (xmit_ssk == ssk) in __mptcp_check_push()
3224 __mptcp_subflow_push_pending(sk, ssk); in __mptcp_check_push()
3290 static void schedule_3rdack_retransmission(struct sock *ssk) in schedule_3rdack_retransmission() argument
3292 struct inet_connection_sock *icsk = inet_csk(ssk); in schedule_3rdack_retransmission()
3293 struct tcp_sock *tp = tcp_sk(ssk); in schedule_3rdack_retransmission()
3296 if (mptcp_subflow_ctx(ssk)->fully_established) in schedule_3rdack_retransmission()
3309 sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); in schedule_3rdack_retransmission()
3312 void mptcp_subflow_process_delegated(struct sock *ssk) in mptcp_subflow_process_delegated() argument
3314 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_process_delegated()
3320 __mptcp_subflow_push_pending(sk, ssk); in mptcp_subflow_process_delegated()
3327 schedule_3rdack_retransmission(ssk); in mptcp_subflow_process_delegated()
3359 void mptcp_finish_connect(struct sock *ssk) in mptcp_finish_connect() argument
3366 subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_connect()
3389 mptcp_pm_new_connection(msk, ssk, 0); in mptcp_finish_connect()
3391 mptcp_rcv_space_init(msk, ssk); in mptcp_finish_connect()
3403 bool mptcp_finish_join(struct sock *ssk) in mptcp_finish_join() argument
3405 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_join()
3430 ret = __mptcp_finish_join(msk, ssk); in mptcp_finish_join()
3432 sock_hold(ssk); in mptcp_finish_join()
3436 sock_hold(ssk); in mptcp_finish_join()
3452 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); in mptcp_finish_join()
3731 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_stream_accept() local
3733 if (!ssk->sk_socket) in mptcp_stream_accept()
3734 mptcp_sock_graft(ssk, newsock); in mptcp_stream_accept()
3838 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_napi_poll() local
3840 bh_lock_sock_nested(ssk); in mptcp_napi_poll()
3841 if (!sock_owned_by_user(ssk) && in mptcp_napi_poll()
3843 mptcp_subflow_process_delegated(ssk); in mptcp_napi_poll()
3849 bh_unlock_sock(ssk); in mptcp_napi_poll()
3850 sock_put(ssk); in mptcp_napi_poll()
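
The listing ends with the delegated-action pattern of mptcp_napi_poll(). A minimal sketch of the per-subflow step, assuming a reference was taken when the action was delegated; the in-tree loop additionally re-checks that a delegated action is still pending, and the example_* name is hypothetical.

static void example_run_delegated(struct mptcp_subflow_context *subflow)
{
	struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

	bh_lock_sock_nested(ssk);
	if (!sock_owned_by_user(ssk))
		mptcp_subflow_process_delegated(ssk);
	/* else: the socket release callback will run the action later */
	bh_unlock_sock(ssk);

	/* drop the reference taken at delegation time */
	sock_put(ssk);
}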