Lines matching refs: subflow (net/mptcp/protocol.c)
84 struct mptcp_subflow_context *subflow; in __mptcp_socket_create() local
95 subflow = mptcp_subflow_ctx(ssock->sk); in __mptcp_socket_create()
96 list_add(&subflow->node, &msk->conn_list); in __mptcp_socket_create()
98 subflow->request_mptcp = 1; in __mptcp_socket_create()
99 subflow->subflow_id = msk->subflow_id++; in __mptcp_socket_create()
102 subflow->local_id_valid = 1; in __mptcp_socket_create()
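
The matches above (protocol.c lines 84-102) come from __mptcp_socket_create(), which sets up the first subflow: its context is linked onto the MPTCP socket's conn_list, marked as requesting MPTCP, and given the next per-connection subflow_id. A minimal userspace model of that linkage, with invented struct and field names (a sketch, not kernel code):

    #include <stdlib.h>

    /* Circular doubly linked list node, standing in for list_head. */
    struct node { struct node *prev, *next; };

    static void list_add(struct node *n, struct node *h) /* insert at head */
    {
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
    }

    struct subflow_ctx {                 /* models mptcp_subflow_context */
        struct node node;                /* linkage into msk->conn_list  */
        int request_mptcp;               /* ask for MP_CAPABLE           */
        unsigned int subflow_id;
    };

    struct msk {                         /* models mptcp_sock */
        struct node conn_list;
        unsigned int next_subflow_id;
    };

    /* Mirrors the steps visible in __mptcp_socket_create(). */
    static struct subflow_ctx *first_subflow_create(struct msk *m)
    {
        struct subflow_ctx *sf = calloc(1, sizeof(*sf));

        if (!sf)
            return NULL;
        list_add(&sf->node, &m->conn_list);
        sf->request_mptcp = 1;
        sf->subflow_id = m->next_subflow_id++;
        return sf;
    }
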
358 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skb() local
378 MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow); in __mptcp_move_skb()
496 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow) in mptcp_timeout_from_subflow() argument
498 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_timeout_from_subflow()
500 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? in mptcp_timeout_from_subflow()
506 struct mptcp_subflow_context *subflow; in mptcp_set_timeout() local
509 mptcp_for_each_subflow(mptcp_sk(sk), subflow) in mptcp_set_timeout()
510 tout = max(tout, mptcp_timeout_from_subflow(subflow)); in mptcp_set_timeout()
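
mptcp_set_timeout() (lines 506-510) reduces all subflows to a single retransmit timeout by taking the maximum of the per-subflow values; per mptcp_timeout_from_subflow() (lines 496-500), a subflow contributes its pending ICSK timer deadline only while it is not marked stale. A rough userspace model of that reduction, with simplified field names:

    #include <stddef.h>

    struct sf {
        int timer_pending;   /* models inet_csk(ssk)->icsk_pending    */
        int stale_count;     /* non-zero once the subflow looks stale */
        long timeout;        /* models the pending timer deadline     */
    };

    /* Mirrors mptcp_timeout_from_subflow(). */
    static long timeout_from_subflow(const struct sf *s)
    {
        return (s->timer_pending && !s->stale_count) ? s->timeout : 0;
    }

    /* Mirrors the mptcp_for_each_subflow() loop in mptcp_set_timeout(). */
    static long msk_timeout(const struct sf *sfs, size_t n)
    {
        long tout = 0;

        for (size_t i = 0; i < n; i++) {
            long t = timeout_from_subflow(&sfs[i]);

            if (t > tout)
                tout = t;
        }
        return tout;
    }
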
537 struct mptcp_subflow_context *subflow; in mptcp_send_ack() local
539 mptcp_for_each_subflow(msk, subflow) in mptcp_send_ack()
540 mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow)); in mptcp_send_ack()
569 struct mptcp_subflow_context *subflow; in mptcp_cleanup_rbuf() local
577 mptcp_for_each_subflow(msk, subflow) { in mptcp_cleanup_rbuf()
578 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_cleanup_rbuf()
639 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skbs_from_subflow() local
667 map_remaining = subflow->map_data_len - in __mptcp_move_skbs_from_subflow()
668 mptcp_subflow_get_map_offset(subflow); in __mptcp_move_skbs_from_subflow()
687 subflow->map_data_len = skb->len; in __mptcp_move_skbs_from_subflow()
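
In __mptcp_move_skbs_from_subflow() (lines 639-687) each skb is consumed against the current DSS mapping: the bytes still covered are map_data_len minus the already-consumed offset, and an skb that would overrun the mapping gets the map length clamped to skb->len (line 687). A small sketch of the remaining-bytes arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* Models an MPTCP DSS mapping: map_data_len bytes of data-sequence
     * space, of which map_offset have already been consumed. */
    struct mapping { uint32_t map_data_len; uint32_t map_offset; };

    /* Bytes of the mapping left to move to the MPTCP-level receive
     * queue, as computed in __mptcp_move_skbs_from_subflow(). */
    static uint32_t map_remaining(const struct mapping *m)
    {
        assert(m->map_offset <= m->map_data_len);
        return m->map_data_len - m->map_offset;
    }
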
805 struct mptcp_subflow_context *subflow; in __mptcp_error_report() local
808 mptcp_for_each_subflow(msk, subflow) in __mptcp_error_report()
809 if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow))) in __mptcp_error_report()
842 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_data_ready() local
850 if (unlikely(subflow->disposable)) in mptcp_data_ready()
901 struct mptcp_subflow_context *tmp, *subflow; in __mptcp_flush_join_list() local
904 list_for_each_entry_safe(subflow, tmp, join_list, node) { in __mptcp_flush_join_list()
905 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_flush_join_list()
908 list_move_tail(&subflow->node, &msk->conn_list); in __mptcp_flush_join_list()
948 struct mptcp_subflow_context *subflow; in mptcp_subflow_recv_lookup() local
952 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_recv_lookup()
953 if (READ_ONCE(subflow->data_avail)) in mptcp_subflow_recv_lookup()
954 return mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_recv_lookup()
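
mptcp_subflow_recv_lookup() (lines 948-954) returns the first subflow whose data_avail flag is set; the flag is written from the receive path, hence the READ_ONCE(). A userspace model using a relaxed atomic load in place of READ_ONCE():

    #include <stdatomic.h>
    #include <stddef.h>

    struct sf { atomic_int data_avail; /* set from the receive path */ };

    /* Mirrors mptcp_subflow_recv_lookup(): first subflow with queued
     * data, or NULL if none. */
    static struct sf *recv_lookup(struct sf *sfs, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            if (atomic_load_explicit(&sfs[i].data_avail,
                                     memory_order_relaxed))
                return &sfs[i];
        return NULL;
    }
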
1078 struct mptcp_subflow_context *subflow; in mptcp_enter_memory_pressure() local
1083 mptcp_for_each_subflow(msk, subflow) { in mptcp_enter_memory_pressure()
1084 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_enter_memory_pressure()
1380 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow) in mptcp_subflow_set_active() argument
1382 if (!subflow->stale) in mptcp_subflow_set_active()
1385 subflow->stale = 0; in mptcp_subflow_set_active()
1386 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER); in mptcp_subflow_set_active()
1389 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) in mptcp_subflow_active() argument
1391 if (unlikely(subflow->stale)) { in mptcp_subflow_active()
1392 u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp); in mptcp_subflow_active()
1394 if (subflow->stale_rcv_tstamp == rcv_tstamp) in mptcp_subflow_active()
1397 mptcp_subflow_set_active(subflow); in mptcp_subflow_active()
1399 return __mptcp_subflow_active(subflow); in mptcp_subflow_active()
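
The pair at lines 1380-1399 implements stale-subflow recovery: mptcp_subflow_active() keeps a stale subflow out of scheduling until the TCP-level rcv_tstamp moves past the value sampled when it went stale (meaning the peer is acking again), at which point mptcp_subflow_set_active() clears the flag and bumps MPTCP_MIB_SUBFLOWRECOVER. A sketch under those assumptions:

    #include <stdbool.h>
    #include <stdint.h>

    struct sf {
        bool stale;
        uint32_t stale_rcv_tstamp; /* rcv_tstamp sampled at stale time */
        uint32_t rcv_tstamp;       /* latest TCP receive timestamp */
        bool usable;               /* stands in for __mptcp_subflow_active() */
    };

    static void set_active(struct sf *s)
    {
        if (!s->stale)
            return;
        s->stale = false;
        /* the kernel also increments MPTCP_MIB_SUBFLOWRECOVER here */
    }

    /* Mirrors mptcp_subflow_active(). */
    static bool subflow_active(struct sf *s)
    {
        if (s->stale) {
            if (s->stale_rcv_tstamp == s->rcv_tstamp)
                return false;      /* still no progress from the peer */
            set_active(s);
        }
        return s->usable;
    }
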
1413 struct mptcp_subflow_context *subflow; in mptcp_subflow_get_send() local
1427 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_send()
1428 trace_mptcp_subflow_get_send(subflow); in mptcp_subflow_get_send()
1429 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_send()
1430 if (!mptcp_subflow_active(subflow)) in mptcp_subflow_get_send()
1433 tout = max(tout, mptcp_timeout_from_subflow(subflow)); in mptcp_subflow_get_send()
1434 nr_active += !subflow->backup; in mptcp_subflow_get_send()
1435 pace = subflow->avg_pacing_rate; in mptcp_subflow_get_send()
1438 subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate); in mptcp_subflow_get_send()
1439 pace = subflow->avg_pacing_rate; in mptcp_subflow_get_send()
1445 if (linger_time < send_info[subflow->backup].linger_time) { in mptcp_subflow_get_send()
1446 send_info[subflow->backup].ssk = ssk; in mptcp_subflow_get_send()
1447 send_info[subflow->backup].linger_time = linger_time; in mptcp_subflow_get_send()
1476 subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_get_send()
1477 subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem + in mptcp_subflow_get_send()
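
mptcp_subflow_get_send() (lines 1413-1477) is the default packet scheduler: for each active subflow it estimates a linger time (bytes already queued divided by the smoothed pacing rate), keeps the best candidate separately for normal and backup subflows, and falls back to the backup winner only when no non-backup subflow is active; the chosen subflow's avg_pacing_rate is then refreshed as a weighted average. A simplified userspace model (plain integer division stands in for the kernel's 32.32 fixed-point scaling):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct sf {
        bool active;           /* mptcp_subflow_active()   */
        bool backup;           /* subflow->backup          */
        uint64_t wmem_queued;  /* bytes queued on the flow */
        uint64_t pacing_rate;  /* bytes/sec, assumed > 0   */
    };

    /* Mirrors the bucketed minimum in mptcp_subflow_get_send(). */
    static struct sf *get_send(struct sf *sfs, size_t n)
    {
        struct sf *best[2] = { NULL, NULL };
        uint64_t best_linger[2] = { UINT64_MAX, UINT64_MAX };
        size_t nr_active = 0;

        for (size_t i = 0; i < n; i++) {
            struct sf *s = &sfs[i];
            uint64_t linger;

            if (!s->active)
                continue;
            nr_active += !s->backup;
            linger = s->wmem_queued / s->pacing_rate;
            if (linger < best_linger[s->backup]) {
                best[s->backup] = s;
                best_linger[s->backup] = linger;
            }
        }
        /* prefer a normal subflow; use a backup only if none is active */
        return nr_active ? best[0] : best[1];
    }
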
1576 struct mptcp_subflow_context *subflow; in __mptcp_push_pending() local
1584 mptcp_for_each_subflow(msk, subflow) { in __mptcp_push_pending()
1585 if (READ_ONCE(subflow->scheduled)) { in __mptcp_push_pending()
1586 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_push_pending()
1589 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_push_pending()
1642 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_subflow_push_pending() local
1649 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_subflow_push_pending()
1661 if (READ_ONCE(subflow->scheduled)) { in __mptcp_subflow_push_pending()
1662 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_subflow_push_pending()
1669 mptcp_for_each_subflow(msk, subflow) { in __mptcp_subflow_push_pending()
1670 if (READ_ONCE(subflow->scheduled)) { in __mptcp_subflow_push_pending()
1671 xmit_ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_subflow_push_pending()
1673 mptcp_subflow_delegate(subflow, in __mptcp_subflow_push_pending()
1950 struct mptcp_subflow_context *subflow; in mptcp_rcv_space_adjust() local
1971 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
1976 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); in mptcp_rcv_space_adjust()
2020 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
2024 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_rcv_space_adjust()
2274 struct mptcp_subflow_context *subflow; in mptcp_subflow_get_retrans() local
2277 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_retrans()
2278 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_retrans()
2280 if (!__mptcp_subflow_active(subflow)) in mptcp_subflow_get_retrans()
2286 min_stale_count = min_t(int, min_stale_count, subflow->stale_count); in mptcp_subflow_get_retrans()
2290 if (subflow->backup) { in mptcp_subflow_get_retrans()
2356 struct mptcp_subflow_context *subflow, in __mptcp_subflow_disconnect() argument
2365 mptcp_subflow_ctx_reset(subflow); in __mptcp_subflow_disconnect()
2380 struct mptcp_subflow_context *subflow, in __mptcp_close_ssk() argument
2403 list_del(&subflow->node); in __mptcp_close_ssk()
2413 subflow->send_fastclose = 1; in __mptcp_close_ssk()
2418 __mptcp_subflow_disconnect(ssk, subflow, flags); in __mptcp_close_ssk()
2424 subflow->disposable = 1; in __mptcp_close_ssk()
2432 kfree_rcu(subflow, rcu); in __mptcp_close_ssk()
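
__mptcp_close_ssk() (lines 2380-2432) shows the teardown discipline: the context is unlinked from conn_list, marked disposable (which makes late callbacks such as mptcp_data_ready(), line 850, back off), and released with kfree_rcu() so concurrent RCU readers can finish. A userspace sketch of that shape, with a plain callback standing in for the RCU grace period:

    #include <stdbool.h>
    #include <stdlib.h>

    struct sf {
        struct sf *next;   /* conn_list linkage (singly linked model) */
        bool disposable;   /* late callbacks must not touch this ctx  */
    };

    /* Mirrors the order of operations in __mptcp_close_ssk():
     * unlink, mark disposable, then defer the actual free.
     * defer_free() stands in for kfree_rcu(). */
    static void close_ssk(struct sf **list, struct sf *victim,
                          void (*defer_free)(struct sf *))
    {
        for (struct sf **p = list; *p; p = &(*p)->next) {
            if (*p == victim) {
                *p = victim->next;          /* list_del() */
                break;
            }
        }
        victim->disposable = true;
        defer_free(victim);                 /* kfree_rcu() in the kernel */
    }
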
2472 struct mptcp_subflow_context *subflow) in mptcp_close_ssk() argument
2480 mptcp_pm_subflow_check_next(mptcp_sk(sk), ssk, subflow); in mptcp_close_ssk()
2482 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH); in mptcp_close_ssk()
2492 struct mptcp_subflow_context *subflow, *tmp; in __mptcp_close_subflow() local
2497 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in __mptcp_close_subflow()
2498 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close_subflow()
2507 mptcp_close_ssk(sk, ssk, subflow); in __mptcp_close_subflow()
2524 struct mptcp_subflow_context *subflow, *tmp; in mptcp_check_fastclose() local
2532 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in mptcp_check_fastclose()
2533 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); in mptcp_check_fastclose()
2574 struct mptcp_subflow_context *subflow; in __mptcp_retrans() local
2606 mptcp_for_each_subflow(msk, subflow) { in __mptcp_retrans()
2607 if (READ_ONCE(subflow->scheduled)) { in __mptcp_retrans()
2610 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_retrans()
2612 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_retrans()
2690 struct mptcp_subflow_context *subflow, *tmp; in mptcp_do_fastclose() local
2694 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_do_fastclose()
2695 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), in mptcp_do_fastclose()
2696 subflow, MPTCP_CF_FASTCLOSE); in mptcp_do_fastclose()
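
mptcp_do_fastclose() (lines 2690-2696) tears down every subflow while walking the list, so it must use the _safe iterator, which caches the successor before the loop body can free the current entry; the same pattern appears in __mptcp_close_subflow() and mptcp_check_fastclose() above. A minimal model:

    #include <stdlib.h>

    struct sf { struct sf *next; };

    /* Mirrors mptcp_for_each_subflow_safe(): save the successor first,
     * because the body (here: free()) invalidates the current node. */
    static void fastclose_all(struct sf **list)
    {
        struct sf *s = *list, *tmp;

        while (s) {
            tmp = s->next;   /* grab successor before teardown */
            free(s);         /* __mptcp_close_ssk(..., MPTCP_CF_FASTCLOSE) */
            s = tmp;
        }
        *list = NULL;
    }
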
2899 struct mptcp_subflow_context *subflow; in mptcp_check_send_data_fin() local
2915 mptcp_for_each_subflow(msk, subflow) { in mptcp_check_send_data_fin()
2916 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); in mptcp_check_send_data_fin()
3001 struct mptcp_subflow_context *subflow; in __mptcp_close() local
3028 mptcp_for_each_subflow(msk, subflow) { in __mptcp_close()
3029 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close()
3038 subflow->fail_tout = 0; in __mptcp_close()
3261 struct mptcp_subflow_context *subflow; in mptcp_accept() local
3264 subflow = mptcp_subflow_ctx(newsk); in mptcp_accept()
3265 new_mptcp_sock = subflow->conn; in mptcp_accept()
3289 struct mptcp_subflow_context *subflow, *tmp; in mptcp_destroy_common() local
3295 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_destroy_common()
3296 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags); in mptcp_destroy_common()
3433 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_process_delegated() local
3434 struct sock *sk = subflow->conn; in mptcp_subflow_process_delegated()
3475 struct mptcp_subflow_context *subflow; in mptcp_finish_connect() local
3479 subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_connect()
3480 sk = subflow->conn; in mptcp_finish_connect()
3483 pr_debug("msk=%p, token=%u", sk, subflow->token); in mptcp_finish_connect()
3485 subflow->map_seq = subflow->iasn; in mptcp_finish_connect()
3486 subflow->map_subflow_seq = 1; in mptcp_finish_connect()
3491 WRITE_ONCE(msk->local_key, subflow->local_key); in mptcp_finish_connect()
3492 WRITE_ONCE(msk->write_seq, subflow->idsn + 1); in mptcp_finish_connect()
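
mptcp_finish_connect() (lines 3475-3492) seeds the MPTCP-level state from the first subflow once the MP_CAPABLE handshake completes: the msk inherits the subflow's local key, outgoing data sequencing starts one past the initial data sequence number (the handshake consumes one DSN, much as TCP's SYN consumes a sequence number), and the first receive mapping starts at the initial ack sequence. A sketch of that initialization:

    #include <stdint.h>

    struct sf {
        uint64_t local_key, idsn, iasn;
        uint64_t map_seq, map_subflow_seq;
    };
    struct msk { uint64_t local_key, write_seq; };

    /* Mirrors the assignments visible in mptcp_finish_connect(). */
    static void finish_connect(struct msk *m, struct sf *s)
    {
        s->map_seq = s->iasn;        /* first DSN expected from the peer */
        s->map_subflow_seq = 1;      /* subflow-relative data offset     */
        m->local_key = s->local_key;
        m->write_seq = s->idsn + 1;  /* first DSN this side will send    */
    }
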
3512 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_join() local
3513 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in mptcp_finish_join()
3517 pr_debug("msk=%p, subflow=%p", msk, subflow); in mptcp_finish_join()
3521 subflow->reset_reason = MPTCP_RST_EMPTCP; in mptcp_finish_join()
3526 if (!list_empty(&subflow->node)) { in mptcp_finish_join()
3542 list_add_tail(&subflow->node, &msk->conn_list); in mptcp_finish_join()
3546 list_add_tail(&subflow->node, &msk->join_list); in mptcp_finish_join()
3553 subflow->reset_reason = MPTCP_RST_EPROHIBIT; in mptcp_finish_join()
3636 struct mptcp_subflow_context *subflow) in mptcp_subflow_early_fallback() argument
3638 subflow->request_mptcp = 0; in mptcp_subflow_early_fallback()
3644 struct mptcp_subflow_context *subflow; in mptcp_connect() local
3654 subflow = mptcp_subflow_ctx(ssk); in mptcp_connect()
3660 mptcp_subflow_early_fallback(msk, subflow); in mptcp_connect()
3662 if (subflow->request_mptcp && mptcp_token_new_connect(ssk)) { in mptcp_connect()
3664 mptcp_subflow_early_fallback(msk, subflow); in mptcp_connect()
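
In mptcp_connect() (lines 3644-3664), failure to allocate a connection token downgrades the socket via mptcp_subflow_early_fallback() (lines 3636-3638), which simply clears request_mptcp so the handshake proceeds as plain TCP. A tiny model, with token_new_connect() as a hypothetical stand-in for mptcp_token_new_connect():

    #include <stdbool.h>

    struct sf { bool request_mptcp; };

    /* Mirrors mptcp_subflow_early_fallback(). */
    static void early_fallback(struct sf *s)
    {
        s->request_mptcp = false;
    }

    /* Hypothetical token allocation; returns false on failure
     * (e.g. a token collision). */
    static bool token_new_connect(void) { return true; }

    static void do_connect(struct sf *s)
    {
        if (s->request_mptcp && !token_new_connect())
            early_fallback(s);
        /* ...continue with the TCP-level connect... */
    }
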
3837 struct mptcp_subflow_context *subflow; in mptcp_stream_accept() local
3845 mptcp_for_each_subflow(msk, subflow) { in mptcp_stream_accept()
3846 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_stream_accept()
3963 struct mptcp_subflow_context *subflow; in mptcp_napi_poll() local
3967 while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) { in mptcp_napi_poll()
3968 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_napi_poll()
3972 mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0)); in mptcp_napi_poll()
3980 clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status); in mptcp_napi_poll()
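
mptcp_napi_poll() (lines 3963-3980) drains the per-CPU list of delegated subflow work: for each queued subflow it atomically claims all pending actions by exchanging delegated_status with 0, or, when the socket is owned by user context, only clears MPTCP_DELEGATE_SCHEDULED and lets release_sock() run the actions later. A userspace model of those two steps, with invented bit names:

    #include <stdatomic.h>

    enum {
        DELEGATE_SCHEDULED = 1u << 0,  /* queued on the per-CPU list */
        DELEGATE_SEND      = 1u << 1,
        DELEGATE_ACK       = 1u << 2,
    };

    struct sf { atomic_uint delegated_status; };

    /* Mirrors xchg(&subflow->delegated_status, 0): take every pending
     * action in one shot, leaving the word clear so the subflow can be
     * re-queued while the actions are processed. */
    static unsigned int claim_delegated(struct sf *s)
    {
        return atomic_exchange(&s->delegated_status, 0);
    }

    /* Mirrors the busy-socket path: drop only the SCHEDULED bit so the
     * subflow is dequeued; pending actions stay set for release_sock(). */
    static void dequeue_only(struct sf *s)
    {
        atomic_fetch_and(&s->delegated_status,
                         ~(unsigned int)DELEGATE_SCHEDULED);
    }
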