Lines matching refs:ssk (all hits below are in net/mptcp/subflow.c)

30 static void mptcp_subflow_ops_undo_override(struct sock *ssk);
354 void mptcp_subflow_reset(struct sock *ssk) in mptcp_subflow_reset() argument
356 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_reset()
362 tcp_set_state(ssk, TCP_CLOSE); in mptcp_subflow_reset()
363 tcp_send_active_reset(ssk, GFP_ATOMIC); in mptcp_subflow_reset()
364 tcp_done(ssk); in mptcp_subflow_reset()
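
To connect the hits at 356-364: a sketch of the whole function as it reads in kernels of this vintage. The sock_hold()/sock_put() pairing and the MPTCP_WORK_CLOSE_SUBFLOW scheduling sit in the lines the listing skips and are assumptions, not part of the output above.

void mptcp_subflow_reset(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct sock *sk = subflow->conn;

        /* hold a reference: tcp_done() below may drop the last one
         * on the parent msk socket
         */
        sock_hold(sk);

        tcp_set_state(ssk, TCP_CLOSE);
        tcp_send_active_reset(ssk, GFP_ATOMIC);
        tcp_done(ssk);
        if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
            schedule_work(&mptcp_sk(sk)->work))
                return; /* the worker drops the reference for us */

        sock_put(sk);
}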
603 static void subflow_drop_ctx(struct sock *ssk) in subflow_drop_ctx() argument
605 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk); in subflow_drop_ctx()
610 subflow_ulp_fallback(ssk, ctx); in subflow_drop_ctx()
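
The hits at 605 and 610 belong to a small teardown helper; a sketch of the rest, where the NULL check, the conn put and the kfree_rcu() are assumed from typical code of this era:

static void subflow_drop_ctx(struct sock *ssk)
{
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

        if (!ctx)
                return;

        /* restore plain TCP ULP handling before dropping the ctx */
        subflow_ulp_fallback(ssk, ctx);
        if (ctx->conn)
                sock_put(ctx->conn);

        kfree_rcu(ctx, rcu);
}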
810 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb) in skb_is_fully_mapped() argument
812 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in skb_is_fully_mapped()
815 skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq; in skb_is_fully_mapped()
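
skb_consumed at 815 is the part of this skb the subflow has already read; a sketch of the full check, where the map_data_len comparison and mptcp_subflow_get_map_offset() fill the elided lines:

/* the mapping fully covers the skb if its unread tail fits inside
 * what remains of the current DSS mapping
 */
static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        unsigned int skb_consumed;

        skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
        if (WARN_ON_ONCE(skb_consumed >= skb->len))
                return true;

        return skb->len - skb_consumed <=
               subflow->map_data_len - mptcp_subflow_get_map_offset(subflow);
}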
823 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb) in validate_mapping() argument
825 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in validate_mapping()
826 u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; in validate_mapping()
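
ssn at 826 is the relative subflow sequence number; a sketch of how it is bounded against the current mapping window (the before()/!before() checks are assumptions consistent with the DSS semantics):

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

        /* mapping starts after the current data: unsupported */
        if (unlikely(before(ssn, subflow->map_subflow_seq)))
                return false;

        /* mapping ends before the current data: invalid */
        if (unlikely(!before(ssn, subflow->map_subflow_seq +
                                  subflow->map_data_len)))
                return false;

        return true;
}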
844 static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb, in validate_data_csum() argument
847 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in validate_data_csum()
867 seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len; in validate_data_csum()
887 if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) { in validate_data_csum()
892 if (unlikely(ssk->sk_state == TCP_CLOSE)) in validate_data_csum()
893 while ((skb = skb_peek(&ssk->sk_receive_queue))) in validate_data_csum()
894 sk_eat_skb(ssk, skb); in validate_data_csum()
918 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR); in validate_data_csum()
920 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX); in validate_data_csum()
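
Tying the hits at 847-920 together: a condensed sketch of the checksum walk over the receive queue. The delta bookkeeping, the csum accumulation and the final csum_fold() test are assumptions filling the elided lines; the real code also arranges to send MP_FAIL on mismatch.

static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
                                              bool csum_reqd)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 offset, seq, delta;
        __wsum csum = 0;

        if (!csum_reqd)
                return MAPPING_OK;

        /* checksum every byte covered by the current DSS mapping */
        delta = subflow->map_data_len - subflow->map_csum_len;
        for (;;) {
                seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
                offset = seq - TCP_SKB_CB(skb)->seq;
                if (offset < skb->len) {
                        u32 len = min_t(u32, skb->len - offset, delta);

                        csum = csum_block_add(csum,
                                              skb_checksum(skb, offset, len, 0),
                                              subflow->map_csum_len);
                        subflow->map_csum_len += len;
                        delta -= len;
                }
                if (!delta)
                        break;

                if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
                        /* a closed subflow can never complete the
                         * partial mapping: drop what is still queued
                         */
                        if (unlikely(ssk->sk_state == TCP_CLOSE))
                                while ((skb = skb_peek(&ssk->sk_receive_queue)))
                                        sk_eat_skb(ssk, skb);
                        return MAPPING_EMPTY;
                }
                skb = skb_queue_next(&ssk->sk_receive_queue, skb);
        }

        /* fold in the DSS trailer (elided); non-zero means mismatch */
        if (unlikely(csum_fold(csum))) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
                return MAPPING_INVALID;
        }

        return MAPPING_OK;
}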
927 static enum mapping_status get_mapping_status(struct sock *ssk, in get_mapping_status() argument
930 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in get_mapping_status()
937 skb = skb_peek(&ssk->sk_receive_queue); in get_mapping_status()
941 if (mptcp_check_fallback(ssk)) in get_mapping_status()
956 sk_eat_skb(ssk, skb); in get_mapping_status()
970 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX); in get_mapping_status()
1027 if (skb_is_fully_mapped(ssk, skb)) { in get_mapping_status()
1028 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH); in get_mapping_status()
1059 if (!validate_mapping(ssk, skb)) { in get_mapping_status()
1060 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH); in get_mapping_status()
1067 return validate_data_csum(ssk, skb, csum_reqd); in get_mapping_status()
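
A condensed sketch of the overall flow through get_mapping_status(); the DSS-option parsing in the middle is elided, the csum_enabled read is an assumption, and the comments mark what the listing does not show:

static enum mapping_status get_mapping_status(struct sock *ssk,
                                              struct mptcp_sock *msk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        bool csum_reqd = READ_ONCE(msk->csum_enabled);
        struct sk_buff *skb;

        skb = skb_peek(&ssk->sk_receive_queue);
        if (!skb)
                return MAPPING_EMPTY;

        /* after a fallback there are no DSS mappings to validate */
        if (mptcp_check_fallback(ssk))
                return MAPPING_DUMMY;

        /* ... parse the DSS option; a zero-length mapping skb is
         * consumed on the spot with sk_eat_skb(ssk, skb), and an
         * infinite mapping bumps MPTCP_MIB_INFINITEMAPRX ...
         */

        /* caching a second mapping while this skb is still fully
         * covered by the current one is not supported
         */
        if (skb_is_fully_mapped(ssk, skb)) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
                return MAPPING_INVALID;
        }

        /* the DSS mapping must agree with the TCP sequence space */
        if (!validate_mapping(ssk, skb)) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
                return MAPPING_INVALID;
        }

        return validate_data_csum(ssk, skb, csum_reqd);
}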
1070 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb, in mptcp_subflow_discard_data() argument
1073 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_discard_data()
1081 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA); in mptcp_subflow_discard_data()
1082 tcp_sk(ssk)->copied_seq += incr; in mptcp_subflow_discard_data()
1083 if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq)) in mptcp_subflow_discard_data()
1084 sk_eat_skb(ssk, skb); in mptcp_subflow_discard_data()
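
The discard helper is small enough to sketch whole; the incr computation and the FIN accounting are assumptions around the hits at 1081-1084:

static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
                                       u64 limit)
{
        bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
        u32 incr;

        /* consume at most the duplicate span, plus the FIN if any */
        incr = limit >= skb->len ? skb->len + fin : limit;

        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
        tcp_sk(ssk)->copied_seq += incr;
        if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
                sk_eat_skb(ssk, skb);
}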
1090 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk) in subflow_sched_work_if_closed() argument
1094 if (likely(ssk->sk_state != TCP_CLOSE)) in subflow_sched_work_if_closed()
1097 if (skb_queue_empty(&ssk->sk_receive_queue) && in subflow_sched_work_if_closed()
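
A sketch of the scheduling helper; the test_and_set_bit() guard and the mptcp_schedule_work() call fill the lines the listing skips:

static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
        struct sock *sk = (struct sock *)msk;

        if (likely(ssk->sk_state != TCP_CLOSE))
                return;

        /* schedule the msk worker only once per closing subflow, and
         * only after its receive queue has been drained
         */
        if (skb_queue_empty(&ssk->sk_receive_queue) &&
            !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
                mptcp_schedule_work(sk);
}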
1105 static bool subflow_check_data_avail(struct sock *ssk) in subflow_check_data_avail() argument
1107 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in subflow_check_data_avail()
1112 if (!skb_peek(&ssk->sk_receive_queue)) in subflow_check_data_avail()
1122 status = get_mapping_status(ssk, msk); in subflow_check_data_avail()
1123 trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue)); in subflow_check_data_avail()
1133 skb = skb_peek(&ssk->sk_receive_queue); in subflow_check_data_avail()
1153 mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq); in subflow_check_data_avail()
1163 subflow_sched_work_if_closed(msk, ssk); in subflow_check_data_avail()
1169 if (mptcp_has_another_subflow(ssk)) { in subflow_check_data_avail()
1170 while ((skb = skb_peek(&ssk->sk_receive_queue))) in subflow_check_data_avail()
1171 sk_eat_skb(ssk, skb); in subflow_check_data_avail()
1173 ssk->sk_err = EBADMSG; in subflow_check_data_avail()
1174 tcp_set_state(ssk, TCP_CLOSE); in subflow_check_data_avail()
1177 tcp_send_active_reset(ssk, GFP_ATOMIC); in subflow_check_data_avail()
1186 ssk->sk_err = EBADMSG; in subflow_check_data_avail()
1187 tcp_set_state(ssk, TCP_CLOSE); in subflow_check_data_avail()
1190 tcp_send_active_reset(ssk, GFP_ATOMIC); in subflow_check_data_avail()
1196 skb = skb_peek(&ssk->sk_receive_queue); in subflow_check_data_avail()
1200 subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; in subflow_check_data_avail()
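
The hits at 1169-1200 are the error tail of subflow_check_data_avail(); a sketch of that tail, with the send_mp_fail/fully_established branching and the reset reasons assumed:

fallback:
        if (subflow->send_mp_fail) {
                /* RFC 8684 3.7: with other subflows available, just
                 * drop this one's queued data
                 */
                if (mptcp_has_another_subflow(ssk)) {
                        while ((skb = skb_peek(&ssk->sk_receive_queue)))
                                sk_eat_skb(ssk, skb);
                }
                ssk->sk_err = EBADMSG;
                tcp_set_state(ssk, TCP_CLOSE);
                subflow->reset_transient = 0;
                subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
                tcp_send_active_reset(ssk, GFP_ATOMIC);
                WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
                return true;
        }

        if (subflow->mp_join || subflow->fully_established) {
                /* fatal protocol error: reset the subflow */
                ssk->sk_err = EBADMSG;
                tcp_set_state(ssk, TCP_CLOSE);
                subflow->reset_transient = 0;
                subflow->reset_reason = MPTCP_RST_EMPTCP;
                tcp_send_active_reset(ssk, GFP_ATOMIC);
                WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
                return false;
        }

        /* otherwise fall back to plain TCP, mapping the whole skb */
        __mptcp_do_fallback(msk);
        skb = skb_peek(&ssk->sk_receive_queue);
        subflow->map_valid = 1;
        subflow->map_seq = READ_ONCE(msk->ack_seq);
        subflow->map_data_len = skb->len;
        subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
        WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
        return true;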
1232 void mptcp_space(const struct sock *ssk, int *space, int *full_space) in mptcp_space() argument
1234 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_space()
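
mptcp_space() reports the parent's receive space, not the subflow's; a short sketch, with the __mptcp_space() and tcp_full_space() calls assumed:

void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
        const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        const struct sock *sk = subflow->conn;

        /* receive window bookkeeping happens at the msk level */
        *space = __mptcp_space(sk);
        *full_space = tcp_full_space(sk);
}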
1247 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_error_report() local
1248 int err = sock_error(ssk); in __mptcp_error_report()
1259 inet_sk_state_store(sk, inet_sk_state_load(ssk)); in __mptcp_error_report()
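
The three hits above sit inside a loop over all subflows; a sketch of that loop, where the fallback check and the smp_wmb() pairing are assumptions:

void __mptcp_error_report(struct sock *sk)
{
        struct mptcp_subflow_context *subflow;
        struct mptcp_sock *msk = mptcp_sk(sk);

        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                int err = sock_error(ssk);

                if (!err)
                        continue;

                /* only propagate errors on fallen-back sockets or
                 * on MPC connect
                 */
                if (sk->sk_state != TCP_SYN_SENT &&
                    !__mptcp_check_fallback(msk))
                        continue;

                inet_sk_state_store(sk, inet_sk_state_load(ssk));
                sk->sk_err = -err;

                /* paired with smp_rmb() on the poll() side */
                smp_wmb();
                sk_error_report(sk);
                break;
        }
}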
1269 static void subflow_error_report(struct sock *ssk) in subflow_error_report() argument
1271 struct sock *sk = mptcp_subflow_ctx(ssk)->conn; in subflow_error_report()
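
subflow_error_report() defers to the msk; a sketch, with the mptcp_data_lock() dance and the deferred-flag path assumed:

static void subflow_error_report(struct sock *ssk)
{
        struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

        mptcp_data_lock(sk);
        if (!sock_owned_by_user(sk))
                __mptcp_error_report(sk);
        else
                /* the msk is owned: let its release callback do it */
                set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags);
        mptcp_data_unlock(sk);
}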
1310 static void subflow_write_space(struct sock *ssk) in subflow_write_space() argument
1312 struct sock *sk = mptcp_subflow_ctx(ssk)->conn; in subflow_write_space()
1314 mptcp_propagate_sndbuf(sk, ssk); in subflow_write_space()
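
The write-space callback likewise forwards to the parent; the trailing mptcp_write_space() call is assumed:

static void subflow_write_space(struct sock *ssk)
{
        struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

        mptcp_propagate_sndbuf(sk, ssk);
        mptcp_write_space(sk);
}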
1388 struct sock *ssk; in __mptcp_subflow_connect() local
1402 ssk = sf->sk; in __mptcp_subflow_connect()
1403 subflow = mptcp_subflow_ctx(ssk); in __mptcp_subflow_connect()
1409 err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk); in __mptcp_subflow_connect()
1421 mptcp_info2sockaddr(loc, &addr, ssk->sk_family); in __mptcp_subflow_connect()
1428 ssk->sk_bound_dev_if = ifindex; in __mptcp_subflow_connect()
1441 mptcp_info2sockaddr(remote, &addr, ssk->sk_family); in __mptcp_subflow_connect()
1444 mptcp_sockopt_sync(msk, ssk); in __mptcp_subflow_connect()
1450 mptcp_sock_graft(ssk, sk->sk_socket); in __mptcp_subflow_connect()
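
The __mptcp_subflow_connect() hits, stitched into a skeleton in the order the listing gives them; everything between the listed calls (socket creation, token setup, the bind and connect themselves, error unwinding) is elided:

/* inside __mptcp_subflow_connect(struct sock *sk, ...) */
struct sock *ssk;
...
ssk = sf->sk;                          /* sf: freshly created subflow socket */
subflow = mptcp_subflow_ctx(ssk);
...
err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
...
mptcp_info2sockaddr(loc, &addr, ssk->sk_family);    /* local endpoint */
...
ssk->sk_bound_dev_if = ifindex;                     /* honor the PM ifindex */
...
mptcp_info2sockaddr(remote, &addr, ssk->sk_family); /* remote endpoint */
...
mptcp_sockopt_sync(msk, ssk);          /* mirror msk socket options */
...
mptcp_sock_graft(ssk, sk->sk_socket);  /* attach to the owning socket */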
1490 static void mptcp_subflow_ops_override(struct sock *ssk) in mptcp_subflow_ops_override() argument
1493 if (ssk->sk_prot == &tcpv6_prot) in mptcp_subflow_ops_override()
1494 ssk->sk_prot = &tcpv6_prot_override; in mptcp_subflow_ops_override()
1497 ssk->sk_prot = &tcp_prot_override; in mptcp_subflow_ops_override()
1500 static void mptcp_subflow_ops_undo_override(struct sock *ssk) in mptcp_subflow_ops_undo_override() argument
1503 if (ssk->sk_prot == &tcpv6_prot_override) in mptcp_subflow_ops_undo_override()
1504 ssk->sk_prot = &tcpv6_prot; in mptcp_subflow_ops_undo_override()
1507 ssk->sk_prot = &tcp_prot; in mptcp_subflow_ops_undo_override()
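
The gaps at 1491-1496 and 1501-1506 are almost certainly the CONFIG_MPTCP_IPV6 preprocessor guards, so the pair reads:

static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (ssk->sk_prot == &tcpv6_prot)
                ssk->sk_prot = &tcpv6_prot_override;
        else
#endif
                ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (ssk->sk_prot == &tcpv6_prot_override)
                ssk->sk_prot = &tcpv6_prot;
        else
#endif
                ssk->sk_prot = &tcp_prot;
}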
1678 static void subflow_ulp_release(struct sock *ssk) in subflow_ulp_release() argument
1680 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk); in subflow_ulp_release()
1697 mptcp_subflow_ops_undo_override(ssk); in subflow_ulp_release()
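
A sketch of the ULP release path around the hits at 1680 and 1697; the disposable/orphan handling in the middle is an assumption:

static void subflow_ulp_release(struct sock *ssk)
{
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
        bool release = true;
        struct sock *sk;

        if (!ctx)
                return;

        sk = ctx->conn;
        if (sk) {
                /* if the msk has been orphaned, keep the ctx alive so
                 * __mptcp_close_ssk() can free it later
                 */
                release = ctx->disposable || list_empty(&ctx->node);
                sock_put(sk);
        }

        mptcp_subflow_ops_undo_override(ssk);
        if (release)
                kfree_rcu(ctx, rcu);
}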
1752 static void tcp_release_cb_override(struct sock *ssk) in tcp_release_cb_override() argument
1754 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in tcp_release_cb_override()
1757 mptcp_subflow_process_delegated(ssk); in tcp_release_cb_override()
1759 tcp_release_cb(ssk); in tcp_release_cb_override()
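
Finally, the overridden release callback; the delegated-action test between the two hits is assumed:

static void tcp_release_cb_override(struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

        /* run any action another context delegated to this subflow
         * before handing control back to plain TCP
         */
        if (mptcp_subflow_has_delegated_action(subflow))
                mptcp_subflow_process_delegated(ssk);

        tcp_release_cb(ssk);
}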