
2  * Copyright (c) 2018-2020 Intel Corporation
4 * SPDX-License-Identifier: Apache-2.0
63 #define TCP_RTO_MS (conn->rto)
102 struct net_buf *buf, *first = pkt->cursor.buf, *second = first->frags; in tcp_pkt_linearize()
109 ret = -EINVAL; in tcp_pkt_linearize()
119 ret = -ENOBUFS; in tcp_pkt_linearize()
123 net_buf_linearize(buf->data, net_buf_max_len(buf), pkt->frags, pos, len); in tcp_pkt_linearize()
126 len1 = first->len - (pkt->cursor.pos - pkt->cursor.buf->data); in tcp_pkt_linearize()
127 len2 = len - len1; in tcp_pkt_linearize()
129 first->len -= len1; in tcp_pkt_linearize()
132 size_t pull_len = MIN(second->len, len2); in tcp_pkt_linearize()
135 len2 -= pull_len; in tcp_pkt_linearize()
137 next = second->frags; in tcp_pkt_linearize()
138 if (second->len == 0) { in tcp_pkt_linearize()
144 buf->frags = second; in tcp_pkt_linearize()
145 first->frags = buf; in tcp_pkt_linearize()
194 return -ENOBUFS; in tcp_endpoint_set()
199 ep->sin.sin_port = src == TCP_EP_SRC ? th_sport(th) : in tcp_endpoint_set()
201 net_ipv4_addr_copy_raw((uint8_t *)&ep->sin.sin_addr, in tcp_endpoint_set()
203 ip->src : ip->dst); in tcp_endpoint_set()
204 ep->sa.sa_family = AF_INET; in tcp_endpoint_set()
206 ret = -EINVAL; in tcp_endpoint_set()
218 return -ENOBUFS; in tcp_endpoint_set()
223 ep->sin6.sin6_port = src == TCP_EP_SRC ? th_sport(th) : in tcp_endpoint_set()
225 net_ipv6_addr_copy_raw((uint8_t *)&ep->sin6.sin6_addr, in tcp_endpoint_set()
227 ip->src : ip->dst); in tcp_endpoint_set()
228 ep->sa.sa_family = AF_INET6; in tcp_endpoint_set()
230 ret = -EINVAL; in tcp_endpoint_set()
237 ret = -EINVAL; in tcp_endpoint_set()
248 const struct tcp *conn = ctx->tcp; in net_tcp_endpoint_copy()
249 socklen_t newlen = ctx->local.family == AF_INET ? in net_tcp_endpoint_copy()
258 if (conn->state < TCP_ESTABLISHED) { in net_tcp_endpoint_copy()
259 if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->local.family == AF_INET) { in net_tcp_endpoint_copy()
260 memcpy(&net_sin(local)->sin_addr, in net_tcp_endpoint_copy()
261 net_sin_ptr(&ctx->local)->sin_addr, in net_tcp_endpoint_copy()
263 net_sin(local)->sin_port = net_sin_ptr(&ctx->local)->sin_port; in net_tcp_endpoint_copy()
264 net_sin(local)->sin_family = AF_INET; in net_tcp_endpoint_copy()
265 } else if (IS_ENABLED(CONFIG_NET_IPV6) && ctx->local.family == AF_INET6) { in net_tcp_endpoint_copy()
266 memcpy(&net_sin6(local)->sin6_addr, in net_tcp_endpoint_copy()
267 net_sin6_ptr(&ctx->local)->sin6_addr, in net_tcp_endpoint_copy()
269 net_sin6(local)->sin6_port = net_sin6_ptr(&ctx->local)->sin6_port; in net_tcp_endpoint_copy()
270 net_sin6(local)->sin6_family = AF_INET6; in net_tcp_endpoint_copy()
271 net_sin6(local)->sin6_scope_id = in net_tcp_endpoint_copy()
272 net_sin6_ptr(&ctx->local)->sin6_scope_id; in net_tcp_endpoint_copy()
274 return -EINVAL; in net_tcp_endpoint_copy()
277 memcpy(local, &conn->src.sa, newlen); in net_tcp_endpoint_copy()
282 memcpy(peer, &conn->dst.sa, newlen); in net_tcp_endpoint_copy()
298 len += snprintk(buf + len, BUF_SIZE - len, "SYN,"); in tcp_flags()
301 len += snprintk(buf + len, BUF_SIZE - len, "FIN,"); in tcp_flags()
304 len += snprintk(buf + len, BUF_SIZE - len, "ACK,"); in tcp_flags()
307 len += snprintk(buf + len, BUF_SIZE - len, "PSH,"); in tcp_flags()
310 len += snprintk(buf + len, BUF_SIZE - len, "RST,"); in tcp_flags()
313 len += snprintk(buf + len, BUF_SIZE - len, "URG,"); in tcp_flags()
317 buf[len - 1] = '\0'; /* delete the last comma */ in tcp_flags()
327 size_t tcp_options_len = (th_off(th) - 5) * 4; in tcp_data_len()
328 int len = net_pkt_get_len(pkt) - net_pkt_ip_hdr_len(pkt) - in tcp_data_len()
329 net_pkt_ip_opts_len(pkt) - sizeof(*th) - tcp_options_len; in tcp_data_len()
344 len += snprintk(buf + len, BUF_SIZE - len, in tcp_th()
349 len += snprintk(buf + len, BUF_SIZE - len, in tcp_th()
353 len += snprintk(buf + len, BUF_SIZE - len, in tcp_th()
357 len += snprintk(buf + len, BUF_SIZE - len, in tcp_th()
367 net_pkt_lladdr_dst(pkt)->type == NET_LINK_IEEE802154))
438 conn->rto = (uint16_t)rto; in tcp_derive_rto()
451 conn, step, conn->ca.cwnd, conn->ca.ssthresh, in tcp_new_reno_log()
452 conn->ca.pending_fast_retransmit_bytes); in tcp_new_reno_log()
457 conn->ca.cwnd = conn_mss(conn) * TCP_CONGESTION_INITIAL_WIN; in tcp_new_reno_init()
458 conn->ca.ssthresh = conn_mss(conn) * TCP_CONGESTION_INITIAL_SSTHRESH; in tcp_new_reno_init()
459 conn->ca.pending_fast_retransmit_bytes = 0; in tcp_new_reno_init()
465 if (conn->ca.pending_fast_retransmit_bytes == 0) { in tcp_new_reno_fast_retransmit()
466 conn->ca.ssthresh = MAX(conn_mss(conn) * 2, conn->unacked_len / 2); in tcp_new_reno_fast_retransmit()
468 conn->ca.cwnd = conn_mss(conn) * 3 + conn->ca.ssthresh; in tcp_new_reno_fast_retransmit()
469 conn->ca.pending_fast_retransmit_bytes = conn->unacked_len; in tcp_new_reno_fast_retransmit()
476 conn->ca.ssthresh = MAX(conn_mss(conn) * 2, conn->unacked_len / 2); in tcp_new_reno_timeout()
477 conn->ca.cwnd = conn_mss(conn); in tcp_new_reno_timeout()
484 int32_t new_win = conn->ca.cwnd; in tcp_new_reno_dup_ack()
487 conn->ca.cwnd = MIN(new_win, UINT16_MAX); in tcp_new_reno_dup_ack()
493 int32_t new_win = conn->ca.cwnd; in tcp_new_reno_pkts_acked()
496 if (conn->ca.pending_fast_retransmit_bytes == 0) { in tcp_new_reno_pkts_acked()
497 if (conn->ca.cwnd < conn->ca.ssthresh) { in tcp_new_reno_pkts_acked()
501 new_win += ((win_inc * win_inc) + conn->ca.cwnd - 1) / conn->ca.cwnd; in tcp_new_reno_pkts_acked()
503 conn->ca.cwnd = MIN(new_win, UINT16_MAX); in tcp_new_reno_pkts_acked()
506 if (conn->ca.pending_fast_retransmit_bytes <= acked_len) { in tcp_new_reno_pkts_acked()
507 conn->ca.pending_fast_retransmit_bytes = 0; in tcp_new_reno_pkts_acked()
508 conn->ca.cwnd = conn->ca.ssthresh; in tcp_new_reno_pkts_acked()
510 conn->ca.pending_fast_retransmit_bytes -= acked_len; in tcp_new_reno_pkts_acked()
511 conn->ca.cwnd -= acked_len; in tcp_new_reno_pkts_acked()
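
The tcp_new_reno_pkts_acked() fragment above switches between the two New Reno growth regimes. A condensed sketch of the same arithmetic follows; it assumes win_inc is the per-ACK increment (capped at one MSS in the full source) and that the slow-start branch, which is elided in the listing, simply adds that full increment. Illustration only, not a drop-in replacement.

#include <stdint.h>
#include <zephyr/sys/util.h>	/* MIN(); header path differs on older Zephyr */

/* Illustrative only: next congestion window after one ACK. */
static uint16_t new_reno_next_cwnd(uint16_t cwnd, uint16_t ssthresh,
				   uint16_t win_inc)
{
	int32_t new_win = cwnd;

	if (cwnd < ssthresh) {
		/* Slow start: grow by the full increment per ACK. */
		new_win += win_inc;
	} else {
		/* Congestion avoidance: roughly one MSS per RTT,
		 * i.e. win_inc^2 / cwnd per ACK, rounded up.
		 */
		new_win += ((win_inc * win_inc) + cwnd - 1) / cwnd;
	}

	return (uint16_t)MIN(new_win, UINT16_MAX);
}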
561 conn->keep_alive = false; in keep_alive_timer_init()
562 conn->keep_idle = CONFIG_NET_TCP_KEEPIDLE_DEFAULT; in keep_alive_timer_init()
563 conn->keep_intvl = CONFIG_NET_TCP_KEEPINTVL_DEFAULT; in keep_alive_timer_init()
564 conn->keep_cnt = CONFIG_NET_TCP_KEEPCNT_DEFAULT; in keep_alive_timer_init()
566 conn->keep_idle, conn->keep_intvl, conn->keep_cnt); in keep_alive_timer_init()
567 k_work_init_delayable(&conn->keepalive_timer, tcp_send_keepalive_probe); in keep_alive_timer_init()
572 to->keep_alive = from->keep_alive; in keep_alive_param_copy()
573 to->keep_idle = from->keep_idle; in keep_alive_param_copy()
574 to->keep_intvl = from->keep_intvl; in keep_alive_param_copy()
575 to->keep_cnt = from->keep_cnt; in keep_alive_param_copy()
580 if (!conn->keep_alive || conn->state != TCP_ESTABLISHED) { in keep_alive_timer_restart()
584 conn->keep_cur = 0; in keep_alive_timer_restart()
585 k_work_reschedule_for_queue(&tcp_work_q, &conn->keepalive_timer, in keep_alive_timer_restart()
586 K_SECONDS(conn->keep_idle)); in keep_alive_timer_restart()
591 k_work_cancel_delayable(&conn->keepalive_timer); in keep_alive_timer_stop()
599 return -EINVAL; in set_tcp_keep_alive()
604 return -EINVAL; in set_tcp_keep_alive()
607 conn->keep_alive = (bool)keep_alive; in set_tcp_keep_alive()
623 return -EINVAL; in set_tcp_keep_idle()
628 return -EINVAL; in set_tcp_keep_idle()
631 conn->keep_idle = keep_idle; in set_tcp_keep_idle()
643 return -EINVAL; in set_tcp_keep_intvl()
648 return -EINVAL; in set_tcp_keep_intvl()
651 conn->keep_intvl = keep_intvl; in set_tcp_keep_intvl()
663 return -EINVAL; in set_tcp_keep_cnt()
668 return -EINVAL; in set_tcp_keep_cnt()
671 conn->keep_cnt = keep_cnt; in set_tcp_keep_cnt()
682 return -EINVAL; in get_tcp_keep_alive()
685 *((int *)value) = (int)conn->keep_alive; in get_tcp_keep_alive()
694 return -EINVAL; in get_tcp_keep_idle()
697 *((int *)value) = (int)conn->keep_idle; in get_tcp_keep_idle()
706 return -EINVAL; in get_tcp_keep_intvl()
709 *((int *)value) = (int)conn->keep_intvl; in get_tcp_keep_intvl()
718 return -EINVAL; in get_tcp_keep_cnt()
721 *((int *)value) = (int)conn->keep_cnt; in get_tcp_keep_cnt()
732 #define set_tcp_keep_alive(...) (-ENOPROTOOPT)
733 #define set_tcp_keep_idle(...) (-ENOPROTOOPT)
734 #define set_tcp_keep_intvl(...) (-ENOPROTOOPT)
735 #define set_tcp_keep_cnt(...) (-ENOPROTOOPT)
736 #define get_tcp_keep_alive(...) (-ENOPROTOOPT)
737 #define get_tcp_keep_idle(...) (-ENOPROTOOPT)
738 #define get_tcp_keep_intvl(...) (-ENOPROTOOPT)
739 #define get_tcp_keep_cnt(...) (-ENOPROTOOPT)
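
The keep-alive knobs above are normally reached through net_tcp_set_option()/net_tcp_get_option() from the socket layer. A hedged usage sketch, assuming the options are exposed under the POSIX-style names SO_KEEPALIVE, TCP_KEEPIDLE, TCP_KEEPINTVL and TCP_KEEPCNT; availability and header paths depend on CONFIG_NET_TCP_KEEPALIVE and the socket configuration.

#include <errno.h>
#include <zephyr/net/socket.h>

/* Illustrative sketch: enable keep-alive with a 60 s idle time, 10 s
 * probe interval and 5 unanswered probes before the peer is dropped.
 */
static int enable_keepalive(int sock)
{
	int one = 1, idle = 60, intvl = 10, cnt = 5;

	if (zsock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
			     &one, sizeof(one)) < 0 ||
	    zsock_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE,
			     &idle, sizeof(idle)) < 0 ||
	    zsock_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL,
			     &intvl, sizeof(intvl)) < 0 ||
	    zsock_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT,
			     &cnt, sizeof(cnt)) < 0) {
		return -errno;
	}

	return 0;
}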
747 k_work_cancel_delayable(&conn->send_timer); in tcp_send_queue_flush()
749 while ((pkt = tcp_slist(conn, &conn->send_queue, get, in tcp_send_queue_flush()
761 if (conn->test_closed_cb != NULL) { in tcp_conn_release()
762 conn->test_closed_cb(conn, conn->test_user_data); in tcp_conn_release()
769 while ((pkt = k_fifo_get(&conn->recv_data, K_NO_WAIT)) != NULL) { in tcp_conn_release()
773 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_conn_release()
775 if (conn->context->conn_handler) { in tcp_conn_release()
776 net_conn_unregister(conn->context->conn_handler); in tcp_conn_release()
777 conn->context->conn_handler = NULL; in tcp_conn_release()
780 /* As the TCP socket could be closed without connect being called, in tcp_conn_release()
783 if (conn->iface != NULL && conn->addr_ref_done) { in tcp_conn_release()
784 net_if_addr_unref(conn->iface, conn->src.sa.sa_family, in tcp_conn_release()
785 conn->src.sa.sa_family == AF_INET ? in tcp_conn_release()
786 (const void *)&conn->src.sin.sin_addr : in tcp_conn_release()
787 (const void *)&conn->src.sin6.sin6_addr); in tcp_conn_release()
790 conn->context->tcp = NULL; in tcp_conn_release()
791 conn->state = TCP_UNUSED; in tcp_conn_release()
795 (void)k_work_cancel_delayable(&conn->send_data_timer); in tcp_conn_release()
796 tcp_pkt_unref(conn->send_data); in tcp_conn_release()
799 tcp_pkt_unref(conn->queue_recv_data); in tcp_conn_release()
802 (void)k_work_cancel_delayable(&conn->timewait_timer); in tcp_conn_release()
803 (void)k_work_cancel_delayable(&conn->fin_timer); in tcp_conn_release()
804 (void)k_work_cancel_delayable(&conn->persist_timer); in tcp_conn_release()
805 (void)k_work_cancel_delayable(&conn->ack_timer); in tcp_conn_release()
806 (void)k_work_cancel_delayable(&conn->send_timer); in tcp_conn_release()
807 (void)k_work_cancel_delayable(&conn->recv_queue_timer); in tcp_conn_release()
810 k_mutex_unlock(&conn->lock); in tcp_conn_release()
812 net_context_unref(conn->context); in tcp_conn_release()
813 conn->context = NULL; in tcp_conn_release()
816 sys_slist_find_and_remove(&tcp_conns, &conn->next); in tcp_conn_release()
827 NET_ASSERT(ctx->tcp != NULL); in tcp_install_close_cb()
829 ((struct tcp *)ctx->tcp)->test_closed_cb = cb; in tcp_install_close_cb()
830 ((struct tcp *)ctx->tcp)->test_user_data = user_data; in tcp_install_close_cb()
836 int ref_count = atomic_get(&conn->ref_count); in tcp_conn_unref()
840 ref_count = atomic_dec(&conn->ref_count) - 1; in tcp_conn_unref()
842 tp_out(net_context_get_family(conn->context), conn->iface, in tcp_conn_unref()
851 k_work_submit_to_queue(&tcp_work_q, &conn->conn_release); in tcp_conn_unref()
867 NET_DBG("conn: %p closed by TCP stack (%s():%d)", conn, caller, line); in tcp_conn_close_debug()
869 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_conn_close_debug()
872 k_mutex_unlock(&conn->lock); in tcp_conn_close_debug()
874 if (conn->in_connect) { in tcp_conn_close_debug()
875 if (conn->connect_cb) { in tcp_conn_close_debug()
876 conn->connect_cb(conn->context, status, conn->context->user_data); in tcp_conn_close_debug()
879 conn->connect_cb = NULL; in tcp_conn_close_debug()
882 conn->in_connect = false; in tcp_conn_close_debug()
883 k_sem_reset(&conn->connect_sem); in tcp_conn_close_debug()
884 } else if (conn->context->recv_cb) { in tcp_conn_close_debug()
885 conn->context->recv_cb(conn->context, NULL, NULL, NULL, in tcp_conn_close_debug()
886 status, conn->recv_user_data); in tcp_conn_close_debug()
889 k_sem_give(&conn->tx_sem); in tcp_conn_close_debug()
900 pkt = tcp_slist(conn, &conn->send_queue, peek_head, in tcp_send_process_no_lock()
906 NET_DBG("%s %s", tcp_th(pkt), conn->in_retransmission ? in tcp_send_process_no_lock()
909 if (conn->in_retransmission) { in tcp_send_process_no_lock()
910 if (conn->send_retries > 0) { in tcp_send_process_no_lock()
915 conn->send_retries--; in tcp_send_process_no_lock()
924 uint8_t fl = th_get(pkt)->th_flags; in tcp_send_process_no_lock()
928 pkt = forget ? tcp_slist(conn, &conn->send_queue, get, in tcp_send_process_no_lock()
943 !k_work_delayable_remaining_get(&conn->send_timer)) { in tcp_send_process_no_lock()
944 conn->send_retries = tcp_retries; in tcp_send_process_no_lock()
945 conn->in_retransmission = true; in tcp_send_process_no_lock()
949 if (conn->in_retransmission) { in tcp_send_process_no_lock()
950 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer, in tcp_send_process_no_lock()
952 } else if (local && !sys_slist_is_empty(&conn->send_queue)) { in tcp_send_process_no_lock()
953 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer, in tcp_send_process_no_lock()
967 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_send_process()
971 k_mutex_unlock(&conn->lock); in tcp_send_process()
974 tcp_conn_close(conn, -ETIMEDOUT); in tcp_send_process()
980 if (conn->in_retransmission == false) { in tcp_send_timer_cancel()
984 k_work_cancel_delayable(&conn->send_timer); in tcp_send_timer_cancel()
987 struct net_pkt *pkt = tcp_slist(conn, &conn->send_queue, get, in tcp_send_timer_cancel()
995 if (sys_slist_is_empty(&conn->send_queue)) { in tcp_send_timer_cancel()
996 conn->in_retransmission = false; in tcp_send_timer_cancel()
998 conn->send_retries = tcp_retries; in tcp_send_timer_cancel()
999 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer, in tcp_send_timer_cancel()
1011 if (net_context_get_family(conn->context) != AF_INET6) { in tcp_nbr_reachability_hint()
1016 iface = net_context_get_iface(conn->context); in tcp_nbr_reachability_hint()
1018 /* Ensure that Neighbor Reachability hints are rate-limited (using threshold in tcp_nbr_reachability_hint()
1021 if ((now - conn->last_nd_hint_time) > (net_if_ipv6_get_reachable_time(iface) / 2)) { in tcp_nbr_reachability_hint()
1022 net_ipv6_nbr_reachability_hint(iface, &conn->dst.sin6.sin6_addr); in tcp_nbr_reachability_hint()
1023 conn->last_nd_hint_time = now; in tcp_nbr_reachability_hint()
1063 tcp_state_to_str(conn->state, false), in tcp_conn_state()
1064 conn->seq, conn->ack); in tcp_conn_state()
1100 recv_options->mss_found = false; in tcp_options_check()
1101 recv_options->wnd_found = false; in tcp_options_check()
1103 for ( ; options && len >= 1; options += opt_len, len -= opt_len) { in tcp_options_check()
1136 recv_options->mss = in tcp_options_check()
1138 recv_options->mss_found = true; in tcp_options_check()
1139 NET_DBG("MSS=%hu", recv_options->mss); in tcp_options_check()
1147 recv_options->window = opt; in tcp_options_check()
1148 recv_options->wnd_found = true; in tcp_options_check()
1164 int32_t threshold = MIN(conn_mss(conn), conn->recv_win_max / 2); in tcp_short_window()
1166 if (conn->recv_win > threshold) { in tcp_short_window()
1175 int32_t threshold = MAX(conn_mss(conn), conn->recv_win_max / 2); in tcp_need_window_update()
1180 return (conn->recv_win == conn->recv_win_max && in tcp_need_window_update()
1181 conn->recv_win_sent <= threshold); in tcp_need_window_update()
1190 * @return 0 on success, -EINVAL
1199 new_win = conn->recv_win + delta; in tcp_update_recv_wnd()
1202 } else if (new_win > conn->recv_win_max) { in tcp_update_recv_wnd()
1203 new_win = conn->recv_win_max; in tcp_update_recv_wnd()
1208 conn->recv_win = new_win; in tcp_update_recv_wnd()
1214 conn->state == TCP_ESTABLISHED) { in tcp_update_recv_wnd()
1215 k_work_cancel_delayable(&conn->ack_timer); in tcp_update_recv_wnd()
1228 !net_pkt_is_empty(conn->queue_recv_data)) { in tcp_check_pending_data()
1233 * 3 | 3 | 6 | 4 | 3+3-6= 0 | 6-3-3=0 | Append in tcp_check_pending_data()
1234 * 3 | 4 | 6 | 4 | 3+4-6 = 1 | 6-3-4=-1 | Append, pull from queue in tcp_check_pending_data()
1235 * 3 | 7 | 6 | 4 | 3+7-6 = 4 | 6-3-7=-4 | Drop queued data in tcp_check_pending_data()
1236 * 3 | 8 | 6 | 4 | 3+8-6 = 5 | 6-3-8=-5 | Drop queued data in tcp_check_pending_data()
1237 * 6 | 5 | 6 | 4 | 6+5-6 = 5 | 6-6-5=-5 | Drop queued data in tcp_check_pending_data()
1238 * 6 | 4 | 6 | 4 | 6+4-6 = 4 | 6-6-4=-4 | Drop queued data / packet in tcp_check_pending_data()
1239 * 10 | 2 | 6 | 4 | 10+2-6= 6 | 6-10-2=-6| Should not happen, dropping queue in tcp_check_pending_data()
1240 * 7 | 4 | 6 | 4 | 7+4-6 = 5 | 6-7-4=-5 | Should not happen, dropping queue in tcp_check_pending_data()
1241 * 11 | 2 | 6 | 4 | 11+2-6= 7 | 6-11-2=-7| Should not happen, dropping queue in tcp_check_pending_data()
1242 * 2 | 3 | 6 | 4 | 2+3-6= MI | 6-2-3=1 | Keep queued data in tcp_check_pending_data()
1250 pending_seq = tcp_get_seq(conn->queue_recv_data->buffer); in tcp_check_pending_data()
1251 end_offset = expected_seq - pending_seq; in tcp_check_pending_data()
1252 gap_size = (int32_t)(pending_seq - th_seq(th) - ((uint32_t)len)); in tcp_check_pending_data()
1253 pending_len = net_pkt_get_len(conn->queue_recv_data); in tcp_check_pending_data()
1257 pending_len -= end_offset; in tcp_check_pending_data()
1263 net_buf_frag_add(pkt->buffer, in tcp_check_pending_data()
1264 conn->queue_recv_data->buffer); in tcp_check_pending_data()
1265 conn->queue_recv_data->buffer = NULL; in tcp_check_pending_data()
1267 k_work_cancel_delayable(&conn->recv_queue_timer); in tcp_check_pending_data()
1271 net_buf_unref(conn->queue_recv_data->buffer); in tcp_check_pending_data()
1272 conn->queue_recv_data->buffer = NULL; in tcp_check_pending_data()
1274 k_work_cancel_delayable(&conn->recv_queue_timer); in tcp_check_pending_data()
1293 if (conn->context->recv_cb) { in tcp_data_get()
1294 /* If there is any out-of-order pending data, then pass it in tcp_data_get()
1302 net_pkt_skip(pkt, net_pkt_get_len(pkt) - *len); in tcp_data_get()
1304 tcp_update_recv_wnd(conn, -*len); in tcp_data_get()
1305 if (*len > conn->recv_win_sent) { in tcp_data_get()
1306 conn->recv_win_sent = 0; in tcp_data_get()
1308 conn->recv_win_sent -= *len; in tcp_data_get()
1317 k_fifo_put(&conn->recv_data, pkt); in tcp_data_get()
1337 return -EINVAL; in tcp_finalize_pkt()
1348 return -ENOBUFS; in tcp_header_add()
1353 UNALIGNED_PUT(conn->src.sin.sin_port, &th->th_sport); in tcp_header_add()
1354 UNALIGNED_PUT(conn->dst.sin.sin_port, &th->th_dport); in tcp_header_add()
1355 th->th_off = 5; in tcp_header_add()
1357 if (conn->send_options.mss_found) { in tcp_header_add()
1358 th->th_off++; in tcp_header_add()
1361 UNALIGNED_PUT(flags, &th->th_flags); in tcp_header_add()
1362 UNALIGNED_PUT(htons(conn->recv_win), &th->th_win); in tcp_header_add()
1363 UNALIGNED_PUT(htonl(seq), &th->th_seq); in tcp_header_add()
1366 UNALIGNED_PUT(htonl(conn->ack), &th->th_ack); in tcp_header_add()
1375 return net_context_create_ipv4_new(conn->context, pkt, in ip_header_add()
1376 &conn->src.sin.sin_addr, in ip_header_add()
1377 &conn->dst.sin.sin_addr); in ip_header_add()
1381 return net_context_create_ipv6_new(conn->context, pkt, in ip_header_add()
1382 &conn->src.sin6.sin6_addr, in ip_header_add()
1383 &conn->dst.sin6.sin6_addr); in ip_header_add()
1386 return -EINVAL; in ip_header_add()
1394 return -EINVAL; in set_tcp_nodelay()
1400 return -EINVAL; in set_tcp_nodelay()
1403 conn->tcp_nodelay = (bool)no_delay_int; in set_tcp_nodelay()
1410 int no_delay_int = (int)conn->tcp_nodelay; in get_tcp_nodelay()
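
set_tcp_nodelay()/get_tcp_nodelay() back the Nagle control that tcp_send_queued_data() consults later (small segments are held back while unacknowledged data is outstanding unless nodelay is set). A minimal usage sketch, assuming the option is exposed at the socket layer as TCP_NODELAY:

#include <zephyr/net/socket.h>

/* Illustrative: disable Nagle so small writes go out immediately. */
static int disable_nagle(int sock)
{
	int one = 1;

	return zsock_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
				&one, sizeof(one));
}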
1428 return -ENOBUFS; in net_tcp_set_mss_opt()
1443 (struct in_addr *)NET_IPV4_HDR(pkt)->dst) || in is_destination_local()
1445 (struct in_addr *)NET_IPV4_HDR(pkt)->dst)) { in is_destination_local()
1452 (struct in6_addr *)NET_IPV6_HDR(pkt)->dst) || in is_destination_local()
1454 (struct in6_addr *)NET_IPV6_HDR(pkt)->dst)) { in is_destination_local()
1475 rst = tcp_pkt_alloc_no_conn(pkt->iface, pkt->family, in net_tcp_reply_rst()
1484 (struct in_addr *)NET_IPV4_HDR(pkt)->dst, in net_tcp_reply_rst()
1485 (struct in_addr *)NET_IPV4_HDR(pkt)->src); in net_tcp_reply_rst()
1488 (struct in6_addr *)NET_IPV6_HDR(pkt)->dst, in net_tcp_reply_rst()
1489 (struct in6_addr *)NET_IPV6_HDR(pkt)->src); in net_tcp_reply_rst()
1491 ret = -EINVAL; in net_tcp_reply_rst()
1506 UNALIGNED_PUT(th_pkt->th_dport, &th_rst->th_sport); in net_tcp_reply_rst()
1507 UNALIGNED_PUT(th_pkt->th_sport, &th_rst->th_dport); in net_tcp_reply_rst()
1508 th_rst->th_off = 5; in net_tcp_reply_rst()
1511 UNALIGNED_PUT(RST, &th_rst->th_flags); in net_tcp_reply_rst()
1512 UNALIGNED_PUT(th_pkt->th_ack, &th_rst->th_seq); in net_tcp_reply_rst()
1514 uint32_t ack = ntohl(th_pkt->th_seq) + tcp_data_len(pkt); in net_tcp_reply_rst()
1516 UNALIGNED_PUT(RST | ACK, &th_rst->th_flags); in net_tcp_reply_rst()
1517 UNALIGNED_PUT(htonl(ack), &th_rst->th_ack); in net_tcp_reply_rst()
1547 if (conn->send_options.mss_found) { in tcp_out_ext()
1553 ret = -ENOBUFS; in tcp_out_ext()
1559 net_pkt_append_buffer(pkt, data->buffer); in tcp_out_ext()
1560 data->buffer = NULL; in tcp_out_ext()
1575 if (conn->send_options.mss_found) { in tcp_out_ext()
1594 sys_slist_append(&conn->send_queue, &pkt->next); in tcp_out_ext()
1597 conn->recv_win_sent = conn->recv_win; in tcp_out_ext()
1602 * thread to finish with any state-machine changes before in tcp_out_ext()
1606 &conn->send_timer, K_NO_WAIT); in tcp_out_ext()
1608 tcp_conn_close(conn, -ETIMEDOUT); in tcp_out_ext()
1616 (void)tcp_out_ext(conn, flags, NULL /* no data */, conn->seq); in tcp_out()
1625 ret = -EINVAL; in tcp_pkt_pull()
1657 if (pkt->buffer) { in tcp_pkt_append()
1658 buf = net_buf_frag_last(pkt->buffer); in tcp_pkt_append()
1661 alloc_len -= net_buf_tailroom(buf); in tcp_pkt_append()
1671 return -ENOBUFS; in tcp_pkt_append()
1676 buf = pkt->buffer; in tcp_pkt_append()
1685 len -= write_len; in tcp_pkt_append()
1686 buf = buf->frags; in tcp_pkt_append()
1696 bool window_full = (conn->send_data_total >= conn->send_win); in tcp_window_full()
1699 window_full = window_full || (conn->send_data_total >= conn->ca.cwnd); in tcp_window_full()
1713 if (conn->unacked_len > conn->send_data_total) { in tcp_unsent_len()
1715 conn->send_data_total, conn->unacked_len); in tcp_unsent_len()
1716 unsent_len = -ERANGE; in tcp_unsent_len()
1720 unsent_len = conn->send_data_total - conn->unacked_len; in tcp_unsent_len()
1721 if (conn->unacked_len >= conn->send_win) { in tcp_unsent_len()
1724 unsent_len = MIN(unsent_len, conn->send_win - conn->unacked_len); in tcp_unsent_len()
1727 if (conn->unacked_len >= conn->ca.cwnd) { in tcp_unsent_len()
1730 unsent_len = MIN(unsent_len, conn->ca.cwnd - conn->unacked_len); in tcp_unsent_len()
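
tcp_window_full() and tcp_unsent_len() above bound transmission by both the peer-advertised send window and, when congestion avoidance is built in, the congestion window. A condensed sketch of that bookkeeping; the error path for inconsistent counters is simplified here to "nothing to send".

#include <stdint.h>
#include <stddef.h>
#include <zephyr/sys/util.h>	/* MIN() */

/* Illustrative: bytes that may still be transmitted right now. */
static size_t effective_unsent(size_t queued_total, size_t unacked,
			       uint16_t send_win, uint16_t cwnd)
{
	size_t limit = MIN(send_win, cwnd);

	if (unacked >= limit || unacked > queued_total) {
		return 0;	/* window full, or counters inconsistent */
	}

	return MIN(queued_total - unacked, limit - unacked);
}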
1751 ret = -ENODATA; in tcp_send_data()
1758 ret = -ENOBUFS; in tcp_send_data()
1762 ret = tcp_pkt_peek(pkt, conn->send_data, conn->unacked_len, len); in tcp_send_data()
1765 ret = -ENOBUFS; in tcp_send_data()
1769 ret = tcp_out_ext(conn, PSH | ACK, pkt, conn->seq + conn->unacked_len); in tcp_send_data()
1771 conn->unacked_len += len; in tcp_send_data()
1773 if (conn->data_mode == TCP_DATA_MODE_RESEND) { in tcp_send_data()
1774 net_stats_update_tcp_resent(conn->iface, len); in tcp_send_data()
1775 net_stats_update_tcp_seg_rexmit(conn->iface); in tcp_send_data()
1777 net_stats_update_tcp_sent(conn->iface, len); in tcp_send_data()
1778 net_stats_update_tcp_seg_sent(conn->iface); in tcp_send_data()
1801 if (conn->data_mode == TCP_DATA_MODE_RESEND) { in tcp_send_queued_data()
1807 if ((conn->tcp_nodelay == false) && (conn->unacked_len > 0)) { in tcp_send_queued_data()
1825 if (conn->send_data_total) { in tcp_send_queued_data()
1829 if (k_work_delayable_remaining_get(&conn->send_data_timer)) { in tcp_send_queued_data()
1834 conn->send_data_retries = 0; in tcp_send_queued_data()
1835 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer, in tcp_send_queued_data()
1847 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_cleanup_recv_queue()
1850 net_pkt_get_len(conn->queue_recv_data), in tcp_cleanup_recv_queue()
1851 tcp_get_seq(conn->queue_recv_data->buffer)); in tcp_cleanup_recv_queue()
1853 net_buf_unref(conn->queue_recv_data->buffer); in tcp_cleanup_recv_queue()
1854 conn->queue_recv_data->buffer = NULL; in tcp_cleanup_recv_queue()
1856 k_mutex_unlock(&conn->lock); in tcp_cleanup_recv_queue()
1867 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_resend_data()
1869 NET_DBG("send_data_retries=%hu", conn->send_data_retries); in tcp_resend_data()
1871 if (conn->send_data_retries >= tcp_retries) { in tcp_resend_data()
1878 (conn->send_data_retries == 0)) { in tcp_resend_data()
1881 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in tcp_resend_data()
1885 conn->data_mode = TCP_DATA_MODE_RESEND; in tcp_resend_data()
1886 conn->unacked_len = 0; in tcp_resend_data()
1889 conn->send_data_retries++; in tcp_resend_data()
1891 if (conn->in_close && conn->send_data_total == 0) { in tcp_resend_data()
1896 &conn->fin_timer, in tcp_resend_data()
1902 conn->seq + conn->unacked_len); in tcp_resend_data()
1911 } else if (ret == -ENODATA) { in tcp_resend_data()
1912 conn->data_mode = TCP_DATA_MODE_SEND; in tcp_resend_data()
1915 } else if (ret == -ENOBUFS) { in tcp_resend_data()
1921 if (conn->send_data_retries < tcp_retries) { in tcp_resend_data()
1923 for (int i = 0; i < conn->send_data_retries; i++) { in tcp_resend_data()
1928 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer, in tcp_resend_data()
1932 k_mutex_unlock(&conn->lock); in tcp_resend_data()
1935 tcp_conn_close(conn, -ETIMEDOUT); in tcp_resend_data()
1944 /* no need to acquire the conn->lock as there is nothing scheduled here */ in tcp_timewait_timeout()
1947 (void)tcp_conn_close(conn, -ETIMEDOUT); in tcp_timewait_timeout()
1955 (void)tcp_conn_close(conn, -ETIMEDOUT); in tcp_establish_timeout()
1963 /* no need to acquire the conn->lock as there is nothing scheduled here */ in tcp_fin_timeout()
1964 if (conn->state == TCP_SYN_RECEIVED) { in tcp_fin_timeout()
1972 (void)tcp_conn_close(conn, -ETIMEDOUT); in tcp_fin_timeout()
1983 (void)tcp_conn_close(conn, -ETIMEDOUT); in tcp_last_ack_timeout()
1994 k_work_init_delayable(&conn->fin_timer, tcp_last_ack_timeout); in tcp_setup_last_ack_timer()
2000 &conn->fin_timer, in tcp_setup_last_ack_timer()
2006 k_work_cancel_delayable(&conn->fin_timer); in tcp_cancel_last_ack_timer()
2015 if (conn->state != TCP_ESTABLISHED) { in tcp_send_keepalive_probe()
2020 if (!conn->keep_alive) { in tcp_send_keepalive_probe()
2025 conn->keep_cur++; in tcp_send_keepalive_probe()
2026 if (conn->keep_cur > conn->keep_cnt) { in tcp_send_keepalive_probe()
2029 tcp_conn_close(conn, -ETIMEDOUT); in tcp_send_keepalive_probe()
2034 k_work_reschedule_for_queue(&tcp_work_q, &conn->keepalive_timer, in tcp_send_keepalive_probe()
2035 K_SECONDS(conn->keep_intvl)); in tcp_send_keepalive_probe()
2038 (void)tcp_out_ext(conn, ACK, NULL, conn->seq - 1); in tcp_send_keepalive_probe()
2047 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_send_zwp()
2049 (void)tcp_out_ext(conn, ACK, NULL, conn->seq - 1); in tcp_send_zwp()
2053 if (conn->send_win == 0) { in tcp_send_zwp()
2057 if (conn->zwp_retries < 63) { in tcp_send_zwp()
2058 conn->zwp_retries++; in tcp_send_zwp()
2061 timeout <<= conn->zwp_retries; in tcp_send_zwp()
2067 &tcp_work_q, &conn->persist_timer, K_MSEC(timeout)); in tcp_send_zwp()
2070 k_mutex_unlock(&conn->lock); in tcp_send_zwp()
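
tcp_send_zwp() above re-arms the persist timer with an exponentially growing interval while the peer keeps advertising a zero window, with the retry counter capped at 63. A small sketch of that backoff; the explicit upper bound is an assumption of this sketch and is not shown in the listing.

#include <stdint.h>
#include <zephyr/sys/util.h>	/* MIN() */

/* Illustrative: interval before the next zero-window probe. */
static uint64_t next_zwp_interval(uint64_t base_ms, uint8_t *retries,
				  uint64_t max_ms)
{
	uint64_t interval = base_ms;

	if (*retries < 63) {
		(*retries)++;
	}

	/* Doubling per unanswered probe, clamped (the clamp is an
	 * assumption of this sketch).
	 */
	for (uint8_t i = 0; i < *retries && interval < max_ms; i++) {
		interval *= 2;
	}

	return MIN(interval, max_ms);
}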
2078 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_send_ack()
2082 k_mutex_unlock(&conn->lock); in tcp_send_ack()
2087 int ref_count = atomic_inc(&conn->ref_count) + 1; in tcp_conn_ref()
2106 conn->queue_recv_data = tcp_rx_pkt_alloc(conn, 0); in tcp_conn_alloc()
2107 if (conn->queue_recv_data == NULL) { in tcp_conn_alloc()
2114 conn->send_data = tcp_pkt_alloc(conn, 0); in tcp_conn_alloc()
2115 if (conn->send_data == NULL) { in tcp_conn_alloc()
2120 k_mutex_init(&conn->lock); in tcp_conn_alloc()
2121 k_fifo_init(&conn->recv_data); in tcp_conn_alloc()
2122 k_sem_init(&conn->connect_sem, 0, K_SEM_MAX_LIMIT); in tcp_conn_alloc()
2123 k_sem_init(&conn->tx_sem, 1, 1); in tcp_conn_alloc()
2125 conn->in_connect = false; in tcp_conn_alloc()
2126 conn->state = TCP_LISTEN; in tcp_conn_alloc()
2127 conn->recv_win_max = tcp_rx_window; in tcp_conn_alloc()
2128 conn->recv_win = conn->recv_win_max; in tcp_conn_alloc()
2129 conn->recv_win_sent = conn->recv_win_max; in tcp_conn_alloc()
2130 conn->send_win_max = MAX(tcp_tx_window, NET_IPV6_MTU); in tcp_conn_alloc()
2131 conn->send_win = conn->send_win_max; in tcp_conn_alloc()
2132 conn->tcp_nodelay = false; in tcp_conn_alloc()
2133 conn->addr_ref_done = false; in tcp_conn_alloc()
2135 conn->dup_ack_cnt = 0; in tcp_conn_alloc()
2141 conn->ca.cwnd = UINT16_MAX; in tcp_conn_alloc()
2147 conn->seq = 0U; in tcp_conn_alloc()
2149 sys_slist_init(&conn->send_queue); in tcp_conn_alloc()
2151 k_work_init_delayable(&conn->send_timer, tcp_send_process); in tcp_conn_alloc()
2152 k_work_init_delayable(&conn->timewait_timer, tcp_timewait_timeout); in tcp_conn_alloc()
2153 k_work_init_delayable(&conn->fin_timer, tcp_fin_timeout); in tcp_conn_alloc()
2154 k_work_init_delayable(&conn->send_data_timer, tcp_resend_data); in tcp_conn_alloc()
2155 k_work_init_delayable(&conn->recv_queue_timer, tcp_cleanup_recv_queue); in tcp_conn_alloc()
2156 k_work_init_delayable(&conn->persist_timer, tcp_send_zwp); in tcp_conn_alloc()
2157 k_work_init_delayable(&conn->ack_timer, tcp_send_ack); in tcp_conn_alloc()
2158 k_work_init(&conn->conn_release, tcp_conn_release); in tcp_conn_alloc()
2164 sys_slist_append(&tcp_conns, &conn->next); in tcp_conn_alloc()
2172 if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT && conn->queue_recv_data) { in tcp_conn_alloc()
2173 tcp_pkt_unref(conn->queue_recv_data); in tcp_conn_alloc()
2174 conn->queue_recv_data = NULL; in tcp_conn_alloc()
2188 ret = -ENOMEM; in net_tcp_get()
2193 conn->context = context; in net_tcp_get()
2194 context->tcp = conn; in net_tcp_get()
2208 return !memcmp(ep, &ep_tmp, tcp_endpoint_len(ep->sa.sa_family)); in tcp_endpoint_cmp()
2213 return tcp_endpoint_cmp(&conn->src, pkt, TCP_EP_DST) && in tcp_conn_cmp()
2214 tcp_endpoint_cmp(&conn->dst, pkt, TCP_EP_SRC); in tcp_conn_cmp()
2260 struct tcp *conn_old = ((struct net_context *)user_data)->tcp; in tcp_recv()
2268 conn->accepted_conn = conn_old; in tcp_recv()
2371 saddr->sa_family == AF_INET6) { in tcp_init_isn()
2372 return tcpv6_init_isn(&net_sin6(saddr)->sin6_addr, in tcp_init_isn()
2373 &net_sin6(daddr)->sin6_addr, in tcp_init_isn()
2374 net_sin6(saddr)->sin6_port, in tcp_init_isn()
2375 net_sin6(daddr)->sin6_port); in tcp_init_isn()
2377 saddr->sa_family == AF_INET) { in tcp_init_isn()
2378 return tcpv4_init_isn(&net_sin(saddr)->sin_addr, in tcp_init_isn()
2379 &net_sin(daddr)->sin_addr, in tcp_init_isn()
2380 net_sin(saddr)->sin_port, in tcp_init_isn()
2381 net_sin(daddr)->sin_port); in tcp_init_isn()
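
tcpv4_init_isn()/tcpv6_init_isn(), called above but not shown in the listing, derive the initial sequence number from the connection 4-tuple. A generic RFC 6528-style sketch of that idea; hash32() is a hypothetical keyed hash, and the real implementation's hash, key handling and clock granularity may differ.

#include <stdint.h>
#include <stddef.h>
#include <zephyr/kernel.h>	/* k_uptime_get() */

/* hash32() is hypothetical: any keyed hash over the buffer would do. */
extern uint32_t hash32(const void *buf, size_t len, uint32_t key);

struct isn_tuple {
	uint32_t saddr;
	uint32_t daddr;
	uint16_t sport;
	uint16_t dport;
};

/* Illustrative RFC 6528-style ISN: secret-keyed hash of the 4-tuple
 * plus a monotonically increasing clock component.
 */
static uint32_t init_isn(const struct isn_tuple *t, uint32_t secret)
{
	uint32_t clock = (uint32_t)(k_uptime_get() * 250); /* ~4 us ticks */

	return hash32(t, sizeof(*t), secret) + clock;
}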
2405 conn = context->tcp; in tcp_conn_new()
2406 conn->iface = pkt->iface; in tcp_conn_new()
2409 net_context_set_family(conn->context, net_pkt_family(pkt)); in tcp_conn_new()
2411 if (tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC) < 0) { in tcp_conn_new()
2417 if (tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST) < 0) { in tcp_conn_new()
2424 net_sprint_addr(conn->src.sa.sa_family, in tcp_conn_new()
2425 (const void *)&conn->src.sin.sin_addr), in tcp_conn_new()
2426 net_sprint_addr(conn->dst.sa.sa_family, in tcp_conn_new()
2427 (const void *)&conn->dst.sin.sin_addr)); in tcp_conn_new()
2429 memcpy(&context->remote, &conn->dst, sizeof(context->remote)); in tcp_conn_new()
2430 context->flags |= NET_CONTEXT_REMOTE_ADDR_SET; in tcp_conn_new()
2432 net_sin_ptr(&context->local)->sin_family = af; in tcp_conn_new()
2438 net_ipaddr_copy(&net_sin6(&local_addr)->sin6_addr, in tcp_conn_new()
2439 &conn->src.sin6.sin6_addr); in tcp_conn_new()
2442 net_ipaddr_copy(&net_sin(&local_addr)->sin_addr, in tcp_conn_new()
2443 &conn->src.sin.sin_addr); in tcp_conn_new()
2457 * are already copied above from conn->dst. The call to net_context_bind in tcp_conn_new()
2468 net_sin6_ptr(&context->local)->sin6_port = conn->src.sin6.sin6_port; in tcp_conn_new()
2471 net_sin_ptr(&context->local)->sin_port = conn->src.sin.sin_port; in tcp_conn_new()
2476 conn->seq = tcp_init_isn(&local_addr, &context->remote); in tcp_conn_new()
2481 (const void *)&net_sin(&local_addr)->sin_addr), in tcp_conn_new()
2482 net_sprint_addr(context->remote.sa_family, in tcp_conn_new()
2483 (const void *)&net_sin(&context->remote)->sin_addr)); in tcp_conn_new()
2486 &context->remote, &local_addr, in tcp_conn_new()
2487 ntohs(conn->dst.sin.sin_port),/* local port */ in tcp_conn_new()
2488 ntohs(conn->src.sin.sin_port),/* remote port */ in tcp_conn_new()
2490 &context->conn_handler); in tcp_conn_new()
2498 net_if_addr_ref(conn->iface, conn->dst.sa.sa_family, in tcp_conn_new()
2499 conn->src.sa.sa_family == AF_INET ? in tcp_conn_new()
2500 (const void *)&conn->src.sin.sin_addr : in tcp_conn_new()
2501 (const void *)&conn->src.sin6.sin6_addr); in tcp_conn_new()
2502 conn->addr_ref_done = true; in tcp_conn_new()
2514 return (net_tcp_seq_cmp(th_seq(hdr), conn->ack) >= 0) && in tcp_validate_seq()
2515 (net_tcp_seq_cmp(th_seq(hdr), conn->ack + conn->recv_win) < 0); in tcp_validate_seq()
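
tcp_validate_seq() above relies on net_tcp_seq_cmp() being safe across 32-bit sequence number wrap-around. The usual way to get that property is a signed-difference comparison, as in this sketch; the real helper lives in the TCP-internal headers and may differ in detail.

#include <stdint.h>

/* Illustrative: <0 if a is "before" b, 0 if equal, >0 if "after".
 * Correct as long as the two values are within 2^31 of each other.
 */
static inline int32_t seq_cmp(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b);
}

/* Example: 0x00000010 is "after" 0xfffffff0 even though it is
 * numerically smaller, because the sequence space has wrapped.
 */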
2525 * - Data already received earlier: new_len <= 0 in tcp_compute_new_length()
2526 * - Partially new data new_len > 0 in tcp_compute_new_length()
2527 * - Out of order data new_len > 0, in tcp_compute_new_length()
2530 new_len = (int32_t)(len) - net_tcp_seq_cmp(conn->ack, th_seq(hdr)); in tcp_compute_new_length()
2542 /* Entering TIME-WAIT, so cancel the timer and start the TIME-WAIT timer */ in tcp_enter_time_wait()
2543 k_work_cancel_delayable(&conn->fin_timer); in tcp_enter_time_wait()
2545 &tcp_work_q, &conn->timewait_timer, in tcp_enter_time_wait()
2561 NET_DBG("buf %p seq %u len %d", tmp, seq, tmp->len); in check_seq_list()
2569 next_seq = seq + tmp->len; in check_seq_list()
2571 tmp = tmp->frags; in check_seq_list()
2583 NET_DBG("conn: %p len %zd seq %u ack %u", conn, len, seq, conn->ack); in tcp_queue_recv_data()
2585 tmp = pkt->buffer; in tcp_queue_recv_data()
2588 seq += tmp->len; in tcp_queue_recv_data()
2589 tmp = tmp->frags; in tcp_queue_recv_data()
2593 seq += tmp->len; in tcp_queue_recv_data()
2594 tmp = tmp->frags; in tcp_queue_recv_data()
2601 if (!net_pkt_is_empty(conn->queue_recv_data)) { in tcp_queue_recv_data()
2613 * 3 | 3 | 6 | 4 | 3+3-6= 0 | NA | NA | Prepend in tcp_queue_recv_data()
2614 * 3 | 4 | 6 | 4 | 3+4-6 = 1 | NA | NA | Prepend, pull from buffer in tcp_queue_recv_data()
2615 * 3 | 7 | 6 | 4 | 3+7-6 = 4 | 6-3=3 | 6+4-3=7 | Drop queued data in tcp_queue_recv_data()
2616 * 3 | 8 | 6 | 4 | 3+8-6 = 5 | 6-3=3 | 6+4-3=7 | Drop queued data in tcp_queue_recv_data()
2617 * 6 | 5 | 6 | 4 | 6+5-6 = 5 | 6-6=0 | 6+4-6=4 | Drop queued data in tcp_queue_recv_data()
2618 * 6 | 4 | 6 | 4 | 6+4-6 = 4 | 6-6=0 | 6+4-6=4 | Drop queued data / packet in tcp_queue_recv_data()
2619 * 7 | 2 | 6 | 4 | 7+2-6 = 3 | 6-7=MI | 6+4-7=3 | Drop packet in tcp_queue_recv_data()
2620 * 10 | 2 | 6 | 4 | 10+2-6= 6 | 6-10=MI-3| 6+4-10=0 | Append in tcp_queue_recv_data()
2621 * 7 | 4 | 6 | 4 | 7+4-6 = 5 | 6-7 =MI | 6+4-7 =3 | Append, pull from packet in tcp_queue_recv_data()
2622 * 11 | 2 | 6 | 4 | 11+2-6= 7 | 6-11=MI-6| 6+4-11=MI-1 | Drop incoming packet in tcp_queue_recv_data()
2623 * 2 | 3 | 6 | 4 | 2+3-6= MI | 6-2=4 | 6+4-2=8 | Drop incoming packet in tcp_queue_recv_data()
2631 pending_seq = tcp_get_seq(conn->queue_recv_data->buffer); in tcp_queue_recv_data()
2632 end_offset = seq - pending_seq; in tcp_queue_recv_data()
2633 pending_len = net_pkt_get_len(conn->queue_recv_data); in tcp_queue_recv_data()
2641 net_buf_frag_add(pkt->buffer, in tcp_queue_recv_data()
2642 conn->queue_recv_data->buffer); in tcp_queue_recv_data()
2645 conn->queue_recv_data->buffer = pkt->buffer; in tcp_queue_recv_data()
2651 last = net_buf_frag_last(conn->queue_recv_data->buffer); in tcp_queue_recv_data()
2654 start_offset = pending_seq - seq_start; in tcp_queue_recv_data()
2656 end_offset = (pending_seq + last->len) - seq_start; in tcp_queue_recv_data()
2663 net_buf_unref(conn->queue_recv_data->buffer); in tcp_queue_recv_data()
2664 conn->queue_recv_data->buffer = pkt->buffer; in tcp_queue_recv_data()
2669 net_pkt_remove_tail(conn->queue_recv_data, in tcp_queue_recv_data()
2676 net_buf_frag_add(conn->queue_recv_data->buffer, in tcp_queue_recv_data()
2677 pkt->buffer); in tcp_queue_recv_data()
2685 if (check_seq_list(conn->queue_recv_data->buffer) == false) { in tcp_queue_recv_data()
2689 net_buf_unref(conn->queue_recv_data->buffer); in tcp_queue_recv_data()
2690 conn->queue_recv_data->buffer = NULL; in tcp_queue_recv_data()
2696 net_pkt_append_buffer(conn->queue_recv_data, pkt->buffer); in tcp_queue_recv_data()
2702 pkt->buffer = NULL; in tcp_queue_recv_data()
2704 if (!k_work_delayable_is_pending(&conn->recv_queue_timer)) { in tcp_queue_recv_data()
2706 &tcp_work_q, &conn->recv_queue_timer, in tcp_queue_recv_data()
2723 net_stats_update_tcp_seg_recv(conn->iface); in tcp_data_received()
2730 k_work_schedule_for_queue(&tcp_work_q, &conn->ack_timer, in tcp_data_received()
2733 k_work_cancel_delayable(&conn->ack_timer); in tcp_data_received()
2749 headers_len = net_pkt_get_len(pkt) - data_len; in tcp_out_of_order_data()
2756 /* We received out-of-order data. Try to queue it. in tcp_out_of_order_data()
2767 (void)net_context_get_option(conn->context, NET_OPT_SNDBUF, in tcp_check_sock_options()
2772 (void)net_context_get_option(conn->context, NET_OPT_RCVBUF, in tcp_check_sock_options()
2776 if (sndbuf_opt > 0 && sndbuf_opt != conn->send_win_max) { in tcp_check_sock_options()
2777 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_check_sock_options()
2779 conn->send_win_max = sndbuf_opt; in tcp_check_sock_options()
2780 if (conn->send_win > conn->send_win_max) { in tcp_check_sock_options()
2781 conn->send_win = conn->send_win_max; in tcp_check_sock_options()
2784 k_mutex_unlock(&conn->lock); in tcp_check_sock_options()
2787 if (rcvbuf_opt > 0 && rcvbuf_opt != conn->recv_win_max) { in tcp_check_sock_options()
2790 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_check_sock_options()
2792 diff = rcvbuf_opt - conn->recv_win_max; in tcp_check_sock_options()
2793 conn->recv_win_max = rcvbuf_opt; in tcp_check_sock_options()
2796 k_mutex_unlock(&conn->lock); in tcp_check_sock_options()
2807 size_t tcp_options_len = th ? (th_off(th) - 5) * 4 : 0; in tcp_in()
2822 if (conn->state != TCP_SYN_SENT) { in tcp_in()
2826 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_in()
2829 if (conn->state == TCP_UNUSED) { in tcp_in()
2830 k_mutex_unlock(&conn->lock); in tcp_in()
2839 close_status = -ECONNRESET; in tcp_in()
2847 k_mutex_unlock(&conn->lock); in tcp_in()
2855 close_status = -ECONNRESET; in tcp_in()
2861 if (conn->in_connect) { in tcp_in()
2864 close_status = -ECONNREFUSED; in tcp_in()
2871 if (tcp_options_len && !tcp_options_check(&conn->recv_options, pkt, in tcp_in()
2876 close_status = -ECONNRESET; in tcp_in()
2880 if (th && (conn->state != TCP_LISTEN) && (conn->state != TCP_SYN_SENT) && in tcp_in()
2884 * condition, reset should be sent and connection closed. in tcp_in()
2887 conn, tcp_state_to_str(conn->state, false)); in tcp_in()
2888 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
2891 close_status = -ECONNRESET; in tcp_in()
2896 conn->send_win = ntohs(th_win(th)); in tcp_in()
2897 if (conn->send_win > conn->send_win_max) { in tcp_in()
2899 conn->send_win, conn->send_win_max); in tcp_in()
2901 conn->send_win = conn->send_win_max; in tcp_in()
2904 if (conn->send_win == 0) { in tcp_in()
2905 if (!k_work_delayable_is_pending(&conn->persist_timer)) { in tcp_in()
2906 conn->zwp_retries = 0; in tcp_in()
2908 &tcp_work_q, &conn->persist_timer, in tcp_in()
2912 (void)k_work_cancel_delayable(&conn->persist_timer); in tcp_in()
2916 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in tcp_in()
2918 k_sem_give(&conn->tx_sem); in tcp_in()
2925 switch (conn->state) { in tcp_in()
2929 conn->send_options.mss_found = true; in tcp_in()
2932 conn->send_options.mss_found = false; in tcp_in()
2939 &conn->establish_timer, in tcp_in()
2943 conn->send_options.mss_found = true; in tcp_in()
2944 ret = tcp_out_ext(conn, SYN, NULL /* no data */, conn->seq); in tcp_in()
2949 conn->send_options.mss_found = false; in tcp_in()
2957 if (FL(&fl, &, ACK, th_ack(th) == conn->seq && in tcp_in()
2958 th_seq(th) == conn->ack)) { in tcp_in()
2962 if (conn->accepted_conn != NULL) { in tcp_in()
2963 accept_cb = conn->accepted_conn->accept_cb; in tcp_in()
2964 context = conn->accepted_conn->context; in tcp_in()
2965 keep_alive_param_copy(conn, conn->accepted_conn); in tcp_in()
2968 k_work_cancel_delayable(&conn->establish_timer); in tcp_in()
2971 net_context_set_state(conn->context, in tcp_in()
2975 conn->accepted_conn = NULL; in tcp_in()
2984 net_tcp_put(conn->context); in tcp_in()
2990 net_ipaddr_copy(&conn->context->remote, &conn->dst.sa); in tcp_in()
2992 /* Check if v4-mapping-to-v6 needs to be done for in tcp_in()
2996 net_context_get_family(conn->context) == AF_INET && in tcp_in()
3002 &net_sin(&conn->context->remote)->sin_addr, in tcp_in()
3004 net_ipaddr_copy(&net_sin6(&conn->context->remote)->sin6_addr, in tcp_in()
3007 net_sin6(&conn->context->remote)->sin6_family = AF_INET6; in tcp_in()
3020 accept_cb(conn->context, &conn->context->remote, in tcp_in()
3053 if (FL(&fl, &, SYN | ACK, th && th_ack(th) == conn->seq)) { in tcp_in()
3069 net_context_set_state(conn->context, in tcp_in()
3093 /* full-close */ in tcp_in()
3094 if (th && FL(&fl, &, FIN, th_seq(th) == conn->ack)) { in tcp_in()
3113 if (net_tcp_seq_cmp(th_ack(th), conn->seq) > 0) { in tcp_in()
3114 uint32_t len_acked = th_ack(th) - conn->seq; in tcp_in()
3139 if (th && (net_tcp_seq_cmp(th_ack(th), conn->seq) == 0)) { in tcp_in()
3141 if (conn->send_data_total > 0) { in tcp_in()
3147 conn->dup_ack_cnt = MIN(conn->dup_ack_cnt + 1, in tcp_in()
3152 conn->dup_ack_cnt = 0; in tcp_in()
3156 if ((conn->data_mode == TCP_DATA_MODE_SEND) && in tcp_in()
3157 (conn->dup_ack_cnt == DUPLICATE_ACK_RETRANSMIT_TRHESHOLD)) { in tcp_in()
3159 int temp_unacked_len = conn->unacked_len; in tcp_in()
3161 conn->unacked_len = 0; in tcp_in()
3166 conn->unacked_len = temp_unacked_len; in tcp_in()
3170 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in tcp_in()
3175 NET_ASSERT((conn->send_data_total == 0) || in tcp_in()
3176 k_work_delayable_is_pending(&conn->send_data_timer), in tcp_in()
3180 if (th && (net_tcp_seq_cmp(th_ack(th), conn->seq) > 0)) { in tcp_in()
3181 uint32_t len_acked = th_ack(th) - conn->seq; in tcp_in()
3185 if ((conn->send_data_total < len_acked) || in tcp_in()
3186 (tcp_pkt_pull(conn->send_data, in tcp_in()
3190 conn->send_data_total); in tcp_in()
3191 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
3194 close_status = -ECONNRESET; in tcp_in()
3200 conn->dup_ack_cnt = 0; in tcp_in()
3204 conn->send_data_total -= len_acked; in tcp_in()
3205 if (conn->unacked_len < len_acked) { in tcp_in()
3206 conn->unacked_len = 0; in tcp_in()
3208 conn->unacked_len -= len_acked; in tcp_in()
3212 k_sem_give(&conn->tx_sem); in tcp_in()
3216 net_stats_update_tcp_seg_recv(conn->iface); in tcp_in()
3226 conn->send_data_retries = 0; in tcp_in()
3227 if (conn->data_mode == TCP_DATA_MODE_RESEND) { in tcp_in()
3228 conn->unacked_len = 0; in tcp_in()
3231 conn->data_mode = TCP_DATA_MODE_SEND; in tcp_in()
3232 if (conn->send_data_total > 0) { in tcp_in()
3233 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer, in tcp_in()
3238 if (conn->in_close && conn->send_data_total == 0) { in tcp_in()
3243 &conn->fin_timer, in tcp_in()
3254 if (ret < 0 && ret != -ENOBUFS) { in tcp_in()
3263 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in tcp_in()
3268 if (th_seq(th) == conn->ack) { in tcp_in()
3281 } else if (net_tcp_seq_greater(conn->ack, th_seq(th))) { in tcp_in()
3285 * There is a tiny risk of creating an ACK loop this way when in tcp_in()
3291 net_stats_update_tcp_seg_ackerr(conn->iface); in tcp_in()
3306 if (conn->send_data_total == 0) { in tcp_in()
3307 conn->send_data_retries = 0; in tcp_in()
3308 k_work_cancel_delayable(&conn->send_data_timer); in tcp_in()
3313 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in tcp_in()
3315 k_sem_give(&conn->tx_sem); in tcp_in()
3326 if (th && FL(&fl, ==, ACK, th_ack(th) == conn->seq)) { in tcp_in()
3343 * - & - -> TCP_FIN_WAIT_1 in tcp_in()
3344 * FIN & - -> TCP_CLOSING in tcp_in()
3345 * - & ACK -> TCP_FIN_WAIT_2 in tcp_in()
3346 * FIN & ACK -> TCP_TIME_WAIT in tcp_in()
3352 /* We do not implement half closed sockets, therefore in tcp_in()
3356 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
3363 if (FL(&fl, &, ACK, th_ack(th) == conn->seq)) { in tcp_in()
3366 , conn, conn->seq, conn->ack); in tcp_in()
3377 if (FL(&fl, &, FIN, net_tcp_seq_cmp(th_seq(th) + len, conn->ack) == 0)) { in tcp_in()
3394 tcp_out_ext(conn, FIN | ACK, NULL, conn->seq - 1); in tcp_in()
3410 tcp_out_ext(conn, FIN | ACK, NULL, conn->seq - 1); in tcp_in()
3421 * - -> TCP_FIN_WAIT_2 in tcp_in()
3422 * FIN -> TCP_TIME_WAIT in tcp_in()
3432 /* We do not implement half closed sockets, therefore in tcp_in()
3436 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
3447 if (FL(&fl, &, FIN, net_tcp_seq_cmp(th_seq(th) + len, conn->ack) == 0)) { in tcp_in()
3471 * - -> TCP_CLOSING in tcp_in()
3472 * ACK -> TCP_TIME_WAIT in tcp_in()
3482 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
3490 if (FL(&fl, &, ACK, th_ack(th) == conn->seq)) { in tcp_in()
3493 , conn, conn->seq, conn->ack); in tcp_in()
3504 * Since the conn->ack was already incremented in TCP_FIN_WAIT_1 in tcp_in()
3508 net_tcp_seq_cmp(th_seq(th) + len + 1, conn->ack) == 0)) || in tcp_in()
3518 tcp_out_ext(conn, FIN | ACK, NULL, conn->seq - 1); in tcp_in()
3537 NET_ERR("conn: %p, new bytes %u during TIME-WAIT state " in tcp_in()
3539 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
3547 net_tcp_seq_cmp(th_seq(th) + 1, conn->ack) == 0)) || in tcp_in()
3557 tcp_state_to_str(conn->state, true)); in tcp_in()
3575 conn->in_connect = false; in tcp_in()
3576 if (conn->connect_cb) { in tcp_in()
3577 conn->connect_cb(conn->context, 0, conn->context->user_data); in tcp_in()
3580 conn->connect_cb = NULL; in tcp_in()
3583 k_sem_give(&conn->connect_sem); in tcp_in()
3589 if (conn->context) { in tcp_in()
3590 /* If the conn->context is not set, then the connection was in tcp_in()
3591 * already closed. in tcp_in()
3593 conn_handler = (struct net_conn *)conn->context->conn_handler; in tcp_in()
3596 recv_user_data = conn->recv_user_data; in tcp_in()
3597 recv_data_fifo = &conn->recv_data; in tcp_in()
3599 k_mutex_unlock(&conn->lock); in tcp_in()
3605 while (conn_handler && atomic_get(&conn->ref_count) > 0 && in tcp_in()
3618 if (do_close && conn->state != TCP_UNUSED && conn->state != TCP_CLOSED) { in tcp_in()
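
The active-close branches of tcp_in() above follow the transition tables given in its comments for TCP_FIN_WAIT_1, TCP_FIN_WAIT_2 and TCP_CLOSING. A condensed sketch of just that decision logic, using the tcp_state enumerators from this file's private header; flag and sequence validation, half-close handling and the actual side effects are omitted.

#include <stdbool.h>

/* Illustrative only: next state on the active-close path, driven by
 * whether the segment ACKs our FIN and/or carries the peer's FIN.
 */
static enum tcp_state fin_wait_next_state(enum tcp_state state,
					  bool acks_our_fin, bool peer_fin)
{
	switch (state) {
	case TCP_FIN_WAIT_1:
		if (peer_fin && acks_our_fin) {
			return TCP_TIME_WAIT;
		} else if (peer_fin) {
			return TCP_CLOSING;
		} else if (acks_our_fin) {
			return TCP_FIN_WAIT_2;
		}
		return TCP_FIN_WAIT_1;
	case TCP_FIN_WAIT_2:
		return peer_fin ? TCP_TIME_WAIT : TCP_FIN_WAIT_2;
	case TCP_CLOSING:
		return acks_our_fin ? TCP_TIME_WAIT : TCP_CLOSING;
	default:
		return state;
	}
}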
3628 struct tcp *conn = context->tcp; in net_tcp_put()
3631 return -ENOENT; in net_tcp_put()
3634 k_mutex_lock(&conn->lock, K_FOREVER); in net_tcp_put()
3641 if (conn->state == TCP_ESTABLISHED || in net_tcp_put()
3642 conn->state == TCP_SYN_RECEIVED) { in net_tcp_put()
3644 if (conn->send_data_total > 0) { in net_tcp_put()
3646 conn->send_data_total); in net_tcp_put()
3647 conn->in_close = true; in net_tcp_put()
3652 &conn->send_data_timer, in net_tcp_put()
3661 &conn->fin_timer, in net_tcp_put()
3665 conn->seq + conn->unacked_len); in net_tcp_put()
3674 } else if (conn->in_connect) { in net_tcp_put()
3675 conn->in_connect = false; in net_tcp_put()
3676 k_sem_reset(&conn->connect_sem); in net_tcp_put()
3679 k_mutex_unlock(&conn->lock); in net_tcp_put()
3696 struct tcp *conn = context->tcp; in net_tcp_update_recv_wnd()
3700 NET_ERR("context->tcp == NULL"); in net_tcp_update_recv_wnd()
3701 return -EPROTOTYPE; in net_tcp_update_recv_wnd()
3704 k_mutex_lock(&conn->lock, K_FOREVER); in net_tcp_update_recv_wnd()
3706 ret = tcp_update_recv_wnd((struct tcp *)context->tcp, delta); in net_tcp_update_recv_wnd()
3708 k_mutex_unlock(&conn->lock); in net_tcp_update_recv_wnd()
3716 struct tcp *conn = context->tcp; in net_tcp_queue()
3720 if (!conn || conn->state != TCP_ESTABLISHED) { in net_tcp_queue()
3721 return -ENOTCONN; in net_tcp_queue()
3724 k_mutex_lock(&conn->lock, K_FOREVER); in net_tcp_queue()
3731 ret = -EAGAIN; in net_tcp_queue()
3738 for (int i = 0; i < msg->msg_iovlen; i++) { in net_tcp_queue()
3739 len += msg->msg_iov[i].iov_len; in net_tcp_queue()
3744 * that conn->send_data_total is less than conn->send_win, as it was in net_tcp_queue()
3748 len = MIN(conn->send_win - conn->send_data_total, len); in net_tcp_queue()
3751 for (int i = 0; i < msg->msg_iovlen; i++) { in net_tcp_queue()
3752 int iovlen = MIN(msg->msg_iov[i].iov_len, len); in net_tcp_queue()
3754 ret = tcp_pkt_append(conn->send_data, in net_tcp_queue()
3755 msg->msg_iov[i].iov_base, in net_tcp_queue()
3766 len -= iovlen; in net_tcp_queue()
3773 ret = tcp_pkt_append(conn->send_data, data, len); in net_tcp_queue()
3781 conn->send_data_total += queued_len; in net_tcp_queue()
3784 * failure now (out-of-buf case), it can be ignored for now, retransmit in net_tcp_queue()
3788 if (ret < 0 && ret != -ENOBUFS) { in net_tcp_queue()
3794 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in net_tcp_queue()
3799 k_mutex_unlock(&conn->lock); in net_tcp_queue()
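
net_tcp_queue() above may accept fewer bytes than requested: the length is clamped to what fits in the peer's window (send_win - send_data_total), and -EAGAIN is returned when the window is already full. At the application level this surfaces as a short write, so callers typically loop; a hedged sketch using the Zephyr socket API, assuming a blocking socket.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <zephyr/net/socket.h>

/* Illustrative: keep calling send() until everything is queued or a
 * real error occurs.
 */
static int send_all(int sock, const uint8_t *buf, size_t len)
{
	while (len > 0) {
		ssize_t out = zsock_send(sock, buf, len, 0);

		if (out < 0) {
			return -errno;	/* on a non-blocking socket,
					 * EAGAIN means "window full,
					 * try again later"
					 */
		}

		buf += out;
		len -= out;
	}

	return 0;
}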
3804 /* net context is about to send out queued data - inform caller only */
3830 net_sprint_addr(local_addr->sa_family, in net_tcp_connect()
3831 (const void *)&net_sin(local_addr)->sin_addr), in net_tcp_connect()
3832 net_sprint_addr(remote_addr->sa_family, in net_tcp_connect()
3833 (const void *)&net_sin(remote_addr)->sin_addr)); in net_tcp_connect()
3835 conn = context->tcp; in net_tcp_connect()
3836 conn->iface = net_context_get_iface(context); in net_tcp_connect()
3845 ret = -EINVAL; in net_tcp_connect()
3849 memset(&conn->src, 0, sizeof(struct sockaddr_in)); in net_tcp_connect()
3850 memset(&conn->dst, 0, sizeof(struct sockaddr_in)); in net_tcp_connect()
3852 conn->src.sa.sa_family = AF_INET; in net_tcp_connect()
3853 conn->dst.sa.sa_family = AF_INET; in net_tcp_connect()
3855 conn->dst.sin.sin_port = remote_port; in net_tcp_connect()
3856 conn->src.sin.sin_port = local_port; in net_tcp_connect()
3863 &net_sin(local_addr)->sin_addr)) { in net_tcp_connect()
3866 &net_sin(remote_addr)->sin_addr); in net_tcp_connect()
3867 net_ipaddr_copy(&conn->src.sin.sin_addr, ip4); in net_tcp_connect()
3869 net_ipaddr_copy(&conn->src.sin.sin_addr, in net_tcp_connect()
3870 &net_sin(local_addr)->sin_addr); in net_tcp_connect()
3872 net_ipaddr_copy(&conn->dst.sin.sin_addr, in net_tcp_connect()
3873 &net_sin(remote_addr)->sin_addr); in net_tcp_connect()
3878 ret = -EINVAL; in net_tcp_connect()
3882 memset(&conn->src, 0, sizeof(struct sockaddr_in6)); in net_tcp_connect()
3883 memset(&conn->dst, 0, sizeof(struct sockaddr_in6)); in net_tcp_connect()
3885 conn->src.sin6.sin6_family = AF_INET6; in net_tcp_connect()
3886 conn->dst.sin6.sin6_family = AF_INET6; in net_tcp_connect()
3888 conn->dst.sin6.sin6_port = remote_port; in net_tcp_connect()
3889 conn->src.sin6.sin6_port = local_port; in net_tcp_connect()
3892 &net_sin6(local_addr)->sin6_addr)) { in net_tcp_connect()
3895 &net_sin6(remote_addr)->sin6_addr); in net_tcp_connect()
3896 net_ipaddr_copy(&conn->src.sin6.sin6_addr, ip6); in net_tcp_connect()
3898 net_ipaddr_copy(&conn->src.sin6.sin6_addr, in net_tcp_connect()
3899 &net_sin6(local_addr)->sin6_addr); in net_tcp_connect()
3901 net_ipaddr_copy(&conn->dst.sin6.sin6_addr, in net_tcp_connect()
3902 &net_sin6(remote_addr)->sin6_addr); in net_tcp_connect()
3906 ret = -EPROTONOSUPPORT; in net_tcp_connect()
3911 conn->seq = tcp_init_isn(&conn->src.sa, &conn->dst.sa); in net_tcp_connect()
3915 net_sprint_addr(conn->src.sa.sa_family, in net_tcp_connect()
3916 (const void *)&conn->src.sin.sin_addr), in net_tcp_connect()
3917 net_sprint_addr(conn->dst.sa.sa_family, in net_tcp_connect()
3918 (const void *)&conn->dst.sin.sin_addr)); in net_tcp_connect()
3927 &context->conn_handler); in net_tcp_connect()
3932 net_if_addr_ref(conn->iface, conn->src.sa.sa_family, in net_tcp_connect()
3933 conn->src.sa.sa_family == AF_INET ? in net_tcp_connect()
3934 (const void *)&conn->src.sin.sin_addr : in net_tcp_connect()
3935 (const void *)&conn->src.sin6.sin6_addr); in net_tcp_connect()
3936 conn->addr_ref_done = true; in net_tcp_connect()
3938 conn->connect_cb = cb; in net_tcp_connect()
3939 context->user_data = user_data; in net_tcp_connect()
3944 conn->in_connect = !IS_ENABLED(CONFIG_NET_TEST_PROTOCOL); in net_tcp_connect()
3946 /* The ref will make sure that if the connection is closed in tcp_in(), in net_tcp_connect()
3953 if (conn->state == TCP_UNUSED || conn->state == TCP_CLOSED) { in net_tcp_connect()
3954 ret = -ENOTCONN; in net_tcp_connect()
3957 conn->state != TCP_ESTABLISHED) { in net_tcp_connect()
3958 ret = -EINPROGRESS; in net_tcp_connect()
3960 } else if (k_sem_take(&conn->connect_sem, timeout) != 0 && in net_tcp_connect()
3961 conn->state != TCP_ESTABLISHED) { in net_tcp_connect()
3962 if (conn->in_connect) { in net_tcp_connect()
3963 conn->in_connect = false; in net_tcp_connect()
3964 tcp_conn_close(conn, -ETIMEDOUT); in net_tcp_connect()
3967 ret = -ETIMEDOUT; in net_tcp_connect()
3970 conn->in_connect = false; in net_tcp_connect()
3985 struct tcp *conn = context->tcp; in net_tcp_accept()
3990 return -EINVAL; in net_tcp_accept()
3995 if (conn->state != TCP_LISTEN) { in net_tcp_accept()
3996 return -EINVAL; in net_tcp_accept()
3999 conn->accept_cb = cb; in net_tcp_accept()
4008 return -EINVAL; in net_tcp_accept()
4013 if (net_sin_ptr(&context->local)->sin_addr) { in net_tcp_accept()
4014 net_ipaddr_copy(&in->sin_addr, in net_tcp_accept()
4015 net_sin_ptr(&context->local)->sin_addr); in net_tcp_accept()
4018 in->sin_port = in net_tcp_accept()
4019 net_sin((struct sockaddr *)&context->local)->sin_port; in net_tcp_accept()
4020 local_port = ntohs(in->sin_port); in net_tcp_accept()
4021 remote_port = ntohs(net_sin(&context->remote)->sin_port); in net_tcp_accept()
4027 return -EINVAL; in net_tcp_accept()
4032 if (net_sin6_ptr(&context->local)->sin6_addr) { in net_tcp_accept()
4033 net_ipaddr_copy(&in6->sin6_addr, in net_tcp_accept()
4034 net_sin6_ptr(&context->local)->sin6_addr); in net_tcp_accept()
4037 in6->sin6_port = in net_tcp_accept()
4038 net_sin6((struct sockaddr *)&context->local)->sin6_port; in net_tcp_accept()
4039 local_port = ntohs(in6->sin6_port); in net_tcp_accept()
4040 remote_port = ntohs(net_sin6(&context->remote)->sin6_port); in net_tcp_accept()
4045 return -EINVAL; in net_tcp_accept()
4048 context->user_data = user_data; in net_tcp_accept()
4053 net_conn_unregister(context->conn_handler); in net_tcp_accept()
4057 context->flags & NET_CONTEXT_REMOTE_ADDR_SET ? in net_tcp_accept()
4058 &context->remote : NULL, in net_tcp_accept()
4062 &context->conn_handler); in net_tcp_accept()
4068 struct tcp *conn = context->tcp; in net_tcp_recv()
4072 context->recv_cb = cb; in net_tcp_recv()
4075 conn->recv_user_data = user_data; in net_tcp_recv()
4090 return -ENOBUFS; in net_tcp_finalize()
4093 tcp_hdr->chksum = 0U; in net_tcp_finalize()
4096 tcp_hdr->chksum = net_calc_chksum_tcp(pkt); in net_tcp_finalize()
4146 conn = context->tcp; in tcp_input()
4147 tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC); in tcp_input()
4148 tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST); in tcp_input()
4156 conn->iface = pkt->iface; in tcp_input()
4174 net_pkt_pull(up, net_pkt_get_len(up) - len); in tp_tcp_recv_cb()
4176 for (struct net_buf *buf = pkt->buffer; buf != NULL; buf = buf->frags) { in tp_tcp_recv_cb()
4177 net_tcp_queue(conn->context, buf->data, buf->len); in tp_tcp_recv_cb()
4193 .state = tcp_state_to_str(conn->state, true), in tp_init()
4194 .seq = conn->seq, in tp_init()
4195 .ack = conn->ack, in tp_init()
4220 size_t data_len = ntohs(uh->len) - sizeof(*uh); in tp_input()
4240 data_len = ntohs(uh->len) - sizeof(*uh); in tp_input()
4261 if (is("CONNECT", tp->op)) { in tp_input()
4262 tp_output(pkt->family, pkt->iface, buf, 1); in tp_input()
4270 conn = context->tcp; in tp_input()
4271 tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC); in tp_input()
4272 tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST); in tp_input()
4273 conn->iface = pkt->iface; in tp_input()
4276 conn->seq = tp->seq; in tp_input()
4279 if (is("CLOSE", tp->op)) { in tp_input()
4285 context = conn->context; in tp_input()
4295 if (is("CLOSE2", tp->op)) { in tp_input()
4298 net_tcp_put(conn->context); in tp_input()
4300 if (is("RECV", tp->op)) { in tp_input()
4307 tp->data = hexstr; in tp_input()
4308 NET_DBG("%zd = tcp_recv(\"%s\")", len, tp->data); in tp_input()
4312 if (is("SEND", tp->op)) { in tp_input()
4313 ssize_t len = tp_str_to_hex(buf, sizeof(buf), tp->data); in tp_input()
4317 tp_output(pkt->family, pkt->iface, buf, 1); in tp_input()
4319 NET_DBG("tcp_send(\"%s\")", tp->data); in tp_input()
4321 net_tcp_queue(conn->context, buf, len); in tp_input()
4340 tp_state = tp->type; in tp_input()
4343 NET_ASSERT(false, "Unimplemented tp command: %s", tp->msg); in tp_input()
4347 tp_output(pkt->family, pkt->iface, buf, json_len); in tp_input()
4350 tp_output(pkt->family, pkt->iface, buf, 1); in tp_input()
4386 if (atomic_get(&conn->ref_count) > 0) { in net_tcp_foreach()
4450 sa_family_t family = net_context_get_family(conn->context); in net_tcp_get_supported_mss()
4453 struct net_if *iface = net_context_get_iface(conn->context); in net_tcp_get_supported_mss()
4456 dest_mtu = get_ipv4_destination_mtu(iface, &conn->dst.sin.sin_addr); in net_tcp_get_supported_mss()
4459 return dest_mtu - NET_IPV4TCPH_LEN; in net_tcp_get_supported_mss()
4462 struct net_if *iface = net_context_get_iface(conn->context); in net_tcp_get_supported_mss()
4465 dest_mtu = get_ipv6_destination_mtu(iface, &conn->dst.sin6.sin6_addr); in net_tcp_get_supported_mss()
4468 return dest_mtu - NET_IPV6TCPH_LEN; in net_tcp_get_supported_mss()
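
As a concrete example of the arithmetic in net_tcp_get_supported_mss(): with a destination MTU of 1500 bytes and no IP or TCP options, the advertised MSS is 1500 - 40 = 1460 for IPv4 (NET_IPV4TCPH_LEN, a 20-byte IP header plus a 20-byte TCP header) and 1500 - 60 = 1440 for IPv6 (NET_IPV6TCPH_LEN, a 40-byte IP header plus a 20-byte TCP header).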
4484 if (IS_ENABLED(CONFIG_NET_IPV6) && data->remote.sa_family == AF_INET6 && in testing_find_conn()
4485 net_ipv6_addr_cmp(&conn->dst.sin6.sin6_addr, in testing_find_conn()
4486 &net_sin6(&data->remote)->sin6_addr)) { in testing_find_conn()
4487 if (data->mtu > 0) { in testing_find_conn()
4494 data->mtu = net_tcp_get_supported_mss(conn) + NET_IPV6TCPH_LEN; in testing_find_conn()
4498 if (IS_ENABLED(CONFIG_NET_IPV4) && data->remote.sa_family == AF_INET && in testing_find_conn()
4499 net_ipv4_addr_cmp(&conn->dst.sin.sin_addr, in testing_find_conn()
4500 &net_sin(&data->remote)->sin_addr)) { in testing_find_conn()
4501 if (data->mtu > 0) { in testing_find_conn()
4508 data->mtu = net_tcp_get_supported_mss(conn) + NET_IPV4TCPH_LEN; in testing_find_conn()
4534 struct tcp *conn = context->tcp; in net_tcp_set_option()
4538 k_mutex_lock(&conn->lock, K_FOREVER); in net_tcp_set_option()
4558 k_mutex_unlock(&conn->lock); in net_tcp_set_option()
4571 struct tcp *conn = context->tcp; in net_tcp_get_option()
4575 k_mutex_lock(&conn->lock, K_FOREVER); in net_tcp_get_option()
4595 k_mutex_unlock(&conn->lock); in net_tcp_get_option()
4607 struct tcp *conn = context->tcp; in net_tcp_tx_sem_get()
4609 return &conn->tx_sem; in net_tcp_tx_sem_get()
4614 struct tcp *conn = context->tcp; in net_tcp_conn_sem_get()
4616 return &conn->connect_sem; in net_tcp_conn_sem_get()
4624 /* Register inputs for TTCN-3 based TCP sanity check */ in net_tcp_init()