/*
 * Copyright (c) 2018-2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_tcp, CONFIG_NET_TCP_LOG_LEVEL);

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <zephyr/kernel.h>
#include <zephyr/random/random.h>

#if defined(CONFIG_NET_TCP_ISN_RFC6528)
#include <mbedtls/md5.h>
#endif
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_context.h>
#include <zephyr/net/udp.h>
#include "ipv4.h"
#include "ipv6.h"
#include "connection.h"
#include "net_stats.h"
#include "net_private.h"
#include "tcp_internal.h"

#define ACK_TIMEOUT_MS CONFIG_NET_TCP_ACK_TIMEOUT
#define ACK_TIMEOUT K_MSEC(ACK_TIMEOUT_MS)
#define LAST_ACK_TIMEOUT_MS tcp_fin_timeout_ms
#define LAST_ACK_TIMEOUT K_MSEC(LAST_ACK_TIMEOUT_MS)
#define FIN_TIMEOUT K_MSEC(tcp_fin_timeout_ms)
#define ACK_DELAY K_MSEC(100)
#define ZWP_MAX_DELAY_MS 120000
#define DUPLICATE_ACK_RETRANSMIT_TRHESHOLD 3
static int tcp_rto = CONFIG_NET_TCP_INIT_RETRANSMISSION_TIMEOUT;
static int tcp_retries = CONFIG_NET_TCP_RETRY_COUNT;
static int tcp_fin_timeout_ms;
static int tcp_rx_window =
#if (CONFIG_NET_TCP_MAX_RECV_WINDOW_SIZE != 0)
	CONFIG_NET_TCP_MAX_RECV_WINDOW_SIZE;
#else
#if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
	(CONFIG_NET_BUF_RX_COUNT * CONFIG_NET_BUF_DATA_SIZE) / 3;
#else
	CONFIG_NET_BUF_DATA_POOL_SIZE / 3;
#endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
#endif
static int tcp_tx_window =
#if (CONFIG_NET_TCP_MAX_SEND_WINDOW_SIZE != 0)
	CONFIG_NET_TCP_MAX_SEND_WINDOW_SIZE;
#else
#if defined(CONFIG_NET_BUF_FIXED_DATA_SIZE)
	(CONFIG_NET_BUF_TX_COUNT * CONFIG_NET_BUF_DATA_SIZE) / 3;
#else
	CONFIG_NET_BUF_DATA_POOL_SIZE / 3;
#endif /* CONFIG_NET_BUF_FIXED_DATA_SIZE */
#endif
#ifdef CONFIG_NET_TCP_RANDOMIZED_RTO
#define TCP_RTO_MS (conn->rto)
#else
#define TCP_RTO_MS (tcp_rto)
#endif

/* Define the number of MSS sections the congestion window is initialized at */
#define TCP_CONGESTION_INITIAL_WIN 1
#define TCP_CONGESTION_INITIAL_SSTHRESH 3

static sys_slist_t tcp_conns = SYS_SLIST_STATIC_INIT(&tcp_conns);

static K_MUTEX_DEFINE(tcp_lock);

K_MEM_SLAB_DEFINE_STATIC(tcp_conns_slab, sizeof(struct tcp),
			 CONFIG_NET_MAX_CONTEXTS, 4);

static struct k_work_q tcp_work_q;
static K_KERNEL_STACK_DEFINE(work_q_stack, CONFIG_NET_TCP_WORKQ_STACK_SIZE);

static enum net_verdict tcp_in(struct tcp *conn, struct net_pkt *pkt);
static bool is_destination_local(struct net_pkt *pkt);
static void tcp_out(struct tcp *conn, uint8_t flags);
static const char *tcp_state_to_str(enum tcp_state state, bool prefix);

int (*tcp_send_cb)(struct net_pkt *pkt) = NULL;
size_t (*tcp_recv_cb)(struct tcp *conn, struct net_pkt *pkt) = NULL;

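/* The helpers below stash a TCP sequence number in the net_buf user
 * data area. This is how out-of-order data queued in
 * conn->queue_recv_data is tagged, so it can later be matched against
 * the expected sequence (see tcp_check_pending_data()).
 */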
static uint32_t tcp_get_seq(struct net_buf *buf)
{
	return *(uint32_t *)net_buf_user_data(buf);
}

static void tcp_set_seq(struct net_buf *buf, uint32_t seq)
{
	*(uint32_t *)net_buf_user_data(buf) = seq;
}

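/* Copy the [pos, pos + len) region of the packet into a single freshly
 * allocated fragment and splice that fragment back into the chain, so
 * the region becomes accessible through one contiguous buffer.
 */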
static int tcp_pkt_linearize(struct net_pkt *pkt, size_t pos, size_t len)
{
	struct net_buf *buf, *first = pkt->cursor.buf, *second = first->frags;
	int ret = 0;
	size_t len1, len2;

	if (net_pkt_get_len(pkt) < (pos + len)) {
		NET_ERR("Insufficient packet len=%zd (pos+len=%zu)",
			net_pkt_get_len(pkt), pos + len);
		ret = -EINVAL;
		goto out;
	}

	buf = net_pkt_get_frag(pkt, len, TCP_PKT_ALLOC_TIMEOUT);

	if (!buf || buf->size < len) {
		if (buf) {
			net_buf_unref(buf);
		}
		ret = -ENOBUFS;
		goto out;
	}

	net_buf_linearize(buf->data, buf->size, pkt->frags, pos, len);
	net_buf_add(buf, len);

	len1 = first->len - (pkt->cursor.pos - pkt->cursor.buf->data);
	len2 = len - len1;

	first->len -= len1;

	while (len2) {
		size_t pull_len = MIN(second->len, len2);
		struct net_buf *next;

		len2 -= pull_len;
		net_buf_pull(second, pull_len);
		next = second->frags;
		if (second->len == 0) {
			net_buf_unref(second);
		}
		second = next;
	}

	buf->frags = second;
	first->frags = buf;
out:
	return ret;
}

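/* Return a pointer to the TCP header inside the packet, linearizing
 * the header area first if it spans multiple fragments. Returns NULL
 * if the header cannot be made accessible.
 */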
static struct tcphdr *th_get(struct net_pkt *pkt)
{
	size_t ip_len = net_pkt_ip_hdr_len(pkt) + net_pkt_ip_opts_len(pkt);
	struct tcphdr *th = NULL;
again:
	net_pkt_cursor_init(pkt);
	net_pkt_set_overwrite(pkt, true);

	if (net_pkt_skip(pkt, ip_len) != 0) {
		goto out;
	}

	if (!net_pkt_is_contiguous(pkt, sizeof(*th))) {
		if (tcp_pkt_linearize(pkt, ip_len, sizeof(*th)) < 0) {
			goto out;
		}

		goto again;
	}

	th = net_pkt_cursor_get_pos(pkt);
out:
	return th;
}

static size_t tcp_endpoint_len(sa_family_t af)
{
	return (af == AF_INET) ? sizeof(struct sockaddr_in) :
				 sizeof(struct sockaddr_in6);
}

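/* Fill ep with either the source or the destination address/port of
 * the given packet, depending on the src selector.
 */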
static int tcp_endpoint_set(union tcp_endpoint *ep, struct net_pkt *pkt,
			    enum pkt_addr src)
{
	int ret = 0;

	switch (net_pkt_family(pkt)) {
	case AF_INET:
		if (IS_ENABLED(CONFIG_NET_IPV4)) {
			struct net_ipv4_hdr *ip = NET_IPV4_HDR(pkt);
			struct tcphdr *th;

			th = th_get(pkt);
			if (!th) {
				return -ENOBUFS;
			}

			memset(ep, 0, sizeof(*ep));

			ep->sin.sin_port = src == TCP_EP_SRC ? th_sport(th) :
							       th_dport(th);
			net_ipv4_addr_copy_raw((uint8_t *)&ep->sin.sin_addr,
					       src == TCP_EP_SRC ?
					       ip->src : ip->dst);
			ep->sa.sa_family = AF_INET;
		} else {
			ret = -EINVAL;
		}

		break;

	case AF_INET6:
		if (IS_ENABLED(CONFIG_NET_IPV6)) {
			struct net_ipv6_hdr *ip = NET_IPV6_HDR(pkt);
			struct tcphdr *th;

			th = th_get(pkt);
			if (!th) {
				return -ENOBUFS;
			}

			memset(ep, 0, sizeof(*ep));

			ep->sin6.sin6_port = src == TCP_EP_SRC ? th_sport(th) :
								 th_dport(th);
			net_ipv6_addr_copy_raw((uint8_t *)&ep->sin6.sin6_addr,
					       src == TCP_EP_SRC ?
					       ip->src : ip->dst);
			ep->sa.sa_family = AF_INET6;
		} else {
			ret = -EINVAL;
		}

		break;

	default:
		NET_ERR("Unknown address family: %hu", net_pkt_family(pkt));
		ret = -EINVAL;
	}

	return ret;
}

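/* Render the given flag bits into a "SYN,ACK"-style string for debug
 * logging. Note the returned buffer is static and shared between calls.
 */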
static const char *tcp_flags(uint8_t flags)
{
#define BUF_SIZE 25 /* 6 * 4 + 1 */
	static char buf[BUF_SIZE];
	int len = 0;

	buf[0] = '\0';

	if (flags) {
		if (flags & SYN) {
			len += snprintk(buf + len, BUF_SIZE - len, "SYN,");
		}
		if (flags & FIN) {
			len += snprintk(buf + len, BUF_SIZE - len, "FIN,");
		}
		if (flags & ACK) {
			len += snprintk(buf + len, BUF_SIZE - len, "ACK,");
		}
		if (flags & PSH) {
			len += snprintk(buf + len, BUF_SIZE - len, "PSH,");
		}
		if (flags & RST) {
			len += snprintk(buf + len, BUF_SIZE - len, "RST,");
		}
		if (flags & URG) {
			len += snprintk(buf + len, BUF_SIZE - len, "URG,");
		}

		if (len > 0) {
			buf[len - 1] = '\0'; /* delete the last comma */
		}
	}
#undef BUF_SIZE
	return buf;
}

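/* Length of the TCP payload: total packet length minus the IP header,
 * IP options, TCP header and TCP options.
 */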
static size_t tcp_data_len(struct net_pkt *pkt)
{
	struct tcphdr *th = th_get(pkt);
	size_t tcp_options_len = (th_off(th) - 5) * 4;
	int len = net_pkt_get_len(pkt) - net_pkt_ip_hdr_len(pkt) -
		  net_pkt_ip_opts_len(pkt) - sizeof(*th) - tcp_options_len;

	return len > 0 ? (size_t)len : 0;
}

static const char *tcp_th(struct net_pkt *pkt)
{
#define BUF_SIZE 80
	static char buf[BUF_SIZE];
	int len = 0;
	struct tcphdr *th = th_get(pkt);

	buf[0] = '\0';

	if (th_off(th) < 5) {
		len += snprintk(buf + len, BUF_SIZE - len,
				"bogus th_off: %hu", (uint16_t)th_off(th));
		goto end;
	}

	len += snprintk(buf + len, BUF_SIZE - len,
			"%s Seq=%u", tcp_flags(th_flags(th)), th_seq(th));

	if (th_flags(th) & ACK) {
		len += snprintk(buf + len, BUF_SIZE - len,
				" Ack=%u", th_ack(th));
	}

	len += snprintk(buf + len, BUF_SIZE - len,
			" Len=%ld", (long)tcp_data_len(pkt));
end:
#undef BUF_SIZE
	return buf;
}

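/* True when the packet goes out over a 6lo-based L2 (Bluetooth IPSP or
 * IEEE 802.15.4), both of which compress and rewrite the IP header on
 * transmit; see the comment in tcp_send() below.
 */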
#define is_6lo_technology(pkt)						       \
	(IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6 &&     \
	 ((IS_ENABLED(CONFIG_NET_L2_BT) &&				       \
	   net_pkt_lladdr_dst(pkt)->type == NET_LINK_BLUETOOTH) ||	       \
	  (IS_ENABLED(CONFIG_NET_L2_IEEE802154) &&			       \
	   net_pkt_lladdr_dst(pkt)->type == NET_LINK_IEEE802154)))

static void tcp_send(struct net_pkt *pkt)
{
	NET_DBG("%s", tcp_th(pkt));

	tcp_pkt_ref(pkt);

	if (tcp_send_cb) {
		if (tcp_send_cb(pkt) < 0) {
			NET_ERR("net_send_data()");
			tcp_pkt_unref(pkt);
		}
		goto out;
	}

	/* We must have special handling for some network technologies that
	 * tweak the IP protocol headers during packet sending. This happens
	 * with Bluetooth and IEEE 802.15.4, which use IPv6 header compression
	 * (6lo) and alter the sent network packet. So in order to avoid any
	 * corruption of the original data buffer, we must copy the sent data.
	 * For Bluetooth, its fragmentation code will even mangle the data
	 * part of the message, so we need to copy that too.
	 */
	if (is_6lo_technology(pkt)) {
		struct net_pkt *new_pkt;

		new_pkt = tcp_pkt_clone(pkt);
		if (!new_pkt) {
			/* The caller of this func assumes that the net_pkt
			 * is consumed by this function. We call unref here
			 * so that the unref at the end of the func will
			 * free the net_pkt.
			 */
			tcp_pkt_unref(pkt);
			goto out;
		}

		if (net_send_data(new_pkt) < 0) {
			tcp_pkt_unref(new_pkt);
		}

		/* We simulate sending of the original pkt and unref it like
		 * the device driver would do.
		 */
		tcp_pkt_unref(pkt);
	} else {
		if (net_send_data(pkt) < 0) {
			NET_ERR("net_send_data()");
			tcp_pkt_unref(pkt);
		}
	}
out:
	tcp_pkt_unref(pkt);
}

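/* With CONFIG_NET_TCP_RANDOMIZED_RTO the per-connection RTO is drawn
 * uniformly from [tcp_rto, 1.5 * tcp_rto): gain8 is in [0, 255], so
 * gain = 512 + gain8 is in [512, 767], and (gain * rto) >> 9 scales
 * rto by gain / 512, i.e. by a factor in [1.0, 1.5).
 */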
static void tcp_derive_rto(struct tcp *conn)
{
#ifdef CONFIG_NET_TCP_RANDOMIZED_RTO
	/* Compute a randomized rto between 1 and 1.5 times tcp_rto */
	uint32_t gain;
	uint8_t gain8;
	uint32_t rto;

	/* Getting random data is computationally expensive, so only use 8 bits */
	sys_rand_get(&gain8, sizeof(uint8_t));

	gain = (uint32_t)gain8;
	gain += 1 << 9;

	rto = (uint32_t)tcp_rto;
	rto = (gain * rto) >> 9;
	conn->rto = (uint16_t)rto;
#else
	ARG_UNUSED(conn);
#endif
}

#ifdef CONFIG_NET_TCP_CONGESTION_AVOIDANCE

/* Implementation according to RFC6582 */

static void tcp_new_reno_log(struct tcp *conn, char *step)
{
	NET_DBG("conn: %p, ca %s, cwnd=%d, ssthres=%d, fast_pend=%i",
		conn, step, conn->ca.cwnd, conn->ca.ssthresh,
		conn->ca.pending_fast_retransmit_bytes);
}

static void tcp_new_reno_init(struct tcp *conn)
{
	conn->ca.cwnd = conn_mss(conn) * TCP_CONGESTION_INITIAL_WIN;
	conn->ca.ssthresh = conn_mss(conn) * TCP_CONGESTION_INITIAL_SSTHRESH;
	conn->ca.pending_fast_retransmit_bytes = 0;
	tcp_new_reno_log(conn, "init");
}

static void tcp_new_reno_fast_retransmit(struct tcp *conn)
{
	if (conn->ca.pending_fast_retransmit_bytes == 0) {
		conn->ca.ssthresh = MAX(conn_mss(conn) * 2, conn->unacked_len / 2);
		/* Account for the lost segments */
		conn->ca.cwnd = conn_mss(conn) * 3 + conn->ca.ssthresh;
		conn->ca.pending_fast_retransmit_bytes = conn->unacked_len;
		tcp_new_reno_log(conn, "fast_retransmit");
	}
}

static void tcp_new_reno_timeout(struct tcp *conn)
{
	conn->ca.ssthresh = MAX(conn_mss(conn) * 2, conn->unacked_len / 2);
	conn->ca.cwnd = conn_mss(conn);
	tcp_new_reno_log(conn, "timeout");
}

/* For every duplicate ack increment the cwnd by mss */
static void tcp_new_reno_dup_ack(struct tcp *conn)
{
	int32_t new_win = conn->ca.cwnd;

	new_win += conn_mss(conn);
	conn->ca.cwnd = MIN(new_win, UINT16_MAX);
	tcp_new_reno_log(conn, "dup_ack");
}

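/* Grow the window on newly acked data: below ssthresh the connection
 * is in slow start (cwnd grows by up to one MSS per ACK), above it in
 * congestion avoidance (roughly MSS^2 / cwnd per ACK). While fast
 * recovery is pending, acked bytes are drained from both the pending
 * count and cwnd until recovery completes.
 */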
static void tcp_new_reno_pkts_acked(struct tcp *conn, uint32_t acked_len)
{
	int32_t new_win = conn->ca.cwnd;
	int32_t win_inc = MIN(acked_len, conn_mss(conn));

	if (conn->ca.pending_fast_retransmit_bytes == 0) {
		if (conn->ca.cwnd < conn->ca.ssthresh) {
			new_win += win_inc;
		} else {
			/* Implement a div_ceil to avoid rounding to 0 */
			new_win += ((win_inc * win_inc) + conn->ca.cwnd - 1) / conn->ca.cwnd;
		}
		conn->ca.cwnd = MIN(new_win, UINT16_MAX);
	} else {
		/* Check if it is still in fast recovery mode */
		if (conn->ca.pending_fast_retransmit_bytes <= acked_len) {
			conn->ca.pending_fast_retransmit_bytes = 0;
			conn->ca.cwnd = conn->ca.ssthresh;
		} else {
			conn->ca.pending_fast_retransmit_bytes -= acked_len;
			conn->ca.cwnd -= acked_len;
		}
	}
	tcp_new_reno_log(conn, "pkts_acked");
}

static void tcp_ca_init(struct tcp *conn)
{
	tcp_new_reno_init(conn);
}

static void tcp_ca_fast_retransmit(struct tcp *conn)
{
	tcp_new_reno_fast_retransmit(conn);
}

static void tcp_ca_timeout(struct tcp *conn)
{
	tcp_new_reno_timeout(conn);
}

static void tcp_ca_dup_ack(struct tcp *conn)
{
	tcp_new_reno_dup_ack(conn);
}

static void tcp_ca_pkts_acked(struct tcp *conn, uint32_t acked_len)
{
	tcp_new_reno_pkts_acked(conn, acked_len);
}
#else

static void tcp_ca_init(struct tcp *conn) { }

static void tcp_ca_fast_retransmit(struct tcp *conn) { }

static void tcp_ca_timeout(struct tcp *conn) { }

static void tcp_ca_dup_ack(struct tcp *conn) { }

static void tcp_ca_pkts_acked(struct tcp *conn, uint32_t acked_len) { }

#endif

#if defined(CONFIG_NET_TCP_KEEPALIVE)

static void tcp_send_keepalive_probe(struct k_work *work);

static void keep_alive_timer_init(struct tcp *conn)
{
	conn->keep_alive = false;
	conn->keep_idle = CONFIG_NET_TCP_KEEPIDLE_DEFAULT;
	conn->keep_intvl = CONFIG_NET_TCP_KEEPINTVL_DEFAULT;
	conn->keep_cnt = CONFIG_NET_TCP_KEEPCNT_DEFAULT;
	NET_DBG("keepalive timer init idle = %d, interval = %d, cnt = %d",
		conn->keep_idle, conn->keep_intvl, conn->keep_cnt);
	k_work_init_delayable(&conn->keepalive_timer, tcp_send_keepalive_probe);
}

static void keep_alive_param_copy(struct tcp *to, struct tcp *from)
{
	to->keep_alive = from->keep_alive;
	to->keep_idle = from->keep_idle;
	to->keep_intvl = from->keep_intvl;
	to->keep_cnt = from->keep_cnt;
}

static void keep_alive_timer_restart(struct tcp *conn)
{
	if (!conn->keep_alive || conn->state != TCP_ESTABLISHED) {
		return;
	}

	conn->keep_cur = 0;
	k_work_reschedule_for_queue(&tcp_work_q, &conn->keepalive_timer,
				    K_SECONDS(conn->keep_idle));
}

static void keep_alive_timer_stop(struct tcp *conn)
{
	k_work_cancel_delayable(&conn->keepalive_timer);
}

static int set_tcp_keep_alive(struct tcp *conn, const void *value, size_t len)
{
	int keep_alive;

	if (conn == NULL || value == NULL || len != sizeof(int)) {
		return -EINVAL;
	}

	keep_alive = *(int *)value;
	if ((keep_alive < 0) || (keep_alive > 1)) {
		return -EINVAL;
	}

	conn->keep_alive = (bool)keep_alive;

	if (keep_alive) {
		keep_alive_timer_restart(conn);
	} else {
		keep_alive_timer_stop(conn);
	}

	return 0;
}

static int set_tcp_keep_idle(struct tcp *conn, const void *value, size_t len)
{
	int keep_idle;

	if (conn == NULL || value == NULL || len != sizeof(int)) {
		return -EINVAL;
	}

	keep_idle = *(int *)value;
	if (keep_idle < 1) {
		return -EINVAL;
	}

	conn->keep_idle = keep_idle;

	keep_alive_timer_restart(conn);

	return 0;
}

static int set_tcp_keep_intvl(struct tcp *conn, const void *value, size_t len)
{
	int keep_intvl;

	if (conn == NULL || value == NULL || len != sizeof(int)) {
		return -EINVAL;
	}

	keep_intvl = *(int *)value;
	if (keep_intvl < 1) {
		return -EINVAL;
	}

	conn->keep_intvl = keep_intvl;

	keep_alive_timer_restart(conn);

	return 0;
}

static int set_tcp_keep_cnt(struct tcp *conn, const void *value, size_t len)
{
	int keep_cnt;

	if (conn == NULL || value == NULL || len != sizeof(int)) {
		return -EINVAL;
	}

	keep_cnt = *(int *)value;
	if (keep_cnt < 1) {
		return -EINVAL;
	}

	conn->keep_cnt = keep_cnt;

	keep_alive_timer_restart(conn);

	return 0;
}

static int get_tcp_keep_alive(struct tcp *conn, void *value, size_t *len)
{
	if (conn == NULL || value == NULL || len == NULL ||
	    *len != sizeof(int)) {
		return -EINVAL;
	}

	*((int *)value) = (int)conn->keep_alive;

	return 0;
}

static int get_tcp_keep_idle(struct tcp *conn, void *value, size_t *len)
{
	if (conn == NULL || value == NULL || len == NULL ||
	    *len != sizeof(int)) {
		return -EINVAL;
	}

	*((int *)value) = (int)conn->keep_idle;

	return 0;
}

static int get_tcp_keep_intvl(struct tcp *conn, void *value, size_t *len)
{
	if (conn == NULL || value == NULL || len == NULL ||
	    *len != sizeof(int)) {
		return -EINVAL;
	}

	*((int *)value) = (int)conn->keep_intvl;

	return 0;
}

static int get_tcp_keep_cnt(struct tcp *conn, void *value, size_t *len)
{
	if (conn == NULL || value == NULL || len == NULL ||
	    *len != sizeof(int)) {
		return -EINVAL;
	}

	*((int *)value) = (int)conn->keep_cnt;

	return 0;
}

#else /* CONFIG_NET_TCP_KEEPALIVE */

#define keep_alive_timer_init(...)
#define keep_alive_param_copy(...)
#define keep_alive_timer_restart(...)
#define keep_alive_timer_stop(...)
#define set_tcp_keep_alive(...) (-ENOPROTOOPT)
#define set_tcp_keep_idle(...) (-ENOPROTOOPT)
#define set_tcp_keep_intvl(...) (-ENOPROTOOPT)
#define set_tcp_keep_cnt(...) (-ENOPROTOOPT)
#define get_tcp_keep_alive(...) (-ENOPROTOOPT)
#define get_tcp_keep_idle(...) (-ENOPROTOOPT)
#define get_tcp_keep_intvl(...) (-ENOPROTOOPT)
#define get_tcp_keep_cnt(...) (-ENOPROTOOPT)

#endif /* CONFIG_NET_TCP_KEEPALIVE */

static void tcp_send_queue_flush(struct tcp *conn)
{
	struct net_pkt *pkt;

	k_work_cancel_delayable(&conn->send_timer);

	while ((pkt = tcp_slist(conn, &conn->send_queue, get,
				struct net_pkt, next))) {
		tcp_pkt_unref(pkt);
	}
}

static void tcp_conn_release(struct k_work *work)
{
	struct tcp *conn = CONTAINER_OF(work, struct tcp, conn_release);
	struct net_pkt *pkt;

#if defined(CONFIG_NET_TEST)
	if (conn->test_closed_cb != NULL) {
		conn->test_closed_cb(conn, conn->test_user_data);
	}
#endif

	k_mutex_lock(&tcp_lock, K_FOREVER);

	/* The application is no longer there; unref any remaining packets
	 * in the FIFO (although there shouldn't be any at this point).
	 */
	while ((pkt = k_fifo_get(&conn->recv_data, K_NO_WAIT)) != NULL) {
		tcp_pkt_unref(pkt);
	}

	k_mutex_lock(&conn->lock, K_FOREVER);

	if (conn->context->conn_handler) {
		net_conn_unregister(conn->context->conn_handler);
		conn->context->conn_handler = NULL;
	}

	conn->context->tcp = NULL;
	conn->state = TCP_UNUSED;

	tcp_send_queue_flush(conn);

	(void)k_work_cancel_delayable(&conn->send_data_timer);
	tcp_pkt_unref(conn->send_data);

	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
		tcp_pkt_unref(conn->queue_recv_data);
	}

	(void)k_work_cancel_delayable(&conn->timewait_timer);
	(void)k_work_cancel_delayable(&conn->fin_timer);
	(void)k_work_cancel_delayable(&conn->persist_timer);
	(void)k_work_cancel_delayable(&conn->ack_timer);
	(void)k_work_cancel_delayable(&conn->send_timer);
	(void)k_work_cancel_delayable(&conn->recv_queue_timer);
	keep_alive_timer_stop(conn);

	k_mutex_unlock(&conn->lock);

	net_context_unref(conn->context);
	conn->context = NULL;

	sys_slist_find_and_remove(&tcp_conns, &conn->next);

	k_mem_slab_free(&tcp_conns_slab, (void *)conn);

	k_mutex_unlock(&tcp_lock);
}

#if defined(CONFIG_NET_TEST)
void tcp_install_close_cb(struct net_context *ctx,
			  net_tcp_closed_cb_t cb,
			  void *user_data)
{
	NET_ASSERT(ctx->tcp != NULL);

	((struct tcp *)ctx->tcp)->test_closed_cb = cb;
	((struct tcp *)ctx->tcp)->test_user_data = user_data;
}
#endif

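/* Drop one reference to the connection. When the count reaches zero,
 * the actual teardown is deferred to the TCP work queue via
 * conn_release (see tcp_conn_release() above).
 */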
static int tcp_conn_unref(struct tcp *conn)
{
	int ref_count = atomic_get(&conn->ref_count);

	NET_DBG("conn: %p, ref_count=%d", conn, ref_count);

	k_mutex_lock(&conn->lock, K_FOREVER);

#if !defined(CONFIG_NET_TEST_PROTOCOL)
	if (conn->in_connect) {
		conn->in_connect = false;
		k_sem_reset(&conn->connect_sem);
	}
#endif /* CONFIG_NET_TEST_PROTOCOL */

	k_mutex_unlock(&conn->lock);

	ref_count = atomic_dec(&conn->ref_count) - 1;
	if (ref_count != 0) {
		tp_out(net_context_get_family(conn->context), conn->iface,
		       "TP_TRACE", "event", "CONN_DELETE");
		return ref_count;
	}

	/* Release the TCP context from the TCP workqueue. This ensures
	 * that all pending TCP work items are cancelled properly when
	 * the context is released.
	 */
	k_work_submit_to_queue(&tcp_work_q, &conn->conn_release);

	return ref_count;
}

#if CONFIG_NET_TCP_LOG_LEVEL >= LOG_LEVEL_DBG
#define tcp_conn_close(conn, status)				\
	tcp_conn_close_debug(conn, status, __func__, __LINE__)

static int tcp_conn_close_debug(struct tcp *conn, int status,
				const char *caller, int line)
#else
static int tcp_conn_close(struct tcp *conn, int status)
#endif
{
#if CONFIG_NET_TCP_LOG_LEVEL >= LOG_LEVEL_DBG
	NET_DBG("conn: %p closed by TCP stack (%s():%d)", conn, caller, line);
#endif
	k_mutex_lock(&conn->lock, K_FOREVER);
	conn_state(conn, TCP_CLOSED);
	keep_alive_timer_stop(conn);
	k_mutex_unlock(&conn->lock);

	if (conn->in_connect) {
		if (conn->connect_cb) {
			conn->connect_cb(conn->context, status, conn->context->user_data);

			/* Make sure the connect_cb is only called once. */
			conn->connect_cb = NULL;
		}
	} else if (conn->context->recv_cb) {
		conn->context->recv_cb(conn->context, NULL, NULL, NULL,
				       status, conn->recv_user_data);
	}

	k_sem_give(&conn->tx_sem);

	return tcp_conn_unref(conn);
}

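/* Transmit (or retransmit) the head of the send queue. Returns true
 * when the retry budget is exhausted and the caller should close the
 * connection.
 */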
static bool tcp_send_process_no_lock(struct tcp *conn)
{
	bool unref = false;
	struct net_pkt *pkt;
	bool local = false;

	pkt = tcp_slist(conn, &conn->send_queue, peek_head,
			struct net_pkt, next);
	if (!pkt) {
		goto out;
	}

	NET_DBG("%s %s", tcp_th(pkt), conn->in_retransmission ?
		"in_retransmission" : "");

	if (conn->in_retransmission) {
		if (conn->send_retries > 0) {
			struct net_pkt *clone = tcp_pkt_clone(pkt);

			if (clone) {
				tcp_send(clone);
				conn->send_retries--;
			}
		} else {
			unref = true;
			goto out;
		}
	} else {
		uint8_t fl = th_get(pkt)->th_flags;
		bool forget = ACK == fl || PSH == fl || (ACK | PSH) == fl ||
			RST & fl;

		pkt = forget ? tcp_slist(conn, &conn->send_queue, get,
					 struct net_pkt, next) :
			tcp_pkt_clone(pkt);
		if (!pkt) {
			NET_ERR("net_pkt alloc failure");
			goto out;
		}

		if (is_destination_local(pkt)) {
			local = true;
		}

		tcp_send(pkt);

		if (forget == false &&
		    !k_work_delayable_remaining_get(&conn->send_timer)) {
			conn->send_retries = tcp_retries;
			conn->in_retransmission = true;
		}
	}

	if (conn->in_retransmission) {
		k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer,
					    K_MSEC(TCP_RTO_MS));
	} else if (local && !sys_slist_is_empty(&conn->send_queue)) {
		k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer,
					    K_NO_WAIT);
	}

out:
	return unref;
}

static void tcp_send_process(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, send_timer);
	bool unref;

	k_mutex_lock(&conn->lock, K_FOREVER);

	unref = tcp_send_process_no_lock(conn);

	k_mutex_unlock(&conn->lock);

	if (unref) {
		tcp_conn_close(conn, -ETIMEDOUT);
	}
}

static void tcp_send_timer_cancel(struct tcp *conn)
{
	if (conn->in_retransmission == false) {
		return;
	}

	k_work_cancel_delayable(&conn->send_timer);

	{
		struct net_pkt *pkt = tcp_slist(conn, &conn->send_queue, get,
						struct net_pkt, next);
		if (pkt) {
			NET_DBG("%s", tcp_th(pkt));
			tcp_pkt_unref(pkt);
		}
	}

	if (sys_slist_is_empty(&conn->send_queue)) {
		conn->in_retransmission = false;
	} else {
		conn->send_retries = tcp_retries;
		k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer,
					    K_MSEC(TCP_RTO_MS));
	}
}

#if defined(CONFIG_NET_TCP_IPV6_ND_REACHABILITY_HINT)

static void tcp_nbr_reachability_hint(struct tcp *conn)
{
	int64_t now;
	struct net_if *iface;

	if (net_context_get_family(conn->context) != AF_INET6) {
		return;
	}

	now = k_uptime_get();
	iface = net_context_get_iface(conn->context);

	/* Ensure that Neighbor Reachability hints are rate-limited (using a
	 * threshold of half of the reachable time).
	 */
	if ((now - conn->last_nd_hint_time) > (net_if_ipv6_get_reachable_time(iface) / 2)) {
		net_ipv6_nbr_reachability_hint(iface, &conn->dst.sin6.sin6_addr);
		conn->last_nd_hint_time = now;
	}
}

#else /* CONFIG_NET_TCP_IPV6_ND_REACHABILITY_HINT */

#define tcp_nbr_reachability_hint(...)

#endif /* CONFIG_NET_TCP_IPV6_ND_REACHABILITY_HINT */

static const char *tcp_state_to_str(enum tcp_state state, bool prefix)
{
	const char *s = NULL;
#define _(_x) case _x: do { s = #_x; goto out; } while (0)
	switch (state) {
	_(TCP_UNUSED);
	_(TCP_LISTEN);
	_(TCP_SYN_SENT);
	_(TCP_SYN_RECEIVED);
	_(TCP_ESTABLISHED);
	_(TCP_FIN_WAIT_1);
	_(TCP_FIN_WAIT_2);
	_(TCP_CLOSE_WAIT);
	_(TCP_CLOSING);
	_(TCP_LAST_ACK);
	_(TCP_TIME_WAIT);
	_(TCP_CLOSED);
	}
#undef _
	NET_ASSERT(s, "Invalid TCP state: %u", state);
out:
	return prefix ? s : (s + 4);
}

static const char *tcp_conn_state(struct tcp *conn, struct net_pkt *pkt)
{
#define BUF_SIZE 160
	static char buf[BUF_SIZE];

	snprintk(buf, BUF_SIZE, "%s [%s Seq=%u Ack=%u]", pkt ? tcp_th(pkt) : "",
		 tcp_state_to_str(conn->state, false),
		 conn->seq, conn->ack);
#undef BUF_SIZE
	return buf;
}

static uint8_t *tcp_options_get(struct net_pkt *pkt, int tcp_options_len,
				uint8_t *buf, size_t buf_len)
{
	struct net_pkt_cursor backup;
	int ret;

	net_pkt_cursor_backup(pkt, &backup);
	net_pkt_cursor_init(pkt);
	net_pkt_skip(pkt, net_pkt_ip_hdr_len(pkt) + net_pkt_ip_opts_len(pkt) +
		     sizeof(struct tcphdr));
	ret = net_pkt_read(pkt, buf, MIN(tcp_options_len, buf_len));
	if (ret < 0) {
		buf = NULL;
	}

	net_pkt_cursor_restore(pkt, &backup);

	return buf;
}

static bool tcp_options_check(struct tcp_options *recv_options,
			      struct net_pkt *pkt, ssize_t len)
{
	uint8_t options_buf[40]; /* TCP header max options size is 40 */
	bool result = (len > 0) && ((len % 4) == 0);
	uint8_t *options = tcp_options_get(pkt, len, options_buf,
					   sizeof(options_buf));
	uint8_t opt, opt_len;

	NET_DBG("len=%zd", len);

	recv_options->mss_found = false;
	recv_options->wnd_found = false;

	for ( ; options && len >= 1; options += opt_len, len -= opt_len) {
		opt = options[0];

		if (opt == NET_TCP_END_OPT) {
			break;
		} else if (opt == NET_TCP_NOP_OPT) {
			opt_len = 1;
			continue;
		} else {
			if (len < 2) { /* Only END and NOP can have length 1 */
				NET_ERR("Illegal option %d with length %zd",
					opt, len);
				result = false;
				break;
			}
			opt_len = options[1];
		}

		NET_DBG("opt: %hu, opt_len: %hu",
			(uint16_t)opt, (uint16_t)opt_len);

		if (opt_len < 2 || opt_len > len) {
			result = false;
			break;
		}

		switch (opt) {
		case NET_TCP_MSS_OPT:
			if (opt_len != 4) {
				result = false;
				goto end;
			}

			recv_options->mss =
				ntohs(UNALIGNED_GET((uint16_t *)(options + 2)));
			recv_options->mss_found = true;
			NET_DBG("MSS=%hu", recv_options->mss);
			break;
		case NET_TCP_WINDOW_SCALE_OPT:
			if (opt_len != 3) {
				result = false;
				goto end;
			}

			/* The shift count is the third byte of the option */
			recv_options->window = options[2];
			recv_options->wnd_found = true;
			break;
		default:
			continue;
		}
	}
end:
	if (false == result) {
		NET_WARN("Invalid TCP options");
	}

	return result;
}

static bool tcp_short_window(struct tcp *conn)
{
	int32_t threshold = MIN(conn_mss(conn), conn->recv_win_max / 2);

	if (conn->recv_win > threshold) {
		return false;
	}

	return true;
}

/**
 * @brief Update TCP receive window
 *
 * @param conn TCP network connection
 * @param delta Receive window delta
 *
 * @return 0 on success; the new window is clamped to the range
 *         [0, recv_win_max]
 */
static int tcp_update_recv_wnd(struct tcp *conn, int32_t delta)
{
	int32_t new_win;
	bool short_win_before;
	bool short_win_after;

	new_win = conn->recv_win + delta;
	if (new_win < 0) {
		new_win = 0;
	} else if (new_win > conn->recv_win_max) {
		new_win = conn->recv_win_max;
	}

	short_win_before = tcp_short_window(conn);

	conn->recv_win = new_win;

	short_win_after = tcp_short_window(conn);

	if (short_win_before && !short_win_after &&
	    conn->state == TCP_ESTABLISHED) {
		k_work_cancel_delayable(&conn->ack_timer);
		tcp_out(conn, ACK);
	}

	return 0;
}

static size_t tcp_check_pending_data(struct tcp *conn, struct net_pkt *pkt,
				     size_t len)
{
	size_t pending_len = 0;

	if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT &&
	    !net_pkt_is_empty(conn->queue_recv_data)) {
		/* Some potential cases:
		 * Note: MI = MAX_INT
		 * Packet | Queued| End off   | Gap size | Required handling
		 * Seq|Len|Seq|Len|           |          |
		 *  3 | 3 | 6 | 4 | 3+3-6= 0  | 6-3-3=0  | Append
		 *  3 | 4 | 6 | 4 | 3+4-6 = 1 | 6-3-4=-1 | Append, pull from queue
		 *  3 | 7 | 6 | 4 | 3+7-6 = 4 | 6-3-7=-4 | Drop queued data
		 *  3 | 8 | 6 | 4 | 3+8-6 = 5 | 6-3-8=-5 | Drop queued data
		 *  6 | 5 | 6 | 4 | 6+5-6 = 5 | 6-6-5=-5 | Drop queued data
		 *  6 | 4 | 6 | 4 | 6+4-6 = 4 | 6-6-4=-4 | Drop queued data / packet
		 * 10 | 2 | 6 | 4 | 10+2-6= 6 | 6-10-2=-6| Should not happen, dropping queue
		 *  7 | 4 | 6 | 4 | 7+4-6 = 5 | 6-7-4=-5 | Should not happen, dropping queue
		 * 11 | 2 | 6 | 4 | 11+2-6= 7 | 6-11-2=-7| Should not happen, dropping queue
		 *  2 | 3 | 6 | 4 | 2+3-6= MI | 6-2-3=1  | Keep queued data
		 */
		struct tcphdr *th = th_get(pkt);
		uint32_t expected_seq = th_seq(th) + len;
		uint32_t pending_seq;
		int32_t gap_size;
		uint32_t end_offset;

		pending_seq = tcp_get_seq(conn->queue_recv_data->buffer);
		end_offset = expected_seq - pending_seq;
		gap_size = (int32_t)(pending_seq - th_seq(th) - ((uint32_t)len));
		pending_len = net_pkt_get_len(conn->queue_recv_data);
		if (end_offset < pending_len) {
			if (end_offset) {
				net_pkt_remove_tail(pkt, end_offset);
				pending_len -= end_offset;
			}

			NET_DBG("Found pending data seq %u len %zd",
				expected_seq, pending_len);

			net_buf_frag_add(pkt->buffer,
					 conn->queue_recv_data->buffer);
			conn->queue_recv_data->buffer = NULL;

			k_work_cancel_delayable(&conn->recv_queue_timer);
		} else {
			/* Check if the queued data is just a section of the incoming data */
			if (gap_size <= 0) {
				net_buf_unref(conn->queue_recv_data->buffer);
				conn->queue_recv_data->buffer = NULL;

				k_work_cancel_delayable(&conn->recv_queue_timer);
			}

			pending_len = 0;
		}
	}

	return pending_len;
}

static enum net_verdict tcp_data_get(struct tcp *conn, struct net_pkt *pkt, size_t *len)
{
	enum net_verdict ret = NET_DROP;

	if (tcp_recv_cb) {
		tcp_recv_cb(conn, pkt);
		goto out;
	}

	if (conn->context->recv_cb) {
		/* If there is any out-of-order pending data, then pass it
		 * to the application here.
		 */
		*len += tcp_check_pending_data(conn, pkt, *len);

		net_pkt_cursor_init(pkt);
		net_pkt_set_overwrite(pkt, true);

		net_pkt_skip(pkt, net_pkt_get_len(pkt) - *len);

		tcp_update_recv_wnd(conn, -*len);

		/* Do not pass data to the application with the TCP conn
		 * locked, as there could be an issue when the app tries
		 * to send the data and the conn is locked. So the recv
		 * data is placed in a FIFO which is flushed in tcp_in()
		 * after unlocking the conn.
		 */
		k_fifo_put(&conn->recv_data, pkt);

		ret = NET_OK;
	}
out:
	return ret;
}

static int tcp_finalize_pkt(struct net_pkt *pkt)
{
	net_pkt_cursor_init(pkt);

	if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
		return net_ipv4_finalize(pkt, IPPROTO_TCP);
	}

	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
		return net_ipv6_finalize(pkt, IPPROTO_TCP);
	}

	return -EINVAL;
}

static int tcp_header_add(struct tcp *conn, struct net_pkt *pkt, uint8_t flags,
			  uint32_t seq)
{
	NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct tcphdr);
	struct tcphdr *th;

	th = (struct tcphdr *)net_pkt_get_data(pkt, &tcp_access);
	if (!th) {
		return -ENOBUFS;
	}

	memset(th, 0, sizeof(struct tcphdr));

	UNALIGNED_PUT(conn->src.sin.sin_port, &th->th_sport);
	UNALIGNED_PUT(conn->dst.sin.sin_port, &th->th_dport);
	th->th_off = 5;

	if (conn->send_options.mss_found) {
		th->th_off++;
	}

	UNALIGNED_PUT(flags, &th->th_flags);
	UNALIGNED_PUT(htons(conn->recv_win), &th->th_win);
	UNALIGNED_PUT(htonl(seq), &th->th_seq);

	if (ACK & flags) {
		UNALIGNED_PUT(htonl(conn->ack), &th->th_ack);
	}

	return net_pkt_set_data(pkt, &tcp_access);
}

static int ip_header_add(struct tcp *conn, struct net_pkt *pkt)
{
	if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
		return net_context_create_ipv4_new(conn->context, pkt,
						   &conn->src.sin.sin_addr,
						   &conn->dst.sin.sin_addr);
	}

	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
		return net_context_create_ipv6_new(conn->context, pkt,
						   &conn->src.sin6.sin6_addr,
						   &conn->dst.sin6.sin6_addr);
	}

	return -EINVAL;
}

static int set_tcp_nodelay(struct tcp *conn, const void *value, size_t len)
{
	int no_delay_int;

	if (len != sizeof(int)) {
		return -EINVAL;
	}

	no_delay_int = *(int *)value;

	if ((no_delay_int < 0) || (no_delay_int > 1)) {
		return -EINVAL;
	}

	conn->tcp_nodelay = (bool)no_delay_int;

	return 0;
}

static int get_tcp_nodelay(struct tcp *conn, void *value, size_t *len)
{
	int no_delay_int = (int)conn->tcp_nodelay;

	*((int *)value) = no_delay_int;

	if (len) {
		*len = sizeof(int);
	}
	return 0;
}

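/* Append the MSS option to an outgoing segment. The whole option is
 * packed into one 32-bit word: kind (1 byte), length (1 byte) and the
 * 16-bit MSS value.
 */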
static int net_tcp_set_mss_opt(struct tcp *conn, struct net_pkt *pkt)
{
	NET_PKT_DATA_ACCESS_DEFINE(mss_opt_access, struct tcp_mss_option);
	struct tcp_mss_option *mss;
	uint32_t recv_mss;

	mss = net_pkt_get_data(pkt, &mss_opt_access);
	if (!mss) {
		return -ENOBUFS;
	}

	recv_mss = net_tcp_get_supported_mss(conn);
	recv_mss |= (NET_TCP_MSS_OPT << 24) | (NET_TCP_MSS_SIZE << 16);

	UNALIGNED_PUT(htonl(recv_mss), (uint32_t *)mss);

	return net_pkt_set_data(pkt, &mss_opt_access);
}

static bool is_destination_local(struct net_pkt *pkt)
{
	if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
		if (net_ipv4_is_addr_loopback(
				(struct in_addr *)NET_IPV4_HDR(pkt)->dst) ||
		    net_ipv4_is_my_addr(
				(struct in_addr *)NET_IPV4_HDR(pkt)->dst)) {
			return true;
		}
	}

	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
		if (net_ipv6_is_addr_loopback(
				(struct in6_addr *)NET_IPV6_HDR(pkt)->dst) ||
		    net_ipv6_is_my_addr(
				(struct in6_addr *)NET_IPV6_HDR(pkt)->dst)) {
			return true;
		}
	}

	return false;
}

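/* Send a RST in reply to an incoming segment that does not belong to
 * any connection (RFC 793 reset generation): if the segment carries an
 * ACK, the reset takes its sequence number from the segment's ACK
 * field; otherwise a RST,ACK is sent acknowledging the segment's data.
 */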
void net_tcp_reply_rst(struct net_pkt *pkt)
{
	NET_PKT_DATA_ACCESS_DEFINE(tcp_access_rst, struct tcphdr);
	struct tcphdr *th_pkt = th_get(pkt);
	struct tcphdr *th_rst;
	struct net_pkt *rst;
	int ret;

	if (th_pkt == NULL || (th_flags(th_pkt) & RST)) {
		/* Don't reply to a RST segment. */
		return;
	}

	rst = tcp_pkt_alloc_no_conn(pkt->iface, pkt->family,
				    sizeof(struct tcphdr));
	if (rst == NULL) {
		return;
	}

	/* IP header */
	if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
		ret = net_ipv4_create(rst,
				      (struct in_addr *)NET_IPV4_HDR(pkt)->dst,
				      (struct in_addr *)NET_IPV4_HDR(pkt)->src);
	} else if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
		ret = net_ipv6_create(rst,
				      (struct in6_addr *)NET_IPV6_HDR(pkt)->dst,
				      (struct in6_addr *)NET_IPV6_HDR(pkt)->src);
	} else {
		ret = -EINVAL;
	}

	if (ret < 0) {
		goto err;
	}

	/* TCP header */
	th_rst = (struct tcphdr *)net_pkt_get_data(rst, &tcp_access_rst);
	if (th_rst == NULL) {
		goto err;
	}

	memset(th_rst, 0, sizeof(struct tcphdr));

	UNALIGNED_PUT(th_pkt->th_dport, &th_rst->th_sport);
	UNALIGNED_PUT(th_pkt->th_sport, &th_rst->th_dport);
	th_rst->th_off = 5;

	if (th_flags(th_pkt) & ACK) {
		UNALIGNED_PUT(RST, &th_rst->th_flags);
		UNALIGNED_PUT(th_pkt->th_ack, &th_rst->th_seq);
	} else {
		uint32_t ack = ntohl(th_pkt->th_seq) + tcp_data_len(pkt);

		UNALIGNED_PUT(RST | ACK, &th_rst->th_flags);
		UNALIGNED_PUT(htonl(ack), &th_rst->th_ack);
	}

	ret = net_pkt_set_data(rst, &tcp_access_rst);
	if (ret < 0) {
		goto err;
	}

	ret = tcp_finalize_pkt(rst);
	if (ret < 0) {
		goto err;
	}

	tcp_send(rst);

	return;

err:
	tcp_pkt_unref(rst);
}

static int tcp_out_ext(struct tcp *conn, uint8_t flags, struct net_pkt *data,
		       uint32_t seq)
{
	size_t alloc_len = sizeof(struct tcphdr);
	struct net_pkt *pkt;
	int ret = 0;

	if (conn->send_options.mss_found) {
		alloc_len += sizeof(uint32_t);
	}

	pkt = tcp_pkt_alloc(conn, alloc_len);
	if (!pkt) {
		ret = -ENOBUFS;
		goto out;
	}

	if (data) {
		/* Append the data buffer to the pkt */
		net_pkt_append_buffer(pkt, data->buffer);
		data->buffer = NULL;
	}

	ret = ip_header_add(conn, pkt);
	if (ret < 0) {
		tcp_pkt_unref(pkt);
		goto out;
	}

	ret = tcp_header_add(conn, pkt, flags, seq);
	if (ret < 0) {
		tcp_pkt_unref(pkt);
		goto out;
	}

	if (conn->send_options.mss_found) {
		ret = net_tcp_set_mss_opt(conn, pkt);
		if (ret < 0) {
			tcp_pkt_unref(pkt);
			goto out;
		}
	}

	ret = tcp_finalize_pkt(pkt);
	if (ret < 0) {
		tcp_pkt_unref(pkt);
		goto out;
	}

	NET_DBG("%s", tcp_th(pkt));

	if (tcp_send_cb) {
		ret = tcp_send_cb(pkt);
		goto out;
	}

	sys_slist_append(&conn->send_queue, &pkt->next);

	if (is_destination_local(pkt)) {
		/* If the destination is local, we have to let the current
		 * thread finish with any state-machine changes before
		 * sending the packet, or it might lead to state
		 * inconsistencies.
		 */
		k_work_schedule_for_queue(&tcp_work_q,
					  &conn->send_timer, K_NO_WAIT);
	} else if (tcp_send_process_no_lock(conn)) {
		tcp_conn_close(conn, -ETIMEDOUT);
	}
out:
	return ret;
}

static void tcp_out(struct tcp *conn, uint8_t flags)
{
	(void)tcp_out_ext(conn, flags, NULL /* no data */, conn->seq);
}

static int tcp_pkt_pull(struct net_pkt *pkt, size_t len)
{
	int total = net_pkt_get_len(pkt);
	int ret = 0;

	if (len > total) {
		ret = -EINVAL;
		goto out;
	}

	net_pkt_cursor_init(pkt);
	net_pkt_set_overwrite(pkt, true);
	net_pkt_pull(pkt, len);
	net_pkt_trim_buffer(pkt);
out:
	return ret;
}

static int tcp_pkt_peek(struct net_pkt *to, struct net_pkt *from, size_t pos,
			size_t len)
{
	net_pkt_cursor_init(to);
	net_pkt_cursor_init(from);

	if (pos) {
		net_pkt_set_overwrite(from, true);
		net_pkt_skip(from, pos);
	}

	return net_pkt_copy(to, from, len);
}

static int tcp_pkt_append(struct net_pkt *pkt, const uint8_t *data, size_t len)
{
	size_t alloc_len = len;
	struct net_buf *buf = NULL;
	int ret = 0;

	if (pkt->buffer) {
		buf = net_buf_frag_last(pkt->buffer);

		if (len > net_buf_tailroom(buf)) {
			alloc_len -= net_buf_tailroom(buf);
		} else {
			alloc_len = 0;
		}
	}

	if (alloc_len > 0) {
		ret = net_pkt_alloc_buffer_raw(pkt, alloc_len,
					       TCP_PKT_ALLOC_TIMEOUT);
		if (ret < 0) {
			return -ENOBUFS;
		}
	}

	if (buf == NULL) {
		buf = pkt->buffer;
	}

	while (buf != NULL && len > 0) {
		size_t write_len = MIN(len, net_buf_tailroom(buf));

		net_buf_add_mem(buf, data, write_len);

		data += write_len;
		len -= write_len;
		buf = buf->frags;
	}

	NET_ASSERT(len == 0, "Not all bytes written");

	return ret;
}

static bool tcp_window_full(struct tcp *conn)
{
	bool window_full = (conn->send_data_total >= conn->send_win);

#ifdef CONFIG_NET_TCP_CONGESTION_AVOIDANCE
	window_full = window_full || (conn->send_data_total >= conn->ca.cwnd);
#endif

	NET_DBG("conn: %p window_full=%hu", conn, window_full);

	return window_full;
}

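/* Number of bytes in send_data that may still be transmitted: bounded
 * by the peer's advertised send window and, when congestion avoidance
 * is enabled, by the congestion window as well.
 */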
static int tcp_unsent_len(struct tcp *conn)
{
	int unsent_len;

	if (conn->unacked_len > conn->send_data_total) {
		NET_ERR("total=%zu, unacked_len=%d",
			conn->send_data_total, conn->unacked_len);
		unsent_len = -ERANGE;
		goto out;
	}

	unsent_len = conn->send_data_total - conn->unacked_len;
	if (conn->unacked_len >= conn->send_win) {
		unsent_len = 0;
	} else {
		unsent_len = MIN(unsent_len, conn->send_win - conn->unacked_len);

#ifdef CONFIG_NET_TCP_CONGESTION_AVOIDANCE
		if (conn->unacked_len >= conn->ca.cwnd) {
			unsent_len = 0;
		} else {
			unsent_len = MIN(unsent_len, conn->ca.cwnd - conn->unacked_len);
		}
#endif
	}
out:
	NET_DBG("unsent_len=%d", unsent_len);

	return unsent_len;
}

static int tcp_send_data(struct tcp *conn)
{
	int ret = 0;
	int len;
	struct net_pkt *pkt;

	len = MIN(tcp_unsent_len(conn), conn_mss(conn));
	if (len < 0) {
		ret = len;
		goto out;
	}
	if (len == 0) {
		NET_DBG("conn: %p no data to send", conn);
		ret = -ENODATA;
		goto out;
	}

	pkt = tcp_pkt_alloc(conn, len);
	if (!pkt) {
		NET_ERR("conn: %p packet allocation failed, len=%d", conn, len);
		ret = -ENOBUFS;
		goto out;
	}

	ret = tcp_pkt_peek(pkt, conn->send_data, conn->unacked_len, len);
	if (ret < 0) {
		tcp_pkt_unref(pkt);
		ret = -ENOBUFS;
		goto out;
	}

	ret = tcp_out_ext(conn, PSH | ACK, pkt, conn->seq + conn->unacked_len);
	if (ret == 0) {
		conn->unacked_len += len;

		if (conn->data_mode == TCP_DATA_MODE_RESEND) {
			net_stats_update_tcp_resent(conn->iface, len);
			net_stats_update_tcp_seg_rexmit(conn->iface);
		} else {
			net_stats_update_tcp_sent(conn->iface, len);
			net_stats_update_tcp_seg_sent(conn->iface);
		}
	}

	/* The data we want to send has been moved to the send queue, so we
	 * can unref the head net_pkt. If there was an error, we need to
	 * remove the packet anyway.
	 */
	tcp_pkt_unref(pkt);

	conn_send_data_dump(conn);

out:
	return ret;
}

/* Send all queued but unsent data from send_data, packet by packet,
 * until the receiver's window is full.
 */
static int tcp_send_queued_data(struct tcp *conn)
{
	int ret = 0;
	bool subscribe = false;

	if (conn->data_mode == TCP_DATA_MODE_RESEND) {
		goto out;
	}

	while (tcp_unsent_len(conn) > 0) {
		/* Implement Nagle's algorithm */
		if ((conn->tcp_nodelay == false) && (conn->unacked_len > 0)) {
			/* If there is already pending data */
			if (tcp_unsent_len(conn) < conn_mss(conn)) {
				/* The number of bytes to be transmitted is
				 * less than an MSS; skip transmission for now
				 * and wait until more data is available or
				 * all pending data has been acknowledged.
				 */
				break;
			}
		}

		ret = tcp_send_data(conn);
		if (ret < 0) {
			break;
		}
	}

	if (conn->send_data_total) {
		subscribe = true;
	}

	if (k_work_delayable_remaining_get(&conn->send_data_timer)) {
		subscribe = false;
	}

	if (subscribe) {
		conn->send_data_retries = 0;
		k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer,
					    K_MSEC(TCP_RTO_MS));
	}
out:
	return ret;
}

static void tcp_cleanup_recv_queue(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, recv_queue_timer);

	k_mutex_lock(&conn->lock, K_FOREVER);

	NET_DBG("Cleanup recv queue conn %p len %zd seq %u", conn,
		net_pkt_get_len(conn->queue_recv_data),
		tcp_get_seq(conn->queue_recv_data->buffer));

	net_buf_unref(conn->queue_recv_data->buffer);
	conn->queue_recv_data->buffer = NULL;

	k_mutex_unlock(&conn->lock);
}

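/* Retransmission timer. Resends the oldest unacknowledged data and
 * backs the timeout off by a factor of 1.5 per retry: e.g. with a
 * 100 ms base RTO the waits are 100, 150, 225, 337, ... ms, until
 * tcp_retries is exceeded and the connection is closed.
 */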
static void tcp_resend_data(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, send_data_timer);
	bool conn_unref = false;
	int ret;
	int exp_tcp_rto;

	k_mutex_lock(&conn->lock, K_FOREVER);

	NET_DBG("send_data_retries=%hu", conn->send_data_retries);

	if (conn->send_data_retries >= tcp_retries) {
		NET_DBG("conn: %p close, data retransmissions exceeded", conn);
		conn_unref = true;
		goto out;
	}

	if (IS_ENABLED(CONFIG_NET_TCP_CONGESTION_AVOIDANCE) &&
	    (conn->send_data_retries == 0)) {
		tcp_ca_timeout(conn);
		if (tcp_window_full(conn)) {
			(void)k_sem_take(&conn->tx_sem, K_NO_WAIT);
		}
	}

	conn->data_mode = TCP_DATA_MODE_RESEND;
	conn->unacked_len = 0;

	ret = tcp_send_data(conn);
	conn->send_data_retries++;
	if (ret == 0) {
		if (conn->in_close && conn->send_data_total == 0) {
			NET_DBG("TCP connection in %s close, "
				"not disposing yet (waiting %dms)",
				"active", tcp_fin_timeout_ms);
			k_work_reschedule_for_queue(&tcp_work_q,
						    &conn->fin_timer,
						    FIN_TIMEOUT);

			conn_state(conn, TCP_FIN_WAIT_1);

			ret = tcp_out_ext(conn, FIN | ACK, NULL,
					  conn->seq + conn->unacked_len);
			if (ret == 0) {
				conn_seq(conn, + 1);
			}

			keep_alive_timer_stop(conn);

			goto out;
		}
	} else if (ret == -ENODATA) {
		conn->data_mode = TCP_DATA_MODE_SEND;

		goto out;
	} else if (ret == -ENOBUFS) {
		NET_ERR("TCP failed to allocate buffer in retransmission");
	}

	exp_tcp_rto = TCP_RTO_MS;
	/* The last retransmit does not need to wait that long */
	if (conn->send_data_retries < tcp_retries) {
		/* Every retransmit, the retransmission timeout increases by a factor of 1.5 */
		for (int i = 0; i < conn->send_data_retries; i++) {
			exp_tcp_rto += exp_tcp_rto >> 1;
		}
	}

	k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer,
				    K_MSEC(exp_tcp_rto));

out:
	k_mutex_unlock(&conn->lock);

	if (conn_unref) {
		tcp_conn_close(conn, -ETIMEDOUT);
	}
}

static void tcp_timewait_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, timewait_timer);

	/* no need to acquire the conn->lock as there is nothing scheduled here */
	NET_DBG("conn: %p %s", conn, tcp_conn_state(conn, NULL));

	(void)tcp_conn_close(conn, -ETIMEDOUT);
}

static void tcp_establish_timeout(struct tcp *conn)
{
	NET_DBG("Did not receive %s in %dms", "ACK", ACK_TIMEOUT_MS);
	NET_DBG("conn: %p %s", conn, tcp_conn_state(conn, NULL));

	(void)tcp_conn_close(conn, -ETIMEDOUT);
}

static void tcp_fin_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, fin_timer);

	/* no need to acquire the conn->lock as there is nothing scheduled here */
	if (conn->state == TCP_SYN_RECEIVED) {
		tcp_establish_timeout(conn);
		return;
	}

	NET_DBG("Did not receive %s in %dms", "FIN", tcp_fin_timeout_ms);
	NET_DBG("conn: %p %s", conn, tcp_conn_state(conn, NULL));

	(void)tcp_conn_close(conn, -ETIMEDOUT);
}

static void tcp_last_ack_timeout(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, fin_timer);

	NET_DBG("Did not receive %s in %dms", "last ACK", LAST_ACK_TIMEOUT_MS);
	NET_DBG("conn: %p %s", conn, tcp_conn_state(conn, NULL));

	(void)tcp_conn_close(conn, -ETIMEDOUT);
}

static void tcp_setup_last_ack_timer(struct tcp *conn)
{
	/* In case the last ack is lost, install a timer that will close the
	 * connection. Reuse the fin_timer for this, as the FIN handling
	 * cannot be done in this passive close state. Instead of the default
	 * tcp_fin_timeout() function, use a separate handler to catch this
	 * last-ack case.
	 */
	k_work_init_delayable(&conn->fin_timer, tcp_last_ack_timeout);

	NET_DBG("TCP connection in %s close, "
		"not disposing yet (waiting %dms)",
		"passive", LAST_ACK_TIMEOUT_MS);
	k_work_reschedule_for_queue(&tcp_work_q,
				    &conn->fin_timer,
				    LAST_ACK_TIMEOUT);
}

static void tcp_cancel_last_ack_timer(struct tcp *conn)
{
	k_work_cancel_delayable(&conn->fin_timer);
}

#if defined(CONFIG_NET_TCP_KEEPALIVE)
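/* A keep-alive probe is an ACK carrying a sequence number one below
 * the next expected one, which forces the peer to answer with an ACK.
 */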
static void tcp_send_keepalive_probe(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct tcp *conn = CONTAINER_OF(dwork, struct tcp, keepalive_timer);

	if (conn->state != TCP_ESTABLISHED) {
		NET_DBG("conn: %p TCP connection not established", conn);
		return;
	}

	if (!conn->keep_alive) {
		NET_DBG("conn: %p keepalive is not enabled", conn);
		return;
	}

	conn->keep_cur++;
	if (conn->keep_cur > conn->keep_cnt) {
		NET_DBG("conn: %p keepalive probe failed multiple times",
			conn);
		tcp_conn_close(conn, -ETIMEDOUT);
		return;
	}

	NET_DBG("conn: %p keepalive probe", conn);
	k_work_reschedule_for_queue(&tcp_work_q, &conn->keepalive_timer,
				    K_SECONDS(conn->keep_intvl));

	(void)tcp_out_ext(conn, ACK, NULL, conn->seq - 1);
}
#endif /* CONFIG_NET_TCP_KEEPALIVE */

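/* Zero-window probe (ZWP) timer: while the peer advertises a zero
 * window, periodically send an ACK with seq - 1 to solicit a window
 * update, doubling the interval each time up to ZWP_MAX_DELAY_MS.
 */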
1976 static void tcp_send_zwp(struct k_work *work)
1977 {
1978 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
1979 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, persist_timer);
1980
1981 k_mutex_lock(&conn->lock, K_FOREVER);
1982
1983 (void)tcp_out_ext(conn, ACK, NULL, conn->seq - 1);
1984
1985 tcp_derive_rto(conn);
1986
1987 if (conn->send_win == 0) {
1988 uint64_t timeout = TCP_RTO_MS;
1989
1990 /* Make sure the bitwise shift does not result in undefined behaviour */
1991 if (conn->zwp_retries < 63) {
1992 conn->zwp_retries++;
1993 }
1994
1995 timeout <<= conn->zwp_retries;
1996 if (timeout == 0 || timeout > ZWP_MAX_DELAY_MS) {
1997 timeout = ZWP_MAX_DELAY_MS;
1998 }
1999
2000 (void)k_work_reschedule_for_queue(
2001 &tcp_work_q, &conn->persist_timer, K_MSEC(timeout));
2002 }
2003
2004 k_mutex_unlock(&conn->lock);
2005 }
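/* Worked example of the zero-window probe backoff above: assuming an
 * initial TCP_RTO_MS of 200 ms, successive probes are spaced
 * 400, 800, 1600, ... ms apart (timeout <<= zwp_retries) until the delay
 * is clamped to ZWP_MAX_DELAY_MS. Capping zwp_retries at 63 keeps the
 * shift on the 64-bit timeout variable well-defined.
 */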
2006
2007 static void tcp_send_ack(struct k_work *work)
2008 {
2009 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
2010 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, ack_timer);
2011
2012 k_mutex_lock(&conn->lock, K_FOREVER);
2013
2014 tcp_out(conn, ACK);
2015
2016 k_mutex_unlock(&conn->lock);
2017 }
2018
2019 static void tcp_conn_ref(struct tcp *conn)
2020 {
2021 int ref_count = atomic_inc(&conn->ref_count) + 1;
2022
2023 NET_DBG("conn: %p, ref_count: %d", conn, ref_count);
2024 }
2025
2026 static struct tcp *tcp_conn_alloc(void)
2027 {
2028 struct tcp *conn = NULL;
2029 int ret;
2030
2031 ret = k_mem_slab_alloc(&tcp_conns_slab, (void **)&conn, K_NO_WAIT);
2032 if (ret) {
2033 NET_ERR("Cannot allocate slab");
2034 goto out;
2035 }
2036
2037 memset(conn, 0, sizeof(*conn));
2038
2039 if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
2040 conn->queue_recv_data = tcp_rx_pkt_alloc(conn, 0);
2041 if (conn->queue_recv_data == NULL) {
2042 NET_ERR("Cannot allocate %s queue for conn %p", "recv",
2043 conn);
2044 goto fail;
2045 }
2046 }
2047
2048 conn->send_data = tcp_pkt_alloc(conn, 0);
2049 if (conn->send_data == NULL) {
2050 NET_ERR("Cannot allocate %s queue for conn %p", "send", conn);
2051 goto fail;
2052 }
2053
2054 k_mutex_init(&conn->lock);
2055 k_fifo_init(&conn->recv_data);
2056 k_sem_init(&conn->connect_sem, 0, K_SEM_MAX_LIMIT);
2057 k_sem_init(&conn->tx_sem, 1, 1);
2058
2059 conn->in_connect = false;
2060 conn->state = TCP_LISTEN;
2061 conn->recv_win_max = tcp_rx_window;
2062 conn->recv_win = conn->recv_win_max;
2063 conn->send_win_max = MAX(tcp_tx_window, NET_IPV6_MTU);
2064 conn->send_win = conn->send_win_max;
2065 conn->tcp_nodelay = false;
2066 #ifdef CONFIG_NET_TCP_FAST_RETRANSMIT
2067 conn->dup_ack_cnt = 0;
2068 #endif
2069 #ifdef CONFIG_NET_TCP_CONGESTION_AVOIDANCE
2070 /* Initially set the congestion window to its max size, since only the MSS
2071 * is available as soon as the connection is established
2072 */
2073 conn->ca.cwnd = UINT16_MAX;
2074 #endif
2075
2076 /* The ISN value will be set when we get the connection attempt or
2077 * when trying to create a connection.
2078 */
2079 conn->seq = 0U;
2080
2081 sys_slist_init(&conn->send_queue);
2082
2083 k_work_init_delayable(&conn->send_timer, tcp_send_process);
2084 k_work_init_delayable(&conn->timewait_timer, tcp_timewait_timeout);
2085 k_work_init_delayable(&conn->fin_timer, tcp_fin_timeout);
2086 k_work_init_delayable(&conn->send_data_timer, tcp_resend_data);
2087 k_work_init_delayable(&conn->recv_queue_timer, tcp_cleanup_recv_queue);
2088 k_work_init_delayable(&conn->persist_timer, tcp_send_zwp);
2089 k_work_init_delayable(&conn->ack_timer, tcp_send_ack);
2090 k_work_init(&conn->conn_release, tcp_conn_release);
2091 keep_alive_timer_init(conn);
2092
2093 tcp_conn_ref(conn);
2094
2095 sys_slist_append(&tcp_conns, &conn->next);
2096 out:
2097 NET_DBG("conn: %p", conn);
2098
2099 return conn;
2100
2101 fail:
2102 if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT && conn->queue_recv_data) {
2103 tcp_pkt_unref(conn->queue_recv_data);
2104 conn->queue_recv_data = NULL;
2105 }
2106
2107 k_mem_slab_free(&tcp_conns_slab, (void *)conn);
2108 return NULL;
2109 }
2110
2111 int net_tcp_get(struct net_context *context)
2112 {
2113 int ret = 0;
2114 struct tcp *conn;
2115
2116 k_mutex_lock(&tcp_lock, K_FOREVER);
2117
2118 conn = tcp_conn_alloc();
2119 if (conn == NULL) {
2120 ret = -ENOMEM;
2121 goto out;
2122 }
2123
2124 /* Mutually link the net_context and tcp connection */
2125 conn->context = context;
2126 context->tcp = conn;
2127 out:
2128 k_mutex_unlock(&tcp_lock);
2129
2130 return ret;
2131 }
2132
2133 static bool tcp_endpoint_cmp(union tcp_endpoint *ep, struct net_pkt *pkt,
2134 enum pkt_addr which)
2135 {
2136 union tcp_endpoint ep_tmp;
2137
2138 if (tcp_endpoint_set(&ep_tmp, pkt, which) < 0) {
2139 return false;
2140 }
2141
2142 return !memcmp(ep, &ep_tmp, tcp_endpoint_len(ep->sa.sa_family));
2143 }
2144
2145 static bool tcp_conn_cmp(struct tcp *conn, struct net_pkt *pkt)
2146 {
2147 return tcp_endpoint_cmp(&conn->src, pkt, TCP_EP_DST) &&
2148 tcp_endpoint_cmp(&conn->dst, pkt, TCP_EP_SRC);
2149 }
2150
2151 static struct tcp *tcp_conn_search(struct net_pkt *pkt)
2152 {
2153 bool found = false;
2154 struct tcp *conn;
2155 struct tcp *tmp;
2156
2157 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&tcp_conns, conn, tmp, next) {
2158 found = tcp_conn_cmp(conn, pkt);
2159 if (found) {
2160 break;
2161 }
2162 }
2163
2164 return found ? conn : NULL;
2165 }
2166
2167 static struct tcp *tcp_conn_new(struct net_pkt *pkt);
2168
2169 static enum net_verdict tcp_recv(struct net_conn *net_conn,
2170 struct net_pkt *pkt,
2171 union net_ip_header *ip,
2172 union net_proto_header *proto,
2173 void *user_data)
2174 {
2175 struct tcp *conn;
2176 struct tcphdr *th;
2177 enum net_verdict verdict = NET_DROP;
2178
2179 ARG_UNUSED(net_conn);
2180 ARG_UNUSED(proto);
2181
2182 k_mutex_lock(&tcp_lock, K_FOREVER);
2183
2184 conn = tcp_conn_search(pkt);
2185 if (conn) {
2186 goto in;
2187 }
2188
2189 th = th_get(pkt);
2190
2191 if (th_flags(th) & SYN && !(th_flags(th) & ACK)) {
2192 struct tcp *conn_old = ((struct net_context *)user_data)->tcp;
2193
2194 conn = tcp_conn_new(pkt);
2195 if (!conn) {
2196 NET_ERR("Cannot allocate a new TCP connection");
2197 goto in;
2198 }
2199
2200 conn->accepted_conn = conn_old;
2201 }
2202 in:
2203 k_mutex_unlock(&tcp_lock);
2204
2205 if (conn) {
2206 verdict = tcp_in(conn, pkt);
2207 } else {
2208 net_tcp_reply_rst(pkt);
2209 }
2210
2211 return verdict;
2212 }
2213
2214 static uint32_t seq_scale(uint32_t seq)
2215 {
2216 return seq + (k_ticks_to_ns_floor32(k_uptime_ticks()) >> 6);
2217 }
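/* seq_scale() adds the time-dependent component "M" of the RFC 6528
 * formula ISN = M + F(localip, localport, remoteip, remoteport, secretkey);
 * here the clock advances once per 64 ns of uptime (nanoseconds >> 6).
 */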
2218
2219 static uint8_t unique_key[16]; /* MD5 128 bits as described in RFC6528 */
2220
2221 static uint32_t tcpv6_init_isn(struct in6_addr *saddr,
2222 struct in6_addr *daddr,
2223 uint16_t sport,
2224 uint16_t dport)
2225 {
2226 struct {
2227 uint8_t key[sizeof(unique_key)];
2228 struct in6_addr saddr;
2229 struct in6_addr daddr;
2230 uint16_t sport;
2231 uint16_t dport;
2232 } buf = {
2233 .saddr = *(struct in6_addr *)saddr,
2234 .daddr = *(struct in6_addr *)daddr,
2235 .sport = sport,
2236 .dport = dport
2237 };
2238
2239 uint8_t hash[16];
2240 static bool once;
2241
2242 if (!once) {
2243 sys_rand_get(unique_key, sizeof(unique_key));
2244 once = true;
2245 }
2246
2247 memcpy(buf.key, unique_key, sizeof(buf.key));
2248
2249 #if defined(CONFIG_NET_TCP_ISN_RFC6528)
2250 mbedtls_md5((const unsigned char *)&buf, sizeof(buf), hash);
2251 #endif
2252
2253 return seq_scale(UNALIGNED_GET((uint32_t *)&hash[0]));
2254 }
2255
2256 static uint32_t tcpv4_init_isn(struct in_addr *saddr,
2257 struct in_addr *daddr,
2258 uint16_t sport,
2259 uint16_t dport)
2260 {
2261 struct {
2262 uint8_t key[sizeof(unique_key)];
2263 struct in_addr saddr;
2264 struct in_addr daddr;
2265 uint16_t sport;
2266 uint16_t dport;
2267 } buf = {
2268 .saddr = *(struct in_addr *)saddr,
2269 .daddr = *(struct in_addr *)daddr,
2270 .sport = sport,
2271 .dport = dport
2272 };
2273
2274 uint8_t hash[16];
2275 static bool once;
2276
2277 if (!once) {
2278 sys_rand_get(unique_key, sizeof(unique_key));
2279 once = true;
2280 }
2281
2282 memcpy(buf.key, unique_key, sizeof(unique_key));
2283
2284 #if defined(CONFIG_NET_TCP_ISN_RFC6528)
2285 mbedtls_md5((const unsigned char *)&buf, sizeof(buf), hash);
2286 #endif
2287
2288 return seq_scale(UNALIGNED_GET((uint32_t *)&hash[0]));
2289 }
2290
2291 static uint32_t tcp_init_isn(struct sockaddr *saddr, struct sockaddr *daddr)
2292 {
2293 if (IS_ENABLED(CONFIG_NET_TCP_ISN_RFC6528)) {
2294 if (IS_ENABLED(CONFIG_NET_IPV6) &&
2295 saddr->sa_family == AF_INET6) {
2296 return tcpv6_init_isn(&net_sin6(saddr)->sin6_addr,
2297 &net_sin6(daddr)->sin6_addr,
2298 net_sin6(saddr)->sin6_port,
2299 net_sin6(daddr)->sin6_port);
2300 } else if (IS_ENABLED(CONFIG_NET_IPV4) &&
2301 saddr->sa_family == AF_INET) {
2302 return tcpv4_init_isn(&net_sin(saddr)->sin_addr,
2303 &net_sin(daddr)->sin_addr,
2304 net_sin(saddr)->sin_port,
2305 net_sin(daddr)->sin_port);
2306 }
2307 }
2308
2309 return sys_rand32_get();
2310 }
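/* When CONFIG_NET_TCP_ISN_RFC6528 is disabled, the keyed MD5 hash above is
 * compiled out and the initial sequence number falls back to plain
 * sys_rand32_get(). That is weaker against off-path sequence-number
 * guessing, but avoids the mbed TLS dependency.
 */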
2311
2312 /* Create a new TCP connection and, as a part of it, create and register
2313 * a net_context
2314 */
2315 static struct tcp *tcp_conn_new(struct net_pkt *pkt)
2316 {
2317 struct tcp *conn = NULL;
2318 struct net_context *context = NULL;
2319 sa_family_t af = net_pkt_family(pkt);
2320 struct sockaddr local_addr = { 0 };
2321 int ret;
2322
2323 ret = net_context_get(af, SOCK_STREAM, IPPROTO_TCP, &context);
2324 if (ret < 0) {
2325 NET_ERR("net_context_get(): %d", ret);
2326 goto err;
2327 }
2328
2329 conn = context->tcp;
2330 conn->iface = pkt->iface;
2331 tcp_derive_rto(conn);
2332
2333 net_context_set_family(conn->context, net_pkt_family(pkt));
2334
2335 if (tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC) < 0) {
2336 net_context_put(context);
2337 conn = NULL;
2338 goto err;
2339 }
2340
2341 if (tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST) < 0) {
2342 net_context_put(context);
2343 conn = NULL;
2344 goto err;
2345 }
2346
2347 NET_DBG("conn: src: %s, dst: %s",
2348 net_sprint_addr(conn->src.sa.sa_family,
2349 (const void *)&conn->src.sin.sin_addr),
2350 net_sprint_addr(conn->dst.sa.sa_family,
2351 (const void *)&conn->dst.sin.sin_addr));
2352
2353 memcpy(&context->remote, &conn->dst, sizeof(context->remote));
2354 context->flags |= NET_CONTEXT_REMOTE_ADDR_SET;
2355
2356 net_sin_ptr(&context->local)->sin_family = af;
2357
2358 local_addr.sa_family = net_context_get_family(context);
2359
2360 if (IS_ENABLED(CONFIG_NET_IPV6) &&
2361 net_context_get_family(context) == AF_INET6) {
2362 net_ipaddr_copy(&net_sin6(&local_addr)->sin6_addr,
2363 &conn->src.sin6.sin6_addr);
2364 } else if (IS_ENABLED(CONFIG_NET_IPV4) &&
2365 net_context_get_family(context) == AF_INET) {
2366 net_ipaddr_copy(&net_sin(&local_addr)->sin_addr,
2367 &conn->src.sin.sin_addr);
2368 }
2369
2370 ret = net_context_bind(context, &local_addr, sizeof(local_addr));
2371 if (ret < 0) {
2372 NET_DBG("Cannot bind accepted context, connection reset");
2373 net_context_put(context);
2374 conn = NULL;
2375 goto err;
2376 }
2377
2378 /* The newly created context object for the new TCP client connection needs
2379 * all four parameters of the tuple (local address, local port, remote
2380 * address, remote port) to be properly identified. Remote address and port
2381 * are already copied above from conn->dst. The call to net_context_bind
2382 * with the prepared local_addr further copies the local address. However,
2383 * this call won't copy the local port, as the bind would then fail due to
2384 * an address/port reuse without the REUSEPORT option enabled for both
2385 * connections. Therefore, we copy the port after the bind call.
2386 * It is safe to bind to this address/port combination, as the new TCP
2387 * client connection is separated from the local listening connection
2388 * by the specified remote address and remote port.
2389 */
2390 if (IS_ENABLED(CONFIG_NET_IPV6) &&
2391 net_context_get_family(context) == AF_INET6) {
2392 net_sin6_ptr(&context->local)->sin6_port = conn->src.sin6.sin6_port;
2393 } else if (IS_ENABLED(CONFIG_NET_IPV4) &&
2394 net_context_get_family(context) == AF_INET) {
2395 net_sin_ptr(&context->local)->sin_port = conn->src.sin.sin_port;
2396 }
2397
2398 if (!(IS_ENABLED(CONFIG_NET_TEST_PROTOCOL) ||
2399 IS_ENABLED(CONFIG_NET_TEST))) {
2400 conn->seq = tcp_init_isn(&local_addr, &context->remote);
2401 }
2402
2403 NET_DBG("context: local: %s, remote: %s",
2404 net_sprint_addr(local_addr.sa_family,
2405 (const void *)&net_sin(&local_addr)->sin_addr),
2406 net_sprint_addr(context->remote.sa_family,
2407 (const void *)&net_sin(&context->remote)->sin_addr));
2408
2409 ret = net_conn_register(IPPROTO_TCP, af,
2410 &context->remote, &local_addr,
2411 ntohs(conn->dst.sin.sin_port),/* remote port */
2412 ntohs(conn->src.sin.sin_port),/* local port */
2413 context, tcp_recv, context,
2414 &context->conn_handler);
2415 if (ret < 0) {
2416 NET_ERR("net_conn_register(): %d", ret);
2417 net_context_put(context);
2418 conn = NULL;
2419 goto err;
2420 }
2421 err:
2422 if (!conn) {
2423 net_stats_update_tcp_seg_conndrop(net_pkt_iface(pkt));
2424 }
2425
2426 return conn;
2427 }
2428
2429 static bool tcp_validate_seq(struct tcp *conn, struct tcphdr *hdr)
2430 {
2431 return (net_tcp_seq_cmp(th_seq(hdr), conn->ack) >= 0) &&
2432 (net_tcp_seq_cmp(th_seq(hdr), conn->ack + conn->recv_win) < 0);
2433 }
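/* Example: with conn->ack == 1000 and conn->recv_win == 500, only segments
 * whose sequence number falls in the half-open interval [1000, 1500) are
 * accepted. net_tcp_seq_cmp() compares in modulo-2^32 arithmetic, so the
 * check also behaves correctly across sequence number wrap-around.
 */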
2434
2435 static int32_t tcp_compute_new_length(struct tcp *conn, struct tcphdr *hdr, size_t len,
2436 bool fin_received)
2437 {
2438 int32_t new_len = 0;
2439
2440 if (len > 0) {
2441 /* Cases:
2442 * - Data already received earlier: new_len <= 0
2443 * - Partially new data new_len > 0
2444 * - Out of order data new_len > 0,
2445 * should be checked by sequence number
2446 */
2447 new_len = (int32_t)(len) - net_tcp_seq_cmp(conn->ack, th_seq(hdr));
2448 if (fin_received) {
2449 /* Add with one additional byte as the FIN flag has to be subtracted */
2450 new_len++;
2451 }
2452 }
2453 return new_len;
2454 }
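/* Example: if conn->ack == 105 and a segment arrives with th_seq == 100
 * and len == 10, the first 5 bytes are a retransmission and
 * new_len = 10 - (105 - 100) = 5 genuinely new bytes. A fully duplicated
 * segment yields new_len <= 0, and a FIN occupies one extra unit of
 * sequence space, hence the new_len++ above.
 */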
2455
2456 static enum tcp_state tcp_enter_time_wait(struct tcp *conn)
2457 {
2458 tcp_send_timer_cancel(conn);
2459 /* Entering TIME-WAIT, so cancel the timer and start the TIME-WAIT timer */
2460 k_work_cancel_delayable(&conn->fin_timer);
2461 k_work_reschedule_for_queue(
2462 &tcp_work_q, &conn->timewait_timer,
2463 K_MSEC(CONFIG_NET_TCP_TIME_WAIT_DELAY));
2464 return TCP_TIME_WAIT;
2465 }
2466
2467 static bool check_seq_list(struct net_buf *buf)
2468 {
2469 struct net_buf *last = NULL;
2470 struct net_buf *tmp = buf;
2471 uint32_t seq;
2472 uint32_t next_seq = 0;
2473 bool result = true;
2474
2475 while (tmp) {
2476 seq = tcp_get_seq(tmp);
2477
2478 NET_DBG("buf %p seq %u len %d", tmp, seq, tmp->len);
2479
2480 if (last != NULL) {
2481 if (next_seq != seq) {
2482 result = false;
2483 }
2484 }
2485
2486 next_seq = seq + tmp->len;
2487 last = tmp;
2488 tmp = tmp->frags;
2489 }
2490 return result;
2491 }
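/* Example of a fragment chain that passes the check above:
 * (seq 100, len 50) -> (seq 150, len 30) -> (seq 180, len 20) is contiguous
 * because every fragment starts exactly where the previous one ended.
 * A gap or overlap, e.g. a second fragment starting at seq 160, makes
 * check_seq_list() return false and the caller drops the queued data.
 */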
2492
2493 static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
2494 size_t len, uint32_t seq)
2495 {
2496 uint32_t seq_start = seq;
2497 bool inserted = false;
2498 struct net_buf *tmp;
2499
2500 NET_DBG("conn: %p len %zd seq %u ack %u", conn, len, seq, conn->ack);
2501
2502 tmp = pkt->buffer;
2503
2504 tcp_set_seq(tmp, seq);
2505 seq += tmp->len;
2506 tmp = tmp->frags;
2507
2508 while (tmp) {
2509 tcp_set_seq(tmp, seq);
2510 seq += tmp->len;
2511 tmp = tmp->frags;
2512 }
2513
2514 if (IS_ENABLED(CONFIG_NET_TCP_LOG_LEVEL_DBG)) {
2515 NET_DBG("Queuing data: conn %p", conn);
2516 }
2517
2518 if (!net_pkt_is_empty(conn->queue_recv_data)) {
2519 /* Place the data in the correct position in the list. If the data
2520 * would not be sequential, then drop this packet.
2521 *
2522 * Only work with subtractions between sequence numbers in uint32_t format
2523 * to properly handle cases that are around the wrapping point.
2524 */
2525
2526 /* Some potential cases:
2527 * Note: MI = MAX_INT
2528 * Packet | Queued| End off1 | Start off| End off2 | Required handling
2529 * Seq|Len|Seq|Len| | | |
2530 * 3 | 3 | 6 | 4 | 3+3-6= 0 | NA | NA | Prepend
2531 * 3 | 4 | 6 | 4 | 3+4-6 = 1 | NA | NA | Prepend, pull from buffer
2532 * 3 | 7 | 6 | 4 | 3+7-6 = 4 | 6-3=3 | 6+4-3=7 | Drop queued data
2533 * 3 | 8 | 6 | 4 | 3+8-6 = 5 | 6-3=3 | 6+4-3=7 | Drop queued data
2534 * 6 | 5 | 6 | 4 | 6+5-6 = 5 | 6-6=0 | 6+4-6=4 | Drop queued data
2535 * 6 | 4 | 6 | 4 | 6+4-6 = 4 | 6-6=0 | 6+4-6=4 | Drop queued data / packet
2536 * 7 | 2 | 6 | 4 | 7+2-6 = 3 | 6-7=MI | 6+4-7=3 | Drop packet
2537 * 10 | 2 | 6 | 4 | 10+2-6= 6 | 6-10=MI-3| 6+4-10=0 | Append
2538 * 7 | 4 | 6 | 4 | 7+4-6 = 5 | 6-7 =MI | 6+4-7 =3 | Append, pull from packet
2539 * 11 | 2 | 6 | 4 | 11+2-6= 7 | 6-11=MI-6| 6+4-11=MI-1 | Drop incoming packet
2540 * 2 | 3 | 6 | 4 | 2+3-6= MI | 6-2=4 | 6+4-2=8 | Drop incoming packet
2541 */
2542
2543 uint32_t pending_seq;
2544 uint32_t start_offset;
2545 uint32_t end_offset;
2546 size_t pending_len;
2547
2548 pending_seq = tcp_get_seq(conn->queue_recv_data->buffer);
2549 end_offset = seq - pending_seq;
2550 pending_len = net_pkt_get_len(conn->queue_recv_data);
2551 if (end_offset < pending_len) {
2552 if (end_offset < len) {
2553 if (end_offset) {
2554 net_pkt_remove_tail(pkt, end_offset);
2555 }
2556
2557 /* Put new data before the pending data */
2558 net_buf_frag_add(pkt->buffer,
2559 conn->queue_recv_data->buffer);
2560 NET_DBG("Adding at before queue, end_offset %i, pending_len %zu",
2561 end_offset, pending_len);
2562 conn->queue_recv_data->buffer = pkt->buffer;
2563 inserted = true;
2564 }
2565 } else {
2566 struct net_buf *last;
2567
2568 last = net_buf_frag_last(conn->queue_recv_data->buffer);
2569 pending_seq = tcp_get_seq(last);
2570
2571 start_offset = pending_seq - seq_start;
2572 /* Compute the offset w.r.t. the start point of the new packet */
2573 end_offset = (pending_seq + last->len) - seq_start;
2574
2575 /* Check if the queued data starts within the new packet */
2576 if ((start_offset < len) && (end_offset <= len)) {
2577 /* The queued data is irrelevant since the new packet
2578 * fully overlaps it, take the new packet as contents
2579 */
2580 net_buf_unref(conn->queue_recv_data->buffer);
2581 conn->queue_recv_data->buffer = pkt->buffer;
2582 inserted = true;
2583 } else {
2584 if (end_offset < len) {
2585 if (end_offset) {
2586 net_pkt_remove_tail(conn->queue_recv_data,
2587 end_offset);
2588 }
2589
2590 /* Put new data after pending data */
2591 NET_DBG("Adding at end of queue, start %i, end %i, len %zu",
2592 start_offset, end_offset, len);
2593 net_buf_frag_add(conn->queue_recv_data->buffer,
2594 pkt->buffer);
2595 inserted = true;
2596 }
2597 }
2598 }
2599
2600 if (inserted) {
2601 NET_DBG("All pending data: conn %p", conn);
2602 if (check_seq_list(conn->queue_recv_data->buffer) == false) {
2603 NET_ERR("Incorrect order in out of order sequence for conn %p",
2604 conn);
2605 /* error in sequence list, drop it */
2606 net_buf_unref(conn->queue_recv_data->buffer);
2607 conn->queue_recv_data->buffer = NULL;
2608 }
2609 } else {
2610 NET_DBG("Cannot add new data to queue");
2611 }
2612 } else {
2613 net_pkt_append_buffer(conn->queue_recv_data, pkt->buffer);
2614 inserted = true;
2615 }
2616
2617 if (inserted) {
2618 /* We need to keep the received data but free the pkt */
2619 pkt->buffer = NULL;
2620
2621 if (!k_work_delayable_is_pending(&conn->recv_queue_timer)) {
2622 k_work_reschedule_for_queue(
2623 &tcp_work_q, &conn->recv_queue_timer,
2624 K_MSEC(CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT));
2625 }
2626 }
2627 }
2628
2629 static enum net_verdict tcp_data_received(struct tcp *conn, struct net_pkt *pkt,
2630 size_t *len)
2631 {
2632 enum net_verdict ret;
2633
2634 if (*len == 0) {
2635 return NET_DROP;
2636 }
2637
2638 ret = tcp_data_get(conn, pkt, len);
2639
2640 net_stats_update_tcp_seg_recv(conn->iface);
2641 conn_ack(conn, *len);
2642
2643 /* Delay ACK response in case of small window or missing PSH,
2644 * as described in RFC 813.
2645 */
2646 if (tcp_short_window(conn)) {
2647 k_work_schedule_for_queue(&tcp_work_q, &conn->ack_timer,
2648 ACK_DELAY);
2649 } else {
2650 k_work_cancel_delayable(&conn->ack_timer);
2651 tcp_out(conn, ACK);
2652 }
2653
2654 return ret;
2655 }
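/* Note on the delayed ACK above: when the advertised receive window has
 * shrunk below a useful size, RFC 813 recommends withholding the ACK
 * briefly (here by ACK_DELAY) so that a larger window update can be sent
 * in one go, avoiding silly window syndrome; otherwise the data is
 * acknowledged immediately.
 */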
2656
2657 static void tcp_out_of_order_data(struct tcp *conn, struct net_pkt *pkt,
2658 size_t data_len, uint32_t seq)
2659 {
2660 size_t headers_len;
2661
2662 if (data_len == 0) {
2663 return;
2664 }
2665
2666 headers_len = net_pkt_get_len(pkt) - data_len;
2667
2668 /* Get rid of protocol headers from the data */
2669 if (tcp_pkt_pull(pkt, headers_len) < 0) {
2670 return;
2671 }
2672
2673 /* We received out-of-order data. Try to queue it.
2674 */
2675 tcp_queue_recv_data(conn, pkt, data_len, seq);
2676 }
2677
2678 static void tcp_check_sock_options(struct tcp *conn)
2679 {
2680 int sndbuf_opt = 0;
2681 int rcvbuf_opt = 0;
2682
2683 if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDBUF)) {
2684 (void)net_context_get_option(conn->context, NET_OPT_SNDBUF,
2685 &sndbuf_opt, NULL);
2686 }
2687
2688 if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVBUF)) {
2689 (void)net_context_get_option(conn->context, NET_OPT_RCVBUF,
2690 &rcvbuf_opt, NULL);
2691 }
2692
2693 if (sndbuf_opt > 0 && sndbuf_opt != conn->send_win_max) {
2694 k_mutex_lock(&conn->lock, K_FOREVER);
2695
2696 conn->send_win_max = sndbuf_opt;
2697 if (conn->send_win > conn->send_win_max) {
2698 conn->send_win = conn->send_win_max;
2699 }
2700
2701 k_mutex_unlock(&conn->lock);
2702 }
2703
2704 if (rcvbuf_opt > 0 && rcvbuf_opt != conn->recv_win_max) {
2705 int diff;
2706
2707 k_mutex_lock(&conn->lock, K_FOREVER);
2708
2709 diff = rcvbuf_opt - conn->recv_win_max;
2710 conn->recv_win_max = rcvbuf_opt;
2711 tcp_update_recv_wnd(conn, diff);
2712
2713 k_mutex_unlock(&conn->lock);
2714 }
2715 }
2716
2717 /* TCP state machine, everything happens here */
2718 static enum net_verdict tcp_in(struct tcp *conn, struct net_pkt *pkt)
2719 {
2720 struct tcphdr *th = pkt ? th_get(pkt) : NULL;
2721 uint8_t next = 0, fl = 0;
2722 bool do_close = false;
2723 bool connection_ok = false;
2724 size_t tcp_options_len = th ? (th_off(th) - 5) * 4 : 0;
2725 struct net_conn *conn_handler = NULL;
2726 struct net_pkt *recv_pkt;
2727 void *recv_user_data;
2728 struct k_fifo *recv_data_fifo;
2729 size_t len;
2730 int ret;
2731 int close_status = 0;
2732 enum net_verdict verdict = NET_DROP;
2733
2734 if (th) {
2735 /* Currently we ignore ECN and CWR flags */
2736 fl = th_flags(th) & ~(ECN | CWR);
2737 }
2738
2739 if (conn->state != TCP_SYN_SENT) {
2740 tcp_check_sock_options(conn);
2741 }
2742
2743 k_mutex_lock(&conn->lock, K_FOREVER);
2744
2745 /* Connection context was already freed. */
2746 if (conn->state == TCP_UNUSED) {
2747 k_mutex_unlock(&conn->lock);
2748 return NET_DROP;
2749 }
2750
2751 NET_DBG("%s", tcp_conn_state(conn, pkt));
2752
2753 if (th && th_off(th) < 5) {
2754 tcp_out(conn, RST);
2755 do_close = true;
2756 close_status = -ECONNRESET;
2757 goto out;
2758 }
2759
2760 if (FL(&fl, &, RST)) {
2761 /* We only accept an RST packet that has a valid seq field. */
2762 if (!tcp_validate_seq(conn, th)) {
2763 net_stats_update_tcp_seg_rsterr(net_pkt_iface(pkt));
2764 k_mutex_unlock(&conn->lock);
2765 return NET_DROP;
2766 }
2767
2768 /* Valid RST received. */
2769 verdict = NET_OK;
2770 net_stats_update_tcp_seg_rst(net_pkt_iface(pkt));
2771 do_close = true;
2772 close_status = -ECONNRESET;
2773
2774 /* If we receive RST and ACK for the sent SYN, it means
2775 * that there is no socket listening on the port we are trying
2776 * to connect to. Set the errno properly in this case.
2777 */
2778 if (conn->in_connect) {
2779 fl = th_flags(th);
2780 if (FL(&fl, ==, RST | ACK)) {
2781 close_status = -ECONNREFUSED;
2782 }
2783 }
2784
2785 goto out;
2786 }
2787
2788 if (tcp_options_len && !tcp_options_check(&conn->recv_options, pkt,
2789 tcp_options_len)) {
2790 NET_DBG("DROP: Invalid TCP option list");
2791 tcp_out(conn, RST);
2792 do_close = true;
2793 close_status = -ECONNRESET;
2794 goto out;
2795 }
2796
2797 if (th && (conn->state != TCP_LISTEN) && (conn->state != TCP_SYN_SENT) &&
2798 tcp_validate_seq(conn, th) && FL(&fl, &, SYN)) {
2799 /* According to RFC 793, ch 3.9 Event Processing, receiving SYN
2800 * once the connection has been established is an error
2801 * condition, reset should be sent and connection closed.
2802 */
2803 NET_DBG("conn: %p, SYN received in %s state, dropping connection",
2804 conn, tcp_state_to_str(conn->state, false));
2805 net_stats_update_tcp_seg_drop(conn->iface);
2806 tcp_out(conn, RST);
2807 do_close = true;
2808 close_status = -ECONNRESET;
2809 goto out;
2810 }
2811
2812 if (th) {
2813 conn->send_win = ntohs(th_win(th));
2814 if (conn->send_win > conn->send_win_max) {
2815 NET_DBG("Lowering send window from %u to %u",
2816 conn->send_win, conn->send_win_max);
2817
2818 conn->send_win = conn->send_win_max;
2819 }
2820
2821 if (conn->send_win == 0) {
2822 if (!k_work_delayable_is_pending(&conn->persist_timer)) {
2823 conn->zwp_retries = 0;
2824 (void)k_work_reschedule_for_queue(
2825 &tcp_work_q, &conn->persist_timer,
2826 K_MSEC(TCP_RTO_MS));
2827 }
2828 } else {
2829 (void)k_work_cancel_delayable(&conn->persist_timer);
2830 }
2831
2832 if (tcp_window_full(conn)) {
2833 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT);
2834 } else {
2835 k_sem_give(&conn->tx_sem);
2836 }
2837 }
2838
2839 next_state:
2840 len = pkt ? tcp_data_len(pkt) : 0;
2841
2842 switch (conn->state) {
2843 case TCP_LISTEN:
2844 if (FL(&fl, ==, SYN)) {
2845 /* Make sure our MSS is also sent in the ACK */
2846 conn->send_options.mss_found = true;
2847 conn_ack(conn, th_seq(th) + 1); /* capture peer's isn */
2848 tcp_out(conn, SYN | ACK);
2849 conn->send_options.mss_found = false;
2850 conn_seq(conn, + 1);
2851 next = TCP_SYN_RECEIVED;
2852
2853 /* Close the connection if we do not receive the ACK in time.
2854 */
2855 k_work_reschedule_for_queue(&tcp_work_q,
2856 &conn->establish_timer,
2857 ACK_TIMEOUT);
2858 verdict = NET_OK;
2859 } else {
2860 conn->send_options.mss_found = true;
2861 tcp_out(conn, SYN);
2862 conn->send_options.mss_found = false;
2863 conn_seq(conn, + 1);
2864 next = TCP_SYN_SENT;
2865 tcp_conn_ref(conn);
2866 }
2867 break;
2868 case TCP_SYN_RECEIVED:
2869 if (FL(&fl, &, ACK, th_ack(th) == conn->seq &&
2870 th_seq(th) == conn->ack)) {
2871 net_tcp_accept_cb_t accept_cb = NULL;
2872 struct net_context *context = NULL;
2873
2874 if (conn->accepted_conn != NULL) {
2875 accept_cb = conn->accepted_conn->accept_cb;
2876 context = conn->accepted_conn->context;
2877 keep_alive_param_copy(conn, conn->accepted_conn);
2878 }
2879
2880 k_work_cancel_delayable(&conn->establish_timer);
2881 tcp_send_timer_cancel(conn);
2882 tcp_conn_ref(conn);
2883 net_context_set_state(conn->context,
2884 NET_CONTEXT_CONNECTED);
2885
2886 /* Make sure the accept_cb is only called once. */
2887 conn->accepted_conn = NULL;
2888
2889 if (accept_cb == NULL) {
2890 /* In case of no accept_cb registered,
2891 * application will not take ownership of the
2892 * connection. To prevent connection leak, unref
2893 * the TCP context and put the connection into
2894 * active close (TCP_FIN_WAIT_1).
2895 */
2896 net_tcp_put(conn->context);
2897 break;
2898 }
2899
2900 keep_alive_timer_restart(conn);
2901
2902 net_ipaddr_copy(&conn->context->remote, &conn->dst.sa);
2903
2904 /* Check if v4-mapping-to-v6 needs to be done for
2905 * the accepted socket.
2906 */
2907 if (IS_ENABLED(CONFIG_NET_IPV4_MAPPING_TO_IPV6) &&
2908 net_context_get_family(conn->context) == AF_INET &&
2909 net_context_get_family(context) == AF_INET6 &&
2910 !net_context_is_v6only_set(context)) {
2911 struct in6_addr mapped;
2912
2913 net_ipv6_addr_create_v4_mapped(
2914 &net_sin(&conn->context->remote)->sin_addr,
2915 &mapped);
2916 net_ipaddr_copy(&net_sin6(&conn->context->remote)->sin6_addr,
2917 &mapped);
2918
2919 net_sin6(&conn->context->remote)->sin6_family = AF_INET6;
2920
2921 NET_DBG("Setting v4 mapped address %s",
2922 net_sprint_ipv6_addr(&mapped));
2923
2924 /* Note that we cannot set the local address to IPv6 one
2925 * as that is used to match the connection, and not just
2926 * for printing. The remote address is only used for
2927 * passing it to accept() and printing it by "net conn"
2928 * command.
2929 */
2930 }
2931
2932 accept_cb(conn->context, &conn->context->remote,
2933 net_context_get_family(context) == AF_INET6 ?
2934 sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in),
2935 0, context);
2936
2937 next = TCP_ESTABLISHED;
2938
2939 tcp_ca_init(conn);
2940
2941 if (len) {
2942 verdict = tcp_data_get(conn, pkt, &len);
2943 if (verdict == NET_OK) {
2944 /* net_pkt owned by the recv fifo now */
2945 pkt = NULL;
2946 }
2947
2948 conn_ack(conn, + len);
2949 tcp_out(conn, ACK);
2950 } else {
2951 verdict = NET_OK;
2952 }
2953
2954 /* ACK for SYN | ACK has been received. This signals that
2955 * the connection is making "forward progress".
2956 */
2957 tcp_nbr_reachability_hint(conn);
2958 }
2959 break;
2960 case TCP_SYN_SENT:
2961 /* if we are in SYN SENT and receive only a SYN without an
2962 * ACK, shouldn't we go to SYN RECEIVED state? See Figure
2963 * 6 of RFC 793
2964 */
2965 if (FL(&fl, &, SYN | ACK, th && th_ack(th) == conn->seq)) {
2966 tcp_send_timer_cancel(conn);
2967 conn_ack(conn, th_seq(th) + 1);
2968 if (len) {
2969 verdict = tcp_data_get(conn, pkt, &len);
2970 if (verdict == NET_OK) {
2971 /* net_pkt owned by the recv fifo now */
2972 pkt = NULL;
2973 }
2974
2975 conn_ack(conn, + len);
2976 } else {
2977 verdict = NET_OK;
2978 }
2979
2980 next = TCP_ESTABLISHED;
2981 net_context_set_state(conn->context,
2982 NET_CONTEXT_CONNECTED);
2983 tcp_ca_init(conn);
2984 tcp_out(conn, ACK);
2985 keep_alive_timer_restart(conn);
2986
2987 /* The connection semaphore is released *after*
2988 * we have changed the connection state. This way
2989 * the application can send data and it is queued
2990 * properly even if this thread is running in lower
2991 * priority.
2992 */
2993 connection_ok = true;
2994
2995 /* ACK for SYN has been received. This signals that
2996 * the connection is making "forward progress".
2997 */
2998 tcp_nbr_reachability_hint(conn);
2999 } else if (pkt) {
3000 net_tcp_reply_rst(pkt);
3001 }
3002
3003 break;
3004 case TCP_ESTABLISHED:
3005 /* full-close */
3006 if (th && FL(&fl, ==, (FIN | ACK), th_seq(th) == conn->ack)) {
3007 if (net_tcp_seq_cmp(th_ack(th), conn->seq) > 0) {
3008 uint32_t len_acked = th_ack(th) - conn->seq;
3009
3010 conn_seq(conn, + len_acked);
3011 }
3012
3013 conn_ack(conn, + 1);
3014 tcp_out(conn, FIN | ACK);
3015 next = TCP_LAST_ACK;
3016 verdict = NET_OK;
3017 keep_alive_timer_stop(conn);
3018 tcp_setup_last_ack_timer(conn);
3019 break;
3020 } else if (th && FL(&fl, ==, FIN, th_seq(th) == conn->ack)) {
3021 conn_ack(conn, + 1);
3022 tcp_out(conn, ACK);
3023 next = TCP_CLOSE_WAIT;
3024 verdict = NET_OK;
3025 keep_alive_timer_stop(conn);
3026 break;
3027 } else if (th && FL(&fl, ==, (FIN | ACK | PSH),
3028 th_seq(th) == conn->ack)) {
3029 if (len) {
3030 verdict = tcp_data_get(conn, pkt, &len);
3031 if (verdict == NET_OK) {
3032 /* net_pkt owned by the recv fifo now */
3033 pkt = NULL;
3034 }
3035 } else {
3036 verdict = NET_OK;
3037 }
3038
3039 conn_ack(conn, + len + 1);
3040 tcp_out(conn, FIN | ACK);
3041 next = TCP_LAST_ACK;
3042 keep_alive_timer_stop(conn);
3043 tcp_setup_last_ack_timer(conn);
3044 break;
3045 }
3046
3047 /* Whatever we've received, we know that peer is alive, so reset
3048 * the keepalive timer.
3049 */
3050 keep_alive_timer_restart(conn);
3051
3052 #ifdef CONFIG_NET_TCP_FAST_RETRANSMIT
3053 if (th && (net_tcp_seq_cmp(th_ack(th), conn->seq) == 0)) {
3054 /* Only if there is pending data, increment the duplicate ack count */
3055 if (conn->send_data_total > 0) {
3056 /* There could also be payload; only count segments without payload */
3057 if (len == 0) {
3058 /* Increment the duplicate ack counter,
3059 * but cap the value
3060 */
3061 conn->dup_ack_cnt = MIN(conn->dup_ack_cnt + 1,
3062 DUPLICATE_ACK_RETRANSMIT_TRHESHOLD + 1);
3063 tcp_ca_dup_ack(conn);
3064 }
3065 } else {
3066 conn->dup_ack_cnt = 0;
3067 }
3068
3069 /* Only do fast retransmit when not already in a resend state */
3070 if ((conn->data_mode == TCP_DATA_MODE_SEND) &&
3071 (conn->dup_ack_cnt == DUPLICATE_ACK_RETRANSMIT_TRHESHOLD)) {
3072 /* Apply a fast retransmit */
3073 int temp_unacked_len = conn->unacked_len;
3074
3075 conn->unacked_len = 0;
3076
3077 (void)tcp_send_data(conn);
3078
3079 /* Restore the current transmission */
3080 conn->unacked_len = temp_unacked_len;
3081
3082 tcp_ca_fast_retransmit(conn);
3083 if (tcp_window_full(conn)) {
3084 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT);
3085 }
3086 }
3087 }
3088 #endif
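/* Example of the fast retransmit logic above: while conn->send_data_total
 * is non-zero, every payload-less segment acknowledging the same sequence
 * number increments dup_ack_cnt. On the third duplicate ACK (the
 * conventional fast retransmit threshold) the unacknowledged data is
 * resent immediately by temporarily zeroing unacked_len, instead of
 * waiting for the retransmission timer to fire.
 */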
3089 NET_ASSERT((conn->send_data_total == 0) ||
3090 k_work_delayable_is_pending(&conn->send_data_timer),
3091 "conn: %p, Missing a subscription "
3092 "of the send_data queue timer", conn);
3093
3094 if (th && (net_tcp_seq_cmp(th_ack(th), conn->seq) > 0)) {
3095 uint32_t len_acked = th_ack(th) - conn->seq;
3096
3097 NET_DBG("conn: %p len_acked=%u", conn, len_acked);
3098
3099 if ((conn->send_data_total < len_acked) ||
3100 (tcp_pkt_pull(conn->send_data,
3101 len_acked) < 0)) {
3102 NET_ERR("conn: %p, Invalid len_acked=%u "
3103 "(total=%zu)", conn, len_acked,
3104 conn->send_data_total);
3105 net_stats_update_tcp_seg_drop(conn->iface);
3106 tcp_out(conn, RST);
3107 do_close = true;
3108 close_status = -ECONNRESET;
3109 break;
3110 }
3111
3112 #ifdef CONFIG_NET_TCP_FAST_RETRANSMIT
3113 /* New segment, reset duplicate ack counter */
3114 conn->dup_ack_cnt = 0;
3115 #endif
3116 tcp_ca_pkts_acked(conn, len_acked);
3117
3118 conn->send_data_total -= len_acked;
3119 if (conn->unacked_len < len_acked) {
3120 conn->unacked_len = 0;
3121 } else {
3122 conn->unacked_len -= len_acked;
3123 }
3124
3125 if (!tcp_window_full(conn)) {
3126 k_sem_give(&conn->tx_sem);
3127 }
3128
3129 conn_seq(conn, + len_acked);
3130 net_stats_update_tcp_seg_recv(conn->iface);
3131
3132 /* Receipt of an acknowledgment that covers a sequence number
3133 * not previously acknowledged indicates that the connection
3134 * makes a "forward progress".
3135 */
3136 tcp_nbr_reachability_hint(conn);
3137
3138 conn_send_data_dump(conn);
3139
3140 conn->send_data_retries = 0;
3141 if (conn->data_mode == TCP_DATA_MODE_RESEND) {
3142 conn->unacked_len = 0;
3143 tcp_derive_rto(conn);
3144 }
3145 conn->data_mode = TCP_DATA_MODE_SEND;
3146 if (conn->send_data_total > 0) {
3147 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer,
3148 K_MSEC(TCP_RTO_MS));
3149 }
3150
3151 /* We are closing the connection, send a FIN to peer */
3152 if (conn->in_close && conn->send_data_total == 0) {
3153 tcp_send_timer_cancel(conn);
3154 next = TCP_FIN_WAIT_1;
3155
3156 k_work_reschedule_for_queue(&tcp_work_q,
3157 &conn->fin_timer,
3158 FIN_TIMEOUT);
3159
3160 tcp_out(conn, FIN | ACK);
3161 conn_seq(conn, + 1);
3162 verdict = NET_OK;
3163 keep_alive_timer_stop(conn);
3164 break;
3165 }
3166
3167 ret = tcp_send_queued_data(conn);
3168 if (ret < 0 && ret != -ENOBUFS) {
3169 tcp_out(conn, RST);
3170 do_close = true;
3171 close_status = ret;
3172 verdict = NET_OK;
3173 break;
3174 }
3175
3176 if (tcp_window_full(conn)) {
3177 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT);
3178 }
3179 }
3180
3181 if (th) {
3182 if (th_seq(th) == conn->ack) {
3183 if (len > 0) {
3184 verdict = tcp_data_received(conn, pkt, &len);
3185 if (verdict == NET_OK) {
3186 /* net_pkt owned by the recv fifo now */
3187 pkt = NULL;
3188 }
3189 } else {
3190 /* ACK, no data */
3191 verdict = NET_OK;
3192 }
3193 } else if (net_tcp_seq_greater(conn->ack, th_seq(th))) {
3194 /* This should handle the acknowledgements of keep alive
3195 * packets and retransmitted data.
3196 * RISK:
3197 * There is a tiny risk of creating an ACK loop this way when
3198 * both ends of the connection are out of order due to packet
3199 * loss in a simultaneous bidirectional data flow.
3200 */
3201 tcp_out(conn, ACK); /* peer has resent */
3202
3203 net_stats_update_tcp_seg_ackerr(conn->iface);
3204 verdict = NET_OK;
3205 } else if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
3206 tcp_out_of_order_data(conn, pkt, len,
3207 th_seq(th));
3208 /* Send out a duplicated ACK */
3209 if ((len > 0) || FL(&fl, &, FIN)) {
3210 tcp_out(conn, ACK);
3211 }
3212
3213 verdict = NET_OK;
3214 }
3215 }
3216
3217 /* Check if there is possibly any data left to retransmit */
3218 if (conn->send_data_total == 0) {
3219 conn->send_data_retries = 0;
3220 k_work_cancel_delayable(&conn->send_data_timer);
3221 }
3222
3223 /* A lot could have happened to the transmission window, check the situation here */
3224 if (tcp_window_full(conn)) {
3225 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT);
3226 } else {
3227 k_sem_give(&conn->tx_sem);
3228 }
3229
3230 break;
3231 case TCP_CLOSE_WAIT:
3232 tcp_out(conn, FIN);
3233 next = TCP_LAST_ACK;
3234 tcp_setup_last_ack_timer(conn);
3235 break;
3236 case TCP_LAST_ACK:
3237 if (th && FL(&fl, ==, ACK, th_seq(th) == conn->ack)) {
3238 tcp_send_timer_cancel(conn);
3239 do_close = true;
3240 verdict = NET_OK;
3241 close_status = 0;
3242
3243 /* Remove the last ack timer if we received it in time */
3244 tcp_cancel_last_ack_timer(conn);
3245 }
3246 break;
3247 case TCP_CLOSED:
3248 break;
3249 case TCP_FIN_WAIT_1:
3250 /*
3251 * FIN1:
3252 * Acknowledge path and sequence path are independent, treat them that way
3253 * The table of incoming messages and their destination states:
3254 * - & - -> TCP_FIN_WAIT_1
3255 * FIN & - -> TCP_CLOSING
3256 * - & ACK -> TCP_FIN_WAIT_2
3257 * FIN & ACK -> TCP_TIME_WAIT
3258 */
3259 if (th) {
3260 bool fin_acked = false;
3261
3262 if (tcp_compute_new_length(conn, th, len, false) > 0) {
3263 /* We do not implement half-closed sockets, therefore
3264 * we cannot accept new data after sending our FIN, and
3265 * as we are in sequence we can send a reset now.
3266 */
3267 net_stats_update_tcp_seg_drop(conn->iface);
3268
3269 next = tcp_enter_time_wait(conn);
3270
3271 tcp_out(conn, RST);
3272 break;
3273 }
3274 if (FL(&fl, &, ACK, th_ack(th) == conn->seq)) {
3275 NET_DBG("conn %p: FIN acknowledged, going to FIN_WAIT_2 "
3276 "state seq %u, ack %u"
3277 , conn, conn->seq, conn->ack);
3278 tcp_send_timer_cancel(conn);
3279 fin_acked = true;
3280 next = TCP_FIN_WAIT_2;
3281 verdict = NET_OK;
3282 }
3283
3284 /*
3285 * There can also be data in the message, so include the packet
3286 * length when checking the sequence number of the FIN flag against our ACK
3287 */
3288 if (FL(&fl, &, FIN, net_tcp_seq_cmp(th_seq(th) + len, conn->ack) == 0)) {
3289 conn_ack(conn, + 1);
3290
3291 /* The state path depends on whether the ACK has arrived */
3292 if (fin_acked) {
3293 /* Already acknowledged, we can go further */
3294 NET_DBG("conn %p: FIN received, going to TIME WAIT", conn);
3295
3296 next = tcp_enter_time_wait(conn);
3297
3298 tcp_out(conn, ACK);
3299 } else {
3300 /* FIN not yet acknowledged, wait for the ACK in CLOSING
3301 */
3302 NET_DBG("conn %p: FIN received, going to CLOSING as no "
3303 "ACK has been received", conn);
3304 tcp_send_timer_cancel(conn);
3305 tcp_out_ext(conn, FIN | ACK, NULL, conn->seq - 1);
3306 next = TCP_CLOSING;
3307 }
3308 verdict = NET_OK;
3309 } else {
3310 if (len > 0) {
3311 if (fin_acked) {
3312 /* Send out a duplicate ACK */
3313 tcp_send_timer_cancel(conn);
3314 tcp_out(conn, ACK);
3315 } else {
3316 /* In FIN1 state
3317 * Send out a duplicate ACK, with the pending FIN
3318 * flag
3319 */
3320 tcp_send_timer_cancel(conn);
3321 tcp_out_ext(conn, FIN | ACK, NULL, conn->seq - 1);
3322 }
3323 verdict = NET_OK;
3324 }
3325 }
3326 }
3327 break;
3328 case TCP_FIN_WAIT_2:
3329 /*
3330 * FIN2:
3331 * Only FIN is relevant in this state, as our FIN was already acknowledged
3332 * - -> TCP_FIN_WAIT_2
3333 * FIN -> TCP_TIME_WAIT
3334 */
3335 if (th) {
3336 /* No tcp_send_timer_cancel call required here, as it has been called
3337 * before entering this state, which is only reachable through the
3338 * tcp_enter_time_wait function.
3339 */
3340
3341 /* Compute if there is new data after our close */
3342 if (tcp_compute_new_length(conn, th, len, false) > 0) {
3343 /* We do not implement half-closed sockets, therefore
3344 * we cannot accept new data after sending our FIN, and
3345 * as we are in sequence we can send a reset now.
3346 */
3347 net_stats_update_tcp_seg_drop(conn->iface);
3348
3349 next = tcp_enter_time_wait(conn);
3350
3351 tcp_out(conn, RST);
3352 break;
3353 }
3354 /*
3355 * There can also be data in the message, so include the packet
3356 * length when checking the sequence number of the FIN flag against our ACK
3357 */
3358 if (FL(&fl, &, FIN, net_tcp_seq_cmp(th_seq(th) + len, conn->ack) == 0)) {
3359 conn_ack(conn, + 1);
3360 NET_DBG("conn %p: FIN received, going to TIME WAIT", conn);
3361
3362 next = tcp_enter_time_wait(conn);
3363
3364 verdict = NET_OK;
3365 tcp_out(conn, ACK);
3366 } else {
3367 if (len > 0) {
3368 /* Send out a duplicate ACK */
3369 tcp_out(conn, ACK);
3370 verdict = NET_OK;
3371 }
3372 }
3373 }
3374 break;
3375 case TCP_CLOSING:
3376 if (th) {
3377 bool fin_acked = false;
3378
3379 /*
3380 * Closing:
3381 * Our FIN has to be acknowledged
3382 * - -> TCP_CLOSING
3383 * ACK -> TCP_TIME_WAIT
3384 */
3385 int32_t new_len = tcp_compute_new_length(conn, th, len, true);
3386
3387 if (new_len > 0) {
3388 /* This should not happen here, as no data can be sent after
3389 * the FIN flag has been sent.
3390 */
3391 NET_ERR("conn: %p, new bytes %d during CLOSING state, "
3392 "sending reset", conn, new_len);
3393 net_stats_update_tcp_seg_drop(conn->iface);
3394
3395 next = tcp_enter_time_wait(conn);
3396
3397 tcp_out(conn, RST);
3398 break;
3399 }
3400
3401 if (FL(&fl, &, ACK, th_ack(th) == conn->seq)) {
3402 NET_DBG("conn %p: FIN acknowledged, going to TIME WAIT "
3403 "state seq %u, ack %u"
3404 , conn, conn->seq, conn->ack);
3405
3406 next = tcp_enter_time_wait(conn);
3407 fin_acked = true;
3408
3409 verdict = NET_OK;
3410 }
3411
3412 /*
3413 * There can also be data in the message, so include the packet
3414 * length in the check against the ACK.
3415 * Since conn->ack was already incremented in TCP_FIN_WAIT_1,
3416 * add 1 to the compared sequence number
3417 */
3418 if ((FL(&fl, &, FIN,
3419 net_tcp_seq_cmp(th_seq(th) + len + 1, conn->ack) == 0)) ||
3420 (len > 0)) {
3421 tcp_send_timer_cancel(conn);
3422 if (fin_acked) {
3423 /* Send out a duplicate ACK */
3424 tcp_out(conn, ACK);
3425 } else {
3426 /* Send out a duplicate ACK, with the pending FIN
3427 * flag
3428 */
3429 tcp_out_ext(conn, FIN | ACK, NULL, conn->seq - 1);
3430 }
3431 verdict = NET_OK;
3432 }
3433 }
3434 break;
3435 case TCP_TIME_WAIT:
3436 if (th) {
3437 int32_t new_len = tcp_compute_new_length(conn, th, len, true);
3438
3439 /* No tcp_send_timer_cancel call required here, as it has been called
3440 * before entering this state, which is only reachable through the
3441 * tcp_enter_time_wait function.
3442 */
3443
3444 if (new_len > 0) {
3445 /* This should not happen here, as no data can be sent after
3446 * the FIN flag has been sent.
3447 */
3448 NET_ERR("conn: %p, new bytes %d during TIME-WAIT state, "
3449 "sending reset", conn, new_len);
3450 net_stats_update_tcp_seg_drop(conn->iface);
3451
3452 tcp_out(conn, RST);
3453 } else {
3454 /* Acknowledge any FIN attempts, in case retransmission took
3455 * place.
3456 */
3457 if ((FL(&fl, &, FIN,
3458 net_tcp_seq_cmp(th_seq(th) + 1, conn->ack) == 0)) ||
3459 (len > 0)) {
3460 tcp_out(conn, ACK);
3461 verdict = NET_OK;
3462 }
3463 }
3464 }
3465 break;
3466 default:
3467 NET_ASSERT(false, "%s is unimplemented",
3468 tcp_state_to_str(conn->state, true));
3469 }
3470
3471 out:
3472 if (pkt) {
3473 if (verdict == NET_OK) {
3474 net_pkt_unref(pkt);
3475 }
3476
3477 pkt = NULL;
3478 }
3479
3480 if (next) {
3481 th = NULL;
3482 conn_state(conn, next);
3483 next = 0;
3484
3485 if (connection_ok) {
3486 conn->in_connect = false;
3487 if (conn->connect_cb) {
3488 conn->connect_cb(conn->context, 0, conn->context->user_data);
3489
3490 /* Make sure the connect_cb is only called once. */
3491 conn->connect_cb = NULL;
3492 }
3493
3494 k_sem_give(&conn->connect_sem);
3495 }
3496
3497 goto next_state;
3498 }
3499
3500 if (conn->context) {
3501 /* If the conn->context is not set, then the connection was
3502 * already closed.
3503 */
3504 conn_handler = (struct net_conn *)conn->context->conn_handler;
3505 }
3506
3507 recv_user_data = conn->recv_user_data;
3508 recv_data_fifo = &conn->recv_data;
3509
3510 k_mutex_unlock(&conn->lock);
3511
3512 /* Pass all the received data stored in recv fifo to the application.
3513 * This is done like this so that we do not have any connection lock
3514 * held.
3515 */
3516 while (conn_handler && atomic_get(&conn->ref_count) > 0 &&
3517 (recv_pkt = k_fifo_get(recv_data_fifo, K_NO_WAIT)) != NULL) {
3518 if (net_context_packet_received(conn_handler, recv_pkt, NULL,
3519 NULL, recv_user_data) ==
3520 NET_DROP) {
3521 /* Application is no longer there, unref the pkt */
3522 tcp_pkt_unref(recv_pkt);
3523 }
3524 }
3525
3526 /* Make sure we close the connection only once by checking connection
3527 * state.
3528 */
3529 if (do_close && conn->state != TCP_UNUSED && conn->state != TCP_CLOSED) {
3530 tcp_conn_close(conn, close_status);
3531 }
3532
3533 return verdict;
3534 }
3535
3536 /* Active connection close: send FIN and go to FIN_WAIT_1 state */
3537 int net_tcp_put(struct net_context *context)
3538 {
3539 struct tcp *conn = context->tcp;
3540
3541 if (!conn) {
3542 return -ENOENT;
3543 }
3544
3545 k_mutex_lock(&conn->lock, K_FOREVER);
3546
3547 NET_DBG("%s", conn ? tcp_conn_state(conn, NULL) : "");
3548 NET_DBG("context %p %s", context,
3549 ({ const char *state = net_context_state(context);
3550 state ? state : "<unknown>"; }));
3551
3552 if (conn && (conn->state == TCP_ESTABLISHED ||
3553 conn->state == TCP_SYN_RECEIVED)) {
3554 /* Send all remaining data if possible. */
3555 if (conn->send_data_total > 0) {
3556 NET_DBG("conn %p pending %zu bytes", conn,
3557 conn->send_data_total);
3558 conn->in_close = true;
3559
3560 /* How long to wait until all the data has been sent?
3561 */
3562 k_work_reschedule_for_queue(&tcp_work_q,
3563 &conn->send_data_timer,
3564 K_MSEC(TCP_RTO_MS));
3565 } else {
3566 int ret;
3567
3568 NET_DBG("TCP connection in %s close, "
3569 "not disposing yet (waiting %dms)",
3570 "active", tcp_fin_timeout_ms);
3571 k_work_reschedule_for_queue(&tcp_work_q,
3572 &conn->fin_timer,
3573 FIN_TIMEOUT);
3574
3575 ret = tcp_out_ext(conn, FIN | ACK, NULL,
3576 conn->seq + conn->unacked_len);
3577 if (ret == 0) {
3578 conn_seq(conn, + 1);
3579 }
3580
3581 conn_state(conn, TCP_FIN_WAIT_1);
3582
3583 keep_alive_timer_stop(conn);
3584 }
3585 } else if (conn && conn->in_connect) {
3586 conn->in_connect = false;
3587 }
3588
3589 k_mutex_unlock(&conn->lock);
3590
3591 tcp_conn_unref(conn);
3592
3593 return 0;
3594 }
3595
3596 int net_tcp_listen(struct net_context *context)
3597 {
3598 /* when created, tcp connections are in state TCP_LISTEN */
3599 net_context_set_state(context, NET_CONTEXT_LISTENING);
3600
3601 return 0;
3602 }
3603
3604 int net_tcp_update_recv_wnd(struct net_context *context, int32_t delta)
3605 {
3606 struct tcp *conn = context->tcp;
3607 int ret;
3608
3609 if (!conn) {
3610 NET_ERR("context->tcp == NULL");
3611 return -EPROTOTYPE;
3612 }
3613
3614 k_mutex_lock(&conn->lock, K_FOREVER);
3615
3616 ret = tcp_update_recv_wnd((struct tcp *)context->tcp, delta);
3617
3618 k_mutex_unlock(&conn->lock);
3619
3620 return ret;
3621 }
3622
3623 int net_tcp_queue(struct net_context *context, const void *data, size_t len,
3624 const struct msghdr *msg)
3625 {
3626 struct tcp *conn = context->tcp;
3627 size_t queued_len = 0;
3628 int ret = 0;
3629
3630 if (!conn || conn->state != TCP_ESTABLISHED) {
3631 return -ENOTCONN;
3632 }
3633
3634 k_mutex_lock(&conn->lock, K_FOREVER);
3635
3636 /* If there is no space to transmit, try at a later time.
3637 * The ZWP will make sure the window becomes available at
3638 * some point in time.
3639 */
3640 if (tcp_window_full(conn)) {
3641 ret = -EAGAIN;
3642 goto out;
3643 }
3644
3645 if (msg) {
3646 len = 0;
3647
3648 for (int i = 0; i < msg->msg_iovlen; i++) {
3649 len += msg->msg_iov[i].iov_len;
3650 }
3651 }
3652
3653 /* Queue no more than TX window permits. It's guaranteed at this point
3654 * that conn->send_data_total is less than conn->send_win, as it was
3655 * verified in the tcp_window_full() check above. As the connection mutex
3656 * is held, their values shall not change in the meantime.
3657 */
3658 len = MIN(conn->send_win - conn->send_data_total, len);
3659
3660 if (msg) {
3661 for (int i = 0; i < msg->msg_iovlen; i++) {
3662 int iovlen = MIN(msg->msg_iov[i].iov_len, len);
3663
3664 ret = tcp_pkt_append(conn->send_data,
3665 msg->msg_iov[i].iov_base,
3666 iovlen);
3667 if (ret < 0) {
3668 if (queued_len == 0) {
3669 goto out;
3670 } else {
3671 break;
3672 }
3673 }
3674
3675 queued_len += iovlen;
3676 len -= iovlen;
3677
3678 if (len == 0) {
3679 break;
3680 }
3681 }
3682 } else {
3683 ret = tcp_pkt_append(conn->send_data, data, len);
3684 if (ret < 0) {
3685 goto out;
3686 }
3687
3688 queued_len = len;
3689 }
3690
3691 conn->send_data_total += queued_len;
3692
3693 /* Successfully queued data for transmission. Even if there's a transmit
3694 * failure now (out-of-buf case), it can be ignored for now, as the
3695 * retransmit timer will take care of queued data retransmission.
3696 */
3697 ret = tcp_send_queued_data(conn);
3698 if (ret < 0 && ret != -ENOBUFS) {
3699 tcp_conn_close(conn, ret);
3700 goto out;
3701 }
3702
3703 if (tcp_window_full(conn)) {
3704 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT);
3705 }
3706
3707 ret = queued_len;
3708 out:
3709 k_mutex_unlock(&conn->lock);
3710
3711 return ret;
3712 }
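/* Usage sketch for the msghdr path above (hypothetical buffers, shown for
 * illustration only):
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = hdr_buf,  .iov_len = sizeof(hdr_buf)  },
 *		{ .iov_base = body_buf, .iov_len = sizeof(body_buf) },
 *	};
 *	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };
 *	int queued = net_tcp_queue(context, NULL, 0, &msg);
 *
 * The return value is the number of bytes actually queued, which can be
 * less than requested when the send window is almost full, or -EAGAIN
 * when it is completely full.
 */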
3713
3714 /* net context is about to send out queued data - inform caller only */
3715 int net_tcp_send_data(struct net_context *context, net_context_send_cb_t cb,
3716 void *user_data)
3717 {
3718 if (cb) {
3719 cb(context, 0, user_data);
3720 }
3721
3722 return 0;
3723 }
3724
3725 /* When connect() is called on a TCP socket, register the socket for incoming
3726 * traffic with net context and give the TCP packet receiving function, which
3727 * in turn will call tcp_in() to deliver the TCP packet to the stack
3728 */
3729 int net_tcp_connect(struct net_context *context,
3730 const struct sockaddr *remote_addr,
3731 struct sockaddr *local_addr,
3732 uint16_t remote_port, uint16_t local_port,
3733 k_timeout_t timeout, net_context_connect_cb_t cb,
3734 void *user_data)
3735 {
3736 struct tcp *conn;
3737 int ret = 0;
3738
3739 NET_DBG("context: %p, local: %s, remote: %s", context,
3740 net_sprint_addr(local_addr->sa_family,
3741 (const void *)&net_sin(local_addr)->sin_addr),
3742 net_sprint_addr(remote_addr->sa_family,
3743 (const void *)&net_sin(remote_addr)->sin_addr));
3744
3745 conn = context->tcp;
3746 conn->iface = net_context_get_iface(context);
3747 tcp_derive_rto(conn);
3748
3749 switch (net_context_get_family(context)) {
3750 const struct in_addr *ip4;
3751 const struct in6_addr *ip6;
3752
3753 case AF_INET:
3754 if (!IS_ENABLED(CONFIG_NET_IPV4)) {
3755 ret = -EINVAL;
3756 goto out;
3757 }
3758
3759 memset(&conn->src, 0, sizeof(struct sockaddr_in));
3760 memset(&conn->dst, 0, sizeof(struct sockaddr_in));
3761
3762 conn->src.sa.sa_family = AF_INET;
3763 conn->dst.sa.sa_family = AF_INET;
3764
3765 conn->dst.sin.sin_port = remote_port;
3766 conn->src.sin.sin_port = local_port;
3767
3768 /* we have to select the source address here as
3769 * net_context_create_ipv4_new() is not called in the packet
3770 * output chain
3771 */
3772 if (net_ipv4_is_addr_unspecified(
3773 &net_sin(local_addr)->sin_addr)) {
3774 ip4 = net_if_ipv4_select_src_addr(
3775 net_context_get_iface(context),
3776 &net_sin(remote_addr)->sin_addr);
3777 net_ipaddr_copy(&conn->src.sin.sin_addr, ip4);
3778 } else {
3779 net_ipaddr_copy(&conn->src.sin.sin_addr,
3780 &net_sin(local_addr)->sin_addr);
3781 }
3782 net_ipaddr_copy(&conn->dst.sin.sin_addr,
3783 &net_sin(remote_addr)->sin_addr);
3784 break;
3785
3786 case AF_INET6:
3787 if (!IS_ENABLED(CONFIG_NET_IPV6)) {
3788 ret = -EINVAL;
3789 goto out;
3790 }
3791
3792 memset(&conn->src, 0, sizeof(struct sockaddr_in6));
3793 memset(&conn->dst, 0, sizeof(struct sockaddr_in6));
3794
3795 conn->src.sin6.sin6_family = AF_INET6;
3796 conn->dst.sin6.sin6_family = AF_INET6;
3797
3798 conn->dst.sin6.sin6_port = remote_port;
3799 conn->src.sin6.sin6_port = local_port;
3800
3801 if (net_ipv6_is_addr_unspecified(
3802 &net_sin6(local_addr)->sin6_addr)) {
3803 ip6 = net_if_ipv6_select_src_addr(
3804 net_context_get_iface(context),
3805 &net_sin6(remote_addr)->sin6_addr);
3806 net_ipaddr_copy(&conn->src.sin6.sin6_addr, ip6);
3807 } else {
3808 net_ipaddr_copy(&conn->src.sin6.sin6_addr,
3809 &net_sin6(local_addr)->sin6_addr);
3810 }
3811 net_ipaddr_copy(&conn->dst.sin6.sin6_addr,
3812 &net_sin6(remote_addr)->sin6_addr);
3813 break;
3814
3815 default:
3816 ret = -EPROTONOSUPPORT;
3817 }
3818
3819 if (!(IS_ENABLED(CONFIG_NET_TEST_PROTOCOL) ||
3820 IS_ENABLED(CONFIG_NET_TEST))) {
3821 conn->seq = tcp_init_isn(&conn->src.sa, &conn->dst.sa);
3822 }
3823
3824 NET_DBG("conn: %p src: %s, dst: %s", conn,
3825 net_sprint_addr(conn->src.sa.sa_family,
3826 (const void *)&conn->src.sin.sin_addr),
3827 net_sprint_addr(conn->dst.sa.sa_family,
3828 (const void *)&conn->dst.sin.sin_addr));
3829
3830 net_context_set_state(context, NET_CONTEXT_CONNECTING);
3831
3832 ret = net_conn_register(net_context_get_proto(context),
3833 net_context_get_family(context),
3834 remote_addr, local_addr,
3835 ntohs(remote_port), ntohs(local_port),
3836 context, tcp_recv, context,
3837 &context->conn_handler);
3838 if (ret < 0) {
3839 goto out;
3840 }
3841
3842 conn->connect_cb = cb;
3843 context->user_data = user_data;
3844
3845 /* Input of a (nonexistent) packet with no flags set will cause
3846 * a TCP connection to be established
3847 */
3848 conn->in_connect = !IS_ENABLED(CONFIG_NET_TEST_PROTOCOL);
3849 (void)tcp_in(conn, NULL);
3850
3851 if (!IS_ENABLED(CONFIG_NET_TEST_PROTOCOL)) {
3852 if ((K_TIMEOUT_EQ(timeout, K_NO_WAIT)) &&
3853 conn->state != TCP_ESTABLISHED) {
3854 ret = -EINPROGRESS;
3855 goto out;
3856 } else if (k_sem_take(&conn->connect_sem, timeout) != 0 &&
3857 conn->state != TCP_ESTABLISHED) {
3858 if (conn->in_connect) {
3859 conn->in_connect = false;
3860 tcp_conn_close(conn, -ETIMEDOUT);
3861 }
3862
3863 ret = -ETIMEDOUT;
3864 goto out;
3865 }
3866 conn->in_connect = false;
3867 }
3868 out:
3869 NET_DBG("conn: %p, ret=%d", conn, ret);
3870
3871 return ret;
3872 }
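/* Usage sketch (hypothetical addresses, for illustration; applications
 * normally reach this through net_context_connect()):
 *
 *	struct sockaddr_in remote = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(4242),
 *	};
 *	struct sockaddr_in local = { .sin_family = AF_INET };
 *
 *	ret = net_tcp_connect(context, (struct sockaddr *)&remote,
 *			      (struct sockaddr *)&local, remote.sin_port,
 *			      local.sin_port, K_SECONDS(5), NULL, NULL);
 *
 * With a K_NO_WAIT timeout the call does not block and returns
 * -EINPROGRESS until the handshake completes.
 */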
3873
3874 int net_tcp_accept(struct net_context *context, net_tcp_accept_cb_t cb,
3875 void *user_data)
3876 {
3877 struct tcp *conn = context->tcp;
3878 struct sockaddr local_addr = { };
3879 uint16_t local_port, remote_port;
3880
3881 if (!conn) {
3882 return -EINVAL;
3883 }
3884
3885 NET_DBG("context: %p, tcp: %p, cb: %p", context, conn, cb);
3886
3887 if (conn->state != TCP_LISTEN) {
3888 return -EINVAL;
3889 }
3890
3891 conn->accept_cb = cb;
3892 local_addr.sa_family = net_context_get_family(context);
3893
3894 switch (local_addr.sa_family) {
3895 struct sockaddr_in *in;
3896 struct sockaddr_in6 *in6;
3897
3898 case AF_INET:
3899 if (!IS_ENABLED(CONFIG_NET_IPV4)) {
3900 return -EINVAL;
3901 }
3902
3903 in = (struct sockaddr_in *)&local_addr;
3904
3905 if (net_sin_ptr(&context->local)->sin_addr) {
3906 net_ipaddr_copy(&in->sin_addr,
3907 net_sin_ptr(&context->local)->sin_addr);
3908 }
3909
3910 in->sin_port =
3911 net_sin((struct sockaddr *)&context->local)->sin_port;
3912 local_port = ntohs(in->sin_port);
3913 remote_port = ntohs(net_sin(&context->remote)->sin_port);
3914
3915 break;
3916
3917 case AF_INET6:
3918 if (!IS_ENABLED(CONFIG_NET_IPV6)) {
3919 return -EINVAL;
3920 }
3921
3922 in6 = (struct sockaddr_in6 *)&local_addr;
3923
3924 if (net_sin6_ptr(&context->local)->sin6_addr) {
3925 net_ipaddr_copy(&in6->sin6_addr,
3926 net_sin6_ptr(&context->local)->sin6_addr);
3927 }
3928
3929 in6->sin6_port =
3930 net_sin6((struct sockaddr *)&context->local)->sin6_port;
3931 local_port = ntohs(in6->sin6_port);
3932 remote_port = ntohs(net_sin6(&context->remote)->sin6_port);
3933
3934 break;
3935
3936 default:
3937 return -EINVAL;
3938 }
3939
3940 context->user_data = user_data;
3941
3942 /* Remove the temporary connection handler and register
3943 	 * a proper one now that we have an established connection.
3944 */
3945 net_conn_unregister(context->conn_handler);
3946
3947 return net_conn_register(net_context_get_proto(context),
3948 local_addr.sa_family,
3949 context->flags & NET_CONTEXT_REMOTE_ADDR_SET ?
3950 &context->remote : NULL,
3951 &local_addr,
3952 remote_port, local_port,
3953 context, tcp_recv, context,
3954 &context->conn_handler);
3955 }
3956
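/* A hedged sketch of how the accept path is typically exercised through the
 * net_context layer (callback and context names are illustrative):
 *
 *	static void accept_cb(struct net_context *new_ctx,
 *			      struct sockaddr *addr, socklen_t addrlen,
 *			      int status, void *user_data)
 *	{
 *		// new_ctx refers to the newly established connection
 *	}
 *
 *	net_context_listen(listen_ctx, 0);
 *	net_context_accept(listen_ctx, accept_cb, K_NO_WAIT, NULL);
 */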
3957 int net_tcp_recv(struct net_context *context, net_context_recv_cb_t cb,
3958 void *user_data)
3959 {
3960 struct tcp *conn = context->tcp;
3961
3962 NET_DBG("context: %p, cb: %p, user_data: %p", context, cb, user_data);
3963
3964 context->recv_cb = cb;
3965
3966 if (conn) {
3967 conn->recv_user_data = user_data;
3968 }
3969
3970 return 0;
3971 }
3972
3973 int net_tcp_finalize(struct net_pkt *pkt, bool force_chksum)
3974 {
3975 NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr);
3976 struct net_tcp_hdr *tcp_hdr;
3977
3978 tcp_hdr = (struct net_tcp_hdr *)net_pkt_get_data(pkt, &tcp_access);
3979 if (!tcp_hdr) {
3980 return -ENOBUFS;
3981 }
3982
3983 tcp_hdr->chksum = 0U;
3984
3985 if (net_if_need_calc_tx_checksum(net_pkt_iface(pkt)) || force_chksum) {
3986 tcp_hdr->chksum = net_calc_chksum_tcp(pkt);
3987 net_pkt_set_chksum_done(pkt, true);
3988 }
3989
3990 return net_pkt_set_data(pkt, &tcp_access);
3991 }
3992
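/* net_tcp_finalize() is normally reached through the IP finalization step
 * when a TCP segment is sent; a minimal sketch, assuming "pkt" already
 * carries IPv4 and TCP headers:
 *
 *	net_pkt_cursor_init(pkt);
 *	ret = net_ipv4_finalize(pkt, IPPROTO_TCP);
 *	// net_ipv4_finalize() calls the function above to fill in
 *	// tcp_hdr->chksum when checksum offload is not available.
 */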
3993 struct net_tcp_hdr *net_tcp_input(struct net_pkt *pkt,
3994 struct net_pkt_data_access *tcp_access)
3995 {
3996 struct net_tcp_hdr *tcp_hdr;
3997
3998 if (IS_ENABLED(CONFIG_NET_TCP_CHECKSUM) &&
3999 (net_if_need_calc_rx_checksum(net_pkt_iface(pkt)) ||
4000 net_pkt_is_ip_reassembled(pkt)) &&
4001 net_calc_chksum_tcp(pkt) != 0U) {
4002 NET_DBG("DROP: checksum mismatch");
4003 goto drop;
4004 }
4005
4006 tcp_hdr = (struct net_tcp_hdr *)net_pkt_get_data(pkt, tcp_access);
4007 if (tcp_hdr && !net_pkt_set_data(pkt, tcp_access)) {
4008 return tcp_hdr;
4009 }
4010
4011 drop:
4012 net_stats_update_tcp_seg_chkerr(net_pkt_iface(pkt));
4013 return NULL;
4014 }
4015
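/* Typical caller pattern for net_tcp_input() inside a connection input
 * handler (a sketch; error handling beyond the NULL check is omitted):
 *
 *	NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr);
 *	struct net_tcp_hdr *hdr = net_tcp_input(pkt, &tcp_access);
 *
 *	if (hdr == NULL) {
 *		return NET_DROP;  // checksum mismatch or truncated header
 *	}
 */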
4016 #if defined(CONFIG_NET_TEST_PROTOCOL)
4017 static enum net_verdict tcp_input(struct net_conn *net_conn,
4018 struct net_pkt *pkt,
4019 union net_ip_header *ip,
4020 union net_proto_header *proto,
4021 void *user_data)
4022 {
4023 struct tcphdr *th = th_get(pkt);
4024 enum net_verdict verdict = NET_DROP;
4025
4026 if (th) {
4027 struct tcp *conn = tcp_conn_search(pkt);
4028
4029 if (conn == NULL && SYN == th_flags(th)) {
4030 struct net_context *context =
4031 tcp_calloc(1, sizeof(struct net_context));
4032 net_tcp_get(context);
4033 net_context_set_family(context, net_pkt_family(pkt));
4034 conn = context->tcp;
4035 tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC);
4036 tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST);
4037 			/* Make an extra reference; the sanity check suite
4038 * will delete the connection explicitly
4039 */
4040 tcp_conn_ref(conn);
4041 }
4042
4043 if (conn) {
4044 conn->iface = pkt->iface;
4045 verdict = tcp_in(conn, pkt);
4046 }
4047 }
4048
4049 return verdict;
4050 }
4051
4052 static size_t tp_tcp_recv_cb(struct tcp *conn, struct net_pkt *pkt)
4053 {
4054 ssize_t len = tcp_data_len(pkt);
4055 struct net_pkt *up = tcp_pkt_clone(pkt);
4056
4057 NET_DBG("pkt: %p, len: %zu", pkt, net_pkt_get_len(pkt));
4058
4059 net_pkt_cursor_init(up);
4060 net_pkt_set_overwrite(up, true);
4061
4062 net_pkt_pull(up, net_pkt_get_len(up) - len);
4063
4064 for (struct net_buf *buf = pkt->buffer; buf != NULL; buf = buf->frags) {
4065 net_tcp_queue(conn->context, buf->data, buf->len);
4066 }
4067
4068 return len;
4069 }
4070
4071 static ssize_t tp_tcp_recv(int fd, void *buf, size_t len, int flags)
4072 {
4073 return 0;
4074 }
4075
4076 static void tp_init(struct tcp *conn, struct tp *tp)
4077 {
4078 struct tp out = {
4079 .msg = "",
4080 .status = "",
4081 .state = tcp_state_to_str(conn->state, true),
4082 .seq = conn->seq,
4083 .ack = conn->ack,
4084 .rcv = "",
4085 .data = "",
4086 .op = "",
4087 };
4088
4089 *tp = out;
4090 }
4091
4092 static void tcp_to_json(struct tcp *conn, void *data, size_t *data_len)
4093 {
4094 struct tp tp;
4095
4096 tp_init(conn, &tp);
4097
4098 tp_encode(&tp, data, data_len);
4099 }
4100
4101 enum net_verdict tp_input(struct net_conn *net_conn,
4102 struct net_pkt *pkt,
4103 union net_ip_header *ip_hdr,
4104 union net_proto_header *proto,
4105 void *user_data)
4106 {
4107 struct net_udp_hdr *uh = net_udp_get_hdr(pkt, NULL);
4108 size_t data_len = ntohs(uh->len) - sizeof(*uh);
4109 struct tcp *conn = tcp_conn_search(pkt);
4110 size_t json_len = 0;
4111 struct tp *tp;
4112 struct tp_new *tp_new;
4113 enum tp_type type;
4114 bool responded = false;
4115 static char buf[512];
4116 enum net_verdict verdict = NET_DROP;
4117
4118 net_pkt_cursor_init(pkt);
4119 net_pkt_set_overwrite(pkt, true);
4120 net_pkt_skip(pkt, net_pkt_ip_hdr_len(pkt) +
4121 net_pkt_ip_opts_len(pkt) + sizeof(*uh));
4122 net_pkt_read(pkt, buf, data_len);
4123 buf[data_len] = '\0';
4124 data_len += 1;
4125
4126 type = json_decode_msg(buf, data_len);
4127
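	/* Re-read the payload for the full decode below; the type probe
	 * above presumably modifies buf in place (an assumption inferred
	 * from the repeated read sequence).
	 */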
4128 data_len = ntohs(uh->len) - sizeof(*uh);
4129
4130 net_pkt_cursor_init(pkt);
4131 net_pkt_set_overwrite(pkt, true);
4132 net_pkt_skip(pkt, net_pkt_ip_hdr_len(pkt) +
4133 net_pkt_ip_opts_len(pkt) + sizeof(*uh));
4134 net_pkt_read(pkt, buf, data_len);
4135 buf[data_len] = '\0';
4136 data_len += 1;
4137
4138 switch (type) {
4139 case TP_CONFIG_REQUEST:
4140 tp_new = json_to_tp_new(buf, data_len);
4141 break;
4142 default:
4143 tp = json_to_tp(buf, data_len);
4144 break;
4145 }
4146
4147 switch (type) {
4148 case TP_COMMAND:
4149 if (is("CONNECT", tp->op)) {
4150 tp_output(pkt->family, pkt->iface, buf, 1);
4151 responded = true;
4152 {
4153 struct net_context *context = tcp_calloc(1,
4154 sizeof(struct net_context));
4155 net_tcp_get(context);
4156 net_context_set_family(context,
4157 net_pkt_family(pkt));
4158 conn = context->tcp;
4159 tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC);
4160 tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST);
4161 conn->iface = pkt->iface;
4162 tcp_conn_ref(conn);
4163 }
4164 conn->seq = tp->seq;
4165 verdict = tcp_in(conn, NULL);
4166 }
4167 if (is("CLOSE", tp->op)) {
4168 tp_trace = false;
4169 {
4170 struct net_context *context;
4171
4172 conn = (void *)sys_slist_peek_head(&tcp_conns);
4173 context = conn->context;
4174 while (tcp_conn_close(conn, 0))
4175 ;
4176 tcp_free(context);
4177 }
4178 tp_mem_stat();
4179 tp_nbuf_stat();
4180 tp_pkt_stat();
4181 tp_seq_stat();
4182 }
4183 if (is("CLOSE2", tp->op)) {
4184 struct tcp *conn =
4185 (void *)sys_slist_peek_head(&tcp_conns);
4186 net_tcp_put(conn->context);
4187 }
4188 if (is("RECV", tp->op)) {
4189 #define HEXSTR_SIZE 64
4190 char hexstr[HEXSTR_SIZE];
4191 ssize_t len = tp_tcp_recv(0, buf, sizeof(buf), 0);
4192
4193 tp_init(conn, tp);
4194 bin2hex(buf, len, hexstr, HEXSTR_SIZE);
4195 tp->data = hexstr;
4196 NET_DBG("%zd = tcp_recv(\"%s\")", len, tp->data);
4197 json_len = sizeof(buf);
4198 tp_encode(tp, buf, &json_len);
4199 }
4200 if (is("SEND", tp->op)) {
4201 ssize_t len = tp_str_to_hex(buf, sizeof(buf), tp->data);
4202 struct tcp *conn =
4203 (void *)sys_slist_peek_head(&tcp_conns);
4204
4205 tp_output(pkt->family, pkt->iface, buf, 1);
4206 responded = true;
4207 NET_DBG("tcp_send(\"%s\")", tp->data);
4208 {
4209 net_tcp_queue(conn->context, buf, len);
4210 }
4211 }
4212 break;
4213 case TP_CONFIG_REQUEST:
4214 tp_new_find_and_apply(tp_new, "tcp_rto", &tcp_rto, TP_INT);
4215 tp_new_find_and_apply(tp_new, "tcp_retries", &tcp_retries,
4216 TP_INT);
4217 tp_new_find_and_apply(tp_new, "tcp_window", &tcp_rx_window,
4218 TP_INT);
4219 tp_new_find_and_apply(tp_new, "tp_trace", &tp_trace, TP_BOOL);
4220 break;
4221 case TP_INTROSPECT_REQUEST:
4222 json_len = sizeof(buf);
4223 conn = (void *)sys_slist_peek_head(&tcp_conns);
4224 tcp_to_json(conn, buf, &json_len);
4225 break;
4226 case TP_DEBUG_STOP: case TP_DEBUG_CONTINUE:
4227 tp_state = tp->type;
4228 break;
4229 default:
4230 NET_ASSERT(false, "Unimplemented tp command: %s", tp->msg);
4231 }
4232
4233 if (json_len) {
4234 tp_output(pkt->family, pkt->iface, buf, json_len);
4235 } else if ((TP_CONFIG_REQUEST == type || TP_COMMAND == type)
4236 && responded == false) {
4237 tp_output(pkt->family, pkt->iface, buf, 1);
4238 }
4239
4240 return verdict;
4241 }
4242
4243 static void test_cb_register(sa_family_t family, uint8_t proto, uint16_t remote_port,
4244 uint16_t local_port, net_conn_cb_t cb)
4245 {
4246 struct net_conn_handle *conn_handle = NULL;
4247 const struct sockaddr addr = { .sa_family = family, };
4248
4249 int ret = net_conn_register(proto,
4250 family,
4251 &addr, /* remote address */
4252 &addr, /* local address */
4253 local_port,
4254 remote_port,
4255 NULL,
4256 cb,
4257 NULL, /* user_data */
4258 &conn_handle);
4259 if (ret < 0) {
4260 NET_ERR("net_conn_register(): %d", ret);
4261 }
4262 }
4263 #endif /* CONFIG_NET_TEST_PROTOCOL */
4264
4265 void net_tcp_foreach(net_tcp_cb_t cb, void *user_data)
4266 {
4267 struct tcp *conn;
4268 struct tcp *tmp;
4269
4270 k_mutex_lock(&tcp_lock, K_FOREVER);
4271
4272 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&tcp_conns, conn, tmp, next) {
4273
4274 if (atomic_get(&conn->ref_count) > 0) {
4275 k_mutex_unlock(&tcp_lock);
4276 cb(conn, user_data);
4277 k_mutex_lock(&tcp_lock, K_FOREVER);
4278 }
4279 }
4280
4281 k_mutex_unlock(&tcp_lock);
4282 }
4283
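/* A minimal net_tcp_foreach() usage sketch (names illustrative). Note that
 * the iterator above releases tcp_lock around the callback, so the callback
 * may safely call back into this module:
 *
 *	static void dump_conn(struct tcp *conn, void *user_data)
 *	{
 *		NET_DBG("conn %p in state %s", conn,
 *			net_tcp_state_str(conn->state));
 *	}
 *
 *	net_tcp_foreach(dump_conn, NULL);
 */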
4284 uint16_t net_tcp_get_supported_mss(const struct tcp *conn)
4285 {
4286 sa_family_t family = net_context_get_family(conn->context);
4287
4288 if (family == AF_INET) {
4289 #if defined(CONFIG_NET_IPV4)
4290 struct net_if *iface = net_context_get_iface(conn->context);
4291 int mss = 0;
4292
4293 if (iface && net_if_get_mtu(iface) >= NET_IPV4TCPH_LEN) {
4294 			/* Detect MSS based on the interface MTU minus the
4295 			 * TCP/IP header size.
4296 			 */
4297 mss = net_if_get_mtu(iface) - NET_IPV4TCPH_LEN;
4298 }
4299
4300 if (mss == 0) {
4301 mss = NET_IPV4_MTU - NET_IPV4TCPH_LEN;
4302 }
4303
4304 return mss;
4305 #else
4306 return 0;
4307 #endif /* CONFIG_NET_IPV4 */
4308 }
4309 #if defined(CONFIG_NET_IPV6)
4310 else if (family == AF_INET6) {
4311 struct net_if *iface = net_context_get_iface(conn->context);
4312 int mss = 0;
4313
4314 if (iface && net_if_get_mtu(iface) >= NET_IPV6TCPH_LEN) {
4315 			/* Detect MSS based on the interface MTU minus the
4316 			 * TCP/IP header size.
4317 			 */
4318 mss = net_if_get_mtu(iface) - NET_IPV6TCPH_LEN;
4319 }
4320
4321 if (mss == 0) {
4322 mss = NET_IPV6_MTU - NET_IPV6TCPH_LEN;
4323 }
4324
4325 return mss;
4326 }
4327 #endif /* CONFIG_NET_IPV6 */
4328
4329 return 0;
4330 }
4331
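/* Worked example for the MSS derivation above: with a standard Ethernet MTU
 * of 1500, IPv4 gives 1500 - NET_IPV4TCPH_LEN (20 + 20) = 1460, and IPv6
 * gives 1500 - NET_IPV6TCPH_LEN (40 + 20) = 1440.
 */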
4332 int net_tcp_set_option(struct net_context *context,
4333 enum tcp_conn_option option,
4334 const void *value, size_t len)
4335 {
4336 int ret = 0;
4337
4338 NET_ASSERT(context);
4339
4340 struct tcp *conn = context->tcp;
4341
4342 NET_ASSERT(conn);
4343
4344 k_mutex_lock(&conn->lock, K_FOREVER);
4345
4346 switch (option) {
4347 case TCP_OPT_NODELAY:
4348 ret = set_tcp_nodelay(conn, value, len);
4349 break;
4350 case TCP_OPT_KEEPALIVE:
4351 ret = set_tcp_keep_alive(conn, value, len);
4352 break;
4353 case TCP_OPT_KEEPIDLE:
4354 ret = set_tcp_keep_idle(conn, value, len);
4355 break;
4356 case TCP_OPT_KEEPINTVL:
4357 ret = set_tcp_keep_intvl(conn, value, len);
4358 break;
4359 case TCP_OPT_KEEPCNT:
4360 ret = set_tcp_keep_cnt(conn, value, len);
4361 break;
4362 }
4363
4364 k_mutex_unlock(&conn->lock);
4365
4366 return ret;
4367 }
4368
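/* A hedged usage sketch for the option setter above; "ctx" stands for an
 * already-created TCP net_context:
 *
 *	int one = 1;
 *
 *	ret = net_tcp_set_option(ctx, TCP_OPT_NODELAY, &one, sizeof(one));
 */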
4369 int net_tcp_get_option(struct net_context *context,
4370 enum tcp_conn_option option,
4371 void *value, size_t *len)
4372 {
4373 int ret = 0;
4374
4375 NET_ASSERT(context);
4376
4377 struct tcp *conn = context->tcp;
4378
4379 NET_ASSERT(conn);
4380
4381 k_mutex_lock(&conn->lock, K_FOREVER);
4382
4383 switch (option) {
4384 case TCP_OPT_NODELAY:
4385 ret = get_tcp_nodelay(conn, value, len);
4386 break;
4387 case TCP_OPT_KEEPALIVE:
4388 ret = get_tcp_keep_alive(conn, value, len);
4389 break;
4390 case TCP_OPT_KEEPIDLE:
4391 ret = get_tcp_keep_idle(conn, value, len);
4392 break;
4393 case TCP_OPT_KEEPINTVL:
4394 ret = get_tcp_keep_intvl(conn, value, len);
4395 break;
4396 case TCP_OPT_KEEPCNT:
4397 ret = get_tcp_keep_cnt(conn, value, len);
4398 break;
4399 }
4400
4401 k_mutex_unlock(&conn->lock);
4402
4403 return ret;
4404 }
4405
4406 const char *net_tcp_state_str(enum tcp_state state)
4407 {
4408 return tcp_state_to_str(state, false);
4409 }
4410
4411 struct k_sem *net_tcp_tx_sem_get(struct net_context *context)
4412 {
4413 struct tcp *conn = context->tcp;
4414
4415 return &conn->tx_sem;
4416 }
4417
4418 struct k_sem *net_tcp_conn_sem_get(struct net_context *context)
4419 {
4420 struct tcp *conn = context->tcp;
4421
4422 return &conn->connect_sem;
4423 }
4424
4425 void net_tcp_init(void)
4426 {
4427 int i;
4428 int rto;
4429 #if defined(CONFIG_NET_TEST_PROTOCOL)
4430 /* Register inputs for TTCN-3 based TCP sanity check */
4431 test_cb_register(AF_INET, IPPROTO_TCP, 4242, 4242, tcp_input);
4432 test_cb_register(AF_INET6, IPPROTO_TCP, 4242, 4242, tcp_input);
4433 test_cb_register(AF_INET, IPPROTO_UDP, 4242, 4242, tp_input);
4434 test_cb_register(AF_INET6, IPPROTO_UDP, 4242, 4242, tp_input);
4435
4436 tcp_recv_cb = tp_tcp_recv_cb;
4437 #endif
4438
4439 #if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
4440 #define THREAD_PRIORITY K_PRIO_COOP(CONFIG_NET_TCP_WORKER_PRIO)
4441 #else
4442 #define THREAD_PRIORITY K_PRIO_PREEMPT(CONFIG_NET_TCP_WORKER_PRIO)
4443 #endif
4444
4445 /* Use private workqueue in order not to block the system work queue.
4446 */
4447 k_work_queue_start(&tcp_work_q, work_q_stack,
4448 K_KERNEL_STACK_SIZEOF(work_q_stack), THREAD_PRIORITY,
4449 NULL);
4450
4451 /* Compute the largest possible retransmission timeout */
4452 tcp_fin_timeout_ms = 0;
4453 rto = tcp_rto;
4454 for (i = 0; i < tcp_retries; i++) {
4455 tcp_fin_timeout_ms += rto;
4456 rto += rto >> 1;
4457 }
4458 	/* Account for the last timeout cycle */
4459 tcp_fin_timeout_ms += tcp_rto;
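	/* A worked example of the sum above, assuming the common defaults of
	 * tcp_rto = 200 ms and tcp_retries = 9: the per-retry RTO grows by a
	 * factor of 1.5 each cycle (rto += rto >> 1), so the loop accumulates
	 * 200 + 300 + 450 + ... + 5122 ≈ 15 s, and the line above adds one
	 * more initial RTO for the last cycle.
	 */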
4460
4461 	/* When CONFIG_NET_TCP_RANDOMIZED_RTO is active, the timeout can be up
	 * to 1.5 times larger in the worst case.
	 */
4462 if (IS_ENABLED(CONFIG_NET_TCP_RANDOMIZED_RTO)) {
4463 tcp_fin_timeout_ms += tcp_fin_timeout_ms >> 1;
4464 }
4465
4466 k_thread_name_set(&tcp_work_q.thread, "tcp_work");
4467 NET_DBG("Workq started. Thread ID: %p", &tcp_work_q.thread);
4468 }
4469