1 /*
2 * Copyright (c) 2018-2020 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <logging/log.h>
8 LOG_MODULE_REGISTER(net_tcp, CONFIG_NET_TCP_LOG_LEVEL);
9
10 #include <stdarg.h>
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <zephyr.h>
14 #include <random/rand32.h>
15
16 #if defined(CONFIG_NET_TCP_ISN_RFC6528)
17 #include <mbedtls/md5.h>
18 #endif
19 #include <net/net_pkt.h>
20 #include <net/net_context.h>
21 #include <net/udp.h>
22 #include "ipv4.h"
23 #include "ipv6.h"
24 #include "connection.h"
25 #include "net_stats.h"
26 #include "net_private.h"
27 #include "tcp_internal.h"
28
29 #define ACK_TIMEOUT_MS CONFIG_NET_TCP_ACK_TIMEOUT
30 #define ACK_TIMEOUT K_MSEC(ACK_TIMEOUT_MS)
31 #define FIN_TIMEOUT_MS MSEC_PER_SEC
32 #define FIN_TIMEOUT K_MSEC(FIN_TIMEOUT_MS)
33
34 static int tcp_rto = CONFIG_NET_TCP_INIT_RETRANSMISSION_TIMEOUT;
35 static int tcp_retries = CONFIG_NET_TCP_RETRY_COUNT;
36 static int tcp_window = NET_IPV6_MTU;
37
38 static sys_slist_t tcp_conns = SYS_SLIST_STATIC_INIT(&tcp_conns);
39
40 static K_MUTEX_DEFINE(tcp_lock);
41
42 static K_MEM_SLAB_DEFINE(tcp_conns_slab, sizeof(struct tcp),
43 CONFIG_NET_MAX_CONTEXTS, 4);
44
45 static struct k_work_q tcp_work_q;
46 static K_KERNEL_STACK_DEFINE(work_q_stack, CONFIG_NET_TCP_WORKQ_STACK_SIZE);
47
48 static void tcp_in(struct tcp *conn, struct net_pkt *pkt);
49
50 int (*tcp_send_cb)(struct net_pkt *pkt) = NULL;
51 size_t (*tcp_recv_cb)(struct tcp *conn, struct net_pkt *pkt) = NULL;
52
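/* The out-of-order receive path stores each fragment's TCP sequence
 * number in the net_buf user data area; these two helpers read and
 * write that value.
 */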
53 static uint32_t tcp_get_seq(struct net_buf *buf)
54 {
55 return *(uint32_t *)net_buf_user_data(buf);
56 }
57
58 static void tcp_set_seq(struct net_buf *buf, uint32_t seq)
59 {
60 *(uint32_t *)net_buf_user_data(buf) = seq;
61 }
62
63 static int tcp_pkt_linearize(struct net_pkt *pkt, size_t pos, size_t len)
64 {
65 struct net_buf *buf, *first = pkt->cursor.buf, *second = first->frags;
66 int ret = 0;
67 size_t len1, len2;
68
69 if (net_pkt_get_len(pkt) < (pos + len)) {
70 NET_ERR("Insufficient packet len=%zd (pos+len=%zu)",
71 net_pkt_get_len(pkt), pos + len);
72 ret = -EINVAL;
73 goto out;
74 }
75
76 buf = net_pkt_get_frag(pkt, TCP_PKT_ALLOC_TIMEOUT);
77
78 if (!buf || buf->size < len) {
79 if (buf) {
80 net_buf_unref(buf);
81 }
82 ret = -ENOBUFS;
83 goto out;
84 }
85
86 net_buf_linearize(buf->data, buf->size, pkt->frags, pos, len);
87 net_buf_add(buf, len);
88
89 len1 = first->len - (pkt->cursor.pos - pkt->cursor.buf->data);
90 len2 = len - len1;
91
92 first->len -= len1;
93
94 while (len2) {
95 size_t pull_len = MIN(second->len, len2);
96 struct net_buf *next;
97
98 len2 -= pull_len;
99 net_buf_pull(second, pull_len);
100 next = second->frags;
101 if (second->len == 0) {
102 net_buf_unref(second);
103 }
104 second = next;
105 }
106
107 buf->frags = second;
108 first->frags = buf;
109 out:
110 return ret;
111 }
112
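/* Return a pointer to the TCP header inside the packet, linearizing
 * the header first if it happens to span two buffer fragments.
 */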
113 static struct tcphdr *th_get(struct net_pkt *pkt)
114 {
115 size_t ip_len = net_pkt_ip_hdr_len(pkt) + net_pkt_ip_opts_len(pkt);
116 struct tcphdr *th = NULL;
117 again:
118 net_pkt_cursor_init(pkt);
119 net_pkt_set_overwrite(pkt, true);
120
121 if (net_pkt_skip(pkt, ip_len) != 0) {
122 goto out;
123 }
124
125 if (!net_pkt_is_contiguous(pkt, sizeof(*th))) {
126 if (tcp_pkt_linearize(pkt, ip_len, sizeof(*th)) < 0) {
127 goto out;
128 }
129
130 goto again;
131 }
132
133 th = net_pkt_cursor_get_pos(pkt);
134 out:
135 return th;
136 }
137
138 static size_t tcp_endpoint_len(sa_family_t af)
139 {
140 return (af == AF_INET) ? sizeof(struct sockaddr_in) :
141 sizeof(struct sockaddr_in6);
142 }
143
144 static int tcp_endpoint_set(union tcp_endpoint *ep, struct net_pkt *pkt,
145 enum pkt_addr src)
146 {
147 int ret = 0;
148
149 switch (net_pkt_family(pkt)) {
150 case AF_INET:
151 if (IS_ENABLED(CONFIG_NET_IPV4)) {
152 struct net_ipv4_hdr *ip = NET_IPV4_HDR(pkt);
153 struct tcphdr *th;
154
155 th = th_get(pkt);
156 if (!th) {
157 return -ENOBUFS;
158 }
159
160 memset(ep, 0, sizeof(*ep));
161
162 ep->sin.sin_port = src == TCP_EP_SRC ? th_sport(th) :
163 th_dport(th);
164 net_ipaddr_copy(&ep->sin.sin_addr,
165 src == TCP_EP_SRC ?
166 &ip->src : &ip->dst);
167 ep->sa.sa_family = AF_INET;
168 } else {
169 ret = -EINVAL;
170 }
171
172 break;
173
174 case AF_INET6:
175 if (IS_ENABLED(CONFIG_NET_IPV6)) {
176 struct net_ipv6_hdr *ip = NET_IPV6_HDR(pkt);
177 struct tcphdr *th;
178
179 th = th_get(pkt);
180 if (!th) {
181 return -ENOBUFS;
182 }
183
184 memset(ep, 0, sizeof(*ep));
185
186 ep->sin6.sin6_port = src == TCP_EP_SRC ? th_sport(th) :
187 th_dport(th);
188 net_ipaddr_copy(&ep->sin6.sin6_addr,
189 src == TCP_EP_SRC ?
190 &ip->src : &ip->dst);
191 ep->sa.sa_family = AF_INET6;
192 } else {
193 ret = -EINVAL;
194 }
195
196 break;
197
198 default:
199 NET_ERR("Unknown address family: %hu", net_pkt_family(pkt));
200 ret = -EINVAL;
201 }
202
203 return ret;
204 }
205
206 static const char *tcp_flags(uint8_t flags)
207 {
208 #define BUF_SIZE 25 /* 6 * 4 + 1 */
209 static char buf[BUF_SIZE];
210 int len = 0;
211
212 buf[0] = '\0';
213
214 if (flags) {
215 if (flags & SYN) {
216 len += snprintk(buf + len, BUF_SIZE - len, "SYN,");
217 }
218 if (flags & FIN) {
219 len += snprintk(buf + len, BUF_SIZE - len, "FIN,");
220 }
221 if (flags & ACK) {
222 len += snprintk(buf + len, BUF_SIZE - len, "ACK,");
223 }
224 if (flags & PSH) {
225 len += snprintk(buf + len, BUF_SIZE - len, "PSH,");
226 }
227 if (flags & RST) {
228 len += snprintk(buf + len, BUF_SIZE - len, "RST,");
229 }
230 if (flags & URG) {
231 len += snprintk(buf + len, BUF_SIZE - len, "URG,");
232 }
233
234 if (len > 0) {
235 buf[len - 1] = '\0'; /* delete the last comma */
236 }
237 }
238 #undef BUF_SIZE
239 return buf;
240 }
241
242 static size_t tcp_data_len(struct net_pkt *pkt)
243 {
244 struct tcphdr *th = th_get(pkt);
245 size_t tcp_options_len = (th_off(th) - 5) * 4;
246 int len = net_pkt_get_len(pkt) - net_pkt_ip_hdr_len(pkt) -
247 net_pkt_ip_opts_len(pkt) - sizeof(*th) - tcp_options_len;
248
249 return len > 0 ? (size_t)len : 0;
250 }
251
252 static const char *tcp_th(struct net_pkt *pkt)
253 {
254 #define BUF_SIZE 80
255 static char buf[BUF_SIZE];
256 int len = 0;
257 struct tcphdr *th = th_get(pkt);
258
259 buf[0] = '\0';
260
261 if (th_off(th) < 5) {
262 len += snprintk(buf + len, BUF_SIZE - len,
263 "bogus th_off: %hu", (uint16_t)th_off(th));
264 goto end;
265 }
266
267 len += snprintk(buf + len, BUF_SIZE - len,
268 "%s Seq=%u", tcp_flags(th_flags(th)), th_seq(th));
269
270 if (th_flags(th) & ACK) {
271 len += snprintk(buf + len, BUF_SIZE - len,
272 " Ack=%u", th_ack(th));
273 }
274
275 len += snprintk(buf + len, BUF_SIZE - len,
276 " Len=%ld", (long)tcp_data_len(pkt));
277 end:
278 #undef BUF_SIZE
279 return buf;
280 }
281
282 #define is_6lo_technology(pkt) \
283 (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6 && \
284 ((IS_ENABLED(CONFIG_NET_L2_BT) && \
285 net_pkt_lladdr_dst(pkt)->type == NET_LINK_BLUETOOTH) || \
286 (IS_ENABLED(CONFIG_NET_L2_IEEE802154) && \
287 net_pkt_lladdr_dst(pkt)->type == NET_LINK_IEEE802154) || \
288 (IS_ENABLED(CONFIG_NET_L2_CANBUS) && \
289 net_pkt_lladdr_dst(pkt)->type == NET_LINK_CANBUS)))
290
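/* Hand a fully built segment to the IP stack (or to the test hook when
 * tcp_send_cb is set). For 6lo technologies the segment is cloned
 * first, since header compression would otherwise modify the queued
 * original that may still be needed for retransmission.
 */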
291 static void tcp_send(struct net_pkt *pkt)
292 {
293 NET_DBG("%s", log_strdup(tcp_th(pkt)));
294
295 tcp_pkt_ref(pkt);
296
297 if (tcp_send_cb) {
298 if (tcp_send_cb(pkt) < 0) {
299 NET_ERR("net_send_data()");
300 tcp_pkt_unref(pkt);
301 }
302 goto out;
303 }
304
305 /* We must have special handling for some network technologies that
306 * tweak the IP protocol headers during packet sending. This happens
307 * with Bluetooth and IEEE 802.15.4, which use IPv6 header compression
308 * (6lo) and alter the sent network packet. So in order to avoid any
309 * corruption of the original data buffer, we must copy the sent data.
310 * For Bluetooth, the fragmentation code will even mangle the data
311 * part of the message, so that needs to be copied too.
312 */
313 if (is_6lo_technology(pkt)) {
314 struct net_pkt *new_pkt;
315
316 new_pkt = tcp_pkt_clone(pkt);
317 if (!new_pkt) {
318 /* The caller of this func assumes that the net_pkt
319 * is consumed by this function. We call unref here
320 * so that the unref at the end of the func will
321 * free the net_pkt.
322 */
323 tcp_pkt_unref(pkt);
324 goto out;
325 }
326
327 if (net_send_data(new_pkt) < 0) {
328 tcp_pkt_unref(new_pkt);
329 }
330
331 /* We simulate sending of the original pkt and unref it like
332 * the device driver would do.
333 */
334 tcp_pkt_unref(pkt);
335 } else {
336 if (net_send_data(pkt) < 0) {
337 NET_ERR("net_send_data()");
338 tcp_pkt_unref(pkt);
339 }
340 }
341 out:
342 tcp_pkt_unref(pkt);
343 }
344
345 static void tcp_send_queue_flush(struct tcp *conn)
346 {
347 struct net_pkt *pkt;
348
349 k_work_cancel_delayable(&conn->send_timer);
350
351 while ((pkt = tcp_slist(conn, &conn->send_queue, get,
352 struct net_pkt, next))) {
353 tcp_pkt_unref(pkt);
354 }
355 }
356
357 #if CONFIG_NET_TCP_LOG_LEVEL >= LOG_LEVEL_DBG
358 #define tcp_conn_unref(conn) \
359 tcp_conn_unref_debug(conn, __func__, __LINE__)
360
361 static int tcp_conn_unref_debug(struct tcp *conn, const char *caller, int line)
362 #else
363 static int tcp_conn_unref(struct tcp *conn)
364 #endif
365 {
366 int ref_count = atomic_get(&conn->ref_count);
367 struct net_pkt *pkt;
368
369 #if CONFIG_NET_TCP_LOG_LEVEL >= LOG_LEVEL_DBG
370 NET_DBG("conn: %p, ref_count=%d (%s():%d)", conn, ref_count,
371 caller, line);
372 #endif
373
374 #if !defined(CONFIG_NET_TEST_PROTOCOL)
375 if (conn->in_connect) {
376 NET_DBG("conn: %p is waiting on connect semaphore", conn);
377 tcp_send_queue_flush(conn);
378 goto out;
379 }
380 #endif /* !CONFIG_NET_TEST_PROTOCOL */
381
382 ref_count = atomic_dec(&conn->ref_count) - 1;
383 if (ref_count != 0) {
384 tp_out(net_context_get_family(conn->context), conn->iface,
385 "TP_TRACE", "event", "CONN_DELETE");
386 return ref_count;
387 }
388
389 k_mutex_lock(&tcp_lock, K_FOREVER);
390
391 /* If there is any pending data, pass it to the application */
392 while ((pkt = k_fifo_get(&conn->recv_data, K_NO_WAIT)) != NULL) {
393 if (net_context_packet_received(
394 (struct net_conn *)conn->context->conn_handler,
395 pkt, NULL, NULL, conn->recv_user_data) ==
396 NET_DROP) {
397 /* Application is no longer there, unref the pkt */
398 tcp_pkt_unref(pkt);
399 }
400 }
401
402 if (conn->context->conn_handler) {
403 net_conn_unregister(conn->context->conn_handler);
404 conn->context->conn_handler = NULL;
405 }
406
407 if (conn->context->recv_cb) {
408 conn->context->recv_cb(conn->context, NULL, NULL, NULL,
409 -ECONNRESET, conn->recv_user_data);
410 }
411
412 conn->context->tcp = NULL;
413
414 net_context_unref(conn->context);
415
416 tcp_send_queue_flush(conn);
417
418 k_work_cancel_delayable(&conn->send_data_timer);
419 tcp_pkt_unref(conn->send_data);
420
421 if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
422 tcp_pkt_unref(conn->queue_recv_data);
423 }
424
425 k_work_cancel_delayable(&conn->timewait_timer);
426 k_work_cancel_delayable(&conn->fin_timer);
427
428 sys_slist_find_and_remove(&tcp_conns, &conn->next);
429
430 memset(conn, 0, sizeof(*conn));
431
432 k_mem_slab_free(&tcp_conns_slab, (void **)&conn);
433
434 k_mutex_unlock(&tcp_lock);
435 out:
436 return ref_count;
437 }
438
439 int net_tcp_unref(struct net_context *context)
440 {
441 int ref_count = 0;
442
443 NET_DBG("context: %p, conn: %p", context, context->tcp);
444
445 if (context->tcp) {
446 ref_count = tcp_conn_unref(context->tcp);
447 }
448
449 return ref_count;
450 }
451
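/* Transmit the segment at the head of the send queue. Segments that
 * carry only ACK/PSH flags, and any RST, are dequeued and sent once;
 * anything else is sent from a clone and kept on the queue until
 * acknowledged, with the retransmission timer armed. Returns true when
 * the retry budget is exhausted and the caller must drop its
 * connection reference.
 */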
452 static bool tcp_send_process_no_lock(struct tcp *conn)
453 {
454 bool unref = false;
455 struct net_pkt *pkt;
456
457 pkt = tcp_slist(conn, &conn->send_queue, peek_head,
458 struct net_pkt, next);
459 if (!pkt) {
460 goto out;
461 }
462
463 NET_DBG("%s %s", log_strdup(tcp_th(pkt)), conn->in_retransmission ?
464 "in_retransmission" : "");
465
466 if (conn->in_retransmission) {
467 if (conn->send_retries > 0) {
468 struct net_pkt *clone = tcp_pkt_clone(pkt);
469
470 if (clone) {
471 tcp_send(clone);
472 conn->send_retries--;
473 }
474 } else {
475 unref = true;
476 goto out;
477 }
478 } else {
479 uint8_t fl = th_get(pkt)->th_flags;
480 bool forget = ACK == fl || PSH == fl || (ACK | PSH) == fl ||
481 RST & fl;
482
483 pkt = forget ? tcp_slist(conn, &conn->send_queue, get,
484 struct net_pkt, next) :
485 tcp_pkt_clone(pkt);
486 if (!pkt) {
487 NET_ERR("net_pkt alloc failure");
488 goto out;
489 }
490
491 tcp_send(pkt);
492
493 if (forget == false &&
494 !k_work_delayable_remaining_get(&conn->send_timer)) {
495 conn->send_retries = tcp_retries;
496 conn->in_retransmission = true;
497 }
498 }
499
500 if (conn->in_retransmission) {
501 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer,
502 K_MSEC(tcp_rto));
503 }
504
505 out:
506 return unref;
507 }
508
509 static void tcp_send_process(struct k_work *work)
510 {
511 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
512 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, send_timer);
513 bool unref;
514
515 k_mutex_lock(&conn->lock, K_FOREVER);
516
517 unref = tcp_send_process_no_lock(conn);
518
519 k_mutex_unlock(&conn->lock);
520
521 if (unref) {
522 tcp_conn_unref(conn);
523 }
524 }
525
526 static void tcp_send_timer_cancel(struct tcp *conn)
527 {
528 if (conn->in_retransmission == false) {
529 return;
530 }
531
532 k_work_cancel_delayable(&conn->send_timer);
533
534 {
535 struct net_pkt *pkt = tcp_slist(conn, &conn->send_queue, get,
536 struct net_pkt, next);
537 if (pkt) {
538 NET_DBG("%s", log_strdup(tcp_th(pkt)));
539 tcp_pkt_unref(pkt);
540 }
541 }
542
543 if (sys_slist_is_empty(&conn->send_queue)) {
544 conn->in_retransmission = false;
545 } else {
546 conn->send_retries = tcp_retries;
547 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_timer,
548 K_MSEC(tcp_rto));
549 }
550 }
551
552 static const char *tcp_state_to_str(enum tcp_state state, bool prefix)
553 {
554 const char *s = NULL;
555 #define _(_x) case _x: do { s = #_x; goto out; } while (0)
556 switch (state) {
557 _(TCP_LISTEN);
558 _(TCP_SYN_SENT);
559 _(TCP_SYN_RECEIVED);
560 _(TCP_ESTABLISHED);
561 _(TCP_FIN_WAIT_1);
562 _(TCP_FIN_WAIT_2);
563 _(TCP_CLOSE_WAIT);
564 _(TCP_CLOSING);
565 _(TCP_LAST_ACK);
566 _(TCP_TIME_WAIT);
567 _(TCP_CLOSED);
568 }
569 #undef _
570 NET_ASSERT(s, "Invalid TCP state: %u", state);
571 out:
572 return prefix ? s : (s + 4);
573 }
574
575 static const char *tcp_conn_state(struct tcp *conn, struct net_pkt *pkt)
576 {
577 #define BUF_SIZE 160
578 static char buf[BUF_SIZE];
579
580 snprintk(buf, BUF_SIZE, "%s [%s Seq=%u Ack=%u]", pkt ? tcp_th(pkt) : "",
581 tcp_state_to_str(conn->state, false),
582 conn->seq, conn->ack);
583 #undef BUF_SIZE
584 return buf;
585 }
586
587 static uint8_t *tcp_options_get(struct net_pkt *pkt, int tcp_options_len,
588 uint8_t *buf, size_t buf_len)
589 {
590 struct net_pkt_cursor backup;
591 int ret;
592
593 net_pkt_cursor_backup(pkt, &backup);
594 net_pkt_cursor_init(pkt);
595 net_pkt_skip(pkt, net_pkt_ip_hdr_len(pkt) + net_pkt_ip_opts_len(pkt) +
596 sizeof(struct tcphdr));
597 ret = net_pkt_read(pkt, buf, MIN(tcp_options_len, buf_len));
598 if (ret < 0) {
599 buf = NULL;
600 }
601
602 net_pkt_cursor_restore(pkt, &backup);
603
604 return buf;
605 }
606
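/* Parse and validate the option list of the received segment,
 * extracting the options we care about (MSS and window scale).
 * Returns false on any malformed option, in which case the caller
 * resets the connection.
 */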
607 static bool tcp_options_check(struct tcp_options *recv_options,
608 struct net_pkt *pkt, ssize_t len)
609 {
610 uint8_t options_buf[40]; /* TCP header max options size is 40 */
611 bool result = (len > 0) && ((len % 4) == 0);
612 uint8_t *options = tcp_options_get(pkt, len, options_buf,
613 sizeof(options_buf));
614 uint8_t opt, opt_len;
615
616 NET_DBG("len=%zd", len);
617
618 recv_options->mss_found = false;
619 recv_options->wnd_found = false;
620
621 for ( ; options && len >= 1; options += opt_len, len -= opt_len) {
622 opt = options[0];
623
624 if (opt == NET_TCP_END_OPT) {
625 break;
626 } else if (opt == NET_TCP_NOP_OPT) {
627 opt_len = 1;
628 continue;
629 } else {
630 if (len < 2) { /* Only END and NOP can have length 1 */
631 NET_ERR("Illegal option %d with length %zd",
632 opt, len);
633 result = false;
634 break;
635 }
636 opt_len = options[1];
637 }
638 NET_DBG("opt: %hu, opt_len: %hu", (uint16_t)opt, (uint16_t)opt_len);
639
640 if (opt_len < 2 || opt_len > len) {
641 result = false;
642 break;
643 }
644
645 switch (opt) {
646 case NET_TCP_MSS_OPT:
647 if (opt_len != 4) {
648 result = false;
649 goto end;
650 }
651
652 recv_options->mss =
653 ntohs(UNALIGNED_GET((uint16_t *)(options + 2)));
654 recv_options->mss_found = true;
655 NET_DBG("MSS=%hu", recv_options->mss);
656 break;
657 case NET_TCP_WINDOW_SCALE_OPT:
658 if (opt_len != 3) {
659 result = false;
660 goto end;
661 }
662
663 recv_options->window = options[2];
664 recv_options->wnd_found = true;
665 break;
666 default:
667 continue;
668 }
669 }
670 end:
671 if (false == result) {
672 NET_WARN("Invalid TCP options");
673 }
674
675 return result;
676 }
677
678 static size_t tcp_check_pending_data(struct tcp *conn, struct net_pkt *pkt,
679 size_t len)
680 {
681 size_t pending_len = 0;
682
683 if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT &&
684 !net_pkt_is_empty(conn->queue_recv_data)) {
685 struct tcphdr *th = th_get(pkt);
686 uint32_t expected_seq = th_seq(th) + len;
687 uint32_t pending_seq;
688
689 pending_seq = tcp_get_seq(conn->queue_recv_data->buffer);
690 if (pending_seq == expected_seq) {
691 pending_len = net_pkt_get_len(conn->queue_recv_data);
692
693 NET_DBG("Found pending data seq %u len %zd",
694 pending_seq, pending_len);
695 net_buf_frag_add(pkt->buffer,
696 conn->queue_recv_data->buffer);
697 conn->queue_recv_data->buffer = NULL;
698
699 k_work_cancel_delayable(&conn->recv_queue_timer);
700 }
701 }
702
703 return pending_len;
704 }
705
706 static int tcp_data_get(struct tcp *conn, struct net_pkt *pkt, size_t *len)
707 {
708 int ret = 0;
709
710 if (tcp_recv_cb) {
711 tcp_recv_cb(conn, pkt);
712 goto out;
713 }
714
715 if (conn->context->recv_cb) {
716 struct net_pkt *up = tcp_pkt_clone(pkt);
717
718 if (!up) {
719 ret = -ENOBUFS;
720 goto out;
721 }
722
723 /* If there is any out-of-order pending data, then pass it
724 * to the application here.
725 */
726 *len += tcp_check_pending_data(conn, up, *len);
727
728 net_pkt_cursor_init(up);
729 net_pkt_set_overwrite(up, true);
730
731 net_pkt_skip(up, net_pkt_get_len(up) - *len);
732
733 /* Do not pass data to the application while the TCP conn
734 * is locked: the app may try to send data from its callback
735 * and then deadlock on the conn lock. Instead, the received
736 * data is placed in a fifo which is flushed in tcp_in()
737 * after the conn has been unlocked.
738 */
739 k_fifo_put(&conn->recv_data, up);
740 }
741 out:
742 return ret;
743 }
744
745 static int tcp_finalize_pkt(struct net_pkt *pkt)
746 {
747 net_pkt_cursor_init(pkt);
748
749 if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
750 return net_ipv4_finalize(pkt, IPPROTO_TCP);
751 }
752
753 if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
754 return net_ipv6_finalize(pkt, IPPROTO_TCP);
755 }
756
757 return -EINVAL;
758 }
759
760 static int tcp_header_add(struct tcp *conn, struct net_pkt *pkt, uint8_t flags,
761 uint32_t seq)
762 {
763 NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct tcphdr);
764 struct tcphdr *th;
765
766 th = (struct tcphdr *)net_pkt_get_data(pkt, &tcp_access);
767 if (!th) {
768 return -ENOBUFS;
769 }
770
771 memset(th, 0, sizeof(struct tcphdr));
772
773 UNALIGNED_PUT(conn->src.sin.sin_port, &th->th_sport);
774 UNALIGNED_PUT(conn->dst.sin.sin_port, &th->th_dport);
775 th->th_off = 5;
776
777 if (conn->send_options.mss_found) {
778 th->th_off++;
779 }
780
781 UNALIGNED_PUT(flags, &th->th_flags);
782 UNALIGNED_PUT(htons(conn->recv_win), &th->th_win);
783 UNALIGNED_PUT(htonl(seq), &th->th_seq);
784
785 if (ACK & flags) {
786 UNALIGNED_PUT(htonl(conn->ack), &th->th_ack);
787 }
788
789 return net_pkt_set_data(pkt, &tcp_access);
790 }
791
792 static int ip_header_add(struct tcp *conn, struct net_pkt *pkt)
793 {
794 if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
795 return net_context_create_ipv4_new(conn->context, pkt,
796 &conn->src.sin.sin_addr,
797 &conn->dst.sin.sin_addr);
798 }
799
800 if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
801 return net_context_create_ipv6_new(conn->context, pkt,
802 &conn->src.sin6.sin6_addr,
803 &conn->dst.sin6.sin6_addr);
804 }
805
806 return -EINVAL;
807 }
808
809 static int net_tcp_set_mss_opt(struct tcp *conn, struct net_pkt *pkt)
810 {
811 struct mss_option {
812 uint32_t option;
813 };
814 NET_PKT_DATA_ACCESS_DEFINE(mss_option, struct mss_option);
815 struct mss_option *mss;
816 uint32_t recv_mss;
817
818 mss = net_pkt_get_data(pkt, &mss_option);
819 if (!mss) {
820 return -ENOBUFS;
821 }
822
823 recv_mss = net_tcp_get_recv_mss(conn);
824 recv_mss |= (NET_TCP_MSS_OPT << 24) | (NET_TCP_MSS_SIZE << 16);
825
826 UNALIGNED_PUT(htonl(recv_mss), (uint32_t *)mss);
827
828 return net_pkt_set_data(pkt, &mss_option);
829 }
830
831 static bool is_destination_local(struct net_pkt *pkt)
832 {
833 if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
834 if (net_ipv4_is_addr_loopback(&NET_IPV4_HDR(pkt)->dst) ||
835 net_ipv4_is_my_addr(&NET_IPV4_HDR(pkt)->dst)) {
836 return true;
837 }
838 }
839
840 if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
841 if (net_ipv6_is_addr_loopback(&NET_IPV6_HDR(pkt)->dst) ||
842 net_ipv6_is_my_addr(&NET_IPV6_HDR(pkt)->dst)) {
843 return true;
844 }
845 }
846
847 return false;
848 }
849
850 static int tcp_out_ext(struct tcp *conn, uint8_t flags, struct net_pkt *data,
851 uint32_t seq)
852 {
853 size_t alloc_len = sizeof(struct tcphdr);
854 struct net_pkt *pkt;
855 int ret = 0;
856
857 if (conn->send_options.mss_found) {
858 alloc_len += sizeof(uint32_t);
859 }
860
861 pkt = tcp_pkt_alloc(conn, alloc_len);
862 if (!pkt) {
863 ret = -ENOBUFS;
864 goto out;
865 }
866
867 if (data) {
868 /* Append the data buffer to the pkt */
869 net_pkt_append_buffer(pkt, data->buffer);
870 data->buffer = NULL;
871 }
872
873 ret = ip_header_add(conn, pkt);
874 if (ret < 0) {
875 tcp_pkt_unref(pkt);
876 goto out;
877 }
878
879 ret = tcp_header_add(conn, pkt, flags, seq);
880 if (ret < 0) {
881 tcp_pkt_unref(pkt);
882 goto out;
883 }
884
885 if (conn->send_options.mss_found) {
886 ret = net_tcp_set_mss_opt(conn, pkt);
887 if (ret < 0) {
888 tcp_pkt_unref(pkt);
889 goto out;
890 }
891 }
892
893 ret = tcp_finalize_pkt(pkt);
894 if (ret < 0) {
895 tcp_pkt_unref(pkt);
896 goto out;
897 }
898
899 NET_DBG("%s", log_strdup(tcp_th(pkt)));
900
901 if (tcp_send_cb) {
902 ret = tcp_send_cb(pkt);
903 goto out;
904 }
905
906 sys_slist_append(&conn->send_queue, &pkt->next);
907
908 if (is_destination_local(pkt)) {
909 /* If the destination is local, we have to let the current
910 * thread finish any state-machine changes before sending
911 * the packet, or we might end up with state inconsistencies
912 */
913 k_work_schedule_for_queue(&tcp_work_q,
914 &conn->send_timer, K_NO_WAIT);
915 } else if (tcp_send_process_no_lock(conn)) {
916 tcp_conn_unref(conn);
917 }
918 out:
919 return ret;
920 }
921
922 static void tcp_out(struct tcp *conn, uint8_t flags)
923 {
924 (void)tcp_out_ext(conn, flags, NULL /* no data */, conn->seq);
925 }
926
927 static int tcp_pkt_pull(struct net_pkt *pkt, size_t len)
928 {
929 int total = net_pkt_get_len(pkt);
930 int ret = 0;
931
932 if (len > total) {
933 ret = -EINVAL;
934 goto out;
935 }
936
937 net_pkt_cursor_init(pkt);
938 net_pkt_set_overwrite(pkt, true);
939 net_pkt_pull(pkt, len);
940 net_pkt_trim_buffer(pkt);
941 out:
942 return ret;
943 }
944
945 static int tcp_pkt_peek(struct net_pkt *to, struct net_pkt *from, size_t pos,
946 size_t len)
947 {
948 net_pkt_cursor_init(to);
949 net_pkt_cursor_init(from);
950
951 if (pos) {
952 net_pkt_set_overwrite(from, true);
953 net_pkt_skip(from, pos);
954 }
955
956 return net_pkt_copy(to, from, len);
957 }
958
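/* The send window is full once the number of unacknowledged bytes in
 * flight reaches the window advertised by the peer.
 */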
959 static bool tcp_window_full(struct tcp *conn)
960 {
961 bool window_full = !(conn->unacked_len < conn->send_win);
962
963 NET_DBG("conn: %p window_full=%hu", conn, window_full);
964
965 return window_full;
966 }
967
968 static int tcp_unsent_len(struct tcp *conn)
969 {
970 int unsent_len;
971
972 if (conn->unacked_len > conn->send_data_total) {
973 NET_ERR("total=%zu, unacked_len=%d",
974 conn->send_data_total, conn->unacked_len);
975 unsent_len = -ERANGE;
976 goto out;
977 }
978
979 unsent_len = conn->send_data_total - conn->unacked_len;
980 out:
981 NET_DBG("unsent_len=%d", unsent_len);
982
983 return unsent_len;
984 }
985
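/* Transmit a single segment from the send_data queue: the next unsent
 * bytes, capped by both the peer's remaining receive window and the
 * connection MSS.
 */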
986 static int tcp_send_data(struct tcp *conn)
987 {
988 int ret = 0;
989 int pos, len;
990 struct net_pkt *pkt;
991
992 pos = conn->unacked_len;
993 len = MIN3(conn->send_data_total - conn->unacked_len,
994 conn->send_win - conn->unacked_len,
995 conn_mss(conn));
996 if (len == 0) {
997 NET_DBG("conn: %p no data to send", conn);
998 ret = -ENODATA;
999 goto out;
1000 }
1001
1002 pkt = tcp_pkt_alloc(conn, len);
1003 if (!pkt) {
1004 NET_ERR("conn: %p packet allocation failed, len=%d", conn, len);
1005 ret = -ENOBUFS;
1006 goto out;
1007 }
1008
1009 ret = tcp_pkt_peek(pkt, conn->send_data, pos, len);
1010 if (ret < 0) {
1011 tcp_pkt_unref(pkt);
1012 ret = -ENOBUFS;
1013 goto out;
1014 }
1015
1016 ret = tcp_out_ext(conn, PSH | ACK, pkt, conn->seq + conn->unacked_len);
1017 if (ret == 0) {
1018 conn->unacked_len += len;
1019
1020 if (conn->data_mode == TCP_DATA_MODE_RESEND) {
1021 net_stats_update_tcp_resent(conn->iface, len);
1022 net_stats_update_tcp_seg_rexmit(conn->iface);
1023 } else {
1024 net_stats_update_tcp_sent(conn->iface, len);
1025 net_stats_update_tcp_seg_sent(conn->iface);
1026 }
1027 }
1028
1029 /* The data we want to send has been moved to the send queue, so we
1030 * can unref the head net_pkt. If there was an error, the packet
1031 * still needs to be unreffed here.
1032 */
1033 tcp_pkt_unref(pkt);
1034
1035 conn_send_data_dump(conn);
1036
1037 out:
1038 return ret;
1039 }
1040
1041 /* Send all queued but unsent data from send_data, packet by packet,
1042 * until the receiver's window is full. */
1043 static int tcp_send_queued_data(struct tcp *conn)
1044 {
1045 int ret = 0;
1046 bool subscribe = false;
1047
1048 if (conn->data_mode == TCP_DATA_MODE_RESEND) {
1049 goto out;
1050 }
1051
1052 while (tcp_unsent_len(conn) > 0) {
1053
1054 if (tcp_window_full(conn)) {
1055 subscribe = true;
1056 break;
1057 }
1058
1059 ret = tcp_send_data(conn);
1060 if (ret < 0) {
1061 break;
1062 }
1063 }
1064
1065 if (conn->unacked_len) {
1066 subscribe = true;
1067 }
1068
1069 if (k_work_delayable_remaining_get(&conn->send_data_timer)) {
1070 subscribe = false;
1071 }
1072
1073 /* If we ran out of network buffers, do not start the retransmit timer
1074 * yet. The socket layer will catch this and resend data if needed.
1075 */
1076 if (ret == -ENOBUFS) {
1077 NET_DBG("No bufs, cancelling retransmit timer");
1078 k_work_cancel_delayable(&conn->send_data_timer);
1079 }
1080
1081 if (subscribe) {
1082 conn->send_data_retries = 0;
1083 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer,
1084 K_MSEC(tcp_rto));
1085 }
1086 out:
1087 return ret;
1088 }
1089
1090 static void tcp_cleanup_recv_queue(struct k_work *work)
1091 {
1092 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
1093 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, recv_queue_timer);
1094
1095 k_mutex_lock(&conn->lock, K_FOREVER);
1096
1097 NET_DBG("Cleanup recv queue conn %p len %zd seq %u", conn,
1098 net_pkt_get_len(conn->queue_recv_data),
1099 tcp_get_seq(conn->queue_recv_data->buffer));
1100
1101 net_buf_unref(conn->queue_recv_data->buffer);
1102 conn->queue_recv_data->buffer = NULL;
1103
1104 k_mutex_unlock(&conn->lock);
1105 }
1106
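/* Retransmit timer handler: rewind unacked_len and resend queued data
 * from the first unacknowledged byte, dropping the connection once
 * tcp_retries is exceeded. Also sends the delayed FIN when an active
 * close was waiting for the remaining data to be acknowledged.
 */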
1107 static void tcp_resend_data(struct k_work *work)
1108 {
1109 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
1110 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, send_data_timer);
1111 bool conn_unref = false;
1112 int ret;
1113
1114 k_mutex_lock(&conn->lock, K_FOREVER);
1115
1116 NET_DBG("send_data_retries=%hu", conn->send_data_retries);
1117
1118 if (conn->send_data_retries >= tcp_retries) {
1119 NET_DBG("conn: %p close, data retransmissions exceeded", conn);
1120 conn_unref = true;
1121 goto out;
1122 }
1123
1124 conn->data_mode = TCP_DATA_MODE_RESEND;
1125 conn->unacked_len = 0;
1126
1127 ret = tcp_send_data(conn);
1128 conn->send_data_retries++;
1129 if (ret == 0) {
1130 if (conn->in_close && conn->send_data_total == 0) {
1131 NET_DBG("TCP connection in active close, "
1132 "not disposing yet (waiting %dms)",
1133 FIN_TIMEOUT_MS);
1134 k_work_reschedule_for_queue(
1135 &tcp_work_q, &conn->fin_timer, FIN_TIMEOUT);
1136
1137 conn_state(conn, TCP_FIN_WAIT_1);
1138
1139 ret = tcp_out_ext(conn, FIN | ACK, NULL,
1140 conn->seq + conn->unacked_len);
1141 if (ret == 0) {
1142 conn_seq(conn, + 1);
1143 }
1144
1145 goto out;
1146 }
1147 } else if (ret == -ENODATA) {
1148 conn->data_mode = TCP_DATA_MODE_SEND;
1149 goto out;
1150 }
1151
1152 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer,
1153 K_MSEC(tcp_rto));
1154
1155 out:
1156 k_mutex_unlock(&conn->lock);
1157
1158 if (conn_unref) {
1159 tcp_conn_unref(conn);
1160 }
1161 }
1162
1163 static void tcp_timewait_timeout(struct k_work *work)
1164 {
1165 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
1166 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, timewait_timer);
1167
1168 NET_DBG("conn: %p %s", conn, log_strdup(tcp_conn_state(conn, NULL)));
1169
1170 /* Extra unref from net_tcp_put() */
1171 net_context_unref(conn->context);
1172 }
1173
1174 static void tcp_establish_timeout(struct tcp *conn)
1175 {
1176 NET_DBG("Did not receive %s in %dms", "ACK", ACK_TIMEOUT_MS);
1177 NET_DBG("conn: %p %s", conn, log_strdup(tcp_conn_state(conn, NULL)));
1178
1179 (void)tcp_conn_unref(conn);
1180 }
1181
1182 static void tcp_fin_timeout(struct k_work *work)
1183 {
1184 struct k_work_delayable *dwork = k_work_delayable_from_work(work);
1185 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, fin_timer);
1186
1187 if (conn->state == TCP_SYN_RECEIVED) {
1188 tcp_establish_timeout(conn);
1189 return;
1190 }
1191
1192 NET_DBG("Did not receive %s in %dms", "FIN", FIN_TIMEOUT_MS);
1193 NET_DBG("conn: %p %s", conn, log_strdup(tcp_conn_state(conn, NULL)));
1194
1195 /* Extra unref from net_tcp_put() */
1196 net_context_unref(conn->context);
1197 }
1198
1199 static void tcp_conn_ref(struct tcp *conn)
1200 {
1201 int ref_count = atomic_inc(&conn->ref_count) + 1;
1202
1203 NET_DBG("conn: %p, ref_count: %d", conn, ref_count);
1204 }
1205
1206 static struct tcp *tcp_conn_alloc(void)
1207 {
1208 struct tcp *conn = NULL;
1209 int ret;
1210
1211 ret = k_mem_slab_alloc(&tcp_conns_slab, (void **)&conn, K_NO_WAIT);
1212 if (ret) {
1213 NET_ERR("Cannot allocate slab");
1214 goto out;
1215 }
1216
1217 memset(conn, 0, sizeof(*conn));
1218
1219 if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
1220 conn->queue_recv_data = tcp_rx_pkt_alloc(conn, 0);
1221 if (conn->queue_recv_data == NULL) {
1222 NET_ERR("Cannot allocate %s queue for conn %p", "recv",
1223 conn);
1224 goto fail;
1225 }
1226 }
1227
1228 conn->send_data = tcp_pkt_alloc(conn, 0);
1229 if (conn->send_data == NULL) {
1230 NET_ERR("Cannot allocate %s queue for conn %p", "send", conn);
1231 goto fail;
1232 }
1233
1234 k_mutex_init(&conn->lock);
1235 k_fifo_init(&conn->recv_data);
1236 k_sem_init(&conn->connect_sem, 0, K_SEM_MAX_LIMIT);
1237
1238 conn->in_connect = false;
1239 conn->state = TCP_LISTEN;
1240 conn->recv_win = tcp_window;
1241
1242 /* The ISN value will be set when we get the connection attempt or
1243 * when trying to create a connection.
1244 */
1245 conn->seq = 0U;
1246
1247 sys_slist_init(&conn->send_queue);
1248
1249 k_work_init_delayable(&conn->send_timer, tcp_send_process);
1250 k_work_init_delayable(&conn->timewait_timer, tcp_timewait_timeout);
1251 k_work_init_delayable(&conn->fin_timer, tcp_fin_timeout);
1252 k_work_init_delayable(&conn->send_data_timer, tcp_resend_data);
1253 k_work_init_delayable(&conn->recv_queue_timer, tcp_cleanup_recv_queue);
1254
1255 tcp_conn_ref(conn);
1256
1257 sys_slist_append(&tcp_conns, &conn->next);
1258 out:
1259 NET_DBG("conn: %p", conn);
1260
1261 return conn;
1262
1263 fail:
1264 if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT && conn->queue_recv_data) {
1265 tcp_pkt_unref(conn->queue_recv_data);
1266 conn->queue_recv_data = NULL;
1267 }
1268
1269 k_mem_slab_free(&tcp_conns_slab, (void **)&conn);
1270 return NULL;
1271 }
1272
1273 int net_tcp_get(struct net_context *context)
1274 {
1275 int ret = 0;
1276 struct tcp *conn;
1277
1278 k_mutex_lock(&tcp_lock, K_FOREVER);
1279
1280 conn = tcp_conn_alloc();
1281 if (conn == NULL) {
1282 ret = -ENOMEM;
1283 goto out;
1284 }
1285
1286 /* Mutually link the net_context and tcp connection */
1287 conn->context = context;
1288 context->tcp = conn;
1289 out:
1290 k_mutex_unlock(&tcp_lock);
1291
1292 return ret;
1293 }
1294
1295 static bool tcp_endpoint_cmp(union tcp_endpoint *ep, struct net_pkt *pkt,
1296 enum pkt_addr which)
1297 {
1298 union tcp_endpoint ep_tmp;
1299
1300 if (tcp_endpoint_set(&ep_tmp, pkt, which) < 0) {
1301 return false;
1302 }
1303
1304 return !memcmp(ep, &ep_tmp, tcp_endpoint_len(ep->sa.sa_family));
1305 }
1306
1307 static bool tcp_conn_cmp(struct tcp *conn, struct net_pkt *pkt)
1308 {
1309 return tcp_endpoint_cmp(&conn->src, pkt, TCP_EP_DST) &&
1310 tcp_endpoint_cmp(&conn->dst, pkt, TCP_EP_SRC);
1311 }
1312
1313 static struct tcp *tcp_conn_search(struct net_pkt *pkt)
1314 {
1315 bool found = false;
1316 struct tcp *conn;
1317 struct tcp *tmp;
1318
1319 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&tcp_conns, conn, tmp, next) {
1320
1321 found = tcp_conn_cmp(conn, pkt);
1322 if (found) {
1323 break;
1324 }
1325 }
1326
1327 return found ? conn : NULL;
1328 }
1329
1330 static struct tcp *tcp_conn_new(struct net_pkt *pkt);
1331
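/* Entry point for segments handed up by the connection layer: match
 * the packet against existing connections, or create a new connection
 * when an initial SYN arrives on a listening context.
 */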
1332 static enum net_verdict tcp_recv(struct net_conn *net_conn,
1333 struct net_pkt *pkt,
1334 union net_ip_header *ip,
1335 union net_proto_header *proto,
1336 void *user_data)
1337 {
1338 struct tcp *conn;
1339 struct tcphdr *th;
1340
1341 ARG_UNUSED(net_conn);
1342 ARG_UNUSED(proto);
1343
1344 conn = tcp_conn_search(pkt);
1345 if (conn) {
1346 goto in;
1347 }
1348
1349 th = th_get(pkt);
1350
1351 if (th_flags(th) & SYN && !(th_flags(th) & ACK)) {
1352 struct tcp *conn_old = ((struct net_context *)user_data)->tcp;
1353
1354 conn = tcp_conn_new(pkt);
1355 if (!conn) {
1356 NET_ERR("Cannot allocate a new TCP connection");
1357 goto in;
1358 }
1359
1360 net_ipaddr_copy(&conn_old->context->remote, &conn->dst.sa);
1361
1362 conn->accepted_conn = conn_old;
1363 }
1364 in:
1365 if (conn) {
1366 tcp_in(conn, pkt);
1367 }
1368
1369 return NET_DROP;
1370 }
1371
1372 static uint32_t seq_scale(uint32_t seq)
1373 {
1374 return seq + (k_ticks_to_ns_floor32(k_uptime_ticks()) >> 6);
1375 }
1376
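/* When CONFIG_NET_TCP_ISN_RFC6528 is enabled, initial sequence numbers
 * follow the RFC 6528 construction
 * ISN = M + F(localip, localport, remoteip, remoteport, secretkey),
 * where F is MD5 here, the secret key is generated once at runtime,
 * and M is the time-derived offset added by seq_scale() above.
 * Otherwise a plain random ISN is used.
 */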
1377 static uint8_t unique_key[16]; /* MD5 128 bits as described in RFC6528 */
1378
1379 static uint32_t tcpv6_init_isn(struct in6_addr *saddr,
1380 struct in6_addr *daddr,
1381 uint16_t sport,
1382 uint16_t dport)
1383 {
1384 struct {
1385 uint8_t key[sizeof(unique_key)];
1386 struct in6_addr saddr;
1387 struct in6_addr daddr;
1388 uint16_t sport;
1389 uint16_t dport;
1390 } buf = {
1391 .saddr = *(struct in6_addr *)saddr,
1392 .daddr = *(struct in6_addr *)daddr,
1393 .sport = sport,
1394 .dport = dport
1395 };
1396
1397 uint8_t hash[16];
1398 static bool once;
1399
1400 if (!once) {
1401 sys_rand_get(unique_key, sizeof(unique_key));
1402 once = true;
1403 }
1404
1405 memcpy(buf.key, unique_key, sizeof(buf.key));
1406
1407 #if IS_ENABLED(CONFIG_NET_TCP_ISN_RFC6528)
1408 mbedtls_md5_ret((const unsigned char *)&buf, sizeof(buf), hash);
1409 #endif
1410
1411 return seq_scale(UNALIGNED_GET((uint32_t *)&hash[0]));
1412 }
1413
1414 static uint32_t tcpv4_init_isn(struct in_addr *saddr,
1415 struct in_addr *daddr,
1416 uint16_t sport,
1417 uint16_t dport)
1418 {
1419 struct {
1420 uint8_t key[sizeof(unique_key)];
1421 struct in_addr saddr;
1422 struct in_addr daddr;
1423 uint16_t sport;
1424 uint16_t dport;
1425 } buf = {
1426 .saddr = *(struct in_addr *)saddr,
1427 .daddr = *(struct in_addr *)daddr,
1428 .sport = sport,
1429 .dport = dport
1430 };
1431
1432 uint8_t hash[16];
1433 static bool once;
1434
1435 if (!once) {
1436 sys_rand_get(unique_key, sizeof(unique_key));
1437 once = true;
1438 }
1439
1440 memcpy(buf.key, unique_key, sizeof(unique_key));
1441
1442 #if IS_ENABLED(CONFIG_NET_TCP_ISN_RFC6528)
1443 mbedtls_md5_ret((const unsigned char *)&buf, sizeof(buf), hash);
1444 #endif
1445
1446 return seq_scale(UNALIGNED_GET((uint32_t *)&hash[0]));
1447 }
1448
1449 static uint32_t tcp_init_isn(struct sockaddr *saddr, struct sockaddr *daddr)
1450 {
1451 if (IS_ENABLED(CONFIG_NET_TCP_ISN_RFC6528)) {
1452 if (IS_ENABLED(CONFIG_NET_IPV6) &&
1453 saddr->sa_family == AF_INET6) {
1454 return tcpv6_init_isn(&net_sin6(saddr)->sin6_addr,
1455 &net_sin6(daddr)->sin6_addr,
1456 net_sin6(saddr)->sin6_port,
1457 net_sin6(daddr)->sin6_port);
1458 } else if (IS_ENABLED(CONFIG_NET_IPV4) &&
1459 saddr->sa_family == AF_INET) {
1460 return tcpv4_init_isn(&net_sin(saddr)->sin_addr,
1461 &net_sin(daddr)->sin_addr,
1462 net_sin(saddr)->sin_port,
1463 net_sin(daddr)->sin_port);
1464 }
1465 }
1466
1467 return sys_rand32_get();
1468 }
1469
1470 /* Create a new tcp connection, as a part of it, create and register
1471 * net_context
1472 */
1473 static struct tcp *tcp_conn_new(struct net_pkt *pkt)
1474 {
1475 struct tcp *conn = NULL;
1476 struct net_context *context = NULL;
1477 sa_family_t af = net_pkt_family(pkt);
1478 struct sockaddr local_addr = { 0 };
1479 int ret;
1480
1481 ret = net_context_get(af, SOCK_STREAM, IPPROTO_TCP, &context);
1482 if (ret < 0) {
1483 NET_ERR("net_context_get(): %d", ret);
1484 goto err;
1485 }
1486
1487 conn = context->tcp;
1488 conn->iface = pkt->iface;
1489
1490 net_context_set_family(conn->context, net_pkt_family(pkt));
1491
1492 if (tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC) < 0) {
1493 net_context_unref(context);
1494 conn = NULL;
1495 goto err;
1496 }
1497
1498 if (tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST) < 0) {
1499 net_context_unref(context);
1500 conn = NULL;
1501 goto err;
1502 }
1503
1504 NET_DBG("conn: src: %s, dst: %s",
1505 log_strdup(net_sprint_addr(conn->src.sa.sa_family,
1506 (const void *)&conn->src.sin.sin_addr)),
1507 log_strdup(net_sprint_addr(conn->dst.sa.sa_family,
1508 (const void *)&conn->dst.sin.sin_addr)));
1509
1510 memcpy(&context->remote, &conn->dst, sizeof(context->remote));
1511 context->flags |= NET_CONTEXT_REMOTE_ADDR_SET;
1512
1513 net_sin_ptr(&context->local)->sin_family = af;
1514
1515 local_addr.sa_family = net_context_get_family(context);
1516
1517 if (IS_ENABLED(CONFIG_NET_IPV6) &&
1518 net_context_get_family(context) == AF_INET6) {
1519 if (net_sin6_ptr(&context->local)->sin6_addr) {
1520 net_ipaddr_copy(&net_sin6(&local_addr)->sin6_addr,
1521 net_sin6_ptr(&context->local)->sin6_addr);
1522 }
1523 } else if (IS_ENABLED(CONFIG_NET_IPV4) &&
1524 net_context_get_family(context) == AF_INET) {
1525 if (net_sin_ptr(&context->local)->sin_addr) {
1526 net_ipaddr_copy(&net_sin(&local_addr)->sin_addr,
1527 net_sin_ptr(&context->local)->sin_addr);
1528 }
1529 }
1530
1531 ret = net_context_bind(context, &local_addr, sizeof(local_addr));
1532 if (ret < 0) {
1533 NET_DBG("Cannot bind accepted context, connection reset");
1534 net_context_unref(context);
1535 conn = NULL;
1536 goto err;
1537 }
1538
1539 if (!(IS_ENABLED(CONFIG_NET_TEST_PROTOCOL) ||
1540 IS_ENABLED(CONFIG_NET_TEST))) {
1541 conn->seq = tcp_init_isn(&local_addr, &context->remote);
1542 }
1543
1544 NET_DBG("context: local: %s, remote: %s",
1545 log_strdup(net_sprint_addr(
1546 local_addr.sa_family,
1547 (const void *)&net_sin(&local_addr)->sin_addr)),
1548 log_strdup(net_sprint_addr(
1549 context->remote.sa_family,
1550 (const void *)&net_sin(&context->remote)->sin_addr)));
1551
1552 ret = net_conn_register(IPPROTO_TCP, af,
1553 &context->remote, &local_addr,
1554 ntohs(conn->dst.sin.sin_port),/* local port */
1555 ntohs(conn->src.sin.sin_port),/* remote port */
1556 context, tcp_recv, context,
1557 &context->conn_handler);
1558 if (ret < 0) {
1559 NET_ERR("net_conn_register(): %d", ret);
1560 net_context_unref(context);
1561 conn = NULL;
1562 goto err;
1563 }
1564 err:
1565 if (!conn) {
1566 net_stats_update_tcp_seg_conndrop(net_pkt_iface(pkt));
1567 }
1568
1569 return conn;
1570 }
1571
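/* A received segment is acceptable when its sequence number falls
 * within the current receive window.
 */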
1572 static bool tcp_validate_seq(struct tcp *conn, struct tcphdr *hdr)
1573 {
1574 return (net_tcp_seq_cmp(th_seq(hdr), conn->ack) >= 0) &&
1575 (net_tcp_seq_cmp(th_seq(hdr), conn->ack + conn->recv_win) < 0);
1576 }
1577
1578 static void print_seq_list(struct net_buf *buf)
1579 {
1580 struct net_buf *tmp = buf;
1581 uint32_t seq;
1582
1583 while (tmp) {
1584 seq = tcp_get_seq(tmp);
1585
1586 NET_DBG("buf %p seq %u len %d", tmp, seq, tmp->len);
1587
1588 tmp = tmp->frags;
1589 }
1590 }
1591
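/* Queue an out-of-order segment. The data is kept only if it is
 * contiguous with data already queued (directly before or directly
 * after it); otherwise the packet is dropped and the peer will
 * retransmit. The recv_queue_timer bounds how long queued data may
 * wait for the gap to be filled.
 */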
1592 static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt,
1593 size_t len, uint32_t seq)
1594 {
1595 uint32_t seq_start = seq;
1596 bool inserted = false;
1597 struct net_buf *tmp;
1598
1599 NET_DBG("conn: %p len %zd seq %u ack %u", conn, len, seq, conn->ack);
1600
1601 tmp = pkt->buffer;
1602
1603 tcp_set_seq(tmp, seq);
1604 seq += tmp->len;
1605 tmp = tmp->frags;
1606
1607 while (tmp) {
1608 tcp_set_seq(tmp, seq);
1609 seq += tmp->len;
1610 tmp = tmp->frags;
1611 }
1612
1613 if (IS_ENABLED(CONFIG_NET_TCP_LOG_LEVEL_DBG)) {
1614 NET_DBG("Queuing data: conn %p", conn);
1615 print_seq_list(pkt->buffer);
1616 }
1617
1618 if (!net_pkt_is_empty(conn->queue_recv_data)) {
1619 /* Insert the data at the correct position in the list. If the
1620 * data is not contiguous with what is already queued, drop this packet.
1621 */
1622 uint32_t pending_seq;
1623
1624 pending_seq = tcp_get_seq(conn->queue_recv_data->buffer);
1625 if (pending_seq == seq) {
1626 /* Put new data before the pending data */
1627 net_buf_frag_add(pkt->buffer,
1628 conn->queue_recv_data->buffer);
1629 conn->queue_recv_data->buffer = pkt->buffer;
1630 inserted = true;
1631 } else {
1632 struct net_buf *last;
1633
1634 last = net_buf_frag_last(conn->queue_recv_data->buffer);
1635 pending_seq = tcp_get_seq(last);
1636
1637 if ((pending_seq + last->len) == seq_start) {
1638 /* Put new data after pending data */
1639 last->frags = pkt->buffer;
1640 inserted = true;
1641 }
1642 }
1643
1644 if (IS_ENABLED(CONFIG_NET_TCP_LOG_LEVEL_DBG)) {
1645 if (inserted) {
1646 NET_DBG("All pending data: conn %p", conn);
1647 print_seq_list(conn->queue_recv_data->buffer);
1648 } else {
1649 NET_DBG("Cannot add new data to queue");
1650 }
1651 }
1652 } else {
1653 net_pkt_append_buffer(conn->queue_recv_data, pkt->buffer);
1654 inserted = true;
1655 }
1656
1657 if (inserted) {
1658 /* We need to keep the received data but free the pkt */
1659 pkt->buffer = NULL;
1660
1661 if (!k_work_delayable_is_pending(&conn->recv_queue_timer)) {
1662 k_work_reschedule_for_queue(
1663 &tcp_work_q, &conn->recv_queue_timer,
1664 K_MSEC(CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT));
1665 }
1666 }
1667 }
1668
1669 static bool tcp_data_received(struct tcp *conn, struct net_pkt *pkt,
1670 size_t *len)
1671 {
1672 if (tcp_data_get(conn, pkt, len) < 0) {
1673 return false;
1674 }
1675
1676 net_stats_update_tcp_seg_recv(conn->iface);
1677 conn_ack(conn, *len);
1678 tcp_out(conn, ACK);
1679
1680 return true;
1681 }
1682
1683 static void tcp_out_of_order_data(struct tcp *conn, struct net_pkt *pkt,
1684 size_t data_len, uint32_t seq)
1685 {
1686 size_t headers_len;
1687
1688 headers_len = net_pkt_get_len(pkt) - data_len;
1689
1690 /* Get rid of protocol headers from the data */
1691 if (tcp_pkt_pull(pkt, headers_len) < 0) {
1692 return;
1693 }
1694
1695 /* We received out-of-order data. Try to queue it.
1696 */
1697 tcp_queue_recv_data(conn, pkt, data_len, seq);
1698 }
1699
1700 /* TCP state machine, everything happens here */
1701 static void tcp_in(struct tcp *conn, struct net_pkt *pkt)
1702 {
1703 struct tcphdr *th = pkt ? th_get(pkt) : NULL;
1704 uint8_t next = 0, fl = 0;
1705 bool do_close = false;
1706 bool connection_ok = false;
1707 size_t tcp_options_len = th ? (th_off(th) - 5) * 4 : 0;
1708 struct net_conn *conn_handler = NULL;
1709 struct net_pkt *recv_pkt;
1710 void *recv_user_data;
1711 struct k_fifo *recv_data_fifo;
1712 size_t len;
1713 int ret;
1714
1715 if (th) {
1716 /* Currently we ignore ECN and CWR flags */
1717 fl = th_flags(th) & ~(ECN | CWR);
1718 }
1719
1720 k_mutex_lock(&conn->lock, K_FOREVER);
1721
1722 NET_DBG("%s", log_strdup(tcp_conn_state(conn, pkt)));
1723
1724 if (th && th_off(th) < 5) {
1725 tcp_out(conn, RST);
1726 conn_state(conn, TCP_CLOSED);
1727 goto next_state;
1728 }
1729
1730 if (FL(&fl, &, RST)) {
1731 /* We only accept an RST packet that has a valid seq field. */
1732 if (!tcp_validate_seq(conn, th)) {
1733 net_stats_update_tcp_seg_rsterr(net_pkt_iface(pkt));
1734 k_mutex_unlock(&conn->lock);
1735 return;
1736 }
1737
1738 net_stats_update_tcp_seg_rst(net_pkt_iface(pkt));
1739 conn_state(conn, TCP_CLOSED);
1740 goto next_state;
1741 }
1742
1743 if (tcp_options_len && !tcp_options_check(&conn->recv_options, pkt,
1744 tcp_options_len)) {
1745 NET_DBG("DROP: Invalid TCP option list");
1746 tcp_out(conn, RST);
1747 conn_state(conn, TCP_CLOSED);
1748 goto next_state;
1749 }
1750
1751 if (th) {
1752 size_t max_win;
1753
1754 conn->send_win = ntohs(th_win(th));
1755
1756 #if defined(CONFIG_NET_TCP_MAX_SEND_WINDOW_SIZE)
1757 if (CONFIG_NET_TCP_MAX_SEND_WINDOW_SIZE) {
1758 max_win = CONFIG_NET_TCP_MAX_SEND_WINDOW_SIZE;
1759 } else
1760 #endif
1761 {
1762 /* Adjust the window so that we do not run out of buffers
1763 * while waiting for ACKs.
1764 */
1765 max_win = (CONFIG_NET_BUF_TX_COUNT *
1766 CONFIG_NET_BUF_DATA_SIZE) / 3;
1767 }
1768
1769 max_win = MAX(max_win, NET_IPV6_MTU);
1770 if ((size_t)conn->send_win > max_win) {
1771 NET_DBG("Lowering send window from %zd to %zd",
1772 (size_t)conn->send_win, max_win);
1773
1774 conn->send_win = max_win;
1775 }
1776 }
1777
1778 next_state:
1779 len = pkt ? tcp_data_len(pkt) : 0;
1780
1781 switch (conn->state) {
1782 case TCP_LISTEN:
1783 if (FL(&fl, ==, SYN)) {
1784 /* Make sure our MSS is also sent in the ACK */
1785 conn->send_options.mss_found = true;
1786 conn_ack(conn, th_seq(th) + 1); /* capture peer's isn */
1787 tcp_out(conn, SYN | ACK);
1788 conn->send_options.mss_found = false;
1789 conn_seq(conn, + 1);
1790 next = TCP_SYN_RECEIVED;
1791
1792 /* Close the connection if we do not receive an ACK in time.
1793 */
1794 k_work_reschedule_for_queue(&tcp_work_q,
1795 &conn->establish_timer,
1796 ACK_TIMEOUT);
1797 } else {
1798 conn->send_options.mss_found = true;
1799 tcp_out(conn, SYN);
1800 conn->send_options.mss_found = false;
1801 conn_seq(conn, + 1);
1802 next = TCP_SYN_SENT;
1803 }
1804 break;
1805 case TCP_SYN_RECEIVED:
1806 if (FL(&fl, &, ACK, th_ack(th) == conn->seq &&
1807 th_seq(th) == conn->ack)) {
1808 k_work_cancel_delayable(&conn->establish_timer);
1809 tcp_send_timer_cancel(conn);
1810 next = TCP_ESTABLISHED;
1811 net_context_set_state(conn->context,
1812 NET_CONTEXT_CONNECTED);
1813
1814 if (conn->accepted_conn) {
1815 if (conn->accepted_conn->accept_cb) {
1816 conn->accepted_conn->accept_cb(
1817 conn->context,
1818 &conn->accepted_conn->context->remote,
1819 sizeof(struct sockaddr), 0,
1820 conn->accepted_conn->context);
1821 }
1822
1823 /* Make sure the accept_cb is only called once.
1824 */
1825 conn->accepted_conn = NULL;
1826 }
1827
1828 if (len) {
1829 if (tcp_data_get(conn, pkt, &len) < 0) {
1830 break;
1831 }
1832 conn_ack(conn, + len);
1833 tcp_out(conn, ACK);
1834 }
1835 }
1836 break;
1837 case TCP_SYN_SENT:
1838 /* If we are in SYN SENT and receive only a SYN without an
1839 * ACK, shouldn't we go to the SYN RECEIVED state? See Figure
1840 * 6 of RFC 793
1841 */
1842 if (FL(&fl, &, SYN | ACK, th && th_ack(th) == conn->seq)) {
1843 tcp_send_timer_cancel(conn);
1844 conn_ack(conn, th_seq(th) + 1);
1845 if (len) {
1846 if (tcp_data_get(conn, pkt, &len) < 0) {
1847 break;
1848 }
1849 conn_ack(conn, + len);
1850 }
1851
1852 next = TCP_ESTABLISHED;
1853 net_context_set_state(conn->context,
1854 NET_CONTEXT_CONNECTED);
1855 tcp_out(conn, ACK);
1856
1857 /* The connection semaphore is released *after*
1858 * we have changed the connection state. This way
1859 * the application can send data and it is queued
1860 * properly even if this thread is running in lower
1861 * priority.
1862 */
1863 connection_ok = true;
1864 }
1865 break;
1866 case TCP_ESTABLISHED:
1867 /* full-close */
1868 if (th && FL(&fl, ==, (FIN | ACK), th_seq(th) == conn->ack)) {
1869 if (net_tcp_seq_cmp(th_ack(th), conn->seq) > 0) {
1870 uint32_t len_acked = th_ack(th) - conn->seq;
1871
1872 conn_seq(conn, + len_acked);
1873 }
1874
1875 conn_ack(conn, + 1);
1876 tcp_out(conn, FIN | ACK);
1877 next = TCP_LAST_ACK;
1878 break;
1879 } else if (th && FL(&fl, ==, FIN, th_seq(th) == conn->ack)) {
1880 conn_ack(conn, + 1);
1881 tcp_out(conn, ACK);
1882 next = TCP_CLOSE_WAIT;
1883 break;
1884 } else if (th && FL(&fl, ==, (FIN | ACK | PSH),
1885 th_seq(th) == conn->ack)) {
1886 if (len) {
1887 if (tcp_data_get(conn, pkt, &len) < 0) {
1888 break;
1889 }
1890 }
1891
1892 conn_ack(conn, + len + 1);
1893 tcp_out(conn, FIN | ACK);
1894 next = TCP_LAST_ACK;
1895 break;
1896 }
1897
1898 if (th && net_tcp_seq_cmp(th_ack(th), conn->seq) > 0) {
1899 uint32_t len_acked = th_ack(th) - conn->seq;
1900
1901 NET_DBG("conn: %p len_acked=%u", conn, len_acked);
1902
1903 if ((conn->send_data_total < len_acked) ||
1904 (tcp_pkt_pull(conn->send_data,
1905 len_acked) < 0)) {
1906 NET_ERR("conn: %p, Invalid len_acked=%u "
1907 "(total=%zu)", conn, len_acked,
1908 conn->send_data_total);
1909 net_stats_update_tcp_seg_drop(conn->iface);
1910 tcp_out(conn, RST);
1911 conn_state(conn, TCP_CLOSED);
1912 break;
1913 }
1914
1915 conn->send_data_total -= len_acked;
1916 if (conn->unacked_len < len_acked) {
1917 conn->unacked_len = 0;
1918 } else {
1919 conn->unacked_len -= len_acked;
1920 }
1921 conn_seq(conn, + len_acked);
1922 net_stats_update_tcp_seg_recv(conn->iface);
1923
1924 conn_send_data_dump(conn);
1925
1926 if (!k_work_delayable_remaining_get(
1927 &conn->send_data_timer)) {
1928 NET_DBG("conn: %p, Missing a subscription "
1929 "of the send_data queue timer", conn);
1930 break;
1931 }
1932 conn->send_data_retries = 0;
1933 k_work_cancel_delayable(&conn->send_data_timer);
1934 if (conn->data_mode == TCP_DATA_MODE_RESEND) {
1935 conn->unacked_len = 0;
1936 }
1937 conn->data_mode = TCP_DATA_MODE_SEND;
1938
1939 /* We are closing the connection, send a FIN to the peer */
1940 if (conn->in_close && conn->send_data_total == 0) {
1941 tcp_send_timer_cancel(conn);
1942 next = TCP_FIN_WAIT_1;
1943
1944 tcp_out(conn, FIN | ACK);
1945 conn_seq(conn, + 1);
1946 break;
1947 }
1948
1949 ret = tcp_send_queued_data(conn);
1950 if (ret < 0 && ret != -ENOBUFS) {
1951 tcp_out(conn, RST);
1952 conn_state(conn, TCP_CLOSED);
1953 break;
1954 }
1955 }
1956
1957 if (th && len) {
1958 if (th_seq(th) == conn->ack) {
1959 if (!tcp_data_received(conn, pkt, &len)) {
1960 break;
1961 }
1962 } else if (net_tcp_seq_greater(conn->ack, th_seq(th))) {
1963 tcp_out(conn, ACK); /* peer has resent */
1964
1965 net_stats_update_tcp_seg_ackerr(conn->iface);
1966 } else if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT) {
1967 tcp_out_of_order_data(conn, pkt, len,
1968 th_seq(th));
1969 }
1970 }
1971 break;
1972 case TCP_CLOSE_WAIT:
1973 tcp_out(conn, FIN);
1974 next = TCP_LAST_ACK;
1975 break;
1976 case TCP_LAST_ACK:
1977 if (th && FL(&fl, ==, ACK, th_seq(th) == conn->ack)) {
1978 tcp_send_timer_cancel(conn);
1979 next = TCP_CLOSED;
1980 }
1981 break;
1982 case TCP_CLOSED:
1983 do_close = true;
1984 break;
1985 case TCP_FIN_WAIT_1:
1986 /* Acknowledge but drop any data */
1987 conn_ack(conn, + len);
1988
1989 if (th && FL(&fl, ==, (FIN | ACK), th_seq(th) == conn->ack)) {
1990 tcp_send_timer_cancel(conn);
1991 conn_ack(conn, + 1);
1992 tcp_out(conn, ACK);
1993 next = TCP_TIME_WAIT;
1994 } else if (th && FL(&fl, ==, FIN, th_seq(th) == conn->ack)) {
1995 tcp_send_timer_cancel(conn);
1996 conn_ack(conn, + 1);
1997 tcp_out(conn, ACK);
1998 next = TCP_CLOSING;
1999 } else if (th && FL(&fl, ==, ACK, th_seq(th) == conn->ack)) {
2000 tcp_send_timer_cancel(conn);
2001 next = TCP_FIN_WAIT_2;
2002 }
2003 break;
2004 case TCP_FIN_WAIT_2:
2005 if (th && (FL(&fl, ==, FIN, th_seq(th) == conn->ack) ||
2006 FL(&fl, ==, FIN | ACK, th_seq(th) == conn->ack) ||
2007 FL(&fl, ==, FIN | PSH | ACK, th_seq(th) == conn->ack))) {
2008 /* Received FIN on FIN_WAIT_2, so cancel the timer */
2009 k_work_cancel_delayable(&conn->fin_timer);
2010
2011 conn_ack(conn, + 1);
2012 tcp_out(conn, ACK);
2013 next = TCP_TIME_WAIT;
2014 }
2015 break;
2016 case TCP_CLOSING:
2017 if (th && FL(&fl, ==, ACK, th_seq(th) == conn->ack)) {
2018 tcp_send_timer_cancel(conn);
2019 next = TCP_TIME_WAIT;
2020 }
2021 break;
2022 case TCP_TIME_WAIT:
2023 k_work_reschedule_for_queue(
2024 &tcp_work_q, &conn->timewait_timer,
2025 K_MSEC(CONFIG_NET_TCP_TIME_WAIT_DELAY));
2026 break;
2027 default:
2028 NET_ASSERT(false, "%s is unimplemented",
2029 tcp_state_to_str(conn->state, true));
2030 }
2031
2032 if (next) {
2033 pkt = NULL;
2034 th = NULL;
2035 conn_state(conn, next);
2036 next = 0;
2037
2038 if (connection_ok) {
2039 k_sem_give(&conn->connect_sem);
2040 }
2041
2042 goto next_state;
2043 }
2044
2045 /* If the conn->context is not set, then the connection was already
2046 * closed.
2047 */
2048 if (conn->context) {
2049 conn_handler = (struct net_conn *)conn->context->conn_handler;
2050 }
2051
2052 recv_user_data = conn->recv_user_data;
2053 recv_data_fifo = &conn->recv_data;
2054
2055 k_mutex_unlock(&conn->lock);
2056
2057 /* Pass all the received data stored in the recv fifo to the
2058 * application. This is done here so that no connection lock is
2059 * held while the application callback runs.
2060 */
2061 while (conn_handler && atomic_get(&conn->ref_count) > 0 &&
2062 (recv_pkt = k_fifo_get(recv_data_fifo, K_NO_WAIT)) != NULL) {
2063 if (net_context_packet_received(conn_handler, recv_pkt, NULL,
2064 NULL, recv_user_data) ==
2065 NET_DROP) {
2066 /* Application is no longer there, unref the pkt */
2067 tcp_pkt_unref(recv_pkt);
2068 }
2069 }
2070
2071 /* We must not try to unref the connection while having a connection
2072 * lock because the unref will try to acquire net_context lock and the
2073 * application might have that lock held already, and that might lead
2074 * to a deadlock.
2075 */
2076 if (do_close) {
2077 tcp_conn_unref(conn);
2078 }
2079 }
2080
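/* For orientation, a sketch (informative only, derived from the handling
 * above) of how an active close typically walks the state machine:
 *
 *   FIN_WAIT_1: our FIN | ACK is in flight
 *     - peer ACKs our FIN            -> FIN_WAIT_2
 *     - peer sends FIN (no ACK yet)  -> CLOSING
 *     - peer sends FIN | ACK         -> TIME_WAIT
 *   FIN_WAIT_2: wait for the peer's FIN, ACK it     -> TIME_WAIT
 *   CLOSING: wait for the ACK of our FIN            -> TIME_WAIT
 *   TIME_WAIT: linger for CONFIG_NET_TCP_TIME_WAIT_DELAY ms, then close.
 */
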
/* Active connection close: send FIN and go to FIN_WAIT_1 state */
int net_tcp_put(struct net_context *context)
{
	struct tcp *conn = context->tcp;

	if (!conn) {
		return -ENOENT;
	}

	k_mutex_lock(&conn->lock, K_FOREVER);

	NET_DBG("%s", log_strdup(tcp_conn_state(conn, NULL)));
	NET_DBG("context %p %s", context,
		log_strdup(({ const char *state = net_context_state(context);
			      state ? state : "<unknown>"; })));

	if (conn->state == TCP_ESTABLISHED) {
		/* Send all remaining data if possible. */
		if (conn->send_data_total > 0) {
			NET_DBG("conn %p pending %zu bytes", conn,
				conn->send_data_total);
			conn->in_close = true;

			/* Keep the send-data timer ticking so the remaining
			 * data gets (re)sent; the FIN goes out once
			 * send_data_total drops to zero (see the in_close
			 * handling in the ESTABLISHED state above).
			 */
			k_work_reschedule_for_queue(&tcp_work_q,
						    &conn->send_data_timer,
						    K_MSEC(tcp_rto));
		} else {
			int ret;

			NET_DBG("TCP connection in active close, not "
				"disposing yet (waiting %dms)", FIN_TIMEOUT_MS);
			k_work_reschedule_for_queue(
				&tcp_work_q, &conn->fin_timer, FIN_TIMEOUT);

			ret = tcp_out_ext(conn, FIN | ACK, NULL,
					  conn->seq + conn->unacked_len);
			if (ret == 0) {
				conn_seq(conn, + 1);
			}

			conn_state(conn, TCP_FIN_WAIT_1);
		}

		/* Make sure we do not delete the connection yet until we have
		 * sent the final ACK.
		 */
		net_context_ref(context);
	}

	k_mutex_unlock(&conn->lock);

	net_context_unref(context);

	return 0;
}

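/* A minimal usage sketch (illustrative; in-tree the call is made from the
 * net_context/socket layer when an application closes a connected TCP
 * socket):
 *
 *	ret = net_tcp_put(context);
 *	if (ret < 0 && ret != -ENOENT) {
 *		... report the error ...
 *	}
 *
 * Note that net_tcp_put() drops one reference to the context on return;
 * while an active close is in progress it holds an extra reference of its
 * own so the connection outlives the final ACK.
 */
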
int net_tcp_listen(struct net_context *context)
{
	/* when created, tcp connections are in state TCP_LISTEN */
	net_context_set_state(context, NET_CONTEXT_LISTENING);

	return 0;
}

int net_tcp_update_recv_wnd(struct net_context *context, int32_t delta)
{
	ARG_UNUSED(context);
	ARG_UNUSED(delta);

	return -EPROTONOSUPPORT;
}

/* net_context queues the outgoing data for the TCP connection */
int net_tcp_queue_data(struct net_context *context, struct net_pkt *pkt)
{
	struct tcp *conn = context->tcp;
	struct net_buf *orig_buf = NULL;
	int ret = 0;
	size_t len;

	if (!conn || conn->state != TCP_ESTABLISHED) {
		return -ENOTCONN;
	}

	k_mutex_lock(&conn->lock, K_FOREVER);

	if (tcp_window_full(conn)) {
		/* Trigger a (re)send if the timer is not already active */
		/* TODO: use k_work_delayable for send_data_timer so we don't
		 * have to directly access the internals of the legacy object.
		 *
		 * NOTE: It is not permitted to access any fields of k_work or
		 * k_work_delayable directly. This replacement does so, but
		 * only as a temporary workaround until the legacy
		 * k_delayed_work structure is replaced with k_work_delayable;
		 * at that point k_work_schedule() can be invoked to cause the
		 * work to be scheduled if it is not already scheduled.
		 *
		 * This solution diverges from the original, which would
		 * invoke the retransmit function directly here. Because that
		 * function is given a k_work pointer, again this cannot be
		 * done without accessing the internal data of the
		 * k_work_delayable structure.
		 *
		 * The original inline retransmission could be supported by
		 * refactoring the work handler to delegate to a function that
		 * takes conn directly, rather than the work item in which
		 * conn is embedded, and calling that function directly here
		 * and in the work handler.
		 */
		(void)k_work_schedule_for_queue(
			&tcp_work_q, &conn->send_data_timer, K_NO_WAIT);

		ret = -EAGAIN;
		goto out;
	}

	len = net_pkt_get_len(pkt);

	if (conn->send_data->buffer) {
		orig_buf = net_buf_frag_last(conn->send_data->buffer);
	}

	net_pkt_append_buffer(conn->send_data, pkt->buffer);
	conn->send_data_total += len;
	NET_DBG("conn: %p Queued %zu bytes (total %zu)", conn, len,
		conn->send_data_total);
	pkt->buffer = NULL;

	ret = tcp_send_queued_data(conn);
	if (ret < 0 && ret != -ENOBUFS) {
		tcp_conn_unref(conn);
		goto out;
	}

	if (ret == -ENOBUFS) {
		/* Restore the original data so that we do not resend the pkt
		 * data multiple times.
		 */
		conn->send_data_total -= len;

		if (orig_buf) {
			pkt->buffer = orig_buf->frags;
			orig_buf->frags = NULL;
		} else {
			pkt->buffer = conn->send_data->buffer;
			conn->send_data->buffer = NULL;
		}
	} else {
		/* Success: release our reference here. On the error paths
		 * above the pkt must not be freed, as it is freed by the
		 * caller in net_context.c:context_sendto().
		 */
		tcp_pkt_unref(pkt);
	}
out:
	k_mutex_unlock(&conn->lock);

	return ret;
}

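/* A minimal caller-side sketch (illustrative; the in-tree caller is
 * net_context.c on the send path). -EAGAIN means the send window is full
 * and the caller should retry later; on -ENOBUFS the pkt has its buffer
 * restored and may likewise be retried or freed by the caller:
 *
 *	int ret = net_tcp_queue_data(context, pkt);
 *
 *	if (ret == -EAGAIN || ret == -ENOBUFS) {
 *		... keep pkt and retry later ...
 *	} else if (ret < 0) {
 *		... connection error, pkt is freed by the caller ...
 *	}
 */
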
/* net context is about to send out queued data - inform caller only */
int net_tcp_send_data(struct net_context *context, net_context_send_cb_t cb,
		      void *user_data)
{
	if (cb) {
		cb(context, 0, user_data);
	}

	return 0;
}

/* When connect() is called on a TCP socket, register the socket with net
 * context for incoming traffic, providing the TCP packet receive function,
 * which in turn calls tcp_in() to deliver the TCP packet to the stack.
 */
int net_tcp_connect(struct net_context *context,
		    const struct sockaddr *remote_addr,
		    struct sockaddr *local_addr,
		    uint16_t remote_port, uint16_t local_port,
		    k_timeout_t timeout, net_context_connect_cb_t cb,
		    void *user_data)
{
	struct tcp *conn;
	int ret = 0;

	NET_DBG("context: %p, local: %s, remote: %s", context,
		log_strdup(net_sprint_addr(
			local_addr->sa_family,
			(const void *)&net_sin(local_addr)->sin_addr)),
		log_strdup(net_sprint_addr(
			remote_addr->sa_family,
			(const void *)&net_sin(remote_addr)->sin_addr)));

	conn = context->tcp;
	conn->iface = net_context_get_iface(context);

	switch (net_context_get_family(context)) {
	const struct in_addr *ip4;
	const struct in6_addr *ip6;

	case AF_INET:
		memset(&conn->src, 0, sizeof(struct sockaddr_in));
		memset(&conn->dst, 0, sizeof(struct sockaddr_in));

		conn->src.sa.sa_family = AF_INET;
		conn->dst.sa.sa_family = AF_INET;

		conn->dst.sin.sin_port = remote_port;
		conn->src.sin.sin_port = local_port;

		/* we have to select the source address here as
		 * net_context_create_ipv4_new() is not called in the packet
		 * output chain
		 */
		ip4 = net_if_ipv4_select_src_addr(
			net_context_get_iface(context),
			&net_sin(remote_addr)->sin_addr);
		conn->src.sin.sin_addr = *ip4;
		net_ipaddr_copy(&conn->dst.sin.sin_addr,
				&net_sin(remote_addr)->sin_addr);
		break;

	case AF_INET6:
		memset(&conn->src, 0, sizeof(struct sockaddr_in6));
		memset(&conn->dst, 0, sizeof(struct sockaddr_in6));

		conn->src.sin6.sin6_family = AF_INET6;
		conn->dst.sin6.sin6_family = AF_INET6;

		conn->dst.sin6.sin6_port = remote_port;
		conn->src.sin6.sin6_port = local_port;

		ip6 = net_if_ipv6_select_src_addr(
			net_context_get_iface(context),
			&net_sin6(remote_addr)->sin6_addr);
		conn->src.sin6.sin6_addr = *ip6;
		net_ipaddr_copy(&conn->dst.sin6.sin6_addr,
				&net_sin6(remote_addr)->sin6_addr);
		break;

	default:
		ret = -EPROTONOSUPPORT;
	}

	if (!(IS_ENABLED(CONFIG_NET_TEST_PROTOCOL) ||
	      IS_ENABLED(CONFIG_NET_TEST))) {
		conn->seq = tcp_init_isn(&conn->src.sa, &conn->dst.sa);
	}

	NET_DBG("conn: %p src: %s, dst: %s", conn,
		log_strdup(net_sprint_addr(conn->src.sa.sa_family,
				(const void *)&conn->src.sin.sin_addr)),
		log_strdup(net_sprint_addr(conn->dst.sa.sa_family,
				(const void *)&conn->dst.sin.sin_addr)));

	net_context_set_state(context, NET_CONTEXT_CONNECTING);

	ret = net_conn_register(net_context_get_ip_proto(context),
				net_context_get_family(context),
				remote_addr, local_addr,
				ntohs(remote_port), ntohs(local_port),
				context, tcp_recv, context,
				&context->conn_handler);
	if (ret < 0) {
		goto out;
	}

	/* Input of a (nonexistent) packet with no flags set will cause
	 * a TCP connection to be established
	 */
	tcp_in(conn, NULL);

	if (!IS_ENABLED(CONFIG_NET_TEST_PROTOCOL)) {
		conn->in_connect = true;

		if (k_sem_take(&conn->connect_sem, timeout) != 0 &&
		    conn->state != TCP_ESTABLISHED) {
			conn->in_connect = false;
			tcp_conn_unref(conn);
			ret = -ETIMEDOUT;
			goto out;
		}
		conn->in_connect = false;
	}
out:
	NET_DBG("conn: %p, ret=%d", conn, ret);

	return ret;
}

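/* Illustrative only: applications normally reach this through
 * net_context_connect() (or the socket connect() call), roughly:
 *
 *	struct sockaddr_in peer = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(4242),
 *	};
 *
 *	net_context_connect(context, (struct sockaddr *)&peer,
 *			    sizeof(peer), connect_cb, K_SECONDS(10),
 *			    user_data);
 *
 * which resolves the local endpoint and ends up calling net_tcp_connect()
 * with both endpoints filled in.
 */
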
int net_tcp_accept(struct net_context *context, net_tcp_accept_cb_t cb,
		   void *user_data)
{
	struct tcp *conn = context->tcp;
	struct sockaddr local_addr = { };
	uint16_t local_port, remote_port;

	if (!conn) {
		return -EINVAL;
	}

	NET_DBG("context: %p, tcp: %p, cb: %p", context, conn, cb);

	if (conn->state != TCP_LISTEN) {
		return -EINVAL;
	}

	conn->accept_cb = cb;
	local_addr.sa_family = net_context_get_family(context);

	switch (local_addr.sa_family) {
	struct sockaddr_in *in;
	struct sockaddr_in6 *in6;

	case AF_INET:
		in = (struct sockaddr_in *)&local_addr;

		if (net_sin_ptr(&context->local)->sin_addr) {
			net_ipaddr_copy(&in->sin_addr,
					net_sin_ptr(&context->local)->sin_addr);
		}

		in->sin_port =
			net_sin((struct sockaddr *)&context->local)->sin_port;
		local_port = ntohs(in->sin_port);
		remote_port = ntohs(net_sin(&context->remote)->sin_port);

		break;

	case AF_INET6:
		in6 = (struct sockaddr_in6 *)&local_addr;

		if (net_sin6_ptr(&context->local)->sin6_addr) {
			net_ipaddr_copy(&in6->sin6_addr,
					net_sin6_ptr(&context->local)->sin6_addr);
		}

		in6->sin6_port =
			net_sin6((struct sockaddr *)&context->local)->sin6_port;
		local_port = ntohs(in6->sin6_port);
		remote_port = ntohs(net_sin6(&context->remote)->sin6_port);

		break;

	default:
		return -EINVAL;
	}

	context->user_data = user_data;

	/* Remove the temporary connection handler and register a proper one
	 * now that we have an established connection.
	 */
	net_conn_unregister(context->conn_handler);

	return net_conn_register(net_context_get_ip_proto(context),
				 local_addr.sa_family,
				 context->flags & NET_CONTEXT_REMOTE_ADDR_SET ?
				 &context->remote : NULL,
				 &local_addr,
				 remote_port, local_port,
				 context, tcp_recv, context,
				 &context->conn_handler);
}

int net_tcp_recv(struct net_context *context, net_context_recv_cb_t cb,
		 void *user_data)
{
	struct tcp *conn = context->tcp;

	NET_DBG("context: %p, cb: %p, user_data: %p", context, cb, user_data);

	context->recv_cb = cb;

	if (conn) {
		conn->recv_user_data = user_data;
	}

	return 0;
}

int net_tcp_finalize(struct net_pkt *pkt)
{
	NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr);
	struct net_tcp_hdr *tcp_hdr;

	tcp_hdr = (struct net_tcp_hdr *)net_pkt_get_data(pkt, &tcp_access);
	if (!tcp_hdr) {
		return -ENOBUFS;
	}

	tcp_hdr->chksum = 0U;

	if (net_if_need_calc_tx_checksum(net_pkt_iface(pkt))) {
		tcp_hdr->chksum = net_calc_chksum_tcp(pkt);
	}

	return net_pkt_set_data(pkt, &tcp_access);
}

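/* Sketch of the expected calling convention (illustrative, assuming the
 * usual TX path): the packet cursor must sit at the start of the TCP
 * header when this is called, e.g.
 *
 *	net_pkt_cursor_init(pkt);
 *	net_pkt_skip(pkt, net_pkt_ip_hdr_len(pkt) +
 *		     net_pkt_ip_opts_len(pkt));
 *	ret = net_tcp_finalize(pkt);
 *
 * after which the checksum field is filled in, if the interface needs a
 * software-computed checksum.
 */
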
struct net_tcp_hdr *net_tcp_input(struct net_pkt *pkt,
				  struct net_pkt_data_access *tcp_access)
{
	struct net_tcp_hdr *tcp_hdr;

	if (IS_ENABLED(CONFIG_NET_TCP_CHECKSUM) &&
	    net_if_need_calc_rx_checksum(net_pkt_iface(pkt)) &&
	    net_calc_chksum_tcp(pkt) != 0U) {
		NET_DBG("DROP: checksum mismatch");
		goto drop;
	}

	tcp_hdr = (struct net_tcp_hdr *)net_pkt_get_data(pkt, tcp_access);
	if (tcp_hdr && !net_pkt_set_data(pkt, tcp_access)) {
		return tcp_hdr;
	}

drop:
	net_stats_update_tcp_seg_chkerr(net_pkt_iface(pkt));
	return NULL;
}

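/* A minimal caller-side sketch (illustrative), mirroring the access
 * pattern used by net_tcp_finalize() above:
 *
 *	NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr);
 *	struct net_tcp_hdr *th = net_tcp_input(pkt, &tcp_access);
 *
 *	if (!th) {
 *		return NET_DROP;  (bad checksum or truncated header)
 *	}
 */
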
#if defined(CONFIG_NET_TEST_PROTOCOL)
static enum net_verdict tcp_input(struct net_conn *net_conn,
				  struct net_pkt *pkt,
				  union net_ip_header *ip,
				  union net_proto_header *proto,
				  void *user_data)
{
	struct tcphdr *th = th_get(pkt);

	if (th) {
		struct tcp *conn = tcp_conn_search(pkt);

		if (conn == NULL && SYN == th_flags(th)) {
			struct net_context *context =
				tcp_calloc(1, sizeof(struct net_context));
			net_tcp_get(context);
			net_context_set_family(context, net_pkt_family(pkt));
			conn = context->tcp;
			tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC);
			tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST);
			/* Make an extra reference; the sanity check suite
			 * will delete the connection explicitly
			 */
			tcp_conn_ref(conn);
		}

		if (conn) {
			conn->iface = pkt->iface;
			tcp_in(conn, pkt);
		}
	}

	return NET_DROP;
}

static size_t tp_tcp_recv_cb(struct tcp *conn, struct net_pkt *pkt)
{
	ssize_t len = tcp_data_len(pkt);
	struct net_pkt *up = tcp_pkt_clone(pkt);

	NET_DBG("pkt: %p, len: %zu", pkt, net_pkt_get_len(pkt));

	net_pkt_cursor_init(up);
	net_pkt_set_overwrite(up, true);

	net_pkt_pull(up, net_pkt_get_len(up) - len);

	net_tcp_queue_data(conn->context, up);

	return len;
}

static ssize_t tp_tcp_recv(int fd, void *buf, size_t len, int flags)
{
	return 0;
}

static void tp_init(struct tcp *conn, struct tp *tp)
{
	struct tp out = {
		.msg = "",
		.status = "",
		.state = tcp_state_to_str(conn->state, true),
		.seq = conn->seq,
		.ack = conn->ack,
		.rcv = "",
		.data = "",
		.op = "",
	};

	*tp = out;
}

static void tcp_to_json(struct tcp *conn, void *data, size_t *data_len)
{
	struct tp tp;

	tp_init(conn, &tp);

	tp_encode(&tp, data, data_len);
}

enum net_verdict tp_input(struct net_conn *net_conn,
			  struct net_pkt *pkt,
			  union net_ip_header *ip_hdr,
			  union net_proto_header *proto,
			  void *user_data)
{
	struct net_udp_hdr *uh = net_udp_get_hdr(pkt, NULL);
	size_t data_len = ntohs(uh->len) - sizeof(*uh);
	struct tcp *conn = tcp_conn_search(pkt);
	size_t json_len = 0;
	struct tp *tp;
	struct tp_new *tp_new;
	enum tp_type type;
	bool responded = false;
	static char buf[512];

	/* Clamp to the response buffer so the NUL terminator below cannot
	 * write past the end of buf.
	 */
	data_len = MIN(data_len, sizeof(buf) - 1);

	net_pkt_cursor_init(pkt);
	net_pkt_set_overwrite(pkt, true);
	net_pkt_skip(pkt, net_pkt_ip_hdr_len(pkt) +
		     net_pkt_ip_opts_len(pkt) + sizeof(*uh));
	net_pkt_read(pkt, buf, data_len);
	buf[data_len] = '\0';
	data_len += 1;

	type = json_decode_msg(buf, data_len);

	/* Decoding may modify buf in place, so re-read the payload before
	 * the type-specific decode below.
	 */
	data_len = MIN(ntohs(uh->len) - sizeof(*uh), sizeof(buf) - 1);

	net_pkt_cursor_init(pkt);
	net_pkt_set_overwrite(pkt, true);
	net_pkt_skip(pkt, net_pkt_ip_hdr_len(pkt) +
		     net_pkt_ip_opts_len(pkt) + sizeof(*uh));
	net_pkt_read(pkt, buf, data_len);
	buf[data_len] = '\0';
	data_len += 1;

	switch (type) {
	case TP_CONFIG_REQUEST:
		tp_new = json_to_tp_new(buf, data_len);
		break;
	default:
		tp = json_to_tp(buf, data_len);
		break;
	}

	switch (type) {
	case TP_COMMAND:
		if (is("CONNECT", tp->op)) {
			tp_output(pkt->family, pkt->iface, buf, 1);
			responded = true;
			{
				struct net_context *context = tcp_calloc(1,
						sizeof(struct net_context));
				net_tcp_get(context);
				net_context_set_family(context,
						       net_pkt_family(pkt));
				conn = context->tcp;
				tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC);
				tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST);
				conn->iface = pkt->iface;
				tcp_conn_ref(conn);
			}
			conn->seq = tp->seq;
			tcp_in(conn, NULL);
		}
		if (is("CLOSE", tp->op)) {
			tp_trace = false;
			{
				struct net_context *context;

				conn = (void *)sys_slist_peek_head(&tcp_conns);
				context = conn->context;
				while (tcp_conn_unref(conn))
					;
				tcp_free(context);
			}
			tp_mem_stat();
			tp_nbuf_stat();
			tp_pkt_stat();
			tp_seq_stat();
		}
		if (is("CLOSE2", tp->op)) {
			struct tcp *conn =
				(void *)sys_slist_peek_head(&tcp_conns);
			net_tcp_put(conn->context);
		}
		if (is("RECV", tp->op)) {
#define HEXSTR_SIZE 64
			char hexstr[HEXSTR_SIZE];
			ssize_t len = tp_tcp_recv(0, buf, sizeof(buf), 0);

			tp_init(conn, tp);
			bin2hex(buf, len, hexstr, HEXSTR_SIZE);
			tp->data = hexstr;
			NET_DBG("%zd = tcp_recv(\"%s\")", len, tp->data);
			json_len = sizeof(buf);
			tp_encode(tp, buf, &json_len);
		}
		if (is("SEND", tp->op)) {
			ssize_t len = tp_str_to_hex(buf, sizeof(buf), tp->data);
			struct tcp *conn =
				(void *)sys_slist_peek_head(&tcp_conns);

			tp_output(pkt->family, pkt->iface, buf, 1);
			responded = true;
			NET_DBG("tcp_send(\"%s\")", tp->data);
			{
				struct net_pkt *data_pkt;

				data_pkt = tcp_pkt_alloc(conn, len);
				net_pkt_write(data_pkt, buf, len);
				net_pkt_cursor_init(data_pkt);
				net_tcp_queue_data(conn->context, data_pkt);
			}
		}
		break;
	case TP_CONFIG_REQUEST:
		tp_new_find_and_apply(tp_new, "tcp_rto", &tcp_rto, TP_INT);
		tp_new_find_and_apply(tp_new, "tcp_retries", &tcp_retries,
				      TP_INT);
		tp_new_find_and_apply(tp_new, "tcp_window", &tcp_window,
				      TP_INT);
		tp_new_find_and_apply(tp_new, "tp_trace", &tp_trace, TP_BOOL);
		break;
	case TP_INTROSPECT_REQUEST:
		json_len = sizeof(buf);
		conn = (void *)sys_slist_peek_head(&tcp_conns);
		tcp_to_json(conn, buf, &json_len);
		break;
	case TP_DEBUG_STOP: case TP_DEBUG_CONTINUE:
		tp_state = tp->type;
		break;
	default:
		NET_ASSERT(false, "Unimplemented tp command: %s", tp->msg);
	}

	if (json_len) {
		tp_output(pkt->family, pkt->iface, buf, json_len);
	} else if ((TP_CONFIG_REQUEST == type || TP_COMMAND == type)
		   && responded == false) {
		tp_output(pkt->family, pkt->iface, buf, 1);
	}

	return NET_DROP;
}

static void test_cb_register(sa_family_t family, uint8_t proto,
			     uint16_t remote_port, uint16_t local_port,
			     net_conn_cb_t cb)
{
	struct net_conn_handle *conn_handle = NULL;
	const struct sockaddr addr = { .sa_family = family, };

	int ret = net_conn_register(proto,
				    family,
				    &addr, /* remote address */
				    &addr, /* local address */
				    local_port,
				    remote_port,
				    NULL,
				    cb,
				    NULL, /* user_data */
				    &conn_handle);
	if (ret < 0) {
		NET_ERR("net_conn_register(): %d", ret);
	}
}
#endif /* CONFIG_NET_TEST_PROTOCOL */

void net_tcp_foreach(net_tcp_cb_t cb, void *user_data)
{
	struct tcp *conn;
	struct tcp *tmp;

	k_mutex_lock(&tcp_lock, K_FOREVER);

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&tcp_conns, conn, tmp, next) {
		if (atomic_get(&conn->ref_count) > 0) {
			/* Drop the list lock around the callback so that it
			 * may safely call back into the TCP code.
			 */
			k_mutex_unlock(&tcp_lock);
			cb(conn, user_data);
			k_mutex_lock(&tcp_lock, K_FOREVER);
		}
	}

	k_mutex_unlock(&tcp_lock);
}

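/* Illustrative usage (hypothetical callback name):
 *
 *	static void dump_conn(struct tcp *conn, void *user_data)
 *	{
 *		NET_DBG("conn %p in state %s", conn,
 *			net_tcp_state_str(conn->state));
 *	}
 *
 *	net_tcp_foreach(dump_conn, NULL);
 *
 * The iteration is safe against removal of the current node, but note that
 * the callback runs without the list lock held, as described above.
 */
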
uint16_t net_tcp_get_recv_mss(const struct tcp *conn)
{
	sa_family_t family = net_context_get_family(conn->context);

	if (family == AF_INET) {
#if defined(CONFIG_NET_IPV4)
		struct net_if *iface = net_context_get_iface(conn->context);

		if (iface && net_if_get_mtu(iface) >= NET_IPV4TCPH_LEN) {
			/* Detect MSS based on interface MTU minus "TCP,IP
			 * header size"
			 */
			return net_if_get_mtu(iface) - NET_IPV4TCPH_LEN;
		}
#else
		return 0;
#endif /* CONFIG_NET_IPV4 */
	}
#if defined(CONFIG_NET_IPV6)
	else if (family == AF_INET6) {
		struct net_if *iface = net_context_get_iface(conn->context);
		int mss = 0;

		if (iface && net_if_get_mtu(iface) >= NET_IPV6TCPH_LEN) {
			/* Detect MSS based on interface MTU minus "TCP,IP
			 * header size"
			 */
			mss = net_if_get_mtu(iface) - NET_IPV6TCPH_LEN;
		}

		if (mss < NET_IPV6_MTU) {
			mss = NET_IPV6_MTU;
		}

		return mss;
	}
#endif /* CONFIG_NET_IPV6 */

	return 0;
}

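/* Worked example (assuming the usual header sizes without options, i.e.
 * NET_IPV4TCPH_LEN = 20 + 20 = 40 and NET_IPV6TCPH_LEN = 40 + 20 = 60):
 * on an Ethernet interface with a 1500-byte MTU this advertises an MSS of
 * 1500 - 40 = 1460 for IPv4 and 1500 - 60 = 1440 for IPv6. For IPv6 the
 * result is additionally floored at NET_IPV6_MTU (1280).
 */
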
const char *net_tcp_state_str(enum tcp_state state)
{
	return tcp_state_to_str(state, false);
}

void net_tcp_init(void)
{
#if defined(CONFIG_NET_TEST_PROTOCOL)
	/* Register inputs for TTCN-3 based TCP2 sanity check */
	test_cb_register(AF_INET, IPPROTO_TCP, 4242, 4242, tcp_input);
	test_cb_register(AF_INET6, IPPROTO_TCP, 4242, 4242, tcp_input);
	test_cb_register(AF_INET, IPPROTO_UDP, 4242, 4242, tp_input);
	test_cb_register(AF_INET6, IPPROTO_UDP, 4242, 4242, tp_input);

	tcp_recv_cb = tp_tcp_recv_cb;
#endif

#if IS_ENABLED(CONFIG_NET_TC_THREAD_COOPERATIVE)
#define THREAD_PRIORITY K_PRIO_COOP(0)
#else
#define THREAD_PRIORITY K_PRIO_PREEMPT(0)
#endif

	/* Use a private work queue in order not to block the system work
	 * queue.
	 */
	k_work_queue_start(&tcp_work_q, work_q_stack,
			   K_KERNEL_STACK_SIZEOF(work_q_stack),
			   THREAD_PRIORITY, NULL);

	k_thread_name_set(&tcp_work_q.thread, "tcp_work");
	NET_DBG("Workq started. Thread ID: %p", &tcp_work_q.thread);
}