1 /* tcp.c - TCP specific code for echo server */
2 
3 /*
4  * Copyright (c) 2017 Intel Corporation.
5  * Copyright (c) 2018 Nordic Semiconductor ASA.
6  *
7  * SPDX-License-Identifier: Apache-2.0
8  */
9 
10 #include <zephyr/logging/log.h>
11 LOG_MODULE_DECLARE(net_echo_server_sample, LOG_LEVEL_DBG);
12 
13 #include <zephyr/kernel.h>
14 #include <errno.h>
15 #include <stdio.h>
16 
17 #include <zephyr/posix/sys/socket.h>
18 #include <zephyr/posix/unistd.h>
19 
20 #include <zephyr/net/socket.h>
21 #include <zephyr/net/tls_credentials.h>
22 
23 #include "common.h"
24 #include "certificate.h"
25 
/* Backlog length passed to listen(): one queued connection per handler. */
#define MAX_CLIENT_QUEUE CONFIG_NET_SAMPLE_NUM_HANDLERS

/* One handler thread, stack and in-use flag per potential concurrent
 * client, for each enabled address family. APP_BMEM presumably places
 * the flags in the application memory partition for userspace builds —
 * defined in common.h; verify there.
 */
#if defined(CONFIG_NET_IPV4)
K_THREAD_STACK_ARRAY_DEFINE(tcp4_handler_stack, CONFIG_NET_SAMPLE_NUM_HANDLERS,
			    STACK_SIZE);
static struct k_thread tcp4_handler_thread[CONFIG_NET_SAMPLE_NUM_HANDLERS];
static APP_BMEM bool tcp4_handler_in_use[CONFIG_NET_SAMPLE_NUM_HANDLERS];
#endif

#if defined(CONFIG_NET_IPV6)
K_THREAD_STACK_ARRAY_DEFINE(tcp6_handler_stack, CONFIG_NET_SAMPLE_NUM_HANDLERS,
			    STACK_SIZE);
static struct k_thread tcp6_handler_thread[CONFIG_NET_SAMPLE_NUM_HANDLERS];
static APP_BMEM bool tcp6_handler_in_use[CONFIG_NET_SAMPLE_NUM_HANDLERS];
#endif

static void process_tcp4(void);
static void process_tcp6(void);

/* Acceptor threads are defined with a start delay of -1, i.e. not
 * started automatically; start_tcp() starts them explicitly with
 * k_thread_start() after the sockets' state is initialized.
 */
K_THREAD_DEFINE(tcp4_thread_id, STACK_SIZE,
		process_tcp4, NULL, NULL, NULL,
		THREAD_PRIORITY,
		IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0, -1);

K_THREAD_DEFINE(tcp6_thread_id, STACK_SIZE,
		process_tcp6, NULL, NULL, NULL,
		THREAD_PRIORITY,
		IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0, -1);
54 
/* Send the whole buffer on the socket, retrying after short writes.
 *
 * Returns 0 once every byte has been handed to send(), or the negative
 * send() result on error.
 */
static ssize_t sendall(int sock, const void *buf, size_t len)
{
	const char *pos = buf;
	size_t remaining = len;

	while (remaining > 0) {
		ssize_t chunk = send(sock, pos, remaining, 0);

		if (chunk < 0) {
			return chunk;
		}

		pos += chunk;
		remaining -= (size_t)chunk;
	}

	return 0;
}
69 
/* Create, configure, bind and listen on the TCP (or TLS) server socket
 * for one address family.
 *
 * data:         per-family state; the new socket is stored in data->tcp.sock
 * bind_addr:    local address to bind to
 * bind_addrlen: size of bind_addr
 *
 * Returns 0 on success, -errno on failure.
 */
static int start_tcp_proto(struct data *data,
			   struct sockaddr *bind_addr,
			   socklen_t bind_addrlen)
{
	int optval;
	int ret;

#if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
	data->tcp.sock = socket(bind_addr->sa_family, SOCK_STREAM,
				IPPROTO_TLS_1_2);
#else
	data->tcp.sock = socket(bind_addr->sa_family, SOCK_STREAM,
				IPPROTO_TCP);
#endif
	if (data->tcp.sock < 0) {
		LOG_ERR("Failed to create TCP socket (%s): %d", data->proto,
			errno);
		return -errno;
	}

#if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
	sec_tag_t sec_tag_list[] = {
		SERVER_CERTIFICATE_TAG,
#if defined(CONFIG_MBEDTLS_KEY_EXCHANGE_PSK_ENABLED)
		PSK_TAG,
#endif
	};

	ret = setsockopt(data->tcp.sock, SOL_TLS, TLS_SEC_TAG_LIST,
			 sec_tag_list, sizeof(sec_tag_list));
	if (ret < 0) {
		LOG_ERR("Failed to set TCP secure option (%s): %d", data->proto,
			errno);
		/* Fix: the error used to be stored in ret and then silently
		 * overwritten by the bind() result below. Without the sec
		 * tags every TLS handshake would fail later, so bail out.
		 */
		return -errno;
	}
#endif

	if (bind_addr->sa_family == AF_INET6) {
		/* Prefer IPv6 temporary addresses */
		optval = IPV6_PREFER_SRC_PUBLIC;
		(void)setsockopt(data->tcp.sock, IPPROTO_IPV6,
				 IPV6_ADDR_PREFERENCES,
				 &optval, sizeof(optval));

		/*
		 * Bind only to IPv6 without mapping to IPv4, since we bind to
		 * IPv4 using another socket
		 */
		optval = 1;
		(void)setsockopt(data->tcp.sock, IPPROTO_IPV6, IPV6_V6ONLY,
				 &optval, sizeof(optval));
	}

	ret = bind(data->tcp.sock, bind_addr, bind_addrlen);
	if (ret < 0) {
		LOG_ERR("Failed to bind TCP socket (%s): %d", data->proto,
			errno);
		return -errno;
	}

	ret = listen(data->tcp.sock, MAX_CLIENT_QUEUE);
	if (ret < 0) {
		LOG_ERR("Failed to listen on TCP socket (%s): %d",
			data->proto, errno);
		ret = -errno;
	}

	return ret;
}
139 
/* Per-connection worker: echo everything received on the accepted
 * socket back to the peer until the connection closes or errors out.
 *
 * ptr1: connection slot index (packed with INT_TO_POINTER)
 * ptr2: struct data for this address family
 * ptr3: this handler's in-use flag; cleared on exit so the slot's
 *       thread object can be reused by the acceptor
 */
static void handle_data(void *ptr1, void *ptr2, void *ptr3)
{
	int slot = POINTER_TO_INT(ptr1);
	struct data *data = ptr2;
	bool *in_use = ptr3;
	int offset = 0; /* bytes buffered but not yet echoed back */
	int received;
	int client;
	int ret;

	client = data->tcp.accepted[slot].sock;

	do {
		/* Append after any data still waiting to be echoed */
		received = recv(client,
			data->tcp.accepted[slot].recv_buffer + offset,
			sizeof(data->tcp.accepted[slot].recv_buffer) - offset,
			0);

		if (received == 0) {
			/* Connection closed */
			LOG_INF("TCP (%s): Connection closed", data->proto);
			break;
		} else if (received < 0) {
			/* Socket error */
			LOG_ERR("TCP (%s): Connection error %d", data->proto,
				errno);
			break;
		} else {
			/* Byte counter read periodically by print_stats() */
			atomic_add(&data->tcp.bytes_received, received);
		}

		offset += received;

#if !defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
		/* To prevent fragmentation of the response, reply only if
		 * buffer is full or there is no more data to read
		 */
		if (offset == sizeof(data->tcp.accepted[slot].recv_buffer) ||
		    (recv(client,
			  data->tcp.accepted[slot].recv_buffer + offset,
			  sizeof(data->tcp.accepted[slot].recv_buffer) -
								offset,
			  MSG_PEEK | MSG_DONTWAIT) < 0 &&
		     (errno == EAGAIN || errno == EWOULDBLOCK))) {
#endif
			/* With TLS enabled the MSG_PEEK probe above is
			 * compiled out, so the reply is sent after every
			 * successful recv().
			 */
			ret = sendall(client,
				      data->tcp.accepted[slot].recv_buffer,
				      offset);
			if (ret < 0) {
				LOG_ERR("TCP (%s): Failed to send, "
					"closing socket", data->proto);
				break;
			}

			LOG_DBG("TCP (%s): Received and replied with %d bytes",
				data->proto, offset);

			if (++data->tcp.accepted[slot].counter % 1000 == 0U) {
				LOG_INF("%s TCP: Sent %u packets", data->proto,
					data->tcp.accepted[slot].counter);
			}

			offset = 0;
#if !defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
		}
#endif
	} while (true);

	/* Release the handler slot, then the socket; sock = -1 marks the
	 * accepted slot free for get_free_slot().
	 */
	*in_use = false;

	(void)close(client);

	data->tcp.accepted[slot].sock = -1;
}
214 
get_free_slot(struct data * data)215 static int get_free_slot(struct data *data)
216 {
217 	int i;
218 
219 	for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
220 		if (data->tcp.accepted[i].sock < 0) {
221 			return i;
222 		}
223 	}
224 
225 	return -1;
226 }
227 
/* Accept one incoming connection on the listening socket and spawn a
 * per-client handler thread for it.
 *
 * Returns 0 on success or when no handler slot is free (the connection
 * is then closed and dropped), or -errno if accept() fails.
 */
static int process_tcp(struct data *data)
{
	int client;
	int slot;
	/* NOTE(review): a sockaddr_in is used for both families; for an
	 * IPv6 peer the address is truncated to client_addr_len, but only
	 * sin_family is read below — confirm accept() still fills the
	 * family in that case.
	 */
	struct sockaddr_in client_addr;
	socklen_t client_addr_len = sizeof(client_addr);

	LOG_INF("Waiting for TCP connection on port %d (%s)...",
		MY_PORT, data->proto);

	client = accept(data->tcp.sock, (struct sockaddr *)&client_addr,
			&client_addr_len);
	if (client < 0) {
		LOG_ERR("%s accept error (%d)", data->proto, -errno);
		return -errno;
	}

	slot = get_free_slot(data);
	if (slot < 0) {
		LOG_ERR("Cannot accept more connections");
		close(client);
		return 0;
	}

	/* Claim the slot; the handler thread resets it to -1 on exit */
	data->tcp.accepted[slot].sock = client;

	LOG_INF("TCP (%s): Accepted connection", data->proto);

#define MAX_NAME_LEN sizeof("tcp6[xxx]")

#if defined(CONFIG_NET_IPV6)
	if (client_addr.sin_family == AF_INET6) {
		tcp6_handler_in_use[slot] = true;

		/* K_INHERIT_PERMS lets the handler use the kernel objects
		 * this acceptor thread was granted access to.
		 */
		k_thread_create(
			&tcp6_handler_thread[slot],
			tcp6_handler_stack[slot],
			K_THREAD_STACK_SIZEOF(tcp6_handler_stack[slot]),
			handle_data,
			INT_TO_POINTER(slot), data, &tcp6_handler_in_use[slot],
			THREAD_PRIORITY,
			IS_ENABLED(CONFIG_USERSPACE) ? K_USER |
						       K_INHERIT_PERMS : 0,
			K_NO_WAIT);

		if (IS_ENABLED(CONFIG_THREAD_NAME)) {
			char name[MAX_NAME_LEN];

			snprintk(name, sizeof(name), "tcp6[%3d]", (uint8_t)slot);
			k_thread_name_set(&tcp6_handler_thread[slot], name);
		}
	}
#endif

#if defined(CONFIG_NET_IPV4)
	if (client_addr.sin_family == AF_INET) {
		tcp4_handler_in_use[slot] = true;

		k_thread_create(
			&tcp4_handler_thread[slot],
			tcp4_handler_stack[slot],
			K_THREAD_STACK_SIZEOF(tcp4_handler_stack[slot]),
			handle_data,
			INT_TO_POINTER(slot), data, &tcp4_handler_in_use[slot],
			THREAD_PRIORITY,
			IS_ENABLED(CONFIG_USERSPACE) ? K_USER |
						       K_INHERIT_PERMS : 0,
			K_NO_WAIT);

		if (IS_ENABLED(CONFIG_THREAD_NAME)) {
			char name[MAX_NAME_LEN];

			snprintk(name, sizeof(name), "tcp4[%3d]", (uint8_t)slot);
			k_thread_name_set(&tcp4_handler_thread[slot], name);
		}
	}
#endif

	return 0;
}
308 
process_tcp4(void)309 static void process_tcp4(void)
310 {
311 	int ret;
312 	struct sockaddr_in addr4;
313 
314 	(void)memset(&addr4, 0, sizeof(addr4));
315 	addr4.sin_family = AF_INET;
316 	addr4.sin_port = htons(MY_PORT);
317 
318 	ret = start_tcp_proto(&conf.ipv4, (struct sockaddr *)&addr4,
319 			      sizeof(addr4));
320 	if (ret < 0) {
321 		quit();
322 		return;
323 	}
324 
325 	while (ret == 0) {
326 		ret = process_tcp(&conf.ipv4);
327 		if (ret < 0) {
328 			break;
329 		}
330 	}
331 
332 	quit();
333 }
334 
process_tcp6(void)335 static void process_tcp6(void)
336 {
337 	int ret;
338 	struct sockaddr_in6 addr6;
339 
340 	(void)memset(&addr6, 0, sizeof(addr6));
341 	addr6.sin6_family = AF_INET6;
342 	addr6.sin6_port = htons(MY_PORT);
343 
344 	ret = start_tcp_proto(&conf.ipv6, (struct sockaddr *)&addr6,
345 			      sizeof(addr6));
346 	if (ret < 0) {
347 		quit();
348 		return;
349 	}
350 
351 	while (ret == 0) {
352 		ret = process_tcp(&conf.ipv6);
353 		if (ret != 0) {
354 			break;
355 		}
356 	}
357 
358 	quit();
359 }
360 
print_stats(struct k_work * work)361 static void print_stats(struct k_work *work)
362 {
363 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
364 	struct data *data = CONTAINER_OF(dwork, struct data, tcp.stats_print);
365 	int total_received = atomic_get(&data->tcp.bytes_received);
366 
367 	if (total_received) {
368 		if ((total_received / STATS_TIMER) < 1024) {
369 			LOG_INF("%s TCP: Received %d B/sec", data->proto,
370 				total_received / STATS_TIMER);
371 		} else {
372 			LOG_INF("%s TCP: Received %d KiB/sec", data->proto,
373 				total_received / 1024 / STATS_TIMER);
374 		}
375 
376 		atomic_set(&data->tcp.bytes_received, 0);
377 	}
378 
379 	k_work_reschedule(&data->tcp.stats_print, K_SECONDS(STATS_TIMER));
380 }
381 
/* Initialize per-family TCP state and start the acceptor threads
 * (defined with a start delay of -1, so they only run once
 * k_thread_start() is called here).
 */
void start_tcp(void)
{
	int i;

	/* Mark every accepted-connection slot and handler as free */
	for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
		conf.ipv6.tcp.accepted[i].sock = -1;
		conf.ipv4.tcp.accepted[i].sock = -1;

#if defined(CONFIG_NET_IPV4)
		tcp4_handler_in_use[i] = false;
#endif
#if defined(CONFIG_NET_IPV6)
		tcp6_handler_in_use[i] = false;
#endif
	}

#if defined(CONFIG_NET_IPV6)
#if defined(CONFIG_USERSPACE)
	/* Userspace acceptor needs the app memory domain plus access to
	 * each handler thread object and stack it will create/use.
	 */
	k_mem_domain_add_thread(&app_domain, tcp6_thread_id);

	for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
		k_thread_access_grant(tcp6_thread_id, &tcp6_handler_thread[i]);
		k_thread_access_grant(tcp6_thread_id, &tcp6_handler_stack[i]);
	}
#endif

	k_work_init_delayable(&conf.ipv6.tcp.stats_print, print_stats);
	k_thread_start(tcp6_thread_id);
	k_work_reschedule(&conf.ipv6.tcp.stats_print, K_SECONDS(STATS_TIMER));
#endif

#if defined(CONFIG_NET_IPV4)
#if defined(CONFIG_USERSPACE)
	k_mem_domain_add_thread(&app_domain, tcp4_thread_id);

	for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
		k_thread_access_grant(tcp4_thread_id, &tcp4_handler_thread[i]);
		k_thread_access_grant(tcp4_thread_id, &tcp4_handler_stack[i]);
	}
#endif

	k_work_init_delayable(&conf.ipv4.tcp.stats_print, print_stats);
	k_thread_start(tcp4_thread_id);
	k_work_reschedule(&conf.ipv4.tcp.stats_print, K_SECONDS(STATS_TIMER));
#endif
}
428 
/* Tear down acceptor threads, handler threads and all open sockets. */
void stop_tcp(void)
{
	int i;

	/* Not very graceful way to close a thread, but as we may be blocked
	 * in accept or recv call it seems to be necessary
	 */

	if (IS_ENABLED(CONFIG_NET_IPV6)) {
		k_thread_abort(tcp6_thread_id);
		if (conf.ipv6.tcp.sock >= 0) {
			(void)close(conf.ipv6.tcp.sock);
		}

		for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
			/* The #if inside IS_ENABLED() is required: both
			 * branches are still compiled, and the handler
			 * arrays only exist when IPv6 is configured.
			 */
#if defined(CONFIG_NET_IPV6)
			if (tcp6_handler_in_use[i] == true) {
				k_thread_abort(&tcp6_handler_thread[i]);
			}
#endif
			if (conf.ipv6.tcp.accepted[i].sock >= 0) {
				(void)close(conf.ipv6.tcp.accepted[i].sock);
			}
		}
	}

	if (IS_ENABLED(CONFIG_NET_IPV4)) {
		k_thread_abort(tcp4_thread_id);
		if (conf.ipv4.tcp.sock >= 0) {
			(void)close(conf.ipv4.tcp.sock);
		}

		for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
			/* Same pattern as above for the IPv4 handler arrays */
#if defined(CONFIG_NET_IPV4)
			if (tcp4_handler_in_use[i] == true) {
				k_thread_abort(&tcp4_handler_thread[i]);
			}
#endif
			if (conf.ipv4.tcp.accepted[i].sock >= 0) {
				(void)close(conf.ipv4.tcp.accepted[i].sock);
			}
		}
	}
}
473