1 /* tcp.c - TCP specific code for echo server */
2 
3 /*
4  * Copyright (c) 2017 Intel Corporation.
5  * Copyright (c) 2018 Nordic Semiconductor ASA.
6  *
7  * SPDX-License-Identifier: Apache-2.0
8  */
9 
10 #include <zephyr/logging/log.h>
11 LOG_MODULE_DECLARE(net_echo_server_sample, LOG_LEVEL_DBG);
12 
13 #include <zephyr/kernel.h>
14 #include <errno.h>
15 #include <stdio.h>
16 
17 #include <zephyr/net/socket.h>
18 #include <zephyr/net/tls_credentials.h>
19 
20 #include "common.h"
21 #include "certificate.h"
22 
/* Listen backlog: one pending connection per available handler slot */
#define MAX_CLIENT_QUEUE CONFIG_NET_SAMPLE_NUM_HANDLERS

#if defined(CONFIG_NET_IPV4)
/* Stacks and thread objects for the per-connection IPv4 echo handlers;
 * the in_use flags track which handler threads are currently running
 * (APP_BMEM places them in app memory for userspace builds).
 */
K_THREAD_STACK_ARRAY_DEFINE(tcp4_handler_stack, CONFIG_NET_SAMPLE_NUM_HANDLERS,
			    STACK_SIZE);
static struct k_thread tcp4_handler_thread[CONFIG_NET_SAMPLE_NUM_HANDLERS];
static APP_BMEM bool tcp4_handler_in_use[CONFIG_NET_SAMPLE_NUM_HANDLERS];
#endif

#if defined(CONFIG_NET_IPV6)
/* Same set of handler resources for IPv6 connections */
K_THREAD_STACK_ARRAY_DEFINE(tcp6_handler_stack, CONFIG_NET_SAMPLE_NUM_HANDLERS,
			    STACK_SIZE);
static struct k_thread tcp6_handler_thread[CONFIG_NET_SAMPLE_NUM_HANDLERS];
static APP_BMEM bool tcp6_handler_in_use[CONFIG_NET_SAMPLE_NUM_HANDLERS];
#endif

static void process_tcp4(void);
static void process_tcp6(void);

/* Listener threads are defined with a start delay of -1, i.e. they never
 * auto-start; start_tcp() launches them explicitly via k_thread_start().
 */
K_THREAD_DEFINE(tcp4_thread_id, STACK_SIZE,
		process_tcp4, NULL, NULL, NULL,
		THREAD_PRIORITY,
		IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0, -1);

K_THREAD_DEFINE(tcp6_thread_id, STACK_SIZE,
		process_tcp6, NULL, NULL, NULL,
		THREAD_PRIORITY,
		IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0, -1);
51 
/* Send exactly len bytes from buf on sock, retrying until everything is
 * written. Returns 0 on success, or the negative result of the failing
 * send() call.
 */
static ssize_t sendall(int sock, const void *buf, size_t len)
{
	const char *pos = buf;
	size_t remaining = len;

	while (remaining > 0) {
		ssize_t written = send(sock, pos, remaining, 0);

		if (written < 0) {
			return written;
		}

		pos += written;
		remaining -= written;
	}

	return 0;
}
66 
start_tcp_proto(struct data * data,struct sockaddr * bind_addr,socklen_t bind_addrlen)67 static int start_tcp_proto(struct data *data,
68 			   struct sockaddr *bind_addr,
69 			   socklen_t bind_addrlen)
70 {
71 	int optval;
72 	int ret;
73 
74 #if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
75 	data->tcp.sock = socket(bind_addr->sa_family, SOCK_STREAM,
76 				IPPROTO_TLS_1_2);
77 #else
78 	data->tcp.sock = socket(bind_addr->sa_family, SOCK_STREAM,
79 				IPPROTO_TCP);
80 #endif
81 	if (data->tcp.sock < 0) {
82 		LOG_ERR("Failed to create TCP socket (%s): %d", data->proto,
83 			errno);
84 		return -errno;
85 	}
86 
87 #if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
88 	sec_tag_t sec_tag_list[] = {
89 		SERVER_CERTIFICATE_TAG,
90 #if defined(CONFIG_MBEDTLS_KEY_EXCHANGE_PSK_ENABLED)
91 		PSK_TAG,
92 #endif
93 	};
94 
95 	ret = setsockopt(data->tcp.sock, SOL_TLS, TLS_SEC_TAG_LIST,
96 			 sec_tag_list, sizeof(sec_tag_list));
97 	if (ret < 0) {
98 		LOG_ERR("Failed to set TCP secure option (%s): %d", data->proto,
99 			errno);
100 		ret = -errno;
101 	}
102 #endif
103 
104 	if (bind_addr->sa_family == AF_INET6) {
105 		/* Prefer IPv6 temporary addresses */
106 		optval = IPV6_PREFER_SRC_PUBLIC;
107 		(void)setsockopt(data->tcp.sock, IPPROTO_IPV6,
108 				 IPV6_ADDR_PREFERENCES,
109 				 &optval, sizeof(optval));
110 
111 		/*
112 		 * Bind only to IPv6 without mapping to IPv4, since we bind to
113 		 * IPv4 using another socket
114 		 */
115 		optval = 1;
116 		(void)setsockopt(data->tcp.sock, IPPROTO_IPV6, IPV6_V6ONLY,
117 				 &optval, sizeof(optval));
118 	}
119 
120 	ret = bind(data->tcp.sock, bind_addr, bind_addrlen);
121 	if (ret < 0) {
122 		LOG_ERR("Failed to bind TCP socket (%s): %d", data->proto,
123 			errno);
124 		return -errno;
125 	}
126 
127 	ret = listen(data->tcp.sock, MAX_CLIENT_QUEUE);
128 	if (ret < 0) {
129 		LOG_ERR("Failed to listen on TCP socket (%s): %d",
130 			data->proto, errno);
131 		ret = -errno;
132 	}
133 
134 	return ret;
135 }
136 
/* Echo worker for one accepted TCP connection, running in a dedicated
 * handler thread. ptr1 carries the accepted-connection slot index
 * (pointer-encoded int), ptr2 the per-family data context, and ptr3
 * points at this handler's in-use flag, which is cleared before exit.
 */
static void handle_data(void *ptr1, void *ptr2, void *ptr3)
{
	int slot = POINTER_TO_INT(ptr1);
	struct data *data = ptr2;
	bool *in_use = ptr3;
	int offset = 0;	/* bytes buffered but not yet echoed back */
	int received;
	int client;
	int ret;

	client = data->tcp.accepted[slot].sock;

	do {
		/* Append new data after whatever is already buffered */
		received = recv(client,
			data->tcp.accepted[slot].recv_buffer + offset,
			sizeof(data->tcp.accepted[slot].recv_buffer) - offset,
			0);

		if (received == 0) {
			/* Connection closed */
			LOG_INF("TCP (%s): Connection closed", data->proto);
			break;
		} else if (received < 0) {
			/* Socket error */
			LOG_ERR("TCP (%s): Connection error %d", data->proto,
				errno);
			break;
		} else {
			atomic_add(&data->tcp.bytes_received, received);
		}

		offset += received;

#if !defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
		/* To prevent fragmentation of the response, reply only if
		 * buffer is full or there is no more data to read.
		 * The MSG_PEEK | MSG_DONTWAIT probe leaves pending data in
		 * the queue and fails with EAGAIN/EWOULDBLOCK only when the
		 * receive queue is empty.
		 */
		if (offset == sizeof(data->tcp.accepted[slot].recv_buffer) ||
		    (recv(client,
			  data->tcp.accepted[slot].recv_buffer + offset,
			  sizeof(data->tcp.accepted[slot].recv_buffer) -
								offset,
			  MSG_PEEK | MSG_DONTWAIT) < 0 &&
		     (errno == EAGAIN || errno == EWOULDBLOCK))) {
#endif
			/* Echo back everything buffered so far */
			ret = sendall(client,
				      data->tcp.accepted[slot].recv_buffer,
				      offset);
			if (ret < 0) {
				LOG_ERR("TCP (%s): Failed to send, "
					"closing socket", data->proto);
				break;
			}

			LOG_DBG("TCP (%s): Received and replied with %d bytes",
				data->proto, offset);

			/* Periodic progress report every 1000 replies */
			if (++data->tcp.accepted[slot].counter % 1000 == 0U) {
				LOG_INF("%s TCP: Sent %u packets", data->proto,
					data->tcp.accepted[slot].counter);
			}

			offset = 0;
#if !defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
		}
#endif
	} while (true);

	/* Release the handler flag and the slot so the acceptor thread can
	 * reuse them for a new connection.
	 */
	*in_use = false;

	(void)close(client);

	data->tcp.accepted[slot].sock = -1;
}
211 
get_free_slot(struct data * data)212 static int get_free_slot(struct data *data)
213 {
214 	int i;
215 
216 	for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
217 		if (data->tcp.accepted[i].sock < 0) {
218 			return i;
219 		}
220 	}
221 
222 	return -1;
223 }
224 
/* Accept one incoming TCP connection, claim a free handler slot for it,
 * and spawn a per-connection handler thread for the matching address
 * family. Returns 0 normally (including when all slots are busy, in
 * which case the connection is refused) or -errno on accept() failure.
 */
static int process_tcp(struct data *data)
{
	int client;
	int slot;
	/* NOTE(review): sockaddr_in is used for both families; the family
	 * field overlaps sockaddr_in6's, which is all that is read below,
	 * but the IPv6 peer address itself would be truncated — confirm
	 * this is intentional since the peer address is never used.
	 */
	struct sockaddr_in client_addr;
	socklen_t client_addr_len = sizeof(client_addr);

	LOG_INF("Waiting for TCP connection on port %d (%s)...",
		MY_PORT, data->proto);

	client = accept(data->tcp.sock, (struct sockaddr *)&client_addr,
			&client_addr_len);
	if (client < 0) {
		LOG_ERR("%s accept error (%d)", data->proto, -errno);
		return -errno;
	}

	slot = get_free_slot(data);
	if (slot < 0) {
		/* All handler slots busy: refuse this connection but keep
		 * the listener running.
		 */
		LOG_ERR("Cannot accept more connections");
		close(client);
		return 0;
	}

	data->tcp.accepted[slot].sock = client;

	LOG_INF("TCP (%s): Accepted connection", data->proto);

/* Longest handler thread name, e.g. "tcp6[123]" */
#define MAX_NAME_LEN sizeof("tcp6[xxx]")

#if defined(CONFIG_NET_IPV6)
	if (client_addr.sin_family == AF_INET6) {
		tcp6_handler_in_use[slot] = true;

		/* Start the echo handler on this slot's dedicated stack;
		 * K_INHERIT_PERMS passes kernel object grants through in
		 * userspace builds.
		 */
		k_thread_create(
			&tcp6_handler_thread[slot],
			tcp6_handler_stack[slot],
			K_THREAD_STACK_SIZEOF(tcp6_handler_stack[slot]),
			handle_data,
			INT_TO_POINTER(slot), data, &tcp6_handler_in_use[slot],
			THREAD_PRIORITY,
			IS_ENABLED(CONFIG_USERSPACE) ? K_USER |
						       K_INHERIT_PERMS : 0,
			K_NO_WAIT);

		if (IS_ENABLED(CONFIG_THREAD_NAME)) {
			char name[MAX_NAME_LEN];

			snprintk(name, sizeof(name), "tcp6[%3d]", (uint8_t)slot);
			k_thread_name_set(&tcp6_handler_thread[slot], name);
		}
	}
#endif

#if defined(CONFIG_NET_IPV4)
	if (client_addr.sin_family == AF_INET) {
		tcp4_handler_in_use[slot] = true;

		k_thread_create(
			&tcp4_handler_thread[slot],
			tcp4_handler_stack[slot],
			K_THREAD_STACK_SIZEOF(tcp4_handler_stack[slot]),
			handle_data,
			INT_TO_POINTER(slot), data, &tcp4_handler_in_use[slot],
			THREAD_PRIORITY,
			IS_ENABLED(CONFIG_USERSPACE) ? K_USER |
						       K_INHERIT_PERMS : 0,
			K_NO_WAIT);

		if (IS_ENABLED(CONFIG_THREAD_NAME)) {
			char name[MAX_NAME_LEN];

			snprintk(name, sizeof(name), "tcp4[%3d]", (uint8_t)slot);
			k_thread_name_set(&tcp4_handler_thread[slot], name);
		}
	}
#endif

	return 0;
}
305 
process_tcp4(void)306 static void process_tcp4(void)
307 {
308 	int ret;
309 	struct sockaddr_in addr4;
310 
311 	(void)memset(&addr4, 0, sizeof(addr4));
312 	addr4.sin_family = AF_INET;
313 	addr4.sin_port = htons(MY_PORT);
314 
315 	ret = start_tcp_proto(&conf.ipv4, (struct sockaddr *)&addr4,
316 			      sizeof(addr4));
317 	if (ret < 0) {
318 		quit();
319 		return;
320 	}
321 
322 	while (ret == 0) {
323 		ret = process_tcp(&conf.ipv4);
324 		if (ret < 0) {
325 			break;
326 		}
327 	}
328 
329 	quit();
330 }
331 
process_tcp6(void)332 static void process_tcp6(void)
333 {
334 	int ret;
335 	struct sockaddr_in6 addr6;
336 
337 	(void)memset(&addr6, 0, sizeof(addr6));
338 	addr6.sin6_family = AF_INET6;
339 	addr6.sin6_port = htons(MY_PORT);
340 
341 	ret = start_tcp_proto(&conf.ipv6, (struct sockaddr *)&addr6,
342 			      sizeof(addr6));
343 	if (ret < 0) {
344 		quit();
345 		return;
346 	}
347 
348 	while (ret == 0) {
349 		ret = process_tcp(&conf.ipv6);
350 		if (ret != 0) {
351 			break;
352 		}
353 	}
354 
355 	quit();
356 }
357 
print_stats(struct k_work * work)358 static void print_stats(struct k_work *work)
359 {
360 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
361 	struct data *data = CONTAINER_OF(dwork, struct data, tcp.stats_print);
362 	int total_received = atomic_get(&data->tcp.bytes_received);
363 
364 	if (total_received) {
365 		if ((total_received / STATS_TIMER) < 1024) {
366 			LOG_INF("%s TCP: Received %d B/sec", data->proto,
367 				total_received / STATS_TIMER);
368 		} else {
369 			LOG_INF("%s TCP: Received %d KiB/sec", data->proto,
370 				total_received / 1024 / STATS_TIMER);
371 		}
372 
373 		atomic_set(&data->tcp.bytes_received, 0);
374 	}
375 
376 	k_work_reschedule(&data->tcp.stats_print, K_SECONDS(STATS_TIMER));
377 }
378 
/* Initialize TCP service state and start the per-family listener
 * threads. Marks every accepted-connection slot free, grants the
 * listener threads access to handler thread/stack objects in userspace
 * builds, and arms the periodic throughput statistics work items.
 */
void start_tcp(void)
{
	int i;

	for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
		/* -1 marks a slot as free for get_free_slot() */
		conf.ipv6.tcp.accepted[i].sock = -1;
		conf.ipv4.tcp.accepted[i].sock = -1;

#if defined(CONFIG_NET_IPV4)
		tcp4_handler_in_use[i] = false;
#endif
#if defined(CONFIG_NET_IPV6)
		tcp6_handler_in_use[i] = false;
#endif
	}

#if defined(CONFIG_NET_IPV6)
#if defined(CONFIG_USERSPACE)
	/* The listener thread needs app-domain membership plus access to
	 * the handler kernel objects it will create threads from.
	 */
	k_mem_domain_add_thread(&app_domain, tcp6_thread_id);

	for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
		k_thread_access_grant(tcp6_thread_id, &tcp6_handler_thread[i]);
		k_thread_access_grant(tcp6_thread_id, &tcp6_handler_stack[i]);
	}
#endif

	k_work_init_delayable(&conf.ipv6.tcp.stats_print, print_stats);
	/* Threads were defined with start delay -1; start them now */
	k_thread_start(tcp6_thread_id);
	k_work_reschedule(&conf.ipv6.tcp.stats_print, K_SECONDS(STATS_TIMER));
#endif

#if defined(CONFIG_NET_IPV4)
#if defined(CONFIG_USERSPACE)
	k_mem_domain_add_thread(&app_domain, tcp4_thread_id);

	for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
		k_thread_access_grant(tcp4_thread_id, &tcp4_handler_thread[i]);
		k_thread_access_grant(tcp4_thread_id, &tcp4_handler_stack[i]);
	}
#endif

	k_work_init_delayable(&conf.ipv4.tcp.stats_print, print_stats);
	k_thread_start(tcp4_thread_id);
	k_work_reschedule(&conf.ipv4.tcp.stats_print, K_SECONDS(STATS_TIMER));
#endif
}
425 
/* Stop the TCP service: abort the listener threads and every active
 * per-connection handler thread, then close the listening and accepted
 * sockets.
 */
void stop_tcp(void)
{
	int i;

	/* Not very graceful way to close a thread, but as we may be blocked
	 * in accept or recv call it seems to be necessary
	 */

	if (IS_ENABLED(CONFIG_NET_IPV6)) {
		k_thread_abort(tcp6_thread_id);
		if (conf.ipv6.tcp.sock >= 0) {
			(void)close(conf.ipv6.tcp.sock);
		}

		for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
			/* Inner #if is still required: the handler arrays only
			 * exist when the corresponding family is compiled in.
			 */
#if defined(CONFIG_NET_IPV6)
			if (tcp6_handler_in_use[i] == true) {
				k_thread_abort(&tcp6_handler_thread[i]);
			}
#endif
			if (conf.ipv6.tcp.accepted[i].sock >= 0) {
				(void)close(conf.ipv6.tcp.accepted[i].sock);
			}
		}
	}

	if (IS_ENABLED(CONFIG_NET_IPV4)) {
		k_thread_abort(tcp4_thread_id);
		if (conf.ipv4.tcp.sock >= 0) {
			(void)close(conf.ipv4.tcp.sock);
		}

		for (i = 0; i < CONFIG_NET_SAMPLE_NUM_HANDLERS; i++) {
#if defined(CONFIG_NET_IPV4)
			if (tcp4_handler_in_use[i] == true) {
				k_thread_abort(&tcp4_handler_thread[i]);
			}
#endif
			if (conf.ipv4.tcp.accepted[i].sock >= 0) {
				(void)close(conf.ipv4.tcp.accepted[i].sock);
			}
		}
	}
}
470