/*
 * Copyright (c) 2017 Linaro Limited
 * Copyright (c) 2021 Nordic Semiconductor
 * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* Zephyr headers */
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(net_sock, CONFIG_NET_SOCKETS_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <zephyr/net/mld.h>
#include <zephyr/net/net_context.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/net/socket.h>
#include <zephyr/net/socket_types.h>
#include <zephyr/posix/fcntl.h>
#include <zephyr/sys/fdtable.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/iterable_sections.h>

#if defined(CONFIG_SOCKS)
#include "socks.h"
#endif

#include <zephyr/net/igmp.h>
#include "../../ip/ipv6.h"

#include "../../ip/net_stats.h"

#include "sockets_internal.h"
#include "../../ip/tcp_internal.h"
#include "../../ip/net_private.h"

const struct socket_op_vtable sock_fd_op_vtable;

static void zsock_received_cb(struct net_context *ctx,
			      struct net_pkt *pkt,
			      union net_ip_header *ip_hdr,
			      union net_proto_header *proto_hdr,
			      int status,
			      void *user_data);

static int fifo_wait_non_empty(struct k_fifo *fifo, k_timeout_t timeout)
{
	struct k_poll_event events[] = {
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
					 K_POLL_MODE_NOTIFY_ONLY, fifo),
	};

	return k_poll(events, ARRAY_SIZE(events), timeout);
}

static void zsock_flush_queue(struct net_context *ctx)
{
	bool is_listen = net_context_get_state(ctx) == NET_CONTEXT_LISTENING;
	void *p;

	/* recv_q and accept_q are shared via a union */
	while ((p = k_fifo_get(&ctx->recv_q, K_NO_WAIT)) != NULL) {
		if (is_listen) {
			NET_DBG("discarding ctx %p", p);
			net_context_put(p);
		} else {
			NET_DBG("discarding pkt %p", p);
			net_pkt_unref(p);
		}
	}

	/* Some threads might be waiting on recv, cancel the wait */
	k_fifo_cancel_wait(&ctx->recv_q);

	/* Wake reader if it was sleeping */
	(void)k_condvar_signal(&ctx->cond.recv);
}

static int zsock_socket_internal(int family, int type, int proto)
{
	int fd = zvfs_reserve_fd();
	struct net_context *ctx;
	int res;

	if (fd < 0) {
		return -1;
	}

	if (proto == 0) {
		if (family == AF_INET || family == AF_INET6) {
			if (type == SOCK_DGRAM) {
				proto = IPPROTO_UDP;
			} else if (type == SOCK_STREAM) {
				proto = IPPROTO_TCP;
			}
		}
	}

	res = net_context_get(family, type, proto, &ctx);
	if (res < 0) {
		zvfs_free_fd(fd);
		errno = -res;
		return -1;
	}

	/* Initialize user_data, all other calls will preserve it */
	ctx->user_data = NULL;

	/* The socket flags are stored here */
	ctx->socket_data = NULL;

	/* recv_q and accept_q are in a union */
	k_fifo_init(&ctx->recv_q);

	/* A condition variable is used to avoid holding the lock for a long
	 * time while waiting for data to be received
	 */
	k_condvar_init(&ctx->cond.recv);

	/* A TCP context is effectively owned by both the application
	 * and the stack: the stack may detect that the peer closed/aborted
	 * the connection, but it must not dispose of the context behind
	 * the application's back. Likewise, when the application "closes"
	 * the context, it is not disposed of immediately - there is still
	 * a closing handshake for the stack to perform.
	 */
	if (proto == IPPROTO_TCP) {
		net_context_ref(ctx);
	}

	zvfs_finalize_typed_fd(fd, ctx, (const struct fd_op_vtable *)&sock_fd_op_vtable,
			       ZVFS_MODE_IFSOCK);

	NET_DBG("socket: ctx=%p, fd=%d", ctx, fd);

	return fd;
}
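
/* Illustrative sketch (not part of the original file): with proto == 0 the
 * protocol is derived from the socket type above, so for an IPv4 datagram
 * socket these two calls are equivalent:
 *
 *	int fd1 = zsock_socket_internal(AF_INET, SOCK_DGRAM, 0);
 *	int fd2 = zsock_socket_internal(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
 */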

int zsock_close_ctx(struct net_context *ctx, int sock)
{
	int ret;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, close, sock);

	NET_DBG("close: ctx=%p, fd=%d", ctx, sock);

	/* Reset the callbacks to avoid any race conditions while
	 * flushing the queues. No need to check return values here,
	 * as these are fail-free operations and we are closing the
	 * socket anyway.
	 */
	if (net_context_get_state(ctx) == NET_CONTEXT_LISTENING) {
		(void)net_context_accept(ctx, NULL, K_NO_WAIT, NULL);
	} else {
		(void)net_context_recv(ctx, NULL, K_NO_WAIT, NULL);
	}

	ctx->user_data = INT_TO_POINTER(EINTR);
	sock_set_error(ctx);

	zsock_flush_queue(ctx);

	ret = net_context_put(ctx);
	if (ret < 0) {
		errno = -ret;
		ret = -1;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, close, sock, ret < 0 ? -errno : ret);

	if (ret == 0) {
		(void)sock_obj_core_dealloc(sock);
	}

	return ret;
}

static void zsock_accepted_cb(struct net_context *new_ctx,
			      struct sockaddr *addr, socklen_t addrlen,
			      int status, void *user_data)
{
	struct net_context *parent = user_data;

	NET_DBG("parent=%p, ctx=%p, st=%d", parent, new_ctx, status);

	if (status == 0) {
		/* This just installs a callback, so it cannot fail. */
		(void)net_context_recv(new_ctx, zsock_received_cb, K_NO_WAIT,
				       NULL);
		k_fifo_init(&new_ctx->recv_q);
		k_condvar_init(&new_ctx->cond.recv);

		k_fifo_put(&parent->accept_q, new_ctx);

		/* A TCP context is effectively owned by both the application
		 * and the stack: the stack may detect that the peer closed/aborted
		 * the connection, but it must not dispose of the context behind
		 * the application's back. Likewise, when the application "closes"
		 * the context, it is not disposed of immediately - there is still
		 * a closing handshake for the stack to perform.
		 */
		net_context_ref(new_ctx);

		(void)k_condvar_signal(&parent->cond.recv);
	}
}

static void zsock_received_cb(struct net_context *ctx,
			      struct net_pkt *pkt,
			      union net_ip_header *ip_hdr,
			      union net_proto_header *proto_hdr,
			      int status,
			      void *user_data)
{
	if (ctx->cond.lock) {
		(void)k_mutex_lock(ctx->cond.lock, K_FOREVER);
	}

	NET_DBG("ctx=%p, pkt=%p, st=%d, user_data=%p", ctx, pkt, status,
		user_data);

	if (status < 0) {
		ctx->user_data = INT_TO_POINTER(-status);
		sock_set_error(ctx);
	}

	/* if pkt is NULL, EOF */
	if (!pkt) {
		struct net_pkt *last_pkt = k_fifo_peek_tail(&ctx->recv_q);

		if (!last_pkt) {
			/* If there are no packets in the queue, recv() may
			 * be blocked waiting on it to become non-empty,
			 * so cancel that wait.
			 */
			sock_set_eof(ctx);
			k_fifo_cancel_wait(&ctx->recv_q);
			NET_DBG("Marked socket %p as peer-closed", ctx);
		} else {
			net_pkt_set_eof(last_pkt, true);
			NET_DBG("Set EOF flag on pkt %p", last_pkt);
		}

		goto unlock;
	}

	/* Normal packet */
	net_pkt_set_eof(pkt, false);

	net_pkt_set_rx_stats_tick(pkt, k_cycle_get_32());

	k_fifo_put(&ctx->recv_q, pkt);

unlock:
	/* Wake reader if it was sleeping */
	(void)k_condvar_signal(&ctx->cond.recv);

	if (ctx->cond.lock) {
		(void)k_mutex_unlock(ctx->cond.lock);
	}
}

int zsock_shutdown_ctx(struct net_context *ctx, int how)
{
	int ret;

	if (how == ZSOCK_SHUT_RD) {
		if (net_context_get_state(ctx) == NET_CONTEXT_LISTENING) {
			ret = net_context_accept(ctx, NULL, K_NO_WAIT, NULL);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}
		} else {
			ret = net_context_recv(ctx, NULL, K_NO_WAIT, NULL);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}
		}

		sock_set_eof(ctx);

		zsock_flush_queue(ctx);

		return 0;
	}

	if (how == ZSOCK_SHUT_WR || how == ZSOCK_SHUT_RDWR) {
		errno = ENOTSUP;
		return -1;
	}

	errno = EINVAL;
	return -1;
}
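
/* Illustrative sketch (not part of the original file): only the read side
 * can be shut down by this implementation; the write side reports ENOTSUP:
 *
 *	zsock_shutdown(fd, ZSOCK_SHUT_RD);	// flushes the queue, marks EOF
 *	zsock_shutdown(fd, ZSOCK_SHUT_WR);	// fails with errno == ENOTSUP
 */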

int zsock_bind_ctx(struct net_context *ctx, const struct sockaddr *addr,
		   socklen_t addrlen)
{
	int ret;

	ret = net_context_bind(ctx, addr, addrlen);
	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	/* For a DGRAM socket, we expect to receive packets after the call to
	 * bind(), but for a STREAM socket, the next expected operation is
	 * listen(), which doesn't work if the recv callback is set.
	 */
	if (net_context_get_type(ctx) == SOCK_DGRAM) {
		ret = net_context_recv(ctx, zsock_received_cb, K_NO_WAIT,
				       ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
	}

	return 0;
}

static void zsock_connected_cb(struct net_context *ctx, int status, void *user_data)
{
	if (status < 0) {
		ctx->user_data = INT_TO_POINTER(-status);
		sock_set_error(ctx);
	}
}

int zsock_connect_ctx(struct net_context *ctx, const struct sockaddr *addr,
		      socklen_t addrlen)
{
	k_timeout_t timeout = K_MSEC(CONFIG_NET_SOCKETS_CONNECT_TIMEOUT);
	net_context_connect_cb_t cb = NULL;
	int ret;

#if defined(CONFIG_SOCKS)
	if (net_context_is_proxy_enabled(ctx)) {
		ret = net_socks5_connect(ctx, addr, addrlen);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
		ret = net_context_recv(ctx, zsock_received_cb,
				       K_NO_WAIT, ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
		return 0;
	}
#endif
	if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTED) {
		return 0;
	}

	if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTING) {
		if (sock_is_error(ctx)) {
			errno = POINTER_TO_INT(ctx->user_data);
			return -1;
		}

		errno = EALREADY;
		return -1;
	}

	if (sock_is_nonblock(ctx)) {
		timeout = K_NO_WAIT;
		cb = zsock_connected_cb;
	}

	if (net_context_get_type(ctx) == SOCK_STREAM) {
		/* For STREAM sockets net_context_recv() only installs the
		 * recv callback without side effects, and it has to be done
		 * first to avoid a race condition when TCP stream data
		 * arrives right after connect.
		 */
		ret = net_context_recv(ctx, zsock_received_cb,
				       K_NO_WAIT, ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
		ret = net_context_connect(ctx, addr, addrlen, cb,
					  timeout, ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
	} else {
		ret = net_context_connect(ctx, addr, addrlen, cb,
					  timeout, ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
		ret = net_context_recv(ctx, zsock_received_cb,
				       K_NO_WAIT, ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
	}

	return 0;
}
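
/* Illustrative sketch (not part of the original file): a non-blocking
 * connect returns immediately with zsock_connected_cb() installed;
 * completion is typically observed by polling for POLLOUT
 * (zsock_poll_prepare_ctx() below waits on the TCP connect semaphore
 * while the context is in NET_CONTEXT_CONNECTING):
 *
 *	struct zsock_pollfd pfd = { .fd = fd, .events = ZSOCK_POLLOUT };
 *
 *	(void)zsock_connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	(void)zsock_poll(&pfd, 1, timeout_ms);
 */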

int zsock_listen_ctx(struct net_context *ctx, int backlog)
{
	int ret;

	ret = net_context_listen(ctx, backlog);
	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	ret = net_context_accept(ctx, zsock_accepted_cb, K_NO_WAIT, ctx);
	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	return 0;
}

int zsock_accept_ctx(struct net_context *parent, struct sockaddr *addr,
		     socklen_t *addrlen)
{
	struct net_context *ctx;
	struct net_pkt *last_pkt;
	int fd, ret;

	if (!sock_is_nonblock(parent)) {
		k_timeout_t timeout = K_FOREVER;

		/* accept() can reuse zsock_wait_data(), as underneath it is
		 * monitoring the same queue (accept_q is an alias for recv_q).
		 */
		ret = zsock_wait_data(parent, &timeout);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
	}

	ctx = k_fifo_get(&parent->accept_q, K_NO_WAIT);
	if (ctx == NULL) {
		errno = EAGAIN;
		return -1;
	}

	fd = zvfs_reserve_fd();
	if (fd < 0) {
		zsock_flush_queue(ctx);
		net_context_put(ctx);
		return -1;
	}

	/* Check if the connection is already disconnected */
	last_pkt = k_fifo_peek_tail(&ctx->recv_q);
	if (last_pkt) {
		if (net_pkt_eof(last_pkt)) {
			sock_set_eof(ctx);
			zvfs_free_fd(fd);
			zsock_flush_queue(ctx);
			net_context_put(ctx);
			errno = ECONNABORTED;
			return -1;
		}
	}

	if (net_context_is_closing(ctx)) {
		errno = ECONNABORTED;
		zvfs_free_fd(fd);
		zsock_flush_queue(ctx);
		net_context_put(ctx);
		return -1;
	}

	net_context_set_accepting(ctx, false);

	if (addr != NULL && addrlen != NULL) {
		int len = MIN(*addrlen, sizeof(ctx->remote));

		memcpy(addr, &ctx->remote, len);
		/* addrlen is a value-result argument, set to the actual
		 * size of the source address
		 */
		if (ctx->remote.sa_family == AF_INET) {
			*addrlen = sizeof(struct sockaddr_in);
		} else if (ctx->remote.sa_family == AF_INET6) {
			*addrlen = sizeof(struct sockaddr_in6);
		} else {
			zvfs_free_fd(fd);
			errno = ENOTSUP;
			zsock_flush_queue(ctx);
			net_context_put(ctx);
			return -1;
		}
	}

	NET_DBG("accept: ctx=%p, fd=%d", ctx, fd);

	zvfs_finalize_typed_fd(fd, ctx, (const struct fd_op_vtable *)&sock_fd_op_vtable,
			       ZVFS_MODE_IFSOCK);

	return fd;
}

#define WAIT_BUFS_INITIAL_MS 10
#define WAIT_BUFS_MAX_MS 100
#define MAX_WAIT_BUFS K_MSEC(CONFIG_NET_SOCKET_MAX_SEND_WAIT)

static int send_check_and_wait(struct net_context *ctx, int status,
			       k_timepoint_t buf_timeout, k_timeout_t timeout,
			       uint32_t *retry_timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		goto out;
	}

	if (status != -ENOBUFS && status != -EAGAIN) {
		goto out;
	}

	/* If we cannot get any buffers in a reasonable
	 * amount of time, then do not wait forever as
	 * there might be some bigger issue.
	 * If we get -EAGAIN and cannot recover, then
	 * it means that the sending window is blocked
	 * and we just cannot send anything.
	 */
	if (sys_timepoint_expired(buf_timeout)) {
		if (status == -ENOBUFS) {
			status = -ENOMEM;
		} else {
			status = -ENOBUFS;
		}

		goto out;
	}

	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		*retry_timeout =
			MIN(*retry_timeout, k_ticks_to_ms_floor32(timeout.ticks));
	}

	if (ctx->cond.lock) {
		(void)k_mutex_unlock(ctx->cond.lock);
	}

	if (status == -ENOBUFS) {
		/* We can monitor net_pkt/net_buf availability, so just wait. */
		k_sleep(K_MSEC(*retry_timeout));
	}

	if (status == -EAGAIN) {
		if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) &&
		    net_context_get_type(ctx) == SOCK_STREAM &&
		    !net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
			struct k_poll_event event;

			k_poll_event_init(&event,
					  K_POLL_TYPE_SEM_AVAILABLE,
					  K_POLL_MODE_NOTIFY_ONLY,
					  net_tcp_tx_sem_get(ctx));

			k_poll(&event, 1, K_MSEC(*retry_timeout));
		} else {
			k_sleep(K_MSEC(*retry_timeout));
		}
	}

	/* Exponentially increase the retry timeout.
	 * Cap the value at WAIT_BUFS_MAX_MS.
	 */
	*retry_timeout = MIN(WAIT_BUFS_MAX_MS, *retry_timeout << 1);

	if (ctx->cond.lock) {
		(void)k_mutex_lock(ctx->cond.lock, K_FOREVER);
	}

	return 0;

out:
	errno = -status;
	return -1;
}
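
/* Worked example (illustrative): starting from WAIT_BUFS_INITIAL_MS (10 ms)
 * and doubling with the WAIT_BUFS_MAX_MS (100 ms) cap, successive retry
 * delays produced above are 10, 20, 40, 80, 100, 100, ... ms.
 */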

ssize_t zsock_sendto_ctx(struct net_context *ctx, const void *buf, size_t len,
			 int flags,
			 const struct sockaddr *dest_addr, socklen_t addrlen)
{
	k_timeout_t timeout = K_FOREVER;
	uint32_t retry_timeout = WAIT_BUFS_INITIAL_MS;
	k_timepoint_t buf_timeout, end;
	int status;

	if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
		timeout = K_NO_WAIT;
		buf_timeout = sys_timepoint_calc(K_NO_WAIT);
	} else {
		net_context_get_option(ctx, NET_OPT_SNDTIMEO, &timeout, NULL);
		buf_timeout = sys_timepoint_calc(MAX_WAIT_BUFS);
	}
	end = sys_timepoint_calc(timeout);

	/* Register the callback before sending in order to receive the response
	 * from the peer.
	 */
	status = net_context_recv(ctx, zsock_received_cb,
				  K_NO_WAIT, ctx->user_data);
	if (status < 0) {
		errno = -status;
		return -1;
	}

	while (1) {
		if (dest_addr) {
			status = net_context_sendto(ctx, buf, len, dest_addr,
						    addrlen, NULL, timeout,
						    ctx->user_data);
		} else {
			status = net_context_send(ctx, buf, len, NULL, timeout,
						  ctx->user_data);
		}

		if (status < 0) {
			status = send_check_and_wait(ctx, status, buf_timeout,
						     timeout, &retry_timeout);
			if (status < 0) {
				return status;
			}

			/* Update the timeout value in case the loop is repeated. */
			timeout = sys_timepoint_timeout(end);

			continue;
		}

		break;
	}

	return status;
}

ssize_t zsock_sendmsg_ctx(struct net_context *ctx, const struct msghdr *msg,
			  int flags)
{
	k_timeout_t timeout = K_FOREVER;
	uint32_t retry_timeout = WAIT_BUFS_INITIAL_MS;
	k_timepoint_t buf_timeout, end;
	int status;

	if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
		timeout = K_NO_WAIT;
		buf_timeout = sys_timepoint_calc(K_NO_WAIT);
	} else {
		net_context_get_option(ctx, NET_OPT_SNDTIMEO, &timeout, NULL);
		buf_timeout = sys_timepoint_calc(MAX_WAIT_BUFS);
	}
	end = sys_timepoint_calc(timeout);

	while (1) {
		status = net_context_sendmsg(ctx, msg, flags, NULL, timeout, NULL);
		if (status < 0) {
			status = send_check_and_wait(ctx, status,
						     buf_timeout,
						     timeout, &retry_timeout);
			if (status < 0) {
				return status;
			}

			/* Update the timeout value in case the loop is repeated. */
			timeout = sys_timepoint_timeout(end);

			continue;
		}

		break;
	}

	return status;
}

static int sock_get_pkt_src_addr(struct net_pkt *pkt,
				 enum net_ip_protocol proto,
				 struct sockaddr *addr,
				 socklen_t addrlen)
{
	int ret = 0;
	struct net_pkt_cursor backup;
	uint16_t *port;

	if (!addr || !pkt) {
		return -EINVAL;
	}

	net_pkt_cursor_backup(pkt, &backup);
	net_pkt_cursor_init(pkt);

	addr->sa_family = net_pkt_family(pkt);

	if (IS_ENABLED(CONFIG_NET_IPV4) &&
	    net_pkt_family(pkt) == AF_INET) {
		NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access,
						      struct net_ipv4_hdr);
		struct sockaddr_in *addr4 = net_sin(addr);
		struct net_ipv4_hdr *ipv4_hdr;

		if (addrlen < sizeof(struct sockaddr_in)) {
			ret = -EINVAL;
			goto error;
		}

		ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(
			pkt, &ipv4_access);
		if (!ipv4_hdr ||
		    net_pkt_acknowledge_data(pkt, &ipv4_access) ||
		    net_pkt_skip(pkt, net_pkt_ipv4_opts_len(pkt))) {
			ret = -ENOBUFS;
			goto error;
		}

		net_ipv4_addr_copy_raw((uint8_t *)&addr4->sin_addr, ipv4_hdr->src);
		port = &addr4->sin_port;
	} else if (IS_ENABLED(CONFIG_NET_IPV6) &&
		   net_pkt_family(pkt) == AF_INET6) {
		NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access,
						      struct net_ipv6_hdr);
		struct sockaddr_in6 *addr6 = net_sin6(addr);
		struct net_ipv6_hdr *ipv6_hdr;

		if (addrlen < sizeof(struct sockaddr_in6)) {
			ret = -EINVAL;
			goto error;
		}

		ipv6_hdr = (struct net_ipv6_hdr *)net_pkt_get_data(
			pkt, &ipv6_access);
		if (!ipv6_hdr ||
		    net_pkt_acknowledge_data(pkt, &ipv6_access) ||
		    net_pkt_skip(pkt, net_pkt_ipv6_ext_len(pkt))) {
			ret = -ENOBUFS;
			goto error;
		}

		net_ipv6_addr_copy_raw((uint8_t *)&addr6->sin6_addr, ipv6_hdr->src);
		port = &addr6->sin6_port;
	} else {
		ret = -ENOTSUP;
		goto error;
	}

	if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) {
		NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr);
		struct net_udp_hdr *udp_hdr;

		udp_hdr = (struct net_udp_hdr *)net_pkt_get_data(pkt,
								 &udp_access);
		if (!udp_hdr) {
			ret = -ENOBUFS;
			goto error;
		}

		*port = udp_hdr->src_port;
	} else if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) {
		NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr);
		struct net_tcp_hdr *tcp_hdr;

		tcp_hdr = (struct net_tcp_hdr *)net_pkt_get_data(pkt,
								 &tcp_access);
		if (!tcp_hdr) {
			ret = -ENOBUFS;
			goto error;
		}

		*port = tcp_hdr->src_port;
	} else {
		ret = -ENOTSUP;
	}

error:
	net_pkt_cursor_restore(pkt, &backup);

	return ret;
}

#if defined(CONFIG_NET_OFFLOAD)
static bool net_pkt_remote_addr_is_unspecified(struct net_pkt *pkt)
{
	bool ret = true;

	if (net_pkt_family(pkt) == AF_INET) {
		ret = net_ipv4_is_addr_unspecified(&net_sin(&pkt->remote)->sin_addr);
	} else if (net_pkt_family(pkt) == AF_INET6) {
		ret = net_ipv6_is_addr_unspecified(&net_sin6(&pkt->remote)->sin6_addr);
	}

	return ret;
}

static int sock_get_offload_pkt_src_addr(struct net_pkt *pkt,
					 struct net_context *ctx,
					 struct sockaddr *addr,
					 socklen_t addrlen)
{
	int ret = 0;

	if (!addr || !pkt) {
		return -EINVAL;
	}

	if (!net_pkt_remote_addr_is_unspecified(pkt)) {
		if (IS_ENABLED(CONFIG_NET_IPV4) &&
		    net_pkt_family(pkt) == AF_INET) {
			if (addrlen < sizeof(struct sockaddr_in)) {
				ret = -EINVAL;
				goto error;
			}

			memcpy(addr, &pkt->remote, sizeof(struct sockaddr_in));
		} else if (IS_ENABLED(CONFIG_NET_IPV6) &&
			   net_pkt_family(pkt) == AF_INET6) {
			if (addrlen < sizeof(struct sockaddr_in6)) {
				ret = -EINVAL;
				goto error;
			}

			memcpy(addr, &pkt->remote, sizeof(struct sockaddr_in6));
		}
	} else if (ctx->flags & NET_CONTEXT_REMOTE_ADDR_SET) {
		memcpy(addr, &ctx->remote, MIN(addrlen, sizeof(ctx->remote)));
	} else {
		ret = -ENOTSUP;
	}

error:
	return ret;
}
#else
static int sock_get_offload_pkt_src_addr(struct net_pkt *pkt,
					 struct net_context *ctx,
					 struct sockaddr *addr,
					 socklen_t addrlen)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(ctx);
	ARG_UNUSED(addr);
	ARG_UNUSED(addrlen);

	return 0;
}
#endif /* CONFIG_NET_OFFLOAD */

void net_socket_update_tc_rx_time(struct net_pkt *pkt, uint32_t end_tick)
{
	net_pkt_set_rx_stats_tick(pkt, end_tick);

	net_stats_update_tc_rx_time(net_pkt_iface(pkt),
				    net_pkt_priority(pkt),
				    net_pkt_create_time(pkt),
				    end_tick);

	SYS_PORT_TRACING_FUNC(net, rx_time, pkt, end_tick);

	if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)) {
		uint32_t val, prev = net_pkt_create_time(pkt);
		int i;

		for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
			if (!net_pkt_stats_tick(pkt)[i]) {
				break;
			}

			val = net_pkt_stats_tick(pkt)[i] - prev;
			prev = net_pkt_stats_tick(pkt)[i];
			net_pkt_stats_tick(pkt)[i] = val;
		}

		net_stats_update_tc_rx_time_detail(
			net_pkt_iface(pkt),
			net_pkt_priority(pkt),
			net_pkt_stats_tick(pkt));
	}
}
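
/* Worked example (illustrative): with a create time of 100 and recorded
 * ticks {120, 150, 180}, the loop above rewrites the array in place to the
 * per-stage deltas {20, 30, 30} before the detailed stats are reported.
 */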

int zsock_wait_data(struct net_context *ctx, k_timeout_t *timeout)
{
	int ret;

	if (ctx->cond.lock == NULL) {
		/* For some reason the lock pointer is not set properly
		 * when called by fdtable.c:zvfs_finalize_fd().
		 * It is not practical to try to figure out the fdtable
		 * lock at this point so skip it.
		 */
		NET_WARN("No lock pointer set for context %p", ctx);
		return -EINVAL;
	}

	if (k_fifo_is_empty(&ctx->recv_q)) {
		/* Wait for the data to arrive but without holding a lock */
		ret = k_condvar_wait(&ctx->cond.recv, ctx->cond.lock,
				     *timeout);
		if (ret < 0) {
			return ret;
		}

		if (sock_is_error(ctx)) {
			return -POINTER_TO_INT(ctx->user_data);
		}
	}

	return 0;
}

static int insert_pktinfo(struct msghdr *msg, int level, int type,
			  void *pktinfo, size_t pktinfo_len)
{
	struct cmsghdr *cmsg;

	if (msg->msg_controllen < pktinfo_len) {
		return -EINVAL;
	}

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_len == 0) {
			break;
		}
	}

	if (cmsg == NULL) {
		return -EINVAL;
	}

	cmsg->cmsg_len = CMSG_LEN(pktinfo_len);
	cmsg->cmsg_level = level;
	cmsg->cmsg_type = type;

	memcpy(CMSG_DATA(cmsg), pktinfo, pktinfo_len);

	return 0;
}
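
/* Illustrative sketch (not part of the original file): how a control
 * message inserted above is consumed on the application side, assuming
 * IP_PKTINFO delivery was enabled and `msg` was filled by zsock_recvmsg():
 *
 *	struct cmsghdr *cm;
 *
 *	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL; cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_PKTINFO) {
 *			struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(cm);
 *			// pi->ipi_addr is the packet's destination address
 *		}
 *	}
 */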

static int add_timestamping(struct net_context *ctx,
			    struct net_pkt *pkt,
			    struct msghdr *msg)
{
	uint8_t timestamping = 0;

	net_context_get_option(ctx, NET_OPT_TIMESTAMPING, &timestamping, NULL);

	if (timestamping) {
		return insert_pktinfo(msg, SOL_SOCKET, SO_TIMESTAMPING,
				      net_pkt_timestamp(pkt), sizeof(struct net_ptp_time));
	}

	return -ENOTSUP;
}

static int add_pktinfo(struct net_context *ctx,
		       struct net_pkt *pkt,
		       struct msghdr *msg)
{
	int ret = -ENOTSUP;
	struct net_pkt_cursor backup;

	net_pkt_cursor_backup(pkt, &backup);
	net_pkt_cursor_init(pkt);

	if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
		NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access,
						      struct net_ipv4_hdr);
		struct in_pktinfo info;
		struct net_ipv4_hdr *ipv4_hdr;

		ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(
			pkt, &ipv4_access);
		if (ipv4_hdr == NULL ||
		    net_pkt_acknowledge_data(pkt, &ipv4_access) ||
		    net_pkt_skip(pkt, net_pkt_ipv4_opts_len(pkt))) {
			ret = -ENOBUFS;
			goto out;
		}

		net_ipv4_addr_copy_raw((uint8_t *)&info.ipi_addr, ipv4_hdr->dst);
		net_ipv4_addr_copy_raw((uint8_t *)&info.ipi_spec_dst,
				       (uint8_t *)net_sin_ptr(&ctx->local)->sin_addr);
		info.ipi_ifindex = ctx->iface;

		ret = insert_pktinfo(msg, IPPROTO_IP, IP_PKTINFO,
				     &info, sizeof(info));

		goto out;
	}

	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
		NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access,
						      struct net_ipv6_hdr);
		struct in6_pktinfo info;
		struct net_ipv6_hdr *ipv6_hdr;

		ipv6_hdr = (struct net_ipv6_hdr *)net_pkt_get_data(
			pkt, &ipv6_access);
		if (ipv6_hdr == NULL ||
		    net_pkt_acknowledge_data(pkt, &ipv6_access) ||
		    net_pkt_skip(pkt, net_pkt_ipv6_ext_len(pkt))) {
			ret = -ENOBUFS;
			goto out;
		}

		net_ipv6_addr_copy_raw((uint8_t *)&info.ipi6_addr, ipv6_hdr->dst);
		info.ipi6_ifindex = ctx->iface;

		ret = insert_pktinfo(msg, IPPROTO_IPV6, IPV6_RECVPKTINFO,
				     &info, sizeof(info));

		goto out;
	}

out:
	net_pkt_cursor_restore(pkt, &backup);

	return ret;
}

static int update_msg_controllen(struct msghdr *msg)
{
	struct cmsghdr *cmsg;
	size_t cmsg_space = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_len == 0) {
			break;
		}
		cmsg_space += cmsg->cmsg_len;
	}
	msg->msg_controllen = cmsg_space;

	return 0;
}

static inline ssize_t zsock_recv_dgram(struct net_context *ctx,
				       struct msghdr *msg,
				       void *buf,
				       size_t max_len,
				       int flags,
				       struct sockaddr *src_addr,
				       socklen_t *addrlen)
{
	k_timeout_t timeout = K_FOREVER;
	size_t recv_len = 0;
	size_t read_len;
	struct net_pkt_cursor backup;
	struct net_pkt *pkt;

	if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
		timeout = K_NO_WAIT;
	} else {
		int ret;

		net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL);

		ret = zsock_wait_data(ctx, &timeout);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
	}

	if (flags & ZSOCK_MSG_PEEK) {
		int res;

		res = fifo_wait_non_empty(&ctx->recv_q, timeout);
		/* EAGAIN when the timeout expired, EINTR when cancelled */
		if (res && res != -EAGAIN && res != -EINTR) {
			errno = -res;
			return -1;
		}

		pkt = k_fifo_peek_head(&ctx->recv_q);
	} else {
		pkt = k_fifo_get(&ctx->recv_q, timeout);
	}

	if (!pkt) {
		errno = EAGAIN;
		return -1;
	}

	net_pkt_cursor_backup(pkt, &backup);

	if (src_addr && addrlen) {
		if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
		    net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
			int ret;

			ret = sock_get_offload_pkt_src_addr(pkt, ctx, src_addr,
							    *addrlen);
			if (ret < 0) {
				errno = -ret;
				NET_DBG("sock_get_offload_pkt_src_addr %d", ret);
				goto fail;
			}
		} else {
			int ret;

			ret = sock_get_pkt_src_addr(pkt, net_context_get_proto(ctx),
						    src_addr, *addrlen);
			if (ret < 0) {
				errno = -ret;
				NET_DBG("sock_get_pkt_src_addr %d", ret);
				goto fail;
			}
		}

		/* addrlen is a value-result argument, set to the actual
		 * size of the source address
		 */
		if (src_addr->sa_family == AF_INET) {
			*addrlen = sizeof(struct sockaddr_in);
		} else if (src_addr->sa_family == AF_INET6) {
			*addrlen = sizeof(struct sockaddr_in6);
		} else {
			errno = ENOTSUP;
			goto fail;
		}
	}

	if (msg != NULL) {
		int iovec = 0;
		size_t tmp_read_len;

		if (msg->msg_iovlen < 1 || msg->msg_iov == NULL) {
			errno = ENOMEM;
			return -1;
		}

		recv_len = net_pkt_remaining_data(pkt);
		tmp_read_len = read_len = MIN(recv_len, max_len);

		while (tmp_read_len > 0) {
			size_t len;

			buf = msg->msg_iov[iovec].iov_base;
			if (buf == NULL) {
				errno = EINVAL;
				return -1;
			}

			len = MIN(tmp_read_len, msg->msg_iov[iovec].iov_len);

			if (net_pkt_read(pkt, buf, len)) {
				errno = ENOBUFS;
				goto fail;
			}

			if (len <= tmp_read_len) {
				tmp_read_len -= len;
				msg->msg_iov[iovec].iov_len = len;
				iovec++;
			} else {
				errno = EINVAL;
				return -1;
			}
		}

		msg->msg_iovlen = iovec;

		if (recv_len != read_len) {
			msg->msg_flags |= ZSOCK_MSG_TRUNC;
		}

	} else {
		recv_len = net_pkt_remaining_data(pkt);
		read_len = MIN(recv_len, max_len);

		if (net_pkt_read(pkt, buf, read_len)) {
			errno = ENOBUFS;
			goto fail;
		}
	}

	if (msg != NULL) {
		if (msg->msg_control != NULL) {
			if (msg->msg_controllen > 0) {
				if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING) &&
				    net_context_is_timestamping_set(ctx)) {
					if (add_timestamping(ctx, pkt, msg) < 0) {
						msg->msg_flags |= ZSOCK_MSG_CTRUNC;
					}
				}

				if (IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO) &&
				    net_context_is_recv_pktinfo_set(ctx)) {
					if (add_pktinfo(ctx, pkt, msg) < 0) {
						msg->msg_flags |= ZSOCK_MSG_CTRUNC;
					}
				}

				/* msg_controllen must be updated to reflect the total
				 * length of all control messages in the buffer. If there
				 * is no control data, msg_controllen is cleared as
				 * expected. Pre-existing control data is also taken
				 * into account.
				 */
				update_msg_controllen(msg);
			}
		} else {
			msg->msg_controllen = 0U;
		}
	}

	if ((IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
	     IS_ENABLED(CONFIG_TRACING_NET_CORE)) &&
	    !(flags & ZSOCK_MSG_PEEK)) {
		net_socket_update_tc_rx_time(pkt, k_cycle_get_32());
	}

	if (!(flags & ZSOCK_MSG_PEEK)) {
		net_pkt_unref(pkt);
	} else {
		net_pkt_cursor_restore(pkt, &backup);
	}

	return (flags & ZSOCK_MSG_TRUNC) ? recv_len : read_len;

fail:
	if (!(flags & ZSOCK_MSG_PEEK)) {
		net_pkt_unref(pkt);
	}

	return -1;
}
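
/* Illustrative sketch (not part of the original file): with ZSOCK_MSG_TRUNC
 * the return value above is the full datagram length rather than the number
 * of bytes copied, so truncation can be detected:
 *
 *	ssize_t n = zsock_recv(fd, buf, sizeof(buf), ZSOCK_MSG_TRUNC);
 *
 *	if (n > (ssize_t)sizeof(buf)) {
 *		// the datagram was truncated to sizeof(buf) bytes
 *	}
 */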

static size_t zsock_recv_stream_immediate(struct net_context *ctx, uint8_t **buf, size_t *max_len,
					  int flags)
{
	size_t len;
	size_t pkt_len;
	size_t recv_len = 0;
	struct net_pkt *pkt;
	struct net_pkt_cursor backup;
	struct net_pkt *origin = NULL;
	const bool do_recv = !(buf == NULL || max_len == NULL);
	size_t _max_len = (max_len == NULL) ? SIZE_MAX : *max_len;
	const bool peek = (flags & ZSOCK_MSG_PEEK) == ZSOCK_MSG_PEEK;

	while (_max_len > 0) {
		/* only peek until we know we can dequeue and / or requeue the buffer */
		pkt = k_fifo_peek_head(&ctx->recv_q);
		if (pkt == NULL || pkt == origin) {
			break;
		}

		if (origin == NULL) {
			/* mark the first pkt to avoid cycles when observing */
			origin = pkt;
		}

		pkt_len = net_pkt_remaining_data(pkt);
		len = MIN(_max_len, pkt_len);
		recv_len += len;
		_max_len -= len;

		if (do_recv && len > 0) {
			if (peek) {
				net_pkt_cursor_backup(pkt, &backup);
			}

			net_pkt_read(pkt, *buf, len);
			/* update the buffer position for the caller */
			*buf += len;

			if (peek) {
				net_pkt_cursor_restore(pkt, &backup);
			}
		}

		if (do_recv && !peek) {
			if (len == pkt_len) {
				/* dequeue empty packets when not observing */
				pkt = k_fifo_get(&ctx->recv_q, K_NO_WAIT);
				if (net_pkt_eof(pkt)) {
					sock_set_eof(ctx);
				}

				if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
				    IS_ENABLED(CONFIG_TRACING_NET_CORE)) {
					net_socket_update_tc_rx_time(pkt, k_cycle_get_32());
				}

				net_pkt_unref(pkt);
			}
		} else if (!do_recv || peek) {
			/* requeue packets when observing */
			k_fifo_put(&ctx->recv_q, k_fifo_get(&ctx->recv_q, K_NO_WAIT));
		}
	}

	if (do_recv) {
		/* convey the remaining buffer size back to the caller */
		*max_len = _max_len;
	}

	return recv_len;
}

static int zsock_fionread_ctx(struct net_context *ctx)
{
	size_t ret = zsock_recv_stream_immediate(ctx, NULL, NULL, 0);

	return MIN(ret, INT_MAX);
}

static ssize_t zsock_recv_stream_timed(struct net_context *ctx, struct msghdr *msg,
				       uint8_t *buf, size_t max_len,
				       int flags, k_timeout_t timeout)
{
	int res;
	k_timepoint_t end;
	size_t recv_len = 0, iovec = 0, available_len, max_iovlen = 0;
	const bool waitall = (flags & ZSOCK_MSG_WAITALL) == ZSOCK_MSG_WAITALL;

	if (msg != NULL && buf == NULL) {
		if (msg->msg_iovlen < 1) {
			return -EINVAL;
		}

		buf = msg->msg_iov[iovec].iov_base;
		available_len = msg->msg_iov[iovec].iov_len;
		msg->msg_iov[iovec].iov_len = 0;
		max_iovlen = msg->msg_iovlen;
	}

	for (end = sys_timepoint_calc(timeout); max_len > 0; timeout = sys_timepoint_timeout(end)) {

		if (sock_is_error(ctx)) {
			return -POINTER_TO_INT(ctx->user_data);
		}

		if (sock_is_eof(ctx)) {
			return 0;
		}

		if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
			res = zsock_wait_data(ctx, &timeout);
			if (res < 0) {
				return res;
			}
		}

		if (msg != NULL) {
again:
			res = zsock_recv_stream_immediate(ctx, &buf, &available_len, flags);
			recv_len += res;

			if (res == 0 && recv_len == 0 && K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
				return -EAGAIN;
			}

			msg->msg_iov[iovec].iov_len += res;
			buf = (uint8_t *)(msg->msg_iov[iovec].iov_base) + res;
			max_len -= res;

			if (available_len == 0) {
				/* All data for this iovec was written */
				iovec++;

				if (iovec == max_iovlen) {
					break;
				}

				msg->msg_iovlen = iovec;
				buf = msg->msg_iov[iovec].iov_base;
				available_len = msg->msg_iov[iovec].iov_len;
				msg->msg_iov[iovec].iov_len = 0;

				/* If there is more data, read it now and do not wait */
				if (buf != NULL && available_len > 0) {
					goto again;
				}

				continue;
			}

		} else {
			res = zsock_recv_stream_immediate(ctx, &buf, &max_len, flags);
			recv_len += res;

			if (res == 0) {
				if (recv_len == 0 && K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
					return -EAGAIN;
				}
			}
		}

		if (!waitall) {
			break;
		}
	}

	return recv_len;
}

static ssize_t zsock_recv_stream(struct net_context *ctx, struct msghdr *msg,
				 void *buf, size_t max_len, int flags)
{
	ssize_t res;
	size_t recv_len = 0;
	k_timeout_t timeout = K_FOREVER;

	if (!net_context_is_used(ctx)) {
		errno = EBADF;
		return -1;
	}

	if (net_context_get_state(ctx) != NET_CONTEXT_CONNECTED) {
		errno = ENOTCONN;
		return -1;
	}

	if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
		timeout = K_NO_WAIT;
	} else if (!sock_is_eof(ctx) && !sock_is_error(ctx)) {
		net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL);
	}

	if (max_len == 0) {
		/* no bytes requested - done! */
		return 0;
	}

	res = zsock_recv_stream_timed(ctx, msg, buf, max_len, flags, timeout);
	recv_len += MAX(0, res);

	if (res < 0) {
		errno = -res;
		return -1;
	}

	if (!(flags & ZSOCK_MSG_PEEK)) {
		net_context_update_recv_wnd(ctx, recv_len);
	}

	return recv_len;
}
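
/* Illustrative sketch (not part of the original file): ZSOCK_MSG_WAITALL
 * keeps the timed loop above running until the buffer is full, EOF is
 * reached, or the receive timeout expires:
 *
 *	ssize_t n = zsock_recv(fd, buf, sizeof(buf), ZSOCK_MSG_WAITALL);
 */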

ssize_t zsock_recvfrom_ctx(struct net_context *ctx, void *buf, size_t max_len,
			   int flags,
			   struct sockaddr *src_addr, socklen_t *addrlen)
{
	enum net_sock_type sock_type = net_context_get_type(ctx);

	if (max_len == 0) {
		return 0;
	}

	if (sock_type == SOCK_DGRAM) {
		return zsock_recv_dgram(ctx, NULL, buf, max_len, flags, src_addr, addrlen);
	} else if (sock_type == SOCK_STREAM) {
		return zsock_recv_stream(ctx, NULL, buf, max_len, flags);
	}

	__ASSERT(0, "Unknown socket type");

	errno = ENOTSUP;

	return -1;
}

ssize_t zsock_recvmsg_ctx(struct net_context *ctx, struct msghdr *msg,
			  int flags)
{
	enum net_sock_type sock_type = net_context_get_type(ctx);
	size_t i, max_len = 0;

	if (msg == NULL) {
		errno = EINVAL;
		return -1;
	}

	if (msg->msg_iov == NULL) {
		errno = ENOMEM;
		return -1;
	}

	for (i = 0; i < msg->msg_iovlen; i++) {
		max_len += msg->msg_iov[i].iov_len;
	}

	if (sock_type == SOCK_DGRAM) {
		return zsock_recv_dgram(ctx, msg, NULL, max_len, flags,
					msg->msg_name, &msg->msg_namelen);
	} else if (sock_type == SOCK_STREAM) {
		return zsock_recv_stream(ctx, msg, NULL, max_len, flags);
	}

	__ASSERT(0, "Unknown socket type");

	errno = ENOTSUP;

	return -1;
}
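
/* Illustrative sketch (not part of the original file): scatter input into
 * two buffers; on return, msg_iovlen and the iov_len fields reflect what
 * was actually filled:
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = hdr,  .iov_len = sizeof(hdr)  },
 *		{ .iov_base = body, .iov_len = sizeof(body) },
 *	};
 *	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };
 *
 *	ssize_t n = zsock_recvmsg(fd, &msg, 0);
 */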

static int zsock_poll_prepare_ctx(struct net_context *ctx,
				  struct zsock_pollfd *pfd,
				  struct k_poll_event **pev,
				  struct k_poll_event *pev_end)
{
	if (pfd->events & ZSOCK_POLLIN) {
		if (*pev == pev_end) {
			return -ENOMEM;
		}

		(*pev)->obj = &ctx->recv_q;
		(*pev)->type = K_POLL_TYPE_FIFO_DATA_AVAILABLE;
		(*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
		(*pev)->state = K_POLL_STATE_NOT_READY;
		(*pev)++;
	}

	if (pfd->events & ZSOCK_POLLOUT) {
		if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) &&
		    net_context_get_type(ctx) == SOCK_STREAM &&
		    !net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
			if (*pev == pev_end) {
				return -ENOMEM;
			}

			if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTING) {
				(*pev)->obj = net_tcp_conn_sem_get(ctx);
			} else {
				(*pev)->obj = net_tcp_tx_sem_get(ctx);
			}

			(*pev)->type = K_POLL_TYPE_SEM_AVAILABLE;
			(*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
			(*pev)->state = K_POLL_STATE_NOT_READY;
			(*pev)++;
		} else {
			return -EALREADY;
		}
	}

	/* If the socket is already at EOF or in an error state, this can be
	 * reported immediately, so we tell poll() to short-circuit the wait.
	 */
	if (sock_is_eof(ctx) || sock_is_error(ctx)) {
		return -EALREADY;
	}

	return 0;
}

static int zsock_poll_update_ctx(struct net_context *ctx,
				 struct zsock_pollfd *pfd,
				 struct k_poll_event **pev)
{
	if (pfd->events & ZSOCK_POLLIN) {
		if ((*pev)->state != K_POLL_STATE_NOT_READY || sock_is_eof(ctx)) {
			pfd->revents |= ZSOCK_POLLIN;
		}
		(*pev)++;
	}
	if (pfd->events & ZSOCK_POLLOUT) {
		if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) &&
		    net_context_get_type(ctx) == SOCK_STREAM &&
		    !net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
			if ((*pev)->state != K_POLL_STATE_NOT_READY &&
			    !sock_is_eof(ctx) &&
			    (net_context_get_state(ctx) == NET_CONTEXT_CONNECTED)) {
				pfd->revents |= ZSOCK_POLLOUT;
			}
			(*pev)++;
		} else {
			pfd->revents |= ZSOCK_POLLOUT;
		}
	}

	if (sock_is_error(ctx)) {
		pfd->revents |= ZSOCK_POLLERR;
	}

	if (sock_is_eof(ctx)) {
		pfd->revents |= ZSOCK_POLLHUP;
	}

	return 0;
}

static enum tcp_conn_option get_tcp_option(int optname)
{
	switch (optname) {
	case TCP_KEEPIDLE:
		return TCP_OPT_KEEPIDLE;
	case TCP_KEEPINTVL:
		return TCP_OPT_KEEPINTVL;
	case TCP_KEEPCNT:
		return TCP_OPT_KEEPCNT;
	}

	return -EINVAL;
}
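
/* Illustrative sketch (not part of the original file): configuring TCP
 * keepalive from the application, assuming CONFIG_NET_TCP_KEEPALIVE=y:
 *
 *	int on = 1, idle = 60, intvl = 5, cnt = 3;
 *
 *	(void)zsock_setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	(void)zsock_setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	(void)zsock_setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	(void)zsock_setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */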

static int ipv4_multicast_if(struct net_context *ctx, const void *optval,
			     socklen_t optlen, bool do_get)
{
	struct net_if *iface = NULL;
	int ifindex, ret;

	if (do_get) {
		struct net_if_addr *ifaddr;
		size_t len = sizeof(ifindex);

		if (optval == NULL || (optlen != sizeof(struct in_addr))) {
			errno = EINVAL;
			return -1;
		}

		ret = net_context_get_option(ctx, NET_OPT_MCAST_IFINDEX,
					     &ifindex, &len);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}

		if (ifindex == 0) {
			/* No interface set */
			((struct in_addr *)optval)->s_addr = INADDR_ANY;
			return 0;
		}

		ifaddr = net_if_ipv4_addr_get_first_by_index(ifindex);
		if (ifaddr == NULL) {
			errno = ENOENT;
			return -1;
		}

		net_ipaddr_copy((struct in_addr *)optval, &ifaddr->address.in_addr);

		return 0;
	}

	/* setsockopt() can accept either struct ip_mreqn or
	 * struct ip_mreq. We need to handle both cases.
	 */
	if (optval == NULL || (optlen != sizeof(struct ip_mreqn) &&
			       optlen != sizeof(struct ip_mreq))) {
		errno = EINVAL;
		return -1;
	}

	if (optlen == sizeof(struct ip_mreqn)) {
		struct ip_mreqn *mreqn = (struct ip_mreqn *)optval;

		if (mreqn->imr_ifindex != 0) {
			iface = net_if_get_by_index(mreqn->imr_ifindex);

		} else if (mreqn->imr_address.s_addr != INADDR_ANY) {
			struct net_if_addr *ifaddr;

			ifaddr = net_if_ipv4_addr_lookup(&mreqn->imr_address, &iface);
			if (ifaddr == NULL) {
				errno = ENOENT;
				return -1;
			}
		}
	} else {
		struct ip_mreq *mreq = (struct ip_mreq *)optval;

		if (mreq->imr_interface.s_addr != INADDR_ANY) {
			struct net_if_addr *ifaddr;

			ifaddr = net_if_ipv4_addr_lookup(&mreq->imr_interface, &iface);
			if (ifaddr == NULL) {
				errno = ENOENT;
				return -1;
			}
		}
	}

	if (iface == NULL) {
		ifindex = 0;
	} else {
		ifindex = net_if_get_by_iface(iface);
	}

	ret = net_context_set_option(ctx, NET_OPT_MCAST_IFINDEX,
				     &ifindex, sizeof(ifindex));
	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	return 0;
}
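
/* Illustrative sketch (not part of the original file): selecting the IPv4
 * multicast egress interface by index using the struct ip_mreqn form that
 * the handler above accepts:
 *
 *	struct ip_mreqn mreqn = { .imr_ifindex = 1 };
 *
 *	(void)zsock_setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF,
 *			       &mreqn, sizeof(mreqn));
 */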

int zsock_getsockopt_ctx(struct net_context *ctx, int level, int optname,
			 void *optval, socklen_t *optlen)
{
	int ret;

	switch (level) {
	case SOL_SOCKET:
		switch (optname) {
		case SO_ERROR: {
			if (*optlen != sizeof(int)) {
				errno = EINVAL;
				return -1;
			}

			*(int *)optval = POINTER_TO_INT(ctx->user_data);

			return 0;
		}

		case SO_TYPE: {
			int type = (int)net_context_get_type(ctx);

			if (*optlen != sizeof(type)) {
				errno = EINVAL;
				return -1;
			}

			*(int *)optval = type;

			return 0;
		}

		case SO_TXTIME:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_TXTIME)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_TXTIME,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}
			break;

		case SO_PROTOCOL: {
			int proto = (int)net_context_get_proto(ctx);

			if (*optlen != sizeof(proto)) {
				errno = EINVAL;
				return -1;
			}

			*(int *)optval = proto;

			return 0;
		}

		case SO_DOMAIN: {
			if (*optlen != sizeof(int)) {
				errno = EINVAL;
				return -1;
			}

			*(int *)optval = net_context_get_family(ctx);

			return 0;
		}

		break;

		case SO_RCVBUF:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVBUF)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_RCVBUF,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}
			break;

		case SO_SNDBUF:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDBUF)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_SNDBUF,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}
			break;

		case SO_REUSEADDR:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEADDR)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_REUSEADDR,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}
			break;

		case SO_REUSEPORT:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEPORT)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_REUSEPORT,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}
			break;

		case SO_KEEPALIVE:
			if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE) &&
			    net_context_get_proto(ctx) == IPPROTO_TCP) {
				ret = net_tcp_get_option(ctx,
							 TCP_OPT_KEEPALIVE,
							 optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_TIMESTAMPING:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_TIMESTAMPING,
							     optval, optlen);

				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}

		break;

	case IPPROTO_TCP:
		switch (optname) {
		case TCP_NODELAY:
			ret = net_tcp_get_option(ctx, TCP_OPT_NODELAY, optval, optlen);
			return ret;

		case TCP_KEEPIDLE:
			__fallthrough;
		case TCP_KEEPINTVL:
			__fallthrough;
		case TCP_KEEPCNT:
			if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE)) {
				ret = net_tcp_get_option(ctx,
							 get_tcp_option(optname),
							 optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}

		break;

	case IPPROTO_IP:
		switch (optname) {
		case IP_TOS:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_DSCP_ECN,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IP_TTL:
			ret = net_context_get_option(ctx, NET_OPT_TTL,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IP_MULTICAST_IF:
			if (IS_ENABLED(CONFIG_NET_IPV4)) {
				if (net_context_get_family(ctx) != AF_INET) {
					errno = EAFNOSUPPORT;
					return -1;
				}

				return ipv4_multicast_if(ctx, optval, *optlen, true);
			}

			break;

		case IP_MULTICAST_TTL:
			ret = net_context_get_option(ctx, NET_OPT_MCAST_TTL,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IP_MTU:
			if (IS_ENABLED(CONFIG_NET_IPV4)) {
				ret = net_context_get_option(ctx, NET_OPT_MTU,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IP_LOCAL_PORT_RANGE:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_CLAMP_PORT_RANGE)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_LOCAL_PORT_RANGE,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}

		break;

	case IPPROTO_IPV6:
		switch (optname) {
		case IPV6_MTU:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				ret = net_context_get_option(ctx, NET_OPT_MTU,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_V6ONLY:
			if (IS_ENABLED(CONFIG_NET_IPV4_MAPPING_TO_IPV6)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_IPV6_V6ONLY,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_ADDR_PREFERENCES:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_ADDR_PREFERENCES,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_TCLASS:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_DSCP_ECN,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_UNICAST_HOPS:
			ret = net_context_get_option(ctx,
						     NET_OPT_UNICAST_HOP_LIMIT,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IPV6_MULTICAST_IF:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				if (net_context_get_family(ctx) != AF_INET6) {
					errno = EAFNOSUPPORT;
					return -1;
				}

				ret = net_context_get_option(ctx,
							     NET_OPT_MCAST_IFINDEX,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_MULTICAST_HOPS:
			ret = net_context_get_option(ctx,
						     NET_OPT_MCAST_HOP_LIMIT,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;
		}

		break;
	}

	errno = ENOPROTOOPT;
	return -1;
}

static int ipv4_multicast_group(struct net_context *ctx, const void *optval,
				socklen_t optlen, bool do_join)
{
	struct ip_mreqn *mreqn;
	struct net_if *iface;
	int ifindex, ret;

	if (optval == NULL || optlen != sizeof(struct ip_mreqn)) {
		errno = EINVAL;
		return -1;
	}

	mreqn = (struct ip_mreqn *)optval;

	if (mreqn->imr_multiaddr.s_addr == INADDR_ANY) {
		errno = EINVAL;
		return -1;
	}

	if (mreqn->imr_ifindex != 0) {
		iface = net_if_get_by_index(mreqn->imr_ifindex);
	} else {
		ifindex = net_if_ipv4_addr_lookup_by_index(&mreqn->imr_address);
		iface = net_if_get_by_index(ifindex);
	}

	if (iface == NULL) {
		/* Check if the context already has an interface and if not,
		 * then select the default interface.
		 */
		if (ctx->iface <= 0) {
			iface = net_if_get_default();
		} else {
			iface = net_if_get_by_index(ctx->iface);
		}

		if (iface == NULL) {
			errno = EINVAL;
			return -1;
		}
	}

	if (do_join) {
		ret = net_ipv4_igmp_join(iface, &mreqn->imr_multiaddr, NULL);
	} else {
		ret = net_ipv4_igmp_leave(iface, &mreqn->imr_multiaddr);
	}

	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	return 0;
}
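
/* Illustrative sketch (not part of the original file): joining an IPv4
 * multicast group through the handler above; the group address and the
 * IP_ADD_MEMBERSHIP option-name mapping are assumptions for the example:
 *
 *	struct ip_mreqn mreqn = {
 *		.imr_multiaddr.s_addr = htonl(0xE0000001), // 224.0.0.1
 *		.imr_ifindex = 0, // resolve the interface from imr_address
 *	};
 *
 *	(void)zsock_setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
 *			       &mreqn, sizeof(mreqn));
 */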

static int ipv6_multicast_group(struct net_context *ctx, const void *optval,
				socklen_t optlen, bool do_join)
{
	struct ipv6_mreq *mreq;
	struct net_if *iface;
	int ret;

	if (optval == NULL || optlen != sizeof(struct ipv6_mreq)) {
		errno = EINVAL;
		return -1;
	}

	mreq = (struct ipv6_mreq *)optval;

	if (memcmp(&mreq->ipv6mr_multiaddr,
		   net_ipv6_unspecified_address(),
		   sizeof(mreq->ipv6mr_multiaddr)) == 0) {
		errno = EINVAL;
		return -1;
	}

	iface = net_if_get_by_index(mreq->ipv6mr_ifindex);
	if (iface == NULL) {
		/* Check if the context already has an interface and if not,
		 * then select the default interface.
		 */
		if (ctx->iface <= 0) {
			iface = net_if_get_default();
		} else {
			iface = net_if_get_by_index(ctx->iface);
		}

		if (iface == NULL) {
			errno = ENOENT;
			return -1;
		}
	}

	if (do_join) {
		ret = net_ipv6_mld_join(iface, &mreq->ipv6mr_multiaddr);
	} else {
		ret = net_ipv6_mld_leave(iface, &mreq->ipv6mr_multiaddr);
	}

	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	return 0;
}
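
/* Illustrative sketch (not part of the original file): the IPv6 equivalent
 * using struct ipv6_mreq, assuming IPV6_ADD_MEMBERSHIP maps to the join
 * path above:
 *
 *	struct ipv6_mreq mreq = { .ipv6mr_ifindex = 1 };
 *
 *	net_ipv6_addr_create(&mreq.ipv6mr_multiaddr,
 *			     0xff02, 0, 0, 0, 0, 0, 0, 0x0001);
 *	(void)zsock_setsockopt(fd, IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP,
 *			       &mreq, sizeof(mreq));
 */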

int zsock_setsockopt_ctx(struct net_context *ctx, int level, int optname,
			 const void *optval, socklen_t optlen)
{
	int ret;

	switch (level) {
	case SOL_SOCKET:
		switch (optname) {
		case SO_RCVBUF:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVBUF)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_RCVBUF,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_SNDBUF:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDBUF)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_SNDBUF,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_REUSEADDR:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEADDR)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_REUSEADDR,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_REUSEPORT:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEPORT)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_REUSEPORT,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_PRIORITY:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_PRIORITY)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_PRIORITY,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_RCVTIMEO:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVTIMEO)) {
				const struct zsock_timeval *tv = optval;
				k_timeout_t timeout;

				if (optlen != sizeof(struct zsock_timeval)) {
					errno = EINVAL;
					return -1;
				}

				/* A zero timeval disables the timeout,
				 * i.e. reads block indefinitely.
				 */
				if (tv->tv_sec == 0 && tv->tv_usec == 0) {
					timeout = K_FOREVER;
				} else {
					timeout = K_USEC(tv->tv_sec * 1000000ULL
							 + tv->tv_usec);
				}

				ret = net_context_set_option(ctx,
							     NET_OPT_RCVTIMEO,
							     &timeout,
							     sizeof(timeout));
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_SNDTIMEO:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDTIMEO)) {
				const struct zsock_timeval *tv = optval;
				k_timeout_t timeout;

				if (optlen != sizeof(struct zsock_timeval)) {
					errno = EINVAL;
					return -1;
				}

				if (tv->tv_sec == 0 && tv->tv_usec == 0) {
					timeout = K_FOREVER;
				} else {
					timeout = K_USEC(tv->tv_sec * 1000000ULL
							 + tv->tv_usec);
				}

				ret = net_context_set_option(ctx,
							     NET_OPT_SNDTIMEO,
							     &timeout,
							     sizeof(timeout));
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_TXTIME:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_TXTIME)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_TXTIME,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_SOCKS5:
			if (IS_ENABLED(CONFIG_SOCKS)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_SOCKS5,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				net_context_set_proxy_enabled(ctx, true);

				return 0;
			}

			break;

		case SO_BINDTODEVICE: {
			struct net_if *iface;
			const struct ifreq *ifreq = optval;

			if (net_context_get_family(ctx) != AF_INET &&
			    net_context_get_family(ctx) != AF_INET6) {
				errno = EAFNOSUPPORT;
				return -1;
			}

			/* An optlen of 0 or an empty interface name
			 * removes the binding.
			 */
			if ((optlen == 0) || (ifreq != NULL &&
					      strlen(ifreq->ifr_name) == 0)) {
				ctx->flags &= ~NET_CONTEXT_BOUND_TO_IFACE;
				return 0;
			}

			if ((ifreq == NULL) || (optlen != sizeof(*ifreq))) {
				errno = EINVAL;
				return -1;
			}

			if (IS_ENABLED(CONFIG_NET_INTERFACE_NAME)) {
				ret = net_if_get_by_name(ifreq->ifr_name);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				iface = net_if_get_by_index(ret);
				if (iface == NULL) {
					errno = ENODEV;
					return -1;
				}
			} else {
				const struct device *dev;

				dev = device_get_binding(ifreq->ifr_name);
				if (dev == NULL) {
					errno = ENODEV;
					return -1;
				}

				iface = net_if_lookup_by_dev(dev);
				if (iface == NULL) {
					errno = ENODEV;
					return -1;
				}
			}

			net_context_bind_iface(ctx, iface);

			return 0;
		}

		case SO_LINGER:
			/* Ignored, for compatibility purposes only. */
			return 0;

		case SO_KEEPALIVE:
			if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE) &&
			    net_context_get_proto(ctx) == IPPROTO_TCP) {
				ret = net_tcp_set_option(ctx,
							 TCP_OPT_KEEPALIVE,
							 optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_TIMESTAMPING:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_TIMESTAMPING,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}

		break;

	case IPPROTO_TCP:
		switch (optname) {
		case TCP_NODELAY:
			ret = net_tcp_set_option(ctx,
						 TCP_OPT_NODELAY, optval, optlen);
			return ret;

		case TCP_KEEPIDLE:
			__fallthrough;
		case TCP_KEEPINTVL:
			__fallthrough;
		case TCP_KEEPCNT:
			if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE)) {
				ret = net_tcp_set_option(ctx,
							 get_tcp_option(optname),
							 optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}
		break;

	case IPPROTO_IP:
		switch (optname) {
		case IP_TOS:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_DSCP_ECN,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IP_PKTINFO:
			if (IS_ENABLED(CONFIG_NET_IPV4) &&
			    IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_RECV_PKTINFO,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IP_MULTICAST_IF:
			if (IS_ENABLED(CONFIG_NET_IPV4)) {
				return ipv4_multicast_if(ctx, optval, optlen, false);
			}

			break;

		case IP_MULTICAST_TTL:
			ret = net_context_set_option(ctx, NET_OPT_MCAST_TTL,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IP_TTL:
			ret = net_context_set_option(ctx, NET_OPT_TTL,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IP_ADD_MEMBERSHIP:
			if (IS_ENABLED(CONFIG_NET_IPV4)) {
				return ipv4_multicast_group(ctx, optval,
							    optlen, true);
			}

			break;

		case IP_DROP_MEMBERSHIP:
			if (IS_ENABLED(CONFIG_NET_IPV4)) {
				return ipv4_multicast_group(ctx, optval,
							    optlen, false);
			}

			break;

		case IP_LOCAL_PORT_RANGE:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_CLAMP_PORT_RANGE)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_LOCAL_PORT_RANGE,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}

		break;

	case IPPROTO_IPV6:
		switch (optname) {
		case IPV6_MTU:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				ret = net_context_set_option(ctx, NET_OPT_MTU,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_V6ONLY:
			if (IS_ENABLED(CONFIG_NET_IPV4_MAPPING_TO_IPV6)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_IPV6_V6ONLY,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}
			}

			return 0;

		case IPV6_RECVPKTINFO:
			if (IS_ENABLED(CONFIG_NET_IPV6) &&
			    IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_RECV_PKTINFO,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_ADDR_PREFERENCES:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_ADDR_PREFERENCES,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_TCLASS:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_DSCP_ECN,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_UNICAST_HOPS:
			ret = net_context_set_option(ctx,
						     NET_OPT_UNICAST_HOP_LIMIT,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IPV6_MULTICAST_IF:
			ret = net_context_set_option(ctx,
						     NET_OPT_MCAST_IFINDEX,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IPV6_MULTICAST_HOPS:
			ret = net_context_set_option(ctx,
						     NET_OPT_MCAST_HOP_LIMIT,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IPV6_ADD_MEMBERSHIP:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				return ipv6_multicast_group(ctx, optval,
							    optlen, true);
			}

			break;

		case IPV6_DROP_MEMBERSHIP:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				return ipv6_multicast_group(ctx, optval,
							    optlen, false);
			}

			break;
		}

		break;
	}

	errno = ENOPROTOOPT;
	return -1;
}
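
/* Illustrative sketch (guarded out of the build): setting a receive timeout
 * through the SO_RCVTIMEO branch above. Assumes CONFIG_NET_CONTEXT_RCVTIMEO
 * is enabled; the 1.5 s value and function name are examples only.
 */
#if 0
static int example_set_recv_timeout(int sock)
{
	/* Note the convention above: an all-zero timeval means "no timeout"
	 * (K_FOREVER), so a non-zero value must be passed to actually
	 * time out.
	 */
	struct zsock_timeval tv = {
		.tv_sec = 1,
		.tv_usec = 500000,
	};

	return zsock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
				&tv, sizeof(tv));
}
#endif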

int zsock_getpeername_ctx(struct net_context *ctx, struct sockaddr *addr,
			  socklen_t *addrlen)
{
	socklen_t newlen = 0;

	if (addr == NULL || addrlen == NULL) {
		errno = EINVAL;
		return -1;
	}

	if (!(ctx->flags & NET_CONTEXT_REMOTE_ADDR_SET)) {
		errno = ENOTCONN;
		return -1;
	}

	if (net_context_get_type(ctx) == SOCK_STREAM &&
	    net_context_get_state(ctx) != NET_CONTEXT_CONNECTED) {
		errno = ENOTCONN;
		return -1;
	}

	if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->remote.sa_family == AF_INET) {
		struct sockaddr_in addr4 = { 0 };

		addr4.sin_family = AF_INET;
		addr4.sin_port = net_sin(&ctx->remote)->sin_port;
		memcpy(&addr4.sin_addr, &net_sin(&ctx->remote)->sin_addr,
		       sizeof(struct in_addr));
		newlen = sizeof(struct sockaddr_in);

		memcpy(addr, &addr4, MIN(*addrlen, newlen));
	} else if (IS_ENABLED(CONFIG_NET_IPV6) &&
		   ctx->remote.sa_family == AF_INET6) {
		struct sockaddr_in6 addr6 = { 0 };

		addr6.sin6_family = AF_INET6;
		addr6.sin6_port = net_sin6(&ctx->remote)->sin6_port;
		memcpy(&addr6.sin6_addr, &net_sin6(&ctx->remote)->sin6_addr,
		       sizeof(struct in6_addr));
		newlen = sizeof(struct sockaddr_in6);

		memcpy(addr, &addr6, MIN(*addrlen, newlen));
	} else {
		errno = EINVAL;
		return -1;
	}

	*addrlen = newlen;

	return 0;
}

int zsock_getsockname_ctx(struct net_context *ctx, struct sockaddr *addr,
			  socklen_t *addrlen)
{
	socklen_t newlen = 0;
	int ret;

	if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->local.family == AF_INET) {
		struct sockaddr_in addr4 = { 0 };

		if (net_sin_ptr(&ctx->local)->sin_addr == NULL) {
			errno = EINVAL;
			return -1;
		}

		newlen = sizeof(struct sockaddr_in);

		ret = net_context_get_local_addr(ctx,
						 (struct sockaddr *)&addr4,
						 &newlen);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}

		memcpy(addr, &addr4, MIN(*addrlen, newlen));
	} else if (IS_ENABLED(CONFIG_NET_IPV6) && ctx->local.family == AF_INET6) {
		struct sockaddr_in6 addr6 = { 0 };

		if (net_sin6_ptr(&ctx->local)->sin6_addr == NULL) {
			errno = EINVAL;
			return -1;
		}

		newlen = sizeof(struct sockaddr_in6);

		ret = net_context_get_local_addr(ctx,
						 (struct sockaddr *)&addr6,
						 &newlen);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}

		memcpy(addr, &addr6, MIN(*addrlen, newlen));
	} else {
		errno = EINVAL;
		return -1;
	}

	*addrlen = newlen;

	return 0;
}
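
/* Illustrative sketch (guarded out of the build): querying both endpoints of
 * a connected socket. As implemented above, each call copies at most *addrlen
 * bytes but always reports the full address length back, so callers can
 * detect truncation. The function name is an example only.
 */
#if 0
static int example_query_endpoints(int sock)
{
	struct sockaddr_storage local = { 0 };
	struct sockaddr_storage remote = { 0 };
	socklen_t len;

	len = sizeof(local);
	if (zsock_getsockname(sock, (struct sockaddr *)&local, &len) < 0) {
		return -1;
	}

	len = sizeof(remote);
	return zsock_getpeername(sock, (struct sockaddr *)&remote, &len);
}
#endif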

static ssize_t sock_read_vmeth(void *obj, void *buffer, size_t count)
{
	return zsock_recvfrom_ctx(obj, buffer, count, 0, NULL, 0);
}

static ssize_t sock_write_vmeth(void *obj, const void *buffer, size_t count)
{
	return zsock_sendto_ctx(obj, buffer, count, 0, NULL, 0);
}

static void zsock_ctx_set_lock(struct net_context *ctx, struct k_mutex *lock)
{
	ctx->cond.lock = lock;
}

static int sock_ioctl_vmeth(void *obj, unsigned int request, va_list args)
{
	switch (request) {

	/* In Zephyr, fcntl() is just an alias of ioctl(). */
	case F_GETFL:
		if (sock_is_nonblock(obj)) {
			return O_NONBLOCK;
		}

		return 0;

	case F_SETFL: {
		int flags;

		flags = va_arg(args, int);

		if (flags & O_NONBLOCK) {
			sock_set_flag(obj, SOCK_NONBLOCK, SOCK_NONBLOCK);
		} else {
			sock_set_flag(obj, SOCK_NONBLOCK, 0);
		}

		return 0;
	}

	case ZFD_IOCTL_POLL_PREPARE: {
		struct zsock_pollfd *pfd;
		struct k_poll_event **pev;
		struct k_poll_event *pev_end;

		pfd = va_arg(args, struct zsock_pollfd *);
		pev = va_arg(args, struct k_poll_event **);
		pev_end = va_arg(args, struct k_poll_event *);

		return zsock_poll_prepare_ctx(obj, pfd, pev, pev_end);
	}

	case ZFD_IOCTL_POLL_UPDATE: {
		struct zsock_pollfd *pfd;
		struct k_poll_event **pev;

		pfd = va_arg(args, struct zsock_pollfd *);
		pev = va_arg(args, struct k_poll_event **);

		return zsock_poll_update_ctx(obj, pfd, pev);
	}

	case ZFD_IOCTL_SET_LOCK: {
		struct k_mutex *lock;

		lock = va_arg(args, struct k_mutex *);

		zsock_ctx_set_lock(obj, lock);
		return 0;
	}

	case ZFD_IOCTL_FIONBIO:
		sock_set_flag(obj, SOCK_NONBLOCK, SOCK_NONBLOCK);
		return 0;

	case ZFD_IOCTL_FIONREAD: {
		int *avail = va_arg(args, int *);

		*avail = zsock_fionread_ctx(obj);
		return 0;
	}

	default:
		errno = EOPNOTSUPP;
		return -1;
	}
}
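
/* Illustrative sketch (guarded out of the build): the F_GETFL/F_SETFL cases
 * above are what zsock_fcntl() ends up invoking through the fdtable, so an
 * application toggles non-blocking mode like this. The function name is an
 * example only.
 */
#if 0
static int example_set_nonblocking(int sock)
{
	int flags = zsock_fcntl(sock, F_GETFL, 0);

	if (flags < 0) {
		return -1;
	}

	/* O_NONBLOCK maps to the internal SOCK_NONBLOCK flag */
	return zsock_fcntl(sock, F_SETFL, flags | O_NONBLOCK);
}
#endif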

static int sock_shutdown_vmeth(void *obj, int how)
{
	return zsock_shutdown_ctx(obj, how);
}

static int sock_bind_vmeth(void *obj, const struct sockaddr *addr,
			   socklen_t addrlen)
{
	return zsock_bind_ctx(obj, addr, addrlen);
}

static int sock_connect_vmeth(void *obj, const struct sockaddr *addr,
			      socklen_t addrlen)
{
	return zsock_connect_ctx(obj, addr, addrlen);
}

static int sock_listen_vmeth(void *obj, int backlog)
{
	return zsock_listen_ctx(obj, backlog);
}

static int sock_accept_vmeth(void *obj, struct sockaddr *addr,
			     socklen_t *addrlen)
{
	return zsock_accept_ctx(obj, addr, addrlen);
}

static ssize_t sock_sendto_vmeth(void *obj, const void *buf, size_t len,
				 int flags, const struct sockaddr *dest_addr,
				 socklen_t addrlen)
{
	return zsock_sendto_ctx(obj, buf, len, flags, dest_addr, addrlen);
}

static ssize_t sock_sendmsg_vmeth(void *obj, const struct msghdr *msg,
				  int flags)
{
	return zsock_sendmsg_ctx(obj, msg, flags);
}

static ssize_t sock_recvmsg_vmeth(void *obj, struct msghdr *msg, int flags)
{
	return zsock_recvmsg_ctx(obj, msg, flags);
}

static ssize_t sock_recvfrom_vmeth(void *obj, void *buf, size_t max_len,
				   int flags, struct sockaddr *src_addr,
				   socklen_t *addrlen)
{
	return zsock_recvfrom_ctx(obj, buf, max_len, flags,
				  src_addr, addrlen);
}

static int sock_getsockopt_vmeth(void *obj, int level, int optname,
				 void *optval, socklen_t *optlen)
{
	return zsock_getsockopt_ctx(obj, level, optname, optval, optlen);
}

static int sock_setsockopt_vmeth(void *obj, int level, int optname,
				 const void *optval, socklen_t optlen)
{
	return zsock_setsockopt_ctx(obj, level, optname, optval, optlen);
}

static int sock_close2_vmeth(void *obj, int fd)
{
	return zsock_close_ctx(obj, fd);
}

static int sock_getpeername_vmeth(void *obj, struct sockaddr *addr,
				  socklen_t *addrlen)
{
	return zsock_getpeername_ctx(obj, addr, addrlen);
}

static int sock_getsockname_vmeth(void *obj, struct sockaddr *addr,
				  socklen_t *addrlen)
{
	return zsock_getsockname_ctx(obj, addr, addrlen);
}

const struct socket_op_vtable sock_fd_op_vtable = {
	.fd_vtable = {
		.read = sock_read_vmeth,
		.write = sock_write_vmeth,
		.close2 = sock_close2_vmeth,
		.ioctl = sock_ioctl_vmeth,
	},
	.shutdown = sock_shutdown_vmeth,
	.bind = sock_bind_vmeth,
	.connect = sock_connect_vmeth,
	.listen = sock_listen_vmeth,
	.accept = sock_accept_vmeth,
	.sendto = sock_sendto_vmeth,
	.sendmsg = sock_sendmsg_vmeth,
	.recvmsg = sock_recvmsg_vmeth,
	.recvfrom = sock_recvfrom_vmeth,
	.getsockopt = sock_getsockopt_vmeth,
	.setsockopt = sock_setsockopt_vmeth,
	.getpeername = sock_getpeername_vmeth,
	.getsockname = sock_getsockname_vmeth,
};

static bool inet_is_supported(int family, int type, int proto)
{
	if (family != AF_INET && family != AF_INET6) {
		return false;
	}

	return true;
}

NET_SOCKET_REGISTER(af_inet46, NET_SOCKET_DEFAULT_PRIO, AF_UNSPEC,
		    inet_is_supported, zsock_socket_internal);
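
/* Illustrative sketch (guarded out of the build): creating an AF_INET socket
 * dispatches through the af_inet46 registration above; inet_is_supported()
 * accepts the family, then zsock_socket_internal() reserves the fd and
 * allocates the net_context. The function name is an example only.
 */
#if 0
static int example_open_udp_socket(void)
{
	int sock = zsock_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);

	if (sock < 0) {
		return -errno;
	}

	return sock;
}
#endif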