/*
 * Copyright (c) 2017 Linaro Limited
 * Copyright (c) 2021 Nordic Semiconductor
 * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* Zephyr headers */
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(net_sock, CONFIG_NET_SOCKETS_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <zephyr/net/mld.h>
#include <zephyr/net/net_context.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/net/socket.h>
#include <zephyr/net/socket_types.h>
#include <zephyr/posix/fcntl.h>
#include <zephyr/sys/fdtable.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/iterable_sections.h>

#if defined(CONFIG_SOCKS)
#include "socks.h"
#endif

#include <zephyr/net/igmp.h>
#include "../../ip/ipv6.h"

#include "../../ip/net_stats.h"

#include "sockets_internal.h"
#include "../../ip/tcp_internal.h"
#include "../../ip/net_private.h"

const struct socket_op_vtable sock_fd_op_vtable;

static void zsock_received_cb(struct net_context *ctx,
			      struct net_pkt *pkt,
			      union net_ip_header *ip_hdr,
			      union net_proto_header *proto_hdr,
			      int status,
			      void *user_data);

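/* Block until the FIFO has at least one element queued, or until the
 * timeout expires. Returns 0 on success, -EAGAIN on timeout or -EINTR
 * if the wait was cancelled.
 */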
static int fifo_wait_non_empty(struct k_fifo *fifo, k_timeout_t timeout)
{
	struct k_poll_event events[] = {
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
					 K_POLL_MODE_NOTIFY_ONLY, fifo),
	};

	return k_poll(events, ARRAY_SIZE(events), timeout);
}

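/* Drain everything queued on a context. For a listening context the
 * queue holds not-yet-accepted child contexts (accept_q aliases
 * recv_q), each of which must be flushed and released in turn;
 * otherwise it holds received packets, which are simply unreferenced.
 */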
static void zsock_flush_queue(struct net_context *ctx)
{
	bool is_listen = net_context_get_state(ctx) == NET_CONTEXT_LISTENING;
	void *p;

	/* recv_q and accept_q are shared via a union */
	while ((p = k_fifo_get(&ctx->recv_q, K_NO_WAIT)) != NULL) {
		if (is_listen) {
			NET_DBG("discarding ctx %p", p);

			/* Note that we must release all the packets we
			 * might have received for the accepted socket.
			 */
			zsock_flush_queue(p);
			net_context_put(p);
		} else {
			NET_DBG("discarding pkt %p", p);
			net_pkt_unref(p);
		}
	}

	/* Some threads might be waiting on recv, cancel the wait */
	k_fifo_cancel_wait(&ctx->recv_q);

	/* Wake reader if it was sleeping */
	(void)k_condvar_signal(&ctx->cond.recv);
}

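/* Allocate a file descriptor and a net_context for a new socket.
 * A zero protocol is mapped to the default for the socket type
 * (UDP for SOCK_DGRAM, TCP for SOCK_STREAM) on AF_INET/AF_INET6.
 */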
static int zsock_socket_internal(int family, int type, int proto)
{
	int fd = zvfs_reserve_fd();
	struct net_context *ctx;
	int res;

	if (fd < 0) {
		return -1;
	}

	if (proto == 0) {
		if (family == AF_INET || family == AF_INET6) {
			if (type == SOCK_DGRAM) {
				proto = IPPROTO_UDP;
			} else if (type == SOCK_STREAM) {
				proto = IPPROTO_TCP;
			}
		}
	}

	res = net_context_get(family, type, proto, &ctx);
	if (res < 0) {
		zvfs_free_fd(fd);
		errno = -res;
		return -1;
	}

	/* Initialize user_data, all other calls will preserve it */
	ctx->user_data = NULL;

	/* The socket flags are stored here */
	ctx->socket_data = NULL;

	/* recv_q and accept_q are in union */
	k_fifo_init(&ctx->recv_q);

	/* Condition variable is used to avoid keeping the lock for a long
	 * time when waiting for data to be received
	 */
	k_condvar_init(&ctx->cond.recv);

	/* TCP context is effectively owned by both application
	 * and the stack: stack may detect that peer closed/aborted
	 * connection, but it must not dispose of the context behind
	 * the application's back. Likewise, when application "closes"
	 * context, it's not disposed of immediately - there's still a
	 * closing handshake for the stack to perform.
	 */
	if (proto == IPPROTO_TCP) {
		net_context_ref(ctx);
	}

	zvfs_finalize_typed_fd(fd, ctx, (const struct fd_op_vtable *)&sock_fd_op_vtable,
			       ZVFS_MODE_IFSOCK);

	NET_DBG("socket: ctx=%p, fd=%d", ctx, fd);

	return fd;
}
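
/* Sketch of a typical call path, assuming the public zsock_socket()
 * wrapper dispatches native sockets here (hypothetical example):
 *
 *   int fd = zsock_socket(AF_INET, SOCK_DGRAM, 0);  // proto -> IPPROTO_UDP
 *   ...
 *   zsock_close(fd);
 */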

int zsock_close_ctx(struct net_context *ctx, int sock)
{
	int ret;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, close, sock);

	NET_DBG("close: ctx=%p, fd=%d", ctx, sock);

	/* Reset callbacks to avoid any race conditions while
	 * flushing queues. No need to check return values here,
	 * as these are fail-free operations and we're closing the
	 * socket anyway.
	 */
	if (net_context_get_state(ctx) == NET_CONTEXT_LISTENING) {
		(void)net_context_accept(ctx, NULL, K_NO_WAIT, NULL);
	} else {
		(void)net_context_recv(ctx, NULL, K_NO_WAIT, NULL);
	}

	ctx->user_data = INT_TO_POINTER(EINTR);
	sock_set_error(ctx);

	zsock_flush_queue(ctx);

	ret = net_context_put(ctx);
	if (ret < 0) {
		errno = -ret;
		ret = -1;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, close, sock, ret < 0 ? -errno : ret);

	if (ret == 0) {
		(void)sock_obj_core_dealloc(sock);
	}

	return ret;
}

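/* Connection-accepted callback: initialize the new child context and
 * queue it on the parent's accept_q so that accept() can pick it up.
 */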
static void zsock_accepted_cb(struct net_context *new_ctx,
			      struct sockaddr *addr, socklen_t addrlen,
			      int status, void *user_data)
{
	struct net_context *parent = user_data;

	NET_DBG("parent=%p, ctx=%p, st=%d", parent, new_ctx, status);

	if (status == 0) {
		/* This just installs a callback, so cannot fail. */
		(void)net_context_recv(new_ctx, zsock_received_cb, K_NO_WAIT,
				       NULL);
		k_fifo_init(&new_ctx->recv_q);
		k_condvar_init(&new_ctx->cond.recv);

		k_fifo_put(&parent->accept_q, new_ctx);

		/* TCP context is effectively owned by both application
		 * and the stack: stack may detect that peer closed/aborted
		 * connection, but it must not dispose of the context behind
		 * the application's back. Likewise, when application "closes"
		 * context, it's not disposed of immediately - there's still a
		 * closing handshake for the stack to perform.
		 */
		net_context_ref(new_ctx);

		(void)k_condvar_signal(&parent->cond.recv);
	}
}

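/* Data-received callback: queue the packet on recv_q and wake any
 * blocked reader. A NULL pkt signals EOF from the peer; the EOF flag
 * is set either on the socket (empty queue) or on the last queued
 * packet, so EOF is only observed after the data before it is read.
 */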
static void zsock_received_cb(struct net_context *ctx,
			      struct net_pkt *pkt,
			      union net_ip_header *ip_hdr,
			      union net_proto_header *proto_hdr,
			      int status,
			      void *user_data)
{
	if (ctx->cond.lock) {
		(void)k_mutex_lock(ctx->cond.lock, K_FOREVER);
	}

	NET_DBG("ctx=%p, pkt=%p, st=%d, user_data=%p", ctx, pkt, status,
		user_data);

	if (status < 0) {
		ctx->user_data = INT_TO_POINTER(-status);
		sock_set_error(ctx);
	}

	/* if pkt is NULL, EOF */
	if (!pkt) {
		struct net_pkt *last_pkt = k_fifo_peek_tail(&ctx->recv_q);

		if (!last_pkt) {
			/* If there are no packets in the queue, recv() may
			 * be blocked waiting on it to become non-empty,
			 * so cancel that wait.
			 */
			sock_set_eof(ctx);
			k_fifo_cancel_wait(&ctx->recv_q);
			NET_DBG("Marked socket %p as peer-closed", ctx);
		} else {
			net_pkt_set_eof(last_pkt, true);
			NET_DBG("Set EOF flag on pkt %p", last_pkt);
		}

		goto unlock;
	}

	/* Normal packet */
	net_pkt_set_eof(pkt, false);

	net_pkt_set_rx_stats_tick(pkt, k_cycle_get_32());

	k_fifo_put(&ctx->recv_q, pkt);

unlock:
	/* Wake reader if it was sleeping */
	(void)k_condvar_signal(&ctx->cond.recv);

	if (ctx->cond.lock) {
		(void)k_mutex_unlock(ctx->cond.lock);
	}
}

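/* Only ZSOCK_SHUT_RD is implemented here: further reads return EOF and
 * anything already queued is dropped. ZSOCK_SHUT_WR and ZSOCK_SHUT_RDWR
 * are reported as unsupported.
 */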
int zsock_shutdown_ctx(struct net_context *ctx, int how)
{
	int ret;

	if (how == ZSOCK_SHUT_RD) {
		if (net_context_get_state(ctx) == NET_CONTEXT_LISTENING) {
			ret = net_context_accept(ctx, NULL, K_NO_WAIT, NULL);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}
		} else {
			ret = net_context_recv(ctx, NULL, K_NO_WAIT, NULL);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}
		}

		sock_set_eof(ctx);

		zsock_flush_queue(ctx);

		return 0;
	}

	if (how == ZSOCK_SHUT_WR || how == ZSOCK_SHUT_RDWR) {
		errno = ENOTSUP;
		return -1;
	}

	errno = EINVAL;
	return -1;
}

int zsock_bind_ctx(struct net_context *ctx, const struct sockaddr *addr,
		   socklen_t addrlen)
{
	int ret;

	ret = net_context_bind(ctx, addr, addrlen);
	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	/* For a DGRAM socket, we expect to receive packets after the call
	 * to bind(), but for a STREAM socket, the next expected operation
	 * is listen(), which doesn't work if a recv callback is set.
	 */
	if (net_context_get_type(ctx) == SOCK_DGRAM) {
		ret = net_context_recv(ctx, zsock_received_cb, K_NO_WAIT,
				       ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
	}

	return 0;
}

static void zsock_connected_cb(struct net_context *ctx, int status, void *user_data)
{
	if (status < 0) {
		ctx->user_data = INT_TO_POINTER(-status);
		sock_set_error(ctx);
	}
}

int zsock_connect_ctx(struct net_context *ctx, const struct sockaddr *addr,
		      socklen_t addrlen)
{
	k_timeout_t timeout = K_MSEC(CONFIG_NET_SOCKETS_CONNECT_TIMEOUT);
	net_context_connect_cb_t cb = NULL;
	int ret;

#if defined(CONFIG_SOCKS)
	if (net_context_is_proxy_enabled(ctx)) {
		ret = net_socks5_connect(ctx, addr, addrlen);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
		ret = net_context_recv(ctx, zsock_received_cb,
				       K_NO_WAIT, ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
		return 0;
	}
#endif
	if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTED) {
		return 0;
	}

	if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTING) {
		if (sock_is_error(ctx)) {
			errno = POINTER_TO_INT(ctx->user_data);
			return -1;
		}

		errno = EALREADY;
		return -1;
	}

	if (sock_is_nonblock(ctx)) {
		timeout = K_NO_WAIT;
		cb = zsock_connected_cb;
	}

	if (net_context_get_type(ctx) == SOCK_STREAM) {
		/* For STREAM sockets net_context_recv() only installs the
		 * recv callback without side effects, and it has to be done
		 * first to avoid a race condition when TCP stream data
		 * arrives right after connect.
		 */
		ret = net_context_recv(ctx, zsock_received_cb,
				       K_NO_WAIT, ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
		ret = net_context_connect(ctx, addr, addrlen, cb,
					  timeout, ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
	} else {
		ret = net_context_connect(ctx, addr, addrlen, cb,
					  timeout, ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
		ret = net_context_recv(ctx, zsock_received_cb,
				       K_NO_WAIT, ctx->user_data);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
	}

	return 0;
}

int zsock_listen_ctx(struct net_context *ctx, int backlog)
{
	int ret;

	ret = net_context_listen(ctx, backlog);
	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	ret = net_context_accept(ctx, zsock_accepted_cb, K_NO_WAIT, ctx);
	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	return 0;
}

int zsock_accept_ctx(struct net_context *parent, struct sockaddr *addr,
		     socklen_t *addrlen)
{
	struct net_context *ctx;
	struct net_pkt *last_pkt;
	int fd, ret;

	if (!sock_is_nonblock(parent)) {
		k_timeout_t timeout = K_FOREVER;

		/* accept() can reuse zsock_wait_data(), as underneath it's
		 * monitoring the same queue (accept_q is an alias for recv_q).
		 */
		ret = zsock_wait_data(parent, &timeout);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
	}

	ctx = k_fifo_get(&parent->accept_q, K_NO_WAIT);
	if (ctx == NULL) {
		errno = EAGAIN;
		return -1;
	}

	fd = zvfs_reserve_fd();
	if (fd < 0) {
		zsock_flush_queue(ctx);
		net_context_put(ctx);
		return -1;
	}

	/* Check if the connection is already disconnected */
	last_pkt = k_fifo_peek_tail(&ctx->recv_q);
	if (last_pkt) {
		if (net_pkt_eof(last_pkt)) {
			sock_set_eof(ctx);
			zvfs_free_fd(fd);
			zsock_flush_queue(ctx);
			net_context_put(ctx);
			errno = ECONNABORTED;
			return -1;
		}
	}

	if (net_context_is_closing(ctx)) {
		errno = ECONNABORTED;
		zvfs_free_fd(fd);
		zsock_flush_queue(ctx);
		net_context_put(ctx);
		return -1;
	}

	net_context_set_accepting(ctx, false);

	if (addr != NULL && addrlen != NULL) {
		int len = MIN(*addrlen, sizeof(ctx->remote));

		memcpy(addr, &ctx->remote, len);
		/* addrlen is a value-result argument, set to actual
		 * size of source address
		 */
		if (ctx->remote.sa_family == AF_INET) {
			*addrlen = sizeof(struct sockaddr_in);
		} else if (ctx->remote.sa_family == AF_INET6) {
			*addrlen = sizeof(struct sockaddr_in6);
		} else {
			zvfs_free_fd(fd);
			errno = ENOTSUP;
			zsock_flush_queue(ctx);
			net_context_put(ctx);
			return -1;
		}
	}

	NET_DBG("accept: ctx=%p, fd=%d", ctx, fd);

	zvfs_finalize_typed_fd(fd, ctx, (const struct fd_op_vtable *)&sock_fd_op_vtable,
			       ZVFS_MODE_IFSOCK);

	return fd;
}

#define WAIT_BUFS_INITIAL_MS 10
#define WAIT_BUFS_MAX_MS 100
#define MAX_WAIT_BUFS K_MSEC(CONFIG_NET_SOCKET_MAX_SEND_WAIT)

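/* Decide whether a failed send should be retried. Transient -ENOBUFS
 * (out of net_pkt/net_buf) and -EAGAIN (TCP send window full) errors
 * are absorbed by sleeping or polling for *retry_timeout ms, which is
 * doubled on each retry up to WAIT_BUFS_MAX_MS. Returns 0 to retry,
 * or -1 with errno set when the error is final or the timeouts expire.
 */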
static int send_check_and_wait(struct net_context *ctx, int status,
			       k_timepoint_t buf_timeout, k_timeout_t timeout,
			       uint32_t *retry_timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		goto out;
	}

	if (status != -ENOBUFS && status != -EAGAIN) {
		goto out;
	}

	/* If we cannot get any buffers in a reasonable
	 * amount of time, then do not wait forever as
	 * there might be some bigger issue.
	 * If we get -EAGAIN and cannot recover, then
	 * it means that the sending window is blocked
	 * and we just cannot send anything.
	 */
	if (sys_timepoint_expired(buf_timeout)) {
		if (status == -ENOBUFS) {
			status = -ENOMEM;
		} else {
			status = -ENOBUFS;
		}

		goto out;
	}

	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		*retry_timeout =
			MIN(*retry_timeout, k_ticks_to_ms_floor32(timeout.ticks));
	}

	if (ctx->cond.lock) {
		(void)k_mutex_unlock(ctx->cond.lock);
	}

	if (status == -ENOBUFS) {
		/* We can monitor net_pkt/net_buf availability, so just wait. */
		k_sleep(K_MSEC(*retry_timeout));
	}

	if (status == -EAGAIN) {
		if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) &&
		    net_context_get_type(ctx) == SOCK_STREAM &&
		    !net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
			struct k_poll_event event;

			k_poll_event_init(&event,
					  K_POLL_TYPE_SEM_AVAILABLE,
					  K_POLL_MODE_NOTIFY_ONLY,
					  net_tcp_tx_sem_get(ctx));

			k_poll(&event, 1, K_MSEC(*retry_timeout));
		} else {
			k_sleep(K_MSEC(*retry_timeout));
		}
	}

	/* Exponentially increase the retry timeout.
	 * Cap the value to WAIT_BUFS_MAX_MS.
	 */
	*retry_timeout = MIN(WAIT_BUFS_MAX_MS, *retry_timeout << 1);

	if (ctx->cond.lock) {
		(void)k_mutex_lock(ctx->cond.lock, K_FOREVER);
	}

	return 0;

out:
	errno = -status;
	return -1;
}

ssize_t zsock_sendto_ctx(struct net_context *ctx, const void *buf, size_t len,
			 int flags,
			 const struct sockaddr *dest_addr, socklen_t addrlen)
{
	k_timeout_t timeout = K_FOREVER;
	uint32_t retry_timeout = WAIT_BUFS_INITIAL_MS;
	k_timepoint_t buf_timeout, end;
	int status;

	if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
		timeout = K_NO_WAIT;
		buf_timeout = sys_timepoint_calc(K_NO_WAIT);
	} else {
		net_context_get_option(ctx, NET_OPT_SNDTIMEO, &timeout, NULL);
		buf_timeout = sys_timepoint_calc(MAX_WAIT_BUFS);
	}
	end = sys_timepoint_calc(timeout);

	/* Register the callback before sending in order to receive the response
	 * from the peer.
	 */
	status = net_context_recv(ctx, zsock_received_cb,
				  K_NO_WAIT, ctx->user_data);
	if (status < 0) {
		errno = -status;
		return -1;
	}

	while (1) {
		if (dest_addr) {
			status = net_context_sendto(ctx, buf, len, dest_addr,
						    addrlen, NULL, timeout,
						    ctx->user_data);
		} else {
			status = net_context_send(ctx, buf, len, NULL, timeout,
						  ctx->user_data);
		}

		if (status < 0) {
			status = send_check_and_wait(ctx, status, buf_timeout,
						     timeout, &retry_timeout);
			if (status < 0) {
				return status;
			}

			/* Update the timeout value in case the loop is repeated. */
			timeout = sys_timepoint_timeout(end);

			continue;
		}

		break;
	}

	return status;
}

ssize_t zsock_sendmsg_ctx(struct net_context *ctx, const struct msghdr *msg,
			  int flags)
{
	k_timeout_t timeout = K_FOREVER;
	uint32_t retry_timeout = WAIT_BUFS_INITIAL_MS;
	k_timepoint_t buf_timeout, end;
	int status;

	if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
		timeout = K_NO_WAIT;
		buf_timeout = sys_timepoint_calc(K_NO_WAIT);
	} else {
		net_context_get_option(ctx, NET_OPT_SNDTIMEO, &timeout, NULL);
		buf_timeout = sys_timepoint_calc(MAX_WAIT_BUFS);
	}
	end = sys_timepoint_calc(timeout);

	while (1) {
		status = net_context_sendmsg(ctx, msg, flags, NULL, timeout, NULL);
		if (status < 0) {
			status = send_check_and_wait(ctx, status,
						     buf_timeout,
						     timeout, &retry_timeout);
			if (status < 0) {
				return status;
			}

			/* Update the timeout value in case the loop is repeated. */
			timeout = sys_timepoint_timeout(end);

			continue;
		}

		break;
	}

	return status;
}

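/* Extract the source address and port of a received packet by parsing
 * its IP and transport headers in place; the packet cursor is restored
 * before returning so the payload read position is unaffected.
 */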
static int sock_get_pkt_src_addr(struct net_pkt *pkt,
				 enum net_ip_protocol proto,
				 struct sockaddr *addr,
				 socklen_t addrlen)
{
	int ret = 0;
	struct net_pkt_cursor backup;
	uint16_t *port;

	if (!addr || !pkt) {
		return -EINVAL;
	}

	net_pkt_cursor_backup(pkt, &backup);
	net_pkt_cursor_init(pkt);

	addr->sa_family = net_pkt_family(pkt);

	if (IS_ENABLED(CONFIG_NET_IPV4) &&
	    net_pkt_family(pkt) == AF_INET) {
		NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access,
						      struct net_ipv4_hdr);
		struct sockaddr_in *addr4 = net_sin(addr);
		struct net_ipv4_hdr *ipv4_hdr;

		if (addrlen < sizeof(struct sockaddr_in)) {
			ret = -EINVAL;
			goto error;
		}

		ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(
							pkt, &ipv4_access);
		if (!ipv4_hdr ||
		    net_pkt_acknowledge_data(pkt, &ipv4_access) ||
		    net_pkt_skip(pkt, net_pkt_ipv4_opts_len(pkt))) {
			ret = -ENOBUFS;
			goto error;
		}

		net_ipv4_addr_copy_raw((uint8_t *)&addr4->sin_addr, ipv4_hdr->src);
		port = &addr4->sin_port;
	} else if (IS_ENABLED(CONFIG_NET_IPV6) &&
		   net_pkt_family(pkt) == AF_INET6) {
		NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access,
						      struct net_ipv6_hdr);
		struct sockaddr_in6 *addr6 = net_sin6(addr);
		struct net_ipv6_hdr *ipv6_hdr;

		if (addrlen < sizeof(struct sockaddr_in6)) {
			ret = -EINVAL;
			goto error;
		}

		ipv6_hdr = (struct net_ipv6_hdr *)net_pkt_get_data(
							pkt, &ipv6_access);
		if (!ipv6_hdr ||
		    net_pkt_acknowledge_data(pkt, &ipv6_access) ||
		    net_pkt_skip(pkt, net_pkt_ipv6_ext_len(pkt))) {
			ret = -ENOBUFS;
			goto error;
		}

		net_ipv6_addr_copy_raw((uint8_t *)&addr6->sin6_addr, ipv6_hdr->src);
		port = &addr6->sin6_port;
	} else {
		ret = -ENOTSUP;
		goto error;
	}

	if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) {
		NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr);
		struct net_udp_hdr *udp_hdr;

		udp_hdr = (struct net_udp_hdr *)net_pkt_get_data(pkt,
								 &udp_access);
		if (!udp_hdr) {
			ret = -ENOBUFS;
			goto error;
		}

		*port = udp_hdr->src_port;
	} else if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) {
		NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr);
		struct net_tcp_hdr *tcp_hdr;

		tcp_hdr = (struct net_tcp_hdr *)net_pkt_get_data(pkt,
								 &tcp_access);
		if (!tcp_hdr) {
			ret = -ENOBUFS;
			goto error;
		}

		*port = tcp_hdr->src_port;
	} else {
		ret = -ENOTSUP;
	}

error:
	net_pkt_cursor_restore(pkt, &backup);

	return ret;
}

#if defined(CONFIG_NET_OFFLOAD)
static bool net_pkt_remote_addr_is_unspecified(struct net_pkt *pkt)
{
	bool ret = true;

	if (net_pkt_family(pkt) == AF_INET) {
		ret = net_ipv4_is_addr_unspecified(&net_sin(&pkt->remote)->sin_addr);
	} else if (net_pkt_family(pkt) == AF_INET6) {
		ret = net_ipv6_is_addr_unspecified(&net_sin6(&pkt->remote)->sin6_addr);
	}

	return ret;
}

static int sock_get_offload_pkt_src_addr(struct net_pkt *pkt,
					 struct net_context *ctx,
					 struct sockaddr *addr,
					 socklen_t addrlen)
{
	int ret = 0;

	if (!addr || !pkt) {
		return -EINVAL;
	}

	if (!net_pkt_remote_addr_is_unspecified(pkt)) {
		if (IS_ENABLED(CONFIG_NET_IPV4) &&
		    net_pkt_family(pkt) == AF_INET) {
			if (addrlen < sizeof(struct sockaddr_in)) {
				ret = -EINVAL;
				goto error;
			}

			memcpy(addr, &pkt->remote, sizeof(struct sockaddr_in));
		} else if (IS_ENABLED(CONFIG_NET_IPV6) &&
			   net_pkt_family(pkt) == AF_INET6) {
			if (addrlen < sizeof(struct sockaddr_in6)) {
				ret = -EINVAL;
				goto error;
			}

			memcpy(addr, &pkt->remote, sizeof(struct sockaddr_in6));
		}
	} else if (ctx->flags & NET_CONTEXT_REMOTE_ADDR_SET) {
		memcpy(addr, &ctx->remote, MIN(addrlen, sizeof(ctx->remote)));
	} else {
		ret = -ENOTSUP;
	}

error:
	return ret;
}
#else
static int sock_get_offload_pkt_src_addr(struct net_pkt *pkt,
					 struct net_context *ctx,
					 struct sockaddr *addr,
					 socklen_t addrlen)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(ctx);
	ARG_UNUSED(addr);
	ARG_UNUSED(addrlen);

	return 0;
}
#endif /* CONFIG_NET_OFFLOAD */

void net_socket_update_tc_rx_time(struct net_pkt *pkt, uint32_t end_tick)
{
	net_pkt_set_rx_stats_tick(pkt, end_tick);

	net_stats_update_tc_rx_time(net_pkt_iface(pkt),
				    net_pkt_priority(pkt),
				    net_pkt_create_time(pkt),
				    end_tick);

	SYS_PORT_TRACING_FUNC(net, rx_time, pkt, end_tick);

	if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)) {
		uint32_t val, prev = net_pkt_create_time(pkt);
		int i;

		for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
			if (!net_pkt_stats_tick(pkt)[i]) {
				break;
			}

			val = net_pkt_stats_tick(pkt)[i] - prev;
			prev = net_pkt_stats_tick(pkt)[i];
			net_pkt_stats_tick(pkt)[i] = val;
		}

		net_stats_update_tc_rx_time_detail(
			net_pkt_iface(pkt),
			net_pkt_priority(pkt),
			net_pkt_stats_tick(pkt));
	}
}

int zsock_wait_data(struct net_context *ctx, k_timeout_t *timeout)
{
	int ret;

	if (ctx->cond.lock == NULL) {
		/* For some reason the lock pointer is not set properly
		 * when called by fdtable.c:zvfs_finalize_fd().
		 * It is not practical to try to figure out the fdtable
		 * lock at this point so skip it.
		 */
		NET_WARN("No lock pointer set for context %p", ctx);
		return -EINVAL;
	}

	if (k_fifo_is_empty(&ctx->recv_q)) {
		/* Wait for the data to arrive but without holding a lock */
		ret = k_condvar_wait(&ctx->cond.recv, ctx->cond.lock,
				     *timeout);
		if (ret < 0) {
			return ret;
		}

		if (sock_is_error(ctx)) {
			return -POINTER_TO_INT(ctx->user_data);
		}
	}

	return 0;
}

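/* Append a control message to the user-supplied ancillary data buffer.
 * The buffer is scanned for the first unused cmsg slot (cmsg_len == 0);
 * -EINVAL is returned if the buffer is too small or already full.
 */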
static int insert_pktinfo(struct msghdr *msg, int level, int type,
			  void *pktinfo, size_t pktinfo_len)
{
	struct cmsghdr *cmsg;

	if (msg->msg_controllen < pktinfo_len) {
		return -EINVAL;
	}

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_len == 0) {
			break;
		}
	}

	if (cmsg == NULL) {
		return -EINVAL;
	}

	cmsg->cmsg_len = CMSG_LEN(pktinfo_len);
	cmsg->cmsg_level = level;
	cmsg->cmsg_type = type;

	memcpy(CMSG_DATA(cmsg), pktinfo, pktinfo_len);

	return 0;
}

static int add_timestamping(struct net_context *ctx,
			    struct net_pkt *pkt,
			    struct msghdr *msg)
{
	uint8_t timestamping = 0;

	net_context_get_option(ctx, NET_OPT_TIMESTAMPING, &timestamping, NULL);

	if (timestamping) {
		return insert_pktinfo(msg, SOL_SOCKET, SO_TIMESTAMPING,
				      net_pkt_timestamp(pkt), sizeof(struct net_ptp_time));
	}

	return -ENOTSUP;
}

static int add_pktinfo(struct net_context *ctx,
		       struct net_pkt *pkt,
		       struct msghdr *msg)
{
	int ret = -ENOTSUP;
	struct net_pkt_cursor backup;

	net_pkt_cursor_backup(pkt, &backup);
	net_pkt_cursor_init(pkt);

	if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
		NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access,
						      struct net_ipv4_hdr);
		struct in_pktinfo info;
		struct net_ipv4_hdr *ipv4_hdr;

		ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(
							pkt, &ipv4_access);
		if (ipv4_hdr == NULL ||
		    net_pkt_acknowledge_data(pkt, &ipv4_access) ||
		    net_pkt_skip(pkt, net_pkt_ipv4_opts_len(pkt))) {
			ret = -ENOBUFS;
			goto out;
		}

		net_ipv4_addr_copy_raw((uint8_t *)&info.ipi_addr, ipv4_hdr->dst);
		net_ipv4_addr_copy_raw((uint8_t *)&info.ipi_spec_dst,
				       (uint8_t *)net_sin_ptr(&ctx->local)->sin_addr);
		info.ipi_ifindex = ctx->iface;

		ret = insert_pktinfo(msg, IPPROTO_IP, IP_PKTINFO,
				     &info, sizeof(info));

		goto out;
	}

	if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
		NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access,
						      struct net_ipv6_hdr);
		struct in6_pktinfo info;
		struct net_ipv6_hdr *ipv6_hdr;

		ipv6_hdr = (struct net_ipv6_hdr *)net_pkt_get_data(
							pkt, &ipv6_access);
		if (ipv6_hdr == NULL ||
		    net_pkt_acknowledge_data(pkt, &ipv6_access) ||
		    net_pkt_skip(pkt, net_pkt_ipv6_ext_len(pkt))) {
			ret = -ENOBUFS;
			goto out;
		}

		net_ipv6_addr_copy_raw((uint8_t *)&info.ipi6_addr, ipv6_hdr->dst);
		info.ipi6_ifindex = ctx->iface;

		ret = insert_pktinfo(msg, IPPROTO_IPV6, IPV6_RECVPKTINFO,
				     &info, sizeof(info));

		goto out;
	}

out:
	net_pkt_cursor_restore(pkt, &backup);

	return ret;
}

static int update_msg_controllen(struct msghdr *msg)
{
	struct cmsghdr *cmsg;
	size_t cmsg_space = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_len == 0) {
			break;
		}
		cmsg_space += cmsg->cmsg_len;
	}
	msg->msg_controllen = cmsg_space;

	return 0;
}

static ssize_t zsock_recv_dgram(struct net_context *ctx,
				struct msghdr *msg,
				void *buf,
				size_t max_len,
				int flags,
				struct sockaddr *src_addr,
				socklen_t *addrlen)
{
	k_timeout_t timeout = K_FOREVER;
	size_t recv_len = 0;
	size_t read_len;
	struct net_pkt_cursor backup;
	struct net_pkt *pkt;

	if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
		timeout = K_NO_WAIT;
	} else {
		int ret;

		net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL);

		ret = zsock_wait_data(ctx, &timeout);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}
	}

	if (flags & ZSOCK_MSG_PEEK) {
		int res;

		res = fifo_wait_non_empty(&ctx->recv_q, timeout);
		/* EAGAIN when timeout expired, EINTR when cancelled */
		if (res && res != -EAGAIN && res != -EINTR) {
			errno = -res;
			return -1;
		}

		pkt = k_fifo_peek_head(&ctx->recv_q);
	} else {
		pkt = k_fifo_get(&ctx->recv_q, timeout);
	}

	if (!pkt) {
		errno = EAGAIN;
		return -1;
	}

	net_pkt_cursor_backup(pkt, &backup);

	if (src_addr && addrlen) {
		if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
		    net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
			int ret;

			ret = sock_get_offload_pkt_src_addr(pkt, ctx, src_addr,
							    *addrlen);
			if (ret < 0) {
				errno = -ret;
				NET_DBG("sock_get_offload_pkt_src_addr %d", ret);
				goto fail;
			}
		} else {
			int ret;

			ret = sock_get_pkt_src_addr(pkt, net_context_get_proto(ctx),
						    src_addr, *addrlen);
			if (ret < 0) {
				errno = -ret;
				NET_DBG("sock_get_pkt_src_addr %d", ret);
				goto fail;
			}
		}

		/* addrlen is a value-result argument, set to actual
		 * size of source address
		 */
		if (src_addr->sa_family == AF_INET) {
			*addrlen = sizeof(struct sockaddr_in);
		} else if (src_addr->sa_family == AF_INET6) {
			*addrlen = sizeof(struct sockaddr_in6);
		} else {
			errno = ENOTSUP;
			goto fail;
		}
	}

	if (msg != NULL) {
		int iovec = 0;
		size_t tmp_read_len;

		if (msg->msg_iovlen < 1 || msg->msg_iov == NULL) {
			errno = ENOMEM;
			return -1;
		}

		recv_len = net_pkt_remaining_data(pkt);
		tmp_read_len = read_len = MIN(recv_len, max_len);

		while (tmp_read_len > 0) {
			size_t len;

			buf = msg->msg_iov[iovec].iov_base;
			if (buf == NULL) {
				errno = EINVAL;
				return -1;
			}

			len = MIN(tmp_read_len, msg->msg_iov[iovec].iov_len);

			if (net_pkt_read(pkt, buf, len)) {
				errno = ENOBUFS;
				goto fail;
			}

			if (len <= tmp_read_len) {
				tmp_read_len -= len;
				iovec++;
			} else {
				errno = EINVAL;
				return -1;
			}
		}

		if (recv_len != read_len) {
			msg->msg_flags |= ZSOCK_MSG_TRUNC;
		}

	} else {
		recv_len = net_pkt_remaining_data(pkt);
		read_len = MIN(recv_len, max_len);

		if (net_pkt_read(pkt, buf, read_len)) {
			errno = ENOBUFS;
			goto fail;
		}
	}

	if (msg != NULL) {
		if (msg->msg_control != NULL) {
			if (msg->msg_controllen > 0) {
				if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING) &&
				    net_context_is_timestamping_set(ctx)) {
					if (add_timestamping(ctx, pkt, msg) < 0) {
						msg->msg_flags |= ZSOCK_MSG_CTRUNC;
					}
				}

				if (IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO) &&
				    net_context_is_recv_pktinfo_set(ctx)) {
					if (add_pktinfo(ctx, pkt, msg) < 0) {
						msg->msg_flags |= ZSOCK_MSG_CTRUNC;
					}
				}

				/* msg_controllen must be updated to reflect the
				 * total length of all control messages in the
				 * buffer. If there is no control data,
				 * msg_controllen will be cleared as expected.
				 * It will also take into account pre-existing
				 * control data.
				 */
				update_msg_controllen(msg);
			}
		} else {
			msg->msg_controllen = 0U;
		}
	}

	if ((IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
	     IS_ENABLED(CONFIG_TRACING_NET_CORE)) &&
	    !(flags & ZSOCK_MSG_PEEK)) {
		net_socket_update_tc_rx_time(pkt, k_cycle_get_32());
	}

	if (!(flags & ZSOCK_MSG_PEEK)) {
		net_pkt_unref(pkt);
	} else {
		net_pkt_cursor_restore(pkt, &backup);
	}

	return (flags & ZSOCK_MSG_TRUNC) ? recv_len : read_len;

fail:
	if (!(flags & ZSOCK_MSG_PEEK)) {
		net_pkt_unref(pkt);
	}

	return -1;
}

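/* Copy whatever stream data is already queued, without blocking.
 * With buf/max_len NULL this only counts the queued bytes (this is how
 * zsock_fionread_ctx() below uses it); with ZSOCK_MSG_PEEK the packets
 * are left queued, rotated head-to-tail so each is visited once and
 * queue order is preserved.
 */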
static size_t zsock_recv_stream_immediate(struct net_context *ctx, uint8_t **buf, size_t *max_len,
					  int flags)
{
	size_t len;
	size_t pkt_len;
	size_t recv_len = 0;
	struct net_pkt *pkt;
	struct net_pkt_cursor backup;
	struct net_pkt *origin = NULL;
	const bool do_recv = !(buf == NULL || max_len == NULL);
	size_t _max_len = (max_len == NULL) ? SIZE_MAX : *max_len;
	const bool peek = (flags & ZSOCK_MSG_PEEK) == ZSOCK_MSG_PEEK;

	while (_max_len > 0) {
		/* only peek until we know we can dequeue and / or requeue buffer */
		pkt = k_fifo_peek_head(&ctx->recv_q);
		if (pkt == NULL || pkt == origin) {
			break;
		}

		if (origin == NULL) {
			/* mark first pkt to avoid cycles when observing */
			origin = pkt;
		}

		pkt_len = net_pkt_remaining_data(pkt);
		len = MIN(_max_len, pkt_len);
		recv_len += len;
		_max_len -= len;

		if (do_recv && len > 0) {
			if (peek) {
				net_pkt_cursor_backup(pkt, &backup);
			}

			net_pkt_read(pkt, *buf, len);
			/* update buffer position for caller */
			*buf += len;

			if (peek) {
				net_pkt_cursor_restore(pkt, &backup);
			}
		}

		if (do_recv && !peek) {
			if (len == pkt_len) {
				/* dequeue empty packets when not observing */
				pkt = k_fifo_get(&ctx->recv_q, K_NO_WAIT);
				if (net_pkt_eof(pkt)) {
					sock_set_eof(ctx);
				}

				if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) ||
				    IS_ENABLED(CONFIG_TRACING_NET_CORE)) {
					net_socket_update_tc_rx_time(pkt, k_cycle_get_32());
				}

				net_pkt_unref(pkt);
			}
		} else if (!do_recv || peek) {
			/* requeue packets when observing */
			k_fifo_put(&ctx->recv_q, k_fifo_get(&ctx->recv_q, K_NO_WAIT));
		}
	}

	if (do_recv) {
		/* convey remaining buffer size back to caller */
		*max_len = _max_len;
	}

	return recv_len;
}

static int zsock_fionread_ctx(struct net_context *ctx)
{
	size_t ret = zsock_recv_stream_immediate(ctx, NULL, NULL, 0);

	return MIN(ret, INT_MAX);
}

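/* Blocking stream receive loop: drain immediately available data and,
 * depending on ZSOCK_MSG_WAITALL and the remaining timeout, keep
 * waiting until the buffer (or the msghdr iovecs) is filled, EOF is
 * reached, or a socket error is raised.
 */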
static ssize_t zsock_recv_stream_timed(struct net_context *ctx, struct msghdr *msg,
				       uint8_t *buf, size_t max_len,
				       int flags, k_timeout_t timeout)
{
	int res;
	k_timepoint_t end;
	size_t recv_len = 0, iovec = 0, available_len;
	const bool waitall = (flags & ZSOCK_MSG_WAITALL) == ZSOCK_MSG_WAITALL;

	if (msg != NULL && buf == NULL) {
		if (msg->msg_iovlen < 1) {
			return -EINVAL;
		}

		buf = msg->msg_iov[iovec].iov_base;
		available_len = msg->msg_iov[iovec].iov_len;
	}

	for (end = sys_timepoint_calc(timeout); max_len > 0;
	     timeout = sys_timepoint_timeout(end)) {

		if (sock_is_error(ctx)) {
			return -POINTER_TO_INT(ctx->user_data);
		}

		if (sock_is_eof(ctx)) {
			return 0;
		}

		if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
			res = zsock_wait_data(ctx, &timeout);
			if (res < 0) {
				return res;
			}
		}

		if (msg != NULL) {
again:
			res = zsock_recv_stream_immediate(ctx, &buf, &available_len, flags);
			recv_len += res;

			if (res == 0 && recv_len == 0 && K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
				return -EAGAIN;
			}

			buf = (uint8_t *)(msg->msg_iov[iovec].iov_base) + res;
			max_len -= res;

			if (available_len == 0) {
				/* All data to this iovec was written */
				iovec++;

				if (iovec == msg->msg_iovlen) {
					break;
				}

				buf = msg->msg_iov[iovec].iov_base;
				available_len = msg->msg_iov[iovec].iov_len;

				/* If there is more data, read it now and do not wait */
				if (buf != NULL && available_len > 0) {
					goto again;
				}

				continue;
			}

		} else {
			res = zsock_recv_stream_immediate(ctx, &buf, &max_len, flags);
			recv_len += res;

			if (res == 0) {
				if (recv_len == 0 && K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
					return -EAGAIN;
				}
			}
		}

		if (!waitall) {
			break;
		}
	}

	return recv_len;
}

static ssize_t zsock_recv_stream(struct net_context *ctx, struct msghdr *msg,
				 void *buf, size_t max_len, int flags)
{
	ssize_t res;
	size_t recv_len = 0;
	k_timeout_t timeout = K_FOREVER;

	if (!net_context_is_used(ctx)) {
		errno = EBADF;
		return -1;
	}

	if (net_context_get_state(ctx) != NET_CONTEXT_CONNECTED) {
		errno = ENOTCONN;
		return -1;
	}

	if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
		timeout = K_NO_WAIT;
	} else if (!sock_is_eof(ctx) && !sock_is_error(ctx)) {
		net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL);
	}

	if (max_len == 0) {
		/* no bytes requested - done! */
		return 0;
	}

	res = zsock_recv_stream_timed(ctx, msg, buf, max_len, flags, timeout);
	recv_len += MAX(0, res);

	if (res < 0) {
		errno = -res;
		return -1;
	}

	if (!(flags & ZSOCK_MSG_PEEK)) {
		net_context_update_recv_wnd(ctx, recv_len);
	}

	return recv_len;
}

ssize_t zsock_recvfrom_ctx(struct net_context *ctx, void *buf, size_t max_len,
			   int flags,
			   struct sockaddr *src_addr, socklen_t *addrlen)
{
	enum net_sock_type sock_type = net_context_get_type(ctx);

	if (max_len == 0) {
		return 0;
	}

	if (sock_type == SOCK_DGRAM) {
		return zsock_recv_dgram(ctx, NULL, buf, max_len, flags, src_addr, addrlen);
	} else if (sock_type == SOCK_STREAM) {
		return zsock_recv_stream(ctx, NULL, buf, max_len, flags);
	}

	__ASSERT(0, "Unknown socket type");

	errno = ENOTSUP;

	return -1;
}

ssize_t zsock_recvmsg_ctx(struct net_context *ctx, struct msghdr *msg,
			  int flags)
{
	enum net_sock_type sock_type = net_context_get_type(ctx);
	size_t i, max_len = 0;

	if (msg == NULL) {
		errno = EINVAL;
		return -1;
	}

	if (msg->msg_iov == NULL) {
		errno = ENOMEM;
		return -1;
	}

	for (i = 0; i < msg->msg_iovlen; i++) {
		max_len += msg->msg_iov[i].iov_len;
	}

	if (sock_type == SOCK_DGRAM) {
		return zsock_recv_dgram(ctx, msg, NULL, max_len, flags,
					msg->msg_name, &msg->msg_namelen);
	} else if (sock_type == SOCK_STREAM) {
		return zsock_recv_stream(ctx, msg, NULL, max_len, flags);
	}

	__ASSERT(0, "Unknown socket type");

	errno = ENOTSUP;

	return -1;
}

static int zsock_poll_prepare_ctx(struct net_context *ctx,
				  struct zsock_pollfd *pfd,
				  struct k_poll_event **pev,
				  struct k_poll_event *pev_end)
{
	if (pfd->events & ZSOCK_POLLIN) {
		if (*pev == pev_end) {
			return -ENOMEM;
		}

		(*pev)->obj = &ctx->recv_q;
		(*pev)->type = K_POLL_TYPE_FIFO_DATA_AVAILABLE;
		(*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
		(*pev)->state = K_POLL_STATE_NOT_READY;
		(*pev)++;
	}

	if (pfd->events & ZSOCK_POLLOUT) {
		if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) &&
		    net_context_get_type(ctx) == SOCK_STREAM &&
		    !net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
			if (*pev == pev_end) {
				return -ENOMEM;
			}

			if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTING) {
				(*pev)->obj = net_tcp_conn_sem_get(ctx);
			} else {
				(*pev)->obj = net_tcp_tx_sem_get(ctx);
			}

			(*pev)->type = K_POLL_TYPE_SEM_AVAILABLE;
			(*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
			(*pev)->state = K_POLL_STATE_NOT_READY;
			(*pev)++;
		} else {
			return -EALREADY;
		}
	}

	/* If socket is already in EOF or error, it can be reported
	 * immediately, so we tell poll() to short-circuit wait.
	 */
	if (sock_is_eof(ctx) || sock_is_error(ctx)) {
		return -EALREADY;
	}

	return 0;
}

static int zsock_poll_update_ctx(struct net_context *ctx,
				 struct zsock_pollfd *pfd,
				 struct k_poll_event **pev)
{
	ARG_UNUSED(ctx);

	if (pfd->events & ZSOCK_POLLIN) {
		if (((*pev)->state != K_POLL_STATE_NOT_READY &&
		     (*pev)->state != K_POLL_STATE_CANCELLED) ||
		    sock_is_eof(ctx)) {
			pfd->revents |= ZSOCK_POLLIN;
		}
		(*pev)++;
	}
	if (pfd->events & ZSOCK_POLLOUT) {
		if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) &&
		    net_context_get_type(ctx) == SOCK_STREAM &&
		    !net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
			if ((*pev)->state != K_POLL_STATE_NOT_READY &&
			    !sock_is_eof(ctx) &&
			    (net_context_get_state(ctx) == NET_CONTEXT_CONNECTED)) {
				pfd->revents |= ZSOCK_POLLOUT;
			}
			(*pev)++;
		} else {
			pfd->revents |= ZSOCK_POLLOUT;
		}
	}

	if (sock_is_error(ctx)) {
		pfd->revents |= ZSOCK_POLLERR;
	}

	if (sock_is_eof(ctx)) {
		pfd->revents |= ZSOCK_POLLHUP;
	}

	return 0;
}

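/* Map a TCP-level socket option name to the corresponding internal
 * tcp_conn_option value; unknown options map to -EINVAL.
 */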
static enum tcp_conn_option get_tcp_option(int optname)
{
	switch (optname) {
	case TCP_KEEPIDLE:
		return TCP_OPT_KEEPIDLE;
	case TCP_KEEPINTVL:
		return TCP_OPT_KEEPINTVL;
	case TCP_KEEPCNT:
		return TCP_OPT_KEEPCNT;
	}

	return -EINVAL;
}

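/* Get or set the IPv4 multicast output interface (IP_MULTICAST_IF).
 * Getting reports the first IPv4 address of the currently selected
 * interface (INADDR_ANY if none); setting accepts either a struct
 * ip_mreqn or a struct ip_mreq and resolves it to an interface index.
 */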
static int ipv4_multicast_if(struct net_context *ctx, const void *optval,
			     socklen_t optlen, bool do_get)
{
	struct net_if *iface = NULL;
	int ifindex, ret;

	if (do_get) {
		struct net_if_addr *ifaddr;
		size_t len = sizeof(ifindex);

		if (optval == NULL || (optlen != sizeof(struct in_addr))) {
			errno = EINVAL;
			return -1;
		}

		ret = net_context_get_option(ctx, NET_OPT_MCAST_IFINDEX,
					     &ifindex, &len);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}

		if (ifindex == 0) {
			/* No interface set */
			((struct in_addr *)optval)->s_addr = INADDR_ANY;
			return 0;
		}

		ifaddr = net_if_ipv4_addr_get_first_by_index(ifindex);
		if (ifaddr == NULL) {
			errno = ENOENT;
			return -1;
		}

		net_ipaddr_copy((struct in_addr *)optval, &ifaddr->address.in_addr);

		return 0;
	}

	/* setsockopt() can accept either struct ip_mreqn or
	 * struct ip_mreq. We need to handle both cases.
	 */
	if (optval == NULL || (optlen != sizeof(struct ip_mreqn) &&
			       optlen != sizeof(struct ip_mreq))) {
		errno = EINVAL;
		return -1;
	}

	if (optlen == sizeof(struct ip_mreqn)) {
		struct ip_mreqn *mreqn = (struct ip_mreqn *)optval;

		if (mreqn->imr_ifindex != 0) {
			iface = net_if_get_by_index(mreqn->imr_ifindex);
		} else if (mreqn->imr_address.s_addr != INADDR_ANY) {
			struct net_if_addr *ifaddr;

			ifaddr = net_if_ipv4_addr_lookup(&mreqn->imr_address, &iface);
			if (ifaddr == NULL) {
				errno = ENOENT;
				return -1;
			}
		}
	} else {
		struct ip_mreq *mreq = (struct ip_mreq *)optval;

		if (mreq->imr_interface.s_addr != INADDR_ANY) {
			struct net_if_addr *ifaddr;

			ifaddr = net_if_ipv4_addr_lookup(&mreq->imr_interface, &iface);
			if (ifaddr == NULL) {
				errno = ENOENT;
				return -1;
			}
		}
	}

	if (iface == NULL) {
		ifindex = 0;
	} else {
		ifindex = net_if_get_by_iface(iface);
	}

	ret = net_context_set_option(ctx, NET_OPT_MCAST_IFINDEX,
				     &ifindex, sizeof(ifindex));
	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	return 0;
}

int zsock_getsockopt_ctx(struct net_context *ctx, int level, int optname,
			 void *optval, socklen_t *optlen)
{
	int ret;

	switch (level) {
	case SOL_SOCKET:
		switch (optname) {
		case SO_ERROR: {
			if (*optlen != sizeof(int)) {
				errno = EINVAL;
				return -1;
			}

			*(int *)optval = POINTER_TO_INT(ctx->user_data);

			return 0;
		}

		case SO_TYPE: {
			int type = (int)net_context_get_type(ctx);

			if (*optlen != sizeof(type)) {
				errno = EINVAL;
				return -1;
			}

			*(int *)optval = type;

			return 0;
		}

		case SO_TXTIME:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_TXTIME)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_TXTIME,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}
			break;

		case SO_PROTOCOL: {
			int proto = (int)net_context_get_proto(ctx);

			if (*optlen != sizeof(proto)) {
				errno = EINVAL;
				return -1;
			}

			*(int *)optval = proto;

			return 0;
		}

		case SO_DOMAIN: {
			if (*optlen != sizeof(int)) {
				errno = EINVAL;
				return -1;
			}

			*(int *)optval = net_context_get_family(ctx);

			return 0;
		}

		break;

		case SO_RCVBUF:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVBUF)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_RCVBUF,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}
			break;

		case SO_SNDBUF:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDBUF)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_SNDBUF,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}
			break;

		case SO_REUSEADDR:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEADDR)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_REUSEADDR,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}
			break;

		case SO_REUSEPORT:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEPORT)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_REUSEPORT,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}
			break;

		case SO_KEEPALIVE:
			if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE) &&
			    net_context_get_proto(ctx) == IPPROTO_TCP) {
				ret = net_tcp_get_option(ctx,
							 TCP_OPT_KEEPALIVE,
							 optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_TIMESTAMPING:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_TIMESTAMPING,
							     optval, optlen);

				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}

		break;

	case IPPROTO_TCP:
		switch (optname) {
		case TCP_NODELAY:
			ret = net_tcp_get_option(ctx, TCP_OPT_NODELAY, optval, optlen);
			return ret;

		case TCP_KEEPIDLE:
			__fallthrough;
		case TCP_KEEPINTVL:
			__fallthrough;
		case TCP_KEEPCNT:
			if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE)) {
				ret = net_tcp_get_option(ctx,
							 get_tcp_option(optname),
							 optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}

		break;

	case IPPROTO_IP:
		switch (optname) {
		case IP_TOS:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_DSCP_ECN,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IP_TTL:
			ret = net_context_get_option(ctx, NET_OPT_TTL,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IP_MULTICAST_IF:
			if (IS_ENABLED(CONFIG_NET_IPV4)) {
				if (net_context_get_family(ctx) != AF_INET) {
					errno = EAFNOSUPPORT;
					return -1;
				}

				return ipv4_multicast_if(ctx, optval, *optlen, true);
			}

			break;

		case IP_MULTICAST_TTL:
			ret = net_context_get_option(ctx, NET_OPT_MCAST_TTL,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IP_MTU:
			if (IS_ENABLED(CONFIG_NET_IPV4)) {
				ret = net_context_get_option(ctx, NET_OPT_MTU,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IP_LOCAL_PORT_RANGE:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_CLAMP_PORT_RANGE)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_LOCAL_PORT_RANGE,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}

		break;

	case IPPROTO_IPV6:
		switch (optname) {
		case IPV6_MTU:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				ret = net_context_get_option(ctx, NET_OPT_MTU,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_V6ONLY:
			if (IS_ENABLED(CONFIG_NET_IPV4_MAPPING_TO_IPV6)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_IPV6_V6ONLY,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_ADDR_PREFERENCES:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_ADDR_PREFERENCES,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_TCLASS:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
				ret = net_context_get_option(ctx,
							     NET_OPT_DSCP_ECN,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_UNICAST_HOPS:
			ret = net_context_get_option(ctx,
						     NET_OPT_UNICAST_HOP_LIMIT,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IPV6_MULTICAST_IF:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				if (net_context_get_family(ctx) != AF_INET6) {
					errno = EAFNOSUPPORT;
					return -1;
				}

				ret = net_context_get_option(ctx,
							     NET_OPT_MCAST_IFINDEX,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_MULTICAST_HOPS:
			ret = net_context_get_option(ctx,
						     NET_OPT_MCAST_HOP_LIMIT,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;
		}

		break;
	}

	errno = ENOPROTOOPT;
	return -1;
}

static int ipv4_multicast_group(struct net_context *ctx, const void *optval,
				socklen_t optlen, bool do_join)
{
	struct ip_mreqn *mreqn;
	struct net_if *iface;
	int ifindex, ret;

	if (optval == NULL || optlen != sizeof(struct ip_mreqn)) {
		errno = EINVAL;
		return -1;
	}

	mreqn = (struct ip_mreqn *)optval;

	if (mreqn->imr_multiaddr.s_addr == INADDR_ANY) {
		errno = EINVAL;
		return -1;
	}

	if (mreqn->imr_ifindex != 0) {
		iface = net_if_get_by_index(mreqn->imr_ifindex);
	} else {
		ifindex = net_if_ipv4_addr_lookup_by_index(&mreqn->imr_address);
		iface = net_if_get_by_index(ifindex);
	}

	if (iface == NULL) {
		/* Check if the context already has an interface and if not,
		 * then select the default interface.
		 */
		if (ctx->iface <= 0) {
			iface = net_if_get_default();
		} else {
			iface = net_if_get_by_index(ctx->iface);
		}

		if (iface == NULL) {
			errno = EINVAL;
			return -1;
		}
	}

	if (do_join) {
		ret = net_ipv4_igmp_join(iface, &mreqn->imr_multiaddr, NULL);
	} else {
		ret = net_ipv4_igmp_leave(iface, &mreqn->imr_multiaddr);
	}

	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	return 0;
}

static int ipv6_multicast_group(struct net_context *ctx, const void *optval,
				socklen_t optlen, bool do_join)
{
	struct ipv6_mreq *mreq;
	struct net_if *iface;
	int ret;

	if (optval == NULL || optlen != sizeof(struct ipv6_mreq)) {
		errno = EINVAL;
		return -1;
	}

	mreq = (struct ipv6_mreq *)optval;

	if (memcmp(&mreq->ipv6mr_multiaddr,
		   net_ipv6_unspecified_address(),
		   sizeof(mreq->ipv6mr_multiaddr)) == 0) {
		errno = EINVAL;
		return -1;
	}

	iface = net_if_get_by_index(mreq->ipv6mr_ifindex);
	if (iface == NULL) {
		/* Check if the context already has an interface and if not,
		 * then select the default interface.
		 */
		if (ctx->iface <= 0) {
			iface = net_if_get_default();
		} else {
			iface = net_if_get_by_index(ctx->iface);
		}

		if (iface == NULL) {
			errno = ENOENT;
			return -1;
		}
	}

	if (do_join) {
		ret = net_ipv6_mld_join(iface, &mreq->ipv6mr_multiaddr);
	} else {
		ret = net_ipv6_mld_leave(iface, &mreq->ipv6mr_multiaddr);
	}

	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	return 0;
}

zsock_setsockopt_ctx(struct net_context * ctx,int level,int optname,const void * optval,socklen_t optlen)2200 int zsock_setsockopt_ctx(struct net_context *ctx, int level, int optname,
2201 const void *optval, socklen_t optlen)
2202 {
2203 int ret;
2204
2205 switch (level) {
2206 case SOL_SOCKET:
2207 switch (optname) {
2208 case SO_RCVBUF:
2209 if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVBUF)) {
2210 ret = net_context_set_option(ctx,
2211 NET_OPT_RCVBUF,
2212 optval, optlen);
2213 if (ret < 0) {
2214 errno = -ret;
2215 return -1;
2216 }
2217
2218 return 0;
2219 }
2220
2221 break;
2222
2223 case SO_SNDBUF:
2224 if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDBUF)) {
2225 ret = net_context_set_option(ctx,
2226 NET_OPT_SNDBUF,
2227 optval, optlen);
2228 if (ret < 0) {
2229 errno = -ret;
2230 return -1;
2231 }
2232
2233 return 0;
2234 }
2235
2236 break;
2237
2238 case SO_REUSEADDR:
2239 if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEADDR)) {
2240 ret = net_context_set_option(ctx,
2241 NET_OPT_REUSEADDR,
2242 optval, optlen);
2243 if (ret < 0) {
2244 errno = -ret;
2245 return -1;
2246 }
2247
2248 return 0;
2249 }
2250
2251 break;
2252
2253 case SO_REUSEPORT:
2254 if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEPORT)) {
2255 ret = net_context_set_option(ctx,
2256 NET_OPT_REUSEPORT,
2257 optval, optlen);
2258 if (ret < 0) {
2259 errno = -ret;
2260 return -1;
2261 }
2262
2263 return 0;
2264 }
2265
2266 break;
2267
2268 case SO_PRIORITY:
2269 if (IS_ENABLED(CONFIG_NET_CONTEXT_PRIORITY)) {
2270 ret = net_context_set_option(ctx,
2271 NET_OPT_PRIORITY,
2272 optval, optlen);
2273 if (ret < 0) {
2274 errno = -ret;
2275 return -1;
2276 }
2277
2278 return 0;
2279 }
2280
2281 break;
2282
2283 case SO_RCVTIMEO:
2284 if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVTIMEO)) {
2285 const struct zsock_timeval *tv = optval;
2286 k_timeout_t timeout;
2287
2288 if (optlen != sizeof(struct zsock_timeval)) {
2289 errno = EINVAL;
2290 return -1;
2291 }
2292
2293 if (tv->tv_sec == 0 && tv->tv_usec == 0) {
2294 timeout = K_FOREVER;
2295 } else {
2296 timeout = K_USEC(tv->tv_sec * 1000000ULL
2297 + tv->tv_usec);
2298 }
2299
2300 ret = net_context_set_option(ctx,
2301 NET_OPT_RCVTIMEO,
2302 &timeout,
2303 sizeof(timeout));
2304
2305 if (ret < 0) {
2306 errno = -ret;
2307 return -1;
2308 }
2309
2310 return 0;
2311 }
2312
2313 break;
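
		/* Note: a zero timeval maps to K_FOREVER, i.e. it disables
		 * the timeout, matching POSIX where a zero SO_RCVTIMEO or
		 * SO_SNDTIMEO means "block indefinitely". Illustrative
		 * sketch of setting a 500 ms receive timeout (application
		 * side, names assumed):
		 *
		 *   struct zsock_timeval tv = { .tv_sec = 0, .tv_usec = 500000 };
		 *
		 *   (void)zsock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
		 *                          &tv, sizeof(tv));
		 */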

		case SO_SNDTIMEO:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDTIMEO)) {
				const struct zsock_timeval *tv = optval;
				k_timeout_t timeout;

				if (optlen != sizeof(struct zsock_timeval)) {
					errno = EINVAL;
					return -1;
				}

				if (tv->tv_sec == 0 && tv->tv_usec == 0) {
					timeout = K_FOREVER;
				} else {
					timeout = K_USEC(tv->tv_sec * 1000000ULL
							 + tv->tv_usec);
				}

				ret = net_context_set_option(ctx,
							     NET_OPT_SNDTIMEO,
							     &timeout,
							     sizeof(timeout));
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_TXTIME:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_TXTIME)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_TXTIME,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_SOCKS5:
			if (IS_ENABLED(CONFIG_SOCKS)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_SOCKS5,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				net_context_set_proxy_enabled(ctx, true);

				return 0;
			}

			break;

		case SO_BINDTODEVICE: {
			struct net_if *iface;
			const struct ifreq *ifreq = optval;

			if (net_context_get_family(ctx) != AF_INET &&
			    net_context_get_family(ctx) != AF_INET6) {
				errno = EAFNOSUPPORT;
				return -1;
			}

			/* An optlen of 0 or an empty interface name
			 * removes the binding.
			 */
			if ((optlen == 0) || (ifreq != NULL &&
					      strlen(ifreq->ifr_name) == 0)) {
				ctx->flags &= ~NET_CONTEXT_BOUND_TO_IFACE;
				return 0;
			}

			if ((ifreq == NULL) || (optlen != sizeof(*ifreq))) {
				errno = EINVAL;
				return -1;
			}

			if (IS_ENABLED(CONFIG_NET_INTERFACE_NAME)) {
				ret = net_if_get_by_name(ifreq->ifr_name);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				iface = net_if_get_by_index(ret);
				if (iface == NULL) {
					errno = ENODEV;
					return -1;
				}
			} else {
				const struct device *dev;

				dev = device_get_binding(ifreq->ifr_name);
				if (dev == NULL) {
					errno = ENODEV;
					return -1;
				}

				iface = net_if_lookup_by_dev(dev);
				if (iface == NULL) {
					errno = ENODEV;
					return -1;
				}
			}

			net_context_bind_iface(ctx, iface);

			return 0;
		}
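
		/* Illustrative sketch of SO_BINDTODEVICE from application
		 * code; the interface name "eth0" is an assumption, and
		 * name-based lookup needs CONFIG_NET_INTERFACE_NAME:
		 *
		 *   struct ifreq ifreq = { 0 };
		 *
		 *   strncpy(ifreq.ifr_name, "eth0", sizeof(ifreq.ifr_name) - 1);
		 *   (void)zsock_setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE,
		 *                          &ifreq, sizeof(ifreq));
		 */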

		case SO_LINGER:
			/* Ignored, accepted for compatibility only. */
			return 0;

		case SO_KEEPALIVE:
			if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE) &&
			    net_context_get_proto(ctx) == IPPROTO_TCP) {
				ret = net_tcp_set_option(ctx,
							 TCP_OPT_KEEPALIVE,
							 optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case SO_TIMESTAMPING:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_TIMESTAMPING,
							     optval, optlen);

				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}

		break;

	case IPPROTO_TCP:
		switch (optname) {
		case TCP_NODELAY:
			ret = net_tcp_set_option(ctx,
						 TCP_OPT_NODELAY, optval, optlen);
			return ret;

		case TCP_KEEPIDLE:
			__fallthrough;
		case TCP_KEEPINTVL:
			__fallthrough;
		case TCP_KEEPCNT:
			if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE)) {
				ret = net_tcp_set_option(ctx,
							 get_tcp_option(optname),
							 optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}
		break;
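
	/* Illustrative sketch: enabling TCP keepalive with custom timing
	 * (needs CONFIG_NET_TCP_KEEPALIVE); the numeric values below are
	 * assumptions for the example:
	 *
	 *   int one = 1, idle = 60, intvl = 10, cnt = 5;
	 *
	 *   (void)zsock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
	 *                          &one, sizeof(one));
	 *   (void)zsock_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE,
	 *                          &idle, sizeof(idle));
	 *   (void)zsock_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL,
	 *                          &intvl, sizeof(intvl));
	 *   (void)zsock_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT,
	 *                          &cnt, sizeof(cnt));
	 */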

	case IPPROTO_IP:
		switch (optname) {
		case IP_TOS:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_DSCP_ECN,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IP_PKTINFO:
			if (IS_ENABLED(CONFIG_NET_IPV4) &&
			    IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_RECV_PKTINFO,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IP_MULTICAST_IF:
			if (IS_ENABLED(CONFIG_NET_IPV4)) {
				return ipv4_multicast_if(ctx, optval, optlen, false);
			}

			break;

		case IP_MULTICAST_TTL:
			ret = net_context_set_option(ctx, NET_OPT_MCAST_TTL,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IP_TTL:
			ret = net_context_set_option(ctx, NET_OPT_TTL,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IP_ADD_MEMBERSHIP:
			if (IS_ENABLED(CONFIG_NET_IPV4)) {
				return ipv4_multicast_group(ctx, optval,
							    optlen, true);
			}

			break;

		case IP_DROP_MEMBERSHIP:
			if (IS_ENABLED(CONFIG_NET_IPV4)) {
				return ipv4_multicast_group(ctx, optval,
							    optlen, false);
			}

			break;

		case IP_LOCAL_PORT_RANGE:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_CLAMP_PORT_RANGE)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_LOCAL_PORT_RANGE,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;
		}

		break;

	case IPPROTO_IPV6:
		switch (optname) {
		case IPV6_MTU:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				ret = net_context_set_option(ctx, NET_OPT_MTU,
							     optval, optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_V6ONLY:
			if (IS_ENABLED(CONFIG_NET_IPV4_MAPPING_TO_IPV6)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_IPV6_V6ONLY,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}
			}

			return 0;

		case IPV6_RECVPKTINFO:
			if (IS_ENABLED(CONFIG_NET_IPV6) &&
			    IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_RECV_PKTINFO,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_ADDR_PREFERENCES:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_ADDR_PREFERENCES,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_TCLASS:
			if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
				ret = net_context_set_option(ctx,
							     NET_OPT_DSCP_ECN,
							     optval,
							     optlen);
				if (ret < 0) {
					errno = -ret;
					return -1;
				}

				return 0;
			}

			break;

		case IPV6_UNICAST_HOPS:
			ret = net_context_set_option(ctx,
						     NET_OPT_UNICAST_HOP_LIMIT,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IPV6_MULTICAST_IF:
			ret = net_context_set_option(ctx,
						     NET_OPT_MCAST_IFINDEX,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IPV6_MULTICAST_HOPS:
			ret = net_context_set_option(ctx,
						     NET_OPT_MCAST_HOP_LIMIT,
						     optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;

		case IPV6_ADD_MEMBERSHIP:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				return ipv6_multicast_group(ctx, optval,
							    optlen, true);
			}

			break;

		case IPV6_DROP_MEMBERSHIP:
			if (IS_ENABLED(CONFIG_NET_IPV6)) {
				return ipv6_multicast_group(ctx, optval,
							    optlen, false);
			}

			break;
		}

		break;
	}

	errno = ENOPROTOOPT;
	return -1;
}
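
/* Illustrative sketch of the application-side entry point that funnels
 * into zsock_setsockopt_ctx(); the option values are assumptions:
 *
 *   int one = 1;
 *
 *   (void)zsock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
 *                          &one, sizeof(one));
 *   (void)zsock_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
 *                          &one, sizeof(one));
 */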

int zsock_getpeername_ctx(struct net_context *ctx, struct sockaddr *addr,
			  socklen_t *addrlen)
{
	socklen_t newlen = 0;

	if (addr == NULL || addrlen == NULL) {
		errno = EINVAL;
		return -1;
	}

	if (!(ctx->flags & NET_CONTEXT_REMOTE_ADDR_SET)) {
		errno = ENOTCONN;
		return -1;
	}

	if (net_context_get_type(ctx) == SOCK_STREAM &&
	    net_context_get_state(ctx) != NET_CONTEXT_CONNECTED) {
		errno = ENOTCONN;
		return -1;
	}

	if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->remote.sa_family == AF_INET) {
		struct sockaddr_in addr4 = { 0 };

		addr4.sin_family = AF_INET;
		addr4.sin_port = net_sin(&ctx->remote)->sin_port;
		memcpy(&addr4.sin_addr, &net_sin(&ctx->remote)->sin_addr,
		       sizeof(struct in_addr));
		newlen = sizeof(struct sockaddr_in);

		memcpy(addr, &addr4, MIN(*addrlen, newlen));
	} else if (IS_ENABLED(CONFIG_NET_IPV6) &&
		   ctx->remote.sa_family == AF_INET6) {
		struct sockaddr_in6 addr6 = { 0 };

		addr6.sin6_family = AF_INET6;
		addr6.sin6_port = net_sin6(&ctx->remote)->sin6_port;
		memcpy(&addr6.sin6_addr, &net_sin6(&ctx->remote)->sin6_addr,
		       sizeof(struct in6_addr));
		newlen = sizeof(struct sockaddr_in6);

		memcpy(addr, &addr6, MIN(*addrlen, newlen));
	} else {
		errno = EINVAL;
		return -1;
	}

	*addrlen = newlen;

	return 0;
}
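
/* Illustrative sketch: querying the peer of a connected socket from
 * application code (the variable names are assumptions):
 *
 *   struct sockaddr_storage peer;
 *   socklen_t peer_len = sizeof(peer);
 *
 *   if (zsock_getpeername(sock, (struct sockaddr *)&peer,
 *                         &peer_len) == 0) {
 *       // peer_len now holds the actual address length
 *   }
 */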

int zsock_getsockname_ctx(struct net_context *ctx, struct sockaddr *addr,
			  socklen_t *addrlen)
{
	socklen_t newlen = 0;
	int ret;

	if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->local.family == AF_INET) {
		struct sockaddr_in addr4 = { 0 };

		if (net_sin_ptr(&ctx->local)->sin_addr == NULL) {
			errno = EINVAL;
			return -1;
		}

		newlen = sizeof(struct sockaddr_in);

		ret = net_context_get_local_addr(ctx,
						 (struct sockaddr *)&addr4,
						 &newlen);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}

		memcpy(addr, &addr4, MIN(*addrlen, newlen));

	} else if (IS_ENABLED(CONFIG_NET_IPV6) && ctx->local.family == AF_INET6) {
		struct sockaddr_in6 addr6 = { 0 };

		if (net_sin6_ptr(&ctx->local)->sin6_addr == NULL) {
			errno = EINVAL;
			return -1;
		}

		newlen = sizeof(struct sockaddr_in6);

		ret = net_context_get_local_addr(ctx,
						 (struct sockaddr *)&addr6,
						 &newlen);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}

		memcpy(addr, &addr6, MIN(*addrlen, newlen));
	} else {
		errno = EINVAL;
		return -1;
	}

	*addrlen = newlen;

	return 0;
}
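
/* Illustrative sketch: discovering the ephemeral port picked by the
 * stack after a bind to port 0 (the variable names are assumptions):
 *
 *   struct sockaddr_in local = { 0 };
 *   socklen_t len = sizeof(local);
 *
 *   if (zsock_getsockname(sock, (struct sockaddr *)&local, &len) == 0) {
 *       uint16_t port = ntohs(local.sin_port);
 *   }
 */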

static ssize_t sock_read_vmeth(void *obj, void *buffer, size_t count)
{
	return zsock_recvfrom_ctx(obj, buffer, count, 0, NULL, 0);
}

static ssize_t sock_write_vmeth(void *obj, const void *buffer, size_t count)
{
	return zsock_sendto_ctx(obj, buffer, count, 0, NULL, 0);
}

static void zsock_ctx_set_lock(struct net_context *ctx, struct k_mutex *lock)
{
	ctx->cond.lock = lock;
}

static int sock_ioctl_vmeth(void *obj, unsigned int request, va_list args)
{
	switch (request) {

	/* In Zephyr, fcntl() is just an alias of ioctl(). */
	case F_GETFL:
		if (sock_is_nonblock(obj)) {
			return O_NONBLOCK;
		}

		return 0;

	case F_SETFL: {
		int flags;

		flags = va_arg(args, int);

		if (flags & O_NONBLOCK) {
			sock_set_flag(obj, SOCK_NONBLOCK, SOCK_NONBLOCK);
		} else {
			sock_set_flag(obj, SOCK_NONBLOCK, 0);
		}

		return 0;
	}

	case ZFD_IOCTL_POLL_PREPARE: {
		struct zsock_pollfd *pfd;
		struct k_poll_event **pev;
		struct k_poll_event *pev_end;

		pfd = va_arg(args, struct zsock_pollfd *);
		pev = va_arg(args, struct k_poll_event **);
		pev_end = va_arg(args, struct k_poll_event *);

		return zsock_poll_prepare_ctx(obj, pfd, pev, pev_end);
	}

	case ZFD_IOCTL_POLL_UPDATE: {
		struct zsock_pollfd *pfd;
		struct k_poll_event **pev;

		pfd = va_arg(args, struct zsock_pollfd *);
		pev = va_arg(args, struct k_poll_event **);

		return zsock_poll_update_ctx(obj, pfd, pev);
	}

	case ZFD_IOCTL_SET_LOCK: {
		struct k_mutex *lock;

		lock = va_arg(args, struct k_mutex *);

		zsock_ctx_set_lock(obj, lock);
		return 0;
	}

	case ZFD_IOCTL_FIONBIO:
		sock_set_flag(obj, SOCK_NONBLOCK, SOCK_NONBLOCK);
		return 0;

	case ZFD_IOCTL_FIONREAD: {
		int *avail = va_arg(args, int *);

		*avail = zsock_fionread_ctx(obj);
		return 0;
	}

	default:
		errno = EOPNOTSUPP;
		return -1;
	}
}
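
/* Illustrative sketch: switching a socket to non-blocking mode through
 * the fcntl()-style interface that lands in the handler above:
 *
 *   int flags = zsock_fcntl(sock, F_GETFL, 0);
 *
 *   (void)zsock_fcntl(sock, F_SETFL, flags | O_NONBLOCK);
 */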

static int sock_shutdown_vmeth(void *obj, int how)
{
	return zsock_shutdown_ctx(obj, how);
}

static int sock_bind_vmeth(void *obj, const struct sockaddr *addr,
			   socklen_t addrlen)
{
	return zsock_bind_ctx(obj, addr, addrlen);
}

static int sock_connect_vmeth(void *obj, const struct sockaddr *addr,
			      socklen_t addrlen)
{
	return zsock_connect_ctx(obj, addr, addrlen);
}

static int sock_listen_vmeth(void *obj, int backlog)
{
	return zsock_listen_ctx(obj, backlog);
}

static int sock_accept_vmeth(void *obj, struct sockaddr *addr,
			     socklen_t *addrlen)
{
	return zsock_accept_ctx(obj, addr, addrlen);
}

static ssize_t sock_sendto_vmeth(void *obj, const void *buf, size_t len,
				 int flags, const struct sockaddr *dest_addr,
				 socklen_t addrlen)
{
	return zsock_sendto_ctx(obj, buf, len, flags, dest_addr, addrlen);
}

static ssize_t sock_sendmsg_vmeth(void *obj, const struct msghdr *msg,
				  int flags)
{
	return zsock_sendmsg_ctx(obj, msg, flags);
}

static ssize_t sock_recvmsg_vmeth(void *obj, struct msghdr *msg, int flags)
{
	return zsock_recvmsg_ctx(obj, msg, flags);
}

static ssize_t sock_recvfrom_vmeth(void *obj, void *buf, size_t max_len,
				   int flags, struct sockaddr *src_addr,
				   socklen_t *addrlen)
{
	return zsock_recvfrom_ctx(obj, buf, max_len, flags,
				  src_addr, addrlen);
}

static int sock_getsockopt_vmeth(void *obj, int level, int optname,
				 void *optval, socklen_t *optlen)
{
	return zsock_getsockopt_ctx(obj, level, optname, optval, optlen);
}

static int sock_setsockopt_vmeth(void *obj, int level, int optname,
				 const void *optval, socklen_t optlen)
{
	return zsock_setsockopt_ctx(obj, level, optname, optval, optlen);
}

static int sock_close2_vmeth(void *obj, int fd)
{
	return zsock_close_ctx(obj, fd);
}

static int sock_getpeername_vmeth(void *obj, struct sockaddr *addr,
				  socklen_t *addrlen)
{
	return zsock_getpeername_ctx(obj, addr, addrlen);
}

static int sock_getsockname_vmeth(void *obj, struct sockaddr *addr,
				  socklen_t *addrlen)
{
	return zsock_getsockname_ctx(obj, addr, addrlen);
}
const struct socket_op_vtable sock_fd_op_vtable = {
	.fd_vtable = {
		.read = sock_read_vmeth,
		.write = sock_write_vmeth,
		.close2 = sock_close2_vmeth,
		.ioctl = sock_ioctl_vmeth,
	},
	.shutdown = sock_shutdown_vmeth,
	.bind = sock_bind_vmeth,
	.connect = sock_connect_vmeth,
	.listen = sock_listen_vmeth,
	.accept = sock_accept_vmeth,
	.sendto = sock_sendto_vmeth,
	.sendmsg = sock_sendmsg_vmeth,
	.recvmsg = sock_recvmsg_vmeth,
	.recvfrom = sock_recvfrom_vmeth,
	.getsockopt = sock_getsockopt_vmeth,
	.setsockopt = sock_setsockopt_vmeth,
	.getpeername = sock_getpeername_vmeth,
	.getsockname = sock_getsockname_vmeth,
};

static bool inet_is_supported(int family, int type, int proto)
{
	if (family != AF_INET && family != AF_INET6) {
		return false;
	}

	return true;
}

NET_SOCKET_REGISTER(af_inet46, NET_SOCKET_DEFAULT_PRIO, AF_UNSPEC,
		    inet_is_supported, zsock_socket_internal);
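
/* Illustrative sketch: an alternative socket implementation (for
 * example an offloaded one) can register itself through the same
 * macro. All names below are assumptions for the example; consult the
 * NET_SOCKET_REGISTER documentation for how priorities are ordered.
 *
 *   static bool my_offload_is_supported(int family, int type, int proto)
 *   {
 *       return family == AF_INET && type == SOCK_DGRAM;
 *   }
 *
 *   static int my_offload_socket(int family, int type, int proto)
 *   {
 *       // create and return an fd backed by the offload engine
 *   }
 *
 *   NET_SOCKET_REGISTER(my_offload, NET_SOCKET_DEFAULT_PRIO,
 *                       AF_INET, my_offload_is_supported,
 *                       my_offload_socket);
 */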