1 /*
2 * Copyright (c) 2017 Linaro Limited
3 * Copyright (c) 2021 Nordic Semiconductor
4 * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9 /* Zephyr headers */
10 #include <zephyr/logging/log.h>
11 LOG_MODULE_REGISTER(net_sock, CONFIG_NET_SOCKETS_LOG_LEVEL);
12
13 #include <zephyr/kernel.h>
14 #include <zephyr/net/net_context.h>
15 #include <zephyr/net/net_pkt.h>
16 #include <zephyr/tracing/tracing.h>
17 #include <zephyr/net/socket.h>
18 #include <zephyr/net/socket_types.h>
19 #include <zephyr/posix/fcntl.h>
20 #include <zephyr/internal/syscall_handler.h>
21 #include <zephyr/sys/fdtable.h>
22 #include <zephyr/sys/math_extras.h>
23 #include <zephyr/sys/iterable_sections.h>
24
25 #if defined(CONFIG_SOCKS)
26 #include "socks.h"
27 #endif
28
29 #include <zephyr/net/igmp.h>
30 #include "../../ip/ipv6.h"
31
32 #include "../../ip/net_stats.h"
33
34 #include "sockets_internal.h"
35 #include "../../ip/tcp_internal.h"
36 #include "../../ip/net_private.h"
37
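/* Error-reporting convention used throughout this file: internal
 * net_context_*() calls return 0 or a negative errno value, while the
 * socket layer reports failure by setting errno and returning -1.
 * Illustrative sketch of the expansion (not part of the build):
 *
 *   SET_ERRNO(net_context_bind(ctx, addr, addrlen));
 *   // roughly: { int _err = net_context_bind(...);
 *   //            if (_err < 0) { errno = -_err; return -1; } }
 */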
38 #define SET_ERRNO(x) \
39 { int _err = x; if (_err < 0) { errno = -_err; return -1; } }
40
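/* Dispatch helper: resolves the fd to its socket object and vtable,
 * holds the per-socket lock around the operation, and yields the
 * callee's return value. Hypothetical call-site sketch:
 *
 *   ret = VTABLE_CALL(bind, sock, addr, addrlen);
 *   // sets errno = EBADF for an invalid fd, or
 *   // errno = EOPNOTSUPP if the vtable lacks the operation
 */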
41 #define VTABLE_CALL(fn, sock, ...) \
42 ({ \
43 const struct socket_op_vtable *vtable; \
44 struct k_mutex *lock; \
45 void *obj; \
46 int retval; \
47 \
48 obj = get_sock_vtable(sock, &vtable, &lock); \
49 if (obj == NULL) { \
50 errno = EBADF; \
51 return -1; \
52 } \
53 \
54 if (vtable->fn == NULL) { \
55 errno = EOPNOTSUPP; \
56 return -1; \
57 } \
58 \
59 (void)k_mutex_lock(lock, K_FOREVER); \
60 \
61 retval = vtable->fn(obj, __VA_ARGS__); \
62 \
63 k_mutex_unlock(lock); \
64 \
65 retval; \
66 })
67
68 const struct socket_op_vtable sock_fd_op_vtable;
69
70 static inline void *get_sock_vtable(int sock,
71 const struct socket_op_vtable **vtable,
72 struct k_mutex **lock)
73 {
74 void *ctx;
75
76 ctx = zvfs_get_fd_obj_and_vtable(sock,
77 (const struct fd_op_vtable **)vtable,
78 lock);
79
80 #ifdef CONFIG_USERSPACE
81 if (ctx != NULL && k_is_in_user_syscall()) {
82 if (!k_object_is_valid(ctx, K_OBJ_NET_SOCKET)) {
83 /* Invalidate the context: the caller doesn't have
84  * sufficient permission, or there was some other
85  * problem with the net socket object
86  */
87 ctx = NULL;
88 }
89 }
90 #endif /* CONFIG_USERSPACE */
91
92 if (ctx == NULL) {
93 NET_DBG("Invalid access on sock %d by thread %p (%s)", sock,
94 _current, k_thread_name_get(_current));
95 }
96
97 return ctx;
98 }
99
100 void *z_impl_zsock_get_context_object(int sock)
101 {
102 const struct socket_op_vtable *ignored;
103
104 return get_sock_vtable(sock, &ignored, NULL);
105 }
106
107 #ifdef CONFIG_USERSPACE
108 void *z_vrfy_zsock_get_context_object(int sock)
109 {
110 /* All checking done in implementation */
111 return z_impl_zsock_get_context_object(sock);
112 }
113
114 #include <zephyr/syscalls/zsock_get_context_object_mrsh.c>
115 #endif
116
117 static void zsock_received_cb(struct net_context *ctx,
118 struct net_pkt *pkt,
119 union net_ip_header *ip_hdr,
120 union net_proto_header *proto_hdr,
121 int status,
122 void *user_data);
123
124 static int fifo_wait_non_empty(struct k_fifo *fifo, k_timeout_t timeout)
125 {
126 struct k_poll_event events[] = {
127 K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
128 K_POLL_MODE_NOTIFY_ONLY, fifo),
129 };
130
131 return k_poll(events, ARRAY_SIZE(events), timeout);
132 }
133
134 static void zsock_flush_queue(struct net_context *ctx)
135 {
136 bool is_listen = net_context_get_state(ctx) == NET_CONTEXT_LISTENING;
137 void *p;
138
139 /* recv_q and accept_q are shared via a union */
140 while ((p = k_fifo_get(&ctx->recv_q, K_NO_WAIT)) != NULL) {
141 if (is_listen) {
142 NET_DBG("discarding ctx %p", p);
143 net_context_put(p);
144 } else {
145 NET_DBG("discarding pkt %p", p);
146 net_pkt_unref(p);
147 }
148 }
149
150 /* Some threads might be waiting on recv, cancel the wait */
151 k_fifo_cancel_wait(&ctx->recv_q);
152
153 /* Wake reader if it was sleeping */
154 (void)k_condvar_signal(&ctx->cond.recv);
155 }
156
157 #if defined(CONFIG_NET_NATIVE)
158 static int zsock_socket_internal(int family, int type, int proto)
159 {
160 int fd = zvfs_reserve_fd();
161 struct net_context *ctx;
162 int res;
163
164 if (fd < 0) {
165 return -1;
166 }
167
168 if (proto == 0) {
169 if (family == AF_INET || family == AF_INET6) {
170 if (type == SOCK_DGRAM) {
171 proto = IPPROTO_UDP;
172 } else if (type == SOCK_STREAM) {
173 proto = IPPROTO_TCP;
174 }
175 }
176 }
177
178 res = net_context_get(family, type, proto, &ctx);
179 if (res < 0) {
180 zvfs_free_fd(fd);
181 errno = -res;
182 return -1;
183 }
184
185 /* Initialize user_data, all other calls will preserve it */
186 ctx->user_data = NULL;
187
188 /* The socket flags are stored here */
189 ctx->socket_data = NULL;
190
191 /* recv_q and accept_q are in union */
192 k_fifo_init(&ctx->recv_q);
193
194 /* Condition variable is used to avoid keeping the lock for a long
195  * time when waiting for data to be received
196  */
197 k_condvar_init(&ctx->cond.recv);
198
199 /* TCP context is effectively owned by both the application
200  * and the stack: the stack may detect that the peer closed/aborted
201  * the connection, but it must not dispose of the context behind
202  * the application's back. Likewise, when the application "closes"
203  * the context, it's not disposed of immediately - there's still a
204  * closing handshake for the stack to perform.
205  */
206 if (proto == IPPROTO_TCP) {
207 net_context_ref(ctx);
208 }
209
210 zvfs_finalize_typed_fd(fd, ctx, (const struct fd_op_vtable *)&sock_fd_op_vtable,
211 ZVFS_MODE_IFSOCK);
212
213 NET_DBG("socket: ctx=%p, fd=%d", ctx, fd);
214
215 return fd;
216 }
217 #endif /* CONFIG_NET_NATIVE */
218
219 int z_impl_zsock_socket(int family, int type, int proto)
220 {
221 STRUCT_SECTION_FOREACH(net_socket_register, sock_family) {
222 int ret;
223
224 if (sock_family->family != family &&
225 sock_family->family != AF_UNSPEC) {
226 continue;
227 }
228
229 NET_ASSERT(sock_family->is_supported);
230
231 if (!sock_family->is_supported(family, type, proto)) {
232 continue;
233 }
234
235 errno = 0;
236 ret = sock_family->handler(family, type, proto);
237
238 SYS_PORT_TRACING_OBJ_INIT(socket, ret < 0 ? -errno : ret,
239 family, type, proto);
240
241 (void)sock_obj_core_alloc(ret, sock_family, family, type, proto);
242
243 return ret;
244 }
245
246 errno = EAFNOSUPPORT;
247 SYS_PORT_TRACING_OBJ_INIT(socket, -errno, family, type, proto);
248 return -1;
249 }
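/* Illustrative application-side usage of the call above (a sketch, not
 * part of this file): proto 0 is resolved to IPPROTO_UDP/IPPROTO_TCP by
 * zsock_socket_internal() for native sockets.
 *
 *   int fd = zsock_socket(AF_INET, SOCK_DGRAM, 0);
 *   if (fd < 0) {
 *       // errno set, e.g. EAFNOSUPPORT if no registered family matched
 *   }
 */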
250
251 #ifdef CONFIG_USERSPACE
252 static inline int z_vrfy_zsock_socket(int family, int type, int proto)
253 {
254 /* implementation call to net_context_get() should do all necessary
255 * checking
256 */
257 return z_impl_zsock_socket(family, type, proto);
258 }
259 #include <zephyr/syscalls/zsock_socket_mrsh.c>
260 #endif /* CONFIG_USERSPACE */
261
262 int zsock_close_ctx(struct net_context *ctx)
263 {
264 /* Reset callbacks to avoid any race conditions while
265  * flushing queues. No need to check return values here,
266  * as these are fail-free operations and we're closing the
267  * socket anyway.
268  */
269 if (net_context_get_state(ctx) == NET_CONTEXT_LISTENING) {
270 (void)net_context_accept(ctx, NULL, K_NO_WAIT, NULL);
271 } else {
272 (void)net_context_recv(ctx, NULL, K_NO_WAIT, NULL);
273 }
274
275 ctx->user_data = INT_TO_POINTER(EINTR);
276 sock_set_error(ctx);
277
278 zsock_flush_queue(ctx);
279
280 SET_ERRNO(net_context_put(ctx));
281
282 return 0;
283 }
284
285 int z_impl_zsock_close(int sock)
286 {
287 const struct socket_op_vtable *vtable;
288 struct k_mutex *lock;
289 void *ctx;
290 int ret;
291
292 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, close, sock);
293
294 ctx = get_sock_vtable(sock, &vtable, &lock);
295 if (ctx == NULL) {
296 errno = EBADF;
297 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, close, sock, -errno);
298 return -1;
299 }
300
301 (void)k_mutex_lock(lock, K_FOREVER);
302
303 NET_DBG("close: ctx=%p, fd=%d", ctx, sock);
304
305 ret = vtable->fd_vtable.close(ctx);
306
307 k_mutex_unlock(lock);
308
309 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, close, sock, ret < 0 ? -errno : ret);
310
311 zvfs_free_fd(sock);
312
313 (void)sock_obj_core_dealloc(sock);
314
315 return ret;
316 }
317
318 #ifdef CONFIG_USERSPACE
319 static inline int z_vrfy_zsock_close(int sock)
320 {
321 return z_impl_zsock_close(sock);
322 }
323 #include <zephyr/syscalls/zsock_close_mrsh.c>
324 #endif /* CONFIG_USERSPACE */
325
326 int z_impl_zsock_shutdown(int sock, int how)
327 {
328 const struct socket_op_vtable *vtable;
329 struct k_mutex *lock;
330 void *ctx;
331 int ret;
332
333 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, shutdown, sock, how);
334
335 ctx = get_sock_vtable(sock, &vtable, &lock);
336 if (ctx == NULL) {
337 errno = EBADF;
338 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, shutdown, sock, -errno);
339 return -1;
340 }
341
342 if (!vtable->shutdown) {
343 errno = ENOTSUP;
344 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, shutdown, sock, -errno);
345 return -1;
346 }
347
348 (void)k_mutex_lock(lock, K_FOREVER);
349
350 NET_DBG("shutdown: ctx=%p, fd=%d, how=%d", ctx, sock, how);
351
352 ret = vtable->shutdown(ctx, how);
353
354 k_mutex_unlock(lock);
355
356 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, shutdown, sock, ret < 0 ? -errno : ret);
357
358 return ret;
359 }
360
361 #ifdef CONFIG_USERSPACE
362 static inline int z_vrfy_zsock_shutdown(int sock, int how)
363 {
364 return z_impl_zsock_shutdown(sock, how);
365 }
366 #include <zephyr/syscalls/zsock_shutdown_mrsh.c>
367 #endif /* CONFIG_USERSPACE */
368
369 static void zsock_accepted_cb(struct net_context *new_ctx,
370 struct sockaddr *addr, socklen_t addrlen,
371 int status, void *user_data) {
372 struct net_context *parent = user_data;
373
374 NET_DBG("parent=%p, ctx=%p, st=%d", parent, new_ctx, status);
375
376 if (status == 0) {
377 /* This just installs a callback, so cannot fail. */
378 (void)net_context_recv(new_ctx, zsock_received_cb, K_NO_WAIT,
379 NULL);
380 k_fifo_init(&new_ctx->recv_q);
381 k_condvar_init(&new_ctx->cond.recv);
382
383 k_fifo_put(&parent->accept_q, new_ctx);
384
385 /* TCP context is effectively owned by both the application
386  * and the stack: the stack may detect that the peer closed/aborted
387  * the connection, but it must not dispose of the context behind
388  * the application's back. Likewise, when the application "closes"
389  * the context, it's not disposed of immediately - there's still a
390  * closing handshake for the stack to perform.
391  */
392 net_context_ref(new_ctx);
393
394 (void)k_condvar_signal(&parent->cond.recv);
395 }
396
397 }
398
399 static void zsock_received_cb(struct net_context *ctx,
400 struct net_pkt *pkt,
401 union net_ip_header *ip_hdr,
402 union net_proto_header *proto_hdr,
403 int status,
404 void *user_data)
405 {
406 if (ctx->cond.lock) {
407 (void)k_mutex_lock(ctx->cond.lock, K_FOREVER);
408 }
409
410 NET_DBG("ctx=%p, pkt=%p, st=%d, user_data=%p", ctx, pkt, status,
411 user_data);
412
413 if (status < 0) {
414 ctx->user_data = INT_TO_POINTER(-status);
415 sock_set_error(ctx);
416 }
417
418 /* if pkt is NULL, EOF */
419 if (!pkt) {
420 struct net_pkt *last_pkt = k_fifo_peek_tail(&ctx->recv_q);
421
422 if (!last_pkt) {
423 /* If there are no packets in the queue, recv() may
424 * be blocked waiting on it to become non-empty,
425 * so cancel that wait.
426 */
427 sock_set_eof(ctx);
428 k_fifo_cancel_wait(&ctx->recv_q);
429 NET_DBG("Marked socket %p as peer-closed", ctx);
430 } else {
431 net_pkt_set_eof(last_pkt, true);
432 NET_DBG("Set EOF flag on pkt %p", last_pkt);
433 }
434
435 goto unlock;
436 }
437
438 /* Normal packet */
439 net_pkt_set_eof(pkt, false);
440
441 net_pkt_set_rx_stats_tick(pkt, k_cycle_get_32());
442
443 k_fifo_put(&ctx->recv_q, pkt);
444
445 unlock:
446 /* Wake reader if it was sleeping */
447 (void)k_condvar_signal(&ctx->cond.recv);
448
449 if (ctx->cond.lock) {
450 (void)k_mutex_unlock(ctx->cond.lock);
451 }
452 }
453
454 int zsock_shutdown_ctx(struct net_context *ctx, int how)
455 {
456 if (how == ZSOCK_SHUT_RD) {
457 if (net_context_get_state(ctx) == NET_CONTEXT_LISTENING) {
458 SET_ERRNO(net_context_accept(ctx, NULL, K_NO_WAIT, NULL));
459 } else {
460 SET_ERRNO(net_context_recv(ctx, NULL, K_NO_WAIT, NULL));
461 }
462
463 sock_set_eof(ctx);
464
465 zsock_flush_queue(ctx);
466 } else if (how == ZSOCK_SHUT_WR || how == ZSOCK_SHUT_RDWR) {
467 SET_ERRNO(-ENOTSUP);
468 } else {
469 SET_ERRNO(-EINVAL);
470 }
471
472 return 0;
473 }
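/* Note: only ZSOCK_SHUT_RD is implemented above; ZSOCK_SHUT_WR and
 * ZSOCK_SHUT_RDWR report ENOTSUP. Illustrative sketch:
 *
 *   if (zsock_shutdown(sock, ZSOCK_SHUT_WR) < 0) {
 *       // errno == ENOTSUP on this code path
 *   }
 */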
474
475 int zsock_bind_ctx(struct net_context *ctx, const struct sockaddr *addr,
476 socklen_t addrlen)
477 {
478 SET_ERRNO(net_context_bind(ctx, addr, addrlen));
479 /* For a DGRAM socket, we expect to receive packets after a call to
480  * bind(), but for a STREAM socket, the next expected operation is
481  * listen(), which doesn't work if a recv callback is set.
482  */
483 if (net_context_get_type(ctx) == SOCK_DGRAM) {
484 SET_ERRNO(net_context_recv(ctx, zsock_received_cb, K_NO_WAIT,
485 ctx->user_data));
486 }
487
488 return 0;
489 }
490
491 int z_impl_zsock_bind(int sock, const struct sockaddr *addr, socklen_t addrlen)
492 {
493 int ret;
494
495 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, bind, sock, addr, addrlen);
496
497 ret = VTABLE_CALL(bind, sock, addr, addrlen);
498
499 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, bind, sock, ret < 0 ? -errno : ret);
500
501 return ret;
502 }
503
504 #ifdef CONFIG_USERSPACE
505 static inline int z_vrfy_zsock_bind(int sock, const struct sockaddr *addr,
506 socklen_t addrlen)
507 {
508 struct sockaddr_storage dest_addr_copy;
509
510 K_OOPS(K_SYSCALL_VERIFY(addrlen <= sizeof(dest_addr_copy)));
511 K_OOPS(k_usermode_from_copy(&dest_addr_copy, (void *)addr, addrlen));
512
513 return z_impl_zsock_bind(sock, (struct sockaddr *)&dest_addr_copy,
514 addrlen);
515 }
516 #include <zephyr/syscalls/zsock_bind_mrsh.c>
517 #endif /* CONFIG_USERSPACE */
518
519 static void zsock_connected_cb(struct net_context *ctx, int status, void *user_data)
520 {
521 if (status < 0) {
522 ctx->user_data = INT_TO_POINTER(-status);
523 sock_set_error(ctx);
524 }
525 }
526
527 int zsock_connect_ctx(struct net_context *ctx, const struct sockaddr *addr,
528 socklen_t addrlen)
529 {
530
531 #if defined(CONFIG_SOCKS)
532 if (net_context_is_proxy_enabled(ctx)) {
533 SET_ERRNO(net_socks5_connect(ctx, addr, addrlen));
534 SET_ERRNO(net_context_recv(ctx, zsock_received_cb,
535 K_NO_WAIT, ctx->user_data));
536 return 0;
537 }
538 #endif
539 if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTED) {
540 return 0;
541 } else if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTING) {
542 if (sock_is_error(ctx)) {
543 SET_ERRNO(-POINTER_TO_INT(ctx->user_data));
544 } else {
545 SET_ERRNO(-EALREADY);
546 }
547 } else {
548 k_timeout_t timeout = K_MSEC(CONFIG_NET_SOCKETS_CONNECT_TIMEOUT);
549 net_context_connect_cb_t cb = NULL;
550
551 if (sock_is_nonblock(ctx)) {
552 timeout = K_NO_WAIT;
553 cb = zsock_connected_cb;
554 }
555
556 if (net_context_get_type(ctx) == SOCK_STREAM) {
557 /* For STREAM sockets, net_context_recv() only installs the
558  * recv callback without side effects, and it has to be done
559  * first to avoid a race condition when TCP stream data
560  * arrives right after connect.
561  */
562 SET_ERRNO(net_context_recv(ctx, zsock_received_cb,
563 K_NO_WAIT, ctx->user_data));
564 SET_ERRNO(net_context_connect(ctx, addr, addrlen, cb,
565 timeout, ctx->user_data));
566 } else {
567 SET_ERRNO(net_context_connect(ctx, addr, addrlen, cb,
568 timeout, ctx->user_data));
569 SET_ERRNO(net_context_recv(ctx, zsock_received_cb,
570 K_NO_WAIT, ctx->user_data));
571 }
572 }
573
574 return 0;
575 }
576
577 int z_impl_zsock_connect(int sock, const struct sockaddr *addr,
578 socklen_t addrlen)
579 {
580 int ret;
581
582 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, connect, sock, addr, addrlen);
583
584 ret = VTABLE_CALL(connect, sock, addr, addrlen);
585
586 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, connect, sock,
587 ret < 0 ? -errno : ret);
588 return ret;
589 }
590
591 #ifdef CONFIG_USERSPACE
592 int z_vrfy_zsock_connect(int sock, const struct sockaddr *addr,
593 socklen_t addrlen)
594 {
595 struct sockaddr_storage dest_addr_copy;
596
597 K_OOPS(K_SYSCALL_VERIFY(addrlen <= sizeof(dest_addr_copy)));
598 K_OOPS(k_usermode_from_copy(&dest_addr_copy, (void *)addr, addrlen));
599
600 return z_impl_zsock_connect(sock, (struct sockaddr *)&dest_addr_copy,
601 addrlen);
602 }
603 #include <zephyr/syscalls/zsock_connect_mrsh.c>
604 #endif /* CONFIG_USERSPACE */
605
606 int zsock_listen_ctx(struct net_context *ctx, int backlog)
607 {
608 SET_ERRNO(net_context_listen(ctx, backlog));
609 SET_ERRNO(net_context_accept(ctx, zsock_accepted_cb, K_NO_WAIT, ctx));
610
611 return 0;
612 }
613
614 int z_impl_zsock_listen(int sock, int backlog)
615 {
616 int ret;
617
618 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, listen, sock, backlog);
619
620 ret = VTABLE_CALL(listen, sock, backlog);
621
622 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, listen, sock,
623 ret < 0 ? -errno : ret);
624 return ret;
625 }
626
627 #ifdef CONFIG_USERSPACE
628 static inline int z_vrfy_zsock_listen(int sock, int backlog)
629 {
630 return z_impl_zsock_listen(sock, backlog);
631 }
632 #include <zephyr/syscalls/zsock_listen_mrsh.c>
633 #endif /* CONFIG_USERSPACE */
634
635 int zsock_accept_ctx(struct net_context *parent, struct sockaddr *addr,
636 socklen_t *addrlen)
637 {
638 struct net_context *ctx;
639 struct net_pkt *last_pkt;
640 int fd, ret;
641
642 if (!sock_is_nonblock(parent)) {
643 k_timeout_t timeout = K_FOREVER;
644
645 /* accept() can reuse zsock_wait_data(), as underneath it's
646 * monitoring the same queue (accept_q is an alias for recv_q).
647 */
648 ret = zsock_wait_data(parent, &timeout);
649 if (ret < 0) {
650 errno = -ret;
651 return -1;
652 }
653 }
654
655 ctx = k_fifo_get(&parent->accept_q, K_NO_WAIT);
656 if (ctx == NULL) {
657 errno = EAGAIN;
658 return -1;
659 }
660
661 fd = zvfs_reserve_fd();
662 if (fd < 0) {
663 zsock_flush_queue(ctx);
664 net_context_put(ctx);
665 return -1;
666 }
667
668 /* Check if the connection is already disconnected */
669 last_pkt = k_fifo_peek_tail(&ctx->recv_q);
670 if (last_pkt) {
671 if (net_pkt_eof(last_pkt)) {
672 sock_set_eof(ctx);
673 zvfs_free_fd(fd);
674 zsock_flush_queue(ctx);
675 net_context_put(ctx);
676 errno = ECONNABORTED;
677 return -1;
678 }
679 }
680
681 if (net_context_is_closing(ctx)) {
682 errno = ECONNABORTED;
683 zvfs_free_fd(fd);
684 zsock_flush_queue(ctx);
685 net_context_put(ctx);
686 return -1;
687 }
688
689 net_context_set_accepting(ctx, false);
690
691
692 if (addr != NULL && addrlen != NULL) {
693 int len = MIN(*addrlen, sizeof(ctx->remote));
694
695 memcpy(addr, &ctx->remote, len);
696 /* addrlen is a value-result argument, set to actual
697 * size of source address
698 */
699 if (ctx->remote.sa_family == AF_INET) {
700 *addrlen = sizeof(struct sockaddr_in);
701 } else if (ctx->remote.sa_family == AF_INET6) {
702 *addrlen = sizeof(struct sockaddr_in6);
703 } else {
704 zvfs_free_fd(fd);
705 errno = ENOTSUP;
706 zsock_flush_queue(ctx);
707 net_context_put(ctx);
708 return -1;
709 }
710 }
711
712 NET_DBG("accept: ctx=%p, fd=%d", ctx, fd);
713
714 zvfs_finalize_typed_fd(fd, ctx, (const struct fd_op_vtable *)&sock_fd_op_vtable,
715 ZVFS_MODE_IFSOCK);
716
717 return fd;
718 }
719
720 int z_impl_zsock_accept(int sock, struct sockaddr *addr, socklen_t *addrlen)
721 {
722 int new_sock;
723
724 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, accept, sock);
725
726 new_sock = VTABLE_CALL(accept, sock, addr, addrlen);
727
728 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, accept, new_sock, addr, addrlen,
729 new_sock < 0 ? -errno : 0);
730
731 (void)sock_obj_core_alloc_find(sock, new_sock, SOCK_STREAM);
732
733 return new_sock;
734 }
735
736 #ifdef CONFIG_USERSPACE
737 static inline int z_vrfy_zsock_accept(int sock, struct sockaddr *addr,
738 socklen_t *addrlen)
739 {
740 socklen_t addrlen_copy;
741 int ret;
742
743 K_OOPS(addrlen && k_usermode_from_copy(&addrlen_copy, addrlen,
744 sizeof(socklen_t)));
745 K_OOPS(addr && K_SYSCALL_MEMORY_WRITE(addr, addrlen ? addrlen_copy : 0));
746
747 ret = z_impl_zsock_accept(sock, (struct sockaddr *)addr,
748 addrlen ? &addrlen_copy : NULL);
749
750 K_OOPS(ret >= 0 && addrlen && k_usermode_to_copy(addrlen, &addrlen_copy,
751 sizeof(socklen_t)));
752
753 return ret;
754 }
755 #include <zephyr/syscalls/zsock_accept_mrsh.c>
756 #endif /* CONFIG_USERSPACE */
757
758 #define WAIT_BUFS_INITIAL_MS 10
759 #define WAIT_BUFS_MAX_MS 100
760 #define MAX_WAIT_BUFS K_MSEC(CONFIG_NET_SOCKET_MAX_SEND_WAIT)
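/* The send path below retries with exponential backoff: with the
 * defaults above, the per-round delay grows 10 -> 20 -> 40 -> 80 ->
 * 100 ms (doubled each round, capped at WAIT_BUFS_MAX_MS) until the
 * send succeeds or the buffer/send timeout expires.
 */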
761
762 static int send_check_and_wait(struct net_context *ctx, int status,
763 k_timepoint_t buf_timeout, k_timeout_t timeout,
764 uint32_t *retry_timeout)
765 {
766 if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
767 goto out;
768 }
769
770 if (status != -ENOBUFS && status != -EAGAIN) {
771 goto out;
772 }
773
774 /* If we cannot get any buffers in a reasonable
775  * amount of time, then do not wait forever, as
776  * there might be some bigger issue.
777  * If we get -EAGAIN and cannot recover, it
778  * means that the sending window is blocked
779  * and we just cannot send anything.
780  */
781 if (sys_timepoint_expired(buf_timeout)) {
782 if (status == -ENOBUFS) {
783 status = -ENOMEM;
784 } else {
785 status = -ENOBUFS;
786 }
787
788 goto out;
789 }
790
791 if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
792 *retry_timeout =
793 MIN(*retry_timeout, k_ticks_to_ms_floor32(timeout.ticks));
794 }
795
796 if (ctx->cond.lock) {
797 (void)k_mutex_unlock(ctx->cond.lock);
798 }
799
800 if (status == -ENOBUFS) {
801 /* We can monitor net_pkt/net_buf availability, so just wait. */
802 k_sleep(K_MSEC(*retry_timeout));
803 }
804
805 if (status == -EAGAIN) {
806 if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) &&
807 net_context_get_type(ctx) == SOCK_STREAM &&
808 !net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
809 struct k_poll_event event;
810
811 k_poll_event_init(&event,
812 K_POLL_TYPE_SEM_AVAILABLE,
813 K_POLL_MODE_NOTIFY_ONLY,
814 net_tcp_tx_sem_get(ctx));
815
816 k_poll(&event, 1, K_MSEC(*retry_timeout));
817 } else {
818 k_sleep(K_MSEC(*retry_timeout));
819 }
820 }
821 /* Exponentially increase the retry timeout.
822  * Cap the value at WAIT_BUFS_MAX_MS.
823  */
824 *retry_timeout = MIN(WAIT_BUFS_MAX_MS, *retry_timeout << 1);
825
826 if (ctx->cond.lock) {
827 (void)k_mutex_lock(ctx->cond.lock, K_FOREVER);
828 }
829
830 return 0;
831
832 out:
833 errno = -status;
834 return -1;
835 }
836
837 ssize_t zsock_sendto_ctx(struct net_context *ctx, const void *buf, size_t len,
838 int flags,
839 const struct sockaddr *dest_addr, socklen_t addrlen)
840 {
841 k_timeout_t timeout = K_FOREVER;
842 uint32_t retry_timeout = WAIT_BUFS_INITIAL_MS;
843 k_timepoint_t buf_timeout, end;
844 int status;
845
846 if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
847 timeout = K_NO_WAIT;
848 buf_timeout = sys_timepoint_calc(K_NO_WAIT);
849 } else {
850 net_context_get_option(ctx, NET_OPT_SNDTIMEO, &timeout, NULL);
851 buf_timeout = sys_timepoint_calc(MAX_WAIT_BUFS);
852 }
853 end = sys_timepoint_calc(timeout);
854
855 /* Register the callback before sending in order to receive the response
856 * from the peer.
857 */
858 status = net_context_recv(ctx, zsock_received_cb,
859 K_NO_WAIT, ctx->user_data);
860 if (status < 0) {
861 errno = -status;
862 return -1;
863 }
864
865 while (1) {
866 if (dest_addr) {
867 status = net_context_sendto(ctx, buf, len, dest_addr,
868 addrlen, NULL, timeout,
869 ctx->user_data);
870 } else {
871 status = net_context_send(ctx, buf, len, NULL, timeout,
872 ctx->user_data);
873 }
874
875 if (status < 0) {
876 status = send_check_and_wait(ctx, status, buf_timeout,
877 timeout, &retry_timeout);
878 if (status < 0) {
879 return status;
880 }
881
882 /* Update the timeout value in case the loop is repeated. */
883 timeout = sys_timepoint_timeout(end);
884
885 continue;
886 }
887
888 break;
889 }
890
891 return status;
892 }
893
894 ssize_t z_impl_zsock_sendto(int sock, const void *buf, size_t len, int flags,
895 const struct sockaddr *dest_addr, socklen_t addrlen)
896 {
897 int bytes_sent;
898
899 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, sendto, sock, len, flags,
900 dest_addr, addrlen);
901
902 bytes_sent = VTABLE_CALL(sendto, sock, buf, len, flags, dest_addr, addrlen);
903
904 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, sendto, sock,
905 bytes_sent < 0 ? -errno : bytes_sent);
906
907 sock_obj_core_update_send_stats(sock, bytes_sent);
908
909 return bytes_sent;
910 }
911
912 #ifdef CONFIG_USERSPACE
913 ssize_t z_vrfy_zsock_sendto(int sock, const void *buf, size_t len, int flags,
914 const struct sockaddr *dest_addr, socklen_t addrlen)
915 {
916 struct sockaddr_storage dest_addr_copy;
917
918 K_OOPS(K_SYSCALL_MEMORY_READ(buf, len));
919 if (dest_addr) {
920 K_OOPS(K_SYSCALL_VERIFY(addrlen <= sizeof(dest_addr_copy)));
921 K_OOPS(k_usermode_from_copy(&dest_addr_copy, (void *)dest_addr,
922 addrlen));
923 }
924
925 return z_impl_zsock_sendto(sock, (const void *)buf, len, flags,
926 dest_addr ? (struct sockaddr *)&dest_addr_copy : NULL,
927 addrlen);
928 }
929 #include <zephyr/syscalls/zsock_sendto_mrsh.c>
930 #endif /* CONFIG_USERSPACE */
931
932 size_t msghdr_non_empty_iov_count(const struct msghdr *msg)
933 {
934 size_t non_empty_iov_count = 0;
935
936 for (size_t i = 0; i < msg->msg_iovlen; i++) {
937 if (msg->msg_iov[i].iov_len) {
938 non_empty_iov_count++;
939 }
940 }
941
942 return non_empty_iov_count;
943 }
944
945 ssize_t zsock_sendmsg_ctx(struct net_context *ctx, const struct msghdr *msg,
946 int flags)
947 {
948 k_timeout_t timeout = K_FOREVER;
949 uint32_t retry_timeout = WAIT_BUFS_INITIAL_MS;
950 k_timepoint_t buf_timeout, end;
951 int status;
952
953 if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
954 timeout = K_NO_WAIT;
955 buf_timeout = sys_timepoint_calc(K_NO_WAIT);
956 } else {
957 net_context_get_option(ctx, NET_OPT_SNDTIMEO, &timeout, NULL);
958 buf_timeout = sys_timepoint_calc(MAX_WAIT_BUFS);
959 }
960 end = sys_timepoint_calc(timeout);
961
962 while (1) {
963 status = net_context_sendmsg(ctx, msg, flags, NULL, timeout, NULL);
964 if (status < 0) {
965 status = send_check_and_wait(ctx, status,
966 buf_timeout,
967 timeout, &retry_timeout);
968 if (status < 0) {
969 return status;
970 }
971
972 /* Update the timeout value in case the loop is repeated. */
973 timeout = sys_timepoint_timeout(end);
974
975 continue;
976 }
977
978 break;
979 }
980
981 return status;
982 }
983
984 ssize_t z_impl_zsock_sendmsg(int sock, const struct msghdr *msg, int flags)
985 {
986 int bytes_sent;
987
988 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, sendmsg, sock, msg, flags);
989
990 bytes_sent = VTABLE_CALL(sendmsg, sock, msg, flags);
991
992 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, sendmsg, sock,
993 bytes_sent < 0 ? -errno : bytes_sent);
994
995 sock_obj_core_update_send_stats(sock, bytes_sent);
996
997 return bytes_sent;
998 }
999
1000 #ifdef CONFIG_USERSPACE
1001 static inline ssize_t z_vrfy_zsock_sendmsg(int sock,
1002 const struct msghdr *msg,
1003 int flags)
1004 {
1005 struct msghdr msg_copy;
1006 size_t i;
1007 int ret;
1008
1009 K_OOPS(k_usermode_from_copy(&msg_copy, (void *)msg, sizeof(msg_copy)));
1010
1011 msg_copy.msg_name = NULL;
1012 msg_copy.msg_control = NULL;
1013
1014 msg_copy.msg_iov = k_usermode_alloc_from_copy(msg->msg_iov,
1015 msg->msg_iovlen * sizeof(struct iovec));
1016 if (!msg_copy.msg_iov) {
1017 errno = ENOMEM;
1018 goto fail;
1019 }
1020
1021 for (i = 0; i < msg->msg_iovlen; i++) {
1022 msg_copy.msg_iov[i].iov_base =
1023 k_usermode_alloc_from_copy(msg->msg_iov[i].iov_base,
1024 msg->msg_iov[i].iov_len);
1025 if (!msg_copy.msg_iov[i].iov_base) {
1026 errno = ENOMEM;
1027 goto fail;
1028 }
1029
1030 msg_copy.msg_iov[i].iov_len = msg->msg_iov[i].iov_len;
1031 }
1032
1033 if (msg->msg_namelen > 0) {
1034 msg_copy.msg_name = k_usermode_alloc_from_copy(msg->msg_name,
1035 msg->msg_namelen);
1036 if (!msg_copy.msg_name) {
1037 errno = ENOMEM;
1038 goto fail;
1039 }
1040 }
1041
1042 if (msg->msg_controllen > 0) {
1043 msg_copy.msg_control = k_usermode_alloc_from_copy(msg->msg_control,
1044 msg->msg_controllen);
1045 if (!msg_copy.msg_control) {
1046 errno = ENOMEM;
1047 goto fail;
1048 }
1049 }
1050
1051 ret = z_impl_zsock_sendmsg(sock, (const struct msghdr *)&msg_copy,
1052 flags);
1053
1054 k_free(msg_copy.msg_name);
1055 k_free(msg_copy.msg_control);
1056
1057 for (i = 0; i < msg_copy.msg_iovlen; i++) {
1058 k_free(msg_copy.msg_iov[i].iov_base);
1059 }
1060
1061 k_free(msg_copy.msg_iov);
1062
1063 return ret;
1064
1065 fail:
1066 if (msg_copy.msg_name) {
1067 k_free(msg_copy.msg_name);
1068 }
1069
1070 if (msg_copy.msg_control) {
1071 k_free(msg_copy.msg_control);
1072 }
1073
1074 if (msg_copy.msg_iov) {
1075 for (i = 0; i < msg_copy.msg_iovlen; i++) {
1076 if (msg_copy.msg_iov[i].iov_base) {
1077 k_free(msg_copy.msg_iov[i].iov_base);
1078 }
1079 }
1080
1081 k_free(msg_copy.msg_iov);
1082 }
1083
1084 return -1;
1085 }
1086 #include <zephyr/syscalls/zsock_sendmsg_mrsh.c>
1087 #endif /* CONFIG_USERSPACE */
1088
1089 static int sock_get_pkt_src_addr(struct net_pkt *pkt,
1090 enum net_ip_protocol proto,
1091 struct sockaddr *addr,
1092 socklen_t addrlen)
1093 {
1094 int ret = 0;
1095 struct net_pkt_cursor backup;
1096 uint16_t *port;
1097
1098 if (!addr || !pkt) {
1099 return -EINVAL;
1100 }
1101
1102 net_pkt_cursor_backup(pkt, &backup);
1103 net_pkt_cursor_init(pkt);
1104
1105 addr->sa_family = net_pkt_family(pkt);
1106
1107 if (IS_ENABLED(CONFIG_NET_IPV4) &&
1108 net_pkt_family(pkt) == AF_INET) {
1109 NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access,
1110 struct net_ipv4_hdr);
1111 struct sockaddr_in *addr4 = net_sin(addr);
1112 struct net_ipv4_hdr *ipv4_hdr;
1113
1114 if (addrlen < sizeof(struct sockaddr_in)) {
1115 ret = -EINVAL;
1116 goto error;
1117 }
1118
1119 ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(
1120 pkt, &ipv4_access);
1121 if (!ipv4_hdr ||
1122 net_pkt_acknowledge_data(pkt, &ipv4_access) ||
1123 net_pkt_skip(pkt, net_pkt_ipv4_opts_len(pkt))) {
1124 ret = -ENOBUFS;
1125 goto error;
1126 }
1127
1128 net_ipv4_addr_copy_raw((uint8_t *)&addr4->sin_addr, ipv4_hdr->src);
1129 port = &addr4->sin_port;
1130 } else if (IS_ENABLED(CONFIG_NET_IPV6) &&
1131 net_pkt_family(pkt) == AF_INET6) {
1132 NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access,
1133 struct net_ipv6_hdr);
1134 struct sockaddr_in6 *addr6 = net_sin6(addr);
1135 struct net_ipv6_hdr *ipv6_hdr;
1136
1137 if (addrlen < sizeof(struct sockaddr_in6)) {
1138 ret = -EINVAL;
1139 goto error;
1140 }
1141
1142 ipv6_hdr = (struct net_ipv6_hdr *)net_pkt_get_data(
1143 pkt, &ipv6_access);
1144 if (!ipv6_hdr ||
1145 net_pkt_acknowledge_data(pkt, &ipv6_access) ||
1146 net_pkt_skip(pkt, net_pkt_ipv6_ext_len(pkt))) {
1147 ret = -ENOBUFS;
1148 goto error;
1149 }
1150
1151 net_ipv6_addr_copy_raw((uint8_t *)&addr6->sin6_addr, ipv6_hdr->src);
1152 port = &addr6->sin6_port;
1153 } else {
1154 ret = -ENOTSUP;
1155 goto error;
1156 }
1157
1158 if (IS_ENABLED(CONFIG_NET_UDP) && proto == IPPROTO_UDP) {
1159 NET_PKT_DATA_ACCESS_DEFINE(udp_access, struct net_udp_hdr);
1160 struct net_udp_hdr *udp_hdr;
1161
1162 udp_hdr = (struct net_udp_hdr *)net_pkt_get_data(pkt,
1163 &udp_access);
1164 if (!udp_hdr) {
1165 ret = -ENOBUFS;
1166 goto error;
1167 }
1168
1169 *port = udp_hdr->src_port;
1170 } else if (IS_ENABLED(CONFIG_NET_TCP) && proto == IPPROTO_TCP) {
1171 NET_PKT_DATA_ACCESS_DEFINE(tcp_access, struct net_tcp_hdr);
1172 struct net_tcp_hdr *tcp_hdr;
1173
1174 tcp_hdr = (struct net_tcp_hdr *)net_pkt_get_data(pkt,
1175 &tcp_access);
1176 if (!tcp_hdr) {
1177 ret = -ENOBUFS;
1178 goto error;
1179 }
1180
1181 *port = tcp_hdr->src_port;
1182 } else {
1183 ret = -ENOTSUP;
1184 }
1185
1186 error:
1187 net_pkt_cursor_restore(pkt, &backup);
1188
1189 return ret;
1190 }
1191
1192 #if defined(CONFIG_NET_OFFLOAD)
1193 static bool net_pkt_remote_addr_is_unspecified(struct net_pkt *pkt)
1194 {
1195 bool ret = true;
1196
1197 if (net_pkt_family(pkt) == AF_INET) {
1198 ret = net_ipv4_is_addr_unspecified(&net_sin(&pkt->remote)->sin_addr);
1199 } else if (net_pkt_family(pkt) == AF_INET6) {
1200 ret = net_ipv6_is_addr_unspecified(&net_sin6(&pkt->remote)->sin6_addr);
1201 }
1202
1203 return ret;
1204 }
1205
1206 static int sock_get_offload_pkt_src_addr(struct net_pkt *pkt,
1207 struct net_context *ctx,
1208 struct sockaddr *addr,
1209 socklen_t addrlen)
1210 {
1211 int ret = 0;
1212
1213 if (!addr || !pkt) {
1214 return -EINVAL;
1215 }
1216
1217 if (!net_pkt_remote_addr_is_unspecified(pkt)) {
1218 if (IS_ENABLED(CONFIG_NET_IPV4) &&
1219 net_pkt_family(pkt) == AF_INET) {
1220 if (addrlen < sizeof(struct sockaddr_in)) {
1221 ret = -EINVAL;
1222 goto error;
1223 }
1224
1225 memcpy(addr, &pkt->remote, sizeof(struct sockaddr_in));
1226 } else if (IS_ENABLED(CONFIG_NET_IPV6) &&
1227 net_pkt_family(pkt) == AF_INET6) {
1228 if (addrlen < sizeof(struct sockaddr_in6)) {
1229 ret = -EINVAL;
1230 goto error;
1231 }
1232
1233 memcpy(addr, &pkt->remote, sizeof(struct sockaddr_in6));
1234 }
1235 } else if (ctx->flags & NET_CONTEXT_REMOTE_ADDR_SET) {
1236 memcpy(addr, &ctx->remote, MIN(addrlen, sizeof(ctx->remote)));
1237 } else {
1238 ret = -ENOTSUP;
1239 }
1240
1241 error:
1242 return ret;
1243 }
1244 #else
1245 static int sock_get_offload_pkt_src_addr(struct net_pkt *pkt,
1246 struct net_context *ctx,
1247 struct sockaddr *addr,
1248 socklen_t addrlen)
1249 {
1250 ARG_UNUSED(pkt);
1251 ARG_UNUSED(ctx);
1252 ARG_UNUSED(addr);
1253 ARG_UNUSED(addrlen);
1254
1255 return 0;
1256 }
1257 #endif /* CONFIG_NET_OFFLOAD */
1258
1259 void net_socket_update_tc_rx_time(struct net_pkt *pkt, uint32_t end_tick)
1260 {
1261 net_pkt_set_rx_stats_tick(pkt, end_tick);
1262
1263 net_stats_update_tc_rx_time(net_pkt_iface(pkt),
1264 net_pkt_priority(pkt),
1265 net_pkt_create_time(pkt),
1266 end_tick);
1267
1268 if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS_DETAIL)) {
1269 uint32_t val, prev = net_pkt_create_time(pkt);
1270 int i;
1271
1272 for (i = 0; i < net_pkt_stats_tick_count(pkt); i++) {
1273 if (!net_pkt_stats_tick(pkt)[i]) {
1274 break;
1275 }
1276
1277 val = net_pkt_stats_tick(pkt)[i] - prev;
1278 prev = net_pkt_stats_tick(pkt)[i];
1279 net_pkt_stats_tick(pkt)[i] = val;
1280 }
1281
1282 net_stats_update_tc_rx_time_detail(
1283 net_pkt_iface(pkt),
1284 net_pkt_priority(pkt),
1285 net_pkt_stats_tick(pkt));
1286 }
1287 }
1288
1289 int zsock_wait_data(struct net_context *ctx, k_timeout_t *timeout)
1290 {
1291 int ret;
1292
1293 if (ctx->cond.lock == NULL) {
1294 /* For some reason the lock pointer is not set properly
1295  * when called by fdtable.c:zvfs_finalize_fd().
1296  * It is not practical to try to figure out the fdtable
1297  * lock at this point, so skip it.
1298  */
1299 NET_WARN("No lock pointer set for context %p", ctx);
1300 return -EINVAL;
1301 }
1302
1303 if (k_fifo_is_empty(&ctx->recv_q)) {
1304 /* Wait for the data to arrive but without holding a lock */
1305 ret = k_condvar_wait(&ctx->cond.recv, ctx->cond.lock,
1306 *timeout);
1307 if (ret < 0) {
1308 return ret;
1309 }
1310
1311 if (sock_is_error(ctx)) {
1312 return -POINTER_TO_INT(ctx->user_data);
1313 }
1314 }
1315
1316 return 0;
1317 }
1318
1319 static int insert_pktinfo(struct msghdr *msg, int level, int type,
1320 void *pktinfo, size_t pktinfo_len)
1321 {
1322 struct cmsghdr *cmsg;
1323
1324 if (msg->msg_controllen < pktinfo_len) {
1325 return -EINVAL;
1326 }
1327
1328 for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
1329 if (cmsg->cmsg_len == 0) {
1330 break;
1331 }
1332 }
1333
1334 if (cmsg == NULL) {
1335 return -EINVAL;
1336 }
1337
1338 cmsg->cmsg_len = CMSG_LEN(pktinfo_len);
1339 cmsg->cmsg_level = level;
1340 cmsg->cmsg_type = type;
1341
1342 memcpy(CMSG_DATA(cmsg), pktinfo, pktinfo_len);
1343
1344 return 0;
1345 }
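/* Illustrative consumer of the ancillary data appended by
 * insert_pktinfo(), using the standard cmsg accessors (a hypothetical
 * application-side sketch):
 *
 *   for (struct cmsghdr *cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *        cm = CMSG_NXTHDR(&msg, cm)) {
 *       if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_PKTINFO) {
 *           struct in_pktinfo *info = (struct in_pktinfo *)CMSG_DATA(cm);
 *           // info->ipi_ifindex, info->ipi_addr ...
 *       }
 *   }
 */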
1346
1347 static int add_timestamping(struct net_context *ctx,
1348 struct net_pkt *pkt,
1349 struct msghdr *msg)
1350 {
1351 uint8_t timestamping = 0;
1352
1353 net_context_get_option(ctx, NET_OPT_TIMESTAMPING, &timestamping, NULL);
1354
1355 if (timestamping) {
1356 return insert_pktinfo(msg, SOL_SOCKET, SO_TIMESTAMPING,
1357 net_pkt_timestamp(pkt), sizeof(struct net_ptp_time));
1358 }
1359
1360 return -ENOTSUP;
1361 }
1362
1363 static int add_pktinfo(struct net_context *ctx,
1364 struct net_pkt *pkt,
1365 struct msghdr *msg)
1366 {
1367 int ret = -ENOTSUP;
1368 struct net_pkt_cursor backup;
1369
1370 net_pkt_cursor_backup(pkt, &backup);
1371 net_pkt_cursor_init(pkt);
1372
1373 if (IS_ENABLED(CONFIG_NET_IPV4) && net_pkt_family(pkt) == AF_INET) {
1374 NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv4_access,
1375 struct net_ipv4_hdr);
1376 struct in_pktinfo info;
1377 struct net_ipv4_hdr *ipv4_hdr;
1378
1379 ipv4_hdr = (struct net_ipv4_hdr *)net_pkt_get_data(
1380 pkt, &ipv4_access);
1381 if (ipv4_hdr == NULL ||
1382 net_pkt_acknowledge_data(pkt, &ipv4_access) ||
1383 net_pkt_skip(pkt, net_pkt_ipv4_opts_len(pkt))) {
1384 ret = -ENOBUFS;
1385 goto out;
1386 }
1387
1388 net_ipv4_addr_copy_raw((uint8_t *)&info.ipi_addr, ipv4_hdr->dst);
1389 net_ipv4_addr_copy_raw((uint8_t *)&info.ipi_spec_dst,
1390 (uint8_t *)net_sin_ptr(&ctx->local)->sin_addr);
1391 info.ipi_ifindex = ctx->iface;
1392
1393 ret = insert_pktinfo(msg, IPPROTO_IP, IP_PKTINFO,
1394 &info, sizeof(info));
1395
1396 goto out;
1397 }
1398
1399 if (IS_ENABLED(CONFIG_NET_IPV6) && net_pkt_family(pkt) == AF_INET6) {
1400 NET_PKT_DATA_ACCESS_CONTIGUOUS_DEFINE(ipv6_access,
1401 struct net_ipv6_hdr);
1402 struct in6_pktinfo info;
1403 struct net_ipv6_hdr *ipv6_hdr;
1404
1405 ipv6_hdr = (struct net_ipv6_hdr *)net_pkt_get_data(
1406 pkt, &ipv6_access);
1407 if (ipv6_hdr == NULL ||
1408 net_pkt_acknowledge_data(pkt, &ipv6_access) ||
1409 net_pkt_skip(pkt, net_pkt_ipv6_ext_len(pkt))) {
1410 ret = -ENOBUFS;
1411 goto out;
1412 }
1413
1414 net_ipv6_addr_copy_raw((uint8_t *)&info.ipi6_addr, ipv6_hdr->dst);
1415 info.ipi6_ifindex = ctx->iface;
1416
1417 ret = insert_pktinfo(msg, IPPROTO_IPV6, IPV6_RECVPKTINFO,
1418 &info, sizeof(info));
1419
1420 goto out;
1421 }
1422
1423 out:
1424 net_pkt_cursor_restore(pkt, &backup);
1425
1426 return ret;
1427 }
1428
1429 static inline ssize_t zsock_recv_dgram(struct net_context *ctx,
1430 struct msghdr *msg,
1431 void *buf,
1432 size_t max_len,
1433 int flags,
1434 struct sockaddr *src_addr,
1435 socklen_t *addrlen)
1436 {
1437 k_timeout_t timeout = K_FOREVER;
1438 size_t recv_len = 0;
1439 size_t read_len;
1440 struct net_pkt_cursor backup;
1441 struct net_pkt *pkt;
1442
1443 if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
1444 timeout = K_NO_WAIT;
1445 } else {
1446 int ret;
1447
1448 net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL);
1449
1450 ret = zsock_wait_data(ctx, &timeout);
1451 if (ret < 0) {
1452 errno = -ret;
1453 return -1;
1454 }
1455 }
1456
1457 if (flags & ZSOCK_MSG_PEEK) {
1458 int res;
1459
1460 res = fifo_wait_non_empty(&ctx->recv_q, timeout);
1461 /* EAGAIN when timeout expired, EINTR when cancelled */
1462 if (res && res != -EAGAIN && res != -EINTR) {
1463 errno = -res;
1464 return -1;
1465 }
1466
1467 pkt = k_fifo_peek_head(&ctx->recv_q);
1468 } else {
1469 pkt = k_fifo_get(&ctx->recv_q, timeout);
1470 }
1471
1472 if (!pkt) {
1473 errno = EAGAIN;
1474 return -1;
1475 }
1476
1477 net_pkt_cursor_backup(pkt, &backup);
1478
1479 if (src_addr && addrlen) {
1480 if (IS_ENABLED(CONFIG_NET_OFFLOAD) &&
1481 net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
1482 int ret;
1483
1484 ret = sock_get_offload_pkt_src_addr(pkt, ctx, src_addr,
1485 *addrlen);
1486 if (ret < 0) {
1487 errno = -ret;
1488 NET_DBG("sock_get_offload_pkt_src_addr %d", ret);
1489 goto fail;
1490 }
1491 } else {
1492 int ret;
1493
1494 ret = sock_get_pkt_src_addr(pkt, net_context_get_proto(ctx),
1495 src_addr, *addrlen);
1496 if (ret < 0) {
1497 errno = -ret;
1498 NET_DBG("sock_get_pkt_src_addr %d", ret);
1499 goto fail;
1500 }
1501 }
1502
1503 /* addrlen is a value-result argument, set to actual
1504 * size of source address
1505 */
1506 if (src_addr->sa_family == AF_INET) {
1507 *addrlen = sizeof(struct sockaddr_in);
1508 } else if (src_addr->sa_family == AF_INET6) {
1509 *addrlen = sizeof(struct sockaddr_in6);
1510 } else {
1511 errno = ENOTSUP;
1512 goto fail;
1513 }
1514 }
1515
1516 if (msg != NULL) {
1517 int iovec = 0;
1518 size_t tmp_read_len;
1519
1520 if (msg->msg_iovlen < 1 || msg->msg_iov == NULL) {
1521 errno = ENOMEM;
1522 return -1;
1523 }
1524
1525 recv_len = net_pkt_remaining_data(pkt);
1526 tmp_read_len = read_len = MIN(recv_len, max_len);
1527
1528 while (tmp_read_len > 0) {
1529 size_t len;
1530
1531 buf = msg->msg_iov[iovec].iov_base;
1532 if (buf == NULL) {
1533 errno = EINVAL;
1534 return -1;
1535 }
1536
1537 len = MIN(tmp_read_len, msg->msg_iov[iovec].iov_len);
1538
1539 if (net_pkt_read(pkt, buf, len)) {
1540 errno = ENOBUFS;
1541 goto fail;
1542 }
1543
1544 if (len <= tmp_read_len) {
1545 tmp_read_len -= len;
1546 msg->msg_iov[iovec].iov_len = len;
1547 iovec++;
1548 } else {
1549 errno = EINVAL;
1550 return -1;
1551 }
1552 }
1553
1554 msg->msg_iovlen = iovec;
1555
1556 if (recv_len != read_len) {
1557 msg->msg_flags |= ZSOCK_MSG_TRUNC;
1558 }
1559
1560 } else {
1561 recv_len = net_pkt_remaining_data(pkt);
1562 read_len = MIN(recv_len, max_len);
1563
1564 if (net_pkt_read(pkt, buf, read_len)) {
1565 errno = ENOBUFS;
1566 goto fail;
1567 }
1568 }
1569
1570 if (msg != NULL) {
1571 if (msg->msg_control != NULL) {
1572 if (msg->msg_controllen > 0) {
1573 bool clear_controllen = true;
1574
1575 if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING)) {
1576 clear_controllen = false;
1577 if (add_timestamping(ctx, pkt, msg) < 0) {
1578 msg->msg_flags |= ZSOCK_MSG_CTRUNC;
1579 }
1580 }
1581
1582 if (IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO) &&
1583 net_context_is_recv_pktinfo_set(ctx)) {
1584 clear_controllen = false;
1585 if (add_pktinfo(ctx, pkt, msg) < 0) {
1586 msg->msg_flags |= ZSOCK_MSG_CTRUNC;
1587 }
1588 }
1589
1590 if (clear_controllen) {
1591 msg->msg_controllen = 0;
1592 }
1593 }
1594 } else {
1595 msg->msg_controllen = 0U;
1596 }
1597 }
1598
1599 if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS) &&
1600 !(flags & ZSOCK_MSG_PEEK)) {
1601 net_socket_update_tc_rx_time(pkt, k_cycle_get_32());
1602 }
1603
1604 if (!(flags & ZSOCK_MSG_PEEK)) {
1605 net_pkt_unref(pkt);
1606 } else {
1607 net_pkt_cursor_restore(pkt, &backup);
1608 }
1609
1610 return (flags & ZSOCK_MSG_TRUNC) ? recv_len : read_len;
1611
1612 fail:
1613 if (!(flags & ZSOCK_MSG_PEEK)) {
1614 net_pkt_unref(pkt);
1615 }
1616
1617 return -1;
1618 }
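/* Illustrative datagram receive showing the value-result addrlen
 * handling implemented above (a hypothetical application-side sketch):
 *
 *   struct sockaddr_storage src;
 *   socklen_t addrlen = sizeof(src);
 *   ssize_t n = zsock_recvfrom(sock, buf, sizeof(buf), 0,
 *                              (struct sockaddr *)&src, &addrlen);
 *   // on success, addrlen holds sizeof(struct sockaddr_in) or
 *   // sizeof(struct sockaddr_in6), matching the source address family
 */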
1619
1620 static size_t zsock_recv_stream_immediate(struct net_context *ctx, uint8_t **buf, size_t *max_len,
1621 int flags)
1622 {
1623 size_t len;
1624 size_t pkt_len;
1625 size_t recv_len = 0;
1626 struct net_pkt *pkt;
1627 struct net_pkt_cursor backup;
1628 struct net_pkt *origin = NULL;
1629 const bool do_recv = !(buf == NULL || max_len == NULL);
1630 size_t _max_len = (max_len == NULL) ? SIZE_MAX : *max_len;
1631 const bool peek = (flags & ZSOCK_MSG_PEEK) == ZSOCK_MSG_PEEK;
1632
1633 while (_max_len > 0) {
1634 /* only peek until we know we can dequeue and/or requeue the buffer */
1635 pkt = k_fifo_peek_head(&ctx->recv_q);
1636 if (pkt == NULL || pkt == origin) {
1637 break;
1638 }
1639
1640 if (origin == NULL) {
1641 /* mark first pkt to avoid cycles when observing */
1642 origin = pkt;
1643 }
1644
1645 pkt_len = net_pkt_remaining_data(pkt);
1646 len = MIN(_max_len, pkt_len);
1647 recv_len += len;
1648 _max_len -= len;
1649
1650 if (do_recv && len > 0) {
1651 if (peek) {
1652 net_pkt_cursor_backup(pkt, &backup);
1653 }
1654
1655 net_pkt_read(pkt, *buf, len);
1656 /* update buffer position for caller */
1657 *buf += len;
1658
1659 if (peek) {
1660 net_pkt_cursor_restore(pkt, &backup);
1661 }
1662 }
1663
1664 if (do_recv && !peek) {
1665 if (len == pkt_len) {
1666 /* dequeue empty packets when not observing */
1667 pkt = k_fifo_get(&ctx->recv_q, K_NO_WAIT);
1668 if (net_pkt_eof(pkt)) {
1669 sock_set_eof(ctx);
1670 }
1671
1672 if (IS_ENABLED(CONFIG_NET_PKT_RXTIME_STATS)) {
1673 net_socket_update_tc_rx_time(pkt, k_cycle_get_32());
1674 }
1675
1676 net_pkt_unref(pkt);
1677 }
1678 } else if (!do_recv || peek) {
1679 /* requeue packets when observing */
1680 k_fifo_put(&ctx->recv_q, k_fifo_get(&ctx->recv_q, K_NO_WAIT));
1681 }
1682 }
1683
1684 if (do_recv) {
1685 /* convey remaining buffer size back to caller */
1686 *max_len = _max_len;
1687 }
1688
1689 return recv_len;
1690 }
1691
1692 static int zsock_fionread_ctx(struct net_context *ctx)
1693 {
1694 size_t ret = zsock_recv_stream_immediate(ctx, NULL, NULL, 0);
1695
1696 return MIN(ret, INT_MAX);
1697 }
1698
1699 static ssize_t zsock_recv_stream_timed(struct net_context *ctx, struct msghdr *msg,
1700 uint8_t *buf, size_t max_len,
1701 int flags, k_timeout_t timeout)
1702 {
1703 int res;
1704 k_timepoint_t end;
1705 size_t recv_len = 0, iovec = 0, available_len, max_iovlen = 0;
1706 const bool waitall = (flags & ZSOCK_MSG_WAITALL) == ZSOCK_MSG_WAITALL;
1707
1708 if (msg != NULL && buf == NULL) {
1709 if (msg->msg_iovlen < 1) {
1710 return -EINVAL;
1711 }
1712
1713 buf = msg->msg_iov[iovec].iov_base;
1714 available_len = msg->msg_iov[iovec].iov_len;
1715 msg->msg_iov[iovec].iov_len = 0;
1716 max_iovlen = msg->msg_iovlen;
1717 }
1718
1719 for (end = sys_timepoint_calc(timeout); max_len > 0; timeout = sys_timepoint_timeout(end)) {
1720
1721 if (sock_is_error(ctx)) {
1722 return -POINTER_TO_INT(ctx->user_data);
1723 }
1724
1725 if (sock_is_eof(ctx)) {
1726 return 0;
1727 }
1728
1729 if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1730 res = zsock_wait_data(ctx, &timeout);
1731 if (res < 0) {
1732 return res;
1733 }
1734 }
1735
1736 if (msg != NULL) {
1737 again:
1738 res = zsock_recv_stream_immediate(ctx, &buf, &available_len, flags);
1739 recv_len += res;
1740
1741 if (res == 0 && recv_len == 0 && K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1742 return -EAGAIN;
1743 }
1744
1745 msg->msg_iov[iovec].iov_len += res;
1746 buf = (uint8_t *)(msg->msg_iov[iovec].iov_base) + res;
1747 max_len -= res;
1748
1749 if (available_len == 0) {
1750 /* All data to this iovec was written */
1751 iovec++;
1752
1753 if (iovec == max_iovlen) {
1754 break;
1755 }
1756
1757 msg->msg_iovlen = iovec;
1758 buf = msg->msg_iov[iovec].iov_base;
1759 available_len = msg->msg_iov[iovec].iov_len;
1760 msg->msg_iov[iovec].iov_len = 0;
1761
1762 /* If there is more data, read it now and do not wait */
1763 if (buf != NULL && available_len > 0) {
1764 goto again;
1765 }
1766
1767 continue;
1768 }
1769
1770 } else {
1771 res = zsock_recv_stream_immediate(ctx, &buf, &max_len, flags);
1772 recv_len += res;
1773
1774 if (res == 0) {
1775 if (recv_len == 0 && K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1776 return -EAGAIN;
1777 }
1778 }
1779 }
1780
1781 if (!waitall) {
1782 break;
1783 }
1784 }
1785
1786 return recv_len;
1787 }
1788
1789 static ssize_t zsock_recv_stream(struct net_context *ctx, struct msghdr *msg,
1790 void *buf, size_t max_len, int flags)
1791 {
1792 ssize_t res;
1793 size_t recv_len = 0;
1794 k_timeout_t timeout = K_FOREVER;
1795
1796 if (!net_context_is_used(ctx)) {
1797 errno = EBADF;
1798 return -1;
1799 }
1800
1801 if (net_context_get_state(ctx) != NET_CONTEXT_CONNECTED) {
1802 errno = ENOTCONN;
1803 return -1;
1804 }
1805
1806 if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
1807 timeout = K_NO_WAIT;
1808 } else if (!sock_is_eof(ctx) && !sock_is_error(ctx)) {
1809 net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL);
1810 }
1811
1812 if (max_len == 0) {
1813 /* no bytes requested - done! */
1814 return 0;
1815 }
1816
1817 res = zsock_recv_stream_timed(ctx, msg, buf, max_len, flags, timeout);
1818 recv_len += MAX(0, res);
1819
1820 if (res < 0) {
1821 errno = -res;
1822 return -1;
1823 }
1824
1825 if (!(flags & ZSOCK_MSG_PEEK)) {
1826 net_context_update_recv_wnd(ctx, recv_len);
1827 }
1828
1829 return recv_len;
1830 }
1831
1832 ssize_t zsock_recvfrom_ctx(struct net_context *ctx, void *buf, size_t max_len,
1833 int flags,
1834 struct sockaddr *src_addr, socklen_t *addrlen)
1835 {
1836 enum net_sock_type sock_type = net_context_get_type(ctx);
1837
1838 if (max_len == 0) {
1839 return 0;
1840 }
1841
1842 if (sock_type == SOCK_DGRAM) {
1843 return zsock_recv_dgram(ctx, NULL, buf, max_len, flags, src_addr, addrlen);
1844 } else if (sock_type == SOCK_STREAM) {
1845 return zsock_recv_stream(ctx, NULL, buf, max_len, flags);
1846 }
1847
1848 __ASSERT(0, "Unknown socket type");
1849
1850 errno = ENOTSUP;
1851
1852 return -1;
1853 }
1854
1855 ssize_t z_impl_zsock_recvfrom(int sock, void *buf, size_t max_len, int flags,
1856 struct sockaddr *src_addr, socklen_t *addrlen)
1857 {
1858 int bytes_received;
1859
1860 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, recvfrom, sock, max_len, flags, src_addr, addrlen);
1861
1862 bytes_received = VTABLE_CALL(recvfrom, sock, buf, max_len, flags, src_addr, addrlen);
1863
1864 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, recvfrom, sock,
1865 src_addr, addrlen,
1866 bytes_received < 0 ? -errno : bytes_received);
1867
1868 sock_obj_core_update_recv_stats(sock, bytes_received);
1869
1870 return bytes_received;
1871 }
1872
1873 #ifdef CONFIG_USERSPACE
1874 ssize_t z_vrfy_zsock_recvfrom(int sock, void *buf, size_t max_len, int flags,
1875 struct sockaddr *src_addr, socklen_t *addrlen)
1876 {
1877 socklen_t addrlen_copy;
1878 ssize_t ret;
1879
1880 if (K_SYSCALL_MEMORY_WRITE(buf, max_len)) {
1881 errno = EFAULT;
1882 return -1;
1883 }
1884
1885 if (addrlen) {
1886 K_OOPS(k_usermode_from_copy(&addrlen_copy, addrlen,
1887 sizeof(socklen_t)));
1888 }
1889 K_OOPS(src_addr && K_SYSCALL_MEMORY_WRITE(src_addr, addrlen_copy));
1890
1891 ret = z_impl_zsock_recvfrom(sock, (void *)buf, max_len, flags,
1892 (struct sockaddr *)src_addr,
1893 addrlen ? &addrlen_copy : NULL);
1894
1895 if (addrlen) {
1896 K_OOPS(k_usermode_to_copy(addrlen, &addrlen_copy,
1897 sizeof(socklen_t)));
1898 }
1899
1900 return ret;
1901 }
1902 #include <zephyr/syscalls/zsock_recvfrom_mrsh.c>
1903 #endif /* CONFIG_USERSPACE */
1904
1905 ssize_t zsock_recvmsg_ctx(struct net_context *ctx, struct msghdr *msg,
1906 int flags)
1907 {
1908 enum net_sock_type sock_type = net_context_get_type(ctx);
1909 size_t i, max_len = 0;
1910
1911 if (msg == NULL) {
1912 errno = EINVAL;
1913 return -1;
1914 }
1915
1916 if (msg->msg_iov == NULL) {
1917 errno = ENOMEM;
1918 return -1;
1919 }
1920
1921 for (i = 0; i < msg->msg_iovlen; i++) {
1922 max_len += msg->msg_iov[i].iov_len;
1923 }
1924
1925 if (sock_type == SOCK_DGRAM) {
1926 return zsock_recv_dgram(ctx, msg, NULL, max_len, flags,
1927 msg->msg_name, &msg->msg_namelen);
1928 } else if (sock_type == SOCK_STREAM) {
1929 return zsock_recv_stream(ctx, msg, NULL, max_len, flags);
1930 }
1931
1932 __ASSERT(0, "Unknown socket type");
1933
1934 errno = ENOTSUP;
1935
1936 return -1;
1937 }
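/* Illustrative scatter/gather receive (a hypothetical application-side
 * sketch): max_len above is the sum of all iovec lengths, so one message
 * may be split across several buffers:
 *
 *   struct iovec iov[2] = {
 *       { .iov_base = hdr,  .iov_len = sizeof(hdr)  },
 *       { .iov_base = body, .iov_len = sizeof(body) },
 *   };
 *   struct msghdr mhdr = { .msg_iov = iov, .msg_iovlen = 2 };
 *   ssize_t n = zsock_recvmsg(sock, &mhdr, 0);
 */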
1938
1939 ssize_t z_impl_zsock_recvmsg(int sock, struct msghdr *msg, int flags)
1940 {
1941 int bytes_received;
1942
1943 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, recvmsg, sock, msg, flags);
1944
1945 bytes_received = VTABLE_CALL(recvmsg, sock, msg, flags);
1946
1947 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, recvmsg, sock, msg,
1948 bytes_received < 0 ? -errno : bytes_received);
1949
1950 sock_obj_core_update_recv_stats(sock, bytes_received);
1951
1952 return bytes_received;
1953 }
1954
1955 #ifdef CONFIG_USERSPACE
1956 ssize_t z_vrfy_zsock_recvmsg(int sock, struct msghdr *msg, int flags)
1957 {
1958 struct msghdr msg_copy;
1959 size_t iovlen;
1960 size_t i;
1961 int ret;
1962
1963 if (msg == NULL) {
1964 errno = EINVAL;
1965 return -1;
1966 }
1967
1968 if (msg->msg_iov == NULL) {
1969 errno = ENOMEM;
1970 return -1;
1971 }
1972
1973 K_OOPS(k_usermode_from_copy(&msg_copy, (void *)msg, sizeof(msg_copy)));
1974
1975 k_usermode_from_copy(&iovlen, &msg->msg_iovlen, sizeof(iovlen));
1976
1977 msg_copy.msg_name = NULL;
1978 msg_copy.msg_control = NULL;
1979
1980 msg_copy.msg_iov = k_usermode_alloc_from_copy(msg->msg_iov,
1981 msg->msg_iovlen * sizeof(struct iovec));
1982 if (!msg_copy.msg_iov) {
1983 errno = ENOMEM;
1984 goto fail;
1985 }
1986
1987 /* Clear the pointers in the copy so that if the allocation in the
1988  * next loop fails, we do not try to free non-allocated memory
1989  * in the fail branch.
1990  */
1991 memset(msg_copy.msg_iov, 0, msg->msg_iovlen * sizeof(struct iovec));
1992
1993 for (i = 0; i < iovlen; i++) {
1994 /* TODO: In practice we do not need to copy the actual data
1995  * in msghdr when receiving data, but currently there is no
1996  * ready-made function to do just that (unless we want to call
1997  * the relevant malloc function here ourselves). So just use
1998  * the copying variant for now.
1999  */
2000 msg_copy.msg_iov[i].iov_base =
2001 k_usermode_alloc_from_copy(msg->msg_iov[i].iov_base,
2002 msg->msg_iov[i].iov_len);
2003 if (!msg_copy.msg_iov[i].iov_base) {
2004 errno = ENOMEM;
2005 goto fail;
2006 }
2007
2008 msg_copy.msg_iov[i].iov_len = msg->msg_iov[i].iov_len;
2009 }
2010
2011 if (msg->msg_namelen > 0) {
2012 if (msg->msg_name == NULL) {
2013 errno = EINVAL;
2014 goto fail;
2015 }
2016
2017 msg_copy.msg_name = k_usermode_alloc_from_copy(msg->msg_name,
2018 msg->msg_namelen);
2019 if (msg_copy.msg_name == NULL) {
2020 errno = ENOMEM;
2021 goto fail;
2022 }
2023 }
2024
2025 if (msg->msg_controllen > 0) {
2026 if (msg->msg_control == NULL) {
2027 errno = EINVAL;
2028 goto fail;
2029 }
2030
2031 msg_copy.msg_control =
2032 k_usermode_alloc_from_copy(msg->msg_control,
2033 msg->msg_controllen);
2034 if (msg_copy.msg_control == NULL) {
2035 errno = ENOMEM;
2036 goto fail;
2037 }
2038 }
2039
2040 ret = z_impl_zsock_recvmsg(sock, &msg_copy, flags);
2041
2042 /* Do not copy anything back if there was an error or nothing was
2043 * received.
2044 */
2045 if (ret > 0) {
2046 if (msg->msg_namelen > 0 && msg->msg_name != NULL) {
2047 K_OOPS(k_usermode_to_copy(msg->msg_name,
2048 msg_copy.msg_name,
2049 msg_copy.msg_namelen));
2050 }
2051
2052 if (msg->msg_controllen > 0 &&
2053 msg->msg_control != NULL) {
2054 K_OOPS(k_usermode_to_copy(msg->msg_control,
2055 msg_copy.msg_control,
2056 msg_copy.msg_controllen));
2057
2058 msg->msg_controllen = msg_copy.msg_controllen;
2059 } else {
2060 msg->msg_controllen = 0U;
2061 }
2062
2063 K_OOPS(k_usermode_to_copy(&msg->msg_iovlen,
2064 &msg_copy.msg_iovlen,
2065 sizeof(msg->msg_iovlen)));
2066
2067 /* The new iovlen cannot be bigger than the original one */
2068 NET_ASSERT(msg_copy.msg_iovlen <= iovlen);
2069
2070 for (i = 0; i < iovlen; i++) {
2071 if (i < msg_copy.msg_iovlen) {
2072 K_OOPS(k_usermode_to_copy(msg->msg_iov[i].iov_base,
2073 msg_copy.msg_iov[i].iov_base,
2074 msg_copy.msg_iov[i].iov_len));
2075 K_OOPS(k_usermode_to_copy(&msg->msg_iov[i].iov_len,
2076 &msg_copy.msg_iov[i].iov_len,
2077 sizeof(msg->msg_iov[i].iov_len)));
2078 } else {
2079 /* Clear out those vectors that we could not populate */
2080 msg->msg_iov[i].iov_len = 0;
2081 }
2082 }
2083
2084 K_OOPS(k_usermode_to_copy(&msg->msg_flags,
2085 &msg_copy.msg_flags,
2086 sizeof(msg->msg_flags)));
2087 }
2088
2089 k_free(msg_copy.msg_name);
2090 k_free(msg_copy.msg_control);
2091
2092 /* Note that we need to free according to the original iovlen */
2093 for (i = 0; i < iovlen; i++) {
2094 k_free(msg_copy.msg_iov[i].iov_base);
2095 }
2096
2097 k_free(msg_copy.msg_iov);
2098
2099 return ret;
2100
2101 fail:
2102 if (msg_copy.msg_name) {
2103 k_free(msg_copy.msg_name);
2104 }
2105
2106 if (msg_copy.msg_control) {
2107 k_free(msg_copy.msg_control);
2108 }
2109
2110 if (msg_copy.msg_iov) {
2111 for (i = 0; i < msg_copy.msg_iovlen; i++) {
2112 if (msg_copy.msg_iov[i].iov_base) {
2113 k_free(msg_copy.msg_iov[i].iov_base);
2114 }
2115 }
2116
2117 k_free(msg_copy.msg_iov);
2118 }
2119
2120 return -1;
2121 }
2122 #include <zephyr/syscalls/zsock_recvmsg_mrsh.c>
2123 #endif /* CONFIG_USERSPACE */
2124
2125 /* As this is a limited function, we do not follow the POSIX
2126 * signature: the last argument is a plain int instead of "...".
2127 */
2128 int z_impl_zsock_fcntl_impl(int sock, int cmd, int flags)
2129 {
2130 const struct socket_op_vtable *vtable;
2131 struct k_mutex *lock;
2132 void *obj;
2133 int ret;
2134
2135 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, fcntl, sock, cmd, flags);
2136
2137 obj = get_sock_vtable(sock, &vtable, &lock);
2138 if (obj == NULL) {
2139 errno = EBADF;
2140 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, fcntl, sock, -errno);
2141 return -1;
2142 }
2143
2144 (void)k_mutex_lock(lock, K_FOREVER);
2145
2146 ret = zvfs_fdtable_call_ioctl((const struct fd_op_vtable *)vtable,
2147 obj, cmd, flags);
2148
2149 k_mutex_unlock(lock);
2150
2151 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, fcntl, sock,
2152 ret < 0 ? -errno : ret);
2153 return ret;
2154 }
2155
2156 #ifdef CONFIG_USERSPACE
2157 static inline int z_vrfy_zsock_fcntl_impl(int sock, int cmd, int flags)
2158 {
2159 return z_impl_zsock_fcntl_impl(sock, cmd, flags);
2160 }
2161 #include <zephyr/syscalls/zsock_fcntl_impl_mrsh.c>
2162 #endif
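
/* A minimal sketch of the usual calling pattern, assuming the public
 * zsock_fcntl() wrapper and a hypothetical descriptor "fd": put the
 * socket into non-blocking mode.
 *
 *   int flags = zsock_fcntl(fd, F_GETFL, 0);
 *   if (flags >= 0) {
 *       (void)zsock_fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *   }
 */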
2163
2164 int z_impl_zsock_ioctl_impl(int sock, unsigned long request, va_list args)
2165 {
2166 const struct socket_op_vtable *vtable;
2167 struct k_mutex *lock;
2168 void *ctx;
2169 int ret;
2170
2171 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, ioctl, sock, request);
2172
2173 ctx = get_sock_vtable(sock, &vtable, &lock);
2174 if (ctx == NULL) {
2175 errno = EBADF;
2176 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, ioctl, sock, -errno);
2177 return -1;
2178 }
2179
2180 (void)k_mutex_lock(lock, K_FOREVER);
2181
2182 NET_DBG("ioctl: ctx=%p, fd=%d, request=%lu", ctx, sock, request);
2183
2184 ret = vtable->fd_vtable.ioctl(ctx, request, args);
2185
2186 k_mutex_unlock(lock);
2187
2188 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, ioctl, sock,
2189 ret < 0 ? -errno : ret);
2190 return ret;
2191
2192 }
2193
2194 #ifdef CONFIG_USERSPACE
2195 static inline int z_vrfy_zsock_ioctl_impl(int sock, unsigned long request, va_list args)
2196 {
2197 switch (request) {
2198 case ZFD_IOCTL_FIONBIO:
2199 break;
2200
	case ZFD_IOCTL_FIONREAD: {
		va_list args_copy;
		int *avail;

		/* Peek at the user pointer via a copy so that "args" is
		 * left untouched for the implementation to consume.
		 */
		va_copy(args_copy, args);
		avail = va_arg(args_copy, int *);
		va_end(args_copy);

		K_OOPS(K_SYSCALL_MEMORY_WRITE(avail, sizeof(*avail)));

		break;
	}
2209
2210 default:
2211 errno = EOPNOTSUPP;
2212 return -1;
2213 }
2214
2215 return z_impl_zsock_ioctl_impl(sock, request, args);
2216 }
2217 #include <zephyr/syscalls/zsock_ioctl_impl_mrsh.c>
2218 #endif
2219
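/* Socket poll() runs in two phases around a single k_poll() call (see
 * zsock_poll_internal() below): ZFD_IOCTL_POLL_PREPARE maps each pollfd
 * onto k_poll_event entries, and ZFD_IOCTL_POLL_UPDATE translates the
 * k_poll() outcome back into revents bits.
 */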
2220 static int zsock_poll_prepare_ctx(struct net_context *ctx,
2221 struct zsock_pollfd *pfd,
2222 struct k_poll_event **pev,
2223 struct k_poll_event *pev_end)
2224 {
2225 if (pfd->events & ZSOCK_POLLIN) {
2226 if (*pev == pev_end) {
2227 return -ENOMEM;
2228 }
2229
2230 (*pev)->obj = &ctx->recv_q;
2231 (*pev)->type = K_POLL_TYPE_FIFO_DATA_AVAILABLE;
2232 (*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
2233 (*pev)->state = K_POLL_STATE_NOT_READY;
2234 (*pev)++;
2235 }
2236
2237 if (pfd->events & ZSOCK_POLLOUT) {
2238 if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) &&
2239 net_context_get_type(ctx) == SOCK_STREAM &&
2240 !net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
2241 if (*pev == pev_end) {
2242 return -ENOMEM;
2243 }
2244
2245 if (net_context_get_state(ctx) == NET_CONTEXT_CONNECTING) {
2246 (*pev)->obj = net_tcp_conn_sem_get(ctx);
2247 } else {
2248 (*pev)->obj = net_tcp_tx_sem_get(ctx);
2249 }
2250
2251 (*pev)->type = K_POLL_TYPE_SEM_AVAILABLE;
2252 (*pev)->mode = K_POLL_MODE_NOTIFY_ONLY;
2253 (*pev)->state = K_POLL_STATE_NOT_READY;
2254 (*pev)++;
2255 } else {
2256 return -EALREADY;
2257 }
2258
2259 }
2260
2261 /* If the socket is already at EOF or in an error state, it can be
2262 * reported immediately, so we tell poll() to short-circuit the wait.
2263 */
2264 if (sock_is_eof(ctx) || sock_is_error(ctx)) {
2265 return -EALREADY;
2266 }
2267
2268 return 0;
2269 }
2270
2271 static int zsock_poll_update_ctx(struct net_context *ctx,
2272 struct zsock_pollfd *pfd,
2273 struct k_poll_event **pev)
2274 {
2277 if (pfd->events & ZSOCK_POLLIN) {
2278 if ((*pev)->state != K_POLL_STATE_NOT_READY || sock_is_eof(ctx)) {
2279 pfd->revents |= ZSOCK_POLLIN;
2280 }
2281 (*pev)++;
2282 }
2283 if (pfd->events & ZSOCK_POLLOUT) {
2284 if (IS_ENABLED(CONFIG_NET_NATIVE_TCP) &&
2285 net_context_get_type(ctx) == SOCK_STREAM &&
2286 !net_if_is_ip_offloaded(net_context_get_iface(ctx))) {
2287 if ((*pev)->state != K_POLL_STATE_NOT_READY &&
2288 !sock_is_eof(ctx) &&
2289 (net_context_get_state(ctx) == NET_CONTEXT_CONNECTED)) {
2290 pfd->revents |= ZSOCK_POLLOUT;
2291 }
2292 (*pev)++;
2293 } else {
2294 pfd->revents |= ZSOCK_POLLOUT;
2295 }
2296 }
2297
2298 if (sock_is_error(ctx)) {
2299 pfd->revents |= ZSOCK_POLLERR;
2300 }
2301
2302 if (sock_is_eof(ctx)) {
2303 pfd->revents |= ZSOCK_POLLHUP;
2304 }
2305
2306 return 0;
2307 }
2308
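/* Remaining milliseconds of "timeout", measured from the "start" uptime
 * snapshot; the result goes negative once the deadline has passed.
 */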
2309 static inline int time_left(uint32_t start, uint32_t timeout)
2310 {
2311 uint32_t elapsed = k_uptime_get_32() - start;
2312
2313 return timeout - elapsed;
2314 }
2315
2316 int zsock_poll_internal(struct zsock_pollfd *fds, int nfds, k_timeout_t timeout)
2317 {
2318 bool retry;
2319 int ret = 0;
2320 int i;
2321 struct zsock_pollfd *pfd;
2322 struct k_poll_event poll_events[CONFIG_NET_SOCKETS_POLL_MAX];
2323 struct k_poll_event *pev;
2324 struct k_poll_event *pev_end = poll_events + ARRAY_SIZE(poll_events);
2325 const struct fd_op_vtable *vtable;
2326 struct k_mutex *lock;
2327 k_timepoint_t end;
2328 bool offload = false;
2329 const struct fd_op_vtable *offl_vtable = NULL;
2330 void *offl_ctx = NULL;
2331
2332 end = sys_timepoint_calc(timeout);
2333
2334 pev = poll_events;
2335 for (pfd = fds, i = nfds; i--; pfd++) {
2336 void *ctx;
2337 int result;
2338
2339 /* Per POSIX, negative fds are just ignored */
2340 if (pfd->fd < 0) {
2341 continue;
2342 }
2343
2344 ctx = get_sock_vtable(pfd->fd,
2345 (const struct socket_op_vtable **)&vtable,
2346 &lock);
2347 if (ctx == NULL) {
2348 /* Will set POLLNVAL in return loop */
2349 continue;
2350 }
2351
2352 (void)k_mutex_lock(lock, K_FOREVER);
2353
2354 result = zvfs_fdtable_call_ioctl(vtable, ctx,
2355 ZFD_IOCTL_POLL_PREPARE,
2356 pfd, &pev, pev_end);
2357 if (result == -EALREADY) {
2358 /* If POLL_PREPARE returned with EALREADY, it means
2359 * it already detected that some socket is ready. In
2360 * this case, we still perform a k_poll to pick up
2361 * as many events as possible, but without any wait.
2362 */
2363 timeout = K_NO_WAIT;
2364 end = sys_timepoint_calc(timeout);
2365 result = 0;
2366 } else if (result == -EXDEV) {
2367 /* If POLL_PREPARE returned EXDEV, it means
2368 * it detected an offloaded socket.
2369 * If the offloaded socket is used with native TLS, the TLS
2370 * wrapper for the offloaded poll will be used.
2371 * If the fds array contains a mix of offloaded
2372 * and non-offloaded sockets, the offloaded poll handler
2373 * shall return an error.
2374 */
2375 offload = true;
2376 if (offl_vtable == NULL || net_socket_is_tls(ctx)) {
2377 offl_vtable = vtable;
2378 offl_ctx = ctx;
2379 }
2380
2381 result = 0;
2382 }
2383
2384 k_mutex_unlock(lock);
2385
2386 if (result < 0) {
2387 errno = -result;
2388 return -1;
2389 }
2390 }
2391
2392 if (offload) {
2393 int poll_timeout;
2394
2395 if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
2396 poll_timeout = SYS_FOREVER_MS;
2397 } else {
2398 poll_timeout = k_ticks_to_ms_floor32(timeout.ticks);
2399 }
2400
2401 return zvfs_fdtable_call_ioctl(offl_vtable, offl_ctx,
2402 ZFD_IOCTL_POLL_OFFLOAD,
2403 fds, nfds, poll_timeout);
2404 }
2405
2406 timeout = sys_timepoint_timeout(end);
2407
2408 do {
2409 ret = k_poll(poll_events, pev - poll_events, timeout);
2410 /* EAGAIN when timeout expired, EINTR when cancelled (i.e. EOF) */
2411 if (ret != 0 && ret != -EAGAIN && ret != -EINTR) {
2412 errno = -ret;
2413 return -1;
2414 }
2415
2416 retry = false;
2417 ret = 0;
2418
2419 pev = poll_events;
2420 for (pfd = fds, i = nfds; i--; pfd++) {
2421 void *ctx;
2422 int result;
2423
2424 pfd->revents = 0;
2425
2426 if (pfd->fd < 0) {
2427 continue;
2428 }
2429
2430 ctx = get_sock_vtable(
2431 pfd->fd,
2432 (const struct socket_op_vtable **)&vtable,
2433 &lock);
2434 if (ctx == NULL) {
2435 pfd->revents = ZSOCK_POLLNVAL;
2436 ret++;
2437 continue;
2438 }
2439
2440 (void)k_mutex_lock(lock, K_FOREVER);
2441
2442 result = zvfs_fdtable_call_ioctl(vtable, ctx,
2443 ZFD_IOCTL_POLL_UPDATE,
2444 pfd, &pev);
2445 k_mutex_unlock(lock);
2446
2447 if (result == -EAGAIN) {
2448 retry = true;
2449 continue;
2450 } else if (result != 0) {
2451 errno = -result;
2452 return -1;
2453 }
2454
2455 if (pfd->revents != 0) {
2456 ret++;
2457 }
2458 }
2459
2460 if (retry) {
2461 if (ret > 0) {
2462 break;
2463 }
2464
2465 timeout = sys_timepoint_timeout(end);
2466
2467 if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
2468 break;
2469 }
2470 }
2471 } while (retry);
2472
2473 return ret;
2474 }
2475
2476 int z_impl_zsock_poll(struct zsock_pollfd *fds, int nfds, int poll_timeout)
2477 {
2478 k_timeout_t timeout;
2479 int ret;
2480
2481 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, poll, fds, nfds, poll_timeout);
2482
2483 if (poll_timeout < 0) {
2484 timeout = K_FOREVER;
2485 } else {
2486 timeout = K_MSEC(poll_timeout);
2487 }
2488
2489 ret = zsock_poll_internal(fds, nfds, timeout);
2490
2491 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, poll, fds, nfds,
2492 ret < 0 ? -errno : ret);
2493 return ret;
2494 }
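
/* Usage sketch (hypothetical application code): wait up to one second
 * for incoming data on socket "fd":
 *
 *   struct zsock_pollfd pfd = { .fd = fd, .events = ZSOCK_POLLIN };
 *   int n = zsock_poll(&pfd, 1, 1000);
 *   if (n > 0 && (pfd.revents & ZSOCK_POLLIN)) {
 *       ...data (or EOF) is ready to be read...
 *   }
 */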
2495
2496 #ifdef CONFIG_USERSPACE
2497 static inline int z_vrfy_zsock_poll(struct zsock_pollfd *fds,
2498 int nfds, int timeout)
2499 {
2500 struct zsock_pollfd *fds_copy;
2501 size_t fds_size;
2502 int ret;
2503
2504 /* Copy fds array from user mode */
2505 if (size_mul_overflow(nfds, sizeof(struct zsock_pollfd), &fds_size)) {
2506 errno = EFAULT;
2507 return -1;
2508 }
2509 fds_copy = k_usermode_alloc_from_copy((void *)fds, fds_size);
2510 if (!fds_copy) {
2511 errno = ENOMEM;
2512 return -1;
2513 }
2514
2515 ret = z_impl_zsock_poll(fds_copy, nfds, timeout);
2516
2517 if (ret >= 0) {
2518 K_OOPS(k_usermode_to_copy((void *)fds, fds_copy, fds_size));
2519 }
2520 k_free(fds_copy);
2521
2522 return ret;
2523 }
2524 #include <zephyr/syscalls/zsock_poll_mrsh.c>
2525 #endif
2526
2527 int z_impl_zsock_inet_pton(sa_family_t family, const char *src, void *dst)
2528 {
2529 if (net_addr_pton(family, src, dst) == 0) {
2530 return 1;
2531 } else {
2532 return 0;
2533 }
2534 }
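
/* Follows the inet_pton() return convention: 1 on a successful parse,
 * 0 otherwise. A hypothetical example:
 *
 *   struct in_addr a;
 *   if (zsock_inet_pton(AF_INET, "192.0.2.1", &a) == 1) {
 *       ...a now holds the address in network byte order...
 *   }
 */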
2535
2536 #ifdef CONFIG_USERSPACE
2537 static inline int z_vrfy_zsock_inet_pton(sa_family_t family,
2538 const char *src, void *dst)
2539 {
2540 int dst_size;
2541 char src_copy[NET_IPV6_ADDR_LEN];
2542 char dst_copy[sizeof(struct in6_addr)];
2543 int ret;
2544
2545 switch (family) {
2546 case AF_INET:
2547 dst_size = sizeof(struct in_addr);
2548 break;
2549 case AF_INET6:
2550 dst_size = sizeof(struct in6_addr);
2551 break;
2552 default:
2553 errno = EAFNOSUPPORT;
2554 return -1;
2555 }
2556
2557 K_OOPS(k_usermode_string_copy(src_copy, (char *)src, sizeof(src_copy)));
2558 ret = z_impl_zsock_inet_pton(family, src_copy, dst_copy);
2559 K_OOPS(k_usermode_to_copy(dst, dst_copy, dst_size));
2560
2561 return ret;
2562 }
2563 #include <zephyr/syscalls/zsock_inet_pton_mrsh.c>
2564 #endif
2565
2566 static enum tcp_conn_option get_tcp_option(int optname)
2567 {
2568 switch (optname) {
2569 case TCP_KEEPIDLE:
2570 return TCP_OPT_KEEPIDLE;
2571 case TCP_KEEPINTVL:
2572 return TCP_OPT_KEEPINTVL;
2573 case TCP_KEEPCNT:
2574 return TCP_OPT_KEEPCNT;
2575 }
2576
2577 return -EINVAL; /* not hit in practice: callers only pass the TCP_KEEP* options above */
2578 }
2579
2580 int zsock_getsockopt_ctx(struct net_context *ctx, int level, int optname,
2581 void *optval, socklen_t *optlen)
2582 {
2583 int ret;
2584
2585 switch (level) {
2586 case SOL_SOCKET:
2587 switch (optname) {
2588 case SO_ERROR: {
2589 if (*optlen != sizeof(int)) {
2590 errno = EINVAL;
2591 return -1;
2592 }
2593
2594 *(int *)optval = POINTER_TO_INT(ctx->user_data);
2595
2596 return 0;
2597 }
2598
2599 case SO_TYPE: {
2600 int type = (int)net_context_get_type(ctx);
2601
2602 if (*optlen != sizeof(type)) {
2603 errno = EINVAL;
2604 return -1;
2605 }
2606
2607 *(int *)optval = type;
2608
2609 return 0;
2610 }
2611
2612 case SO_TXTIME:
2613 if (IS_ENABLED(CONFIG_NET_CONTEXT_TXTIME)) {
2614 ret = net_context_get_option(ctx,
2615 NET_OPT_TXTIME,
2616 optval, optlen);
2617 if (ret < 0) {
2618 errno = -ret;
2619 return -1;
2620 }
2621
2622 return 0;
2623 }
2624 break;
2625
2626 case SO_PROTOCOL: {
2627 int proto = (int)net_context_get_proto(ctx);
2628
2629 if (*optlen != sizeof(proto)) {
2630 errno = EINVAL;
2631 return -1;
2632 }
2633
2634 *(int *)optval = proto;
2635
2636 return 0;
2637 }
2638
2639 case SO_DOMAIN: {
2640 if (*optlen != sizeof(int)) {
2641 errno = EINVAL;
2642 return -1;
2643 }
2644
2645 *(int *)optval = net_context_get_family(ctx);
2646
2647 return 0;
2648 }
2651
2652 case SO_RCVBUF:
2653 if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVBUF)) {
2654 ret = net_context_get_option(ctx,
2655 NET_OPT_RCVBUF,
2656 optval, optlen);
2657 if (ret < 0) {
2658 errno = -ret;
2659 return -1;
2660 }
2661
2662 return 0;
2663 }
2664 break;
2665
2666 case SO_SNDBUF:
2667 if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDBUF)) {
2668 ret = net_context_get_option(ctx,
2669 NET_OPT_SNDBUF,
2670 optval, optlen);
2671 if (ret < 0) {
2672 errno = -ret;
2673 return -1;
2674 }
2675
2676 return 0;
2677 }
2678 break;
2679
2680 case SO_REUSEADDR:
2681 if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEADDR)) {
2682 ret = net_context_get_option(ctx,
2683 NET_OPT_REUSEADDR,
2684 optval, optlen);
2685 if (ret < 0) {
2686 errno = -ret;
2687 return -1;
2688 }
2689
2690 return 0;
2691 }
2692 break;
2693
2694 case SO_REUSEPORT:
2695 if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEPORT)) {
2696 ret = net_context_get_option(ctx,
2697 NET_OPT_REUSEPORT,
2698 optval, optlen);
2699 if (ret < 0) {
2700 errno = -ret;
2701 return -1;
2702 }
2703
2704 return 0;
2705 }
2706 break;
2707
2708 case SO_KEEPALIVE:
2709 if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE) &&
2710 net_context_get_proto(ctx) == IPPROTO_TCP) {
2711 ret = net_tcp_get_option(ctx,
2712 TCP_OPT_KEEPALIVE,
2713 optval, optlen);
2714 if (ret < 0) {
2715 errno = -ret;
2716 return -1;
2717 }
2718
2719 return 0;
2720 }
2721
2722 break;
2723
2724 case SO_TIMESTAMPING:
2725 if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING)) {
2726 ret = net_context_get_option(ctx,
2727 NET_OPT_TIMESTAMPING,
2728 optval, optlen);
2729
2730 if (ret < 0) {
2731 errno = -ret;
2732 return -1;
2733 }
2734
2735 return 0;
2736 }
2737
2738 break;
2739 }
2740
2741 break;
2742
2743 case IPPROTO_TCP:
2744 switch (optname) {
2745 case TCP_NODELAY:
			ret = net_tcp_get_option(ctx, TCP_OPT_NODELAY, optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;
2748
2749 case TCP_KEEPIDLE:
2750 __fallthrough;
2751 case TCP_KEEPINTVL:
2752 __fallthrough;
2753 case TCP_KEEPCNT:
2754 if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE)) {
2755 ret = net_tcp_get_option(ctx,
2756 get_tcp_option(optname),
2757 optval, optlen);
2758 if (ret < 0) {
2759 errno = -ret;
2760 return -1;
2761 }
2762
2763 return 0;
2764 }
2765
2766 break;
2767 }
2768
2769 break;
2770
2771 case IPPROTO_IP:
2772 switch (optname) {
2773 case IP_TOS:
2774 if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
2775 ret = net_context_get_option(ctx,
2776 NET_OPT_DSCP_ECN,
2777 optval,
2778 optlen);
2779 if (ret < 0) {
2780 errno = -ret;
2781 return -1;
2782 }
2783
2784 return 0;
2785 }
2786
2787 break;
2788
2789 case IP_TTL:
2790 ret = net_context_get_option(ctx, NET_OPT_TTL,
2791 optval, optlen);
2792 if (ret < 0) {
2793 errno = -ret;
2794 return -1;
2795 }
2796
2797 return 0;
2798
2799 case IP_MULTICAST_TTL:
2800 ret = net_context_get_option(ctx, NET_OPT_MCAST_TTL,
2801 optval, optlen);
2802 if (ret < 0) {
2803 errno = -ret;
2804 return -1;
2805 }
2806
2807 return 0;
2808 }
2809
2810 break;
2811
2812 case IPPROTO_IPV6:
2813 switch (optname) {
2814 case IPV6_V6ONLY:
2815 if (IS_ENABLED(CONFIG_NET_IPV4_MAPPING_TO_IPV6)) {
2816 ret = net_context_get_option(ctx,
2817 NET_OPT_IPV6_V6ONLY,
2818 optval,
2819 optlen);
2820 if (ret < 0) {
2821 errno = -ret;
2822 return -1;
2823 }
2824
2825 return 0;
2826 }
2827
2828 break;
2829
2830 case IPV6_ADDR_PREFERENCES:
2831 if (IS_ENABLED(CONFIG_NET_IPV6)) {
2832 ret = net_context_get_option(ctx,
2833 NET_OPT_ADDR_PREFERENCES,
2834 optval,
2835 optlen);
2836 if (ret < 0) {
2837 errno = -ret;
2838 return -1;
2839 }
2840
2841 return 0;
2842 }
2843
2844 break;
2845
2846 case IPV6_TCLASS:
2847 if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
2848 ret = net_context_get_option(ctx,
2849 NET_OPT_DSCP_ECN,
2850 optval,
2851 optlen);
2852 if (ret < 0) {
2853 errno = -ret;
2854 return -1;
2855 }
2856
2857 return 0;
2858 }
2859
2860 break;
2861
2862 case IPV6_UNICAST_HOPS:
2863 ret = net_context_get_option(ctx,
2864 NET_OPT_UNICAST_HOP_LIMIT,
2865 optval, optlen);
2866 if (ret < 0) {
2867 errno = -ret;
2868 return -1;
2869 }
2870
2871 return 0;
2872
2873 case IPV6_MULTICAST_HOPS:
2874 ret = net_context_get_option(ctx,
2875 NET_OPT_MCAST_HOP_LIMIT,
2876 optval, optlen);
2877 if (ret < 0) {
2878 errno = -ret;
2879 return -1;
2880 }
2881
2882 return 0;
2883 }
2884
2885 break;
2886 }
2887
2888 errno = ENOPROTOOPT;
2889 return -1;
2890 }
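
/* Usage sketch (hypothetical application code): read back the pending
 * socket error, e.g. after an asynchronous connect attempt:
 *
 *   int err;
 *   socklen_t len = sizeof(err);
 *   if (zsock_getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0) {
 *       ...err is 0, or an errno-style error code...
 *   }
 */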
2891
2892 int z_impl_zsock_getsockopt(int sock, int level, int optname,
2893 void *optval, socklen_t *optlen)
2894 {
2895 int ret;
2896
2897 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, getsockopt, sock, level, optname);
2898
2899 ret = VTABLE_CALL(getsockopt, sock, level, optname, optval, optlen);
2900
2901 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, getsockopt, sock, level, optname,
2902 optval, *optlen, ret < 0 ? -errno : ret);
2903 return ret;
2904 }
2905
2906 #ifdef CONFIG_USERSPACE
2907 int z_vrfy_zsock_getsockopt(int sock, int level, int optname,
2908 void *optval, socklen_t *optlen)
2909 {
	socklen_t kernel_optlen;
	void *kernel_optval;
	int ret;

	/* Fetch optlen through a checked copy rather than dereferencing
	 * the user pointer directly.
	 */
	K_OOPS(k_usermode_from_copy(&kernel_optlen, (void *)optlen,
				    sizeof(socklen_t)));

	if (K_SYSCALL_MEMORY_WRITE(optval, kernel_optlen)) {
		errno = EPERM;
		return -1;
	}
2918
2919 kernel_optval = k_usermode_alloc_from_copy((const void *)optval,
2920 kernel_optlen);
2921 K_OOPS(!kernel_optval);
2922
2923 ret = z_impl_zsock_getsockopt(sock, level, optname,
2924 kernel_optval, &kernel_optlen);
2925
2926 K_OOPS(k_usermode_to_copy((void *)optval, kernel_optval, kernel_optlen));
2927 K_OOPS(k_usermode_to_copy((void *)optlen, &kernel_optlen,
2928 sizeof(socklen_t)));
2929
2930 k_free(kernel_optval);
2931
2932 return ret;
2933 }
2934 #include <zephyr/syscalls/zsock_getsockopt_mrsh.c>
2935 #endif /* CONFIG_USERSPACE */
2936
2937 static int ipv4_multicast_group(struct net_context *ctx, const void *optval,
2938 socklen_t optlen, bool do_join)
2939 {
2940 struct ip_mreqn *mreqn;
2941 struct net_if *iface;
2942 int ifindex, ret;
2943
2944 if (optval == NULL || optlen != sizeof(struct ip_mreqn)) {
2945 errno = EINVAL;
2946 return -1;
2947 }
2948
2949 mreqn = (struct ip_mreqn *)optval;
2950
2951 if (mreqn->imr_multiaddr.s_addr == INADDR_ANY) {
2952 errno = EINVAL;
2953 return -1;
2954 }
2955
2956 if (mreqn->imr_ifindex != 0) {
2957 iface = net_if_get_by_index(mreqn->imr_ifindex);
2958 } else {
2959 ifindex = net_if_ipv4_addr_lookup_by_index(&mreqn->imr_address);
2960 iface = net_if_get_by_index(ifindex);
2961 }
2962
2963 if (iface == NULL) {
2964 /* Check whether the context already has an interface; if not,
2965 * select the default interface.
2966 */
2967 if (ctx->iface <= 0) {
2968 iface = net_if_get_default();
2969 } else {
2970 iface = net_if_get_by_index(ctx->iface);
2971 }
2972
2973 if (iface == NULL) {
2974 errno = EINVAL;
2975 return -1;
2976 }
2977 }
2978
2979 if (do_join) {
2980 ret = net_ipv4_igmp_join(iface, &mreqn->imr_multiaddr, NULL);
2981 } else {
2982 ret = net_ipv4_igmp_leave(iface, &mreqn->imr_multiaddr);
2983 }
2984
2985 if (ret < 0) {
2986 errno = -ret;
2987 return -1;
2988 }
2989
2990 return 0;
2991 }
2992
2993 static int ipv6_multicast_group(struct net_context *ctx, const void *optval,
2994 socklen_t optlen, bool do_join)
2995 {
2996 struct ipv6_mreq *mreq;
2997 struct net_if *iface;
2998 int ret;
2999
3000 if (optval == NULL || optlen != sizeof(struct ipv6_mreq)) {
3001 errno = EINVAL;
3002 return -1;
3003 }
3004
3005 mreq = (struct ipv6_mreq *)optval;
3006
3007 if (memcmp(&mreq->ipv6mr_multiaddr,
3008 net_ipv6_unspecified_address(),
3009 sizeof(mreq->ipv6mr_multiaddr)) == 0) {
3010 errno = EINVAL;
3011 return -1;
3012 }
3013
3014 iface = net_if_get_by_index(mreq->ipv6mr_ifindex);
3015 if (iface == NULL) {
3016 /* Check whether the context already has an interface; if not,
3017 * select the default interface.
3018 */
3019 if (ctx->iface <= 0) {
3020 iface = net_if_get_default();
3021 } else {
3022 iface = net_if_get_by_index(ctx->iface);
3023 }
3024
3025 if (iface == NULL) {
3026 errno = ENOENT;
3027 return -1;
3028 }
3029 }
3030
3031 if (do_join) {
3032 ret = net_ipv6_mld_join(iface, &mreq->ipv6mr_multiaddr);
3033 } else {
3034 ret = net_ipv6_mld_leave(iface, &mreq->ipv6mr_multiaddr);
3035 }
3036
3037 if (ret < 0) {
3038 errno = -ret;
3039 return -1;
3040 }
3041
3042 return 0;
3043 }
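
/* Both helpers are reached through setsockopt(). A hypothetical example
 * of joining the IPv4 all-hosts group 224.0.0.1 on the default
 * interface:
 *
 *   struct ip_mreqn mreq = { 0 };
 *   mreq.imr_multiaddr.s_addr = htonl(0xE0000001);
 *   zsock_setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
 *                    &mreq, sizeof(mreq));
 */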
3044
3045 int zsock_setsockopt_ctx(struct net_context *ctx, int level, int optname,
3046 const void *optval, socklen_t optlen)
3047 {
3048 int ret;
3049
3050 switch (level) {
3051 case SOL_SOCKET:
3052 switch (optname) {
3053 case SO_RCVBUF:
3054 if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVBUF)) {
3055 ret = net_context_set_option(ctx,
3056 NET_OPT_RCVBUF,
3057 optval, optlen);
3058 if (ret < 0) {
3059 errno = -ret;
3060 return -1;
3061 }
3062
3063 return 0;
3064 }
3065
3066 break;
3067
3068 case SO_SNDBUF:
3069 if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDBUF)) {
3070 ret = net_context_set_option(ctx,
3071 NET_OPT_SNDBUF,
3072 optval, optlen);
3073 if (ret < 0) {
3074 errno = -ret;
3075 return -1;
3076 }
3077
3078 return 0;
3079 }
3080
3081 break;
3082
3083 case SO_REUSEADDR:
3084 if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEADDR)) {
3085 ret = net_context_set_option(ctx,
3086 NET_OPT_REUSEADDR,
3087 optval, optlen);
3088 if (ret < 0) {
3089 errno = -ret;
3090 return -1;
3091 }
3092
3093 return 0;
3094 }
3095
3096 break;
3097
3098 case SO_REUSEPORT:
3099 if (IS_ENABLED(CONFIG_NET_CONTEXT_REUSEPORT)) {
3100 ret = net_context_set_option(ctx,
3101 NET_OPT_REUSEPORT,
3102 optval, optlen);
3103 if (ret < 0) {
3104 errno = -ret;
3105 return -1;
3106 }
3107
3108 return 0;
3109 }
3110
3111 break;
3112
3113 case SO_PRIORITY:
3114 if (IS_ENABLED(CONFIG_NET_CONTEXT_PRIORITY)) {
3115 ret = net_context_set_option(ctx,
3116 NET_OPT_PRIORITY,
3117 optval, optlen);
3118 if (ret < 0) {
3119 errno = -ret;
3120 return -1;
3121 }
3122
3123 return 0;
3124 }
3125
3126 break;
3127
3128 case SO_RCVTIMEO:
3129 if (IS_ENABLED(CONFIG_NET_CONTEXT_RCVTIMEO)) {
3130 const struct zsock_timeval *tv = optval;
3131 k_timeout_t timeout;
3132
3133 if (optlen != sizeof(struct zsock_timeval)) {
3134 errno = EINVAL;
3135 return -1;
3136 }
3137
3138 if (tv->tv_sec == 0 && tv->tv_usec == 0) {
3139 timeout = K_FOREVER;
3140 } else {
3141 timeout = K_USEC(tv->tv_sec * 1000000ULL
3142 + tv->tv_usec);
3143 }
3144
3145 ret = net_context_set_option(ctx,
3146 NET_OPT_RCVTIMEO,
3147 &timeout,
3148 sizeof(timeout));
3149
3150 if (ret < 0) {
3151 errno = -ret;
3152 return -1;
3153 }
3154
3155 return 0;
3156 }
3157
3158 break;
3159
3160 case SO_SNDTIMEO:
3161 if (IS_ENABLED(CONFIG_NET_CONTEXT_SNDTIMEO)) {
3162 const struct zsock_timeval *tv = optval;
3163 k_timeout_t timeout;
3164
3165 if (optlen != sizeof(struct zsock_timeval)) {
3166 errno = EINVAL;
3167 return -1;
3168 }
3169
3170 if (tv->tv_sec == 0 && tv->tv_usec == 0) {
3171 timeout = K_FOREVER;
3172 } else {
3173 timeout = K_USEC(tv->tv_sec * 1000000ULL
3174 + tv->tv_usec);
3175 }
3176
3177 ret = net_context_set_option(ctx,
3178 NET_OPT_SNDTIMEO,
3179 &timeout,
3180 sizeof(timeout));
3181 if (ret < 0) {
3182 errno = -ret;
3183 return -1;
3184 }
3185
3186 return 0;
3187 }
3188
3189 break;
3190
3191 case SO_TXTIME:
3192 if (IS_ENABLED(CONFIG_NET_CONTEXT_TXTIME)) {
3193 ret = net_context_set_option(ctx,
3194 NET_OPT_TXTIME,
3195 optval, optlen);
3196 if (ret < 0) {
3197 errno = -ret;
3198 return -1;
3199 }
3200
3201 return 0;
3202 }
3203
3204 break;
3205
3206 case SO_SOCKS5:
3207 if (IS_ENABLED(CONFIG_SOCKS)) {
3208 ret = net_context_set_option(ctx,
3209 NET_OPT_SOCKS5,
3210 optval, optlen);
3211 if (ret < 0) {
3212 errno = -ret;
3213 return -1;
3214 }
3215
3216 net_context_set_proxy_enabled(ctx, true);
3217
3218 return 0;
3219 }
3220
3221 break;
3222
3223 case SO_BINDTODEVICE: {
3224 struct net_if *iface;
3225 const struct ifreq *ifreq = optval;
3226
3227 if (net_context_get_family(ctx) != AF_INET &&
3228 net_context_get_family(ctx) != AF_INET6) {
3229 errno = EAFNOSUPPORT;
3230 return -1;
3231 }
3232
3233 /* An optlen of 0 or an empty interface name should
3234 * remove the binding.
3235 */
3236 if ((optlen == 0) || (ifreq != NULL &&
3237 strlen(ifreq->ifr_name) == 0)) {
3238 ctx->flags &= ~NET_CONTEXT_BOUND_TO_IFACE;
3239 return 0;
3240 }
3241
3242 if ((ifreq == NULL) || (optlen != sizeof(*ifreq))) {
3243 errno = EINVAL;
3244 return -1;
3245 }
3246
3247 if (IS_ENABLED(CONFIG_NET_INTERFACE_NAME)) {
3248 ret = net_if_get_by_name(ifreq->ifr_name);
3249 if (ret < 0) {
3250 errno = -ret;
3251 return -1;
3252 }
3253
3254 iface = net_if_get_by_index(ret);
3255 if (iface == NULL) {
3256 errno = ENODEV;
3257 return -1;
3258 }
3259 } else {
3260 const struct device *dev;
3261
3262 dev = device_get_binding(ifreq->ifr_name);
3263 if (dev == NULL) {
3264 errno = ENODEV;
3265 return -1;
3266 }
3267
3268 iface = net_if_lookup_by_dev(dev);
3269 if (iface == NULL) {
3270 errno = ENODEV;
3271 return -1;
3272 }
3273 }
3274
3275 net_context_bind_iface(ctx, iface);
3276
3277 return 0;
3278 }
3279
3280 case SO_LINGER:
3281 /* Ignored; accepted for compatibility purposes only. */
3282 return 0;
3283
3284 case SO_KEEPALIVE:
3285 if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE) &&
3286 net_context_get_proto(ctx) == IPPROTO_TCP) {
3287 ret = net_tcp_set_option(ctx,
3288 TCP_OPT_KEEPALIVE,
3289 optval, optlen);
3290 if (ret < 0) {
3291 errno = -ret;
3292 return -1;
3293 }
3294
3295 return 0;
3296 }
3297
3298 break;
3299
3300 case SO_TIMESTAMPING:
3301 if (IS_ENABLED(CONFIG_NET_CONTEXT_TIMESTAMPING)) {
3302 ret = net_context_set_option(ctx,
3303 NET_OPT_TIMESTAMPING,
3304 optval, optlen);
3305
3306 if (ret < 0) {
3307 errno = -ret;
3308 return -1;
3309 }
3310
3311 return 0;
3312 }
3313
3314 break;
3315 }
3316
3317 break;
3318
3319 case IPPROTO_TCP:
3320 switch (optname) {
3321 case TCP_NODELAY:
			ret = net_tcp_set_option(ctx,
						 TCP_OPT_NODELAY, optval, optlen);
			if (ret < 0) {
				errno = -ret;
				return -1;
			}

			return 0;
3325
3326 case TCP_KEEPIDLE:
3327 __fallthrough;
3328 case TCP_KEEPINTVL:
3329 __fallthrough;
3330 case TCP_KEEPCNT:
3331 if (IS_ENABLED(CONFIG_NET_TCP_KEEPALIVE)) {
3332 ret = net_tcp_set_option(ctx,
3333 get_tcp_option(optname),
3334 optval, optlen);
3335 if (ret < 0) {
3336 errno = -ret;
3337 return -1;
3338 }
3339
3340 return 0;
3341 }
3342
3343 break;
3344 }
3345 break;
3346
3347 case IPPROTO_IP:
3348 switch (optname) {
3349 case IP_TOS:
3350 if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
3351 ret = net_context_set_option(ctx,
3352 NET_OPT_DSCP_ECN,
3353 optval,
3354 optlen);
3355 if (ret < 0) {
3356 errno = -ret;
3357 return -1;
3358 }
3359
3360 return 0;
3361 }
3362
3363 break;
3364
3365 case IP_PKTINFO:
3366 if (IS_ENABLED(CONFIG_NET_IPV4) &&
3367 IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO)) {
3368 ret = net_context_set_option(ctx,
3369 NET_OPT_RECV_PKTINFO,
3370 optval,
3371 optlen);
3372 if (ret < 0) {
3373 errno = -ret;
3374 return -1;
3375 }
3376
3377 return 0;
3378 }
3379
3380 break;
3381
3382 case IP_MULTICAST_TTL:
3383 ret = net_context_set_option(ctx, NET_OPT_MCAST_TTL,
3384 optval, optlen);
3385 if (ret < 0) {
3386 errno = -ret;
3387 return -1;
3388 }
3389
3390 return 0;
3391
3392 case IP_TTL:
3393 ret = net_context_set_option(ctx, NET_OPT_TTL,
3394 optval, optlen);
3395 if (ret < 0) {
3396 errno = -ret;
3397 return -1;
3398 }
3399
3400 return 0;
3401
3402 case IP_ADD_MEMBERSHIP:
3403 if (IS_ENABLED(CONFIG_NET_IPV4)) {
3404 return ipv4_multicast_group(ctx, optval,
3405 optlen, true);
3406 }
3407
3408 break;
3409
3410 case IP_DROP_MEMBERSHIP:
3411 if (IS_ENABLED(CONFIG_NET_IPV4)) {
3412 return ipv4_multicast_group(ctx, optval,
3413 optlen, false);
3414 }
3415
3416 break;
3417 }
3418
3419 break;
3420
3421 case IPPROTO_IPV6:
3422 switch (optname) {
3423 case IPV6_V6ONLY:
3424 if (IS_ENABLED(CONFIG_NET_IPV4_MAPPING_TO_IPV6)) {
3425 ret = net_context_set_option(ctx,
3426 NET_OPT_IPV6_V6ONLY,
3427 optval,
3428 optlen);
3429 if (ret < 0) {
3430 errno = -ret;
3431 return -1;
3432 }
3433 }
3434
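			/* Reported as success even when IPv4 mapping
			 * support is compiled out: the socket is
			 * effectively IPv6-only in that case anyway.
			 */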
3435 return 0;
3436
3437 case IPV6_RECVPKTINFO:
3438 if (IS_ENABLED(CONFIG_NET_IPV6) &&
3439 IS_ENABLED(CONFIG_NET_CONTEXT_RECV_PKTINFO)) {
3440 ret = net_context_set_option(ctx,
3441 NET_OPT_RECV_PKTINFO,
3442 optval,
3443 optlen);
3444 if (ret < 0) {
3445 errno = -ret;
3446 return -1;
3447 }
3448
3449 return 0;
3450 }
3451
3452 break;
3453
3454 case IPV6_ADDR_PREFERENCES:
3455 if (IS_ENABLED(CONFIG_NET_IPV6)) {
3456 ret = net_context_set_option(ctx,
3457 NET_OPT_ADDR_PREFERENCES,
3458 optval,
3459 optlen);
3460 if (ret < 0) {
3461 errno = -ret;
3462 return -1;
3463 }
3464
3465 return 0;
3466 }
3467
3468 break;
3469
3470 case IPV6_TCLASS:
3471 if (IS_ENABLED(CONFIG_NET_CONTEXT_DSCP_ECN)) {
3472 ret = net_context_set_option(ctx,
3473 NET_OPT_DSCP_ECN,
3474 optval,
3475 optlen);
3476 if (ret < 0) {
3477 errno = -ret;
3478 return -1;
3479 }
3480
3481 return 0;
3482 }
3483
3484 break;
3485
3486 case IPV6_UNICAST_HOPS:
3487 ret = net_context_set_option(ctx,
3488 NET_OPT_UNICAST_HOP_LIMIT,
3489 optval, optlen);
3490 if (ret < 0) {
3491 errno = -ret;
3492 return -1;
3493 }
3494
3495 return 0;
3496
3497 case IPV6_MULTICAST_HOPS:
3498 ret = net_context_set_option(ctx,
3499 NET_OPT_MCAST_HOP_LIMIT,
3500 optval, optlen);
3501 if (ret < 0) {
3502 errno = -ret;
3503 return -1;
3504 }
3505
3506 return 0;
3507
3508 case IPV6_ADD_MEMBERSHIP:
3509 if (IS_ENABLED(CONFIG_NET_IPV6)) {
3510 return ipv6_multicast_group(ctx, optval,
3511 optlen, true);
3512 }
3513
3514 break;
3515
3516 case IPV6_DROP_MEMBERSHIP:
3517 if (IS_ENABLED(CONFIG_NET_IPV6)) {
3518 return ipv6_multicast_group(ctx, optval,
3519 optlen, false);
3520 }
3521
3522 break;
3523 }
3524
3525 break;
3526 }
3527
3528 errno = ENOPROTOOPT;
3529 return -1;
3530 }
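
/* Usage sketch (hypothetical application code): give blocking reads a
 * five second limit. Note that, as implemented above, an all-zero
 * timeval means "wait forever":
 *
 *   struct zsock_timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *   zsock_setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 */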
3531
3532 int z_impl_zsock_setsockopt(int sock, int level, int optname,
3533 const void *optval, socklen_t optlen)
3534 {
3535 int ret;
3536
3537 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, setsockopt, sock,
3538 level, optname, optval, optlen);
3539
3540 ret = VTABLE_CALL(setsockopt, sock, level, optname, optval, optlen);
3541
3542 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, setsockopt, sock,
3543 ret < 0 ? -errno : ret);
3544 return ret;
3545 }
3546
3547 #ifdef CONFIG_USERSPACE
3548 int z_vrfy_zsock_setsockopt(int sock, int level, int optname,
3549 const void *optval, socklen_t optlen)
3550 {
3551 void *kernel_optval;
3552 int ret;
3553
3554 kernel_optval = k_usermode_alloc_from_copy((const void *)optval, optlen);
3555 K_OOPS(!kernel_optval);
3556
3557 ret = z_impl_zsock_setsockopt(sock, level, optname,
3558 kernel_optval, optlen);
3559
3560 k_free(kernel_optval);
3561
3562 return ret;
3563 }
3564 #include <zephyr/syscalls/zsock_setsockopt_mrsh.c>
3565 #endif /* CONFIG_USERSPACE */
3566
3567 int zsock_getpeername_ctx(struct net_context *ctx, struct sockaddr *addr,
3568 socklen_t *addrlen)
3569 {
3570 socklen_t newlen = 0;
3571
3572 if (addr == NULL || addrlen == NULL) {
3573 SET_ERRNO(-EINVAL);
3574 }
3575
3576 if (!(ctx->flags & NET_CONTEXT_REMOTE_ADDR_SET)) {
3577 SET_ERRNO(-ENOTCONN);
3578 }
3579
3580 if (net_context_get_type(ctx) == SOCK_STREAM &&
3581 net_context_get_state(ctx) != NET_CONTEXT_CONNECTED) {
3582 SET_ERRNO(-ENOTCONN);
3583 }
3584
3585 if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->remote.sa_family == AF_INET) {
3586 struct sockaddr_in addr4 = { 0 };
3587
3588 addr4.sin_family = AF_INET;
3589 addr4.sin_port = net_sin(&ctx->remote)->sin_port;
3590 memcpy(&addr4.sin_addr, &net_sin(&ctx->remote)->sin_addr,
3591 sizeof(struct in_addr));
3592 newlen = sizeof(struct sockaddr_in);
3593
3594 memcpy(addr, &addr4, MIN(*addrlen, newlen));
3595 } else if (IS_ENABLED(CONFIG_NET_IPV6) &&
3596 ctx->remote.sa_family == AF_INET6) {
3597 struct sockaddr_in6 addr6 = { 0 };
3598
3599 addr6.sin6_family = AF_INET6;
3600 addr6.sin6_port = net_sin6(&ctx->remote)->sin6_port;
3601 memcpy(&addr6.sin6_addr, &net_sin6(&ctx->remote)->sin6_addr,
3602 sizeof(struct in6_addr));
3603 newlen = sizeof(struct sockaddr_in6);
3604
3605 memcpy(addr, &addr6, MIN(*addrlen, newlen));
3606 } else {
3607 SET_ERRNO(-EINVAL);
3608 }
3609
3610 *addrlen = newlen;
3611
3612 return 0;
3613 }
3614
3615 int z_impl_zsock_getpeername(int sock, struct sockaddr *addr,
3616 socklen_t *addrlen)
3617 {
3618 int ret;
3619
3620 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, getpeername, sock);
3621
3622 ret = VTABLE_CALL(getpeername, sock, addr, addrlen);
3623
3624 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, getpeername, sock,
3625 addr, addrlen,
3626 ret < 0 ? -errno : ret);
3627 return ret;
3628 }
3629
3630 #ifdef CONFIG_USERSPACE
3631 static inline int z_vrfy_zsock_getpeername(int sock, struct sockaddr *addr,
3632 socklen_t *addrlen)
3633 {
3634 socklen_t addrlen_copy;
3635 int ret;
3636
3637 K_OOPS(k_usermode_from_copy(&addrlen_copy, (void *)addrlen,
3638 sizeof(socklen_t)));
3639
3640 if (K_SYSCALL_MEMORY_WRITE(addr, addrlen_copy)) {
3641 errno = EFAULT;
3642 return -1;
3643 }
3644
3645 ret = z_impl_zsock_getpeername(sock, (struct sockaddr *)addr,
3646 &addrlen_copy);
3647
3648 if (ret == 0 &&
3649 k_usermode_to_copy((void *)addrlen, &addrlen_copy,
3650 sizeof(socklen_t))) {
3651 errno = EINVAL;
3652 return -1;
3653 }
3654
3655 return ret;
3656 }
3657 #include <zephyr/syscalls/zsock_getpeername_mrsh.c>
3658 #endif /* CONFIG_USERSPACE */
3659
3660 int zsock_getsockname_ctx(struct net_context *ctx, struct sockaddr *addr,
3661 socklen_t *addrlen)
3662 {
3663 socklen_t newlen = 0;
3664 int ret;
3665
3666 if (IS_ENABLED(CONFIG_NET_IPV4) && ctx->local.family == AF_INET) {
3667 struct sockaddr_in addr4 = { 0 };
3668
3669 if (net_sin_ptr(&ctx->local)->sin_addr == NULL) {
3670 SET_ERRNO(-EINVAL);
3671 }
3672
3673 newlen = sizeof(struct sockaddr_in);
3674
3675 ret = net_context_get_local_addr(ctx,
3676 (struct sockaddr *)&addr4,
3677 &newlen);
3678 if (ret < 0) {
3679 SET_ERRNO(-ret);
3680 }
3681
3682 memcpy(addr, &addr4, MIN(*addrlen, newlen));
3683
3684 } else if (IS_ENABLED(CONFIG_NET_IPV6) && ctx->local.family == AF_INET6) {
3685 struct sockaddr_in6 addr6 = { 0 };
3686
3687 if (net_sin6_ptr(&ctx->local)->sin6_addr == NULL) {
3688 SET_ERRNO(-EINVAL);
3689 }
3690
3691 newlen = sizeof(struct sockaddr_in6);
3692
3693 ret = net_context_get_local_addr(ctx,
3694 (struct sockaddr *)&addr6,
3695 &newlen);
3696 if (ret < 0) {
3697 SET_ERRNO(-ret);
3698 }
3699
3700 memcpy(addr, &addr6, MIN(*addrlen, newlen));
3701 } else {
3702 SET_ERRNO(-EINVAL);
3703 }
3704
3705 *addrlen = newlen;
3706
3707 return 0;
3708 }
3709
3710 int z_impl_zsock_getsockname(int sock, struct sockaddr *addr,
3711 socklen_t *addrlen)
3712 {
3713 int ret;
3714
3715 SYS_PORT_TRACING_OBJ_FUNC_ENTER(socket, getsockname, sock);
3716
3717 ret = VTABLE_CALL(getsockname, sock, addr, addrlen);
3718
3719 SYS_PORT_TRACING_OBJ_FUNC_EXIT(socket, getsockname, sock,
3720 addr, addrlen,
3721 ret < 0 ? -errno : ret);
3722 return ret;
3723 }
3724
3725 #ifdef CONFIG_USERSPACE
3726 static inline int z_vrfy_zsock_getsockname(int sock, struct sockaddr *addr,
3727 socklen_t *addrlen)
3728 {
3729 socklen_t addrlen_copy;
3730 int ret;
3731
3732 K_OOPS(k_usermode_from_copy(&addrlen_copy, (void *)addrlen,
3733 sizeof(socklen_t)));
3734
3735 if (K_SYSCALL_MEMORY_WRITE(addr, addrlen_copy)) {
3736 errno = EFAULT;
3737 return -1;
3738 }
3739
3740 ret = z_impl_zsock_getsockname(sock, (struct sockaddr *)addr,
3741 &addrlen_copy);
3742
3743 if (ret == 0 &&
3744 k_usermode_to_copy((void *)addrlen, &addrlen_copy,
3745 sizeof(socklen_t))) {
3746 errno = EINVAL;
3747 return -1;
3748 }
3749
3750 return ret;
3751 }
3752 #include <zephyr/syscalls/zsock_getsockname_mrsh.c>
3753 #endif /* CONFIG_USERSPACE */
3754
3755 static ssize_t sock_read_vmeth(void *obj, void *buffer, size_t count)
3756 {
3757 return zsock_recvfrom_ctx(obj, buffer, count, 0, NULL, NULL);
3758 }
3759
3760 static ssize_t sock_write_vmeth(void *obj, const void *buffer, size_t count)
3761 {
3762 return zsock_sendto_ctx(obj, buffer, count, 0, NULL, 0);
3763 }
3764
3765 static void zsock_ctx_set_lock(struct net_context *ctx, struct k_mutex *lock)
3766 {
3767 ctx->cond.lock = lock;
3768 }
3769
3770 static int sock_ioctl_vmeth(void *obj, unsigned int request, va_list args)
3771 {
3772 switch (request) {
3773
3774 /* In Zephyr, fcntl() is just an alias of ioctl(). */
3775 case F_GETFL:
3776 if (sock_is_nonblock(obj)) {
3777 return O_NONBLOCK;
3778 }
3779
3780 return 0;
3781
3782 case F_SETFL: {
3783 int flags;
3784
3785 flags = va_arg(args, int);
3786
3787 if (flags & O_NONBLOCK) {
3788 sock_set_flag(obj, SOCK_NONBLOCK, SOCK_NONBLOCK);
3789 } else {
3790 sock_set_flag(obj, SOCK_NONBLOCK, 0);
3791 }
3792
3793 return 0;
3794 }
3795
3796 case ZFD_IOCTL_POLL_PREPARE: {
3797 struct zsock_pollfd *pfd;
3798 struct k_poll_event **pev;
3799 struct k_poll_event *pev_end;
3800
3801 pfd = va_arg(args, struct zsock_pollfd *);
3802 pev = va_arg(args, struct k_poll_event **);
3803 pev_end = va_arg(args, struct k_poll_event *);
3804
3805 return zsock_poll_prepare_ctx(obj, pfd, pev, pev_end);
3806 }
3807
3808 case ZFD_IOCTL_POLL_UPDATE: {
3809 struct zsock_pollfd *pfd;
3810 struct k_poll_event **pev;
3811
3812 pfd = va_arg(args, struct zsock_pollfd *);
3813 pev = va_arg(args, struct k_poll_event **);
3814
3815 return zsock_poll_update_ctx(obj, pfd, pev);
3816 }
3817
3818 case ZFD_IOCTL_SET_LOCK: {
3819 struct k_mutex *lock;
3820
3821 lock = va_arg(args, struct k_mutex *);
3822
3823 zsock_ctx_set_lock(obj, lock);
3824 return 0;
3825 }
3826
3827 case ZFD_IOCTL_FIONBIO:
3828 sock_set_flag(obj, SOCK_NONBLOCK, SOCK_NONBLOCK);
3829 return 0;
3830
3831 case ZFD_IOCTL_FIONREAD: {
3832 int *avail = va_arg(args, int *);
3833
3834 *avail = zsock_fionread_ctx(obj);
3835 return 0;
3836 }
3837
3838 default:
3839 errno = EOPNOTSUPP;
3840 return -1;
3841 }
3842 }
3843
3844 static int sock_shutdown_vmeth(void *obj, int how)
3845 {
3846 return zsock_shutdown_ctx(obj, how);
3847 }
3848
3849 static int sock_bind_vmeth(void *obj, const struct sockaddr *addr,
3850 socklen_t addrlen)
3851 {
3852 return zsock_bind_ctx(obj, addr, addrlen);
3853 }
3854
3855 static int sock_connect_vmeth(void *obj, const struct sockaddr *addr,
3856 socklen_t addrlen)
3857 {
3858 return zsock_connect_ctx(obj, addr, addrlen);
3859 }
3860
3861 static int sock_listen_vmeth(void *obj, int backlog)
3862 {
3863 return zsock_listen_ctx(obj, backlog);
3864 }
3865
3866 static int sock_accept_vmeth(void *obj, struct sockaddr *addr,
3867 socklen_t *addrlen)
3868 {
3869 return zsock_accept_ctx(obj, addr, addrlen);
3870 }
3871
3872 static ssize_t sock_sendto_vmeth(void *obj, const void *buf, size_t len,
3873 int flags, const struct sockaddr *dest_addr,
3874 socklen_t addrlen)
3875 {
3876 return zsock_sendto_ctx(obj, buf, len, flags, dest_addr, addrlen);
3877 }
3878
3879 static ssize_t sock_sendmsg_vmeth(void *obj, const struct msghdr *msg,
3880 int flags)
3881 {
3882 return zsock_sendmsg_ctx(obj, msg, flags);
3883 }
3884
3885 static ssize_t sock_recvmsg_vmeth(void *obj, struct msghdr *msg, int flags)
3886 {
3887 return zsock_recvmsg_ctx(obj, msg, flags);
3888 }
3889
3890 static ssize_t sock_recvfrom_vmeth(void *obj, void *buf, size_t max_len,
3891 int flags, struct sockaddr *src_addr,
3892 socklen_t *addrlen)
3893 {
3894 return zsock_recvfrom_ctx(obj, buf, max_len, flags,
3895 src_addr, addrlen);
3896 }
3897
3898 static int sock_getsockopt_vmeth(void *obj, int level, int optname,
3899 void *optval, socklen_t *optlen)
3900 {
3901 return zsock_getsockopt_ctx(obj, level, optname, optval, optlen);
3902 }
3903
3904 static int sock_setsockopt_vmeth(void *obj, int level, int optname,
3905 const void *optval, socklen_t optlen)
3906 {
3907 return zsock_setsockopt_ctx(obj, level, optname, optval, optlen);
3908 }
3909
3910 static int sock_close_vmeth(void *obj)
3911 {
3912 return zsock_close_ctx(obj);
3913 }
3914 static int sock_getpeername_vmeth(void *obj, struct sockaddr *addr,
3915 socklen_t *addrlen)
3916 {
3917 return zsock_getpeername_ctx(obj, addr, addrlen);
3918 }
3919
3920 static int sock_getsockname_vmeth(void *obj, struct sockaddr *addr,
3921 socklen_t *addrlen)
3922 {
3923 return zsock_getsockname_ctx(obj, addr, addrlen);
3924 }
3925
3926 const struct socket_op_vtable sock_fd_op_vtable = {
3927 .fd_vtable = {
3928 .read = sock_read_vmeth,
3929 .write = sock_write_vmeth,
3930 .close = sock_close_vmeth,
3931 .ioctl = sock_ioctl_vmeth,
3932 },
3933 .shutdown = sock_shutdown_vmeth,
3934 .bind = sock_bind_vmeth,
3935 .connect = sock_connect_vmeth,
3936 .listen = sock_listen_vmeth,
3937 .accept = sock_accept_vmeth,
3938 .sendto = sock_sendto_vmeth,
3939 .sendmsg = sock_sendmsg_vmeth,
3940 .recvmsg = sock_recvmsg_vmeth,
3941 .recvfrom = sock_recvfrom_vmeth,
3942 .getsockopt = sock_getsockopt_vmeth,
3943 .setsockopt = sock_setsockopt_vmeth,
3944 .getpeername = sock_getpeername_vmeth,
3945 .getsockname = sock_getsockname_vmeth,
3946 };
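
/* Every zsock_*() entry point above dispatches through this vtable (via
 * VTABLE_CALL or the common fd table), which is what lets alternative
 * implementations, e.g. TLS or offloaded sockets, plug their own ops in
 * behind the same file descriptors.
 */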
3947
3948 #if defined(CONFIG_NET_NATIVE)
3949 static bool inet_is_supported(int family, int type, int proto)
{
	ARG_UNUSED(type);
	ARG_UNUSED(proto);

	if (family != AF_INET && family != AF_INET6) {
3952 return false;
3953 }
3954
3955 return true;
3956 }
3957
3958 NET_SOCKET_REGISTER(af_inet46, NET_SOCKET_DEFAULT_PRIO, AF_UNSPEC,
3959 inet_is_supported, zsock_socket_internal);
3960 #endif /* CONFIG_NET_NATIVE */
3961