1 /*
2 * Copyright (c) 2017-2018 Linaro Limited
3 * Copyright (c) 2021 Nordic Semiconductor
4 * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
5 * Copyright (c) 2024 Tenstorrent AI ULC
6 *
7 * SPDX-License-Identifier: Apache-2.0
8 */
9
10 #include <zephyr/kernel.h>
11 #include <zephyr/internal/syscall_handler.h>
12 #include <zephyr/sys/fdtable.h>
13
14 #if defined(CONFIG_NET_SOCKETS_SOCKOPT_TLS)
15 bool net_socket_is_tls(void *obj);
16 #else
17 #define net_socket_is_tls(obj) false
18 #endif
19
/*
 * Core poll() engine shared by the POSIX poll()/zsock_poll() wrappers.
 *
 * Works in two phases:
 *  1. PREPARE: for each valid fd, call its vtable's POLL_PREPARE ioctl so
 *     it can register k_poll_event entries in poll_events[].
 *  2. k_poll() + UPDATE: wait on the registered events, then call each
 *     fd's POLL_UPDATE ioctl to translate kernel results into
 *     pfd->revents bits; retry while nothing is ready and time remains.
 *
 * @param fds     Array of fds/events to poll; revents is written in place.
 * @param nfds    Number of entries in @p fds.
 * @param timeout Maximum time to wait (K_NO_WAIT / K_FOREVER accepted).
 *
 * @return Number of entries with non-zero revents (0 on timeout), or -1
 *         with errno set on failure.
 */
int zvfs_poll_internal(struct zvfs_pollfd *fds, int nfds, k_timeout_t timeout)
{
	bool retry;
	int ret = 0;
	int i;
	struct zvfs_pollfd *pfd;
	/* A single fd may register more than one event, so the array is
	 * bounded by CONFIG_ZVFS_POLL_MAX events, not by nfds.
	 */
	struct k_poll_event poll_events[CONFIG_ZVFS_POLL_MAX];
	struct k_poll_event *pev;
	struct k_poll_event *pev_end = poll_events + ARRAY_SIZE(poll_events);
	const struct fd_op_vtable *vtable;
	struct k_mutex *lock;
	k_timepoint_t end;
	bool offload = false;
	const struct fd_op_vtable *offl_vtable = NULL;
	void *offl_ctx = NULL;

	/* Convert the relative timeout to an absolute deadline so the
	 * remaining wait can be recomputed after the prepare phase and
	 * after each k_poll() retry.
	 */
	end = sys_timepoint_calc(timeout);

	/* Phase 1: let each fd's vtable register its k_poll events. */
	pev = poll_events;
	for (pfd = fds, i = nfds; i--; pfd++) {
		void *ctx;
		int result;

		/* Per POSIX, negative fd's are just ignored */
		if (pfd->fd < 0) {
			continue;
		}

		ctx = zvfs_get_fd_obj_and_vtable(pfd->fd, &vtable, &lock);
		if (ctx == NULL) {
			/* Will set POLLNVAL in return loop */
			continue;
		}

		/* Serialize against concurrent operations on the same fd;
		 * the per-fd lock is held only across the ioctl call.
		 */
		(void)k_mutex_lock(lock, K_FOREVER);

		result = zvfs_fdtable_call_ioctl(vtable, ctx, ZFD_IOCTL_POLL_PREPARE, pfd, &pev,
						 pev_end);
		if (result == -EALREADY) {
			/* If POLL_PREPARE returned with EALREADY, it means
			 * it already detected that some socket is ready. In
			 * this case, we still perform a k_poll to pick up
			 * as many events as possible, but without any wait.
			 */
			timeout = K_NO_WAIT;
			end = sys_timepoint_calc(timeout);
			result = 0;
		} else if (result == -EXDEV) {
			/* If POLL_PREPARE returned EXDEV, it means
			 * it detected an offloaded socket.
			 * If offloaded socket is used with native TLS, the TLS
			 * wrapper for the offloaded poll will be used.
			 * In case the fds array contains a mixup of offloaded
			 * and non-offloaded sockets, the offloaded poll handler
			 * shall return an error.
			 */
			offload = true;
			if (offl_vtable == NULL || net_socket_is_tls(ctx)) {
				offl_vtable = vtable;
				offl_ctx = ctx;
			}

			result = 0;
		}

		k_mutex_unlock(lock);

		if (result < 0) {
			errno = -result;
			return -1;
		}
	}

	/* Offloaded sockets: delegate the entire poll to the offload driver
	 * (which expects a millisecond timeout) instead of using k_poll.
	 */
	if (offload) {
		int poll_timeout;

		if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			poll_timeout = SYS_FOREVER_MS;
		} else {
			poll_timeout = k_ticks_to_ms_floor32(timeout.ticks);
		}

		return zvfs_fdtable_call_ioctl(offl_vtable, offl_ctx, ZFD_IOCTL_POLL_OFFLOAD, fds,
					       nfds, poll_timeout);
	}

	/* Account for time already spent in the prepare phase. */
	timeout = sys_timepoint_timeout(end);

	/* Phase 2: wait, then translate kernel events into revents bits. */
	do {
		ret = k_poll(poll_events, pev - poll_events, timeout);
		/* EAGAIN when timeout expired, EINTR when cancelled (i.e. EOF) */
		if (ret != 0 && ret != -EAGAIN && ret != -EINTR) {
			errno = -ret;
			return -1;
		}

		retry = false;
		ret = 0;

		pev = poll_events;
		for (pfd = fds, i = nfds; i--; pfd++) {
			void *ctx;
			int result;

			pfd->revents = 0;

			if (pfd->fd < 0) {
				continue;
			}

			ctx = zvfs_get_fd_obj_and_vtable(pfd->fd, &vtable, &lock);
			if (ctx == NULL) {
				/* fd was closed (or invalid) since prepare */
				pfd->revents = ZVFS_POLLNVAL;
				ret++;
				continue;
			}

			(void)k_mutex_lock(lock, K_FOREVER);

			result = zvfs_fdtable_call_ioctl(vtable, ctx, ZFD_IOCTL_POLL_UPDATE, pfd,
							 &pev);
			k_mutex_unlock(lock);

			/* -EAGAIN from POLL_UPDATE means "nothing for this fd
			 * yet, poll again"; any other error is fatal.
			 */
			if (result == -EAGAIN) {
				retry = true;
				continue;
			} else if (result != 0) {
				errno = -result;
				return -1;
			}

			if (pfd->revents != 0) {
				ret++;
			}
		}

		if (retry) {
			/* Some fd already has events: report them rather than
			 * looping for more.
			 */
			if (ret > 0) {
				break;
			}

			/* Re-derive the remaining wait; stop once the
			 * deadline has passed.
			 */
			timeout = sys_timepoint_timeout(end);

			if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
				break;
			}
		}
	} while (retry);

	return ret;
}
171
z_impl_zvfs_poll(struct zvfs_pollfd * fds,int nfds,int poll_timeout)172 int z_impl_zvfs_poll(struct zvfs_pollfd *fds, int nfds, int poll_timeout)
173 {
174 k_timeout_t timeout;
175
176 if (poll_timeout < 0) {
177 timeout = K_FOREVER;
178 } else {
179 timeout = K_MSEC(poll_timeout);
180 }
181
182 return zvfs_poll_internal(fds, nfds, timeout);
183 }
184
185 #ifdef CONFIG_USERSPACE
z_vrfy_zvfs_poll(struct zvfs_pollfd * fds,int nfds,int timeout)186 static inline int z_vrfy_zvfs_poll(struct zvfs_pollfd *fds, int nfds, int timeout)
187 {
188 struct zvfs_pollfd *fds_copy;
189 size_t fds_size;
190 int ret;
191
192 /* Copy fds array from user mode */
193 if (size_mul_overflow(nfds, sizeof(struct zvfs_pollfd), &fds_size)) {
194 errno = EFAULT;
195 return -1;
196 }
197 fds_copy = k_usermode_alloc_from_copy((void *)fds, fds_size);
198 if (!fds_copy) {
199 errno = ENOMEM;
200 return -1;
201 }
202
203 ret = z_impl_zvfs_poll(fds_copy, nfds, timeout);
204
205 if (ret >= 0) {
206 k_usermode_to_copy((void *)fds, fds_copy, fds_size);
207 }
208 k_free(fds_copy);
209
210 return ret;
211 }
212 #include <zephyr/syscalls/zvfs_poll_mrsh.c>
213 #endif
214