/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(net_sock_svc, CONFIG_NET_SOCKETS_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/net/socket_service.h>
#include <zephyr/zvfs/eventfd.h>

static int init_socket_service(void);

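/* Lifecycle states of the socket service dispatcher thread. Registrations
 * block until the thread reports that it is running or has failed to start.
 */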
enum SOCKET_SERVICE_THREAD_STATUS {
	SOCKET_SERVICE_THREAD_UNINITIALIZED = 0,
	SOCKET_SERVICE_THREAD_FAILED,
	SOCKET_SERVICE_THREAD_STOPPED,
	SOCKET_SERVICE_THREAD_RUNNING,
};
static enum SOCKET_SERVICE_THREAD_STATUS thread_status;

static K_MUTEX_DEFINE(lock);
static K_CONDVAR_DEFINE(wait_start);

STRUCT_SECTION_START_EXTERN(net_socket_service_desc);
STRUCT_SECTION_END_EXTERN(net_socket_service_desc);

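/* Global poll context. Slot 0 of the events array is reserved for the
 * eventfd used to wake the dispatcher thread, the remaining slots hold the
 * pollfds of the registered services in one contiguous block.
 */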
static struct service {
	struct zsock_pollfd events[CONFIG_ZVFS_POLL_MAX];
	int count;
} ctx;

#define get_idx(svc) (*(svc->idx))

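/* Call the user supplied callback for every socket service descriptor
 * placed into the dedicated linker section.
 */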
void net_socket_service_foreach(net_socket_service_cb_t cb, void *user_data)
{
	STRUCT_SECTION_FOREACH(net_socket_service_desc, svc) {
		cb(svc, user_data);
	}
}

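/* Mark all pollfds of a service as unused so that the dispatcher thread
 * ignores them on the next poll round.
 */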
static void cleanup_svc_events(const struct net_socket_service_desc *svc)
{
	for (int i = 0; i < svc->pev_len; i++) {
		svc->pev[i].event.fd = -1;
		svc->pev[i].event.events = 0;
	}
}

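/* Attach a set of pollable file descriptors to a statically defined socket
 * service, or detach them when fds is NULL, and wake the dispatcher thread
 * so that it picks up the updated set.
 */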
int z_impl_net_socket_service_register(const struct net_socket_service_desc *svc,
				       struct zsock_pollfd *fds, int len,
				       void *user_data)
{
	int i, ret = -ENOENT;

	k_mutex_lock(&lock, K_FOREVER);

	if (thread_status == SOCKET_SERVICE_THREAD_UNINITIALIZED) {
		(void)k_condvar_wait(&wait_start, &lock, K_FOREVER);
	} else if (thread_status != SOCKET_SERVICE_THREAD_RUNNING) {
		NET_ERR("Socket service thread not running, service %p register fails.", svc);
		ret = -EIO;
		goto out;
	}

	if (STRUCT_SECTION_START(net_socket_service_desc) > svc ||
	    STRUCT_SECTION_END(net_socket_service_desc) <= svc) {
		goto out;
	}

	if (fds == NULL) {
		cleanup_svc_events(svc);
	} else {
		if (len > svc->pev_len) {
			NET_DBG("Too many file descriptors, "
				"max is %d for service %p",
				svc->pev_len, svc);
			ret = -ENOMEM;
			goto out;
		}

		for (i = 0; i < len; i++) {
			svc->pev[i].event = fds[i];
			svc->pev[i].user_data = user_data;
		}
	}

	/* Tell the thread to re-read the variables */
	zvfs_eventfd_write(ctx.events[0].fd, 1);
	ret = 0;

out:
	k_mutex_unlock(&lock);

	return ret;
}

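/* Map a triggered pollfd back to the service descriptor and per-service
 * event slot that registered it.
 */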
static struct net_socket_service_desc *find_svc_and_event(
	struct zsock_pollfd *pev,
	struct net_socket_service_event **event)
{
	STRUCT_SECTION_FOREACH(net_socket_service_desc, svc) {
		for (int i = 0; i < svc->pev_len; i++) {
			if (svc->pev[i].event.fd == pev->fd) {
				*event = &svc->pev[i];
				return svc;
			}
		}
	}

	return NULL;
}

/* We do not set the user callback to our work struct because we need to
 * hook into the flow and restore the global poll array so that the next poll
 * round will not notice it and call the callback again while we are
 * servicing the callback.
 */
void net_socket_service_callback(struct net_socket_service_event *pev)
{
	struct net_socket_service_desc *svc = pev->svc;
	struct net_socket_service_event ev = *pev;

	ev.callback(&ev);

	/* Copy back the socket fd to the global array because we marked
	 * it as -1 when triggering the work.
	 */
	for (int i = 0; i < svc->pev_len; i++) {
		ctx.events[get_idx(svc) + i] = svc->pev[i].event;
	}
}

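/* Run the service callback synchronously for one triggered pollfd. */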
static int call_work(struct zsock_pollfd *pev, struct net_socket_service_event *event)
{
	int ret = 0;

	/* Mark the global fd non pollable so that we do not
	 * call the callback a second time.
	 */
	pev->fd = -1;

	/* Synchronous call */
	net_socket_service_callback(event);

	return ret;
}

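/* Resolve which service owns the triggered pollfd and dispatch its callback. */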
static int trigger_work(struct zsock_pollfd *pev)
{
	struct net_socket_service_event *event;
	struct net_socket_service_desc *svc;

	svc = find_svc_and_event(pev, &event);
	if (svc == NULL) {
		return -ENOENT;
	}

	event->svc = svc;

	/* Copy the triggered event to our event so that we know what
	 * actually caused the event.
	 */
	event->event = *pev;

	return call_work(pev, event);
}

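/* Dispatcher thread: build the global poll array from all registered
 * services, poll it, and run the matching service callback for every socket
 * that reports activity. A write to the eventfd in slot 0 makes the thread
 * rebuild the array.
 */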
static void socket_service_thread(void)
{
	int ret, i, fd, count = 0;
	zvfs_eventfd_t value;

	STRUCT_SECTION_COUNT(net_socket_service_desc, &ret);
	if (ret == 0) {
		NET_INFO("No socket services found, service disabled.");
		goto fail;
	}

	/* Create contiguous poll event array to enable socket polling */
	STRUCT_SECTION_FOREACH(net_socket_service_desc, svc) {
		NET_DBG("Service %s has %d pollable sockets",
			COND_CODE_1(CONFIG_NET_SOCKETS_LOG_LEVEL_DBG,
				    (svc->owner), ("")),
			svc->pev_len);
		get_idx(svc) = count + 1;
		count += svc->pev_len;
	}

	if ((count + 1) > ARRAY_SIZE(ctx.events)) {
		NET_ERR("You have %d services to monitor but "
			"%zd poll entries configured.",
			count + 1, ARRAY_SIZE(ctx.events));
		NET_ERR("Please increase value of %s to at least %d",
			"CONFIG_ZVFS_POLL_MAX", count + 1);
		goto fail;
	}

	NET_DBG("Monitoring %d socket entries", count);

	ctx.count = count + 1;

	/* Create a zvfs_eventfd that can be used to trigger events during polling */
	fd = zvfs_eventfd(0, 0);
	if (fd < 0) {
		fd = -errno;
		NET_ERR("zvfs_eventfd failed (%d)", fd);
		goto out;
	}

	thread_status = SOCKET_SERVICE_THREAD_RUNNING;
	k_condvar_broadcast(&wait_start);

	ctx.events[0].fd = fd;
	ctx.events[0].events = ZSOCK_POLLIN;

restart:
	i = 1;

	k_mutex_lock(&lock, K_FOREVER);

	/* Copy individual events to the big array */
	STRUCT_SECTION_FOREACH(net_socket_service_desc, svc) {
		for (int j = 0; j < svc->pev_len; j++) {
			ctx.events[get_idx(svc) + j] = svc->pev[j].event;
		}
	}

	k_mutex_unlock(&lock);

	while (true) {
		ret = zsock_poll(ctx.events, count + 1, -1);
		if (ret < 0) {
			ret = -errno;
			NET_ERR("poll failed (%d)", ret);
			goto out;
		}

		if (ret == 0) {
			/* should not happen because timeout is -1 */
			break;
		}

		if (ret > 0 && ctx.events[0].revents) {
			zvfs_eventfd_read(ctx.events[0].fd, &value);
			NET_DBG("Received restart event.");
			goto restart;
		}

		for (i = 1; i < (count + 1); i++) {
			if (ctx.events[i].fd < 0) {
				continue;
			}

			if (ctx.events[i].revents > 0) {
				ret = trigger_work(&ctx.events[i]);
				if (ret < 0) {
					NET_DBG("Triggering work failed (%d)", ret);
				}
			}
		}
	}

out:
	NET_DBG("Socket service thread stopped");
	thread_status = SOCKET_SERVICE_THREAD_STOPPED;

	return;

fail:
	thread_status = SOCKET_SERVICE_THREAD_FAILED;
	k_condvar_broadcast(&wait_start);
}

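/* Create the dispatcher thread with the configured stack size and with the
 * priority clamped to the application priority range.
 */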
static int init_socket_service(void)
{
	k_tid_t ssm;
	static struct k_thread service_thread;

	static K_THREAD_STACK_DEFINE(service_thread_stack,
				     CONFIG_NET_SOCKETS_SERVICE_STACK_SIZE);

	ssm = k_thread_create(&service_thread,
			      service_thread_stack,
			      K_THREAD_STACK_SIZEOF(service_thread_stack),
			      (k_thread_entry_t)socket_service_thread, NULL, NULL, NULL,
			      CLAMP(CONFIG_NET_SOCKETS_SERVICE_THREAD_PRIO,
				    K_HIGHEST_APPLICATION_THREAD_PRIO,
				    K_LOWEST_APPLICATION_THREAD_PRIO), 0, K_NO_WAIT);

	k_thread_name_set(ssm, "net_socket_service");

	return 0;
}

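/* Init hook that starts the socket service dispatcher thread. */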
void socket_service_init(void)
{
	(void)init_socket_service();
}