/*
 * Event loop based on select() loop
 * Copyright (c) 2002-2009, Jouni Malinen <j@w1.fi>
 *
 * This software may be distributed under the terms of the BSD license.
 * See README for more details.
 */

#include "includes.h"
#include <assert.h>

#include "common.h"
#include "trace.h"
#include "list.h"
#include "eloop.h"

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_EPOLL)
#error Do not define both CONFIG_ELOOP_POLL and CONFIG_ELOOP_EPOLL
#endif

#if defined(CONFIG_ELOOP_POLL) && defined(CONFIG_ELOOP_KQUEUE)
#error Do not define both CONFIG_ELOOP_POLL and CONFIG_ELOOP_KQUEUE
#endif

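/* Default to the select() backend when no other event loop mechanism was
 * selected at build time. */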
#if !defined(CONFIG_ELOOP_POLL) && !defined(CONFIG_ELOOP_EPOLL) && \
    !defined(CONFIG_ELOOP_KQUEUE)
#define CONFIG_ELOOP_SELECT
#endif

#ifdef CONFIG_ELOOP_POLL
#include <poll.h>
#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_EPOLL
#include <sys/epoll.h>
#endif /* CONFIG_ELOOP_EPOLL */

#ifdef CONFIG_ELOOP_KQUEUE
#include <sys/event.h>
#endif /* CONFIG_ELOOP_KQUEUE */

struct eloop_sock {
	int sock;
	void *eloop_data;
	void *user_data;
	eloop_sock_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_timeout {
	struct dl_list list;
	struct os_reltime time;
	void *eloop_data;
	void *user_data;
	eloop_timeout_handler handler;
	WPA_TRACE_REF(eloop);
	WPA_TRACE_REF(user);
	WPA_TRACE_INFO
};

struct eloop_signal {
	int sig;
	void *user_data;
	eloop_signal_handler handler;
	int signaled;
};

struct eloop_sock_table {
	size_t count;
	struct eloop_sock *table;
	eloop_event_type type;
	int changed;
};

struct eloop_data {
	int max_sock;

	size_t count; /* sum of all table counts */
#ifdef CONFIG_ELOOP_POLL
	size_t max_pollfd_map; /* number of pollfds_map currently allocated */
	size_t max_poll_fds; /* number of pollfds currently allocated */
	struct pollfd *pollfds;
	struct pollfd **pollfds_map;
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	int max_fd;
	struct eloop_sock *fd_table;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	int epollfd;
	size_t epoll_max_event_num;
	struct epoll_event *epoll_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	int kqueuefd;
	size_t kqueue_nevents;
	struct kevent *kqueue_events;
#endif /* CONFIG_ELOOP_KQUEUE */
	struct eloop_sock_table readers;
	struct eloop_sock_table writers;
	struct eloop_sock_table exceptions;

	struct dl_list timeout;

	size_t signal_count;
	struct eloop_signal *signals;
	int signaled;
	int pending_terminate;

	int terminate;
};

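/* All event loop state lives in this single global instance. */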
static struct eloop_data eloop;


#ifdef WPA_TRACE

static void eloop_sigsegv_handler(int sig)
{
	wpa_trace_show("eloop SIGSEGV");
	abort();
}

static void eloop_trace_sock_add_ref(struct eloop_sock_table *table)
{
	size_t i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_add_ref(&table->table[i], eloop,
				  table->table[i].eloop_data);
		wpa_trace_add_ref(&table->table[i], user,
				  table->table[i].user_data);
	}
}


static void eloop_trace_sock_remove_ref(struct eloop_sock_table *table)
{
	size_t i;

	if (table == NULL || table->table == NULL)
		return;
	for (i = 0; i < table->count; i++) {
		wpa_trace_remove_ref(&table->table[i], eloop,
				     table->table[i].eloop_data);
		wpa_trace_remove_ref(&table->table[i], user,
				     table->table[i].user_data);
	}
}

#else /* WPA_TRACE */

#define eloop_trace_sock_add_ref(table) do { } while (0)
#define eloop_trace_sock_remove_ref(table) do { } while (0)

#endif /* WPA_TRACE */


int eloop_init(void)
{
	os_memset(&eloop, 0, sizeof(eloop));
	dl_list_init(&eloop.timeout);
#ifdef CONFIG_ELOOP_EPOLL
	eloop.epollfd = epoll_create1(0);
	if (eloop.epollfd < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_create1 failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	eloop.readers.type = EVENT_TYPE_READ;
	eloop.writers.type = EVENT_TYPE_WRITE;
	eloop.exceptions.type = EVENT_TYPE_EXCEPTION;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef WPA_TRACE
	signal(SIGSEGV, eloop_sigsegv_handler);
#endif /* WPA_TRACE */
	return 0;
}
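
/*
 * Typical lifecycle (illustrative sketch; my_read_cb and the socket are
 * placeholders, error handling elided):
 *
 *	if (eloop_init() == 0) {
 *		eloop_register_read_sock(sock, my_read_cb, NULL, NULL);
 *		eloop_run();
 *		eloop_destroy();
 *	}
 */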


#ifdef CONFIG_ELOOP_EPOLL
static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct epoll_event ev;

	os_memset(&ev, 0, sizeof(ev));
	switch (type) {
	case EVENT_TYPE_READ:
		ev.events = EPOLLIN;
		break;
	case EVENT_TYPE_WRITE:
		ev.events = EPOLLOUT;
		break;
	/*
	 * Exceptions are always checked when using epoll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling.
	 */
	case EVENT_TYPE_EXCEPTION:
		ev.events = EPOLLERR | EPOLLHUP;
		break;
	}
	ev.data.fd = sock;
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

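/* There is no portable kqueue filter for exception events, so
 * EVENT_TYPE_EXCEPTION falls through to 0 (no filter) below. */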
static short event_type_kevent_filter(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return EVFILT_READ;
	case EVENT_TYPE_WRITE:
		return EVFILT_WRITE;
	default:
		return 0;
	}
}


static int eloop_sock_queue(int sock, eloop_event_type type)
{
	struct kevent ke;

	EV_SET(&ke, sock, event_type_kevent_filter(type), EV_ADD, 0, 0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) == -1) {
		wpa_printf(MSG_ERROR, "%s: kevent(ADD) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return -1;
	}
	return 0;
}

#endif /* CONFIG_ELOOP_KQUEUE */


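/*
 * Add a socket to the given table, growing the backend-specific arrays as
 * needed: the poll() maps get 50 entries of headroom, the shared fd_table
 * gets 16, and the epoll/kqueue event buffers double in size.
 */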
static int eloop_sock_table_add_sock(struct eloop_sock_table *table,
				     int sock, eloop_sock_handler handler,
				     void *eloop_data, void *user_data)
{
#ifdef CONFIG_ELOOP_EPOLL
	struct epoll_event *temp_events;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent *temp_events;
#endif /* CONFIG_ELOOP_KQUEUE */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	struct eloop_sock *temp_table;
	size_t next;
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	struct eloop_sock *tmp;
	int new_max_sock;

	if (sock > eloop.max_sock)
		new_max_sock = sock;
	else
		new_max_sock = eloop.max_sock;

	if (table == NULL)
		return -1;

#ifdef CONFIG_ELOOP_POLL
	if ((size_t) new_max_sock >= eloop.max_pollfd_map) {
		struct pollfd **nmap;
		nmap = os_realloc_array(eloop.pollfds_map, new_max_sock + 50,
					sizeof(struct pollfd *));
		if (nmap == NULL)
			return -1;

		eloop.max_pollfd_map = new_max_sock + 50;
		eloop.pollfds_map = nmap;
	}

	if (eloop.count + 1 > eloop.max_poll_fds) {
		struct pollfd *n;
		size_t nmax = eloop.count + 1 + 50;

		n = os_realloc_array(eloop.pollfds, nmax,
				     sizeof(struct pollfd));
		if (n == NULL)
			return -1;

		eloop.max_poll_fds = nmax;
		eloop.pollfds = n;
	}
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (new_max_sock >= eloop.max_fd) {
		next = new_max_sock + 16;
		temp_table = os_realloc_array(eloop.fd_table, next,
					      sizeof(struct eloop_sock));
		if (temp_table == NULL)
			return -1;

		eloop.max_fd = next;
		eloop.fd_table = temp_table;
	}
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */

#ifdef CONFIG_ELOOP_EPOLL
	if (eloop.count + 1 > eloop.epoll_max_event_num) {
		next = eloop.epoll_max_event_num == 0 ? 8 :
			eloop.epoll_max_event_num * 2;
		temp_events = os_realloc_array(eloop.epoll_events, next,
					       sizeof(struct epoll_event));
		if (temp_events == NULL) {
			wpa_printf(MSG_ERROR, "%s: malloc for epoll failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		eloop.epoll_max_event_num = next;
		eloop.epoll_events = temp_events;
	}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	if (eloop.count + 1 > eloop.kqueue_nevents) {
		next = eloop.kqueue_nevents == 0 ? 8 : eloop.kqueue_nevents * 2;
		temp_events = os_malloc(next * sizeof(*temp_events));
		if (!temp_events) {
			wpa_printf(MSG_ERROR,
				   "%s: malloc for kqueue failed: %s",
				   __func__, strerror(errno));
			return -1;
		}

		os_free(eloop.kqueue_events);
		eloop.kqueue_events = temp_events;
		eloop.kqueue_nevents = next;
	}
#endif /* CONFIG_ELOOP_KQUEUE */

	eloop_trace_sock_remove_ref(table);
	tmp = os_realloc_array(table->table, table->count + 1,
			       sizeof(struct eloop_sock));
	if (tmp == NULL) {
		eloop_trace_sock_add_ref(table);
		return -1;
	}

	tmp[table->count].sock = sock;
	tmp[table->count].eloop_data = eloop_data;
	tmp[table->count].user_data = user_data;
	tmp[table->count].handler = handler;
	wpa_trace_record(&tmp[table->count]);
	table->count++;
	table->table = tmp;
	eloop.max_sock = new_max_sock;
	eloop.count++;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);

#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	if (eloop_sock_queue(sock, table->type) < 0)
		return -1;
	os_memcpy(&eloop.fd_table[sock], &table->table[table->count - 1],
		  sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
	return 0;
}


static void eloop_sock_table_remove_sock(struct eloop_sock_table *table,
					 int sock)
{
#ifdef CONFIG_ELOOP_KQUEUE
	struct kevent ke;
#endif /* CONFIG_ELOOP_KQUEUE */
	size_t i;

	if (table == NULL || table->table == NULL || table->count == 0)
		return;

	for (i = 0; i < table->count; i++) {
		if (table->table[i].sock == sock)
			break;
	}
	if (i == table->count)
		return;
	eloop_trace_sock_remove_ref(table);
	if (i != table->count - 1) {
		os_memmove(&table->table[i], &table->table[i + 1],
			   (table->count - i - 1) *
			   sizeof(struct eloop_sock));
	}
	table->count--;
	eloop.count--;
	table->changed = 1;
	eloop_trace_sock_add_ref(table);
#ifdef CONFIG_ELOOP_EPOLL
	if (epoll_ctl(eloop.epollfd, EPOLL_CTL_DEL, sock, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: epoll_ctl(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	EV_SET(&ke, sock, event_type_kevent_filter(table->type), EV_DELETE, 0,
	       0, 0);
	if (kevent(eloop.kqueuefd, &ke, 1, NULL, 0, NULL) < 0) {
		wpa_printf(MSG_ERROR, "%s: kevent(DEL) for fd=%d failed: %s",
			   __func__, sock, strerror(errno));
		return;
	}
	os_memset(&eloop.fd_table[sock], 0, sizeof(struct eloop_sock));
#endif /* CONFIG_ELOOP_KQUEUE */
}


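/*
 * poll() backend helpers: pollfds_map provides an fd -> struct pollfd
 * lookup so that a single pollfd entry can carry both read and write
 * interest for the same descriptor.
 */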
#ifdef CONFIG_ELOOP_POLL

static struct pollfd * find_pollfd(struct pollfd **pollfds_map, int fd, int mx)
{
	if (fd < mx && fd >= 0)
		return pollfds_map[fd];
	return NULL;
}


static int eloop_sock_table_set_fds(struct eloop_sock_table *readers,
				    struct eloop_sock_table *writers,
				    struct eloop_sock_table *exceptions,
				    struct pollfd *pollfds,
				    struct pollfd **pollfds_map,
				    int max_pollfd_map)
{
	size_t i;
	int nxt = 0;
	int fd;
	struct pollfd *pfd;

	/* Clear pollfd lookup map. It will be re-populated below. */
	os_memset(pollfds_map, 0, sizeof(struct pollfd *) * max_pollfd_map);

	if (readers && readers->table) {
		for (i = 0; i < readers->count; i++) {
			fd = readers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pollfds[nxt].fd = fd;
			pollfds[nxt].events = POLLIN;
			pollfds[nxt].revents = 0;
			pollfds_map[fd] = &(pollfds[nxt]);
			nxt++;
		}
	}

	if (writers && writers->table) {
		for (i = 0; i < writers->count; i++) {
			/*
			 * See if we already added this descriptor, update it
			 * if so.
			 */
			fd = writers->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = 0;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
			pfd->events |= POLLOUT;
		}
	}

	/*
	 * Exceptions are always checked when using poll, but I suppose it's
	 * possible that someone registered a socket *only* for exception
	 * handling. Set the POLLIN bit in this case.
	 */
	if (exceptions && exceptions->table) {
		for (i = 0; i < exceptions->count; i++) {
			/*
			 * See if we already added this descriptor, just use it
			 * if so.
			 */
			fd = exceptions->table[i].sock;
			assert(fd >= 0 && fd < max_pollfd_map);
			pfd = pollfds_map[fd];
			if (!pfd) {
				pfd = &(pollfds[nxt]);
				pfd->events = POLLIN;
				pfd->fd = fd;
				pfd->revents = 0;
				pollfds_map[fd] = pfd;
				nxt++;
			}
		}
	}

	return nxt;
}


static int eloop_sock_table_dispatch_table(struct eloop_sock_table *table,
					   struct pollfd **pollfds_map,
					   int max_pollfd_map,
					   short int revents)
{
	size_t i;
	struct pollfd *pfd;

	if (!table || !table->table)
		return 0;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		pfd = find_pollfd(pollfds_map, table->table[i].sock,
				  max_pollfd_map);
		if (!pfd)
			continue;

		if (!(pfd->revents & revents))
			continue;

		table->table[i].handler(table->table[i].sock,
					table->table[i].eloop_data,
					table->table[i].user_data);
		if (table->changed)
			return 1;
	}

	return 0;
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *readers,
				      struct eloop_sock_table *writers,
				      struct eloop_sock_table *exceptions,
				      struct pollfd **pollfds_map,
				      int max_pollfd_map)
{
	if (eloop_sock_table_dispatch_table(readers, pollfds_map,
					    max_pollfd_map, POLLIN | POLLERR |
					    POLLHUP))
		return; /* pollfds may be invalid at this point */

	if (eloop_sock_table_dispatch_table(writers, pollfds_map,
					    max_pollfd_map, POLLOUT))
		return; /* pollfds may be invalid at this point */

	eloop_sock_table_dispatch_table(exceptions, pollfds_map,
					max_pollfd_map, POLLERR | POLLHUP);
}

#endif /* CONFIG_ELOOP_POLL */

#ifdef CONFIG_ELOOP_SELECT

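/* Note: the select() backend assumes every registered descriptor fits in an
 * fd_set, i.e., sock < FD_SETSIZE. */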
static void eloop_sock_table_set_fds(struct eloop_sock_table *table,
				     fd_set *fds)
{
	size_t i;

	FD_ZERO(fds);

	if (table->table == NULL)
		return;

	for (i = 0; i < table->count; i++) {
		assert(table->table[i].sock >= 0);
		FD_SET(table->table[i].sock, fds);
	}
}


static void eloop_sock_table_dispatch(struct eloop_sock_table *table,
				      fd_set *fds)
{
	size_t i;

	if (table == NULL || table->table == NULL)
		return;

	table->changed = 0;
	for (i = 0; i < table->count; i++) {
		if (FD_ISSET(table->table[i].sock, fds)) {
			table->table[i].handler(table->table[i].sock,
						table->table[i].eloop_data,
						table->table[i].user_data);
			if (table->changed)
				break;
		}
	}
}

#endif /* CONFIG_ELOOP_SELECT */


#ifdef CONFIG_ELOOP_EPOLL
static void eloop_sock_table_dispatch(struct epoll_event *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].data.fd];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}
#endif /* CONFIG_ELOOP_EPOLL */


#ifdef CONFIG_ELOOP_KQUEUE

static void eloop_sock_table_dispatch(struct kevent *events, int nfds)
{
	struct eloop_sock *table;
	int i;

	for (i = 0; i < nfds; i++) {
		table = &eloop.fd_table[events[i].ident];
		if (table->handler == NULL)
			continue;
		table->handler(table->sock, table->eloop_data,
			       table->user_data);
		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed)
			break;
	}
}


static int eloop_sock_table_requeue(struct eloop_sock_table *table)
{
	size_t i;
	int r;

	r = 0;
	for (i = 0; i < table->count && table->table; i++) {
		if (eloop_sock_queue(table->table[i].sock, table->type) == -1)
			r = -1;
	}
	return r;
}

#endif /* CONFIG_ELOOP_KQUEUE */


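/*
 * Re-register all sockets with a fresh kqueue. This is needed after fork(),
 * since a kqueue descriptor is not inherited by the child process.
 */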
int eloop_sock_requeue(void)
{
	int r = 0;

#ifdef CONFIG_ELOOP_KQUEUE
	close(eloop.kqueuefd);
	eloop.kqueuefd = kqueue();
	if (eloop.kqueuefd < 0) {
		wpa_printf(MSG_ERROR, "%s: kqueue failed: %s",
			   __func__, strerror(errno));
		return -1;
	}

	if (eloop_sock_table_requeue(&eloop.readers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.writers) < 0)
		r = -1;
	if (eloop_sock_table_requeue(&eloop.exceptions) < 0)
		r = -1;
#endif /* CONFIG_ELOOP_KQUEUE */

	return r;
}


static void eloop_sock_table_destroy(struct eloop_sock_table *table)
{
	if (table) {
		size_t i;

		for (i = 0; i < table->count && table->table; i++) {
			wpa_printf(MSG_INFO, "ELOOP: remaining socket: "
				   "sock=%d eloop_data=%p user_data=%p "
				   "handler=%p",
				   table->table[i].sock,
				   table->table[i].eloop_data,
				   table->table[i].user_data,
				   table->table[i].handler);
			wpa_trace_dump_funcname("eloop unregistered socket "
						"handler",
						table->table[i].handler);
			wpa_trace_dump("eloop sock", &table->table[i]);
		}
		os_free(table->table);
	}
}


int eloop_register_read_sock(int sock, eloop_sock_handler handler,
			     void *eloop_data, void *user_data)
{
	return eloop_register_sock(sock, EVENT_TYPE_READ, handler,
				   eloop_data, user_data);
}


void eloop_unregister_read_sock(int sock)
{
	eloop_unregister_sock(sock, EVENT_TYPE_READ);
}


static struct eloop_sock_table *eloop_get_sock_table(eloop_event_type type)
{
	switch (type) {
	case EVENT_TYPE_READ:
		return &eloop.readers;
	case EVENT_TYPE_WRITE:
		return &eloop.writers;
	case EVENT_TYPE_EXCEPTION:
		return &eloop.exceptions;
	}

	return NULL;
}


int eloop_register_sock(int sock, eloop_event_type type,
			eloop_sock_handler handler,
			void *eloop_data, void *user_data)
{
	struct eloop_sock_table *table;

	assert(sock >= 0);
	table = eloop_get_sock_table(type);
	return eloop_sock_table_add_sock(table, sock, handler,
					 eloop_data, user_data);
}


void eloop_unregister_sock(int sock, eloop_event_type type)
{
	struct eloop_sock_table *table;

	table = eloop_get_sock_table(type);
	eloop_sock_table_remove_sock(table, sock);
}


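/*
 * Example (illustrative; my_timeout_cb and ctx are placeholders matching
 * eloop_timeout_handler): fire once after 5.5 seconds:
 *
 *	eloop_register_timeout(5, 500000, my_timeout_cb, ctx, NULL);
 */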
int eloop_register_timeout(unsigned int secs, unsigned int usecs,
			   eloop_timeout_handler handler,
			   void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *tmp;
	os_time_t now_sec;

	timeout = os_zalloc(sizeof(*timeout));
	if (timeout == NULL)
		return -1;
	if (os_get_reltime(&timeout->time) < 0) {
		os_free(timeout);
		return -1;
	}
	now_sec = timeout->time.sec;
	timeout->time.sec += secs;
	if (timeout->time.sec < now_sec)
		goto overflow;
	timeout->time.usec += usecs;
	while (timeout->time.usec >= 1000000) {
		timeout->time.sec++;
		timeout->time.usec -= 1000000;
	}
	if (timeout->time.sec < now_sec)
		goto overflow;
	timeout->eloop_data = eloop_data;
	timeout->user_data = user_data;
	timeout->handler = handler;
	wpa_trace_add_ref(timeout, eloop, eloop_data);
	wpa_trace_add_ref(timeout, user, user_data);
	wpa_trace_record(timeout);

	/* Maintain timeouts in order of increasing time */
	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (os_reltime_before(&timeout->time, &tmp->time)) {
			dl_list_add(tmp->list.prev, &timeout->list);
			return 0;
		}
	}
	dl_list_add_tail(&eloop.timeout, &timeout->list);

	return 0;

overflow:
	/*
	 * Integer overflow - treat a timeout this long as effectively
	 * infinite, i.e., one that would never trigger.
	 */
	wpa_printf(MSG_DEBUG,
		   "ELOOP: Too long timeout (secs=%u usecs=%u) to ever happen - ignore it",
		   secs, usecs);
	os_free(timeout);
	return 0;
}


static void eloop_remove_timeout(struct eloop_timeout *timeout)
{
	dl_list_del(&timeout->list);
	wpa_trace_remove_ref(timeout, eloop, timeout->eloop_data);
	wpa_trace_remove_ref(timeout, user, timeout->user_data);
	os_free(timeout);
}


int eloop_cancel_timeout(eloop_timeout_handler handler,
			 void *eloop_data, void *user_data)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data ||
		     eloop_data == ELOOP_ALL_CTX) &&
		    (timeout->user_data == user_data ||
		     user_data == ELOOP_ALL_CTX)) {
			eloop_remove_timeout(timeout);
			removed++;
		}
	}

	return removed;
}


int eloop_cancel_timeout_one(eloop_timeout_handler handler,
			     void *eloop_data, void *user_data,
			     struct os_reltime *remaining)
{
	struct eloop_timeout *timeout, *prev;
	int removed = 0;
	struct os_reltime now;

	os_get_reltime(&now);
	remaining->sec = remaining->usec = 0;

	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		if (timeout->handler == handler &&
		    (timeout->eloop_data == eloop_data) &&
		    (timeout->user_data == user_data)) {
			removed = 1;
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, remaining);
			eloop_remove_timeout(timeout);
			break;
		}
	}
	return removed;
}


int eloop_is_timeout_registered(eloop_timeout_handler handler,
				void *eloop_data, void *user_data)
{
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data)
			return 1;
	}

	return 0;
}


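/*
 * Shorten a matching timeout to the requested interval if that is sooner
 * than the remaining time. Returns 1 if the timeout was rescheduled, 0 if
 * the existing timeout was already sooner, and -1 if no match was found.
 * eloop_replenish_timeout() below is the mirror image and extends instead.
 */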
int eloop_deplete_timeout(unsigned int req_secs, unsigned int req_usecs,
			  eloop_timeout_handler handler, void *eloop_data,
			  void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&requested, &remaining)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


int eloop_replenish_timeout(unsigned int req_secs, unsigned int req_usecs,
			    eloop_timeout_handler handler, void *eloop_data,
			    void *user_data)
{
	struct os_reltime now, requested, remaining;
	struct eloop_timeout *tmp;

	dl_list_for_each(tmp, &eloop.timeout, struct eloop_timeout, list) {
		if (tmp->handler == handler &&
		    tmp->eloop_data == eloop_data &&
		    tmp->user_data == user_data) {
			requested.sec = req_secs;
			requested.usec = req_usecs;
			os_get_reltime(&now);
			os_reltime_sub(&tmp->time, &now, &remaining);
			if (os_reltime_before(&remaining, &requested)) {
				eloop_cancel_timeout(handler, eloop_data,
						     user_data);
				eloop_register_timeout(requested.sec,
						       requested.usec,
						       handler, eloop_data,
						       user_data);
				return 1;
			}
			return 0;
		}
	}

	return -1;
}


#if !(defined(CONFIG_NATIVE_WINDOWS) || defined(__ZEPHYR__))
static void eloop_handle_alarm(int sig)
{
	wpa_printf(MSG_ERROR, "eloop: could not process SIGINT or SIGTERM in "
		   "two seconds. Looks like there is a bug that ends up in a "
		   "busy loop that prevents clean shutdown. Killing program "
		   "forcefully.");
	exit(1);
}
#endif /* !CONFIG_NATIVE_WINDOWS && !__ZEPHYR__ */


static void eloop_handle_signal(int sig)
{
	size_t i;

#if !(defined(CONFIG_NATIVE_WINDOWS) || defined(__ZEPHYR__))
	if ((sig == SIGINT || sig == SIGTERM) && !eloop.pending_terminate) {
		/* Use SIGALRM to break out from potential busy loops that
		 * would not allow the program to be killed. */
		eloop.pending_terminate = 1;
		signal(SIGALRM, eloop_handle_alarm);
		alarm(2);
	}
#endif /* !CONFIG_NATIVE_WINDOWS && !__ZEPHYR__ */

	eloop.signaled++;
	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].sig == sig) {
			eloop.signals[i].signaled++;
			break;
		}
	}
}


static void eloop_process_pending_signals(void)
{
	size_t i;

	if (eloop.signaled == 0)
		return;
	eloop.signaled = 0;

	if (eloop.pending_terminate) {
#if !(defined(CONFIG_NATIVE_WINDOWS) || defined(__ZEPHYR__))
		alarm(0);
#endif /* !CONFIG_NATIVE_WINDOWS && !__ZEPHYR__ */
		eloop.pending_terminate = 0;
	}

	for (i = 0; i < eloop.signal_count; i++) {
		if (eloop.signals[i].signaled) {
			eloop.signals[i].signaled = 0;
			eloop.signals[i].handler(eloop.signals[i].sig,
						 eloop.signals[i].user_data);
		}
	}
}


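/*
 * Signal handlers registered here only set flags in async-signal context;
 * the user callbacks run later from the main loop via
 * eloop_process_pending_signals().
 */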
int eloop_register_signal(int sig, eloop_signal_handler handler,
			  void *user_data)
{
	struct eloop_signal *tmp;

	tmp = os_realloc_array(eloop.signals, eloop.signal_count + 1,
			       sizeof(struct eloop_signal));
	if (tmp == NULL)
		return -1;

	tmp[eloop.signal_count].sig = sig;
	tmp[eloop.signal_count].user_data = user_data;
	tmp[eloop.signal_count].handler = handler;
	tmp[eloop.signal_count].signaled = 0;
	eloop.signal_count++;
	eloop.signals = tmp;
	signal(sig, eloop_handle_signal);

	return 0;
}


int eloop_register_signal_terminate(eloop_signal_handler handler,
				    void *user_data)
{
	int ret = eloop_register_signal(SIGINT, handler, user_data);
	if (ret == 0)
		ret = eloop_register_signal(SIGTERM, handler, user_data);
	return ret;
}


int eloop_register_signal_reconfig(eloop_signal_handler handler,
				   void *user_data)
{
#ifdef CONFIG_NATIVE_WINDOWS
	return 0;
#else /* CONFIG_NATIVE_WINDOWS */
	return eloop_register_signal(SIGHUP, handler, user_data);
#endif /* CONFIG_NATIVE_WINDOWS */
}


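/*
 * Main loop: each iteration computes the time until the nearest timeout,
 * waits for socket activity, processes pending signals, dispatches due
 * timeouts, and finally dispatches socket handlers unless a handler
 * changed the registrations mid-iteration.
 */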
void eloop_run(void)
{
#ifdef CONFIG_ELOOP_POLL
	int num_poll_fds;
	int timeout_ms = 0;
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
	fd_set *rfds, *wfds, *efds;
	struct timeval _tv;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
	int timeout_ms = -1;
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	struct timespec ts;
#endif /* CONFIG_ELOOP_KQUEUE */
	int res;
	struct os_reltime tv, now;

#ifdef CONFIG_ELOOP_SELECT
	rfds = os_malloc(sizeof(*rfds));
	wfds = os_malloc(sizeof(*wfds));
	efds = os_malloc(sizeof(*efds));
	if (rfds == NULL || wfds == NULL || efds == NULL)
		goto out;
#endif /* CONFIG_ELOOP_SELECT */

	while (!eloop.terminate &&
	       (!dl_list_empty(&eloop.timeout) || eloop.readers.count > 0 ||
		eloop.writers.count > 0 || eloop.exceptions.count > 0)) {
		struct eloop_timeout *timeout;

		if (eloop.pending_terminate) {
			/*
			 * This may happen in some corner cases where a signal
			 * is received during a blocking operation. We need to
			 * process the pending signals and exit if requested to
			 * avoid hitting the SIGALRM limit if the blocking
			 * operation took more than two seconds.
			 */
			eloop_process_pending_signals();
			if (eloop.terminate)
				break;
		}

		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (os_reltime_before(&now, &timeout->time))
				os_reltime_sub(&timeout->time, &now, &tv);
			else
				tv.sec = tv.usec = 0;
#if defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL)
			timeout_ms = tv.sec * 1000 + tv.usec / 1000;
#endif /* defined(CONFIG_ELOOP_POLL) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_SELECT
			_tv.tv_sec = tv.sec;
			_tv.tv_usec = tv.usec;
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_KQUEUE
			ts.tv_sec = tv.sec;
			ts.tv_nsec = tv.usec * 1000L;
#endif /* CONFIG_ELOOP_KQUEUE */
		}

#ifdef CONFIG_ELOOP_POLL
		num_poll_fds = eloop_sock_table_set_fds(
			&eloop.readers, &eloop.writers, &eloop.exceptions,
			eloop.pollfds, eloop.pollfds_map,
			eloop.max_pollfd_map);
		res = poll(eloop.pollfds, num_poll_fds,
			   timeout ? timeout_ms : -1);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_set_fds(&eloop.readers, rfds);
		eloop_sock_table_set_fds(&eloop.writers, wfds);
		eloop_sock_table_set_fds(&eloop.exceptions, efds);
		res = select(eloop.max_sock + 1, rfds, wfds, efds,
			     timeout ? &_tv : NULL);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = epoll_wait(eloop.epollfd, eloop.epoll_events,
					 eloop.count, timeout_ms);
		}
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		if (eloop.count == 0) {
			res = 0;
		} else {
			res = kevent(eloop.kqueuefd, NULL, 0,
				     eloop.kqueue_events, eloop.kqueue_nevents,
				     timeout ? &ts : NULL);
		}
#endif /* CONFIG_ELOOP_KQUEUE */
		if (res < 0 && errno != EINTR && errno != 0) {
			wpa_printf(MSG_ERROR, "eloop: %s: %s",
#ifdef CONFIG_ELOOP_POLL
				   "poll"
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
				   "select"
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
				   "epoll"
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
				   "kqueue"
#endif /* CONFIG_ELOOP_KQUEUE */
				   , strerror(errno));
			goto out;
		}

		eloop.readers.changed = 0;
		eloop.writers.changed = 0;
		eloop.exceptions.changed = 0;

		eloop_process_pending_signals();

		/* check if some registered timeouts have occurred */
		timeout = dl_list_first(&eloop.timeout, struct eloop_timeout,
					list);
		if (timeout) {
			os_get_reltime(&now);
			if (!os_reltime_before(&now, &timeout->time)) {
				void *eloop_data = timeout->eloop_data;
				void *user_data = timeout->user_data;
				eloop_timeout_handler handler =
					timeout->handler;
				eloop_remove_timeout(timeout);
				handler(eloop_data, user_data);
			}
		}

		if (res <= 0)
			continue;

		if (eloop.readers.changed ||
		    eloop.writers.changed ||
		    eloop.exceptions.changed) {
			/*
			 * Sockets may have been closed and reopened with the
			 * same FD in the signal or timeout handlers, so we
			 * must skip the previous results and check again
			 * whether any of the currently registered sockets have
			 * events.
			 */
			continue;
		}

#ifdef CONFIG_ELOOP_POLL
		eloop_sock_table_dispatch(&eloop.readers, &eloop.writers,
					  &eloop.exceptions, eloop.pollfds_map,
					  eloop.max_pollfd_map);
#endif /* CONFIG_ELOOP_POLL */
#ifdef CONFIG_ELOOP_SELECT
		eloop_sock_table_dispatch(&eloop.readers, rfds);
		eloop_sock_table_dispatch(&eloop.writers, wfds);
		eloop_sock_table_dispatch(&eloop.exceptions, efds);
#endif /* CONFIG_ELOOP_SELECT */
#ifdef CONFIG_ELOOP_EPOLL
		eloop_sock_table_dispatch(eloop.epoll_events, res);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
		eloop_sock_table_dispatch(eloop.kqueue_events, res);
#endif /* CONFIG_ELOOP_KQUEUE */
	}

	eloop.terminate = 0;
out:
#ifdef CONFIG_ELOOP_SELECT
	os_free(rfds);
	os_free(wfds);
	os_free(efds);
#endif /* CONFIG_ELOOP_SELECT */
	return;
}


void eloop_terminate(void)
{
	eloop.terminate = 1;
}


void eloop_destroy(void)
{
	struct eloop_timeout *timeout, *prev;
	struct os_reltime now;

	os_get_reltime(&now);
	dl_list_for_each_safe(timeout, prev, &eloop.timeout,
			      struct eloop_timeout, list) {
		int sec, usec;
		sec = timeout->time.sec - now.sec;
		usec = timeout->time.usec - now.usec;
		if (timeout->time.usec < now.usec) {
			sec--;
			usec += 1000000;
		}
		wpa_printf(MSG_INFO, "ELOOP: remaining timeout: %d.%06d "
			   "eloop_data=%p user_data=%p handler=%p",
			   sec, usec, timeout->eloop_data, timeout->user_data,
			   timeout->handler);
		wpa_trace_dump_funcname("eloop unregistered timeout handler",
					timeout->handler);
		wpa_trace_dump("eloop timeout", timeout);
		eloop_remove_timeout(timeout);
	}
	eloop_sock_table_destroy(&eloop.readers);
	eloop_sock_table_destroy(&eloop.writers);
	eloop_sock_table_destroy(&eloop.exceptions);
	os_free(eloop.signals);

#ifdef CONFIG_ELOOP_POLL
	os_free(eloop.pollfds);
	os_free(eloop.pollfds_map);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_EPOLL) || defined(CONFIG_ELOOP_KQUEUE)
	os_free(eloop.fd_table);
#endif /* CONFIG_ELOOP_EPOLL || CONFIG_ELOOP_KQUEUE */
#ifdef CONFIG_ELOOP_EPOLL
	os_free(eloop.epoll_events);
	close(eloop.epollfd);
#endif /* CONFIG_ELOOP_EPOLL */
#ifdef CONFIG_ELOOP_KQUEUE
	os_free(eloop.kqueue_events);
	close(eloop.kqueuefd);
#endif /* CONFIG_ELOOP_KQUEUE */
}


int eloop_terminated(void)
{
	return eloop.terminate || eloop.pending_terminate;
}


void eloop_wait_for_read_sock(int sock)
{
#ifdef CONFIG_ELOOP_POLL
	struct pollfd pfd;

	if (sock < 0)
		return;

	os_memset(&pfd, 0, sizeof(pfd));
	pfd.fd = sock;
	pfd.events = POLLIN;

	poll(&pfd, 1, -1);
#endif /* CONFIG_ELOOP_POLL */
#if defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL)
	/*
	 * We could use epoll() here, but that would require four system
	 * calls: epoll_create1(), epoll_ctl() for ADD, epoll_wait(), and
	 * close() on the epoll fd. select() performs better for this
	 * one-shot wait.
	 */
	fd_set rfds;

	if (sock < 0)
		return;

	FD_ZERO(&rfds);
	FD_SET(sock, &rfds);
	select(sock + 1, &rfds, NULL, NULL, NULL);
#endif /* defined(CONFIG_ELOOP_SELECT) || defined(CONFIG_ELOOP_EPOLL) */
#ifdef CONFIG_ELOOP_KQUEUE
	int kfd;
	struct kevent ke1, ke2;

	kfd = kqueue();
	if (kfd == -1)
		return;
	EV_SET(&ke1, sock, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0);
	kevent(kfd, &ke1, 1, &ke2, 1, NULL);
	close(kfd);
#endif /* CONFIG_ELOOP_KQUEUE */
}

#ifdef CONFIG_ELOOP_SELECT
#undef CONFIG_ELOOP_SELECT
#endif /* CONFIG_ELOOP_SELECT */