uloop: add support for integrating with a different event loop
/*
 * uloop - event loop implementation
 *
 * Copyright (C) 2010-2016 Felix Fietkau <nbd@openwrt.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <signal.h>
#include <time.h>
#include <stdbool.h>
#include <limits.h>

#include "uloop.h"
#include "utils.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
#include <sys/wait.h>

struct uloop_fd_event {
	struct uloop_fd *fd;
	unsigned int events;
};

struct uloop_fd_stack {
	struct uloop_fd_stack *next;
	struct uloop_fd *fd;
	unsigned int events;
};

static struct uloop_fd_stack *fd_stack = NULL;

#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static int uloop_status = 0;
static bool do_sigchld = false;

static struct uloop_fd_event cur_fds[ULOOP_MAX_EVENTS];
static int cur_fd, cur_nfds;
static int uloop_run_depth = 0;

/* optional hook: notified whenever an fd is registered (with its flags) or removed (flags == 0) */
uloop_fd_handler uloop_fd_set_cb = NULL;

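/*
 * Illustrative sketch (not part of this file): a host event loop can point
 * uloop_fd_set_cb at a function of its own to mirror uloop's fd registrations
 * into its native poll set.  The host_loop_* calls below are hypothetical
 * placeholders for whatever API the embedding loop provides.
 *
 *	static void host_fd_set(struct uloop_fd *fd, unsigned int events)
 *	{
 *		if (events & (ULOOP_READ | ULOOP_WRITE))
 *			host_loop_watch_fd(fd->fd, events);	// hypothetical host API
 *		else
 *			host_loop_unwatch_fd(fd->fd);		// invoked with events == 0 on delete
 *	}
 *
 *	uloop_fd_set_cb = host_fd_set;
 */
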
int uloop_fd_add(struct uloop_fd *sock, unsigned int flags);

#ifdef USE_KQUEUE
#include "uloop-kqueue.c"
#endif

#ifdef USE_EPOLL
#include "uloop-epoll.c"
#endif

static void waker_consume(struct uloop_fd *fd, unsigned int events)
{
	char buf[4];

	while (read(fd->fd, buf, 4) > 0)
		;
}

/* self-pipe used by the signal handlers to wake up the poll loop */
static int waker_pipe = -1;
static struct uloop_fd waker_fd = {
	.fd = -1,
	.cb = waker_consume,
};

static void waker_init_fd(int fd)
{
	fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
}

static int waker_init(void)
{
	int fds[2];

	if (waker_pipe >= 0)
		return 0;

	if (pipe(fds) < 0)
		return -1;

	waker_init_fd(fds[0]);
	waker_init_fd(fds[1]);
	waker_pipe = fds[1];

	waker_fd.fd = fds[0];
	waker_fd.cb = waker_consume;
	uloop_fd_add(&waker_fd, ULOOP_READ);

	return 0;
}

static void uloop_setup_signals(bool add);

int uloop_init(void)
{
	if (uloop_init_pollfd() < 0)
		return -1;

	if (waker_init() < 0) {
		uloop_done();
		return -1;
	}

	uloop_setup_signals(true);

	return 0;
}
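
/*
 * Usage sketch (illustrative): the standalone lifecycle is uloop_init(),
 * register fds/timers/processes, uloop_run() until cancelled (e.g. by
 * SIGINT/SIGTERM), then uloop_done() to release the poll fd and waker pipe.
 *
 *	int main(void)
 *	{
 *		uloop_init();
 *		// ... add uloop_fd / uloop_timeout / uloop_process objects here ...
 *		uloop_run();
 *		uloop_done();
 *		return 0;
 *	}
 */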

static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
{
	struct uloop_fd_stack *cur;

	/*
	 * Do not buffer events for level-triggered fds, they will keep firing.
	 * Caller needs to take care of recursion issues.
	 */
	if (!(fd->flags & ULOOP_EDGE_TRIGGER))
		return false;

	for (cur = fd_stack; cur; cur = cur->next) {
		if (cur->fd != fd)
			continue;

		if (events < 0)
			cur->fd = NULL;
		else
			cur->events |= events | ULOOP_EVENT_BUFFERED;

		return true;
	}

	return false;
}

static void uloop_run_events(int64_t timeout)
{
	struct uloop_fd_event *cur;
	struct uloop_fd *fd;

	if (!cur_nfds) {
		cur_fd = 0;
		cur_nfds = uloop_fetch_events(timeout);
		if (cur_nfds < 0)
			cur_nfds = 0;
	}

	while (cur_nfds > 0) {
		struct uloop_fd_stack stack_cur;
		unsigned int events;

		cur = &cur_fds[cur_fd++];
		cur_nfds--;

		fd = cur->fd;
		events = cur->events;
		if (!fd)
			continue;

		if (!fd->cb)
			continue;

		if (uloop_fd_stack_event(fd, cur->events))
			continue;

		stack_cur.next = fd_stack;
		stack_cur.fd = fd;
		fd_stack = &stack_cur;
		do {
			stack_cur.events = 0;
			fd->cb(fd, events);
			events = stack_cur.events & ULOOP_EVENT_MASK;
		} while (stack_cur.fd && events);
		fd_stack = stack_cur.next;

		return;
	}
}

int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
		return uloop_fd_delete(sock);

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	if (uloop_fd_set_cb)
		uloop_fd_set_cb(sock, flags);

	sock->flags = flags;
	sock->registered = true;
	sock->eof = false;
	sock->error = false;

out:
	return ret;
}

int uloop_fd_delete(struct uloop_fd *fd)
{
	int i;

	for (i = 0; i < cur_nfds; i++) {
		if (cur_fds[cur_fd + i].fd != fd)
			continue;

		cur_fds[cur_fd + i].fd = NULL;
	}

	if (!fd->registered)
		return 0;

	if (uloop_fd_set_cb)
		uloop_fd_set_cb(fd, 0);

	fd->registered = false;
	fd->flags = 0;
	uloop_fd_stack_event(fd, -1);
	return __uloop_fd_delete(fd);
}
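
/*
 * Usage sketch (illustrative, hypothetical names): registering a socket.
 * uloop_fd_add() puts the fd into non-blocking mode unless ULOOP_BLOCKING is
 * set, so read callbacks should loop until the fd is drained.
 *
 *	static void sock_cb(struct uloop_fd *u, unsigned int events)
 *	{
 *		char buf[256];
 *
 *		while (read(u->fd, buf, sizeof(buf)) > 0)
 *			;			// consume available data
 *
 *		if (u->eof || u->error)
 *			uloop_fd_delete(u);
 *	}
 *
 *	static struct uloop_fd sock_ufd = { .cb = sock_cb };
 *
 *	sock_ufd.fd = sock;		// an already-open socket (illustrative)
 *	uloop_fd_add(&sock_ufd, ULOOP_READ | ULOOP_EDGE_TRIGGER);
 */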

/* time difference t1 - t2 in milliseconds */
static int64_t tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		(t1->tv_sec - t2->tv_sec) * 1000 +
		(t1->tv_usec - t2->tv_usec) / 1000;
}

int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}

static void uloop_gettime(struct timeval *tv)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}

int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	uloop_gettime(time);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	if (time->tv_usec > 1000000) {
		time->tv_sec++;
		time->tv_usec -= 1000000;
	}

	return uloop_timeout_add(timeout);
}

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}

int uloop_timeout_remaining(struct uloop_timeout *timeout)
{
	int64_t td;
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	td = tv_diff(&timeout->time, &now);

	if (td > INT_MAX)
		return INT_MAX;
	else if (td < INT_MIN)
		return INT_MIN;
	else
		return (int)td;
}

int64_t uloop_timeout_remaining64(struct uloop_timeout *timeout)
{
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	return tv_diff(&timeout->time, &now);
}
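
/*
 * Usage sketch (illustrative): a periodic timer.  uloop_timeout_set() arms
 * the timeout relative to the monotonic clock; re-arming from the callback
 * yields a repeating timer.
 *
 *	static void tick_cb(struct uloop_timeout *t)
 *	{
 *		// ... periodic work ...
 *		uloop_timeout_set(t, 1000);	// fire again in ~1000 ms
 *	}
 *
 *	static struct uloop_timeout tick = { .cb = tick_cb };
 *
 *	uloop_timeout_set(&tick, 1000);
 */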

int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}
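
/*
 * Usage sketch (illustrative, hypothetical helper): watching a child process.
 * The callback receives the raw waitpid() status, so decode it with the
 * standard WIFEXITED()/WEXITSTATUS() macros.
 *
 *	static void child_cb(struct uloop_process *p, int ret)
 *	{
 *		if (WIFEXITED(ret))
 *			fprintf(stderr, "child %d exited with %d\n",
 *				(int)p->pid, WEXITSTATUS(ret));
 *	}
 *
 *	static struct uloop_process child = { .cb = child_cb };
 *
 *	child.pid = spawn_child();	// hypothetical helper returning the child's pid
 *	uloop_process_add(&child);
 */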

static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid < 0 && errno == EINTR)
			continue;

		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}

static void uloop_signal_wake(void)
{
	do {
		if (write(waker_pipe, "w", 1) < 0) {
			if (errno == EINTR)
				continue;
		}
		break;
	} while (1);
}

static void uloop_handle_sigint(int signo)
{
	uloop_status = signo;
	uloop_cancelled = true;
	uloop_signal_wake();
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
	uloop_signal_wake();
}

static void uloop_install_handler(int signum, void (*handler)(int), struct sigaction* old, bool add)
{
	struct sigaction s;
	struct sigaction *act;

	act = NULL;
	sigaction(signum, NULL, &s);

	if (add) {
		if (s.sa_handler == SIG_DFL) { /* Do not override existing custom signal handlers */
			memcpy(old, &s, sizeof(struct sigaction));
			s.sa_handler = handler;
			s.sa_flags = 0;
			act = &s;
		}
	}
	else if (s.sa_handler == handler) { /* Do not restore if someone modified our handler */
		act = old;
	}

	if (act != NULL)
		sigaction(signum, act, NULL);
}

static void uloop_ignore_signal(int signum, bool ignore)
{
	struct sigaction s;
	void *new_handler = NULL;

	sigaction(signum, NULL, &s);

	if (ignore) {
		if (s.sa_handler == SIG_DFL) /* Ignore only if there isn't any custom handler */
			new_handler = SIG_IGN;
	} else {
		if (s.sa_handler == SIG_IGN) /* Restore only if noone modified our SIG_IGN */
			new_handler = SIG_DFL;
	}

	if (new_handler) {
		s.sa_handler = new_handler;
		s.sa_flags = 0;
		sigaction(signum, &s, NULL);
	}
}

static void uloop_setup_signals(bool add)
{
	static struct sigaction old_sigint, old_sigchld, old_sigterm;

	uloop_install_handler(SIGINT, uloop_handle_sigint, &old_sigint, add);
	uloop_install_handler(SIGTERM, uloop_handle_sigint, &old_sigterm, add);

	if (uloop_handle_sigchld)
		uloop_install_handler(SIGCHLD, uloop_sigchld, &old_sigchld, add);

	uloop_ignore_signal(SIGPIPE, add);
}

int uloop_get_next_timeout(void)
{
	struct uloop_timeout *timeout;
	struct timeval tv;
	int64_t diff;

	if (list_empty(&timeouts))
		return -1;

	uloop_gettime(&tv);

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, &tv);
	if (diff < 0)
		return 0;
	if (diff > INT_MAX)
		return INT_MAX;

	return diff;
}

static void uloop_process_timeouts(void)
{
	struct uloop_timeout *t;
	struct timeval tv;

	if (list_empty(&timeouts))
		return;

	uloop_gettime(&tv);
	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, &tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}

bool uloop_cancelling(void)
{
	return uloop_run_depth > 0 && uloop_cancelled;
}

int uloop_run_timeout(int timeout)
{
	int next_time = 0;

	uloop_run_depth++;

	uloop_status = 0;
	uloop_cancelled = false;
	do {
		uloop_process_timeouts();

		if (do_sigchld)
			uloop_handle_processes();

		if (uloop_cancelled)
			break;

		next_time = uloop_get_next_timeout();
		if (timeout >= 0 && (next_time < 0 || timeout < next_time))
			next_time = timeout;
		uloop_run_events(next_time);
	} while (!uloop_cancelled && timeout < 0);

	--uloop_run_depth;

	return uloop_status;
}
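
/*
 * Integration sketch (illustrative, hypothetical host API): one plausible way
 * to drive uloop from a different event loop is to mirror fds through
 * uloop_fd_set_cb (see above), let the host loop wait for readiness and for
 * uloop_get_next_timeout(), and then call uloop_run_timeout(0) to perform a
 * single non-blocking uloop iteration.
 *
 *	static void host_dispatch_uloop(void)
 *	{
 *		int next;
 *
 *		// one non-blocking pass: expired timers, SIGCHLD handling, fd events
 *		uloop_run_timeout(0);
 *
 *		// ask the host loop to call back when uloop's next timer is due
 *		next = uloop_get_next_timeout();
 *		if (next >= 0)
 *			host_loop_schedule_timer(next, host_dispatch_uloop);	// hypothetical
 *	}
 */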

void uloop_done(void)
{
	uloop_setup_signals(false);

	if (poll_fd >= 0) {
		close(poll_fd);
		poll_fd = -1;
	}

	if (waker_pipe >= 0) {
		uloop_fd_delete(&waker_fd);
		close(waker_pipe);
		close(waker_fd.fd);
		waker_pipe = -1;
	}

	uloop_clear_timeouts();
	uloop_clear_processes();
}