ebd78398fe1f054e0376dab833cea07ef14d336a
[project/libubox.git] / uloop.c
1 /*
2 * uloop - event loop implementation
3 *
4 * Copyright (C) 2010-2016 Felix Fietkau <nbd@openwrt.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 #include <sys/time.h>
19 #include <sys/types.h>
20
21 #include <unistd.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <errno.h>
25 #include <poll.h>
26 #include <string.h>
27 #include <fcntl.h>
28 #include <stdbool.h>
29 #include <limits.h>
30
31 #include "uloop.h"
32 #include "utils.h"
33
34 #ifdef USE_KQUEUE
35 #include <sys/event.h>
36 #endif
37 #ifdef USE_EPOLL
38 #include <sys/epoll.h>
39 #include <sys/timerfd.h>
40 #endif
41 #include <sys/wait.h>
42
/* One entry of the batch of fd events fetched from the poll backend. */
struct uloop_fd_event {
	struct uloop_fd *fd;
	unsigned int events;	/* ULOOP_* event mask for this dispatch */
};

/* Stack frame linking the uloop_fds whose callbacks are currently
 * running, so nested loop iterations can merge events into the active
 * frame instead of recursing (see uloop_fd_stack_event()). */
struct uloop_fd_stack {
	struct uloop_fd_stack *next;
	struct uloop_fd *fd;
	unsigned int events;
};

static struct uloop_fd_stack *fd_stack = NULL;

/* Maximum number of fd events fetched from the backend per poll call. */
#define ULOOP_MAX_EVENTS 10

/* Pending timeouts sorted by deadline; process watchers sorted by pid;
 * signal watchers sorted by signal number. */
static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);
static struct list_head signals = LIST_HEAD_INIT(signals);

static int poll_fd = -1;		/* epoll/kqueue fd; -1 while uninitialized */
bool uloop_cancelled = false;		/* set to make uloop_run() return */
bool uloop_handle_sigchld = true;	/* install SIGCHLD handling in uloop_init() */
static int uloop_status = 0;		/* signal number that cancelled the loop, or 0 */
static bool do_sigchld = false;		/* SIGCHLD seen; reap children on next pass */

/* Current batch of fetched events and the dispatch cursor into it. */
static struct uloop_fd_event cur_fds[ULOOP_MAX_EVENTS];
static int cur_fd, cur_nfds;
static int uloop_run_depth = 0;		/* nesting level of uloop_run() calls */

/* Optional hook, notified whenever an fd's registered flags change. */
uloop_fd_handler uloop_fd_set_cb = NULL;

int uloop_fd_add(struct uloop_fd *sock, unsigned int flags);
75
76 #ifdef USE_KQUEUE
77 #include "uloop-kqueue.c"
78 #endif
79
80 #ifdef USE_EPOLL
81 #include "uloop-epoll.c"
82 #endif
83
/* Mark signal number 'signo' (1..64) in the 64-bit signal bitmask. */
static void set_signo(uint64_t *signums, int signo)
{
	/* must be 1ull, not 1u: shifting a 32-bit constant by 32 or more
	 * is undefined behavior and silently dropped signals > 32 */
	if (signo >= 1 && signo <= 64)
		*signums |= (1ull << (signo - 1));
}
89
/* Test whether signal number 'signo' (1..64) is set in the bitmask.
 * Uses 1ull for the same shift-width reason as set_signo(). */
static bool get_signo(uint64_t signums, int signo)
{
	return (signo >= 1) && (signo <= 64) && (signums & (1ull << (signo - 1)));
}
94
95 static void signal_consume(struct uloop_fd *fd, unsigned int events)
96 {
97 struct uloop_signal *usig, *usig_next;
98 uint64_t signums = 0;
99 uint8_t buf[32];
100 ssize_t nsigs;
101
102 do {
103 nsigs = read(fd->fd, buf, sizeof(buf));
104
105 for (ssize_t i = 0; i < nsigs; i++)
106 set_signo(&signums, buf[i]);
107 }
108 while (nsigs > 0);
109
110 list_for_each_entry_safe(usig, usig_next, &signals, list)
111 if (get_signo(signums, usig->signo))
112 usig->cb(usig);
113 }
114
/* Write end of the self-pipe used to wake the event loop from signal
 * handlers; -1 until waker_init() has run. */
static int waker_pipe = -1;
/* Read end, registered with the loop so pending bytes are consumed. */
static struct uloop_fd waker_fd = {
	.fd = -1,
	.cb = signal_consume,
};
120
/* Switch an fd to close-on-exec and non-blocking mode. */
static void waker_init_fd(int fd)
{
	int fd_flags = fcntl(fd, F_GETFD);
	int fl_flags = fcntl(fd, F_GETFL);

	fcntl(fd, F_SETFD, fd_flags | FD_CLOEXEC);
	fcntl(fd, F_SETFL, fl_flags | O_NONBLOCK);
}
126
127 static int waker_init(void)
128 {
129 int fds[2];
130
131 if (waker_pipe >= 0)
132 return 0;
133
134 if (pipe(fds) < 0)
135 return -1;
136
137 waker_init_fd(fds[0]);
138 waker_init_fd(fds[1]);
139 waker_pipe = fds[1];
140
141 waker_fd.fd = fds[0];
142 waker_fd.cb = signal_consume;
143 uloop_fd_add(&waker_fd, ULOOP_READ);
144
145 return 0;
146 }
147
148 static void uloop_setup_signals(bool add);
149
150 int uloop_init(void)
151 {
152 if (uloop_init_pollfd() < 0)
153 return -1;
154
155 if (waker_init() < 0) {
156 uloop_done();
157 return -1;
158 }
159
160 uloop_setup_signals(true);
161
162 return 0;
163 }
164
165 static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
166 {
167 struct uloop_fd_stack *cur;
168
169 /*
170 * Do not buffer events for level-triggered fds, they will keep firing.
171 * Caller needs to take care of recursion issues.
172 */
173 if (!(fd->flags & ULOOP_EDGE_TRIGGER))
174 return false;
175
176 for (cur = fd_stack; cur; cur = cur->next) {
177 if (cur->fd != fd)
178 continue;
179
180 if (events < 0)
181 cur->fd = NULL;
182 else
183 cur->events |= events | ULOOP_EVENT_BUFFERED;
184
185 return true;
186 }
187
188 return false;
189 }
190
/*
 * Dispatch at most one fd callback per invocation. If no events are
 * left over from a previous poll, fetch a fresh batch first (blocking
 * for up to 'timeout' ms). Returning after a single callback lets the
 * main loop re-check timeouts between events.
 */
static void uloop_run_events(int64_t timeout)
{
	struct uloop_fd_event *cur;
	struct uloop_fd *fd;

	if (!cur_nfds) {
		cur_fd = 0;
		cur_nfds = uloop_fetch_events(timeout);
		if (cur_nfds < 0)
			cur_nfds = 0;
	}

	while (cur_nfds > 0) {
		struct uloop_fd_stack stack_cur;
		unsigned int events;

		cur = &cur_fds[cur_fd++];
		cur_nfds--;

		fd = cur->fd;
		events = cur->events;
		/* entry may have been cleared by uloop_fd_delete() */
		if (!fd)
			continue;

		if (!fd->cb)
			continue;

		/* fd already dispatching up the stack: events were merged
		 * into that frame, do not recurse into its callback */
		if (uloop_fd_stack_event(fd, cur->events))
			continue;

		/* push a frame so nested uloop_run() calls buffer further
		 * events for this fd instead of re-entering its callback */
		stack_cur.next = fd_stack;
		stack_cur.fd = fd;
		fd_stack = &stack_cur;
		do {
			stack_cur.events = 0;
			fd->cb(fd, events);
			/* rerun the callback with events buffered while it ran;
			 * stop if the fd was deleted (stack_cur.fd == NULL) */
			events = stack_cur.events & ULOOP_EVENT_MASK;
		} while (stack_cur.fd && events);
		fd_stack = stack_cur.next;

		/* dispatch only one fd per call */
		return;
	}
}
234
235 int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
236 {
237 unsigned int fl;
238 int ret;
239
240 if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
241 return uloop_fd_delete(sock);
242
243 if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
244 fl = fcntl(sock->fd, F_GETFL, 0);
245 fl |= O_NONBLOCK;
246 fcntl(sock->fd, F_SETFL, fl);
247 }
248
249 ret = register_poll(sock, flags);
250 if (ret < 0)
251 goto out;
252
253 if (uloop_fd_set_cb)
254 uloop_fd_set_cb(sock, flags);
255
256 sock->flags = flags;
257 sock->registered = true;
258 sock->eof = false;
259 sock->error = false;
260
261 out:
262 return ret;
263 }
264
265 int uloop_fd_delete(struct uloop_fd *fd)
266 {
267 int i;
268
269 for (i = 0; i < cur_nfds; i++) {
270 if (cur_fds[cur_fd + i].fd != fd)
271 continue;
272
273 cur_fds[cur_fd + i].fd = NULL;
274 }
275
276 if (!fd->registered)
277 return 0;
278
279 if (uloop_fd_set_cb)
280 uloop_fd_set_cb(fd, 0);
281
282 fd->registered = false;
283 fd->flags = 0;
284 uloop_fd_stack_event(fd, -1);
285 return __uloop_fd_delete(fd);
286 }
287
/*
 * Difference t1 - t2 in milliseconds.
 * The operands are widened to 64 bit before scaling so the result
 * cannot overflow on platforms with a 32-bit time_t / suseconds_t.
 */
static int64_t tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		((int64_t)t1->tv_sec - t2->tv_sec) * 1000 +
		((int64_t)t1->tv_usec - t2->tv_usec) / 1000;
}
294
295 int uloop_timeout_add(struct uloop_timeout *timeout)
296 {
297 struct uloop_timeout *tmp;
298 struct list_head *h = &timeouts;
299
300 if (timeout->pending)
301 return -1;
302
303 list_for_each_entry(tmp, &timeouts, list) {
304 if (tv_diff(&tmp->time, &timeout->time) > 0) {
305 h = &tmp->list;
306 break;
307 }
308 }
309
310 list_add_tail(&timeout->list, h);
311 timeout->pending = true;
312
313 return 0;
314 }
315
316 static void uloop_gettime(struct timeval *tv)
317 {
318 struct timespec ts;
319
320 clock_gettime(CLOCK_MONOTONIC, &ts);
321 tv->tv_sec = ts.tv_sec;
322 tv->tv_usec = ts.tv_nsec / 1000;
323 }
324
325 int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
326 {
327 struct timeval *time = &timeout->time;
328
329 if (timeout->pending)
330 uloop_timeout_cancel(timeout);
331
332 uloop_gettime(time);
333
334 time->tv_sec += msecs / 1000;
335 time->tv_usec += (msecs % 1000) * 1000;
336
337 if (time->tv_usec > 1000000) {
338 time->tv_sec++;
339 time->tv_usec -= 1000000;
340 }
341
342 return uloop_timeout_add(timeout);
343 }
344
345 int uloop_timeout_cancel(struct uloop_timeout *timeout)
346 {
347 if (!timeout->pending)
348 return -1;
349
350 list_del(&timeout->list);
351 timeout->pending = false;
352
353 return 0;
354 }
355
356 int uloop_timeout_remaining(struct uloop_timeout *timeout)
357 {
358 int64_t td;
359 struct timeval now;
360
361 if (!timeout->pending)
362 return -1;
363
364 uloop_gettime(&now);
365
366 td = tv_diff(&timeout->time, &now);
367
368 if (td > INT_MAX)
369 return INT_MAX;
370 else if (td < INT_MIN)
371 return INT_MIN;
372 else
373 return (int)td;
374 }
375
376 int64_t uloop_timeout_remaining64(struct uloop_timeout *timeout)
377 {
378 struct timeval now;
379
380 if (!timeout->pending)
381 return -1;
382
383 uloop_gettime(&now);
384
385 return tv_diff(&timeout->time, &now);
386 }
387
388 int uloop_process_add(struct uloop_process *p)
389 {
390 struct uloop_process *tmp;
391 struct list_head *h = &processes;
392
393 if (p->pending)
394 return -1;
395
396 list_for_each_entry(tmp, &processes, list) {
397 if (tmp->pid > p->pid) {
398 h = &tmp->list;
399 break;
400 }
401 }
402
403 list_add_tail(&p->list, h);
404 p->pending = true;
405
406 return 0;
407 }
408
409 int uloop_process_delete(struct uloop_process *p)
410 {
411 if (!p->pending)
412 return -1;
413
414 list_del(&p->list);
415 p->pending = false;
416
417 return 0;
418 }
419
/*
 * Reap all exited children and fire the callbacks of the matching
 * registered process watchers. Invoked from the event loop after
 * uloop_signal_wake() flagged a SIGCHLD via do_sigchld.
 */
static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid < 0 && errno == EINTR)
			continue;

		/* 0: no more exited children; < 0: no children at all */
		if (pid <= 0)
			return;

		/* list is sorted by pid: skip smaller pids, stop at the
		 * first larger one */
		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}

}
449
/* Arm a periodic timer firing every 'msecs' ms via the poll backend
 * (timerfd on epoll, EVFILT_TIMER on kqueue). Returns backend result. */
int uloop_interval_set(struct uloop_interval *timer, unsigned int msecs)
{
	return timer_register(timer, msecs);
}
454
/* Stop a periodic timer previously armed with uloop_interval_set(). */
int uloop_interval_cancel(struct uloop_interval *timer)
{
	return timer_remove(timer);
}
459
/* Time until the next expiry of a periodic timer, as reported by the
 * poll backend. */
int64_t uloop_interval_remaining(struct uloop_interval *timer)
{
	return timer_next(timer);
}
464
465 static void uloop_signal_wake(int signo)
466 {
467 uint8_t sigbyte = signo;
468
469 if (signo == SIGCHLD)
470 do_sigchld = true;
471
472 do {
473 if (write(waker_pipe, &sigbyte, 1) < 0) {
474 if (errno == EINTR)
475 continue;
476 }
477 break;
478 } while (1);
479 }
480
481 static void uloop_handle_sigint(int signo)
482 {
483 uloop_status = signo;
484 uloop_cancelled = true;
485 uloop_signal_wake(signo);
486 }
487
488 static void uloop_install_handler(int signum, void (*handler)(int), struct sigaction* old, bool add)
489 {
490 struct sigaction s;
491 struct sigaction *act;
492
493 act = NULL;
494 sigaction(signum, NULL, &s);
495
496 if (add) {
497 if (s.sa_handler == SIG_DFL) { /* Do not override existing custom signal handlers */
498 memcpy(old, &s, sizeof(struct sigaction));
499 s.sa_handler = handler;
500 s.sa_flags = 0;
501 act = &s;
502 }
503 }
504 else if (s.sa_handler == handler) { /* Do not restore if someone modified our handler */
505 act = old;
506 }
507
508 if (act != NULL)
509 sigaction(signum, act, NULL);
510 }
511
512 static void uloop_ignore_signal(int signum, bool ignore)
513 {
514 struct sigaction s;
515 void *new_handler = NULL;
516
517 sigaction(signum, NULL, &s);
518
519 if (ignore) {
520 if (s.sa_handler == SIG_DFL) /* Ignore only if there isn't any custom handler */
521 new_handler = SIG_IGN;
522 } else {
523 if (s.sa_handler == SIG_IGN) /* Restore only if noone modified our SIG_IGN */
524 new_handler = SIG_DFL;
525 }
526
527 if (new_handler) {
528 s.sa_handler = new_handler;
529 s.sa_flags = 0;
530 sigaction(signum, &s, NULL);
531 }
532 }
533
534 static void uloop_setup_signals(bool add)
535 {
536 static struct sigaction old_sigint, old_sigchld, old_sigterm;
537
538 uloop_install_handler(SIGINT, uloop_handle_sigint, &old_sigint, add);
539 uloop_install_handler(SIGTERM, uloop_handle_sigint, &old_sigterm, add);
540
541 if (uloop_handle_sigchld)
542 uloop_install_handler(SIGCHLD, uloop_signal_wake, &old_sigchld, add);
543
544 uloop_ignore_signal(SIGPIPE, add);
545 }
546
547 int uloop_signal_add(struct uloop_signal *s)
548 {
549 struct list_head *h = &signals;
550 struct uloop_signal *tmp;
551 struct sigaction sa;
552
553 if (s->pending)
554 return -1;
555
556 list_for_each_entry(tmp, &signals, list) {
557 if (tmp->signo > s->signo) {
558 h = &tmp->list;
559 break;
560 }
561 }
562
563 list_add_tail(&s->list, h);
564 s->pending = true;
565
566 sigaction(s->signo, NULL, &s->orig);
567
568 if (s->orig.sa_handler != uloop_signal_wake) {
569 sa.sa_handler = uloop_signal_wake;
570 sa.sa_flags = 0;
571 sigemptyset(&sa.sa_mask);
572 sigaction(s->signo, &sa, NULL);
573 }
574
575 return 0;
576 }
577
578 int uloop_signal_delete(struct uloop_signal *s)
579 {
580 if (!s->pending)
581 return -1;
582
583 list_del(&s->list);
584 s->pending = false;
585
586 if (s->orig.sa_handler != uloop_signal_wake)
587 sigaction(s->signo, &s->orig, NULL);
588
589 return 0;
590 }
591
592 int uloop_get_next_timeout(void)
593 {
594 struct uloop_timeout *timeout;
595 struct timeval tv;
596 int64_t diff;
597
598 if (list_empty(&timeouts))
599 return -1;
600
601 uloop_gettime(&tv);
602
603 timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
604 diff = tv_diff(&timeout->time, &tv);
605 if (diff < 0)
606 return 0;
607 if (diff > INT_MAX)
608 return INT_MAX;
609
610 return diff;
611 }
612
613 static void uloop_process_timeouts(void)
614 {
615 struct uloop_timeout *t;
616 struct timeval tv;
617
618 if (list_empty(&timeouts))
619 return;
620
621 uloop_gettime(&tv);
622 while (!list_empty(&timeouts)) {
623 t = list_first_entry(&timeouts, struct uloop_timeout, list);
624
625 if (tv_diff(&t->time, &tv) > 0)
626 break;
627
628 uloop_timeout_cancel(t);
629 if (t->cb)
630 t->cb(t);
631 }
632 }
633
634 static void uloop_clear_timeouts(void)
635 {
636 struct uloop_timeout *t, *tmp;
637
638 list_for_each_entry_safe(t, tmp, &timeouts, list)
639 uloop_timeout_cancel(t);
640 }
641
642 static void uloop_clear_processes(void)
643 {
644 struct uloop_process *p, *tmp;
645
646 list_for_each_entry_safe(p, tmp, &processes, list)
647 uloop_process_delete(p);
648 }
649
650 bool uloop_cancelling(void)
651 {
652 return uloop_run_depth > 0 && uloop_cancelled;
653 }
654
/*
 * Run the event loop. With timeout < 0 it loops until cancelled (via
 * SIGINT/SIGTERM or uloop_end()); with timeout >= 0 it performs one
 * iteration, waiting at most 'timeout' ms for events.
 * Returns the signal number that stopped the loop, or 0.
 * Re-entrant: nesting is tracked through uloop_run_depth.
 */
int uloop_run_timeout(int timeout)
{
	int next_time = 0;

	uloop_run_depth++;

	uloop_status = 0;
	uloop_cancelled = false;
	do {
		uloop_process_timeouts();

		/* a SIGCHLD arrived: reap children before polling again */
		if (do_sigchld)
			uloop_handle_processes();

		if (uloop_cancelled)
			break;

		/* never block past the nearest timeout deadline */
		next_time = uloop_get_next_timeout();
		if (timeout >= 0 && (next_time < 0 || timeout < next_time))
			next_time = timeout;
		uloop_run_events(next_time);
	} while (!uloop_cancelled && timeout < 0);

	--uloop_run_depth;

	return uloop_status;
}
682
683 void uloop_done(void)
684 {
685 uloop_setup_signals(false);
686
687 if (poll_fd >= 0) {
688 close(poll_fd);
689 poll_fd = -1;
690 }
691
692 if (waker_pipe >= 0) {
693 uloop_fd_delete(&waker_fd);
694 close(waker_pipe);
695 close(waker_fd.fd);
696 waker_pipe = -1;
697 }
698
699 uloop_clear_timeouts();
700 uloop_clear_processes();
701 }