ustream: prevent recursive calls to the read callback
[project/libubox.git] / uloop.c
1 /*
2 * uloop - event loop implementation
3 *
4 * Copyright (C) 2010-2016 Felix Fietkau <nbd@openwrt.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 #include <sys/time.h>
19 #include <sys/types.h>
20
21 #include <unistd.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <errno.h>
25 #include <poll.h>
26 #include <string.h>
27 #include <fcntl.h>
28 #include <stdbool.h>
29 #include <limits.h>
30
31 #include "uloop.h"
32 #include "utils.h"
33
34 #ifdef USE_KQUEUE
35 #include <sys/event.h>
36 #endif
37 #ifdef USE_EPOLL
38 #include <sys/epoll.h>
39 #include <sys/timerfd.h>
40 #endif
41 #include <sys/wait.h>
42
/* A fetched-but-not-yet-dispatched poll event for a registered fd */
struct uloop_fd_event {
	struct uloop_fd *fd;	/* set to NULL if the fd is deleted while queued */
	unsigned int events;	/* ULOOP_READ/ULOOP_WRITE style event bits */
};
47
/*
 * Stack frame recording an fd whose callback is currently executing.
 * Events raised for that fd while its callback runs are buffered in
 * `events` instead of recursing into the callback (see
 * uloop_fd_stack_event()).
 */
struct uloop_fd_stack {
	struct uloop_fd_stack *next;	/* frame of the next outer callback */
	struct uloop_fd *fd;		/* NULLed if the fd is deleted mid-callback */
	unsigned int events;		/* buffered event bits + ULOOP_EVENT_BUFFERED */
};
53
/* Head of the stack of currently-running fd callbacks (innermost first) */
static struct uloop_fd_stack *fd_stack = NULL;

/* Max fd events fetched from the poll backend per batch */
#define ULOOP_MAX_EVENTS 10

/* Sorted lists: timeouts by expiry time, processes by pid, signals by number */
static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);
static struct list_head signals = LIST_HEAD_INIT(signals);

static int poll_fd = -1;		/* epoll/kqueue backend descriptor */
bool uloop_cancelled = false;		/* set (e.g. by SIGINT) to stop the loop */
bool uloop_handle_sigchld = true;	/* whether uloop installs a SIGCHLD handler */
static int uloop_status = 0;		/* signal number that cancelled the loop */
static bool do_sigchld = false;		/* SIGCHLD seen; reap children in the loop */

/* Current batch of fetched events; cur_fd indexes the next one to dispatch */
static struct uloop_fd_event cur_fds[ULOOP_MAX_EVENTS];
static int cur_fd, cur_nfds;
static int uloop_run_depth = 0;		/* nesting depth of uloop_run_timeout() */

/* Optional user hook invoked whenever an fd is (de)registered */
uloop_fd_handler uloop_fd_set_cb = NULL;

int uloop_fd_add(struct uloop_fd *sock, unsigned int flags);
75
76 #ifdef USE_KQUEUE
77 #include "uloop-kqueue.c"
78 #endif
79
80 #ifdef USE_EPOLL
81 #include "uloop-epoll.c"
82 #endif
83
/*
 * Mark signal number `signo` (1..64) in the 64-bit mask; out-of-range
 * numbers are silently ignored.
 */
static void set_signo(uint64_t *signums, int signo)
{
	if (signo >= 1 && signo <= 64)
		/* 1ull, not 1u: shifting a 32-bit 1 by 32..63 is undefined
		 * behavior and dropped every signal above 32 */
		*signums |= (1ull << (signo - 1));
}
89
/* Test whether signal number `signo` (1..64) is set in the 64-bit mask */
static bool get_signo(uint64_t signums, int signo)
{
	/* 1ull, not 1u: a 32-bit shift can never reach bits 32..63 */
	return (signo >= 1) && (signo <= 64) && (signums & (1ull << (signo - 1)));
}
94
95 static void signal_consume(struct uloop_fd *fd, unsigned int events)
96 {
97 struct uloop_signal *usig, *usig_next;
98 uint64_t signums = 0;
99 uint8_t buf[32];
100 ssize_t nsigs;
101
102 do {
103 nsigs = read(fd->fd, buf, sizeof(buf));
104
105 for (ssize_t i = 0; i < nsigs; i++)
106 set_signo(&signums, buf[i]);
107 }
108 while (nsigs > 0);
109
110 list_for_each_entry_safe(usig, usig_next, &signals, list)
111 if (get_signo(signums, usig->signo))
112 usig->cb(usig);
113 }
114
/* Write end of the self-pipe; written from signal handlers to wake poll */
static int waker_pipe = -1;
/* Read end, registered with the loop; its callback dispatches signals */
static struct uloop_fd waker_fd = {
	.fd = -1,
	.cb = signal_consume,
};
120
/* Make a pipe end close-on-exec and non-blocking */
static void waker_init_fd(int fd)
{
	int fd_flags = fcntl(fd, F_GETFD);
	int fl_flags = fcntl(fd, F_GETFL);

	fcntl(fd, F_SETFD, fd_flags | FD_CLOEXEC);
	fcntl(fd, F_SETFL, fl_flags | O_NONBLOCK);
}
126
127 static int waker_init(void)
128 {
129 int fds[2];
130
131 if (waker_pipe >= 0)
132 return 0;
133
134 if (pipe(fds) < 0)
135 return -1;
136
137 waker_init_fd(fds[0]);
138 waker_init_fd(fds[1]);
139 waker_pipe = fds[1];
140
141 waker_fd.fd = fds[0];
142 waker_fd.cb = signal_consume;
143 uloop_fd_add(&waker_fd, ULOOP_READ);
144
145 return 0;
146 }
147
static void uloop_setup_signals(bool add);

/*
 * Initialize the event loop: poll backend, waker pipe and default
 * signal handlers. Returns 0 on success, -1 on failure (with any
 * partially-created state torn down again).
 */
int uloop_init(void)
{
	if (uloop_init_pollfd() < 0)
		return -1;

	if (waker_init() >= 0) {
		uloop_setup_signals(true);
		return 0;
	}

	/* waker pipe failed: release the poll backend again */
	uloop_done();
	return -1;
}
164
/*
 * Recursion guard: if `fd`'s callback is already running somewhere up
 * the call stack, record the new events in that frame instead of
 * re-entering the callback. `events < 0` marks the fd as deleted.
 * Returns true if the event was consumed (buffered or invalidated).
 */
static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
{
	struct uloop_fd_stack *cur;

	/*
	 * Do not buffer events for level-triggered fds, they will keep firing.
	 * Caller needs to take care of recursion issues.
	 */
	if (!(fd->flags & ULOOP_EDGE_TRIGGER))
		return false;

	for (cur = fd_stack; cur; cur = cur->next) {
		if (cur->fd != fd)
			continue;

		if (events < 0)
			cur->fd = NULL;	/* fd deleted: stop the drain loop */
		else
			cur->events |= events | ULOOP_EVENT_BUFFERED;

		return true;
	}

	return false;
}
190
/*
 * Fetch a batch of fd events from the poll backend (waiting up to
 * `timeout` ms; negative means indefinitely) if none are pending, then
 * dispatch the callback of AT MOST ONE ready fd and return, so the
 * caller can interleave timer/process handling between fds.
 */
static void uloop_run_events(int64_t timeout)
{
	struct uloop_fd_event *cur;
	struct uloop_fd *fd;

	if (!cur_nfds) {
		cur_fd = 0;
		cur_nfds = uloop_fetch_events(timeout);
		if (cur_nfds < 0)
			cur_nfds = 0;
	}

	while (cur_nfds > 0) {
		struct uloop_fd_stack stack_cur;
		unsigned int events;

		cur = &cur_fds[cur_fd++];
		cur_nfds--;

		fd = cur->fd;
		events = cur->events;
		if (!fd)	/* entry cleared by uloop_fd_delete() */
			continue;

		if (!fd->cb)
			continue;

		/* callback already running up-stack: buffer, don't recurse */
		if (uloop_fd_stack_event(fd, cur->events))
			continue;

		/* push a stack frame so nested events for this fd get
		 * buffered, then drain anything buffered while the
		 * callback ran (unless the fd was deleted meanwhile) */
		stack_cur.next = fd_stack;
		stack_cur.fd = fd;
		fd_stack = &stack_cur;
		do {
			stack_cur.events = 0;
			fd->cb(fd, events);
			events = stack_cur.events & ULOOP_EVENT_MASK;
		} while (stack_cur.fd && events);
		fd_stack = stack_cur.next;

		return;	/* one fd per call */
	}
}
234
235 int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
236 {
237 unsigned int fl;
238 int ret;
239
240 if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
241 return uloop_fd_delete(sock);
242
243 if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
244 fl = fcntl(sock->fd, F_GETFL, 0);
245 fl |= O_NONBLOCK;
246 fcntl(sock->fd, F_SETFL, fl);
247 }
248
249 ret = register_poll(sock, flags);
250 if (ret < 0)
251 goto out;
252
253 if (uloop_fd_set_cb)
254 uloop_fd_set_cb(sock, flags);
255
256 sock->flags = flags;
257 sock->registered = true;
258 sock->eof = false;
259 sock->error = false;
260
261 out:
262 return ret;
263 }
264
265 int uloop_fd_delete(struct uloop_fd *fd)
266 {
267 int ret;
268 int i;
269
270 for (i = 0; i < cur_nfds; i++) {
271 if (cur_fds[cur_fd + i].fd != fd)
272 continue;
273
274 cur_fds[cur_fd + i].fd = NULL;
275 }
276
277 if (!fd->registered)
278 return 0;
279
280 if (uloop_fd_set_cb)
281 uloop_fd_set_cb(fd, 0);
282
283 fd->registered = false;
284 uloop_fd_stack_event(fd, -1);
285 ret = __uloop_fd_delete(fd);
286 fd->flags = 0;
287
288 return ret;
289 }
290
/*
 * Difference t1 - t2 in milliseconds. Widen to int64_t BEFORE the
 * multiplication: on platforms with 32-bit time_t/suseconds_t the
 * original expression could overflow before being converted to the
 * 64-bit return type.
 */
static int64_t tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		((int64_t)t1->tv_sec - t2->tv_sec) * 1000 +
		((int64_t)t1->tv_usec - t2->tv_usec) / 1000;
}
297
298 int uloop_timeout_add(struct uloop_timeout *timeout)
299 {
300 struct uloop_timeout *tmp;
301 struct list_head *h = &timeouts;
302
303 if (timeout->pending)
304 return -1;
305
306 list_for_each_entry(tmp, &timeouts, list) {
307 if (tv_diff(&tmp->time, &timeout->time) > 0) {
308 h = &tmp->list;
309 break;
310 }
311 }
312
313 list_add_tail(&timeout->list, h);
314 timeout->pending = true;
315
316 return 0;
317 }
318
319 static void uloop_gettime(struct timeval *tv)
320 {
321 struct timespec ts;
322
323 clock_gettime(CLOCK_MONOTONIC, &ts);
324 tv->tv_sec = ts.tv_sec;
325 tv->tv_usec = ts.tv_nsec / 1000;
326 }
327
328 int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
329 {
330 struct timeval *time = &timeout->time;
331
332 if (timeout->pending)
333 uloop_timeout_cancel(timeout);
334
335 uloop_gettime(time);
336
337 time->tv_sec += msecs / 1000;
338 time->tv_usec += (msecs % 1000) * 1000;
339
340 if (time->tv_usec > 1000000) {
341 time->tv_sec++;
342 time->tv_usec -= 1000000;
343 }
344
345 return uloop_timeout_add(timeout);
346 }
347
348 int uloop_timeout_cancel(struct uloop_timeout *timeout)
349 {
350 if (!timeout->pending)
351 return -1;
352
353 list_del(&timeout->list);
354 timeout->pending = false;
355
356 return 0;
357 }
358
359 int uloop_timeout_remaining(struct uloop_timeout *timeout)
360 {
361 int64_t td;
362 struct timeval now;
363
364 if (!timeout->pending)
365 return -1;
366
367 uloop_gettime(&now);
368
369 td = tv_diff(&timeout->time, &now);
370
371 if (td > INT_MAX)
372 return INT_MAX;
373 else if (td < INT_MIN)
374 return INT_MIN;
375 else
376 return (int)td;
377 }
378
379 int64_t uloop_timeout_remaining64(struct uloop_timeout *timeout)
380 {
381 struct timeval now;
382
383 if (!timeout->pending)
384 return -1;
385
386 uloop_gettime(&now);
387
388 return tv_diff(&timeout->time, &now);
389 }
390
391 int uloop_process_add(struct uloop_process *p)
392 {
393 struct uloop_process *tmp;
394 struct list_head *h = &processes;
395
396 if (p->pending)
397 return -1;
398
399 list_for_each_entry(tmp, &processes, list) {
400 if (tmp->pid > p->pid) {
401 h = &tmp->list;
402 break;
403 }
404 }
405
406 list_add_tail(&p->list, h);
407 p->pending = true;
408
409 return 0;
410 }
411
412 int uloop_process_delete(struct uloop_process *p)
413 {
414 if (!p->pending)
415 return -1;
416
417 list_del(&p->list);
418 p->pending = false;
419
420 return 0;
421 }
422
/*
 * Reap exited children after SIGCHLD and run the callbacks of matching
 * uloop_process entries. The list is sorted by pid, so the inner scan
 * stops at the first entry with a larger pid.
 */
static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid < 0 && errno == EINTR)
			continue;

		if (pid <= 0)	/* no (more) exited children */
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			/* unlink before the callback so it may free/re-add p */
			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}

}
452
/* Arm (or re-arm) a periodic timer firing every `msecs` milliseconds;
 * forwards to the poll backend's timer implementation */
int uloop_interval_set(struct uloop_interval *timer, unsigned int msecs)
{
	return timer_register(timer, msecs);
}
457
/* Disarm a periodic timer; forwards to the poll backend */
int uloop_interval_cancel(struct uloop_interval *timer)
{
	return timer_remove(timer);
}
462
/* Time until the next expiry of a periodic timer, as reported by the
 * poll backend's timer implementation */
int64_t uloop_interval_remaining(struct uloop_interval *timer)
{
	return timer_next(timer);
}
467
468 static void uloop_signal_wake(int signo)
469 {
470 uint8_t sigbyte = signo;
471
472 if (signo == SIGCHLD)
473 do_sigchld = true;
474
475 do {
476 if (write(waker_pipe, &sigbyte, 1) < 0) {
477 if (errno == EINTR)
478 continue;
479 }
480 break;
481 } while (1);
482 }
483
/*
 * SIGINT/SIGTERM handler: record the signal as the loop's exit status,
 * flag cancellation and poke the waker pipe so a blocking poll returns.
 */
static void uloop_handle_sigint(int signo)
{
	uloop_status = signo;
	uloop_cancelled = true;
	uloop_signal_wake(signo);
}
490
/*
 * add=true: install `handler` for `signum` and save the previous action
 * in *old — but only if the current handler is SIG_DFL, so user-installed
 * handlers are never overridden.
 * add=false: restore *old — but only if our handler is still installed.
 */
static void uloop_install_handler(int signum, void (*handler)(int), struct sigaction* old, bool add)
{
	struct sigaction s;
	struct sigaction *act;

	act = NULL;
	sigaction(signum, NULL, &s);	/* query the current disposition */

	if (add) {
		if (s.sa_handler == SIG_DFL) { /* Do not override existing custom signal handlers */
			memcpy(old, &s, sizeof(struct sigaction));
			s.sa_handler = handler;
			s.sa_flags = 0;
			act = &s;
		}
	}
	else if (s.sa_handler == handler) { /* Do not restore if someone modified our handler */
		act = old;
	}

	if (act != NULL)
		sigaction(signum, act, NULL);
}
514
515 static void uloop_ignore_signal(int signum, bool ignore)
516 {
517 struct sigaction s;
518 void *new_handler = NULL;
519
520 sigaction(signum, NULL, &s);
521
522 if (ignore) {
523 if (s.sa_handler == SIG_DFL) /* Ignore only if there isn't any custom handler */
524 new_handler = SIG_IGN;
525 } else {
526 if (s.sa_handler == SIG_IGN) /* Restore only if noone modified our SIG_IGN */
527 new_handler = SIG_DFL;
528 }
529
530 if (new_handler) {
531 s.sa_handler = new_handler;
532 s.sa_flags = 0;
533 sigaction(signum, &s, NULL);
534 }
535 }
536
/*
 * Install (add=true) or restore (add=false) uloop's default handlers:
 * SIGINT/SIGTERM stop the loop, SIGCHLD (if uloop_handle_sigchld)
 * triggers child reaping, and SIGPIPE is ignored. Saved dispositions
 * persist across calls in the static sigaction slots.
 */
static void uloop_setup_signals(bool add)
{
	static struct sigaction old_sigint, old_sigchld, old_sigterm;

	uloop_install_handler(SIGINT, uloop_handle_sigint, &old_sigint, add);
	uloop_install_handler(SIGTERM, uloop_handle_sigint, &old_sigterm, add);

	if (uloop_handle_sigchld)
		uloop_install_handler(SIGCHLD, uloop_signal_wake, &old_sigchld, add);

	uloop_ignore_signal(SIGPIPE, add);
}
549
/*
 * Register a callback for an arbitrary signal. The entry is inserted
 * into the list sorted by signal number; the original disposition is
 * saved in s->orig, and the signal is pointed at uloop_signal_wake()
 * (self-pipe dispatch) unless that is already the handler.
 * Returns 0, or -1 if `s` is already registered.
 */
int uloop_signal_add(struct uloop_signal *s)
{
	struct list_head *h = &signals;
	struct uloop_signal *tmp;
	struct sigaction sa;

	if (s->pending)
		return -1;

	list_for_each_entry(tmp, &signals, list) {
		if (tmp->signo > s->signo) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&s->list, h);
	s->pending = true;

	sigaction(s->signo, NULL, &s->orig);

	if (s->orig.sa_handler != uloop_signal_wake) {
		sa.sa_handler = uloop_signal_wake;
		sa.sa_flags = 0;
		sigemptyset(&sa.sa_mask);
		sigaction(s->signo, &sa, NULL);
	}

	return 0;
}
580
581 int uloop_signal_delete(struct uloop_signal *s)
582 {
583 if (!s->pending)
584 return -1;
585
586 list_del(&s->list);
587 s->pending = false;
588
589 if (s->orig.sa_handler != uloop_signal_wake)
590 sigaction(s->signo, &s->orig, NULL);
591
592 return 0;
593 }
594
595 int uloop_get_next_timeout(void)
596 {
597 struct uloop_timeout *timeout;
598 struct timeval tv;
599 int64_t diff;
600
601 if (list_empty(&timeouts))
602 return -1;
603
604 uloop_gettime(&tv);
605
606 timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
607 diff = tv_diff(&timeout->time, &tv);
608 if (diff < 0)
609 return 0;
610 if (diff > INT_MAX)
611 return INT_MAX;
612
613 return diff;
614 }
615
616 static void uloop_process_timeouts(void)
617 {
618 struct uloop_timeout *t;
619 struct timeval tv;
620
621 if (list_empty(&timeouts))
622 return;
623
624 uloop_gettime(&tv);
625 while (!list_empty(&timeouts)) {
626 t = list_first_entry(&timeouts, struct uloop_timeout, list);
627
628 if (tv_diff(&t->time, &tv) > 0)
629 break;
630
631 uloop_timeout_cancel(t);
632 if (t->cb)
633 t->cb(t);
634 }
635 }
636
/* Cancel every pending timeout without firing it (used by uloop_done()) */
static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}
644
/* Drop every registered process watcher (used by uloop_done()) */
static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}
652
/* True while an active uloop_run() invocation is in the process of
 * being cancelled */
bool uloop_cancelling(void)
{
	return uloop_run_depth > 0 && uloop_cancelled;
}
657
/*
 * Run the event loop. timeout < 0: iterate until cancelled;
 * timeout >= 0: perform one iteration, waiting at most `timeout` ms
 * for fd events. Returns the signal number that stopped the loop, or 0.
 * May be nested; uloop_run_depth tracks recursion for uloop_cancelling().
 */
int uloop_run_timeout(int timeout)
{
	int next_time = 0;

	uloop_run_depth++;

	uloop_status = 0;
	uloop_cancelled = false;
	do {
		uloop_process_timeouts();

		if (do_sigchld)
			uloop_handle_processes();

		if (uloop_cancelled)
			break;

		/* block no longer than the nearest timer or the caller's cap */
		next_time = uloop_get_next_timeout();
		if (timeout >= 0 && (next_time < 0 || timeout < next_time))
			next_time = timeout;
		uloop_run_events(next_time);
	} while (!uloop_cancelled && timeout < 0);

	--uloop_run_depth;

	return uloop_status;
}
685
/*
 * Tear down the event loop: restore the default signal handlers, close
 * the poll backend and both ends of the waker pipe, and cancel all
 * pending timeouts and process watchers. Safe to call after partial
 * initialization.
 */
void uloop_done(void)
{
	uloop_setup_signals(false);

	if (poll_fd >= 0) {
		close(poll_fd);
		poll_fd = -1;
	}

	if (waker_pipe >= 0) {
		uloop_fd_delete(&waker_fd);
		close(waker_pipe);
		close(waker_fd.fd);
		waker_pipe = -1;
	}

	uloop_clear_timeouts();
	uloop_clear_processes();
}