uloop: add uloop_timeout_remaining64
[project/libubox.git] / uloop.c
/*
 * uloop - event loop implementation
 *
 * Copyright (C) 2010-2016 Felix Fietkau <nbd@openwrt.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <stdbool.h>
#include <limits.h>

#include "uloop.h"
#include "utils.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
#include <sys/wait.h>

struct uloop_fd_event {
	struct uloop_fd *fd;
	unsigned int events;
};

struct uloop_fd_stack {
	struct uloop_fd_stack *next;
	struct uloop_fd *fd;
	unsigned int events;
};

static struct uloop_fd_stack *fd_stack = NULL;

#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static int uloop_status = 0;
static bool do_sigchld = false;

static struct uloop_fd_event cur_fds[ULOOP_MAX_EVENTS];
static int cur_fd, cur_nfds;
static int uloop_run_depth = 0;

int uloop_fd_add(struct uloop_fd *sock, unsigned int flags);

#ifdef USE_KQUEUE
#include "uloop-kqueue.c"
#endif

#ifdef USE_EPOLL
#include "uloop-epoll.c"
#endif

static void waker_consume(struct uloop_fd *fd, unsigned int events)
{
	char buf[4];

	while (read(fd->fd, buf, 4) > 0)
		;
}

static int waker_pipe = -1;
static struct uloop_fd waker_fd = {
	.fd = -1,
	.cb = waker_consume,
};

static void waker_init_fd(int fd)
{
	fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
}

static int waker_init(void)
{
	int fds[2];

	if (waker_pipe >= 0)
		return 0;

	if (pipe(fds) < 0)
		return -1;

	waker_init_fd(fds[0]);
	waker_init_fd(fds[1]);
	waker_pipe = fds[1];

	waker_fd.fd = fds[0];
	waker_fd.cb = waker_consume;
	uloop_fd_add(&waker_fd, ULOOP_READ);

	return 0;
}

static void uloop_setup_signals(bool add);

int uloop_init(void)
{
	if (uloop_init_pollfd() < 0)
		return -1;

	if (waker_init() < 0) {
		uloop_done();
		return -1;
	}

	uloop_setup_signals(true);

	return 0;
}
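
/*
 * Usage sketch (illustrative, not part of this file): a uloop client
 * typically calls uloop_init() once, registers its fds, timeouts or
 * processes, runs uloop_run() (declared in uloop.h as a wrapper around
 * uloop_run_timeout(-1)) and finishes with uloop_done(). The names
 * quit_timer/quit_cb below are made up for the example; uloop_end()
 * comes from uloop.h and simply sets uloop_cancelled.
 *
 *	#include <stdio.h>
 *	#include <libubox/uloop.h>
 *
 *	static void quit_cb(struct uloop_timeout *t)
 *	{
 *		printf("5 seconds elapsed, stopping\n");
 *		uloop_end();
 *	}
 *
 *	static struct uloop_timeout quit_timer = { .cb = quit_cb };
 *
 *	int main(void)
 *	{
 *		if (uloop_init() < 0)
 *			return 1;
 *		uloop_timeout_set(&quit_timer, 5000);
 *		uloop_run();
 *		uloop_done();
 *		return 0;
 *	}
 */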

static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
{
	struct uloop_fd_stack *cur;

	/*
	 * Do not buffer events for level-triggered fds; they will keep firing.
	 * Caller needs to take care of recursion issues.
	 */
	if (!(fd->flags & ULOOP_EDGE_TRIGGER))
		return false;

	for (cur = fd_stack; cur; cur = cur->next) {
		if (cur->fd != fd)
			continue;

		if (events < 0)
			cur->fd = NULL;
		else
			cur->events |= events | ULOOP_EVENT_BUFFERED;

		return true;
	}

	return false;
}

static void uloop_run_events(int timeout)
{
	struct uloop_fd_event *cur;
	struct uloop_fd *fd;

	if (!cur_nfds) {
		cur_fd = 0;
		cur_nfds = uloop_fetch_events(timeout);
		if (cur_nfds < 0)
			cur_nfds = 0;
	}

	while (cur_nfds > 0) {
		struct uloop_fd_stack stack_cur;
		unsigned int events;

		cur = &cur_fds[cur_fd++];
		cur_nfds--;

		fd = cur->fd;
		events = cur->events;
		if (!fd)
			continue;

		if (!fd->cb)
			continue;

		if (uloop_fd_stack_event(fd, cur->events))
			continue;

		stack_cur.next = fd_stack;
		stack_cur.fd = fd;
		fd_stack = &stack_cur;
		do {
			stack_cur.events = 0;
			fd->cb(fd, events);
			events = stack_cur.events & ULOOP_EVENT_MASK;
		} while (stack_cur.fd && events);
		fd_stack = stack_cur.next;

		return;
	}
}

int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
		return uloop_fd_delete(sock);

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	sock->registered = true;
	sock->eof = false;
	sock->error = false;

out:
	return ret;
}
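
/*
 * Usage sketch (illustrative): registering a descriptor for read
 * events. As the code above shows, uloop_fd_add() puts the fd into
 * non-blocking mode unless ULOOP_BLOCKING is passed; with
 * ULOOP_EDGE_TRIGGER the callback should drain the fd before
 * returning. The names listener/on_readable are examples only.
 *
 *	static void on_readable(struct uloop_fd *u, unsigned int events)
 *	{
 *		char buf[256];
 *
 *		while (read(u->fd, buf, sizeof(buf)) > 0)
 *			;	// consume pending data
 *
 *		if (u->eof || u->error)
 *			uloop_fd_delete(u);
 *	}
 *
 *	static struct uloop_fd listener = { .cb = on_readable };
 *
 *	// after obtaining a descriptor, e.g. a socket:
 *	//	listener.fd = sock;
 *	//	uloop_fd_add(&listener, ULOOP_READ);
 */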

int uloop_fd_delete(struct uloop_fd *fd)
{
	int i;

	for (i = 0; i < cur_nfds; i++) {
		if (cur_fds[cur_fd + i].fd != fd)
			continue;

		cur_fds[cur_fd + i].fd = NULL;
	}

	if (!fd->registered)
		return 0;

	fd->registered = false;
	uloop_fd_stack_event(fd, -1);
	return __uloop_fd_delete(fd);
}

static int64_t tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		(t1->tv_sec - t2->tv_sec) * 1000 +
		(t1->tv_usec - t2->tv_usec) / 1000;
}
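
/*
 * tv_diff() above returns t1 - t2 in milliseconds and may be negative.
 * Worked example: t1 = {5, 250000} and t2 = {3, 750000} give
 * (5 - 3) * 1000 + (250000 - 750000) / 1000 = 2000 - 500 = 1500 ms.
 */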

int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}

static void uloop_gettime(struct timeval *tv)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}

int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	uloop_gettime(time);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	if (time->tv_usec > 1000000) {
		time->tv_sec++;
		time->tv_usec -= 1000000;
	}

	return uloop_timeout_add(timeout);
}
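
/*
 * Usage sketch (illustrative): uloop_timeout_set() arms (or re-arms) a
 * timeout to fire msecs milliseconds from now on the monotonic clock;
 * re-arming from the callback is the usual way to get a periodic
 * timer. The name tick is an example only.
 *
 *	static void tick_cb(struct uloop_timeout *t)
 *	{
 *		// ... periodic work ...
 *		uloop_timeout_set(t, 1000);	// fire again in one second
 *	}
 *
 *	static struct uloop_timeout tick = { .cb = tick_cb };
 *
 *	// initial arm, e.g. from main() after uloop_init():
 *	//	uloop_timeout_set(&tick, 1000);
 */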

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}

int uloop_timeout_remaining(struct uloop_timeout *timeout)
{
	int64_t td;
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	td = tv_diff(&timeout->time, &now);

	if (td > INT_MAX)
		return INT_MAX;
	else if (td < INT_MIN)
		return INT_MIN;
	else
		return (int)td;
}

int64_t uloop_timeout_remaining64(struct uloop_timeout *timeout)
{
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	return tv_diff(&timeout->time, &now);
}
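
/*
 * Usage sketch (illustrative): uloop_timeout_remaining64() is the
 * 64-bit variant added by this change. Unlike uloop_timeout_remaining()
 * it does not clamp the result to INT_MIN/INT_MAX, so very distant
 * timeouts are reported exactly. Both return -1 for a timeout that is
 * not pending; a pending but already overdue timeout also yields a
 * negative value. PRId64 comes from <inttypes.h>; t is assumed to have
 * been armed with uloop_timeout_set().
 *
 *	#include <inttypes.h>
 *
 *	if (t.pending)
 *		printf("fires in %" PRId64 " ms\n",
 *		       uloop_timeout_remaining64(&t));
 */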

int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}
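
/*
 * Usage sketch (illustrative): tracking a forked child. When the child
 * exits, uloop's SIGCHLD handling reaps it via waitpid() and invokes
 * the callback with the wait status. The names child_done/proc are
 * examples only.
 *
 *	static void child_done(struct uloop_process *p, int ret)
 *	{
 *		if (WIFEXITED(ret))
 *			printf("pid %d exited with code %d\n",
 *			       (int)p->pid, WEXITSTATUS(ret));
 *	}
 *
 *	static struct uloop_process proc = { .cb = child_done };
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		execl("/bin/true", "true", (char *)NULL);
 *		_exit(127);
 *	} else if (pid > 0) {
 *		proc.pid = pid;
 *		uloop_process_add(&proc);
 *	}
 */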

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}

static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid < 0 && errno == EINTR)
			continue;

		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}

static void uloop_signal_wake(void)
{
	do {
		if (write(waker_pipe, "w", 1) < 0) {
			if (errno == EINTR)
				continue;
		}
		break;
	} while (1);
}

static void uloop_handle_sigint(int signo)
{
	uloop_status = signo;
	uloop_cancelled = true;
	uloop_signal_wake();
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
	uloop_signal_wake();
}

static void uloop_install_handler(int signum, void (*handler)(int), struct sigaction* old, bool add)
{
	struct sigaction s;
	struct sigaction *act;

	act = NULL;
	sigaction(signum, NULL, &s);

	if (add) {
		if (s.sa_handler == SIG_DFL) { /* Do not override existing custom signal handlers */
			memcpy(old, &s, sizeof(struct sigaction));
			s.sa_handler = handler;
			s.sa_flags = 0;
			act = &s;
		}
	}
	else if (s.sa_handler == handler) { /* Do not restore if someone modified our handler */
		act = old;
	}

	if (act != NULL)
		sigaction(signum, act, NULL);
}

static void uloop_ignore_signal(int signum, bool ignore)
{
	struct sigaction s;
	void *new_handler = NULL;

	sigaction(signum, NULL, &s);

	if (ignore) {
		if (s.sa_handler == SIG_DFL) /* Ignore only if there isn't any custom handler */
			new_handler = SIG_IGN;
	} else {
		if (s.sa_handler == SIG_IGN) /* Restore only if no one modified our SIG_IGN */
			new_handler = SIG_DFL;
	}

	if (new_handler) {
		s.sa_handler = new_handler;
		s.sa_flags = 0;
		sigaction(signum, &s, NULL);
	}
}

static void uloop_setup_signals(bool add)
{
	static struct sigaction old_sigint, old_sigchld, old_sigterm;

	uloop_install_handler(SIGINT, uloop_handle_sigint, &old_sigint, add);
	uloop_install_handler(SIGTERM, uloop_handle_sigint, &old_sigterm, add);

	if (uloop_handle_sigchld)
		uloop_install_handler(SIGCHLD, uloop_sigchld, &old_sigchld, add);

	uloop_ignore_signal(SIGPIPE, add);
}

static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int64_t diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}

static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t;

	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}

bool uloop_cancelling(void)
{
	return uloop_run_depth > 0 && uloop_cancelled;
}

int uloop_run_timeout(int timeout)
{
	int next_time = 0;
	struct timeval tv;

	uloop_run_depth++;

	uloop_status = 0;
	uloop_cancelled = false;
	while (!uloop_cancelled)
	{
		uloop_gettime(&tv);
		uloop_process_timeouts(&tv);

		if (do_sigchld)
			uloop_handle_processes();

		if (uloop_cancelled)
			break;

		uloop_gettime(&tv);

		next_time = uloop_get_next_timeout(&tv);
		if (timeout >= 0 && timeout < next_time)
			next_time = timeout;
		uloop_run_events(next_time);
	}

	--uloop_run_depth;

	return uloop_status;
}
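
/*
 * Note (illustrative): the return value of uloop_run_timeout(), and of
 * the uloop_run() wrapper in uloop.h, is uloop_status: 0 when the loop
 * stopped because uloop_cancelled was set (e.g. via uloop_end()), or
 * the number of the signal (SIGINT or SIGTERM) that cancelled it, as
 * set by uloop_handle_sigint() above.
 *
 *	int sig = uloop_run();
 *
 *	if (sig == SIGTERM)
 *		fprintf(stderr, "terminated by SIGTERM\n");
 */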

void uloop_done(void)
{
	uloop_setup_signals(false);

	if (poll_fd >= 0) {
		close(poll_fd);
		poll_fd = -1;
	}

	if (waker_pipe >= 0) {
		uloop_fd_delete(&waker_fd);
		close(waker_pipe);
		close(waker_fd.fd);
		waker_pipe = -1;
	}

	uloop_clear_timeouts();
	uloop_clear_processes();
}