uloop: fix immediate timeout processing on mac os x
[project/libubox.git] / uloop.c
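On Mac OS X uloop runs on the kqueue backend below. There, the delay until the next pending uloop timeout, expressed in milliseconds, has to be converted into the timespec argument of kevent(): a delay of 0 must become a zeroed timespec so the call polls and returns immediately, and only a negative value (no timeout pending) may be mapped to a NULL pointer, which blocks until an event arrives. A minimal sketch of that conversion, mirroring uloop_run_events() below (timeout_msecs and tsp are illustrative names):

	struct timespec ts;
	const struct timespec *tsp = NULL;	/* NULL: block until an event arrives */

	if (timeout_msecs >= 0) {
		/* 0 ms becomes { 0, 0 }, so kevent() returns at once */
		ts.tv_sec = timeout_msecs / 1000;
		ts.tv_nsec = (timeout_msecs % 1000) * 1000000;
		tsp = &ts;
	}

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), tsp);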
/*
 * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2010 Steven Barth <steven@midlink.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <stdbool.h>
#include <signal.h>	/* sigaction(), SIGINT, SIGCHLD */

#include "uloop.h"
#include "utils.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
#include <sys/wait.h>

#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static bool do_sigchld = false;
static int cur_fd, cur_nfds;

#ifdef USE_KQUEUE

int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = kqueue();
	if (poll_fd < 0)
		return -1;

	return 0;
}


static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
	uint16_t kflags = 0;

	if (!(flags & mask))
		return EV_DELETE;

	kflags = EV_ADD;
	if (flags & ULOOP_EDGE_TRIGGER)
		kflags |= EV_CLEAR;

	return kflags;
}

static struct kevent events[ULOOP_MAX_EVENTS];

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev[2];
	unsigned int changed;
	int nev = 0;

	changed = fd->kqflags ^ flags;
	if (changed & ULOOP_EDGE_TRIGGER)
		changed |= flags;

	if (changed & ULOOP_READ) {
		uint16_t kflags = get_flags(flags, ULOOP_READ);
		EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);
	}

	if (changed & ULOOP_WRITE) {
		uint16_t kflags = get_flags(flags, ULOOP_WRITE);
		EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);
	}

	/* removal (EV_DELETE) is already encoded per filter by get_flags();
	 * the fifth kevent() argument is the size of the (unused) output
	 * event list, so it must be 0 here */
	if (nev && (kevent(poll_fd, ev, nev, NULL, 0, &timeout) == -1))
		return -1;

	fd->kqflags = flags;
	return 0;
}

int uloop_fd_delete(struct uloop_fd *sock)
{
	int i;

	for (i = cur_fd + 1; i < cur_nfds; i++) {
		if (events[i].udata != sock)
			continue;

		events[i].udata = NULL;
	}

	sock->registered = false;
	return register_poll(sock, 0);
}

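/*
 * Wait for file descriptor events for at most "timeout" milliseconds
 * and dispatch the callbacks of the affected uloop_fds; a negative
 * timeout means no uloop timeout is pending, so block until an event
 * arrives.
 */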
static void uloop_run_events(int timeout)
{
	struct timespec ts;
	int nfds, n;

	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
	}

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), timeout >= 0 ? &ts : NULL);
	for (n = 0; n < nfds; ++n)
	{
		struct uloop_fd *u = events[n].udata;
		unsigned int ev = 0;

		if (!u)
			continue;

		if (events[n].flags & EV_ERROR) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if (events[n].filter == EVFILT_READ)
			ev |= ULOOP_READ;
		else if (events[n].filter == EVFILT_WRITE)
			ev |= ULOOP_WRITE;

		if (events[n].flags & EV_EOF)
			u->eof = true;
		else if (!ev)
			continue;

		if (u->cb) {
			cur_fd = n;
			cur_nfds = nfds;
			u->cb(u, ev);
		}
	}
	cur_nfds = 0;
}

#endif

#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif

int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = epoll_create(32);
	if (poll_fd < 0)
		return -1;

	fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
	return 0;
}

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct epoll_event ev;
	int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

	memset(&ev, 0, sizeof(struct epoll_event));

	if (flags & ULOOP_READ)
		ev.events |= EPOLLIN | EPOLLRDHUP;

	if (flags & ULOOP_WRITE)
		ev.events |= EPOLLOUT;

	if (flags & ULOOP_EDGE_TRIGGER)
		ev.events |= EPOLLET;

	ev.data.fd = fd->fd;
	ev.data.ptr = fd;

	return epoll_ctl(poll_fd, op, fd->fd, &ev);
}

static struct epoll_event events[ULOOP_MAX_EVENTS];

int uloop_fd_delete(struct uloop_fd *sock)
{
	int i;

	for (i = cur_fd + 1; i < cur_nfds; i++) {
		if (events[i].data.ptr != sock)
			continue;

		events[i].data.ptr = NULL;
	}
	sock->registered = false;
	return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
}

static void uloop_run_events(int timeout)
{
	int n, nfds;

	nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
	for (n = 0; n < nfds; ++n)
	{
		struct uloop_fd *u = events[n].data.ptr;
		unsigned int ev = 0;

		if (!u)
			continue;

		if (events[n].events & (EPOLLERR|EPOLLHUP)) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if (!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP)))
			continue;

		if (events[n].events & EPOLLRDHUP)
			u->eof = true;

		if (events[n].events & EPOLLIN)
			ev |= ULOOP_READ;

		if (events[n].events & EPOLLOUT)
			ev |= ULOOP_WRITE;

		if (u->cb) {
			cur_fd = n;
			cur_nfds = nfds;
			u->cb(u, ev);
		}
	}
	cur_nfds = 0;
}

#endif

int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	sock->registered = true;
	sock->eof = false;

out:
	return ret;
}

static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		(t1->tv_sec - t2->tv_sec) * 1000 +
		(t1->tv_usec - t2->tv_usec) / 1000;
}

int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}

int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	gettimeofday(&timeout->time, NULL);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	/* keep tv_usec normalized to [0, 999999] */
	if (time->tv_usec >= 1000000) {
		time->tv_sec++;
		time->tv_usec %= 1000000;
	}

	return uloop_timeout_add(timeout);
}

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}

int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}

static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}

static void uloop_handle_sigint(int signo)
{
	uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
}

static void uloop_setup_signals(void)
{
	struct sigaction s;

	memset(&s, 0, sizeof(struct sigaction));
	s.sa_handler = uloop_handle_sigint;
	s.sa_flags = 0;
	sigaction(SIGINT, &s, NULL);

	if (uloop_handle_sigchld) {
		s.sa_handler = uloop_sigchld;
		sigaction(SIGCHLD, &s, NULL);
	}
}

static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}

static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t;

	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}

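/*
 * Main loop: each iteration runs the timeouts that have expired, then
 * the callbacks of processes reaped after SIGCHLD, and finally waits
 * for I/O, using the delay until the next pending timeout as the wait
 * limit (-1 = no timeout pending, 0 = a timeout is already due).
 */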
void uloop_run(void)
{
	struct timeval tv;

	uloop_setup_signals();
	while (!uloop_cancelled)
	{
		gettimeofday(&tv, NULL);
		uloop_process_timeouts(&tv);
		if (uloop_cancelled)
			break;

		if (do_sigchld)
			uloop_handle_processes();
		uloop_run_events(uloop_get_next_timeout(&tv));
	}
}

void uloop_done(void)
{
	if (poll_fd < 0)
		return;

	close(poll_fd);
	poll_fd = -1;

	uloop_clear_timeouts();
	uloop_clear_processes();
}
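
For context, a minimal consumer of this loop could look as follows. This is a sketch assuming the declarations in uloop.h (struct uloop_fd, struct uloop_timeout and their fd/cb members as used above); the callback and variable names are illustrative:

#include <stdio.h>
#include <unistd.h>

#include "uloop.h"

/* called whenever the registered fd becomes readable */
static void on_readable(struct uloop_fd *u, unsigned int events)
{
	char buf[256];
	ssize_t len = read(u->fd, buf, sizeof(buf));

	if (len > 0)
		fwrite(buf, 1, len, stdout);
}

/* periodic timer callback: re-arms itself one second into the future */
static void on_tick(struct uloop_timeout *t)
{
	printf("tick\n");
	uloop_timeout_set(t, 1000);
}

static struct uloop_fd stdin_fd = { .fd = 0, .cb = on_readable };
static struct uloop_timeout tick = { .cb = on_tick };

int main(void)
{
	uloop_init();
	uloop_fd_add(&stdin_fd, ULOOP_READ);
	uloop_timeout_set(&tick, 1000);
	uloop_run();	/* returns once uloop_cancelled is set, e.g. on SIGINT */
	uloop_done();

	return 0;
}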