uloop: use list.h, add support for handling sigchld
[project/libubox.git] / uloop.c
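This change moves uloop's timer and process bookkeeping onto list.h lists and adds SIGCHLD handling: the signal handler only raises a flag, and uloop_run() later reaps exited children and invokes the registered callbacks. Below is a minimal caller sketch, illustrative only and not part of the commit; it assumes uloop.h declares struct uloop_process and struct uloop_timeout with the pid/cb fields and callback types used in this file, and that it exports the uloop_cancelled flag defined here.

#include <stdio.h>
#include <unistd.h>

#include "uloop.h"

static struct uloop_process child_proc;
static struct uloop_timeout stop_timer;

static void child_exited(struct uloop_process *p, int ret)
{
	/* ret is the raw status reported by waitpid() */
	printf("child %d exited, status %d\n", (int) p->pid, ret);
}

static void stop_cb(struct uloop_timeout *t)
{
	uloop_cancelled = true;		/* leave uloop_run() */
}

int main(void)
{
	if (uloop_init() < 0)
		return 1;

	child_proc.cb = child_exited;
	child_proc.pid = fork();
	if (child_proc.pid < 0)
		return 1;
	if (child_proc.pid == 0) {
		/* child: exit after one second */
		sleep(1);
		_exit(0);
	}
	uloop_process_add(&child_proc);

	stop_timer.cb = stop_cb;
	uloop_timeout_set(&stop_timer, 2000);	/* stop the loop after 2s */

	uloop_run();
	uloop_done();
	return 0;
}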
/*
 * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2010 Steven Barth <steven@midlink.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <signal.h>

#include "uloop.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif


#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static bool do_sigchld = false;

#ifdef USE_KQUEUE

int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = kqueue();
	if (poll_fd < 0)
		return -1;

	return 0;
}


static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
	uint16_t kflags = 0;

	if (!(flags & mask))
		return EV_DELETE;

	kflags = EV_ADD;
	if (flags & ULOOP_EDGE_TRIGGER)
		kflags |= EV_CLEAR;

	return kflags;
}

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev[2];
	unsigned int changed;
	int nev = 0;

	changed = fd->kqflags ^ flags;
	if (changed & ULOOP_EDGE_TRIGGER)
		changed |= flags;

	if (changed & ULOOP_READ) {
		uint16_t kflags = get_flags(flags, ULOOP_READ);
		EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);
	}

	if (changed & ULOOP_WRITE) {
		uint16_t kflags = get_flags(flags, ULOOP_WRITE);
		EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);
	}

	if (nev && (kevent(poll_fd, ev, nev, NULL, 0, &timeout) == -1))
		return -1;

	fd->kqflags = flags;
	return 0;
}

int uloop_fd_delete(struct uloop_fd *sock)
{
	sock->registered = false;
	return register_poll(sock, 0);
}

static void uloop_run_events(int timeout)
{
	struct kevent events[ULOOP_MAX_EVENTS];
	struct timespec ts;
	int nfds, n;

	/* a timeout of 0 means "poll without blocking"; only a negative
	 * timeout (no pending timers) may block indefinitely */
	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
	}

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), timeout >= 0 ? &ts : NULL);
	for(n = 0; n < nfds; ++n)
	{
		struct uloop_fd *u = events[n].udata;
		unsigned int ev = 0;

		if(events[n].flags & EV_ERROR) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if(events[n].filter == EVFILT_READ)
			ev |= ULOOP_READ;
		else if (events[n].filter == EVFILT_WRITE)
			ev |= ULOOP_WRITE;

		if(events[n].flags & EV_EOF)
			u->eof = true;
		else if (!ev)
			continue;

		if(u->cb)
			u->cb(u, ev);
	}
}

#endif

#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif

int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = epoll_create(32);
	if (poll_fd < 0)
		return -1;

	fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
	return 0;
}

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct epoll_event ev;
	int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

	memset(&ev, 0, sizeof(struct epoll_event));

	if (flags & ULOOP_READ)
		ev.events |= EPOLLIN | EPOLLRDHUP;

	if (flags & ULOOP_WRITE)
		ev.events |= EPOLLOUT;

	if (flags & ULOOP_EDGE_TRIGGER)
		ev.events |= EPOLLET;

	ev.data.fd = fd->fd;
	ev.data.ptr = fd;

	return epoll_ctl(poll_fd, op, fd->fd, &ev);
}

int uloop_fd_delete(struct uloop_fd *sock)
{
	sock->registered = false;
	return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
}

static void uloop_run_events(int timeout)
{
	struct epoll_event events[ULOOP_MAX_EVENTS];
	int nfds, n;

	nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
	for(n = 0; n < nfds; ++n)
	{
		struct uloop_fd *u = events[n].data.ptr;
		unsigned int ev = 0;

		if(events[n].events & EPOLLERR) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if(!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR)))
			continue;

		if(events[n].events & EPOLLRDHUP)
			u->eof = true;

		if(events[n].events & EPOLLIN)
			ev |= ULOOP_READ;

		if(events[n].events & EPOLLOUT)
			ev |= ULOOP_WRITE;

		if(u->cb)
			u->cb(u, ev);
	}
}

#endif

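/*
 * Register (or update) a file descriptor with the event loop. Unless
 * ULOOP_BLOCKING is set, the fd is switched to O_NONBLOCK before it is
 * handed to the poll backend.
 */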
int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	sock->registered = true;
	sock->eof = false;

out:
	return ret;
}

static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	if (t1->tv_sec != t2->tv_sec)
		return (t1->tv_sec - t2->tv_sec) * 1000;
	else
		return (t1->tv_usec - t2->tv_usec) / 1000;
}

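/*
 * Insert a timeout into the global list, which is kept sorted by expiry
 * time so the head is always the next timer to fire.
 */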
int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}

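/*
 * (Re)arm a timeout to fire msecs milliseconds from now: split msecs into
 * whole seconds and microseconds, then normalize tv_usec before queueing.
 */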
int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	gettimeofday(&timeout->time, NULL);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	if (time->tv_usec > 1000000) {
		time->tv_sec++;
		time->tv_usec -= 1000000;
	}

	return uloop_timeout_add(timeout);
}

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}

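/*
 * Watch a child process. The list is kept sorted by pid so that
 * uloop_handle_processes() can stop scanning early on a mismatch.
 */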
int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}

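/*
 * Called from uloop_run() once a SIGCHLD has been flagged: reap every
 * exited child with waitpid(WNOHANG) and invoke the callback of the
 * matching registered process, if any.
 */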
static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}

static void uloop_handle_sigint(int signo)
{
	uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
}

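/*
 * SIGINT cancels the event loop. SIGCHLD (when uloop_handle_sigchld is
 * enabled) only raises a flag here; the actual reaping happens later in
 * uloop_run(), outside signal context.
 */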
static void uloop_setup_signals(void)
{
	struct sigaction s;

	memset(&s, 0, sizeof(struct sigaction));
	s.sa_handler = uloop_handle_sigint;
	s.sa_flags = 0;
	sigaction(SIGINT, &s, NULL);

	if (uloop_handle_sigchld) {
		s.sa_handler = uloop_sigchld;
		sigaction(SIGCHLD, &s, NULL);
	}
}

static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}

static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list) {
		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

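/*
 * Main loop: fire expired timeout callbacks, reap children if a SIGCHLD
 * was seen, then wait for fd events until the next timer is due.
 */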
void uloop_run(void)
{
	struct timeval tv;

	uloop_setup_signals();
	while(!uloop_cancelled)
	{
		gettimeofday(&tv, NULL);
		uloop_process_timeouts(&tv);
		if (uloop_cancelled)
			break;

		if (do_sigchld)
			uloop_handle_processes();
		uloop_run_events(uloop_get_next_timeout(&tv));
	}
}

void uloop_done(void)
{
	if (poll_fd < 0)
		return;

	close(poll_fd);
	poll_fd = -1;
}