/*
 * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2010 Steven Barth <steven@midlink.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <stdbool.h>

#include "uloop.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
#include <sys/wait.h>
#include <signal.h>

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
#define ULOOP_MAX_EVENTS 10

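/*
 * Global event loop state: the sorted lists of pending timeouts and child
 * process watchers, the backend poll descriptor and the signal flags.
 */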
static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static bool do_sigchld = false;

#ifdef USE_KQUEUE

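/* Create the kqueue used for event polling; does nothing if already initialized */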
int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = kqueue();
	if (poll_fd < 0)
		return -1;

	return 0;
}


static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
	uint16_t kflags = 0;

	if (!(flags & mask))
		return EV_DELETE;

	kflags = EV_ADD;
	if (flags & ULOOP_EDGE_TRIGGER)
		kflags |= EV_CLEAR;

	return kflags;
}

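/*
 * Translate the uloop flags into kevent changes; only filters whose state
 * actually changed (tracked in fd->kqflags) are submitted to the kernel.
 */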
static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev[2];
	unsigned int changed;
	int nev = 0;

	changed = fd->kqflags ^ flags;
	if (changed & ULOOP_EDGE_TRIGGER)
		changed |= flags;

	if (changed & ULOOP_READ) {
		uint16_t kflags = get_flags(flags, ULOOP_READ);
		EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);
	}

	if (changed & ULOOP_WRITE) {
		uint16_t kflags = get_flags(flags, ULOOP_WRITE);
		EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);
	}

	if (nev && (kevent(poll_fd, ev, nev, NULL, 0, &timeout) == -1))
		return -1;

	fd->kqflags = flags;
	return 0;
}

int uloop_fd_delete(struct uloop_fd *sock)
{
	sock->registered = false;
	return register_poll(sock, 0);
}

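/*
 * Wait for events (up to ULOOP_MAX_EVENTS per call) and dispatch the
 * registered callbacks; a negative timeout means wait indefinitely.
 */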
static void uloop_run_events(int timeout)
{
	struct kevent events[ULOOP_MAX_EVENTS];
	struct timespec ts;
	int nfds, n;

	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
	}

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), timeout >= 0 ? &ts : NULL);
	for(n = 0; n < nfds; ++n)
	{
		struct uloop_fd *u = events[n].udata;
		unsigned int ev = 0;

		if(events[n].flags & EV_ERROR) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if(events[n].filter == EVFILT_READ)
			ev |= ULOOP_READ;
		else if (events[n].filter == EVFILT_WRITE)
			ev |= ULOOP_WRITE;

		if(events[n].flags & EV_EOF)
			u->eof = true;
		else if (!ev)
			continue;

		if(u->cb)
			u->cb(u, ev);
	}
}

#endif

#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif

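/* Create the epoll instance used for event polling; does nothing if already initialized */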
int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = epoll_create(32);
	if (poll_fd < 0)
		return -1;

	fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
	return 0;
}

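/*
 * Add or update the epoll registration for a uloop_fd; the uloop_fd pointer
 * is stored in the event's data.ptr so it can be recovered on wakeup.
 */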
static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct epoll_event ev;
	int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

	memset(&ev, 0, sizeof(struct epoll_event));

	if (flags & ULOOP_READ)
		ev.events |= EPOLLIN | EPOLLRDHUP;

	if (flags & ULOOP_WRITE)
		ev.events |= EPOLLOUT;

	if (flags & ULOOP_EDGE_TRIGGER)
		ev.events |= EPOLLET;

	ev.data.fd = fd->fd;
	ev.data.ptr = fd;

	return epoll_ctl(poll_fd, op, fd->fd, &ev);
}

int uloop_fd_delete(struct uloop_fd *sock)
{
	sock->registered = false;
	return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
}

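/*
 * Wait for events (up to ULOOP_MAX_EVENTS per call) and dispatch the
 * registered callbacks; a negative timeout means wait indefinitely.
 */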
static void uloop_run_events(int timeout)
{
	struct epoll_event events[ULOOP_MAX_EVENTS];
	int nfds, n;

	nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
	for(n = 0; n < nfds; ++n)
	{
		struct uloop_fd *u = events[n].data.ptr;
		unsigned int ev = 0;

		if(events[n].events & EPOLLERR) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if(!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR)))
			continue;

		if(events[n].events & EPOLLRDHUP)
			u->eof = true;

		if(events[n].events & EPOLLIN)
			ev |= ULOOP_READ;

		if(events[n].events & EPOLLOUT)
			ev |= ULOOP_WRITE;

		if(u->cb)
			u->cb(u, ev);
	}
}

#endif

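/*
 * Register a file descriptor with the event loop; unless ULOOP_BLOCKING is
 * set, the descriptor is switched to non-blocking mode first.
 */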
int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	sock->registered = true;
	sock->eof = false;

out:
	return ret;
}

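/*
 * Approximate difference t1 - t2 in milliseconds; when the seconds differ,
 * the microsecond part is ignored.
 */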
static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	if (t1->tv_sec != t2->tv_sec)
		return (t1->tv_sec - t2->tv_sec) * 1000;
	else
		return (t1->tv_usec - t2->tv_usec) / 1000;
}

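/* Insert a timeout into the pending list, which is kept sorted by expiry time */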
int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}

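/* (Re-)arm a timeout to expire msecs milliseconds from now */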
int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	gettimeofday(&timeout->time, NULL);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	if (time->tv_usec >= 1000000) {
		time->tv_sec++;
		time->tv_usec -= 1000000;
	}

	return uloop_timeout_add(timeout);
}

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}

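/* Watch a child process; the list is kept sorted by pid */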
int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}

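/* Reap exited children and invoke the callback of each matching watcher */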
static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}

static void uloop_handle_sigint(int signo)
{
	uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
}

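/* Install the SIGINT handler (and, if enabled, the SIGCHLD handler) */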
static void uloop_setup_signals(void)
{
	struct sigaction s;

	memset(&s, 0, sizeof(struct sigaction));
	s.sa_handler = uloop_handle_sigint;
	s.sa_flags = 0;
	sigaction(SIGINT, &s, NULL);

	if (uloop_handle_sigchld) {
		s.sa_handler = uloop_sigchld;
		sigaction(SIGCHLD, &s, NULL);
	}
}

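/*
 * Milliseconds until the next timeout expires: 0 if one is already due,
 * -1 if no timeout is pending.
 */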
static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}

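/* Run the callbacks of all timeouts that have expired by *tv */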
static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list) {
		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

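/*
 * Main loop: process expired timeouts, reap exited children and poll for
 * fd events until uloop_cancelled is set (e.g. by SIGINT).
 */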
void uloop_run(void)
{
	struct timeval tv;

	uloop_setup_signals();
	while(!uloop_cancelled)
	{
		gettimeofday(&tv, NULL);
		uloop_process_timeouts(&tv);
		if (uloop_cancelled)
			break;

		if (do_sigchld)
			uloop_handle_processes();
		uloop_run_events(uloop_get_next_timeout(&tv));
	}
}

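/* Tear down the event loop by closing the backend poll descriptor */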
void uloop_done(void)
{
	if (poll_fd < 0)
		return;

	close(poll_fd);
	poll_fd = -1;
}