/* lib.c - from project/udebug.git (commit caba7d9a1f9f25ab0c7b36a45c3e8539fe02b4f0) */
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <poll.h>
#include <time.h>
#include "priv.h"

#include <libubox/usock.h>

#define ALIGN(i, sz) (((i) + (sz) - 1) & ~((sz) - 1))

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#define UDEBUG_MIN_ALLOC_LEN 128

/* scratch buffer for outgoing blobmsg metadata */
static struct blob_buf b;

/* randomize the 6-character shm name suffix (similar to musl's __randname()) */
static void __randname(char *template)
{
	int i;
	struct timespec ts;
	unsigned long r;

	clock_gettime(CLOCK_REALTIME, &ts);
	r = ts.tv_sec + ts.tv_nsec;
	for (i = 0; i < 6; i++, r >>= 5)
		template[i] = 'A' + (r & 15) + (r & 16) * 2;
}

int udebug_id_cmp(const void *k1, const void *k2, void *ptr)
{
	uint32_t id1 = (uint32_t)(uintptr_t)k1, id2 = (uint32_t)(uintptr_t)k2;

	if (id1 < id2)
		return -1;
	else if (id1 > id2)
		return 1;
	return 0;
}

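/*
 * Create an anonymous POSIX shared-memory object: randomize the trailing
 * "XXXXXX" suffix, create the segment with O_EXCL and unlink it right away,
 * so that only the returned file descriptor keeps it alive.  Gives up after
 * 100 attempts.
 */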
static inline int
shm_open_anon(char *name)
{
	char *template = name + strlen(name) - 6;
	int fd;

	if (template < name || memcmp(template, "XXXXXX", 6) != 0)
		return -1;

	for (int i = 0; i < 100; i++) {
		__randname(template);
		fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
		if (fd >= 0) {
			if (shm_unlink(name) < 0) {
				close(fd);
				continue;
			}
			return fd;
		}

		if (errno != EEXIST)
			return -1;
	}

	return -1;
}

uint64_t udebug_timestamp(void)
{
	struct timespec ts;
	uint64_t val;

	clock_gettime(CLOCK_REALTIME, &ts);

	val = ts.tv_sec;
	val *= UDEBUG_TS_SEC;
	val += ts.tv_nsec / 1000;

	return val;
}

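/*
 * Map the ring into the process: reserve head_size + 2 * data_size of address
 * space, map the header plus data area into it, then map the data area a
 * second time directly behind the first copy.  Entries that wrap around the
 * end of the ring can then be accessed through one contiguous pointer.
 */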
static int
__udebug_buf_map(struct udebug_buf *buf)
{
	void *ptr, *ptr2;

	ptr = mmap(NULL, buf->head_size + 2 * buf->data_size, PROT_NONE,
		   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (ptr == MAP_FAILED)
		return -1;

	ptr2 = mmap(ptr, buf->head_size + buf->data_size,
		    PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, buf->fd, 0);
	if (ptr2 != ptr)
		goto err_unmap;

	ptr2 = mmap(ptr + buf->head_size + buf->data_size, buf->data_size,
		    PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, buf->fd,
		    buf->head_size);
	if (ptr2 != ptr + buf->head_size + buf->data_size)
		goto err_unmap;

	buf->hdr = ptr;
	buf->data = ptr + buf->head_size;
	return 0;

err_unmap:
	munmap(ptr, buf->head_size + 2 * buf->data_size);
	return -1;
}

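/*
 * Send the full iovec over the control socket, retrying on EAGAIN/EINTR.
 * If sock_fd is valid, it is attached as SCM_RIGHTS ancillary data so the
 * receiver gets a duplicate of that descriptor.
 */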
static int
writev_retry(int fd, struct iovec *iov, int iov_len, int sock_fd)
{
	uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct msghdr msghdr = { 0 };
	struct cmsghdr *cmsg;
	int len = 0;
	int *pfd;

	msghdr.msg_iov = iov;
	msghdr.msg_iovlen = iov_len;
	msghdr.msg_control = fd_buf;
	msghdr.msg_controllen = sizeof(fd_buf);

	cmsg = CMSG_FIRSTHDR(&msghdr);
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));

	pfd = (int *) CMSG_DATA(cmsg);
	msghdr.msg_controllen = cmsg->cmsg_len;

	do {
		ssize_t cur_len;

		if (sock_fd < 0) {
			msghdr.msg_control = NULL;
			msghdr.msg_controllen = 0;
		} else {
			*pfd = sock_fd;
		}

		cur_len = sendmsg(fd, &msghdr, 0);
		if (cur_len < 0) {
			struct pollfd pfd = {
				.fd = fd,
				.events = POLLOUT
			};

			switch (errno) {
			case EAGAIN:
				poll(&pfd, 1, -1);
				break;
			case EINTR:
				break;
			default:
				return -1;
			}
			continue;
		}

		if (len > 0)
			sock_fd = -1;

		len += cur_len;
		while (cur_len >= (ssize_t) iov->iov_len) {
			cur_len -= iov->iov_len;
			iov_len--;
			iov++;
			if (!iov_len)
				return len;
		}
		iov->iov_base += cur_len;
		iov->iov_len -= cur_len;
		msghdr.msg_iov = iov;
		msghdr.msg_iovlen = iov_len;
	} while (1);

	/* Should never reach here */
	return -1;
}

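/*
 * Read exactly iov->iov_len bytes from the control socket.  If recv_fd is
 * non-NULL, a descriptor received as SCM_RIGHTS ancillary data with the first
 * chunk is stored there; any further descriptors are closed.  Returns the
 * number of bytes read, 0 if !wait and no data was available, -1 on poll
 * timeout and -2 on socket errors or EOF.
 */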
static int
recv_retry(int fd, struct iovec *iov, bool wait, int *recv_fd)
{
	uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct msghdr msghdr = { 0 };
	struct cmsghdr *cmsg;
	int total = 0;
	int bytes;
	int *pfd;

	msghdr.msg_iov = iov;
	msghdr.msg_iovlen = 1;
	msghdr.msg_control = fd_buf;
	msghdr.msg_controllen = sizeof(fd_buf);

	cmsg = CMSG_FIRSTHDR(&msghdr);
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));

	pfd = (int *) CMSG_DATA(cmsg);

	while (iov->iov_len > 0) {
		if (recv_fd) {
			msghdr.msg_control = fd_buf;
			msghdr.msg_controllen = cmsg->cmsg_len;
		} else {
			msghdr.msg_control = NULL;
			msghdr.msg_controllen = 0;
		}

		*pfd = -1;
		bytes = recvmsg(fd, &msghdr, 0);
		if (!bytes)
			return -2;
		if (bytes < 0) {
			bytes = 0;
			if (errno == EINTR)
				continue;

			if (errno != EAGAIN)
				return -2;
		}
		if (!wait && !bytes)
			return 0;

		if (recv_fd)
			*recv_fd = *pfd;
		else if (*pfd >= 0)
			close(*pfd);

		if (bytes > 0)
			recv_fd = NULL;

		wait = true;
		iov->iov_len -= bytes;
		iov->iov_base += bytes;
		total += bytes;

		if (iov->iov_len > 0) {
			struct pollfd pfd = {
				.fd = fd,
				.events = POLLIN
			};
			int ret;
			do {
				ret = poll(&pfd, 1, UDEBUG_TIMEOUT);
			} while (ret < 0 && errno == EINTR);

			if (!(pfd.revents & POLLIN))
				return -1;
		}
	}

	return total;
}

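/*
 * Send a request to udebugd: the fixed-size client message, optional blobmsg
 * metadata (an empty blob if the caller passes NULL) and, for ring
 * announcements, the ring's shared-memory file descriptor.
 */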
void udebug_send_msg(struct udebug *ctx, struct udebug_client_msg *msg,
		     struct blob_attr *meta, int fd)
{
	struct iovec iov[2] = {
		{ .iov_base = msg, .iov_len = sizeof(*msg) },
		{}
	};

	if (!meta) {
		blob_buf_init(&b, 0);
		meta = b.head;
	}

	iov[1].iov_base = meta;
	iov[1].iov_len = blob_pad_len(meta);
	writev_retry(ctx->fd.fd, iov, ARRAY_SIZE(iov), fd);
}

static void
udebug_buf_msg(struct udebug_buf *buf, enum udebug_client_msg_type type)
{
	struct udebug_client_msg msg = {
		.type = type,
		.id = buf->id,
	};

	udebug_send_msg(buf->ctx, &msg, NULL, -1);
}

static size_t __udebug_headsize(unsigned int ring_size, unsigned int page_size)
{
	ring_size *= sizeof(struct udebug_ptr);
	return ALIGN(sizeof(struct udebug_hdr) + ring_size, page_size);
}

int udebug_buf_open(struct udebug_buf *buf, int fd, uint32_t ring_size, uint32_t data_size)
{
	INIT_LIST_HEAD(&buf->list);
	buf->fd = fd;
	buf->ring_size = ring_size;
	buf->head_size = __udebug_headsize(ring_size, sysconf(_SC_PAGESIZE));
	buf->data_size = data_size;

	if (buf->ring_size > (1U << 24) || buf->data_size > (1U << 29))
		return -1;

	if (__udebug_buf_map(buf))
		return -1;

	if (buf->ring_size != buf->hdr->ring_size ||
	    buf->data_size != buf->hdr->data_size) {
		munmap(buf->hdr, buf->head_size + 2 * buf->data_size);
		buf->hdr = NULL;
		return -1;
	}

	return 0;
}

int udebug_buf_init(struct udebug_buf *buf, size_t entries, size_t size)
{
	uint32_t pagesz = sysconf(_SC_PAGESIZE);
	char filename[] = "/udebug.XXXXXX";
	unsigned int order = 12;
	uint8_t ring_order = 5;
	size_t head_size;
	int fd;

	INIT_LIST_HEAD(&buf->list);

	/* round the data area up to a power of two, at least one page */
	if (size < pagesz)
		size = pagesz;
	while (size > 1 << order)
		order++;
	size = 1 << order;

	/* round the number of ring entries up to a power of two (at least 32) */
	while (entries > 1 << ring_order)
		ring_order++;
	entries = 1 << ring_order;

	if (size > (1U << 29) || entries > (1U << 24))
		return -1;

	head_size = __udebug_headsize(entries, pagesz);

	/* grow the ring while the already-allocated header pages can hold twice the entries */
	while (ALIGN(sizeof(*buf->hdr) + (entries * 2) * sizeof(struct udebug_ptr), pagesz) == head_size)
		entries *= 2;

	fd = shm_open_anon(filename);
	if (fd < 0)
		return -1;

	if (ftruncate(fd, head_size + size) < 0)
		goto err_close;

	buf->head_size = head_size;
	buf->data_size = size;
	buf->ring_size = entries;
	buf->fd = fd;

	if (__udebug_buf_map(buf))
		goto err_close;

	buf->hdr->ring_size = entries;
	buf->hdr->data_size = size;

	/* ensure hdr changes are visible */
	__sync_synchronize();

	return 0;

err_close:
	close(fd);
	return -1;
}

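/*
 * Reserve [ofs, ofs + len] (plus a terminating byte) in the data area by
 * raising the data_used high-water mark, then return a pointer to the start
 * of the reservation.
 */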
static void *udebug_buf_alloc(struct udebug_buf *buf, uint32_t ofs, uint32_t len)
{
	struct udebug_hdr *hdr = buf->hdr;

	hdr->data_used = u32_max(hdr->data_used, ofs + len + 1);

	/* ensure that data_used update is visible before clobbering data */
	__sync_synchronize();

	return udebug_buf_ptr(buf, ofs);
}

uint64_t udebug_buf_flags(struct udebug_buf *buf)
{
	struct udebug_hdr *hdr = buf->hdr;
	uint64_t flags;

	if (!hdr)
		return 0;

	flags = hdr->flags[0];
	if (sizeof(flags) != sizeof(uintptr_t))
		flags |= ((uint64_t)hdr->flags[1]) << 32;

	return flags;
}

void udebug_entry_init_ts(struct udebug_buf *buf, uint64_t timestamp)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;

	if (!hdr)
		return;

	ptr = udebug_ring_ptr(hdr, hdr->head);
	ptr->start = hdr->data_head;
	ptr->len = 0;
	ptr->timestamp = timestamp;
}

void *udebug_entry_append(struct udebug_buf *buf, const void *data, uint32_t len)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;
	uint32_t ofs;
	void *ret;

	if (!hdr)
		return NULL;

	ptr = udebug_ring_ptr(hdr, hdr->head);
	ofs = ptr->start + ptr->len;
	if (ptr->len + len > buf->data_size / 2)
		return NULL;

	ret = udebug_buf_alloc(buf, ofs, len);
	if (data)
		memcpy(ret, data, len);
	ptr->len += len;

	return ret;
}

int udebug_entry_printf(struct udebug_buf *buf, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = udebug_entry_vprintf(buf, fmt, ap);
	va_end(ap);

	return ret;
}

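/*
 * Format into the current entry in at most two passes: first into a
 * UDEBUG_MIN_ALLOC_LEN scratch reservation, then, if vsnprintf reports that
 * the output was truncated, into a reservation grown to the reported length.
 */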
int udebug_entry_vprintf(struct udebug_buf *buf, const char *fmt, va_list ap)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;
	uint32_t ofs;
	uint32_t len;
	va_list ap2;
	char *str;

	if (!hdr)
		return -1;

	ptr = udebug_ring_ptr(hdr, hdr->head);
	ofs = ptr->start + ptr->len;
	if (ptr->len > buf->data_size / 2)
		return -1;

	str = udebug_buf_alloc(buf, ofs, UDEBUG_MIN_ALLOC_LEN);
	va_copy(ap2, ap);
	len = vsnprintf(str, UDEBUG_MIN_ALLOC_LEN, fmt, ap2);
	va_end(ap2);
	if (len < UDEBUG_MIN_ALLOC_LEN)
		goto out;

	if (ptr->len + len > buf->data_size / 2)
		return -1;

	/* output was truncated: reserve the full length and format again */
	udebug_buf_alloc(buf, ofs, len + 1);
	len = vsnprintf(str, len + 1, fmt, ap);

out:
	ptr->len += len;
	return 0;
}

void udebug_entry_add(struct udebug_buf *buf)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr = udebug_ring_ptr(hdr, hdr->head);
	uint32_t notify;
	uint8_t *data;

	/* ensure strings are always 0-terminated */
	data = udebug_buf_ptr(buf, ptr->start + ptr->len);
	*data = 0;
	hdr->data_head = ptr->start + ptr->len + 1;

	/* ensure that all data changes are visible before advancing head */
	__sync_synchronize();

	u32_set(&hdr->head, u32_get(&hdr->head) + 1);
	if (!u32_get(&hdr->head))
		u32_set(&hdr->head_hi, u32_get(&hdr->head_hi) + 1);

	/* ensure that head change is visible */
	__sync_synchronize();

	notify = __atomic_exchange_n(&hdr->notify, 0, __ATOMIC_RELAXED);
	if (notify) {
		struct udebug_client_msg msg = {
			.type = CL_MSG_RING_NOTIFY,
			.id = buf->id,
			.notify_mask = notify,
		};

		blob_buf_init(&b, 0);
		udebug_send_msg(buf->ctx, &msg, b.head, -1);
	}
}
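
/*
 * Tear down a local ring: drop it from the context's list, tell udebugd to
 * remove it (if still connected), unmap both mappings and close the shm fd.
 */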
void udebug_buf_free(struct udebug_buf *buf)
{
	struct udebug *ctx = buf->ctx;

	if (!list_empty(&buf->list) && buf->list.prev)
		list_del(&buf->list);

	if (ctx && ctx->fd.fd >= 0)
		udebug_buf_msg(buf, CL_MSG_RING_REMOVE);

	munmap(buf->hdr, buf->head_size + 2 * buf->data_size);
	close(buf->fd);
	memset(buf, 0, sizeof(*buf));
}

static void
__udebug_buf_add(struct udebug *ctx, struct udebug_buf *buf)
{
	struct udebug_client_msg msg = {
		.type = CL_MSG_RING_ADD,
		.id = buf->id,
		.ring_size = buf->hdr->ring_size,
		.data_size = buf->hdr->data_size,
	};
	const struct udebug_buf_meta *meta = buf->meta;
	void *c;

	blob_buf_init(&b, 0);
	blobmsg_add_string(&b, "name", meta->name);
	c = blobmsg_open_array(&b, "flags");
	for (size_t i = 0; i < meta->n_flags; i++) {
		const struct udebug_buf_flag *flag = &meta->flags[i];
		void *e = blobmsg_open_array(&b, NULL);
		blobmsg_add_string(&b, NULL, flag->name);
		blobmsg_add_u64(&b, NULL, flag->mask);
		blobmsg_close_array(&b, e);
	}
	blobmsg_close_array(&b, c);

	udebug_send_msg(ctx, &msg, b.head, buf->fd);
}

int udebug_buf_add(struct udebug *ctx, struct udebug_buf *buf,
		   const struct udebug_buf_meta *meta)
{
	list_add_tail(&buf->list, &ctx->local_rings);
	buf->ctx = ctx;
	buf->meta = meta;
	buf->id = ctx->next_id++;
	buf->hdr->format = meta->format;
	buf->hdr->sub_format = meta->sub_format;

	if (ctx->fd.fd >= 0)
		__udebug_buf_add(ctx, buf);

	return 0;
}

void udebug_init(struct udebug *ctx)
{
	INIT_LIST_HEAD(&ctx->local_rings);
	avl_init(&ctx->remote_rings, udebug_id_cmp, true, NULL);
	ctx->fd.fd = -1;
	ctx->poll_handle = -1;
}

static void udebug_reconnect_cb(struct uloop_timeout *t)
{
	struct udebug *ctx = container_of(t, struct udebug, reconnect);

	if (udebug_connect(ctx, ctx->socket_path) < 0) {
		uloop_timeout_set(&ctx->reconnect, 1000);
		return;
	}

	udebug_add_uloop(ctx);
}

void udebug_auto_connect(struct udebug *ctx, const char *path)
{
	free(ctx->socket_path);
	ctx->reconnect.cb = udebug_reconnect_cb;
	ctx->socket_path = path ? strdup(path) : NULL;
	if (ctx->fd.fd >= 0)
		return;

	udebug_reconnect_cb(&ctx->reconnect);
}

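/*
 * Producer-side usage sketch (illustrative only: the ring name, the sizes and
 * UDEBUG_FORMAT_STRING are example values assumed to be provided by udebug.h):
 *
 *	static struct udebug ud;
 *	static struct udebug_buf ud_log;
 *	static const struct udebug_buf_meta ud_log_meta = {
 *		.name = "example-log",
 *		.format = UDEBUG_FORMAT_STRING,
 *	};
 *
 *	uloop_init();
 *	udebug_init(&ud);
 *	udebug_auto_connect(&ud, NULL);		(NULL selects UDEBUG_SOCK_NAME)
 *	udebug_buf_init(&ud_log, 256, 64 * 1024);
 *	udebug_buf_add(&ud, &ud_log, &ud_log_meta);
 *
 *	udebug_entry_init_ts(&ud_log, udebug_timestamp());
 *	udebug_entry_printf(&ud_log, "event %d", 42);
 *	udebug_entry_add(&ud_log);
 */
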
int udebug_connect(struct udebug *ctx, const char *path)
{
	struct udebug_remote_buf *rb;
	struct udebug_buf *buf;

	if (ctx->fd.fd >= 0)
		close(ctx->fd.fd);
	ctx->fd.fd = -1;

	if (!path)
		path = UDEBUG_SOCK_NAME;

	ctx->fd.fd = usock(USOCK_UNIX, path, NULL);
	if (ctx->fd.fd < 0)
		return -1;

	/* re-announce local rings and re-arm polling for remote rings */
	list_for_each_entry(buf, &ctx->local_rings, list)
		__udebug_buf_add(ctx, buf);

	avl_for_each_element(&ctx->remote_rings, rb, node) {
		if (!rb->poll)
			continue;

		rb->poll = false;
		udebug_remote_buf_set_poll(ctx, rb, true);
	}

	return 0;
}

static bool
udebug_recv_msg(struct udebug *ctx, struct udebug_client_msg *msg, int *fd,
		bool wait)
{
	struct iovec iov = {
		.iov_base = msg,
		.iov_len = sizeof(*msg)
	};
	int ret;

	ret = recv_retry(ctx->fd.fd, &iov, wait, fd);
	if (ret == -2)
		uloop_fd_delete(&ctx->fd);

	return ret == sizeof(*msg);
}

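/*
 * Drain incoming messages from udebugd.  CL_MSG_RING_NOTIFY messages are
 * dispatched to the registered notify_cb for the matching remote ring (after
 * re-arming that ring's notify mask); the first message of any other type is
 * returned to the caller.
 */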
struct udebug_client_msg *__udebug_poll(struct udebug *ctx, int *fd, bool wait)
{
	static struct udebug_client_msg msg = {};

	while (udebug_recv_msg(ctx, &msg, fd, wait)) {
		struct udebug_remote_buf *rb;
		void *key;

		if (msg.type != CL_MSG_RING_NOTIFY)
			return &msg;

		if (fd && *fd >= 0)
			close(*fd);

		if (!ctx->notify_cb)
			continue;

		key = (void *)(uintptr_t)msg.id;
		rb = avl_find_element(&ctx->remote_rings, key, rb, node);
		if (!rb || !rb->poll)
			continue;

		if (ctx->poll_handle >= 0)
			__atomic_fetch_or(&rb->buf.hdr->notify,
					  1UL << ctx->poll_handle,
					  __ATOMIC_RELAXED);
		ctx->notify_cb(ctx, rb);
	}

	return NULL;
}

void udebug_poll(struct udebug *ctx)
{
	while (__udebug_poll(ctx, NULL, false));
}

static void udebug_fd_cb(struct uloop_fd *fd, unsigned int events)
{
	struct udebug *ctx = container_of(fd, struct udebug, fd);

	if (fd->eof)
		uloop_fd_delete(fd);

	udebug_poll(ctx);
}

void udebug_add_uloop(struct udebug *ctx)
{
	if (ctx->fd.registered)
		return;

	ctx->fd.cb = udebug_fd_cb;
	uloop_fd_add(&ctx->fd, ULOOP_READ);
}

void __udebug_disconnect(struct udebug *ctx, bool reconnect)
{
	uloop_fd_delete(&ctx->fd);
	close(ctx->fd.fd);
	ctx->fd.fd = -1;
	ctx->poll_handle = -1;
	if (ctx->reconnect.cb && reconnect)
		uloop_timeout_set(&ctx->reconnect, 1);
}

void udebug_free(struct udebug *ctx)
{
	struct udebug_remote_buf *rb, *tmp;
	struct udebug_buf *buf;

	free(ctx->socket_path);
	ctx->socket_path = NULL;

	__udebug_disconnect(ctx, false);
	uloop_timeout_cancel(&ctx->reconnect);

	while (!list_empty(&ctx->local_rings)) {
		buf = list_first_entry(&ctx->local_rings, struct udebug_buf, list);
		udebug_buf_free(buf);
	}

	avl_for_each_element_safe(&ctx->remote_rings, rb, node, tmp)
		udebug_remote_buf_unmap(ctx, rb);
}