/*
 * udebug - debug ring buffer library
 *
 * Copyright (C) 2023 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <poll.h>
#include <time.h>
#include "udebug-priv.h"
#include "usock.h"

#define ALIGN(i, sz) (((i) + (sz) - 1) & ~((sz) - 1))

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#define UDEBUG_MIN_ALLOC_LEN 128
static struct blob_buf b;

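/*
 * Fill the last six characters of a name template with pseudo-random
 * letters derived from the current time (similar to mkstemp() templates).
 */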
static void __randname(char *template)
{
	int i;
	struct timespec ts;
	unsigned long r;

	clock_gettime(CLOCK_REALTIME, &ts);
	r = ts.tv_sec + ts.tv_nsec;
	for (i = 0; i < 6; i++, r >>= 5)
		template[i] = 'A' + (r & 15) + (r & 16) * 2;
}

int udebug_id_cmp(const void *k1, const void *k2, void *ptr)
{
	uint32_t id1 = (uint32_t)(uintptr_t)k1, id2 = (uint32_t)(uintptr_t)k2;
	if (id1 < id2)
		return -1;
	else if (id1 > id2)
		return 1;
	return 0;
}

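/*
 * Create an anonymous shared memory file descriptor: repeatedly try
 * randomized names until shm_open() succeeds, then immediately unlink the
 * name so that only the file descriptor remains.
 */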
static inline int
shm_open_anon(char *name)
{
	char *template = name + strlen(name) - 6;
	int fd;

	if (template < name || memcmp(template, "XXXXXX", 6) != 0)
		return -1;

	for (int i = 0; i < 100; i++) {
		__randname(template);
		fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
		if (fd >= 0) {
			if (shm_unlink(name) < 0) {
				close(fd);
				continue;
			}
			return fd;
		}

		if (fd < 0 && errno != EEXIST)
			return -1;
	}

	return -1;
}

static void __udebug_disconnect(struct udebug *ctx, bool reconnect)
{
	uloop_fd_delete(&ctx->fd);
	close(ctx->fd.fd);
	ctx->fd.fd = -1;
	ctx->poll_handle = -1;
	if (ctx->reconnect.cb && reconnect)
		uloop_timeout_set(&ctx->reconnect, 1);
}

uint64_t udebug_timestamp(void)
{
	struct timespec ts;
	uint64_t val;

	clock_gettime(CLOCK_REALTIME, &ts);

	val = ts.tv_sec;
	val *= UDEBUG_TS_SEC;
	val += ts.tv_nsec / 1000;

	return val;
}

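/*
 * Map the ring buffer into memory. The data area is mapped twice, back to
 * back, so that entries wrapping around the end of the buffer can still be
 * read and written as one contiguous block.
 */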
static int
__udebug_buf_map(struct udebug_buf *buf)
{
	void *ptr, *ptr2;

	ptr = mmap(NULL, buf->head_size + 2 * buf->data_size, PROT_NONE,
		   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (ptr == MAP_FAILED)
		return -1;

	ptr2 = mmap(ptr, buf->head_size + buf->data_size,
		    PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, buf->fd, 0);
	if (ptr2 != ptr)
		goto err_unmap;

	ptr2 = mmap(ptr + buf->head_size + buf->data_size, buf->data_size,
		    PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, buf->fd,
		    buf->head_size);
	if (ptr2 != ptr + buf->head_size + buf->data_size)
		goto err_unmap;

	buf->hdr = ptr;
	buf->data = ptr + buf->head_size;
	return 0;

err_unmap:
	munmap(ptr, buf->head_size + 2 * buf->data_size);
	return -1;
}

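/*
 * Send the given io vector over the socket, retrying on EAGAIN/EINTR until
 * everything has been written. If sock_fd is valid, it is passed along with
 * the first chunk of data as an SCM_RIGHTS ancillary message.
 */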
static int
writev_retry(int fd, struct iovec *iov, int iov_len, int sock_fd)
{
	uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct msghdr msghdr = { 0 };
	struct cmsghdr *cmsg;
	int len = 0;
	int *pfd;

	msghdr.msg_iov = iov,
	msghdr.msg_iovlen = iov_len,
	msghdr.msg_control = fd_buf;
	msghdr.msg_controllen = sizeof(fd_buf);

	cmsg = CMSG_FIRSTHDR(&msghdr);
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));

	pfd = (int *) CMSG_DATA(cmsg);
	msghdr.msg_controllen = cmsg->cmsg_len;

	do {
		ssize_t cur_len;

		if (sock_fd < 0) {
			msghdr.msg_control = NULL;
			msghdr.msg_controllen = 0;
		} else {
			*pfd = sock_fd;
		}

		cur_len = sendmsg(fd, &msghdr, 0);
		if (cur_len < 0) {
			struct pollfd pfd = {
				.fd = fd,
				.events = POLLOUT
			};

			switch (errno) {
			case EAGAIN:
				poll(&pfd, 1, -1);
				break;
			case EINTR:
				break;
			default:
				return -1;
			}
			continue;
		}

		if (len > 0)
			sock_fd = -1;

		len += cur_len;
		while (cur_len >= (ssize_t) iov->iov_len) {
			cur_len -= iov->iov_len;
			iov_len--;
			iov++;
			if (!iov_len)
				return len;
		}
		iov->iov_base += cur_len;
		iov->iov_len -= cur_len;
		msghdr.msg_iov = iov;
		msghdr.msg_iovlen = iov_len;
	} while (1);

	/* Should never reach here */
	return -1;
}

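/*
 * Receive exactly iov->iov_len bytes from the socket, polling with a
 * timeout between partial reads. If recv_fd is non-NULL, a file descriptor
 * received via SCM_RIGHTS is stored there; otherwise any received
 * descriptor is closed. Returns -2 if the connection was closed or failed.
 */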
static int
recv_retry(int fd, struct iovec *iov, bool wait, int *recv_fd)
{
	uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct msghdr msghdr = { 0 };
	struct cmsghdr *cmsg;
	int total = 0;
	int bytes;
	int *pfd;

	msghdr.msg_iov = iov,
	msghdr.msg_iovlen = 1,
	msghdr.msg_control = fd_buf;
	msghdr.msg_controllen = sizeof(fd_buf);

	cmsg = CMSG_FIRSTHDR(&msghdr);
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));

	pfd = (int *) CMSG_DATA(cmsg);

	while (iov->iov_len > 0) {
		if (recv_fd) {
			msghdr.msg_control = fd_buf;
			msghdr.msg_controllen = cmsg->cmsg_len;
		} else {
			msghdr.msg_control = NULL;
			msghdr.msg_controllen = 0;
		}

		*pfd = -1;
		bytes = recvmsg(fd, &msghdr, 0);
		if (!bytes)
			return -2;
		if (bytes < 0) {
			bytes = 0;
			if (errno == EINTR)
				continue;

			if (errno != EAGAIN)
				return -2;
		}
		if (!wait && !bytes)
			return 0;

		if (recv_fd)
			*recv_fd = *pfd;
		else if (*pfd >= 0)
			close(*pfd);

		if (bytes > 0)
			recv_fd = NULL;

		wait = true;
		iov->iov_len -= bytes;
		iov->iov_base += bytes;
		total += bytes;

		if (iov->iov_len > 0) {
			struct pollfd pfd = {
				.fd = fd,
				.events = POLLIN
			};
			int ret;
			do {
				ret = poll(&pfd, 1, UDEBUG_TIMEOUT);
			} while (ret < 0 && errno == EINTR);

			if (!(pfd.revents & POLLIN))
				return -1;
		}
	}

	return total;
}

static void
udebug_send_msg(struct udebug *ctx, struct udebug_client_msg *msg,
		struct blob_attr *meta, int fd)
{
	struct iovec iov[2] = {
		{ .iov_base = msg, .iov_len = sizeof(*msg) },
		{}
	};

	if (!meta) {
		blob_buf_init(&b, 0);
		meta = b.head;
	}

	iov[1].iov_base = meta;
	iov[1].iov_len = blob_pad_len(meta);
	writev_retry(ctx->fd.fd, iov, ARRAY_SIZE(iov), fd);
}

static bool
udebug_recv_msg(struct udebug *ctx, struct udebug_client_msg *msg, int *fd,
		bool wait)
{
	struct iovec iov = {
		.iov_base = msg,
		.iov_len = sizeof(*msg)
	};
	int ret;

	ret = recv_retry(ctx->fd.fd, &iov, wait, fd);
	if (ret == -2)
		__udebug_disconnect(ctx, true);

	return ret == sizeof(*msg);
}

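/*
 * Read messages from the udebugd socket. Ring notification messages are
 * dispatched to the registered notify callback; the first message of any
 * other type is returned to the caller.
 */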
static struct udebug_client_msg *
__udebug_poll(struct udebug *ctx, int *fd, bool wait)
{
	static struct udebug_client_msg msg = {};

	while (udebug_recv_msg(ctx, &msg, fd, wait)) {
		struct udebug_remote_buf *rb;
		void *key;

		if (msg.type != CL_MSG_RING_NOTIFY)
			return &msg;

		if (fd && *fd >= 0)
			close(*fd);

		if (!ctx->notify_cb)
			continue;

		key = (void *)(uintptr_t)msg.id;
		rb = avl_find_element(&ctx->remote_rings, key, rb, node);
		if (!rb || !rb->poll)
			continue;

		if (ctx->poll_handle >= 0)
			__atomic_fetch_or(&rb->buf.hdr->notify,
					  1UL << ctx->poll_handle,
					  __ATOMIC_RELAXED);
		ctx->notify_cb(ctx, rb);
	}

	return NULL;
}

static struct udebug_client_msg *
udebug_wait_for_response(struct udebug *ctx, struct udebug_client_msg *msg, int *rfd)
{
	int type = msg->type;
	int fd = -1;

	do {
		if (fd >= 0)
			close(fd);
		fd = -1;
		msg = __udebug_poll(ctx, &fd, true);
	} while (msg && msg->type != type);
	if (!msg)
		return NULL;

	if (rfd)
		*rfd = fd;
	else if (fd >= 0)
		close(fd);

	return msg;
}

static void
udebug_buf_msg(struct udebug_buf *buf, enum udebug_client_msg_type type)
{
	struct udebug_client_msg msg = {
		.type = type,
		.id = buf->id,
	};

	udebug_send_msg(buf->ctx, &msg, NULL, -1);
	udebug_wait_for_response(buf->ctx, &msg, NULL);
}

static size_t __udebug_headsize(unsigned int ring_size, unsigned int page_size)
{
	ring_size *= sizeof(struct udebug_ptr);
	return ALIGN(sizeof(struct udebug_hdr) + ring_size, page_size);
}

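/*
 * Attach to an existing ring buffer via its shared memory file descriptor.
 * The expected ring and data sizes must match the values stored in the
 * shared header, otherwise the mapping is discarded and -1 is returned.
 */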
int udebug_buf_open(struct udebug_buf *buf, int fd, uint32_t ring_size, uint32_t data_size)
{
	INIT_LIST_HEAD(&buf->list);
	buf->fd = fd;
	buf->ring_size = ring_size;
	buf->head_size = __udebug_headsize(ring_size, sysconf(_SC_PAGESIZE));
	buf->data_size = data_size;

	if (buf->ring_size > (1U << 24) || buf->data_size > (1U << 29))
		return -1;

	if (__udebug_buf_map(buf))
		return -1;

	if (buf->ring_size != buf->hdr->ring_size ||
	    buf->data_size != buf->hdr->data_size) {
		munmap(buf->hdr, buf->head_size + 2 * buf->data_size);
		buf->hdr = NULL;
		return -1;
	}

	return 0;
}

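/*
 * Allocate and map a new ring buffer backed by an anonymous shared memory
 * file. Both the entry count and the data size are rounded up to powers of
 * two; the entry count is additionally grown to use up any slack left in
 * the page-aligned header area.
 */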
int udebug_buf_init(struct udebug_buf *buf, size_t entries, size_t size)
{
	uint32_t pagesz = sysconf(_SC_PAGESIZE);
	char filename[] = "/udebug.XXXXXX";
	unsigned int order = 12;
	uint8_t ring_order = 5;
	size_t head_size;
	int fd;

	INIT_LIST_HEAD(&buf->list);
	if (size < pagesz)
		size = pagesz;
	while (size > 1U << order)
		order++;
	size = 1 << order;
	while (entries > 1U << ring_order)
		ring_order++;
	entries = 1 << ring_order;

	if (size > (1U << 29) || entries > (1U << 24))
		return -1;

	head_size = __udebug_headsize(entries, pagesz);
	while (ALIGN(sizeof(*buf->hdr) + (entries * 2) * sizeof(struct udebug_ptr), pagesz) == head_size)
		entries *= 2;

	fd = shm_open_anon(filename);
	if (fd < 0)
		return -1;

	if (ftruncate(fd, head_size + size) < 0)
		goto err_close;

	buf->head_size = head_size;
	buf->data_size = size;
	buf->ring_size = entries;
	buf->fd = fd;

	if (__udebug_buf_map(buf))
		goto err_close;

	buf->hdr->ring_size = entries;
	buf->hdr->data_size = size;

	/* ensure hdr changes are visible */
	__sync_synchronize();

	return 0;

err_close:
	close(fd);
	return -1;
}

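/*
 * Reserve len bytes at offset ofs in the data area and return a pointer to
 * it. data_used is updated and made visible before the caller may clobber
 * the data behind the returned pointer.
 */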
static void *udebug_buf_alloc(struct udebug_buf *buf, uint32_t ofs, uint32_t len)
{
	struct udebug_hdr *hdr = buf->hdr;

	hdr->data_used = u32_max(hdr->data_used, ofs + len + 1);

	/* ensure that data_used update is visible before clobbering data */
	__sync_synchronize();

	return udebug_buf_ptr(buf, ofs);
}

uint64_t udebug_buf_flags(struct udebug_buf *buf)
{
	struct udebug_hdr *hdr = buf->hdr;
	uint64_t flags;

	if (!hdr)
		return 0;

	flags = hdr->flags[0];
	if (sizeof(flags) != sizeof(uintptr_t))
		flags |= ((uint64_t)hdr->flags[1]) << 32;

	return flags;
}

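/*
 * Start a new entry at the current ring head, using the given timestamp.
 * The entry stays invisible to readers until udebug_entry_add() is called.
 */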
void udebug_entry_init_ts(struct udebug_buf *buf, uint64_t timestamp)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;

	if (!hdr)
		return;

	ptr = udebug_ring_ptr(hdr, hdr->head);
	ptr->start = hdr->data_head;
	ptr->len = 0;
	ptr->timestamp = timestamp;
}

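/*
 * Append len bytes to the current entry and return a pointer to the newly
 * reserved space. If data is NULL, the space is only reserved and the
 * caller is expected to fill it. A single entry is limited to half of the
 * data area.
 */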
void *udebug_entry_append(struct udebug_buf *buf, const void *data, uint32_t len)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;
	uint32_t ofs;
	void *ret;

	if (!hdr)
		return NULL;

	ptr = udebug_ring_ptr(hdr, hdr->head);
	ofs = ptr->start + ptr->len;
	if (ptr->len + len > buf->data_size / 2)
		return NULL;

	ret = udebug_buf_alloc(buf, ofs, len);
	if (data)
		memcpy(ret, data, len);
	ptr->len += len;

	return ret;
}

uint16_t udebug_entry_trim(struct udebug_buf *buf, uint16_t len)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr = udebug_ring_ptr(hdr, hdr->head);

	if (len)
		ptr->len -= len;

	return ptr->len;
}

void udebug_entry_set_length(struct udebug_buf *buf, uint16_t len)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr = udebug_ring_ptr(hdr, hdr->head);

	ptr->len = len;
}

int udebug_entry_printf(struct udebug_buf *buf, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = udebug_entry_vprintf(buf, fmt, ap);
	va_end(ap);

	return ret;
}

int udebug_entry_vprintf(struct udebug_buf *buf, const char *fmt, va_list ap)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;
	uint32_t ofs;
	uint32_t len;
	va_list ap2;
	char *str;

	if (!hdr)
		return -1;

	ptr = udebug_ring_ptr(hdr, hdr->head);
	ofs = ptr->start + ptr->len;
	if (ptr->len > buf->data_size / 2)
		return -1;

	/* try to format into a minimum-sized allocation first */
	str = udebug_buf_alloc(buf, ofs, UDEBUG_MIN_ALLOC_LEN);
	va_copy(ap2, ap);
	len = vsnprintf(str, UDEBUG_MIN_ALLOC_LEN, fmt, ap2);
	va_end(ap2);
	if (len < UDEBUG_MIN_ALLOC_LEN)
		goto out;

	if (ptr->len + len > buf->data_size / 2)
		return -1;

	/* output was truncated: allocate enough space and format again */
	udebug_buf_alloc(buf, ofs, len + 1);
	len = vsnprintf(str, len + 1, fmt, ap);

out:
	ptr->len += len;
	return 0;
}

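/*
 * Commit the current entry: NUL-terminate its data, advance the ring head
 * and notify any readers that have subscribed to this ring.
 */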
void udebug_entry_add(struct udebug_buf *buf)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr = udebug_ring_ptr(hdr, hdr->head);
	uint32_t notify;
	uint8_t *data;

	/* ensure strings are always 0-terminated */
	data = udebug_buf_ptr(buf, ptr->start + ptr->len);
	*data = 0;
	hdr->data_head = ptr->start + ptr->len + 1;

	/* ensure that all data changes are visible before advancing head */
	__sync_synchronize();

	u32_set(&hdr->head, u32_get(&hdr->head) + 1);
	if (!u32_get(&hdr->head))
		u32_set(&hdr->head_hi, u32_get(&hdr->head_hi) + 1);

	/* ensure that head change is visible */
	__sync_synchronize();

	notify = __atomic_exchange_n(&hdr->notify, 0, __ATOMIC_RELAXED);
	if (notify) {
		struct udebug_client_msg msg = {
			.type = CL_MSG_RING_NOTIFY,
			.id = buf->id,
			.notify_mask = notify,
		};
		blob_buf_init(&b, 0);

		udebug_send_msg(buf->ctx, &msg, b.head, -1);
	}
}

void udebug_buf_free(struct udebug_buf *buf)
{
	struct udebug *ctx = buf->ctx;

	if (!list_empty(&buf->list) && buf->list.prev)
		list_del(&buf->list);

	if (ctx && ctx->fd.fd >= 0)
		udebug_buf_msg(buf, CL_MSG_RING_REMOVE);

	munmap(buf->hdr, buf->head_size + 2 * buf->data_size);
	close(buf->fd);
	memset(buf, 0, sizeof(*buf));
}

static void
__udebug_buf_add(struct udebug *ctx, struct udebug_buf *buf)
{
	struct udebug_client_msg msg = {
		.type = CL_MSG_RING_ADD,
		.id = buf->id,
		.ring_size = buf->hdr->ring_size,
		.data_size = buf->hdr->data_size,
	};
	const struct udebug_buf_meta *meta = buf->meta;
	void *c;

	blob_buf_init(&b, 0);
	blobmsg_add_string(&b, "name", meta->name);
	c = blobmsg_open_array(&b, "flags");
	for (size_t i = 0; i < meta->n_flags; i++) {
		const struct udebug_buf_flag *flag = &meta->flags[i];
		void *e = blobmsg_open_array(&b, NULL);
		blobmsg_add_string(&b, NULL, flag->name);
		blobmsg_add_u64(&b, NULL, flag->mask);
		blobmsg_close_array(&b, e);
	}
	blobmsg_close_array(&b, c);

	udebug_send_msg(ctx, &msg, b.head, buf->fd);
	udebug_wait_for_response(ctx, &msg, NULL);
}

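/*
 * Register a local ring buffer with the udebug context and announce it to
 * udebugd if a connection is already established. A typical producer might
 * set up a ring roughly like this (sketch only, error handling omitted,
 * sizes chosen arbitrarily):
 *
 *	static struct udebug ud;
 *	static struct udebug_buf udb;
 *	static const struct udebug_buf_meta meta = {
 *		.name = "example-log",
 *	};
 *
 *	udebug_init(&ud);
 *	udebug_auto_connect(&ud, NULL);
 *	udebug_buf_init(&udb, 1024, 64 * 1024);
 *	udebug_buf_add(&ud, &udb, &meta);
 *
 *	udebug_entry_init_ts(&udb, udebug_timestamp());
 *	udebug_entry_printf(&udb, "hello %s", "world");
 *	udebug_entry_add(&udb);
 */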
int udebug_buf_add(struct udebug *ctx, struct udebug_buf *buf,
		   const struct udebug_buf_meta *meta)
{
	list_add_tail(&buf->list, &ctx->local_rings);
	buf->ctx = ctx;
	buf->meta = meta;
	buf->id = ctx->next_id++;
	buf->hdr->format = meta->format;
	buf->hdr->sub_format = meta->sub_format;

	if (ctx->fd.fd >= 0)
		__udebug_buf_add(ctx, buf);

	return 0;
}

void udebug_init(struct udebug *ctx)
{
	INIT_LIST_HEAD(&ctx->local_rings);
	avl_init(&ctx->remote_rings, udebug_id_cmp, true, NULL);
	ctx->fd.fd = -1;
	ctx->poll_handle = -1;
}

static void udebug_reconnect_cb(struct uloop_timeout *t)
{
	struct udebug *ctx = container_of(t, struct udebug, reconnect);

	if (udebug_connect(ctx, ctx->socket_path) < 0) {
		uloop_timeout_set(&ctx->reconnect, 1000);
		return;
	}

	udebug_add_uloop(ctx);
}

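/*
 * Connect to udebugd and keep retrying in the background: if the
 * connection fails or is lost, a uloop timer periodically attempts to
 * reconnect. Passing a NULL path selects the default socket.
 */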
void udebug_auto_connect(struct udebug *ctx, const char *path)
{
	free(ctx->socket_path);
	ctx->reconnect.cb = udebug_reconnect_cb;
	ctx->socket_path = path ? strdup(path) : NULL;
	if (ctx->fd.fd >= 0)
		return;

	udebug_reconnect_cb(&ctx->reconnect);
}

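/*
 * (Re)connect to the udebugd control socket. Any previously added local
 * rings are re-announced and polling is re-enabled for remote rings that
 * were being watched before the connection was lost.
 */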
int udebug_connect(struct udebug *ctx, const char *path)
{
	struct udebug_remote_buf *rb;
	struct udebug_buf *buf;

	if (ctx->fd.fd >= 0)
		close(ctx->fd.fd);
	ctx->fd.fd = -1;

	if (!path)
		path = UDEBUG_SOCK_NAME;

	ctx->fd.fd = usock(USOCK_UNIX, path, NULL);
	if (ctx->fd.fd < 0)
		return -1;

	list_for_each_entry(buf, &ctx->local_rings, list)
		__udebug_buf_add(ctx, buf);

	avl_for_each_element(&ctx->remote_rings, rb, node) {
		if (!rb->poll)
			continue;

		rb->poll = false;
		udebug_remote_buf_set_poll(ctx, rb, true);
	}

	return 0;
}

void udebug_poll(struct udebug *ctx)
{
	while (__udebug_poll(ctx, NULL, false));
}

struct udebug_client_msg *
udebug_send_and_wait(struct udebug *ctx, struct udebug_client_msg *msg, int *rfd)
{
	udebug_send_msg(ctx, msg, NULL, -1);

	return udebug_wait_for_response(ctx, msg, rfd);
}

static void udebug_fd_cb(struct uloop_fd *fd, unsigned int events)
{
	struct udebug *ctx = container_of(fd, struct udebug, fd);

	if (fd->eof)
		__udebug_disconnect(ctx, true);

	udebug_poll(ctx);
}

void udebug_add_uloop(struct udebug *ctx)
{
	if (ctx->fd.registered)
		return;

	ctx->fd.cb = udebug_fd_cb;
	uloop_fd_add(&ctx->fd, ULOOP_READ);
}

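/*
 * Tear down the context: disconnect from udebugd, release all local ring
 * buffers and unmap any remote rings that are still mapped.
 */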
void udebug_free(struct udebug *ctx)
{
	struct udebug_remote_buf *rb, *tmp;
	struct udebug_buf *buf;

	free(ctx->socket_path);
	ctx->socket_path = NULL;

	__udebug_disconnect(ctx, false);
	uloop_timeout_cancel(&ctx->reconnect);

	while (!list_empty(&ctx->local_rings)) {
		buf = list_first_entry(&ctx->local_rings, struct udebug_buf, list);
		udebug_buf_free(buf);
	}

	avl_for_each_element_safe(&ctx->remote_rings, rb, node, tmp)
		udebug_remote_buf_unmap(ctx, rb);
}