/*
 * gitweb scrape residue (unrelated commit title): "ustream: prevent recursive calls to the read callback"
 * source: [project/libubox.git] / udebug.c
 */
1 /*
2 * udebug - debug ring buffer library
3 *
4 * Copyright (C) 2023 Felix Fietkau <nbd@nbd.name>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 #define _GNU_SOURCE
19 #include <sys/types.h>
20 #include <sys/mman.h>
21 #include <sys/socket.h>
22 #include <unistd.h>
23 #include <string.h>
24 #include <stdio.h>
25 #include <fcntl.h>
26 #include <errno.h>
27 #include <poll.h>
28 #include <time.h>
29 #include "udebug-priv.h"
30 #include "usock.h"
31
32 #define ALIGN(i, sz) (((i) + (sz) - 1) & ~((sz) - 1))
33
34 #ifndef MAP_ANONYMOUS
35 #define MAP_ANONYMOUS MAP_ANON
36 #endif
37
38 #define UDEBUG_MIN_ALLOC_LEN 128
39 static struct blob_buf b;
40 static unsigned int page_size;
41
42 static void __randname(char *template)
43 {
44 int i;
45 struct timespec ts;
46 unsigned long r;
47
48 clock_gettime(CLOCK_REALTIME, &ts);
49 r = ts.tv_sec + ts.tv_nsec;
50 for (i=0; i<6; i++, r>>=5)
51 template[i] = 'A'+(r&15)+(r&16)*2;
52 }
53
/* avl comparison callback: order ring buffers by their numeric id */
int udebug_id_cmp(const void *k1, const void *k2, void *ptr)
{
	uint32_t left = (uint32_t)(uintptr_t)k1;
	uint32_t right = (uint32_t)(uintptr_t)k2;

	/* -1 / 0 / 1 without branching */
	return (left > right) - (left < right);
}
63
/*
 * Create an anonymous shared-memory file descriptor.
 *
 * @name must end in "XXXXXX"; the template is replaced with random
 * characters until an unused shm name is found (up to 100 attempts).
 * The object is unlinked immediately, so only the returned fd refers
 * to it. Returns the fd on success, -1 on failure.
 */
static inline int
shm_open_anon(char *name)
{
	char *template = name + strlen(name) - 6;
	int fd;

	if (template < name || memcmp(template, "XXXXXX", 6) != 0)
		return -1;

	for (int i = 0; i < 100; i++) {
		__randname(template);
		fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
		if (fd >= 0) {
			if (shm_unlink(name) < 0) {
				close(fd);
				continue;
			}
			return fd;
		}

		/* fd is known negative here (the `fd < 0 &&` in the original
		 * was redundant); retry only on name collisions */
		if (errno != EEXIST)
			return -1;
	}

	return -1;
}
90
91 static void __udebug_disconnect(struct udebug *ctx, bool reconnect)
92 {
93 uloop_fd_delete(&ctx->fd);
94 close(ctx->fd.fd);
95 ctx->fd.fd = -1;
96 ctx->poll_handle = -1;
97 if (ctx->reconnect.cb && reconnect)
98 uloop_timeout_set(&ctx->reconnect, 1);
99 }
100
101 uint64_t udebug_timestamp(void)
102 {
103 struct timespec ts;
104 uint64_t val;
105
106 clock_gettime(CLOCK_REALTIME, &ts);
107
108 val = ts.tv_sec;
109 val *= UDEBUG_TS_SEC;
110 val += ts.tv_nsec / 1000;
111
112 return val;
113 }
114
/*
 * Map the shared-memory ring described by @buf from file descriptor @fd.
 *
 * Reserves one contiguous PROT_NONE address range, then overlays it with
 * two MAP_FIXED mappings: the header + data area at offset 0, and the
 * data area a second time directly behind the first. The duplicate data
 * mapping lets entries that wrap around the end of the ring be accessed
 * as a single linear span.
 *
 * On MIPS an extra page of slack is reserved so the mapping can be
 * re-aligned to the (inflated, see udebug_init_page_size) page_size —
 * working around data cache aliasing.
 *
 * Returns 0 on success, -1 on failure (nothing stays usable on error).
 */
static int
__udebug_buf_map(struct udebug_buf *buf, int fd)
{
	unsigned int pad = 0;
	void *ptr, *ptr2;

#ifdef mips
	pad = page_size;
#endif
	/* reserve address space for header + two copies of the data area */
	ptr = mmap(NULL, buf->head_size + 2 * buf->data_size + pad, PROT_NONE,
		   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (ptr == MAP_FAILED)
		return -1;

#ifdef mips
	ptr = (void *)ALIGN((unsigned long)ptr, page_size);
#endif

	/* header + first copy of the data area */
	ptr2 = mmap(ptr, buf->head_size + buf->data_size,
		    PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, fd, 0);
	if (ptr2 != ptr)
		goto err_unmap;

	/* second copy of the data area, contiguous with the first */
	ptr2 = mmap(ptr + buf->head_size + buf->data_size, buf->data_size,
		    PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
		    buf->head_size);
	if (ptr2 != ptr + buf->head_size + buf->data_size)
		goto err_unmap;

	buf->hdr = ptr;
	buf->data = ptr + buf->head_size;
	return 0;

err_unmap:
	munmap(ptr, buf->head_size + 2 * buf->data_size);
	return -1;
}
152
/*
 * Write the full iovec to socket @fd, retrying until everything has been
 * sent. If @sock_fd is valid (>= 0) it is passed to the peer as an
 * SCM_RIGHTS ancillary fd together with the first bytes.
 *
 * Blocks (via poll) on EAGAIN, retries on EINTR.
 * Returns the total number of bytes written, or -1 on a hard error.
 */
static int
writev_retry(int fd, struct iovec *iov, int iov_len, int sock_fd)
{
	uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct msghdr msghdr = { 0 };
	struct cmsghdr *cmsg;
	int len = 0;
	int *pfd;

	msghdr.msg_iov = iov,
	msghdr.msg_iovlen = iov_len,
	msghdr.msg_control = fd_buf;
	msghdr.msg_controllen = sizeof(fd_buf);

	/* prepare the SCM_RIGHTS control message used for fd passing */
	cmsg = CMSG_FIRSTHDR(&msghdr);
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));

	pfd = (int *) CMSG_DATA(cmsg);
	msghdr.msg_controllen = cmsg->cmsg_len;

	do {
		ssize_t cur_len;

		if (sock_fd < 0) {
			/* no (more) fd to pass - drop the control data */
			msghdr.msg_control = NULL;
			msghdr.msg_controllen = 0;
		} else {
			*pfd = sock_fd;
		}

		cur_len = sendmsg(fd, &msghdr, 0);
		if (cur_len < 0) {
			struct pollfd pfd = {
				.fd = fd,
				.events = POLLOUT
			};

			switch(errno) {
			case EAGAIN:
				/* wait until the socket is writable again */
				poll(&pfd, 1, -1);
				break;
			case EINTR:
				break;
			default:
				return -1;
			}
			continue;
		}

		/* once any earlier bytes went out, stop re-sending the fd */
		if (len > 0)
			sock_fd = -1;

		len += cur_len;
		/* skip over fully-written iovec entries */
		while (cur_len >= (ssize_t) iov->iov_len) {
			cur_len -= iov->iov_len;
			iov_len--;
			iov++;
			if (!iov_len)
				return len;
		}
		/* advance into the partially-written entry and retry */
		iov->iov_base += cur_len;
		iov->iov_len -= cur_len;
		msghdr.msg_iov = iov;
		msghdr.msg_iovlen = iov_len;
	} while (1);

	/* Should never reach here */
	return -1;
}
224
/*
 * Receive exactly iov->iov_len bytes from socket @fd.
 *
 * If @recv_fd is non-NULL, an SCM_RIGHTS fd arriving with the first
 * chunk of data is stored there (or -1 if none); otherwise any received
 * fd is closed immediately. When @wait is false, returns 0 if no data
 * is available at all; after the first chunk it always waits (up to
 * UDEBUG_TIMEOUT between partial reads).
 *
 * Returns: number of bytes read, 0 (nothing available and !wait),
 * -1 on timeout, -2 on EOF or socket error.
 */
static int
recv_retry(int fd, struct iovec *iov, bool wait, int *recv_fd)
{
	uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct msghdr msghdr = { 0 };
	struct cmsghdr *cmsg;
	int total = 0;
	int bytes;
	int *pfd;

	msghdr.msg_iov = iov,
	msghdr.msg_iovlen = 1,
	msghdr.msg_control = fd_buf;
	msghdr.msg_controllen = sizeof(fd_buf);

	/* prepare room for receiving an SCM_RIGHTS fd */
	cmsg = CMSG_FIRSTHDR(&msghdr);
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));

	pfd = (int *) CMSG_DATA(cmsg);

	while (iov->iov_len > 0) {
		if (recv_fd) {
			msghdr.msg_control = fd_buf;
			msghdr.msg_controllen = cmsg->cmsg_len;
		} else {
			/* caller does not expect an fd - don't accept one */
			msghdr.msg_control = NULL;
			msghdr.msg_controllen = 0;
		}

		*pfd = -1;
		bytes = recvmsg(fd, &msghdr, 0);
		if (!bytes)
			return -2; /* EOF: peer closed the connection */
		if (bytes < 0) {
			bytes = 0;
			if (errno == EINTR)
				continue;

			if (errno != EAGAIN)
				return -2;
		}
		if (!wait && !bytes)
			return 0;

		if (recv_fd)
			*recv_fd = *pfd;
		else if (*pfd >= 0)
			close(*pfd);

		/* an fd can only accompany the first chunk of data */
		if (bytes > 0)
			recv_fd = NULL;

		wait = true;
		iov->iov_len -= bytes;
		iov->iov_base += bytes;
		total += bytes;

		if (iov->iov_len > 0) {
			/* partial read: wait for the remainder to arrive */
			struct pollfd pfd = {
				.fd = fd,
				.events = POLLIN
			};
			int ret;
			do {
				ret = poll(&pfd, 1, UDEBUG_TIMEOUT);
			} while (ret < 0 && errno == EINTR);

			if (!(pfd.revents & POLLIN))
				return -1;
		}
	}

	return total;
}
301
302 static void
303 udebug_send_msg(struct udebug *ctx, struct udebug_client_msg *msg,
304 struct blob_attr *meta, int fd)
305 {
306 struct iovec iov[2] = {
307 { .iov_base = msg, .iov_len = sizeof(*msg) },
308 {}
309 };
310
311 if (!meta) {
312 blob_buf_init(&b, 0);
313 meta = b.head;
314 }
315
316 iov[1].iov_base = meta;
317 iov[1].iov_len = blob_pad_len(meta);
318 writev_retry(ctx->fd.fd, iov, ARRAY_SIZE(iov), fd);
319 }
320
321 static bool
322 udebug_recv_msg(struct udebug *ctx, struct udebug_client_msg *msg, int *fd,
323 bool wait)
324 {
325 struct iovec iov = {
326 .iov_base = msg,
327 .iov_len = sizeof(*msg)
328 };
329 int ret;
330
331 ret = recv_retry(ctx->fd.fd, &iov, wait, fd);
332 if (ret == -2)
333 __udebug_disconnect(ctx, true);
334
335 return ret == sizeof(*msg);
336 }
337
/*
 * Receive messages until one needing the caller's attention arrives.
 *
 * CL_MSG_RING_NOTIFY messages are consumed internally: for a watched
 * remote ring the per-client notify bit is re-armed and notify_cb is
 * invoked. Any other message type is returned to the caller.
 * Returns NULL when no further (non-notify) message is available.
 *
 * NOTE: the returned message lives in static storage and is only valid
 * until the next call.
 */
static struct udebug_client_msg *
__udebug_poll(struct udebug *ctx, int *fd, bool wait)
{
	static struct udebug_client_msg msg = {};

	while (udebug_recv_msg(ctx, &msg, fd, wait)) {
		struct udebug_remote_buf *rb;
		void *key;

		if (msg.type != CL_MSG_RING_NOTIFY)
			return &msg;

		/* notify messages carry no useful fd - drop it */
		if (fd && *fd >= 0)
			close(*fd);

		if (!ctx->notify_cb)
			continue;

		key = (void *)(uintptr_t)msg.id;
		rb = avl_find_element(&ctx->remote_rings, key, rb, node);
		if (!rb || !rb->poll)
			continue;

		/* re-arm our notification bit before calling back */
		if (ctx->poll_handle >= 0)
			__atomic_fetch_or(&rb->buf.hdr->notify,
					  1UL << ctx->poll_handle,
					  __ATOMIC_RELAXED);
		ctx->notify_cb(ctx, rb);
	}

	return NULL;
}
370
371 static struct udebug_client_msg *
372 udebug_wait_for_response(struct udebug *ctx, struct udebug_client_msg *msg, int *rfd)
373 {
374 int type = msg->type;
375 int fd = -1;
376
377 do {
378 if (fd >= 0)
379 close(fd);
380 fd = -1;
381 msg = __udebug_poll(ctx, &fd, true);
382 } while (msg && msg->type != type);
383 if (!msg)
384 return NULL;
385
386 if (rfd)
387 *rfd = fd;
388 else if (fd >= 0)
389 close(fd);
390
391 return msg;
392 }
393
394 static void
395 udebug_buf_msg(struct udebug_buf *buf, enum udebug_client_msg_type type)
396 {
397 struct udebug_client_msg msg = {
398 .type = type,
399 .id = buf->id,
400 };
401
402 udebug_send_msg(buf->ctx, &msg, NULL, -1);
403 udebug_wait_for_response(buf->ctx, &msg, NULL);
404 }
405
406 static size_t __udebug_headsize(unsigned int ring_size)
407 {
408 ring_size *= sizeof(struct udebug_ptr);
409 return ALIGN(sizeof(struct udebug_hdr) + ring_size, page_size);
410 }
411
412 static void udebug_init_page_size(void)
413 {
414 if (page_size)
415 return;
416 page_size = sysconf(_SC_PAGESIZE);
417 #ifdef mips
418 /* leave extra alignment room to account for data cache aliases */
419 if (page_size < 32 * 1024)
420 page_size = 32 * 1024;
421 #endif
422 }
423
424 int udebug_buf_open(struct udebug_buf *buf, int fd, uint32_t ring_size, uint32_t data_size)
425 {
426 udebug_init_page_size();
427 INIT_LIST_HEAD(&buf->list);
428 buf->ring_size = ring_size;
429 buf->head_size = __udebug_headsize(ring_size);
430 buf->data_size = data_size;
431
432 if (buf->ring_size > (1U << 24) || buf->data_size > (1U << 29))
433 return -1;
434
435 if (__udebug_buf_map(buf, fd))
436 return -1;
437
438 if (buf->ring_size != buf->hdr->ring_size ||
439 buf->data_size != buf->hdr->data_size) {
440 munmap(buf->hdr, buf->head_size + 2 * buf->data_size);
441 buf->hdr = NULL;
442 return -1;
443 }
444
445 buf->fd = fd;
446
447 return 0;
448 }
449
/*
 * Allocate and map a new ring buffer backed by anonymous shared memory.
 *
 * @entries and @size are rounded up to powers of two (at least 32
 * entries / one page of data). Returns 0 on success, -1 on failure.
 */
int udebug_buf_init(struct udebug_buf *buf, size_t entries, size_t size)
{
	char filename[] = "/udebug.XXXXXX";
	unsigned int order = 12;
	uint8_t ring_order = 5;
	size_t head_size;
	int fd;

	udebug_init_page_size();
	INIT_LIST_HEAD(&buf->list);
	/* round the data area up to a power of two, at least one page */
	if (size < page_size)
		size = page_size;
	while(size > 1U << order)
		order++;
	size = 1 << order;
	/* round the entry count up to a power of two, at least 32 */
	while (entries > 1U << ring_order)
		ring_order++;
	entries = 1 << ring_order;

	if (size > (1U << 29) || entries > (1U << 24))
		return -1;

	head_size = __udebug_headsize(entries);
	/* use up page-alignment padding in the header for extra entries */
	while (ALIGN(sizeof(*buf->hdr) + (entries * 2) * sizeof(struct udebug_ptr), page_size) == head_size)
		entries *= 2;

	fd = shm_open_anon(filename);
	if (fd < 0)
		return -1;

	if (ftruncate(fd, head_size + size) < 0)
		goto err_close;

	buf->head_size = head_size;
	buf->data_size = size;
	buf->ring_size = entries;

	if (__udebug_buf_map(buf, fd))
		goto err_close;

	buf->fd = fd;
	buf->hdr->ring_size = entries;
	buf->hdr->data_size = size;

	/* ensure hdr changes are visible */
	__sync_synchronize();

	return 0;

err_close:
	close(fd);
	return -1;
}
503
/*
 * Reserve @len bytes (plus a terminator byte) at offset @ofs in the
 * data area and return a pointer to them. data_used tracks the high
 * watermark so readers know how much of the area is valid.
 */
static void *udebug_buf_alloc(struct udebug_buf *buf, uint32_t ofs, uint32_t len)
{
	struct udebug_hdr *hdr = buf->hdr;

	hdr->data_used = u32_max(hdr->data_used, ofs + len + 1);

	/* ensure that data_used update is visible before clobbering data */
	__sync_synchronize();

	return udebug_buf_ptr(buf, ofs);
}
515
516 uint64_t udebug_buf_flags(struct udebug_buf *buf)
517 {
518 struct udebug_hdr *hdr = buf->hdr;
519 uint64_t flags;
520
521 if (!hdr)
522 return 0;
523
524 flags = hdr->flags[0];
525 if (sizeof(flags) != sizeof(uintptr_t))
526 flags |= ((uint64_t)hdr->flags[1]) << 32;
527
528 return flags;
529 }
530
531 void udebug_entry_init_ts(struct udebug_buf *buf, uint64_t timestamp)
532 {
533 struct udebug_hdr *hdr = buf->hdr;
534 struct udebug_ptr *ptr;
535
536 if (!hdr)
537 return;
538
539 ptr = udebug_ring_ptr(hdr, hdr->head);
540 ptr->start = hdr->data_head;
541 ptr->len = 0;
542 ptr->timestamp = timestamp;
543 }
544
545 void *udebug_entry_append(struct udebug_buf *buf, const void *data, uint32_t len)
546 {
547 struct udebug_hdr *hdr = buf->hdr;
548 struct udebug_ptr *ptr;
549 uint32_t ofs;
550 void *ret;
551
552 if (!hdr)
553 return NULL;
554
555 ptr = udebug_ring_ptr(hdr, hdr->head);
556 ofs = ptr->start + ptr->len;
557 if (ptr->len + len > buf->data_size / 2)
558 return NULL;
559
560 ret = udebug_buf_alloc(buf, ofs, len);
561 if (data)
562 memcpy(ret, data, len);
563 ptr->len += len;
564
565 return ret;
566 }
567
568 uint16_t udebug_entry_trim(struct udebug_buf *buf, uint16_t len)
569 {
570 struct udebug_hdr *hdr = buf->hdr;
571 struct udebug_ptr *ptr;
572
573 if (!hdr)
574 return 0;
575
576 ptr = udebug_ring_ptr(hdr, hdr->head);
577 if (len)
578 ptr->len -= len;
579
580 return ptr->len;
581 }
582
583 void udebug_entry_set_length(struct udebug_buf *buf, uint16_t len)
584 {
585 struct udebug_hdr *hdr = buf->hdr;
586 struct udebug_ptr *ptr;
587
588 if (!hdr)
589 return;
590
591 ptr = udebug_ring_ptr(hdr, hdr->head);
592 ptr->len = len;
593 }
594
/*
 * Format a string into the current entry.
 * Returns 0 on success, -1 on error (forwarded from
 * udebug_entry_vprintf).
 */
int udebug_entry_printf(struct udebug_buf *buf, const char *fmt, ...)
{
	va_list ap;
	int ret;

	/* use int, not size_t: the vprintf result may be -1, and routing
	 * it through an unsigned type relied on implementation-defined
	 * narrowing to get back to -1 */
	va_start(ap, fmt);
	ret = udebug_entry_vprintf(buf, fmt, ap);
	va_end(ap);

	return ret;
}
606
607 int udebug_entry_vprintf(struct udebug_buf *buf, const char *fmt, va_list ap)
608 {
609 struct udebug_hdr *hdr = buf->hdr;
610 struct udebug_ptr *ptr;
611 uint32_t ofs;
612 uint32_t len;
613 va_list ap2;
614 char *str;
615
616 if (!hdr)
617 return -1;
618
619 ptr = udebug_ring_ptr(hdr, hdr->head);
620 ofs = ptr->start + ptr->len;
621 if (ptr->len > buf->data_size / 2)
622 return -1;
623
624 str = udebug_buf_alloc(buf, ofs, UDEBUG_MIN_ALLOC_LEN);
625 va_copy(ap2, ap);
626 len = vsnprintf(str, UDEBUG_MIN_ALLOC_LEN, fmt, ap2);
627 va_end(ap2);
628 if (len <= UDEBUG_MIN_ALLOC_LEN)
629 goto out;
630
631 if (ptr->len + len > buf->data_size / 2)
632 return -1;
633
634 udebug_buf_alloc(buf, ofs, len + 1);
635 len = vsnprintf(str, len, fmt, ap);
636
637 out:
638 ptr->len += len;
639 return 0;
640 }
641
/*
 * Commit the current entry to the ring and notify subscribed readers.
 *
 * Terminates the entry data with a NUL byte, advances data_head and the
 * split 64-bit entry counter (head_hi:head), then forwards the pending
 * notification mask to the daemon as a CL_MSG_RING_NOTIFY message.
 */
void udebug_entry_add(struct udebug_buf *buf)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;
	uint32_t notify;
	uint8_t *data;

	if (!hdr)
		return;

	ptr = udebug_ring_ptr(hdr, hdr->head);

	/* ensure strings are always 0-terminated */
	data = udebug_buf_ptr(buf, ptr->start + ptr->len);
	*data = 0;
	hdr->data_head = ptr->start + ptr->len + 1;

	/* ensure that all data changes are visible before advancing head */
	__sync_synchronize();

	u32_set(&hdr->head, u32_get(&hdr->head) + 1);
	/* carry into the high word on 32-bit wraparound */
	if (!u32_get(&hdr->head))
		u32_set(&hdr->head_hi, u32_get(&hdr->head_hi) + 1);

	/* ensure that head change is visible */
	__sync_synchronize();

	/* atomically consume the set of clients waiting for data */
	notify = __atomic_exchange_n(&hdr->notify, 0, __ATOMIC_RELAXED);
	if (notify) {
		struct udebug_client_msg msg = {
			.type = CL_MSG_RING_NOTIFY,
			.id = buf->id,
			.notify_mask = notify,
		};
		blob_buf_init(&b, 0);

		udebug_send_msg(buf->ctx, &msg, b.head, -1);
	}
}
/*
 * Release a local ring buffer: unlink it from its context, ask the
 * daemon to drop it (while the socket is still up), unmap the shared
 * memory, close the backing fd and clear the struct.
 */
void udebug_buf_free(struct udebug_buf *buf)
{
	struct udebug *ctx = buf->ctx;

	/* only unlink when the buffer was actually added to a list */
	if (!list_empty(&buf->list) && buf->list.prev)
		list_del(&buf->list);

	if (ctx && ctx->fd.fd >= 0)
		udebug_buf_msg(buf, CL_MSG_RING_REMOVE);

	munmap(buf->hdr, buf->head_size + 2 * buf->data_size);
	close(buf->fd);
	memset(buf, 0, sizeof(*buf));
}
695
696 static void
697 __udebug_buf_add(struct udebug *ctx, struct udebug_buf *buf)
698 {
699 struct udebug_client_msg msg = {
700 .type = CL_MSG_RING_ADD,
701 .id = buf->id,
702 .ring_size = buf->hdr->ring_size,
703 .data_size = buf->hdr->data_size,
704 };
705 const struct udebug_buf_meta *meta = buf->meta;
706 void *c;
707
708 blob_buf_init(&b, 0);
709 blobmsg_add_string(&b, "name", meta->name);
710 c = blobmsg_open_array(&b, "flags");
711 for (size_t i = 0; i < meta->n_flags; i++) {
712 const struct udebug_buf_flag *flag = &meta->flags[i];
713 void *e = blobmsg_open_array(&b, NULL);
714 blobmsg_add_string(&b, NULL, flag->name);
715 blobmsg_add_u64(&b, NULL, flag->mask);
716 blobmsg_close_array(&b, e);
717 }
718 blobmsg_close_array(&b, c);
719
720 udebug_send_msg(ctx, &msg, b.head, buf->fd);
721 udebug_wait_for_response(ctx, &msg, NULL);
722 }
723
724 int udebug_buf_add(struct udebug *ctx, struct udebug_buf *buf,
725 const struct udebug_buf_meta *meta)
726 {
727 if (!buf->hdr)
728 return -1;
729
730 list_add_tail(&buf->list, &ctx->local_rings);
731 buf->ctx = ctx;
732 buf->meta = meta;
733 buf->id = ctx->next_id++;
734 buf->hdr->format = meta->format;
735 buf->hdr->sub_format = meta->sub_format;
736
737 if (ctx->fd.fd >= 0)
738 __udebug_buf_add(ctx, buf);
739
740 return 0;
741 }
742
743 void udebug_init(struct udebug *ctx)
744 {
745 INIT_LIST_HEAD(&ctx->local_rings);
746 avl_init(&ctx->remote_rings, udebug_id_cmp, true, NULL);
747 ctx->fd.fd = -1;
748 ctx->poll_handle = -1;
749 }
750
751 static void udebug_reconnect_cb(struct uloop_timeout *t)
752 {
753 struct udebug *ctx = container_of(t, struct udebug, reconnect);
754
755 if (udebug_connect(ctx, ctx->socket_path) < 0) {
756 uloop_timeout_set(&ctx->reconnect, 1000);
757 return;
758 }
759
760 udebug_add_uloop(ctx);
761 }
762
763 void udebug_auto_connect(struct udebug *ctx, const char *path)
764 {
765 free(ctx->socket_path);
766 ctx->reconnect.cb = udebug_reconnect_cb;
767 ctx->socket_path = path ? strdup(path) : NULL;
768 if (ctx->fd.fd >= 0)
769 return;
770
771 udebug_reconnect_cb(&ctx->reconnect);
772 }
773
774 int udebug_connect(struct udebug *ctx, const char *path)
775 {
776 struct udebug_remote_buf *rb;
777 struct udebug_buf *buf;
778
779 if (ctx->fd.fd >= 0)
780 close(ctx->fd.fd);
781 ctx->fd.fd = -1;
782
783 if (!path)
784 path = UDEBUG_SOCK_NAME;
785
786 ctx->fd.fd = usock(USOCK_UNIX, path, NULL);
787 if (ctx->fd.fd < 0)
788 return -1;
789
790 list_for_each_entry(buf, &ctx->local_rings, list)
791 __udebug_buf_add(ctx, buf);
792
793 avl_for_each_element(&ctx->remote_rings, rb, node) {
794 if (!rb->poll)
795 continue;
796
797 rb->poll = false;
798 udebug_remote_buf_set_poll(ctx, rb, true);
799 }
800
801 return 0;
802 }
803
804 void udebug_poll(struct udebug *ctx)
805 {
806 while (__udebug_poll(ctx, NULL, false));
807 }
808
809 struct udebug_client_msg *
810 udebug_send_and_wait(struct udebug *ctx, struct udebug_client_msg *msg, int *rfd)
811 {
812 udebug_send_msg(ctx, msg, NULL, -1);
813
814 return udebug_wait_for_response(ctx, msg, rfd);
815 }
816
/*
 * uloop read callback: on EOF tear down the connection (scheduling a
 * reconnect), then process whatever messages are pending.
 */
static void udebug_fd_cb(struct uloop_fd *fd, unsigned int events)
{
	struct udebug *ctx = container_of(fd, struct udebug, fd);

	if (fd->eof)
		__udebug_disconnect(ctx, true);

	/* NOTE(review): after a disconnect this polls an fd of -1; the recv
	 * path treats the resulting error as a no-op - confirm intentional */
	udebug_poll(ctx);
}
826
827 void udebug_add_uloop(struct udebug *ctx)
828 {
829 if (ctx->fd.registered)
830 return;
831
832 ctx->fd.cb = udebug_fd_cb;
833 uloop_fd_add(&ctx->fd, ULOOP_READ);
834 }
835
836 void udebug_free(struct udebug *ctx)
837 {
838 struct udebug_remote_buf *rb, *tmp;
839 struct udebug_buf *buf;
840
841 free(ctx->socket_path);
842 ctx->socket_path = NULL;
843
844 __udebug_disconnect(ctx, false);
845 uloop_timeout_cancel(&ctx->reconnect);
846
847 while (!list_empty(&ctx->local_rings)) {
848 buf = list_first_entry(&ctx->local_rings, struct udebug_buf, list);
849 udebug_buf_free(buf);
850 }
851
852 avl_for_each_element_safe(&ctx->remote_rings, rb, node, tmp)
853 udebug_remote_buf_unmap(ctx, rb);
854 }