udebug: add mips specific quirk
[project/libubox.git] / udebug.c
/*
 * udebug - debug ring buffer library
 *
 * Copyright (C) 2023 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <poll.h>
#include <time.h>
#include "udebug-priv.h"
#include "usock.h"

#define ALIGN(i, sz) (((i) + (sz) - 1) & ~((sz) - 1))

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#define UDEBUG_MIN_ALLOC_LEN 128
static struct blob_buf b;
static unsigned int page_size;

static void __randname(char *template)
{
	int i;
	struct timespec ts;
	unsigned long r;

	clock_gettime(CLOCK_REALTIME, &ts);
	r = ts.tv_sec + ts.tv_nsec;
	for (i = 0; i < 6; i++, r >>= 5)
		template[i] = 'A' + (r & 15) + (r & 16) * 2;
}

int udebug_id_cmp(const void *k1, const void *k2, void *ptr)
{
	uint32_t id1 = (uint32_t)(uintptr_t)k1, id2 = (uint32_t)(uintptr_t)k2;
	if (id1 < id2)
		return -1;
	else if (id1 > id2)
		return 1;
	return 0;
}

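/*
 * Create an anonymous shared-memory file descriptor: pick a random name,
 * shm_open() it with O_EXCL and immediately shm_unlink() it, so only the
 * returned fd keeps the object alive.
 */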
static inline int
shm_open_anon(char *name)
{
	char *template = name + strlen(name) - 6;
	int fd;

	if (template < name || memcmp(template, "XXXXXX", 6) != 0)
		return -1;

	for (int i = 0; i < 100; i++) {
		__randname(template);
		fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
		if (fd >= 0) {
			if (shm_unlink(name) < 0) {
				close(fd);
				continue;
			}
			return fd;
		}

		if (fd < 0 && errno != EEXIST)
			return -1;
	}

	return -1;
}

static void __udebug_disconnect(struct udebug *ctx, bool reconnect)
{
	uloop_fd_delete(&ctx->fd);
	close(ctx->fd.fd);
	ctx->fd.fd = -1;
	ctx->poll_handle = -1;
	if (ctx->reconnect.cb && reconnect)
		uloop_timeout_set(&ctx->reconnect, 1);
}

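/* wall-clock timestamp with microsecond resolution, scaled by UDEBUG_TS_SEC */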
uint64_t udebug_timestamp(void)
{
	struct timespec ts;
	uint64_t val;

	clock_gettime(CLOCK_REALTIME, &ts);

	val = ts.tv_sec;
	val *= UDEBUG_TS_SEC;
	val += ts.tv_nsec / 1000;

	return val;
}

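/*
 * Map a ring buffer: the header (including the pointer ring) is followed by
 * the data area mapped twice back to back, so entries that wrap around the
 * end of the buffer can still be accessed as one contiguous range. On MIPS,
 * extra padding is reserved so the fixed mappings can be aligned to avoid
 * data cache aliasing issues.
 */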
static int
__udebug_buf_map(struct udebug_buf *buf)
{
	unsigned int pad = 0;
	void *ptr, *ptr2;

#ifdef mips
	/* reserve room to align the fixed mappings (see udebug_init_page_size) */
	pad = page_size;
#endif
	ptr = mmap(NULL, buf->head_size + 2 * buf->data_size + pad, PROT_NONE,
		   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (ptr == MAP_FAILED)
		return -1;

#ifdef mips
	ptr = (void *)ALIGN((unsigned long)ptr, page_size);
#endif

	ptr2 = mmap(ptr, buf->head_size + buf->data_size,
		    PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, buf->fd, 0);
	if (ptr2 != ptr)
		goto err_unmap;

	/* second mapping of the data area, directly after the first one */
	ptr2 = mmap(ptr + buf->head_size + buf->data_size, buf->data_size,
		    PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, buf->fd,
		    buf->head_size);
	if (ptr2 != ptr + buf->head_size + buf->data_size)
		goto err_unmap;

	buf->hdr = ptr;
	buf->data = ptr + buf->head_size;
	return 0;

err_unmap:
	munmap(ptr, buf->head_size + 2 * buf->data_size);
	return -1;
}

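/*
 * Send the given iovec over the control socket, retrying on partial writes
 * and EINTR/EAGAIN. If sock_fd is valid, it is passed along as an SCM_RIGHTS
 * control message together with the first chunk of data.
 */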
static int
writev_retry(int fd, struct iovec *iov, int iov_len, int sock_fd)
{
	uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct msghdr msghdr = { 0 };
	struct cmsghdr *cmsg;
	int len = 0;
	int *pfd;

	msghdr.msg_iov = iov;
	msghdr.msg_iovlen = iov_len;
	msghdr.msg_control = fd_buf;
	msghdr.msg_controllen = sizeof(fd_buf);

	cmsg = CMSG_FIRSTHDR(&msghdr);
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));

	pfd = (int *) CMSG_DATA(cmsg);
	msghdr.msg_controllen = cmsg->cmsg_len;

	do {
		ssize_t cur_len;

		if (sock_fd < 0) {
			msghdr.msg_control = NULL;
			msghdr.msg_controllen = 0;
		} else {
			*pfd = sock_fd;
		}

		cur_len = sendmsg(fd, &msghdr, 0);
		if (cur_len < 0) {
			struct pollfd pfd = {
				.fd = fd,
				.events = POLLOUT
			};

			switch (errno) {
			case EAGAIN:
				poll(&pfd, 1, -1);
				break;
			case EINTR:
				break;
			default:
				return -1;
			}
			continue;
		}

		/* stop passing the fd once data has been sent */
		if (len > 0)
			sock_fd = -1;

		len += cur_len;
		while (cur_len >= (ssize_t) iov->iov_len) {
			cur_len -= iov->iov_len;
			iov_len--;
			iov++;
			if (!iov_len)
				return len;
		}
		iov->iov_base += cur_len;
		iov->iov_len -= cur_len;
		msghdr.msg_iov = iov;
		msghdr.msg_iovlen = iov_len;
	} while (1);

	/* never reached */
	return -1;
}

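/*
 * Receive exactly iov->iov_len bytes from the control socket. If recv_fd is
 * non-NULL, a file descriptor passed via SCM_RIGHTS along with the data is
 * returned through it; any unexpected descriptor is closed. Returns the
 * number of bytes read, 0 if no data was available in non-blocking mode,
 * -1 on timeout and -2 if the connection is gone.
 */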
static int
recv_retry(int fd, struct iovec *iov, bool wait, int *recv_fd)
{
	uint8_t fd_buf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct msghdr msghdr = { 0 };
	struct cmsghdr *cmsg;
	int total = 0;
	int bytes;
	int *pfd;

	msghdr.msg_iov = iov;
	msghdr.msg_iovlen = 1;
	msghdr.msg_control = fd_buf;
	msghdr.msg_controllen = sizeof(fd_buf);

	cmsg = CMSG_FIRSTHDR(&msghdr);
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));

	pfd = (int *) CMSG_DATA(cmsg);

	while (iov->iov_len > 0) {
		if (recv_fd) {
			msghdr.msg_control = fd_buf;
			msghdr.msg_controllen = cmsg->cmsg_len;
		} else {
			msghdr.msg_control = NULL;
			msghdr.msg_controllen = 0;
		}

		*pfd = -1;
		bytes = recvmsg(fd, &msghdr, 0);
		if (!bytes)
			return -2;
		if (bytes < 0) {
			bytes = 0;
			if (errno == EINTR)
				continue;

			if (errno != EAGAIN)
				return -2;
		}
		if (!wait && !bytes)
			return 0;

		if (recv_fd)
			*recv_fd = *pfd;
		else if (*pfd >= 0)
			close(*pfd);

		/* stop accepting a passed fd after the first chunk of data */
		if (bytes > 0)
			recv_fd = NULL;

		wait = true;
		iov->iov_len -= bytes;
		iov->iov_base += bytes;
		total += bytes;

		if (iov->iov_len > 0) {
			struct pollfd pfd = {
				.fd = fd,
				.events = POLLIN
			};
			int ret;
			do {
				ret = poll(&pfd, 1, UDEBUG_TIMEOUT);
			} while (ret < 0 && errno == EINTR);

			if (!(pfd.revents & POLLIN))
				return -1;
		}
	}

	return total;
}

static void
udebug_send_msg(struct udebug *ctx, struct udebug_client_msg *msg,
		struct blob_attr *meta, int fd)
{
	struct iovec iov[2] = {
		{ .iov_base = msg, .iov_len = sizeof(*msg) },
		{}
	};

	if (!meta) {
		blob_buf_init(&b, 0);
		meta = b.head;
	}

	iov[1].iov_base = meta;
	iov[1].iov_len = blob_pad_len(meta);
	writev_retry(ctx->fd.fd, iov, ARRAY_SIZE(iov), fd);
}

static bool
udebug_recv_msg(struct udebug *ctx, struct udebug_client_msg *msg, int *fd,
		bool wait)
{
	struct iovec iov = {
		.iov_base = msg,
		.iov_len = sizeof(*msg)
	};
	int ret;

	ret = recv_retry(ctx->fd.fd, &iov, wait, fd);
	if (ret == -2)
		__udebug_disconnect(ctx, true);

	return ret == sizeof(*msg);
}

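/*
 * Read messages from udebugd until something other than a ring notification
 * arrives (which is returned to the caller) or the socket runs dry. Ring
 * notifications update the notify mask of the corresponding remote ring and
 * invoke the registered notify callback.
 */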
static struct udebug_client_msg *
__udebug_poll(struct udebug *ctx, int *fd, bool wait)
{
	static struct udebug_client_msg msg = {};

	while (udebug_recv_msg(ctx, &msg, fd, wait)) {
		struct udebug_remote_buf *rb;
		void *key;

		if (msg.type != CL_MSG_RING_NOTIFY)
			return &msg;

		if (fd && *fd >= 0)
			close(*fd);

		if (!ctx->notify_cb)
			continue;

		key = (void *)(uintptr_t)msg.id;
		rb = avl_find_element(&ctx->remote_rings, key, rb, node);
		if (!rb || !rb->poll)
			continue;

		if (ctx->poll_handle >= 0)
			__atomic_fetch_or(&rb->buf.hdr->notify,
					  1UL << ctx->poll_handle,
					  __ATOMIC_RELAXED);
		ctx->notify_cb(ctx, rb);
	}

	return NULL;
}

static struct udebug_client_msg *
udebug_wait_for_response(struct udebug *ctx, struct udebug_client_msg *msg, int *rfd)
{
	int type = msg->type;
	int fd = -1;

	do {
		if (fd >= 0)
			close(fd);
		fd = -1;
		msg = __udebug_poll(ctx, &fd, true);
	} while (msg && msg->type != type);
	if (!msg)
		return NULL;

	if (rfd)
		*rfd = fd;
	else if (fd >= 0)
		close(fd);

	return msg;
}

static void
udebug_buf_msg(struct udebug_buf *buf, enum udebug_client_msg_type type)
{
	struct udebug_client_msg msg = {
		.type = type,
		.id = buf->id,
	};

	udebug_send_msg(buf->ctx, &msg, NULL, -1);
	udebug_wait_for_response(buf->ctx, &msg, NULL);
}

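/*
 * Size of the buffer header area: struct udebug_hdr followed by the ring of
 * entry pointers, rounded up to the page size.
 */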
static size_t __udebug_headsize(unsigned int ring_size)
{
	ring_size *= sizeof(struct udebug_ptr);
	return ALIGN(sizeof(struct udebug_hdr) + ring_size, page_size);
}

static void udebug_init_page_size(void)
{
	if (page_size)
		return;
	page_size = sysconf(_SC_PAGESIZE);
#ifdef mips
	/* leave extra alignment room to account for data cache aliases */
	if (page_size < 32 * 1024)
		page_size = 32 * 1024;
#endif
}

int udebug_buf_open(struct udebug_buf *buf, int fd, uint32_t ring_size, uint32_t data_size)
{
	udebug_init_page_size();
	INIT_LIST_HEAD(&buf->list);
	buf->fd = fd;
	buf->ring_size = ring_size;
	buf->head_size = __udebug_headsize(ring_size);
	buf->data_size = data_size;

	if (buf->ring_size > (1U << 24) || buf->data_size > (1U << 29))
		return -1;

	if (__udebug_buf_map(buf))
		return -1;

	if (buf->ring_size != buf->hdr->ring_size ||
	    buf->data_size != buf->hdr->data_size) {
		munmap(buf->hdr, buf->head_size + 2 * buf->data_size);
		buf->hdr = NULL;
		return -1;
	}

	return 0;
}

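/*
 * Create a new ring buffer backed by an anonymous shm fd. The data size and
 * entry count are rounded up to powers of two, and the entry count is grown
 * further as long as it still fits in the page-aligned header area.
 */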
int udebug_buf_init(struct udebug_buf *buf, size_t entries, size_t size)
{
	char filename[] = "/udebug.XXXXXX";
	unsigned int order = 12;
	uint8_t ring_order = 5;
	size_t head_size;
	int fd;

	udebug_init_page_size();
	INIT_LIST_HEAD(&buf->list);
	if (size < page_size)
		size = page_size;
	while (size > 1U << order)
		order++;
	size = 1 << order;
	while (entries > 1U << ring_order)
		ring_order++;
	entries = 1 << ring_order;

	if (size > (1U << 29) || entries > (1U << 24))
		return -1;

	head_size = __udebug_headsize(entries);
	while (ALIGN(sizeof(*buf->hdr) + (entries * 2) * sizeof(struct udebug_ptr), page_size) == head_size)
		entries *= 2;

	fd = shm_open_anon(filename);
	if (fd < 0)
		return -1;

	if (ftruncate(fd, head_size + size) < 0)
		goto err_close;

	buf->head_size = head_size;
	buf->data_size = size;
	buf->ring_size = entries;

	if (__udebug_buf_map(buf))
		goto err_close;

	buf->fd = fd;
	buf->hdr->ring_size = entries;
	buf->hdr->data_size = size;

	/* ensure hdr changes are visible */
	__sync_synchronize();

	return 0;

err_close:
	close(fd);
	return -1;
}

static void *udebug_buf_alloc(struct udebug_buf *buf, uint32_t ofs, uint32_t len)
{
	struct udebug_hdr *hdr = buf->hdr;

	hdr->data_used = u32_max(hdr->data_used, ofs + len + 1);

	/* ensure that data_used update is visible before clobbering data */
	__sync_synchronize();

	return udebug_buf_ptr(buf, ofs);
}

uint64_t udebug_buf_flags(struct udebug_buf *buf)
{
	struct udebug_hdr *hdr = buf->hdr;
	uint64_t flags;

	if (!hdr)
		return 0;

	flags = hdr->flags[0];
	if (sizeof(flags) != sizeof(uintptr_t))
		flags |= ((uint64_t)hdr->flags[1]) << 32;

	return flags;
}

void udebug_entry_init_ts(struct udebug_buf *buf, uint64_t timestamp)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;

	if (!hdr)
		return;

	ptr = udebug_ring_ptr(hdr, hdr->head);
	ptr->start = hdr->data_head;
	ptr->len = 0;
	ptr->timestamp = timestamp;
}

void *udebug_entry_append(struct udebug_buf *buf, const void *data, uint32_t len)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;
	uint32_t ofs;
	void *ret;

	if (!hdr)
		return NULL;

	ptr = udebug_ring_ptr(hdr, hdr->head);
	ofs = ptr->start + ptr->len;
	if (ptr->len + len > buf->data_size / 2)
		return NULL;

	ret = udebug_buf_alloc(buf, ofs, len);
	if (data)
		memcpy(ret, data, len);
	ptr->len += len;

	return ret;
}

uint16_t udebug_entry_trim(struct udebug_buf *buf, uint16_t len)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;

	if (!hdr)
		return 0;

	ptr = udebug_ring_ptr(hdr, hdr->head);
	if (len)
		ptr->len -= len;

	return ptr->len;
}

void udebug_entry_set_length(struct udebug_buf *buf, uint16_t len)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;

	if (!hdr)
		return;

	ptr = udebug_ring_ptr(hdr, hdr->head);
	ptr->len = len;
}

int udebug_entry_printf(struct udebug_buf *buf, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = udebug_entry_vprintf(buf, fmt, ap);
	va_end(ap);

	return ret;
}

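/*
 * Format a string into the current entry. A small allocation is tried first;
 * if the formatted string does not fit, the allocation is grown to the exact
 * size and formatting is retried from a saved copy of the argument list.
 */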
int udebug_entry_vprintf(struct udebug_buf *buf, const char *fmt, va_list ap)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;
	uint32_t ofs;
	uint32_t len;
	va_list ap2;
	char *str;

	if (!hdr)
		return -1;

	ptr = udebug_ring_ptr(hdr, hdr->head);
	ofs = ptr->start + ptr->len;
	if (ptr->len > buf->data_size / 2)
		return -1;

	/* keep a copy of the argument list in case a second pass is needed */
	va_copy(ap2, ap);
	str = udebug_buf_alloc(buf, ofs, UDEBUG_MIN_ALLOC_LEN);
	len = vsnprintf(str, UDEBUG_MIN_ALLOC_LEN, fmt, ap);
	if (len < UDEBUG_MIN_ALLOC_LEN)
		goto out;

	if (ptr->len + len > buf->data_size / 2) {
		va_end(ap2);
		return -1;
	}

	/* first attempt was truncated, allocate the exact size and retry */
	udebug_buf_alloc(buf, ofs, len + 1);
	len = vsnprintf(str, len + 1, fmt, ap2);

out:
	va_end(ap2);
	ptr->len += len;
	return 0;
}

void udebug_entry_add(struct udebug_buf *buf)
{
	struct udebug_hdr *hdr = buf->hdr;
	struct udebug_ptr *ptr;
	uint32_t notify;
	uint8_t *data;

	if (!hdr)
		return;

	ptr = udebug_ring_ptr(hdr, hdr->head);

	/* ensure strings are always 0-terminated */
	data = udebug_buf_ptr(buf, ptr->start + ptr->len);
	*data = 0;
	hdr->data_head = ptr->start + ptr->len + 1;

	/* ensure that all data changes are visible before advancing head */
	__sync_synchronize();

	u32_set(&hdr->head, u32_get(&hdr->head) + 1);
	if (!u32_get(&hdr->head))
		u32_set(&hdr->head_hi, u32_get(&hdr->head_hi) + 1);

	/* ensure that head change is visible */
	__sync_synchronize();

	notify = __atomic_exchange_n(&hdr->notify, 0, __ATOMIC_RELAXED);
	if (notify) {
		struct udebug_client_msg msg = {
			.type = CL_MSG_RING_NOTIFY,
			.id = buf->id,
			.notify_mask = notify,
		};
		blob_buf_init(&b, 0);

		udebug_send_msg(buf->ctx, &msg, b.head, -1);
	}
}

void udebug_buf_free(struct udebug_buf *buf)
{
	struct udebug *ctx = buf->ctx;

	if (!list_empty(&buf->list) && buf->list.prev)
		list_del(&buf->list);

	if (ctx && ctx->fd.fd >= 0)
		udebug_buf_msg(buf, CL_MSG_RING_REMOVE);

	munmap(buf->hdr, buf->head_size + 2 * buf->data_size);
	close(buf->fd);
	memset(buf, 0, sizeof(*buf));
}

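/*
 * Announce a ring to udebugd: send a CL_MSG_RING_ADD message with the ring
 * metadata (name and flags) attached as a blobmsg, pass the shm fd along via
 * SCM_RIGHTS and wait for the daemon's response.
 */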
static void
__udebug_buf_add(struct udebug *ctx, struct udebug_buf *buf)
{
	struct udebug_client_msg msg = {
		.type = CL_MSG_RING_ADD,
		.id = buf->id,
		.ring_size = buf->hdr->ring_size,
		.data_size = buf->hdr->data_size,
	};
	const struct udebug_buf_meta *meta = buf->meta;
	void *c;

	blob_buf_init(&b, 0);
	blobmsg_add_string(&b, "name", meta->name);
	c = blobmsg_open_array(&b, "flags");
	for (size_t i = 0; i < meta->n_flags; i++) {
		const struct udebug_buf_flag *flag = &meta->flags[i];
		void *e = blobmsg_open_array(&b, NULL);
		blobmsg_add_string(&b, NULL, flag->name);
		blobmsg_add_u64(&b, NULL, flag->mask);
		blobmsg_close_array(&b, e);
	}
	blobmsg_close_array(&b, c);

	udebug_send_msg(ctx, &msg, b.head, buf->fd);
	udebug_wait_for_response(ctx, &msg, NULL);
}

int udebug_buf_add(struct udebug *ctx, struct udebug_buf *buf,
		   const struct udebug_buf_meta *meta)
{
	if (!buf->hdr)
		return -1;

	list_add_tail(&buf->list, &ctx->local_rings);
	buf->ctx = ctx;
	buf->meta = meta;
	buf->id = ctx->next_id++;
	buf->hdr->format = meta->format;
	buf->hdr->sub_format = meta->sub_format;

	if (ctx->fd.fd >= 0)
		__udebug_buf_add(ctx, buf);

	return 0;
}

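/*
 * Rough usage sketch for a producer, using only functions from this file;
 * the buffer sizes and the meta fields below are illustrative, not
 * prescriptive:
 *
 *	static struct udebug ud;
 *	static struct udebug_buf udb;
 *	static const struct udebug_buf_meta meta = { .name = "example" };
 *
 *	udebug_init(&ud);
 *	udebug_auto_connect(&ud, NULL);
 *	udebug_buf_init(&udb, 64, 65536);
 *	udebug_buf_add(&ud, &udb, &meta);
 *
 *	udebug_entry_init_ts(&udb, udebug_timestamp());
 *	udebug_entry_printf(&udb, "example entry %d", 1);
 *	udebug_entry_add(&udb);
 */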
void udebug_init(struct udebug *ctx)
{
	INIT_LIST_HEAD(&ctx->local_rings);
	avl_init(&ctx->remote_rings, udebug_id_cmp, true, NULL);
	ctx->fd.fd = -1;
	ctx->poll_handle = -1;
}

static void udebug_reconnect_cb(struct uloop_timeout *t)
{
	struct udebug *ctx = container_of(t, struct udebug, reconnect);

	if (udebug_connect(ctx, ctx->socket_path) < 0) {
		uloop_timeout_set(&ctx->reconnect, 1000);
		return;
	}

	udebug_add_uloop(ctx);
}

void udebug_auto_connect(struct udebug *ctx, const char *path)
{
	free(ctx->socket_path);
	ctx->reconnect.cb = udebug_reconnect_cb;
	ctx->socket_path = path ? strdup(path) : NULL;
	if (ctx->fd.fd >= 0)
		return;

	udebug_reconnect_cb(&ctx->reconnect);
}

int udebug_connect(struct udebug *ctx, const char *path)
{
	struct udebug_remote_buf *rb;
	struct udebug_buf *buf;

	if (ctx->fd.fd >= 0)
		close(ctx->fd.fd);
	ctx->fd.fd = -1;

	if (!path)
		path = UDEBUG_SOCK_NAME;

	ctx->fd.fd = usock(USOCK_UNIX, path, NULL);
	if (ctx->fd.fd < 0)
		return -1;

	/* re-announce local rings and re-arm polling of remote rings */
	list_for_each_entry(buf, &ctx->local_rings, list)
		__udebug_buf_add(ctx, buf);

	avl_for_each_element(&ctx->remote_rings, rb, node) {
		if (!rb->poll)
			continue;

		rb->poll = false;
		udebug_remote_buf_set_poll(ctx, rb, true);
	}

	return 0;
}

void udebug_poll(struct udebug *ctx)
{
	while (__udebug_poll(ctx, NULL, false));
}

struct udebug_client_msg *
udebug_send_and_wait(struct udebug *ctx, struct udebug_client_msg *msg, int *rfd)
{
	udebug_send_msg(ctx, msg, NULL, -1);

	return udebug_wait_for_response(ctx, msg, rfd);
}

static void udebug_fd_cb(struct uloop_fd *fd, unsigned int events)
{
	struct udebug *ctx = container_of(fd, struct udebug, fd);

	if (fd->eof)
		__udebug_disconnect(ctx, true);

	udebug_poll(ctx);
}

void udebug_add_uloop(struct udebug *ctx)
{
	if (ctx->fd.registered)
		return;

	ctx->fd.cb = udebug_fd_cb;
	uloop_fd_add(&ctx->fd, ULOOP_READ);
}

void udebug_free(struct udebug *ctx)
{
	struct udebug_remote_buf *rb, *tmp;
	struct udebug_buf *buf;

	free(ctx->socket_path);
	ctx->socket_path = NULL;

	__udebug_disconnect(ctx, false);
	uloop_timeout_cancel(&ctx->reconnect);

	while (!list_empty(&ctx->local_rings)) {
		buf = list_first_entry(&ctx->local_rings, struct udebug_buf, list);
		udebug_buf_free(buf);
	}

	avl_for_each_element_safe(&ctx->remote_rings, rb, node, tmp)
		udebug_remote_buf_unmap(ctx, rb);
}