ustream: prevent recursive calls to the read callback
[project/libubox.git] / udebug-remote.c
1 /*
2 * udebug - debug ring buffer library
3 *
4 * Copyright (C) 2023 Felix Fietkau <nbd@nbd.name>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 #include "udebug-priv.h"
19
20 static int
21 udebug_remote_get_handle(struct udebug *ctx)
22 {
23 struct udebug_client_msg *msg;
24 struct udebug_client_msg send_msg = {
25 .type = CL_MSG_GET_HANDLE,
26 };
27
28 if (ctx->poll_handle >= 0 || !udebug_is_connected(ctx))
29 return 0;
30
31 msg = udebug_send_and_wait(ctx, &send_msg, NULL);
32 if (!msg)
33 return -1;
34
35 ctx->poll_handle = msg->id;
36 return 0;
37 }
38
39 struct udebug_remote_buf *udebug_remote_buf_get(struct udebug *ctx, uint32_t id)
40 {
41 struct udebug_remote_buf *rb;
42 void *key = (void *)(uintptr_t)id;
43
44 return avl_find_element(&ctx->remote_rings, key, rb, node);
45 }
46
47 int udebug_remote_buf_map(struct udebug *ctx, struct udebug_remote_buf *rb, uint32_t id)
48 {
49 void *key = (void *)(uintptr_t)id;
50 struct udebug_client_msg *msg;
51 struct udebug_client_msg send_msg = {
52 .type = CL_MSG_RING_GET,
53 .id = id,
54 };
55 int fd = -1;
56
57 if (rb->buf.data || !udebug_is_connected(ctx))
58 return -1;
59
60 msg = udebug_send_and_wait(ctx, &send_msg, &fd);
61 if (!msg || fd < 0)
62 return -1;
63
64 if (udebug_buf_open(&rb->buf, fd, msg->ring_size, msg->data_size)) {
65 fprintf(stderr, "failed to open fd %d, ring_size=%d, data_size=%d\n", fd, msg->ring_size, msg->data_size);
66 close(fd);
67 return -1;
68 }
69
70 rb->pcap_iface = ~0;
71 rb->node.key = key;
72 avl_insert(&ctx->remote_rings, &rb->node);
73
74 return 0;
75 }
76
77 void udebug_remote_buf_unmap(struct udebug *ctx, struct udebug_remote_buf *rb)
78 {
79 if (!rb->buf.data)
80 return;
81
82 avl_delete(&ctx->remote_rings, &rb->node);
83 udebug_buf_free(&rb->buf);
84 rb->poll = 0;
85 rb->node.key = NULL;
86 rb->pcap_iface = ~0;
87 }
88
89 int udebug_remote_buf_set_poll(struct udebug *ctx, struct udebug_remote_buf *rb, bool val)
90 {
91 int handle;
92
93 if (!rb->buf.data)
94 return -1;
95
96 if (rb->poll == val)
97 return 0;
98
99 rb->poll = val;
100 if (!val)
101 return 0;
102
103 handle = udebug_remote_get_handle(ctx);
104 if (handle < 0)
105 return -1;
106
107 __atomic_fetch_or(&rb->buf.hdr->notify, 1UL << handle, __ATOMIC_RELAXED);
108 return 0;
109 }
110
/*
 * Advance the reader position (rb->head) past ring entries that are
 * guaranteed to have been overwritten by the writer, based on the sampled
 * writer position 'head'.
 *
 * If data_start is non-NULL it receives the data-area offset of the entry
 * at the resulting read head; the offset is sampled before the entry is
 * validated, with a full barrier in between.
 */
static void
rbuf_advance_read_head(struct udebug_remote_buf *rb, uint32_t head,
		       uint32_t *data_start)
{
	struct udebug_hdr *hdr = rb->buf.hdr;
	/* oldest ring index that can still hold a live entry */
	uint32_t min_head = head + 1 - rb->buf.ring_size;
	/* oldest data-area offset not yet clobbered by new writes */
	uint32_t min_data = u32_get(&hdr->data_used) - rb->buf.data_size;
	struct udebug_ptr *last_ptr = udebug_ring_ptr(hdr, head - 1);

	/* before the first 32-bit head wrap (head_hi == 0) the ring may not
	 * have filled up yet, so the minimum valid index is simply 0 */
	if (!u32_get(&hdr->head_hi) && u32_sub(0, min_head) > 0)
		min_head = 0;

	/* advance head to skip over any entries that are guaranteed
	 * to be overwritten now. final check will be performed after
	 * data copying */

	if (u32_sub(rb->head, min_head) < 0)
		rb->head = min_head;

	for (size_t i = 0; i < rb->buf.ring_size; i++) {
		struct udebug_ptr *ptr = udebug_ring_ptr(hdr, rb->head);

		if (data_start) {
			*data_start = u32_get(&ptr->start);
			__sync_synchronize();
		}

		/* NOTE(review): entry looks newer than the sampled end of the
		 * range, i.e. it is apparently being rewritten concurrently;
		 * the same slot is re-examined on the next iteration without
		 * advancing rb->head — confirm this retry is intended */
		if (ptr->timestamp > last_ptr->timestamp)
			continue;

		/* entry data still intact -> stop skipping */
		if (u32_sub(ptr->start, min_data) > 0)
			break;

		rb->head++;
	}
}
147
148 void udebug_remote_buf_set_start_time(struct udebug_remote_buf *rb, uint64_t ts)
149 {
150 struct udebug_hdr *hdr = rb->buf.hdr;
151 uint32_t head = u32_get(&hdr->head);
152 uint32_t start = rb->head, end = head;
153 uint32_t diff;
154
155 if (!hdr)
156 return;
157
158 rbuf_advance_read_head(rb, head, NULL);
159 while ((diff = u32_sub(end, start)) > 0) {
160 uint32_t cur = start + diff / 2;
161 struct udebug_ptr *ptr;
162
163 ptr = udebug_ring_ptr(hdr, cur);
164 if (ptr->timestamp > ts)
165 end = cur - 1;
166 else
167 start = cur + 1;
168 }
169
170 rb->head = start;
171 }
172
173 void udebug_remote_buf_set_start_offset(struct udebug_remote_buf *rb, uint32_t idx)
174 {
175 if (!rb->buf.hdr)
176 return;
177
178 rb->head = rb->buf.hdr->head - idx;
179 }
180
181 void udebug_remote_buf_set_flags(struct udebug_remote_buf *rb, uint64_t mask, uint64_t set)
182 {
183 struct udebug_hdr *hdr = rb->buf.hdr;
184
185 if (!hdr)
186 return;
187
188 if ((uintptr_t)mask)
189 __atomic_and_fetch(&hdr->flags[0], (uintptr_t)~mask, __ATOMIC_RELAXED);
190 if ((uintptr_t)set)
191 __atomic_or_fetch(&hdr->flags[0], (uintptr_t)set, __ATOMIC_RELAXED);
192
193 if (sizeof(mask) == sizeof(unsigned long))
194 return;
195
196 mask >>= 32;
197 if ((uintptr_t)mask)
198 __atomic_and_fetch(&hdr->flags[1], (uintptr_t)~mask, __ATOMIC_RELAXED);
199 if ((uintptr_t)set)
200 __atomic_or_fetch(&hdr->flags[1], (uintptr_t)set, __ATOMIC_RELAXED);
201 }
202
/*
 * Copy all ring entries between the reader position (rb->head) and the
 * current writer position into a freshly allocated snapshot.
 * Returns NULL when there is no new data or the computed sizes are invalid;
 * on return rb->head is advanced to the sampled writer head.
 *
 * The writer may keep producing while we copy, so the copied range is
 * re-validated afterwards and entries whose data may have been clobbered
 * are dropped (counted in s->dropped).
 */
struct udebug_snapshot *
udebug_remote_buf_snapshot(struct udebug_remote_buf *rb)
{
	struct udebug_hdr *hdr = rb->buf.hdr;
	struct udebug_ptr *last_ptr;
	uint32_t data_start, data_end, data_used;
	struct udebug_snapshot *s = NULL;
	struct udebug_ptr *ptr_buf, *first_ptr;
	uint32_t data_size, ptr_size;
	uint32_t head, first_idx;
	uint32_t prev_read_head = rb->head;
	void *data_buf;

	if (!hdr)
		return NULL;

	/* sample the writer head and skip entries already overwritten */
	head = u32_get(&hdr->head);
	rbuf_advance_read_head(rb, head, &data_start);
	if (rb->head == head)
		return NULL;

	first_idx = rb->head;
	first_ptr = udebug_ring_ptr(hdr, first_idx);
	last_ptr = udebug_ring_ptr(hdr, head - 1);
	data_end = last_ptr->start + last_ptr->len;

	data_size = data_end - data_start;
	ptr_size = head - rb->head;
	if (data_size > rb->buf.data_size || ptr_size > rb->buf.ring_size) {
		fprintf(stderr, "Invalid data size: %x > %x, %x > %x\n", data_size, (int)rb->buf.data_size, ptr_size, (int)rb->buf.ring_size);
		goto out;
	}

	/* single allocation holding the snapshot struct, the entry array and
	 * the data copy — presumably freed as one block via free(s); verify
	 * against calloc_a() in libubox utils.h */
	s = calloc_a(sizeof(*s),
		     &ptr_buf, ptr_size * sizeof(*ptr_buf),
		     &data_buf, data_size);

	s->data = memcpy(data_buf, udebug_buf_ptr(&rb->buf, data_start), data_size);
	s->data_size = data_size;
	s->entries = ptr_buf;
	s->dropped = rb->head - prev_read_head;

	if (first_ptr > last_ptr) {
		/* the entry range wraps past the end of the pointer ring:
		 * copy it in two pieces */
		struct udebug_ptr *start_ptr = udebug_ring_ptr(hdr, 0);
		struct udebug_ptr *end_ptr = udebug_ring_ptr(hdr, rb->buf.ring_size - 1) + 1;
		uint32_t size = end_ptr - first_ptr;
		memcpy(s->entries, first_ptr, size * sizeof(*s->entries));
		memcpy(s->entries + size, start_ptr, (last_ptr + 1 - start_ptr) * sizeof(*s->entries));
	} else {
		memcpy(s->entries, first_ptr, (last_ptr + 1 - first_ptr) * sizeof(*s->entries));
	}

	/* get a snapshot of the counter that indicates how much data has been
	 * clobbered by newly added entries */
	__sync_synchronize();
	data_used = u32_get(&hdr->data_used) - rb->buf.data_size;

	s->n_entries = head - first_idx;

	/* re-check what survived the copy; if the reader fell behind by more
	 * than the copied range, the whole snapshot is stale */
	rbuf_advance_read_head(rb, head, NULL);
	if (s->n_entries < rb->head - first_idx) {
		free(s);
		s = NULL;
		goto out;
	}

	/* drop leading entries overwritten during the copy */
	s->entries += rb->head - first_idx;
	s->n_entries -= rb->head - first_idx;
	while (s->n_entries > 0 &&
	       u32_sub(s->entries[0].start, data_used) < 0) {
		s->entries++;
		s->n_entries--;
		s->dropped++;
	}

	/* rebase entry offsets onto the copied data area */
	for (size_t i = 0; i < s->n_entries; i++)
		s->entries[i].start -= data_start;

	s->format = hdr->format;
	s->sub_format = hdr->sub_format;
	s->rbuf_idx = (uint32_t)(uintptr_t)rb->node.key;

out:
	rb->head = head;
	return s;
}
289
290 bool udebug_snapshot_get_entry(struct udebug_snapshot *s, struct udebug_iter *it, unsigned int entry)
291 {
292 struct udebug_ptr *ptr;
293
294 it->len = 0;
295 if (entry >= s->n_entries)
296 goto error;
297
298 ptr = &s->entries[entry];
299 if (ptr->start > s->data_size || ptr->len > s->data_size ||
300 ptr->start + ptr->len > s->data_size)
301 goto error;
302
303 it->s = s;
304 it->data = s->data + ptr->start;
305 it->len = ptr->len;
306 it->timestamp = ptr->timestamp;
307 return true;
308
309 error:
310 it->data = NULL;
311 return false;
312 }
313
314 void udebug_iter_start(struct udebug_iter *it, struct udebug_snapshot **s, size_t n)
315 {
316 memset(it, 0, sizeof(*it));
317
318 it->list = s;
319 it->n = n;
320
321 for (size_t i = 0; i < it->n; i++)
322 it->list[i]->iter_idx = 0;
323 }
324
325 bool udebug_iter_next(struct udebug_iter *it)
326 {
327 while (1) {
328 struct udebug_snapshot *s;
329 uint64_t cur_ts;
330 int cur = -1;
331
332 for (size_t i = 0; i < it->n; i++) {
333 struct udebug_ptr *ptr;
334
335 s = it->list[i];
336 if (s->iter_idx >= s->n_entries)
337 continue;
338
339 ptr = &s->entries[s->iter_idx];
340 if (cur >= 0 && ptr->timestamp > cur_ts)
341 continue;
342
343 cur = i;
344 cur_ts = ptr->timestamp;
345 }
346
347 if (cur < 0)
348 return false;
349
350 s = it->list[cur];
351 it->s_idx = cur;
352 if (!udebug_snapshot_get_entry(s, it, s->iter_idx++))
353 continue;
354
355 return true;
356 }
357 }