ucode-mod-bpf: add new package for a ucode libbpf binding
package/utils/ucode-mod-bpf/src/bpf.c
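The module exposes the functions registered at the bottom of this file (open_module(), open_map(), open_program(), tc_detach(), error(), set_debug_handler(), plus the per-resource methods). The file-header comment below adds a minimal usage sketch; the module name "bpf", the object path, the program name and the map name are illustrative assumptions, not part of this patch.

/*
 * ucode libbpf binding.
 *
 * Illustrative usage from a ucode script (module name, paths, program
 * and map names are placeholders):
 *
 *	let bpf = require("bpf");
 *
 *	let mod = bpf.open_module("/lib/bpf/example.o", {
 *		"program-type": {
 *			classifier: bpf.BPF_PROG_TYPE_SCHED_CLS,
 *		},
 *	});
 *
 *	let prog = mod.get_program("classifier");
 *	prog.tc_attach("eth0", "ingress", 0x300);
 *
 *	let map = mod.get_map("counters");
 *	map.set(1, 0);		// integers allowed for 4/8 byte keys/values
 *	let val = map.get(1);
 *	let err = bpf.error();
 */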
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>

#include <stdint.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "ucode/module.h"

#define err_return_int(err, ...) do { set_error(err, __VA_ARGS__); return -1; } while(0)
#define err_return(err, ...) do { set_error(err, __VA_ARGS__); return NULL; } while(0)
#define TRUE ucv_boolean_new(true)

static uc_resource_type_t *module_type, *map_type, *map_iter_type, *program_type;
static uc_value_t *registry;
static uc_vm_t *debug_vm;

static struct {
	int code;
	char *msg;
} last_error;

struct uc_bpf_fd {
	int fd;
	bool close;
};

struct uc_bpf_map {
	struct uc_bpf_fd fd; /* must be first */
	unsigned int key_size, val_size;
};

struct uc_bpf_map_iter {
	int fd;
	unsigned int key_size;
	bool has_next;
	uint8_t key[];
};

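/*
 * Error reporting: native failures are stored in last_error and exposed
 * to scripts through the global error() function below; each failing
 * call returns null and leaves the error to be fetched (and cleared)
 * there.
 */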
__attribute__((format(printf, 2, 3))) static void
set_error(int errcode, const char *fmt, ...)
{
	va_list ap;

	free(last_error.msg);

	last_error.code = errcode;
	last_error.msg = NULL;

	if (fmt) {
		va_start(ap, fmt);
		xvasprintf(&last_error.msg, fmt, ap);
		va_end(ap);
	}
}

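/*
 * Lift the memlock rlimit before loading anything: on kernels without
 * memcg-based accounting, BPF maps and programs are charged against
 * RLIMIT_MEMLOCK.
 */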
static void init_env(void)
{
	static bool init_done = false;
	struct rlimit limit = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	if (init_done)
		return;

	setrlimit(RLIMIT_MEMLOCK, &limit);
	init_done = true;
}

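/*
 * error([numeric]): return the last error as a string (or as the raw
 * error code if the argument is truish) and clear it; returns null if
 * no error is pending.
 */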
static uc_value_t *
uc_bpf_error(uc_vm_t *vm, size_t nargs)
{
	uc_value_t *numeric = uc_fn_arg(0);
	const char *msg = last_error.msg;
	int code = last_error.code;
	uc_stringbuf_t *buf;
	const char *s;

	if (last_error.code == 0)
		return NULL;

	if (ucv_is_truish(numeric)) {
		set_error(0, NULL);
		return ucv_int64_new(code);
	}

	buf = ucv_stringbuf_new();
	if (code < 0 && msg) {
		ucv_stringbuf_addstr(buf, msg, strlen(msg));
	} else {
		s = strerror(code);
		ucv_stringbuf_addstr(buf, s, strlen(s));
		if (msg)
			ucv_stringbuf_printf(buf, ": %s", msg);
	}

	/* clear the stored error only after msg has been copied out */
	set_error(0, NULL);

	return ucv_stringbuf_finish(buf);
}

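/*
 * Apply the optional second argument of open_module() before the object
 * is loaded: "rodata" replaces the initial contents of the .rodata map,
 * "program-type" sets the program type of the named programs.
 */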
static int
uc_bpf_module_set_opts(struct bpf_object *obj, uc_value_t *opts)
{
	uc_value_t *val;

	if (!opts)
		return 0;

	if (ucv_type(opts) != UC_OBJECT)
		err_return_int(EINVAL, "options argument");

	if ((val = ucv_object_get(opts, "rodata", NULL)) != NULL) {
		struct bpf_map *map = NULL;

		if (ucv_type(val) != UC_STRING)
			err_return_int(EINVAL, "rodata type");

		while ((map = bpf_object__next_map(obj, map)) != NULL) {
			if (!strstr(bpf_map__name(map), ".rodata"))
				continue;

			break;
		}

		if (!map)
			err_return_int(errno, "rodata map");

		if (bpf_map__set_initial_value(map, ucv_string_get(val),
					       ucv_string_length(val)))
			err_return_int(errno, "rodata");
	}

	if ((val = ucv_object_get(opts, "program-type", NULL)) != NULL) {
		if (ucv_type(val) != UC_OBJECT)
			err_return_int(EINVAL, "program-type argument");

		ucv_object_foreach(val, name, type) {
			struct bpf_program *prog;

			if (ucv_type(type) != UC_INTEGER)
				err_return_int(EINVAL, "program %s type", name);

			prog = bpf_object__find_program_by_name(obj, name);
			if (!prog)
				err_return_int(-1, "program %s not found", name);

			bpf_program__set_type(prog, ucv_int64_get(type));
		}
	}

	return 0;
}

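/* open_module(path[, opts]): open, configure and load a BPF object file */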
static uc_value_t *
uc_bpf_open_module(uc_vm_t *vm, size_t nargs)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, bpf_opts);
	uc_value_t *path = uc_fn_arg(0);
	uc_value_t *opts = uc_fn_arg(1);
	struct bpf_object *obj;

	if (ucv_type(path) != UC_STRING)
		err_return(EINVAL, "module path");

	init_env();
	obj = bpf_object__open_file(ucv_string_get(path), &bpf_opts);
	if (libbpf_get_error(obj))
		err_return(errno, NULL);

	if (uc_bpf_module_set_opts(obj, opts)) {
		bpf_object__close(obj);
		return NULL;
	}

	if (bpf_object__load(obj)) {
		bpf_object__close(obj);
		err_return(errno, NULL);
	}

	return uc_resource_new(module_type, obj);
}

static uc_value_t *
uc_bpf_map_create(int fd, unsigned int key_size, unsigned int val_size, bool close)
{
	struct uc_bpf_map *uc_map;

	uc_map = xalloc(sizeof(*uc_map));
	uc_map->fd.fd = fd;
	uc_map->key_size = key_size;
	uc_map->val_size = val_size;
	uc_map->fd.close = close;

	return uc_resource_new(map_type, uc_map);
}

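/* open_map(path): open a pinned map and look up its key/value sizes */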
static uc_value_t *
uc_bpf_open_map(uc_vm_t *vm, size_t nargs)
{
	struct bpf_map_info info;
	uc_value_t *path = uc_fn_arg(0);
	__u32 len = sizeof(info);
	int err;
	int fd;

	if (ucv_type(path) != UC_STRING)
		err_return(EINVAL, "map path");

	fd = bpf_obj_get(ucv_string_get(path));
	if (fd < 0)
		err_return(errno, NULL);

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err) {
		close(fd);
		err_return(errno, NULL);
	}

	return uc_bpf_map_create(fd, info.key_size, info.value_size, true);
}

static uc_value_t *
uc_bpf_open_program(uc_vm_t *vm, size_t nargs)
{
	uc_value_t *path = uc_fn_arg(0);
	struct uc_bpf_fd *f;
	int fd;

	if (ucv_type(path) != UC_STRING)
		err_return(EINVAL, "program path");

	fd = bpf_obj_get(ucv_string_get(path));
	if (fd < 0)
		err_return(errno, NULL);

	f = xalloc(sizeof(*f));
	f->fd = fd;
	f->close = true;

	return uc_resource_new(program_type, f);
}

static uc_value_t *
uc_bpf_module_get_maps(uc_vm_t *vm, size_t nargs)
{
	struct bpf_object *obj = uc_fn_thisval("bpf.module");
	struct bpf_map *map = NULL;
	uc_value_t *rv;
	int i = 0;

	if (!obj)
		err_return(EINVAL, NULL);

	rv = ucv_array_new(vm);
	bpf_object__for_each_map(map, obj)
		ucv_array_set(rv, i++, ucv_string_new(bpf_map__name(map)));

	return rv;
}

static uc_value_t *
uc_bpf_module_get_map(uc_vm_t *vm, size_t nargs)
{
	struct bpf_object *obj = uc_fn_thisval("bpf.module");
	struct bpf_map *map;
	uc_value_t *name = uc_fn_arg(0);
	int fd;

	if (!obj || ucv_type(name) != UC_STRING)
		err_return(EINVAL, NULL);

	map = bpf_object__find_map_by_name(obj, ucv_string_get(name));
	if (!map)
		err_return(errno, NULL);

	fd = bpf_map__fd(map);
	if (fd < 0)
		err_return(EINVAL, NULL);

	return uc_bpf_map_create(fd, bpf_map__key_size(map), bpf_map__value_size(map), false);
}

static uc_value_t *
uc_bpf_module_get_programs(uc_vm_t *vm, size_t nargs)
{
	struct bpf_object *obj = uc_fn_thisval("bpf.module");
	struct bpf_program *prog = NULL;
	uc_value_t *rv;
	int i = 0;

	if (!obj)
		err_return(EINVAL, NULL);

	rv = ucv_array_new(vm);
	bpf_object__for_each_program(prog, obj)
		ucv_array_set(rv, i++, ucv_string_new(bpf_program__name(prog)));

	return rv;
}

static uc_value_t *
uc_bpf_module_get_program(uc_vm_t *vm, size_t nargs)
{
	struct bpf_object *obj = uc_fn_thisval("bpf.module");
	struct bpf_program *prog;
	uc_value_t *name = uc_fn_arg(0);
	struct uc_bpf_fd *f;
	int fd;

	if (!obj || !name || ucv_type(name) != UC_STRING)
		err_return(EINVAL, NULL);

	prog = bpf_object__find_program_by_name(obj, ucv_string_get(name));
	if (!prog)
		err_return(errno, NULL);

	fd = bpf_program__fd(prog);
	if (fd < 0)
		err_return(EINVAL, NULL);

	f = xalloc(sizeof(*f));
	f->fd = fd;

	return uc_resource_new(program_type, f);
}

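/*
 * Convert a script value into a fixed-size key/value buffer: strings
 * must match the expected size exactly; integers are only accepted for
 * 4- and 8-byte fields and are returned via a static scratch buffer.
 */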
static void *
uc_bpf_map_arg(uc_value_t *val, const char *kind, unsigned int size)
{
	static union {
		uint32_t u32;
		uint64_t u64;
	} val_int;

	switch (ucv_type(val)) {
	case UC_INTEGER:
		if (size == 4)
			val_int.u32 = ucv_int64_get(val);
		else if (size == 8)
			val_int.u64 = ucv_int64_get(val);
		else
			break;

		return &val_int;
	case UC_STRING:
		if (size != ucv_string_length(val))
			break;

		return ucv_string_get(val);
	default:
		err_return(EINVAL, "%s type", kind);
	}

	err_return(EINVAL, "%s size mismatch (expected: %u)", kind, size);
}

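/* map element accessors: get(key), set(key, val[, flags]), delete(key[, return_old]) */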
static uc_value_t *
uc_bpf_map_get(uc_vm_t *vm, size_t nargs)
{
	struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
	uc_value_t *a_key = uc_fn_arg(0);
	void *key, *val;

	if (!map)
		err_return(EINVAL, NULL);

	key = uc_bpf_map_arg(a_key, "key", map->key_size);
	if (!key)
		return NULL;

	val = alloca(map->val_size);
	if (bpf_map_lookup_elem(map->fd.fd, key, val))
		return NULL;

	return ucv_string_new_length(val, map->val_size);
}

static uc_value_t *
uc_bpf_map_set(uc_vm_t *vm, size_t nargs)
{
	struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
	uc_value_t *a_key = uc_fn_arg(0);
	uc_value_t *a_val = uc_fn_arg(1);
	uc_value_t *a_flags = uc_fn_arg(2);
	uint64_t flags;
	void *key, *val, *key_buf;

	if (!map)
		err_return(EINVAL, NULL);

	key_buf = alloca(map->key_size);
	key = uc_bpf_map_arg(a_key, "key", map->key_size);
	if (!key)
		return NULL;

	/*
	 * uc_bpf_map_arg() returns integer keys in a static scratch buffer
	 * that is shared with the value conversion below; copy the key out
	 * first so converting the value cannot clobber it.
	 */
	key = memcpy(key_buf, key, map->key_size);

	val = uc_bpf_map_arg(a_val, "value", map->val_size);
	if (!val)
		return NULL;

	if (!a_flags)
		flags = BPF_ANY;
	else if (ucv_type(a_flags) != UC_INTEGER)
		err_return(EINVAL, "flags");
	else
		flags = ucv_int64_get(a_flags);

	if (bpf_map_update_elem(map->fd.fd, key, val, flags))
		return NULL;

	return ucv_string_new_length(val, map->val_size);
}

static uc_value_t *
uc_bpf_map_delete(uc_vm_t *vm, size_t nargs)
{
	struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
	uc_value_t *a_key = uc_fn_arg(0);
	uc_value_t *a_return = uc_fn_arg(1);
	void *key, *val = NULL;
	int ret;

	if (!map)
		err_return(EINVAL, NULL);

	key = uc_bpf_map_arg(a_key, "key", map->key_size);
	if (!key)
		return NULL;

	if (!ucv_is_truish(a_return)) {
		ret = bpf_map_delete_elem(map->fd.fd, key);

		return ucv_boolean_new(ret == 0);
	}

	val = alloca(map->val_size);
	if (bpf_map_lookup_and_delete_elem(map->fd.fd, key, val))
		return NULL;

	return ucv_string_new_length(val, map->val_size);
}

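/*
 * delete_all([filter]): walk all keys and delete them; if a filter
 * callback is given, only keys for which it returns a truish value are
 * deleted. The next key is fetched before the current one is deleted so
 * the walk survives the deletion.
 */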
static uc_value_t *
uc_bpf_map_delete_all(uc_vm_t *vm, size_t nargs)
{
	struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
	uc_value_t *filter = uc_fn_arg(0);
	bool has_next;
	void *key, *next;

	if (!map)
		err_return(EINVAL, NULL);

	key = alloca(map->key_size);
	next = alloca(map->key_size);
	has_next = !bpf_map_get_next_key(map->fd.fd, NULL, next);
	while (has_next) {
		bool skip = false;

		memcpy(key, next, map->key_size);
		has_next = !bpf_map_get_next_key(map->fd.fd, next, next);

		if (ucv_is_callable(filter)) {
			uc_value_t *rv;

			uc_value_push(ucv_get(filter));
			uc_value_push(ucv_string_new_length((const char *)key, map->key_size));
			if (uc_call(1) != EXCEPTION_NONE)
				break;

			rv = uc_vm_stack_pop(vm);
			if (!rv)
				break;

			skip = !ucv_is_truish(rv);
			ucv_put(rv);
		}

		if (!skip)
			bpf_map_delete_elem(map->fd.fd, key);
	}

	return TRUE;
}

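/* iterator(): return a key iterator, consumed one key at a time via next()/next_int() */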
static uc_value_t *
uc_bpf_map_iterator(uc_vm_t *vm, size_t nargs)
{
	struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
	struct uc_bpf_map_iter *iter;

	if (!map)
		err_return(EINVAL, NULL);

	iter = xalloc(sizeof(*iter) + map->key_size);
	iter->fd = map->fd.fd;
	iter->key_size = map->key_size;
	iter->has_next = !bpf_map_get_next_key(iter->fd, NULL, &iter->key);

	return uc_resource_new(map_iter_type, iter);
}

static uc_value_t *
uc_bpf_map_iter_next(uc_vm_t *vm, size_t nargs)
{
	struct uc_bpf_map_iter *iter = uc_fn_thisval("bpf.map_iter");
	uc_value_t *rv;

	if (!iter || !iter->has_next)
		return NULL;

	rv = ucv_string_new_length((const char *)iter->key, iter->key_size);
	iter->has_next = !bpf_map_get_next_key(iter->fd, &iter->key, &iter->key);

	return rv;
}

static uc_value_t *
uc_bpf_map_iter_next_int(uc_vm_t *vm, size_t nargs)
{
	struct uc_bpf_map_iter *iter = uc_fn_thisval("bpf.map_iter");
	uint64_t intval;
	uc_value_t *rv;

	if (!iter || !iter->has_next)
		return NULL;

	if (iter->key_size == 4)
		intval = *(uint32_t *)iter->key;
	else if (iter->key_size == 8)
		intval = *(uint64_t *)iter->key;
	else
		return NULL;

	rv = ucv_int64_new(intval);
	iter->has_next = !bpf_map_get_next_key(iter->fd, &iter->key, &iter->key);

	return rv;
}

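/*
 * foreach(callback): invoke the callback for every key; a callback
 * returning false stops the walk. Returns true if at least one
 * invocation completed without requesting a stop.
 */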
static uc_value_t *
uc_bpf_map_foreach(uc_vm_t *vm, size_t nargs)
{
	struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
	uc_value_t *func = uc_fn_arg(0);
	bool has_next;
	void *key, *next;
	bool ret = false;

	if (!map || !ucv_is_callable(func))
		err_return(EINVAL, NULL);

	key = alloca(map->key_size);
	next = alloca(map->key_size);
	has_next = !bpf_map_get_next_key(map->fd.fd, NULL, next);

	while (has_next) {
		uc_value_t *rv;
		bool stop;

		memcpy(key, next, map->key_size);
		has_next = !bpf_map_get_next_key(map->fd.fd, next, next);

		uc_value_push(ucv_get(func));
		uc_value_push(ucv_string_new_length((const char *)key, map->key_size));

		if (uc_call(1) != EXCEPTION_NONE)
			break;

		rv = uc_vm_stack_pop(vm);
		stop = (ucv_type(rv) == UC_BOOLEAN && !ucv_boolean_get(rv));
		ucv_put(rv);

		if (stop)
			break;

		ret = true;
	}

	return ucv_boolean_new(ret);
}

static uc_value_t *
uc_bpf_obj_pin(uc_vm_t *vm, size_t nargs, const char *type)
{
	struct uc_bpf_fd *f = uc_fn_thisval(type);
	uc_value_t *path = uc_fn_arg(0);

	if (!f || ucv_type(path) != UC_STRING)
		err_return(EINVAL, NULL);

	if (bpf_obj_pin(f->fd, ucv_string_get(path)))
		err_return(errno, NULL);

	return TRUE;
}

static uc_value_t *
uc_bpf_program_pin(uc_vm_t *vm, size_t nargs)
{
	return uc_bpf_obj_pin(vm, nargs, "bpf.program");
}

static uc_value_t *
uc_bpf_map_pin(uc_vm_t *vm, size_t nargs)
{
	return uc_bpf_obj_pin(vm, nargs, "bpf.map");
}

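/*
 * Shared helper for tc_attach()/tc_detach(): ensure a clsact hook on the
 * interface, drop any existing filter with the same priority, then attach
 * the program when a valid fd is passed (fd < 0 means detach only).
 */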
static uc_value_t *
uc_bpf_set_tc_hook(uc_value_t *ifname, uc_value_t *type, uc_value_t *prio,
		   int fd)
{
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, attach_tc,
			    .handle = 1);
	const char *type_str;
	uint64_t prio_val;

	if (ucv_type(ifname) != UC_STRING || ucv_type(type) != UC_STRING ||
	    ucv_type(prio) != UC_INTEGER)
		err_return(EINVAL, NULL);

	prio_val = ucv_int64_get(prio);
	if (prio_val > 0xffff)
		err_return(EINVAL, NULL);

	type_str = ucv_string_get(type);
	if (!strcmp(type_str, "ingress"))
		hook.attach_point = BPF_TC_INGRESS;
	else if (!strcmp(type_str, "egress"))
		hook.attach_point = BPF_TC_EGRESS;
	else
		err_return(EINVAL, NULL);

	hook.ifindex = if_nametoindex(ucv_string_get(ifname));
	if (!hook.ifindex)
		goto error;

	bpf_tc_hook_create(&hook);
	attach_tc.priority = prio_val;
	if (bpf_tc_detach(&hook, &attach_tc) < 0 && fd < 0)
		goto error;

	if (fd < 0)
		goto out;

	attach_tc.prog_fd = fd;
	if (bpf_tc_attach(&hook, &attach_tc) < 0)
		goto error;

out:
	return TRUE;

error:
	if (fd >= 0)
		err_return(ENOENT, NULL);
	return NULL;
}

static uc_value_t *
uc_bpf_program_tc_attach(uc_vm_t *vm, size_t nargs)
{
	struct uc_bpf_fd *f = uc_fn_thisval("bpf.program");
	uc_value_t *ifname = uc_fn_arg(0);
	uc_value_t *type = uc_fn_arg(1);
	uc_value_t *prio = uc_fn_arg(2);

	if (!f)
		err_return(EINVAL, NULL);

	return uc_bpf_set_tc_hook(ifname, type, prio, f->fd);
}

static uc_value_t *
uc_bpf_tc_detach(uc_vm_t *vm, size_t nargs)
{
	uc_value_t *ifname = uc_fn_arg(0);
	uc_value_t *type = uc_fn_arg(1);
	uc_value_t *prio = uc_fn_arg(2);

	return uc_bpf_set_tc_hook(ifname, type, prio, -1);
}

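/*
 * Forward libbpf log output to the script handler registered via
 * set_debug_handler(); the handler is called as handler(level, message).
 */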
static int
uc_bpf_debug_print(enum libbpf_print_level level, const char *format,
		   va_list args)
{
	char buf[256], *str = NULL;
	uc_value_t *val;
	va_list ap;
	int size;

	va_copy(ap, args);
	size = vsnprintf(buf, sizeof(buf), format, ap);
	va_end(ap);

	if (size > 0 && (unsigned long)size < ARRAY_SIZE(buf) - 1) {
		val = ucv_string_new(buf);
		goto out;
	}

	if (vasprintf(&str, format, args) < 0)
		return 0;

	val = ucv_string_new(str);
	free(str);

out:
	uc_vm_stack_push(debug_vm, ucv_get(ucv_array_get(registry, 0)));
	uc_vm_stack_push(debug_vm, ucv_int64_new(level));
	uc_vm_stack_push(debug_vm, val);
	if (uc_vm_call(debug_vm, false, 2) == EXCEPTION_NONE)
		ucv_put(uc_vm_stack_pop(debug_vm));

	return 0;
}

static uc_value_t *
uc_bpf_set_debug_handler(uc_vm_t *vm, size_t nargs)
{
	uc_value_t *handler = uc_fn_arg(0);

	if (handler && !ucv_is_callable(handler))
		err_return(EINVAL, NULL);

	debug_vm = vm;
	libbpf_set_print(handler ? uc_bpf_debug_print : NULL);

	ucv_array_set(registry, 0, ucv_get(handler));

	return NULL;
}

static void
register_constants(uc_vm_t *vm, uc_value_t *scope)
{
#define ADD_CONST(x) ucv_object_add(scope, #x, ucv_int64_new(x))
	ADD_CONST(BPF_PROG_TYPE_SCHED_CLS);
	ADD_CONST(BPF_PROG_TYPE_SCHED_ACT);

	ADD_CONST(BPF_ANY);
	ADD_CONST(BPF_NOEXIST);
	ADD_CONST(BPF_EXIST);
	ADD_CONST(BPF_F_LOCK);
}

static const uc_function_list_t module_fns[] = {
	{ "get_map", uc_bpf_module_get_map },
	{ "get_maps", uc_bpf_module_get_maps },
	{ "get_programs", uc_bpf_module_get_programs },
	{ "get_program", uc_bpf_module_get_program },
};

static void module_free(void *ptr)
{
	struct bpf_object *obj = ptr;

	bpf_object__close(obj);
}

static const uc_function_list_t map_fns[] = {
	{ "pin", uc_bpf_map_pin },
	{ "get", uc_bpf_map_get },
	{ "set", uc_bpf_map_set },
	{ "delete", uc_bpf_map_delete },
	{ "delete_all", uc_bpf_map_delete_all },
	{ "foreach", uc_bpf_map_foreach },
	{ "iterator", uc_bpf_map_iterator },
};

static void uc_bpf_fd_free(void *ptr)
{
	struct uc_bpf_fd *f = ptr;

	if (f->close)
		close(f->fd);
	free(f);
}

static const uc_function_list_t map_iter_fns[] = {
	{ "next", uc_bpf_map_iter_next },
	{ "next_int", uc_bpf_map_iter_next_int },
};

static const uc_function_list_t prog_fns[] = {
	{ "pin", uc_bpf_program_pin },
	{ "tc_attach", uc_bpf_program_tc_attach },
};

static const uc_function_list_t global_fns[] = {
	{ "error", uc_bpf_error },
	{ "set_debug_handler", uc_bpf_set_debug_handler },
	{ "open_module", uc_bpf_open_module },
	{ "open_map", uc_bpf_open_map },
	{ "open_program", uc_bpf_open_program },
	{ "tc_detach", uc_bpf_tc_detach },
};

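/*
 * Module entry point: register the global functions and constants and
 * declare the resource types used for modules, maps, map iterators and
 * programs.
 */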
void uc_module_init(uc_vm_t *vm, uc_value_t *scope)
{
	uc_function_list_register(scope, global_fns);
	register_constants(vm, scope);

	registry = ucv_array_new(vm);
	uc_vm_registry_set(vm, "bpf.registry", registry);

	module_type = uc_type_declare(vm, "bpf.module", module_fns, module_free);
	map_type = uc_type_declare(vm, "bpf.map", map_fns, uc_bpf_fd_free);
	map_iter_type = uc_type_declare(vm, "bpf.map_iter", map_iter_fns, free);
	program_type = uc_type_declare(vm, "bpf.program", prog_fns, uc_bpf_fd_free);
}